Import Floodlight v0.90
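
This drop adds the Floodlight core under src/ext/floodlight: the module
system (IFloodlightModule, FloodlightModuleContext), the controller service
(IFloodlightProviderService), and the switch and listener abstractions
(IOFSwitch, IOFMessageListener, FloodlightContext), plus supporting classes.
As a rough sketch of how downstream code is expected to consume these
interfaces -- illustrative only, not part of the imported tree, with a
hypothetical class and package name -- a minimal PACKET_IN listener built
against them would look like:

    // Illustrative sketch only; not part of the Floodlight v0.90 import.
    package net.floodlightcontroller.example;   // hypothetical package

    import net.floodlightcontroller.core.FloodlightContext;
    import net.floodlightcontroller.core.IFloodlightProviderService;
    import net.floodlightcontroller.core.IOFMessageListener;
    import net.floodlightcontroller.core.IOFSwitch;
    import net.floodlightcontroller.packet.Ethernet;
    import org.openflow.protocol.OFMessage;
    import org.openflow.protocol.OFType;
    import org.openflow.util.HexString;

    public class PacketInLogger implements IOFMessageListener {
        @Override
        public String getName() {
            return "packetInLogger";
        }

        @Override
        public boolean isCallbackOrderingPrereq(OFType type, String name) {
            return false;   // no ordering constraints on other listeners
        }

        @Override
        public boolean isCallbackOrderingPostreq(OFType type, String name) {
            return false;
        }

        @Override
        public Command receive(IOFSwitch sw, OFMessage msg, FloodlightContext cntx) {
            // The controller stores the parsed PACKET_IN payload in the per-event
            // context under IFloodlightProviderService.CONTEXT_PI_PAYLOAD.
            Ethernet eth = IFloodlightProviderService.bcStore.get(
                    cntx, IFloodlightProviderService.CONTEXT_PI_PAYLOAD);
            if (eth != null) {
                System.out.println("PACKET_IN from " + sw.getStringId() + " src="
                        + HexString.toHexString(eth.getSourceMACAddress()));
            }
            return Command.CONTINUE;
        }
    }

Such a listener would be registered via
IFloodlightProviderService.addOFMessageListener(OFType.PACKET_IN, listener),
typically from a module's startUp(); see FloodlightProvider below for how the
core module itself is wired into the module loader.
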
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/FloodlightContext.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/FloodlightContext.java
new file mode 100644
index 0000000..aa4fe6b
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/FloodlightContext.java
@@ -0,0 +1,35 @@
+/**
+*    Copyright 2011, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.core;
+
+import java.util.concurrent.ConcurrentHashMap;
+
+/**
+ * This is a context object where floodlight listeners can register 
+ * and later retrieve context information associated with an
+ * event
+ * @author readams
+ */
+public class FloodlightContext {
+    protected ConcurrentHashMap<String, Object> storage =
+            new ConcurrentHashMap<String, Object>();
+
+    public ConcurrentHashMap<String, Object> getStorage() {
+        return storage;
+    }
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/FloodlightContextStore.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/FloodlightContextStore.java
new file mode 100644
index 0000000..5455284
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/FloodlightContextStore.java
@@ -0,0 +1,34 @@
+/**
+*    Copyright 2011, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.core;
+
+public class FloodlightContextStore<V> {
+    
+    @SuppressWarnings("unchecked")
+    public V get(FloodlightContext bc, String key) {
+        return (V)bc.storage.get(key);
+    }
+    
+    public void put(FloodlightContext bc, String key, V value) {
+        bc.storage.put(key, value);
+    }
+    
+    public void remove(FloodlightContext bc, String key) {
+        bc.storage.remove(key);
+    }
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/FloodlightProvider.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/FloodlightProvider.java
new file mode 100644
index 0000000..347bf5b
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/FloodlightProvider.java
@@ -0,0 +1,74 @@
+package net.floodlightcontroller.core;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Map;
+
+import net.floodlightcontroller.core.internal.Controller;
+import net.floodlightcontroller.core.module.FloodlightModuleContext;
+import net.floodlightcontroller.core.module.FloodlightModuleException;
+import net.floodlightcontroller.core.module.IFloodlightModule;
+import net.floodlightcontroller.core.module.IFloodlightService;
+import net.floodlightcontroller.counter.ICounterStoreService;
+import net.floodlightcontroller.perfmon.IPktInProcessingTimeService;
+import net.floodlightcontroller.restserver.IRestApiService;
+import net.floodlightcontroller.storage.IStorageSourceService;
+import net.floodlightcontroller.threadpool.IThreadPoolService;
+
+public class FloodlightProvider implements IFloodlightModule {
+    Controller controller;
+    
+    @Override
+    public Collection<Class<? extends IFloodlightService>> getModuleServices() {
+        Collection<Class<? extends IFloodlightService>> services =
+                new ArrayList<Class<? extends IFloodlightService>>(1);
+        services.add(IFloodlightProviderService.class);
+        return services;
+    }
+
+    @Override
+    public Map<Class<? extends IFloodlightService>,
+               IFloodlightService> getServiceImpls() {
+        controller = new Controller();
+        
+        Map<Class<? extends IFloodlightService>,
+            IFloodlightService> m = 
+                new HashMap<Class<? extends IFloodlightService>,
+                            IFloodlightService>();
+        m.put(IFloodlightProviderService.class, controller);
+        return m;
+    }
+
+    @Override
+    public Collection<Class<? extends IFloodlightService>> getModuleDependencies() {
+        Collection<Class<? extends IFloodlightService>> dependencies =
+            new ArrayList<Class<? extends IFloodlightService>>(4);
+        dependencies.add(IStorageSourceService.class);
+        dependencies.add(IPktInProcessingTimeService.class);
+        dependencies.add(IRestApiService.class);
+        dependencies.add(ICounterStoreService.class);
+        dependencies.add(IThreadPoolService.class);
+        return dependencies;
+    }
+
+    @Override
+    public void init(FloodlightModuleContext context) throws FloodlightModuleException {
+       controller.setStorageSourceService(
+           context.getServiceImpl(IStorageSourceService.class));
+       controller.setPktInProcessingService(
+           context.getServiceImpl(IPktInProcessingTimeService.class));
+       controller.setCounterStore(
+           context.getServiceImpl(ICounterStoreService.class));
+       controller.setRestApiService(
+           context.getServiceImpl(IRestApiService.class));
+       controller.setThreadPoolService(
+           context.getServiceImpl(IThreadPoolService.class));
+       controller.init(context.getConfigParams(this));
+    }
+
+    @Override
+    public void startUp(FloodlightModuleContext context) {
+        controller.startupComponents();
+    }
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/IFloodlightProviderService.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/IFloodlightProviderService.java
new file mode 100644
index 0000000..1e3ec6f
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/IFloodlightProviderService.java
@@ -0,0 +1,210 @@
+/**
+ *    Copyright 2011, Big Switch Networks, Inc. 
+ *    Originally created by David Erickson, Stanford University
+ * 
+ *    Licensed under the Apache License, Version 2.0 (the "License"); you may
+ *    not use this file except in compliance with the License. You may obtain
+ *    a copy of the License at
+ *
+ *         http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *    Unless required by applicable law or agreed to in writing, software
+ *    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ *    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ *    License for the specific language governing permissions and limitations
+ *    under the License.
+ **/
+
+package net.floodlightcontroller.core;
+
+import java.util.List;
+import java.util.Map;
+
+import net.floodlightcontroller.core.module.IFloodlightService;
+import net.floodlightcontroller.packet.Ethernet;
+
+import org.openflow.protocol.OFMessage;
+import org.openflow.protocol.OFType;
+import org.openflow.protocol.factory.BasicFactory;
+
+/**
+ * The interface exposed by the core bundle that allows you to interact
+ * with connected switches.
+ *
+ * @author David Erickson (daviderickson@cs.stanford.edu)
+ */
+public interface IFloodlightProviderService extends IFloodlightService {
+
+    /**
+     * A value stored in the floodlight context containing a parsed packet
+     * representation of the payload of a packet-in message. 
+     */
+    public static final String CONTEXT_PI_PAYLOAD = 
+            "net.floodlightcontroller.core.IFloodlightProvider.piPayload";
+
+    /**
+     * The role of the controller as used by the OF 1.2 and OVS failover and
+     * load-balancing mechanism.
+     */
+    public static enum Role { EQUAL, MASTER, SLAVE };
+    
+    /**
+     * A FloodlightContextStore object that can be used to retrieve the 
+     * packet-in payload
+     */
+    public static final FloodlightContextStore<Ethernet> bcStore = 
+            new FloodlightContextStore<Ethernet>();
+
+    /**
+     * Adds an OpenFlow message listener
+     * @param type The OFType the component wants to listen for
+     * @param listener The component that wants to listen for the message
+     */
+    public void addOFMessageListener(OFType type, IOFMessageListener listener);
+
+    /**
+     * Removes an OpenFlow message listener
+     * @param type The OFType the component no longer wants to listen for
+     * @param listener The component that no longer wants to receive the message
+     */
+    public void removeOFMessageListener(OFType type, IOFMessageListener listener);
+    
+    /**
+     * Return a non-modifiable map of all currently registered message listeners
+     * @return the map of listeners, keyed by OFType
+     */
+    public Map<OFType, List<IOFMessageListener>> getListeners();
+
+    /**
+     * Returns an unmodifiable map of all actively connected OpenFlow switches. This doesn't
+     * contain switches that are connected but for which the controller is in the SLAVE role.
+     * @return the set of actively connected switches
+     */
+    public Map<Long, IOFSwitch> getSwitches();
+    
+    /**
+     * Get the current role of the controller
+     */
+    public Role getRole();
+    
+    /**
+     * Get the current mapping of controller IDs to their IP addresses
+     * Returns a copy of the current mapping. 
+     * @see IHAListener
+     */
+    public Map<String,String> getControllerNodeIPs();
+    
+    /**
+     * Gets the ID of the controller
+     */
+    public String getControllerId();
+    
+    /**
+     * Set the role of the controller
+     */
+    public void setRole(Role role);
+    
+    /**
+     * Add a switch listener
+     * @param listener The module that wants to listen for events
+     */
+    public void addOFSwitchListener(IOFSwitchListener listener);
+
+    /**
+     * Remove a switch listener
+     * @param listener The module that no longer wants to listen for events
+     */
+    public void removeOFSwitchListener(IOFSwitchListener listener);
+    
+    /**
+     * Adds a listener for HA role events
+     * @param listener The module that wants to listen for events
+     */
+    public void addHAListener(IHAListener listener);
+    
+    /**
+     * Removes a listener for HA role events
+     * @param listener The module that no longer wants to listen for events
+     */
+    public void removeHAListener(IHAListener listener);
+
+    /**
+     * Terminate the process
+     */
+    public void terminate();
+
+    /**
+     * Re-injects an OFMessage back into the packet processing chain
+     * @param sw The switch to use for the message
+     * @param msg the message to inject
+     * @return True if successfully re-injected, false otherwise
+     */
+    public boolean injectOfMessage(IOFSwitch sw, OFMessage msg);
+
+    /**
+     * Re-injects an OFMessage back into the packet processing chain
+     * @param sw The switch to use for the message
+     * @param msg the message to inject
+     * @param bContext a floodlight context to use if required
+     * @return True if successfully re-injected, false otherwise
+     */
+    public boolean injectOfMessage(IOFSwitch sw, OFMessage msg, 
+            FloodlightContext bContext);
+
+    /**
+     * Process written messages through the message listeners for the controller
+     * @param sw The switch being written to
+     * @param m the message 
+     * @param bc any accompanying context object
+     */
+    public void handleOutgoingMessage(IOFSwitch sw, OFMessage m, 
+            FloodlightContext bc);
+
+    /**
+     * Gets the BasicFactory
+     * @return an OpenFlow message factory
+     */
+    public BasicFactory getOFMessageFactory();
+
+    /**
+     * Run the main I/O loop of the Controller.
+     */
+    public void run();
+
+    /**
+     * Add an info provider of a particular type
+     * @param type
+     * @param provider
+     */
+    public void addInfoProvider(String type, IInfoProvider provider);
+
+   /**
+    * Remove an info provider of a particular type
+    * @param type
+    * @param provider
+    */
+   public void removeInfoProvider(String type, IInfoProvider provider);
+   
+   /**
+    * Return information of a particular type (for rest services)
+    * @param type
+    * @return
+    */
+   public Map<String, Object> getControllerInfo(String type);
+   
+   
+   /**
+    * Return the controller start time in milliseconds
+    * @return
+    */
+   public long getSystemStartTime();
+   
+   /**
+    * Configure the controller to always clear the flow table on a switch
+    * when that switch connects. This applies both to a switch connecting
+    * for the first time and to a switch re-attaching after an HA
+    * switchover to the active role.
+    */
+   public void setAlwaysClearFlowsOnSwAdd(boolean value);
+
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/IHAListener.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/IHAListener.java
new file mode 100644
index 0000000..c76f46a
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/IHAListener.java
@@ -0,0 +1,30 @@
+package net.floodlightcontroller.core;
+
+import java.util.Map;
+
+import net.floodlightcontroller.core.IFloodlightProviderService.Role;
+
+public interface IHAListener {
+    /**
+     * Gets called when the controller changes role (e.g., MASTER to SLAVE).
+     * Note that oldRole CAN be null.
+     * @param oldRole The controller's old role
+     * @param newRole The controller's new role
+     */
+    public void roleChanged(Role oldRole, Role newRole);
+    
+    /**
+     * Gets called when the IP addresses of the controller nodes in the 
+     * controller cluster change. All parameters map controller ID to
+     * the controller's IP.
+     *  
+     * @param curControllerNodeIPs The current mapping of controller IDs to IP
+     * @param addedControllerNodeIPs These IPs were added since the last update
+     * @param removedControllerNodeIPs These IPs were removed since the last update
+     */
+    public void controllerNodeIPsChanged(
+    		Map<String, String> curControllerNodeIPs,  
+    		Map<String, String> addedControllerNodeIPs,  
+    		Map<String, String> removedControllerNodeIPs
+    		);
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/IInfoProvider.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/IInfoProvider.java
new file mode 100644
index 0000000..8bfae0d
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/IInfoProvider.java
@@ -0,0 +1,34 @@
+/**
+*    Copyright 2011, Big Switch Networks, Inc. 
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.core;
+
+import java.util.Map;
+
+/**
+ *
+ *
+ * @author Shudong Zhou
+ */
+public interface IInfoProvider {
+
+    /**
+     * Called when rest API requests information of a particular type
+     * @param type
+     * @return
+     */
+    public Map<String, Object> getInfo(String type);
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/IListener.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/IListener.java
new file mode 100644
index 0000000..1bd6560
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/IListener.java
@@ -0,0 +1,52 @@
+/**
+*    Copyright 2011, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.core;
+
+public interface IListener<T> {
+    public enum Command {
+        CONTINUE, STOP
+    }
+    
+    /**
+     * The name assigned to this listener
+     * @return
+     */
+    public String getName();
+
+    /**
+     * Check if the module called name is a callback ordering prerequisite
+     * for this module.  In other words, if this function returns true for 
+     * the given name, then this message listener will be called after that
+     * message listener.
+     * @param type the message type to which this applies
+     * @param name the name of the module
+     * @return whether name is a prerequisite.
+     */
+    public boolean isCallbackOrderingPrereq(T type, String name);
+
+    /**
+     * Check if the module called name is a callback ordering post-requisite
+     * for this module.  In other words, if this function returns true for 
+     * the given name, then this message listener will be called before that
+     * message listener.
+     * @param type the message type to which this applies
+     * @param name the name of the module
+     * @return whether name is a post-requisite.
+     */
+    public boolean isCallbackOrderingPostreq(T type, String name);
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/IOFMessageFilterManagerService.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/IOFMessageFilterManagerService.java
new file mode 100644
index 0000000..36b5be3
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/IOFMessageFilterManagerService.java
@@ -0,0 +1,7 @@
+package net.floodlightcontroller.core;
+
+import net.floodlightcontroller.core.module.IFloodlightService;
+
+public interface IOFMessageFilterManagerService extends IFloodlightService {
+    // empty for now
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/IOFMessageListener.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/IOFMessageListener.java
new file mode 100644
index 0000000..00fdac1
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/IOFMessageListener.java
@@ -0,0 +1,38 @@
+/**
+*    Copyright 2011, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.core;
+
+import org.openflow.protocol.OFMessage;
+import org.openflow.protocol.OFType;
+
+/**
+ *
+ *
+ * @author David Erickson (daviderickson@cs.stanford.edu)
+ */
+public interface IOFMessageListener extends IListener<OFType> {
+  /**
+   * This is the method Floodlight uses to call listeners with OpenFlow messages
+   * @param sw the OpenFlow switch that sent this message
+   * @param msg the message
+   * @param cntx a Floodlight message context object you can use to pass 
+   * information between listeners
+   * @return the command to continue or stop the execution
+   */
+  public Command receive(IOFSwitch sw, OFMessage msg, FloodlightContext cntx);
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/IOFSwitch.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/IOFSwitch.java
new file mode 100644
index 0000000..d63624c
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/IOFSwitch.java
@@ -0,0 +1,379 @@
+/**
+*    Copyright 2011, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.core;
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.Date;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.Future;
+import net.floodlightcontroller.core.IFloodlightProviderService.Role;
+
+import org.jboss.netty.channel.Channel;
+import org.openflow.protocol.OFFeaturesReply;
+import org.openflow.protocol.OFMessage;
+import org.openflow.protocol.OFPhysicalPort;
+import org.openflow.protocol.OFStatisticsRequest;
+import org.openflow.protocol.statistics.OFDescriptionStatistics;
+import org.openflow.protocol.statistics.OFStatistics;
+
+/**
+ *
+ *
+ * @author David Erickson (daviderickson@cs.stanford.edu)
+ */
+public interface IOFSwitch {
+    // Attribute keys
+    public static final String SWITCH_DESCRIPTION_FUTURE = "DescriptionFuture";
+    public static final String SWITCH_DESCRIPTION_DATA = "DescriptionData";
+    public static final String SWITCH_SUPPORTS_NX_ROLE = "supportsNxRole";
+    public static final String SWITCH_IS_CORE_SWITCH = "isCoreSwitch";
+    public static final String PROP_FASTWILDCARDS = "FastWildcards";
+    public static final String PROP_REQUIRES_L3_MATCH = "requiresL3Match";
+    public static final String PROP_SUPPORTS_OFPP_TABLE = "supportsOfppTable";
+    public static final String PROP_SUPPORTS_OFPP_FLOOD = "supportsOfppFlood";
+    public static final String PROP_SUPPORTS_NETMASK_TBL = "supportsNetmaskTbl";
+    
+    /**
+     * Writes the OFMessage to the output stream.
+     * The message will be handed to the floodlightProvider for possible filtering
+     * and processing by message listeners
+     * @param m   
+     * @param bc  
+     * @throws IOException  
+     */
+    public void write(OFMessage m, FloodlightContext bc) throws IOException; 
+    
+    /**
+     * Writes the list of messages to the output stream
+     * The message will be handed to the floodlightProvider for possible filtering
+     * and processing by message listeners.
+     * @param msglist
+     * @param bc
+     * @throws IOException
+     */
+    public void write(List<OFMessage> msglist, FloodlightContext bc) throws IOException;
+    
+    /**
+     * 
+     * @throws IOException
+     */
+    public void disconnectOutputStream();
+
+    /**
+     * FIXME: remove getChannel(). All access to the channel should be through
+     *        wrapper functions in IOFSwitch
+     * @return
+     */
+    public Channel getChannel();
+
+    /**
+     * Accessors for fields of the switch's features reply: buffer count,
+     * supported actions, capabilities, and number of tables
+     */
+    public int getBuffers();
+    
+    public int getActions();
+    
+    public int getCapabilities();
+    
+    public byte getTables();
+
+    /**
+     * Set the OFFeaturesReply message returned by the switch during initial
+     * handshake.
+     * @param featuresReply
+     */
+    public void setFeaturesReply(OFFeaturesReply featuresReply);
+    
+    /**
+     * Set the SwitchProperties based on its description
+     * @param description
+     */
+    public void setSwitchProperties(OFDescriptionStatistics description);    
+
+    /**
+     * Get list of all enabled ports. This will typically be different from
+     * the list of ports in the OFFeaturesReply, since that one is a static
+     * snapshot of the ports at the time the switch connected to the controller
+     * whereas this port list also reflects the port status messages that have
+     * been received.
+     * @return Unmodifiable list of ports not backed by the underlying collection
+     */
+    public Collection<OFPhysicalPort> getEnabledPorts();
+    
+    /**
+     * Get list of the port numbers of all enabled ports. This will typically
+     * be different from the list of ports in the OFFeaturesReply, since that
+     * one is a static snapshot of the ports at the time the switch connected 
+     * to the controller whereas this port list also reflects the port status
+     * messages that have been received.
+     * @return Unmodifiable list of ports not backed by the underlying collection
+     */
+    public Collection<Short> getEnabledPortNumbers();
+
+    /**
+     * Retrieve the port object by the port number. The port object
+     * is the one that reflects the port status updates that have been
+     * received, not the one from the features reply.
+     * @param portNumber
+     * @return port object
+     */
+    public OFPhysicalPort getPort(short portNumber);
+    
+    /**
+     * Retrieve the port object by the port name. The port object
+     * is the one that reflects the port status updates that have been
+     * received, not the one from the features reply.
+     * @param portName
+     * @return port object
+     */
+    public OFPhysicalPort getPort(String portName);
+    
+    /**
+     * Add or modify a switch port. This is called by the core controller
+     * code in response to an OFPortStatus message. It should not typically be
+     * called by other floodlight applications.
+     * @param port
+     */
+    public void setPort(OFPhysicalPort port);
+
+    /**
+     * Delete a port for the switch. This is called by the core controller
+     * code in response to an OFPortStatus message. It should not typically be
+     * called by other floodlight applications.
+     * @param portNumber
+     */
+    public void deletePort(short portNumber);
+    
+    /**
+     * Delete a port for the switch. This is called by the core controller
+     * code in response to an OFPortStatus message. It should not typically be
+     * called by other floodlight applications.
+     * @param portName
+     */
+    public void deletePort(String portName);
+    
+    /**
+     * Get list of all ports. This will typically be different from
+     * the list of ports in the OFFeaturesReply, since that one is a static
+     * snapshot of the ports at the time the switch connected to the controller
+     * whereas this port list also reflects the port status messages that have
+     * been received.
+     * @return Unmodifiable list of ports 
+     */
+    public Collection<OFPhysicalPort> getPorts();
+
+    /**
+     * @param portNumber
+     * @return Whether a port is enabled per latest port status message
+     * (not configured down nor link down nor in spanning tree blocking state)
+     */
+    public boolean portEnabled(short portNumber);
+    
+    /**
+     * @param portName
+     * @return Whether a port is enabled per latest port status message
+     * (not configured down nor link down nor in spanning tree blocking state)
+     */
+    public boolean portEnabled(String portName);
+
+    /**
+     * @param port
+     * @return Whether a port is enabled per latest port status message
+     * (not configured down nor link down nor in spanning tree blocking state)
+     */
+    public boolean portEnabled(OFPhysicalPort port);
+
+    /**
+     * Get the datapathId of the switch
+     * @return
+     */
+    public long getId();
+
+    /**
+     * Get a string version of the ID for this switch
+     * @return
+     */
+    public String getStringId();
+    
+    /**
+     * Retrieves attributes of this switch
+     * @return
+     */
+    public Map<Object, Object> getAttributes();
+
+    /**
+     * Retrieves the date the switch connected to this controller
+     * @return the date
+     */
+    public Date getConnectedSince();
+
+    /**
+     * Returns the next available transaction id
+     * @return
+     */
+    public int getNextTransactionId();
+
+    /**
+     * Returns a Future object that can be used to retrieve the asynchronous
+     * OFStatisticsReply when it is available.
+     *
+     * @param request statistics request
+     * @return Future object wrapping OFStatisticsReply
+     * @throws IOException 
+     */
+    public Future<List<OFStatistics>> getStatistics(OFStatisticsRequest request)
+            throws IOException;
+    
+    /**
+     * Returns a Future object that can be used to retrieve the asynchronous
+     * OFFeaturesReply when it is available. The reply is requested directly
+     * from the switch.
+     *
+     * @return Future object wrapping OFFeaturesReply
+     * @throws IOException 
+     */
+    public Future<OFFeaturesReply> getFeaturesReplyFromSwitch()
+            throws IOException;
+
+    /**
+     * Deliver the featuresReply future reply
+     * @param reply the reply to deliver
+     */
+    void deliverOFFeaturesReply(OFMessage reply);
+
+    /**
+     * Cancel features reply with a specific transaction ID
+     * @param transactionId the transaction ID
+     */
+    public void cancelFeaturesReply(int transactionId);
+
+    /**
+     * Check if the switch is still connected;
+     * Only call while holding processMessageLock
+     * @return whether the switch is still connected
+     */
+    public boolean isConnected();
+    
+    /**
+     * Set whether the switch is connected
+     * Only call while holding modifySwitchLock
+     * @param connected whether the switch is connected
+     */
+    public void setConnected(boolean connected);
+    
+    /**
+     * Get the current role of the controller for the switch
+     * @return the role of the controller
+     */
+    public Role getRole();
+    
+    /**
+     * Check if the controller is an active controller for the switch.
+     * The controller is active if its role is MASTER or EQUAL.
+     * @return whether the controller is active
+     */
+    public boolean isActive();
+    
+    /**
+     * Deliver the statistics future reply
+     * @param reply the reply to deliver
+     */
+    public void deliverStatisticsReply(OFMessage reply);
+    
+    /**
+     * Cancel the statistics reply with the given transaction ID
+     * @param transactionId the transaction ID
+     */
+    public void cancelStatisticsReply(int transactionId);
+    
+    /**
+     * Cancel all statistics replies
+     */
+    public void cancelAllStatisticsReplies();
+
+    /**
+     * Checks if a specific switch property exists for this switch
+     * @param name name of property
+     * @return whether the property exists
+     */
+    boolean hasAttribute(String name);
+
+    /**
+     * Get a property used for switch specific behavior
+     * @param name name of property
+     * @return value for name
+     */
+    Object getAttribute(String name);
+
+    /**
+     * Set properties for switch specific behavior
+     * @param name name of property
+     * @param value value for name
+     */
+    void setAttribute(String name, Object value);
+
+    /**
+     * Remove a property used for switch specific behavior
+     * @param name name of property
+     * @return current value for name or null (if not present)
+     */
+    Object removeAttribute(String name);
+
+    /**
+     * Clear all flowmods on this switch
+     */
+    public void clearAllFlowMods();
+
+    /**
+     * Update the broadcast cache with the given entry and port
+     * @param entry the broadcast cache entry
+     * @param port the port associated with the entry
+     * @return true if there is a cache hit, false if there is no cache hit
+     */
+    public boolean updateBroadcastCache(Long entry, Short port);
+    
+    /**
+     * Get the portBroadcastCacheHits
+     * @return
+     */
+    public Map<Short, Long> getPortBroadcastHits();
+
+    /**
+     * Send a statistics request to the switch. This call returns as soon as
+     * the request has been sent; it does not wait for the reply.
+     * @param request statistics request message
+     * @param xid transaction id for the request, obtained via
+     * getNextTransactionId(). The transaction id can be used to match the
+     * reply with the request; note that it is unique only within the scope
+     * of this switch.
+     * @param caller the caller of the API; the receive() callback of this
+     * caller is invoked when the reply from the switch is received.
+     * @throws IOException
+     */
+    public void sendStatsQuery(OFStatisticsRequest request, int xid,
+                            IOFMessageListener caller) throws IOException;
+
+    /**
+     * Flush all flows queued for this switch in the current thread.
+     * NOTE: The contract is limited to the current thread
+     */
+     public void flush();
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/IOFSwitchFilter.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/IOFSwitchFilter.java
new file mode 100644
index 0000000..134ba98
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/IOFSwitchFilter.java
@@ -0,0 +1,38 @@
+/**
+*    Copyright 2011, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.core;
+
+/**
+ * Used in conjunction with {@link IOFMessageListener} to allow a listener to
+ * filter an incoming message based on the {@link IOFSwitch} it originated from.
+ * Implementations wanting to use this interface should implement both
+ * IOFMessageListener and IOFSwitchFilter.
+ *
+ * @author David Erickson (daviderickson@cs.stanford.edu)
+ */
+public interface IOFSwitchFilter {
+
+    /**
+     * The result of this method call determines whether the
+     * IOFMessageListener's receive method is called or not.
+     *
+     * @param sw switch to filter on
+     * @return true to receive the message, false to ignore
+     */
+    public boolean isInterested(IOFSwitch sw);
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/IOFSwitchListener.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/IOFSwitchListener.java
new file mode 100644
index 0000000..1bc258b
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/IOFSwitchListener.java
@@ -0,0 +1,51 @@
+/**
+*    Copyright 2011, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.core;
+
+/**
+ *
+ *
+ * @author David Erickson (daviderickson@cs.stanford.edu)
+ */
+public interface IOFSwitchListener {
+
+    /**
+     * Fired when a switch is connected to the controller, and has sent
+     * a features reply.
+     * @param sw
+     */
+    public void addedSwitch(IOFSwitch sw);
+
+    /**
+     * Fired when a switch is disconnected from the controller.
+     * @param sw
+     */
+    public void removedSwitch(IOFSwitch sw);
+    
+    /**
+     * Fired when ports on a switch change (any change to the collection
+     * of OFPhysicalPorts and/or to a particular port)
+     */
+    public void switchPortChanged(Long switchId);
+    
+    /**
+     * The name assigned to this listener
+     * @return
+     */
+    public String getName();
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/Main.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/Main.java
new file mode 100644
index 0000000..91b317a
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/Main.java
@@ -0,0 +1,49 @@
+package net.floodlightcontroller.core;
+
+import org.kohsuke.args4j.CmdLineException;
+import org.kohsuke.args4j.CmdLineParser;
+
+import net.floodlightcontroller.core.internal.CmdLineSettings;
+import net.floodlightcontroller.core.module.FloodlightModuleException;
+import net.floodlightcontroller.core.module.FloodlightModuleLoader;
+import net.floodlightcontroller.core.module.IFloodlightModuleContext;
+import net.floodlightcontroller.restserver.IRestApiService;
+
+/**
+ * Host for the Floodlight main method
+ * @author alexreimers
+ */
+public class Main {
+
+    /**
+     * Main method to load configuration and modules
+     * @param args
+     * @throws FloodlightModuleException 
+     */
+    public static void main(String[] args) throws FloodlightModuleException {
+        // Setup logger
+        System.setProperty("org.restlet.engine.loggerFacadeClass", 
+                "org.restlet.ext.slf4j.Slf4jLoggerFacade");
+        
+        CmdLineSettings settings = new CmdLineSettings();
+        CmdLineParser parser = new CmdLineParser(settings);
+        try {
+            parser.parseArgument(args);
+        } catch (CmdLineException e) {
+            parser.printUsage(System.out);
+            System.exit(1);
+        }
+        
+        // Load modules
+        FloodlightModuleLoader fml = new FloodlightModuleLoader();
+        IFloodlightModuleContext moduleContext = fml.loadModulesFromConfig(settings.getModuleFile());
+        // Run REST server
+        IRestApiService restApi = moduleContext.getServiceImpl(IRestApiService.class);
+        restApi.run();
+        // Run the main floodlight module
+        IFloodlightProviderService controller =
+                moduleContext.getServiceImpl(IFloodlightProviderService.class);
+        // This call blocks, it has to be the last line in the main
+        controller.run();
+    }
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/OFMessageFilterManager.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/OFMessageFilterManager.java
new file mode 100644
index 0000000..391c002
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/OFMessageFilterManager.java
@@ -0,0 +1,529 @@
+/**
+ *    Copyright 2011, Big Switch Networks, Inc.
+ *    Originally created by David Erickson, Stanford University
+ *
+ *    Licensed under the Apache License, Version 2.0 (the "License"); you may
+ *    not use this file except in compliance with the License. You may obtain
+ *    a copy of the License at
+ *
+ *         http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *    Unless required by applicable law or agreed to in writing, software
+ *    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ *    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ *    License for the specific language governing permissions and limitations
+ *    under the License.
+ **/
+
+package net.floodlightcontroller.core;
+
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Timer;
+import java.util.TimerTask;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ScheduledExecutorService;
+
+import org.jboss.netty.buffer.ChannelBuffer;
+import org.jboss.netty.buffer.ChannelBuffers;
+import org.openflow.protocol.OFFlowMod;
+import org.openflow.protocol.OFMessage;
+import org.openflow.protocol.OFPacketIn;
+import org.openflow.protocol.OFPacketOut;
+import org.openflow.protocol.OFType;
+import org.openflow.util.HexString;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.List;
+import java.util.ArrayList;
+import org.apache.thrift.TException;
+import org.apache.thrift.transport.TFramedTransport;
+import org.apache.thrift.transport.TTransport;
+import org.apache.thrift.transport.TSocket;
+import org.apache.thrift.protocol.TBinaryProtocol;
+import org.apache.thrift.protocol.TProtocol;
+
+import net.floodlightcontroller.core.annotations.LogMessageCategory;
+import net.floodlightcontroller.core.annotations.LogMessageDoc;
+import net.floodlightcontroller.core.module.FloodlightModuleContext;
+import net.floodlightcontroller.core.module.FloodlightModuleException;
+import net.floodlightcontroller.core.module.IFloodlightModule;
+import net.floodlightcontroller.core.module.IFloodlightService;
+import net.floodlightcontroller.packet.Ethernet;
+import net.floodlightcontroller.packetstreamer.thrift.*;
+import net.floodlightcontroller.threadpool.IThreadPoolService;
+
+@LogMessageCategory("OpenFlow Message Tracing")
+public class OFMessageFilterManager 
+        implements IOFMessageListener, IFloodlightModule, IOFMessageFilterManagerService {
+
+    /**
+     * @author Srini
+     */
+    protected static Logger log = LoggerFactory.getLogger(OFMessageFilterManager.class);
+
+    // The port and client reference for packet streaming
+    protected int serverPort = 9090;
+    protected final int MaxRetry = 1;
+    protected static TTransport transport = null;
+    protected static PacketStreamer.Client packetClient = null;
+
+    protected IFloodlightProviderService floodlightProvider = null;
+    protected IThreadPoolService threadPool = null;
+    // filterMap is keyed by session id; the value is the set of
+    // filter rules for that session.
+    protected ConcurrentHashMap<String, 
+                                ConcurrentHashMap<String,
+                                                  String>> filterMap = null;
+    protected ConcurrentHashMap<String, Long> filterTimeoutMap = null;
+    protected Timer timer = null;
+
+    protected int MAX_FILTERS=5;
+    protected long MAX_FILTER_TIME= 300000; // maximum filter time is 5 minutes.
+    protected int TIMER_INTERVAL = 1000;  // 1 second time interval.
+
+    public static final String SUCCESS                     = "0";
+    public static final String FILTER_SETUP_FAILED         = "-1001"; 
+    public static final String FILTER_NOT_FOUND            = "-1002";
+    public static final String FILTER_LIMIT_REACHED        = "-1003";
+    public static final String FILTER_SESSION_ID_NOT_FOUND = "-1004";
+    public static final String SERVICE_UNAVAILABLE         = "-1005";
+
+    public enum FilterResult {
+        /*
+         * FILTER_NOT_DEFINED: Filter is not defined
+         * FILTER_NO_MATCH:    Filter is defined and the packet doesn't 
+         *                     match the filter
+         * FILTER_MATCH:       Filter is defined and the packet matches
+         *                     the filter
+         */
+        FILTER_NOT_DEFINED, FILTER_NO_MATCH, FILTER_MATCH
+    }
+
+    protected String addFilter(ConcurrentHashMap<String,String> f, long delta) {
+
+        // Create unique session ID.  
+        int prime = 33791;
+        String s = null;
+        int i;
+
+        if ((filterMap == null) || (filterTimeoutMap == null))
+            return FILTER_SETUP_FAILED;
+
+        for (i=0; i<MAX_FILTERS; ++i) {
+            Integer x = prime + i;
+            s = String.format("%d", x.hashCode());
+            // implies you can use this key for session id.    
+            if (!filterMap.containsKey(s)) break; 
+        }
+
+        if (i==MAX_FILTERS) {
+            return FILTER_LIMIT_REACHED;
+        }
+
+        filterMap.put(s, f);
+        if (filterTimeoutMap.containsKey(s))  filterTimeoutMap.remove(s);
+        filterTimeoutMap.put(s, delta);
+
+        // set the timer as there will be no existing timers. 
+        if (filterMap.size() == 1) { 
+            TimeoutFilterTask task = new TimeoutFilterTask(this);
+            Timer timer = new Timer();
+            timer.schedule (task, TIMER_INTERVAL);                
+            // Keep the listeners to avoid race condition
+            //startListening();
+        }   
+        return s;  // the return string is the session ID.
+    }
+
+    public String setupFilter(String sid, 
+                              ConcurrentHashMap<String,String> f, 
+                              int deltaInMilliSeconds) {
+
+        if (sid == null) {
+            // Delta in filter needs to be milliseconds
+            log.debug("Adding new filter: {} for {} ms", f, deltaInMilliSeconds);
+            return addFilter(f, deltaInMilliSeconds);
+        } else {// this is the session id.
+            // we will ignore the hash map features.
+            if (deltaInMilliSeconds > 0)  
+                return refreshFilter(sid, deltaInMilliSeconds);
+            else 
+                return deleteFilter(sid);
+        }
+    }
+
+    public int timeoutFilters() {                
+        Iterator<String> i = filterTimeoutMap.keySet().iterator();
+
+        while(i.hasNext()) {
+            String s = i.next();
+
+            Long t = filterTimeoutMap.get(s);
+            if (t != null) {
+                i.remove();
+                t -= TIMER_INTERVAL;
+                if (t > 0) {
+                    filterTimeoutMap.put(s, t);
+                } else deleteFilter(s);
+            } else deleteFilter(s);
+        }
+        return filterMap.size();
+    }
+
+    protected String refreshFilter(String s, int delta) {
+        Long t = filterTimeoutMap.get(s);
+        if (t != null) {
+            filterTimeoutMap.remove(s);
+            t += delta;  // time is in milliseconds
+            if (t > MAX_FILTER_TIME) t = MAX_FILTER_TIME;
+            filterTimeoutMap.put(s, t);
+            return SUCCESS;
+        } else return FILTER_SESSION_ID_NOT_FOUND;
+    }
+
+    @LogMessageDoc(level="ERROR",
+                   message="Error while terminating packet " +
+                           "filter session",
+                   explanation="An unknown error occurred while terminating " +
+                   		"a packet filter session.",
+                   recommendation=LogMessageDoc.GENERIC_ACTION)
+    protected String deleteFilter(String sessionId) {
+
+        if (filterMap.containsKey(sessionId)) {
+            filterMap.remove(sessionId);
+            try {
+                if (packetClient != null)
+                    packetClient.terminateSession(sessionId);
+            } catch (TException e) {
+                log.error("Error while terminating packet " +
+                		  "filter session", e);
+            }
+            log.debug("Deleted Filter {}.  # of filters" +
+            		 " remaining: {}", sessionId, filterMap.size());
+            return SUCCESS;
+        } else return FILTER_SESSION_ID_NOT_FOUND;
+    }
+
+    public HashSet<String> getMatchedFilters(OFMessage m, FloodlightContext cntx) {  
+
+        HashSet<String> matchedFilters = new HashSet<String>();
+
+        // This default function is written to match on packet ins and 
+        // packet outs.
+        Ethernet eth = null;
+
+        if (m.getType() == OFType.PACKET_IN) {
+            eth = IFloodlightProviderService.bcStore.get(cntx, 
+                    IFloodlightProviderService.CONTEXT_PI_PAYLOAD);
+        } else if (m.getType() == OFType.PACKET_OUT) {
+            eth = new Ethernet();
+            OFPacketOut p = (OFPacketOut) m;
+            
+            // No MAC match if packetOut doesn't have the packet.
+            if (p.getPacketData() == null) return null;
+            
+            eth.deserialize(p.getPacketData(), 0, p.getPacketData().length);
+        } else if (m.getType() == OFType.FLOW_MOD) {
+            // flow-mod can't be matched by mac.
+            return null;
+        }
+
+        if (eth == null) return null;
+
+        Iterator<String> filterIt = filterMap.keySet().iterator();
+        while (filterIt.hasNext()) {   // for every filter
+            boolean filterMatch = false;
+            String filterSessionId = filterIt.next();
+            Map<String,String> filter = filterMap.get(filterSessionId);
+
+            // If the filter has empty fields, then it is not considered as a match.
+            if (filter == null || filter.isEmpty()) continue;                  
+            Iterator<String> fieldIt = filter.keySet().iterator();
+            while (fieldIt.hasNext()) {   
+                String filterFieldType = fieldIt.next();
+                String filterFieldValue = filter.get(filterFieldType);
+                if (filterFieldType.equals("mac")) {
+
+                    String srcMac = HexString.toHexString(eth.getSourceMACAddress());
+                    String dstMac = HexString.toHexString(eth.getDestinationMACAddress());
+                    log.debug("srcMac: {}, dstMac: {}", srcMac, dstMac);
+
+                    if (filterFieldValue.equals(srcMac) || 
+                            filterFieldValue.equals(dstMac)){
+                        filterMatch = true; 
+                    } else {
+                        filterMatch = false;
+                        break;
+                    }
+                }
+            }
+            if (filterMatch) {
+                matchedFilters.add(filterSessionId);
+            }
+        }
+
+        if (matchedFilters.isEmpty())
+            return null;    
+        else 
+            return matchedFilters;
+    }
+    
+    @LogMessageDoc(level="ERROR",
+                   message="Failed to establish connection with the " +
+                           "packetstreamer server.",
+                   explanation="The message tracing server is not running " +
+                   		"or otherwise unavailable.",
+                   recommendation=LogMessageDoc.CHECK_CONTROLLER)
+    public boolean connectToPSServer() {
+        int numRetries = 0;
+        if (transport != null && transport.isOpen()) {
+            return true;
+        }
+
+        while (numRetries++ < MaxRetry) {
+            try {
+                transport = new TFramedTransport(new TSocket("localhost", 
+                                                             serverPort));
+                transport.open();
+
+                TProtocol protocol = new  TBinaryProtocol(transport);
+                packetClient = new PacketStreamer.Client(protocol);
+
+                log.debug("Have a connection to packetstreamer server " +
+                		  "localhost:{}", serverPort);
+                break;
+            } catch (TException x) {
+                try {
+                    // Wait for 1 second before retry
+                    if (numRetries < MaxRetry) {
+                        Thread.sleep(1000);
+                    }
+                } catch (Exception e) {}
+            } 
+        }
+
+        if (numRetries > MaxRetry) {
+            log.error("Failed to establish connection with the " +
+            		  "packetstreamer server.");
+            return false;
+        }
+        return true;
+    }
+
+    public void disconnectFromPSServer() {
+        if (transport != null && transport.isOpen()) {
+            log.debug("Close the connection to packetstreamer server" +
+            		  " localhost:{}", serverPort);
+            transport.close();
+        }
+    }
+
+    @Override
+    public String getName() {
+        return "messageFilterManager";
+    }
+
+    @Override
+    public boolean isCallbackOrderingPrereq(OFType type, String name) {
+        return (type == OFType.PACKET_IN && name.equals("devicemanager"));
+    }
+
+    @Override
+    public boolean isCallbackOrderingPostreq(OFType type, String name) {
+        return (type == OFType.PACKET_IN && name.equals("learningswitch"));
+    }
+
+    @Override
+    @LogMessageDoc(level="ERROR",
+                   message="Error while sending packet",
+                   explanation="Failed to send a message to the message " +
+                   		"tracing server",
+                   recommendation=LogMessageDoc.CHECK_CONTROLLER)
+    public Command receive(IOFSwitch sw, OFMessage msg, 
+                           FloodlightContext cntx) {
+
+        if (filterMap == null || filterMap.isEmpty()) return Command.CONTINUE;
+
+        HashSet<String> matchedFilters = null;
+        if (log.isDebugEnabled()) {
+            log.debug("Received packet {} from switch {}", 
+                      msg, sw.getStringId());
+        }
+
+        matchedFilters = getMatchedFilters(msg, cntx);
+        if (matchedFilters == null) {
+            return Command.CONTINUE;
+        } else {
+            try {
+                sendPacket(matchedFilters, sw, msg, cntx, true);
+            } catch (Exception e) {
+                log.error("Error while sending packet", e);
+            }
+        }
+        
+        return Command.CONTINUE;
+    }
+
+
+    public class TimeoutFilterTask extends TimerTask {
+
+        OFMessageFilterManager filterManager;
+        ScheduledExecutorService ses = threadPool.getScheduledExecutor();
+
+        public TimeoutFilterTask(OFMessageFilterManager manager) {
+            filterManager = manager;
+        }
+
+        public void run() {
+            int x = filterManager.timeoutFilters();
+
+            if (x > 0) {  // there's at least one filter still active.
+                Timer timer = new Timer();
+                timer.schedule(new TimeoutFilterTask(filterManager), 
+                               TIMER_INTERVAL);
+            } else {
+                // Don't stop the listener to avoid race condition
+                //stopListening();
+            }
+        }
+    }
+
+    public int getNumberOfFilters() {
+        return filterMap.size();
+    }
+
+    public int getMaxFilterSize() {
+        return MAX_FILTERS;
+    }
+
+    protected void sendPacket(HashSet<String> matchedFilters, IOFSwitch sw, 
+            OFMessage msg, FloodlightContext cntx, boolean sync) 
+                    throws TException {
+        Message sendMsg = new Message();
+        Packet packet = new Packet();
+        ChannelBuffer bb;
+        sendMsg.setPacket(packet);
+
+        List<String> sids = new ArrayList<String>(matchedFilters);
+
+        sendMsg.setSessionIDs(sids);
+        packet.setMessageType(OFMessageType.findByValue((msg.getType().ordinal())));
+
+        switch (msg.getType()) {
+            case PACKET_IN:
+                OFPacketIn pktIn = (OFPacketIn)msg;
+                packet.setSwPortTuple(new SwitchPortTuple(sw.getId(), 
+                                                          pktIn.getInPort()));
+                bb = ChannelBuffers.buffer(pktIn.getLength());
+                pktIn.writeTo(bb);
+                packet.setData(OFMessage.getData(sw, msg, cntx));
+                break;
+            case PACKET_OUT:
+                OFPacketOut pktOut = (OFPacketOut)msg;
+                packet.setSwPortTuple(new SwitchPortTuple(sw.getId(), 
+                                                          pktOut.getInPort()));
+                bb = ChannelBuffers.buffer(pktOut.getLength());
+                pktOut.writeTo(bb);
+                packet.setData(OFMessage.getData(sw, msg, cntx));
+                break;
+            case FLOW_MOD:
+                OFFlowMod offlowMod = (OFFlowMod)msg;
+                packet.setSwPortTuple(new SwitchPortTuple(sw.getId(), 
+                                                          offlowMod.
+                                                          getOutPort()));
+                bb = ChannelBuffers.buffer(offlowMod.getLength());
+                offlowMod.writeTo(bb);
+                packet.setData(OFMessage.getData(sw, msg, cntx));
+                break;
+            default:
+                packet.setSwPortTuple(new SwitchPortTuple(sw.getId(), 
+                                                          (short)0));
+                String strData = "Unknown packet";
+                packet.setData(strData.getBytes());
+                break;
+        }
+
+        try {
+            if (transport == null || 
+                !transport.isOpen() || 
+                packetClient == null) {
+                if (!connectToPSServer()) {
+                    // No need to sendPacket if can't make connection to 
+                    // the server
+                    return;
+                }
+            }
+            if (sync) {
+                log.debug("Send packet sync: {}", packet.toString());
+                packetClient.pushMessageSync(sendMsg);
+            } else {
+                log.debug("Send packet sync: ", packet.toString());
+                packetClient.pushMessageAsync(sendMsg);
+            }
+        } catch (Exception e) {
+            log.error("Error while sending packet", e);
+            disconnectFromPSServer();
+            connectToPSServer();
+        }
+    }
+
+    // IFloodlightModule methods
+    
+    @Override
+    public Collection<Class<? extends IFloodlightService>> getModuleServices() {
+        Collection<Class<? extends IFloodlightService>> l = 
+                new ArrayList<Class<? extends IFloodlightService>>();
+        l.add(IOFMessageFilterManagerService.class);
+        return l;
+    }
+
+    @Override
+    public Map<Class<? extends IFloodlightService>, IFloodlightService>
+            getServiceImpls() {
+        Map<Class<? extends IFloodlightService>,
+        IFloodlightService> m = 
+            new HashMap<Class<? extends IFloodlightService>,
+                        IFloodlightService>();
+        // We are the class that implements the service
+        m.put(IOFMessageFilterManagerService.class, this);
+        return m;
+    }
+
+    @Override
+    public Collection<Class<? extends IFloodlightService>> getModuleDependencies() {
+        Collection<Class<? extends IFloodlightService>> l = 
+                new ArrayList<Class<? extends IFloodlightService>>();
+        l.add(IFloodlightProviderService.class);
+        l.add(IThreadPoolService.class);
+        return l;
+    }
+
+    @Override
+    public void init(FloodlightModuleContext context) 
+            throws FloodlightModuleException {
+        this.floodlightProvider = 
+                context.getServiceImpl(IFloodlightProviderService.class);
+        this.threadPool =
+                context.getServiceImpl(IThreadPoolService.class);
+    }
+
+    @Override
+    public void startUp(FloodlightModuleContext context) {
+        // This is our 'constructor'
+        
+        filterMap = new ConcurrentHashMap<String, ConcurrentHashMap<String,String>>();
+        filterTimeoutMap = new ConcurrentHashMap<String, Long>();
+        serverPort = 
+                Integer.parseInt(System.getProperty("net.floodlightcontroller." +
+                		"packetstreamer.port", "9090"));
+        
+        floodlightProvider.addOFMessageListener(OFType.PACKET_IN, this);
+        floodlightProvider.addOFMessageListener(OFType.PACKET_OUT, this);
+        floodlightProvider.addOFMessageListener(OFType.FLOW_MOD, this);
+    }
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/annotations/LogMessageCategory.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/annotations/LogMessageCategory.java
new file mode 100644
index 0000000..e9abf02
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/annotations/LogMessageCategory.java
@@ -0,0 +1,34 @@
+/**
+*    Copyright 2012, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.core.annotations;
+
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Target;
+
+/**
+ * Annotation used to set the category for log messages for a class
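+ *
+ * An illustrative (hypothetical) usage: annotating a class with
+ * {@code @LogMessageCategory("Device Management")} groups the log messages
+ * documented in that class under "Device Management" instead of the
+ * default "Core" category.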
+ * @author readams
+ */
+@Target({ElementType.TYPE, ElementType.METHOD})
+public @interface LogMessageCategory {
+    /**
+     * The category for the log messages for this class
+     * @return
+     */
+    String value() default "Core";
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/annotations/LogMessageDoc.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/annotations/LogMessageDoc.java
new file mode 100644
index 0000000..80af1a7
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/annotations/LogMessageDoc.java
@@ -0,0 +1,71 @@
+/**
+*    Copyright 2012, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.core.annotations;
+
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Target;
+
+/**
+ * Annotation used to document log messages.  This can be used to generate
+ * documentation on syslog output.
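+ *
+ * A hypothetical usage sketch (the message text shown is illustrative
+ * only):
+ * <pre>
+ * &#64;LogMessageDoc(level="ERROR",
+ *                message="Failed to contact switch {switch}",
+ *                explanation="The switch did not respond to the request",
+ *                recommendation=LogMessageDoc.CHECK_SWITCH)
+ * </pre>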
+ * @author readams
+ */
+@Target({ElementType.TYPE, ElementType.METHOD})
+public @interface LogMessageDoc {
+    public static final String NO_ACTION = "No action is required.";
+    public static final String UNKNOWN_ERROR = "An unknown error occurred";
+    public static final String GENERIC_ACTION = 
+            "Examine the returned error or exception and take " +
+            "appropriate action.";
+    public static final String CHECK_SWITCH = 
+            "Check the health of the indicated switch.  " + 
+            "Test and troubleshoot IP connectivity.";
+    public static final String CHECK_CONTROLLER = 
+            "Verify controller system health, CPU usage, and memory.  " + 
+            "Rebooting the controller node may help if the controller " +
+            "node is in a distressed state.";
+    public static final String REPORT_CONTROLLER_BUG =
+            "This is likely a defect in the controller.  Please report this " +
+            "issue.  Restarting the controller or switch may help to " +
+            "alleviate.";
+    public static final String REPORT_SWITCH_BUG =
+            "This is likely a defect in the switch.  Please report this " +
+            "issue.  Restarting the controller or switch may help to " +
+            "alleviate.";
+
+    /**
+     * The log level for the log message
+     * @return the log level as a string
+     */
+    String level() default "INFO";
+    /**
+     * The message that will be printed
+     * @return the message
+     */
+    String message() default UNKNOWN_ERROR;
+    /**
+     * An explanation of the meaning of the log message
+     * @return the explanation
+     */
+    String explanation() default UNKNOWN_ERROR;
+    /**
+     * The recommended action associated with the log message
+     * @return the recommendation
+     */
+    String recommendation() default NO_ACTION;
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/annotations/LogMessageDocs.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/annotations/LogMessageDocs.java
new file mode 100644
index 0000000..663baf0
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/annotations/LogMessageDocs.java
@@ -0,0 +1,36 @@
+/**
+*    Copyright 2012, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.core.annotations;
+
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Target;
+
+/**
+ * Annotation used to document log messages.  This can be used to generate
+ * documentation on syslog output.  This version allows multiple log messages
+ * to be documented on an interface.
+ * @author readams
+ */
+@Target({ElementType.TYPE, ElementType.METHOD})
+public @interface LogMessageDocs {
+    /**
+     * A list of {@link LogMessageDoc} elements
+     * @return the list of log message doc
+     */
+    LogMessageDoc[] value();
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/internal/CmdLineSettings.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/internal/CmdLineSettings.java
new file mode 100644
index 0000000..7641a7c
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/internal/CmdLineSettings.java
@@ -0,0 +1,17 @@
+package net.floodlightcontroller.core.internal;
+
+import org.kohsuke.args4j.Option;
+
+/**
+ * Expresses the command line settings of the OpenFlow controller.
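+ *
+ * Illustrative invocation (the jar name is hypothetical):
+ * {@code java -jar floodlight.jar -cf config/floodlight.properties}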
+ */
+public class CmdLineSettings {
+    public static final String DEFAULT_CONFIG_FILE = "config/floodlight.properties";
+
+    @Option(name="-cf", aliases="--configFile", metaVar="FILE", usage="Floodlight configuration file")
+    private String configFile = DEFAULT_CONFIG_FILE;
+    
+    public String getModuleFile() {
+        return configFile;
+    }
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/internal/Controller.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/internal/Controller.java
new file mode 100644
index 0000000..90eff6f
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/internal/Controller.java
@@ -0,0 +1,2239 @@
+/**
+*    Copyright 2011, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.core.internal;
+
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.net.InetAddress;
+import java.net.InetSocketAddress;
+import java.net.SocketAddress;
+import java.util.ArrayList;
+import java.nio.channels.ClosedChannelException;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Properties;
+import java.util.Set;
+import java.util.Stack;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.CopyOnWriteArraySet;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.RejectedExecutionException;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+
+import net.floodlightcontroller.core.FloodlightContext;
+import net.floodlightcontroller.core.IFloodlightProviderService;
+import net.floodlightcontroller.core.IHAListener;
+import net.floodlightcontroller.core.IInfoProvider;
+import net.floodlightcontroller.core.IOFMessageListener;
+import net.floodlightcontroller.core.IListener.Command;
+import net.floodlightcontroller.core.IOFSwitch;
+import net.floodlightcontroller.core.IOFSwitchFilter;
+import net.floodlightcontroller.core.IOFSwitchListener;
+import net.floodlightcontroller.core.annotations.LogMessageDoc;
+import net.floodlightcontroller.core.annotations.LogMessageDocs;
+import net.floodlightcontroller.core.internal.OFChannelState.HandshakeState;
+import net.floodlightcontroller.core.util.ListenerDispatcher;
+import net.floodlightcontroller.core.web.CoreWebRoutable;
+import net.floodlightcontroller.counter.ICounterStoreService;
+import net.floodlightcontroller.packet.Ethernet;
+import net.floodlightcontroller.perfmon.IPktInProcessingTimeService;
+import net.floodlightcontroller.restserver.IRestApiService;
+import net.floodlightcontroller.storage.IResultSet;
+import net.floodlightcontroller.storage.IStorageSourceListener;
+import net.floodlightcontroller.storage.IStorageSourceService;
+import net.floodlightcontroller.storage.OperatorPredicate;
+import net.floodlightcontroller.storage.StorageException;
+import net.floodlightcontroller.threadpool.IThreadPoolService;
+
+import org.jboss.netty.bootstrap.ServerBootstrap;
+import org.jboss.netty.buffer.ChannelBuffer;
+import org.jboss.netty.buffer.ChannelBuffers;
+import org.jboss.netty.channel.Channel;
+import org.jboss.netty.channel.ChannelHandlerContext;
+import org.jboss.netty.channel.ChannelPipelineFactory;
+import org.jboss.netty.channel.ChannelStateEvent;
+import org.jboss.netty.channel.ChannelUpstreamHandler;
+import org.jboss.netty.channel.Channels;
+import org.jboss.netty.channel.ExceptionEvent;
+import org.jboss.netty.channel.MessageEvent;
+import org.jboss.netty.channel.group.ChannelGroup;
+import org.jboss.netty.channel.group.DefaultChannelGroup;
+import org.jboss.netty.channel.socket.nio.NioServerSocketChannelFactory;
+import org.jboss.netty.handler.timeout.IdleStateAwareChannelUpstreamHandler;
+import org.jboss.netty.handler.timeout.IdleStateEvent;
+import org.jboss.netty.handler.timeout.ReadTimeoutException;
+import org.openflow.protocol.OFEchoReply;
+import org.openflow.protocol.OFError;
+import org.openflow.protocol.OFError.OFBadActionCode;
+import org.openflow.protocol.OFError.OFBadRequestCode;
+import org.openflow.protocol.OFError.OFErrorType;
+import org.openflow.protocol.OFError.OFFlowModFailedCode;
+import org.openflow.protocol.OFError.OFHelloFailedCode;
+import org.openflow.protocol.OFError.OFPortModFailedCode;
+import org.openflow.protocol.OFError.OFQueueOpFailedCode;
+import org.openflow.protocol.OFFeaturesReply;
+import org.openflow.protocol.OFGetConfigReply;
+import org.openflow.protocol.OFMessage;
+import org.openflow.protocol.OFPacketIn;
+import org.openflow.protocol.OFPhysicalPort;
+import org.openflow.protocol.OFPortStatus;
+import org.openflow.protocol.OFPortStatus.OFPortReason;
+import org.openflow.protocol.OFSetConfig;
+import org.openflow.protocol.OFStatisticsRequest;
+import org.openflow.protocol.OFSwitchConfig;
+import org.openflow.protocol.OFType;
+import org.openflow.protocol.OFVendor;
+import org.openflow.protocol.factory.BasicFactory;
+import org.openflow.protocol.factory.MessageParseException;
+import org.openflow.protocol.statistics.OFDescriptionStatistics;
+import org.openflow.protocol.statistics.OFStatistics;
+import org.openflow.protocol.statistics.OFStatisticsType;
+import org.openflow.protocol.vendor.OFBasicVendorDataType;
+import org.openflow.protocol.vendor.OFBasicVendorId;
+import org.openflow.protocol.vendor.OFVendorId;
+import org.openflow.util.HexString;
+import org.openflow.util.U16;
+import org.openflow.util.U32;
+import org.openflow.vendor.nicira.OFNiciraVendorData;
+import org.openflow.vendor.nicira.OFRoleReplyVendorData;
+import org.openflow.vendor.nicira.OFRoleRequestVendorData;
+import org.openflow.vendor.nicira.OFRoleVendorData;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+
+/**
+ * The main controller class.  Handles all setup and network listeners
+ */
+public class Controller implements IFloodlightProviderService, 
+            IStorageSourceListener {
+    
+    protected static Logger log = LoggerFactory.getLogger(Controller.class);
+
+    private static final String ERROR_DATABASE = 
+            "The controller could not communicate with the system database.";
+    
+    protected BasicFactory factory;
+    protected ConcurrentMap<OFType,
+                            ListenerDispatcher<OFType,IOFMessageListener>> 
+                                messageListeners;
+    // The activeSwitches map contains only those switches that are actively
+    // being controlled by us -- it doesn't contain switches that are
+    // in the slave role
+    protected ConcurrentHashMap<Long, IOFSwitch> activeSwitches;
+    // connectedSwitches contains all connected switches, including ones where
+    // we're a slave controller. We need to keep track of them so that we can
+    // send role request messages to switches when our role changes to master
+    // We add a switch to this set after it successfully completes the
+    // handshake. Access to this Set needs to be synchronized with roleChanger
+    protected HashSet<OFSwitchImpl> connectedSwitches;
+    
+    // The controllerNodeIPsCache maps Controller IDs to their IP address. 
+    // It's only used by handleControllerNodeIPsChanged
+    protected HashMap<String, String> controllerNodeIPsCache;
+    
+    protected Set<IOFSwitchListener> switchListeners;
+    protected Set<IHAListener> haListeners;
+    protected Map<String, List<IInfoProvider>> providerMap;
+    protected BlockingQueue<IUpdate> updates;
+    
+    // Module dependencies
+    protected IRestApiService restApi;
+    protected ICounterStoreService counterStore = null;
+    protected IStorageSourceService storageSource;
+    protected IPktInProcessingTimeService pktinProcTime;
+    protected IThreadPoolService threadPool;
+    
+    // Configuration options
+    protected int openFlowPort = 6633;
+    protected int workerThreads = 0;
+    // The id for this controller node. Should be unique for each controller
+    // node in a controller cluster.
+    protected String controllerId = "localhost";
+    
+    // The current role of the controller.
+    // If the controller isn't configured to support roles, then this is null.
+    protected Role role;
+    // A helper that handles sending and timeout handling for role requests
+    protected RoleChanger roleChanger;
+    
+    // Start time of the controller
+    protected long systemStartTime;
+    
+    // Flag to always flush flow table on switch reconnect (HA or otherwise)
+    protected boolean alwaysClearFlowsOnSwAdd = false;
+    
+    // Storage table names
+    protected static final String CONTROLLER_TABLE_NAME = "controller_controller";
+    protected static final String CONTROLLER_ID = "id";
+    
+    protected static final String SWITCH_TABLE_NAME = "controller_switch";
+    protected static final String SWITCH_DATAPATH_ID = "dpid";
+    protected static final String SWITCH_SOCKET_ADDRESS = "socket_address";
+    protected static final String SWITCH_IP = "ip";
+    protected static final String SWITCH_CONTROLLER_ID = "controller_id";
+    protected static final String SWITCH_ACTIVE = "active";
+    protected static final String SWITCH_CONNECTED_SINCE = "connected_since";
+    protected static final String SWITCH_CAPABILITIES = "capabilities";
+    protected static final String SWITCH_BUFFERS = "buffers";
+    protected static final String SWITCH_TABLES = "tables";
+    protected static final String SWITCH_ACTIONS = "actions";
+
+    protected static final String SWITCH_CONFIG_TABLE_NAME = "controller_switchconfig";
+    protected static final String SWITCH_CONFIG_CORE_SWITCH = "core_switch";
+    
+    protected static final String PORT_TABLE_NAME = "controller_port";
+    protected static final String PORT_ID = "id";
+    protected static final String PORT_SWITCH = "switch_id";
+    protected static final String PORT_NUMBER = "number";
+    protected static final String PORT_HARDWARE_ADDRESS = "hardware_address";
+    protected static final String PORT_NAME = "name";
+    protected static final String PORT_CONFIG = "config";
+    protected static final String PORT_STATE = "state";
+    protected static final String PORT_CURRENT_FEATURES = "current_features";
+    protected static final String PORT_ADVERTISED_FEATURES = "advertised_features";
+    protected static final String PORT_SUPPORTED_FEATURES = "supported_features";
+    protected static final String PORT_PEER_FEATURES = "peer_features";
+    
+    protected static final String CONTROLLER_INTERFACE_TABLE_NAME = "controller_controllerinterface";
+    protected static final String CONTROLLER_INTERFACE_ID = "id";
+    protected static final String CONTROLLER_INTERFACE_CONTROLLER_ID = "controller_id";
+    protected static final String CONTROLLER_INTERFACE_TYPE = "type";
+    protected static final String CONTROLLER_INTERFACE_NUMBER = "number";
+    protected static final String CONTROLLER_INTERFACE_DISCOVERED_IP = "discovered_ip";
+    
+    
+    
+    // Perf. related configuration
+    protected static final int SEND_BUFFER_SIZE = 4 * 1024 * 1024;
+    protected static final int BATCH_MAX_SIZE = 100;
+    protected static final boolean ALWAYS_DECODE_ETH = true;
+
+    /**
+     *  Updates handled by the main loop 
+     */
+    protected interface IUpdate {
+        /** 
+         * Calls the appropriate listeners
+         */
+        public void dispatch();
+    }
+    public enum SwitchUpdateType {
+        ADDED,
+        REMOVED,
+        PORTCHANGED
+    }
+    /**
+     * Update message indicating a switch was added or removed 
+     */
+    protected class SwitchUpdate implements IUpdate {
+        public IOFSwitch sw;
+        public SwitchUpdateType switchUpdateType;
+        public SwitchUpdate(IOFSwitch sw, SwitchUpdateType switchUpdateType) {
+            this.sw = sw;
+            this.switchUpdateType = switchUpdateType;
+        }
+        public void dispatch() {
+            if (log.isTraceEnabled()) {
+                log.trace("Dispatching switch update {} {}",
+                        sw, switchUpdateType);
+            }
+            if (switchListeners != null) {
+                for (IOFSwitchListener listener : switchListeners) {
+                    switch(switchUpdateType) {
+                        case ADDED:
+                            listener.addedSwitch(sw);
+                            break;
+                        case REMOVED:
+                            listener.removedSwitch(sw);
+                            break;
+                        case PORTCHANGED:
+                            listener.switchPortChanged(sw.getId());
+                            break;
+                    }
+                }
+            }
+        }
+    }
+    
+    /**
+     * Update message indicating controller's role has changed
+     */
+    protected class HARoleUpdate implements IUpdate {
+        public Role oldRole;
+        public Role newRole;
+        public HARoleUpdate(Role newRole, Role oldRole) {
+            this.oldRole = oldRole;
+            this.newRole = newRole;
+        }
+        public void dispatch() {
+            // Make sure that old and new roles are different.
+            if (oldRole == newRole) {
+                if (log.isTraceEnabled()) {
+                    log.trace("HA role update ignored as the old and " +
+                              "new roles are the same. newRole = {}" +
+                              "oldRole = {}", newRole, oldRole);
+                }
+                return;
+            }
+            if (log.isTraceEnabled()) {
+                log.trace("Dispatching HA Role update newRole = {}, oldRole = {}",
+                          newRole, oldRole);
+            }
+            if (haListeners != null) {
+                for (IHAListener listener : haListeners) {
+                        listener.roleChanged(oldRole, newRole);
+                }
+            }
+        }
+    }
+    
+    /**
+     * Update message indicating
+     * IPs of controllers in controller cluster have changed.
+     */
+    protected class HAControllerNodeIPUpdate implements IUpdate {
+        public Map<String,String> curControllerNodeIPs;
+        public Map<String,String> addedControllerNodeIPs;
+        public Map<String,String> removedControllerNodeIPs;
+        public HAControllerNodeIPUpdate(
+                HashMap<String,String> curControllerNodeIPs,  
+                HashMap<String,String> addedControllerNodeIPs,  
+                HashMap<String,String> removedControllerNodeIPs) {
+            this.curControllerNodeIPs = curControllerNodeIPs;
+            this.addedControllerNodeIPs = addedControllerNodeIPs;
+            this.removedControllerNodeIPs = removedControllerNodeIPs;
+        }
+        public void dispatch() {
+            if (log.isTraceEnabled()) {
+                log.trace("Dispatching HA Controller Node IP update "
+                        + "curIPs = {}, addedIPs = {}, removedIPs = {}",
+                        new Object[] { curControllerNodeIPs, addedControllerNodeIPs,
+                            removedControllerNodeIPs }
+                        );
+            }
+            if (haListeners != null) {
+                for (IHAListener listener: haListeners) {
+                    listener.controllerNodeIPsChanged(curControllerNodeIPs,
+                            addedControllerNodeIPs, removedControllerNodeIPs);
+                }
+            }
+        }
+    }
+    
+    // ***************
+    // Getters/Setters
+    // ***************
+    
+    public void setStorageSourceService(IStorageSourceService storageSource) {
+        this.storageSource = storageSource;
+    }
+    
+    public void setCounterStore(ICounterStoreService counterStore) {
+        this.counterStore = counterStore;
+    }
+    
+    public void setPktInProcessingService(IPktInProcessingTimeService pits) {
+        this.pktinProcTime = pits;
+    }
+    
+    public void setRestApiService(IRestApiService restApi) {
+        this.restApi = restApi;
+    }
+    
+    public void setThreadPoolService(IThreadPoolService tp) {
+        this.threadPool = tp;
+    }
+
+    @Override
+    public Role getRole() {
+        synchronized(roleChanger) {
+            return role;
+        }
+    }
+    
+    @Override
+    public void setRole(Role role) {
+        if (role == null) throw new NullPointerException("Role can not be null.");
+        if (role == Role.MASTER && this.role == Role.SLAVE) {
+            // Reset db state to Inactive for all switches. 
+            updateAllInactiveSwitchInfo();
+        }
+        
+        // Need to synchronize to ensure a reliable ordering on role request
+        // message sends and to ensure the list of connected switches is stable
+        // RoleChanger will handle the actual sending of the message and 
+        // timeout handling
+        // @see RoleChanger
+        synchronized(roleChanger) {
+            if (role.equals(this.role)) {
+                log.debug("Ignoring role change: role is already {}", role);
+                return;
+            }
+
+            Role oldRole = this.role;
+            this.role = role;
+            
+            log.debug("Submitting role change request to role {}", role);
+            roleChanger.submitRequest(connectedSwitches, role);
+            
+            // Enqueue an update for our listeners.
+            try {
+                this.updates.put(new HARoleUpdate(role, oldRole));
+            } catch (InterruptedException e) {
+                log.error("Failure adding update to queue", e);
+            }
+        }
+    }
+    
+    
+    
+    // **********************
+    // ChannelUpstreamHandler
+    // **********************
+    
+    /**
+     * Return a new channel handler for processing a switch connection
+     * @param state The channel state object for the connection
+     * @return the new channel handler
+     */
+    protected ChannelUpstreamHandler getChannelHandler(OFChannelState state) {
+        return new OFChannelHandler(state);
+    }
+    
+    /**
+     * Channel handler deals with the switch connection and dispatches
+     * switch messages to the appropriate locations.
+     * @author readams
+     */
+    protected class OFChannelHandler 
+        extends IdleStateAwareChannelUpstreamHandler {
+        protected OFSwitchImpl sw;
+        protected OFChannelState state;
+        
+        public OFChannelHandler(OFChannelState state) {
+            this.state = state;
+        }
+
+        @Override
+        @LogMessageDoc(message="New switch connection from {ip address}",
+                       explanation="A new switch has connected from the " + 
+                                "specified IP address")
+        public void channelConnected(ChannelHandlerContext ctx,
+                                     ChannelStateEvent e) throws Exception {
+            log.info("New switch connection from {}",
+                     e.getChannel().getRemoteAddress());
+            
+            sw = new OFSwitchImpl();
+            sw.setChannel(e.getChannel());
+            sw.setFloodlightProvider(Controller.this);
+            sw.setThreadPoolService(threadPool);
+            
+            List<OFMessage> msglist = new ArrayList<OFMessage>(1);
+            msglist.add(factory.getMessage(OFType.HELLO));
+            e.getChannel().write(msglist);
+
+        }
+
+        @Override
+        @LogMessageDoc(message="Disconnected switch {switch information}",
+                       explanation="The specified switch has disconnected.")
+        public void channelDisconnected(ChannelHandlerContext ctx,
+                                        ChannelStateEvent e) throws Exception {
+            if (sw != null && state.hsState == HandshakeState.READY) {
+                if (activeSwitches.containsKey(sw.getId())) {
+                    // It's safe to call removeSwitch even though the map might
+                    // not contain this particular switch but another with the 
+                    // same DPID
+                    removeSwitch(sw);
+                }
+                synchronized(roleChanger) {
+                    connectedSwitches.remove(sw);
+                }
+                sw.setConnected(false);
+            }
+            log.info("Disconnected switch {}", sw);
+        }
+
+        @Override
+        @LogMessageDocs({
+            @LogMessageDoc(level="ERROR",
+                    message="Disconnecting switch {switch} due to read timeout",
+                    explanation="The connected switch has failed to send any " + 
+                                "messages or respond to echo requests",
+                    recommendation=LogMessageDoc.CHECK_SWITCH),
+            @LogMessageDoc(level="ERROR",
+                    message="Disconnecting switch {switch}: failed to " + 
+                            "complete handshake",
+                    explanation="The switch did not respond correctly " + 
+                                "to handshake messages",
+                    recommendation=LogMessageDoc.CHECK_SWITCH),
+            @LogMessageDoc(level="ERROR",
+                    message="Disconnecting switch {switch} due to IO Error: {}",
+                    explanation="There was an error communicating with the switch",
+                    recommendation=LogMessageDoc.CHECK_SWITCH),
+            @LogMessageDoc(level="ERROR",
+                    message="Disconnecting switch {switch} due to switch " + 
+                            "state error: {error}",
+                    explanation="The switch sent an unexpected message",
+                    recommendation=LogMessageDoc.CHECK_SWITCH),
+            @LogMessageDoc(level="ERROR",
+                    message="Disconnecting switch {switch} due to " +
+                            "message parse failure",
+                    explanation="Could not parse a message from the switch",
+                    recommendation=LogMessageDoc.CHECK_SWITCH),
+            @LogMessageDoc(level="ERROR",
+                    message="Terminating controller due to storage exception",
+                    explanation=ERROR_DATABASE,
+                    recommendation=LogMessageDoc.CHECK_CONTROLLER),
+            @LogMessageDoc(level="ERROR",
+                    message="Could not process message: queue full",
+                    explanation="OpenFlow messages are arriving faster than " +
+                                " the controller can process them.",
+                    recommendation=LogMessageDoc.CHECK_CONTROLLER),
+            @LogMessageDoc(level="ERROR",
+                    message="Error while processing message " +
+                            "from switch {switch} {cause}",
+                    explanation="An error occurred processing the switch message",
+                    recommendation=LogMessageDoc.GENERIC_ACTION)
+        })
+        public void exceptionCaught(ChannelHandlerContext ctx, ExceptionEvent e)
+                throws Exception {
+            if (e.getCause() instanceof ReadTimeoutException) {
+                // switch timeout
+                log.error("Disconnecting switch {} due to read timeout", sw);
+                ctx.getChannel().close();
+            } else if (e.getCause() instanceof HandshakeTimeoutException) {
+                log.error("Disconnecting switch {}: failed to complete handshake", 
+                          sw);
+                ctx.getChannel().close();
+            } else if (e.getCause() instanceof ClosedChannelException) {
+                //log.warn("Channel for sw {} already closed", sw);
+            } else if (e.getCause() instanceof IOException) {
+                log.error("Disconnecting switch {} due to IO Error: {}",
+                          sw, e.getCause().getMessage());
+                ctx.getChannel().close();
+            } else if (e.getCause() instanceof SwitchStateException) {
+                log.error("Disconnecting switch {} due to switch state error: {}", 
+                          sw, e.getCause().getMessage());
+                ctx.getChannel().close();
+            } else if (e.getCause() instanceof MessageParseException) {
+                log.error("Disconnecting switch " + sw +
+                          " due to message parse failure", 
+                          e.getCause());
+                ctx.getChannel().close();
+            } else if (e.getCause() instanceof StorageException) {
+                log.error("Terminating controller due to storage exception", 
+                          e.getCause());
+                terminate();
+            } else if (e.getCause() instanceof RejectedExecutionException) {
+                log.warn("Could not process message: queue full");
+            } else {
+                log.error("Error while processing message from switch " + sw,
+                          e.getCause());
+                ctx.getChannel().close();
+            }
+        }
+
+        @Override
+        public void channelIdle(ChannelHandlerContext ctx, IdleStateEvent e)
+                throws Exception {
+            List<OFMessage> msglist = new ArrayList<OFMessage>(1);
+            msglist.add(factory.getMessage(OFType.ECHO_REQUEST));
+            e.getChannel().write(msglist);
+        }
+
+        @Override
+        public void messageReceived(ChannelHandlerContext ctx, MessageEvent e)
+                throws Exception {
+            if (e.getMessage() instanceof List) {
+                @SuppressWarnings("unchecked")
+                List<OFMessage> msglist = (List<OFMessage>)e.getMessage();
+
+                for (OFMessage ofm : msglist) {
+                    try {
+                        processOFMessage(ofm);
+                    }
+                    catch (Exception ex) {
+                        // We are the last handler in the stream, so run the 
+                        // exception through the channel again by passing in 
+                        // ctx.getChannel().
+                        Channels.fireExceptionCaught(ctx.getChannel(), ex);
+                    }
+                }
+
+                // Flush all flow-mods/packet-out generated from this "train"
+                OFSwitchImpl.flush_all();
+            }
+        }
+        
+        /**
+         * Process the request for the switch description
+         */
+        @LogMessageDoc(level="ERROR",
+                message="Exception in reading description " + 
+                        " during handshake {exception}",
+                explanation="Could not process the switch description string",
+                recommendation=LogMessageDoc.CHECK_SWITCH)
+        void processSwitchDescReply() {
+            try {
+                // Read description, if it has been updated
+                @SuppressWarnings("unchecked")
+                Future<List<OFStatistics>> desc_future =
+                    (Future<List<OFStatistics>>)sw.
+                        getAttribute(IOFSwitch.SWITCH_DESCRIPTION_FUTURE);
+                List<OFStatistics> values = 
+                        desc_future.get(0, TimeUnit.MILLISECONDS);
+                if (values != null) {
+                    OFDescriptionStatistics description = 
+                            new OFDescriptionStatistics();
+                    ChannelBuffer data = 
+                            ChannelBuffers.buffer(description.getLength());
+                    for (OFStatistics f : values) {
+                        f.writeTo(data);
+                        description.readFrom(data);
+                        break; // SHOULD be a list of length 1
+                    }
+                    sw.setAttribute(IOFSwitch.SWITCH_DESCRIPTION_DATA, 
+                                    description);
+                    sw.setSwitchProperties(description);
+                    data = null;
+
+                    // At this time, also set other switch properties from storage
+                    boolean is_core_switch = false;
+                    IResultSet resultSet = null;
+                    try {
+                        String swid = sw.getStringId();
+                        resultSet = 
+                                storageSource.getRow(SWITCH_CONFIG_TABLE_NAME, swid);
+                        for (Iterator<IResultSet> it = 
+                                resultSet.iterator(); it.hasNext();) {
+                            // In case of multiple rows, use the status
+                            // in last row?
+                            Map<String, Object> row = it.next().getRow();
+                            if (row.containsKey(SWITCH_CONFIG_CORE_SWITCH)) {
+                                if (log.isDebugEnabled()) {
+                                    log.debug("Reading SWITCH_IS_CORE_SWITCH " + 
+                                              "config for switch={}, is-core={}",
+                                              sw, row.get(SWITCH_CONFIG_CORE_SWITCH));
+                                }
+                                String ics = 
+                                        (String)row.get(SWITCH_CONFIG_CORE_SWITCH);
+                                is_core_switch = ics.equals("true");
+                            }
+                        }
+                    }
+                    finally {
+                        if (resultSet != null)
+                            resultSet.close();
+                    }
+                    if (is_core_switch) {
+                        sw.setAttribute(IOFSwitch.SWITCH_IS_CORE_SWITCH, 
+                                        Boolean.TRUE);
+                    }
+                }
+                sw.removeAttribute(IOFSwitch.SWITCH_DESCRIPTION_FUTURE);
+                state.hasDescription = true;
+                checkSwitchReady();
+            }
+            catch (InterruptedException ex) {
+                // Ignore
+            }
+            catch (TimeoutException ex) {
+                // Ignore
+            } catch (Exception ex) {
+                log.error("Exception in reading description " + 
+                          " during handshake", ex);
+            }
+        }
+
+        /**
+         * Send initial switch setup information that we need before adding
+         * the switch
+         * @throws IOException
+         */
+        void sendHelloConfiguration() throws IOException {
+            // Send initial Features Request
+            sw.write(factory.getMessage(OFType.FEATURES_REQUEST), null);
+        }
+        
+        /**
+         * Send the configuration requests we can only do after we have
+         * the features reply
+         * @throws IOException
+         */
+        void sendFeatureReplyConfiguration() throws IOException {
+            // Ensure we receive the full packet via PacketIn
+            OFSetConfig config = (OFSetConfig) factory
+                    .getMessage(OFType.SET_CONFIG);
+            config.setMissSendLength((short) 0xffff)
+            .setLengthU(OFSwitchConfig.MINIMUM_LENGTH);
+            sw.write(config, null);
+            sw.write(factory.getMessage(OFType.GET_CONFIG_REQUEST),
+                    null);
+
+            // Get Description to set switch-specific flags
+            OFStatisticsRequest req = new OFStatisticsRequest();
+            req.setStatisticType(OFStatisticsType.DESC);
+            req.setLengthU(req.getLengthU());
+            Future<List<OFStatistics>> dfuture = 
+                    sw.getStatistics(req);
+            sw.setAttribute(IOFSwitch.SWITCH_DESCRIPTION_FUTURE,
+                    dfuture);
+
+        }
+        
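+        /**
+         * Check whether the handshake can be completed.  Once the features
+         * reply, the description statistics reply and the get-config reply
+         * have all been received, the handshake state moves to READY and
+         * the switch is either probed with an initial role request (when HA
+         * roles are enabled) or added directly to the active switch list.
+         */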
+        protected void checkSwitchReady() {
+            if (state.hsState == HandshakeState.FEATURES_REPLY &&
+                    state.hasDescription && state.hasGetConfigReply) {
+                
+                state.hsState = HandshakeState.READY;
+                
+                synchronized(roleChanger) {
+                    // We need to keep track of all of the switches that are connected
+                    // to the controller, in any role, so that we can later send the 
+                    // role request messages when the controller role changes.
+                    // We need to be synchronized while doing this: we must not 
+                    // send another role request to the connectedSwitches until
+                    // we have added this new switch to connectedSwitches 
+                    // *and* sent the current role to the new switch.
+                    connectedSwitches.add(sw);
+                    
+                    if (role != null) {
+                        // Send a role request if role support is enabled for the controller
+                        // This is a probe that we'll use to determine if the switch
+                        // actually supports the role request message. If it does we'll
+                        // get back a role reply message. If it doesn't we'll get back an
+                        // OFError message. 
+                        // If role is MASTER we will promote switch to active
+                        // list when we receive the switch's role reply messages
+                        log.debug("This controller's role is {}, " + 
+                                "sending initial role request msg to {}",
+                                role, sw);
+                        Collection<OFSwitchImpl> swList = new ArrayList<OFSwitchImpl>(1);
+                        swList.add(sw);
+                        roleChanger.submitRequest(swList, role);
+                    } 
+                    else {
+                        // Role support not enabled on controller (for now)
+                        // automatically promote switch to active state. 
+                        log.debug("This controller's role is null, " + 
+                                "not sending role request msg to {}",
+                                role, sw);
+                        // Need to clear FlowMods before we add the switch
+                        // and dispatch updates otherwise we have a race condition.
+                        sw.clearAllFlowMods();
+                        addSwitch(sw);
+                        state.firstRoleReplyReceived = true;
+                    }
+                }
+            }
+        }
+                
+        /* Handle a role reply message we received from the switch. Since
+         * netty serializes message dispatch we don't need to synchronize 
+         * against other receive operations from the same switch, so no need
+         * to synchronize addSwitch(), removeSwitch() operations from the same
+         * connection. 
+         * FIXME: However, when a switch with the same DPID connects we do
+         * need some synchronization. However, handling switches with same
+         * DPID needs to be revisited anyways (get rid of r/w-lock and synchronous
+         * removedSwitch notification).
+         * 
+         */
+        @LogMessageDoc(level="ERROR",
+                message="Invalid role value in role reply message",
+                explanation="Was unable to set the HA role (master or slave) " +
+                        "for the controller.",
+                recommendation=LogMessageDoc.CHECK_CONTROLLER)
+        protected void handleRoleReplyMessage(OFVendor vendorMessage,
+                                    OFRoleReplyVendorData roleReplyVendorData) {
+            // Map from the role code in the message to our role enum
+            int nxRole = roleReplyVendorData.getRole();
+            Role role = null;
+            switch (nxRole) {
+                case OFRoleVendorData.NX_ROLE_OTHER:
+                    role = Role.EQUAL;
+                    break;
+                case OFRoleVendorData.NX_ROLE_MASTER:
+                    role = Role.MASTER;
+                    break;
+                case OFRoleVendorData.NX_ROLE_SLAVE:
+                    role = Role.SLAVE;
+                    break;
+                default:
+                    log.error("Invalid role value in role reply message");
+                    sw.getChannel().close();
+                    return;
+            }
+            
+            log.debug("Handling role reply for role {} from {}. " +
+                      "Controller's role is {} ", 
+                      new Object[] { role, sw, Controller.this.role} 
+                      );
+            
+            sw.deliverRoleReply(vendorMessage.getXid(), role);
+            
+            boolean isActive = activeSwitches.containsKey(sw.getId());
+            if (!isActive && sw.isActive()) {
+                // Transition from SLAVE to MASTER.
+                
+                if (!state.firstRoleReplyReceived || 
+                    getAlwaysClearFlowsOnSwAdd()) {
+                    // This is the first role-reply message we receive from
+                    // this switch or roles were disabled when the switch
+                    // connected: 
+                    // Delete all pre-existing flows for new connections to 
+                    // the master
+                    //
+                    // FIXME: Need to think more about what the test should 
+                    // be for when we flush the flow-table? For example, 
+                    // if all the controllers are temporarily in the backup 
+                    // role (e.g. right after a failure of the master 
+                    // controller) at the point the switch connects, then 
+                    // all of the controllers will initially connect as 
+                    // backup controllers and not flush the flow-table. 
+                    // Then when one of them is promoted to master following
+                    // the master controller election the flow-table
+                    // will still not be flushed because that's treated as 
+                    // a failover event where we don't want to flush the 
+                    // flow-table. The end result would be that the flow 
+                    // table for a newly connected switch is never
+                    // flushed. Not sure how to handle that case though...
+                    sw.clearAllFlowMods();
+                    log.debug("First role reply from master switch {}, " +
+                              "clear FlowTable to active switch list",
+                             HexString.toHexString(sw.getId()));
+                }
+                
+                // Some switches don't seem to update us with port
+                // status messages while in slave role.
+                readSwitchPortStateFromStorage(sw);                
+                
+                // Only add the switch to the active switch list if 
+                // we're not in the slave role. Note that if the role 
+                // attribute is null, then that means that the switch 
+                // doesn't support the role request messages, so in that
+                // case we're effectively in the EQUAL role and the 
+                // switch should be included in the active switch list.
+                addSwitch(sw);
+                log.debug("Added master switch {} to active switch list",
+                         HexString.toHexString(sw.getId()));
+
+            } 
+            else if (isActive && !sw.isActive()) {
+                // Transition from MASTER to SLAVE: remove switch 
+                // from active switch list. 
+                log.debug("Removed slave switch {} from active switch" +
+                          " list", HexString.toHexString(sw.getId()));
+                removeSwitch(sw);
+            }
+            
+            // Indicate that we have received a role reply message. 
+            state.firstRoleReplyReceived = true;
+        }
+
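+        /**
+         * Handle an OpenFlow VENDOR message.  Only the Nicira role reply
+         * is currently processed; other vendor messages are logged and
+         * ignored.  The return value indicates whether the message should
+         * be dispatched further (always false in this implementation).
+         */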
+        protected boolean handleVendorMessage(OFVendor vendorMessage) {
+            boolean shouldHandleMessage = false;
+            int vendor = vendorMessage.getVendor();
+            switch (vendor) {
+                case OFNiciraVendorData.NX_VENDOR_ID:
+                    OFNiciraVendorData niciraVendorData =
+                        (OFNiciraVendorData)vendorMessage.getVendorData();
+                    int dataType = niciraVendorData.getDataType();
+                    switch (dataType) {
+                        case OFRoleReplyVendorData.NXT_ROLE_REPLY:
+                            OFRoleReplyVendorData roleReplyVendorData =
+                                    (OFRoleReplyVendorData) niciraVendorData;
+                            handleRoleReplyMessage(vendorMessage, 
+                                                   roleReplyVendorData);
+                            break;
+                        default:
+                            log.warn("Unhandled Nicira VENDOR message; " +
+                                     "data type = {}", dataType);
+                            break;
+                    }
+                    break;
+                default:
+                    log.warn("Unhandled VENDOR message; vendor id = {}", vendor);
+                    break;
+            }
+            
+            return shouldHandleMessage;
+        }
+
+        /**
+         * Dispatch an Openflow message from a switch to the appropriate
+         * handler.
+         * @param m The message to process
+         * @throws IOException
+         * @throws SwitchStateException 
+         */
+        @LogMessageDocs({
+            @LogMessageDoc(level="WARN",
+                    message="Config Reply from {switch} has " +
+                            "miss length set to {length}",
+                    explanation="The controller requires that the switch " +
+                            "use a miss length of 0xffff for correct " +
+                            "function",
+                    recommendation="Use a different switch to ensure " +
+                            "correct function"),
+            @LogMessageDoc(level="WARN",
+                    message="Received ERROR from sw {switch} that "
+                            +"indicates roles are not supported "
+                            +"but we have received a valid "
+                            +"role reply earlier",
+                    explanation="The switch sent a confusing message to the" +
+                            "controller")
+        })
+        protected void processOFMessage(OFMessage m)
+                throws IOException, SwitchStateException {
+            boolean shouldHandleMessage = false;
+            
+            switch (m.getType()) {
+                case HELLO:
+                    if (log.isTraceEnabled())
+                        log.trace("HELLO from {}", sw);
+                    
+                    if (state.hsState.equals(HandshakeState.START)) {
+                        state.hsState = HandshakeState.HELLO;
+                        sendHelloConfiguration();
+                    } else {
+                        throw new SwitchStateException("Unexpected HELLO from " 
+                                                       + sw);
+                    }
+                    break;
+                case ECHO_REQUEST:
+                    OFEchoReply reply =
+                        (OFEchoReply) factory.getMessage(OFType.ECHO_REPLY);
+                    reply.setXid(m.getXid());
+                    sw.write(reply, null);
+                    break;
+                case ECHO_REPLY:
+                    break;
+                case FEATURES_REPLY:
+                    if (log.isTraceEnabled())
+                        log.trace("Features Reply from {}", sw);
+                    
+                    sw.setFeaturesReply((OFFeaturesReply) m);
+                    if (state.hsState.equals(HandshakeState.HELLO)) {
+                        sendFeatureReplyConfiguration();
+                        state.hsState = HandshakeState.FEATURES_REPLY;
+                        // uncomment to enable "dumb" switches like cbench
+                        // state.hsState = HandshakeState.READY;
+                        // addSwitch(sw);
+                    } else {
+                        // return results to rest api caller
+                        sw.deliverOFFeaturesReply(m);
+                        // update database
+                        updateActiveSwitchInfo(sw);
+                    }
+                    break;
+                case GET_CONFIG_REPLY:
+                    if (log.isTraceEnabled())
+                        log.trace("Get config reply from {}", sw);
+                    
+                    if (!state.hsState.equals(HandshakeState.FEATURES_REPLY)) {
+                        String em = "Unexpected GET_CONFIG_REPLY from " + sw;
+                        throw new SwitchStateException(em);
+                    }
+                    OFGetConfigReply cr = (OFGetConfigReply) m;
+                    if (cr.getMissSendLength() == (short)0xffff) {
+                        log.trace("Config Reply from {} confirms " + 
+                                  "miss length set to 0xffff", sw);
+                    } else {
+                        log.warn("Config Reply from {} has " +
+                                 "miss length set to {}", 
+                                 sw, cr.getMissSendLength() & 0xffff);
+                    }
+                    state.hasGetConfigReply = true;
+                    checkSwitchReady();
+                    break;
+                case VENDOR:
+                    shouldHandleMessage = handleVendorMessage((OFVendor)m);
+                    break;
+                case ERROR:
+                    // TODO: we need better error handling. Especially for
+                    // request/reply style messages (stats, roles) we should have
+                    // a unified way to look up the xid in the error message.
+                    // This will probably involve rewriting the way we handle
+                    // request/reply style messages.
+                    OFError error = (OFError) m;
+                    boolean shouldLogError = true;
+                    // TODO: should we check that firstRoleReplyReceived is false,
+                    // i.e., check only whether the first request fails?
+                    if (sw.checkFirstPendingRoleRequestXid(error.getXid())) {
+                        boolean isBadVendorError =
+                            (error.getErrorType() == OFError.OFErrorType.
+                                    OFPET_BAD_REQUEST.getValue());
+                        // We expect to receive a bad vendor error when 
+                        // we're connected to a switch that doesn't support 
+                        // the Nicira vendor extensions (i.e. not OVS or 
+                        // derived from OVS).  By protocol, it should also be
+                        // BAD_VENDOR, but too many switch implementations
+                        // get it wrong and we can already check the xid()
+                        // so we can ignore the type with confidence that this
+                        // is not a spurious error
+                        shouldLogError = !isBadVendorError;
+                        if (isBadVendorError) {
+                            if (state.firstRoleReplyReceived && (role != null)) {
+                                log.warn("Received ERROR from sw {} that "
+                                          +"indicates roles are not supported "
+                                          +"but we have received a valid "
+                                          +"role reply earlier", sw);
+                            }
+                            state.firstRoleReplyReceived = true;
+                            sw.deliverRoleRequestNotSupported(error.getXid());
+                            synchronized(roleChanger) {
+                                if (sw.role == null && Controller.this.role==Role.SLAVE) {
+                                    // the switch doesn't understand role request
+                                    // messages and the current controller role is
+                                    // slave. We need to disconnect the switch. 
+                                    // @see RoleChanger for rationale
+                                    sw.getChannel().close();
+                                }
+                                else if (sw.role == null) {
+                                    // Controller's role is master: add to
+                                    // active 
+                                    // TODO: check if clearing flow table is
+                                    // right choice here.
+                                    // Need to clear FlowMods before we add the switch
+                                    // and dispatch updates otherwise we have a race condition.
+                                    // TODO: switch update is async. Won't we still have a potential
+                                    //       race condition? 
+                                    sw.clearAllFlowMods();
+                                    addSwitch(sw);
+                                }
+                            }
+                        }
+                        else {
+                            // TODO: Is this the right thing to do if we receive
+                            // some other error besides a bad vendor error? 
+                            // Presumably that means the switch did actually
+                            // understand the role request message, but there 
+                            // was some other error from processing the message.
+                            // OF 1.2 specifies a OFPET_ROLE_REQUEST_FAILED
+                            // error code, but it doesn't look like the Nicira 
+                            // role request has that. Should check OVS source 
+                            // code to see if it's possible for any other errors
+                            // to be returned.
+                            // If we received an error the switch is not
+                            // in the correct role, so we need to disconnect it. 
+                            // We could also resend the request but then we need to
+                            // check if there are other pending requests in which
+                            // case we shouldn't resend. If we do resend we need
+                            // to make sure that the switch eventually accepts one
+                            // of our requests or disconnect the switch. This feels
+                            // cumbersome. 
+                            sw.getChannel().close();
+                        }
+                    }
+                    // Once we support OF 1.2, we'd add code to handle it here.
+                    //if (error.getXid() == state.ofRoleRequestXid) {
+                    //}
+                    if (shouldLogError)
+                        logError(sw, error);
+                    break;
+                case STATS_REPLY:
+                    if (state.hsState.ordinal() < 
+                        HandshakeState.FEATURES_REPLY.ordinal()) {
+                        String em = "Unexpected STATS_REPLY from " + sw;
+                        throw new SwitchStateException(em);
+                    }
+                    sw.deliverStatisticsReply(m);
+                    if (sw.hasAttribute(IOFSwitch.SWITCH_DESCRIPTION_FUTURE)) {
+                        processSwitchDescReply();
+                    }
+                    break;
+                case PORT_STATUS:
+                    // We want to update our port state info even if we're in 
+                    // the slave role, but we only want to update storage if 
+                    // we're the master (or equal).
+                    boolean updateStorage = state.hsState.
+                                                equals(HandshakeState.READY) &&
+                                                (sw.getRole() != Role.SLAVE);
+                    handlePortStatusMessage(sw, (OFPortStatus)m, updateStorage);
+                    shouldHandleMessage = true;
+                    break;
+
+                default:
+                    shouldHandleMessage = true;
+                    break;
+            }
+            
+            if (shouldHandleMessage) {
+                sw.getListenerReadLock().lock();
+                try {
+                    if (sw.isConnected()) {
+                        if (!state.hsState.equals(HandshakeState.READY)) {
+                            log.debug("Ignoring message type {} received " + 
+                                      "from switch {} before switch is " + 
+                                      "fully configured.", m.getType(), sw);
+                        }
+                        // Check if the controller is in the slave role for the 
+                        // switch. If it is, then don't dispatch the message to 
+                        // the listeners.
+                        // TODO: Should we dispatch messages that we expect to 
+                        // receive when we're in the slave role, e.g. port 
+                        // status messages? Since we're "hiding" switches from 
+                        // the listeners when we're in the slave role, then it 
+                        // seems a little weird to dispatch port status messages
+                        // to them. On the other hand there might be special 
+                        // modules that care about all of the connected switches
+                        // and would like to receive port status notifications.
+                        else if (sw.getRole() == Role.SLAVE) {
+                            // Don't log message if it's a port status message 
+                            // since we expect to receive those from the switch 
+                            // and don't want to emit spurious messages.
+                            if (m.getType() != OFType.PORT_STATUS) {
+                                log.debug("Ignoring message type {} received " +
+                                        "from switch {} while in the slave role.",
+                                        m.getType(), sw);
+                            }
+                        } else {
+                            handleMessage(sw, m, null);
+                        }
+                    }
+                }
+                finally {
+                    sw.getListenerReadLock().unlock();
+                }
+            }
+        }
+    }
+
+    // ****************
+    // Message handlers
+    // ****************
+    
+    protected void handlePortStatusMessage(IOFSwitch sw,
+                                           OFPortStatus m,
+                                           boolean updateStorage) {
+        short portNumber = m.getDesc().getPortNumber();
+        OFPhysicalPort port = m.getDesc();
+        if (m.getReason() == (byte)OFPortReason.OFPPR_MODIFY.ordinal()) {
+            sw.setPort(port);
+            if (updateStorage)
+                updatePortInfo(sw, port);
+            log.debug("Port #{} modified for {}", portNumber, sw);
+        } else if (m.getReason() == (byte)OFPortReason.OFPPR_ADD.ordinal()) {
+            sw.setPort(port);
+            if (updateStorage)
+                updatePortInfo(sw, port);
+            log.debug("Port #{} added for {}", portNumber, sw);
+        } else if (m.getReason() == 
+                   (byte)OFPortReason.OFPPR_DELETE.ordinal()) {
+            sw.deletePort(portNumber);
+            if (updateStorage)
+                removePortInfo(sw, portNumber);
+            log.debug("Port #{} deleted for {}", portNumber, sw);
+        }
+        SwitchUpdate update = new SwitchUpdate(sw, SwitchUpdateType.PORTCHANGED);
+        try {
+            this.updates.put(update);
+        } catch (InterruptedException e) {
+            log.error("Failure adding update to queue", e);
+        }
+    }
+    
+    /**
+     * flcontext_cache - Keep a thread local stack of contexts
+     */
+    protected static final ThreadLocal<Stack<FloodlightContext>> flcontext_cache =
+        new ThreadLocal <Stack<FloodlightContext>> () {
+            @Override
+            protected Stack<FloodlightContext> initialValue() {
+                return new Stack<FloodlightContext>();
+            }
+        };
+
+    /**
+     * flcontext_alloc - pop a context off the stack, if required create a new one
+     * @return FloodlightContext
+     */
+    protected static FloodlightContext flcontext_alloc() {
+        FloodlightContext flcontext = null;
+
+        if (flcontext_cache.get().empty()) {
+            flcontext = new FloodlightContext();
+        }
+        else {
+            flcontext = flcontext_cache.get().pop();
+        }
+
+        return flcontext;
+    }
+
+    /**
+     * flcontext_free - Return the context to the current thread's cache
+     * @param flcontext
+     */
+    protected void flcontext_free(FloodlightContext flcontext) {
+        flcontext.getStorage().clear();
+        flcontext_cache.get().push(flcontext);
+    }
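+    // Illustrative usage of the context pool above (a sketch of the pattern
+    // used by handleMessage() below, not an additional API):
+    //     FloodlightContext bc = flcontext_alloc();  // reuse or create
+    //     ... dispatch the message to listeners with bc ...
+    //     flcontext_free(bc);                        // clear and recycle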
+
+    /**
+     * Handle replies to certain OFMessages, and pass others off to listeners
+     * @param sw The switch for the message
+     * @param m The message
+     * @param bContext The floodlight context. If null, a floodlight context
+     * will be allocated in this function
+     * @throws IOException
+     */
+    @LogMessageDocs({
+        @LogMessageDoc(level="ERROR",
+                message="Ignoring PacketIn (Xid = {xid}) because the data" +
+                        " field is empty.",
+                explanation="The switch sent an improperly-formatted PacketIn" +
+                        " message",
+                recommendation=LogMessageDoc.CHECK_SWITCH),
+        @LogMessageDoc(level="WARN",
+                message="Unhandled OF Message: {} from {}",
+                explanation="The switch sent a message not handled by " +
+                        "the controller")
+    })
+    protected void handleMessage(IOFSwitch sw, OFMessage m,
+                                 FloodlightContext bContext)
+            throws IOException {
+        Ethernet eth = null;
+
+        switch (m.getType()) {
+            case PACKET_IN:
+                OFPacketIn pi = (OFPacketIn)m;
+                
+                if (pi.getPacketData().length <= 0) {
+                    log.error("Ignoring PacketIn (Xid = " + pi.getXid() + 
+                              ") because the data field is empty.");
+                    return;
+                }
+                
+                if (Controller.ALWAYS_DECODE_ETH) {
+                    eth = new Ethernet();
+                    eth.deserialize(pi.getPacketData(), 0,
+                            pi.getPacketData().length);
+                    counterStore.updatePacketInCounters(sw, m, eth);
+                }
+                // fall through to default case...
+
+            default:
+                
+                List<IOFMessageListener> listeners = null;
+                if (messageListeners.containsKey(m.getType())) {
+                    listeners = messageListeners.get(m.getType()).
+                            getOrderedListeners();
+                }
+                        
+                FloodlightContext bc = null;
+                if (listeners != null) {
+                    // Check if floodlight context is passed from the calling 
+                    // function, if so use that floodlight context, otherwise 
+                    // allocate one
+                    if (bContext == null) {
+                        bc = flcontext_alloc();
+                    } else {
+                        bc = bContext;
+                    }
+                    if (eth != null) {
+                        IFloodlightProviderService.bcStore.put(bc, 
+                                IFloodlightProviderService.CONTEXT_PI_PAYLOAD, 
+                                eth);
+                    }
+                    
+                    // Get the starting time (overall and per-component) of 
+                    // the processing chain for this packet if performance
+                    // monitoring is turned on
+                    pktinProcTime.bootstrap(listeners);
+                    pktinProcTime.recordStartTimePktIn();                     
+                    Command cmd;
+                    for (IOFMessageListener listener : listeners) {
+                        if (listener instanceof IOFSwitchFilter) {
+                            if (!((IOFSwitchFilter)listener).isInterested(sw)) {
+                                continue;
+                            }
+                        }
+
+                        pktinProcTime.recordStartTimeComp(listener);
+                        cmd = listener.receive(sw, m, bc);
+                        pktinProcTime.recordEndTimeComp(listener);
+                        
+                        if (Command.STOP.equals(cmd)) {
+                            break;
+                        }
+                    }
+                    pktinProcTime.recordEndTimePktIn(sw, m, bc);
+                } else {
+                    log.warn("Unhandled OF Message: {} from {}", m, sw);
+                }
+                
+                if ((bContext == null) && (bc != null)) flcontext_free(bc);
+        }
+    }
+    
+    /**
+     * Log an OpenFlow error message from a switch
+     * @param sw The switch that sent the error
+     * @param error The error message
+     */
+    @LogMessageDoc(level="ERROR",
+            message="Error {error type} {error code} from {switch}",
+            explanation="The switch responded with an unexpected error" +
+                    "to an OpenFlow message from the controller",
+            recommendation="This could indicate improper network operation. " +
+                    "If the problem persists restarting the switch and " +
+                    "controller may help."
+            )
+    protected void logError(IOFSwitch sw, OFError error) {
+        int etint = 0xffff & error.getErrorType();
+        if (etint < 0 || etint >= OFErrorType.values().length) {
+            log.error("Unknown error code {} from sw {}", etint, sw);
+            return;
+        }
+        OFErrorType et = OFErrorType.values()[etint];
+        switch (et) {
+            case OFPET_HELLO_FAILED:
+                OFHelloFailedCode hfc = 
+                    OFHelloFailedCode.values()[0xffff & error.getErrorCode()];
+                log.error("Error {} {} from {}", new Object[] {et, hfc, sw});
+                break;
+            case OFPET_BAD_REQUEST:
+                OFBadRequestCode brc = 
+                    OFBadRequestCode.values()[0xffff & error.getErrorCode()];
+                log.error("Error {} {} from {}", new Object[] {et, brc, sw});
+                break;
+            case OFPET_BAD_ACTION:
+                OFBadActionCode bac =
+                    OFBadActionCode.values()[0xffff & error.getErrorCode()];
+                log.error("Error {} {} from {}", new Object[] {et, bac, sw});
+                break;
+            case OFPET_FLOW_MOD_FAILED:
+                OFFlowModFailedCode fmfc =
+                    OFFlowModFailedCode.values()[0xffff & error.getErrorCode()];
+                log.error("Error {} {} from {}", new Object[] {et, fmfc, sw});
+                break;
+            case OFPET_PORT_MOD_FAILED:
+                OFPortModFailedCode pmfc =
+                    OFPortModFailedCode.values()[0xffff & error.getErrorCode()];
+                log.error("Error {} {} from {}", new Object[] {et, pmfc, sw});
+                break;
+            case OFPET_QUEUE_OP_FAILED:
+                OFQueueOpFailedCode qofc =
+                    OFQueueOpFailedCode.values()[0xffff & error.getErrorCode()];
+                log.error("Error {} {} from {}", new Object[] {et, qofc, sw});
+                break;
+            default:
+                break;
+        }
+    }
+    
+    /**
+     * Add a switch to the active switch list and call the switch listeners.
+     * This happens either when a switch first connects (and the controller is
+     * not in the slave role) or when the role of the controller changes from
+     * slave to master.
+     * @param sw the switch that has been added
+     */
+    // TODO: need to rethink locking and the synchronous switch update.
+    //       We can / should also handle duplicate DPIDs in connectedSwitches
+    @LogMessageDoc(level="ERROR",
+            message="New switch added {switch} for already-added switch {switch}",
+            explanation="A switch with the same DPID as another switch " +
+                    "connected to the controller.  This can be caused by " +
+                    "multiple switches configured with the same DPID, or " +
+                    "by a switch reconnected very quickly after " +
+                    "disconnecting.",
+            recommendation="If this happens repeatedly, it is likely there " +
+                    "are switches with duplicate DPIDs on the network.  " +
+                    "Reconfigure the appropriate switches.  If it happens " +
+                    "very rarely, then it is likely this is a transient " +
+                    "network problem that can be ignored."
+            )
+    protected void addSwitch(IOFSwitch sw) {
+        // TODO: is it safe to modify the HashMap without holding 
+        // the old switch's lock?
+        OFSwitchImpl oldSw = (OFSwitchImpl) this.activeSwitches.put(sw.getId(), sw);
+        if (sw == oldSw) {
+            // Note == for object equality, not .equals for value
+            log.info("New add switch for pre-existing switch {}", sw);
+            return;
+        }
+        
+        if (oldSw != null) {
+            oldSw.getListenerWriteLock().lock();
+            try {
+                log.error("New switch added {} for already-added switch {}",
+                          sw, oldSw);
+                // Set the connected flag to false to suppress calling
+                // the listeners for this switch in processOFMessage
+                oldSw.setConnected(false);
+                
+                oldSw.cancelAllStatisticsReplies();
+                
+                updateInactiveSwitchInfo(oldSw);
+    
+                // we need to clean out old switch state definitively 
+                // before adding the new switch
+                // FIXME: It seems not completely kosher to call the
+                // switch listeners here. I thought one of the points of
+                // having the asynchronous switch update mechanism was so
+                // the addedSwitch and removedSwitch were always called
+                // from a single thread to simplify concurrency issues
+                // for the listener.
+                if (switchListeners != null) {
+                    for (IOFSwitchListener listener : switchListeners) {
+                        listener.removedSwitch(oldSw);
+                    }
+                }
+                // will eventually trigger a removeSwitch(), which will cause
+                // a "Not removing Switch ... already removed debug message.
+                // TODO: Figure out a way to handle this that avoids the
+                // spurious debug message.
+                oldSw.getChannel().close();
+            }
+            finally {
+                oldSw.getListenerWriteLock().unlock();
+            }
+        }
+        
+        updateActiveSwitchInfo(sw);
+        SwitchUpdate update = new SwitchUpdate(sw, SwitchUpdateType.ADDED);
+        try {
+            this.updates.put(update);
+        } catch (InterruptedException e) {
+            log.error("Failure adding update to queue", e);
+        }
+    }
+
+    /**
+     * Remove a switch from the active switch list and call the switch listeners.
+     * This happens either when the switch is disconnected or when the
+     * controller's role for the switch changes from master to slave.
+     * @param sw the switch that has been removed
+     */
+    protected void removeSwitch(IOFSwitch sw) {
+        // No need to acquire the listener lock, since
+        // this method is only called after netty has processed all
+        // pending messages
+        log.debug("removeSwitch: {}", sw);
+        if (!this.activeSwitches.remove(sw.getId(), sw) || !sw.isConnected()) {
+            log.debug("Not removing switch {}; already removed", sw);
+            return;
+        }
+        // We cancel all outstanding statistics replies when the switch
+        // transitions out of the active state. In the future we might allow
+        // statistics requests from slave controllers; then we would need to
+        // move this cancellation to switch disconnect.
+        sw.cancelAllStatisticsReplies();
+            
+        // FIXME: I think there's a race condition if we call updateInactiveSwitchInfo
+        // here if role support is enabled. In that case if the switch is being
+        // removed because we've been switched to being in the slave role, then I think
+        // it's possible that the new master may have already been promoted to master
+        // and written out the active switch state to storage. If we now execute
+        // updateInactiveSwitchInfo we may wipe out all of the state that was
+        // written out by the new master. Maybe need to revisit how we handle all
+        // of the switch state that's written to storage.
+        
+        updateInactiveSwitchInfo(sw);
+        SwitchUpdate update = new SwitchUpdate(sw, SwitchUpdateType.REMOVED);
+        try {
+            this.updates.put(update);
+        } catch (InterruptedException e) {
+            log.error("Failure adding update to queue", e);
+        }
+    }
+    
+    // ***************
+    // IFloodlightProvider
+    // ***************
+    
+    @Override
+    public synchronized void addOFMessageListener(OFType type, 
+                                                  IOFMessageListener listener) {
+        ListenerDispatcher<OFType, IOFMessageListener> ldd = 
+            messageListeners.get(type);
+        if (ldd == null) {
+            ldd = new ListenerDispatcher<OFType, IOFMessageListener>();
+            messageListeners.put(type, ldd);
+        }
+        ldd.addListener(type, listener);
+    }
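+    // Usage sketch (hypothetical caller, illustrative only): a module that
+    // wants PACKET_IN events would register itself via
+    //     provider.addOFMessageListener(OFType.PACKET_IN, thisListener);
+    // where "provider" is the IFloodlightProviderService reference it holds.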
+
+    @Override
+    public synchronized void removeOFMessageListener(OFType type,
+                                                     IOFMessageListener listener) {
+        ListenerDispatcher<OFType, IOFMessageListener> ldd = 
+            messageListeners.get(type);
+        if (ldd != null) {
+            ldd.removeListener(listener);
+        }
+    }
+    
+    private void logListeners() {
+        for (Map.Entry<OFType,
+                       ListenerDispatcher<OFType, 
+                                          IOFMessageListener>> entry
+             : messageListeners.entrySet()) {
+            
+            OFType type = entry.getKey();
+            ListenerDispatcher<OFType, IOFMessageListener> ldd = 
+                    entry.getValue();
+            
+            StringBuffer sb = new StringBuffer();
+            sb.append("OFListeners for ");
+            sb.append(type);
+            sb.append(": ");
+            for (IOFMessageListener l : ldd.getOrderedListeners()) {
+                sb.append(l.getName());
+                sb.append(",");
+            }
+            log.debug(sb.toString());            
+        }
+    }
+    
+    public void removeOFMessageListeners(OFType type) {
+        messageListeners.remove(type);
+    }
+
+    @Override
+    public Map<Long, IOFSwitch> getSwitches() {
+        return Collections.unmodifiableMap(this.activeSwitches);
+    }
+
+    @Override
+    public void addOFSwitchListener(IOFSwitchListener listener) {
+        this.switchListeners.add(listener);
+    }
+
+    @Override
+    public void removeOFSwitchListener(IOFSwitchListener listener) {
+        this.switchListeners.remove(listener);
+    }
+
+    @Override
+    public Map<OFType, List<IOFMessageListener>> getListeners() {
+        Map<OFType, List<IOFMessageListener>> lers = 
+            new HashMap<OFType, List<IOFMessageListener>>();
+        for(Entry<OFType, ListenerDispatcher<OFType, IOFMessageListener>> e : 
+            messageListeners.entrySet()) {
+            lers.put(e.getKey(), e.getValue().getOrderedListeners());
+        }
+        return Collections.unmodifiableMap(lers);
+    }
+    
+    @Override
+    @LogMessageDocs({
+        @LogMessageDoc(message="Failed to inject OFMessage {message} onto " +
+                "a null switch",
+                explanation="Failed to process a message because the switch " +
+                " is no longer connected."),
+        @LogMessageDoc(level="ERROR",
+                message="Error reinjecting OFMessage on switch {switch}",
+                explanation="An I/O error occured while attempting to " +
+                        "process an OpenFlow message",
+                recommendation=LogMessageDoc.CHECK_SWITCH)
+    })
+    public boolean injectOfMessage(IOFSwitch sw, OFMessage msg,
+                                   FloodlightContext bc) {
+        if (sw == null) {
+            log.info("Failed to inject OFMessage {} onto a null switch", msg);
+            return false;
+        }
+        
+        // FIXME: Do we need to be able to inject messages to switches
+        // where we're the slave controller (i.e. they're connected but
+        // not active)?
+        // FIXME: Don't we need synchronization logic here so we're holding
+        // the listener read lock when we call handleMessage? After some
+        // discussions it sounds like the right thing to do here would be to
+        // inject the message as a netty upstream channel event so it goes
+        // through the normal netty event processing, including being
+        // handled 
+        if (!activeSwitches.containsKey(sw.getId())) return false;
+        
+        try {
+            // Pass the Floodlight context to handleMessage()
+            handleMessage(sw, msg, bc);
+        } catch (IOException e) {
+            log.error("Error reinjecting OFMessage on switch {}", 
+                      HexString.toHexString(sw.getId()));
+            return false;
+        }
+        return true;
+    }
+
+    @Override
+    @LogMessageDoc(message="Calling System.exit",
+                   explanation="The controller is terminating")
+    public synchronized void terminate() {
+        log.info("Calling System.exit");
+        System.exit(1);
+    }
+    
+    @Override
+    public boolean injectOfMessage(IOFSwitch sw, OFMessage msg) {
+        // call the overloaded version with floodlight context set to null    
+        return injectOfMessage(sw, msg, null);
+    }
+    
+    @Override
+    public void handleOutgoingMessage(IOFSwitch sw, OFMessage m,
+                                      FloodlightContext bc) {
+        if (log.isTraceEnabled()) {
+            String str = OFMessage.getDataAsString(sw, m, bc);
+            log.trace("{}", str);
+        }
+
+        List<IOFMessageListener> listeners = null;
+        if (messageListeners.containsKey(m.getType())) {
+            listeners = 
+                    messageListeners.get(m.getType()).getOrderedListeners();
+        }
+            
+        if (listeners != null) {                
+            for (IOFMessageListener listener : listeners) {
+                if (listener instanceof IOFSwitchFilter) {
+                    if (!((IOFSwitchFilter)listener).isInterested(sw)) {
+                        continue;
+                    }
+                }
+                if (Command.STOP.equals(listener.receive(sw, m, bc))) {
+                    break;
+                }
+            }
+        }
+    }
+
+    @Override
+    public BasicFactory getOFMessageFactory() {
+        return factory;
+    }
+    
+    @Override
+    public String getControllerId() {
+        return controllerId;
+    }
+    
+    // **************
+    // Initialization
+    // **************
+
+    protected void updateAllInactiveSwitchInfo() {
+        if (role == Role.SLAVE) {
+            return;
+        }
+        String controllerId = getControllerId();
+        String[] switchColumns = { SWITCH_DATAPATH_ID,
+                                   SWITCH_CONTROLLER_ID,
+                                   SWITCH_ACTIVE };
+        String[] portColumns = { PORT_ID, PORT_SWITCH };
+        IResultSet switchResultSet = null;
+        try {
+            OperatorPredicate op = 
+                    new OperatorPredicate(SWITCH_CONTROLLER_ID,
+                                          OperatorPredicate.Operator.EQ,
+                                          controllerId);
+            switchResultSet = 
+                    storageSource.executeQuery(SWITCH_TABLE_NAME,
+                                               switchColumns,
+                                               op, null);
+            while (switchResultSet.next()) {
+                IResultSet portResultSet = null;
+                try {
+                    String datapathId =
+                            switchResultSet.getString(SWITCH_DATAPATH_ID);
+                    switchResultSet.setBoolean(SWITCH_ACTIVE, Boolean.FALSE);
+                    op = new OperatorPredicate(PORT_SWITCH, 
+                                               OperatorPredicate.Operator.EQ,
+                                               datapathId);
+                    portResultSet = 
+                            storageSource.executeQuery(PORT_TABLE_NAME,
+                                                       portColumns,
+                                                       op, null);
+                    while (portResultSet.next()) {
+                        portResultSet.deleteRow();
+                    }
+                    portResultSet.save();
+                }
+                finally {
+                    if (portResultSet != null)
+                        portResultSet.close();
+                }
+            }
+            switchResultSet.save();
+        }
+        finally {
+            if (switchResultSet != null)
+                switchResultSet.close();
+        }
+    }
+    
+    protected void updateControllerInfo() {
+        updateAllInactiveSwitchInfo();
+        
+        // Write out the controller info to the storage source
+        Map<String, Object> controllerInfo = new HashMap<String, Object>();
+        String id = getControllerId();
+        controllerInfo.put(CONTROLLER_ID, id);
+        storageSource.updateRow(CONTROLLER_TABLE_NAME, controllerInfo);
+    }
+    
+    protected void updateActiveSwitchInfo(IOFSwitch sw) {
+        if (role == Role.SLAVE) {
+            return;
+        }
+        // Obtain the row info for the switch
+        Map<String, Object> switchInfo = new HashMap<String, Object>();
+        String datapathIdString = sw.getStringId();
+        switchInfo.put(SWITCH_DATAPATH_ID, datapathIdString);
+        String controllerId = getControllerId();
+        switchInfo.put(SWITCH_CONTROLLER_ID, controllerId);
+        Date connectedSince = sw.getConnectedSince();
+        switchInfo.put(SWITCH_CONNECTED_SINCE, connectedSince);
+        Channel channel = sw.getChannel();
+        SocketAddress socketAddress = channel.getRemoteAddress();
+        if (socketAddress != null) {
+            String socketAddressString = socketAddress.toString();
+            switchInfo.put(SWITCH_SOCKET_ADDRESS, socketAddressString);
+            if (socketAddress instanceof InetSocketAddress) {
+                InetSocketAddress inetSocketAddress =
+                        (InetSocketAddress)socketAddress;
+                InetAddress inetAddress = inetSocketAddress.getAddress();
+                String ip = inetAddress.getHostAddress();
+                switchInfo.put(SWITCH_IP, ip);
+            }
+        }
+        
+        // Write out the switch features info
+        long capabilities = U32.f(sw.getCapabilities());
+        switchInfo.put(SWITCH_CAPABILITIES, capabilities);
+        long buffers = U32.f(sw.getBuffers());
+        switchInfo.put(SWITCH_BUFFERS, buffers);
+        long tables = U32.f(sw.getTables());
+        switchInfo.put(SWITCH_TABLES, tables);
+        long actions = U32.f(sw.getActions());
+        switchInfo.put(SWITCH_ACTIONS, actions);
+        switchInfo.put(SWITCH_ACTIVE, Boolean.TRUE);
+        
+        // Update the switch
+        storageSource.updateRowAsync(SWITCH_TABLE_NAME, switchInfo);
+        
+        // Update the ports
+        for (OFPhysicalPort port: sw.getPorts()) {
+            updatePortInfo(sw, port);
+        }
+    }
+    
+    protected void updateInactiveSwitchInfo(IOFSwitch sw) {
+        if (role == Role.SLAVE) {
+            return;
+        }
+        log.debug("Update DB with inactiveSW {}", sw);
+        // Update the controller info in the storage source to be inactive
+        Map<String, Object> switchInfo = new HashMap<String, Object>();
+        String datapathIdString = sw.getStringId();
+        switchInfo.put(SWITCH_DATAPATH_ID, datapathIdString);
+        //switchInfo.put(SWITCH_CONNECTED_SINCE, null);
+        switchInfo.put(SWITCH_ACTIVE, Boolean.FALSE);
+        storageSource.updateRowAsync(SWITCH_TABLE_NAME, switchInfo);
+    }
+
+    protected void updatePortInfo(IOFSwitch sw, OFPhysicalPort port) {
+        if (role == Role.SLAVE) {
+            return;
+        }
+        String datapathIdString = sw.getStringId();
+        Map<String, Object> portInfo = new HashMap<String, Object>();
+        int portNumber = U16.f(port.getPortNumber());
+        String id = datapathIdString + "|" + portNumber;
+        portInfo.put(PORT_ID, id);
+        portInfo.put(PORT_SWITCH, datapathIdString);
+        portInfo.put(PORT_NUMBER, portNumber);
+        byte[] hardwareAddress = port.getHardwareAddress();
+        String hardwareAddressString = HexString.toHexString(hardwareAddress);
+        portInfo.put(PORT_HARDWARE_ADDRESS, hardwareAddressString);
+        String name = port.getName();
+        portInfo.put(PORT_NAME, name);
+        long config = U32.f(port.getConfig());
+        portInfo.put(PORT_CONFIG, config);
+        long state = U32.f(port.getState());
+        portInfo.put(PORT_STATE, state);
+        long currentFeatures = U32.f(port.getCurrentFeatures());
+        portInfo.put(PORT_CURRENT_FEATURES, currentFeatures);
+        long advertisedFeatures = U32.f(port.getAdvertisedFeatures());
+        portInfo.put(PORT_ADVERTISED_FEATURES, advertisedFeatures);
+        long supportedFeatures = U32.f(port.getSupportedFeatures());
+        portInfo.put(PORT_SUPPORTED_FEATURES, supportedFeatures);
+        long peerFeatures = U32.f(port.getPeerFeatures());
+        portInfo.put(PORT_PEER_FEATURES, peerFeatures);
+        storageSource.updateRowAsync(PORT_TABLE_NAME, portInfo);
+    }
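+    // For illustration (DPID string format assumed): with datapath id
+    // "00:00:00:00:00:00:00:01" and port number 3, the PORT_ID written
+    // above would be "00:00:00:00:00:00:00:01|3".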
+    
+    /**
+     * Read switch port data from storage and write it into a switch object
+     * @param sw the switch to update
+     */
+    protected void readSwitchPortStateFromStorage(OFSwitchImpl sw) {
+        OperatorPredicate op = 
+                new OperatorPredicate(PORT_SWITCH, 
+                                      OperatorPredicate.Operator.EQ,
+                                      sw.getStringId());
+        IResultSet portResultSet = 
+                storageSource.executeQuery(PORT_TABLE_NAME,
+                                           null, op, null);
+        //Map<Short, OFPhysicalPort> oldports = 
+        //        new HashMap<Short, OFPhysicalPort>();
+        //oldports.putAll(sw.getPorts());
+
+        while (portResultSet.next()) {
+            try {
+                OFPhysicalPort p = new OFPhysicalPort();
+                p.setPortNumber((short)portResultSet.getInt(PORT_NUMBER));
+                p.setName(portResultSet.getString(PORT_NAME));
+                p.setConfig((int)portResultSet.getLong(PORT_CONFIG));
+                p.setState((int)portResultSet.getLong(PORT_STATE));
+                String portMac = portResultSet.getString(PORT_HARDWARE_ADDRESS);
+                p.setHardwareAddress(HexString.fromHexString(portMac));
+                p.setCurrentFeatures((int)portResultSet.
+                                     getLong(PORT_CURRENT_FEATURES));
+                p.setAdvertisedFeatures((int)portResultSet.
+                                        getLong(PORT_ADVERTISED_FEATURES));
+                p.setSupportedFeatures((int)portResultSet.
+                                       getLong(PORT_SUPPORTED_FEATURES));
+                p.setPeerFeatures((int)portResultSet.
+                                  getLong(PORT_PEER_FEATURES));
+                //oldports.remove(Short.valueOf(p.getPortNumber()));
+                sw.setPort(p);
+            } catch (NullPointerException e) {
+                // ignore
+            }
+        }
+        SwitchUpdate update = new SwitchUpdate(sw, SwitchUpdateType.PORTCHANGED);
+        try {
+            this.updates.put(update);
+        } catch (InterruptedException e) {
+            log.error("Failure adding update to queue", e);
+        }
+    }
+    
+    protected void removePortInfo(IOFSwitch sw, short portNumber) {
+        if (role == Role.SLAVE) {
+            return;
+        }
+        String datapathIdString = sw.getStringId();
+        String id = datapathIdString + "|" + portNumber;
+        storageSource.deleteRowAsync(PORT_TABLE_NAME, id);
+    }
+
+    /**
+     * Sets the initial role based on properties in the config params.
+     * It looks for two different properties.
+     * If the "role" property is specified then the value should be
+     * either "EQUAL", "MASTER", or "SLAVE" and the role of the
+     * controller is set to the specified value. If the "role" property
+     * is not specified then it looks next for the "rolepath" property.
+     * In this case the value should be the path to a property file in
+     * the file system that contains a property called "floodlight.role"
+     * which can be one of the values listed above for the "role" property.
+     * The idea behind the "role.path" mechanism is that you have some
+     * separate heartbeat and master controller election algorithm that
+     * determines the role of the controller. When a role transition happens,
+     * it updates the current role in the file specified by the "role.path"
+     * file. Then if floodlight restarts for some reason it can get the
+     * correct current role of the controller from the file.
+     * @param configParams The config params for the FloodlightProvider service
+     * @return A valid role if role information is specified in the
+     *         config params, otherwise null
+     */
+    @LogMessageDocs({
+        @LogMessageDoc(message="Controller role set to {role}",
+                explanation="Setting the initial HA role to "),
+        @LogMessageDoc(level="ERROR",
+                message="Invalid current role value: {role}",
+                explanation="An invalid HA role value was read from the " + 
+                            "properties file",
+                recommendation=LogMessageDoc.CHECK_CONTROLLER)
+    })
+    protected Role getInitialRole(Map<String, String> configParams) {
+        Role role = null;
+        String roleString = configParams.get("role");
+        if (roleString == null) {
+            String rolePath = configParams.get("rolepath");
+            if (rolePath != null) {
+                Properties properties = new Properties();
+                try {
+                    properties.load(new FileInputStream(rolePath));
+                    roleString = properties.getProperty("floodlight.role");
+                }
+                catch (IOException exc) {
+                    // Don't treat it as an error if the file specified by the
+                    // rolepath property doesn't exist. This lets us enable the
+                    // HA mechanism by just creating/setting the floodlight.role
+                    // property in that file without having to modify the
+                    // floodlight properties.
+                }
+            }
+        }
+        
+        if (roleString != null) {
+            // Canonicalize the string to the form used for the enum constants
+            roleString = roleString.trim().toUpperCase();
+            try {
+                role = Role.valueOf(roleString);
+            }
+            catch (IllegalArgumentException exc) {
+                log.error("Invalid current role value: {}", roleString);
+            }
+        }
+        
+        log.info("Controller role set to {}", role);
+        
+        return role;
+    }
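+    // Example only (values assumed for illustration): a file referenced by
+    // the "rolepath" config parameter could contain a single line such as
+    //     floodlight.role = MASTER
+    // while setting the role directly in the config params would use
+    //     role = SLAVE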
+    
+    /**
+     * Main run loop: tell the controller that we're ready to accept
+     * switch connections, then dispatch controller updates.
+     * @throws IOException 
+     */
+    @LogMessageDocs({
+        @LogMessageDoc(message="Listening for switch connections on {address}",
+                explanation="The controller is ready and listening for new" +
+                        " switch connections"),
+        @LogMessageDoc(message="Storage exception in controller " + 
+                        "updates loop; terminating process",
+                explanation=ERROR_DATABASE,
+                recommendation=LogMessageDoc.CHECK_CONTROLLER),
+        @LogMessageDoc(level="ERROR",
+                message="Exception in controller updates loop",
+                explanation="Failed to dispatch controller event",
+                recommendation=LogMessageDoc.GENERIC_ACTION)
+    })
+    public void run() {
+        if (log.isDebugEnabled()) {
+            logListeners();
+        }
+        
+        try {
+            final ServerBootstrap bootstrap = createServerBootStrap();
+
+            bootstrap.setOption("reuseAddr", true);
+            bootstrap.setOption("child.keepAlive", true);
+            bootstrap.setOption("child.tcpNoDelay", true);
+            bootstrap.setOption("child.sendBufferSize", Controller.SEND_BUFFER_SIZE);
+
+            ChannelPipelineFactory pfact = 
+                    new OpenflowPipelineFactory(this, null);
+            bootstrap.setPipelineFactory(pfact);
+            InetSocketAddress sa = new InetSocketAddress(openFlowPort);
+            final ChannelGroup cg = new DefaultChannelGroup();
+            cg.add(bootstrap.bind(sa));
+            
+            log.info("Listening for switch connections on {}", sa);
+        } catch (Exception e) {
+            throw new RuntimeException(e);
+        }
+
+        // main loop
+        while (true) {
+            try {
+                IUpdate update = updates.take();
+                update.dispatch();
+            } catch (InterruptedException e) {
+                return;
+            } catch (StorageException e) {
+                log.error("Storage exception in controller " + 
+                          "updates loop; terminating process", e);
+                return;
+            } catch (Exception e) {
+                log.error("Exception in controller updates loop", e);
+            }
+        }
+    }
+
+    private ServerBootstrap createServerBootStrap() {
+        if (workerThreads == 0) {
+            return new ServerBootstrap(
+                    new NioServerSocketChannelFactory(
+                            Executors.newCachedThreadPool(),
+                            Executors.newCachedThreadPool()));
+        } else {
+            return new ServerBootstrap(
+                    new NioServerSocketChannelFactory(
+                            Executors.newCachedThreadPool(),
+                            Executors.newCachedThreadPool(), workerThreads));
+        }
+    }
+    
+    public void setConfigParams(Map<String, String> configParams) {
+        String ofPort = configParams.get("openflowport");
+        if (ofPort != null) {
+            this.openFlowPort = Integer.parseInt(ofPort);
+        }
+        log.debug("OpenFlow port set to {}", this.openFlowPort);
+        String threads = configParams.get("workerthreads");
+        if (threads != null) {
+            this.workerThreads = Integer.parseInt(threads);
+        }
+        log.debug("Number of worker threads set to {}", this.workerThreads);
+        String controllerId = configParams.get("controllerid");
+        if (controllerId != null) {
+            this.controllerId = controllerId;
+        }
+        log.debug("ControllerId set to {}", this.controllerId);
+    }
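+    // Example only (illustrative values): setConfigParams() reads
+    //     "openflowport"  -> "6633"
+    //     "workerthreads" -> "16"
+    //     "controllerid"  -> "controller-1"
+    // Other keys, such as "role" and "rolepath", are read in getInitialRole().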
+
+    private void initVendorMessages() {
+        // Configure openflowj to be able to parse the role request/reply
+        // vendor messages.
+        OFBasicVendorId niciraVendorId = new OFBasicVendorId(
+                OFNiciraVendorData.NX_VENDOR_ID, 4);
+        OFVendorId.registerVendorId(niciraVendorId);
+        OFBasicVendorDataType roleRequestVendorData =
+                new OFBasicVendorDataType(
+                        OFRoleRequestVendorData.NXT_ROLE_REQUEST,
+                        OFRoleRequestVendorData.getInstantiable());
+        niciraVendorId.registerVendorDataType(roleRequestVendorData);
+        OFBasicVendorDataType roleReplyVendorData =
+                new OFBasicVendorDataType(
+                        OFRoleReplyVendorData.NXT_ROLE_REPLY,
+                        OFRoleReplyVendorData.getInstantiable());
+        niciraVendorId.registerVendorDataType(roleReplyVendorData);
+    }
+    
+    /**
+     * Initialize internal data structures
+     */
+    public void init(Map<String, String> configParams) {
+        // These data structures are initialized here because other
+        // modules' startUp() might be called before ours
+        this.messageListeners =
+                new ConcurrentHashMap<OFType, 
+                                      ListenerDispatcher<OFType, 
+                                                         IOFMessageListener>>();
+        this.switchListeners = new CopyOnWriteArraySet<IOFSwitchListener>();
+        this.haListeners = new CopyOnWriteArraySet<IHAListener>();
+        this.activeSwitches = new ConcurrentHashMap<Long, IOFSwitch>();
+        this.connectedSwitches = new HashSet<OFSwitchImpl>();
+        this.controllerNodeIPsCache = new HashMap<String, String>();
+        this.updates = new LinkedBlockingQueue<IUpdate>();
+        this.factory = new BasicFactory();
+        this.providerMap = new HashMap<String, List<IInfoProvider>>();
+        setConfigParams(configParams);
+        this.role = getInitialRole(configParams);
+        this.roleChanger = new RoleChanger();
+        initVendorMessages();
+        this.systemStartTime = System.currentTimeMillis();
+    }
+    
+    /**
+     * Startup all of the controller's components
+     */
+    @LogMessageDoc(message="Waiting for storage source",
+                explanation="The system database is not yet ready",
+                recommendation="If this message persists, this indicates " +
+                        "that the system database has failed to start. " +
+                        LogMessageDoc.CHECK_CONTROLLER)
+    public void startupComponents() {
+        // Create the table names we use
+        storageSource.createTable(CONTROLLER_TABLE_NAME, null);
+        storageSource.createTable(SWITCH_TABLE_NAME, null);
+        storageSource.createTable(PORT_TABLE_NAME, null);
+        storageSource.createTable(CONTROLLER_INTERFACE_TABLE_NAME, null);
+        storageSource.createTable(SWITCH_CONFIG_TABLE_NAME, null);
+        storageSource.setTablePrimaryKeyName(CONTROLLER_TABLE_NAME,
+                                             CONTROLLER_ID);
+        storageSource.setTablePrimaryKeyName(SWITCH_TABLE_NAME,
+                                             SWITCH_DATAPATH_ID);
+        storageSource.setTablePrimaryKeyName(PORT_TABLE_NAME, PORT_ID);
+        storageSource.setTablePrimaryKeyName(CONTROLLER_INTERFACE_TABLE_NAME, 
+                                             CONTROLLER_INTERFACE_ID);
+        storageSource.addListener(CONTROLLER_INTERFACE_TABLE_NAME, this);
+        
+        while (true) {
+            try {
+                updateControllerInfo();
+                break;
+            }
+            catch (StorageException e) {
+                log.info("Waiting for storage source");
+                try {
+                    Thread.sleep(1000);
+                } catch (InterruptedException e1) {
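+                    // Interrupted while waiting; fall through and retry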
+                }
+            }
+        }
+       
+        // Add our REST API
+        restApi.addRestletRoutable(new CoreWebRoutable());
+    }
+
+    @Override
+    public void addInfoProvider(String type, IInfoProvider provider) {
+        if (!providerMap.containsKey(type)) {
+            providerMap.put(type, new ArrayList<IInfoProvider>());
+        }
+        providerMap.get(type).add(provider);
+    }
+
+    @Override
+    public void removeInfoProvider(String type, IInfoProvider provider) {
+        if (!providerMap.containsKey(type)) {
+            log.debug("Provider type {} doesn't exist.", type);
+            return;
+        }
+        
+        providerMap.get(type).remove(provider);
+    }
+    
+    public Map<String, Object> getControllerInfo(String type) {
+        if (!providerMap.containsKey(type)) return null;
+        
+        Map<String, Object> result = new LinkedHashMap<String, Object>();
+        for (IInfoProvider provider : providerMap.get(type)) {
+            result.putAll(provider.getInfo(type));
+        }
+        
+        return result;
+    }
+
+    @Override
+    public void addHAListener(IHAListener listener) {
+        this.haListeners.add(listener);
+    }
+
+    @Override
+    public void removeHAListener(IHAListener listener) {
+        this.haListeners.remove(listener);
+    }
+    
+    
+    /**
+     * Handle changes to the controller nodes IPs and dispatch update. 
+     */
+    @SuppressWarnings("unchecked")
+    protected void handleControllerNodeIPChanges() {
+        HashMap<String,String> curControllerNodeIPs = new HashMap<String,String>();
+        HashMap<String,String> addedControllerNodeIPs = new HashMap<String,String>();
+        HashMap<String,String> removedControllerNodeIPs =new HashMap<String,String>();
+        String[] colNames = { CONTROLLER_INTERFACE_CONTROLLER_ID, 
+                           CONTROLLER_INTERFACE_TYPE, 
+                           CONTROLLER_INTERFACE_NUMBER, 
+                           CONTROLLER_INTERFACE_DISCOVERED_IP };
+        synchronized(controllerNodeIPsCache) {
+            // We currently assume that interface Ethernet0 is the relevant
+            // controller interface. Might change.
+            // We could (should?) implement this using
+            // predicates, but creating the individual and compound predicates
+            // seems like more overhead than just checking every row,
+            // particularly since the number of rows is small and changes
+            // are infrequent.
+            IResultSet res = storageSource.executeQuery(CONTROLLER_INTERFACE_TABLE_NAME,
+                    colNames,null, null);
+            while (res.next()) {
+                if (res.getString(CONTROLLER_INTERFACE_TYPE).equals("Ethernet") &&
+                        res.getInt(CONTROLLER_INTERFACE_NUMBER) == 0) {
+                    String controllerID = res.getString(CONTROLLER_INTERFACE_CONTROLLER_ID);
+                    String discoveredIP = res.getString(CONTROLLER_INTERFACE_DISCOVERED_IP);
+                    String curIP = controllerNodeIPsCache.get(controllerID);
+                    
+                    curControllerNodeIPs.put(controllerID, discoveredIP);
+                    if (curIP == null) {
+                        // new controller node IP
+                        addedControllerNodeIPs.put(controllerID, discoveredIP);
+                    } 
+                    else if (!curIP.equals(discoveredIP)) {
+                        // IP changed                    
+                        removedControllerNodeIPs.put(controllerID, curIP);
+                        addedControllerNodeIPs.put(controllerID, discoveredIP);
+                    }
+                }
+            }
+            // Now figure out if rows have been deleted. We can't use the
+            // rowKeys from rowsDeleted directly, since the table's primary
+            // key is a compound key that we can't disassemble
+            Set<String> curEntries = curControllerNodeIPs.keySet();
+            Set<String> removedEntries = controllerNodeIPsCache.keySet();
+            removedEntries.removeAll(curEntries);
+            for (String removedControllerID : removedEntries)
+                removedControllerNodeIPs.put(removedControllerID, controllerNodeIPsCache.get(removedControllerID));
+            controllerNodeIPsCache = (HashMap<String, String>) curControllerNodeIPs.clone();
+            HAControllerNodeIPUpdate update = new HAControllerNodeIPUpdate(
+                                curControllerNodeIPs, addedControllerNodeIPs,
+                                removedControllerNodeIPs);
+            if (!removedControllerNodeIPs.isEmpty() || !addedControllerNodeIPs.isEmpty()) {
+                try {
+                    this.updates.put(update);
+                } catch (InterruptedException e) {
+                    log.error("Failure adding update to queue", e);
+                }
+            }
+        }
+    }
+    
+    @Override
+    public Map<String, String> getControllerNodeIPs() {
+        // We return a copy of the mapping so we can guarantee that the
+        // mapping we return is the same as the one that will be (or was)
+        // dispatched to IHAListeners
+        HashMap<String,String> retval = new HashMap<String,String>();
+        synchronized(controllerNodeIPsCache) {
+            retval.putAll(controllerNodeIPsCache);
+        }
+        return retval;
+    }
+
+    @Override
+    public void rowsModified(String tableName, Set<Object> rowKeys) {
+        if (tableName.equals(CONTROLLER_INTERFACE_TABLE_NAME)) {
+            handleControllerNodeIPChanges();
+        }
+        
+    }
+
+    @Override
+    public void rowsDeleted(String tableName, Set<Object> rowKeys) {
+        if (tableName.equals(CONTROLLER_INTERFACE_TABLE_NAME)) {
+            handleControllerNodeIPChanges();
+        }
+    }
+
+    @Override
+    public long getSystemStartTime() {
+        return (this.systemStartTime);
+    }
+
+    @Override
+    public void setAlwaysClearFlowsOnSwAdd(boolean value) {
+        this.alwaysClearFlowsOnSwAdd = value;
+    }
+    
+    public boolean getAlwaysClearFlowsOnSwAdd() {
+        return this.alwaysClearFlowsOnSwAdd;
+    }
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/internal/HandshakeTimeoutException.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/internal/HandshakeTimeoutException.java
new file mode 100644
index 0000000..421ec1a
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/internal/HandshakeTimeoutException.java
@@ -0,0 +1,29 @@
+/**
+*    Copyright 2011, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.core.internal;
+
+/**
+ * Exception is thrown when the handshake fails to complete 
+ * before a specified time
+ * @author readams
+ */
+public class HandshakeTimeoutException extends Exception {
+
+    private static final long serialVersionUID = 6859880268940337312L;
+
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/internal/HandshakeTimeoutHandler.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/internal/HandshakeTimeoutHandler.java
new file mode 100644
index 0000000..6d3335f
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/internal/HandshakeTimeoutHandler.java
@@ -0,0 +1,101 @@
+/**
+*    Copyright 2011, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.core.internal;
+
+import java.util.concurrent.TimeUnit;
+
+import net.floodlightcontroller.core.internal.OFChannelState.HandshakeState;
+
+import org.jboss.netty.channel.ChannelHandlerContext;
+import org.jboss.netty.channel.ChannelStateEvent;
+import org.jboss.netty.channel.Channels;
+import org.jboss.netty.channel.SimpleChannelUpstreamHandler;
+import org.jboss.netty.util.ExternalResourceReleasable;
+import org.jboss.netty.util.Timeout;
+import org.jboss.netty.util.Timer;
+import org.jboss.netty.util.TimerTask;
+
+/**
+ * Trigger a timeout if a switch fails to complete handshake soon enough
+ */
+public class HandshakeTimeoutHandler 
+    extends SimpleChannelUpstreamHandler
+    implements ExternalResourceReleasable {
+    static final HandshakeTimeoutException EXCEPTION = 
+            new HandshakeTimeoutException();
+    
+    final OFChannelState state;
+    final Timer timer;
+    final long timeoutNanos;
+    volatile Timeout timeout;
+    
+    public HandshakeTimeoutHandler(OFChannelState state, Timer timer,
+                                   long timeoutSeconds) {
+        super();
+        this.state = state;
+        this.timer = timer;
+        this.timeoutNanos = TimeUnit.SECONDS.toNanos(timeoutSeconds);
+
+    }
+    
+    @Override
+    public void channelOpen(ChannelHandlerContext ctx, ChannelStateEvent e)
+            throws Exception {
+        if (timeoutNanos > 0) {
+            timeout = timer.newTimeout(new HandshakeTimeoutTask(ctx), 
+                                       timeoutNanos, TimeUnit.NANOSECONDS);
+        }
+        ctx.sendUpstream(e);
+    }
+    
+    @Override
+    public void channelClosed(ChannelHandlerContext ctx, ChannelStateEvent e)
+            throws Exception {
+        if (timeout != null) {
+            timeout.cancel();
+            timeout = null;
+        }
+    }
+
+    @Override
+    public void releaseExternalResources() {
+        timer.stop();
+    }
+    
+    private final class HandshakeTimeoutTask implements TimerTask {
+
+        private final ChannelHandlerContext ctx;
+
+        HandshakeTimeoutTask(ChannelHandlerContext ctx) {
+            this.ctx = ctx;
+        }
+
+        @Override
+        public void run(Timeout timeout) throws Exception {
+            if (timeout.isCancelled()) {
+                return;
+            }
+
+            if (!ctx.getChannel().isOpen()) {
+                return;
+            }
+            if (!state.hsState.equals(HandshakeState.READY))
+                Channels.fireExceptionCaught(ctx, EXCEPTION);
+        }
+    }
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/internal/IOFSwitchFeatures.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/internal/IOFSwitchFeatures.java
new file mode 100644
index 0000000..ddbb0ef
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/internal/IOFSwitchFeatures.java
@@ -0,0 +1,9 @@
+package net.floodlightcontroller.core.internal;
+
+import org.openflow.protocol.statistics.OFDescriptionStatistics;
+
+import net.floodlightcontroller.core.IOFSwitch;
+
+public interface IOFSwitchFeatures {
+    public void setFromDescription(IOFSwitch sw, OFDescriptionStatistics description);
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/internal/OFChannelState.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/internal/OFChannelState.java
new file mode 100644
index 0000000..ad5a377
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/internal/OFChannelState.java
@@ -0,0 +1,64 @@
+/**
+*    Copyright 2011, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.core.internal;
+
+/**
+ * Wrapper class to hold state for the OpenFlow switch connection
+ * @author readams
+ */
+class OFChannelState {
+
+    /**
+     * State for handling the switch handshake
+     */
+    protected enum HandshakeState {
+        /**
+         * Beginning state
+         */
+        START,
+
+        /**
+         * Received HELLO from switch
+         */
+        HELLO,
+
+        /**
+         * We've received the features reply
+         * Waiting for Config and Description reply
+         */
+        FEATURES_REPLY,
+
+        /**
+         * Switch is ready for processing messages
+         */
+        READY
+
+    }
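+
+    // A sketch of the expected progression for a healthy connection, based on
+    // the states above: START -> HELLO once the switch's HELLO arrives,
+    // HELLO -> FEATURES_REPLY once the features reply arrives, and
+    // FEATURES_REPLY -> READY once the remaining config/description replies
+    // have been processed and the switch can handle regular messages.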
+
+    protected volatile HandshakeState hsState = HandshakeState.START;
+    protected boolean hasGetConfigReply = false;
+    protected boolean hasDescription = false;
+    
+    // The firstRoleReplyReceived flag indicates if we have received the
+    // first role reply message on this connection (in response to the 
+    // role request sent after the handshake). If role support is disabled
+    // on the controller we also set this flag to true. 
+    // The flag is used to decide if the flow table should be wiped
+    // @see Controller.handleRoleReplyMessage()
+    protected boolean firstRoleReplyReceived = false;
+}
\ No newline at end of file
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/internal/OFFeaturesReplyFuture.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/internal/OFFeaturesReplyFuture.java
new file mode 100644
index 0000000..eca67bd
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/internal/OFFeaturesReplyFuture.java
@@ -0,0 +1,72 @@
+/**
+ *    Copyright 2012, Big Switch Networks, Inc. 
+ * 
+ *    Licensed under the Apache License, Version 2.0 (the "License"); you may
+ *    not use this file except in compliance with the License. You may obtain
+ *    a copy of the License at
+ *
+ *         http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *    Unless required by applicable law or agreed to in writing, software
+ *    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ *    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ *    License for the specific language governing permissions and limitations
+ *    under the License.
+ **/
+
+package net.floodlightcontroller.core.internal;
+
+import java.util.concurrent.TimeUnit;
+
+import net.floodlightcontroller.core.IOFSwitch;
+import net.floodlightcontroller.threadpool.IThreadPoolService;
+
+import org.openflow.protocol.OFFeaturesReply;
+import org.openflow.protocol.OFMessage;
+import org.openflow.protocol.OFType;
+
+/**
+ * A concrete implementation that handles asynchronously receiving
+ * OFFeaturesReply
+ * 
+ * @author Shudong Zhou
+ */
+public class OFFeaturesReplyFuture extends
+        OFMessageFuture<OFFeaturesReply> {
+
+    protected volatile boolean finished;
+
+    public OFFeaturesReplyFuture(IThreadPoolService tp,
+            IOFSwitch sw, int transactionId) {
+        super(tp, sw, OFType.FEATURES_REPLY, transactionId);
+        init();
+    }
+
+    public OFFeaturesReplyFuture(IThreadPoolService tp,
+            IOFSwitch sw, int transactionId, long timeout, TimeUnit unit) {
+        super(tp, sw, OFType.FEATURES_REPLY, transactionId, timeout, unit);
+        init();
+    }
+
+    private void init() {
+        this.finished = false;
+        this.result = null;
+    }
+
+    @Override
+    protected void handleReply(IOFSwitch sw, OFMessage msg) {
+        this.result = (OFFeaturesReply) msg;
+        this.finished = true;
+    }
+
+    @Override
+    protected boolean isFinished() {
+        return finished;
+    }
+
+    @Override
+    protected void unRegister() {
+        super.unRegister();
+        sw.cancelFeaturesReply(transactionId);
+    }
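+
+    // A minimal usage sketch: callers normally obtain this future via
+    // IOFSwitch.getFeaturesReplyFromSwitch() rather than constructing it
+    // directly (the variable "sw" and the 5 second timeout are illustrative):
+    //
+    //   Future<OFFeaturesReply> future = sw.getFeaturesReplyFromSwitch();
+    //   OFFeaturesReply reply = future.get(5, TimeUnit.SECONDS);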
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/internal/OFMessageDecoder.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/internal/OFMessageDecoder.java
new file mode 100644
index 0000000..295e967
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/internal/OFMessageDecoder.java
@@ -0,0 +1,60 @@
+/**
+*    Copyright 2011, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.core.internal;
+
+import java.util.List;
+
+import org.jboss.netty.buffer.ChannelBuffer;
+import org.jboss.netty.channel.Channel;
+import org.jboss.netty.channel.ChannelHandlerContext;
+import org.jboss.netty.handler.codec.frame.FrameDecoder;
+import org.openflow.protocol.OFMessage;
+import org.openflow.protocol.factory.BasicFactory;
+import org.openflow.protocol.factory.OFMessageFactory;
+
+/**
+ * Decode an openflow message from a Channel, for use in a netty
+ * pipeline
+ * @author readams
+ */
+public class OFMessageDecoder extends FrameDecoder {
+
+    OFMessageFactory factory = new BasicFactory();
+    
+    @Override
+    protected Object decode(ChannelHandlerContext ctx, Channel channel,
+                            ChannelBuffer buffer) throws Exception {
+        if (!channel.isConnected()) {
+            // In testing, I see decode being called AFTER decodeLast.
+            // This check prevents that call from reading corrupted frames.
+            return null;
+        }
+
+        List<OFMessage> message = factory.parseMessage(buffer);
+        return message;
+    }
+
+    @Override
+    protected Object decodeLast(ChannelHandlerContext ctx, Channel channel,
+                            ChannelBuffer buffer) throws Exception {
+        // This is not strictly needed at this time. It is used to detect
+        // connection resets from netty (for debugging).
+        return null;
+    }
+
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/internal/OFMessageEncoder.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/internal/OFMessageEncoder.java
new file mode 100644
index 0000000..6be5f9a
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/internal/OFMessageEncoder.java
@@ -0,0 +1,56 @@
+/**
+*    Copyright 2011, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.core.internal;
+
+import java.util.List;
+
+import org.jboss.netty.buffer.ChannelBuffer;
+import org.jboss.netty.buffer.ChannelBuffers;
+import org.jboss.netty.channel.Channel;
+import org.jboss.netty.channel.ChannelHandlerContext;
+import org.jboss.netty.handler.codec.oneone.OneToOneEncoder;
+import org.openflow.protocol.OFMessage;
+
+/**
+ * Encode an openflow message for output into a ChannelBuffer, for use in a
+ * netty pipeline
+ * @author readams
+ */
+public class OFMessageEncoder extends OneToOneEncoder {
+
+    @Override
+    protected Object encode(ChannelHandlerContext ctx, Channel channel,
+                            Object msg) throws Exception {
+        if (!(msg instanceof List))
+            return msg;
+
+        @SuppressWarnings("unchecked")
+        List<OFMessage> msglist = (List<OFMessage>) msg;
+        int size = 0;
+        for (OFMessage ofm : msglist) {
+            size += ofm.getLengthU();
+        }
+
+        ChannelBuffer buf = ChannelBuffers.buffer(size);
+        for (OFMessage ofm : msglist) {
+            ofm.writeTo(buf);
+        }
+        return buf;
+    }
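+
+    // A minimal sketch of how the decoder, encoder and handshake timeout
+    // handler can be assembled into a Netty pipeline (handler names and the
+    // 15 second timeout are illustrative; the real wiring lives in
+    // OpenflowPipelineFactory):
+    //
+    //   ChannelPipeline pipeline = Channels.pipeline();
+    //   OFChannelState state = new OFChannelState();
+    //   pipeline.addLast("ofmessagedecoder", new OFMessageDecoder());
+    //   pipeline.addLast("ofmessageencoder", new OFMessageEncoder());
+    //   pipeline.addLast("handshaketimeout",
+    //                    new HandshakeTimeoutHandler(state, new HashedWheelTimer(), 15));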
+
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/internal/OFMessageFuture.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/internal/OFMessageFuture.java
new file mode 100644
index 0000000..f01e179
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/internal/OFMessageFuture.java
@@ -0,0 +1,170 @@
+/**
+*    Copyright 2011, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.core.internal;
+
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+
+import org.openflow.protocol.OFMessage;
+import org.openflow.protocol.OFType;
+
+import net.floodlightcontroller.core.IOFSwitch;
+import net.floodlightcontroller.threadpool.IThreadPoolService;
+
+/**
+ * A Future object used to retrieve asynchronous OFMessage replies. Unregisters
+ * and cancels itself by default after 60 seconds. This class is meant to be
+ * sub-classed and proper behavior added to the handleReply method, and
+ * termination of the Future to be handled in the isFinished method.
+ *
+ * @author David Erickson (daviderickson@cs.stanford.edu)
+ */
+public abstract class OFMessageFuture<V> implements Future<V> {
+
+    protected IThreadPoolService threadPool;
+    protected volatile boolean canceled;
+    protected CountDownLatch latch;
+    protected OFType responseType;
+    protected volatile V result;
+    protected IOFSwitch sw;
+    protected Runnable timeoutTimer;
+    protected int transactionId;
+    protected static final long DEFAULT_TIMEOUT = 60;
+    protected static final TimeUnit DEFAULT_TIMEOUT_UNIT = TimeUnit.SECONDS;
+
+    public OFMessageFuture(IThreadPoolService tp,
+            IOFSwitch sw, OFType responseType, int transactionId) {
+        this(tp, sw, responseType, transactionId, 
+                 DEFAULT_TIMEOUT, DEFAULT_TIMEOUT_UNIT);
+    }
+
+    public OFMessageFuture(IThreadPoolService tp,
+            IOFSwitch sw, OFType responseType, int transactionId, long timeout, TimeUnit unit) {
+        this.threadPool = tp;
+        this.canceled = false;
+        this.latch = new CountDownLatch(1);
+        this.responseType = responseType;
+        this.sw = sw;
+        this.transactionId = transactionId;
+
+        final OFMessageFuture<V> future = this;
+        timeoutTimer = new Runnable() {
+            @Override
+            public void run() {
+                if (timeoutTimer == this)
+                    future.cancel(true);
+            }
+        };
+        threadPool.getScheduledExecutor().schedule(timeoutTimer, timeout, unit);
+    }
+
+    protected void unRegister() {
+        this.timeoutTimer = null;
+    }
+
+  
+    public void deliverFuture(IOFSwitch sw, OFMessage msg) {
+        if (transactionId == msg.getXid()) {
+            handleReply(sw, msg);
+            if (isFinished()) {
+                unRegister();
+                this.latch.countDown();
+            }
+        }
+    }
+
+    /**
+     * Used to handle the specific expected message this Future was registered
+     * for; the specified msg parameter is guaranteed to match the type and
+     * transaction id specified.
+     * @param sw
+     * @param msg
+     */
+    protected abstract void handleReply(IOFSwitch sw, OFMessage msg);
+
+    /**
+     * Called directly after handleReply, subclasses implement this method to
+     * indicate when the future can deregister itself from receiving future
+     * messages, and when it is safe to return the results to any waiting
+     * threads.
+     * @return when this Future has completed its work
+     */
+    protected abstract boolean isFinished();
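+
+    // A minimal subclassing sketch (the OFEchoReplyFuture name and the
+    // ECHO_REPLY type are illustrative; see OFFeaturesReplyFuture and
+    // OFStatisticsFuture for the real implementations): handleReply() records
+    // the result and isFinished() reports completion so deliverFuture() can
+    // release waiting threads.
+    //
+    //   class OFEchoReplyFuture extends OFMessageFuture<OFMessage> {
+    //       private volatile boolean finished = false;
+    //       OFEchoReplyFuture(IThreadPoolService tp, IOFSwitch sw, int xid) {
+    //           super(tp, sw, OFType.ECHO_REPLY, xid);
+    //       }
+    //       @Override protected void handleReply(IOFSwitch sw, OFMessage msg) {
+    //           this.result = msg;
+    //           this.finished = true;
+    //       }
+    //       @Override protected boolean isFinished() { return finished; }
+    //   }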
+
+    /* (non-Javadoc)
+     * @see java.util.concurrent.Future#cancel(boolean)
+     */
+    @Override
+    public boolean cancel(boolean mayInterruptIfRunning) {
+        if (isDone()) {
+            return false;
+        } else {
+            unRegister();
+            canceled = true;
+            this.latch.countDown();
+            // The latch has been released and the future marked canceled, so
+            // report a successful cancellation
+            return true;
+        }
+    }
+
+    /* (non-Javadoc)
+     * @see java.util.concurrent.Future#isCancelled()
+     */
+    @Override
+    public boolean isCancelled() {
+        return canceled;
+    }
+
+    /* (non-Javadoc)
+     * @see java.util.concurrent.Future#isDone()
+     */
+    @Override
+    public boolean isDone() {
+        return this.latch.getCount() == 0;
+    }
+
+    /* (non-Javadoc)
+     * @see java.util.concurrent.Future#get()
+     */
+    @Override
+    public V get() throws InterruptedException, ExecutionException {
+        this.latch.await();
+        return result;
+    }
+
+    /* (non-Javadoc)
+     * @see java.util.concurrent.Future#get(long, java.util.concurrent.TimeUnit)
+     */
+    @Override
+    public V get(long timeout, TimeUnit unit) throws InterruptedException,
+            ExecutionException, TimeoutException {
+        this.latch.await(timeout, unit);
+        return result;
+    }
+
+    public int getTransactionId() {
+        return transactionId;
+    }
+
+    public void setTransactionId(int transactionId) {
+        this.transactionId = transactionId;
+    }
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/internal/OFStatisticsFuture.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/internal/OFStatisticsFuture.java
new file mode 100644
index 0000000..4d3f733
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/internal/OFStatisticsFuture.java
@@ -0,0 +1,80 @@
+/**
+*    Copyright 2011, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.core.internal;
+
+import java.util.List;
+import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.concurrent.TimeUnit;
+
+import net.floodlightcontroller.core.IOFSwitch;
+import net.floodlightcontroller.threadpool.IThreadPoolService;
+
+import org.openflow.protocol.OFMessage;
+import org.openflow.protocol.OFStatisticsReply;
+import org.openflow.protocol.OFType;
+import org.openflow.protocol.statistics.OFStatistics;
+
+/**
+ * A concrete implementation that handles asynchronously receiving OFStatistics
+ * 
+ * @author David Erickson (daviderickson@cs.stanford.edu)
+ */
+public class OFStatisticsFuture extends
+        OFMessageFuture<List<OFStatistics>> {
+
+    protected volatile boolean finished;
+
+    public OFStatisticsFuture(IThreadPoolService tp,
+            IOFSwitch sw, int transactionId) {
+        super(tp, sw, OFType.STATS_REPLY, transactionId);
+        init();
+    }
+
+    public OFStatisticsFuture(IThreadPoolService tp,
+            IOFSwitch sw, int transactionId, long timeout, TimeUnit unit) {
+        super(tp, sw, OFType.STATS_REPLY, transactionId, timeout, unit);
+        init();
+    }
+
+    private void init() {
+        this.finished = false;
+        this.result = new CopyOnWriteArrayList<OFStatistics>();
+    }
+
+    @Override
+    protected void handleReply(IOFSwitch sw, OFMessage msg) {
+        OFStatisticsReply sr = (OFStatisticsReply) msg;
+        synchronized (this.result) {
+            this.result.addAll(sr.getStatistics());
+            if ((sr.getFlags() & 0x1) == 0) {
+                this.finished = true;
+            }
+        }
+    }
+
+    @Override
+    protected boolean isFinished() {
+        return finished;
+    }
+    
+    @Override
+    protected void unRegister() {
+        super.unRegister();
+        sw.cancelStatisticsReply(transactionId);
+    }
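+
+    // A minimal usage sketch: callers normally go through
+    // IOFSwitch.getStatistics(), which creates and registers this future.
+    // The DESC statistics type, the setStatisticType() call and the 10 second
+    // timeout are assumptions about the openflowj request API:
+    //
+    //   OFStatisticsRequest req = new OFStatisticsRequest();
+    //   req.setStatisticType(OFStatisticsType.DESC);
+    //   Future<List<OFStatistics>> future = sw.getStatistics(req);
+    //   List<OFStatistics> stats = future.get(10, TimeUnit.SECONDS);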
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/internal/OFSwitchImpl.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/internal/OFSwitchImpl.java
new file mode 100644
index 0000000..dff00fd
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/internal/OFSwitchImpl.java
@@ -0,0 +1,857 @@
+/**
+*    Copyright 2012, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.core.internal;
+
+import java.io.IOException;
+import java.net.SocketAddress;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.Future;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+
+import net.floodlightcontroller.core.FloodlightContext;
+import net.floodlightcontroller.core.IFloodlightProviderService;
+import net.floodlightcontroller.core.IOFMessageListener;
+import net.floodlightcontroller.core.IFloodlightProviderService.Role;
+import net.floodlightcontroller.core.IOFSwitch;
+import net.floodlightcontroller.core.annotations.LogMessageDoc;
+import net.floodlightcontroller.core.annotations.LogMessageDocs;
+import net.floodlightcontroller.core.web.serializers.DPIDSerializer;
+import net.floodlightcontroller.threadpool.IThreadPoolService;
+import net.floodlightcontroller.util.TimedCache;
+
+import org.codehaus.jackson.annotate.JsonIgnore;
+import org.codehaus.jackson.annotate.JsonProperty;
+import org.codehaus.jackson.map.annotate.JsonSerialize;
+import org.codehaus.jackson.map.ser.ToStringSerializer;
+import org.jboss.netty.channel.Channel;
+import org.openflow.protocol.OFFeaturesReply;
+import org.openflow.protocol.OFFeaturesRequest;
+import org.openflow.protocol.OFFlowMod;
+import org.openflow.protocol.OFMatch;
+import org.openflow.protocol.OFMessage;
+import org.openflow.protocol.OFPhysicalPort;
+import org.openflow.protocol.OFPort;
+import org.openflow.protocol.OFType;
+import org.openflow.protocol.OFVendor;
+import org.openflow.protocol.OFPhysicalPort.OFPortConfig;
+import org.openflow.protocol.OFPhysicalPort.OFPortState;
+import org.openflow.protocol.OFStatisticsRequest;
+import org.openflow.protocol.statistics.OFDescriptionStatistics;
+import org.openflow.protocol.statistics.OFStatistics;
+import org.openflow.util.HexString;
+import org.openflow.util.U16;
+import org.openflow.vendor.nicira.OFNiciraVendorData;
+import org.openflow.vendor.nicira.OFRoleRequestVendorData;
+import org.openflow.vendor.nicira.OFRoleVendorData;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * This is the internal representation of an openflow switch.
+ */
+public class OFSwitchImpl implements IOFSwitch {
+    // TODO: should we really do logging in the class or should we throw
+    // exception that can then be handled by callers?
+    protected static Logger log = LoggerFactory.getLogger(OFSwitchImpl.class);
+
+    private static final String HA_CHECK_SWITCH = 
+            "Check the health of the indicated switch.  If the problem " +
+            "persists or occurs repeatedly, it likely indicates a defect " +
+            "in the switch HA implementation.";
+    
+    protected ConcurrentMap<Object, Object> attributes;
+    protected IFloodlightProviderService floodlightProvider;
+    protected IThreadPoolService threadPool;
+    protected Date connectedSince;
+    protected String stringId;
+    protected Channel channel;
+    protected AtomicInteger transactionIdSource;
+    // Lock to protect modification of the port maps. We only need to 
+    // synchronize on modifications. For read operations we are fine since
+    // we rely on ConcurrentMaps, which work for our use case.
+    private Object portLock;
+    // Map port numbers to the appropriate OFPhysicalPort
+    protected ConcurrentHashMap<Short, OFPhysicalPort> portsByNumber;
+    // Map port names to the appropriate OFPhysicalPort
+    // XXX: The OF spec doesn't specify if port names need to be unique, but
+    //      in practice they always are.
+    protected ConcurrentHashMap<String, OFPhysicalPort> portsByName;
+    protected Map<Integer,OFStatisticsFuture> statsFutureMap;
+    protected Map<Integer, IOFMessageListener> iofMsgListenersMap;
+    protected Map<Integer,OFFeaturesReplyFuture> featuresFutureMap;
+    protected boolean connected;
+    protected Role role;
+    protected TimedCache<Long> timedCache;
+    protected ReentrantReadWriteLock listenerLock;
+    protected ConcurrentMap<Short, Long> portBroadcastCacheHitMap;
+    /**
+     * When sending a role request message, the role request is added
+     * to this queue. If a role reply is received, this queue is checked to
+     * verify that the reply matches the expected reply. We require in-order
+     * delivery of replies, which is why we use a Queue.
+     * The RoleChanger uses a timeout to ensure we receive a timely reply.
+     *
+     * Need to synchronize on this instance whenever a request is sent,
+     * received, or checked.
+     */
+    protected LinkedList<PendingRoleRequestEntry> pendingRoleRequests;
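+    // Typical flow (sketch): RoleChanger calls sendNxRoleRequest(role, cookie),
+    // which appends a PendingRoleRequestEntry to this queue; when the switch
+    // answers, deliverRoleReply(xid, role) polls the head and verifies that
+    // both the xid and the role match before accepting the new role, while
+    // deliverRoleRequestNotSupported(xid) removes the head if the switch
+    // reports that role requests are unsupported.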
+    
+    /* Switch features from initial featuresReply */
+    protected int capabilities;
+    protected int buffers;
+    protected int actions;
+    protected byte tables;
+    protected long datapathId;
+
+    public static IOFSwitchFeatures switchFeatures;
+    protected static final ThreadLocal<Map<OFSwitchImpl,List<OFMessage>>> local_msg_buffer =
+            new ThreadLocal<Map<OFSwitchImpl,List<OFMessage>>>() {
+            @Override
+            protected Map<OFSwitchImpl,List<OFMessage>> initialValue() {
+                return new HashMap<OFSwitchImpl,List<OFMessage>>();
+            }
+    };
+    
+    // for managing our map sizes
+    protected static final int MAX_MACS_PER_SWITCH  = 1000;
+    
+    protected static class PendingRoleRequestEntry {
+        protected int xid;
+        protected Role role;
+        // cookie is used to identify the role "generation"; RoleChanger uses
+        // it to check for timeouts
+        protected long cookie;
+        public PendingRoleRequestEntry(int xid, Role role, long cookie) {
+            this.xid = xid;
+            this.role = role;
+            this.cookie = cookie;
+        }
+    }
+    
+    public OFSwitchImpl() {
+        this.stringId = null;
+        this.attributes = new ConcurrentHashMap<Object, Object>();
+        this.connectedSince = new Date();
+        this.transactionIdSource = new AtomicInteger();
+        this.portLock = new Object();
+        this.portsByNumber = new ConcurrentHashMap<Short, OFPhysicalPort>();
+        this.portsByName = new ConcurrentHashMap<String, OFPhysicalPort>();
+        this.connected = true;
+        this.statsFutureMap = new ConcurrentHashMap<Integer,OFStatisticsFuture>();
+        this.featuresFutureMap = new ConcurrentHashMap<Integer,OFFeaturesReplyFuture>();
+        this.iofMsgListenersMap = new ConcurrentHashMap<Integer,IOFMessageListener>();
+        this.role = null;
+        this.timedCache = new TimedCache<Long>(100, 5*1000 );  // 5 seconds interval
+        this.listenerLock = new ReentrantReadWriteLock();
+        this.portBroadcastCacheHitMap = new ConcurrentHashMap<Short, Long>();
+        this.pendingRoleRequests = new LinkedList<OFSwitchImpl.PendingRoleRequestEntry>();
+        
+        // Default properties for an ideal switch
+        this.setAttribute(PROP_FASTWILDCARDS, OFMatch.OFPFW_ALL);
+        this.setAttribute(PROP_SUPPORTS_OFPP_FLOOD, new Boolean(true));
+        this.setAttribute(PROP_SUPPORTS_OFPP_TABLE, new Boolean(true));
+    }
+    
+
+    @Override
+    public Object getAttribute(String name) {
+        if (this.attributes.containsKey(name)) {
+            return this.attributes.get(name);
+        }
+        return null;
+    }
+    
+    @Override
+    public void setAttribute(String name, Object value) {
+        this.attributes.put(name, value);
+        return;
+    }
+
+    @Override
+    public Object removeAttribute(String name) {
+        return this.attributes.remove(name);
+    }
+    
+    @Override
+    public boolean hasAttribute(String name) {
+        return this.attributes.containsKey(name);
+    }
+        
+    @Override
+    @JsonIgnore
+    public Channel getChannel() {
+        return this.channel;
+    }
+
+    @JsonIgnore
+    public void setChannel(Channel channel) {
+        this.channel = channel;
+    }
+    
+    @Override
+    public void write(OFMessage m, FloodlightContext bc) throws IOException {
+        Map<OFSwitchImpl,List<OFMessage>> msg_buffer_map = local_msg_buffer.get();
+        List<OFMessage> msg_buffer = msg_buffer_map.get(this);
+        if (msg_buffer == null) {
+            msg_buffer = new ArrayList<OFMessage>();
+            msg_buffer_map.put(this, msg_buffer);
+        }
+
+        this.floodlightProvider.handleOutgoingMessage(this, m, bc);
+        msg_buffer.add(m);
+
+        if ((msg_buffer.size() >= Controller.BATCH_MAX_SIZE) ||
+            ((m.getType() != OFType.PACKET_OUT) && (m.getType() != OFType.FLOW_MOD))) {
+            this.write(msg_buffer);
+            msg_buffer.clear();
+        }
+    }
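+
+    // Buffering note and usage sketch: PACKET_OUT and FLOW_MOD messages are
+    // collected in the per-thread local_msg_buffer and only written to the
+    // channel once BATCH_MAX_SIZE is reached or a message of another type is
+    // queued, so callers are expected to flush when they finish a unit of
+    // work (the variable names below are illustrative):
+    //
+    //   sw.write(packetOut, cntx);
+    //   sw.write(flowMod, cntx);
+    //   sw.flush();   // or OFSwitchImpl.flush_all() for every switch touched
+    //                 // by this thread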
+
+    @Override
+    @LogMessageDoc(level="WARN",
+                   message="Sending OF message that modifies switch " +
+                           "state while in the slave role: {switch}",
+                   explanation="An application has sent a message to a switch " +
+                   		"that is not valid when the switch is in a slave role",
+                   recommendation=LogMessageDoc.REPORT_CONTROLLER_BUG)
+    public void write(List<OFMessage> msglist, 
+                      FloodlightContext bc) throws IOException {
+        for (OFMessage m : msglist) {
+            if (role == Role.SLAVE) {
+                switch (m.getType()) {
+                    case PACKET_OUT:
+                    case FLOW_MOD:
+                    case PORT_MOD:
+                        log.warn("Sending OF message that modifies switch " +
+                        		 "state while in the slave role: {}", 
+                        		 m.getType().name());
+                        break;
+                    default:
+                        break;
+                }
+            }
+            this.floodlightProvider.handleOutgoingMessage(this, m, bc);
+        }
+        this.write(msglist);
+    }
+
+    public void write(List<OFMessage> msglist) throws IOException {
+        this.channel.write(msglist);
+    }
+    
+    @Override
+    public void disconnectOutputStream() {
+        channel.close();
+    }
+
+    @Override
+    @JsonIgnore
+    public void setFeaturesReply(OFFeaturesReply featuresReply) {
+        synchronized(portLock) {
+            if (stringId == null) {
+                /* ports are updated via port status message, so we
+                 * only fill in ports on initial connection.
+                 */
+                for (OFPhysicalPort port : featuresReply.getPorts()) {
+                    setPort(port);
+                }
+            }
+            this.datapathId = featuresReply.getDatapathId();
+            this.capabilities = featuresReply.getCapabilities();
+            this.buffers = featuresReply.getBuffers();
+            this.actions = featuresReply.getActions();
+            this.tables = featuresReply.getTables();
+            this.stringId = HexString.toHexString(this.datapathId);
+        }
+    }
+
+    @Override
+    @JsonIgnore
+    public Collection<OFPhysicalPort> getEnabledPorts() {
+        List<OFPhysicalPort> result = new ArrayList<OFPhysicalPort>();
+        for (OFPhysicalPort port : portsByNumber.values()) {
+            if (portEnabled(port)) {
+                result.add(port);
+            }
+        }
+        return result;
+    }
+    
+    @Override
+    @JsonIgnore
+    public Collection<Short> getEnabledPortNumbers() {
+        List<Short> result = new ArrayList<Short>();
+        for (OFPhysicalPort port : portsByNumber.values()) {
+            if (portEnabled(port)) {
+                result.add(port.getPortNumber());
+            }
+        }
+        return result;
+    }
+
+    @Override
+    public OFPhysicalPort getPort(short portNumber) {
+        return portsByNumber.get(portNumber);
+    }
+    
+    @Override
+    public OFPhysicalPort getPort(String portName) {
+        return portsByName.get(portName);
+    }
+
+    @Override
+    @JsonIgnore
+    public void setPort(OFPhysicalPort port) {
+        synchronized(portLock) {
+            portsByNumber.put(port.getPortNumber(), port);
+            portsByName.put(port.getName(), port);
+        }
+    }
+    
+    @Override
+    @JsonProperty("ports")
+    public Collection<OFPhysicalPort> getPorts() {
+        return Collections.unmodifiableCollection(portsByNumber.values());
+    }
+    
+    @Override
+    public void deletePort(short portNumber) {
+        synchronized(portLock) {
+            portsByName.remove(portsByNumber.get(portNumber).getName());
+            portsByNumber.remove(portNumber);
+        }
+    }
+    
+    @Override
+    public void deletePort(String portName) {
+        synchronized(portLock) {
+            portsByNumber.remove(portsByName.get(portName).getPortNumber());
+            portsByName.remove(portName);
+        }
+    }
+
+    @Override
+    public boolean portEnabled(short portNumber) {
+        if (portsByNumber.get(portNumber) == null) return false;
+        return portEnabled(portsByNumber.get(portNumber));
+    }
+    
+    @Override
+    public boolean portEnabled(String portName) {
+        if (portsByName.get(portName) == null) return false;
+        return portEnabled(portsByName.get(portName));
+    }
+    
+    @Override
+    public boolean portEnabled(OFPhysicalPort port) {
+        if (port == null)
+            return false;
+        if ((port.getConfig() & OFPortConfig.OFPPC_PORT_DOWN.getValue()) > 0)
+            return false;
+        if ((port.getState() & OFPortState.OFPPS_LINK_DOWN.getValue()) > 0)
+            return false;
+        // Port STP state doesn't work with multiple VLANs, so ignore it for now
+        //if ((port.getState() & OFPortState.OFPPS_STP_MASK.getValue()) == OFPortState.OFPPS_STP_BLOCK.getValue())
+        //    return false;
+        return true;
+    }
+    
+    @Override
+    @JsonSerialize(using=DPIDSerializer.class)
+    @JsonProperty("dpid")
+    public long getId() {
+        if (this.stringId == null)
+            throw new RuntimeException("Features reply has not yet been set");
+        return this.datapathId;
+    }
+
+    @JsonIgnore
+    @Override
+    public String getStringId() {
+        return stringId;
+    }
+
+    /* (non-Javadoc)
+     * @see java.lang.Object#toString()
+     */
+    @Override
+    public String toString() {
+        return "OFSwitchImpl [" + channel.getRemoteAddress() + " DPID[" + ((stringId != null) ? stringId : "?") + "]]";
+    }
+
+    @Override
+    public ConcurrentMap<Object, Object> getAttributes() {
+        return this.attributes;
+    }
+
+    @Override
+    public Date getConnectedSince() {
+        return connectedSince;
+    }
+
+    @JsonIgnore
+    @Override
+    public int getNextTransactionId() {
+        return this.transactionIdSource.incrementAndGet();
+    }
+
+    @Override
+    public void sendStatsQuery(OFStatisticsRequest request, int xid,
+                                IOFMessageListener caller) throws IOException {
+        request.setXid(xid);
+        this.iofMsgListenersMap.put(xid, caller);
+        List<OFMessage> msglist = new ArrayList<OFMessage>(1);
+        msglist.add(request);
+        this.channel.write(msglist);
+        return;
+    }
+
+    @Override
+    public Future<List<OFStatistics>> getStatistics(OFStatisticsRequest request) throws IOException {
+        request.setXid(getNextTransactionId());
+        OFStatisticsFuture future = new OFStatisticsFuture(threadPool, this, request.getXid());
+        this.statsFutureMap.put(request.getXid(), future);
+        List<OFMessage> msglist = new ArrayList<OFMessage>(1);
+        msglist.add(request);
+        this.channel.write(msglist);
+        return future;
+    }
+
+    @Override
+    public void deliverStatisticsReply(OFMessage reply) {
+        OFStatisticsFuture future = this.statsFutureMap.get(reply.getXid());
+        if (future != null) {
+            future.deliverFuture(this, reply);
+            // The future will ultimately unregister itself and call
+            // cancelStatisticsReply
+            return;
+        }
+        /* Transaction id was not found in statsFutureMap. Check the other map. */
+        IOFMessageListener caller = this.iofMsgListenersMap.get(reply.getXid());
+        if (caller != null) {
+            caller.receive(this, reply, null);
+        }
+    }
+
+    @Override
+    public void cancelStatisticsReply(int transactionId) {
+        if (null ==  this.statsFutureMap.remove(transactionId)) {
+            this.iofMsgListenersMap.remove(transactionId);
+        }
+    }
+
+    @Override
+    public void cancelAllStatisticsReplies() {
+        /* We don't need to be synchronized here. Even if another thread
+         * modifies the map while we're cleaning up, the future will
+         * eventually time out. */
+        for (OFStatisticsFuture f : statsFutureMap.values()) {
+            f.cancel(true);
+        }
+        statsFutureMap.clear();
+        iofMsgListenersMap.clear();
+    }
+ 
+    
+    /**
+     * @param floodlightProvider the floodlightProvider to set
+     */
+    @JsonIgnore
+    public void setFloodlightProvider(IFloodlightProviderService floodlightProvider) {
+        this.floodlightProvider = floodlightProvider;
+    }
+    
+    @JsonIgnore
+    public void setThreadPoolService(IThreadPoolService tp) {
+        this.threadPool = tp;
+    }
+
+    @JsonIgnore
+    @Override
+    public synchronized boolean isConnected() {
+        return connected;
+    }
+
+    @Override
+    @JsonIgnore
+    public synchronized void setConnected(boolean connected) {
+        this.connected = connected;
+    }
+    
+    @Override
+    public Role getRole() {
+        return role;
+    }
+    
+    @JsonIgnore
+    @Override
+    public boolean isActive() {
+        return (role != Role.SLAVE);
+    }
+    
+    @Override
+    @JsonIgnore
+    public void setSwitchProperties(OFDescriptionStatistics description) {
+        if (switchFeatures != null) {
+            switchFeatures.setFromDescription(this, description);
+        }
+    }
+
+    @Override
+    @LogMessageDoc(level="ERROR",
+                   message="Failed to clear all flows on switch {switch}",
+                   explanation="An I/O error occured while trying to clear " +
+                   		"flows on the switch.",
+                   recommendation=LogMessageDoc.CHECK_SWITCH)
+    public void clearAllFlowMods() {
+        // Delete all pre-existing flows
+        OFMatch match = new OFMatch().setWildcards(OFMatch.OFPFW_ALL);
+        OFMessage fm = ((OFFlowMod) floodlightProvider.getOFMessageFactory()
+            .getMessage(OFType.FLOW_MOD))
+                .setMatch(match)
+            .setCommand(OFFlowMod.OFPFC_DELETE)
+            .setOutPort(OFPort.OFPP_NONE)
+            .setLength(U16.t(OFFlowMod.MINIMUM_LENGTH));
+        try {
+            List<OFMessage> msglist = new ArrayList<OFMessage>(1);
+            msglist.add(fm);
+            channel.write(msglist);
+        } catch (Exception e) {
+            log.error("Failed to clear all flows on switch " + this, e);
+        }
+    }
+
+    @Override
+    public boolean updateBroadcastCache(Long entry, Short port) {
+        if (timedCache.update(entry)) {
+            Long count = portBroadcastCacheHitMap.putIfAbsent(port, new Long(1));
+            if (count != null) {
+                // Incrementing the local boxed Long alone would not update
+                // the map; write the incremented value back (best effort,
+                // not atomic under concurrent updates)
+                portBroadcastCacheHitMap.put(port, count + 1);
+            }
+            return true;
+        } else {
+            return false;
+        }
+    }
+
+    @Override
+    @JsonIgnore
+    public Map<Short, Long> getPortBroadcastHits() {
+    	return this.portBroadcastCacheHitMap;
+    }
+    
+
+    @Override
+    public void flush() {
+        Map<OFSwitchImpl,List<OFMessage>> msg_buffer_map = local_msg_buffer.get();
+        List<OFMessage> msglist = msg_buffer_map.get(this);
+        if ((msglist != null) && (msglist.size() > 0)) {
+            try {
+                this.write(msglist);
+            } catch (IOException e) {
+                // TODO: log exception
+                e.printStackTrace();
+            }
+            msglist.clear();
+        }
+    }
+
+    public static void flush_all() {
+        Map<OFSwitchImpl,List<OFMessage>> msg_buffer_map = local_msg_buffer.get();
+        for (OFSwitchImpl sw : msg_buffer_map.keySet()) {
+            sw.flush();
+        }
+    }
+
+    /**
+     * Return a read lock that must be held while calling the listeners for
+     * messages from the switch. Holding the read lock prevents the active
+     * switch list from being modified out from under the listeners.
+     * @return 
+     */
+    @JsonIgnore
+    public Lock getListenerReadLock() {
+        return listenerLock.readLock();
+    }
+
+    /**
+     * Return a write lock that must be held when the controller modifies the
+     * list of active switches. This is to ensure that the active switch list
+     * doesn't change out from under the listeners as they are handling a
+     * message from the switch.
+     * @return
+     */
+    @JsonIgnore
+    public Lock getListenerWriteLock() {
+        return listenerLock.writeLock();
+    }
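+
+    // Intended locking pattern (a sketch; the real call sites are in the
+    // controller's dispatch path): hold the read lock while delivering a
+    // message to listeners, and the write lock while changing the active
+    // switch list.
+    //
+    //   Lock lock = sw.getListenerReadLock();
+    //   lock.lock();
+    //   try {
+    //       // deliver the OFMessage to the registered IOFMessageListeners
+    //   } finally {
+    //       lock.unlock();
+    //   }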
+
+    /**
+     * Get the IP Address for the switch
+     * @return the inet address
+     */
+    @JsonSerialize(using=ToStringSerializer.class)
+    public SocketAddress getInetAddress() {
+        return channel.getRemoteAddress();
+    }
+    
+    /**
+     * Send NX role request message to the switch requesting the specified role.
+     * 
+     * This method should ONLY be called by @see RoleChanger.submitRequest(). 
+     * 
+     * After sending the request, add it to the queue of pending requests. We
+     * use the queue to later verify that we indeed receive the correct reply.
+     * @param role role to request
+     * @param cookie an opaque value that will be stored in the pending queue so
+     *        RoleChanger can check for timeouts.
+     * @return transaction id of the role request message that was sent
+     */
+    protected int sendNxRoleRequest(Role role, long cookie)
+            throws IOException {
+        synchronized(pendingRoleRequests) {
+            // Convert the role enum to the appropriate integer constant used
+            // in the NX role request message
+            int nxRole = 0;
+            switch (role) {
+                case EQUAL:
+                    nxRole = OFRoleVendorData.NX_ROLE_OTHER;
+                    break;
+                case MASTER:
+                    nxRole = OFRoleVendorData.NX_ROLE_MASTER;
+                    break;
+                case SLAVE:
+                    nxRole = OFRoleVendorData.NX_ROLE_SLAVE;
+                    break;
+                default:
+                    log.error("Invalid Role specified for switch {}."
+                              + " Disconnecting.", this);
+                    // TODO: should throw an error
+                    return 0;
+            }
+            
+            // Construct the role request message
+            OFVendor roleRequest = (OFVendor)floodlightProvider.
+                    getOFMessageFactory().getMessage(OFType.VENDOR);
+            int xid = this.getNextTransactionId();
+            roleRequest.setXid(xid);
+            roleRequest.setVendor(OFNiciraVendorData.NX_VENDOR_ID);
+            OFRoleRequestVendorData roleRequestData = new OFRoleRequestVendorData();
+            roleRequestData.setRole(nxRole);
+            roleRequest.setVendorData(roleRequestData);
+            roleRequest.setLengthU(OFVendor.MINIMUM_LENGTH + 
+                                   roleRequestData.getLength());
+            
+            // Send it to the switch
+            List<OFMessage> msglist = new ArrayList<OFMessage>(1);
+            msglist.add(roleRequest);
+            // FIXME: should this use this.write() in order for messages to
+            // be processed by handleOutgoingMessage()?
+            this.channel.write(msglist);
+            
+            pendingRoleRequests.add(new PendingRoleRequestEntry(xid, role, cookie));
+            return xid;
+        }
+    }
+    
+    /** 
+     * Deliver a RoleReply message to this switch. Checks if the reply 
+     * message matches the expected reply (head of the pending request queue). 
+     * We require in-order delivery of replies. If there's any deviation from
+     * our expectations we disconnect the switch. 
+     * 
+     * We must not check the received role against the controller's current
+     * role because there's no synchronization; that's fine, see RoleChanger.
+     * 
+     * Will be called by the OFChannelHandler's receive loop
+     * 
+     * @param xid Xid of the reply message
+     * @param role The Role in the reply message
+     */
+    @LogMessageDocs({
+        @LogMessageDoc(level="ERROR",
+                message="Switch {switch}: received unexpected role reply for " +
+                        "Role {role}" + 
+                        " Disconnecting switch",
+                explanation="The switch sent an unexpected HA role reply",
+                recommendation=HA_CHECK_SWITCH),                           
+        @LogMessageDoc(level="ERROR",
+                message="Switch {switch}: expected role reply with " +
+                        "Xid {xid}, got {xid}. Disconnecting switch",
+                explanation="The switch sent an unexpected HA role reply",
+                recommendation=HA_CHECK_SWITCH),                           
+        @LogMessageDoc(level="ERROR",
+                message="Switch {switch}: expected role reply with " +
+                        "Role {role}, got {role}. Disconnecting switch",
+                explanation="The switch sent an unexpected HA role reply",
+                recommendation=HA_CHECK_SWITCH)                           
+    })
+    protected void deliverRoleReply(int xid, Role role) {
+        synchronized(pendingRoleRequests) {
+            PendingRoleRequestEntry head = pendingRoleRequests.poll();
+            if (head == null) {
+                // Maybe don't disconnect if the role reply we received is 
+                // for the same role we are already in. 
+                log.error("Switch {}: received unexpected role reply for Role {}" + 
+                          " Disconnecting switch", this, role );
+                this.channel.close();
+            }
+            else if (head.xid != xid) {
+                // check xid before role!!
+                log.error("Switch {}: expected role reply with " +
+                       "Xid {}, got {}. Disconnecting switch",
+                       new Object[] { this, head.xid, xid } );
+                this.channel.close();
+            }
+            else if (head.role != role) {
+                log.error("Switch {}: expected role reply with " +
+                       "Role {}, got {}. Disconnecting switch",
+                       new Object[] { this, head.role, role } );
+                this.channel.close();
+            }
+            else {
+                log.debug("Received role reply message from {}, setting role to {}",
+                          this, role);
+                if (this.role == null && getAttribute(SWITCH_SUPPORTS_NX_ROLE) == null) {
+                    // The first role reply we received. Set the attribute
+                    // that the switch supports roles
+                    setAttribute(SWITCH_SUPPORTS_NX_ROLE, true);
+                }
+                this.role = role;
+            }
+        }
+    }
+    
+    /** 
+     * Checks whether the given xid matches the xid of the first pending
+     * role request. 
+     * @param xid
+     * @return 
+     */
+    protected boolean checkFirstPendingRoleRequestXid (int xid) {
+        synchronized(pendingRoleRequests) {
+            PendingRoleRequestEntry head = pendingRoleRequests.peek();
+            if (head == null)
+                return false;
+            else 
+                return head.xid == xid;
+        }
+    }
+    
+    /**
+     * Checks whether the given request cookie matches the cookie of the first 
+     * pending request
+     * @param cookie
+     * @return
+     */
+    protected boolean checkFirstPendingRoleRequestCookie(long cookie) {
+        synchronized(pendingRoleRequests) {
+            PendingRoleRequestEntry head = pendingRoleRequests.peek();
+            if (head == null)
+                return false;
+            else 
+                return head.cookie == cookie;
+        }
+    }
+    
+    /**
+     * Called if we receive a vendor error message indicating that roles
+     * are not supported by the switch. If the xid matches the first pending
+     * one, we'll mark the switch as not supporting roles and remove the head.
+     * Otherwise we disconnect the switch.
+     * @param xid
+     */
+    protected void deliverRoleRequestNotSupported(int xid) {
+        synchronized(pendingRoleRequests) {
+            PendingRoleRequestEntry head = pendingRoleRequests.poll();
+            this.role = null;
+            if (head!=null && head.xid == xid) {
+                setAttribute(SWITCH_SUPPORTS_NX_ROLE, false);
+            }
+            else {
+                this.channel.close();
+            }
+        }
+    }
+
+    @Override
+    public Future<OFFeaturesReply> getFeaturesReplyFromSwitch()
+            throws IOException {
+        OFMessage request = new OFFeaturesRequest();
+        request.setXid(getNextTransactionId());
+        OFFeaturesReplyFuture future =
+                new OFFeaturesReplyFuture(threadPool, this, request.getXid());
+        this.featuresFutureMap.put(request.getXid(), future);
+        List<OFMessage> msglist = new ArrayList<OFMessage>(1);
+        msglist.add(request);
+        this.channel.write(msglist);
+        return future;
+    }
+
+    @Override
+    public void deliverOFFeaturesReply(OFMessage reply) {
+        OFFeaturesReplyFuture future = this.featuresFutureMap.get(reply.getXid());
+        if (future != null) {
+            future.deliverFuture(this, reply);
+            // The future will ultimately unregister itself and call
+            // cancelFeaturesReply
+            return;
+        }
+        log.error("Switch {}: received unexpected featureReply", this);
+    }
+
+    @Override
+    public void cancelFeaturesReply(int transactionId) {
+        this.featuresFutureMap.remove(transactionId);
+    }
+
+
+    @Override
+    public int getBuffers() {
+        return buffers;
+    }
+
+
+    @Override
+    public int getActions() {
+        return actions;
+    }
+
+
+    @Override
+    public int getCapabilities() {
+        return capabilities;
+    }
+
+
+    @Override
+    public byte getTables() {
+        return tables;
+    }
+}
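
For reference, the features-reply methods above implement a small asynchronous request/future pattern: getFeaturesReplyFromSwitch() registers an OFFeaturesReplyFuture keyed by the request xid, and deliverOFFeaturesReply() completes it when the reply arrives. A minimal caller sketch, assuming an IOFSwitch reference, an slf4j Logger, and the usual java.util.concurrent and org.openflow.protocol imports (none of this snippet is part of the imported sources):

    // Sketch only: ask a connected switch for its features reply and log a
    // few fields. 'sw' is assumed to come from the controller's switch map;
    // checked exceptions from Future.get() are omitted for brevity.
    Future<OFFeaturesReply> future = sw.getFeaturesReplyFromSwitch();
    OFFeaturesReply reply = future.get(2, TimeUnit.SECONDS);
    log.debug("Switch {}: {} buffers, {} tables",
              new Object[] {sw, reply.getBuffers(), reply.getTables()});
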
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/internal/OpenflowPipelineFactory.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/internal/OpenflowPipelineFactory.java
new file mode 100644
index 0000000..5fb5c34
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/internal/OpenflowPipelineFactory.java
@@ -0,0 +1,71 @@
+/**
+*    Copyright 2011, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.core.internal;
+
+import java.util.concurrent.ThreadPoolExecutor;
+
+import org.jboss.netty.channel.ChannelPipeline;
+import org.jboss.netty.channel.ChannelPipelineFactory;
+import org.jboss.netty.channel.Channels;
+import org.jboss.netty.handler.execution.ExecutionHandler;
+import org.jboss.netty.handler.timeout.IdleStateHandler;
+import org.jboss.netty.handler.timeout.ReadTimeoutHandler;
+import org.jboss.netty.util.HashedWheelTimer;
+import org.jboss.netty.util.Timer;
+
+/**
+ * Creates a ChannelPipeline for a server-side openflow channel
+ * @author readams
+ */
+public class OpenflowPipelineFactory implements ChannelPipelineFactory {
+
+    protected Controller controller;
+    protected ThreadPoolExecutor pipelineExecutor;
+    protected Timer timer;
+    protected IdleStateHandler idleHandler;
+    protected ReadTimeoutHandler readTimeoutHandler;
+    
+    public OpenflowPipelineFactory(Controller controller,
+                                   ThreadPoolExecutor pipelineExecutor) {
+        super();
+        this.controller = controller;
+        this.pipelineExecutor = pipelineExecutor;
+        this.timer = new HashedWheelTimer();
+        this.idleHandler = new IdleStateHandler(timer, 20, 25, 0);
+        this.readTimeoutHandler = new ReadTimeoutHandler(timer, 30);
+    }
+ 
+    @Override
+    public ChannelPipeline getPipeline() throws Exception {
+        OFChannelState state = new OFChannelState();
+        
+        ChannelPipeline pipeline = Channels.pipeline();
+        pipeline.addLast("ofmessagedecoder", new OFMessageDecoder());
+        pipeline.addLast("ofmessageencoder", new OFMessageEncoder());
+        pipeline.addLast("idle", idleHandler);
+        pipeline.addLast("timeout", readTimeoutHandler);
+        pipeline.addLast("handshaketimeout",
+                         new HandshakeTimeoutHandler(state, timer, 15));
+        if (pipelineExecutor != null)
+            pipeline.addLast("pipelineExecutor",
+                             new ExecutionHandler(pipelineExecutor));
+        pipeline.addLast("handler", controller.getChannelHandler(state));
+        return pipeline;
+    }
+
+}
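
The factory above assembles the server-side pipeline: OpenFlow decode/encode, idle and read-timeout handlers, a handshake timeout, an optional execution handler, and finally the controller's channel handler. A sketch of how such a factory is typically installed on a Netty 3 server bootstrap (the bootstrap wiring below is illustrative and not taken from the imported Controller class):

    // Illustrative Netty 3 bootstrap; 'controller' and 'pipelineExecutor'
    // are assumed to exist (normally created by the Controller module).
    ServerBootstrap bootstrap = new ServerBootstrap(
            new NioServerSocketChannelFactory(
                    Executors.newCachedThreadPool(),
                    Executors.newCachedThreadPool()));
    bootstrap.setPipelineFactory(
            new OpenflowPipelineFactory(controller, pipelineExecutor));
    bootstrap.bind(new InetSocketAddress(6633)); // classic OpenFlow controller port
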
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/internal/RoleChanger.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/internal/RoleChanger.java
new file mode 100644
index 0000000..6378136
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/internal/RoleChanger.java
@@ -0,0 +1,321 @@
+package net.floodlightcontroller.core.internal;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Iterator;
+import java.util.concurrent.DelayQueue;
+import java.util.concurrent.Delayed;
+import java.util.concurrent.TimeUnit;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import net.floodlightcontroller.core.IFloodlightProviderService.Role;
+import net.floodlightcontroller.core.IOFSwitch;
+import net.floodlightcontroller.core.annotations.LogMessageDoc;
+
+/** 
+ * This class handles sending of RoleRequest messages to all connected switches.
+ * 
+ * Handling Role Requests is tricky. Roles are hard state on the switch and
+ * we can't query it so we need to make sure that we have consistent states
+ * on the switches. Whenever we send a role request to the set of connected 
+ * switches we need to make sure that we've sent the request to all of them 
+ * before we process the next change request. If a new switch connects, we 
+ * need to send it the current role and need to make sure that the current
+ * role doesn't change while we are doing it. We achieve this by synchronizing
+ * all these actions on Controller.roleChanger
+ * On the receive side: we need to make sure that we receive a reply for each 
+ * request we send and that the reply is consistent with the request we sent. 
+ * We'd also like to send the role request to the switch asynchronously in a
+ * separate thread so we don't block the REST API or other callers.
+ * 
+ * There are potential ways to relax these synchronization requirements:
+ * - "Generation ID" for each role request. However, this would be most useful
+ *   if it were global for the whole cluster
+ * - Regularly resend the controller's current role. Don't know whether this
+ *   might have adverse effects on the switch. 
+ *   
+ * Caveats:
+ * - No way to know if another controller (not in our controller cluster) 
+ *   sends MASTER requests to connected switches. Then we would drop to
+ *   slave role without knowing it. Could regularly resend the current role. 
+ *   Ideally the switch would notify us if it demoted us. What happens if
+ *   the other controller also regularly resends the same role request? 
+ *   Or if the health check determines that
+ *   a controller is dead but the controller is still talking to switches (maybe
+ *   just its health check failed) and resending the master role request.... 
+ *   We could try to detect if a switch demoted us to slave even if we think
+ *   we are master (error messages on packet outs, e.g., when sending LLDPs)
+ * 
+ *
+ * The general model of Role Request handling is as follows:
+ * 
+ * - All role request messages are handled by this class. Class Controller 
+ *   submits a role change request and the request gets queued. submitRequest
+ *   takes a Collection of switches to which to send the request. We make a copy
+ *   of this list. 
+ * - A thread takes these change requests from the queue and sends them to 
+ *   all the switches (using our copy of the switch list). 
+ * - The OFSwitchImpl sends the request over the wire and puts the request
+ *   into a queue of pending request (storing xid and role). We start a timeout 
+ *   to make sure we eventually receive a reply from the switch. We use a single
+ *   timeout for each request submitted using submitRequest()
+ * - After the timeout triggers we go over the list of switches again and
+ *   check that a response has been received (by checking the head of the 
+ *   OFSwitchImpl's queue of pending requests)
+ * - We handle requests and timeouts in the same thread. We use a priority queue
+ *   to schedule them so we are guaranteed that they are processed in 
+ *   the same order as they are submitted. If a request times out we drop
+ *   the connection to this switch. 
+ * - Since we decouple submission of role change requests and actually sending
+ *   them we cannot check a received role reply against the controller's current 
+ *   role because the controller's current role could have changed again. 
+ * - Receiving Role Reply messages is handled by OFChannelHandler and
+ *   OFSwitchImpl directly. The OFSwitchImpl checks if the received request 
+ *   is as expected (xid and role match the head of the pending queue in 
+ *   OFSwitchImpl). If so
+ *   the switch updates its role. Otherwise the connection is dropped. If this
+ *   is the first reply, the SWITCH_SUPPORTS_NX_ROLE attribute is set.
+ *   Next, we call addSwitch(), removeSwitch() to update the list of active
+ *   switches if appropriate.
+ * - If we receive an Error indicating that roles are not supported by the 
+ *   switch, we set the SWITCH_SUPPORTS_NX_ROLE to false. We keep the 
+ *   switch connection alive while in MASTER and EQUAL role. 
+ *   (TODO: is this the right behavior for EQUAL??). If the role changes to
+ *   SLAVE the switch connection is dropped (remember: only if the switch
+ *   doesn't support role requests)  
+ *   The expected behavior is that the switch will probably try to reconnect
+ *   repeatedly (with some sort of exponential backoff), but after a while 
+ *   will give up and move on to the next controller IP configured on the 
+ *   switch. This is the serial failover mechanism from OpenFlow spec v1.0.
+ *   
+ * New switch connection:
+ * - Switch handshake is done without sending any role request messages.
+ * - After handshake completes, switch is added to the list of connected switches
+ *   and we send the first role request message if role
+ *   requests are enabled. If roles are disabled automatically promote switch to
+ *   active switch list and clear FlowTable.
+ * - When we receive the first reply we proceed as above. In addition, if
+ *   the role request is for MASTER we wipe the flow table. We do not wipe
+ *   the flow table if the switch connected while role support was disabled
+ *   on the controller. 
+ *
+ */
+public class RoleChanger {
+    // FIXME: Upon closer inspection DelayQueue seems to be somewhat broken. 
+    // We are required to implement a compareTo based on getDelay() and 
+    // getDelay() must return the remaining delay, thus it needs to use the 
+    // current time. So x1.compareTo(x1) can never return 0, since some time
+    // will have passed between evaluating the two getDelay() calls. This is 
+    // even worse if the thread happens to be preempted between the two calls.
+    // For the time being we enforce a small delay between subsequent
+    // role request messages and hope that's long enough to not screw up
+    // ordering. In the long run we might want to use two threads and two queues
+    // (one for requests, one for timeouts)
+    // Sigh. 
+    protected DelayQueue<RoleChangeTask> pendingTasks;
+    protected long lastSubmitTime;
+    protected Thread workerThread;
+    protected long timeout;
+    protected static long DEFAULT_TIMEOUT = 15L*1000*1000*1000L; // 15s
+    protected static Logger log = LoggerFactory.getLogger(RoleChanger.class);
+    /** 
+     * A queued task to be handled by the Role changer thread. 
+     */
+    protected static class RoleChangeTask implements Delayed {
+        protected enum Type { 
+            /** This is a request. Dispatch the role update to switches */
+            REQUEST,
+            /** This is a timeout task. Check if all switches have 
+                correctly replied to the previously dispatched role request */
+            TIMEOUT
+        }
+        // The set of switches to work on
+        public Collection<OFSwitchImpl> switches;
+        public Role role;
+        public Type type;
+        // the time when the task should run as nanoTime() 
+        public long deadline;
+        public RoleChangeTask(Collection<OFSwitchImpl> switches, Role role, long deadline) {
+            this.switches = switches;
+            this.role = role;
+            this.type = Type.REQUEST;
+            this.deadline = deadline;
+        }
+        @Override
+        public int compareTo(Delayed o) {
+            Long timeRemaining = getDelay(TimeUnit.NANOSECONDS);
+            return timeRemaining.compareTo(o.getDelay(TimeUnit.NANOSECONDS));
+        }
+        @Override
+        public long getDelay(TimeUnit tu) {
+            long timeRemaining = deadline - System.nanoTime();
+            return tu.convert(timeRemaining, TimeUnit.NANOSECONDS);
+        }
+    }
+    
+    @LogMessageDoc(level="ERROR",
+                   message="RoleRequestWorker task had an uncaught exception.",
+                   explanation="An unknown occured while processing an HA " +
+                   		"role change event.",
+                   recommendation=LogMessageDoc.GENERIC_ACTION)                              
+    protected class RoleRequestWorker extends Thread  {
+        @Override
+        public void run() {
+            RoleChangeTask t;
+            boolean interrupted = false;
+            log.trace("RoleRequestWorker thread started");
+            try {
+                while (true) {
+                    try {
+                        t = pendingTasks.take();
+                    } catch (InterruptedException e) {
+                        // see http://www.ibm.com/developerworks/java/library/j-jtp05236/index.html
+                        interrupted = true;
+                        continue;
+                    }
+                    if (t.type == RoleChangeTask.Type.REQUEST) {
+                        sendRoleRequest(t.switches, t.role, t.deadline);
+                        // Queue the timeout
+                        t.type = RoleChangeTask.Type.TIMEOUT;
+                        t.deadline += timeout;
+                        pendingTasks.put(t);
+                    }
+                    else {
+                        verifyRoleReplyReceived(t.switches, t.deadline);
+                    }
+                }
+            }
+            catch (Exception e) {
+                // Should never get here
+                log.error("RoleRequestWorker task had an uncaught exception. ", 
+                          e);
+            }
+            finally {
+                // Be nice in case we earlier caught an InterruptedException
+                if (interrupted)
+                    Thread.currentThread().interrupt();
+            }
+        } // end run()
+    }
+    
+    public RoleChanger() {
+        this.pendingTasks = new DelayQueue<RoleChangeTask>();
+        this.workerThread = new Thread(new RoleRequestWorker());
+        this.timeout = DEFAULT_TIMEOUT;
+        this.workerThread.start();
+    }
+    
+    
+    public synchronized void submitRequest(Collection<OFSwitchImpl> switches, Role role) {
+        long deadline = System.nanoTime();
+        // Grrr. stupid DelayQueue. Make sure we have at least 10ms between 
+        // role request messages.
+        if (deadline - lastSubmitTime < 10 * 1000*1000) 
+            deadline = lastSubmitTime + 10 * 1000*1000;
+        // make a copy of the list 
+        ArrayList<OFSwitchImpl> switches_copy = new ArrayList<OFSwitchImpl>(switches);
+        RoleChangeTask req = new RoleChangeTask(switches_copy, role, deadline);
+        pendingTasks.put(req);
+        lastSubmitTime = deadline;
+    }
+    
+    /**
+     * Send a role request message to switches. This checks the capabilities 
+     * of the switch for understanding role request messaging. Currently we only 
+     * support the OVS-style role request message, but once the controller 
+     * supports OF 1.2, this function will also handle sending out the 
+     * OF 1.2-style role request message.
+     * @param switches the collection of switches to send the request to
+     * @param role the role to request
+     */
+    @LogMessageDoc(level="WARN",
+            message="Failed to send role request message " + 
+                    "to switch {switch}: {message}. Disconnecting",
+            explanation="An I/O error occurred while attempting to change " +
+            		"the switch HA role.",
+            recommendation=LogMessageDoc.CHECK_SWITCH)                              
+    protected void sendRoleRequest(Collection<OFSwitchImpl> switches,
+                                   Role role, long cookie) {
+        // There are three cases to consider:
+        //
+        // 1) If the controller role at the point the switch connected was
+        //    null/disabled, then we never sent the role request probe to the
+        //    switch and therefore never set the SWITCH_SUPPORTS_NX_ROLE
+        //    attribute for the switch, so supportsNxRole is null. In that
+        //    case since we're now enabling role support for the controller
+        //    we should send out the role request probe/update to the switch.
+        //
+        // 2) If supportsNxRole == Boolean.TRUE then that means we've already
+        //    sent the role request probe to the switch and it replied with
+        //    a role reply message, so we know it supports role request
+        //    messages. Now we're changing the role and we want to send
+        //    it another role request message to inform it of the new role
+        //    for the controller.
+        //
+        // 3) If supportsNxRole == Boolean.FALSE, then that means we sent the
+        //    role request probe to the switch but it responded with an error
+        //    indicating that it didn't understand the role request message.
+        //    In that case we don't want to send it another role request that
+        //    it (still) doesn't understand. But if the new role of the
+        //    controller is SLAVE, then we don't want the switch to remain
+        //    connected to this controller. It might support the older serial
+        //    failover model for HA support, so we want to terminate the
+        //    connection and get it to initiate a connection with another
+        //    controller in its list of controllers. Eventually (hopefully, if
+        //    things are configured correctly) it will walk down its list of
+        //    controllers and connect to the current master controller.
+        Iterator<OFSwitchImpl> iter = switches.iterator();
+        while(iter.hasNext()) {
+            OFSwitchImpl sw = iter.next();
+            try {
+                Boolean supportsNxRole = (Boolean)
+                        sw.getAttribute(IOFSwitch.SWITCH_SUPPORTS_NX_ROLE);
+                if ((supportsNxRole == null) || supportsNxRole) {
+                    // Handle cases #1 and #2
+                    sw.sendNxRoleRequest(role, cookie);
+                } else {
+                    // Handle case #3
+                    if (role == Role.SLAVE) {
+                        log.debug("Disconnecting switch {} that doesn't support " +
+                        "role request messages from a controller that went to SLAVE mode");
+                        // Closing the channel should result in a call to
+                        // channelDisconnect which updates all state 
+                        sw.getChannel().close();
+                        iter.remove();
+                    }
+                }
+            } catch (IOException e) {
+                log.warn("Failed to send role request message " + 
+                         "to switch {}: {}. Disconnecting",
+                         sw, e);
+                sw.getChannel().close();
+                iter.remove();
+            }
+        }
+    }
+    
+    /**
+     * Verify that a role reply has been received for the role request
+     * identified by the given cookie. Switches for which the request is
+     * still pending have timed out and are disconnected.
+     * @param switches the collection of switches to check
+     * @param cookie the cookie of the request
+     */
+    @LogMessageDoc(level="WARN",
+            message="Timeout while waiting for role reply from switch {switch}."
+                    + " Disconnecting",
+            explanation="Timed out waiting for the switch to respond to " +
+            		"a request to change the HA role.",
+            recommendation=LogMessageDoc.CHECK_SWITCH)                              
+    protected void verifyRoleReplyReceived(Collection<OFSwitchImpl> switches,
+                                   long cookie) {
+        for (OFSwitchImpl sw: switches) {
+            if (sw.checkFirstPendingRoleRequestCookie(cookie)) {
+                sw.getChannel().close();
+                log.warn("Timeout while waiting for role reply from switch {}."
+                         + " Disconnecting", sw);
+            }
+        }
+    }
+}
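
Tying the class comment above to the API: a caller hands a role change for a set of switches to the single worker thread via submitRequest(); the worker dispatches the request and later re-queues it as a TIMEOUT task to verify replies. A short sketch, assuming some way of obtaining the connected switches (not shown in this file):

    // Sketch only: promote this controller to MASTER on all connected switches.
    RoleChanger roleChanger = new RoleChanger();
    Collection<OFSwitchImpl> connected = getConnectedSwitches(); // assumed helper
    roleChanger.submitRequest(connected, Role.MASTER);
    // RoleChanger copies the collection, sends NX role requests from its worker
    // thread, and disconnects any switch that has not replied within ~15s.
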
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/internal/SwitchStateException.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/internal/SwitchStateException.java
new file mode 100644
index 0000000..d2a928e
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/internal/SwitchStateException.java
@@ -0,0 +1,43 @@
+/**
+*    Copyright 2011, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.core.internal;
+
+/**
+ * 
+ */
+public class SwitchStateException extends Exception {
+
+    private static final long serialVersionUID = 9153954512470002631L;
+
+    public SwitchStateException() {
+        super();
+    }
+
+    public SwitchStateException(String arg0, Throwable arg1) {
+        super(arg0, arg1);
+    }
+
+    public SwitchStateException(String arg0) {
+        super(arg0);
+    }
+
+    public SwitchStateException(Throwable arg0) {
+        super(arg0);
+    }
+
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/module/FloodlightModuleContext.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/module/FloodlightModuleContext.java
new file mode 100644
index 0000000..0cbae32
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/module/FloodlightModuleContext.java
@@ -0,0 +1,104 @@
+package net.floodlightcontroller.core.module;
+
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * The service registry for an IFloodlightProvider.
+ * @author alexreimers
+ */
+public class FloodlightModuleContext implements IFloodlightModuleContext {
+	protected Map<Class<? extends IFloodlightService>, IFloodlightService> serviceMap;
+	protected Map<Class<? extends IFloodlightModule>, Map<String, String>> configParams;
+	protected Collection<IFloodlightModule> moduleSet;
+	
+	/**
+	 * Creates the ModuleContext for use with this IFloodlightProvider.
+	 * This will be used as a module registry for all IFloodlightModule(s).
+	 */
+	public FloodlightModuleContext() {
+		serviceMap = 
+		        new HashMap<Class<? extends IFloodlightService>,
+		                              IFloodlightService>();
+		configParams =
+		        new HashMap<Class<? extends IFloodlightModule>,
+		                        Map<String, String>>();
+	}
+	
+	/**
+	 * Adds an IFloodlightService implementation to this context's registry.
+	 * @param clazz the service class
+	 * @param service The IFloodlightService to add to the registry
+	 */
+	public void addService(Class<? extends IFloodlightService> clazz, 
+	                       IFloodlightService service) {
+		serviceMap.put(clazz, service);
+	}
+	
+	@SuppressWarnings("unchecked")
+    @Override
+	public <T extends IFloodlightService> T getServiceImpl(Class<T> service) {
+	    IFloodlightService s = serviceMap.get(service);
+		return (T)s;
+	}
+	
+	@Override
+	public Collection<Class<? extends IFloodlightService>> getAllServices() {
+	    return serviceMap.keySet();
+	}
+	
+	@Override
+	public Collection<IFloodlightModule> getAllModules() {
+	    return moduleSet;
+	}
+	
+	public void setModuleSet(Collection<IFloodlightModule> modSet) {
+	    this.moduleSet = modSet;
+	}
+	
+	/**
+	 * Gets the configuration parameter map for a module
+	 * @param module The module to get the configuration map for, usually yourself
+	 * @return A map containing all the configuration parameters for the module, may be empty
+	 */
+	@Override
+	public Map<String, String> getConfigParams(IFloodlightModule module) {
+	    Map<String, String> retMap = configParams.get(module.getClass());
+	    if (retMap == null) {
+	        // Return an empty map if none exists so the module does not
+	        // need to null check the map
+	        retMap = new HashMap<String, String>();
+	        configParams.put(module.getClass(), retMap);
+	    }
+
+	    // also add any configuration parameters for superclasses, but
+	    // only if more specific configuration does not override it
+	    for (Class<? extends IFloodlightModule> c : configParams.keySet()) {
+	        if (c.isInstance(module)) {
+	            for (Map.Entry<String, String> ent : configParams.get(c).entrySet()) {
+	                if (!retMap.containsKey(ent.getKey())) {
+	                    retMap.put(ent.getKey(), ent.getValue());
+	                }
+	            }
+	        }
+	    }
+
+	    return retMap;
+	}
+	
+	/**
+	 * Adds a configuration parameter for a module
+	 * @param mod The fully qualified module name to add the parameter to
+	 * @param key The configuration parameter key
+	 * @param value The configuration parameter value
+	 */
+	public void addConfigParam(IFloodlightModule mod, String key, String value) {
+	    Map<String, String> moduleParams = configParams.get(mod.getClass());
+	    if (moduleParams == null) {
+	        moduleParams = new HashMap<String, String>();
+	        configParams.put(mod.getClass(), moduleParams);
+	    }
+	    moduleParams.put(key, value);
+	}
+ }
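
A brief usage sketch for the registry above: the module loader registers service implementations and per-module configuration, and modules read both back during init. All Example* names below are hypothetical placeholders:

    // Sketch only; IExampleService / ExampleServiceImpl / ExampleModule are
    // hypothetical placeholders, not part of the imported sources.
    FloodlightModuleContext ctx = new FloodlightModuleContext();
    ctx.addService(IExampleService.class, new ExampleServiceImpl());
    ctx.addConfigParam(new ExampleModule(), "listenPort", "6633");

    // Later, inside ExampleModule.init(ctx):
    IExampleService svc = ctx.getServiceImpl(IExampleService.class);
    String port = ctx.getConfigParams(new ExampleModule()).get("listenPort");
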
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/module/FloodlightModuleException.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/module/FloodlightModuleException.java
new file mode 100644
index 0000000..20ccc86
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/module/FloodlightModuleException.java
@@ -0,0 +1,9 @@
+package net.floodlightcontroller.core.module;
+
+public class FloodlightModuleException extends Exception {
+	private static final long serialVersionUID = 1L;
+
+	public FloodlightModuleException(String error) {
+		super(error);
+	}
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/module/FloodlightModuleLoader.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/module/FloodlightModuleLoader.java
new file mode 100644
index 0000000..45fe997
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/module/FloodlightModuleLoader.java
@@ -0,0 +1,444 @@
+package net.floodlightcontroller.core.module;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Enumeration;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Properties;
+import java.util.Queue;
+import java.util.ServiceConfigurationError;
+import java.util.ServiceLoader;
+import java.util.Set;
+
+import net.floodlightcontroller.core.annotations.LogMessageDoc;
+import net.floodlightcontroller.core.annotations.LogMessageDocs;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Finds all Floodlight modules in the class path and loads/starts them.
+ * @author alexreimers
+ *
+ */
+public class FloodlightModuleLoader {
+    protected static Logger logger = 
+            LoggerFactory.getLogger(FloodlightModuleLoader.class);
+
+    protected static Map<Class<? extends IFloodlightService>,
+                  Collection<IFloodlightModule>> serviceMap;
+    protected static Map<IFloodlightModule,
+                  Collection<Class<? extends 
+                                   IFloodlightService>>> moduleServiceMap;
+    protected static Map<String, IFloodlightModule> moduleNameMap;
+    protected static Object lock = new Object();
+    
+    protected FloodlightModuleContext floodlightModuleContext;
+	
+    public static final String COMPILED_CONF_FILE = 
+            "floodlightdefault.properties";
+    public static final String FLOODLIGHT_MODULES_KEY =
+            "floodlight.modules";
+    
+	public FloodlightModuleLoader() {
+	    floodlightModuleContext = new FloodlightModuleContext();
+	}
+	
+	/**
+	 * Finds all IFloodlightModule(s) in the classpath. It creates 3 Maps.
+	 * serviceMap -> Maps a service to a module
+	 * moduleServiceMap -> Maps a module to all the services it provides
+	 * moduleNameMap -> Maps the string name to the module
+	 * @throws FloodlightModuleException If two modules are specified in the configuration
+	 * that provide the same service.
+	 */
+	protected static void findAllModules(Collection<String> mList) throws FloodlightModuleException {
+	    synchronized (lock) {
+	        if (serviceMap != null) return;
+	        serviceMap = 
+	                new HashMap<Class<? extends IFloodlightService>,
+	                            Collection<IFloodlightModule>>();
+	        moduleServiceMap = 
+	                new HashMap<IFloodlightModule,
+	                            Collection<Class<? extends 
+	                                       IFloodlightService>>>();
+	        moduleNameMap = new HashMap<String, IFloodlightModule>();
+	        
+	        // Get all the current modules in the classpath
+	        ClassLoader cl = Thread.currentThread().getContextClassLoader();
+	        ServiceLoader<IFloodlightModule> moduleLoader
+	            = ServiceLoader.load(IFloodlightModule.class, cl);
+	        // Iterate over each module and add its services
+	        Iterator<IFloodlightModule> moduleIter = moduleLoader.iterator();
+	        while (moduleIter.hasNext()) {
+	        	IFloodlightModule m = null;
+	        	try {
+	        		m = moduleIter.next();
+	        	} catch (ServiceConfigurationError sce) {
+	        		logger.debug("Could not find module");
+	        		//moduleIter.remove();
+	        		continue;
+	        	}
+	        //}
+	        //for (IFloodlightModule m : moduleLoader) {
+	            if (logger.isDebugEnabled()) {
+	                logger.debug("Found module " + m.getClass().getName());
+	            }
+
+	            // Set up moduleNameMap
+	            moduleNameMap.put(m.getClass().getCanonicalName(), m);
+
+	            // Set up serviceMap
+	            Collection<Class<? extends IFloodlightService>> servs =
+	                    m.getModuleServices();
+	            if (servs != null) {
+	                moduleServiceMap.put(m, servs);
+	                for (Class<? extends IFloodlightService> s : servs) {
+	                    Collection<IFloodlightModule> mods = 
+	                            serviceMap.get(s);
+	                    if (mods == null) {
+	                        mods = new ArrayList<IFloodlightModule>();
+	                        serviceMap.put(s, mods);
+	                    }
+	                    mods.add(m);
+	                    // Make sure they haven't specified duplicate modules in the config
+	                    int dupInConf = 0;
+	                    for (IFloodlightModule cMod : mods) {
+	                        if (mList.contains(cMod.getClass().getCanonicalName()))
+	                            dupInConf += 1;
+	                    }
+	                    
+	                    if (dupInConf > 1) {
+	                        String duplicateMods = "";
+                            for (IFloodlightModule mod : mods) {
+                                duplicateMods += mod.getClass().getCanonicalName() + ", ";
+                            }
+	                        throw new FloodlightModuleException("ERROR! The configuraiton" +
+	                                " file specifies more than one module that provides the service " +
+	                                s.getCanonicalName() +". Please specify only ONE of the " +
+	                                "following modules in the config file: " + duplicateMods);
+	                    }
+	                }
+	            }
+	        }
+	    }
+	}
+	
+	/**
+	 * Loads the modules from a specified configuration file.
+	 * @param fName The configuration file path
+	 * @return An IFloodlightModuleContext with all the modules to be started
+	 * @throws FloodlightModuleException
+	 */
+	@LogMessageDocs({
+	    @LogMessageDoc(level="INFO",
+	            message="Loading modules from file {file name}",
+	            explanation="The controller is initializing its module " +
+	                    "configuration from the specified properties file"),
+	    @LogMessageDoc(level="INFO",
+	            message="Loading default modules",
+	            explanation="The controller is initializing its module " +
+	                    "configuration to the default configuration"),
+	    @LogMessageDoc(level="ERROR",
+	            message="Could not load module configuration file",
+	            explanation="The controller failed to read the " +
+	            		"module configuration file",
+	            recommendation="Verify that the module configuration is " +
+	            		"present. " + LogMessageDoc.CHECK_CONTROLLER),
+	    @LogMessageDoc(level="ERROR",
+                message="Could not load default modules",
+                explanation="The controller failed to read the default " +
+                        "module configuration",
+                recommendation=LogMessageDoc.CHECK_CONTROLLER)
+	})
+	public IFloodlightModuleContext loadModulesFromConfig(String fName) 
+	        throws FloodlightModuleException {
+	    Properties prop = new Properties();
+	    
+	    File f = new File(fName);
+	    if (f.isFile()) {
+            logger.info("Loading modules from file {}", fName);
+            try {
+                prop.load(new FileInputStream(fName));
+            } catch (Exception e) {
+                logger.error("Could not load module configuration file", e);
+                System.exit(1);
+            }
+        } else {
+            logger.info("Loading default modules");
+            InputStream is = this.getClass().getClassLoader().
+                                    getResourceAsStream(COMPILED_CONF_FILE);
+            try {
+                prop.load(is);
+            } catch (IOException e) {
+                logger.error("Could not load default modules", e);
+                System.exit(1);
+            }
+        }
+        
+        String moduleList = prop.getProperty(FLOODLIGHT_MODULES_KEY)
+                                .replaceAll("\\s", "");
+        Collection<String> configMods = new ArrayList<String>();
+        configMods.addAll(Arrays.asList(moduleList.split(",")));
+        return loadModulesFromList(configMods, prop);
+	}
+	
+	/**
+	 * Loads modules (and their dependencies) specified in the list
+	 * @param configMods The collection of fully qualified module names
+	 * @param prop The list of properties that are configuration options
+	 * @param ignoreList The list of Floodlight services NOT to 
+	 * load modules for. Used for unit testing.
+	 * @return The ModuleContext containing all the loaded modules
+	 * @throws FloodlightModuleException
+	 */
+	protected IFloodlightModuleContext loadModulesFromList(Collection<String> configMods, Properties prop, 
+			Collection<IFloodlightService> ignoreList) throws FloodlightModuleException {
+		logger.debug("Starting module loader");
+		if (logger.isDebugEnabled() && ignoreList != null)
+			logger.debug("Not loading module services " + ignoreList.toString());
+
+        findAllModules(configMods);
+        
+        Collection<IFloodlightModule> moduleSet = new ArrayList<IFloodlightModule>();
+        Map<Class<? extends IFloodlightService>, IFloodlightModule> moduleMap =
+                new HashMap<Class<? extends IFloodlightService>,
+                            IFloodlightModule>();
+
+        Queue<String> moduleQ = new LinkedList<String>();
+        // Add the explicitly configured modules to the queue
+        moduleQ.addAll(configMods);
+        Set<String> modsVisited = new HashSet<String>();
+        
+        while (!moduleQ.isEmpty()) {
+            String moduleName = moduleQ.remove();
+            if (modsVisited.contains(moduleName))
+                continue;
+            modsVisited.add(moduleName);
+            IFloodlightModule module = moduleNameMap.get(moduleName);
+            if (module == null) {
+                throw new FloodlightModuleException("Module " + 
+                        moduleName + " not found");
+            }
+            // If the module provides a service that is in the
+            // services ignore list, don't load it.
+            boolean ignoreModule = false;
+            if ((ignoreList != null) && (module.getModuleServices() != null)) {
+                for (IFloodlightService ifs : ignoreList) {
+                    for (Class<?> intsIgnore : ifs.getClass().getInterfaces()) {
+                        // Skip this module if it implements an ignored service interface
+                        if (intsIgnore.isAssignableFrom(module.getClass())) {
+                            logger.debug("Not loading module " + 
+                                         module.getClass().getCanonicalName() +
+                                         " because interface " +
+                                         intsIgnore.getCanonicalName() +
+                                         " is in the ignore list.");
+                            ignoreModule = true;
+                            break;
+                        }
+                    }
+                    if (ignoreModule) break;
+                }
+            }
+            if (ignoreModule) continue;
+            
+            // Add the module to be loaded
+            addModule(moduleMap, moduleSet, module);
+            // Add its dependencies to the queue
+            Collection<Class<? extends IFloodlightService>> deps = 
+                    module.getModuleDependencies();
+            if (deps != null) {
+                for (Class<? extends IFloodlightService> c : deps) {
+                    IFloodlightModule m = moduleMap.get(c);
+                    if (m == null) {
+                        Collection<IFloodlightModule> mods = serviceMap.get(c);
+                        // Make sure only one module is loaded
+                        if ((mods == null) || (mods.size() == 0)) {
+                            throw new FloodlightModuleException("ERROR! Could not " +
+                                    "find an IFloodlightModule that provides service " +
+                                    c.toString());
+                        } else if (mods.size() == 1) {
+                            IFloodlightModule mod = mods.iterator().next();
+                            if (!modsVisited.contains(mod.getClass().getCanonicalName()))
+                                moduleQ.add(mod.getClass().getCanonicalName());
+                        } else {
+                            boolean found = false;
+                            for (IFloodlightModule moduleDep : mods) {
+                                if (configMods.contains(moduleDep.getClass().getCanonicalName())) {
+                                    // Module will be loaded, we can continue
+                                    found = true;
+                                    break;
+                                }
+                            }
+                            if (!found) {
+                                String duplicateMods = "";
+                                for (IFloodlightModule mod : mods) {
+                                    duplicateMods += mod.getClass().getCanonicalName() + ", ";
+                                }
+                                throw new FloodlightModuleException("ERROR! Found more " + 
+                                    "than one (" + mods.size() + ") IFloodlightModules that provides " +
+                                    "service " + c.toString() + 
+                                    ". Please specify one of the following modules in the config: " + 
+                                    duplicateMods);
+                            }
+                        }
+                    }
+                }
+            }
+        }
+        
+        floodlightModuleContext.setModuleSet(moduleSet);
+        parseConfigParameters(prop);
+        initModules(moduleSet);
+        startupModules(moduleSet);
+        
+        return floodlightModuleContext;
+	}
+	
+	/**
+	 * Loads modules (and their dependencies) specified in the list.
+	 * @param configMods The collection of fully qualified module names to load.
+	 * @param prop The list of properties that are configuration options.
+	 * @return The ModuleContext containing all the loaded modules.
+	 * @throws FloodlightModuleException
+	 */
+	public IFloodlightModuleContext loadModulesFromList(Collection<String> configMods, Properties prop) 
+            throws FloodlightModuleException {
+		return loadModulesFromList(configMods, prop, null);
+    }
+	
+	/**
+	 * Add a module to the set of modules to load and register its services
+	 * @param moduleMap the module map
+	 * @param moduleSet the module set
+	 * @param module the module to add
+	 */
+	protected void addModule(Map<Class<? extends IFloodlightService>, 
+                                           IFloodlightModule> moduleMap,
+                            Collection<IFloodlightModule> moduleSet,
+                            IFloodlightModule module) {
+        if (!moduleSet.contains(module)) {
+            Collection<Class<? extends IFloodlightService>> servs =
+                    moduleServiceMap.get(module);
+            if (servs != null) {
+                for (Class<? extends IFloodlightService> c : servs)
+                    moduleMap.put(c, module);
+            }
+            moduleSet.add(module);
+        }
+	}
+
+    /**
+     * Allocate service implementations and then init all the modules
+     * @param moduleSet The set of modules to call their init function on
+     * @throws FloodlightModuleException If a module can not properly be loaded
+     */
+    protected void initModules(Collection<IFloodlightModule> moduleSet) 
+                                           throws FloodlightModuleException {
+        for (IFloodlightModule module : moduleSet) {            
+            // Get the module's service instance(s)
+            Map<Class<? extends IFloodlightService>, 
+                IFloodlightService> simpls = module.getServiceImpls();
+
+            // add its services to the context
+            if (simpls != null) {
+                for (Entry<Class<? extends IFloodlightService>, 
+                        IFloodlightService> s : simpls.entrySet()) {
+                    if (logger.isDebugEnabled()) {
+                        logger.debug("Setting " + s.getValue() + 
+                                     "  as provider for " + 
+                                     s.getKey().getCanonicalName());
+                    }
+                    if (floodlightModuleContext.getServiceImpl(s.getKey()) == null) {
+                        floodlightModuleContext.addService(s.getKey(),
+                                                           s.getValue());
+                    } else {
+                        throw new FloodlightModuleException("Cannot set "
+                                                            + s.getValue()
+                                                            + " as the provider for "
+                                                            + s.getKey().getCanonicalName()
+                                                            + " because "
+                                                            + floodlightModuleContext.getServiceImpl(s.getKey())
+                                                            + " already provides it");
+                    }
+                }
+            }
+        }
+        
+        for (IFloodlightModule module : moduleSet) {
+            // init the module
+            if (logger.isDebugEnabled()) {
+                logger.debug("Initializing " + 
+                             module.getClass().getCanonicalName());
+            }
+            module.init(floodlightModuleContext);
+        }
+    }
+    
+    /**
+     * Call each loaded module's startup method
+     * @param moduleSet the module set to start up
+     */
+    protected void startupModules(Collection<IFloodlightModule> moduleSet) {
+        for (IFloodlightModule m : moduleSet) {
+            if (logger.isDebugEnabled()) {
+                logger.debug("Starting " + m.getClass().getCanonicalName());
+            }
+            m.startUp(floodlightModuleContext);
+        }
+    }
+    
+    /**
+     * Parses configuration parameters for each module
+     * @param prop The properties file to use
+     */
+    @LogMessageDoc(level="WARN",
+                   message="Module {module} not found or loaded. " +
+                           "Not adding configuration option {key} = {value}",
+                   explanation="Ignoring a configuration parameter for a " +
+                   		"module that is not loaded.")
+    protected void parseConfigParameters(Properties prop) {
+    	if (prop == null) return;
+    	
+        Enumeration<?> e = prop.propertyNames();
+        while (e.hasMoreElements()) {
+            String key = (String) e.nextElement();
+            // Ignore module list key
+            if (key.equals(FLOODLIGHT_MODULES_KEY)) {
+                continue;
+            }
+            
+            String configValue = null;
+            int lastPeriod = key.lastIndexOf(".");
+            String moduleName = key.substring(0, lastPeriod);
+            String configKey = key.substring(lastPeriod + 1);
+            // Check to see if it's overridden on the command line
+            String systemKey = System.getProperty(key);
+            if (systemKey != null) {
+                configValue = systemKey;
+            } else {
+                configValue = prop.getProperty(key);
+            }
+            
+            IFloodlightModule mod = moduleNameMap.get(moduleName);
+            if (mod == null) {
+                logger.warn("Module {} not found or loaded. " +
+                		    "Not adding configuration option {} = {}", 
+                            new Object[]{moduleName, configKey, configValue});
+            } else {
+                floodlightModuleContext.addConfigParam(mod, configKey, configValue);
+            }
+        }
+    }
+}
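
The loader reads a standard Java properties file: the floodlight.modules key holds a comma-separated list of module class names, and every other key follows the <module class>.<parameter> convention consumed by parseConfigParameters(). A hedged example (the module names and file path are placeholders, not a recommended configuration):

    // floodlight.properties (illustrative):
    //   floodlight.modules = net.example.ModuleA, net.example.ModuleB
    //   net.example.ModuleA.listenPort = 6633
    //
    // Loading it (throws FloodlightModuleException on errors):
    FloodlightModuleLoader loader = new FloodlightModuleLoader();
    IFloodlightModuleContext ctx =
            loader.loadModulesFromConfig("conf/floodlight.properties");
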
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/module/IFloodlightModule.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/module/IFloodlightModule.java
new file mode 100644
index 0000000..f8b196b
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/module/IFloodlightModule.java
@@ -0,0 +1,74 @@
+package net.floodlightcontroller.core.module;
+
+import java.util.Collection;
+import java.util.Map;
+
+
+/**
+ * Defines an interface for loadable Floodlight modules.
+ * 
+ * At a high level, these functions are called in the following order:
+ * <ol>
+ * <li> getModuleServices() : what services does this module provide
+ * <li> getServiceImpls() : the objects implementing those services
+ * <li> getModuleDependencies() : list the service dependencies
+ * <li> init() : internal initializations (don't touch other modules)
+ * <li> startUp() : external initializations (<em>do</em> touch other modules)
+ * </ol>
+ * 
+ * @author alexreimers
+ */
+public interface IFloodlightModule {
+	
+	/**
+	 * Return the list of interfaces that this module implements.
+	 * All interfaces must inherit IFloodlightService
+	 * @return The collection of service classes this module provides
+	 */
+	
+	public Collection<Class<? extends IFloodlightService>> getModuleServices();
+	
+	/**
+	 * Instantiate (as needed) and return objects that implement each
+	 * of the services exported by this module.  The map returned maps
+	 * the implemented service to the object.  The object could be the
+	 * same object or different objects for different exported services.
+	 * @return The map from service interface class to service implementation
+	 */
+	public Map<Class<? extends IFloodlightService>,
+	           IFloodlightService> getServiceImpls();
+	
+	/**
+	 * Get the list of services this module depends on.  The module system
+	 * will ensure that each of these dependencies is resolved before the 
+	 * subsequent calls to init().
+	 * @return The Collection of IFloodlightServices that this module depends
+	 *         on.
+	 */
+	
+	public Collection<Class<? extends IFloodlightService>> getModuleDependencies();
+	
+	/**
+	 * This is a hook for each module to do its <em>internal</em> initialization, 
+	 * e.g., call setService(context.getService("Service"))
+	 * 
+	 * All module dependencies are resolved when this is called, but not every module 
+	 * is initialized.
+	 * 
+	 * @param context
+	 * @throws FloodlightModuleException
+	 */
+	
+	void init(FloodlightModuleContext context) throws FloodlightModuleException;
+	
+	/**
+	 * This is a hook for each module to do its <em>external</em> initializations,
+	 * e.g., register for callbacks or query for state in other modules
+	 * 
+	 * It is expected that this function will not block and that modules that want
+	 * non-event driven CPU will spawn their own threads.
+	 * 
+	 * @param context
+	 */
+	
+	void startUp(FloodlightModuleContext context); 
+}
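
To make the lifecycle above concrete, a minimal skeleton of a module that provides one service and depends on another is sketched below. All Example*/IOther* names are hypothetical; for ServiceLoader-based discovery by FloodlightModuleLoader the class would additionally need an entry in META-INF/services/net.floodlightcontroller.core.module.IFloodlightModule:

    import java.util.Collection;
    import java.util.Collections;
    import java.util.HashMap;
    import java.util.Map;

    public class ExampleModule implements IFloodlightModule, IExampleService {
        private IOtherService other;   // hypothetical dependency

        @Override
        public Collection<Class<? extends IFloodlightService>> getModuleServices() {
            return Collections.<Class<? extends IFloodlightService>>
                    singletonList(IExampleService.class);
        }

        @Override
        public Map<Class<? extends IFloodlightService>, IFloodlightService> getServiceImpls() {
            Map<Class<? extends IFloodlightService>, IFloodlightService> m =
                    new HashMap<Class<? extends IFloodlightService>, IFloodlightService>();
            m.put(IExampleService.class, this);
            return m;
        }

        @Override
        public Collection<Class<? extends IFloodlightService>> getModuleDependencies() {
            return Collections.<Class<? extends IFloodlightService>>
                    singletonList(IOtherService.class);
        }

        @Override
        public void init(FloodlightModuleContext context) throws FloodlightModuleException {
            // internal setup only: resolve dependencies, don't call into them yet
            other = context.getServiceImpl(IOtherService.class);
        }

        @Override
        public void startUp(FloodlightModuleContext context) {
            // external setup: other modules are initialized, callbacks may be registered
            other.addListener(this);   // hypothetical callback registration
        }
    }
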
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/module/IFloodlightModuleContext.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/module/IFloodlightModuleContext.java
new file mode 100644
index 0000000..2c058a7
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/module/IFloodlightModuleContext.java
@@ -0,0 +1,35 @@
+package net.floodlightcontroller.core.module;
+
+import java.util.Collection;
+import java.util.Map;
+
+	
+public interface IFloodlightModuleContext {	
+    /**
+     * Retrieves a service implementation from the registry, cast to the
+     * requested service type.
+     * @param service The IFloodlightService class to look up
+     * @return The service implementation, or null if it is not registered
+     */
+    public <T extends IFloodlightService> T getServiceImpl(Class<T> service);
+    
+    /**
+     * Returns all loaded services
+     * @return A collection of service classes that have been loaded
+     */
+    public Collection<Class<? extends IFloodlightService>> getAllServices();
+    
+    /**
+     * Returns all loaded modules
+     * @return All Floodlight modules that are going to be loaded
+     */
+    public Collection<IFloodlightModule> getAllModules();
+    
+    /**
+     * Gets module specific configuration parameters.
+     * @param module The module to get the configuration parameters for
+     * @return A key, value map of the configuration options
+     */
+    public Map<String, String> getConfigParams(IFloodlightModule module);
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/module/IFloodlightService.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/module/IFloodlightService.java
new file mode 100644
index 0000000..5974b3a
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/module/IFloodlightService.java
@@ -0,0 +1,11 @@
+package net.floodlightcontroller.core.module;
+
+/**
+ * This is the base interface for any service that an IFloodlightModule 
+ * provides.
+ * @author alexreimers
+ *
+ */
+public abstract interface IFloodlightService {
+    // This space is intentionally left blank....don't touch it
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/module/ModuleLoaderResource.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/module/ModuleLoaderResource.java
new file mode 100644
index 0000000..a73a17f
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/module/ModuleLoaderResource.java
@@ -0,0 +1,104 @@
+package net.floodlightcontroller.core.module;
+
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+
+
+import org.restlet.resource.Get;
+import org.restlet.resource.ServerResource;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Returns list of modules loaded by Floodlight.
+ * @author Rob Sherwood
+ */
+public class ModuleLoaderResource extends ServerResource {
+    protected static Logger log = 
+            LoggerFactory.getLogger(ModuleLoaderResource.class);
+    
+    /**
+     * Retrieves information about loaded modules.
+     * @return Information about loaded modules.
+     */
+    @Get("json")
+    public Map<String, Object> retrieve() {
+    	return retrieveInternal(false);
+    }
+    
+    /**
+     * Retrieves all modules and their dependencies available
+     * to Floodlight.
+     * @param loadedOnly Whether to return all modules available or only the ones loaded.
+     * @return Information about modules available or loaded.
+     */
+    public Map<String, Object> retrieveInternal(boolean loadedOnly) {    
+        Map<String, Object> model = new HashMap<String, Object>();
+
+        Set<String> loadedModules = new HashSet<String>();
+        for (Object val : getContext().getAttributes().values()) {
+        	if ((val instanceof IFloodlightModule) || (val instanceof IFloodlightService)) {
+        		String serviceImpl = val.getClass().getCanonicalName();
+        		loadedModules.add(serviceImpl);
+        		// log.debug("Tracking serviceImpl " + serviceImpl);
+        	}
+        }
+
+        for (String moduleName : 
+        				FloodlightModuleLoader.moduleNameMap.keySet() ) {
+        	Map<String,Object> moduleInfo = new HashMap<String, Object>();
+
+        	IFloodlightModule module = 
+        				FloodlightModuleLoader.moduleNameMap.get(
+        						moduleName);
+        		
+        	Collection<Class<? extends IFloodlightService>> deps = 
+        			module.getModuleDependencies();
+        	if ( deps == null)
+            	deps = new HashSet<Class<? extends IFloodlightService>>();
+        	Map<String,Object> depsMap = new HashMap<String, Object> ();
+        	for (Class<? extends IFloodlightService> service : deps) {
+        		Object serviceImpl = getContext().getAttributes().get(service.getCanonicalName());
+        		if (serviceImpl != null)
+        			depsMap.put(service.getCanonicalName(), serviceImpl.getClass().getCanonicalName());
+        		else
+        			depsMap.put(service.getCanonicalName(), "<unresolved>");
+
+        	}
+            moduleInfo.put("depends", depsMap);
+        	
+            Collection<Class<? extends IFloodlightService>> provides = 
+            		module.getModuleServices();
+        	if ( provides == null)
+            	provides = new HashSet<Class<? extends IFloodlightService>>();
+        	Map<String,Object> providesMap = new HashMap<String,Object>();
+        	for (Class<? extends IFloodlightService> service : provides) {
+        		providesMap.put(service.getCanonicalName(), module.getServiceImpls().get(service).getClass().getCanonicalName());
+        	}
+        	moduleInfo.put("provides", providesMap);            		
+
+    		moduleInfo.put("loaded", false);	// not loaded, by default
+
+        	// check if this module is loaded directly
+        	if (loadedModules.contains(module.getClass().getCanonicalName())) {
+        		moduleInfo.put("loaded", true);  			
+        	} else {
+        		// if not, then maybe one of the services it exports is loaded
+        		for (Class<? extends IFloodlightService> service : provides) {
+        			String modString = module.getServiceImpls().get(service).getClass().getCanonicalName();
+        			if (loadedModules.contains(modString))
+                		moduleInfo.put("loaded", true);
+        			/* else 
+        				log.debug("ServiceImpl not loaded " + modString); */
+        		}
+        	}
+
+        	if ((Boolean)moduleInfo.get("loaded")|| !loadedOnly )
+        		model.put(moduleName, moduleInfo);
+        }            
+        return model;
+    }
+}
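+// Illustrative only: the model built above serializes to JSON roughly as in
+// the sketch below.  The module and service names are hypothetical examples,
+// not values produced by this file.
+//
+//   {
+//     "net.floodlightcontroller.hub.Hub": {
+//       "depends":  { "net.floodlightcontroller.core.IFloodlightProviderService":
+//                       "net.floodlightcontroller.core.internal.Controller" },
+//       "provides": {},
+//       "loaded":   false
+//     }
+//   }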
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/types/MacVlanPair.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/types/MacVlanPair.java
new file mode 100644
index 0000000..7a44f1d
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/types/MacVlanPair.java
@@ -0,0 +1,44 @@
+/**
+*    Copyright 2011, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.core.types;
+
+public class MacVlanPair {
+    public Long mac;
+    public Short vlan;
+    public MacVlanPair(Long mac, Short vlan) {
+        this.mac = mac;
+        this.vlan = vlan;
+    }
+    
+    public long getMac() {
+        return mac.longValue();
+    }
+    
+    public short getVlan() {
+        return vlan.shortValue();
+    }
+    
+    public boolean equals(Object o) {
+        return (o instanceof MacVlanPair) && (mac.equals(((MacVlanPair) o).mac))
+            && (vlan.equals(((MacVlanPair) o).vlan));
+    }
+    
+    public int hashCode() {
+        return mac.hashCode() ^ vlan.hashCode();
+    }
+}
\ No newline at end of file
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/types/SwitchMessagePair.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/types/SwitchMessagePair.java
new file mode 100644
index 0000000..0e91bc9
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/types/SwitchMessagePair.java
@@ -0,0 +1,40 @@
+/**
+*    Copyright 2011, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.core.types;
+
+import org.openflow.protocol.OFMessage;
+
+import net.floodlightcontroller.core.IOFSwitch;
+
+public class SwitchMessagePair {
+    private final IOFSwitch sw;
+    private final OFMessage msg;
+    
+    public SwitchMessagePair(IOFSwitch sw, OFMessage msg) {
+        this.sw = sw;
+        this.msg = msg;
+    }
+    
+    public IOFSwitch getSwitch() {
+        return this.sw;
+    }
+    
+    public OFMessage getMessage() {
+        return this.msg;
+    }
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/util/AppCookie.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/util/AppCookie.java
new file mode 100644
index 0000000..210823e
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/util/AppCookie.java
@@ -0,0 +1,54 @@
+/**
+*    Copyright 2011, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.core.util;
+
+/***
+ * FIXME Need a system for registering/binding applications to a unique ID
+ * 
+ * @author capveg
+ *
+ */
+
+public class AppCookie {
+    static final int APP_ID_BITS = 12;
+    static final int APP_ID_SHIFT = (64 - APP_ID_BITS);
+    // bits 32-51, between the user field and the app ID, are unused here ... that's ok!
+    static final int USER_BITS = 32;
+    static final int USER_SHIFT = 0;
+
+
+    /**
+     * Encapsulate an application ID and a user block of stuff into a cookie
+     * 
+     * @param application An ID to identify the application
+     * @param user Some application specific data
+     * @return a cookie for use in OFFlowMod.setCookie()
+     */
+    
+    static public long makeCookie(int application, int user) {
+        return ((application & ((1L << APP_ID_BITS) - 1)) << APP_ID_SHIFT) | user;
+    }
+    
+    static public int extractApp(long cookie) {
+        return (int)((cookie>> APP_ID_SHIFT) & ((1L << APP_ID_BITS) - 1));
+    }
+    
+    static public int extractUser(long cookie) {
+        return (int)((cookie>> USER_SHIFT) & ((1L << USER_BITS) - 1));
+    }
+}
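+// Worked example (illustrative values): for application ID 3 and user data 100,
+//   makeCookie(3, 100) == (3L << 52) | 100 == 0x0030000000000064L
+//   extractApp(0x0030000000000064L)  == 3
+//   extractUser(0x0030000000000064L) == 100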
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/util/ListenerDispatcher.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/util/ListenerDispatcher.java
new file mode 100644
index 0000000..58b543c
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/util/ListenerDispatcher.java
@@ -0,0 +1,136 @@
+/**
+*    Copyright 2011, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.core.util;
+
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import net.floodlightcontroller.core.IListener;
+import net.floodlightcontroller.core.annotations.LogMessageDoc;
+
+/**
+ * Maintain lists of listeners ordered by dependency.  
+ * 
+ * @author readams
+ *
+ */
+public class ListenerDispatcher<U, T extends IListener<U>> {
+    protected static Logger logger = LoggerFactory.getLogger(ListenerDispatcher.class);
+    List<T> listeners = null;
+    
+    private void visit(List<T> newlisteners, U type, HashSet<T> visited, 
+                       List<T> ordering, T listener) {
+        if (!visited.contains(listener)) {
+            visited.add(listener);
+            
+            for (T i : newlisteners) {
+                if (ispre(type, i, listener)) {
+                    visit(newlisteners, type, visited, ordering, i);
+                }
+            }
+            ordering.add(listener);
+        }
+    }
+    
+    private boolean ispre(U type, T l1, T l2) {
+        return (l2.isCallbackOrderingPrereq(type, l1.getName()) ||
+                l1.isCallbackOrderingPostreq(type, l2.getName()));
+    }
+    
+    /**
+     * Add a listener to the list of listeners, reordering the list so that
+     * callback-ordering prerequisites are satisfied.
+     * @param type the event type the ordering constraints apply to
+     * @param listener the listener to add
+     */
+    @LogMessageDoc(level="ERROR",
+                   message="No listener dependency solution: " +
+                           "No listeners without incoming dependencies",
+                   explanation="The set of listeners installed " +
+                   		"have dependencies with no solution",
+                   recommendation="Install a different set of listeners " +
+                   		"or install all dependencies.  This is a defect in " +
+                   		"the controller installation.")
+    public void addListener(U type, T listener) {
+        List<T> newlisteners = new ArrayList<T>();
+        if (listeners != null)
+            newlisteners.addAll(listeners);
+
+        newlisteners.add(listener);
+        // Find nodes without outgoing edges
+        List<T> terminals = new ArrayList<T>(); 
+        for (T i : newlisteners) {
+            boolean isterm = true;
+            for (T j : newlisteners) {
+                if (ispre(type, i, j)) {
+                    isterm = false;
+                    break;
+                }
+            }
+            if (isterm) {
+                terminals.add(i);
+            }
+        }
+        
+        if (terminals.size() == 0) {
+            logger.error("No listener dependency solution: " +
+            		     "No listeners without incoming dependencies");
+            listeners = newlisteners;
+            return;
+        }
+        
+        // visit depth-first traversing in the opposite order from
+        // the dependencies.  Note we will not generally detect cycles
+        HashSet<T> visited = new HashSet<T>();
+        List<T> ordering = new ArrayList<T>(); 
+        for (T term : terminals) {
+            visit(newlisteners, type, visited, ordering, term);
+        }
+        listeners = ordering;
+    }
+
+    /**
+     * Remove the given listener
+     * @param listener the listener to remove
+     */
+    public void removeListener(T listener) {
+        if (listeners != null) {
+            List<T> newlisteners = new ArrayList<T>();
+            newlisteners.addAll(listeners);
+            newlisteners.remove(listener);
+            listeners = newlisteners;
+        }
+    }
+    
+    /**
+     * Clear all listeners
+     */
+    public void clearListeners() {
+        listeners = new ArrayList<T>();
+    }
+    
+    /** 
+     * Get the list of listeners, ordered by their dependencies.
+     * @return the dependency-ordered list of listeners
+     */
+    public List<T> getOrderedListeners() {
+        return listeners;
+    }
+}
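+// Worked example (illustrative, with hypothetical listeners "A" and "B"):
+// if B.isCallbackOrderingPrereq(type, "A") returns true, ispre() treats A as
+// a prerequisite of B, so after addListener() has run for both listeners,
+// getOrderedListeners() returns A before B.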
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/util/MutableInteger.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/util/MutableInteger.java
new file mode 100644
index 0000000..0f070fa
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/util/MutableInteger.java
@@ -0,0 +1,55 @@
+/**
+*    Copyright 2011, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.core.util;
+
+public class MutableInteger extends Number {
+    private static final long serialVersionUID = 1L;
+    int mutableInt;
+    
+    public MutableInteger(int value) {
+        this.mutableInt = value;
+    }
+    
+    public void setValue(int value) {
+        this.mutableInt = value;
+    }
+    
+    @Override
+    public double doubleValue() {
+        return (double) mutableInt;
+    }
+
+    @Override
+    public float floatValue() {
+        return (float) mutableInt;
+    }
+
+    @Override
+    public int intValue() {
+        return mutableInt;
+    }
+
+    @Override
+    public long longValue() {
+        return (long) mutableInt;
+    }
+
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/util/SingletonTask.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/util/SingletonTask.java
new file mode 100644
index 0000000..07729e5
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/util/SingletonTask.java
@@ -0,0 +1,162 @@
+/**
+*    Copyright 2011, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.core.util;
+
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+
+import net.floodlightcontroller.core.annotations.LogMessageDoc;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * This allows you to represent a task that should be queued for future execution
+ * but where you only want the task to complete once in response to some sequence 
+ * of events.  For example, if you get a change notification and want to reload state,
+ * you only want to reload the state once, at the end, and don't want to queue
+ * an update for every notification that might come in.
+ * 
+ * The semantics are as follows:
+ * * If the task hasn't begun yet, do not queue a new task
+ * * If the task has begun, set a bit to restart it after the current task finishes
+ */
+public class SingletonTask {
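+    // A minimal usage sketch (illustrative only; the executor, the delay and
+    // the reload action below are assumptions, not part of this class):
+    //
+    //   ScheduledExecutorService ses = Executors.newScheduledThreadPool(1);
+    //   SingletonTask reload = new SingletonTask(ses, new Runnable() {
+    //       public void run() { /* reload state once per burst of events */ }
+    //   });
+    //   // Each change notification just reschedules; at most one run is
+    //   // queued, and a run already in progress is re-run after it completes.
+    //   reload.reschedule(500, TimeUnit.MILLISECONDS);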
+    protected static Logger logger = LoggerFactory.getLogger(SingletonTask.class);
+            
+    protected static class SingletonTaskContext  {
+        protected boolean taskShouldRun = false;
+        protected boolean taskRunning = false;
+
+        protected SingletonTaskWorker waitingTask = null;
+    }
+
+    protected static class SingletonTaskWorker implements Runnable  {
+        SingletonTask parent;
+        boolean canceled = false;
+        long nextschedule = 0;
+
+        public SingletonTaskWorker(SingletonTask parent) {
+            super();
+            this.parent = parent;
+        }
+
+        @Override
+        @LogMessageDoc(level="ERROR",
+                       message="Exception while executing task",
+                       recommendation=LogMessageDoc.GENERIC_ACTION)
+        public void run() {
+            synchronized (parent.context) {
+                if (canceled || !parent.context.taskShouldRun)
+                    return;
+
+                parent.context.taskRunning = true;
+                parent.context.taskShouldRun = false;
+            }
+
+            try {
+                parent.task.run();
+            } catch (Exception e) {
+                logger.error("Exception while executing task", e);
+            }
+
+            synchronized (parent.context) {
+                parent.context.taskRunning = false;
+
+                if (parent.context.taskShouldRun) {
+                    long now = System.nanoTime();
+                    if ((nextschedule <= 0 || (nextschedule - now) <= 0)) {
+                        parent.ses.execute(this);
+                    } else {
+                        parent.ses.schedule(this, 
+                                            nextschedule-now, 
+                                            TimeUnit.NANOSECONDS);
+                    }
+                }
+            }
+        }
+    }
+
+    protected SingletonTaskContext context = new SingletonTaskContext();
+    protected Runnable task;
+    protected ScheduledExecutorService ses;
+
+
+    /**
+     * Construct a new SingletonTask for the given runnable.  The internal
+     * context tracks whether the task is queued or running so that at most
+     * one execution is outstanding at any time.
+     * @param ses the scheduled executor service used to run the task
+     * @param task the runnable to execute
+     */
+    public SingletonTask(ScheduledExecutorService ses,
+            Runnable task) {
+        super();
+        this.task = task;
+        this.ses = ses;
+    }
+
+    /**
+     * Schedule the task to run if there's not already a task scheduled.
+     * If there is a waiting task that has not yet started, this will cancel
+     * that task and reschedule it to run at the given time.  If the task
+     * has already started, it will be rescheduled once it completes, to run
+     * after the given delay from the time of this reschedule call.
+     * 
+     * @param delay the delay before the task runs
+     * @param unit the time unit of the delay
+     */
+    public void reschedule(long delay, TimeUnit unit) {
+        boolean needQueue = true;
+        SingletonTaskWorker stw = null;
+
+        synchronized (context) {
+            if (context.taskRunning || context.taskShouldRun) {
+                if (context.taskRunning) {
+                    // schedule to restart at the right time
+                    if (delay > 0) {
+                        long now = System.nanoTime();
+                        long then = 
+                            now + TimeUnit.NANOSECONDS.convert(delay, unit);
+                        context.waitingTask.nextschedule = then;
+                    } else {
+                        context.waitingTask.nextschedule = 0;
+                    }
+                    needQueue = false;
+                } else {
+                    // cancel and requeue
+                    context.waitingTask.canceled = true;
+                    context.waitingTask = null;
+                }
+            }
+
+            context.taskShouldRun = true;
+
+            if (needQueue) {
+                stw = context.waitingTask = new SingletonTaskWorker(this);                    
+            }
+        }
+
+        if (needQueue) {
+            if (delay <= 0)
+                ses.execute(stw);
+            else
+                ses.schedule(stw, delay, unit);
+        }
+    }
+}
\ No newline at end of file
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/web/AllSwitchStatisticsResource.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/web/AllSwitchStatisticsResource.java
new file mode 100644
index 0000000..d012fc8
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/web/AllSwitchStatisticsResource.java
@@ -0,0 +1,174 @@
+/**
+*    Copyright 2011, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.core.web;
+
+import java.lang.Thread.State;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import net.floodlightcontroller.core.IFloodlightProviderService;
+import net.floodlightcontroller.core.types.MacVlanPair;
+
+import org.openflow.protocol.OFFeaturesReply;
+import org.openflow.protocol.statistics.OFStatistics;
+import org.openflow.protocol.statistics.OFStatisticsType;
+import org.openflow.util.HexString;
+import org.restlet.resource.Get;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Return switch statistics information for all switches
+ * @author readams
+ */
+public class AllSwitchStatisticsResource extends SwitchResourceBase {
+    protected static Logger log = 
+        LoggerFactory.getLogger(AllSwitchStatisticsResource.class);
+    
+    @Get("json")
+    public Map<String, Object> retrieve() {    
+        String statType = (String) getRequestAttributes().get("statType");
+        return retrieveInternal(statType);
+    }
+        
+    public Map<String, Object> retrieveInternal(String statType) {
+        HashMap<String, Object> model = new HashMap<String, Object>();
+
+        OFStatisticsType type = null;
+        REQUESTTYPE rType = null;
+        
+        if (statType.equals("port")) {
+            type = OFStatisticsType.PORT;
+            rType = REQUESTTYPE.OFSTATS;
+        } else if (statType.equals("queue")) {
+            type = OFStatisticsType.QUEUE;
+            rType = REQUESTTYPE.OFSTATS;
+        } else if (statType.equals("flow")) {
+            type = OFStatisticsType.FLOW;
+            rType = REQUESTTYPE.OFSTATS;
+        } else if (statType.equals("aggregate")) {
+            type = OFStatisticsType.AGGREGATE;
+            rType = REQUESTTYPE.OFSTATS;
+        } else if (statType.equals("desc")) {
+            type = OFStatisticsType.DESC;
+            rType = REQUESTTYPE.OFSTATS;
+        } else if (statType.equals("table")) {
+            type = OFStatisticsType.TABLE;
+            rType = REQUESTTYPE.OFSTATS;
+        } else if (statType.equals("features")) {
+            rType = REQUESTTYPE.OFFEATURES;
+        } else {
+            return model;
+        }
+        
+        IFloodlightProviderService floodlightProvider = 
+                (IFloodlightProviderService)getContext().getAttributes().
+                    get(IFloodlightProviderService.class.getCanonicalName());        
+        Long[] switchDpids = floodlightProvider.getSwitches().keySet().toArray(new Long[0]);
+        List<GetConcurrentStatsThread> activeThreads = new ArrayList<GetConcurrentStatsThread>(switchDpids.length);
+        List<GetConcurrentStatsThread> pendingRemovalThreads = new ArrayList<GetConcurrentStatsThread>();
+        GetConcurrentStatsThread t;
+        for (Long l : switchDpids) {
+            t = new GetConcurrentStatsThread(l, rType, type);
+            activeThreads.add(t);
+            t.start();
+        }
+        
+        // Poll the worker threads, with a hard timeout of 12 seconds.
+        // If a thread has not finished by then, the switch has not replied
+        // yet, and that switch's stats are not added to the reply.
+        for (int iSleepCycles = 0; iSleepCycles < 12; iSleepCycles++) {
+            for (GetConcurrentStatsThread curThread : activeThreads) {
+                if (curThread.getState() == State.TERMINATED) {
+                    if (rType == REQUESTTYPE.OFSTATS) {
+                        model.put(HexString.toHexString(curThread.getSwitchId()), curThread.getStatisticsReply());
+                    } else if (rType == REQUESTTYPE.OFFEATURES) {
+                        model.put(HexString.toHexString(curThread.getSwitchId()), curThread.getFeaturesReply());
+                    }
+                    pendingRemovalThreads.add(curThread);
+                }
+            }
+            
+            // remove the threads that have completed the queries to the switches
+            for (GetConcurrentStatsThread curThread : pendingRemovalThreads) {
+                activeThreads.remove(curThread);
+            }
+            // clear the list so we don't try to double remove them
+            pendingRemovalThreads.clear();
+            
+            // if we are done finish early so we don't always get the worst case
+            if (activeThreads.isEmpty()) {
+                break;
+            }
+            
+            // sleep for 1 s here
+            try {
+                Thread.sleep(1000);
+            } catch (InterruptedException e) {
+                log.error("Interrupted while waiting for statistics", e);
+            }
+        }
+        
+        return model;
+    }
+    
+    protected class GetConcurrentStatsThread extends Thread {
+        private List<OFStatistics> switchReply;
+        private long switchId;
+        private OFStatisticsType statType;
+        private REQUESTTYPE requestType;
+        private OFFeaturesReply featuresReply;
+        private Map<MacVlanPair, Short> switchTable;
+        
+        public GetConcurrentStatsThread(long switchId, REQUESTTYPE requestType, OFStatisticsType statType) {
+            this.switchId = switchId;
+            this.requestType = requestType;
+            this.statType = statType;
+            this.switchReply = null;
+            this.featuresReply = null;
+            this.switchTable = null;
+        }
+        
+        public List<OFStatistics> getStatisticsReply() {
+            return switchReply;
+        }
+        
+        public OFFeaturesReply getFeaturesReply() {
+            return featuresReply;
+        }
+        
+        public Map<MacVlanPair, Short> getSwitchTable() {
+            return switchTable;
+        }
+        
+        public long getSwitchId() {
+            return switchId;
+        }
+        
+        public void run() {
+            if ((requestType == REQUESTTYPE.OFSTATS) && (statType != null)) {
+                switchReply = getSwitchStatistics(switchId, statType);
+            } else if (requestType == REQUESTTYPE.OFFEATURES) {
+                featuresReply = getSwitchFeaturesReply(switchId);
+            }
+        }
+    }
+}
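+// Illustrative usage: CoreWebRoutable mounts this resource at
+//   /wm/core/switch/all/{statType}/json
+// where {statType} is one of port, queue, flow, aggregate, desc, table, or
+// features, matching the cases handled in retrieveInternal() above.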
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/web/ControllerMemoryResource.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/web/ControllerMemoryResource.java
new file mode 100644
index 0000000..bcb2bd1
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/web/ControllerMemoryResource.java
@@ -0,0 +1,39 @@
+/**
+*    Copyright 2011, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.core.web;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import org.restlet.resource.Get;
+import org.restlet.resource.ServerResource;
+
+/**
+ * Retrieve floodlight memory state
+ * @author readams
+ */
+public class ControllerMemoryResource extends ServerResource {
+    @Get("json")
+    public Map<String, Object> retrieve() {
+        HashMap<String, Object> model = new HashMap<String, Object>();
+        Runtime runtime = Runtime.getRuntime();
+        model.put("total", new Long(runtime.totalMemory()));
+        model.put("free", new Long(runtime.freeMemory()));
+        return model;
+    }
+}
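+// Illustrative response (byte counts from java.lang.Runtime; the numbers are
+// made-up examples): { "total": 514523136, "free": 205983224 }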
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/web/ControllerRoleResource.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/web/ControllerRoleResource.java
new file mode 100644
index 0000000..652058e
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/web/ControllerRoleResource.java
@@ -0,0 +1,57 @@
+package net.floodlightcontroller.core.web;
+
+import org.restlet.data.Status;
+import org.restlet.resource.ServerResource;
+
+import net.floodlightcontroller.core.IFloodlightProviderService;
+import net.floodlightcontroller.core.IFloodlightProviderService.Role;
+import net.floodlightcontroller.core.annotations.LogMessageDoc;
+
+import org.restlet.resource.Get;
+import org.restlet.resource.Post;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class ControllerRoleResource extends ServerResource {
+
+    protected static Logger log = LoggerFactory.getLogger(ControllerRoleResource.class);
+
+    @Get("json")
+    public RoleInfo getRole() {
+        IFloodlightProviderService floodlightProvider = 
+                (IFloodlightProviderService)getContext().getAttributes().
+                    get(IFloodlightProviderService.class.getCanonicalName());
+        return new RoleInfo(floodlightProvider.getRole());
+    }
+    
+    @Post("json")
+    @LogMessageDoc(level="WARN",
+                   message="Invalid role value specified in REST API to " +
+                      "set controller role",
+                   explanation="An HA role change request was malformed.",
+                   recommendation=LogMessageDoc.CHECK_CONTROLLER)
+    public void setRole(RoleInfo roleInfo) {
+        //Role role = Role.lookupRole(roleInfo.getRole());
+        Role role = null;
+        try {
+            role = Role.valueOf(roleInfo.getRole().toUpperCase());
+        }
+        catch (IllegalArgumentException e) {
+            // The role value in the REST call didn't match a valid
+            // role name, so just leave the role as null and handle
+            // the error below.
+        }
+        if (role == null) {
+            log.warn ("Invalid role value specified in REST API to " +
+            		  "set controller role");
+            setStatus(Status.CLIENT_ERROR_BAD_REQUEST, "Invalid role value");
+            return;
+        }
+        
+        IFloodlightProviderService floodlightProvider = 
+                (IFloodlightProviderService)getContext().getAttributes().
+                    get(IFloodlightProviderService.class.getCanonicalName());
+        
+        floodlightProvider.setRole(role);
+    }
+}
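+// Illustrative usage: the role is read with GET /wm/core/role/json and set by
+// POSTing JSON such as { "role": "MASTER" } to the same URI (assuming MASTER
+// is one of the values defined by IFloodlightProviderService.Role).  An
+// unrecognized value is rejected with 400 Bad Request.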
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/web/ControllerSummaryResource.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/web/ControllerSummaryResource.java
new file mode 100644
index 0000000..20fbf85
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/web/ControllerSummaryResource.java
@@ -0,0 +1,40 @@
+/**
+*    Copyright 2012, Big Switch Networks, Inc. 
+*    Originally created by Shudong Zhou, Big Switch Networks
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.core.web;
+
+import java.util.Map;
+
+import org.restlet.resource.Get;
+import org.restlet.resource.ServerResource;
+
+import net.floodlightcontroller.core.IFloodlightProviderService;
+
+/**
+ * Get summary counters registered by all modules
+ * @author shudongz
+ */
+public class ControllerSummaryResource extends ServerResource {
+    @Get("json")
+    public Map<String, Object> retrieve() {
+        IFloodlightProviderService floodlightProvider = 
+            (IFloodlightProviderService)getContext().getAttributes().
+                get(IFloodlightProviderService.class.getCanonicalName());
+        return floodlightProvider.getControllerInfo("summary");
+    }
+
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/web/ControllerSwitchesResource.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/web/ControllerSwitchesResource.java
new file mode 100644
index 0000000..454f566
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/web/ControllerSwitchesResource.java
@@ -0,0 +1,81 @@
+/**
+*    Copyright 2011, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.core.web;
+
+import java.util.Collections;
+import java.util.Iterator;
+
+import net.floodlightcontroller.core.IFloodlightProviderService;
+import net.floodlightcontroller.core.IOFSwitch;
+import net.floodlightcontroller.util.FilterIterator;
+
+import org.openflow.util.HexString;
+import org.restlet.data.Form;
+import org.restlet.data.Status;
+import org.restlet.resource.Get;
+import org.restlet.resource.ServerResource;
+
+/**
+ * Get a list of switches connected to the controller
+ * @author readams
+ */
+public class ControllerSwitchesResource extends ServerResource {
+    public static final String DPID_ERROR = 
+            "Invalid Switch DPID: must be a 64-bit quantity, expressed in " + 
+            "hex as AA:BB:CC:DD:EE:FF:00:11";
+    
+    @Get("json")
+    public Iterator<IOFSwitch> retrieve() {
+        IFloodlightProviderService floodlightProvider = 
+                (IFloodlightProviderService)getContext().getAttributes().
+                    get(IFloodlightProviderService.class.getCanonicalName());
+
+        Long switchDPID = null;
+        
+        Form form = getQuery();
+        String dpid = form.getFirstValue("dpid", true);
+        if (dpid != null) {
+            try {
+                switchDPID = HexString.toLong(dpid);
+            } catch (Exception e) {
+                setStatus(Status.CLIENT_ERROR_BAD_REQUEST, DPID_ERROR);
+                return null;
+            }
+        }
+        if (switchDPID != null) {
+            IOFSwitch sw = 
+                    floodlightProvider.getSwitches().get(switchDPID);
+            if (sw != null)
+                return Collections.singleton(sw).iterator();
+            return Collections.<IOFSwitch>emptySet().iterator();
+        }
+        final String dpidStartsWith = 
+                form.getFirstValue("dpid__startswith", true);
+        Iterator<IOFSwitch> switer = 
+                floodlightProvider.getSwitches().values().iterator();
+        if (dpidStartsWith != null) {
+            return new FilterIterator<IOFSwitch>(switer) {
+                @Override
+                protected boolean matches(IOFSwitch value) {
+                    return value.getStringId().startsWith(dpidStartsWith);
+                }
+            };
+        } 
+        return switer;
+    }
+}
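+// Illustrative usage (mounted under /wm/core by CoreWebRoutable):
+//   GET /wm/core/controller/switches/json
+//   GET /wm/core/controller/switches/json?dpid=00:00:00:00:00:00:00:01
+//   GET /wm/core/controller/switches/json?dpid__startswith=00:00:00:00
+// The dpid filter must be a full 64-bit DPID in hex; dpid__startswith matches
+// a string prefix of the switch's DPID.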
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/web/CoreWebRoutable.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/web/CoreWebRoutable.java
new file mode 100644
index 0000000..45ef6e9
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/web/CoreWebRoutable.java
@@ -0,0 +1,65 @@
+/**
+*    Copyright 2011, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.core.web;
+
+import net.floodlightcontroller.core.module.ModuleLoaderResource;
+import net.floodlightcontroller.restserver.RestletRoutable;
+
+import org.restlet.Context;
+import org.restlet.Restlet;
+import org.restlet.routing.Router;
+
+/**
+ * Creates a router to handle all the core web URIs
+ * @author readams
+ */
+public class CoreWebRoutable implements RestletRoutable {
+    @Override
+    public String basePath() {
+        return "/wm/core";
+    }
+
+    @Override
+    public Restlet getRestlet(Context context) {
+        Router router = new Router(context);
+        router.attach("/module/all/json", ModuleLoaderResource.class);
+        router.attach("/module/loaded/json", LoadedModuleLoaderResource.class);
+        router.attach("/switch/{switchId}/role/json", SwitchRoleResource.class);
+        router.attach("/switch/all/{statType}/json", AllSwitchStatisticsResource.class);
+        router.attach("/switch/{switchId}/{statType}/json", SwitchStatisticsResource.class);
+        router.attach("/controller/switches/json", ControllerSwitchesResource.class);
+        router.attach("/counter/{counterTitle}/json", CounterResource.class);
+        router.attach("/counter/{switchId}/{counterName}/json", SwitchCounterResource.class);
+        router.attach("/counter/categories/{switchId}/{counterName}/{layer}/json", SwitchCounterCategoriesResource.class);
+        router.attach("/memory/json", ControllerMemoryResource.class);
+        router.attach("/packettrace/json", PacketTraceResource.class);
+        // Get the last {count} events from the event histories
+        router.attach("/event-history/topology-switch/{count}/json",
+                EventHistoryTopologySwitchResource.class);
+        router.attach("/event-history/topology-link/{count}/json",
+                EventHistoryTopologyLinkResource.class);
+        router.attach("/event-history/topology-cluster/{count}/json",
+                EventHistoryTopologyClusterResource.class);
+        router.attach("/storage/tables/json", StorageSourceTablesResource.class);
+        router.attach("/controller/summary/json", ControllerSummaryResource.class);
+        router.attach("/role/json", ControllerRoleResource.class);
+        router.attach("/health/json", HealthCheckResource.class);
+        router.attach("/system/uptime/json", SystemUptimeResource.class);
+        return router;
+    }
+}
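+// Illustrative: every route above is relative to basePath(), so e.g. the
+// switch list is served at /wm/core/controller/switches/json and module info
+// at /wm/core/module/all/json.  The host and port come from the REST server
+// configuration and are not defined in this class.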
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/web/CounterResource.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/web/CounterResource.java
new file mode 100644
index 0000000..fb680d7
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/web/CounterResource.java
@@ -0,0 +1,70 @@
+/**
+*    Copyright 2011, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.core.web;
+
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Map.Entry;
+
+import net.floodlightcontroller.counter.CounterValue;
+import net.floodlightcontroller.counter.ICounter;
+
+import org.restlet.resource.Get;
+
+public class CounterResource extends CounterResourceBase {
+    @Get("json")
+    public Map<String, Object> retrieve() {
+        String counterTitle = 
+            (String) getRequestAttributes().get("counterTitle");
+        Map<String, Object> model = new HashMap<String,Object>();
+        CounterValue v;
+        if (counterTitle.equalsIgnoreCase("all")) {
+            Map<String, ICounter> counters = this.counterStore.getAll();
+            if (counters != null) {
+                Iterator<Map.Entry<String, ICounter>> it = 
+                    counters.entrySet().iterator();
+                while (it.hasNext()) {
+                    Entry<String, ICounter> entry = it.next();
+                    String counterName = entry.getKey();
+                    v = entry.getValue().getCounterValue();
+
+                    if (CounterValue.CounterType.LONG == v.getType()) {
+                        model.put(counterName, v.getLong());
+                    } else if (v.getType() == CounterValue.CounterType.DOUBLE) {
+                        model.put(counterName, v.getDouble());
+                    }   
+                }   
+            }   
+        } else {
+            ICounter counter = this.counterStore.getCounter(counterTitle);
+            if (counter != null) {
+                v = counter.getCounterValue();
+            } else {
+                v = new CounterValue(CounterValue.CounterType.LONG);
+            }   
+
+            if (CounterValue.CounterType.LONG == v.getType()) {
+                model.put(counterTitle, v.getLong());
+            } else if (v.getType() == CounterValue.CounterType.DOUBLE) {
+                model.put(counterTitle, v.getDouble());
+            }   
+        }
+        return model;
+    }
+}
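+// Illustrative usage: GET /wm/core/counter/all/json returns every counter
+// keyed by name, e.g. { "SomeCounter": 1234 } (name and value are made-up);
+// GET /wm/core/counter/{counterTitle}/json returns just the named counter.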
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/web/CounterResourceBase.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/web/CounterResourceBase.java
new file mode 100644
index 0000000..70e90ed
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/web/CounterResourceBase.java
@@ -0,0 +1,35 @@
+/**
+*    Copyright 2011, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.core.web;
+
+import net.floodlightcontroller.counter.ICounterStoreService;
+
+import org.restlet.resource.ResourceException;
+import org.restlet.resource.ServerResource;
+
+public class CounterResourceBase extends ServerResource {
+    protected ICounterStoreService counterStore;
+    
+    @Override
+    protected void doInit() throws ResourceException {
+        super.doInit();
+        counterStore = 
+            (ICounterStoreService)getContext().getAttributes().
+                get(ICounterStoreService.class.getCanonicalName());
+    }
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/web/EventHistoryTopologyClusterResource.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/web/EventHistoryTopologyClusterResource.java
new file mode 100644
index 0000000..1be942c
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/web/EventHistoryTopologyClusterResource.java
@@ -0,0 +1,45 @@
+package net.floodlightcontroller.core.web;
+
+import net.floodlightcontroller.linkdiscovery.ILinkDiscoveryService;
+import net.floodlightcontroller.linkdiscovery.internal.EventHistoryTopologyCluster;
+import net.floodlightcontroller.linkdiscovery.internal.LinkDiscoveryManager;
+import net.floodlightcontroller.util.EventHistory;
+
+import org.restlet.resource.Get;
+import org.restlet.resource.ServerResource;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * @author subrata
+ *
+ */
+public class EventHistoryTopologyClusterResource extends ServerResource {
+    // TODO - Move this to the LinkDiscovery rest API
+    protected static Logger log = 
+            LoggerFactory.getLogger(EventHistoryTopologyClusterResource.class);
+
+    @Get("json")
+    public EventHistory<EventHistoryTopologyCluster> handleEvHistReq() {
+
+        // Get the event history count. Last <count> events would be returned
+        String evHistCount = (String)getRequestAttributes().get("count");
+        int    count = EventHistory.EV_HISTORY_DEFAULT_SIZE;
+        try {
+            count = Integer.parseInt(evHistCount);
+        }
+        catch(NumberFormatException nFE) {
+            // Invalid input for event count - use default value
+        }
+
+        LinkDiscoveryManager topoManager =
+                (LinkDiscoveryManager)getContext().getAttributes().
+                get(ILinkDiscoveryService.class.getCanonicalName());
+        if (topoManager != null) {
+            return new EventHistory<EventHistoryTopologyCluster>(
+                    topoManager.evHistTopologyCluster, count);
+        }
+        
+        return null;
+    }
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/web/EventHistoryTopologyLinkResource.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/web/EventHistoryTopologyLinkResource.java
new file mode 100644
index 0000000..4a21070
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/web/EventHistoryTopologyLinkResource.java
@@ -0,0 +1,45 @@
+package net.floodlightcontroller.core.web;
+
+import net.floodlightcontroller.linkdiscovery.ILinkDiscoveryService;
+import net.floodlightcontroller.linkdiscovery.internal.EventHistoryTopologyLink;
+import net.floodlightcontroller.linkdiscovery.internal.LinkDiscoveryManager;
+import net.floodlightcontroller.util.EventHistory;
+
+import org.restlet.resource.Get;
+import org.restlet.resource.ServerResource;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * @author subrata
+ *
+ */
+public class EventHistoryTopologyLinkResource extends ServerResource {
+    // TODO - Move this to the DeviceManager Rest API
+    protected static Logger log = 
+            LoggerFactory.getLogger(EventHistoryTopologyLinkResource.class);
+
+    @Get("json")
+    public EventHistory<EventHistoryTopologyLink> handleEvHistReq() {
+
+        // Get the event history count. Last <count> events would be returned
+        String evHistCount = (String)getRequestAttributes().get("count");
+        int    count = EventHistory.EV_HISTORY_DEFAULT_SIZE;
+        try {
+            count = Integer.parseInt(evHistCount);
+        }
+        catch(NumberFormatException nFE) {
+            // Invalid input for event count - use default value
+        }
+
+        LinkDiscoveryManager linkDiscoveryManager =
+                (LinkDiscoveryManager)getContext().getAttributes().
+                get(ILinkDiscoveryService.class.getCanonicalName());
+        if (linkDiscoveryManager != null) {
+            return new EventHistory<EventHistoryTopologyLink>(
+                    linkDiscoveryManager.evHistTopologyLink, count);
+        }
+        
+        return null;
+    }
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/web/EventHistoryTopologySwitchResource.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/web/EventHistoryTopologySwitchResource.java
new file mode 100644
index 0000000..1c95e2c
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/web/EventHistoryTopologySwitchResource.java
@@ -0,0 +1,37 @@
+package net.floodlightcontroller.core.web;
+
+import net.floodlightcontroller.linkdiscovery.ILinkDiscoveryService;
+import net.floodlightcontroller.linkdiscovery.internal.EventHistoryTopologySwitch;
+import net.floodlightcontroller.linkdiscovery.internal.LinkDiscoveryManager;
+import net.floodlightcontroller.util.EventHistory;
+
+import org.restlet.resource.Get;
+import org.restlet.resource.ServerResource;
+
+/**
+ * @author subrata
+ *
+ */
+public class EventHistoryTopologySwitchResource extends ServerResource {
+
+    @Get("json")
+    public EventHistory<EventHistoryTopologySwitch> handleEvHistReq() {
+
+        // Get the event history count. Last <count> events would be returned
+        String evHistCount = (String)getRequestAttributes().get("count");
+        int    count = EventHistory.EV_HISTORY_DEFAULT_SIZE;
+        try {
+            count = Integer.parseInt(evHistCount);
+        }
+        catch(NumberFormatException nFE) {
+            // Invalid input for event count - use default value
+        }
+
+        LinkDiscoveryManager topoManager =
+           (LinkDiscoveryManager)getContext().getAttributes().
+               get(ILinkDiscoveryService.class.getCanonicalName());
+
+        return new EventHistory<EventHistoryTopologySwitch>(
+                                topoManager.evHistTopologySwitch, count);
+    }
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/web/HealthCheckResource.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/web/HealthCheckResource.java
new file mode 100644
index 0000000..12ee545
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/web/HealthCheckResource.java
@@ -0,0 +1,36 @@
+package net.floodlightcontroller.core.web;
+
+import org.restlet.resource.Get;
+import org.restlet.resource.ServerResource;
+
+public class HealthCheckResource extends ServerResource {
+    
+    public static class HealthCheckInfo {
+        
+        protected boolean healthy;
+        
+        public HealthCheckInfo() {
+            this.healthy = true;
+        }
+        
+        public boolean isHealthy() {
+            return healthy;
+        }
+        
+        public void setHealthy(boolean healthy) {
+            this.healthy = healthy;
+        }
+    }
+    
+    @Get("json")
+    public HealthCheckInfo healthCheck() {
+        // Currently this is the simplest possible health check -- basically
+        // just that the controller is still running and able to respond to
+        // REST calls.
+        // Eventually this should be more sophisticated and do things
+        // like monitoring internal data structures of the controller
+        // (e.g. async storage queue length).
+        return new HealthCheckInfo();
+    }
+
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/web/LoadedModuleLoaderResource.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/web/LoadedModuleLoaderResource.java
new file mode 100644
index 0000000..38367c3
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/web/LoadedModuleLoaderResource.java
@@ -0,0 +1,19 @@
+package net.floodlightcontroller.core.web;
+
+import java.util.Map;
+
+import org.restlet.resource.Get;
+
+import net.floodlightcontroller.core.module.ModuleLoaderResource;
+
+public class LoadedModuleLoaderResource extends ModuleLoaderResource {
+	/**
+	 * Retrieves information about the modules currently loaded
+	 * by Floodlight.
+	 * @return Information about loaded modules only.
+	 */
+    @Get("json")
+    public Map<String, Object> retrieve() {
+    	return retrieveInternal(true);
+    }
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/web/PacketTraceResource.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/web/PacketTraceResource.java
new file mode 100644
index 0000000..85da942
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/web/PacketTraceResource.java
@@ -0,0 +1,118 @@
+package net.floodlightcontroller.core.web;
+
+import java.util.concurrent.ConcurrentHashMap;
+
+import org.restlet.data.Status;
+import org.restlet.resource.Post;
+import org.restlet.resource.ServerResource;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import net.floodlightcontroller.core.OFMessageFilterManager;
+
+public class PacketTraceResource extends ServerResource {
+    protected static Logger log = LoggerFactory.getLogger(PacketTraceResource.class);
+    
+    public static class FilterParameters {
+
+        protected String sessionId = null;
+        protected String mac = null;
+        protected Integer period = null;
+        protected String direction = null;
+        protected String output = null;
+        
+        public String getSessionId() {
+            return sessionId;
+        }
+        public void setSessionId(String sessionId) {
+            this.sessionId = sessionId;
+        }
+        public String getMac() {
+            return mac;
+        }
+        public void setMac(String mac) {
+            this.mac = mac;
+        }
+        public Integer getPeriod() {
+            return period;
+        }
+        public void setPeriod(Integer period) {
+            this.period = period;
+        }
+        public String getDirection() {
+            return direction;
+        }
+        public void setDirection(String direction) {
+            this.direction = direction;
+        }
+        public String getOutput() {
+            return output;
+        }
+        public void setOutput(String output) {
+            this.output = output;
+        }
+
+        public String toString() {
+            return "SessionID: " + sessionId +
+                   "\tmac: " + mac +
+                   "\tperiod: " + period +
+                   "\tdirection: " + direction +
+                   "\toutput: " + output;
+        }
+    }
+    
+    public static class PacketTraceOutput {
+        protected String sessionId = null;
+
+        public String getSessionId() {
+            return sessionId;
+        }
+
+        public void setSessionId(String sessionId) {
+            this.sessionId = sessionId;
+        }
+    }
+    
+    @Post("json")
+    public PacketTraceOutput packettrace(FilterParameters fp) {
+        
+        ConcurrentHashMap <String,String> filter = new ConcurrentHashMap<String,String> ();
+        String sid = null;
+        PacketTraceOutput output = new PacketTraceOutput();
+        OFMessageFilterManager manager = 
+                (OFMessageFilterManager)getContext()
+                    .getAttributes().
+                        get(OFMessageFilterManager.class.getCanonicalName());
+
+        if (manager == null) {
+            // Without the OFMessageFilterManager service we cannot set up a
+            // trace; return early rather than hit a NullPointerException
+            // further down when manager.setupFilter() is called.
+            setStatus(Status.SERVER_ERROR_SERVICE_UNAVAILABLE);
+            return output;
+        }
+        
+        if (fp.getSessionId() != null) {
+            filter.put("sessionId", fp.getSessionId());
+        }
+        if (fp.getMac() != null) {
+            filter.put("mac", fp.getMac());
+        }
+        if (fp.getDirection() != null) {
+            filter.put("direction", fp.getDirection());
+        }
+        
+        if (filter.isEmpty()) {
+            setStatus(Status.CLIENT_ERROR_BAD_REQUEST);
+        } else {
+            if (log.isDebugEnabled()) {
+                log.debug ("Call setupFilter: sid:{} filter:{}, period:{}", 
+                           new Object[] {fp.getSessionId(), filter, 
+                                         fp.getPeriod()*1000});
+            }
+            sid = manager.setupFilter(fp.getSessionId(), filter, 
+                                      fp.getPeriod()*1000);
+            output.setSessionId(sid);
+            setStatus(Status.SUCCESS_OK);
+        }
+        
+        return output;
+    }
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/web/RoleInfo.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/web/RoleInfo.java
new file mode 100644
index 0000000..e600ea0
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/web/RoleInfo.java
@@ -0,0 +1,26 @@
+package net.floodlightcontroller.core.web;
+
+import net.floodlightcontroller.core.IFloodlightProviderService.Role;
+
+public class RoleInfo {
+    protected String role;
+    
+    public RoleInfo() {
+    }
+    
+    public RoleInfo(String role) {
+        setRole(role);
+    }
+    
+    public RoleInfo(Role role) {
+        this.role = (role != null) ? role.name() : "DISABLED";
+    }
+    
+    public String getRole() {
+        return role;
+    }
+    
+    public void setRole(String role) {
+        this.role = role;
+    }
+}
\ No newline at end of file
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/web/StorageSourceTablesResource.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/web/StorageSourceTablesResource.java
new file mode 100644
index 0000000..51f514f
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/web/StorageSourceTablesResource.java
@@ -0,0 +1,18 @@
+package net.floodlightcontroller.core.web;
+
+import java.util.Set;
+
+import net.floodlightcontroller.storage.IStorageSourceService;
+
+import org.restlet.resource.Get;
+import org.restlet.resource.ServerResource;
+
+public class StorageSourceTablesResource extends ServerResource {
+    @Get("json")
+    public Set<String> retrieve() {
+        IStorageSourceService storageSource = (IStorageSourceService)getContext().
+                getAttributes().get(IStorageSourceService.class.getCanonicalName());
+        Set<String> allTableNames = storageSource.getAllTableNames();
+        return allTableNames;
+    }
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/web/SwitchCounterCategoriesResource.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/web/SwitchCounterCategoriesResource.java
new file mode 100644
index 0000000..f14d706
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/web/SwitchCounterCategoriesResource.java
@@ -0,0 +1,87 @@
+/**
+*    Copyright 2011, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.core.web;
+
+import java.io.UnsupportedEncodingException;
+import java.net.URLDecoder;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.openflow.util.HexString;
+import org.restlet.resource.Get;
+
+import net.floodlightcontroller.core.IFloodlightProviderService;
+import net.floodlightcontroller.counter.CounterStore.NetworkLayer;
+import net.floodlightcontroller.counter.ICounterStoreService;
+
+/**
+ * Get the counter categories for a particular switch
+ * @author readams
+ */
+public class SwitchCounterCategoriesResource extends CounterResourceBase {
+    @Get("json")
+    public Map<String, Object> retrieve() {
+        IFloodlightProviderService floodlightProvider = 
+                (IFloodlightProviderService)getContext().getAttributes().
+                    get(IFloodlightProviderService.class.getCanonicalName());
+        HashMap<String,Object> model = new HashMap<String,Object>();
+        
+        String switchID = (String) getRequestAttributes().get("switchId");
+        String counterName = (String) getRequestAttributes().get("counterName");
+        String layer = (String) getRequestAttributes().get("layer");
+
+        Long[] switchDpids;
+        if (switchID.equalsIgnoreCase("all")) {
+            switchDpids = floodlightProvider.getSwitches().keySet().toArray(new Long[0]);
+            for (Long dpid : switchDpids) {
+                switchID = HexString.toHexString(dpid);
+
+                getOneSwitchCounterCategoriesJson(model, switchID, counterName, layer);
+            }
+        } else {
+            getOneSwitchCounterCategoriesJson(model, switchID, counterName, layer);
+        }
+        
+        return model;
+    }
+    
+    protected void getOneSwitchCounterCategoriesJson(Map<String, Object> model,
+                                                     String switchID,
+                                                     String counterName, 
+                                                     String layer) {
+        String fullCounterName = "";      
+        NetworkLayer nl = NetworkLayer.L3;
+        
+        try {
+            counterName = URLDecoder.decode(counterName, "UTF-8");
+            layer = URLDecoder.decode(layer, "UTF-8");
+            fullCounterName = switchID + ICounterStoreService.TitleDelimitor + counterName;
+        } catch (UnsupportedEncodingException e) {
+            // Just leave counterName undecoded if there is an issue - fail silently
+        }
+
+        if (layer.compareToIgnoreCase("4") == 0) {
+            nl = NetworkLayer.L4;
+        }
+        List<String> categories = this.counterStore.getAllCategories(fullCounterName, nl);
+        if (categories != null) {
+            model.put(fullCounterName + "." + layer, categories);
+        }
+    }
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/web/SwitchCounterResource.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/web/SwitchCounterResource.java
new file mode 100644
index 0000000..188836d
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/web/SwitchCounterResource.java
@@ -0,0 +1,83 @@
+/**
+*    Copyright 2011, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.core.web;
+
+import java.io.UnsupportedEncodingException;
+import java.net.URLDecoder;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.openflow.util.HexString;
+import org.restlet.resource.Get;
+
+import net.floodlightcontroller.core.IFloodlightProviderService;
+import net.floodlightcontroller.counter.ICounter;
+import net.floodlightcontroller.counter.ICounterStoreService;
+
+/**
+ * Get counters for a particular switch 
+ * @author readams
+ */
+public class SwitchCounterResource extends CounterResourceBase {
+    @Get("json")
+    public Map<String, Object> retrieve() {
+        IFloodlightProviderService floodlightProvider = 
+                (IFloodlightProviderService)getContext().getAttributes().
+                    get(IFloodlightProviderService.class.getCanonicalName());
+        HashMap<String,Object> model = new HashMap<String,Object>();
+        
+        String switchID = (String) getRequestAttributes().get("switchId");
+        String counterName = (String) getRequestAttributes().get("counterName");
+
+        Long[] switchDpids;
+        if (switchID.equalsIgnoreCase("all")) {
+            switchDpids = floodlightProvider.getSwitches().keySet().toArray(new Long[0]);
+            getOneSwitchCounterJson(model, ICounterStoreService.CONTROLLER_NAME, counterName);
+            for (Long dpid : switchDpids) {
+                switchID = HexString.toHexString(dpid);
+
+                getOneSwitchCounterJson(model, switchID, counterName);
+            }
+        } else {
+            getOneSwitchCounterJson(model, switchID, counterName);
+        }
+        return model;
+    }
+    
+    protected void getOneSwitchCounterJson(Map<String, Object> model, 
+                                           String switchID, String counterName) {
+        String fullCounterName = "";      
+        
+        try {
+            counterName = URLDecoder.decode(counterName, "UTF-8");
+            fullCounterName = 
+                switchID + ICounterStoreService.TitleDelimitor + counterName;
+        } catch (UnsupportedEncodingException e) {
+            // Just leave counterName undecoded if there is an issue - fail silently
+        }
+
+        ICounter counter = this.counterStore.getCounter(fullCounterName);
+        Map<String, Long> sample = new HashMap<String, Long> ();
+        if (counter != null) {
+            sample.put(counter.getCounterDate().toString(), 
+                       counter.getCounterValue().getLong());
+            model.put(switchID, sample);
+        }
+    }
+    
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/web/SwitchResourceBase.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/web/SwitchResourceBase.java
new file mode 100644
index 0000000..d810024
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/web/SwitchResourceBase.java
@@ -0,0 +1,157 @@
+/**
+*    Copyright 2011, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.core.web;
+
+import java.util.Collections;
+import java.util.List;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+
+import net.floodlightcontroller.core.IFloodlightProviderService;
+import net.floodlightcontroller.core.IOFSwitch;
+import net.floodlightcontroller.core.annotations.LogMessageDoc;
+
+import org.openflow.protocol.OFFeaturesReply;
+import org.openflow.protocol.OFMatch;
+import org.openflow.protocol.OFPort;
+import org.openflow.protocol.OFStatisticsRequest;
+import org.openflow.protocol.statistics.OFAggregateStatisticsRequest;
+import org.openflow.protocol.statistics.OFFlowStatisticsRequest;
+import org.openflow.protocol.statistics.OFPortStatisticsRequest;
+import org.openflow.protocol.statistics.OFQueueStatisticsRequest;
+import org.openflow.protocol.statistics.OFStatistics;
+import org.openflow.protocol.statistics.OFStatisticsType;
+import org.openflow.util.HexString;
+import org.restlet.resource.ResourceException;
+import org.restlet.resource.ServerResource;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Base class for server resources related to switches
+ * @author readams
+ *
+ */
+public class SwitchResourceBase extends ServerResource {
+    protected static Logger log = LoggerFactory.getLogger(SwitchResourceBase.class);
+    
+    public enum REQUESTTYPE {
+        OFSTATS,
+        OFFEATURES
+    }
+    
+    @Override
+    protected void doInit() throws ResourceException {
+        super.doInit();
+        
+    }
+    
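+    /**
+     * Build a type-specific OFStatisticsRequest, send it to the switch identified
+     * by switchId and block for up to 10 seconds for the reply.  FLOW and AGGREGATE
+     * requests use a fully wildcarded match over all tables with no output-port
+     * filter; PORT and QUEUE requests cover all ports.  Returns null if the switch
+     * is unknown or the request fails.
+     */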
+    @LogMessageDoc(level="ERROR",
+                   message="Failure retrieving statistics from switch {switch}",
+                   explanation="An error occurred while retrieving statistics" +
+                   		"from the switch",
+                   recommendation=LogMessageDoc.CHECK_SWITCH + " " +
+                   		LogMessageDoc.GENERIC_ACTION)
+    protected List<OFStatistics> getSwitchStatistics(long switchId, 
+                                                     OFStatisticsType statType) {
+        IFloodlightProviderService floodlightProvider = 
+                (IFloodlightProviderService)getContext().getAttributes().
+                    get(IFloodlightProviderService.class.getCanonicalName());
+        
+        IOFSwitch sw = floodlightProvider.getSwitches().get(switchId);
+        Future<List<OFStatistics>> future;
+        List<OFStatistics> values = null;
+        if (sw != null) {
+            OFStatisticsRequest req = new OFStatisticsRequest();
+            req.setStatisticType(statType);
+            int requestLength = req.getLengthU();
+            if (statType == OFStatisticsType.FLOW) {
+                OFFlowStatisticsRequest specificReq = new OFFlowStatisticsRequest();
+                OFMatch match = new OFMatch();
+                match.setWildcards(0xffffffff);
+                specificReq.setMatch(match);
+                specificReq.setOutPort(OFPort.OFPP_NONE.getValue());
+                specificReq.setTableId((byte) 0xff);
+                req.setStatistics(Collections.singletonList((OFStatistics)specificReq));
+                requestLength += specificReq.getLength();
+            } else if (statType == OFStatisticsType.AGGREGATE) {
+                OFAggregateStatisticsRequest specificReq = new OFAggregateStatisticsRequest();
+                OFMatch match = new OFMatch();
+                match.setWildcards(0xffffffff);
+                specificReq.setMatch(match);
+                specificReq.setOutPort(OFPort.OFPP_NONE.getValue());
+                specificReq.setTableId((byte) 0xff);
+                req.setStatistics(Collections.singletonList((OFStatistics)specificReq));
+                requestLength += specificReq.getLength();
+            } else if (statType == OFStatisticsType.PORT) {
+                OFPortStatisticsRequest specificReq = new OFPortStatisticsRequest();
+                specificReq.setPortNumber((short)OFPort.OFPP_NONE.getValue());
+                req.setStatistics(Collections.singletonList((OFStatistics)specificReq));
+                requestLength += specificReq.getLength();
+            } else if (statType == OFStatisticsType.QUEUE) {
+                OFQueueStatisticsRequest specificReq = new OFQueueStatisticsRequest();
+                specificReq.setPortNumber((short)OFPort.OFPP_ALL.getValue());
+                // LOOK! openflowj does not define OFPQ_ALL! pulled this from openflow.h
+                // note that I haven't seen this work yet though...
+                specificReq.setQueueId(0xffffffff);
+                req.setStatistics(Collections.singletonList((OFStatistics)specificReq));
+                requestLength += specificReq.getLength();
+            } else if (statType == OFStatisticsType.DESC ||
+                       statType == OFStatisticsType.TABLE) {
+                // pass - nothing to do besides setting the type above
+            }
+            req.setLengthU(requestLength);
+            try {
+                future = sw.getStatistics(req);
+                values = future.get(10, TimeUnit.SECONDS);
+            } catch (Exception e) {
+                log.error("Failure retrieving statistics from switch " + sw, e);
+            }
+        }
+        return values;
+    }
+
+    protected List<OFStatistics> getSwitchStatistics(String switchId, OFStatisticsType statType) {
+        return getSwitchStatistics(HexString.toLong(switchId), statType);
+    }
+    
+    protected OFFeaturesReply getSwitchFeaturesReply(long switchId) {
+        IFloodlightProviderService floodlightProvider = 
+                (IFloodlightProviderService)getContext().getAttributes().
+                get(IFloodlightProviderService.class.getCanonicalName());
+
+        IOFSwitch sw = floodlightProvider.getSwitches().get(switchId);
+        Future<OFFeaturesReply> future;
+        OFFeaturesReply featuresReply = null;
+        if (sw != null) {
+            try {
+                future = sw.getFeaturesReplyFromSwitch();
+                featuresReply = future.get(10, TimeUnit.SECONDS);
+            } catch (Exception e) {
+                log.error("Failure getting features reply from switch" + sw, e);
+            }
+        }
+
+        return featuresReply;
+    }
+
+    protected OFFeaturesReply getSwitchFeaturesReply(String switchId) {
+        return getSwitchFeaturesReply(HexString.toLong(switchId));
+    }
+
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/web/SwitchRoleResource.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/web/SwitchRoleResource.java
new file mode 100644
index 0000000..0d73f93
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/web/SwitchRoleResource.java
@@ -0,0 +1,46 @@
+package net.floodlightcontroller.core.web;
+
+import java.util.HashMap;
+
+import org.openflow.util.HexString;
+import org.restlet.resource.ServerResource;
+
+import net.floodlightcontroller.core.IFloodlightProviderService;
+import net.floodlightcontroller.core.IOFSwitch;
+
+import org.restlet.resource.Get;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class SwitchRoleResource extends ServerResource {
+
+    protected static Logger log = LoggerFactory.getLogger(SwitchRoleResource.class);
+
+    @Get("json")
+    public Object getRole() {
+        IFloodlightProviderService floodlightProvider = 
+                (IFloodlightProviderService)getContext().getAttributes().
+                    get(IFloodlightProviderService.class.getCanonicalName());
+
+        String switchId = (String) getRequestAttributes().get("switchId");
+        
+        RoleInfo roleInfo;
+        
+        if (switchId.equalsIgnoreCase("all")) {
+            HashMap<String,RoleInfo> model = new HashMap<String,RoleInfo>();
+            for (IOFSwitch sw: floodlightProvider.getSwitches().values()) {
+                switchId = sw.getStringId();
+                roleInfo = new RoleInfo(sw.getRole());
+                model.put(switchId, roleInfo);
+            }
+            return model;
+        }
+
+        Long dpid = HexString.toLong(switchId);
+        IOFSwitch sw = floodlightProvider.getSwitches().get(dpid);
+        if (sw == null)
+            return null;
+        roleInfo = new RoleInfo(sw.getRole());
+        return roleInfo;
+    }
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/web/SwitchStatisticsResource.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/web/SwitchStatisticsResource.java
new file mode 100644
index 0000000..57771f7
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/web/SwitchStatisticsResource.java
@@ -0,0 +1,63 @@
+/**
+*    Copyright 2011, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.core.web;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import org.openflow.protocol.statistics.OFStatisticsType;
+import org.restlet.resource.Get;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Return switch statistics information for specific switches
+ * @author readams
+ */
+public class SwitchStatisticsResource extends SwitchResourceBase {
+    protected static Logger log = 
+        LoggerFactory.getLogger(SwitchStatisticsResource.class);
+
+    @Get("json")
+    public Map<String, Object> retrieve() {
+        HashMap<String,Object> result = new HashMap<String,Object>();
+        Object values = null;
+        
+        String switchId = (String) getRequestAttributes().get("switchId");
+        String statType = (String) getRequestAttributes().get("statType");
+        
+        if (statType.equals("port")) {
+            values = getSwitchStatistics(switchId, OFStatisticsType.PORT);
+        } else if (statType.equals("queue")) {
+            values = getSwitchStatistics(switchId, OFStatisticsType.QUEUE);
+        } else if (statType.equals("flow")) {
+            values = getSwitchStatistics(switchId, OFStatisticsType.FLOW);
+        } else if (statType.equals("aggregate")) {
+            values = getSwitchStatistics(switchId, OFStatisticsType.AGGREGATE);
+        } else if (statType.equals("desc")) {
+            values = getSwitchStatistics(switchId, OFStatisticsType.DESC);
+        } else if (statType.equals("table")) {
+            values = getSwitchStatistics(switchId, OFStatisticsType.TABLE);
+        } else if (statType.equals("features")) {
+            values = getSwitchFeaturesReply(switchId);
+        }
+
+        result.put(switchId, values);
+        return result;
+    }
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/web/SystemUptimeResource.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/web/SystemUptimeResource.java
new file mode 100644
index 0000000..fe4b967
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/web/SystemUptimeResource.java
@@ -0,0 +1,31 @@
+package net.floodlightcontroller.core.web;
+
+import net.floodlightcontroller.core.IFloodlightProviderService;
+import org.restlet.resource.Get;
+import org.restlet.resource.ServerResource;
+
+
+
+public class SystemUptimeResource extends ServerResource {
+	
+	public class UptimeRest {
+		long systemUptimeMsec;
+
+		public long getSystemUptimeMsec() {
+			return systemUptimeMsec;
+		}
+	}
+	
+	@Get("json")
+	public UptimeRest retrieve() {
+		IFloodlightProviderService floodlightProvider = 
+			(IFloodlightProviderService)getContext().getAttributes().
+			get(IFloodlightProviderService.class.getCanonicalName());
+		
+		UptimeRest uptime = new UptimeRest();
+		uptime.systemUptimeMsec = 
+		   System.currentTimeMillis() - floodlightProvider.getSystemStartTime();
+		
+		return (uptime);
+	}
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/web/serializers/ByteArrayMACSerializer.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/web/serializers/ByteArrayMACSerializer.java
new file mode 100644
index 0000000..66c33f5
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/web/serializers/ByteArrayMACSerializer.java
@@ -0,0 +1,40 @@
+/**
+*    Copyright 2011,2012 Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.core.web.serializers;
+
+import java.io.IOException;
+
+import org.codehaus.jackson.JsonGenerator;
+import org.codehaus.jackson.JsonProcessingException;
+import org.codehaus.jackson.map.JsonSerializer;
+import org.codehaus.jackson.map.SerializerProvider;
+import org.openflow.util.HexString;
+
+/**
+ * Serialize a MAC as colon-separated hexadecimal
+ */
+public class ByteArrayMACSerializer extends JsonSerializer<byte[]> {
+
+    @Override
+    public void serialize(byte[] mac, JsonGenerator jGen,
+                          SerializerProvider serializer)
+                                  throws IOException, JsonProcessingException {
+        jGen.writeString(HexString.toHexString(mac));
+    }
+
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/web/serializers/DPIDSerializer.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/web/serializers/DPIDSerializer.java
new file mode 100644
index 0000000..e74cc01
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/web/serializers/DPIDSerializer.java
@@ -0,0 +1,40 @@
+/**
+*    Copyright 2011,2012 Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.core.web.serializers;
+
+import java.io.IOException;
+
+import org.codehaus.jackson.JsonGenerator;
+import org.codehaus.jackson.JsonProcessingException;
+import org.codehaus.jackson.map.JsonSerializer;
+import org.codehaus.jackson.map.SerializerProvider;
+import org.openflow.util.HexString;
+
+/**
+ * Serialize a DPID as colon-separated hexadecimal
+ */
+public class DPIDSerializer extends JsonSerializer<Long> {
+
+    @Override
+    public void serialize(Long dpid, JsonGenerator jGen,
+                          SerializerProvider serializer)
+                                  throws IOException, JsonProcessingException {
+        jGen.writeString(HexString.toHexString(dpid, 8));
+    }
+
+}
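+
+// Editor's note (illustrative usage, not part of upstream Floodlight v0.90):
+// a bean property is bound to this serializer with the Jackson 1.x annotation
+// org.codehaus.jackson.map.annotate.JsonSerialize, e.g.
+//
+//     @JsonSerialize(using=DPIDSerializer.class)
+//     public long getDpid() { return dpid; }
+//
+// which renders the 64-bit DPID as a colon-separated hex string in REST responses.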
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/web/serializers/IPv4Serializer.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/web/serializers/IPv4Serializer.java
new file mode 100644
index 0000000..f4a5877
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/web/serializers/IPv4Serializer.java
@@ -0,0 +1,41 @@
+/**
+*    Copyright 2011,2012 Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.core.web.serializers;
+
+import java.io.IOException;
+
+import net.floodlightcontroller.packet.IPv4;
+
+import org.codehaus.jackson.JsonGenerator;
+import org.codehaus.jackson.JsonProcessingException;
+import org.codehaus.jackson.map.JsonSerializer;
+import org.codehaus.jackson.map.SerializerProvider;
+
+/**
+ * Serialize an integer as an IPv4 Address in dotted decimal format
+ */
+public class IPv4Serializer extends JsonSerializer<Integer> {
+
+    @Override
+    public void serialize(Integer i, JsonGenerator jGen,
+                          SerializerProvider serializer)
+                                  throws IOException, JsonProcessingException {
+        jGen.writeString(IPv4.fromIPv4Address(i));
+    }
+
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/web/serializers/MACSerializer.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/web/serializers/MACSerializer.java
new file mode 100644
index 0000000..a7c9fb7
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/web/serializers/MACSerializer.java
@@ -0,0 +1,40 @@
+/**
+*    Copyright 2011,2012 Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.core.web.serializers;
+
+import java.io.IOException;
+
+import org.codehaus.jackson.JsonGenerator;
+import org.codehaus.jackson.JsonProcessingException;
+import org.codehaus.jackson.map.JsonSerializer;
+import org.codehaus.jackson.map.SerializerProvider;
+import org.openflow.util.HexString;
+
+/**
+ * Serialize a MAC as colon-separated hexadecimal
+ */
+public class MACSerializer extends JsonSerializer<Long> {
+
+    @Override
+    public void serialize(Long dpid, JsonGenerator jGen,
+                          SerializerProvider serializer)
+                                  throws IOException, JsonProcessingException {
+        jGen.writeString(HexString.toHexString(dpid, 6));
+    }
+
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/web/serializers/UShortSerializer.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/web/serializers/UShortSerializer.java
new file mode 100644
index 0000000..c125c76
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/core/web/serializers/UShortSerializer.java
@@ -0,0 +1,40 @@
+/**
+*    Copyright 2011,2012 Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.core.web.serializers;
+
+import java.io.IOException;
+
+import org.codehaus.jackson.JsonGenerator;
+import org.codehaus.jackson.JsonProcessingException;
+import org.codehaus.jackson.map.JsonSerializer;
+import org.codehaus.jackson.map.SerializerProvider;
+
+/**
+ * Serialize a short value as an unsigned short
+ */
+public class UShortSerializer extends JsonSerializer<Short> {
+
+    @Override
+    public void serialize(Short s, JsonGenerator jGen,
+                          SerializerProvider serializer) throws IOException,
+                                                  JsonProcessingException {
+        if (s == null) jGen.writeNull();
+        else jGen.writeNumber(s.shortValue() & 0xffff);
+    }
+
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/counter/ConcurrentCounter.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/counter/ConcurrentCounter.java
new file mode 100644
index 0000000..cdec1e0
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/counter/ConcurrentCounter.java
@@ -0,0 +1,205 @@
+/**
+*    Copyright 2011, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+/**
+ * 
+ */
+package net.floodlightcontroller.counter;
+
+import java.util.Collections;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Queue;
+import java.util.Set;
+import java.util.concurrent.ConcurrentLinkedQueue;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+
+import net.floodlightcontroller.counter.CounterValue.CounterType;
+
+
+/**
+ * This module needs to be updated with CounterValue.
+ * 
+ * This is a crummy attempt at a highly concurrent implementation of the Counter interface.
+ * 
+ * (Help! Help!  Someone please re-write me!  This will almost certainly break at high loads.)
+ * 
+ * The gist is that this class, ConcurrentCounter, keeps an internal, highly transient buffer that is occasionally flushed
+ * into a set of CountBuffers (circular buffers) which store a longer-term historical view of the count values at different
+ * moments in time.
+ * 
+ * This Counter implementation may be a bit over-engineered...  The goal here was to present an implementation that is very
+ * predictable with respect to memory and CPU time and, at the same time, present a very fast increment() method.  The reasoning
+ * here is that this will be a go-to class when it comes to debugging, particularly in high-load situations where logging
+ * may introduce so much variability to the system that it foils the results.
+ * 
+ * @author kyle
+ *
+ */
+public class ConcurrentCounter implements ICounter {
+
+  protected static final Map<DateSpan, Integer> MAX_HISTORY = new HashMap<DateSpan, Integer>();
+  static {
+    MAX_HISTORY.put(DateSpan.REALTIME, new Integer(1));
+    MAX_HISTORY.put(DateSpan.SECONDS, new Integer(120));
+    MAX_HISTORY.put(DateSpan.MINUTES, new Integer(60));
+    MAX_HISTORY.put(DateSpan.HOURS, new Integer(48));
+    MAX_HISTORY.put(DateSpan.DAYS, new Integer(60));
+    MAX_HISTORY.put(DateSpan.WEEKS, new Integer(2)); 
+  }
+  
+  protected static Set<ConcurrentCounter> liveCounters;
+  
+  static {
+    liveCounters = Collections.newSetFromMap(new ConcurrentHashMap<ConcurrentCounter, Boolean>()); //nifty way to get concurrent hash set
+    //Set a background thread to flush any liveCounters every 100 milliseconds
+    Executors.newScheduledThreadPool(1).scheduleAtFixedRate(new Runnable() {
+        public void run() {
+            for(ConcurrentCounter c : liveCounters) {
+                c.flush();
+            }
+        }}, 100, 100, TimeUnit.MILLISECONDS);
+  }
+
+  /**
+   * Very simple data structure to store off a single count entry at a single point in time
+   * @author kyle
+   *
+   */
+  protected static final class CountAtom {
+    protected Date date;
+    protected Long delta;
+    
+    protected CountAtom(Date date, Long delta) {
+      this.date = date;
+      this.delta = delta;
+    }
+    
+    public String toString() {
+      return "[" + this.date + ": " + this.delta + "]";
+    }
+  }
+
+  
+  protected Queue<CountAtom> unprocessedCountBuffer;
+  protected Map<DateSpan, CountBuffer> counts;
+  protected Date startDate;
+  
+  /**
+   * Factory method to create a new counter instance.  (Design note - 
+   * use a factory pattern here as it may be necessary to hook in other
+   * registrations around counter objects as they are created.)
+   * 
+   * @param startDate
+   * @return
+   */
+  public static ICounter createCounter(Date startDate) {
+    ConcurrentCounter cc = new ConcurrentCounter(startDate);
+    ConcurrentCounter.liveCounters.add(cc);
+    return cc;
+    
+  }
+  
+  /**
+   * Protected constructor - use createCounter factory method instead
+   * @param startDate
+   */
+  protected ConcurrentCounter(Date startDate) {
+    init(startDate);
+  }
+  
+  protected void init(Date startDate) {
+    this.startDate = startDate;
+    this.unprocessedCountBuffer = new ConcurrentLinkedQueue<CountAtom>();
+    this.counts = new HashMap<DateSpan, CountBuffer>();
+      
+    for(DateSpan ds : DateSpan.values()) {
+      CountBuffer cb = new CountBuffer(startDate, ds, MAX_HISTORY.get(ds));
+      counts.put(ds, cb);
+    }
+  }
+  /**
+   * This is the key method that has to be both fast and very thread-safe.
+   */
+  @Override
+  public void increment() {
+    this.increment(new Date(), (long)1);
+  }
+  
+  @Override
+  public void increment(Date d, long delta) {
+    this.unprocessedCountBuffer.add(new CountAtom(d, delta));
+  }
+  
+  @Override
+  public void setCounter(Date d, CounterValue value) {
+      // To be done later
+  }
+  
+  /**
+   * Reset the value.
+   */
+  @Override
+  public void reset(Date startDate) {
+    init(startDate);
+  }
+  
+  /**
+   * Flushes values out of the internal buffer and into structures
+   * that can be fetched with a call to snapshot()
+   */
+  public synchronized void flush() {
+    for(CountAtom c = this.unprocessedCountBuffer.poll(); c != null; c = this.unprocessedCountBuffer.poll()) {
+      for(DateSpan ds : DateSpan.values()) {
+        CountBuffer cb = counts.get(ds);
+        cb.increment(c.date, c.delta);
+      }
+    }
+  }
+  
+  @Override
+  public CounterValue getCounterValue() {
+      // To be done later
+      //CountSeries cs = counts.get(DateSpan.REALTIME).snapshot();
+      //return cs.getSeries()[0];
+      return new CounterValue(CounterType.LONG);
+  }
+  
+  @Override
+  public Date getCounterDate() {
+      // To be done later
+      //CountSeries cs = counts.get(DateSpan.REALTIME).snapshot();
+      //return cs.getSeries()[0];
+      return new Date();
+  }
+  
+  /**
+   * This method returns a disconnected copy of the underlying CountSeries corresponding to dateSpan.
+   */
+  @Override
+  public CountSeries snapshot(DateSpan dateSpan) {
+    flush();
+    CountSeries cs = counts.get(dateSpan).snapshot();
+    return cs;
+  }
+
+  
+  
+}
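+
+/*
+ * Editor's note (illustrative usage sketch, not part of upstream Floodlight v0.90):
+ *
+ *   ICounter c = ConcurrentCounter.createCounter(new Date());
+ *   c.increment();                                        // cheap: just queues a CountAtom
+ *   CountSeries s = c.snapshot(ICounter.DateSpan.SECONDS); // flush, then copy the buffer
+ *   long[] perSecond = s.getSeries();
+ *
+ * increment() only appends to the unprocessed queue; the scheduled task created in
+ * the static initializer (every 100 ms) or an explicit snapshot() folds the queued
+ * deltas into the per-DateSpan CountBuffers.
+ */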
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/counter/CountBuffer.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/counter/CountBuffer.java
new file mode 100644
index 0000000..fa45862
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/counter/CountBuffer.java
@@ -0,0 +1,125 @@
+/**
+*    Copyright 2011, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.counter;
+
+import java.util.Date;
+
+import net.floodlightcontroller.counter.ICounter.DateSpan;
+
+
+/**
+ * Implements a circular buffer to store the last x time-based counter values.  This is a pretty crummy
+ * implementation, basically wrapping everything in synchronized blocks, in order to ensure that threads
+ * which are updating the series don't leave a thread which is reading the series stuck with
+ * a start date which does not correspond to the count values in getSeries.
+ * 
+ * This could probably use a re-think...
+ * 
+ * @author kyle
+ *
+ */
+public class CountBuffer {
+  protected long[] counterValues;
+  protected Date startDate;
+  protected DateSpan dateSpan;
+  protected int currentIndex;
+  protected int seriesLength;
+
+
+  public CountBuffer(Date startDate, DateSpan dateSpan, int seriesLength) {
+    this.seriesLength = seriesLength;
+    this.counterValues = new long[seriesLength];
+    this.dateSpan = dateSpan;
+    
+    this.startDate = startDate;
+    this.currentIndex = 0;
+  }
+  
+  /**
+   * Increment the count associated with Date d, forgetting some of the older count values if necessary to ensure
+   * that the total span of time covered by this series corresponds to DateSpan * seriesLength (circular buffer).
+   * 
+   * Note - fails silently if the Date falls prior to the start of the tracked count values.
+   * 
+   * Note - this should be a reasonably fast method, though it will have to block if there is another thread reading the
+   * series at the same time.
+   * 
+   * @param d
+   * @param delta
+   */
+  public synchronized void increment(Date d, long delta) {
+
+    long dsMillis = CountSeries.dateSpanToMilliseconds(this.dateSpan);
+    Date endDate = new Date(startDate.getTime() + seriesLength * dsMillis - 1);
+
+    if(d.getTime() < startDate.getTime()) {
+      return; //silently fail rather than insert a count at a time older than the history buffer we're keeping
+    }
+    else if (d.getTime() >= startDate.getTime() && d.getTime() <= endDate.getTime()) {
+        int index = (int)  (( d.getTime() - startDate.getTime() ) / dsMillis); // java rounds down on long/long
+        int modIndex = (index + currentIndex) % seriesLength;
+        long currentValue = counterValues[modIndex];
+        counterValues[modIndex] = currentValue + delta;
+    }
+    else if (d.getTime() > endDate.getTime()) {
+      //Initialize new buckets
+      int newBuckets = (int)((d.getTime() - endDate.getTime()) / dsMillis) + 1; // java rounds down on long/long
+      for(int i = 0; i < newBuckets; i++) {
+        int modIndex = (i + currentIndex) % seriesLength;
+        counterValues[modIndex] = 0;
+      }
+      //Update internal vars
+      this.startDate = new Date(startDate.getTime() + dsMillis * newBuckets);
+      this.currentIndex = (currentIndex + newBuckets) % this.seriesLength;    
+
+      //Call again (date should be in the range this time)
+      this.increment(d, delta);
+    }
+  }
+  
+  /**
+   * Relatively slow method, expected to be called primarily from UI rather than from in-packet-path.
+   * 
+   * @return the count values associated with each time interval starting with startDate and demarc'ed by dateSpan
+   */
+  public synchronized long[] getSeries() { // synchronized so that it shares the 'this' lock with increment and snapshot
+    long[] ret = new long[this.seriesLength];
+    for(int i = 0; i < this.seriesLength; i++) {
+      int modIndex = (currentIndex + i) % this.seriesLength;
+      ret[i] = this.counterValues[modIndex];
+    }
+    return ret;
+  }
+
+  
+  /**
+   * Returns an immutable count series that represents a snapshot of this
+   * series at a specific moment in time.
+   * @return
+   */
+  public synchronized CountSeries snapshot() {
+    long[] cvs = new long[this.seriesLength];
+    for(int i = 0; i < this.seriesLength; i++) {
+      int modIndex = (this.currentIndex + i) % this.seriesLength;
+      cvs[i] = this.counterValues[modIndex];
+    }
+
+    return new CountSeries(this.startDate, this.dateSpan, cvs);
+  }
+  
+}
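+
+/*
+ * Editor's note (illustrative example, not part of upstream Floodlight v0.90):
+ * with dateSpan = SECONDS and seriesLength = 120 the buffer covers a sliding
+ * two-minute window.  An increment at time d lands in bucket
+ *
+ *   ((d - startDate) / 1000 + currentIndex) % 120
+ *
+ * and an increment past the end of the window first zeroes the overtaken buckets
+ * and advances startDate/currentIndex before the delta is applied.
+ */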
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/counter/CountSeries.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/counter/CountSeries.java
new file mode 100644
index 0000000..e8a547a
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/counter/CountSeries.java
@@ -0,0 +1,88 @@
+/**
+*    Copyright 2011, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.counter;
+
+import java.util.Arrays;
+import java.util.Date;
+
+import net.floodlightcontroller.counter.ICounter.DateSpan;
+
+/**
+ * Simple immutable class to store a series of historic counter values
+ * 
+ * This could probably use a re-think...
+ * 
+ * @author kyle
+ *
+ */
+public class CountSeries {  
+  protected long[] counterValues;
+  protected Date startDate;
+  protected DateSpan dateSpan;
+  
+  public CountSeries(Date startDate, DateSpan dateSpan, long[] counterValues) {
+    this.counterValues = counterValues.clone();
+    this.dateSpan = dateSpan;    
+    this.startDate = startDate;
+  }
+  
+
+  public long[] getSeries() {
+    return this.counterValues.clone();
+  }
+  
+  /**
+   * Returns the startDate of this series.  The first long in getSeries represents the sum of deltas from increment calls with dates
+   * that correspond to >= startDate and < startDate + DateSpan.
+   * @return
+   */
+  public Date getStartDate() {
+    return this.startDate;
+  }
+  
+  public String toString() {
+    String ret = "{start: " + this.startDate + ", span: " + this.dateSpan + ", series: " + Arrays.toString(getSeries()) + "}";
+    return ret;
+  }
+  
+  /**
+   * Return a long that is the number of milliseconds in a ds (second/minute/hour/day/week).  (Utility method.)
+   * 
+   * @param ds
+   * @return
+   */
+  public static final long dateSpanToMilliseconds(DateSpan ds) {
+    long delta = 1;
+    switch(ds) {
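+	    // intentional fall-through: each case multiplies into the cases below it,
+	    // e.g. WEEKS = 7 * 24 * 60 * 60 * 1000 ms and SECONDS = 1000 ms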
+	    case WEEKS:
+	    	delta *= 7;
+	    case DAYS:
+	    	delta *= 24;
+	    case HOURS:
+	    	delta *= 60;
+	    case MINUTES:
+	    	delta *= 60;
+	    case SECONDS:
+	    	delta *= 1000;
+	    default:
+	    	break;
+    }
+    return delta;
+  }
+
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/counter/CounterStore.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/counter/CounterStore.java
new file mode 100644
index 0000000..26d1302
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/counter/CounterStore.java
@@ -0,0 +1,461 @@
+/**
+ *    Copyright 2011, Big Switch Networks, Inc. 
+ *    Originally created by David Erickson, Stanford University
+ * 
+ *    Licensed under the Apache License, Version 2.0 (the "License"); you may
+ *    not use this file except in compliance with the License. You may obtain
+ *    a copy of the License at
+ *
+ *         http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *    Unless required by applicable law or agreed to in writing, software
+ *    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ *    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ *    License for the specific language governing permissions and limitations
+ *    under the License.
+ **/
+
+/**
+ * Implements a very simple central store for system counters
+ */
+package net.floodlightcontroller.counter;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+
+import javax.annotation.PostConstruct;
+
+import net.floodlightcontroller.core.IOFSwitch;
+import net.floodlightcontroller.core.module.FloodlightModuleContext;
+import net.floodlightcontroller.core.module.FloodlightModuleException;
+import net.floodlightcontroller.core.module.IFloodlightModule;
+import net.floodlightcontroller.core.module.IFloodlightService;
+import net.floodlightcontroller.counter.CounterValue.CounterType;
+import net.floodlightcontroller.packet.Ethernet;
+import net.floodlightcontroller.packet.IPv4;
+
+import org.openflow.protocol.OFMessage;
+import org.openflow.protocol.OFPacketIn;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+
+/**
+ * @author kyle
+ *
+ */
+public class CounterStore implements IFloodlightModule, ICounterStoreService {
+    protected static Logger log = LoggerFactory.getLogger(CounterStore.class);
+
+    public enum NetworkLayer {
+        L2, L3, L4
+    }
+
+    protected class CounterEntry {
+        protected ICounter counter;
+        String title;
+    }
+
+    /**
+     * A map of counterName --> Counter
+     */
+    protected ConcurrentHashMap<String, CounterEntry> nameToCEIndex = 
+            new ConcurrentHashMap<String, CounterEntry>();
+
+    protected ICounter heartbeatCounter;
+    protected ICounter randomCounter;
+
+    /**
+     * Counter Categories grouped by network layers
+     * NetworkLayer -> CounterToCategories
+     */
+    protected static Map<NetworkLayer, Map<String, List<String>>> layeredCategories = 
+            new ConcurrentHashMap<NetworkLayer, Map<String, List<String>>> ();
+
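+    /**
+     * Update the packet-in counters for one OFPacketIn: an overall counter at
+     * controller, switch and port scope for the message type, plus L2 category
+     * counters (unicast/multicast/broadcast), L3 category counters keyed on the
+     * EtherType and, for IPv4 frames, L4 category counters keyed on the IP
+     * protocol.  Counters are created lazily the first time they are used.
+     */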
+    public void updatePacketInCounters(IOFSwitch sw, OFMessage m, Ethernet eth) {
+        OFPacketIn packet = (OFPacketIn)m;
+        
+        // Make sure there is data
+        if (packet.getPacketData().length <= 0) return;
+        
+        /* Extract the etherType and protocol field for IPv4 packet.
+         */
+        String etherType = String.format("%04x", eth.getEtherType());
+        
+        /*
+         * A valid EtherType must be greater than or equal to 0x0600;
+         * anything smaller is a V1 (length-encoded) Ethernet frame.
+         */
+        if (eth.getEtherType() < 0x0600) {
+            etherType = "0599";
+        }
+
+        if (TypeAliases.l3TypeAliasMap != null && 
+            TypeAliases.l3TypeAliasMap.containsKey(etherType)) {
+            etherType = TypeAliases.l3TypeAliasMap.get(etherType);
+        } else {
+            etherType = "L3_" + etherType;
+        }
+        String switchIdHex = sw.getStringId();
+   
+        String packetName = m.getType().toClass().getName();
+        packetName = packetName.substring(packetName.lastIndexOf('.')+1); 
+        
+        // Construct controller counter for the packet_in
+        String controllerCounterName =
+            CounterStore.createCounterName(CONTROLLER_NAME, 
+                                           -1,
+                                           packetName);
+    
+        String controllerL3CategoryCounterName = 
+            CounterStore.createCounterName(CONTROLLER_NAME, 
+                                           -1,
+                                           packetName, 
+                                           etherType, 
+                                           NetworkLayer.L3);
+
+        String l2Type = null;
+        if (eth.isBroadcast()) {
+        	l2Type = BROADCAST;
+        } else if (eth.isMulticast()) {
+        	l2Type = MULTICAST;
+        } else {
+        	l2Type = UNICAST;
+        }
+        
+        // Construct controller, switch and port L2 category counters for the packet_in
+    	String controllerL2CategoryCounterName = CounterStore.createCounterName(CONTROLLER_NAME, 
+                -1,
+                packetName, 
+                l2Type, 
+                NetworkLayer.L2);
+    	String switchL2CategoryCounterName = CounterStore.createCounterName(switchIdHex, 
+                -1, 
+                packetName, 
+                l2Type, 
+                NetworkLayer.L2);
+    	String portL2CategoryCounterName = CounterStore.createCounterName(switchIdHex, 
+                packet.getInPort(),
+                packetName, 
+                l2Type, 
+                NetworkLayer.L2);
+        
+        // Construct overall port/switch counters and their L3 category counters for the packet_in
+        String portCounterName =
+                CounterStore.createCounterName(switchIdHex, 
+                                               packet.getInPort(),
+                                               packetName);
+        String switchCounterName =
+                CounterStore.createCounterName(switchIdHex, 
+                                               -1,
+                                               packetName);
+        
+        String portL3CategoryCounterName = 
+                CounterStore.createCounterName(switchIdHex, 
+                                               packet.getInPort(),
+                                               packetName, 
+                                               etherType, 
+                                               NetworkLayer.L3);
+        String switchL3CategoryCounterName =
+                CounterStore.createCounterName(switchIdHex, 
+                                               -1, 
+                                               packetName, 
+                                               etherType, 
+                                               NetworkLayer.L3);
+
+        // Controller counters
+        ICounter controllerCounter = getCounter(controllerCounterName);
+        if (controllerCounter == null) {
+            controllerCounter = createCounter(controllerCounterName, 
+                                              CounterType.LONG);
+        }
+        controllerCounter.increment();
+        ICounter portCounter = getCounter(portCounterName);
+        if (portCounter == null) {
+            portCounter = createCounter(portCounterName, 
+                                        CounterType.LONG);
+        }
+        portCounter.increment();
+        ICounter switchCounter = getCounter(switchCounterName);
+        if (switchCounter == null) {
+            switchCounter = createCounter(switchCounterName, 
+                                          CounterType.LONG);
+        }
+        switchCounter.increment();
+
+        // L2 counters
+        ICounter controllerL2Counter = getCounter(controllerL2CategoryCounterName);
+        if (controllerL2Counter == null) {
+            controllerL2Counter = createCounter(controllerL2CategoryCounterName,
+                                                CounterType.LONG);
+        }
+        controllerL2Counter.increment();
+        ICounter switchL2Counter = getCounter(switchL2CategoryCounterName);
+        if (switchL2Counter == null) {
+            switchL2Counter = createCounter(switchL2CategoryCounterName,
+                                            CounterType.LONG);
+        }
+        switchL2Counter.increment();
+        ICounter portL2Counter = getCounter(portL2CategoryCounterName);
+        if (portL2Counter == null) {
+            portL2Counter = createCounter(portL2CategoryCounterName,
+                                          CounterType.LONG);
+        }
+        portL2Counter.increment();
+
+        // L3 counters
+        ICounter controllerL3Counter = getCounter(controllerL3CategoryCounterName);
+        if (controllerL3Counter == null) {
+            controllerL3Counter = createCounter(controllerL3CategoryCounterName,
+                                                CounterType.LONG);
+        }
+        controllerL3Counter.increment();
+        ICounter portL3Counter = getCounter(portL3CategoryCounterName);
+        if (portL3Counter == null) {
+            portL3Counter = createCounter(portL3CategoryCounterName,
+                                          CounterType.LONG);
+        }
+        portL3Counter.increment();
+        ICounter switchL3Counter = getCounter(switchL3CategoryCounterName);
+        if (switchL3Counter == null) {
+            switchL3Counter = createCounter(switchL3CategoryCounterName,
+                                            CounterType.LONG);
+        }
+        switchL3Counter.increment();
+
+        // L4 counters
+        if (etherType.compareTo(CounterStore.L3ET_IPV4) == 0) {
+            IPv4 ipV4 = (IPv4)eth.getPayload();
+            String l4Type = String.format("%02x", ipV4.getProtocol());
+            if (TypeAliases.l4TypeAliasMap != null && 
+                    TypeAliases.l4TypeAliasMap.containsKey(l4Type)) {
+                l4Type = TypeAliases.l4TypeAliasMap.get(l4Type);
+            } else {
+                l4Type = "L4_" + l4Type;
+            }
+            String controllerL4CategoryCounterName = 
+                    CounterStore.createCounterName(CONTROLLER_NAME, 
+                                                   -1, 
+                                                   packetName, 
+                                                   l4Type, 
+                                                   NetworkLayer.L4);
+            String portL4CategoryCounterName =
+                    CounterStore.createCounterName(switchIdHex, 
+                                                   packet.getInPort(), 
+                                                   packetName, 
+                                                   l4Type, 
+                                                   NetworkLayer.L4);
+            String switchL4CategoryCounterName = 
+                    CounterStore.createCounterName(switchIdHex, 
+                                                   -1, 
+                                                   packetName, 
+                                                   l4Type, 
+                                                   NetworkLayer.L4);
+            ICounter controllerL4Counter = getCounter(controllerL4CategoryCounterName);
+            if (controllerL4Counter == null) {
+                controllerL4Counter = createCounter(controllerL4CategoryCounterName, 
+                                                    CounterType.LONG);
+            }
+            controllerL4Counter.increment();
+            ICounter portL4Counter = getCounter(portL4CategoryCounterName);
+            if (portL4Counter == null) {
+                portL4Counter = createCounter(portL4CategoryCounterName, 
+                                              CounterType.LONG);
+            }
+            portL4Counter.increment();
+            ICounter switchL4Counter = getCounter(switchL4CategoryCounterName);
+            if (switchL4Counter == null) {
+                switchL4Counter = createCounter(switchL4CategoryCounterName, 
+                                                CounterType.LONG);
+            }
+            switchL4Counter.increment();
+        }
+    }
+    
+    /**
+     * This method can only be used to update packetOut and flowmod counters
+     * 
+     * @param sw
+     * @param ofMsg
+     */
+    public void updatePktOutFMCounterStore(IOFSwitch sw, OFMessage ofMsg) {
+        String packetName = ofMsg.getType().toClass().getName();
+        packetName = packetName.substring(packetName.lastIndexOf('.')+1);
+        // flowmod is per switch and controller. portid = -1
+        String controllerFMCounterName = CounterStore.createCounterName(CONTROLLER_NAME, -1, packetName);  
+        ICounter counter = getCounter(controllerFMCounterName);
+        if (counter == null) {
+            counter = createCounter(controllerFMCounterName, CounterValue.CounterType.LONG);
+        }
+        counter.increment();
+
+        String switchFMCounterName = CounterStore.createCounterName(sw.getStringId(), -1, packetName);
+        counter = getCounter(switchFMCounterName);
+        if (counter == null) {
+            counter = createCounter(switchFMCounterName, CounterValue.CounterType.LONG);
+        }
+        counter.increment();
+    }
+
+
+    /**
+     * Create a counter title based on switch ID, portID, and counterName.
+     * If portID is -1, the title represents the given switch only
+     * If portID is a non-negative number, the title represents the port on the given switch
+     */
+    public static String createCounterName(String switchID, int portID, String counterName) {
+        if (portID < 0) {
+            return switchID + TitleDelimitor + counterName;
+        } else {
+            return switchID + TitleDelimitor + portID + TitleDelimitor + counterName;
+        }
+    }
+
+    /**
+     * Create a counter title based on switch ID, portID, counterName, and subCategory.
+     * If portID is -1, the title represents the given switch only
+     * If portID is a non-negative number, the title represents the port on the given switch
+     * For example: PacketIns can be further categorized based on L2 etherType or L3 protocol
+     */
+    public static String createCounterName(String switchID, int portID, String counterName,
+            String subCategory, NetworkLayer layer) {
+        String fullCounterName = "";
+        String groupCounterName = "";
+
+        if (portID < 0) {
+            groupCounterName = switchID + TitleDelimitor + counterName;
+            fullCounterName = groupCounterName + TitleDelimitor + subCategory;
+        } else {
+            groupCounterName = switchID + TitleDelimitor + portID + TitleDelimitor + counterName;
+            fullCounterName = groupCounterName + TitleDelimitor + subCategory;
+        }
+
+        Map<String, List<String>> counterToCategories;      
+        if (layeredCategories.containsKey(layer)) {
+            counterToCategories = layeredCategories.get(layer);
+        } else {
+            counterToCategories = new ConcurrentHashMap<String, List<String>> ();
+            layeredCategories.put(layer, counterToCategories);
+        }
+
+        List<String> categories;
+        if (counterToCategories.containsKey(groupCounterName)) {
+            categories = counterToCategories.get(groupCounterName);
+        } else {
+            categories = new ArrayList<String>();
+            counterToCategories.put(groupCounterName, categories);
+        }
+
+        if (!categories.contains(subCategory)) {
+            categories.add(subCategory);
+        }
+        return fullCounterName;
+    }
+
+    @Override
+    public List<String> getAllCategories(String counterName, NetworkLayer layer) {
+        if (layeredCategories.containsKey(layer)) {
+            Map<String, List<String>> counterToCategories = layeredCategories.get(layer);
+            if (counterToCategories.containsKey(counterName)) {
+                return counterToCategories.get(counterName);
+            }
+        }
+        return null;
+    }
+    
+    @Override
+    public ICounter createCounter(String key, CounterValue.CounterType type) {
+        CounterEntry ce;
+        ICounter c;
+
+        c = SimpleCounter.createCounter(new Date(), type);
+        ce = new CounterEntry();
+        ce.counter = c;
+        ce.title = key;
+        nameToCEIndex.putIfAbsent(key, ce);
+        
+        return nameToCEIndex.get(key).counter;
+    }
+
+    /**
+     * Post construction init method to kick off the health check and random (test) counter threads
+     */
+    @PostConstruct
+    public void startUp() {
+        this.heartbeatCounter = this.createCounter("CounterStore heartbeat", CounterValue.CounterType.LONG);
+        this.randomCounter = this.createCounter("CounterStore random", CounterValue.CounterType.LONG);
+        // Schedule a background task that updates the heartbeat and random test counters every 100 milliseconds
+        Executors.newScheduledThreadPool(1).scheduleAtFixedRate(new Runnable() {
+            public void run() {
+                heartbeatCounter.increment();
+                randomCounter.increment(new Date(), (long) (Math.random() * 100)); //TODO - pull this in to random timing
+            }}, 100, 100, TimeUnit.MILLISECONDS);
+    }
+    
+    @Override
+    public ICounter getCounter(String key) {
+        CounterEntry counter = nameToCEIndex.get(key);
+        if (counter != null) {
+            return counter.counter;
+        } else {
+            return null;
+        }
+    }
+
+    /* (non-Javadoc)
+     * @see net.floodlightcontroller.counter.ICounterStoreService#getAll()
+     */
+    @Override
+    public Map<String, ICounter> getAll() {
+        Map<String, ICounter> ret = new ConcurrentHashMap<String, ICounter>();
+        for(Map.Entry<String, CounterEntry> counterEntry : this.nameToCEIndex.entrySet()) {
+            String key = counterEntry.getKey();
+            ICounter counter = counterEntry.getValue().counter;
+            ret.put(key, counter);
+        }
+        return ret;
+    }
+
+    @Override
+    public Collection<Class<? extends IFloodlightService>> getModuleServices() {
+        Collection<Class<? extends IFloodlightService>> services =
+                new ArrayList<Class<? extends IFloodlightService>>(1);
+        services.add(ICounterStoreService.class);
+        return services;
+    }
+
+    @Override
+    public Map<Class<? extends IFloodlightService>, IFloodlightService>
+            getServiceImpls() {
+        Map<Class<? extends IFloodlightService>,
+            IFloodlightService> m = 
+                new HashMap<Class<? extends IFloodlightService>,
+                    IFloodlightService>();
+        m.put(ICounterStoreService.class, this);
+        return m;
+    }
+
+    @Override
+    public Collection<Class<? extends IFloodlightService>> getModuleDependencies() {
+        // no-op, no dependencies
+        return null;
+    }
+
+    @Override
+    public void init(FloodlightModuleContext context)
+                                 throws FloodlightModuleException {
+        // no-op for now
+    }
+
+    @Override
+    public void startUp(FloodlightModuleContext context) {
+        // no-op for now
+    }
+}
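
For orientation, a minimal usage sketch (not part of the imported tree): any module holding an ICounterStoreService reference can bump a counter with the same get-or-create pattern used in updatePacketInCounters() above. The class name and the counter title below are purely illustrative.

    import net.floodlightcontroller.counter.CounterValue;
    import net.floodlightcontroller.counter.ICounter;
    import net.floodlightcontroller.counter.ICounterStoreService;

    public class CounterUsageSketch {
        private final ICounterStoreService counterStore;

        public CounterUsageSketch(ICounterStoreService counterStore) {
            this.counterStore = counterStore;
        }

        public void recordEvent() {
            // Hypothetical title following the "<scope>__<counterName>" convention above.
            String name = "controller__MyEvent";
            ICounter c = counterStore.getCounter(name);
            if (c == null) {
                c = counterStore.createCounter(name, CounterValue.CounterType.LONG);
            }
            c.increment();
        }
    }
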
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/counter/CounterValue.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/counter/CounterValue.java
new file mode 100644
index 0000000..1852d5c
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/counter/CounterValue.java
@@ -0,0 +1,102 @@
+/**
+*    Copyright 2011, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.counter;
+
+/**
+ * The class defines the counter value type and value
+ * 
+ * @author Kanzhe
+ *
+ */
+public class CounterValue { 
+  public enum CounterType {
+      LONG,
+      DOUBLE
+  }
+  
+  protected CounterType type; 
+  protected long longValue;
+  protected double doubleValue;
+  
+  public CounterValue(CounterType type) {
+    this.type = type;  // honor the requested type rather than forcing LONG
+    this.longValue = 0;    
+    this.doubleValue = 0.0;
+  }
+  
+  /**
+   * This method is only applicable to type long.
+   * setDoubleValue() should be used for type double.
+   */
+  public void increment(long delta) {
+      if (this.type == CounterType.LONG) {
+          this.longValue += delta;
+      } else {
+          throw new IllegalArgumentException("Invalid counter type. This counter is not a long type.");
+      }
+  }
+  
+  public void setLongValue(long value) {
+      if (this.type == CounterType.LONG) {
+          this.longValue = value;
+      } else {
+          throw new IllegalArgumentException("Invalid counter type. This counter is not a long type.");
+      }
+  }
+  
+  public void setDoubleValue(double value) {
+      if (this.type == CounterType.DOUBLE) {
+          this.doubleValue = value;
+      } else {
+          throw new IllegalArgumentException("Invalid counter type. This counter is not a double type.");
+      }
+  }
+  
+  public long getLong() {
+      if (this.type == CounterType.LONG) {
+          return this.longValue;
+      } else {
+          throw new IllegalArgumentException("Invalid counter type. This counter is not a long type.");
+      }
+  }
+  
+  public double getDouble() {
+      if (this.type == CounterType.DOUBLE) {
+          return this.doubleValue;
+      } else {
+          throw new IllegalArgumentException("Invalid counter type. This counter is not a double type.");
+      }
+  }
+  
+
+  public CounterType getType() {
+    return this.type;
+  }
+  
+  public String toString() {
+    String ret = "{type: ";
+    if (this.type == CounterType.DOUBLE) {
+        ret += "Double" + ", value: " + this.doubleValue + "}";
+    } else {
+        ret += "Long" + ", value: " + this.longValue + "}";
+    }
+    return ret;
+  }
+
+
+}
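
A small sketch (not part of the import) of CounterValue's type discipline: operations matching the declared CounterType succeed, while a mismatched accessor throws IllegalArgumentException.

    import net.floodlightcontroller.counter.CounterValue;

    public class CounterValueSketch {
        public static void main(String[] args) {
            CounterValue v = new CounterValue(CounterValue.CounterType.LONG);
            v.increment(5);                    // long-typed operation on a LONG value
            System.out.println(v.getLong());   // 5
            System.out.println(v);             // {type: Long, value: 5}
            try {
                v.getDouble();                 // wrong type for this counter
            } catch (IllegalArgumentException e) {
                System.out.println("double access rejected");
            }
        }
    }
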
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/counter/ICounter.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/counter/ICounter.java
new file mode 100644
index 0000000..625bebd
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/counter/ICounter.java
@@ -0,0 +1,80 @@
+/**
+*    Copyright 2011, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+/**
+ * Simple interface for a counter whose value can be retrieved in several different
+ * time increments (last x seconds, minutes, hours, days)
+ */
+package net.floodlightcontroller.counter;
+
+import java.util.Date;
+
+/**
+ * @author kyle
+ *
+ */
+public interface ICounter {
+  
+  /**
+   * Most commonly used method
+   */
+  public void increment();
+  
+  /**
+   * Used primarily for testing - no performance guarantees
+   */
+  public void increment(Date d, long delta);
+  
+  /**
+   * Counter value setter
+   */
+  public void setCounter(Date d, CounterValue value);
+  
+  /**
+   * Return the date of the most recent counter update
+   */
+  public Date getCounterDate();
+  
+  /**
+   * Return the most current value
+   */
+  public CounterValue getCounterValue();
+  
+  /**
+   * Reset the value
+   */
+  public void reset(Date d);
+  
+  /**
+   * Returns a CountSeries that is a snapshot of the counter's values for the given dateSpan.  (Further changes
+   * to this counter won't be reflected in the CountSeries that comes  back.)
+   * 
+   * @param dateSpan
+   * @return
+   */
+  public CountSeries snapshot(DateSpan dateSpan);
+  
+
+  public static enum DateSpan {
+    REALTIME,
+    SECONDS,
+    MINUTES,
+    HOURS,
+    DAYS,
+    WEEKS
+  }
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/counter/ICounterStoreService.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/counter/ICounterStoreService.java
new file mode 100644
index 0000000..c89eee0
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/counter/ICounterStoreService.java
@@ -0,0 +1,71 @@
+package net.floodlightcontroller.counter;
+
+import java.util.List;
+import java.util.Map;
+
+import org.openflow.protocol.OFMessage;
+
+import net.floodlightcontroller.core.IOFSwitch;
+import net.floodlightcontroller.core.module.IFloodlightService;
+import net.floodlightcontroller.counter.CounterStore.NetworkLayer;
+import net.floodlightcontroller.packet.Ethernet;
+
+public interface ICounterStoreService extends IFloodlightService {
+
+    public final static String CONTROLLER_NAME = "controller";
+    public final static String TitleDelimitor = "__";
+
+    /** Broadcast and multicast */
+    public final static String BROADCAST = "broadcast";
+    public final static String MULTICAST = "multicast";
+    public final static String UNICAST = "unicast";
+    
+    /** L3 protocol (EtherType) subCategories */
+    public final static String L3ET_IPV4 = "L3_IPv4";
+
+    /**
+     * Update packetIn counters
+     * 
+     * @param sw
+     * @param m
+     * @param eth
+     */
+    public void updatePacketInCounters(IOFSwitch sw, OFMessage m, Ethernet eth);
+    
+    /**
+     * This method can only be used to update packetOut and flowmod counters
+     * 
+     * @param sw
+     * @param ofMsg
+     */
+    public void updatePktOutFMCounterStore(IOFSwitch sw, OFMessage ofMsg);
+    
+    /**
+     * Retrieve a list of subCategories by counterName.
+     * null if nothing.
+     */
+    public List<String> getAllCategories(String counterName,
+                                         NetworkLayer layer);
+
+    /**
+     * Create a new ICounter and set the title.  Titles are expected to be
+     * unique; if a counter with the same title already exists, the existing
+     * counter is returned (see CounterStore#createCounter).
+     * 
+     * @param key
+     * @param type
+     * @return
+     */
+    public ICounter createCounter(String key, CounterValue.CounterType type);
+
+    /**
+     * Retrieves a counter with the given title, or null if none can be found.
+     */
+    public ICounter getCounter(String key);
+
+    /**
+     * Returns an immutable map of title:counter with all of the counters in the store.
+     * 
+     * (Note - this method may be slow - primarily for debugging/UI)
+     */
+    public Map<String, ICounter> getAll();
+}
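
A sketch (not part of the import) of how a message-handling module might feed this service. The onPacketIn/onMessageSent hook names are invented for illustration; they do not correspond to a Floodlight listener interface.

    import net.floodlightcontroller.core.IOFSwitch;
    import net.floodlightcontroller.counter.ICounterStoreService;
    import net.floodlightcontroller.packet.Ethernet;
    import org.openflow.protocol.OFMessage;

    public class CountingHandlerSketch {
        private final ICounterStoreService counterStore;

        public CountingHandlerSketch(ICounterStoreService counterStore) {
            this.counterStore = counterStore;
        }

        // Call for every PACKET_IN, with eth being the decoded Ethernet frame.
        public void onPacketIn(IOFSwitch sw, OFMessage m, Ethernet eth) {
            counterStore.updatePacketInCounters(sw, m, eth);
        }

        // Call whenever a PACKET_OUT or FLOW_MOD is sent to a switch.
        public void onMessageSent(IOFSwitch sw, OFMessage msg) {
            counterStore.updatePktOutFMCounterStore(sw, msg);
        }
    }
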
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/counter/NullCounterStore.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/counter/NullCounterStore.java
new file mode 100644
index 0000000..fed8c1e
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/counter/NullCounterStore.java
@@ -0,0 +1,104 @@
+package net.floodlightcontroller.counter;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.openflow.protocol.OFMessage;
+
+import net.floodlightcontroller.core.IOFSwitch;
+import net.floodlightcontroller.core.module.FloodlightModuleContext;
+import net.floodlightcontroller.core.module.FloodlightModuleException;
+import net.floodlightcontroller.core.module.IFloodlightModule;
+import net.floodlightcontroller.core.module.IFloodlightService;
+import net.floodlightcontroller.counter.CounterStore.NetworkLayer;
+import net.floodlightcontroller.counter.CounterValue.CounterType;
+import net.floodlightcontroller.packet.Ethernet;
+
+/**
+ * An ICounterStoreService implementation that does nothing.
+ * This is used mainly for performance testing or if you don't
+ * want to use the counterstore.
+ * @author alexreimers
+ *
+ */
+public class NullCounterStore implements IFloodlightModule,
+        ICounterStoreService {
+
+    private ICounter emptyCounter;
+    private List<String> emptyList;
+    private Map<String, ICounter> emptyMap;
+    
+    @Override
+    public void updatePacketInCounters(IOFSwitch sw, OFMessage m, Ethernet eth) {
+        // no-op
+    }
+
+    @Override
+    public void updatePktOutFMCounterStore(IOFSwitch sw, OFMessage ofMsg) {
+        // no-op
+    }
+
+    @Override
+    public List<String>
+            getAllCategories(String counterName, NetworkLayer layer) {
+        return emptyList;
+    }
+
+    @Override
+    public ICounter createCounter(String key, CounterType type) {
+        return emptyCounter;
+    }
+
+    @Override
+    public ICounter getCounter(String key) {
+        return emptyCounter;
+    }
+
+    @Override
+    public Map<String, ICounter> getAll() {
+        return emptyMap;
+    }
+
+    @Override
+    public Collection<Class<? extends IFloodlightService>> getModuleServices() {
+        Collection<Class<? extends IFloodlightService>> services =
+                new ArrayList<Class<? extends IFloodlightService>>(1);
+        services.add(ICounterStoreService.class);
+        return services;
+    }
+
+    @Override
+    public Map<Class<? extends IFloodlightService>, IFloodlightService>
+            getServiceImpls() {
+        Map<Class<? extends IFloodlightService>,
+            IFloodlightService> m = 
+                new HashMap<Class<? extends IFloodlightService>,
+                        IFloodlightService>();
+        m.put(ICounterStoreService.class, this);
+        return m;
+    }
+
+    @Override
+    public Collection<Class<? extends IFloodlightService>>
+            getModuleDependencies() {
+        // None, return null
+        return null;
+    }
+
+    @Override
+    public void init(FloodlightModuleContext context)
+                             throws FloodlightModuleException {
+        emptyCounter = new SimpleCounter(new Date(), CounterType.LONG);
+        emptyList = new ArrayList<String>();
+        emptyMap = new HashMap<String, ICounter>();
+    }
+
+    @Override
+    public void startUp(FloodlightModuleContext context) {
+        // no-op
+    }
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/counter/SimpleCounter.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/counter/SimpleCounter.java
new file mode 100644
index 0000000..01a0428
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/counter/SimpleCounter.java
@@ -0,0 +1,137 @@
+/**
+*    Copyright 2011, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+/**
+ * 
+ */
+package net.floodlightcontroller.counter;
+
+import java.util.Date;
+
+
+
+/**
+ * This is a simple counter implementation that doesn't support data series.
+ * The idea is that floodlight only keeps the realtime value for each counter;
+ * statd, a statistics collection daemon, samples counters at a user-defined interval
+ * and pushes the values to a database, which keeps the time-based data series.
+ * @author Kanzhe
+ *
+ */
+public class SimpleCounter implements ICounter {
+
+  protected CounterValue counter;
+  protected Date samplingTime;
+  protected Date startDate;
+  
+  /**
+   * Factory method to create a new counter instance.  
+   * 
+   * @param startDate
+   * @return
+   */
+  public static ICounter createCounter(Date startDate, CounterValue.CounterType type) {
+    SimpleCounter cc = new SimpleCounter(startDate, type);
+    return cc;
+  }
+  
+  /**
+   * Factory method to create a copy of a counter instance.  
+   * 
+   * @param startDate
+   * @return
+   */
+  public static ICounter createCounter(ICounter copy) {
+    if (copy == null ||
+        copy.getCounterDate() == null ||
+        copy.getCounterValue() == null) {
+        return null;
+    }
+
+     SimpleCounter cc = new SimpleCounter(copy.getCounterDate(),
+            copy.getCounterValue().getType());
+     cc.setCounter(copy.getCounterDate(), copy.getCounterValue());
+     return cc;
+  }
+  
+  /**
+   * Protected constructor - use createCounter factory method instead
+   * @param startDate
+   */
+  protected SimpleCounter(Date startDate, CounterValue.CounterType type) {
+    init(startDate, type);
+  }
+  
+  protected void init(Date startDate, CounterValue.CounterType type) {
+    this.startDate = startDate;
+    this.samplingTime = new Date();
+    this.counter = new CounterValue(type);
+  }
+  
+  /**
+   * This is the key method that has to be both fast and very thread-safe.
+   */
+  @Override
+  synchronized public void increment() {
+    this.increment(new Date(), (long)1);
+  }
+  
+  @Override
+  synchronized public void increment(Date d, long delta) {
+    this.samplingTime = d;
+    this.counter.increment(delta);
+  }
+  
+  synchronized public void setCounter(Date d, CounterValue value) {
+      this.samplingTime = d;
+      this.counter = value;
+  }
+  
+  /**
+   * This is the method to retrieve the current value.
+   */
+  @Override
+  synchronized public CounterValue getCounterValue() {
+    return this.counter;
+  }
+
+  /**
+   * This is the method to retrieve the last sampling time.
+   */
+  @Override
+  synchronized public Date getCounterDate() {
+    return this.samplingTime;
+  }
+  
+  /**
+   * Reset value.
+   */
+  @Override
+  synchronized public void reset(Date startDate) {
+    init(startDate, this.counter.getType());
+  }
+  
+  @Override
+  /**
+   * This method only returns the real-time value.
+   */
+  synchronized public CountSeries snapshot(DateSpan dateSpan) {
+    long[] values = new long[1];
+    values[0] = this.counter.getLong();
+    return new CountSeries(this.samplingTime, DateSpan.DAYS, values);
+  }
+}
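
A usage sketch (not part of the import) for SimpleCounter's factory methods; the values in the comments follow from the implementation above.

    import java.util.Date;

    import net.floodlightcontroller.counter.CounterValue;
    import net.floodlightcontroller.counter.ICounter;
    import net.floodlightcontroller.counter.SimpleCounter;

    public class SimpleCounterSketch {
        public static void main(String[] args) {
            ICounter c = SimpleCounter.createCounter(new Date(),
                                                     CounterValue.CounterType.LONG);
            c.increment();                             // value is now 1
            c.increment(new Date(), 10);               // value is now 11
            System.out.println(c.getCounterValue());   // {type: Long, value: 11}
            System.out.println(c.getCounterDate());    // time of the last increment
        }
    }

Note that, as written above, createCounter(ICounter copy) installs the source's CounterValue by reference (setCounter() does not clone it), so the "copy" keeps tracking the original counter.
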
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/counter/TypeAliases.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/counter/TypeAliases.java
new file mode 100644
index 0000000..0d7e2b5
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/counter/TypeAliases.java
@@ -0,0 +1,190 @@
+package net.floodlightcontroller.counter;
+
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * Class to contain some statically initialized data
+ * @author readams
+ *
+ */
+public class TypeAliases {
+    protected static final Map<String,String> l3TypeAliasMap = 
+            new HashMap<String, String>();
+    static {
+        l3TypeAliasMap.put("0599", "L3_V1Ether");
+        l3TypeAliasMap.put("0800", "L3_IPv4");
+        l3TypeAliasMap.put("0806", "L3_ARP");
+        l3TypeAliasMap.put("8035", "L3_RARP");
+        l3TypeAliasMap.put("809b", "L3_AppleTalk");
+        l3TypeAliasMap.put("80f3", "L3_AARP");
+        l3TypeAliasMap.put("8100", "L3_802_1Q");
+        l3TypeAliasMap.put("8137", "L3_Novell_IPX");
+        l3TypeAliasMap.put("8138", "L3_Novell");
+        l3TypeAliasMap.put("86dd", "L3_IPv6");
+        l3TypeAliasMap.put("8847", "L3_MPLS_uni");
+        l3TypeAliasMap.put("8848", "L3_MPLS_multi");
+        l3TypeAliasMap.put("8863", "L3_PPPoE_DS");
+        l3TypeAliasMap.put("8864", "L3_PPPoE_SS");
+        l3TypeAliasMap.put("886f", "L3_MSFT_NLB");
+        l3TypeAliasMap.put("8870", "L3_Jumbo");
+        l3TypeAliasMap.put("889a", "L3_HyperSCSI");
+        l3TypeAliasMap.put("88a2", "L3_ATA_Ethernet");
+        l3TypeAliasMap.put("88a4", "L3_EtherCAT");
+        l3TypeAliasMap.put("88a8", "L3_802_1ad");
+        l3TypeAliasMap.put("88ab", "L3_Ether_Powerlink");
+        l3TypeAliasMap.put("88cc", "L3_LLDP");
+        l3TypeAliasMap.put("88cd", "L3_SERCOS_III");
+        l3TypeAliasMap.put("88e5", "L3_802_1ae");
+        l3TypeAliasMap.put("88f7", "L3_IEEE_1588");
+        l3TypeAliasMap.put("8902", "L3_802_1ag_CFM");
+        l3TypeAliasMap.put("8906", "L3_FCoE");
+        l3TypeAliasMap.put("9000", "L3_Loop");
+        l3TypeAliasMap.put("9100", "L3_Q_in_Q");
+        l3TypeAliasMap.put("cafe", "L3_LLT");
+    }
+    
+    protected static final Map<String,String> l4TypeAliasMap = 
+            new HashMap<String, String>();
+    static {
+        l4TypeAliasMap.put("00", "L4_HOPOPT");
+        l4TypeAliasMap.put("01", "L4_ICMP");
+        l4TypeAliasMap.put("02", "L4_IGAP_IGMP_RGMP");
+        l4TypeAliasMap.put("03", "L4_GGP");
+        l4TypeAliasMap.put("04", "L4_IP");
+        l4TypeAliasMap.put("05", "L4_ST");
+        l4TypeAliasMap.put("06", "L4_TCP");
+        l4TypeAliasMap.put("07", "L4_UCL");
+        l4TypeAliasMap.put("08", "L4_EGP");
+        l4TypeAliasMap.put("09", "L4_IGRP");
+        l4TypeAliasMap.put("0a", "L4_BBN");
+        l4TypeAliasMap.put("0b", "L4_NVP");
+        l4TypeAliasMap.put("0c", "L4_PUP");
+        l4TypeAliasMap.put("0d", "L4_ARGUS");
+        l4TypeAliasMap.put("0e", "L4_EMCON");
+        l4TypeAliasMap.put("0f", "L4_XNET");
+        l4TypeAliasMap.put("10", "L4_Chaos");
+        l4TypeAliasMap.put("11", "L4_UDP");
+        l4TypeAliasMap.put("12", "L4_TMux");
+        l4TypeAliasMap.put("13", "L4_DCN");
+        l4TypeAliasMap.put("14", "L4_HMP");
+        l4TypeAliasMap.put("15", "L4_Packet_Radio");
+        l4TypeAliasMap.put("16", "L4_XEROX_NS_IDP");
+        l4TypeAliasMap.put("17", "L4_Trunk_1");
+        l4TypeAliasMap.put("18", "L4_Trunk_2");
+        l4TypeAliasMap.put("19", "L4_Leaf_1");
+        l4TypeAliasMap.put("1a", "L4_Leaf_2");
+        l4TypeAliasMap.put("1b", "L4_RDP");
+        l4TypeAliasMap.put("1c", "L4_IRTP");
+        l4TypeAliasMap.put("1d", "L4_ISO_TP4");
+        l4TypeAliasMap.put("1e", "L4_NETBLT");
+        l4TypeAliasMap.put("1f", "L4_MFE");
+        l4TypeAliasMap.put("20", "L4_MERIT");
+        l4TypeAliasMap.put("21", "L4_DCCP");
+        l4TypeAliasMap.put("22", "L4_Third_Party_Connect");
+        l4TypeAliasMap.put("23", "L4_IDPR");
+        l4TypeAliasMap.put("24", "L4_XTP");
+        l4TypeAliasMap.put("25", "L4_Datagram_Delivery");
+        l4TypeAliasMap.put("26", "L4_IDPR");
+        l4TypeAliasMap.put("27", "L4_TP");
+        l4TypeAliasMap.put("28", "L4_ILTP");
+        l4TypeAliasMap.put("29", "L4_IPv6_over_IPv4");
+        l4TypeAliasMap.put("2a", "L4_SDRP");
+        l4TypeAliasMap.put("2b", "L4_IPv6_RH");
+        l4TypeAliasMap.put("2c", "L4_IPv6_FH");
+        l4TypeAliasMap.put("2d", "L4_IDRP");
+        l4TypeAliasMap.put("2e", "L4_RSVP");
+        l4TypeAliasMap.put("2f", "L4_GRE");
+        l4TypeAliasMap.put("30", "L4_DSR");
+        l4TypeAliasMap.put("31", "L4_BNA");
+        l4TypeAliasMap.put("32", "L4_ESP");
+        l4TypeAliasMap.put("33", "L4_AH");
+        l4TypeAliasMap.put("34", "L4_I_NLSP");
+        l4TypeAliasMap.put("35", "L4_SWIPE");
+        l4TypeAliasMap.put("36", "L4_NARP");
+        l4TypeAliasMap.put("37", "L4_Minimal_Encapsulation");
+        l4TypeAliasMap.put("38", "L4_TLSP");
+        l4TypeAliasMap.put("39", "L4_SKIP");
+        l4TypeAliasMap.put("3a", "L4_ICMPv6");
+        l4TypeAliasMap.put("3b", "L4_IPv6_No_Next_Header");
+        l4TypeAliasMap.put("3c", "L4_IPv6_Destination_Options");
+        l4TypeAliasMap.put("3d", "L4_Any_host_IP");
+        l4TypeAliasMap.put("3e", "L4_CFTP");
+        l4TypeAliasMap.put("3f", "L4_Any_local");
+        l4TypeAliasMap.put("40", "L4_SATNET");
+        l4TypeAliasMap.put("41", "L4_Kryptolan");
+        l4TypeAliasMap.put("42", "L4_MIT_RVDP");
+        l4TypeAliasMap.put("43", "L4_Internet_Pluribus");
+        l4TypeAliasMap.put("44", "L4_Distributed_FS");
+        l4TypeAliasMap.put("45", "L4_SATNET");
+        l4TypeAliasMap.put("46", "L4_VISA");
+        l4TypeAliasMap.put("47", "L4_IP_Core");
+        l4TypeAliasMap.put("4a", "L4_Wang_Span");
+        l4TypeAliasMap.put("4b", "L4_Packet_Video");
+        l4TypeAliasMap.put("4c", "L4_Backroom_SATNET");
+        l4TypeAliasMap.put("4d", "L4_SUN_ND");
+        l4TypeAliasMap.put("4e", "L4_WIDEBAND_Monitoring");
+        l4TypeAliasMap.put("4f", "L4_WIDEBAND_EXPAK");
+        l4TypeAliasMap.put("50", "L4_ISO_IP");
+        l4TypeAliasMap.put("51", "L4_VMTP");
+        l4TypeAliasMap.put("52", "L4_SECURE_VMTP");
+        l4TypeAliasMap.put("53", "L4_VINES");
+        l4TypeAliasMap.put("54", "L4_TTP");
+        l4TypeAliasMap.put("55", "L4_NSFNET_IGP");
+        l4TypeAliasMap.put("56", "L4_Dissimilar_GP");
+        l4TypeAliasMap.put("57", "L4_TCF");
+        l4TypeAliasMap.put("58", "L4_EIGRP");
+        l4TypeAliasMap.put("59", "L4_OSPF");
+        l4TypeAliasMap.put("5a", "L4_Sprite_RPC");
+        l4TypeAliasMap.put("5b", "L4_Locus_ARP");
+        l4TypeAliasMap.put("5c", "L4_MTP");
+        l4TypeAliasMap.put("5d", "L4_AX");
+        l4TypeAliasMap.put("5e", "L4_IP_within_IP");
+        l4TypeAliasMap.put("5f", "L4_Mobile_ICP");
+        l4TypeAliasMap.put("61", "L4_EtherIP");
+        l4TypeAliasMap.put("62", "L4_Encapsulation_Header");
+        l4TypeAliasMap.put("64", "L4_GMTP");
+        l4TypeAliasMap.put("65", "L4_IFMP");
+        l4TypeAliasMap.put("66", "L4_PNNI");
+        l4TypeAliasMap.put("67", "L4_PIM");
+        l4TypeAliasMap.put("68", "L4_ARIS");
+        l4TypeAliasMap.put("69", "L4_SCPS");
+        l4TypeAliasMap.put("6a", "L4_QNX");
+        l4TypeAliasMap.put("6b", "L4_Active_Networks");
+        l4TypeAliasMap.put("6c", "L4_IPPCP");
+        l4TypeAliasMap.put("6d", "L4_SNP");
+        l4TypeAliasMap.put("6e", "L4_Compaq_Peer_Protocol");
+        l4TypeAliasMap.put("6f", "L4_IPX_in_IP");
+        l4TypeAliasMap.put("70", "L4_VRRP");
+        l4TypeAliasMap.put("71", "L4_PGM");
+        l4TypeAliasMap.put("72", "L4_0_hop");
+        l4TypeAliasMap.put("73", "L4_L2TP");
+        l4TypeAliasMap.put("74", "L4_DDX");
+        l4TypeAliasMap.put("75", "L4_IATP");
+        l4TypeAliasMap.put("76", "L4_ST");
+        l4TypeAliasMap.put("77", "L4_SRP");
+        l4TypeAliasMap.put("78", "L4_UTI");
+        l4TypeAliasMap.put("79", "L4_SMP");
+        l4TypeAliasMap.put("7a", "L4_SM");
+        l4TypeAliasMap.put("7b", "L4_PTP");
+        l4TypeAliasMap.put("7c", "L4_ISIS");
+        l4TypeAliasMap.put("7d", "L4_FIRE");
+        l4TypeAliasMap.put("7e", "L4_CRTP");
+        l4TypeAliasMap.put("7f", "L4_CRUDP");
+        l4TypeAliasMap.put("80", "L4_SSCOPMCE");
+        l4TypeAliasMap.put("81", "L4_IPLT");
+        l4TypeAliasMap.put("82", "L4_SPS");
+        l4TypeAliasMap.put("83", "L4_PIPE");
+        l4TypeAliasMap.put("84", "L4_SCTP");
+        l4TypeAliasMap.put("85", "L4_Fibre_Channel");
+        l4TypeAliasMap.put("86", "L4_RSVP_E2E_IGNORE");
+        l4TypeAliasMap.put("87", "L4_Mobility_Header");
+        l4TypeAliasMap.put("88", "L4_UDP_Lite");
+        l4TypeAliasMap.put("89", "L4_MPLS");
+        l4TypeAliasMap.put("8a", "L4_MANET");
+        l4TypeAliasMap.put("8b", "L4_HIP");
+        l4TypeAliasMap.put("8c", "L4_Shim6");
+        l4TypeAliasMap.put("8d", "L4_WESP");
+        l4TypeAliasMap.put("8e", "L4_ROHC");
+    }
+}
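
A lookup sketch (not part of the import): the alias maps are keyed by lower-case hex strings, so callers must format raw protocol numbers the way CounterStore does for L4 ("%02x"); the four-digit "%04x" formatting for EtherTypes is an assumption that matches the keys above. The class is placed in the same package because the maps are not public.

    package net.floodlightcontroller.counter;

    public class TypeAliasSketch {
        public static void main(String[] args) {
            short etherType = 0x0800;   // IPv4
            byte ipProtocol = 0x06;     // TCP

            String l3Key = String.format("%04x", etherType);    // "0800"
            String l4Key = String.format("%02x", ipProtocol);   // "06"

            System.out.println(TypeAliases.l3TypeAliasMap.get(l3Key));  // L3_IPv4
            System.out.println(TypeAliases.l4TypeAliasMap.get(l4Key));  // L4_TCP
        }
    }
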
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/devicemanager/IDevice.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/devicemanager/IDevice.java
new file mode 100644
index 0000000..95969f8
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/devicemanager/IDevice.java
@@ -0,0 +1,98 @@
+/**
+*    Copyright 2011,2012 Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.devicemanager;
+
+import java.util.Date;
+
+
+/**
+ * Represents an independent device on the network.  A device consists of a 
+ * set of entities, and all the information known about a given device comes
+ * only from merging all associated entities for that device.
+ * @author readams
+ */
+public interface IDevice {
+    /**
+     * Get the primary key for this device.
+     * @return the primary key
+     */
+    public Long getDeviceKey();
+    
+    /**
+     * Get the MAC address of the device as a Long value.
+     * @return the MAC address for the device
+     */
+    public long getMACAddress();
+
+    /**
+     * Get the MAC address of the device as a String value.
+     * @return the MAC address for the device
+     */
+    public String getMACAddressString();
+    
+    /**
+     * Get all unique VLAN IDs for the device.  If the device has untagged 
+     * entities, then the value -1 will be returned.
+     * @return an array containing all unique VLAN IDs for the device.
+     */
+    public Short[] getVlanId();
+    
+    /**
+     * Get all unique IPv4 addresses associated with the device.
+     * @return an array containing the unique IPv4 addresses for the device.
+     */
+    public Integer[] getIPv4Addresses();
+    
+    /**
+     * Get all unique attachment points associated with the device.  This will
+     * not include any blocked attachment points.
+     * @return an array containing all unique attachment points for the device
+     */
+    public SwitchPort[] getAttachmentPoints();
+    
+    /**
+     * Get all unique attachment points associated with the device.
+     * @param includeError whether to include blocked attachment points.
+     * Blocked attachment points should not be used for forwarding, but
+     * could be useful to show to a user
+     * @return an array containing all unique attachment points for the device
+     */
+    public SwitchPort[] getAttachmentPoints(boolean includeError);
+
+    /**
+     * Returns all unique VLAN IDs for the device that were observed on 
+     * the given switch port
+     * @param swp the switch port to query
+     * @return an array containing the unique VLAN IDs
+     */
+    public Short[] getSwitchPortVlanIds(SwitchPort swp);
+    
+    /**
+     * Get the most recent timestamp for this device
+     * @return the last seen timestamp
+     */
+    public Date getLastSeen();
+    
+    /**
+     * Get the entity class for the device.
+     * @return the entity class
+     * @see IEntityClassifierService
+     */
+    public IEntityClass getEntityClass();
+    
+}
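
A read-only sketch (not part of the import) that summarizes an IDevice using the accessors above; SwitchPort entries are printed via toString() since its API is defined in a later file.

    import java.util.Arrays;

    import net.floodlightcontroller.devicemanager.IDevice;

    public class DeviceSummarySketch {
        public static String summarize(IDevice d) {
            return String.format("device %d mac=%s vlans=%s ips=%s aps=%s lastSeen=%s",
                    d.getDeviceKey(),
                    d.getMACAddressString(),
                    Arrays.toString(d.getVlanId()),
                    Arrays.toString(d.getIPv4Addresses()),
                    Arrays.toString(d.getAttachmentPoints()),
                    d.getLastSeen());
        }
    }
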
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/devicemanager/IDeviceListener.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/devicemanager/IDeviceListener.java
new file mode 100644
index 0000000..3c3d599
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/devicemanager/IDeviceListener.java
@@ -0,0 +1,61 @@
+/**
+*    Copyright 2011, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.devicemanager;
+
+/**
+ * Implementors of this interface can receive updates from DeviceManager about
+ * the state of devices under its control.
+ *
+ * @author David Erickson (daviderickson@cs.stanford.edu)
+ */
+public interface IDeviceListener {
+    /**
+     * Called when a new Device is found
+     * @param device the device that changed
+     */
+    public void deviceAdded(IDevice device);
+
+    /**
+     * Called when a Device is removed, this typically occurs when the port the
+     * Device is attached to goes down, or the switch it is attached to is
+     * removed.
+     * @param device the device that changed
+     */
+    public void deviceRemoved(IDevice device);
+
+    /**
+     * Called when a Device has moved to a new location on the network. Note
+     * that either the switch or the port or both has changed.
+     *
+     * @param device the device that changed
+     */
+    public void deviceMoved(IDevice device);
+    
+    /**
+     * Called when a network address has been added to or removed from a device
+     * 
+     * @param device the device that changed
+     */
+    public void deviceIPV4AddrChanged(IDevice device);
+    
+    /**
+     * Called when a VLAN tag for the device has been added or removed
+     * @param device the device that changed
+     */
+    public void deviceVlanChanged(IDevice device);
+}
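
A minimal listener sketch (not part of the import) that only logs the affected device; registration would go through IDeviceService.addListener(), defined in the next file.

    import net.floodlightcontroller.devicemanager.IDevice;
    import net.floodlightcontroller.devicemanager.IDeviceListener;

    public class LoggingDeviceListener implements IDeviceListener {
        @Override public void deviceAdded(IDevice device)           { log("added", device); }
        @Override public void deviceRemoved(IDevice device)         { log("removed", device); }
        @Override public void deviceMoved(IDevice device)           { log("moved", device); }
        @Override public void deviceIPV4AddrChanged(IDevice device) { log("ipv4 changed", device); }
        @Override public void deviceVlanChanged(IDevice device)     { log("vlan changed", device); }

        private void log(String what, IDevice device) {
            System.out.println("device " + what + ": " + device.getMACAddressString());
        }
    }
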
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/devicemanager/IDeviceService.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/devicemanager/IDeviceService.java
new file mode 100755
index 0000000..ad29a94
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/devicemanager/IDeviceService.java
@@ -0,0 +1,202 @@
+/**
+*    Copyright 2011,2012, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.devicemanager;
+
+import java.util.Collection;
+import java.util.EnumSet;
+import java.util.Iterator;
+
+import net.floodlightcontroller.core.FloodlightContextStore;
+import net.floodlightcontroller.core.module.IFloodlightService;
+
+/**
+ * Device manager allows interacting with devices on the network.  Note
+ * that under normal circumstances, {@link Device} objects should be retrieved
+ * from the {@link FloodlightContext} rather than from {@link IDeviceManager}.
+ */
+public interface IDeviceService extends IFloodlightService {
+    /**
+     * Fields used in devices for indexes and querying
+     * @see IDeviceService#addIndex
+     */
+    enum DeviceField {
+        MAC, IPV4, VLAN, SWITCH, PORT
+    }
+
+    /**
+     * The source device for the current packet-in, if applicable.
+     */
+    public static final String CONTEXT_SRC_DEVICE = 
+            "net.floodlightcontroller.devicemanager.srcDevice"; 
+
+    /**
+     * The destination device for the current packet-in, if applicable.
+     */
+    public static final String CONTEXT_DST_DEVICE = 
+            "net.floodlightcontroller.devicemanager.dstDevice"; 
+
+    /**
+     * A FloodlightContextStore object that can be used to interact with the 
+     * FloodlightContext information created by BVS manager.
+     */
+    public static final FloodlightContextStore<IDevice> fcStore = 
+        new FloodlightContextStore<IDevice>();
+
+    /**
+     * Get the device with the given device key.
+     * 
+     * @param deviceKey the key to search for
+     * @return the device associated with the key, or null if no such device
+     * @see IDevice#getDeviceKey()
+     */
+    public IDevice getDevice(Long deviceKey);
+    
+    /**
+     * Search for a device exactly matching the provided device fields. This 
+     * is the same lookup process that is used for packet_in processing and 
+     * device learning. Thus, findDevice() can be used to match flow entries
+     * from switches to devices. 
+     * Only the key fields as defined by the {@link IEntityClassifierService} will
+     * be important in this search. All key fields MUST be supplied. 
+     * 
+     * If only some of the fields are known, {@link #queryDevices} might be more appropriate.
+     * 
+     * @param macAddress The MAC address
+     * @param vlan the VLAN. Null means no VLAN and is valid even if VLAN is a 
+     *        key field.
+     * @param ipv4Address the ipv4 address
+     * @param switchDPID the switch DPID
+     * @param switchPort the switch port
+     * @return an {@link IDevice} or null if no device is found.
+     * @see IDeviceManager#setEntityClassifier(IEntityClassifierService)
+     * @throws IllegalArgumentException if not all key fields of the
+     * current {@link IEntityClassifierService} are specified.
+     */
+    public IDevice findDevice(long macAddress, Short vlan,
+                              Integer ipv4Address, Long switchDPID,
+                              Integer switchPort)
+                              throws IllegalArgumentException;
+    
+    /**
+     * Get a destination device using entity fields that corresponds with
+     * the given source device.  The source device is important since
+     * there could be ambiguity in the destination device without the
+     * attachment point information. 
+     * 
+     * @param source the source device.  The returned destination will be
+     * in the same entity class as the source.
+     * @param macAddress The MAC address for the destination
+     * @param vlan the VLAN if available
+     * @param ipv4Address The IP address if available.
+     * @return an {@link IDevice} or null if no device is found.
+     * @see IDeviceService#findDevice(long, Short, Integer, Long, 
+     * Integer)
+     * @throws IllegalArgumentException if not all key fields of the
+     * source's {@link IEntityClass} are specified.
+     */
+    public IDevice findDestDevice(IDevice source,
+                                  long macAddress, Short vlan,
+                                  Integer ipv4Address)
+                                  throws IllegalArgumentException;
+
+    /**
+     * Get an unmodifiable collection view over all devices currently known.
+     * @return the collection of all devices
+     */
+    public Collection<? extends IDevice> getAllDevices();
+
+    /**
+     * Create an index over a set of fields.  This allows efficient lookup
+     * of devices when querying using the indexed set of specified fields.
+     * The index must be registered before any device learning takes place,
+     * or it may be incomplete.  It's OK if this is called multiple times with
+     * the same fields; only one index will be created for each unique set of 
+     * fields.
+     * 
+     * @param perClass set to true if the index should be maintained for each
+     * entity class separately.
+     * @param keyFields the set of fields on which to index
+     */
+    public void addIndex(boolean perClass,
+                         EnumSet<DeviceField> keyFields);
+    
+    /**
+     * Find devices that match the provided query.  Any fields that are
+     * null will not be included in the query.  If there is an index for 
+     * the query, then it will be performed efficiently using the index.
+     * Otherwise, there will be a full scan of the device list.
+     * 
+     * @param macAddress The MAC address
+     * @param vlan the VLAN
+     * @param ipv4Address the ipv4 address
+     * @param switchDPID the switch DPID
+     * @param switchPort the switch port
+     * @return an iterator over a set of devices matching the query
+     * @see IDeviceService#queryClassDevices(IEntityClass, Long, 
+     * Short, Integer, Long, Integer)
+     */
+    public Iterator<? extends IDevice> queryDevices(Long macAddress,
+                                                    Short vlan,
+                                                    Integer ipv4Address, 
+                                                    Long switchDPID,
+                                                    Integer switchPort);
+
+    /**
+     * Find devices that match the provided query.  Only the index for
+     * the class of the specified reference device will be searched.  
+     * Any fields that are null will not be included in the query.  If
+     * there is an index for the query, then it will be performed
+     * efficiently using the index. Otherwise, there will be a full scan
+     * of the device list.
+     * 
+     * @param reference The reference device to refer to when finding
+     * entity classes.
+     * @param macAddress The MAC address
+     * @param vlan the VLAN
+     * @param ipv4Address the ipv4 address
+     * @param switchDPID the switch DPID
+     * @param switchPort the switch port
+     * @return an iterator over a set of devices matching the query
+     * @see IDeviceService#queryClassDevices(Long, 
+     * Short, Integer, Long, Integer)
+     */
+    public Iterator<? extends IDevice> queryClassDevices(IDevice reference,
+                                                         Long macAddress,
+                                                         Short vlan,
+                                                         Integer ipv4Address, 
+                                                         Long switchDPID,
+                                                         Integer switchPort);
+    
+    /**
+     * Adds a listener to listen for IDeviceService notifications
+     * 
+     * @param listener The listener that wants the notifications
+     */
+    public void addListener(IDeviceListener listener);
+    
+    /**
+     * Specify points in the network where attachment points are not to
+     * be learned.
+     * @param swId the DPID of the switch
+     * @param port the port number on that switch
+     */
+    public void addSuppressAPs(long swId, short port);
+
+    public void removeSuppressAPs(long swId, short port);
+
+}
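
A lookup sketch (not part of the import): findDevice() requires every key field of the active entity classifier, while queryDevices() treats null fields as wildcards, so the latter fits partial information such as "every device carrying this IPv4 address".

    import java.util.Iterator;

    import net.floodlightcontroller.devicemanager.IDevice;
    import net.floodlightcontroller.devicemanager.IDeviceService;

    public class DeviceLookupSketch {
        private final IDeviceService deviceService;

        public DeviceLookupSketch(IDeviceService deviceService) {
            this.deviceService = deviceService;
        }

        // Wildcard query: null fields are ignored, so this matches on IPv4 only.
        public void printDevicesWithIp(Integer ipv4Address) {
            Iterator<? extends IDevice> it =
                    deviceService.queryDevices(null, null, ipv4Address, null, null);
            while (it.hasNext()) {
                System.out.println(it.next().getMACAddressString());
            }
        }
    }
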
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/devicemanager/IEntityClass.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/devicemanager/IEntityClass.java
new file mode 100644
index 0000000..bb077f1
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/devicemanager/IEntityClass.java
@@ -0,0 +1,59 @@
+/**
+*    Copyright 2011,2012, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.devicemanager;
+
+import java.util.EnumSet;
+
+import net.floodlightcontroller.devicemanager.IDeviceService.DeviceField;
+import net.floodlightcontroller.devicemanager.internal.Device;
+
+/**
+ * Entities within an entity class are grouped into {@link Device} objects
+ * based on the {@link IEntityClass}, and the key fields specified by the entity 
+ * class. A set of entities are considered to be the same device if and only 
+ * if they belong to the same entity class and they match on all key fields 
+ * for that entity class. A field is effectively wildcarded by not including 
+ * it in the list of key fields returned by {@link IEntityClassifierService} and/or 
+ * {@link IEntityClass}.
+ * 
+ * Note that if you're not using static objects, you'll need to override
+ * {@link Object#equals(Object)} and {@link Object#hashCode()}.
+ * 
+ * @author readams
+ *
+ */
+public interface IEntityClass {
+    /**
+     * Return the set of key fields for this entity class.  Entities 
+     * belonging to this class that differ in fields not included in 
+     * this collection will be considered the same device.  The key 
+     * fields for an entity class must not change unless associated 
+     * with a flush of that entity class.
+     * 
+     * @return a set containing the fields that should not
+     * be wildcarded.  May be null to indicate that all fields are key fields.
+     */
+    EnumSet<DeviceField> getKeyFields();
+    
+    /**
+     * Returns a user-friendly, unique name for this EntityClass
+     * @return the name of the entity class
+     */
+    String getName();
+}
+
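
A trivial sketch (not part of the import) of an IEntityClass whose devices are keyed on MAC and VLAN only; the class name is illustrative.

    import java.util.EnumSet;

    import net.floodlightcontroller.devicemanager.IDeviceService.DeviceField;
    import net.floodlightcontroller.devicemanager.IEntityClass;

    public class MacVlanEntityClass implements IEntityClass {
        @Override
        public EnumSet<DeviceField> getKeyFields() {
            return EnumSet.of(DeviceField.MAC, DeviceField.VLAN);
        }

        @Override
        public String getName() {
            return "mac-vlan";
        }
    }
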
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/devicemanager/IEntityClassListener.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/devicemanager/IEntityClassListener.java
new file mode 100644
index 0000000..6029af1
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/devicemanager/IEntityClassListener.java
@@ -0,0 +1,35 @@
+/**
+*    Copyright 2011, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.devicemanager;
+
+import java.util.Set;
+
+/**
+ * Implementors of this interface can receive updates from DeviceManager about
+ * changes to entity classes.
+ *
+ * @author Ananth Suryanarayana (Ananth.Suryanarayana@bigswitch.com)
+ */
+public interface IEntityClassListener {
+
+    /**
+     * Process entity classes change event.
+     * @param  entityClassNames Set of entity classes changed
+     */
+    public void entityClassChanged(Set<String> entityClassNames);
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/devicemanager/IEntityClassifierService.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/devicemanager/IEntityClassifierService.java
new file mode 100644
index 0000000..2569a7d
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/devicemanager/IEntityClassifierService.java
@@ -0,0 +1,108 @@
+/**
+*    Copyright 2011,2012, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.devicemanager;
+
+import java.util.Collection;
+import java.util.EnumSet;
+
+import net.floodlightcontroller.core.module.IFloodlightService;
+import net.floodlightcontroller.devicemanager.IDeviceService.DeviceField;
+import net.floodlightcontroller.devicemanager.internal.Entity;
+
+/**
+ * A component that wishes to participate in entity classification needs to 
+ * implement the IEntityClassifier interface, and register with the Device
+ * Manager as an entity classifier. An entity is classified by the classifier
+ * into an {@link IEntityClass} 
+ * 
+ * @author readams
+ */
+public interface IEntityClassifierService extends IFloodlightService {
+    /**
+    * Classify the given entity into an IEntityClass.  It is important
+    * that the key fields returned by {@link IEntityClassifierService#getKeyFields()}
+    * be sufficient for classifying entities.  That is, if two entities are
+    * identical except for a field that is not a key field, they must be
+    * assigned the same class.  Furthermore, entity classification must be
+    * transitive: For all entities x, y, z, if x and y belong to a class c, and 
+    * y and z belong to class c, then x and z must belong to class c.
+    * 
+    * @param entity the entity to classify
+    * @return the IEntityClass resulting from the classification.
+    * @see IEntityClassifierService#getKeyFields()
+    */
+   IEntityClass classifyEntity(Entity entity);
+
+   /**
+    * Return the most general list of fields that should be used as key 
+    * fields.  If devices differ in any fields not listed here, they can
+    * never be considered different devices by any {@link IEntityClass} 
+    * returned by {@link IEntityClassifierService#classifyEntity}.  The key fields
+    * for an entity classifier must not change unless associated with a 
+    * flush of all entity state.  The list of key fields must be the union
+    * of all key fields that could be returned by
+    * {@link IEntityClass#getKeyFields()}.
+    * 
+    * @return a set containing the fields that should not be
+    * wildcarded.  May be null to indicate that all fields are key fields.
+    * @see {@link IEntityClass#getKeyFields()}
+    * @see {@link IEntityClassifierService#classifyEntity}
+    */
+   EnumSet<DeviceField> getKeyFields();
+
+   /**
+    * Reclassify the given entity into a class.  When reclassifying entities,
+    * it can be helpful to take into account the current classification either
+    * as an optimization or to allow flushing any cached state tied to the key
+    * for that device.  The entity will be assigned to a new device with a new
+    * object if the entity class returned is different from the entity class for
+    * curDevice.
+    * 
+    * <p>Note that you must take steps to ensure you always return classes
+    * in some consistent ordering.
+    *
+    * @param curDevice the device currently associated with the entity
+    * @param entity the entity to reclassify
+    * @return the IEntityClass resulting from the classification
+    */
+   IEntityClass reclassifyEntity(IDevice curDevice,
+                                             Entity entity);
+
+   /**
+    * Once reclassification is complete for a device, this method will be
+    * called. If any entities within the device changed their classification,
+    * the device will be split into one or more new devices, one for each such entity.  If
+    * two devices are merged because of a reclassification, then this will be
+    * called on each of the devices, with the same device in the newDevices 
+    * collection.
+    * 
+    * @param oldDevice the original device object
+    * @param newDevices all the new devices derived from the entities of the
+    * old device.  If null, the old device was unchanged.
+    */
+   void deviceUpdate(IDevice oldDevice, 
+                     Collection<? extends IDevice> newDevices);
+
+   /**
+    * Adds a listener for IEntityClassifierService notifications
+    *
+    * @param listener The listener that wants the notifications
+    */
+   public void addListener(IEntityClassListener listener);
+}
+
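
To make the classification contract above concrete, here is a hypothetical classifier sketch that buckets entities by the OUI (the upper 24 bits) of their MAC address, with MAC as the only key field. The class name is illustrative, a production classifier would normally also implement IFloodlightModule (as DefaultEntityClassifier below does), and it assumes Entity.getMacAddress() returns the 48-bit MAC as a long, as its use elsewhere in this import suggests.

    import java.util.Collection;
    import java.util.EnumSet;
    import java.util.concurrent.ConcurrentHashMap;

    import net.floodlightcontroller.devicemanager.IDevice;
    import net.floodlightcontroller.devicemanager.IDeviceService.DeviceField;
    import net.floodlightcontroller.devicemanager.IEntityClass;
    import net.floodlightcontroller.devicemanager.IEntityClassListener;
    import net.floodlightcontroller.devicemanager.IEntityClassifierService;
    import net.floodlightcontroller.devicemanager.internal.Entity;

    // Hypothetical classifier: one entity class per MAC OUI, keyed by MAC only.
    public class OuiEntityClassifier implements IEntityClassifierService {

        // Classes are cached so the same OUI always maps to the same object.
        private final ConcurrentHashMap<String, IEntityClass> classes =
                new ConcurrentHashMap<String, IEntityClass>();

        private static class OuiEntityClass implements IEntityClass {
            private final String name;
            OuiEntityClass(String name) { this.name = name; }

            @Override
            public EnumSet<DeviceField> getKeyFields() {
                // Devices within one OUI class are distinguished by MAC alone.
                return EnumSet.of(DeviceField.MAC);
            }

            @Override
            public String getName() { return name; }
        }

        @Override
        public IEntityClass classifyEntity(Entity entity) {
            long oui = (entity.getMacAddress() >>> 24) & 0xFFFFFFL;
            String name = String.format("oui-%06x", oui);
            IEntityClass cls = classes.get(name);
            if (cls == null) {
                classes.putIfAbsent(name, new OuiEntityClass(name));
                cls = classes.get(name);
            }
            return cls;
        }

        @Override
        public IEntityClass reclassifyEntity(IDevice curDevice, Entity entity) {
            // Classification depends only on the entity's MAC, so the current
            // device does not influence the result here.
            return classifyEntity(entity);
        }

        @Override
        public EnumSet<DeviceField> getKeyFields() {
            // Union of the key fields of every class this classifier can return.
            return EnumSet.of(DeviceField.MAC);
        }

        @Override
        public void deviceUpdate(IDevice oldDevice,
                                 Collection<? extends IDevice> newDevices) {
            // No per-device state is cached in this sketch.
        }

        @Override
        public void addListener(IEntityClassListener listener) {
            // The classification never changes, so listeners are never notified.
        }
    }
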
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/devicemanager/SwitchPort.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/devicemanager/SwitchPort.java
new file mode 100644
index 0000000..7426163
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/devicemanager/SwitchPort.java
@@ -0,0 +1,136 @@
+/**
+*    Copyright 2012 Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.devicemanager;
+
+import net.floodlightcontroller.core.web.serializers.DPIDSerializer;
+
+import org.codehaus.jackson.map.annotate.JsonSerialize;
+import org.codehaus.jackson.map.ser.ToStringSerializer;
+
+/**
+ * A simple switch DPID/port pair
+ * @author readams
+ *
+ */
+public class SwitchPort {
+    @JsonSerialize(using=ToStringSerializer.class)
+    public enum ErrorStatus {
+        DUPLICATE_DEVICE("duplicate-device");
+        
+        private String value;
+        ErrorStatus(String v) {
+            value = v;
+        }
+        
+        @Override
+        public String toString() {
+            return value;
+        }
+
+        public static ErrorStatus fromString(String str) {
+            for (ErrorStatus m : ErrorStatus.values()) {
+                if (m.value.equals(str)) {
+                    return m;
+                }
+            }
+            return null;
+        }
+    }
+    
+    protected long switchDPID;
+    protected int port;
+    ErrorStatus errorStatus;
+
+    /**
+     * Simple constructor
+     * @param switchDPID the dpid
+     * @param port the port
+     * @param errorStatus any error status for the switch port
+     */
+    public SwitchPort(long switchDPID, int port, ErrorStatus errorStatus) {
+        super();
+        this.switchDPID = switchDPID;
+        this.port = port;
+        this.errorStatus = errorStatus;
+    }
+
+    /**
+     * Simple constructor
+     * @param switchDPID the dpid
+     * @param port the port
+     */
+    public SwitchPort(long switchDPID, int port) {
+        super();
+        this.switchDPID = switchDPID;
+        this.port = port;
+        this.errorStatus = null;
+    }
+    
+    // ***************
+    // Getters/Setters
+    // ***************
+
+    @JsonSerialize(using=DPIDSerializer.class)
+    public long getSwitchDPID() {
+        return switchDPID;
+    }
+    
+    public int getPort() {
+        return port;
+    }
+    
+    public ErrorStatus getErrorStatus() {
+        return errorStatus;
+    }
+
+    // ******
+    // Object
+    // ******
+    
+    @Override
+    public int hashCode() {
+        final int prime = 31;
+        int result = 1;
+        result = prime * result
+                        + ((errorStatus == null)
+                                ? 0
+                                : errorStatus.hashCode());
+        result = prime * result + port;
+        result = prime * result + (int) (switchDPID ^ (switchDPID >>> 32));
+        return result;
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (this == obj) return true;
+        if (obj == null) return false;
+        if (getClass() != obj.getClass()) return false;
+        SwitchPort other = (SwitchPort) obj;
+        if (errorStatus != other.errorStatus) return false;
+        if (port != other.port) return false;
+        if (switchDPID != other.switchDPID) return false;
+        return true;
+    }
+
+    @Override
+    public String toString() {
+        return "SwitchPort [switchDPID=" + switchDPID + ", port=" + port
+                + ", errorStatus=" + errorStatus + "]";
+    }
+
+}
\ No newline at end of file
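
Since equals() and hashCode() above cover the DPID, the port, and the error status, SwitchPort works directly as a set or map key. A small hypothetical demonstration:

    import java.util.HashSet;
    import java.util.Set;

    import net.floodlightcontroller.devicemanager.SwitchPort;

    // Hypothetical demo: duplicate DPID/port pairs collapse in a HashSet.
    public class SwitchPortDemo {
        public static void main(String[] args) {
            Set<SwitchPort> ports = new HashSet<SwitchPort>();
            ports.add(new SwitchPort(0x00000000000000aaL, 1));
            ports.add(new SwitchPort(0x00000000000000aaL, 1));  // duplicate, ignored
            ports.add(new SwitchPort(0x00000000000000aaL, 2));
            System.out.println(ports.size());                   // prints 2
        }
    }
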
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/devicemanager/internal/AttachmentPoint.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/devicemanager/internal/AttachmentPoint.java
new file mode 100644
index 0000000..a08a3a5
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/devicemanager/internal/AttachmentPoint.java
@@ -0,0 +1,124 @@
+/**
+ *    Copyright 2011,2012 Big Switch Networks, Inc.
+ *    Originally created by David Erickson, Stanford University
+ *
+ *    Licensed under the Apache License, Version 2.0 (the "License"); you may
+ *    not use this file except in compliance with the License. You may obtain
+ *    a copy of the License at
+ *
+ *         http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *    Unless required by applicable law or agreed to in writing, software
+ *    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ *    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ *    License for the specific language governing permissions and limitations
+ *    under the License.
+ **/
+
+/**
+ * @author Srini
+ */
+
+package net.floodlightcontroller.devicemanager.internal;
+
+public class AttachmentPoint {
+    long  sw;
+    short port;
+    long  activeSince;
+    long  lastSeen;
+
+    // Timeouts for moving attachment points from one OF/broadcast
+    // domain to another.
+    public static final long INACTIVITY_INTERVAL = 30000; // 30 seconds
+    public static final long EXTERNAL_TO_EXTERNAL_TIMEOUT = 5000;  // 5 seconds
+    public static final long OPENFLOW_TO_EXTERNAL_TIMEOUT = 30000; // 30 seconds
+    public static final long CONSISTENT_TIMEOUT = 30000;           // 30 seconds
+
+    public AttachmentPoint(long sw, short port, long activeSince,
+                           long lastSeen) {
+        this.sw = sw;
+        this.port = port;
+        this.activeSince = activeSince;
+        this.lastSeen = lastSeen;
+    }
+
+    public AttachmentPoint(long sw, short port, long lastSeen) {
+        this.sw = sw;
+        this.port = port;
+        this.lastSeen = lastSeen;
+        this.activeSince = lastSeen;
+    }
+
+    public AttachmentPoint(AttachmentPoint ap) {
+        this.sw = ap.sw;
+        this.port = ap.port;
+        this.activeSince = ap.activeSince;
+        this.lastSeen = ap.lastSeen;
+    }
+
+    public long getSw() {
+        return sw;
+    }
+    public void setSw(long sw) {
+        this.sw = sw;
+    }
+    public short getPort() {
+        return port;
+    }
+    public void setPort(short port) {
+        this.port = port;
+    }
+    public long getActiveSince() {
+        return activeSince;
+    }
+    public void setActiveSince(long activeSince) {
+        this.activeSince = activeSince;
+    }
+    public long getLastSeen() {
+        return lastSeen;
+    }
+    public void setLastSeen(long lastSeen) {
+        if (this.lastSeen + INACTIVITY_INTERVAL < lastSeen)
+            this.activeSince = lastSeen;
+        if (this.lastSeen < lastSeen)
+            this.lastSeen = lastSeen;
+    }
+
+    /**
+     *  Hash is generated using only switch and port
+     */
+    @Override
+    public int hashCode() {
+        final int prime = 31;
+        int result = 1;
+        result = prime * result + port;
+        result = prime * result + (int) (sw ^ (sw >>> 32));
+        return result;
+    }
+
+    /**
+     * Compares only the switch and port
+     */
+    @Override
+    public boolean equals(Object obj) {
+        if (this == obj)
+            return true;
+        if (obj == null)
+            return false;
+        if (getClass() != obj.getClass())
+            return false;
+        AttachmentPoint other = (AttachmentPoint) obj;
+        if (port != other.port)
+            return false;
+        if (sw != other.sw)
+            return false;
+        return true;
+    }
+
+    @Override
+    public String toString() {
+        return "AttachmentPoint [sw=" + sw + ", port=" + port
+               + ", activeSince=" + activeSince + ", lastSeen=" + lastSeen
+               + "]";
+    }
+}
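
The setLastSeen() logic above only resets activeSince when the gap since the previous sighting exceeds INACTIVITY_INTERVAL. A hypothetical walk-through:

    import net.floodlightcontroller.devicemanager.internal.AttachmentPoint;

    // Hypothetical demo of the setLastSeen() semantics.
    public class AttachmentPointDemo {
        public static void main(String[] args) {
            long t0 = System.currentTimeMillis();
            AttachmentPoint ap = new AttachmentPoint(1L, (short) 3, t0);

            // Seen again 10 s later: within the 30 s inactivity window,
            // so only lastSeen moves forward.
            ap.setLastSeen(t0 + 10000);
            System.out.println(ap.getActiveSince() == t0);              // true

            // Seen again after a 60 s silence: activeSince is reset too.
            ap.setLastSeen(t0 + 10000 + 60000);
            System.out.println(ap.getActiveSince() == t0 + 70000);      // true
        }
    }
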
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/devicemanager/internal/DefaultEntityClassifier.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/devicemanager/internal/DefaultEntityClassifier.java
new file mode 100644
index 0000000..faed0d4
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/devicemanager/internal/DefaultEntityClassifier.java
@@ -0,0 +1,138 @@
+/**
+*    Copyright 2011,2012, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.devicemanager.internal;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.EnumSet;
+import java.util.HashMap;
+import java.util.Map;
+
+import net.floodlightcontroller.core.module.FloodlightModuleContext;
+import net.floodlightcontroller.core.module.FloodlightModuleException;
+import net.floodlightcontroller.core.module.IFloodlightModule;
+import net.floodlightcontroller.core.module.IFloodlightService;
+import net.floodlightcontroller.devicemanager.IDevice;
+import net.floodlightcontroller.devicemanager.IDeviceService;
+import net.floodlightcontroller.devicemanager.IDeviceService.DeviceField;
+import net.floodlightcontroller.devicemanager.IEntityClass;
+import net.floodlightcontroller.devicemanager.IEntityClassListener;
+import net.floodlightcontroller.devicemanager.IEntityClassifierService;
+
+/**
+ * This is a default entity classifier that simply classifies all
+ * entities into a fixed entity class, with key fields of MAC and VLAN.
+ * @author readams
+ */
+public class DefaultEntityClassifier implements
+        IEntityClassifierService,
+        IFloodlightModule 
+{
+    /**
+     * A default fixed entity class
+     */
+    protected static class DefaultEntityClass implements IEntityClass {
+        String name;
+
+        public DefaultEntityClass(String name) {
+            this.name = name;
+        }
+
+        @Override
+        public EnumSet<IDeviceService.DeviceField> getKeyFields() {
+            return keyFields;
+        }
+
+        @Override
+        public String getName() {
+            return name;
+        }
+    }
+    
+    protected static EnumSet<DeviceField> keyFields;
+    static {
+        keyFields = EnumSet.of(DeviceField.MAC, DeviceField.VLAN);
+    }
+    protected static DefaultEntityClass entityClass =
+        new DefaultEntityClass("DefaultEntityClass");
+
+    @Override
+    public IEntityClass classifyEntity(Entity entity) {
+        return entityClass;
+    }
+
+    @Override
+    public IEntityClass reclassifyEntity(IDevice curDevice,
+                                                     Entity entity) {
+        return entityClass;
+    }
+
+    @Override
+    public void deviceUpdate(IDevice oldDevice, 
+                             Collection<? extends IDevice> newDevices) {
+        // no-op
+    }
+
+    @Override
+    public EnumSet<DeviceField> getKeyFields() {
+        return keyFields;
+    }
+
+    @Override
+    public Collection<Class<? extends IFloodlightService>> getModuleServices() {
+        Collection<Class<? extends IFloodlightService>> l = 
+                new ArrayList<Class<? extends IFloodlightService>>();
+        l.add(IEntityClassifierService.class);
+        return l;
+    }
+
+    @Override
+    public Map<Class<? extends IFloodlightService>, IFloodlightService> getServiceImpls() {
+        Map<Class<? extends IFloodlightService>,
+        IFloodlightService> m = 
+        new HashMap<Class<? extends IFloodlightService>,
+                    IFloodlightService>();
+        // We are the class that implements the service
+        m.put(IEntityClassifierService.class, this);
+        return m;
+    }
+
+    @Override
+    public Collection<Class<? extends IFloodlightService>>
+            getModuleDependencies() {
+        // No dependencies
+        return null;
+    }
+
+    @Override
+    public void init(FloodlightModuleContext context)
+                                                 throws FloodlightModuleException {
+        // no-op
+    }
+
+    @Override
+    public void startUp(FloodlightModuleContext context) {
+        // no-op
+    }
+
+    @Override
+    public void addListener(IEntityClassListener listener) {
+        // no-op
+        
+    }
+}
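
As a module, DefaultEntityClassifier registers itself as the sole IEntityClassifierService implementation and keys every entity on MAC plus VLAN. A hypothetical check of both properties, using only the methods defined above:

    import java.util.EnumSet;
    import java.util.Map;

    import net.floodlightcontroller.core.module.IFloodlightService;
    import net.floodlightcontroller.devicemanager.IDeviceService.DeviceField;
    import net.floodlightcontroller.devicemanager.IEntityClassifierService;
    import net.floodlightcontroller.devicemanager.internal.DefaultEntityClassifier;

    // Hypothetical demo of the default classifier's service map and key fields.
    public class DefaultClassifierDemo {
        public static void main(String[] args) {
            DefaultEntityClassifier classifier = new DefaultEntityClassifier();

            // The module advertises itself as the IEntityClassifierService impl.
            Map<Class<? extends IFloodlightService>, IFloodlightService> impls =
                    classifier.getServiceImpls();
            System.out.println(
                    impls.get(IEntityClassifierService.class) == classifier);   // true

            // Every entity lands in one fixed class, distinguished by MAC + VLAN.
            System.out.println(classifier.getKeyFields()
                    .equals(EnumSet.of(DeviceField.MAC, DeviceField.VLAN)));    // true
        }
    }
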
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/devicemanager/internal/Device.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/devicemanager/internal/Device.java
new file mode 100755
index 0000000..645125e
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/devicemanager/internal/Device.java
@@ -0,0 +1,725 @@
+/**
+ *    Copyright 2011,2012 Big Switch Networks, Inc.
+ *    Originally created by David Erickson, Stanford University
+ *
+ *    Licensed under the Apache License, Version 2.0 (the "License"); you may
+ *    not use this file except in compliance with the License. You may obtain
+ *    a copy of the License at
+ *
+ *         http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *    Unless required by applicable law or agreed to in writing, software
+ *    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ *    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ *    License for the specific language governing permissions and limitations
+ *    under the License.
+ **/
+
+package net.floodlightcontroller.devicemanager.internal;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Date;
+import java.util.EnumSet;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.TreeSet;
+
+import org.codehaus.jackson.map.annotate.JsonSerialize;
+import org.openflow.util.HexString;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import net.floodlightcontroller.devicemanager.IDeviceService.DeviceField;
+import net.floodlightcontroller.devicemanager.web.DeviceSerializer;
+import net.floodlightcontroller.devicemanager.IDevice;
+import net.floodlightcontroller.devicemanager.IEntityClass;
+import net.floodlightcontroller.devicemanager.SwitchPort;
+import net.floodlightcontroller.devicemanager.SwitchPort.ErrorStatus;
+import net.floodlightcontroller.packet.Ethernet;
+import net.floodlightcontroller.packet.IPv4;
+import net.floodlightcontroller.topology.ITopologyService;
+
+/**
+ * Concrete implementation of {@link IDevice}
+ * @author readams
+ */
+@JsonSerialize(using=DeviceSerializer.class)
+public class Device implements IDevice {
+    protected static Logger log =
+            LoggerFactory.getLogger(Device.class);
+
+    protected Long deviceKey;
+    protected DeviceManagerImpl deviceManager;
+
+    protected Entity[] entities;
+    protected IEntityClass entityClass;
+
+    protected String macAddressString;
+
+    /**
+     * These are the old attachment points for the device that were
+     * valid no more than INACTIVITY_INTERVAL ago.
+     */
+    protected List<AttachmentPoint> oldAPs;
+    /**
+     * The current attachment points for the device.
+     */
+    protected List<AttachmentPoint> attachmentPoints;
+    // ************
+    // Constructors
+    // ************
+
+    /**
+     * Create a device from an entity
+     * @param deviceManager the device manager for this device
+     * @param deviceKey the unique identifier for this device object
+     * @param entity the initial entity for the device
+     * @param entityClass the entity classes associated with the entity
+     */
+    public Device(DeviceManagerImpl deviceManager,
+                  Long deviceKey,
+                  Entity entity,
+                  IEntityClass entityClass) {
+        this.deviceManager = deviceManager;
+        this.deviceKey = deviceKey;
+        this.entities = new Entity[] {entity};
+        this.macAddressString =
+                HexString.toHexString(entity.getMacAddress(), 6);
+        this.entityClass = entityClass;
+        Arrays.sort(this.entities);
+
+        this.oldAPs = null;
+        this.attachmentPoints = null;
+
+        if (entity.getSwitchDPID() != null &&
+                entity.getSwitchPort() != null){
+            long sw = entity.getSwitchDPID();
+            short port = entity.getSwitchPort().shortValue();
+
+            if (deviceManager.isValidAttachmentPoint(sw, port)) {
+                AttachmentPoint ap;
+                ap = new AttachmentPoint(sw, port,
+                        entity.getLastSeenTimestamp().getTime());
+
+                this.attachmentPoints = new ArrayList<AttachmentPoint>();
+                this.attachmentPoints.add(ap);
+            }
+        }
+    }
+
+    /**
+     * Create a device from a set of entities
+     * @param deviceManager the device manager for this device
+     * @param deviceKey the unique identifier for this device object
+     * @param oldAPs the old attachment points for the device
+     * @param attachmentPoints the current attachment points for the device
+     * @param entities the initial entities for the device
+     * @param entityClass the entity class associated with the entities
+     */
+    public Device(DeviceManagerImpl deviceManager,
+                  Long deviceKey,
+                  Collection<AttachmentPoint> oldAPs,
+                  Collection<AttachmentPoint> attachmentPoints,
+                  Collection<Entity> entities,
+                  IEntityClass entityClass) {
+        this.deviceManager = deviceManager;
+        this.deviceKey = deviceKey;
+        this.entities = entities.toArray(new Entity[entities.size()]);
+        this.oldAPs = null;
+        this.attachmentPoints = null;
+        if (oldAPs != null) {
+            this.oldAPs =
+                    new ArrayList<AttachmentPoint>(oldAPs);
+        }
+        if (attachmentPoints != null) {
+            this.attachmentPoints =
+                    new ArrayList<AttachmentPoint>(attachmentPoints);
+        }
+        this.macAddressString =
+                HexString.toHexString(this.entities[0].getMacAddress(), 6);
+        this.entityClass = entityClass;
+        Arrays.sort(this.entities);
+    }
+
+    /**
+     * Construct a new device consisting of the entities from the old device
+     * plus an additional entity
+     * @param device the old device object
+     * @param newEntity the entity to add. newEntity must have the same
+     *        entity class as device
+     */
+    public Device(Device device,
+                  Entity newEntity) {
+        this.deviceManager = device.deviceManager;
+        this.deviceKey = device.deviceKey;
+        this.entities = Arrays.<Entity>copyOf(device.entities,
+                                              device.entities.length + 1);
+        this.entities[this.entities.length - 1] = newEntity;
+        Arrays.sort(this.entities);
+        this.oldAPs = null;
+        if (device.oldAPs != null) {
+            this.oldAPs =
+                    new ArrayList<AttachmentPoint>(device.oldAPs);
+        }
+        this.attachmentPoints = null;
+        if (device.attachmentPoints != null) {
+            this.attachmentPoints =
+                    new ArrayList<AttachmentPoint>(device.attachmentPoints);
+        }
+
+        this.macAddressString =
+                HexString.toHexString(this.entities[0].getMacAddress(), 6);
+
+        this.entityClass = device.entityClass;
+    }
+
+    /**
+     * Given a list of attachment points (apList), return a map with one
+     * attachment point per L2 domain, keyed by the L2 domain ID.
+     * @param apList
+     * @return
+     */
+    private Map<Long, AttachmentPoint> getAPMap(List<AttachmentPoint> apList) {
+
+        if (apList == null) return null;
+        ITopologyService topology = deviceManager.topology;
+
+        // Get the old attachment points and sort them.
+        List<AttachmentPoint>oldAP = new ArrayList<AttachmentPoint>();
+        if (apList != null) oldAP.addAll(apList);
+
+        // Remove invalid attachment points before sorting.
+        List<AttachmentPoint>tempAP =
+                new ArrayList<AttachmentPoint>();
+        for(AttachmentPoint ap: oldAP) {
+            if (deviceManager.isValidAttachmentPoint(ap.getSw(), ap.getPort())){
+                tempAP.add(ap);
+            }
+        }
+        oldAP = tempAP;
+
+        Collections.sort(oldAP, deviceManager.apComparator);
+
+        // Map of attachment point by L2 domain Id.
+        Map<Long, AttachmentPoint> apMap = new HashMap<Long, AttachmentPoint>();
+
+        for(int i=0; i<oldAP.size(); ++i) {
+            AttachmentPoint ap = oldAP.get(i);
+            // if this is not a valid attachment point, continue
+            if (!deviceManager.isValidAttachmentPoint(ap.getSw(),
+                                                      ap.getPort()))
+                continue;
+
+            long id = topology.getL2DomainId(ap.getSw());
+            apMap.put(id, ap);
+        }
+
+        if (apMap.isEmpty()) return null;
+        return apMap;
+    }
+
+    /**
+     * Remove all attachment points that are older than INACTIVITY_INTERVAL
+     * from the list.
+     * @param apList
+     * @return
+     */
+    private boolean removeExpiredAttachmentPoints(List<AttachmentPoint>apList) {
+
+        List<AttachmentPoint> expiredAPs = new ArrayList<AttachmentPoint>();
+
+        if (apList == null) return false;
+
+        for(AttachmentPoint ap: apList) {
+            if (ap.getLastSeen() + AttachmentPoint.INACTIVITY_INTERVAL <
+                    System.currentTimeMillis())
+                expiredAPs.add(ap);
+        }
+        if (expiredAPs.size() > 0) {
+            apList.removeAll(expiredAPs);
+            return true;
+        } else return false;
+    }
+
+    /**
+     * Get a list of duplicate attachment points, given a list of old attachment
+     * points and one attachment point per L2 domain. Given a true attachment
+     * point in the L2 domain, say trueAP, another attachment point in the
+     * same L2 domain, say ap, is duplicate if:
+     * 1. ap is inconsistent with trueAP, and
+     * 2. active time of ap is after that of trueAP; and
+     * 3. last seen time of ap is within the last INACTIVITY_INTERVAL
+     * @param oldAPList
+     * @param apMap
+     * @return
+     */
+    List<AttachmentPoint> getDuplicateAttachmentPoints(List<AttachmentPoint>oldAPList,
+                                                       Map<Long, AttachmentPoint>apMap) {
+        ITopologyService topology = deviceManager.topology;
+        List<AttachmentPoint> dupAPs = new ArrayList<AttachmentPoint>();
+        long timeThreshold = System.currentTimeMillis() -
+                AttachmentPoint.INACTIVITY_INTERVAL;
+
+        if (oldAPList == null || apMap == null)
+            return dupAPs;
+
+        for(AttachmentPoint ap: oldAPList) {
+            long id = topology.getL2DomainId(ap.getSw());
+            AttachmentPoint trueAP = apMap.get(id);
+
+            if (trueAP == null) continue;
+            boolean c = (topology.isConsistent(trueAP.getSw(), trueAP.getPort(),
+                                              ap.getSw(), ap.getPort()));
+            boolean active = (ap.getActiveSince() > trueAP.getActiveSince());
+            boolean last = ap.getLastSeen() > timeThreshold;
+            if (!c && active && last) {
+                dupAPs.add(ap);
+            }
+        }
+
+        return dupAPs;
+    }
+
+    /**
+     * Update the known attachment points.  This method is called whenever
+     * topology changes. The method returns true if there's any change to
+     * the list of attachment points -- which indicates a possible device
+     * move.
+     * @return
+     */
+    protected boolean updateAttachmentPoint() {
+        boolean moved = false;
+
+        if (attachmentPoints == null || attachmentPoints.isEmpty())
+            return false;
+
+        List<AttachmentPoint> apList = new ArrayList<AttachmentPoint>();
+        if (attachmentPoints != null) apList.addAll(attachmentPoints);
+        Map<Long, AttachmentPoint> newMap = getAPMap(apList);
+        if (newMap == null || newMap.size() != apList.size()) {
+            moved = true;
+        }
+
+        // Prepare the new attachment point list.
+        if (moved) {
+            List<AttachmentPoint> newAPList =
+                    new ArrayList<AttachmentPoint>();
+            if (newMap != null) newAPList.addAll(newMap.values());
+            this.attachmentPoints = newAPList;
+        }
+
+        // Set the oldAPs to null.
+        this.oldAPs = null;
+        return moved;
+    }
+
+    /**
+     * Update the list of attachment points given that a new packet-in
+     * was seen from (sw, port) at time (lastSeen).  The return value is true
+     * if there was any change to the list of attachment points for the device
+     * -- which indicates a device move.
+     * @param sw
+     * @param port
+     * @param lastSeen
+     * @return
+     */
+    protected boolean updateAttachmentPoint(long sw, short port, long lastSeen){
+        ITopologyService topology = deviceManager.topology;
+        List<AttachmentPoint> oldAPList;
+        List<AttachmentPoint> apList;
+        boolean oldAPFlag = false;
+
+        if (!deviceManager.isValidAttachmentPoint(sw, port)) return false;
+        AttachmentPoint newAP = new AttachmentPoint(sw, port, lastSeen);
+
+        //Copy the oldAP and ap list.
+        apList = new ArrayList<AttachmentPoint>();
+        if (attachmentPoints != null) apList.addAll(attachmentPoints);
+        oldAPList = new ArrayList<AttachmentPoint>();
+        if (oldAPs != null) oldAPList.addAll(oldAPs);
+
+        // if the sw, port is in old AP, remove it from there
+        // and update the lastSeen in that object.
+        if (oldAPList.contains(newAP)) {
+            int index = oldAPList.indexOf(newAP);
+            newAP = oldAPList.remove(index);
+            newAP.setLastSeen(lastSeen);
+            this.oldAPs = oldAPList;
+            oldAPFlag = true;
+        }
+
+        // newAP now contains the new attachment point.
+
+        // If the AP map is null or empty, add the new AP and return.
+        Map<Long, AttachmentPoint> apMap = getAPMap(apList);
+        if (apMap == null || apMap.isEmpty()) {
+            apList.add(newAP);
+            attachmentPoints = apList;
+            return true;
+        }
+
+        long id = topology.getL2DomainId(sw);
+        AttachmentPoint oldAP = apMap.get(id);
+
+        if (oldAP == null) // No attachment on this L2 domain.
+        {
+            apList = new ArrayList<AttachmentPoint>();
+            apList.addAll(apMap.values());
+            apList.add(newAP);
+            this.attachmentPoints = apList;
+            return true; // new AP found on an L2 island.
+        }
+
+        // There is already a known attachment point on the same L2 island.
+        // we need to compare oldAP and newAP.
+        if (oldAP.equals(newAP)) {
+            // nothing to do here. just the last seen has to be changed.
+            if (newAP.lastSeen > oldAP.lastSeen) {
+                oldAP.setLastSeen(newAP.lastSeen);
+            }
+            this.attachmentPoints =
+                    new ArrayList<AttachmentPoint>(apMap.values());
+            return false; // nothing to do here.
+        }
+
+        int x = deviceManager.apComparator.compare(oldAP, newAP);
+        if (x < 0) {
+            // newAP replaces oldAP.
+            apMap.put(id, newAP);
+            this.attachmentPoints =
+                    new ArrayList<AttachmentPoint>(apMap.values());
+
+            oldAPList = new ArrayList<AttachmentPoint>();
+            if (oldAPs != null) oldAPList.addAll(oldAPs);
+            oldAPList.add(oldAP);
+            this.oldAPs = oldAPList;
+            if (!topology.isInSameBroadcastDomain(oldAP.getSw(), oldAP.getPort(),
+                                                  newAP.getSw(), newAP.getPort()))
+                return true; // attachment point changed.
+        } else  if (oldAPFlag) {
+            // retain oldAP  as is.  Put the newAP in oldAPs for flagging
+            // possible duplicates.
+                oldAPList = new ArrayList<AttachmentPoint>();
+                if (oldAPs != null) oldAPList.addAll(oldAPs);
+                // Add to oldAPList only if it was picked up from the oldAPList
+                oldAPList.add(newAP);
+                this.oldAPs = oldAPList;
+        }
+        return false;
+    }
+
+    /**
+     * Delete (sw,port) from the list of attachment points
+     * and from oldAPs.
+     * @param sw
+     * @param port
+     * @return
+     */
+    public boolean deleteAttachmentPoint(long sw, short port) {
+        AttachmentPoint ap = new AttachmentPoint(sw, port, 0);
+
+        if (this.oldAPs != null) {
+            ArrayList<AttachmentPoint> apList = new ArrayList<AttachmentPoint>();
+            apList.addAll(this.oldAPs);
+            int index = apList.indexOf(ap);
+            if (index >= 0) {
+                apList.remove(index);
+                this.oldAPs = apList;
+            }
+        }
+
+        if (this.attachmentPoints != null) {
+            ArrayList<AttachmentPoint> apList = new ArrayList<AttachmentPoint>();
+            apList.addAll(this.attachmentPoints);
+            int index = apList.indexOf(ap);
+            if (index >= 0) {
+                apList.remove(index);
+                this.attachmentPoints = apList;
+                return true;
+            }
+        }
+        return false;
+    }
+
+    public boolean deleteAttachmentPoint(long sw) {
+        boolean deletedFlag;
+        ArrayList<AttachmentPoint> apList;
+        ArrayList<AttachmentPoint> modifiedList;
+
+        // Delete the APs on switch sw in oldAPs.
+        deletedFlag = false;
+        apList = new ArrayList<AttachmentPoint>();
+        if (this.oldAPs != null)
+            apList.addAll(this.oldAPs);
+        modifiedList = new ArrayList<AttachmentPoint>();
+
+        for(AttachmentPoint ap: apList) {
+            if (ap.getSw() == sw) {
+                deletedFlag = true;
+            } else {
+                modifiedList.add(ap);
+            }
+        }
+
+        if (deletedFlag) {
+            this.oldAPs = modifiedList;
+        }
+
+        // Delete the APs on switch sw in attachmentPoints.
+        deletedFlag = false;
+        apList = new ArrayList<AttachmentPoint>();
+        if (this.attachmentPoints != null)
+            apList.addAll(this.attachmentPoints);
+        modifiedList = new ArrayList<AttachmentPoint>();
+
+        for(AttachmentPoint ap: apList) {
+            if (ap.getSw() == sw) {
+                deletedFlag = true;
+            } else {
+                modifiedList.add(ap);
+            }
+        }
+
+        if (deletedFlag) {
+            this.attachmentPoints = modifiedList;
+            return true;
+        }
+
+        return false;
+    }
+
+
+    @Override
+    public SwitchPort[] getAttachmentPoints() {
+        return getAttachmentPoints(false);
+    }
+
+    @Override
+    public SwitchPort[] getAttachmentPoints(boolean includeError) {
+        List<SwitchPort> sp = new ArrayList<SwitchPort>();
+        SwitchPort [] returnSwitchPorts = new SwitchPort[] {};
+        if (attachmentPoints == null) return returnSwitchPorts;
+        if (attachmentPoints.isEmpty()) return returnSwitchPorts;
+
+
+        // copy ap list.
+        List<AttachmentPoint> apList;
+        apList = new ArrayList<AttachmentPoint>();
+        if (attachmentPoints != null) apList.addAll(attachmentPoints);
+        // get AP map.
+        Map<Long, AttachmentPoint> apMap = getAPMap(apList);
+
+        if (apMap != null) {
+            for(AttachmentPoint ap: apMap.values()) {
+                SwitchPort swport = new SwitchPort(ap.getSw(),
+                                                   ap.getPort());
+                    sp.add(swport);
+            }
+        }
+
+        if (!includeError)
+            return sp.toArray(new SwitchPort[sp.size()]);
+
+        List<AttachmentPoint> oldAPList;
+        oldAPList = new ArrayList<AttachmentPoint>();
+
+        if (oldAPs != null) oldAPList.addAll(oldAPs);
+
+        if (removeExpiredAttachmentPoints(oldAPList))
+            this.oldAPs = oldAPList;
+
+        List<AttachmentPoint> dupList;
+        dupList = this.getDuplicateAttachmentPoints(oldAPList, apMap);
+        if (dupList != null) {
+            for(AttachmentPoint ap: dupList) {
+                SwitchPort swport = new SwitchPort(ap.getSw(),
+                                                   ap.getPort(),
+                                                   ErrorStatus.DUPLICATE_DEVICE);
+                    sp.add(swport);
+            }
+        }
+        return sp.toArray(new SwitchPort[sp.size()]);
+    }
+
+    // *******
+    // IDevice
+    // *******
+
+    @Override
+    public Long getDeviceKey() {
+        return deviceKey;
+    }
+
+    @Override
+    public long getMACAddress() {
+        // we assume only one MAC per device for now.
+        return entities[0].getMacAddress();
+    }
+
+    @Override
+    public String getMACAddressString() {
+        return macAddressString;
+    }
+
+    @Override
+    public Short[] getVlanId() {
+        if (entities.length == 1) {
+            if (entities[0].getVlan() != null) {
+                return new Short[]{ entities[0].getVlan() };
+            } else {
+                return new Short[] { Short.valueOf((short)-1) };
+            }
+        }
+
+        TreeSet<Short> vals = new TreeSet<Short>();
+        for (Entity e : entities) {
+            if (e.getVlan() == null)
+                vals.add((short)-1);
+            else
+                vals.add(e.getVlan());
+        }
+        return vals.toArray(new Short[vals.size()]);
+    }
+
+    static final EnumSet<DeviceField> ipv4Fields = EnumSet.of(DeviceField.IPV4);
+
+    @Override
+    public Integer[] getIPv4Addresses() {
+        // XXX - TODO we can cache this result.  Let's find out if this
+        // is really a performance bottleneck first though.
+
+        TreeSet<Integer> vals = new TreeSet<Integer>();
+        for (Entity e : entities) {
+            if (e.getIpv4Address() == null) continue;
+
+            // This device owns an IP address only if, among the devices in
+            // the same entity class, it has the most recent entity with that IP.
+            boolean validIP = true;
+            Iterator<Device> devices =
+                    deviceManager.queryClassByEntity(entityClass, ipv4Fields, e);
+            while (devices.hasNext()) {
+                Device d = devices.next();
+                if (deviceKey.equals(d.getDeviceKey())) 
+                    continue;
+                for (Entity se : d.entities) {
+                    if (se.getIpv4Address() != null &&
+                            se.getIpv4Address().equals(e.getIpv4Address()) &&
+                            se.getLastSeenTimestamp() != null &&
+                            0 < se.getLastSeenTimestamp().
+                            compareTo(e.getLastSeenTimestamp())) {
+                        validIP = false;
+                        break;
+                    }
+                }
+                if (!validIP)
+                    break;
+            }
+
+            if (validIP)
+                vals.add(e.getIpv4Address());
+        }
+
+        return vals.toArray(new Integer[vals.size()]);
+    }
+
+    @Override
+    public Short[] getSwitchPortVlanIds(SwitchPort swp) {
+        TreeSet<Short> vals = new TreeSet<Short>();
+        for (Entity e : entities) {
+            if (e.switchDPID == swp.getSwitchDPID() 
+                    && e.switchPort == swp.getPort()) {
+                if (e.getVlan() == null)
+                    vals.add(Ethernet.VLAN_UNTAGGED);
+                else
+                    vals.add(e.getVlan());
+            }
+        }
+        return vals.toArray(new Short[vals.size()]);
+    }
+
+    @Override
+    public Date getLastSeen() {
+        Date d = null;
+        for (int i = 0; i < entities.length; i++) {
+            if (d == null ||
+                    entities[i].getLastSeenTimestamp().compareTo(d) > 0)
+                d = entities[i].getLastSeenTimestamp();
+        }
+        return d;
+    }
+
+    // ***************
+    // Getters/Setters
+    // ***************
+
+    @Override
+    public IEntityClass getEntityClass() {
+        return entityClass;
+    }
+
+    public Entity[] getEntities() {
+        return entities;
+    }
+
+    // ***************
+    // Utility Methods
+    // ***************
+
+    /**
+     * Check whether the device contains the specified entity
+     * @param entity the entity to search for
+     * @return the index of the entity, or <0 if not found
+     */
+    protected int entityIndex(Entity entity) {
+        return Arrays.binarySearch(entities, entity);
+    }
+
+    // ******
+    // Object
+    // ******
+
+    @Override
+    public int hashCode() {
+        final int prime = 31;
+        int result = 1;
+        result = prime * result + Arrays.hashCode(entities);
+        return result;
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (this == obj) return true;
+        if (obj == null) return false;
+        if (getClass() != obj.getClass()) return false;
+        Device other = (Device) obj;
+        if (!deviceKey.equals(other.deviceKey)) return false;
+        if (!Arrays.equals(entities, other.entities)) return false;
+        return true;
+    }
+
+    @Override
+    public String toString() {
+        StringBuilder builder = new StringBuilder();
+        builder.append("Device [deviceKey=");
+        builder.append(deviceKey);
+        builder.append(", entityClass=");
+        builder.append(entityClass.getName());
+        builder.append(", MAC=");
+        builder.append(macAddressString);
+        builder.append(", IPs=[");
+        boolean isFirst = true;
+        for (Integer ip: getIPv4Addresses()) {
+            if (!isFirst)
+                builder.append(", ");
+            isFirst = false;
+            builder.append(IPv4.fromIPv4Address(ip));
+        }
+        builder.append("], APs=");
+        builder.append(Arrays.toString(getAttachmentPoints(true)));
+        builder.append("]");
+        return builder.toString();
+    }
+}
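
The duplicate-attachment-point test in getDuplicateAttachmentPoints() boils down to three conditions on the candidate AP relative to the true AP of its L2 domain. A hypothetical standalone restatement, with a boolean parameter standing in for ITopologyService.isConsistent():

    import net.floodlightcontroller.devicemanager.internal.AttachmentPoint;

    // Hypothetical restatement of the duplicate-AP conditions: the candidate
    // is a duplicate when it is topologically inconsistent with the true AP,
    // became active later, and was still seen within INACTIVITY_INTERVAL.
    public class DuplicateApCheck {
        static boolean isDuplicate(AttachmentPoint trueAP, AttachmentPoint ap,
                                   boolean isConsistent, long now) {
            long threshold = now - AttachmentPoint.INACTIVITY_INTERVAL;
            return !isConsistent
                    && ap.getActiveSince() > trueAP.getActiveSince()
                    && ap.getLastSeen() > threshold;
        }

        public static void main(String[] args) {
            long now = System.currentTimeMillis();
            AttachmentPoint trueAP = new AttachmentPoint(1L, (short) 1, now - 60000, now);
            AttachmentPoint other  = new AttachmentPoint(2L, (short) 7, now - 5000, now - 1000);
            System.out.println(isDuplicate(trueAP, other, false, now));  // true
        }
    }
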
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/devicemanager/internal/DeviceIndex.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/devicemanager/internal/DeviceIndex.java
new file mode 100644
index 0000000..0d8ea75
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/devicemanager/internal/DeviceIndex.java
@@ -0,0 +1,116 @@
+/**
+*    Copyright 2012 Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.devicemanager.internal;
+
+import java.util.Collection;
+import java.util.EnumSet;
+import java.util.Iterator;
+
+import net.floodlightcontroller.devicemanager.IDeviceService.DeviceField;
+
+/**
+ * An index that maps key fields of an entity to device keys
+ */
+public abstract class DeviceIndex {
+    /**
+     * The key fields for this index
+     */
+    protected EnumSet<DeviceField> keyFields;
+
+    /**
+     * Construct a new device index using the provided key fields
+     * @param keyFields the key fields to use
+     */
+    public DeviceIndex(EnumSet<DeviceField> keyFields) {
+        super();
+        this.keyFields = keyFields;
+    }
+
+    /**
+     * Find all device keys in the index that match the given entity
+     * on all the key fields for this index
+     * @param entity the entity to search for
+     * @return an iterator over device keys
+     */
+    public abstract Iterator<Long> queryByEntity(Entity entity);
+    
+    /**
+     * Get all device keys in the index.  If certain devices exist
+     * multiple times, then these devices may be returned multiple times
+     * @return an iterator over device keys
+     */
+    public abstract Iterator<Long> getAll();
+
+    /**
+     * Attempt to update an index with the entities in the provided
+     * {@link Device}.  If the update fails because of a concurrent update,
+     * will return false.
+     * @param device the device to update
+     * @param deviceKey the device key for the device
+     * @return true if the update succeeded, false otherwise.
+     */
+    public abstract boolean updateIndex(Device device, Long deviceKey);
+
+    /**
+     * Add a mapping from the given entity to the given device key.  This
+     * update will not fail because of a concurrent update.
+     * @param entity the entity to add to the index
+     * @param deviceKey the device key to map the entity to
+     */
+    public abstract void updateIndex(Entity entity, Long deviceKey);
+
+    /**
+     * Remove the entry for the given entity
+     * @param entity the entity to remove
+     */
+    public abstract void removeEntity(Entity entity);
+
+    /**
+     * Remove the given device key from the index for the given entity
+     * @param entity the entity to search for
+     * @param deviceKey the key to remove
+     */
+    public abstract void removeEntity(Entity entity, Long deviceKey);
+    
+    /**
+     * Remove the given device from the index only if the collection
+     * of others does not contain an entity that is identical on all the key
+     * fields for this index.
+     * @param entity the entity to search for
+     * @param deviceKey the key to remove
+     * @param others the others against which to check
+     */
+    public void removeEntityIfNeeded(Entity entity, Long deviceKey,
+                                     Collection<Entity> others) {
+        IndexedEntity ie = new IndexedEntity(keyFields, entity);
+        for (Entity o : others) {
+            IndexedEntity oio = new IndexedEntity(keyFields, o);
+            if (oio.equals(ie)) return;
+        }
+
+        Iterator<Long> keyiter = this.queryByEntity(entity);
+        while (keyiter.hasNext()) {
+                Long key = keyiter.next();
+                if (key.equals(deviceKey)) {
+                    removeEntity(entity, deviceKey);
+                    break;
+                }
+        }
+    }
+
+}
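
A hypothetical minimal concrete index, mapping each combination of key fields to a single device key with a ConcurrentHashMap. It assumes IndexedEntity (used by removeEntityIfNeeded() above) implements hashCode() consistently with its equals(); the class name is illustrative and the concurrent-update handling of the real index implementations is omitted.

    package net.floodlightcontroller.devicemanager.internal;

    import java.util.Collections;
    import java.util.EnumSet;
    import java.util.Iterator;
    import java.util.concurrent.ConcurrentHashMap;

    import net.floodlightcontroller.devicemanager.IDeviceService.DeviceField;

    // Hypothetical single-valued index: one device key per key-field tuple.
    public class SimpleDeviceIndex extends DeviceIndex {
        private final ConcurrentHashMap<IndexedEntity, Long> index =
                new ConcurrentHashMap<IndexedEntity, Long>();

        public SimpleDeviceIndex(EnumSet<DeviceField> keyFields) {
            super(keyFields);
        }

        @Override
        public Iterator<Long> queryByEntity(Entity entity) {
            Long key = index.get(new IndexedEntity(keyFields, entity));
            return (key == null)
                    ? Collections.<Long>emptyList().iterator()
                    : Collections.singletonList(key).iterator();
        }

        @Override
        public Iterator<Long> getAll() {
            return index.values().iterator();
        }

        @Override
        public boolean updateIndex(Device device, Long deviceKey) {
            // This sketch never observes a conflicting concurrent update.
            for (Entity e : device.getEntities())
                updateIndex(e, deviceKey);
            return true;
        }

        @Override
        public void updateIndex(Entity entity, Long deviceKey) {
            index.put(new IndexedEntity(keyFields, entity), deviceKey);
        }

        @Override
        public void removeEntity(Entity entity) {
            index.remove(new IndexedEntity(keyFields, entity));
        }

        @Override
        public void removeEntity(Entity entity, Long deviceKey) {
            // Remove only when the stored key matches the given device key.
            index.remove(new IndexedEntity(keyFields, entity), deviceKey);
        }
    }
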
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/devicemanager/internal/DeviceIndexInterator.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/devicemanager/internal/DeviceIndexInterator.java
new file mode 100644
index 0000000..2015bbe
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/devicemanager/internal/DeviceIndexInterator.java
@@ -0,0 +1,59 @@
+/**
+*    Copyright 2012, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.devicemanager.internal;
+
+import java.util.Iterator;
+
+/**
+ * An iterator for handling device index queries
+ */
+public class DeviceIndexInterator implements Iterator<Device> {
+    private DeviceManagerImpl deviceManager;
+    private Iterator<Long> subIterator;
+
+    /**
+     * Construct a new device index iterator referring to a device manager
+     * instance and an iterator over device keys
+     * 
+     * @param deviceManager the device manager
+     * @param subIterator an iterator over device keys
+     */
+    public DeviceIndexInterator(DeviceManagerImpl deviceManager,
+                                Iterator<Long> subIterator) {
+        super();
+        this.deviceManager = deviceManager;
+        this.subIterator = subIterator;
+    }
+
+    @Override
+    public boolean hasNext() {
+        return subIterator.hasNext();
+    }
+
+    @Override
+    public Device next() {
+        Long next = subIterator.next();
+        return deviceManager.deviceMap.get(next);
+    }
+
+    @Override
+    public void remove() {
+        subIterator.remove();
+    }
+
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/devicemanager/internal/DeviceIterator.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/devicemanager/internal/DeviceIterator.java
new file mode 100644
index 0000000..2cbea66
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/devicemanager/internal/DeviceIterator.java
@@ -0,0 +1,117 @@
+/**
+*    Copyright 2012, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.devicemanager.internal;
+
+import java.util.Arrays;
+import java.util.Iterator;
+
+import net.floodlightcontroller.devicemanager.IEntityClass;
+import net.floodlightcontroller.devicemanager.SwitchPort;
+import net.floodlightcontroller.util.FilterIterator;
+
+/**
+ * An iterator for handling device queries
+ */
+public class DeviceIterator extends FilterIterator<Device> {
+    private IEntityClass[] entityClasses;
+    
+    private Long macAddress;
+    private Short vlan;
+    private Integer ipv4Address; 
+    private Long switchDPID;
+    private Integer switchPort;
+    
+    /**
+     * Construct a new device iterator over the key fields
+     * @param subIterator an iterator over the full data structure to scan
+     * @param entityClasses the entity classes to search for
+     * @param macAddress The MAC address
+     * @param vlan the VLAN
+     * @param ipv4Address the ipv4 address
+     * @param switchDPID the switch DPID
+     * @param switchPort the switch port
+     */
+    public DeviceIterator(Iterator<Device> subIterator, 
+                          IEntityClass[] entityClasses,
+                          Long macAddress,
+                          Short vlan, 
+                          Integer ipv4Address, 
+                          Long switchDPID,
+                          Integer switchPort) {
+        super(subIterator);
+        this.entityClasses = entityClasses;
+        this.subIterator = subIterator;
+        this.macAddress = macAddress;
+        this.vlan = vlan;
+        this.ipv4Address = ipv4Address;
+        this.switchDPID = switchDPID;
+        this.switchPort = switchPort;
+    }
+
+    @Override
+    protected boolean matches(Device value) {
+        boolean match;
+        if (entityClasses != null) {
+            IEntityClass clazz = value.getEntityClass();
+            if (clazz == null) return false;
+
+            match = false;
+            for (IEntityClass entityClass : entityClasses) {
+                if (clazz.equals(entityClass)) {
+                    match = true;
+                    break;
+                }
+            }
+            if (!match) return false;                
+        }
+        if (macAddress != null) {
+            if (macAddress.longValue() != value.getMACAddress())
+                return false;
+        }
+        if (vlan != null) {
+            Short[] vlans = value.getVlanId();
+            if (Arrays.binarySearch(vlans, vlan) < 0) 
+                return false;
+        }
+        if (ipv4Address != null) {
+            Integer[] ipv4Addresses = value.getIPv4Addresses();
+            if (Arrays.binarySearch(ipv4Addresses, ipv4Address) < 0) 
+                return false;
+        }
+        if (switchDPID != null || switchPort != null) {
+            SwitchPort[] sps = value.getAttachmentPoints();
+            if (sps == null) return false;
+            
+            match = false;
+            for (SwitchPort sp : sps) {
+                if (switchDPID != null) {
+                    if (switchDPID.longValue() != sp.getSwitchDPID())
+                        return false;
+                }
+                if (switchPort != null) {
+                    if (switchPort.intValue() != sp.getPort())
+                        return false;
+                }
+                match = true;
+                break;
+            }
+            if (!match) return false;
+        }
+        return true;
+    }
+}
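
Null query fields act as wildcards in matches() above, so a caller can filter on any subset of the fields. A hypothetical helper that keeps only devices with a given MAC, on any VLAN, IP address, or attachment point:

    import java.util.Iterator;

    import net.floodlightcontroller.devicemanager.internal.Device;
    import net.floodlightcontroller.devicemanager.internal.DeviceIterator;

    // Hypothetical query helper: filter an iterator over all devices by MAC.
    public final class MacQuery {
        public static Iterator<Device> byMac(Iterator<Device> allDevices) {
            return new DeviceIterator(allDevices,
                                      null,       // any entity class
                                      0x0aL,      // MAC address 00:00:00:00:00:0a
                                      null,       // any VLAN
                                      null,       // any IPv4 address
                                      null,       // any switch DPID
                                      null);      // any switch port
        }
    }
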
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/devicemanager/internal/DeviceManagerImpl.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/devicemanager/internal/DeviceManagerImpl.java
new file mode 100755
index 0000000..feccdc4
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/devicemanager/internal/DeviceManagerImpl.java
@@ -0,0 +1,1717 @@
+/**
+ *    Copyright 2011,2012 Big Switch Networks, Inc.
+ *    Originally created by David Erickson, Stanford University
+ *
+ *    Licensed under the Apache License, Version 2.0 (the "License"); you may
+ *    not use this file except in compliance with the License. You may obtain
+ *    a copy of the License at
+ *
+ *         http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *    Unless required by applicable law or agreed to in writing, software
+ *    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ *    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ *    License for the specific language governing permissions and limitations
+ *    under the License.
+ **/
+
+package net.floodlightcontroller.devicemanager.internal;
+
+import java.util.ArrayList;
+import java.util.Calendar;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.Date;
+import java.util.EnumSet;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.ListIterator;
+import java.util.Map;
+import java.util.Queue;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+
+import net.floodlightcontroller.core.FloodlightContext;
+import net.floodlightcontroller.core.IFloodlightProviderService;
+import net.floodlightcontroller.core.IHAListener;
+import net.floodlightcontroller.core.IInfoProvider;
+import net.floodlightcontroller.core.IOFMessageListener;
+import net.floodlightcontroller.core.IOFSwitch;
+import net.floodlightcontroller.core.IFloodlightProviderService.Role;
+import net.floodlightcontroller.core.module.FloodlightModuleContext;
+import net.floodlightcontroller.core.module.IFloodlightModule;
+import net.floodlightcontroller.core.module.IFloodlightService;
+import net.floodlightcontroller.core.util.SingletonTask;
+import net.floodlightcontroller.devicemanager.IDevice;
+import net.floodlightcontroller.devicemanager.IDeviceService;
+import net.floodlightcontroller.devicemanager.IEntityClass;
+import net.floodlightcontroller.devicemanager.IEntityClassListener;
+import net.floodlightcontroller.devicemanager.IEntityClassifierService;
+import net.floodlightcontroller.devicemanager.IDeviceListener;
+import net.floodlightcontroller.devicemanager.SwitchPort;
+import net.floodlightcontroller.devicemanager.web.DeviceRoutable;
+import net.floodlightcontroller.flowcache.IFlowReconcileListener;
+import net.floodlightcontroller.flowcache.IFlowReconcileService;
+import net.floodlightcontroller.flowcache.OFMatchReconcile;
+import net.floodlightcontroller.linkdiscovery.ILinkDiscovery.LDUpdate;
+import net.floodlightcontroller.packet.ARP;
+import net.floodlightcontroller.packet.DHCP;
+import net.floodlightcontroller.packet.Ethernet;
+import net.floodlightcontroller.packet.IPv4;
+import net.floodlightcontroller.packet.UDP;
+import net.floodlightcontroller.restserver.IRestApiService;
+import net.floodlightcontroller.storage.IStorageSourceService;
+import net.floodlightcontroller.threadpool.IThreadPoolService;
+import net.floodlightcontroller.topology.ITopologyListener;
+import net.floodlightcontroller.topology.ITopologyService;
+import net.floodlightcontroller.util.MultiIterator;
+import static net.floodlightcontroller.devicemanager.internal.DeviceManagerImpl.DeviceUpdate.Change.*;
+
+import org.openflow.protocol.OFMatchWithSwDpid;
+import org.openflow.protocol.OFMessage;
+import org.openflow.protocol.OFPacketIn;
+import org.openflow.protocol.OFType;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * DeviceManager creates Devices based upon MAC addresses seen in the network.
+ * It tracks any network addresses mapped to the Device, and its location
+ * within the network.
+ * @author readams
+ */
+public class DeviceManagerImpl implements
+IDeviceService, IOFMessageListener, ITopologyListener,
+IFloodlightModule, IEntityClassListener,
+IFlowReconcileListener, IInfoProvider, IHAListener {
+    protected static Logger logger =
+            LoggerFactory.getLogger(DeviceManagerImpl.class);
+
+    protected IFloodlightProviderService floodlightProvider;
+    protected ITopologyService topology;
+    protected IStorageSourceService storageSource;
+    protected IRestApiService restApi;
+    protected IThreadPoolService threadPool;
+    protected IFlowReconcileService flowReconcileMgr;
+
+    /**
+     * Time in milliseconds before entities will expire
+     */
+    protected static final int ENTITY_TIMEOUT = 60*60*1000;
+
+    /**
+     * Time in seconds between cleaning up old entities/devices
+     */
+    protected static final int ENTITY_CLEANUP_INTERVAL = 60*60;
+
+    /**
+     * This is the master device map that maps device IDs to {@link Device}
+     * objects.
+     */
+    protected ConcurrentHashMap<Long, Device> deviceMap;
+
+    /**
+     * Counter used to generate device keys
+     */
+    protected long deviceKeyCounter = 0;
+
+    /**
+     * Lock for incrementing the device key counter
+     */
+    protected Object deviceKeyLock = new Object();
+
+    /**
+     * This is the primary entity index that contains all entities
+     */
+    protected DeviceUniqueIndex primaryIndex;
+
+    /**
+     * This stores secondary indices over the fields in the devices
+     */
+    protected Map<EnumSet<DeviceField>, DeviceIndex> secondaryIndexMap;
+
+    /**
+     * This map contains state for each of the {@link IEntityClass}
+     * instances that exist
+     */
+    protected ConcurrentHashMap<String, ClassState> classStateMap;
+
+    /**
+     * This is the list of indices we want on a per-class basis
+     */
+    protected Set<EnumSet<DeviceField>> perClassIndices;
+
+    /**
+     * The entity classifier currently in use
+     */
+    protected IEntityClassifierService entityClassifier;
+
+    /**
+     * Used to cache state about specific entity classes
+     */
+    protected class ClassState {
+
+        /**
+         * The class index
+         */
+        protected DeviceUniqueIndex classIndex;
+
+        /**
+         * This stores secondary indices over the fields in the device for the
+         * class
+         */
+        protected Map<EnumSet<DeviceField>, DeviceIndex> secondaryIndexMap;
+
+        /**
+         * Allocate a new {@link ClassState} object for the class
+         * @param clazz the class to use for the state
+         */
+        public ClassState(IEntityClass clazz) {
+            EnumSet<DeviceField> keyFields = clazz.getKeyFields();
+            EnumSet<DeviceField> primaryKeyFields =
+                    entityClassifier.getKeyFields();
+            boolean keyFieldsMatchPrimary =
+                    primaryKeyFields.equals(keyFields);
+
+            if (!keyFieldsMatchPrimary)
+                classIndex = new DeviceUniqueIndex(keyFields);
+
+            secondaryIndexMap =
+                    new HashMap<EnumSet<DeviceField>, DeviceIndex>();
+            for (EnumSet<DeviceField> fields : perClassIndices) {
+                secondaryIndexMap.put(fields,
+                                      new DeviceMultiIndex(fields));
+            }
+        }
+    }
+
+    /**
+     * Device manager event listeners
+     */
+    protected Set<IDeviceListener> deviceListeners;
+
+    /**
+     * A device update event to be dispatched
+     */
+    protected static class DeviceUpdate {
+        public enum Change {
+            ADD, DELETE, CHANGE;
+        }
+
+        /**
+         * The affected device
+         */
+        protected IDevice device;
+
+        /**
+         * The change that was made
+         */
+        protected Change change;
+
+        /**
+         * For a CHANGE update, this is the set of fields that changed
+         * (null for ADD and DELETE updates)
+         */
+        protected EnumSet<DeviceField> fieldsChanged;
+
+        public DeviceUpdate(IDevice device, Change change,
+                            EnumSet<DeviceField> fieldsChanged) {
+            super();
+            this.device = device;
+            this.change = change;
+            this.fieldsChanged = fieldsChanged;
+        }
+
+        @Override
+        public String toString() {
+            String devIdStr = device.getEntityClass().getName() + "::" +
+                    device.getMACAddressString();
+            return "DeviceUpdate [device=" + devIdStr + ", change=" + change
+                   + ", fieldsChanged=" + fieldsChanged + "]";
+        }
+        
+    }
+
+    /**
+     * AttachmentPointComparator
+     * 
+     * Compares two attachment points and returns the latest one.
+     * It is assumed that the two attachment points are in the same
+     * L2 domain.
+     * 
+     * @author srini
+     */
+    protected class AttachmentPointComparator
+    implements Comparator<AttachmentPoint> {
+        public AttachmentPointComparator() {
+            super();
+        }
+
+        @Override
+        public int compare(AttachmentPoint oldAP, AttachmentPoint newAP) {
+
+            //First compare based on L2 domain ID; 
+            long oldSw = oldAP.getSw();
+            short oldPort = oldAP.getPort();
+            long oldDomain = topology.getL2DomainId(oldSw);
+            boolean oldBD = topology.isBroadcastDomainPort(oldSw, oldPort);
+
+            long newSw = newAP.getSw();
+            short newPort = newAP.getPort();
+            long newDomain = topology.getL2DomainId(newSw);
+            boolean newBD = topology.isBroadcastDomainPort(newSw, newPort);
+
+            if (oldDomain < newDomain) return -1;
+            else if (oldDomain > newDomain) return 1;
+
+            // We expect the new AP to have become active no earlier than
+            // the old AP; if that is not the case, swap the arguments and
+            // negate the result.
+            if (oldAP.getActiveSince() > newAP.getActiveSince())
+                return -compare(newAP, oldAP);
+
+            long activeOffset = 0;
+            if (!topology.isConsistent(oldSw, oldPort, newSw, newPort)) {
+                if (!newBD && oldBD) {
+                    return -1;
+                }
+                if (newBD && oldBD) {
+                    activeOffset = AttachmentPoint.EXTERNAL_TO_EXTERNAL_TIMEOUT;
+                }
+                else if (newBD && !oldBD){
+                    activeOffset = AttachmentPoint.OPENFLOW_TO_EXTERNAL_TIMEOUT;
+                }
+
+            } else {
+                // The attachment point is consistent.
+                activeOffset = AttachmentPoint.CONSISTENT_TIMEOUT;
+            }
+
+
+            if ((newAP.getActiveSince() > oldAP.getLastSeen() + activeOffset) ||
+                    (newAP.getLastSeen() > oldAP.getLastSeen() +
+                            AttachmentPoint.INACTIVITY_INTERVAL)) {
+                return -1;
+            }
+            return 1;
+        }
+    }
+
+    /**
+     * Comparator used to order attachment points; sorts by L2 domain
+     * (cluster) ID first, then by recency
+     */
+    public AttachmentPointComparator apComparator;
+
+    /**
+     * Switch ports where attachment points shouldn't be learned
+     */
+    private Set<SwitchPort> suppressAPs;
+
+    /**
+     * Periodic task to clean up expired entities
+     */
+    public SingletonTask entityCleanupTask;
+
+    // **************
+    // IDeviceService
+    // **************
+
+    @Override
+    public IDevice getDevice(Long deviceKey) {
+        return deviceMap.get(deviceKey);
+    }
+
+    @Override
+    public IDevice findDevice(long macAddress, Short vlan,
+                              Integer ipv4Address, Long switchDPID,
+                              Integer switchPort)
+                              throws IllegalArgumentException {
+        if (vlan != null && vlan.shortValue() <= 0)
+            vlan = null;
+        if (ipv4Address != null && ipv4Address == 0)
+            ipv4Address = null;
+        Entity e = new Entity(macAddress, vlan, ipv4Address, switchDPID,
+                              switchPort, null);
+        if (!allKeyFieldsPresent(e, entityClassifier.getKeyFields())) {
+            throw new IllegalArgumentException("Not all key fields specified."
+                      + " Required fields: " + entityClassifier.getKeyFields());
+        }
+        return findDeviceByEntity(e);
+    }
+
+    @Override
+    public IDevice findDestDevice(IDevice source, long macAddress,
+                                  Short vlan, Integer ipv4Address) 
+                                  throws IllegalArgumentException {
+        if (vlan != null && vlan.shortValue() <= 0)
+            vlan = null;
+        if (ipv4Address != null && ipv4Address == 0)
+            ipv4Address = null;
+        Entity e = new Entity(macAddress, vlan, ipv4Address,
+                              null, null, null);
+        if (source == null || 
+                !allKeyFieldsPresent(e, source.getEntityClass().getKeyFields())) {
+            throw new IllegalArgumentException("Not all key fields and/or "
+                    + "no source device specified. Required fields: " + 
+                    entityClassifier.getKeyFields());
+        }
+        return findDestByEntity(source, e);
+    }
+
+    @Override
+    public Collection<? extends IDevice> getAllDevices() {
+        return Collections.unmodifiableCollection(deviceMap.values());
+    }
+
+    @Override
+    public void addIndex(boolean perClass,
+                         EnumSet<DeviceField> keyFields) {
+        if (perClass) {
+            perClassIndices.add(keyFields);
+        } else {
+            secondaryIndexMap.put(keyFields,
+                                  new DeviceMultiIndex(keyFields));
+        }
+    }
+
+    @Override
+    public Iterator<? extends IDevice> queryDevices(Long macAddress,
+                                                    Short vlan,
+                                                    Integer ipv4Address,
+                                                    Long switchDPID,
+                                                    Integer switchPort) {
+        DeviceIndex index = null;
+        if (secondaryIndexMap.size() > 0) {
+            EnumSet<DeviceField> keys =
+                    getEntityKeys(macAddress, vlan, ipv4Address,
+                                  switchDPID, switchPort);
+            index = secondaryIndexMap.get(keys);
+        }
+
+        Iterator<Device> deviceIterator = null;
+        if (index == null) {
+            // Do a full table scan
+            deviceIterator = deviceMap.values().iterator();
+        } else {
+            // index lookup
+            Entity entity = new Entity((macAddress == null ? 0 : macAddress),
+                                       vlan,
+                                       ipv4Address,
+                                       switchDPID,
+                                       switchPort,
+                                       null);
+            deviceIterator =
+                    new DeviceIndexInterator(this, index.queryByEntity(entity));
+        }
+
+        DeviceIterator di =
+                new DeviceIterator(deviceIterator,
+                                   null,
+                                   macAddress,
+                                   vlan,
+                                   ipv4Address,
+                                   switchDPID,
+                                   switchPort);
+        return di;
+    }
+
+    @Override
+    public Iterator<? extends IDevice> queryClassDevices(IDevice reference,
+                                                         Long macAddress,
+                                                         Short vlan,
+                                                         Integer ipv4Address,
+                                                         Long switchDPID,
+                                                         Integer switchPort) {
+        IEntityClass entityClass = reference.getEntityClass();
+        ArrayList<Iterator<Device>> iterators =
+                new ArrayList<Iterator<Device>>();
+        ClassState classState = getClassState(entityClass);
+        
+        DeviceIndex index = null;
+        if (classState.secondaryIndexMap.size() > 0) {
+            EnumSet<DeviceField> keys =
+                    getEntityKeys(macAddress, vlan, ipv4Address,
+                                  switchDPID, switchPort);
+            index = classState.secondaryIndexMap.get(keys);
+        }
+        
+        Iterator<Device> iter;
+        if (index == null) {
+            index = classState.classIndex;
+            if (index == null) {
+                // scan all devices
+                return new DeviceIterator(deviceMap.values().iterator(),
+                                          new IEntityClass[] { entityClass },
+                                          macAddress, vlan, ipv4Address,
+                                          switchDPID, switchPort);
+            } else {
+                // scan the entire class
+                iter = new DeviceIndexInterator(this, index.getAll());
+            }
+        } else {
+            // index lookup
+            Entity entity =
+                    new Entity((macAddress == null ? 0 : macAddress),
+                               vlan,
+                               ipv4Address,
+                               switchDPID,
+                               switchPort,
+                               null);
+            iter = new DeviceIndexInterator(this,
+                                            index.queryByEntity(entity));
+        }
+        iterators.add(iter);
+        
+        return new MultiIterator<Device>(iterators.iterator());
+    }
+    
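+    /**
+     * Build a device iterator for the given query fields, using a secondary
+     * index when one exists for exactly those fields and falling back to a
+     * full scan of the device map otherwise.
+     * @param macAddress the MAC address to search for, or null
+     * @param vlan the VLAN to search for, or null
+     * @param ipv4Address the IPv4 address to search for, or null
+     * @param switchDPID the attachment switch DPID to search for, or null
+     * @param switchPort the attachment port to search for, or null
+     * @return an iterator over the matching devices
+     */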
+    protected Iterator<Device> getDeviceIteratorForQuery(Long macAddress,
+                                                        Short vlan,
+                                                        Integer ipv4Address,
+                                                        Long switchDPID,
+                                                        Integer switchPort) {
+        DeviceIndex index = null;
+        if (secondaryIndexMap.size() > 0) {
+            EnumSet<DeviceField> keys =
+                getEntityKeys(macAddress, vlan, ipv4Address,
+                            switchDPID, switchPort);
+            index = secondaryIndexMap.get(keys);
+        }
+
+        Iterator<Device> deviceIterator = null;
+        if (index == null) {
+            // Do a full table scan
+            deviceIterator = deviceMap.values().iterator();
+        } else {
+            // index lookup
+            Entity entity = new Entity((macAddress == null ? 0 : macAddress),
+                                vlan,
+                                ipv4Address,
+                                switchDPID,
+                                switchPort,
+                                null);
+            deviceIterator =
+                new DeviceIndexInterator(this, index.queryByEntity(entity));
+        }
+
+        DeviceIterator di =
+            new DeviceIterator(deviceIterator,
+                                null,
+                                macAddress,
+                                vlan,
+                                ipv4Address,
+                                switchDPID,
+                                switchPort);
+        return di;
+    }
+
+    @Override
+    public void addListener(IDeviceListener listener) {
+        deviceListeners.add(listener);
+    }
+
+    // *************
+    // IInfoProvider
+    // *************
+
+    @Override
+    public Map<String, Object> getInfo(String type) {
+        if (!"summary".equals(type))
+            return null;
+
+        Map<String, Object> info = new HashMap<String, Object>();
+        info.put("# hosts", deviceMap.size());
+        return info;
+    }
+
+    // ******************
+    // IOFMessageListener
+    // ******************
+
+    @Override
+    public String getName() {
+        return "devicemanager";
+    }
+
+    @Override
+    public boolean isCallbackOrderingPrereq(OFType type, String name) {
+        return ((type == OFType.PACKET_IN || type == OFType.FLOW_MOD)
+                && name.equals("topology"));
+    }
+
+    @Override
+    public boolean isCallbackOrderingPostreq(OFType type, String name) {
+        return false;
+    }
+
+    @Override
+    public Command receive(IOFSwitch sw, OFMessage msg,
+                           FloodlightContext cntx) {
+        switch (msg.getType()) {
+            case PACKET_IN:
+                return this.processPacketInMessage(sw,
+                                                   (OFPacketIn) msg, cntx);
+            default:
+                break;
+        }
+        return Command.CONTINUE;
+    }
+
+    // ***************
+    // IFlowReconcileListener
+    // ***************
+    @Override
+    public Command reconcileFlows(ArrayList<OFMatchReconcile> ofmRcList) {
+        ListIterator<OFMatchReconcile> iter = ofmRcList.listIterator();
+        while (iter.hasNext()) {
+            OFMatchReconcile ofm = iter.next();
+            
+            // Remove the STOPPed flow.
+            if (Command.STOP == reconcileFlow(ofm)) {
+                iter.remove();
+            }
+        }
+        
+        if (ofmRcList.size() > 0) {
+            return Command.CONTINUE;
+        } else {
+            return Command.STOP;
+        }
+    }
+
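+    /**
+     * Reconcile a single flow: look up the source and destination devices
+     * from the flow's match and store them in the flow's context.
+     * @param ofm the flow to reconcile
+     * @return Command.STOP if the source device is unknown, otherwise
+     * Command.CONTINUE
+     */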
+    protected Command reconcileFlow(OFMatchReconcile ofm) {
+        // Extract source entity information
+        Entity srcEntity =
+                getEntityFromFlowMod(ofm.ofmWithSwDpid, true);
+        if (srcEntity == null)
+            return Command.STOP;
+
+        // Find the device by source entity
+        Device srcDevice = findDeviceByEntity(srcEntity);
+        if (srcDevice == null)
+            return Command.STOP;
+
+        // Store the source device in the context
+        fcStore.put(ofm.cntx, CONTEXT_SRC_DEVICE, srcDevice);
+
+        // Find the device matching the destination from the entity
+        // classes of the source.
+        Entity dstEntity = getEntityFromFlowMod(ofm.ofmWithSwDpid, false);
+        Device dstDevice = null;
+        if (dstEntity != null) {
+            dstDevice = findDestByEntity(srcDevice, dstEntity);
+            if (dstDevice != null)
+                fcStore.put(ofm.cntx, CONTEXT_DST_DEVICE, dstDevice);
+        }
+        if (logger.isTraceEnabled()) {
+            logger.trace("Reconciling flow: match={}, srcEntity={}, srcDev={}, " 
+                         + "dstEntity={}, dstDev={}",
+                         new Object[] {ofm.ofmWithSwDpid.getOfMatch(),
+                                       srcEntity, srcDevice, 
+                                       dstEntity, dstDevice } );
+        }
+        return Command.CONTINUE;
+    }
+
+    // *****************
+    // IFloodlightModule
+    // *****************
+
+    @Override
+    public Collection<Class<? extends IFloodlightService>> getModuleServices() {
+        Collection<Class<? extends IFloodlightService>> l =
+                new ArrayList<Class<? extends IFloodlightService>>();
+        l.add(IDeviceService.class);
+        return l;
+    }
+
+    @Override
+    public Map<Class<? extends IFloodlightService>, IFloodlightService>
+    getServiceImpls() {
+        Map<Class<? extends IFloodlightService>,
+        IFloodlightService> m =
+        new HashMap<Class<? extends IFloodlightService>,
+        IFloodlightService>();
+        // We are the class that implements the service
+        m.put(IDeviceService.class, this);
+        return m;
+    }
+
+    @Override
+    public Collection<Class<? extends IFloodlightService>> getModuleDependencies() {
+        Collection<Class<? extends IFloodlightService>> l =
+                new ArrayList<Class<? extends IFloodlightService>>();
+        l.add(IFloodlightProviderService.class);
+        l.add(IStorageSourceService.class);
+        l.add(ITopologyService.class);
+        l.add(IRestApiService.class);
+        l.add(IThreadPoolService.class);
+        l.add(IFlowReconcileService.class);
+        l.add(IEntityClassifierService.class);
+        return l;
+    }
+
+    @Override
+    public void init(FloodlightModuleContext fmc) {
+        this.perClassIndices =
+                new HashSet<EnumSet<DeviceField>>();
+        addIndex(true, EnumSet.of(DeviceField.IPV4));
+
+        this.deviceListeners = new HashSet<IDeviceListener>();
+        this.suppressAPs =
+                Collections.synchronizedSet(new HashSet<SwitchPort>());
+
+        this.floodlightProvider =
+                fmc.getServiceImpl(IFloodlightProviderService.class);
+        this.storageSource =
+                fmc.getServiceImpl(IStorageSourceService.class);
+        this.topology =
+                fmc.getServiceImpl(ITopologyService.class);
+        this.restApi = fmc.getServiceImpl(IRestApiService.class);
+        this.threadPool = fmc.getServiceImpl(IThreadPoolService.class);
+        this.flowReconcileMgr = fmc.getServiceImpl(IFlowReconcileService.class);
+        this.entityClassifier = fmc.getServiceImpl(IEntityClassifierService.class);
+    }
+
+    @Override
+    public void startUp(FloodlightModuleContext fmc) {
+        primaryIndex = new DeviceUniqueIndex(entityClassifier.getKeyFields());
+        secondaryIndexMap = new HashMap<EnumSet<DeviceField>, DeviceIndex>();
+
+        deviceMap = new ConcurrentHashMap<Long, Device>();
+        classStateMap =
+                new ConcurrentHashMap<String, ClassState>();
+        apComparator = new AttachmentPointComparator();
+
+        floodlightProvider.addOFMessageListener(OFType.PACKET_IN, this);
+        floodlightProvider.addHAListener(this);
+        if (topology != null)
+            topology.addListener(this);
+        flowReconcileMgr.addFlowReconcileListener(this);
+        entityClassifier.addListener(this);
+
+        Runnable ecr = new Runnable() {
+            @Override
+            public void run() {
+                cleanupEntities();
+                entityCleanupTask.reschedule(ENTITY_CLEANUP_INTERVAL,
+                                             TimeUnit.SECONDS);
+            }
+        };
+        ScheduledExecutorService ses = threadPool.getScheduledExecutor();
+        entityCleanupTask = new SingletonTask(ses, ecr);
+        entityCleanupTask.reschedule(ENTITY_CLEANUP_INTERVAL,
+                                     TimeUnit.SECONDS);
+
+        if (restApi != null) {
+            restApi.addRestletRoutable(new DeviceRoutable());
+        } else {
+            logger.debug("Could not instantiate REST API");
+        }
+    }
+
+    // ***************
+    // IHAListener
+    // ***************
+
+    @Override
+    public void roleChanged(Role oldRole, Role newRole) {
+        switch(newRole) {
+            case SLAVE:
+                logger.debug("Resetting device state because of role change");
+                startUp(null);
+                break;
+            default:
+                break;
+        }
+    }
+
+    @Override
+    public void controllerNodeIPsChanged(
+                                         Map<String, String> curControllerNodeIPs,
+                                         Map<String, String> addedControllerNodeIPs,
+                                         Map<String, String> removedControllerNodeIPs) {
+        // no-op
+    }
+
+    // ****************
+    // Internal methods
+    // ****************
+
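+    /**
+     * Handle a packet-in: learn or update the source device from the packet,
+     * look up the destination device within the source's entity class, and
+     * store both in the Floodlight context for downstream listeners.
+     * @param sw the switch that sent the packet-in
+     * @param pi the packet-in message
+     * @param cntx the Floodlight context for the message
+     * @return Command.STOP if no source device could be learned, otherwise
+     * Command.CONTINUE
+     */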
+    protected Command processPacketInMessage(IOFSwitch sw, OFPacketIn pi,
+                                             FloodlightContext cntx) {
+        Ethernet eth =
+                IFloodlightProviderService.bcStore.
+                get(cntx,IFloodlightProviderService.CONTEXT_PI_PAYLOAD);
+
+        // Extract source entity information
+        Entity srcEntity =
+                getSourceEntityFromPacket(eth, sw.getId(), pi.getInPort());
+        if (srcEntity == null)
+            return Command.STOP;
+
+        // Learn/lookup device information
+        Device srcDevice = learnDeviceByEntity(srcEntity);
+        if (srcDevice == null)
+            return Command.STOP;
+
+        // Store the source device in the context
+        fcStore.put(cntx, CONTEXT_SRC_DEVICE, srcDevice);
+
+        // Find the device matching the destination from the entity
+        // classes of the source.
+        Entity dstEntity = getDestEntityFromPacket(eth);
+        Device dstDevice = null;
+        if (dstEntity != null) {
+            dstDevice =
+                    findDestByEntity(srcDevice, dstEntity);
+            if (dstDevice != null)
+                fcStore.put(cntx, CONTEXT_DST_DEVICE, dstDevice);
+        }
+
+        if (logger.isTraceEnabled()) {
+            logger.trace("Received PI: {} on switch {}, port {} *** eth={}" +
+                         " *** srcDev={} *** dstDev={} *** ",
+                         new Object[] { pi, sw.getStringId(), pi.getInPort(),
+                                        eth, srcDevice, dstDevice });
+        }
+        return Command.CONTINUE;
+    }
+
+    /**
+     * Check whether the given attachment point is valid given the current
+     * topology
+     * @param switchDPID the DPID
+     * @param switchPort the port
+     * @return true if it's a valid attachment point
+     */
+    public boolean isValidAttachmentPoint(long switchDPID,
+                                             int switchPort) {
+        if (topology.isAttachmentPointPort(switchDPID,
+                                           (short)switchPort) == false)
+            return false;
+
+        if (suppressAPs.contains(new SwitchPort(switchDPID, switchPort)))
+            return false;
+
+        return true;
+    }
+
+    /**
+     * Get the source IPv4 address from the packet if the packet is either
+     * an ARP packet or a DHCP reply
+     * @param eth the Ethernet payload to inspect
+     * @param dlAddr the source MAC address of the packet, as a long
+     * @return the IPv4 address, or 0 if none could be extracted
+     */
+    private int getSrcNwAddr(Ethernet eth, long dlAddr) {
+        if (eth.getPayload() instanceof ARP) {
+            ARP arp = (ARP) eth.getPayload();
+            if ((arp.getProtocolType() == ARP.PROTO_TYPE_IP) &&
+                    (Ethernet.toLong(arp.getSenderHardwareAddress()) == dlAddr)) {
+                return IPv4.toIPv4Address(arp.getSenderProtocolAddress());
+            }
+        } else if (eth.getPayload() instanceof IPv4) {
+            IPv4 ipv4 = (IPv4) eth.getPayload();
+            if (ipv4.getPayload() instanceof UDP) {
+                UDP udp = (UDP)ipv4.getPayload();
+                if (udp.getPayload() instanceof DHCP) {
+                    DHCP dhcp = (DHCP)udp.getPayload();
+                    if (dhcp.getOpCode() == DHCP.OPCODE_REPLY) {
+                        return ipv4.getSourceAddress();
+                    }
+                }
+            }
+        }
+        return 0;
+    }
+
+    /**
+     * Parse a source entity from an {@link Ethernet} packet.
+     * @param eth the packet to parse
+     * @param swdpid the DPID of the switch on which the packet arrived
+     * @param port the port on which the packet arrived
+     * @return the entity from the packet, or null if the source MAC is a
+     * broadcast/multicast address
+     */
+    protected Entity getSourceEntityFromPacket(Ethernet eth,
+                                             long swdpid,
+                                             int port) {
+        byte[] dlAddrArr = eth.getSourceMACAddress();
+        long dlAddr = Ethernet.toLong(dlAddrArr);
+
+        // Ignore broadcast/multicast source
+        if ((dlAddrArr[0] & 0x1) != 0)
+            return null;
+
+        short vlan = eth.getVlanID();
+        int nwSrc = getSrcNwAddr(eth, dlAddr);
+        return new Entity(dlAddr,
+                          ((vlan >= 0) ? vlan : null),
+                          ((nwSrc != 0) ? nwSrc : null),
+                          swdpid,
+                          port,
+                          new Date());
+    }
+
+    /**
+     * Get a (partial) entity for the destination from the packet.
+     * @param eth the packet to parse
+     * @return the destination entity, or null if the destination MAC is a
+     * broadcast/multicast address
+     */
+    protected Entity getDestEntityFromPacket(Ethernet eth) {
+        byte[] dlAddrArr = eth.getDestinationMACAddress();
+        long dlAddr = Ethernet.toLong(dlAddrArr);
+        short vlan = eth.getVlanID();
+        int nwDst = 0;
+
+        // Ignore broadcast/multicast destination
+        if ((dlAddrArr[0] & 0x1) != 0)
+            return null;
+
+        if (eth.getPayload() instanceof IPv4) {
+            IPv4 ipv4 = (IPv4) eth.getPayload();
+            nwDst = ipv4.getDestinationAddress();
+        }
+
+        return new Entity(dlAddr,
+                          ((vlan >= 0) ? vlan : null),
+                          ((nwDst != 0) ? nwDst : null),
+                          null,
+                          null,
+                          null);
+    }
+
+    /**
+     * Parse an entity from an OFMatchWithSwDpid.
+     * @param ofmWithSwDpid the match plus switch DPID to parse
+     * @param isSource true to extract the source entity, false to extract
+     * the destination entity
+     * @return the entity from the match, or null if the MAC address is a
+     * broadcast/multicast address
+     */
+    private Entity getEntityFromFlowMod(OFMatchWithSwDpid ofmWithSwDpid,
+                boolean isSource) {
+        byte[] dlAddrArr = ofmWithSwDpid.getOfMatch().getDataLayerSource();
+        int nwSrc = ofmWithSwDpid.getOfMatch().getNetworkSource();
+        if (!isSource) {
+            dlAddrArr = ofmWithSwDpid.getOfMatch().getDataLayerDestination();
+            nwSrc = ofmWithSwDpid.getOfMatch().getNetworkDestination();
+        }
+
+        long dlAddr = Ethernet.toLong(dlAddrArr);
+
+        // Ignore broadcast/multicast source
+        if ((dlAddrArr[0] & 0x1) != 0)
+            return null;
+
+        Long swDpid = null;
+        Short inPort = null;
+        
+        if (isSource) {
+            swDpid = ofmWithSwDpid.getSwitchDataPathId();
+            inPort = ofmWithSwDpid.getOfMatch().getInputPort();
+        }
+
+        boolean learnap = true;
+        if (swDpid == null ||
+            inPort == null ||
+            !isValidAttachmentPoint(swDpid, inPort)) {
+            // This is an internal port, or we otherwise don't want to
+            // learn on this port.  In the future, we should handle this
+            // case by labeling flows with something that will give us
+            // the entity class.  For now, we'll do our best assuming
+            // attachment point information isn't used as a key field.
+            learnap = false;
+        }
+
+        short vlan = ofmWithSwDpid.getOfMatch().getDataLayerVirtualLan();
+        return new Entity(dlAddr,
+                          ((vlan >= 0) ? vlan : null),
+                          ((nwSrc != 0) ? nwSrc : null),
+                          (learnap ? swDpid : null),
+                          (learnap ? (int)inPort : null),
+                          new Date());
+    }
+
+    /**
+     * Look up a {@link Device} based on the provided {@link Entity}. We first
+     * check the primary index. If we do not find an entry there we classify
+     * the device into its IEntityClass and query the classIndex. 
+     * This implies that all key field of the current IEntityClassifier must 
+     * be present in the entity for the lookup to succeed!
+     * @param entity the entity to search for
+     * @return The {@link Device} object if found
+     */
+    protected Device findDeviceByEntity(Entity entity) {
+        // Look up the fully-qualified entity to see if it already
+        // exists in the primary entity index.
+        Long deviceKey = primaryIndex.findByEntity(entity);
+        IEntityClass entityClass = null;
+
+        if (deviceKey == null) {
+            // If the entity does not exist in the primary entity index,
+            // use the entity classifier to find the class for the
+            // entity. Look up the entity in the returned class's
+            // class entity index.
+            entityClass = entityClassifier.classifyEntity(entity);
+            if (entityClass == null) {
+                return null;
+            }
+            ClassState classState = getClassState(entityClass);
+
+            if (classState.classIndex != null) {
+                deviceKey =
+                        classState.classIndex.findByEntity(entity);
+            }
+        }
+        if (deviceKey == null) return null;
+        return deviceMap.get(deviceKey);
+    }
+
+    /**
+     * Get a destination device using entity fields that correspond with
+     * the given source device.  The source device is important since
+     * there could be ambiguity in the destination device without the
+     * attachment point information.
+     * @param source the source device.  The returned destination will be
+     * in the same entity class as the source.
+     * @param dstEntity the entity to look up
+     * @return a {@link Device} or null if no device is found.
+     */
+    protected Device findDestByEntity(IDevice source,
+                                      Entity dstEntity) {
+        
+        // Look  up the fully-qualified entity to see if it 
+        // exists in the primary entity index
+        Long deviceKey = primaryIndex.findByEntity(dstEntity);
+        
+        if (deviceKey == null) {
+            // This could happen because:
+            // 1) the destination is unknown or is a broadcast address, or
+            // 2) attachment point fields are key fields, since attachment
+            //    point information isn't available for destination
+            //    devices.
+            // For the second case, we'll need to match up the
+            // destination device with the class of the source
+            // device.
+            ClassState classState = getClassState(source.getEntityClass());
+            if (classState.classIndex == null) {
+                return null;
+            }
+            deviceKey = classState.classIndex.findByEntity(dstEntity);
+        }
+        if (deviceKey == null) return null;
+        return deviceMap.get(deviceKey);
+    }
+    
+
+    /**
+     * Look up a {@link Device} within a particular entity class based on
+     * the provided {@link Entity}.
+     * @param clazz the entity class to search for the entity
+     * @param entity the entity to search for
+     * @return The {@link Device} object if found
+    private Device findDeviceInClassByEntity(IEntityClass clazz,
+                                               Entity entity) {
+        // XXX - TODO
+        throw new UnsupportedOperationException();
+    }
+     */
+
+    /**
+     * Look up a {@link Device} based on the provided {@link Entity}.  Also
+     * learns based on the new entity, and will update existing devices as
+     * required.
+     *
+     * @param entity the {@link Entity}
+     * @return The {@link Device} object if found
+     */
+    protected Device learnDeviceByEntity(Entity entity) {
+        ArrayList<Long> deleteQueue = null;
+        LinkedList<DeviceUpdate> deviceUpdates = null;
+        Device device = null;
+
+        // We may need to restart the learning process if we detect
+        // concurrent modification.  Note that at least one thread is
+        // always guaranteed to make progress, so we don't get into
+        // infinite starvation loops.
+        while (true) {
+            deviceUpdates = null;
+
+            // Look up the fully-qualified entity to see if it already
+            // exists in the primary entity index.
+            Long deviceKey = primaryIndex.findByEntity(entity);
+            IEntityClass entityClass = null;
+
+            if (deviceKey == null) {
+                // If the entity does not exist in the primary entity index,
+                // use the entity classifier to find the class for the
+                // entity. Look up the entity in the returned class's
+                // class entity index.
+                entityClass = entityClassifier.classifyEntity(entity);
+                if (entityClass == null) {
+                    // could not classify entity. No device
+                    return null;
+                }
+                ClassState classState = getClassState(entityClass);
+
+                if (classState.classIndex != null) {
+                    deviceKey =
+                            classState.classIndex.findByEntity(entity);
+                }
+            }
+            if (deviceKey != null) {
+                // If the primary or class index contains the entity,
+                // use the resulting device key to look up the device in
+                // the device map, and use the referenced Device below.
+                device = deviceMap.get(deviceKey);
+                if (device == null)
+                    throw new IllegalStateException("Corrupted device index");
+            } else {
+                // If no index contains the entity, create a new Device
+                // object containing the entity, and generate a new
+                // device ID. However, we first check if the entity
+                // is allowed (e.g., for spoofing protection)
+                if (!isEntityAllowed(entity, entityClass)) {
+                    logger.info("PacketIn is not allowed {} {}", 
+                                entityClass.getName(), entity);
+                    return null;
+                }
+                synchronized (deviceKeyLock) {
+                    deviceKey = Long.valueOf(deviceKeyCounter++);
+                }
+                device = allocateDevice(deviceKey, entity, entityClass);
+                if (logger.isDebugEnabled()) {
+                    logger.debug("New device created: {} deviceKey={}, entity={}",
+                                 new Object[]{device, deviceKey, entity});
+                }
+
+                // Add the new device to the primary map with a simple put
+                deviceMap.put(deviceKey, device);
+
+                // update indices
+                if (!updateIndices(device, deviceKey)) {
+                    if (deleteQueue == null)
+                        deleteQueue = new ArrayList<Long>();
+                    deleteQueue.add(deviceKey);
+                    continue;
+                }
+
+                updateSecondaryIndices(entity, entityClass, deviceKey);
+
+                // generate new device update
+                deviceUpdates =
+                        updateUpdates(deviceUpdates,
+                                      new DeviceUpdate(device, ADD, null));
+
+                break;
+            }
+
+            if (!isEntityAllowed(entity, device.getEntityClass())) {
+                logger.info("PacketIn is not allowed {} {}", 
+                            device.getEntityClass().getName(), entity);
+                return null;
+            }
+            int entityindex = -1;
+            if ((entityindex = device.entityIndex(entity)) >= 0) {
+                // update timestamp on the found entity
+                Date lastSeen = entity.getLastSeenTimestamp();
+                if (lastSeen == null) lastSeen = new Date();
+                device.entities[entityindex].setLastSeenTimestamp(lastSeen);
+                if (device.entities[entityindex].getSwitchDPID() != null &&
+                        device.entities[entityindex].getSwitchPort() != null) {
+                    long sw = device.entities[entityindex].getSwitchDPID();
+                    short port = device.entities[entityindex].getSwitchPort().shortValue();
+
+                    boolean moved =
+                            device.updateAttachmentPoint(sw,
+                                                         port,
+                                                         lastSeen.getTime());
+
+                    if (moved) {
+                        sendDeviceMovedNotification(device);
+                        if (logger.isTraceEnabled()) {
+                            logger.trace("Device moved: attachment points {}," +
+                                    "entities {}", device.attachmentPoints,
+                                    device.entities);
+                        }
+                    } else {
+                        if (logger.isTraceEnabled()) {
+                            logger.trace("Device attachment point NOT updated: " +
+                                         "attachment points {}," +
+                                         "entities {}", device.attachmentPoints,
+                                         device.entities);
+                        }
+                    }
+                }
+                break;
+            } else {
+                boolean moved = false;
+                Device newDevice = allocateDevice(device, entity);
+                if (entity.getSwitchDPID() != null && entity.getSwitchPort() != null) {
+                    moved = newDevice.updateAttachmentPoint(entity.getSwitchDPID(),
+                                                            entity.getSwitchPort().shortValue(),
+                                                            entity.getLastSeenTimestamp().getTime());
+                }
+
+                // generate updates
+                EnumSet<DeviceField> changedFields =
+                        findChangedFields(device, entity);
+                if (changedFields.size() > 0)
+                    deviceUpdates =
+                    updateUpdates(deviceUpdates,
+                                  new DeviceUpdate(newDevice, CHANGE,
+                                                   changedFields));
+
+                // update the device map with a replace call
+                boolean res = deviceMap.replace(deviceKey, device, newDevice);
+                // If replace returns false, restart the process from the
+                // beginning (this implies another thread concurrently
+                // modified this Device).
+                if (!res)
+                    continue;
+
+                device = newDevice;
+
+                // update indices
+                if (!updateIndices(device, deviceKey)) {
+                    continue;
+                }
+                updateSecondaryIndices(entity,
+                                       device.getEntityClass(),
+                                       deviceKey);
+
+                if (moved) {
+                    sendDeviceMovedNotification(device);
+                    if (logger.isDebugEnabled()) {
+                        logger.debug("Device moved: attachment points {}," +
+                                "entities {}", device.attachmentPoints,
+                                device.entities);
+                    }
+                } else {
+                    if (logger.isDebugEnabled()) {
+                        logger.debug("Device attachment point updated: " +
+                                     "attachment points {}," +
+                                     "entities {}", device.attachmentPoints,
+                                     device.entities);
+                    }
+                }
+                break;
+            }
+        }
+
+        if (deleteQueue != null) {
+            for (Long l : deleteQueue) {
+                Device dev = deviceMap.get(l);
+                this.deleteDevice(dev);
+                
+
+                // generate new device update
+                deviceUpdates =
+                        updateUpdates(deviceUpdates,
+                                      new DeviceUpdate(dev, DELETE, null));
+            }
+        }
+
+        processUpdates(deviceUpdates);
+
+        return device;
+    }
+
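+    /**
+     * Hook that decides whether an entity may be learned (e.g., for
+     * spoofing protection).  The default implementation allows every
+     * entity; subclasses can override it to apply a policy.
+     * @param entity the entity to check
+     * @param entityClass the entity class of the entity
+     * @return true if the entity may be learned
+     */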
+    protected boolean isEntityAllowed(Entity entity, IEntityClass entityClass) {
+        return true;
+    }
+
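+    /**
+     * Determine which device fields (IPv4 address, VLAN, or attachment
+     * switch/port) would change if the new entity were added to the device.
+     * @param device the existing device
+     * @param newEntity the entity being merged into the device
+     * @return the set of fields whose values are new to the device
+     */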
+    protected EnumSet<DeviceField> findChangedFields(Device device,
+                                                     Entity newEntity) {
+        EnumSet<DeviceField> changedFields =
+                EnumSet.of(DeviceField.IPV4,
+                           DeviceField.VLAN,
+                           DeviceField.SWITCH);
+
+        if (newEntity.getIpv4Address() == null)
+            changedFields.remove(DeviceField.IPV4);
+        if (newEntity.getVlan() == null)
+            changedFields.remove(DeviceField.VLAN);
+        if (newEntity.getSwitchDPID() == null ||
+                newEntity.getSwitchPort() == null)
+            changedFields.remove(DeviceField.SWITCH);
+
+        if (changedFields.size() == 0) return changedFields;
+
+        for (Entity entity : device.getEntities()) {
+            if (newEntity.getIpv4Address() == null ||
+                    (entity.getIpv4Address() != null &&
+                    entity.getIpv4Address().equals(newEntity.getIpv4Address())))
+                changedFields.remove(DeviceField.IPV4);
+            if (newEntity.getVlan() == null ||
+                    (entity.getVlan() != null &&
+                    entity.getVlan().equals(newEntity.getVlan())))
+                changedFields.remove(DeviceField.VLAN);
+            if (newEntity.getSwitchDPID() == null ||
+                    newEntity.getSwitchPort() == null ||
+                    (entity.getSwitchDPID() != null &&
+                    entity.getSwitchPort() != null &&
+                    entity.getSwitchDPID().equals(newEntity.getSwitchDPID()) &&
+                    entity.getSwitchPort().equals(newEntity.getSwitchPort())))
+                changedFields.remove(DeviceField.SWITCH);
+        }
+
+        return changedFields;
+    }
+
+    /**
+     * Send update notifications to listeners
+     * @param updates the updates to process.
+     */
+    protected void processUpdates(Queue<DeviceUpdate> updates) {
+        if (updates == null) return;
+        DeviceUpdate update = null;
+        while (null != (update = updates.poll())) {
+            if (logger.isTraceEnabled()) {
+                logger.trace("Dispatching device update: {}", update);
+            }
+            for (IDeviceListener listener : deviceListeners) {
+                switch (update.change) {
+                    case ADD:
+                        listener.deviceAdded(update.device);
+                        break;
+                    case DELETE:
+                        listener.deviceRemoved(update.device);
+                        break;
+                    case CHANGE:
+                        for (DeviceField field : update.fieldsChanged) {
+                            switch (field) {
+                                case IPV4:
+                                    listener.deviceIPV4AddrChanged(update.device);
+                                    break;
+                                case SWITCH:
+                                case PORT:
+                                    //listener.deviceMoved(update.device);
+                                    break;
+                                case VLAN:
+                                    listener.deviceVlanChanged(update.device);
+                                    break;
+                                default:
+                                    logger.debug("Unknown device field changed {}",
+                                                update.fieldsChanged.toString());
+                                    break;
+                            }
+                        }
+                        break;
+                }
+            }
+        }
+    }
+    
+    /**
+     * Check if the entity e has all the keyFields set. Returns false if not.
+     * @param e entity to check
+     * @param keyFields the key fields to check e against
+     * @return true if all key fields are present in e
+     */
+    protected boolean allKeyFieldsPresent(Entity e, EnumSet<DeviceField> keyFields) {
+        for (DeviceField f : keyFields) {
+            switch (f) {
+                case MAC:
+                    // MAC address is always present
+                    break;
+                case IPV4:
+                    if (e.ipv4Address == null) return false;
+                    break;
+                case SWITCH:
+                    if (e.switchDPID == null) return false;
+                    break;
+                case PORT:
+                    if (e.switchPort == null) return false;
+                    break;
+                case VLAN:
+                    // FIXME: vlan==null is ambiguous: it can mean: not present
+                    // or untagged
+                    //if (e.vlan == null) return false;
+                    break;
+                default:
+                    // we should never get here. unless somebody extended 
+                    // DeviceFields
+                    throw new IllegalStateException();
+            }
+        }
+        return true;
+    }
+
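+    /**
+     * Append a device update to the list, lazily allocating the list if
+     * necessary.
+     */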
+    private LinkedList<DeviceUpdate>
+    updateUpdates(LinkedList<DeviceUpdate> list, DeviceUpdate update) {
+        if (update == null) return list;
+        if (list == null)
+            list = new LinkedList<DeviceUpdate>();
+        list.add(update);
+
+        return list;
+    }
+
+    /**
+     * Get the {@link ClassState} for a class, creating it if needed.  If
+     * another thread creates the state concurrently, the instance that is
+     * already registered in the map is returned.
+     * @param clazz the class for the state
+     * @return the class state for the class
+     */
+    private ClassState getClassState(IEntityClass clazz) {
+        ClassState classState = classStateMap.get(clazz.getName());
+        if (classState != null) return classState;
+
+        classState = new ClassState(clazz);
+        ClassState r = classStateMap.putIfAbsent(clazz.getName(), classState);
+        if (r != null) {
+            // concurrent add
+            return r;
+        }
+        return classState;
+    }
+
+    /**
+     * Update both the primary and class indices for the provided device.
+     * If the update fails because of a concurrent update, will return false.
+     * @param device the device to update
+     * @param deviceKey the device key for the device
+     * @return true if the update succeeded, false otherwise.
+     */
+    private boolean updateIndices(Device device, Long deviceKey) {
+        if (!primaryIndex.updateIndex(device, deviceKey)) {
+            return false;
+        }
+        IEntityClass entityClass = device.getEntityClass();
+        ClassState classState = getClassState(entityClass);
+
+        if (classState.classIndex != null) {
+            if (!classState.classIndex.updateIndex(device,
+                                                   deviceKey))
+                return false;
+        }
+        return true;
+    }
+
+    /**
+     * Update the secondary indices for the given entity and associated
+     * entity classes
+     * @param entity the entity to update
+     * @param entityClass the entity class for the entity
+     * @param deviceKey the device key to set up
+     */
+    private void updateSecondaryIndices(Entity entity,
+                                        IEntityClass entityClass,
+                                        Long deviceKey) {
+        for (DeviceIndex index : secondaryIndexMap.values()) {
+            index.updateIndex(entity, deviceKey);
+        }
+        ClassState state = getClassState(entityClass);
+        for (DeviceIndex index : state.secondaryIndexMap.values()) {
+            index.updateIndex(entity, deviceKey);
+        }
+    }
+
+    // *********************
+    // IEntityClassListener
+    // *********************
+    @Override
+    public void entityClassChanged (Set<String> entityClassNames) {
+        /* iterate through the devices, reclassify the devices that belong
+         * to these entity class names
+         */
+        Iterator<Device> diter = deviceMap.values().iterator();
+        while (diter.hasNext()) {
+            Device d = diter.next();
+            if (d.getEntityClass() == null ||
+                entityClassNames.contains(d.getEntityClass().getName()))
+                reclassifyDevice(d);
+        }
+    }
+
+    /**
+     * Clean up expired entities/devices
+     */
+    protected void cleanupEntities () {
+
+        Calendar c = Calendar.getInstance();
+        c.add(Calendar.MILLISECOND, -ENTITY_TIMEOUT);
+        Date cutoff = c.getTime();
+
+        ArrayList<Entity> toRemove = new ArrayList<Entity>();
+        ArrayList<Entity> toKeep = new ArrayList<Entity>();
+
+        Iterator<Device> diter = deviceMap.values().iterator();
+        LinkedList<DeviceUpdate> deviceUpdates =
+                new LinkedList<DeviceUpdate>();
+
+        while (diter.hasNext()) {
+            Device d = diter.next();
+
+            while (true) {
+                deviceUpdates.clear();
+                toRemove.clear();
+                toKeep.clear();
+                for (Entity e : d.getEntities()) {
+                    if (e.getLastSeenTimestamp() != null &&
+                         0 > e.getLastSeenTimestamp().compareTo(cutoff)) {
+                        // individual entity needs to be removed
+                        toRemove.add(e);
+                    } else {
+                        toKeep.add(e);
+                    }
+                }
+                if (toRemove.size() == 0) {
+                    break;
+                }
+
+                for (Entity e : toRemove) {
+                    removeEntity(e, d.getEntityClass(), d.deviceKey, toKeep);
+                }
+
+                if (toKeep.size() > 0) {
+                    Device newDevice = allocateDevice(d.getDeviceKey(),
+                                                      d.oldAPs,
+                                                      d.attachmentPoints,
+                                                      toKeep,
+                                                      d.entityClass);
+
+                    EnumSet<DeviceField> changedFields =
+                            EnumSet.noneOf(DeviceField.class);
+                    for (Entity e : toRemove) {
+                        changedFields.addAll(findChangedFields(newDevice, e));
+                    }
+                    if (changedFields.size() > 0)
+                        deviceUpdates.add(new DeviceUpdate(d, CHANGE,
+                                                           changedFields));
+
+                    if (!deviceMap.replace(newDevice.getDeviceKey(),
+                                           d,
+                                           newDevice)) {
+                        // concurrent modification; try again
+                        // need to use the device that is in the map now
+                        // for the next iteration
+                        d = deviceMap.get(d.getDeviceKey());
+                        if (null != d)
+                            continue;
+                    }
+                } else {
+                    deviceUpdates.add(new DeviceUpdate(d, DELETE, null));
+                    if (!deviceMap.remove(d.getDeviceKey(), d)) {
+                        // concurrent modification; try again
+                        // need to use the device that is in the map now
+                        // for the next iteration
+                        d = deviceMap.get(d.getDeviceKey());
+                        if (null != d)
+                            continue;
+                    }
+                }
+                processUpdates(deviceUpdates);
+                break;
+            }
+        }
+    }
+
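+    /**
+     * Remove an entity from the primary, secondary, and per-class indices
+     * for the given device, keeping index entries that are still justified
+     * by the device's remaining entities.
+     * @param removed the entity being removed
+     * @param entityClass the entity class of the device
+     * @param deviceKey the device key for the device
+     * @param others the entities that remain on the device
+     */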
+    protected void removeEntity(Entity removed,
+                              IEntityClass entityClass,
+                              Long deviceKey,
+                              Collection<Entity> others) {
+        for (DeviceIndex index : secondaryIndexMap.values()) {
+            index.removeEntityIfNeeded(removed, deviceKey, others);
+        }
+        ClassState classState = getClassState(entityClass);
+        for (DeviceIndex index : classState.secondaryIndexMap.values()) {
+            index.removeEntityIfNeeded(removed, deviceKey, others);
+        }
+
+        primaryIndex.removeEntityIfNeeded(removed, deviceKey, others);
+
+        if (classState.classIndex != null) {
+            classState.classIndex.removeEntityIfNeeded(removed,
+                                                       deviceKey,
+                                                       others);
+        }
+    }
+    
+    /**
+     * Delete a given device: remove all of its entities first, then
+     * delete the device itself.
+     * @param device the device to delete
+     */
+    protected void deleteDevice(Device device) {
+        ArrayList<Entity> emptyToKeep = new ArrayList<Entity>();
+        for (Entity entity : device.getEntities()) {
+            this.removeEntity(entity, device.getEntityClass(), 
+                device.getDeviceKey(), emptyToKeep);
+        }
+        if (!deviceMap.remove(device.getDeviceKey(), device)) {
+            if (logger.isDebugEnabled())
+                logger.debug("device map does not have this device -" + 
+                    device.toString());
+        }
+    }
+
+    private EnumSet<DeviceField> getEntityKeys(Long macAddress,
+                                               Short vlan,
+                                               Integer ipv4Address,
+                                               Long switchDPID,
+                                               Integer switchPort) {
+        // FIXME: vlan==null is a valid search. We need to handle this case
+        // correctly. The code still works, but it may fall back to a full
+        // device search instead of using an index.
+        EnumSet<DeviceField> keys = EnumSet.noneOf(DeviceField.class);
+        if (macAddress != null) keys.add(DeviceField.MAC);
+        if (vlan != null) keys.add(DeviceField.VLAN);
+        if (ipv4Address != null) keys.add(DeviceField.IPV4);
+        if (switchDPID != null) keys.add(DeviceField.SWITCH);
+        if (switchPort != null) keys.add(DeviceField.PORT);
+        return keys;
+    }
+
+
+    protected Iterator<Device> queryClassByEntity(IEntityClass clazz,
+                                                  EnumSet<DeviceField> keyFields,
+                                                  Entity entity) {
+        ClassState classState = getClassState(clazz);
+        DeviceIndex index = classState.secondaryIndexMap.get(keyFields);
+        if (index == null) return Collections.<Device>emptySet().iterator();
+        return new DeviceIndexInterator(this, index.queryByEntity(entity));
+    }
+
+    protected Device allocateDevice(Long deviceKey,
+                                    Entity entity,
+                                    IEntityClass entityClass) {
+        return new Device(this, deviceKey, entity, entityClass);
+    }
+
+    // TODO: FIX THIS.
+    protected Device allocateDevice(Long deviceKey,
+                                    List<AttachmentPoint> aps,
+                                    List<AttachmentPoint> trueAPs,
+                                    Collection<Entity> entities,
+                                    IEntityClass entityClass) {
+        return new Device(this, deviceKey, aps, trueAPs, entities, entityClass);
+    }
+
+    protected Device allocateDevice(Device device,
+                                    Entity entity) {
+        return new Device(device, entity);
+    }
+    
+    protected Device allocateDevice(Device device, Set <Entity> entities) {
+        List <AttachmentPoint> newPossibleAPs = 
+                new ArrayList<AttachmentPoint>();
+        List <AttachmentPoint> newAPs = 
+                new ArrayList<AttachmentPoint>();
+        for (Entity entity : entities) { 
+            if (entity.switchDPID != null && entity.switchPort != null) {
+                AttachmentPoint aP = 
+                        new AttachmentPoint(entity.switchDPID.longValue(), 
+                                    entity.switchPort.shortValue(), 0);
+                newPossibleAPs.add(aP);
+            }
+        }
+        if (device.attachmentPoints != null) {
+            for (AttachmentPoint oldAP : device.attachmentPoints) {
+                if (newPossibleAPs.contains(oldAP)) {
+                    newAPs.add(oldAP);
+                }
+            }
+        }
+        if (newAPs.isEmpty())
+            newAPs = null;
+        Device d = new Device(this, device.getDeviceKey(),newAPs, null,
+                        entities, device.getEntityClass());
+        d.updateAttachmentPoint();
+        return d;
+    }
+
+    @Override
+    public void addSuppressAPs(long swId, short port) {
+        suppressAPs.add(new SwitchPort(swId, port));
+    }
+
+    @Override
+    public void removeSuppressAPs(long swId, short port) {
+        suppressAPs.remove(new SwitchPort(swId, port));
+    }
+
+    /**
+     * Topology listener method.
+     */
+    @Override
+    public void topologyChanged() {
+        Iterator<Device> diter = deviceMap.values().iterator();
+        List<LDUpdate> updateList = topology.getLastLinkUpdates();
+        if (updateList != null) {
+            if (logger.isTraceEnabled()) {
+                for(LDUpdate update: updateList) {
+                    logger.trace("Topo update: {}", update);
+                }
+            }
+        }
+
+        while (diter.hasNext()) {
+            Device d = diter.next();
+            if (d.updateAttachmentPoint()) {
+                if (logger.isDebugEnabled()) {
+                    logger.debug("Attachment point changed for device: {}", d);
+                }
+                sendDeviceMovedNotification(d);
+            }
+        }
+    }
+
+    /**
+     * Send a device-moved notification to all registered device listeners
+     * @param d the device that moved
+     */
+    protected void sendDeviceMovedNotification(Device d) {
+        for (IDeviceListener listener : deviceListeners) {
+            listener.deviceMoved(d);
+        }
+    }
+    
+    /**
+     * Reclassify and reconcile a device. This may create new device(s) or
+     * remove entities from this device.
+     * @param device the device to reclassify
+     * @return false if the device's entity class did not change, true otherwise
+     */
+    protected boolean reclassifyDevice(Device device)
+    {
+        // first classify all entities of this device
+        if (device == null) {
+            logger.debug("In reclassify for null device");
+            return false;
+        }
+        boolean needToReclassify = false;
+        for (Entity entity : device.entities) {
+            IEntityClass entityClass = 
+                    this.entityClassifier.classifyEntity(entity);
+            if (entityClass == null || device.getEntityClass() == null) {
+                needToReclassify = true;                
+                break;
+            }
+            if (!entityClass.getName().
+                    equals(device.getEntityClass().getName())) {
+                needToReclassify = true;
+                break;
+            }
+        }
+        if (needToReclassify == false) {
+            return false;
+        }
+            
+        LinkedList<DeviceUpdate> deviceUpdates =
+                new LinkedList<DeviceUpdate>();
+        // delete this device and then re-learn all the entities
+        this.deleteDevice(device);
+        deviceUpdates.add(new DeviceUpdate(device, 
+                DeviceUpdate.Change.DELETE, null));
+        if (!deviceUpdates.isEmpty())
+            processUpdates(deviceUpdates);
+        for (Entity entity: device.entities ) {
+            this.learnDeviceByEntity(entity);
+        }
+        return true;
+    }   
+}
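
Editor's note: cleanupEntities() above relies on ConcurrentHashMap's conditional replace/remove to cope with concurrent writers: the device read at the start of an iteration is swapped out (or deleted) only if it is still the instance stored in the map, otherwise the loop re-reads and retries. A minimal, self-contained sketch of that retry idiom (hypothetical map and shrink operation, not part of the import):

    import java.util.concurrent.ConcurrentHashMap;

    public class CasRetryExample {
        // Hypothetical map standing in for deviceMap; values stand in for devices.
        static final ConcurrentHashMap<Long, String> map =
                new ConcurrentHashMap<Long, String>();

        // Shrink the value for 'key', retrying if another thread changed it.
        static void shrink(Long key) {
            String current = map.get(key);
            while (current != null) {
                String smaller = current.substring(0, current.length() / 2);
                if (smaller.isEmpty()) {
                    // Remove only if the mapping still points at 'current'.
                    if (map.remove(key, current)) return;
                } else {
                    // Replace only if the mapping still points at 'current'.
                    if (map.replace(key, current, smaller)) return;
                }
                current = map.get(key); // concurrent modification; re-read and retry
            }
        }

        public static void main(String[] args) {
            map.put(1L, "abcdefgh");
            shrink(1L);
            System.out.println(map.get(1L)); // abcd
        }
    }
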
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/devicemanager/internal/DeviceMultiIndex.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/devicemanager/internal/DeviceMultiIndex.java
new file mode 100644
index 0000000..c6aa980
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/devicemanager/internal/DeviceMultiIndex.java
@@ -0,0 +1,108 @@
+/**
+*    Copyright 2012 Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.devicemanager.internal;
+
+import java.util.Collection;
+import java.util.Collections;
+import java.util.EnumSet;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+
+import net.floodlightcontroller.devicemanager.IDeviceService.DeviceField;
+import net.floodlightcontroller.util.IterableIterator;
+
+/**
+ * An index that maps key fields of an entity to device keys, with multiple
+ * device keys allowed per entity
+ */
+public class DeviceMultiIndex extends DeviceIndex {
+    /**
+     * The index
+     */
+    private ConcurrentHashMap<IndexedEntity, Collection<Long>> index;
+
+    /**
+     * @param keyFields
+     */
+    public DeviceMultiIndex(EnumSet<DeviceField> keyFields) {
+        super(keyFields);
+        index = new ConcurrentHashMap<IndexedEntity, Collection<Long>>();
+    }
+
+    // ***********
+    // DeviceIndex
+    // ***********
+
+    @Override
+    public Iterator<Long> queryByEntity(Entity entity) {
+        IndexedEntity ie = new IndexedEntity(keyFields, entity);
+        Collection<Long> devices = index.get(ie);
+        if (devices != null)
+            return devices.iterator();
+        
+        return Collections.<Long>emptySet().iterator();
+    }
+    
+    @Override
+    public Iterator<Long> getAll() {
+        Iterator<Collection<Long>> iter = index.values().iterator();
+        return new IterableIterator<Long>(iter);
+    }
+    
+    @Override
+    public boolean updateIndex(Device device, Long deviceKey) {
+        for (Entity e : device.entities) {
+            updateIndex(e, deviceKey);
+        }
+        return true;
+    }
+    
+    @Override
+    public void updateIndex(Entity entity, Long deviceKey) {
+        Collection<Long> devices = null;
+
+        IndexedEntity ie = new IndexedEntity(keyFields, entity);
+        if (!ie.hasNonNullKeys()) return;
+
+        devices = index.get(ie);
+        if (devices == null) {
+            Map<Long,Boolean> chm = new ConcurrentHashMap<Long,Boolean>();
+            devices = Collections.newSetFromMap(chm);
+            Collection<Long> r = index.putIfAbsent(ie, devices);
+            if (r != null)
+                devices = r;
+        }
+        
+        devices.add(deviceKey);
+    }
+
+    @Override
+    public void removeEntity(Entity entity) {
+        IndexedEntity ie = new IndexedEntity(keyFields, entity);
+        index.remove(ie);        
+    }
+
+    @Override
+    public void removeEntity(Entity entity, Long deviceKey) {
+        IndexedEntity ie = new IndexedEntity(keyFields, entity);
+        Collection<Long> devices = index.get(ie);
+        if (devices != null)
+            devices.remove(deviceKey);
+    }
+}
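
Editor's note: updateIndex(Entity, Long) above builds the per-key collection lazily with Collections.newSetFromMap over a ConcurrentHashMap and publishes it with putIfAbsent, so two threads racing on the same key end up sharing a single set. A minimal sketch of that idiom (hypothetical String keys, not part of the import):

    import java.util.Collection;
    import java.util.Collections;
    import java.util.concurrent.ConcurrentHashMap;

    public class MultiIndexIdiom {
        private final ConcurrentHashMap<String, Collection<Long>> index =
                new ConcurrentHashMap<String, Collection<Long>>();

        public void add(String key, Long deviceKey) {
            Collection<Long> devices = index.get(key);
            if (devices == null) {
                // Concurrent set backed by a ConcurrentHashMap, as in DeviceMultiIndex.
                Collection<Long> created = Collections.newSetFromMap(
                        new ConcurrentHashMap<Long, Boolean>());
                Collection<Long> existing = index.putIfAbsent(key, created);
                // If another thread won the race, use its set instead of ours.
                devices = (existing != null) ? existing : created;
            }
            devices.add(deviceKey);
        }

        public static void main(String[] args) {
            MultiIndexIdiom idx = new MultiIndexIdiom();
            idx.add("mac=00:11:22:33:44:55", 1L);
            idx.add("mac=00:11:22:33:44:55", 2L);
            System.out.println(idx.index.get("mac=00:11:22:33:44:55").size()); // 2
        }
    }
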
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/devicemanager/internal/DeviceUniqueIndex.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/devicemanager/internal/DeviceUniqueIndex.java
new file mode 100644
index 0000000..4f2d3f8
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/devicemanager/internal/DeviceUniqueIndex.java
@@ -0,0 +1,116 @@
+/**
+*    Copyright 2012 Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.devicemanager.internal;
+
+import java.util.Collections;
+import java.util.EnumSet;
+import java.util.Iterator;
+import java.util.concurrent.ConcurrentHashMap;
+
+import net.floodlightcontroller.devicemanager.IDeviceService.DeviceField;
+
+/**
+ * An index that maps key fields of an entity uniquely to a device key
+ */
+public class DeviceUniqueIndex extends DeviceIndex {
+    /**
+     * The index
+     */
+    private ConcurrentHashMap<IndexedEntity, Long> index;
+
+    /**
+     * Construct a new device index using the provided key fields
+     * @param keyFields the key fields to use
+     */
+    public DeviceUniqueIndex(EnumSet<DeviceField> keyFields) {
+        super(keyFields);
+        index = new ConcurrentHashMap<IndexedEntity, Long>();
+    }
+
+    // ***********
+    // DeviceIndex
+    // ***********
+
+    @Override
+    public Iterator<Long> queryByEntity(Entity entity) {
+        final Long deviceKey = findByEntity(entity);
+        if (deviceKey != null)
+            return Collections.<Long>singleton(deviceKey).iterator();
+        
+        return Collections.<Long>emptySet().iterator();
+    }
+    
+    @Override
+    public Iterator<Long> getAll() {
+        return index.values().iterator();
+    }
+
+    @Override
+    public boolean updateIndex(Device device, Long deviceKey) {
+        for (Entity e : device.entities) {
+            IndexedEntity ie = new IndexedEntity(keyFields, e);
+            if (!ie.hasNonNullKeys()) continue;
+
+            Long ret = index.putIfAbsent(ie, deviceKey);
+            if (ret != null && !ret.equals(deviceKey)) {
+                // If the return value is non-null, then fail the insert 
+                // (this implies that a device using this entity has 
+                // already been created in another thread).
+                return false;
+            }
+        }
+        return true;
+    }
+    
+    @Override
+    public void updateIndex(Entity entity, Long deviceKey) {
+        IndexedEntity ie = new IndexedEntity(keyFields, entity);
+        if (!ie.hasNonNullKeys()) return;
+        index.put(ie, deviceKey);
+    }
+
+    @Override
+    public void removeEntity(Entity entity) {
+        IndexedEntity ie = new IndexedEntity(keyFields, entity);
+        index.remove(ie);
+    }
+
+    @Override
+    public void removeEntity(Entity entity, Long deviceKey) {
+        IndexedEntity ie = new IndexedEntity(keyFields, entity);
+        index.remove(ie, deviceKey);
+    }
+
+    // **************
+    // Public Methods
+    // **************
+
+    /**
+     * Look up a {@link Device} based on the provided {@link Entity}.
+     * @param entity the entity to search for
+     * @return The key for the {@link Device} object if found
+     */
+    public Long findByEntity(Entity entity) {
+        IndexedEntity ie = new IndexedEntity(keyFields, entity);
+        Long deviceKey = index.get(ie);
+        if (deviceKey == null)
+            return null;
+        return deviceKey;
+    }
+
+}
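
Editor's note: the uniqueness guarantee in updateIndex(Device, Long) above comes from putIfAbsent: the call either claims the key or returns the device key that already owns it, in which case the insert is failed. A minimal sketch of that check (hypothetical String keys, not part of the import):

    import java.util.concurrent.ConcurrentHashMap;

    public class UniqueClaimExample {
        public static void main(String[] args) {
            ConcurrentHashMap<String, Long> index =
                    new ConcurrentHashMap<String, Long>();

            // First writer claims the key: putIfAbsent returns null.
            Long first = index.putIfAbsent("mac=00:11|vlan=5", 1L);
            // Second writer sees the existing owner: putIfAbsent returns 1,
            // so an insert on behalf of device 2 would be rejected.
            Long second = index.putIfAbsent("mac=00:11|vlan=5", 2L);

            System.out.println(first == null);                  // true
            System.out.println(second);                         // 1
            System.out.println(index.get("mac=00:11|vlan=5"));  // 1
        }
    }
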
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/devicemanager/internal/Entity.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/devicemanager/internal/Entity.java
new file mode 100644
index 0000000..36c5471
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/devicemanager/internal/Entity.java
@@ -0,0 +1,279 @@
+/**
+*    Copyright 2011,2012, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.devicemanager.internal;
+
+import java.util.Date;
+
+import net.floodlightcontroller.core.web.serializers.IPv4Serializer;
+import net.floodlightcontroller.core.web.serializers.MACSerializer;
+import net.floodlightcontroller.core.web.serializers.DPIDSerializer;
+import net.floodlightcontroller.packet.IPv4;
+
+import org.codehaus.jackson.map.annotate.JsonSerialize;
+import org.openflow.util.HexString;
+
+/**
+ * An entity on the network is a visible trace of a device that corresponds
+ * to a packet received from a particular interface on the edge of a network,
+ * with a particular VLAN tag, and a particular MAC address, along with any
+ * other packet characteristics we might want to consider as helpful for
+ * disambiguating devices.
+ * 
+ * Entities are the most basic element of devices; devices consist of one or
+ * more entities.  Entities are immutable once created, except for the last
+ * seen timestamp.
+ *  
+ * @author readams
+ *
+ */
+public class Entity implements Comparable<Entity> {
+    /**
+     * Timeout for computing {@link Entity#activeSince}.
+     * @see {@link Entity#activeSince}
+     */
+    protected static int ACTIVITY_TIMEOUT = 30000;
+    
+    /**
+     * The MAC address associated with this entity
+     */
+    protected long macAddress;
+    
+    /**
+     * The IP address associated with this entity, or null if no IP learned
+     * from the network observation associated with this entity
+     */
+    protected Integer ipv4Address;
+    
+    /**
+     * The VLAN tag on this entity, or null if untagged
+     */
+    protected Short vlan;
+    
+    /**
+     * The DPID of the switch for the ingress point for this entity,
+     * or null if not present
+     */
+    protected Long switchDPID;
+    
+    /**
+     * The port number of the switch for the ingress point for this entity,
+     * or null if not present
+     */
+    protected Integer switchPort;
+    
+    /**
+     * The last time we observed this entity on the network
+     */
+    protected Date lastSeenTimestamp;
+
+    /**
+     * The time between {@link Entity#activeSince} and
+     * {@link Entity#lastSeenTimestamp} is a period of activity for this
+     * entity where it was observed repeatedly.  If, when the entity is
+     * observed, {@link Entity#activeSince} is longer ago than the activity
+     * timeout, {@link Entity#lastSeenTimestamp} and
+     * {@link Entity#activeSince} will be set to the current time.
+     */
+    protected Date activeSince;
+    
+    private int hashCode = 0;
+
+    // ************
+    // Constructors
+    // ************
+    
+    /**
+     * Create a new entity
+     * 
+     * @param macAddress
+     * @param vlan
+     * @param ipv4Address
+     * @param switchDPID
+     * @param switchPort
+     * @param lastSeenTimestamp
+     */
+    public Entity(long macAddress, Short vlan, 
+                  Integer ipv4Address, Long switchDPID, Integer switchPort, 
+                  Date lastSeenTimestamp) {
+        this.macAddress = macAddress;
+        this.ipv4Address = ipv4Address;
+        this.vlan = vlan;
+        this.switchDPID = switchDPID;
+        this.switchPort = switchPort;
+        this.lastSeenTimestamp = lastSeenTimestamp;
+        this.activeSince = lastSeenTimestamp;
+    }
+
+    // ***************
+    // Getters/Setters
+    // ***************
+
+    @JsonSerialize(using=MACSerializer.class)
+    public long getMacAddress() {
+        return macAddress;
+    }
+
+    @JsonSerialize(using=IPv4Serializer.class)
+    public Integer getIpv4Address() {
+        return ipv4Address;
+    }
+
+    public Short getVlan() {
+        return vlan;
+    }
+
+    @JsonSerialize(using=DPIDSerializer.class)
+    public Long getSwitchDPID() {
+        return switchDPID;
+    }
+
+    public Integer getSwitchPort() {
+        return switchPort;
+    }
+
+    public Date getLastSeenTimestamp() {
+        return lastSeenTimestamp;
+    }
+
+    /**
+     * Set the last seen timestamp and also update {@link Entity#activeSince}
+     * if appropriate
+     * @param lastSeenTimestamp the new last seen timestamp
+     * @see {@link Entity#activeSince}
+     */
+    public void setLastSeenTimestamp(Date lastSeenTimestamp) {
+        if (activeSince == null ||
+            (activeSince.getTime() +  ACTIVITY_TIMEOUT) <
+                lastSeenTimestamp.getTime())
+            this.activeSince = lastSeenTimestamp;
+        this.lastSeenTimestamp = lastSeenTimestamp;
+    }
+
+    public Date getActiveSince() {
+        return activeSince;
+    }
+
+    public void setActiveSince(Date activeSince) {
+        this.activeSince = activeSince;
+    }
+    
+    @Override
+    public int hashCode() {
+        if (hashCode != 0) return hashCode;
+        final int prime = 31;
+        hashCode = 1;
+        hashCode = prime * hashCode
+                 + ((ipv4Address == null) ? 0 : ipv4Address.hashCode());
+        hashCode = prime * hashCode + (int) (macAddress ^ (macAddress >>> 32));
+        hashCode = prime * hashCode
+                 + ((switchDPID == null) ? 0 : switchDPID.hashCode());
+        hashCode = prime * hashCode
+                 + ((switchPort == null) ? 0 : switchPort.hashCode());
+        hashCode = prime * hashCode + ((vlan == null) ? 0 : vlan.hashCode());
+        return hashCode;
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (this == obj) return true;
+        if (obj == null) return false;
+        if (getClass() != obj.getClass()) return false;
+        Entity other = (Entity) obj;
+        if (hashCode() != other.hashCode()) return false;
+        if (ipv4Address == null) {
+            if (other.ipv4Address != null) return false;
+        } else if (!ipv4Address.equals(other.ipv4Address)) return false;
+        if (macAddress != other.macAddress) return false;
+        if (switchDPID == null) {
+            if (other.switchDPID != null) return false;
+        } else if (!switchDPID.equals(other.switchDPID)) return false;
+        if (switchPort == null) {
+            if (other.switchPort != null) return false;
+        } else if (!switchPort.equals(other.switchPort)) return false;
+        if (vlan == null) {
+            if (other.vlan != null) return false;
+        } else if (!vlan.equals(other.vlan)) return false;
+        return true;
+    }
+
+    
+    
+    @Override
+    public String toString() {
+        StringBuilder builder = new StringBuilder();
+        builder.append("Entity [macAddress=");
+        builder.append(HexString.toHexString(macAddress, 6));
+        builder.append(", ipv4Address=");
+        builder.append(IPv4.fromIPv4Address(ipv4Address==null ?
+                       0 : ipv4Address.intValue()));
+        builder.append(", vlan=");
+        builder.append(vlan);
+        builder.append(", switchDPID=");
+        builder.append(switchDPID);
+        builder.append(", switchPort=");
+        builder.append(switchPort);
+        builder.append(", lastSeenTimestamp=");
+        builder.append(lastSeenTimestamp == null? "null" : lastSeenTimestamp.getTime());
+        builder.append(", activeSince=");
+        builder.append(activeSince == null? "null" : activeSince.getTime());
+        builder.append("]");
+        return builder.toString();
+    }
+
+    @Override
+    public int compareTo(Entity o) {
+        if (macAddress < o.macAddress) return -1;
+        if (macAddress > o.macAddress) return 1;
+
+        int r;
+        if (switchDPID == null)
+            r = o.switchDPID == null ? 0 : -1;
+        else if (o.switchDPID == null)
+            r = 1;
+        else
+            r = switchDPID.compareTo(o.switchDPID);
+        if (r != 0) return r;
+
+        if (switchPort == null)
+            r = o.switchPort == null ? 0 : -1;
+        else if (o.switchPort == null)
+            r = 1;
+        else
+            r = switchPort.compareTo(o.switchPort);
+        if (r != 0) return r;
+
+        if (ipv4Address == null)
+            r = o.ipv4Address == null ? 0 : -1;
+        else if (o.ipv4Address == null)
+            r = 1;
+        else
+            r = ipv4Address.compareTo(o.ipv4Address);
+        if (r != 0) return r;
+
+        if (vlan == null)
+            r = o.vlan == null ? 0 : -1;
+        else if (o.vlan == null)
+            r = 1;
+        else
+            r = vlan.compareTo(o.vlan);
+        if (r != 0) return r;
+
+        return 0;
+    }
+    
+}
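
Editor's note: a null field on an Entity means "not learned from this observation", and compareTo() orders a null field before a non-null one (after comparing the MAC). A small usage sketch (hypothetical values, not part of the import), assuming the class is on the classpath:

    import java.util.Date;

    import net.floodlightcontroller.devicemanager.internal.Entity;

    public class EntityOrderingExample {
        public static void main(String[] args) {
            Date now = new Date();
            // Same MAC and attachment point; only one entity carries a VLAN tag.
            Entity tagged   = new Entity(0x001122334455L, (short) 10, null, 1L, 1, now);
            Entity untagged = new Entity(0x001122334455L, null, null, 1L, 1, now);

            System.out.println(untagged.compareTo(tagged) < 0); // true: null vlan sorts first
            System.out.println(untagged.equals(tagged));        // false: vlans differ
        }
    }
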
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/devicemanager/internal/IndexedEntity.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/devicemanager/internal/IndexedEntity.java
new file mode 100644
index 0000000..3e0829d
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/devicemanager/internal/IndexedEntity.java
@@ -0,0 +1,155 @@
+package net.floodlightcontroller.devicemanager.internal;
+
+import java.util.EnumSet;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import net.floodlightcontroller.devicemanager.IDeviceService;
+import net.floodlightcontroller.devicemanager.IDeviceService.DeviceField;
+
+
+/**
+ * This is a thin wrapper around {@link Entity} that allows overriding
+ * the behavior of {@link Object#hashCode()} and {@link Object#equals(Object)}
+ * so that the keying behavior in a hash map can be changed dynamically
+ * @author readams
+ */
+public class IndexedEntity {
+    protected EnumSet<DeviceField> keyFields;
+    protected Entity entity;
+    private int hashCode = 0;
+    protected static Logger logger =
+            LoggerFactory.getLogger(IndexedEntity.class);
+    /**
+     * Create a new {@link IndexedEntity} for the given {@link Entity} using 
+     * the provided key fields.
+     * @param keyFields The key fields that will be used for computing
+     * {@link IndexedEntity#hashCode()} and {@link IndexedEntity#equals(Object)}
+     * @param entity the entity to wrap
+     */
+    public IndexedEntity(EnumSet<DeviceField> keyFields, Entity entity) {
+        super();
+        this.keyFields = keyFields;
+        this.entity = entity;
+    }
+
+    /**
+     * Check whether this entity has non-null values in any of its key fields
+     * @return true if any key fields have a non-null value
+     */
+    public boolean hasNonNullKeys() {
+        for (DeviceField f : keyFields) {
+            switch (f) {
+                case MAC:
+                    return true;
+                case IPV4:
+                    if (entity.ipv4Address != null) return true;
+                    break;
+                case SWITCH:
+                    if (entity.switchDPID != null) return true;
+                    break;
+                case PORT:
+                    if (entity.switchPort != null) return true;
+                    break;
+                case VLAN:
+                    if (entity.vlan != null) return true;
+                    break;
+            }
+        }
+        return false;
+    }
+    
+    @Override
+    public int hashCode() {
+        if (hashCode != 0) {
+            return hashCode;
+        }
+
+        final int prime = 31;
+        hashCode = 1;
+        for (DeviceField f : keyFields) {
+            switch (f) {
+                case MAC:
+                    hashCode = prime * hashCode
+                        + (int) (entity.macAddress ^ 
+                                (entity.macAddress >>> 32));
+                    break;
+                case IPV4:
+                    hashCode = prime * hashCode
+                        + ((entity.ipv4Address == null) 
+                            ? 0 
+                            : entity.ipv4Address.hashCode());
+                    break;
+                case SWITCH:
+                    hashCode = prime * hashCode
+                        + ((entity.switchDPID == null) 
+                            ? 0 
+                            : entity.switchDPID.hashCode());
+                    break;
+                case PORT:
+                    hashCode = prime * hashCode
+                        + ((entity.switchPort == null) 
+                            ? 0 
+                            : entity.switchPort.hashCode());
+                    break;
+                case VLAN:
+                    hashCode = prime * hashCode 
+                        + ((entity.vlan == null) 
+                            ? 0 
+                            : entity.vlan.hashCode());
+                    break;
+            }
+        }
+        return hashCode;
+    }
+    
+    @Override
+    public boolean equals(Object obj) {
+        if (this == obj) return true;
+        if (obj == null) return false;
+        if (getClass() != obj.getClass()) return false;
+        IndexedEntity other = (IndexedEntity) obj;
+        
+        if (!keyFields.equals(other.keyFields))
+            return false;
+
+        for (IDeviceService.DeviceField f : keyFields) {
+            switch (f) {
+                case MAC:
+                    if (entity.macAddress != other.entity.macAddress)
+                        return false;
+                    break;
+                case IPV4:
+                    if (entity.ipv4Address == null) {
+                        if (other.entity.ipv4Address != null) return false;
+                    } else if (!entity.ipv4Address.
+                            equals(other.entity.ipv4Address)) return false;
+                    break;
+                case SWITCH:
+                    if (entity.switchDPID == null) {
+                        if (other.entity.switchDPID != null) return false;
+                    } else if (!entity.switchDPID.
+                            equals(other.entity.switchDPID)) return false;
+                    break;
+                case PORT:
+                    if (entity.switchPort == null) {
+                        if (other.entity.switchPort != null) return false;
+                    } else if (!entity.switchPort.
+                            equals(other.entity.switchPort)) return false;
+                    break;
+                case VLAN:
+                    if (entity.vlan == null) {
+                        if (other.entity.vlan != null) return false;
+                    } else if (!entity.vlan.
+                            equals(other.entity.vlan)) return false;
+                    break;
+            }
+        }
+        
+        return true;
+    }
+    
+    
+}
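
Editor's note: the point of IndexedEntity is that hashCode() and equals() consider only the configured key fields, so the same entities collapse to one key or stay distinct depending on the index. A small sketch (hypothetical values, not part of the import):

    import java.util.Date;
    import java.util.EnumSet;

    import net.floodlightcontroller.devicemanager.IDeviceService.DeviceField;
    import net.floodlightcontroller.devicemanager.internal.Entity;
    import net.floodlightcontroller.devicemanager.internal.IndexedEntity;

    public class IndexedEntityExample {
        public static void main(String[] args) {
            // Two observations of the same MAC on different VLANs.
            Entity a = new Entity(0x001122334455L, (short) 10, null, null, null, new Date());
            Entity b = new Entity(0x001122334455L, (short) 20, null, null, null, new Date());

            EnumSet<DeviceField> macOnly = EnumSet.of(DeviceField.MAC);
            EnumSet<DeviceField> macVlan = EnumSet.of(DeviceField.MAC, DeviceField.VLAN);

            // Keyed on MAC alone the two entities are the same key...
            System.out.println(new IndexedEntity(macOnly, a)
                    .equals(new IndexedEntity(macOnly, b))); // true
            // ...keyed on MAC+VLAN they stay distinct.
            System.out.println(new IndexedEntity(macVlan, a)
                    .equals(new IndexedEntity(macVlan, b))); // false
        }
    }
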
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/devicemanager/web/AbstractDeviceResource.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/devicemanager/web/AbstractDeviceResource.java
new file mode 100644
index 0000000..58e79e4
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/devicemanager/web/AbstractDeviceResource.java
@@ -0,0 +1,197 @@
+/**
+*    Copyright 2012, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.devicemanager.web;
+
+import java.util.Iterator;
+
+import net.floodlightcontroller.devicemanager.IDevice;
+import net.floodlightcontroller.devicemanager.IDeviceService;
+import net.floodlightcontroller.devicemanager.SwitchPort;
+import net.floodlightcontroller.devicemanager.internal.Device;
+import net.floodlightcontroller.packet.IPv4;
+import net.floodlightcontroller.util.FilterIterator;
+
+import org.openflow.util.HexString;
+import org.restlet.data.Form;
+import org.restlet.data.Status;
+import org.restlet.resource.ServerResource;
+
+/**
+ * Resource for querying and displaying devices that exist in the system
+ */
+public abstract class AbstractDeviceResource extends ServerResource {
+    public static final String MAC_ERROR = 
+            "Invalid MAC address: must be a 48-bit quantity, " + 
+            "expressed in hex as AA:BB:CC:DD:EE:FF";
+    public static final String VLAN_ERROR = 
+            "Invalid VLAN: must be an integer in the range 0-4095";
+    public static final String IPV4_ERROR = 
+            "Invalid IPv4 address: must be in dotted decimal format, " +
+            "e.g. 234.0.59.1";
+    public static final String DPID_ERROR = 
+            "Invalid Switch DPID: must be a 64-bit quantity, expressed in " + 
+            "hex as AA:BB:CC:DD:EE:FF:00:11";
+    public static final String PORT_ERROR = 
+            "Invalid Port: must be a positive integer";
+    
+    public Iterator<? extends IDevice> getDevices() {
+        IDeviceService deviceManager = 
+                (IDeviceService)getContext().getAttributes().
+                    get(IDeviceService.class.getCanonicalName());  
+                
+        Long macAddress = null;
+        Short vlan = null;
+        Integer ipv4Address = null;
+        Long switchDPID = null;
+        Integer switchPort = null;
+        
+        Form form = getQuery();
+        String macAddrStr = form.getFirstValue("mac", true);
+        String vlanStr = form.getFirstValue("vlan", true);
+        String ipv4Str = form.getFirstValue("ipv4", true);
+        String dpid = form.getFirstValue("dpid", true);
+        String port = form.getFirstValue("port", true);
+        
+        if (macAddrStr != null) {
+            try {
+                macAddress = HexString.toLong(macAddrStr);
+            } catch (Exception e) {
+                setStatus(Status.CLIENT_ERROR_BAD_REQUEST, MAC_ERROR);
+                return null;
+            }
+        }
+        if (vlanStr != null) {
+            try {
+                vlan = Short.parseShort(vlanStr);
+                if (vlan > 4095 || vlan < 0) {
+                    setStatus(Status.CLIENT_ERROR_BAD_REQUEST, VLAN_ERROR);
+                    return null;
+                }
+            } catch (Exception e) {
+                setStatus(Status.CLIENT_ERROR_BAD_REQUEST, VLAN_ERROR);
+                return null;
+            }
+        }
+        if (ipv4Str != null) {
+            try {
+                ipv4Address = IPv4.toIPv4Address(ipv4Str);
+            } catch (Exception e) {
+                setStatus(Status.CLIENT_ERROR_BAD_REQUEST, IPV4_ERROR);
+                return null;
+            }
+        }
+        if (dpid != null) {
+            try {
+                switchDPID = HexString.toLong(dpid);
+            } catch (Exception e) {
+                setStatus(Status.CLIENT_ERROR_BAD_REQUEST, DPID_ERROR);
+                return null;
+            }
+        }
+        if (port != null) {
+            try {
+                switchPort = Integer.parseInt(port);
+                if (switchPort < 0) {
+                    setStatus(Status.CLIENT_ERROR_BAD_REQUEST, PORT_ERROR);
+                    return null;
+                }
+            } catch (Exception e) {
+                setStatus(Status.CLIENT_ERROR_BAD_REQUEST, PORT_ERROR);
+                return null;
+            }
+        }
+        
+        @SuppressWarnings("unchecked")
+        Iterator<Device> diter = (Iterator<Device>)
+                deviceManager.queryDevices(macAddress, 
+                                           vlan, 
+                                           ipv4Address, 
+                                           switchDPID, 
+                                           switchPort);
+        
+        final String macStartsWith = 
+                form.getFirstValue("mac__startswith", true);
+        final String vlanStartsWith = 
+                form.getFirstValue("vlan__startswith", true);
+        final String ipv4StartsWith = 
+                form.getFirstValue("ipv4__startswith", true);
+        final String dpidStartsWith = 
+                form.getFirstValue("dpid__startswith", true);
+        final String portStartsWith = 
+                form.getFirstValue("port__startswith", true);
+        
+        return new FilterIterator<Device>(diter) {
+            @Override
+            protected boolean matches(Device value) {
+                if (macStartsWith != null) {
+                    if (!value.getMACAddressString().startsWith(macStartsWith))
+                        return false;
+                }
+                if (vlanStartsWith != null) {
+                    boolean match = false;
+                    for (Short v : value.getVlanId()) {
+                        if (v != null && 
+                            v.toString().startsWith(vlanStartsWith)) {
+                            match = true;
+                            break;
+                        }
+                    }
+                    if (!match) return false;
+                }
+                if (ipv4StartsWith != null) {
+                    boolean match = false;
+                    for (Integer v : value.getIPv4Addresses()) {
+                        if (v == null) continue;
+                        String str = IPv4.fromIPv4Address(v);
+                        if (str.startsWith(ipv4StartsWith)) {
+                            match = true;
+                            break;
+                        }
+                    }
+                    if (!match) return false;
+                }
+                if (dpidStartsWith != null) {
+                    boolean match = false;
+                    for (SwitchPort v : value.getAttachmentPoints(true)) {
+                        if (v == null) continue;
+                        String str =
+                                HexString.toHexString(v.getSwitchDPID(), 8);
+                        if (str.startsWith(dpidStartsWith)) {
+                            match = true;
+                            break;
+                        }
+                    }
+                    if (!match) return false;
+                }
+                if (portStartsWith != null) {
+                    boolean match = false;
+                    for (SwitchPort v : value.getAttachmentPoints(true)) {
+                        if (v == null) continue;
+                        String str = Integer.toString(v.getPort());
+                        if (str.startsWith(portStartsWith)) {
+                            match = true;
+                            break;
+                        }
+                    }
+                    if (!match) return false;
+                }
+                return true;
+            }
+        };
+    }
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/devicemanager/web/DeviceEntityResource.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/devicemanager/web/DeviceEntityResource.java
new file mode 100644
index 0000000..2783a26
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/devicemanager/web/DeviceEntityResource.java
@@ -0,0 +1,55 @@
+/**
+*    Copyright 2012, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.devicemanager.web;
+
+import java.util.Iterator;
+
+import net.floodlightcontroller.devicemanager.IDevice;
+import net.floodlightcontroller.devicemanager.internal.Device;
+import net.floodlightcontroller.devicemanager.internal.Entity;
+
+import org.restlet.resource.Get;
+
+/**
+ * Resource for querying and displaying internal debug information on
+ * network entities associated with devices
+ */
+public class DeviceEntityResource extends AbstractDeviceResource {
+    @Get("json")
+    public Iterator<Entity[]> getDeviceEntities() {
+        final Iterator<? extends IDevice> devices = super.getDevices();
+        return new Iterator<Entity[]>() {
+
+            @Override
+            public boolean hasNext() {
+                return devices.hasNext();
+            }
+
+            @Override
+            public Entity[] next() {
+                Device d = (Device)devices.next();
+                return d.getEntities();
+            }
+
+            @Override
+            public void remove() {
+                throw new UnsupportedOperationException();
+            }
+        };
+    }
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/devicemanager/web/DeviceResource.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/devicemanager/web/DeviceResource.java
new file mode 100644
index 0000000..c479af0
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/devicemanager/web/DeviceResource.java
@@ -0,0 +1,33 @@
+/**
+*    Copyright 2012, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.devicemanager.web;
+
+import java.util.Iterator;
+
+import net.floodlightcontroller.devicemanager.IDevice;
+import org.restlet.resource.Get;
+
+/**
+ * Resource for querying and displaying devices that exist in the system
+ */
+public class DeviceResource extends AbstractDeviceResource {
+    @Get("json")
+    public Iterator<? extends IDevice> getDevices() {
+        return super.getDevices();
+    }
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/devicemanager/web/DeviceRoutable.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/devicemanager/web/DeviceRoutable.java
new file mode 100644
index 0000000..9a76505
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/devicemanager/web/DeviceRoutable.java
@@ -0,0 +1,44 @@
+/**
+*    Copyright 2012, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.devicemanager.web;
+
+import org.restlet.Context;
+import org.restlet.Restlet;
+import org.restlet.routing.Router;
+
+import net.floodlightcontroller.restserver.RestletRoutable;
+
+/**
+ * Routable for device rest api
+ */
+public class DeviceRoutable implements RestletRoutable {
+
+    @Override
+    public String basePath() {
+        return "/wm/device";
+    }
+    
+    @Override
+    public Restlet getRestlet(Context context) {
+        Router router = new Router(context);
+        router.attach("/", DeviceResource.class);
+        router.attach("/debug", DeviceEntityResource.class);
+        return router;
+    }
+
+}
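
Editor's note: with this routable registered, the device list is served under /wm/device and the filters accepted by AbstractDeviceResource (mac, vlan, ipv4, dpid, port, and their *__startswith variants) are plain query parameters. A hedged client-side sketch; the controller host and port (localhost:8080) are assumptions not defined in this import:

    import java.io.BufferedReader;
    import java.io.InputStreamReader;
    import java.net.URL;

    public class DeviceQueryExample {
        public static void main(String[] args) throws Exception {
            // /wm/device comes from DeviceRoutable; mac__startswith from
            // AbstractDeviceResource. Host and port are assumptions.
            URL url = new URL(
                    "http://localhost:8080/wm/device?mac__startswith=00:11");
            BufferedReader in = new BufferedReader(
                    new InputStreamReader(url.openStream(), "UTF-8"));
            try {
                String line;
                while ((line = in.readLine()) != null)
                    System.out.println(line); // JSON array of matching devices
            } finally {
                in.close();
            }
        }
    }
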
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/devicemanager/web/DeviceSerializer.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/devicemanager/web/DeviceSerializer.java
new file mode 100644
index 0000000..66bdaef
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/devicemanager/web/DeviceSerializer.java
@@ -0,0 +1,70 @@
+/**
+*    Copyright 2012 Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.devicemanager.web;
+
+import java.io.IOException;
+
+import net.floodlightcontroller.devicemanager.SwitchPort;
+import net.floodlightcontroller.devicemanager.internal.Device;
+import net.floodlightcontroller.packet.IPv4;
+
+import org.codehaus.jackson.JsonGenerator;
+import org.codehaus.jackson.JsonProcessingException;
+import org.codehaus.jackson.map.JsonSerializer;
+import org.codehaus.jackson.map.SerializerProvider;
+import org.openflow.util.HexString;
+
+/**
+ * Serialize a device object
+ */
+public class DeviceSerializer extends JsonSerializer<Device> {
+
+    @Override
+    public void serialize(Device device, JsonGenerator jGen,
+                          SerializerProvider serializer) throws IOException,
+            JsonProcessingException {
+        jGen.writeStartObject();
+        
+        jGen.writeStringField("entityClass", device.getEntityClass().getName());
+        
+        jGen.writeArrayFieldStart("mac");
+        jGen.writeString(HexString.toHexString(device.getMACAddress(), 6));
+        jGen.writeEndArray();
+
+        jGen.writeArrayFieldStart("ipv4");
+        for (Integer ip : device.getIPv4Addresses())
+            jGen.writeString(IPv4.fromIPv4Address(ip));
+        jGen.writeEndArray();
+
+        jGen.writeArrayFieldStart("vlan");
+        for (Short vlan : device.getVlanId())
+            if (vlan >= 0)
+                jGen.writeNumber(vlan);
+        jGen.writeEndArray();
+        jGen.writeArrayFieldStart("attachmentPoint");
+        for (SwitchPort ap : device.getAttachmentPoints(true)) {
+            serializer.defaultSerializeValue(ap, jGen);
+        }
+        jGen.writeEndArray();
+
+        jGen.writeNumberField("lastSeen", device.getLastSeen().getTime());
+        
+        jGen.writeEndObject();
+    }
+
+}
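
Editor's note: for reference, serialize() above emits one JSON object per device containing an entityClass string, mac/ipv4/vlan/attachmentPoint arrays, and a lastSeen timestamp in epoch milliseconds, e.g. (hypothetical values; the shape of each attachmentPoint element is produced by SwitchPort's own serializer, which is not part of this file): {"entityClass": "...", "mac": ["00:11:22:33:44:55"], "ipv4": ["10.0.0.1"], "vlan": [10], "attachmentPoint": [...], "lastSeen": 1356998400000}.
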
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/firewall/Firewall.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/firewall/Firewall.java
new file mode 100644
index 0000000..3f8ff6c
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/firewall/Firewall.java
@@ -0,0 +1,667 @@
+package net.floodlightcontroller.firewall;
+
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+
+import org.openflow.protocol.OFMessage;
+import org.openflow.protocol.OFPacketIn;
+import org.openflow.protocol.OFType;
+
+import net.floodlightcontroller.core.FloodlightContext;
+import net.floodlightcontroller.core.IOFMessageListener;
+import net.floodlightcontroller.core.IOFSwitch;
+import net.floodlightcontroller.core.module.FloodlightModuleContext;
+import net.floodlightcontroller.core.module.FloodlightModuleException;
+import net.floodlightcontroller.core.module.IFloodlightModule;
+import net.floodlightcontroller.core.module.IFloodlightService;
+
+import net.floodlightcontroller.core.IFloodlightProviderService;
+import net.floodlightcontroller.devicemanager.IDeviceService;
+
+import java.util.ArrayList;
+import net.floodlightcontroller.packet.Ethernet;
+import net.floodlightcontroller.packet.IPv4;
+import net.floodlightcontroller.restserver.IRestApiService;
+import net.floodlightcontroller.routing.IRoutingDecision;
+import net.floodlightcontroller.routing.RoutingDecision;
+import net.floodlightcontroller.storage.IResultSet;
+import net.floodlightcontroller.storage.IStorageSourceService;
+import net.floodlightcontroller.storage.StorageException;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Stateless firewall implemented as a Google Summer of Code project.
+ * Configuration is done through the REST API.
+ * 
+ * @author Amer Tahir
+ * @edited KC Wang
+ */
+public class Firewall implements IFirewallService, IOFMessageListener,
+        IFloodlightModule {
+
+    // service modules needed
+    protected IFloodlightProviderService floodlightProvider;
+    protected IStorageSourceService storageSource;
+    protected IRestApiService restApi;
+    protected static Logger logger;
+
+    protected List<FirewallRule> rules; // protected by synchronized
+    protected boolean enabled;
+    protected int subnet_mask = IPv4.toIPv4Address("255.255.255.0");
+
+    // constant strings for storage/parsing
+    public static final String TABLE_NAME = "controller_firewallrules";
+    public static final String COLUMN_RULEID = "ruleid";
+    public static final String COLUMN_DPID = "dpid";
+    public static final String COLUMN_IN_PORT = "in_port";
+    public static final String COLUMN_DL_SRC = "dl_src";
+    public static final String COLUMN_DL_DST = "dl_dst";
+    public static final String COLUMN_DL_TYPE = "dl_type";
+    public static final String COLUMN_NW_SRC_PREFIX = "nw_src_prefix";
+    public static final String COLUMN_NW_SRC_MASKBITS = "nw_src_maskbits";
+    public static final String COLUMN_NW_DST_PREFIX = "nw_dst_prefix";
+    public static final String COLUMN_NW_DST_MASKBITS = "nw_dst_maskbits";
+    public static final String COLUMN_NW_PROTO = "nw_proto";
+    public static final String COLUMN_TP_SRC = "tp_src";
+    public static final String COLUMN_TP_DST = "tp_dst";
+    public static final String COLUMN_WILDCARD_DPID = "wildcard_dpid";
+    public static final String COLUMN_WILDCARD_IN_PORT = "wildcard_in_port";
+    public static final String COLUMN_WILDCARD_DL_SRC = "wildcard_dl_src";
+    public static final String COLUMN_WILDCARD_DL_DST = "wildcard_dl_dst";
+    public static final String COLUMN_WILDCARD_DL_TYPE = "wildcard_dl_type";
+    public static final String COLUMN_WILDCARD_NW_SRC = "wildcard_nw_src";
+    public static final String COLUMN_WILDCARD_NW_DST = "wildcard_nw_dst";
+    public static final String COLUMN_WILDCARD_NW_PROTO = "wildcard_nw_proto";
+    public static final String COLUMN_WILDCARD_TP_SRC = "wildcard_tp_src";
+    public static final String COLUMN_WILDCARD_TP_DST = "wildcard_tp_dst";
+    public static final String COLUMN_PRIORITY = "priority";
+    public static final String COLUMN_ACTION = "action";
+    public static String ColumnNames[] = { COLUMN_RULEID, COLUMN_DPID,
+            COLUMN_IN_PORT, COLUMN_DL_SRC, COLUMN_DL_DST, COLUMN_DL_TYPE,
+            COLUMN_NW_SRC_PREFIX, COLUMN_NW_SRC_MASKBITS, COLUMN_NW_DST_PREFIX,
+            COLUMN_NW_DST_MASKBITS, COLUMN_NW_PROTO, COLUMN_TP_SRC,
+            COLUMN_TP_DST, COLUMN_WILDCARD_DPID, COLUMN_WILDCARD_IN_PORT,
+            COLUMN_WILDCARD_DL_SRC, COLUMN_WILDCARD_DL_DST,
+            COLUMN_WILDCARD_DL_TYPE, COLUMN_WILDCARD_NW_SRC,
+            COLUMN_WILDCARD_NW_DST, COLUMN_WILDCARD_NW_PROTO, COLUMN_PRIORITY,
+            COLUMN_ACTION };
+
+    @Override
+    public String getName() {
+        return "firewall";
+    }
+
+    @Override
+    public boolean isCallbackOrderingPrereq(OFType type, String name) {
+        // no prereq
+        return false;
+    }
+
+    @Override
+    public boolean isCallbackOrderingPostreq(OFType type, String name) {
+        return (type.equals(OFType.PACKET_IN) && name.equals("forwarding"));
+    }
+
+    @Override
+    public Collection<Class<? extends IFloodlightService>> getModuleServices() {
+        Collection<Class<? extends IFloodlightService>> l = new ArrayList<Class<? extends IFloodlightService>>();
+        l.add(IFirewallService.class);
+        return l;
+    }
+
+    @Override
+    public Map<Class<? extends IFloodlightService>, IFloodlightService> getServiceImpls() {
+        Map<Class<? extends IFloodlightService>, IFloodlightService> m = new HashMap<Class<? extends IFloodlightService>, IFloodlightService>();
+        // We are the class that implements the service
+        m.put(IFirewallService.class, this);
+        return m;
+    }
+
+    @Override
+    public Collection<Class<? extends IFloodlightService>> getModuleDependencies() {
+        Collection<Class<? extends IFloodlightService>> l = new ArrayList<Class<? extends IFloodlightService>>();
+        l.add(IFloodlightProviderService.class);
+        l.add(IStorageSourceService.class);
+        l.add(IRestApiService.class);
+        return l;
+    }
+
+    /**
+     * Reads the rules from the storage and creates a sorted arraylist of
+     * FirewallRule from them.
+     * 
+     * Similar to getStorageRules(), which only reads contents for REST GET and
+     * does no parsing, checking, nor putting into FirewallRule objects
+     * 
+     * @return the sorted arraylist of FirewallRule instances (rules from
+     *         storage)
+     */
+    protected ArrayList<FirewallRule> readRulesFromStorage() {
+        ArrayList<FirewallRule> l = new ArrayList<FirewallRule>();
+
+        try {
+            Map<String, Object> row;
+
+            // (..., null, null) for no predicate, no ordering
+            IResultSet resultSet = storageSource.executeQuery(TABLE_NAME,
+                    ColumnNames, null, null);
+
+            // put retrieved rows into FirewallRules
+            for (Iterator<IResultSet> it = resultSet.iterator(); it.hasNext();) {
+                row = it.next().getRow();
+                // now, parse row
+                FirewallRule r = new FirewallRule();
+                if (!row.containsKey(COLUMN_RULEID)
+                        || !row.containsKey(COLUMN_DPID)) {
+                    logger.error(
+                            "skipping entry with missing required 'ruleid' or 'dpid' entry: {}",
+                            row);
+                    continue;
+                }
+                try {
+                    r.ruleid = Integer
+                            .parseInt((String) row.get(COLUMN_RULEID));
+                    r.dpid = Long.parseLong((String) row.get(COLUMN_DPID));
+
+                    for (String key : row.keySet()) {
+                        if (row.get(key) == null)
+                            continue;
+                        if (key.equals(COLUMN_RULEID)
+                                || key.equals(COLUMN_DPID)
+                                || key.equals("id")) {
+                            continue; // already handled
+                        } 
+                        
+                        else if (key.equals(COLUMN_IN_PORT)) {
+                            r.in_port = Short.parseShort((String) row
+                                    .get(COLUMN_IN_PORT));
+                        } 
+                        
+                        else if (key.equals(COLUMN_DL_SRC)) {
+                            r.dl_src = Long.parseLong((String) row
+                                    .get(COLUMN_DL_SRC));
+                        } 
+                        
+                        else if (key.equals(COLUMN_DL_DST)) {
+                            r.dl_dst = Long.parseLong((String) row
+                                    .get(COLUMN_DL_DST));
+                        } 
+                        
+                        else if (key.equals(COLUMN_DL_TYPE)) {
+                            r.dl_type = Short.parseShort((String) row
+                                    .get(COLUMN_DL_TYPE));
+                        } 
+                        
+                        else if (key.equals(COLUMN_NW_SRC_PREFIX)) {
+                            r.nw_src_prefix = Integer.parseInt((String) row
+                                    .get(COLUMN_NW_SRC_PREFIX));
+                        } 
+                        
+                        else if (key.equals(COLUMN_NW_SRC_MASKBITS)) {
+                            r.nw_src_maskbits = Integer.parseInt((String) row
+                                    .get(COLUMN_NW_SRC_MASKBITS));
+                        } 
+                        
+                        else if (key.equals(COLUMN_NW_DST_PREFIX)) {
+                            r.nw_dst_prefix = Integer.parseInt((String) row
+                                    .get(COLUMN_NW_DST_PREFIX));
+                        } 
+                        
+                        else if (key.equals(COLUMN_NW_DST_MASKBITS)) {
+                            r.nw_dst_maskbits = Integer.parseInt((String) row
+                                    .get(COLUMN_NW_DST_MASKBITS));
+                        } 
+                        
+                        else if (key.equals(COLUMN_NW_PROTO)) {
+                            r.nw_proto = Short.parseShort((String) row
+                                    .get(COLUMN_NW_PROTO));
+                        } 
+                        
+                        else if (key.equals(COLUMN_TP_SRC)) {
+                            r.tp_src = Short.parseShort((String) row
+                                    .get(COLUMN_TP_SRC));
+                        } 
+                        
+                        else if (key.equals(COLUMN_TP_DST)) {
+                            r.tp_dst = Short.parseShort((String) row
+                                    .get(COLUMN_TP_DST));
+                        } 
+                        
+                        else if (key.equals(COLUMN_WILDCARD_DPID)) {
+                            r.wildcard_dpid = Boolean.parseBoolean((String) row
+                                    .get(COLUMN_WILDCARD_DPID));
+                        } 
+                        
+                        else if (key.equals(COLUMN_WILDCARD_IN_PORT)) {
+                            r.wildcard_in_port = Boolean
+                                    .parseBoolean((String) row
+                                            .get(COLUMN_WILDCARD_IN_PORT));
+                        } 
+                        
+                        else if (key.equals(COLUMN_WILDCARD_DL_SRC)) {
+                            r.wildcard_dl_src = Boolean
+                                    .parseBoolean((String) row
+                                            .get(COLUMN_WILDCARD_DL_SRC));
+                        } 
+                        
+                        else if (key.equals(COLUMN_WILDCARD_DL_DST)) {
+                            r.wildcard_dl_dst = Boolean
+                                    .parseBoolean((String) row
+                                            .get(COLUMN_WILDCARD_DL_DST));
+                        } 
+                        
+                        else if (key.equals(COLUMN_WILDCARD_DL_TYPE)) {
+                            r.wildcard_dl_type = Boolean
+                                    .parseBoolean((String) row
+                                            .get(COLUMN_WILDCARD_DL_TYPE));
+                        } 
+                        
+                        else if (key.equals(COLUMN_WILDCARD_NW_SRC)) {
+                            r.wildcard_nw_src = Boolean
+                                    .parseBoolean((String) row
+                                            .get(COLUMN_WILDCARD_NW_SRC));
+                        } 
+                        
+                        else if (key.equals(COLUMN_WILDCARD_NW_DST)) {
+                            r.wildcard_nw_dst = Boolean
+                                    .parseBoolean((String) row
+                                            .get(COLUMN_WILDCARD_NW_DST));
+                        } 
+                        
+                        else if (key.equals(COLUMN_WILDCARD_NW_PROTO)) {
+                            r.wildcard_nw_proto = Boolean
+                                    .parseBoolean((String) row
+                                            .get(COLUMN_WILDCARD_NW_PROTO));
+                        } 
+                        
+                        else if (key.equals(COLUMN_PRIORITY)) {
+                            r.priority = Integer.parseInt((String) row
+                                    .get(COLUMN_PRIORITY));
+                        } 
+                        
+                        else if (key.equals(COLUMN_ACTION)) {
+                            int tmp = Integer.parseInt((String) row.get(COLUMN_ACTION));
+                            if (tmp == FirewallRule.FirewallAction.DENY.ordinal())
+                                r.action = FirewallRule.FirewallAction.DENY;
+                            else if (tmp == FirewallRule.FirewallAction.ALLOW.ordinal())
+                                r.action = FirewallRule.FirewallAction.ALLOW;
+                            else {
+                                r.action = null;
+                                logger.error("action not recognized");
+                            }
+                        }
+                    }
+                } catch (ClassCastException e) {
+                    logger.error(
+                            "skipping rule {} with bad data : "
+                                    + e.getMessage(), r.ruleid);
+                } catch (NumberFormatException e) {
+                    logger.error(
+                            "skipping rule {} with malformed number : "
+                                    + e.getMessage(), r.ruleid);
+                }
+                if (r.action != null)
+                    l.add(r);
+            }
+        } catch (StorageException e) {
+            logger.error("failed to access storage: {}", e.getMessage());
+            // if the table doesn't exist, then wait to populate later via
+            // setStorageSource()
+        }
+
+        // now, sort the list based on priorities
+        Collections.sort(l);
+
+        return l;
+    }
+
+    @Override
+    public void init(FloodlightModuleContext context)
+            throws FloodlightModuleException {
+        floodlightProvider = context
+                .getServiceImpl(IFloodlightProviderService.class);
+        storageSource = context.getServiceImpl(IStorageSourceService.class);
+        restApi = context.getServiceImpl(IRestApiService.class);
+        rules = new ArrayList<FirewallRule>();
+        logger = LoggerFactory.getLogger(Firewall.class);
+
+        // start disabled
+        enabled = false;
+    }
+
+    @Override
+    public void startUp(FloodlightModuleContext context) {
+        // register REST interface
+        restApi.addRestletRoutable(new FirewallWebRoutable());
+
+        // always place firewall in pipeline at bootup
+        floodlightProvider.addOFMessageListener(OFType.PACKET_IN, this);
+
+        // storage, create table and read rules
+        storageSource.createTable(TABLE_NAME, null);
+        storageSource.setTablePrimaryKeyName(TABLE_NAME, COLUMN_RULEID);
+        synchronized (rules) {
+            this.rules = readRulesFromStorage();
+        }
+    }
+
+    @Override
+    public Command receive(IOFSwitch sw, OFMessage msg, FloodlightContext cntx) {
+        if (!this.enabled)
+            return Command.CONTINUE;
+
+        switch (msg.getType()) {
+        case PACKET_IN:
+            IRoutingDecision decision = null;
+            if (cntx != null) {
+                decision = IRoutingDecision.rtStore.get(cntx,
+                        IRoutingDecision.CONTEXT_DECISION);
+
+                return this.processPacketInMessage(sw, (OFPacketIn) msg,
+                        decision, cntx);
+            }
+            break;
+        default:
+            break;
+        }
+
+        return Command.CONTINUE;
+    }
+
+    @Override
+    public void enableFirewall(boolean enabled) {
+        logger.info("Setting firewall to {}", enabled);
+        this.enabled = enabled;
+    }
+
+    @Override
+    public List<FirewallRule> getRules() {
+        return this.rules;
+    }
+
+    // Only used to serve the REST GET.
+    // Unlike readRulesFromStorage(), this returns the raw storage rows and
+    // does not parse them into FirewallRule objects.
+    @Override
+    public List<Map<String, Object>> getStorageRules() {
+        ArrayList<Map<String, Object>> l = new ArrayList<Map<String, Object>>();
+        try {
+            // null1=no predicate, null2=no ordering
+            IResultSet resultSet = storageSource.executeQuery(TABLE_NAME,
+                    ColumnNames, null, null);
+            for (Iterator<IResultSet> it = resultSet.iterator(); it.hasNext();) {
+                l.add(it.next().getRow());
+            }
+        } catch (StorageException e) {
+            logger.error("failed to access storage: {}", e.getMessage());
+            // if the table doesn't exist, then wait to populate later via
+            // setStorageSource()
+        }
+        return l;
+    }
+
+    @Override
+    public String getSubnetMask() {
+        return IPv4.fromIPv4Address(this.subnet_mask);
+    }
+
+    @Override
+    public void setSubnetMask(String newMask) {
+        if (newMask.trim().isEmpty())
+            return;
+        this.subnet_mask = IPv4.toIPv4Address(newMask.trim());
+    }
+
+    @Override
+    public synchronized void addRule(FirewallRule rule) {
+        
+        // generate random ruleid for each newly created rule
+        // may want to return to caller if useful
+        // may want to check conflict
+        rule.ruleid = rule.genID();
+        
+        int i = 0;
+        // locate the position of the new rule in the sorted arraylist
+        for (i = 0; i < this.rules.size(); i++) {
+            if (this.rules.get(i).priority >= rule.priority)
+                break;
+        }
+        // now, add rule to the list at the computed position
+        // (i <= rules.size() always holds here, so the list stays sorted)
+        this.rules.add(i, rule);
+        // add rule to database
+        Map<String, Object> entry = new HashMap<String, Object>();
+        entry.put(COLUMN_RULEID, Integer.toString(rule.ruleid));
+        entry.put(COLUMN_DPID, Long.toString(rule.dpid));
+        entry.put(COLUMN_IN_PORT, Short.toString(rule.in_port));
+        entry.put(COLUMN_DL_SRC, Long.toString(rule.dl_src));
+        entry.put(COLUMN_DL_DST, Long.toString(rule.dl_dst));
+        entry.put(COLUMN_DL_TYPE, Short.toString(rule.dl_type));
+        entry.put(COLUMN_NW_SRC_PREFIX, Integer.toString(rule.nw_src_prefix));
+        entry.put(COLUMN_NW_SRC_MASKBITS, Integer.toString(rule.nw_src_maskbits));
+        entry.put(COLUMN_NW_DST_PREFIX, Integer.toString(rule.nw_dst_prefix));
+        entry.put(COLUMN_NW_DST_MASKBITS, Integer.toString(rule.nw_dst_maskbits));
+        entry.put(COLUMN_NW_PROTO, Short.toString(rule.nw_proto));
+        entry.put(COLUMN_TP_SRC, Integer.toString(rule.tp_src));
+        entry.put(COLUMN_TP_DST, Integer.toString(rule.tp_dst));
+        entry.put(COLUMN_WILDCARD_DPID,
+                Boolean.toString(rule.wildcard_dpid));
+        entry.put(COLUMN_WILDCARD_IN_PORT,
+                Boolean.toString(rule.wildcard_in_port));
+        entry.put(COLUMN_WILDCARD_DL_SRC,
+                Boolean.toString(rule.wildcard_dl_src));
+        entry.put(COLUMN_WILDCARD_DL_DST,
+                Boolean.toString(rule.wildcard_dl_dst));
+        entry.put(COLUMN_WILDCARD_DL_TYPE,
+                Boolean.toString(rule.wildcard_dl_type));
+        entry.put(COLUMN_WILDCARD_NW_SRC,
+                Boolean.toString(rule.wildcard_nw_src));
+        entry.put(COLUMN_WILDCARD_NW_DST,
+                Boolean.toString(rule.wildcard_nw_dst));
+        entry.put(COLUMN_WILDCARD_NW_PROTO,
+                Boolean.toString(rule.wildcard_nw_proto));
+        entry.put(COLUMN_WILDCARD_TP_SRC,
+                Boolean.toString(rule.wildcard_tp_src));
+        entry.put(COLUMN_WILDCARD_TP_DST,
+                Boolean.toString(rule.wildcard_tp_dst));
+        entry.put(COLUMN_PRIORITY, Integer.toString(rule.priority));
+        entry.put(COLUMN_ACTION, Integer.toString(rule.action.ordinal()));
+        storageSource.insertRow(TABLE_NAME, entry);
+    }
+
+    @Override
+    public synchronized void deleteRule(int ruleid) {
+        Iterator<FirewallRule> iter = this.rules.iterator();
+        while (iter.hasNext()) {
+            FirewallRule r = iter.next();
+            if (r.ruleid == ruleid) {
+                // found the rule, now remove it
+                iter.remove();
+                break;
+            }
+        }
+        // delete from database
+        storageSource.deleteRow(TABLE_NAME, Integer.toString(ruleid));
+    }
+
+    /**
+     * Iterates over the firewall rules and tries to match them against the
+     * incoming packet (flow), using the FirewallRule class's matchesFlow
+     * method to perform the matching. It maintains a pair of wildcards (allow
+     * and deny) that is later assigned to the firewall's decision: the
+     * 'allow' wildcards are applied if the matched rule turns out to be an
+     * ALLOW rule, and the 'deny' wildcards are applied otherwise. Wildcards
+     * are applied to the firewall decision to optimize the flows installed in
+     * the switch, keeping the number of flow entries per firewall rule as
+     * small as possible. If a particular field is not "ANY" (i.e. not
+     * wildcarded) in a higher priority rule, then even if a lower priority
+     * rule matches the packet and wildcards that field, the field cannot be
+     * wildcarded in the switch's flow entry, because otherwise some packets
+     * matching the higher priority rule might escape the firewall. Two
+     * separate wildcards are kept because a field that is not wildcarded in a
+     * higher priority allow rule should not be wildcarded for packets
+     * matching a lower priority deny rule (non-wildcarded fields in higher
+     * priority rules override the wildcarding of those fields in lower
+     * priority rules of the opposite type). Iteration is performed over the
+     * list of rules sorted by priority; the first rule that matches wins.
+     * 
+     * @param sw
+     *            the switch instance
+     * @param pi
+     *            the incoming packet data structure
+     * @param cntx
+     *            the floodlight context
+     * @return an instance of RuleWildcardsPair that specifies the matching
+     *         rule (if any) and the wildcards for the firewall decision
+     */
+    protected RuleWildcardsPair matchWithRule(IOFSwitch sw, OFPacketIn pi,
+            FloodlightContext cntx) {
+        FirewallRule matched_rule = null;
+        Ethernet eth = IFloodlightProviderService.bcStore.get(cntx,
+                IFloodlightProviderService.CONTEXT_PI_PAYLOAD);
+        WildcardsPair wildcards = new WildcardsPair();
+
+        synchronized (rules) {
+            Iterator<FirewallRule> iter = this.rules.iterator();
+            FirewallRule rule = null;
+            // iterate through list to find a matching firewall rule
+            while (iter.hasNext()) {
+                // get next rule from list
+                rule = iter.next();
+
+                // check if rule matches
+                if (rule.matchesFlow(sw.getId(), pi.getInPort(), eth, wildcards) == true) {
+                    matched_rule = rule;
+                    break;
+                }
+            }
+        }
+
+        // make a pair of rule and wildcards, then return it
+        RuleWildcardsPair ret = new RuleWildcardsPair();
+        ret.rule = matched_rule;
+        if (matched_rule == null || matched_rule.action == FirewallRule.FirewallAction.DENY) {
+            ret.wildcards = wildcards.drop;
+        } else {
+            ret.wildcards = wildcards.allow;
+        }
+        return ret;
+    }
+
+    /**
+     * Checks whether an IP address is a broadcast address or not (determines
+     * using subnet mask)
+     * 
+     * @param IPAddress
+     *            the IP address to check
+     * @return true if it is a broadcast address, false otherwise
+     */
+    protected boolean IPIsBroadcast(int IPAddress) {
+        // inverted subnet mask
+        int inv_subnet_mask = ~this.subnet_mask;
+        return ((IPAddress & inv_subnet_mask) == inv_subnet_mask);
+    }
+
+    public Command processPacketInMessage(IOFSwitch sw, OFPacketIn pi,
+            IRoutingDecision decision, FloodlightContext cntx) {
+        Ethernet eth = IFloodlightProviderService.bcStore.get(cntx,
+                IFloodlightProviderService.CONTEXT_PI_PAYLOAD);
+
+        // Allow L2 broadcast and ARP broadcast requests, but deny malformed
+        // broadcasts (L2 broadcast carrying an L3 unicast destination)
+        if (eth.isBroadcast() == true) {
+            boolean allowBroadcast = true;
+            // determine whether this is an L2 broadcast carrying an L3
+            // unicast destination; if so, treat the packet as malformed and
+            // do not allow it
+            if (eth.getEtherType() == Ethernet.TYPE_IPv4
+                    && this.IPIsBroadcast(((IPv4) eth.getPayload())
+                            .getDestinationAddress()) == false) {
+                allowBroadcast = false;
+            }
+            if (allowBroadcast == true) {
+                if (logger.isTraceEnabled())
+                    logger.trace("Allowing broadcast traffic for PacketIn={}",
+                            pi);
+                                        
+                decision = new RoutingDecision(sw.getId(), pi.getInPort(),
+                        IDeviceService.fcStore.get(cntx,
+                                IDeviceService.CONTEXT_SRC_DEVICE),
+                        IRoutingDecision.RoutingAction.MULTICAST);
+                decision.addToContext(cntx);
+            } else {
+                if (logger.isTraceEnabled())
+                    logger.trace(
+                            "Blocking malformed broadcast traffic for PacketIn={}",
+                            pi);
+
+                decision = new RoutingDecision(sw.getId(), pi.getInPort(),
+                        IDeviceService.fcStore.get(cntx,
+                                IDeviceService.CONTEXT_SRC_DEVICE),
+                        IRoutingDecision.RoutingAction.DROP);
+                decision.addToContext(cntx);
+            }
+            return Command.CONTINUE;
+        }
+        /*
+         * ARP responses (unicast) can be let through without filtering
+         * through the rules by uncommenting the code below:
+         *
+         * else if (eth.getEtherType() == Ethernet.TYPE_ARP) {
+         *     logger.info("allowing ARP traffic");
+         *     decision = new FirewallDecision(
+         *             IRoutingDecision.RoutingAction.FORWARD_OR_FLOOD);
+         *     decision.addToContext(cntx);
+         *     return Command.CONTINUE;
+         * }
+         */
+
+        // check if we have a matching rule for this packet/flow
+        // and no decision is taken yet
+        if (decision == null) {
+            RuleWildcardsPair match_ret = this.matchWithRule(sw, pi, cntx);
+            FirewallRule rule = match_ret.rule;
+
+            if (rule == null || rule.action == FirewallRule.FirewallAction.DENY) {
+                decision = new RoutingDecision(sw.getId(), pi.getInPort(),
+                        IDeviceService.fcStore.get(cntx,
+                                IDeviceService.CONTEXT_SRC_DEVICE),
+                        IRoutingDecision.RoutingAction.DROP);
+                decision.setWildcards(match_ret.wildcards);
+                decision.addToContext(cntx);
+                if (logger.isTraceEnabled()) {
+                    if (rule == null)
+                        logger.trace(
+                                "No firewall rule found for PacketIn={}, blocking flow",
+                                pi);
+                    else if (rule.action == FirewallRule.FirewallAction.DENY) {
+                        logger.trace("Deny rule={} match for PacketIn={}",
+                                rule, pi);
+                    }
+                }
+            } else {
+                decision = new RoutingDecision(sw.getId(), pi.getInPort(),
+                        IDeviceService.fcStore.get(cntx,
+                                IDeviceService.CONTEXT_SRC_DEVICE),
+                        IRoutingDecision.RoutingAction.FORWARD_OR_FLOOD);
+                decision.setWildcards(match_ret.wildcards);
+                decision.addToContext(cntx);
+                if (logger.isTraceEnabled())
+                    logger.trace("Allow rule={} match for PacketIn={}", rule,
+                            pi);
+            }
+        }
+
+        return Command.CONTINUE;
+    }
+
+    @Override
+    public boolean isEnabled() {
+        return enabled;
+    }
+
+}
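The broadcast test in IPIsBroadcast() above checks whether all host bits of an address are set under the configured subnet mask. A minimal standalone sketch of the same test follows; it is illustrative only (the class name and the /24 mask value are assumptions, not part of the imported patch).

public class BroadcastCheckExample {
    // Mirrors Firewall.IPIsBroadcast(): broadcast <=> all host bits are 1.
    static boolean isBroadcast(int ip, int subnetMask) {
        int invMask = ~subnetMask;          // mask of the host bits
        return (ip & invMask) == invMask;
    }

    public static void main(String[] args) {
        int mask  = 0xFFFFFF00;             // 255.255.255.0 (example value)
        int bcast = 0x0A000AFF;             // 10.0.10.255 -> directed broadcast
        int host  = 0x0A000A2A;             // 10.0.10.42  -> ordinary host
        System.out.println(isBroadcast(bcast, mask)); // true
        System.out.println(isBroadcast(host, mask));  // false
    }
}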
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/firewall/FirewallResource.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/firewall/FirewallResource.java
new file mode 100644
index 0000000..1f4d71a
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/firewall/FirewallResource.java
@@ -0,0 +1,125 @@
+package net.floodlightcontroller.firewall;
+
+import java.io.IOException;
+
+import org.codehaus.jackson.JsonParseException;
+import org.codehaus.jackson.JsonParser;
+import org.codehaus.jackson.JsonToken;
+import org.codehaus.jackson.map.MappingJsonFactory;
+import org.restlet.resource.Post;
+import org.restlet.resource.Get;
+import org.restlet.resource.ServerResource;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class FirewallResource extends ServerResource {
+    protected static Logger log = LoggerFactory.getLogger(FirewallResource.class);
+    
+    @Get("json")
+    public Object handleRequest() {
+        IFirewallService firewall = 
+                (IFirewallService)getContext().getAttributes().
+                get(IFirewallService.class.getCanonicalName());
+
+        String op = (String) getRequestAttributes().get("op");
+
+        // REST API check status
+        if (op.equalsIgnoreCase("status")) {
+            if (firewall.isEnabled())
+                return "{\"result\" : \"firewall enabled\"}";
+            else
+                return "{\"result\" : \"firewall disabled\"}";
+        }
+
+        // REST API enable firewall
+        if (op.equalsIgnoreCase("enable")) {
+            firewall.enableFirewall(true);
+            return "{\"status\" : \"success\", \"details\" : \"firewall running\"}";
+        } 
+        
+        // REST API disable firewall
+        if (op.equalsIgnoreCase("disable")) {
+            firewall.enableFirewall(false);
+            return "{\"status\" : \"success\", \"details\" : \"firewall stopped\"}";
+        } 
+        
+        // REST API retrieving rules from storage
+        // currently equivalent to /wm/firewall/rules/json
+        if (op.equalsIgnoreCase("storageRules")) {
+            return firewall.getStorageRules();
+        } 
+        
+        // REST API retrieve local subnet mask -- this only makes sense for a
+        // single subnet and may be removed later
+        if (op.equalsIgnoreCase("subnet-mask")) {
+            return firewall.getSubnetMask();
+        }
+
+        // no known options found
+        return "{\"status\" : \"failure\", \"details\" : \"invalid operation\"}";
+    }
+    
+    /**
+     * Allows setting of subnet mask
+     * @param fmJson The Subnet Mask in JSON format.
+     * @return A string status message
+     */
+    @Post
+    public String handlePost(String fmJson) {
+        IFirewallService firewall = 
+                (IFirewallService)getContext().getAttributes().
+                get(IFirewallService.class.getCanonicalName());
+
+        String newMask;
+        try {
+            newMask = jsonExtractSubnetMask(fmJson);
+        } catch (IOException e) {
+            log.error("Error parsing new subnet mask: " + fmJson, e);
+            e.printStackTrace();
+            return "{\"status\" : \"Error! Could not parse new subnet mask, see log for details.\"}";
+        }
+        firewall.setSubnetMask(newMask);
+        return ("{\"status\" : \"subnet mask set\"}");
+    }
+    
+    /**
+     * Extracts subnet mask from a JSON string
+     * @param fmJson The JSON formatted string
+     * @return The subnet mask
+     * @throws IOException If there was an error parsing the JSON
+     */
+    public static String jsonExtractSubnetMask(String fmJson) throws IOException {
+        String subnet_mask = "";
+        MappingJsonFactory f = new MappingJsonFactory();
+        JsonParser jp;
+
+        try {
+            jp = f.createJsonParser(fmJson);
+        } catch (JsonParseException e) {
+            throw new IOException(e);
+        }
+
+        jp.nextToken();
+        if (jp.getCurrentToken() != JsonToken.START_OBJECT) {
+            throw new IOException("Expected START_OBJECT");
+        }
+
+        while (jp.nextToken() != JsonToken.END_OBJECT) {
+            if (jp.getCurrentToken() != JsonToken.FIELD_NAME) {
+                throw new IOException("Expected FIELD_NAME");
+            }
+
+            String n = jp.getCurrentName();
+            jp.nextToken();
+            if (jp.getText().equals("")) 
+                continue;
+
+            if (n == "subnet-mask") {
+                subnet_mask = jp.getText();
+                break;
+            }
+        }
+
+        return subnet_mask;
+    }
+}
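A minimal usage sketch for the static jsonExtractSubnetMask() helper defined above, assuming the Floodlight firewall classes and the Jackson 1.x (org.codehaus.jackson) library are on the classpath; the wrapper class name is hypothetical.

import java.io.IOException;

import net.floodlightcontroller.firewall.FirewallResource;

public class SubnetMaskParseExample {
    public static void main(String[] args) throws IOException {
        // The POST body format the resource expects: a single "subnet-mask" field.
        String body = "{\"subnet-mask\": \"255.255.255.0\"}";
        String mask = FirewallResource.jsonExtractSubnetMask(body);
        System.out.println(mask); // prints 255.255.255.0
    }
}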
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/firewall/FirewallRule.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/firewall/FirewallRule.java
new file mode 100644
index 0000000..d9b2612
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/firewall/FirewallRule.java
@@ -0,0 +1,392 @@
+package net.floodlightcontroller.firewall;
+
+import org.openflow.protocol.OFMatch;
+
+import net.floodlightcontroller.packet.Ethernet;
+import net.floodlightcontroller.packet.IPacket;
+import net.floodlightcontroller.packet.IPv4;
+import net.floodlightcontroller.packet.TCP;
+import net.floodlightcontroller.packet.UDP;
+
+public class FirewallRule implements Comparable<FirewallRule> {
+    public int ruleid;
+
+    public long dpid; 
+    public short in_port; 
+    public long dl_src; 
+    public long dl_dst; 
+    public short dl_type; 
+    public int nw_src_prefix; 
+    public int nw_src_maskbits;
+    public int nw_dst_prefix;
+    public int nw_dst_maskbits;
+    public short nw_proto;
+    public short tp_src;
+    public short tp_dst;
+
+    public boolean wildcard_dpid;
+    public boolean wildcard_in_port; 
+    public boolean wildcard_dl_src;
+    public boolean wildcard_dl_dst;
+    public boolean wildcard_dl_type;
+    public boolean wildcard_nw_src;
+    public boolean wildcard_nw_dst;
+    public boolean wildcard_nw_proto;
+    public boolean wildcard_tp_src;
+    public boolean wildcard_tp_dst;
+
+    public int priority = 0;
+
+    public FirewallAction action;
+
+    public enum FirewallAction {
+        /*
+         * DENY: Deny rule
+         * ALLOW: Allow rule
+         */
+        DENY, ALLOW
+    }
+
+    public FirewallRule() {
+        this.in_port = 0; 
+        this.dl_src = 0;
+        this.nw_src_prefix = 0;
+        this.nw_src_maskbits = 0; 
+        this.dl_dst = 0;
+        this.nw_proto = 0;
+        this.tp_src = 0;
+        this.tp_dst = 0;
+        this.dl_dst = 0;
+        this.nw_dst_prefix = 0;
+        this.nw_dst_maskbits = 0; 
+        this.dpid = -1;
+        this.wildcard_dpid = true; 
+        this.wildcard_in_port = true; 
+        this.wildcard_dl_src = true; 
+        this.wildcard_dl_dst = true; 
+        this.wildcard_dl_type = true; 
+        this.wildcard_nw_src = true; 
+        this.wildcard_nw_dst = true; 
+        this.wildcard_nw_proto = true; 
+        this.wildcard_tp_src = true; 
+        this.wildcard_tp_dst = true; 
+        this.priority = 0; 
+        this.action = FirewallAction.ALLOW; 
+        this.ruleid = 0; 
+    }
+
+    /**
+     * Generates an ID for the rule instance, derived from its hash code
+     * (collisions are possible, so the ID is not guaranteed to be unique)
+     * 
+     * @return int representing the generated id
+     */
+    public int genID() {
+        int uid = this.hashCode();
+        if (uid < 0) {
+            uid = Math.abs(uid);
+            uid = uid * 15551;
+        }
+        return uid;
+    }
+
+    /**
+     * Comparison method for Collections.sort method
+     * 
+     * @param rule
+     *            the rule to compare with
+     * @return 0 if the priorities are equal, a negative number if this rule's
+     *         priority value is smaller than that of 'rule', and a positive
+     *         number if it is larger
+     */
+    @Override
+    public int compareTo(FirewallRule rule) {
+        return this.priority - rule.priority;
+    }
+
+    /**
+     * Determines if this instance matches an existing rule instance
+     * 
+     * @param r
+     *            : the FirewallRule instance to compare with
+     * @return boolean: true if a match is found
+     **/
+    public boolean isSameAs(FirewallRule r) {
+        if (this.action != r.action
+                || this.wildcard_dl_type != r.wildcard_dl_type
+                || (this.wildcard_dl_type == false && this.dl_type != r.dl_type)
+                || this.wildcard_tp_src != r.wildcard_tp_src
+                || (this.wildcard_tp_src == false && this.tp_src != r.tp_src)
+                || this.wildcard_tp_dst != r.wildcard_tp_dst
+                || (this.wildcard_tp_dst == false && this.tp_dst != r.tp_dst)
+                || this.wildcard_dpid != r.wildcard_dpid
+                || (this.wildcard_dpid == false && this.dpid != r.dpid)
+                || this.wildcard_in_port != r.wildcard_in_port
+                || (this.wildcard_in_port == false && this.in_port != r.in_port)
+                || this.wildcard_nw_src != r.wildcard_nw_src
+                || (this.wildcard_nw_src == false && (this.nw_src_prefix != r.nw_src_prefix || this.nw_src_maskbits != r.nw_src_maskbits))
+                || this.wildcard_dl_src != r.wildcard_dl_src
+                || (this.wildcard_dl_src == false && this.dl_src != r.dl_src)
+                || this.wildcard_nw_proto != r.wildcard_nw_proto
+                || (this.wildcard_nw_proto == false && this.nw_proto != r.nw_proto)
+                || this.wildcard_nw_dst != r.wildcard_nw_dst
+                || (this.wildcard_nw_dst == false && (this.nw_dst_prefix != r.nw_dst_prefix || this.nw_dst_maskbits != r.nw_dst_maskbits))
+                || this.wildcard_dl_dst != r.wildcard_dl_dst                
+                || (this.wildcard_dl_dst == false && this.dl_dst != r.dl_dst)) {
+            return false;
+        }
+        return true;
+    }
+
+    /**
+     * Matches this rule to a given flow - incoming packet
+     * 
+     * @param switchDpid
+     *            the Id of the connected switch
+     * @param inPort
+     *            the switch port where the packet originated from
+     * @param packet
+     *            the Ethernet packet that arrives at the switch
+     * @param wildcards
+     *            the pair of wildcards (allow and deny) supplied by the
+     *            Firewall module's matchWithRule method; this method narrows
+     *            them so that wildcards can be derived for the decision
+     * @return true if the rule matches the given packet-in, false otherwise
+     */
+    public boolean matchesFlow(long switchDpid, short inPort, Ethernet packet,
+            WildcardsPair wildcards) {
+        IPacket pkt = packet.getPayload();
+
+        // dl_type type
+        IPv4 pkt_ip = null;
+
+        // nw_proto types
+        TCP pkt_tcp = null;
+        UDP pkt_udp = null;
+
+        // tp_src and tp_dst (tp port numbers)
+        short pkt_tp_src = 0;
+        short pkt_tp_dst = 0;
+
+        // switchID matches?
+        if (wildcard_dpid == false && dpid != switchDpid)
+            return false;
+
+        // in_port matches?
+        if (wildcard_in_port == false && in_port != inPort)
+            return false;
+        if (action == FirewallRule.FirewallAction.DENY) {
+            wildcards.drop &= ~OFMatch.OFPFW_IN_PORT;
+        } else {
+            wildcards.allow &= ~OFMatch.OFPFW_IN_PORT;
+        }
+
+        // mac address (src and dst) match?
+        if (wildcard_dl_src == false
+                && dl_src != packet.getSourceMAC().toLong())
+            return false;
+        if (action == FirewallRule.FirewallAction.DENY) {
+            wildcards.drop &= ~OFMatch.OFPFW_DL_SRC;
+        } else {
+            wildcards.allow &= ~OFMatch.OFPFW_DL_SRC;
+        }
+
+        if (wildcard_dl_dst == false
+                && dl_dst != packet.getDestinationMAC().toLong())
+            return false;
+        if (action == FirewallRule.FirewallAction.DENY) {
+            wildcards.drop &= ~OFMatch.OFPFW_DL_DST;
+        } else {
+            wildcards.allow &= ~OFMatch.OFPFW_DL_DST;
+        }
+
+        // dl_type check: ARP, IP
+
+        // if this is not an ARP rule but the pkt is ARP,
+        // return false match - no need to continue protocol specific check
+        if (wildcard_dl_type == false) {
+            if (dl_type == Ethernet.TYPE_ARP) {
+                if (packet.getEtherType() != Ethernet.TYPE_ARP)
+                    return false;
+                else {
+                    if (action == FirewallRule.FirewallAction.DENY) {
+                        wildcards.drop &= ~OFMatch.OFPFW_DL_TYPE;
+                    } else {
+                        wildcards.allow &= ~OFMatch.OFPFW_DL_TYPE;
+                    }
+                }
+            } else if (dl_type == Ethernet.TYPE_IPv4) {
+                if (packet.getEtherType() != Ethernet.TYPE_IPv4)
+                    return false;
+                else {
+                    if (action == FirewallRule.FirewallAction.DENY) {
+                        wildcards.drop &= ~OFMatch.OFPFW_NW_PROTO;
+                    } else {
+                        wildcards.allow &= ~OFMatch.OFPFW_NW_PROTO;
+                    }
+                    // IP packets, proceed with ip address check
+                    pkt_ip = (IPv4) pkt;
+
+                    // IP addresses (src and dst) match?
+                    if (wildcard_nw_src == false
+                            && this.matchIPAddress(nw_src_prefix,
+                                    nw_src_maskbits, pkt_ip.getSourceAddress()) == false)
+                        return false;
+                    if (action == FirewallRule.FirewallAction.DENY) {
+                        wildcards.drop &= ~OFMatch.OFPFW_NW_SRC_ALL;
+                        wildcards.drop |= (nw_src_maskbits << OFMatch.OFPFW_NW_SRC_SHIFT);
+                    } else {
+                        wildcards.allow &= ~OFMatch.OFPFW_NW_SRC_ALL;
+                        wildcards.allow |= (nw_src_maskbits << OFMatch.OFPFW_NW_SRC_SHIFT);
+                    }
+
+                    if (wildcard_nw_dst == false
+                            && this.matchIPAddress(nw_dst_prefix,
+                                    nw_dst_maskbits,
+                                    pkt_ip.getDestinationAddress()) == false)
+                        return false;
+                    if (action == FirewallRule.FirewallAction.DENY) {
+                        wildcards.drop &= ~OFMatch.OFPFW_NW_DST_ALL;
+                        wildcards.drop |= (nw_dst_maskbits << OFMatch.OFPFW_NW_DST_SHIFT);
+                    } else {
+                        wildcards.allow &= ~OFMatch.OFPFW_NW_DST_ALL;
+                        wildcards.allow |= (nw_dst_maskbits << OFMatch.OFPFW_NW_DST_SHIFT);
+                    }
+
+                    // nw_proto check
+                    if (wildcard_nw_proto == false) {
+                        if (nw_proto == IPv4.PROTOCOL_TCP) {
+                            if (pkt_ip.getProtocol() != IPv4.PROTOCOL_TCP)
+                                return false;
+                            else {
+                                pkt_tcp = (TCP) pkt_ip.getPayload();
+                                pkt_tp_src = pkt_tcp.getSourcePort();
+                                pkt_tp_dst = pkt_tcp.getDestinationPort();
+                            }
+                        } else if (nw_proto == IPv4.PROTOCOL_UDP) {
+                            if (pkt_ip.getProtocol() != IPv4.PROTOCOL_UDP)
+                                return false;
+                            else {
+                                pkt_udp = (UDP) pkt_ip.getPayload();
+                                pkt_tp_src = pkt_udp.getSourcePort();
+                                pkt_tp_dst = pkt_udp.getDestinationPort();
+                            }
+                        } else if (nw_proto == IPv4.PROTOCOL_ICMP) {
+                            if (pkt_ip.getProtocol() != IPv4.PROTOCOL_ICMP)
+                                return false;
+                            else {
+                                // nothing more needed for ICMP
+                            }
+                        }
+                        if (action == FirewallRule.FirewallAction.DENY) {
+                            wildcards.drop &= ~OFMatch.OFPFW_NW_PROTO;
+                        } else {
+                            wildcards.allow &= ~OFMatch.OFPFW_NW_PROTO;
+                        }
+
+                        // TCP/UDP source and destination ports match?
+                        if (pkt_tcp != null || pkt_udp != null) {
+                            // does the source port match?
+                            if (tp_src != 0 && tp_src != pkt_tp_src)
+                                return false;
+                            if (action == FirewallRule.FirewallAction.DENY) {
+                                wildcards.drop &= ~OFMatch.OFPFW_TP_SRC;
+                            } else {
+                                wildcards.allow &= ~OFMatch.OFPFW_TP_SRC;
+                            }
+
+                            // does the destination port match?
+                            if (tp_dst != 0 && tp_dst != pkt_tp_dst)
+                                return false;
+                            if (action == FirewallRule.FirewallAction.DENY) {
+                                wildcards.drop &= ~OFMatch.OFPFW_TP_DST;
+                            } else {
+                                wildcards.allow &= ~OFMatch.OFPFW_TP_DST;
+                            }
+                        }
+                    }
+
+                }
+            } else {
+                // rule's dl_type is neither ARP nor IPv4 - not supported -
+                // report no match
+                return false;
+            }
+        }
+        if (action == FirewallRule.FirewallAction.DENY) {
+            wildcards.drop &= ~OFMatch.OFPFW_DL_TYPE;
+        } else {
+            wildcards.allow &= ~OFMatch.OFPFW_DL_TYPE;
+        }
+
+        // all applicable checks passed
+        return true;
+    }
+
+    /**
+     * Determines if the rule's CIDR address matches the IP address of the
+     * packet
+     * 
+     * @param rulePrefix
+     *            prefix part of the CIDR address
+     * @param ruleBits
+     *            mask length (number of significant bits) of the CIDR address
+     * @param packetAddress
+     *            the IP address of the incoming packet to match against
+     * @return true if the CIDR address matches the packet's IP address, false
+     *         otherwise
+     */
+    protected boolean matchIPAddress(int rulePrefix, int ruleBits,
+            int packetAddress) {
+        boolean matched = true;
+
+        int rule_iprng = 32 - ruleBits;
+        int rule_ipint = rulePrefix;
+        int pkt_ipint = packetAddress;
+        // if there's a subnet range (bits to be wildcarded > 0)
+        if (rule_iprng > 0) {
+            // right shift to discard the rule_iprng least significant bits
+            // that are to be wildcarded
+            rule_ipint = rule_ipint >> rule_iprng;
+            pkt_ipint = pkt_ipint >> rule_iprng;
+            // left shift back to the normal range; the rule_iprng least
+            // significant bits are now zeroed
+            rule_ipint = rule_ipint << rule_iprng;
+            pkt_ipint = pkt_ipint << rule_iprng;
+        }
+        // check if we have a match
+        if (rule_ipint != pkt_ipint)
+            matched = false;
+
+        return matched;
+    }
+
+    @Override
+    public int hashCode() {
+        final int prime = 2521;
+        int result = super.hashCode();
+        result = prime * result + (int) dpid;
+        result = prime * result + in_port;
+        result = prime * result + (int) dl_src;
+        result = prime * result + (int) dl_dst;
+        result = prime * result + dl_type;
+        result = prime * result + nw_src_prefix;
+        result = prime * result + nw_src_maskbits;
+        result = prime * result + nw_dst_prefix;
+        result = prime * result + nw_dst_maskbits;
+        result = prime * result + nw_proto;
+        result = prime * result + tp_src;
+        result = prime * result + tp_dst;
+        result = prime * result + action.ordinal();
+        result = prime * result + priority;
+        result = prime * result + Boolean.valueOf(wildcard_dpid).hashCode();
+        result = prime * result + Boolean.valueOf(wildcard_in_port).hashCode();
+        result = prime * result + Boolean.valueOf(wildcard_dl_src).hashCode();
+        result = prime * result + Boolean.valueOf(wildcard_dl_dst).hashCode();
+        result = prime * result + Boolean.valueOf(wildcard_dl_type).hashCode();
+        result = prime * result + Boolean.valueOf(wildcard_nw_src).hashCode();
+        result = prime * result + Boolean.valueOf(wildcard_nw_dst).hashCode();
+        result = prime * result + Boolean.valueOf(wildcard_nw_proto).hashCode();
+        result = prime * result + Boolean.valueOf(wildcard_tp_src).hashCode();
+        result = prime * result + Boolean.valueOf(wildcard_tp_dst).hashCode();
+        return result;
+    }
+}
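matchIPAddress() above compares only the network part of the two addresses by shifting the host bits away. The following standalone sketch reproduces that test for 10.0.0.0/24; it is illustrative only, and the class and method names are assumptions.

public class CidrMatchExample {
    // Same shift-based comparison as FirewallRule.matchIPAddress().
    static boolean matches(int rulePrefix, int ruleBits, int packetAddress) {
        int range = 32 - ruleBits;                         // host bits to ignore
        if (range > 0) {
            rulePrefix = (rulePrefix >> range) << range;   // zero the host bits
            packetAddress = (packetAddress >> range) << range;
        }
        return rulePrefix == packetAddress;                // compare network parts
    }

    public static void main(String[] args) {
        int net = 0x0A000000;                              // 10.0.0.0
        System.out.println(matches(net, 24, 0x0A000005));  // true  (10.0.0.5)
        System.out.println(matches(net, 24, 0x0A000105));  // false (10.0.1.5)
    }
}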
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/firewall/FirewallRulesResource.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/firewall/FirewallRulesResource.java
new file mode 100644
index 0000000..7a31d38
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/firewall/FirewallRulesResource.java
@@ -0,0 +1,292 @@
+package net.floodlightcontroller.firewall;
+
+import java.io.IOException;
+import java.util.Iterator;
+import java.util.List;
+
+import org.codehaus.jackson.JsonParseException;
+import org.codehaus.jackson.JsonParser;
+import org.codehaus.jackson.JsonToken;
+import org.codehaus.jackson.map.MappingJsonFactory;
+import org.openflow.util.HexString;
+import org.restlet.resource.Delete;
+import org.restlet.resource.Post;
+import org.restlet.resource.Get;
+import org.restlet.resource.ServerResource;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import net.floodlightcontroller.packet.Ethernet;
+import net.floodlightcontroller.packet.IPv4;
+
+public class FirewallRulesResource extends ServerResource {
+    protected static Logger log = LoggerFactory.getLogger(FirewallRulesResource.class);
+
+    @Get("json")
+    public Object handleRequest() {
+        IFirewallService firewall = 
+                (IFirewallService)getContext().getAttributes().
+                get(IFirewallService.class.getCanonicalName());
+
+        return firewall.getRules();
+    }
+
+    /**
+     * Takes a Firewall Rule string in JSON format and parses it into
+     * our firewall rule data structure, then adds it to the firewall.
+     * @param fmJson The Firewall rule entry in JSON format.
+     * @return A string status message
+     */
+    @Post
+    public String store(String fmJson) {
+        IFirewallService firewall = 
+                (IFirewallService)getContext().getAttributes().
+                get(IFirewallService.class.getCanonicalName());
+
+        FirewallRule rule;
+        try {
+            rule = jsonToFirewallRule(fmJson);
+        } catch (IOException e) {
+            log.error("Error parsing firewall rule: " + fmJson, e);
+            e.printStackTrace();
+            return "{\"status\" : \"Error! Could not parse firewall rule, see log for details.\"}";
+        }
+        String status = null;
+        if (checkRuleExists(rule, firewall.getRules())) {
+            status = "Error! A similar firewall rule already exists.";
+            log.error(status);
+        } else {
+            // add rule to firewall
+            firewall.addRule(rule);
+            status = "Rule added";
+        }
+        return ("{\"status\" : \"" + status + "\"}");
+    }
+
+    /**
+     * Takes a Firewall Rule string in JSON format and parses it into
+     * our firewall rule data structure, then deletes it from the firewall.
+     * @param fmJson The Firewall rule entry in JSON format.
+     * @return A string status message
+     */
+    
+    @Delete
+    public String remove(String fmJson) {
+        IFirewallService firewall = 
+                (IFirewallService)getContext().getAttributes().
+                get(IFirewallService.class.getCanonicalName());
+
+        FirewallRule rule;
+        try {
+            rule = jsonToFirewallRule(fmJson);
+        } catch (IOException e) {
+            log.error("Error parsing firewall rule: " + fmJson, e);
+            e.printStackTrace();
+            return "{\"status\" : \"Error! Could not parse firewall rule, see log for details.\"}";
+        }
+        String status = null;
+        boolean exists = false;
+        Iterator<FirewallRule> iter = firewall.getRules().iterator();
+        while (iter.hasNext()) {
+            FirewallRule r = iter.next();
+            if (r.ruleid == rule.ruleid) {
+                exists = true;
+                break;
+            }
+        }
+        if (!exists) {
+            status = "Error! Can't delete, a rule with this ID doesn't exist.";
+            log.error(status);
+        } else {
+            // delete rule from firewall
+            firewall.deleteRule(rule.ruleid);
+            status = "Rule deleted";
+        }
+        return ("{\"status\" : \"" + status + "\"}");
+    }
+
+    /**
+     * Turns a JSON formatted Firewall Rule string into a FirewallRule instance
+     * @param fmJson The JSON formatted static firewall rule
+     * @return The FirewallRule instance
+     * @throws IOException If there was an error parsing the JSON
+     */
+     
+    public static FirewallRule jsonToFirewallRule(String fmJson) throws IOException {
+        FirewallRule rule = new FirewallRule();
+        MappingJsonFactory f = new MappingJsonFactory();
+        JsonParser jp;
+
+        try {
+            jp = f.createJsonParser(fmJson);
+        } catch (JsonParseException e) {
+            throw new IOException(e);
+        }
+
+        jp.nextToken();
+        if (jp.getCurrentToken() != JsonToken.START_OBJECT) {
+            throw new IOException("Expected START_OBJECT");
+        }
+
+        while (jp.nextToken() != JsonToken.END_OBJECT) {
+            if (jp.getCurrentToken() != JsonToken.FIELD_NAME) {
+                throw new IOException("Expected FIELD_NAME");
+            }
+
+            String n = jp.getCurrentName();
+            jp.nextToken();
+            if (jp.getText().equals("")) 
+                continue;
+
+            String tmp;
+            
+            // This is currently only applicable for remove(). In store(),
+            // the ruleid is generated by the firewall module itself.
+            if (n.equals("ruleid")) {
+                rule.ruleid = Integer.parseInt(jp.getText());
+            }
+            
+            // This assumes the user knows the dpid of the involved switches
+            else if (n.equals("switchid")) {
+                tmp = jp.getText();
+                if (tmp.equalsIgnoreCase("-1") == false) {
+                    // user inputs hex format dpid 
+                    rule.dpid = HexString.toLong(tmp);                    
+                    rule.wildcard_dpid = false;
+                }
+            } 
+            
+            else if (n == "src-inport") {
+                rule.in_port = Short.parseShort(jp.getText());
+                rule.wildcard_in_port = false;
+            } 
+            
+            else if (n == "src-mac") {
+                tmp = jp.getText();
+                if (tmp.equalsIgnoreCase("ANY") == false) {
+                    rule.wildcard_dl_src = false;
+                    rule.dl_src = Ethernet.toLong(Ethernet.toMACAddress(tmp));
+                }
+            } 
+            
+            else if (n == "dst-mac") {
+                tmp = jp.getText();
+                if (tmp.equalsIgnoreCase("ANY") == false) {
+                    rule.wildcard_dl_dst = false;
+                    rule.dl_dst = Ethernet.toLong(Ethernet.toMACAddress(tmp));
+                }
+            } 
+            
+            else if (n == "dl-type") {
+                tmp = jp.getText();
+                if (tmp.equalsIgnoreCase("ARP")) {
+                    rule.wildcard_dl_type = false;
+                    rule.dl_type = Ethernet.TYPE_ARP;
+                }
+            } 
+            
+            else if (n == "src-ip") {
+                tmp = jp.getText();
+                if (tmp.equalsIgnoreCase("ANY") == false) {
+                    rule.wildcard_nw_src = false;
+                    rule.wildcard_dl_type = false;
+                    rule.dl_type = Ethernet.TYPE_IPv4;
+                    int[] cidr = IPCIDRToPrefixBits(tmp);
+                    rule.nw_src_prefix = cidr[0];
+                    rule.nw_src_maskbits = cidr[1];
+                }
+            } 
+            
+            else if (n == "dst-ip") {
+                tmp = jp.getText();
+                if (tmp.equalsIgnoreCase("ANY") == false) {
+                    rule.wildcard_nw_dst = false;
+                    rule.wildcard_dl_type = false;
+                    rule.dl_type = Ethernet.TYPE_IPv4;
+                    int[] cidr = IPCIDRToPrefixBits(tmp);
+                    rule.nw_dst_prefix = cidr[0];
+                    rule.nw_dst_maskbits = cidr[1];
+                }
+            } 
+            
+            else if (n == "nw-proto") {
+                tmp = jp.getText();
+                if (tmp.equalsIgnoreCase("TCP")) {
+                    rule.wildcard_nw_proto = false;
+                    rule.nw_proto = IPv4.PROTOCOL_TCP;
+                    rule.wildcard_dl_type = false;
+                    rule.dl_type = Ethernet.TYPE_IPv4;
+                } else if (tmp.equalsIgnoreCase("UDP")) {
+                    rule.wildcard_nw_proto = false;
+                    rule.nw_proto = IPv4.PROTOCOL_UDP;
+                    rule.wildcard_dl_type = false;
+                    rule.dl_type = Ethernet.TYPE_IPv4;
+                } else if (tmp.equalsIgnoreCase("ICMP")) {
+                    rule.wildcard_nw_proto = false;
+                    rule.nw_proto = IPv4.PROTOCOL_ICMP;
+                    rule.wildcard_dl_type = false;
+                    rule.dl_type = Ethernet.TYPE_IPv4;
+                } 
+            } 
+            
+            else if (n == "tp-src") {
+                rule.wildcard_tp_src = false;
+                rule.tp_src = Short.parseShort(jp.getText());
+            } 
+            
+            else if (n == "tp-dst") {
+                rule.wildcard_tp_dst = false;
+                rule.tp_dst = Short.parseShort(jp.getText());
+            } 
+            
+            else if (n == "priority") {
+                rule.priority = Integer.parseInt(jp.getText());
+            } 
+            
+            else if (n == "action") {
+                if (jp.getText().equalsIgnoreCase("allow") == true) {
+                    rule.action = FirewallRule.FirewallAction.ALLOW;
+                } else if (jp.getText().equalsIgnoreCase("deny") == true) {
+                    rule.action = FirewallRule.FirewallAction.DENY;
+                }
+            }
+        }
+
+        return rule;
+    }
+
+    public static int[] IPCIDRToPrefixBits(String cidr) {
+        int ret[] = new int[2];
+
+        // as IP can also be a prefix rather than an absolute address
+        // split it over "/" to get the bit range
+        String[] parts = cidr.split("/");
+        String cidr_prefix = parts[0].trim();
+        int cidr_bits = 0;
+        if (parts.length == 2) {
+            try {
+                cidr_bits = Integer.parseInt(parts[1].trim());
+            } catch (Exception exp) {
+                cidr_bits = 32;
+            }
+        }
+        ret[0] = IPv4.toIPv4Address(cidr_prefix);
+        ret[1] = cidr_bits;
+
+        return ret;
+    }
+
+    public static boolean checkRuleExists(FirewallRule rule, List<FirewallRule> rules) {
+        Iterator<FirewallRule> iter = rules.iterator();
+        while (iter.hasNext()) {
+            FirewallRule r = iter.next();
+
+            // check if we find a similar rule
+            if (rule.isSameAs(r)) {
+                return true;
+            }
+        }
+
+        // no rule matched, so it doesn't exist in the rules
+        return false;
+    }
+}
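A minimal sketch of how the JSON accepted by this resource maps onto a FirewallRule, using the static jsonToFirewallRule() parser above. It assumes the Floodlight firewall classes and Jackson 1.x are on the classpath; the field names are the ones the parser recognizes, and the wrapper class is hypothetical.

import java.io.IOException;

import net.floodlightcontroller.firewall.FirewallRule;
import net.floodlightcontroller.firewall.FirewallRulesResource;

public class RuleJsonExample {
    public static void main(String[] args) throws IOException {
        // Deny TCP traffic to 10.0.0.2/32; unspecified fields stay wildcarded.
        String body = "{\"dst-ip\": \"10.0.0.2/32\", "
                + "\"nw-proto\": \"TCP\", \"action\": \"DENY\"}";
        FirewallRule rule = FirewallRulesResource.jsonToFirewallRule(body);
        System.out.println(rule.wildcard_nw_dst); // false - dst-ip is pinned
        System.out.println(rule.action);          // DENY
    }
}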
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/firewall/FirewallWebRoutable.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/firewall/FirewallWebRoutable.java
new file mode 100644
index 0000000..3a9beab
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/firewall/FirewallWebRoutable.java
@@ -0,0 +1,26 @@
+package net.floodlightcontroller.firewall;
+
+import net.floodlightcontroller.restserver.RestletRoutable;
+import org.restlet.Context;
+import org.restlet.routing.Router;
+
+public class FirewallWebRoutable implements RestletRoutable {
+    /**
+     * Create the Restlet router and bind to the proper resources.
+     */
+    @Override
+    public Router getRestlet(Context context) {
+        Router router = new Router(context);
+        router.attach("/module/{op}/json", FirewallResource.class);
+        router.attach("/rules/json", FirewallRulesResource.class);
+        return router;
+    }
+
+    /**
+     * Set the base path for the Firewall
+     */
+    @Override
+    public String basePath() {
+        return "/wm/firewall";
+    }
+}
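Putting the routes above together with basePath(), the module exposes the endpoints sketched below; this is illustrative only, with the {op} values taken from FirewallResource and the rules endpoint served by FirewallRulesResource.

public class FirewallRoutesExample {
    public static void main(String[] args) {
        String base = "/wm/firewall";
        // {op} is one of: status, enable, disable, storageRules, subnet-mask
        System.out.println(base + "/module/{op}/json");
        // GET lists rules, POST adds a rule, DELETE removes one
        System.out.println(base + "/rules/json");
    }
}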
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/firewall/IFirewallService.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/firewall/IFirewallService.java
new file mode 100644
index 0000000..ae9d89f
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/firewall/IFirewallService.java
@@ -0,0 +1,56 @@
+package net.floodlightcontroller.firewall;
+
+import java.util.List;
+import java.util.Map;
+
+import net.floodlightcontroller.core.module.IFloodlightService;
+
+public interface IFirewallService extends IFloodlightService {
+
+    /**
+     * Enables/disables the firewall.
+     * @param enable Whether to enable or disable the firewall.
+     */
+    public void enableFirewall(boolean enable);
+
+    /**
+     * Returns the operational status of the firewall
+     * @return true if the firewall is enabled, false otherwise
+     */
+    public boolean isEnabled();
+ 
+    /**
+     * Returns all of the firewall rules
+     * @return List of all rules
+     */
+    public List<FirewallRule> getRules();
+    
+    /**
+     * Returns the subnet mask
+     * @return subnet mask
+     */
+    public String getSubnetMask();
+    
+    /**
+     * Sets the subnet mask
+     * @param newMask The new subnet mask
+     */
+    public void setSubnetMask(String newMask);
+
+    /**
+     * Returns all of the firewall rules in storage
+     * for debugging and unit-testing purposes
+     * @return List of all rules in storage
+     */
+    public List<Map<String, Object>> getStorageRules();
+
+    /**
+     * Adds a new Firewall rule
+     */
+    public void addRule(FirewallRule rule);
+
+    /**
+     * Deletes a Firewall rule
+     */
+    public void deleteRule(int ruleid);
+}
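
IFirewallService is consumed like any other Floodlight service: a module declares the dependency and pulls the implementation out of the module context. The sketch below illustrates such a caller under stated assumptions; the module scaffolding is abbreviated, and FirewallRule is assumed to expose a no-argument constructor, so treat the rule construction as hypothetical.

    import net.floodlightcontroller.core.module.FloodlightModuleContext;
    import net.floodlightcontroller.firewall.FirewallRule;
    import net.floodlightcontroller.firewall.IFirewallService;

    public class FirewallClientSketch {
        private IFirewallService firewall;

        public void init(FloodlightModuleContext context) {
            // IFirewallService must be listed in this module's dependencies.
            firewall = context.getServiceImpl(IFirewallService.class);
        }

        public void startUp() {
            if (!firewall.isEnabled()) {
                firewall.enableFirewall(true);          // switch the firewall on
            }
            FirewallRule rule = new FirewallRule();     // hypothetical default rule
            firewall.addRule(rule);
            System.out.println("rules now: " + firewall.getRules().size());
        }
    }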
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/firewall/RuleWildcardsPair.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/firewall/RuleWildcardsPair.java
new file mode 100644
index 0000000..3fab409
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/firewall/RuleWildcardsPair.java
@@ -0,0 +1,8 @@
+package net.floodlightcontroller.firewall;
+
+import org.openflow.protocol.OFMatch;
+
+public class RuleWildcardsPair {
+    public FirewallRule rule;
+    public int wildcards = OFMatch.OFPFW_ALL;
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/firewall/WildcardsPair.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/firewall/WildcardsPair.java
new file mode 100644
index 0000000..2e5f123
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/firewall/WildcardsPair.java
@@ -0,0 +1,8 @@
+package net.floodlightcontroller.firewall;
+
+import org.openflow.protocol.OFMatch;
+
+public class WildcardsPair {
+    public int allow = OFMatch.OFPFW_ALL;
+    public int drop = OFMatch.OFPFW_ALL;
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/flowcache/FCQueryObj.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/flowcache/FCQueryObj.java
new file mode 100644
index 0000000..cce3401
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/flowcache/FCQueryObj.java
@@ -0,0 +1,117 @@
+package net.floodlightcontroller.flowcache;
+
+import java.util.Arrays;
+
+import net.floodlightcontroller.devicemanager.IDevice;
+import net.floodlightcontroller.flowcache.IFlowCacheService.FCQueryEvType;
+
+
+/**
+ * The Class FCQueryObj.
+ */
+public class FCQueryObj {
+
+    /** The caller of the flow cache query. */
+    public IFlowQueryHandler fcQueryHandler;
+    /** The application instance name. */
+    public String applInstName;
+    /** The vlan Id. */
+    public Short[] vlans;
+    /** The destination device. */
+    public IDevice dstDevice;
+    /** The source device. */
+    public IDevice srcDevice;
+    /** The caller name */
+    public String callerName;
+    /** Event type that triggered this flow query submission */
+    public FCQueryEvType evType;
+    /** The caller opaque data. Returned unchanged in the query response
+     * via the callback. The type of this object could be different for
+     * different callers */
+    public Object callerOpaqueObj;
+
+    /**
+     * Instantiates a new flow cache query object
+     */
+    public FCQueryObj(IFlowQueryHandler fcQueryHandler,
+            String        applInstName,
+            Short         vlan,
+            IDevice       srcDevice,
+            IDevice       dstDevice,
+            String        callerName,
+            FCQueryEvType evType,
+            Object        callerOpaqueObj) {
+        this.fcQueryHandler    = fcQueryHandler;
+        this.applInstName     = applInstName;
+        this.srcDevice        = srcDevice;
+        this.dstDevice        = dstDevice;
+        this.callerName       = callerName;
+        this.evType           = evType;
+        this.callerOpaqueObj  = callerOpaqueObj;
+        
+        if (vlan != null) {
+        	this.vlans = new Short[] { vlan };
+        } else {
+	        if (srcDevice != null) {
+	        	this.vlans = srcDevice.getVlanId();
+	        } else if (dstDevice != null) {
+	            this.vlans = dstDevice.getVlanId();
+	        }
+        }
+    }
+
+    @Override
+    public String toString() {
+        return "FCQueryObj [fcQueryCaller=" + fcQueryHandler
+                + ", applInstName="
+                + applInstName + ", vlans=" + Arrays.toString(vlans)
+                + ", dstDevice=" + dstDevice + ", srcDevice="
+                + srcDevice + ", callerName=" + callerName + ", evType="
+                + evType + ", callerOpaqueObj=" + callerOpaqueObj + "]";
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (this == obj)
+            return true;
+        if (obj == null)
+            return false;
+        if (getClass() != obj.getClass())
+            return false;
+        FCQueryObj other = (FCQueryObj) obj;
+        if (applInstName == null) {
+            if (other.applInstName != null)
+                return false;
+        } else if (!applInstName.equals(other.applInstName))
+            return false;
+        if (callerName == null) {
+            if (other.callerName != null)
+                return false;
+        } else if (!callerName.equals(other.callerName))
+            return false;
+        if (callerOpaqueObj == null) {
+            if (other.callerOpaqueObj != null)
+                return false;
+        } else if (!callerOpaqueObj.equals(other.callerOpaqueObj))
+            return false;
+        if (dstDevice == null) {
+            if (other.dstDevice != null)
+                return false;
+        } else if (!dstDevice.equals(other.dstDevice))
+            return false;
+        if (evType != other.evType)
+            return false;
+        if (fcQueryHandler != other.fcQueryHandler)
+            return false;
+        if (srcDevice == null) {
+            if (other.srcDevice != null)
+                return false;
+        } else if (!srcDevice.equals(other.srcDevice))
+            return false;
+        if (!Arrays.equals(vlans, other.vlans))
+            return false;
+        return true;
+    }
+    
+    
+}
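
The constructor above only falls back to the source or destination device's VLANs when the vlan argument is null; an explicit VLAN is stored as a single-element array. A hedged fragment illustrating the explicit-VLAN case follows (myHandler is a hypothetical IFlowQueryHandler, the instance and caller names are placeholders, and FCQueryEvType resolves via the same import used by FCQueryObj itself):

    FCQueryObj query = new FCQueryObj(
            myHandler,                    // hypothetical IFlowQueryHandler for the callback
            "myAppInstance",              // placeholder application instance name
            Short.valueOf((short) 10),    // explicit VLAN: stored as vlans = {10}
            null,                         // srcDevice ignored because vlan is non-null
            null,                         // dstDevice ignored because vlan is non-null
            "MyModule",                   // caller name, echoed back in toString()/logs
            FCQueryEvType.DEVICE_MOVED,   // event type that triggered the query
            null);                        // no opaque caller data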
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/flowcache/FlowCacheQueryResp.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/flowcache/FlowCacheQueryResp.java
new file mode 100644
index 0000000..b01aedf
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/flowcache/FlowCacheQueryResp.java
@@ -0,0 +1,54 @@
+package net.floodlightcontroller.flowcache;
+
+import java.util.ArrayList;
+
+/**
+ * Object to return flows in response to a query message to the flow cache.
+ * This object is passed in the flowQueryRespHandler() callback.
+ */
+public class FlowCacheQueryResp {
+
+    /** query object provided by the caller, returned unchanged. */
+    public FCQueryObj  queryObj;
+    /** 
+     * Set to true if more flows could be returned for this query in
+     * additional callbacks. Set to false in the last callback for the
+     * query. 
+     */
+    public boolean     moreFlag;
+    
+    /**
+     * Set to true if the response has been sent to handler
+     */
+    public boolean     hasSent;
+    
+    /** 
+     * The flow list. If there are large number of flows to be returned
+     * then they may be returned in multiple callbacks.
+     */
+    public ArrayList<QRFlowCacheObj> qrFlowCacheObjList;
+
+    /**
+     * Instantiates a new flow cache query response.
+     *
+     * @param query the flow cache query object as given by the caller of
+     * flow cache submit query API.
+     */
+    public FlowCacheQueryResp(FCQueryObj query) {
+        qrFlowCacheObjList = new ArrayList<QRFlowCacheObj>();
+        queryObj    = query;
+        moreFlag    = false;
+        hasSent     = false;
+    }
+
+    /* (non-Javadoc)
+     * @see java.lang.Object#toString()
+     */
+    @Override
+    public String toString() {
+        String s = queryObj.toString() + "; moreFlasg=" + moreFlag +
+                   "; hasSent=" + hasSent;
+        s += "; FlowCount=" + Integer.toString(qrFlowCacheObjList.size());
+        return s;
+    }
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/flowcache/FlowReconcileManager.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/flowcache/FlowReconcileManager.java
new file mode 100644
index 0000000..d5d323d
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/flowcache/FlowReconcileManager.java
@@ -0,0 +1,440 @@
+package net.floodlightcontroller.flowcache;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Queue;
+import java.util.concurrent.ConcurrentLinkedQueue;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+
+import net.floodlightcontroller.core.module.FloodlightModuleContext;
+import net.floodlightcontroller.core.module.FloodlightModuleException;
+import net.floodlightcontroller.core.module.IFloodlightModule;
+import net.floodlightcontroller.core.module.IFloodlightService;
+import net.floodlightcontroller.core.util.ListenerDispatcher;
+import net.floodlightcontroller.core.util.SingletonTask;
+import net.floodlightcontroller.counter.CounterStore;
+import net.floodlightcontroller.counter.ICounter;
+import net.floodlightcontroller.counter.ICounterStoreService;
+import net.floodlightcontroller.counter.SimpleCounter;
+import net.floodlightcontroller.devicemanager.IDevice;
+import net.floodlightcontroller.flowcache.IFlowCacheService.FCQueryEvType;
+import net.floodlightcontroller.flowcache.IFlowReconcileListener;
+import net.floodlightcontroller.flowcache.OFMatchReconcile;
+import net.floodlightcontroller.threadpool.IThreadPoolService;
+
+import org.openflow.protocol.OFType;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class FlowReconcileManager 
+        implements IFloodlightModule, IFlowReconcileService {
+
+    /** The logger. */
+    private static Logger logger =
+                        LoggerFactory.getLogger(FlowReconcileManager.class);
+    
+    /** Reference to dependent modules */
+    protected IThreadPoolService threadPool;
+    protected ICounterStoreService counterStore;
+
+    /**
+     * The list of flow reconcile listeners that have registered to get
+     * flow reconcile callbacks. Such callbacks are invoked, for example, when
+     * a switch with existing flow-mods joins this controller and those flows
+     * need to be reconciled with the current configuration of the controller.
+     */
+    protected ListenerDispatcher<OFType, IFlowReconcileListener>
+                                               flowReconcileListeners;
+
+    /** A FIFO queue to keep all outstanding flows for reconciliation */
+    Queue<OFMatchReconcile> flowQueue;
+    
+    /** Asynchronous task to feed the flowReconcile pipeline */
+    protected SingletonTask flowReconcileTask;
+    
+    String controllerPktInCounterName;
+    protected SimpleCounter lastPacketInCounter;
+    
+    protected static int MAX_SYSTEM_LOAD_PER_SECOND = 50000;
+    /** a minimum flow reconcile rate so that it won't starve */
+    protected static int MIN_FLOW_RECONCILE_PER_SECOND = 1000;
+    
+    /** delay between flow reconcile runs, in milliseconds */
+    protected static int FLOW_RECONCILE_DELAY_MILLISEC = 10;
+    protected Date lastReconcileTime;
+    
+    /** Config to enable or disable flowReconcile */
+    protected static final String EnableConfigKey = "enable";
+    protected boolean flowReconcileEnabled;
+    
+    public int flowReconcileThreadRunCount;
+    
+    @Override
+    public synchronized void addFlowReconcileListener(
+                IFlowReconcileListener listener) {
+        flowReconcileListeners.addListener(OFType.FLOW_MOD, listener);
+
+        if (logger.isTraceEnabled()) {
+            StringBuffer sb = new StringBuffer();
+            sb.append("FlowMod listeners: ");
+            for (IFlowReconcileListener l :
+                flowReconcileListeners.getOrderedListeners()) {
+                sb.append(l.getName());
+                sb.append(",");
+            }
+            logger.trace(sb.toString());
+        }
+    }
+
+    @Override
+    public synchronized void removeFlowReconcileListener(
+                IFlowReconcileListener listener) {
+        flowReconcileListeners.removeListener(listener);
+    }
+    
+    @Override
+    public synchronized void clearFlowReconcileListeners() {
+        flowReconcileListeners.clearListeners();
+    }
+    
+    /**
+     * Add to-be-reconciled flow to the queue.
+     *
+     * @param ofmRcIn the flow-match reconcile entry to queue
+     */
+    public void reconcileFlow(OFMatchReconcile ofmRcIn) {
+        if (ofmRcIn == null) return;
+        
+        // Make a copy before putting on the queue.
+        OFMatchReconcile myOfmRc = new OFMatchReconcile(ofmRcIn);
+    
+        flowQueue.add(myOfmRc);
+    
+        Date currTime = new Date();
+        long delay = 0;
+
+        /** schedule the reconcile task immediately if it has been more than
+         *  1 sec since the last run. Otherwise, schedule the reconcile task
+         *  in FLOW_RECONCILE_DELAY_MILLISEC.
+         */
+        if (currTime.after(new Date(lastReconcileTime.getTime() + 1000))) {
+            delay = 0;
+        } else {
+            delay = FLOW_RECONCILE_DELAY_MILLISEC;
+        }
+        flowReconcileTask.reschedule(delay, TimeUnit.MILLISECONDS);
+    
+        if (logger.isTraceEnabled()) {
+            logger.trace("Reconciling flow: {}, total: {}",
+                myOfmRc.toString(), flowQueue.size());
+        }
+    }
+    
+    @Override
+    public void updateFlowForDestinationDevice(IDevice device,
+                                            IFlowQueryHandler handler,
+                                            FCQueryEvType fcEvType) {
+        // NO-OP
+    }
+
+    @Override
+    public void updateFlowForSourceDevice(IDevice device,
+                                          IFlowQueryHandler handler,
+                                          FCQueryEvType fcEvType) {
+        // NO-OP
+    }
+    
+    @Override
+    public void flowQueryGenericHandler(FlowCacheQueryResp flowResp) {
+        if (flowResp.queryObj.evType != FCQueryEvType.GET) {
+            OFMatchReconcile ofmRc = new OFMatchReconcile();
+            /* Re-provision these flows */
+            for (QRFlowCacheObj entry : flowResp.qrFlowCacheObjList) {
+                /* reconcile the flows in entry */
+                entry.toOFMatchReconcile(ofmRc,
+                        flowResp.queryObj.applInstName,
+                        OFMatchReconcile.ReconcileAction.UPDATE_PATH);
+                reconcileFlow(ofmRc);
+            }
+        }
+        return;
+    }
+    
+    // IFloodlightModule
+
+    @Override
+    public Collection<Class<? extends IFloodlightService>> getModuleServices() {
+        Collection<Class<? extends IFloodlightService>> l = 
+            new ArrayList<Class<? extends IFloodlightService>>();
+        l.add(IFlowReconcileService.class);
+        return l;
+    }
+
+    @Override
+    public Map<Class<? extends IFloodlightService>, IFloodlightService> 
+                                                            getServiceImpls() {
+        Map<Class<? extends IFloodlightService>,
+        IFloodlightService> m = 
+            new HashMap<Class<? extends IFloodlightService>,
+                IFloodlightService>();
+        m.put(IFlowReconcileService.class, this);
+        return m;
+    }
+
+    @Override
+    public Collection<Class<? extends IFloodlightService>> 
+                                                    getModuleDependencies() {
+        Collection<Class<? extends IFloodlightService>> l = 
+                new ArrayList<Class<? extends IFloodlightService>>();
+        l.add(IThreadPoolService.class);
+        l.add(ICounterStoreService.class);
+        return l;
+    }
+
+    @Override
+    public void init(FloodlightModuleContext context)
+            throws FloodlightModuleException {
+        threadPool = context.getServiceImpl(IThreadPoolService.class);
+        counterStore = context.getServiceImpl(ICounterStoreService.class);
+    
+        flowQueue = new ConcurrentLinkedQueue<OFMatchReconcile>();
+        flowReconcileListeners = 
+                new ListenerDispatcher<OFType, IFlowReconcileListener>();
+        
+        Map<String, String> configParam = context.getConfigParams(this);
+        String enableValue = configParam.get(EnableConfigKey);
+        // Set flowReconcile default to true
+        flowReconcileEnabled = true;
+        if (enableValue != null &&
+            enableValue.equalsIgnoreCase("false")) {
+            flowReconcileEnabled = false;
+        }
+        
+        flowReconcileThreadRunCount = 0;
+        lastReconcileTime = new Date(0);
+        logger.debug("FlowReconcile is {}", flowReconcileEnabled);
+    }
+
+    @Override
+    public void startUp(FloodlightModuleContext context) {
+        // thread to do flow reconcile
+        ScheduledExecutorService ses = threadPool.getScheduledExecutor();
+        flowReconcileTask = new SingletonTask(ses, new Runnable() {
+            @Override
+            public void run() {
+                try {
+                    if (doReconcile()) {
+                        flowReconcileTask.reschedule(
+                            FLOW_RECONCILE_DELAY_MILLISEC,
+                            TimeUnit.MILLISECONDS);
+                    }
+                } catch (Exception e) {
+                    logger.warn("Exception in doReconcile(): {}",
+                                e.getMessage());
+                    e.printStackTrace();
+                }
+            }
+        });
+        
+        String packetInName = OFType.PACKET_IN.toClass().getName();
+        packetInName = packetInName.substring(packetInName.lastIndexOf('.')+1); 
+        
+        // Construct controller counter for the packet_in
+        controllerPktInCounterName =
+            CounterStore.createCounterName(ICounterStoreService.CONTROLLER_NAME, 
+                                           -1,
+                                           packetInName);
+    }
+    
+    /**
+     * Feed the flows into the flow reconciliation pipeline.
+     * @return true if more flows to be reconciled
+     *         false if no more flows to be reconciled.
+     */
+    protected boolean doReconcile() {
+        if (!flowReconcileEnabled) {
+            return false;
+        }
+    
+        // Record the execution time.
+        lastReconcileTime = new Date();
+    
+        ArrayList<OFMatchReconcile> ofmRcList =
+                        new ArrayList<OFMatchReconcile>();
+        
+        // Get the maximum number of flows that can be reconciled.
+        int reconcileCapacity = getCurrentCapacity();
+        if (logger.isTraceEnabled()) {
+            logger.trace("Reconcile capacity {} flows", reconcileCapacity);
+        }
+        while (!flowQueue.isEmpty() && reconcileCapacity > 0) {
+            OFMatchReconcile ofmRc = flowQueue.poll();
+            reconcileCapacity--;
+            if (ofmRc != null) {
+                ofmRcList.add(ofmRc);
+                if (logger.isTraceEnabled()) {
+                    logger.trace("Add flow {} to be the reconcileList", ofmRc.cookie);
+                }
+            } else {
+                break;
+            }
+        }
+        
+        // Run the flow through all the flow reconcile listeners
+        IFlowReconcileListener.Command retCmd;
+        if (ofmRcList.size() > 0) {
+            List<IFlowReconcileListener> listeners =
+                flowReconcileListeners.getOrderedListeners();
+            if (listeners == null) {
+                if (logger.isTraceEnabled()) {
+                    logger.trace("No flowReconcile listener");
+                }
+                return false;
+            }
+        
+            for (IFlowReconcileListener flowReconciler :
+                flowReconcileListeners.getOrderedListeners()) {
+                if (logger.isTraceEnabled()) {
+                    logger.trace("Reconciling flow: call listener {}",
+                            flowReconciler.getName());
+                }
+                retCmd = flowReconciler.reconcileFlows(ofmRcList);
+                if (retCmd == IFlowReconcileListener.Command.STOP) {
+                    break;
+                }
+            }
+            flowReconcileThreadRunCount++;
+        } else {
+            if (logger.isTraceEnabled()) {
+                logger.trace("No flow to be reconciled.");
+            }
+        }
+        
+        // Return true if there are more flows to be reconciled
+        if (flowQueue.isEmpty()) {
+            return false;
+        } else {
+            if (logger.isTraceEnabled()) {
+                logger.trace("{} more flows to be reconciled.",
+                            flowQueue.size());
+            }
+            return true;
+        }
+    }
+    
+    /**
+     * Compute the maximum number of flows to be reconciled.
+     * 
+     * It computes the packetIn increment from the counter values in
+     * the counter store, then computes the rate based on the elapsed time
+     * since the last query, and then computes the maximum flow reconcile
+     * rate by subtracting the packetIn rate from the hard-coded maximum
+     * system rate. If the system rate is reached, or the result is less
+     * than MIN_FLOW_RECONCILE_PER_SECOND, the maximum flow reconcile rate
+     * is set to MIN_FLOW_RECONCILE_PER_SECOND to prevent starvation.
+     * The rate is then converted to an absolute number of flows for the
+     * FLOW_RECONCILE_DELAY_MILLISEC window.
+     * @return the maximum number of flows that can be reconciled in this run
+     */
+    protected int getCurrentCapacity() {
+        ICounter pktInCounter =
+            counterStore.getCounter(controllerPktInCounterName);
+        int minFlows = MIN_FLOW_RECONCILE_PER_SECOND *
+                        FLOW_RECONCILE_DELAY_MILLISEC / 1000;
+        
+        // If no packetInCounter, then there shouldn't be any flow.
+        if (pktInCounter == null ||
+            pktInCounter.getCounterDate() == null ||
+            pktInCounter.getCounterValue() == null) {
+            logger.debug("counter {} doesn't exist",
+                        controllerPktInCounterName);
+            return minFlows;
+        }
+        
+        // Haven't received any counter value yet.
+        if (lastPacketInCounter == null) {
+            logger.debug("First time get the count for {}",
+                        controllerPktInCounterName);
+            lastPacketInCounter = (SimpleCounter)
+            SimpleCounter.createCounter(pktInCounter);
+            return minFlows;
+        }
+        
+        int pktInRate = getPktInRate(pktInCounter, new Date());
+        
+        // Update the last packetInCounter
+        lastPacketInCounter = (SimpleCounter)
+        SimpleCounter.createCounter(pktInCounter);
+        int capacity = minFlows;
+        if ((pktInRate + MIN_FLOW_RECONCILE_PER_SECOND) <=
+                               MAX_SYSTEM_LOAD_PER_SECOND) {
+            capacity = (MAX_SYSTEM_LOAD_PER_SECOND - pktInRate)
+                    * FLOW_RECONCILE_DELAY_MILLISEC / 1000;
+        }
+        
+        if (logger.isTraceEnabled()) {
+            logger.trace("Capacity is {}", capacity);
+        }
+        return capacity;
+    }
+    
+    protected int getPktInRate(ICounter newCnt, Date currentTime) {
+        if (newCnt == null ||
+            newCnt.getCounterDate() == null ||
+            newCnt.getCounterValue() == null) {
+            return 0;
+        }
+    
+        // Somehow the system time is messed up. return max packetIn rate
+        // to reduce the system load.
+        if (newCnt.getCounterDate().before(
+                lastPacketInCounter.getCounterDate())) {
+            logger.debug("Time is going backward. new {}, old {}",
+                    newCnt.getCounterDate(),
+                    lastPacketInCounter.getCounterDate());
+            return MAX_SYSTEM_LOAD_PER_SECOND;
+        }
+    
+        long elapsedTimeInSecond = (currentTime.getTime() -
+                    lastPacketInCounter.getCounterDate().getTime()) / 1000;
+        if (elapsedTimeInSecond == 0) {
+            // This should never happen. Check to avoid division by zero.
+            return 0;
+        }
+    
+        long diff = 0;
+        switch (newCnt.getCounterValue().getType()) {
+            case LONG:
+                long newLong = newCnt.getCounterValue().getLong();
+                long oldLong = lastPacketInCounter.getCounterValue().getLong();
+                if (newLong < oldLong) {
+                    // Roll over event
+                    diff = Long.MAX_VALUE - oldLong + newLong;
+                } else {
+                    diff = newLong - oldLong;
+                }
+                break;
+    
+            case DOUBLE:
+                double newDouble = newCnt.getCounterValue().getDouble();
+                double oldDouble = lastPacketInCounter.getCounterValue().getDouble();
+                if (newDouble < oldDouble) {
+                    // Roll over event
+                    diff = (long)(Double.MAX_VALUE - oldDouble + newDouble);
+                } else {
+                    diff = (long)(newDouble - oldDouble);
+                }
+                break;
+        }
+    
+        return (int)(diff/elapsedTimeInSecond);
+    }
+}
+
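
For a concrete sense of getCurrentCapacity() above, the following sketch replays its arithmetic with the constants defined in this class and a hypothetical measured packet-in rate of 20,000 per second; the numbers are illustrative, not measurements.

    int maxLoad   = 50000;   // MAX_SYSTEM_LOAD_PER_SECOND
    int minRate   = 1000;    // MIN_FLOW_RECONCILE_PER_SECOND
    int delayMs   = 10;      // FLOW_RECONCILE_DELAY_MILLISEC
    int pktInRate = 20000;   // hypothetical packet-in rate from the counter store

    int minFlows = minRate * delayMs / 1000;              // 10 flows per run (the floor)
    int capacity = (pktInRate + minRate <= maxLoad)
            ? (maxLoad - pktInRate) * delayMs / 1000      // (50000-20000)*10/1000 = 300 flows
            : minFlows;                                   // fall back to the floor under load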
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/flowcache/IFlowCacheService.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/flowcache/IFlowCacheService.java
new file mode 100644
index 0000000..8e44ed3
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/flowcache/IFlowCacheService.java
@@ -0,0 +1,185 @@
+package net.floodlightcontroller.flowcache;
+
+import org.openflow.protocol.OFMatchWithSwDpid;
+
+import net.floodlightcontroller.core.FloodlightContext;
+import net.floodlightcontroller.core.FloodlightContextStore;
+import net.floodlightcontroller.core.IOFSwitch;
+import net.floodlightcontroller.devicemanager.SwitchPort;
+import net.floodlightcontroller.core.module.IFloodlightService;
+
+/**
+ * The Interface IFlowCache.
+ * <p>
+ * public interface APIs to Big Switch Flow-Cache Service. Flow-Cache maintains
+ * the network-level flows that are currently deployed in the underlying 
+ * network. The flow cache can be queried using various filters by using the
+ * corresponding APIs.
+ * 
+ * @author subrata
+ *
+ */
+public interface IFlowCacheService extends IFloodlightService {
+
+    public static final String FLOWCACHE_APP_NAME = 
+        "net.floodlightcontroller.flowcache.appName";
+    public static final String FLOWCACHE_APP_INSTANCE_NAME = 
+        "net.floodlightcontroller.flowcache.appInstanceName";
+
+    /**
+     * The flow cache query event type indicating the event that triggered the
+     * query. The callerOpaqueObj can be keyed based on this event type
+     */
+    public static enum FCQueryEvType {
+        /** The GET query. Flows need not be reconciled for this query type */
+        GET,
+        /** A new App was added. */
+        APP_ADDED,
+        /** An App was deleted. */
+        APP_DELETED,
+        /** Interface rule of an app was modified */
+        APP_INTERFACE_RULE_CHANGED,
+        /** Some App configuration was changed */
+        APP_CONFIG_CHANGED,
+        /** An ACL was added */
+        ACL_ADDED,
+        /** An ACL was deleted */
+        ACL_DELETED,
+        /** An ACL rule was added */
+        ACL_RULE_ADDED,
+        /** An ACL rule was deleted */
+        ACL_RULE_DELETED,
+        /** ACL configuration was changed */
+        ACL_CONFIG_CHANGED,
+        /** device had moved to a different port in the network */
+        DEVICE_MOVED,
+        /** device's property had changed, such as tag assignment */
+        DEVICE_PROPERTY_CHANGED,
+        /** Link down */
+        LINK_DOWN,
+        /** Periodic scan of switch flow table */
+        PERIODIC_SCAN,
+    }
+    
+    /**
+     * A FloodlightContextStore object that can be used to interact with the 
+     * FloodlightContext information about flowCache.
+     */
+    public static final FloodlightContextStore<String> fcStore = 
+        new FloodlightContextStore<String>();
+    
+    /**
+     * Submit a flow cache query with query parameters specified in FCQueryObj
+     * object. The query object can be created using one of the newFCQueryObj 
+     * helper functions in IFlowCache interface. 
+     * <p>
+     * The queried flows are returned via the flowQueryRespHandler() callback 
+     * that the caller must implement. The caller can match the query with
+     * the response using unique callerOpaqueData which remains unchanged
+     * in the request and response callback.
+     *
+     * @see  com.bigswitch.floodlight.flowcache#flowQueryRespHandler
+     * @param query the flow cache query object as input
+     * 
+     */
+    public void submitFlowCacheQuery(FCQueryObj query);
+
+    /**
+     * Deactivates all flows in the flow cache for which the source switch
+     * matches the given switchDpid. Note that the flows are NOT deleted
+     * from the cache.
+     *
+     * @param switchDpid Data-path identifier of the source switch
+     */
+    public void deactivateFlowCacheBySwitch(long switchDpid);
+
+    /**
+     * Deletes all flows in the flow cache for which the source switch
+     * matches the given switchDpid. 
+     * 
+     * @param switchDpid Data-path identifier of the source switch
+     */
+    public void deleteFlowCacheBySwitch(long switchDpid);
+
+    /**
+     * Add a flow to the flow-cache - called when a flow-mod is about to be
+     * written to a set of switches. If it returns false then it should not
+     * be written to the switches. If it returns true then the cookie returned
+     * should be used for the flow mod sent to the switches.
+     *
+     * @param appInstName Application instance name
+     * @param ofm openflow match object
+     * @param cookie openflow-mod cookie
+     * @param srcSwDpid data-path identifier of the source switch
+     * @param inPort ingress port on the source switch
+     * @param priority openflow match priority
+     * @param action action taken on the matched packets (PERMIT or DENY)
+     * @return true:  flow should be written to the switch(es)
+     *         false: flow should not be written to the switch(es). false is
+     *                returned, for example, when the flow was recently
+     *                written to the flow-cache and hence it is dampened to
+     *                avoid frequent writes of the same flow to the switches
+     *                This case can typically arise for the flows written at the
+     *                internal ports as they are heavily wild-carded.
+     */
+    public boolean addFlow(String appInstName, OFMatchWithSwDpid ofm, 
+                           Long cookie, long srcSwDpid, 
+                           short inPort, short priority, byte action);
+
+    /**
+     * Add a flow to the flow-cache - called when a flow-mod is about to be
+     * written to a set of switches. If it returns false then it should not
+     * be written to the switches. If it returns true then the cookie returned
+     * should be used for the flow mod sent to the switches.
+     *
+     * @param cntx the cntx
+     * @param ofm the ofm
+     * @param cookie the cookie
+     * @param swPort the sw port
+     * @param priority the priority
+     * @param action the action
+     * @return true:  flow should be written to the switch(es)
+     * false: flow should not be written to the switch(es). false is
+     * returned, for example, when the flow was recently
+     * written to the flow-cache and hence it is dampened to
+     * avoid frequent writes of the same flow to the switches
+     * This case can typically arise for the flows written at the
+     * internal ports as they are heavily wild-carded.
+     */
+    public boolean addFlow(FloodlightContext cntx, OFMatchWithSwDpid ofm, 
+                           Long cookie, SwitchPort swPort, 
+                           short priority, byte action);
+
+    /**
+     * Move the specified flow from its current application instance to a 
+     * different application instance. This API can be used when a flow moves
+     * to a different application instance when the application instance
+     * configuration changes or when a device moves to a different part in
+     * the network that belongs to a different application instance.
+     * <p>
+     * Note that, if the flow was not found in the current application 
+     * instance then the flow is not moved to the new application instance.
+     * 
+     * @param ofMRc the object containing the flow match and new application
+     * instance name.
+     * @return true if the flow was found in the flow cache in the current 
+     * application instance; false if the flow was not found in the flow-cache
+     * in the current application instance.
+     */
+    public boolean moveFlowToDifferentApplInstName(OFMatchReconcile ofMRc);
+
+    /**
+     * Delete all flows from the specified switch
+     * @param sw
+     */
+    public void deleteAllFlowsAtASourceSwitch(IOFSwitch sw);
+    
+    /**
+     * Post a request to update flowcache from a switch.
+     * This is an asynchronous operation.
+     * It queries the switch for stats and updates the flowcache asynchronously
+     * with the response.
+     * @param swDpid
+     * @param delay_ms
+     */
+    public void querySwitchFlowTable(long swDpid);
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/flowcache/IFlowQueryHandler.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/flowcache/IFlowQueryHandler.java
new file mode 100644
index 0000000..5d1b1a9
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/flowcache/IFlowQueryHandler.java
@@ -0,0 +1,21 @@
+package net.floodlightcontroller.flowcache;
+
+public interface IFlowQueryHandler {
+    /**
+     * This callback function is called in response to a flow query request
+     * submitted to the flow cache service. The module handling this callback
+     * can be different from the one that submitted the query. In the flow
+     * query object used for submitting the flow query, the identity of the
+     * callback handler is passed. When flow cache service has all or some
+     * of the flows that need to be returned then this callback is called
+     * for the appropriate module. The response contains a boolean more flag 
+     * that indicates if there are additional flows that may be returned
+     * via additional callback calls.
+     *
+     * @param resp the response object containing the original flow query 
+     * object, partial or complete list of flows that we queried and some 
+     * metadata such as the more flag described above.
+     *
+     */
+    public void flowQueryRespHandler(FlowCacheQueryResp resp);
+}
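
Putting IFlowCacheService and IFlowQueryHandler together, a querying module submits an FCQueryObj and receives results, possibly over several callbacks, until moreFlag is false. The sketch below assumes it lives in the flowcache package so the types resolve without extra imports, and the wiring of flowCache (via getServiceImpl) is omitted.

    public class FlowQueryClientSketch implements IFlowQueryHandler {
        private IFlowCacheService flowCache;   // obtained via getServiceImpl() elsewhere

        public void queryFlows(FCQueryObj query) {
            // Results arrive asynchronously through flowQueryRespHandler().
            flowCache.submitFlowCacheQuery(query);
        }

        @Override
        public void flowQueryRespHandler(FlowCacheQueryResp resp) {
            for (QRFlowCacheObj obj : resp.qrFlowCacheObjList) {
                System.out.println("cached flow: " + obj);
            }
            if (!resp.moreFlag) {
                System.out.println("last callback for " + resp.queryObj.callerName);
            }
        }
    }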
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/flowcache/IFlowReconcileListener.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/flowcache/IFlowReconcileListener.java
new file mode 100644
index 0000000..f1100ed
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/flowcache/IFlowReconcileListener.java
@@ -0,0 +1,40 @@
+package net.floodlightcontroller.flowcache;
+
+import java.util.ArrayList;
+
+import net.floodlightcontroller.core.IListener;
+import org.openflow.protocol.OFType;
+
+/**
+ * The Interface IFlowReconciler.
+ *
+ * @author subrata
+ */
+public interface IFlowReconcileListener extends IListener<OFType> {
+    /**
+     * Given an input OFMatch, this method applies the policy of the reconciler
+     * and returns the same input OFMatch structure, modified. Additional
+     * OFMatches, if needed, are returned in OFMatch-list. All the OFMatches
+     * are assumed to have "PERMIT" action.
+     *
+     * @param ofmRcList  input flow matches, to be updated to be consistent with
+     *                   the policies of this reconciler 
+     *                   Additional OFMatch-es can be added to the "list" as
+     *                   needed. 
+     *                   For example after a new ACL application, one flow-match
+     *                   may result in multiple flow-matches
+     *                   The method must also update the ReconcileAction
+     *                   member in ofmRcList entries to indicate if the
+     *                   flow needs to be modified, deleted or left unchanged
+     *                   OR if a new entry is to be added after flow 
+     *                   reconciliation
+     *
+     *
+     * @return   Command.CONTINUE if the OFMatch should be sent to the
+     *           next flow reconciler. 
+     *           Command.STOP if the OFMatch shouldn't be processed
+     *           further. In this case no reconciled flow-mods would 
+     *           be programmed
+     */
+    public Command reconcileFlows(ArrayList<OFMatchReconcile> ofmRcList);
+}
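
A listener registered through IFlowReconcileService.addFlowReconcileListener() receives the queued OFMatchReconcile batch and marks each entry with the action the reconcile manager should apply. The fragment below sketches only reconcileFlows(); the remaining IListener<OFType> methods (name and callback-ordering hooks) are elided, and flowStillPermitted() is a hypothetical policy check.

    @Override
    public Command reconcileFlows(ArrayList<OFMatchReconcile> ofmRcList) {
        for (OFMatchReconcile ofmRc : ofmRcList) {
            if (flowStillPermitted(ofmRc.ofmWithSwDpid)) {   // hypothetical policy check
                // Path may have changed; let downstream modules reprogram it.
                ofmRc.rcAction = OFMatchReconcile.ReconcileAction.UPDATE_PATH;
            } else {
                ofmRc.rcAction = OFMatchReconcile.ReconcileAction.DELETE;
            }
        }
        return Command.CONTINUE;   // allow later reconcilers to inspect the batch
    }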
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/flowcache/IFlowReconcileService.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/flowcache/IFlowReconcileService.java
new file mode 100644
index 0000000..f48c4e0
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/flowcache/IFlowReconcileService.java
@@ -0,0 +1,75 @@
+/**
+ * Provides Flow Reconcile service to other modules that need to reconcile
+ * flows.
+ */
+package net.floodlightcontroller.flowcache;
+
+import net.floodlightcontroller.core.module.IFloodlightService;
+import net.floodlightcontroller.devicemanager.IDevice;
+import net.floodlightcontroller.flowcache.IFlowCacheService.FCQueryEvType;
+
+public interface IFlowReconcileService extends IFloodlightService {
+    /**
+     * Add a flow reconcile listener
+     * @param listener The module that can reconcile flows
+     */
+    public void addFlowReconcileListener(IFlowReconcileListener listener);
+
+    /**
+     * Remove a flow reconcile listener
+     * @param listener The module that no longer reconcile flows
+     */
+    public void removeFlowReconcileListener(IFlowReconcileListener listener);
+    
+    /**
+     * Remove all flow reconcile listeners
+     */
+    public void clearFlowReconcileListeners();
+    
+    /**
+     * Reconcile flow. No modified flow-mod is programmed if none is needed;
+     * if a cluster ID is provided then only flows in the given cluster are
+     * reprogrammed.
+     *
+     * @param ofmRcIn the ofm rc in
+     */
+    public void reconcileFlow(OFMatchReconcile ofmRcIn);
+    
+    /**
+     * Updates the flows to a device after the device moved to a new location
+     * <p>
+     * Queries the flow-cache to get all the flows destined to the given device.
+     * Reconciles each of these flows by potentially reprogramming them to the
+     * device's new attachment point
+     *
+     * @param device      device that has moved
+     * @param handler	  handler to process the flows
+     * @param fcEvType    Event type that triggered the update
+     *
+     */
+    public void updateFlowForDestinationDevice(IDevice device,
+            IFlowQueryHandler handler,
+    		FCQueryEvType fcEvType);
+    
+    /**
+     * Updates the flows from a device
+     * <p>
+     * Queries the flow-cache to get all the flows sourced from the given device.
+     * Reconciles each of these flows by potentially reprogramming them to the
+     * device's new attachment point
+     *
+     * @param device      device where the flow originates
+     * @param handler	  handler to process the flows
+     * @param fcEvType    Event type that triggered the update
+     *
+     */
+    public void updateFlowForSourceDevice(IDevice device,
+            IFlowQueryHandler handler,
+    		FCQueryEvType fcEvType);
+
+    /**
+     * Generic flow query handler to insert FlowMods into the reconcile pipeline.
+     * @param flowResp
+     */
+    public void flowQueryGenericHandler(FlowCacheQueryResp flowResp);
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/flowcache/OFMatchReconcile.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/flowcache/OFMatchReconcile.java
new file mode 100644
index 0000000..68831f4
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/flowcache/OFMatchReconcile.java
@@ -0,0 +1,84 @@
+package net.floodlightcontroller.flowcache;
+
+import net.floodlightcontroller.core.FloodlightContext;
+import org.openflow.protocol.OFMatchWithSwDpid;
+
+/**
+ * OFMatchReconcile class to indicate result of a flow-reconciliation.
+ */
+public class OFMatchReconcile  {
+ 
+    /**
+     * The enum ReconcileAction. Specifies the result of reconciliation of a 
+     * flow.
+     */
+    public enum ReconcileAction {
+
+        /** Delete the flow-mod from the switch */
+        DROP,
+        /** Leave the flow-mod as-is. */
+        NO_CHANGE,
+        /** Program this new flow mod. */
+        NEW_ENTRY,
+        /** 
+         * Reprogram the flow mod as the path of the flow might have changed,
+         * for example when a host is moved or when a link goes down. */
+        UPDATE_PATH,
+        /* Flow is now in a different BVS */
+        APP_INSTANCE_CHANGED,
+        /* Delete the flow-mod - used to delete, for example, drop flow-mods
+         * when the source and destination are in the same BVS after a 
+         * configuration change */
+        DELETE
+    }
+
+    /** The open flow match after reconciliation. */
+    public OFMatchWithSwDpid ofmWithSwDpid;
+    /** flow mod. priority */
+    public short priority;
+    /** Action of this flow-mod PERMIT or DENY */
+    public byte action;
+    /** flow mod. cookie */
+    public long cookie;
+    /** The application instance name. */
+    public String appInstName;
+    /**
+     * The new application instance name. This is null unless the flow
+     * has moved to a different BVS due to BVS config change or device
+     * move to a different switch port etc.*/
+    public String newAppInstName;
+    /** The reconcile action. */
+    public ReconcileAction rcAction;
+
+    // The context for the reconcile action
+    public FloodlightContext cntx;
+    
+    /**
+     * Instantiates a new OFMatchReconcile object.
+     */
+    public OFMatchReconcile() {
+        ofmWithSwDpid      = new OFMatchWithSwDpid();
+        rcAction = ReconcileAction.NO_CHANGE;
+        cntx = new FloodlightContext();
+    }
+    
+    public OFMatchReconcile(OFMatchReconcile copy) {
+        ofmWithSwDpid =
+            new OFMatchWithSwDpid(copy.ofmWithSwDpid.getOfMatch(),
+                    copy.ofmWithSwDpid.getSwitchDataPathId());
+        priority = copy.priority;
+        action = copy.action;
+        cookie = copy.cookie;
+        appInstName = copy.appInstName;
+        newAppInstName = copy.newAppInstName;
+        rcAction = copy.rcAction;
+        cntx = new FloodlightContext();
+    }
+    
+    @Override
+    public String toString() {
+        return "OFMatchReconcile [" + ofmWithSwDpid + " priority=" + priority + " action=" + action + 
+                " cookie=" + cookie + " appInstName=" + appInstName + " newAppInstName=" + newAppInstName + 
+                " ReconcileAction=" + rcAction + "]";
+    }
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/flowcache/PendingSwRespKey.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/flowcache/PendingSwRespKey.java
new file mode 100644
index 0000000..767ce94
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/flowcache/PendingSwRespKey.java
@@ -0,0 +1,42 @@
+package net.floodlightcontroller.flowcache;
+
+public class PendingSwRespKey {
+    long swDpid;
+    int  transId;
+
+    public PendingSwRespKey(long swDpid, int transId) {
+        this.swDpid  = swDpid;
+        this.transId = transId;
+    }
+
+    @Override
+    public int hashCode() {
+        final int prime = 97;
+        Long dpid   = swDpid;
+        Integer tid = transId;
+        return (tid.hashCode()*prime + dpid.hashCode());
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (this == obj) {
+            return true;
+        }
+        if (obj == null) {
+            return false;
+        }
+        if (!(obj instanceof PendingSwRespKey)) {
+            return false;
+        }
+        PendingSwRespKey other = (PendingSwRespKey) obj;
+        if ((swDpid != other.swDpid) || (transId != other.transId)) {
+            return false;
+        }
+        return true;
+    }
+
+    @Override
+    public String toString() {
+        return Long.toHexString(swDpid)+","+Integer.toString(transId);
+    }
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/flowcache/PendingSwitchResp.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/flowcache/PendingSwitchResp.java
new file mode 100644
index 0000000..d6f264f
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/flowcache/PendingSwitchResp.java
@@ -0,0 +1,24 @@
+package net.floodlightcontroller.flowcache;
+
+import net.floodlightcontroller.flowcache.IFlowCacheService.FCQueryEvType;
+
+/**
+ * The Class PendingSwitchResp. This object is used to track the pending
+ * responses to switch flow table queries.
+ */
+public class PendingSwitchResp {
+    protected FCQueryEvType evType;
+
+    public PendingSwitchResp(
+            FCQueryEvType evType) {
+        this.evType      = evType;
+    }
+    
+    public FCQueryEvType getEvType() {
+        return evType;
+    }
+
+    public void setEvType(FCQueryEvType evType) {
+        this.evType = evType;
+    }
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/flowcache/QRFlowCacheObj.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/flowcache/QRFlowCacheObj.java
new file mode 100644
index 0000000..5121f8b
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/flowcache/QRFlowCacheObj.java
@@ -0,0 +1,67 @@
+package net.floodlightcontroller.flowcache;
+
+
+import org.openflow.protocol.OFMatchWithSwDpid;
+
+/**
+ * Used in FlowCacheQueryResp as a query result.
+ * Used to return one flow when queried by one of the flow cache APIs.
+ * One of these QRFlowCacheObj is returned for each combination of
+ * priority and action.
+ *
+ * @author subrata
+ */
+public class QRFlowCacheObj {
+
+    /** The open flow match object. */
+    public OFMatchWithSwDpid ofmWithSwDpid;
+    /** The flow-mod priority. */
+    public short   priority;
+    /** flow-mod cookie */
+    public long    cookie;
+    /** The action - PERMIT or DENY. */
+    public byte    action;
+    /** The reserved byte to align with 8 bytes. */
+    public byte    reserved;
+
+    /**
+     * Instantiates a new flow cache query object.
+     *
+     * @param priority the priority
+     * @param action the action
+     */
+    public QRFlowCacheObj(short priority, byte action, long cookie) {
+        ofmWithSwDpid = new OFMatchWithSwDpid();
+        this.action   = action;
+        this.priority = priority;
+        this.cookie   = cookie;
+    }
+
+    /**
+     * Populate a given OFMatchReconcile object from the values of this
+     * class.
+     *
+     * @param ofmRc the given OFMatchReconcile object
+     * @param appInstName the application instance name
+     * @param rcAction the reconcile action
+     */
+    public   void toOFMatchReconcile(OFMatchReconcile ofmRc,
+                            String appInstName, OFMatchReconcile.ReconcileAction rcAction) {
+        ofmRc.ofmWithSwDpid   = ofmWithSwDpid; // not copying
+        ofmRc.appInstName     = appInstName;
+        ofmRc.rcAction        = rcAction;
+        ofmRc.priority        = priority;
+        ofmRc.cookie          = cookie;
+        ofmRc.action          = action;
+    }
+    
+    @Override
+    public String toString() {
+        String str = "ofmWithSwDpid: " + this.ofmWithSwDpid.toString() + " ";
+        str += "priority: " + this.priority + " ";
+        str += "cookie: " + this.cookie + " ";
+        str += "action: " + this.action + " ";
+        str += "reserved: " + this.reserved + " ";
+        return str;
+    }
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/forwarding/Forwarding.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/forwarding/Forwarding.java
new file mode 100644
index 0000000..3fc7ae9
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/forwarding/Forwarding.java
@@ -0,0 +1,453 @@
+/**
+*    Copyright 2011, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.forwarding;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+
+import net.floodlightcontroller.core.FloodlightContext;
+import net.floodlightcontroller.core.IFloodlightProviderService;
+import net.floodlightcontroller.core.IOFSwitch;
+import net.floodlightcontroller.devicemanager.IDevice;
+import net.floodlightcontroller.devicemanager.IDeviceService;
+import net.floodlightcontroller.devicemanager.SwitchPort;
+import net.floodlightcontroller.core.annotations.LogMessageCategory;
+import net.floodlightcontroller.core.annotations.LogMessageDoc;
+import net.floodlightcontroller.core.annotations.LogMessageDocs;
+import net.floodlightcontroller.core.module.FloodlightModuleContext;
+import net.floodlightcontroller.core.module.FloodlightModuleException;
+import net.floodlightcontroller.core.module.IFloodlightModule;
+import net.floodlightcontroller.core.module.IFloodlightService;
+import net.floodlightcontroller.core.util.AppCookie;
+import net.floodlightcontroller.counter.ICounterStoreService;
+import net.floodlightcontroller.packet.Ethernet;
+import net.floodlightcontroller.routing.ForwardingBase;
+import net.floodlightcontroller.routing.IRoutingDecision;
+import net.floodlightcontroller.routing.IRoutingService;
+import net.floodlightcontroller.routing.Route;
+import net.floodlightcontroller.topology.ITopologyService;
+
+import org.openflow.protocol.OFFlowMod;
+import org.openflow.protocol.OFMatch;
+import org.openflow.protocol.OFPacketIn;
+import org.openflow.protocol.OFPacketOut;
+import org.openflow.protocol.OFPort;
+import org.openflow.protocol.OFType;
+import org.openflow.protocol.action.OFAction;
+import org.openflow.protocol.action.OFActionOutput;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@LogMessageCategory("Flow Programming")
+public class Forwarding extends ForwardingBase implements IFloodlightModule {
+    protected static Logger log = LoggerFactory.getLogger(Forwarding.class);
+
+    @Override
+    @LogMessageDoc(level="ERROR",
+                   message="Unexpected decision made for this packet-in={}",
+                   explanation="An unsupported PacketIn decision has been " +
+                   		"passed to the flow programming component",
+                   recommendation=LogMessageDoc.REPORT_CONTROLLER_BUG)
+    public Command processPacketInMessage(IOFSwitch sw, OFPacketIn pi, IRoutingDecision decision, 
+                                          FloodlightContext cntx) {
+        Ethernet eth = IFloodlightProviderService.bcStore.get(cntx, 
+                                   IFloodlightProviderService.CONTEXT_PI_PAYLOAD);
+        
+        // If a decision has been made we obey it
+        // otherwise we just forward
+        if (decision != null) {
+            if (log.isTraceEnabled()) {
+                log.trace("Forwaring decision={} was made for PacketIn={}",
+                        decision.getRoutingAction().toString(),
+                        pi);
+            }
+            
+            switch(decision.getRoutingAction()) {
+                case NONE:
+                    // don't do anything
+                    return Command.CONTINUE;
+                case FORWARD_OR_FLOOD:
+                case FORWARD:
+                    doForwardFlow(sw, pi, cntx, false);
+                    return Command.CONTINUE;
+                case MULTICAST:
+                    // treat as broadcast
+                    doFlood(sw, pi, cntx);
+                    return Command.CONTINUE;
+                case DROP:
+                    doDropFlow(sw, pi, decision, cntx);
+                    return Command.CONTINUE;
+                default:
+                    log.error("Unexpected decision made for this packet-in={}",
+                            pi, decision.getRoutingAction());
+                    return Command.CONTINUE;
+            }
+        } else {
+            if (log.isTraceEnabled()) {
+                log.trace("No decision was made for PacketIn={}, forwarding",
+                        pi);
+            }
+            
+            if (eth.isBroadcast() || eth.isMulticast()) {
+                // For now we treat multicast as broadcast
+                doFlood(sw, pi, cntx);
+            } else {
+                doForwardFlow(sw, pi, cntx, false);
+            }
+        }
+        
+        return Command.CONTINUE;
+    }
+    
+    @LogMessageDoc(level="ERROR",
+            message="Failure writing drop flow mod",
+            explanation="An I/O error occured while trying to write a " +
+            		"drop flow mod to a switch",
+            recommendation=LogMessageDoc.CHECK_SWITCH)
+    protected void doDropFlow(IOFSwitch sw, OFPacketIn pi, IRoutingDecision decision, FloodlightContext cntx) {
+        // initialize match structure and populate it using the packet
+        OFMatch match = new OFMatch();
+        match.loadFromPacket(pi.getPacketData(), pi.getInPort());
+        if (decision.getWildcards() != null) {
+            match.setWildcards(decision.getWildcards());
+        }
+        
+        // Create flow-mod based on packet-in and src-switch
+        OFFlowMod fm =
+                (OFFlowMod) floodlightProvider.getOFMessageFactory()
+                                              .getMessage(OFType.FLOW_MOD);
+        List<OFAction> actions = new ArrayList<OFAction>(); // Set no action to
+                                                            // drop
+        long cookie = AppCookie.makeCookie(FORWARDING_APP_ID, 0);
+        
+        fm.setCookie(cookie)
+          .setHardTimeout((short) 0)
+          .setIdleTimeout((short) 5)
+          .setBufferId(OFPacketOut.BUFFER_ID_NONE)
+          .setMatch(match)
+          .setActions(actions)
+          .setLengthU(OFFlowMod.MINIMUM_LENGTH); // +OFActionOutput.MINIMUM_LENGTH);
+
+        try {
+            if (log.isDebugEnabled()) {
+                log.debug("write drop flow-mod sw={} match={} flow-mod={}",
+                          new Object[] { sw, match, fm });
+            }
+            messageDamper.write(sw, fm, cntx);
+        } catch (IOException e) {
+            log.error("Failure writing drop flow mod", e);
+        }
+    }
+    
+    protected void doForwardFlow(IOFSwitch sw, OFPacketIn pi, 
+                                 FloodlightContext cntx,
+                                 boolean requestFlowRemovedNotifn) {    
+        OFMatch match = new OFMatch();
+        match.loadFromPacket(pi.getPacketData(), pi.getInPort());
+
+        // Check if we have the location of the destination
+        IDevice dstDevice = 
+                IDeviceService.fcStore.
+                    get(cntx, IDeviceService.CONTEXT_DST_DEVICE);
+        
+        if (dstDevice != null) {
+            IDevice srcDevice =
+                    IDeviceService.fcStore.
+                        get(cntx, IDeviceService.CONTEXT_SRC_DEVICE);
+            Long srcIsland = topology.getL2DomainId(sw.getId());
+            
+            if (srcDevice == null) {
+                log.debug("No device entry found for source device");
+                return;
+            }
+            if (srcIsland == null) {
+                log.debug("No openflow island found for source {}/{}", 
+                          sw.getStringId(), pi.getInPort());
+                return;
+            }
+
+            // Validate that we have a destination known on the same island
+            // Validate that the source and destination are not on the same switchport
+            boolean on_same_island = false;
+            boolean on_same_if = false;
+            for (SwitchPort dstDap : dstDevice.getAttachmentPoints()) {
+                long dstSwDpid = dstDap.getSwitchDPID();
+                Long dstIsland = topology.getL2DomainId(dstSwDpid);
+                if ((dstIsland != null) && dstIsland.equals(srcIsland)) {
+                    on_same_island = true;
+                    if ((sw.getId() == dstSwDpid) &&
+                        (pi.getInPort() == dstDap.getPort())) {
+                        on_same_if = true;
+                    }
+                    break;
+                }
+            }
+            
+            if (!on_same_island) {
+                // Flood since we don't know the dst device
+                if (log.isTraceEnabled()) {
+                    log.trace("No first hop island found for destination " + 
+                              "device {}, Action = flooding", dstDevice);
+                }
+                doFlood(sw, pi, cntx);
+                return;
+            }            
+            
+            if (on_same_if) {
+                if (log.isTraceEnabled()) {
+                    log.trace("Both source and destination are on the same " + 
+                              "switch/port {}/{}, Action = NOP", 
+                              sw.toString(), pi.getInPort());
+                }
+                return;
+            }
+
+            // Install all the routes where both src and dst have attachment
+            // points.  Since the lists are stored in sorted order we can 
+            // traverse the attachment points in O(m+n) time
+            SwitchPort[] srcDaps = srcDevice.getAttachmentPoints();
+            Arrays.sort(srcDaps, clusterIdComparator);
+            SwitchPort[] dstDaps = dstDevice.getAttachmentPoints();
+            Arrays.sort(dstDaps, clusterIdComparator);
+
+            int iSrcDaps = 0, iDstDaps = 0;
+
+            while ((iSrcDaps < srcDaps.length) && (iDstDaps < dstDaps.length)) {
+                SwitchPort srcDap = srcDaps[iSrcDaps];
+                SwitchPort dstDap = dstDaps[iDstDaps];
+                Long srcCluster = 
+                        topology.getL2DomainId(srcDap.getSwitchDPID());
+                Long dstCluster = 
+                        topology.getL2DomainId(dstDap.getSwitchDPID());
+
+                int srcVsDest = srcCluster.compareTo(dstCluster);
+                if (srcVsDest == 0) {
+                    if (!srcDap.equals(dstDap) && 
+                        (srcCluster != null) && 
+                        (dstCluster != null)) {
+                        Route route = 
+                                routingEngine.getRoute(srcDap.getSwitchDPID(),
+                                                       (short)srcDap.getPort(),
+                                                       dstDap.getSwitchDPID(),
+                                                       (short)dstDap.getPort());
+                        if (route != null) {
+                            if (log.isTraceEnabled()) {
+                                log.trace("pushRoute match={} route={} " + 
+                                          "destination={}:{}",
+                                          new Object[] {match, route, 
+                                                        dstDap.getSwitchDPID(),
+                                                        dstDap.getPort()});
+                            }
+                            long cookie = 
+                                    AppCookie.makeCookie(FORWARDING_APP_ID, 0);
+                            
+                            // If there is a prior routing decision, use its wildcards
+                            Integer wildcard_hints = null;
+                            IRoutingDecision decision = null;
+                            if (cntx != null) {
+                                decision = IRoutingDecision.rtStore
+                                        .get(cntx,
+                                                IRoutingDecision.CONTEXT_DECISION);
+                            }
+                            if (decision != null) {
+                                wildcard_hints = decision.getWildcards();
+                            } else {
+                                // No prior routing decision: wildcard everything except in-port, VLAN, L2 addresses, and IP src/dst
+                                wildcard_hints = ((Integer) sw
+                                        .getAttribute(IOFSwitch.PROP_FASTWILDCARDS))
+                                        .intValue()
+                                        & ~OFMatch.OFPFW_IN_PORT
+                                        & ~OFMatch.OFPFW_DL_VLAN
+                                        & ~OFMatch.OFPFW_DL_SRC
+                                        & ~OFMatch.OFPFW_DL_DST
+                                        & ~OFMatch.OFPFW_NW_SRC_MASK
+                                        & ~OFMatch.OFPFW_NW_DST_MASK;
+                            }
+
+                            pushRoute(route, match, wildcard_hints, pi, sw.getId(), cookie, 
+                                      cntx, requestFlowRemovedNotifn, false,
+                                      OFFlowMod.OFPFC_ADD);
+                        }
+                    }
+                    iSrcDaps++;
+                    iDstDaps++;
+                } else if (srcVsDest < 0) {
+                    iSrcDaps++;
+                } else {
+                    iDstDaps++;
+                }
+            }
+        } else {
+            // Flood since we don't know the dst device
+            doFlood(sw, pi, cntx);
+        }
+    }
+
+    /**
+     * Creates an OFPacketOut with the OFPacketIn data that is flooded on all ports unless
+     * the port is blocked, in which case the packet will be dropped.
+     * @param sw The switch that receives the OFPacketIn
+     * @param pi The OFPacketIn that came to the switch
+     * @param cntx The FloodlightContext associated with this OFPacketIn
+     */
+    @LogMessageDoc(level="ERROR",
+                   message="Failure writing PacketOut " +
+                   		"switch={switch} packet-in={packet-in} " +
+                   		"packet-out={packet-out}",
+                   explanation="An I/O error occured while writing a packet " +
+                   		"out message to the switch",
+                   recommendation=LogMessageDoc.CHECK_SWITCH)
+    protected void doFlood(IOFSwitch sw, OFPacketIn pi, FloodlightContext cntx) {
+        if (topology.isIncomingBroadcastAllowed(sw.getId(),
+                                                pi.getInPort()) == false) {
+            if (log.isTraceEnabled()) {
+                log.trace("doFlood, drop broadcast packet, pi={}, " + 
+                          "from a blocked port, srcSwitch=[{},{}], linkInfo={}",
+                          new Object[] {pi, sw.getId(),pi.getInPort()});
+            }
+            return;
+        }
+
+        // Set Action to flood
+        OFPacketOut po = 
+            (OFPacketOut) floodlightProvider.getOFMessageFactory().getMessage(OFType.PACKET_OUT);
+        List<OFAction> actions = new ArrayList<OFAction>();
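+        // Prefer OFPP_FLOOD (all enabled ports except the ingress port) when the
+        // switch supports it; otherwise fall back to OFPP_ALL.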
+        if (sw.hasAttribute(IOFSwitch.PROP_SUPPORTS_OFPP_FLOOD)) {
+            actions.add(new OFActionOutput(OFPort.OFPP_FLOOD.getValue(), 
+                                           (short)0xFFFF));
+        } else {
+            actions.add(new OFActionOutput(OFPort.OFPP_ALL.getValue(), 
+                                           (short)0xFFFF));
+        }
+        po.setActions(actions);
+        po.setActionsLength((short) OFActionOutput.MINIMUM_LENGTH);
+
+        // set buffer-id, in-port and packet-data based on packet-in
+        short poLength = (short)(po.getActionsLength() + OFPacketOut.MINIMUM_LENGTH);
+        po.setBufferId(pi.getBufferId());
+        po.setInPort(pi.getInPort());
+        if (pi.getBufferId() == OFPacketOut.BUFFER_ID_NONE) {
+            byte[] packetData = pi.getPacketData();
+            poLength += packetData.length;
+            po.setPacketData(packetData);
+        }
+        po.setLength(poLength);
+        
+        try {
+            if (log.isTraceEnabled()) {
+                log.trace("Writing flood PacketOut switch={} packet-in={} packet-out={}",
+                          new Object[] {sw, pi, po});
+            }
+            messageDamper.write(sw, po, cntx);
+        } catch (IOException e) {
+            log.error("Failure writing PacketOut switch={} packet-in={} packet-out={}",
+                    new Object[] {sw, pi, po}, e);
+        }            
+
+        return;
+    }
+    
+    // IFloodlightModule methods
+    
+    @Override
+    public Collection<Class<? extends IFloodlightService>> getModuleServices() {
+        // We don't export any services
+        return null;
+    }
+
+    @Override
+    public Map<Class<? extends IFloodlightService>, IFloodlightService>
+            getServiceImpls() {
+        // We don't have any services
+        return null;
+    }
+
+    @Override
+    public Collection<Class<? extends IFloodlightService>> getModuleDependencies() {
+        Collection<Class<? extends IFloodlightService>> l = 
+                new ArrayList<Class<? extends IFloodlightService>>();
+        l.add(IFloodlightProviderService.class);
+        l.add(IDeviceService.class);
+        l.add(IRoutingService.class);
+        l.add(ITopologyService.class);
+        l.add(ICounterStoreService.class);
+        return l;
+    }
+
+    @Override
+    @LogMessageDocs({
+        @LogMessageDoc(level="WARN",
+                message="Error parsing flow idle timeout, " +
+                        "using default of {number} seconds",
+                explanation="The properties file contains an invalid " +
+                        "flow idle timeout",
+                recommendation="Correct the idle timeout in the " +
+                        "properties file."),
+        @LogMessageDoc(level="WARN",
+                message="Error parsing flow hard timeout, " +
+                        "using default of {number} seconds",
+                explanation="The properties file contains an invalid " +
+                            "flow hard timeout",
+                recommendation="Correct the hard timeout in the " +
+                                "properties file.")
+    })
+    public void init(FloodlightModuleContext context) throws FloodlightModuleException {
+        super.init();
+        this.floodlightProvider = context.getServiceImpl(IFloodlightProviderService.class);
+        this.deviceManager = context.getServiceImpl(IDeviceService.class);
+        this.routingEngine = context.getServiceImpl(IRoutingService.class);
+        this.topology = context.getServiceImpl(ITopologyService.class);
+        this.counterStore = context.getServiceImpl(ICounterStoreService.class);
+        
+        // read our config options
+        Map<String, String> configOptions = context.getConfigParams(this);
+        try {
+            String idleTimeout = configOptions.get("idletimeout");
+            if (idleTimeout != null) {
+                FLOWMOD_DEFAULT_IDLE_TIMEOUT = Short.parseShort(idleTimeout);
+            }
+        } catch (NumberFormatException e) {
+            log.warn("Error parsing flow idle timeout, " +
+            		 "using default of {} seconds",
+                     FLOWMOD_DEFAULT_IDLE_TIMEOUT);
+        }
+        try {
+            String hardTimeout = configOptions.get("hardtimeout");
+            if (hardTimeout != null) {
+                FLOWMOD_DEFAULT_HARD_TIMEOUT = Short.parseShort(hardTimeout);
+            }
+        } catch (NumberFormatException e) {
+            log.warn("Error parsing flow hard timeout, " +
+            		 "using default of {} seconds",
+                     FLOWMOD_DEFAULT_HARD_TIMEOUT);
+        }
+        log.debug("FlowMod idle timeout set to {} seconds", 
+                  FLOWMOD_DEFAULT_IDLE_TIMEOUT);
+        log.debug("FlowMod hard timeout set to {} seconds", 
+                  FLOWMOD_DEFAULT_HARD_TIMEOUT);
+    }
+
+    @Override
+    public void startUp(FloodlightModuleContext context) {
+        super.startUp();
+    }
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/hub/Hub.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/hub/Hub.java
new file mode 100644
index 0000000..3618351
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/hub/Hub.java
@@ -0,0 +1,144 @@
+/**
+*    Copyright 2011, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.hub;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Map;
+
+import net.floodlightcontroller.core.FloodlightContext;
+import net.floodlightcontroller.core.IFloodlightProviderService;
+import net.floodlightcontroller.core.IOFMessageListener;
+import net.floodlightcontroller.core.IOFSwitch;
+import net.floodlightcontroller.core.module.FloodlightModuleContext;
+import net.floodlightcontroller.core.module.FloodlightModuleException;
+import net.floodlightcontroller.core.module.IFloodlightModule;
+import net.floodlightcontroller.core.module.IFloodlightService;
+
+import org.openflow.protocol.OFMessage;
+import org.openflow.protocol.OFPacketIn;
+import org.openflow.protocol.OFPacketOut;
+import org.openflow.protocol.OFPort;
+import org.openflow.protocol.OFType;
+import org.openflow.protocol.action.OFAction;
+import org.openflow.protocol.action.OFActionOutput;
+import org.openflow.util.U16;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ *
+ * @author David Erickson (daviderickson@cs.stanford.edu) - 04/04/10
+ */
+public class Hub implements IFloodlightModule, IOFMessageListener {
+    protected static Logger log = LoggerFactory.getLogger(Hub.class);
+
+    protected IFloodlightProviderService floodlightProvider;
+
+    /**
+     * @param floodlightProvider the floodlightProvider to set
+     */
+    public void setFloodlightProvider(IFloodlightProviderService floodlightProvider) {
+        this.floodlightProvider = floodlightProvider;
+    }
+
+    @Override
+    public String getName() {
+        return Hub.class.getPackage().getName();
+    }
+
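+    /**
+     * Handles an OFPacketIn by sending an OFPacketOut that floods the frame on
+     * OFPP_FLOOD, i.e. every enabled port except the one it arrived on.
+     */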
+    public Command receive(IOFSwitch sw, OFMessage msg, FloodlightContext cntx) {
+        OFPacketIn pi = (OFPacketIn) msg;
+        OFPacketOut po = (OFPacketOut) floodlightProvider.getOFMessageFactory()
+                .getMessage(OFType.PACKET_OUT);
+        po.setBufferId(pi.getBufferId())
+            .setInPort(pi.getInPort());
+
+        // set actions
+        OFActionOutput action = new OFActionOutput()
+            .setPort((short) OFPort.OFPP_FLOOD.getValue());
+        po.setActions(Collections.singletonList((OFAction)action));
+        po.setActionsLength((short) OFActionOutput.MINIMUM_LENGTH);
+
+        // set data if it is included in the packet-in
+        if (pi.getBufferId() == 0xffffffff) {
+            byte[] packetData = pi.getPacketData();
+            po.setLength(U16.t(OFPacketOut.MINIMUM_LENGTH
+                    + po.getActionsLength() + packetData.length));
+            po.setPacketData(packetData);
+        } else {
+            po.setLength(U16.t(OFPacketOut.MINIMUM_LENGTH
+                    + po.getActionsLength()));
+        }
+        try {
+            sw.write(po, cntx);
+        } catch (IOException e) {
+            log.error("Failure writing PacketOut", e);
+        }
+
+        return Command.CONTINUE;
+    }
+
+    @Override
+    public boolean isCallbackOrderingPrereq(OFType type, String name) {
+        return false;
+    }
+
+    @Override
+    public boolean isCallbackOrderingPostreq(OFType type, String name) {
+        return false;
+    }
+
+    // IFloodlightModule
+    
+    @Override
+    public Collection<Class<? extends IFloodlightService>> getModuleServices() {
+        // We don't provide any services, return null
+        return null;
+    }
+
+    @Override
+    public Map<Class<? extends IFloodlightService>, IFloodlightService>
+            getServiceImpls() {
+        // We don't provide any services, return null
+        return null;
+    }
+
+    @Override
+    public Collection<Class<? extends IFloodlightService>>
+            getModuleDependencies() {
+        Collection<Class<? extends IFloodlightService>> l = 
+                new ArrayList<Class<? extends IFloodlightService>>();
+        l.add(IFloodlightProviderService.class);
+        return l;
+    }
+
+    @Override
+    public void init(FloodlightModuleContext context)
+            throws FloodlightModuleException {
+        floodlightProvider =
+                context.getServiceImpl(IFloodlightProviderService.class);
+    }
+
+    @Override
+    public void startUp(FloodlightModuleContext context) {
+        floodlightProvider.addOFMessageListener(OFType.PACKET_IN, this);
+    }
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/jython/JythonDebugInterface.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/jython/JythonDebugInterface.java
new file mode 100644
index 0000000..19a97b5
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/jython/JythonDebugInterface.java
@@ -0,0 +1,68 @@
+package net.floodlightcontroller.jython;
+
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import net.floodlightcontroller.core.module.FloodlightModuleContext;
+import net.floodlightcontroller.core.module.FloodlightModuleException;
+import net.floodlightcontroller.core.module.IFloodlightModule;
+import net.floodlightcontroller.core.module.IFloodlightService;
+
+public class JythonDebugInterface implements IFloodlightModule {
+    protected static Logger log = LoggerFactory.getLogger(JythonDebugInterface.class);
+    protected JythonServer debug_server;
+    protected static int JYTHON_PORT = 6655;
+    
+    @Override
+    public Collection<Class<? extends IFloodlightService>> getModuleServices() {
+        // We don't export services
+        return null;
+    }
+
+    @Override
+    public Map<Class<? extends IFloodlightService>, IFloodlightService>
+            getServiceImpls() {
+        // We don't export services
+        return null;
+    }
+
+    @Override
+    public Collection<Class<? extends IFloodlightService>>
+            getModuleDependencies() {
+        // We don't have any dependencies
+        return null;
+    }
+
+    @Override
+    public void init(FloodlightModuleContext context)
+             throws FloodlightModuleException {
+        // no-op
+    }
+
+    @Override
+    public void startUp(FloodlightModuleContext context) {
+        Map<String, Object> locals = new HashMap<String, Object>();     
+        // add all existing module references to the debug server
+        for (Class<? extends IFloodlightService> s : context.getAllServices()) {
+            // Put only the last part of the name
+            String[] bits = s.getCanonicalName().split("\\.");
+            String name = bits[bits.length-1];
+            locals.put(name, context.getServiceImpl(s));
+        }
+        
+        // read our config options
+        Map<String, String> configOptions = context.getConfigParams(this);
+        int port = JYTHON_PORT;
+        String portNum = configOptions.get("port");
+        if (portNum != null) {
+            port = Integer.parseInt(portNum);
+        }
+        
+        // Assign the module's debug_server field (previously shadowed by a local variable)
+        debug_server = new JythonServer(port, locals);
+        debug_server.start();
+    }
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/jython/JythonServer.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/jython/JythonServer.java
new file mode 100644
index 0000000..fc35b15
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/jython/JythonServer.java
@@ -0,0 +1,63 @@
+package net.floodlightcontroller.jython;
+
+import java.net.URL;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.python.util.PythonInterpreter;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * This class starts a thread that runs a jython interpreter that
+ * can be used for debugging (or even development).
+ *
+ * @author mandeepdhami
+ *
+ */
+public class JythonServer extends Thread {
+    protected static Logger log = LoggerFactory.getLogger(JythonServer.class);
+
+	int port;
+	Map<String, Object> locals;
+	
+	/**
+	 * @param port_ Port to use for jython server
+	 * @param locals_ Locals to add to the interpreters top level name space
+	 */
+	public JythonServer(int port_, Map<String, Object> locals_) {
+		this.port = port_ ;
+		this.locals = locals_;
+		if (this.locals == null) {
+			this.locals = new HashMap<String, Object>();
+		}
+		this.locals.put("log", JythonServer.log);
+		this.setName("debugserver");
+	}
+
+    /**
+     * The main thread for this class invoked by Thread.run()
+     *
+     * @see java.lang.Thread#run()
+     */
+    public void run() {
+        PythonInterpreter p = new PythonInterpreter();
+        for (String name : this.locals.keySet()) {
+            p.set(name, this.locals.get(name));
+        }
+
+        URL jarUrl = JythonServer.class.getProtectionDomain().getCodeSource().getLocation();
+        String jarPath = jarUrl.getPath();
+        if (jarUrl.getProtocol().equals("file")) {
+            // If URL is of type file, assume that we are in dev env and set path to python dir.
+            // else use the jar file as is
+            jarPath = jarPath + "../../src/main/python/";
+        }
+
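+        // Put the python sources on sys.path, then start the interactive debug
+        // server listening on all interfaces at the configured port.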
+        p.exec("import sys");
+        p.exec("sys.path.append('" + jarPath + "')");
+        p.exec("from debugserver import run_server");
+        p.exec("run_server(" + this.port + ", '0.0.0.0', locals())");
+    }
+
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/learningswitch/ILearningSwitchService.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/learningswitch/ILearningSwitchService.java
new file mode 100644
index 0000000..71f6625
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/learningswitch/ILearningSwitchService.java
@@ -0,0 +1,15 @@
+package net.floodlightcontroller.learningswitch;
+
+import java.util.Map;
+
+import net.floodlightcontroller.core.IOFSwitch;
+import net.floodlightcontroller.core.module.IFloodlightService;
+import net.floodlightcontroller.core.types.MacVlanPair;
+
+public interface ILearningSwitchService extends IFloodlightService {
+    /**
+     * Returns the LearningSwitch's learned host table
+     * @return The learned host table
+     */
+    public Map<IOFSwitch, Map<MacVlanPair,Short>> getTable();
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/learningswitch/LearningSwitch.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/learningswitch/LearningSwitch.java
new file mode 100644
index 0000000..005708d
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/learningswitch/LearningSwitch.java
@@ -0,0 +1,508 @@
+/**
+*    Copyright 2011, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+/**
+ * Floodlight
+ * A BSD licensed, Java based OpenFlow controller
+ *
+ * Floodlight is a Java based OpenFlow controller originally written by David Erickson at Stanford
+ * University. It is available under the BSD license.
+ *
+ * For documentation, forums, issue tracking and more visit:
+ *
+ * http://www.openflowhub.org/display/Floodlight/Floodlight+Home
+ **/
+
+package net.floodlightcontroller.learningswitch;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+
+import net.floodlightcontroller.core.FloodlightContext;
+import net.floodlightcontroller.core.IFloodlightProviderService;
+import net.floodlightcontroller.core.IOFMessageListener;
+import net.floodlightcontroller.core.IOFSwitch;
+import net.floodlightcontroller.core.module.FloodlightModuleContext;
+import net.floodlightcontroller.core.module.FloodlightModuleException;
+import net.floodlightcontroller.core.module.IFloodlightModule;
+import net.floodlightcontroller.core.module.IFloodlightService;
+import net.floodlightcontroller.core.types.MacVlanPair;
+import net.floodlightcontroller.counter.ICounterStoreService;
+import net.floodlightcontroller.packet.Ethernet;
+import net.floodlightcontroller.restserver.IRestApiService;
+
+import org.openflow.protocol.OFError;
+import org.openflow.protocol.OFFlowMod;
+import org.openflow.protocol.OFFlowRemoved;
+import org.openflow.protocol.OFMatch;
+import org.openflow.protocol.OFMessage;
+import org.openflow.protocol.OFPacketIn;
+import org.openflow.protocol.OFPacketOut;
+import org.openflow.protocol.OFPort;
+import org.openflow.protocol.OFType;
+import org.openflow.protocol.action.OFAction;
+import org.openflow.protocol.action.OFActionOutput;
+import org.openflow.util.HexString;
+import org.openflow.util.LRULinkedHashMap;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class LearningSwitch 
+    implements IFloodlightModule, ILearningSwitchService, IOFMessageListener {
+    protected static Logger log = LoggerFactory.getLogger(LearningSwitch.class);
+    
+    // Module dependencies
+    protected IFloodlightProviderService floodlightProvider;
+    protected ICounterStoreService counterStore;
+    protected IRestApiService restApi;
+    
+    // Stores the learned state for each switch
+    protected Map<IOFSwitch, Map<MacVlanPair,Short>> macVlanToSwitchPortMap;
+
+    // flow-mod - for use in the cookie
+    public static final int LEARNING_SWITCH_APP_ID = 1;
+    // LOOK! This should probably go in some class that encapsulates
+    // the app cookie management
+    public static final int APP_ID_BITS = 12;
+    public static final int APP_ID_SHIFT = (64 - APP_ID_BITS);
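+    // The cookie carries the 12-bit app id in the top bits of the 64-bit cookie field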
+    public static final long LEARNING_SWITCH_COOKIE = (long) (LEARNING_SWITCH_APP_ID & ((1 << APP_ID_BITS) - 1)) << APP_ID_SHIFT;
+    
+    // more flow-mod defaults 
+    protected static final short IDLE_TIMEOUT_DEFAULT = 5;
+    protected static final short HARD_TIMEOUT_DEFAULT = 0;
+    protected static final short PRIORITY_DEFAULT = 100;
+    
+    // for managing our map sizes
+    protected static final int MAX_MACS_PER_SWITCH  = 1000;    
+
+    // normally, setup reverse flow as well. Disable only for using cbench for comparison with NOX etc.
+    protected static final boolean LEARNING_SWITCH_REVERSE_FLOW = true;
+    
+    /**
+     * @param floodlightProvider the floodlightProvider to set
+     */
+    public void setFloodlightProvider(IFloodlightProviderService floodlightProvider) {
+        this.floodlightProvider = floodlightProvider;
+    }
+    
+    @Override
+    public String getName() {
+        return "learningswitch";
+    }
+
+    /**
+     * Adds a host to the MAC/VLAN->SwitchPort mapping
+     * @param sw The switch to add the mapping to
+     * @param mac The MAC address of the host to add
+     * @param vlan The VLAN that the host is on
+     * @param portVal The switchport that the host is on
+     */
+    protected void addToPortMap(IOFSwitch sw, long mac, short vlan, short portVal) {
+        Map<MacVlanPair,Short> swMap = macVlanToSwitchPortMap.get(sw);
+        
+        if (vlan == (short) 0xffff) {
+            // OFMatch.loadFromPacket sets VLAN ID to 0xffff if the packet contains no VLAN tag;
+            // for our purposes that is equivalent to the default VLAN ID 0
+            vlan = 0;
+        }
+        
+        if (swMap == null) {
+            // May be accessed by REST API so we need to make it thread safe
+            swMap = Collections.synchronizedMap(new LRULinkedHashMap<MacVlanPair,Short>(MAX_MACS_PER_SWITCH));
+            macVlanToSwitchPortMap.put(sw, swMap);
+        }
+        swMap.put(new MacVlanPair(mac, vlan), portVal);
+    }
+    
+    /**
+     * Removes a host from the MAC/VLAN->SwitchPort mapping
+     * @param sw The switch to remove the mapping from
+     * @param mac The MAC address of the host to remove
+     * @param vlan The VLAN that the host is on
+     */
+    protected void removeFromPortMap(IOFSwitch sw, long mac, short vlan) {
+        if (vlan == (short) 0xffff) {
+            vlan = 0;
+        }
+        Map<MacVlanPair,Short> swMap = macVlanToSwitchPortMap.get(sw);
+        if (swMap != null)
+            swMap.remove(new MacVlanPair(mac, vlan));
+    }
+
+    /**
+     * Get the port that a MAC/VLAN pair is associated with
+     * @param sw The switch to get the mapping from
+     * @param mac The MAC address to get
+     * @param vlan The VLAN number to get
+     * @return The port the host is on
+     */
+    public Short getFromPortMap(IOFSwitch sw, long mac, short vlan) {
+        if (vlan == (short) 0xffff) {
+            vlan = 0;
+        }
+        Map<MacVlanPair,Short> swMap = macVlanToSwitchPortMap.get(sw);
+        if (swMap != null)
+            return swMap.get(new MacVlanPair(mac, vlan));
+        
+        // if none found
+        return null;
+    }
+    
+    /**
+     * Clears the MAC/VLAN -> SwitchPort map for all switches
+     */
+    public void clearLearnedTable() {
+        macVlanToSwitchPortMap.clear();
+    }
+    
+    /**
+     * Clears the MAC/VLAN -> SwitchPort map for a single switch
+     * @param sw The switch to clear the mapping for
+     */
+    public void clearLearnedTable(IOFSwitch sw) {
+        Map<MacVlanPair, Short> swMap = macVlanToSwitchPortMap.get(sw);
+        if (swMap != null)
+            swMap.clear();
+    }
+    
+    @Override
+    public synchronized Map<IOFSwitch, Map<MacVlanPair,Short>> getTable() {
+        return macVlanToSwitchPortMap;
+    }
+    
+    /**
+     * Writes an OFFlowMod to a switch.
+     * @param sw The switch to write the flowmod to.
+     * @param command The FlowMod command (add, delete, etc.).
+     * @param bufferId The buffer ID if the switch has buffered the packet.
+     * @param match The OFMatch structure to write.
+     * @param outPort The switch port to output it to.
+     */
+    private void writeFlowMod(IOFSwitch sw, short command, int bufferId,
+            OFMatch match, short outPort) {
+        // from openflow 1.0 spec - need to set these on a struct ofp_flow_mod:
+        // struct ofp_flow_mod {
+        //    struct ofp_header header;
+        //    struct ofp_match match; /* Fields to match */
+        //    uint64_t cookie; /* Opaque controller-issued identifier. */
+        //
+        //    /* Flow actions. */
+        //    uint16_t command; /* One of OFPFC_*. */
+        //    uint16_t idle_timeout; /* Idle time before discarding (seconds). */
+        //    uint16_t hard_timeout; /* Max time before discarding (seconds). */
+        //    uint16_t priority; /* Priority level of flow entry. */
+        //    uint32_t buffer_id; /* Buffered packet to apply to (or -1).
+        //                           Not meaningful for OFPFC_DELETE*. */
+        //    uint16_t out_port; /* For OFPFC_DELETE* commands, require
+        //                          matching entries to include this as an
+        //                          output port. A value of OFPP_NONE
+        //                          indicates no restriction. */
+        //    uint16_t flags; /* One of OFPFF_*. */
+        //    struct ofp_action_header actions[0]; /* The action length is inferred
+        //                                            from the length field in the
+        //                                            header. */
+        //    };
+           
+        OFFlowMod flowMod = (OFFlowMod) floodlightProvider.getOFMessageFactory().getMessage(OFType.FLOW_MOD);
+        flowMod.setMatch(match);
+        flowMod.setCookie(LearningSwitch.LEARNING_SWITCH_COOKIE);
+        flowMod.setCommand(command);
+        flowMod.setIdleTimeout(LearningSwitch.IDLE_TIMEOUT_DEFAULT);
+        flowMod.setHardTimeout(LearningSwitch.HARD_TIMEOUT_DEFAULT);
+        flowMod.setPriority(LearningSwitch.PRIORITY_DEFAULT);
+        flowMod.setBufferId(bufferId);
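+        // out_port only restricts which entries a delete removes; adds use OFPP_NONE.
+        // Adds also set OFPFF_SEND_FLOW_REM to request a flow-removed notification.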
+        flowMod.setOutPort((command == OFFlowMod.OFPFC_DELETE) ? outPort : OFPort.OFPP_NONE.getValue());
+        flowMod.setFlags((command == OFFlowMod.OFPFC_DELETE) ? 0 : (short) (1 << 0)); // OFPFF_SEND_FLOW_REM
+
+        // set the ofp_action_header/out actions:
+        // from the openflow 1.0 spec: need to set these on a struct ofp_action_output:
+        // uint16_t type; /* OFPAT_OUTPUT. */
+        // uint16_t len; /* Length is 8. */
+        // uint16_t port; /* Output port. */
+        // uint16_t max_len; /* Max length to send to controller. */
+        // type/len are set because it is OFActionOutput,
+        // and port, max_len are arguments to this constructor
+        flowMod.setActions(Arrays.asList((OFAction) new OFActionOutput(outPort, (short) 0xffff)));
+        flowMod.setLength((short) (OFFlowMod.MINIMUM_LENGTH + OFActionOutput.MINIMUM_LENGTH));
+
+        if (log.isTraceEnabled()) {
+            log.trace("{} {} flow mod {}", 
+                      new Object[]{ sw, (command == OFFlowMod.OFPFC_DELETE) ? "deleting" : "adding", flowMod });
+        }
+
+        counterStore.updatePktOutFMCounterStore(sw, flowMod);
+        
+        // and write it out
+        try {
+            sw.write(flowMod, null);
+        } catch (IOException e) {
+            log.error("Failed to write {} to switch {}", new Object[]{ flowMod, sw }, e);
+        }
+    }
+    
+    /**
+     * Writes an OFPacketOut message to a switch.
+     * @param sw The switch to write the PacketOut to.
+     * @param packetInMessage The corresponding PacketIn.
+     * @param egressPort The switchport to output the PacketOut.
+     */
+    private void writePacketOutForPacketIn(IOFSwitch sw, 
+                                          OFPacketIn packetInMessage, 
+                                          short egressPort) {
+        // from openflow 1.0 spec - need to set these on a struct ofp_packet_out:
+        // uint32_t buffer_id; /* ID assigned by datapath (-1 if none). */
+        // uint16_t in_port; /* Packet's input port (OFPP_NONE if none). */
+        // uint16_t actions_len; /* Size of action array in bytes. */
+        // struct ofp_action_header actions[0]; /* Actions. */
+        /* uint8_t data[0]; */ /* Packet data. The length is inferred
+                                  from the length field in the header.
+                                  (Only meaningful if buffer_id == -1.) */
+        
+        OFPacketOut packetOutMessage = (OFPacketOut) floodlightProvider.getOFMessageFactory().getMessage(OFType.PACKET_OUT);
+        short packetOutLength = (short)OFPacketOut.MINIMUM_LENGTH; // starting length
+
+        // Set buffer_id, in_port, actions_len
+        packetOutMessage.setBufferId(packetInMessage.getBufferId());
+        packetOutMessage.setInPort(packetInMessage.getInPort());
+        packetOutMessage.setActionsLength((short)OFActionOutput.MINIMUM_LENGTH);
+        packetOutLength += OFActionOutput.MINIMUM_LENGTH;
+        
+        // set actions
+        List<OFAction> actions = new ArrayList<OFAction>(1);      
+        actions.add(new OFActionOutput(egressPort, (short) 0));
+        packetOutMessage.setActions(actions);
+
+        // set data - only if buffer_id == -1
+        if (packetInMessage.getBufferId() == OFPacketOut.BUFFER_ID_NONE) {
+            byte[] packetData = packetInMessage.getPacketData();
+            packetOutMessage.setPacketData(packetData); 
+            packetOutLength += (short)packetData.length;
+        }
+        
+        // finally, set the total length
+        packetOutMessage.setLength(packetOutLength);              
+            
+        // and write it out
+        try {
+        	counterStore.updatePktOutFMCounterStore(sw, packetOutMessage);
+            sw.write(packetOutMessage, null);
+        } catch (IOException e) {
+            log.error("Failed to write {} to switch {}: {}", new Object[]{ packetOutMessage, sw, e });
+        }
+    }
+    
+    /**
+     * Processes an OFPacketIn message. If the switch has learned the MAC/VLAN-to-port mapping
+     * for the destination, it writes a FlowMod forwarding the packet to that port. If the
+     * mapping has not been learned, the packet is flooded.
+     * @param sw
+     * @param pi
+     * @param cntx
+     * @return
+     */
+    private Command processPacketInMessage(IOFSwitch sw, OFPacketIn pi, FloodlightContext cntx) {
+        // Read in packet data headers by using OFMatch
+        OFMatch match = new OFMatch();
+        match.loadFromPacket(pi.getPacketData(), pi.getInPort());
+        Long sourceMac = Ethernet.toLong(match.getDataLayerSource());
+        Long destMac = Ethernet.toLong(match.getDataLayerDestination());
+        Short vlan = match.getDataLayerVirtualLan();
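+        // Frames addressed to 01:80:c2:00:00:0x are 802.1D/Q link-local traffic
+        // (e.g. STP, LLDP) and are neither learned nor forwarded.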
+        if ((destMac & 0xfffffffffff0L) == 0x0180c2000000L) {
+            if (log.isTraceEnabled()) {
+                log.trace("ignoring packet addressed to 802.1D/Q reserved addr: switch {} vlan {} dest MAC {}",
+                          new Object[]{ sw, vlan, HexString.toHexString(destMac) });
+            }
+            return Command.STOP;
+        }
+        if ((sourceMac & 0x010000000000L) == 0) {
+            // If source MAC is a unicast address, learn the port for this MAC/VLAN
+            this.addToPortMap(sw, sourceMac, vlan, pi.getInPort());
+        }
+        
+        // Now output flow-mod and/or packet
+        Short outPort = getFromPortMap(sw, destMac, vlan);
+        if (outPort == null) {
+            // If we haven't learned the port for the dest MAC/VLAN, flood it
+            // Don't flood broadcast packets if the broadcast is disabled.
+            // XXX For LearningSwitch this doesn't do much. The sourceMac is removed
+            //     from port map whenever a flow expires, so you would still see
+            //     a lot of floods.
+            this.writePacketOutForPacketIn(sw, pi, OFPort.OFPP_FLOOD.getValue());
+        } else if (outPort == match.getInputPort()) {
+            log.trace("ignoring packet that arrived on same port as learned destination:"
+                    + " switch {} vlan {} dest MAC {} port {}",
+                    new Object[]{ sw, vlan, HexString.toHexString(destMac), outPort });
+        } else {
+            // Add flow table entry matching source MAC, dest MAC, VLAN and input port
+            // that sends to the port we previously learned for the dest MAC/VLAN.  Also
+            // add a flow table entry with source and destination MACs reversed, and
+            // input and output ports reversed.  When either entry expires due to idle
+            // timeout, remove the other one.  This ensures that if a device moves to
+            // a different port, a constant stream of packets headed to the device at
+            // its former location does not keep the stale entry alive forever.
+            // FIXME: current HP switches ignore DL_SRC and DL_DST fields, so we have to match on
+            // NW_SRC and NW_DST as well
+            match.setWildcards(((Integer)sw.getAttribute(IOFSwitch.PROP_FASTWILDCARDS)).intValue()
+                    & ~OFMatch.OFPFW_IN_PORT
+                    & ~OFMatch.OFPFW_DL_VLAN & ~OFMatch.OFPFW_DL_SRC & ~OFMatch.OFPFW_DL_DST
+                    & ~OFMatch.OFPFW_NW_SRC_MASK & ~OFMatch.OFPFW_NW_DST_MASK);
+            this.writeFlowMod(sw, OFFlowMod.OFPFC_ADD, pi.getBufferId(), match, outPort);
+            if (LEARNING_SWITCH_REVERSE_FLOW) {
+                this.writeFlowMod(sw, OFFlowMod.OFPFC_ADD, -1, match.clone()
+                    .setDataLayerSource(match.getDataLayerDestination())
+                    .setDataLayerDestination(match.getDataLayerSource())
+                    .setNetworkSource(match.getNetworkDestination())
+                    .setNetworkDestination(match.getNetworkSource())
+                    .setTransportSource(match.getTransportDestination())
+                    .setTransportDestination(match.getTransportSource())
+                    .setInputPort(outPort),
+                    match.getInputPort());
+            }
+        }
+        return Command.CONTINUE;
+    }
+
+    /**
+     * Processes a flow removed message. We will delete the learned MAC/VLAN mapping from
+     * the switch's table.
+     * @param sw The switch that sent the flow removed message.
+     * @param flowRemovedMessage The flow removed message.
+     * @return Whether to continue processing this message or stop.
+     */
+    private Command processFlowRemovedMessage(IOFSwitch sw, OFFlowRemoved flowRemovedMessage) {
+        if (flowRemovedMessage.getCookie() != LearningSwitch.LEARNING_SWITCH_COOKIE) {
+            return Command.CONTINUE;
+        }
+        if (log.isTraceEnabled()) {
+            log.trace("{} flow entry removed {}", sw, flowRemovedMessage);
+        }
+        OFMatch match = flowRemovedMessage.getMatch();
+        // When a flow entry expires, it means the device with the matching source
+        // MAC address and VLAN either stopped sending packets or moved to a different
+        // port.  If the device moved, we can't know where it went until it sends
+        // another packet, allowing us to re-learn its port.  Meanwhile we remove
+        // it from the macVlanToPortMap to revert to flooding packets to this device.
+        this.removeFromPortMap(sw, Ethernet.toLong(match.getDataLayerSource()),
+            match.getDataLayerVirtualLan());
+        
+        // Also, if packets keep coming from another device (e.g. from ping), the
+        // corresponding reverse flow entry will never expire on its own and will
+        // send the packets to the wrong port (the matching input port of the
+        // expired flow entry), so we must delete the reverse entry explicitly.
+        this.writeFlowMod(sw, OFFlowMod.OFPFC_DELETE, -1, match.clone()
+                .setWildcards(((Integer)sw.getAttribute(IOFSwitch.PROP_FASTWILDCARDS)).intValue()
+                        & ~OFMatch.OFPFW_DL_VLAN & ~OFMatch.OFPFW_DL_SRC & ~OFMatch.OFPFW_DL_DST
+                        & ~OFMatch.OFPFW_NW_SRC_MASK & ~OFMatch.OFPFW_NW_DST_MASK)
+                .setDataLayerSource(match.getDataLayerDestination())
+                .setDataLayerDestination(match.getDataLayerSource())
+                .setNetworkSource(match.getNetworkDestination())
+                .setNetworkDestination(match.getNetworkSource())
+                .setTransportSource(match.getTransportDestination())
+                .setTransportDestination(match.getTransportSource()),
+                match.getInputPort());
+        return Command.CONTINUE;
+    }
+    
+    // IOFMessageListener
+    
+    @Override
+    public Command receive(IOFSwitch sw, OFMessage msg, FloodlightContext cntx) {
+        switch (msg.getType()) {
+            case PACKET_IN:
+                return this.processPacketInMessage(sw, (OFPacketIn) msg, cntx);
+            case FLOW_REMOVED:
+                return this.processFlowRemovedMessage(sw, (OFFlowRemoved) msg);
+            case ERROR:
+                log.info("received an error {} from switch {}", (OFError) msg, sw);
+                return Command.CONTINUE;
+            default:
+            	break;
+        }
+        log.error("received an unexpected message {} from switch {}", msg, sw);
+        return Command.CONTINUE;
+    }
+
+    @Override
+    public boolean isCallbackOrderingPrereq(OFType type, String name) {
+        return false;
+    }
+
+    @Override
+    public boolean isCallbackOrderingPostreq(OFType type, String name) {
+        return false;
+    }
+
+    // IFloodlightModule
+    
+    @Override
+    public Collection<Class<? extends IFloodlightService>> getModuleServices() {
+        Collection<Class<? extends IFloodlightService>> l = 
+                new ArrayList<Class<? extends IFloodlightService>>();
+        l.add(ILearningSwitchService.class);
+        return l;
+    }
+
+    @Override
+    public Map<Class<? extends IFloodlightService>, IFloodlightService>
+            getServiceImpls() {
+        Map<Class<? extends IFloodlightService>,
+            IFloodlightService> m = 
+                new HashMap<Class<? extends IFloodlightService>,
+                    IFloodlightService>();
+        m.put(ILearningSwitchService.class, this);
+        return m;
+    }
+
+    @Override
+    public Collection<Class<? extends IFloodlightService>>
+            getModuleDependencies() {
+        Collection<Class<? extends IFloodlightService>> l = 
+                new ArrayList<Class<? extends IFloodlightService>>();
+        l.add(IFloodlightProviderService.class);
+        l.add(ICounterStoreService.class);
+        l.add(IRestApiService.class);
+        return l;
+    }
+
+    @Override
+    public void init(FloodlightModuleContext context)
+            throws FloodlightModuleException {
+        macVlanToSwitchPortMap = 
+                new ConcurrentHashMap<IOFSwitch, Map<MacVlanPair,Short>>();
+        floodlightProvider =
+                context.getServiceImpl(IFloodlightProviderService.class);
+        counterStore =
+                context.getServiceImpl(ICounterStoreService.class);
+        restApi =
+                context.getServiceImpl(IRestApiService.class);
+    }
+
+    @Override
+    public void startUp(FloodlightModuleContext context) {
+        floodlightProvider.addOFMessageListener(OFType.PACKET_IN, this);
+        floodlightProvider.addOFMessageListener(OFType.FLOW_REMOVED, this);
+        floodlightProvider.addOFMessageListener(OFType.ERROR, this);
+        restApi.addRestletRoutable(new LearningSwitchWebRoutable());
+    }
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/learningswitch/LearningSwitchTable.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/learningswitch/LearningSwitchTable.java
new file mode 100644
index 0000000..19f8bf5
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/learningswitch/LearningSwitchTable.java
@@ -0,0 +1,69 @@
+package net.floodlightcontroller.learningswitch;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+
+import net.floodlightcontroller.core.IFloodlightProviderService;
+import net.floodlightcontroller.core.IOFSwitch;
+import net.floodlightcontroller.core.types.MacVlanPair;
+
+import org.openflow.util.HexString;
+import org.restlet.data.Status;
+import org.restlet.resource.Get;
+import org.restlet.resource.ServerResource;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class LearningSwitchTable extends ServerResource {
+    protected static Logger log = LoggerFactory.getLogger(LearningSwitchTable.class);
+    
+    protected Map<String, Object> formatTableEntry(MacVlanPair key, short port) {
+        Map<String, Object> entry = new HashMap<String, Object>();
+        entry.put("mac", HexString.toHexString(key.mac));
+        entry.put("vlan", key.vlan);
+        entry.put("port", port);
+        return entry;
+    }
+    
+    protected List<Map<String, Object>> getOneSwitchTable(Map<MacVlanPair, Short> switchMap) {
+        List<Map<String, Object>> switchTable = new ArrayList<Map<String, Object>>();
+        for (Entry<MacVlanPair, Short> entry : switchMap.entrySet()) {
+            switchTable.add(formatTableEntry(entry.getKey(), entry.getValue()));
+        }
+        return switchTable;
+    }
+    
+    @Get("json")
+    public Map<String, List<Map<String, Object>>> getSwitchTableJson() {
+        ILearningSwitchService lsp = 
+                (ILearningSwitchService)getContext().getAttributes().
+                    get(ILearningSwitchService.class.getCanonicalName());
+
+        Map<IOFSwitch, Map<MacVlanPair,Short>> table = lsp.getTable();
+        Map<String, List<Map<String, Object>>> allSwitchTableJson = new HashMap<String, List<Map<String, Object>>>();
+        
+        String switchId = (String) getRequestAttributes().get("switch");
+        if (switchId.toLowerCase().equals("all")) {
+            for (IOFSwitch sw : table.keySet()) {
+                allSwitchTableJson.put(HexString.toHexString(sw.getId()), getOneSwitchTable(table.get(sw)));
+            }
+        } else {
+            try {
+                IFloodlightProviderService floodlightProvider = 
+                        (IFloodlightProviderService)getContext().getAttributes().
+                            get(IFloodlightProviderService.class.getCanonicalName());
+                long dpid = HexString.toLong(switchId);
+                IOFSwitch sw = floodlightProvider.getSwitches().get(dpid);
+                allSwitchTableJson.put(HexString.toHexString(sw.getId()), getOneSwitchTable(table.get(sw)));
+            } catch (NumberFormatException e) {
+                log.error("Could not decode switch ID = " + switchId);
+                setStatus(Status.CLIENT_ERROR_BAD_REQUEST);
+            }
+        }
+            
+        return allSwitchTableJson;
+    }
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/learningswitch/LearningSwitchWebRoutable.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/learningswitch/LearningSwitchWebRoutable.java
new file mode 100644
index 0000000..76c30cb
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/learningswitch/LearningSwitchWebRoutable.java
@@ -0,0 +1,22 @@
+package net.floodlightcontroller.learningswitch;
+
+import org.restlet.Context;
+import org.restlet.Restlet;
+import org.restlet.routing.Router;
+
+import net.floodlightcontroller.restserver.RestletRoutable;
+
+public class LearningSwitchWebRoutable implements RestletRoutable {
+
+    @Override
+    public Restlet getRestlet(Context context) {
+        Router router = new Router(context);
+        router.attach("/table/{switch}/json", LearningSwitchTable.class);
+        return router;
+    }
+
+    @Override
+    public String basePath() {
+        return "/wm/learningswitch";
+    }
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/linkdiscovery/ILinkDiscovery.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/linkdiscovery/ILinkDiscovery.java
new file mode 100644
index 0000000..f172f63
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/linkdiscovery/ILinkDiscovery.java
@@ -0,0 +1,162 @@
+package net.floodlightcontroller.linkdiscovery;
+
+import org.codehaus.jackson.map.annotate.JsonSerialize;
+import org.codehaus.jackson.map.ser.ToStringSerializer;
+import org.openflow.util.HexString;
+
+public interface ILinkDiscovery {
+
+    @JsonSerialize(using=ToStringSerializer.class)
+    public enum UpdateOperation {
+        LINK_UPDATED("Link Updated"),
+        LINK_REMOVED("Link Removed"),
+        SWITCH_UPDATED("Switch Updated"),
+        SWITCH_REMOVED("Switch Removed"),
+        PORT_UP("Port Up"),
+        PORT_DOWN("Port Down");
+        
+        private String value;
+        UpdateOperation(String v) {
+            value = v;
+        }
+        
+        @Override
+        public String toString() {
+            return value;
+        }
+    }
+
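+    /**
+     * Carries a single link-discovery update: a link updated/removed, a switch
+     * updated/removed, or a port going up/down, depending on the constructor used.
+     */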
+    public class LDUpdate {
+        protected long src;
+        protected short srcPort;
+        protected long dst;
+        protected short dstPort;
+        protected SwitchType srcType;
+        protected LinkType type;
+        protected UpdateOperation operation;
+
+        public LDUpdate(long src, short srcPort,
+                      long dst, short dstPort,
+                      ILinkDiscovery.LinkType type,
+                      UpdateOperation operation) {
+            this.src = src;
+            this.srcPort = srcPort;
+            this.dst = dst;
+            this.dstPort = dstPort;
+            this.type = type;
+            this.operation = operation;
+        }
+
+        public LDUpdate(LDUpdate old) {
+            this.src = old.src;
+            this.srcPort = old.srcPort;
+            this.dst = old.dst;
+            this.dstPort = old.dstPort;
+            this.srcType = old.srcType;
+            this.type = old.type;
+            this.operation = old.operation;
+        }
+
+        // For updatedSwitch(sw)
+        public LDUpdate(long switchId, SwitchType stype, UpdateOperation oper ){
+            this.operation = oper;
+            this.src = switchId;
+            this.srcType = stype;
+        }
+
+        // For port up or port down.
+        public LDUpdate(long sw, short port, UpdateOperation operation) {
+            this.src = sw;
+            this.srcPort = port;
+            this.operation = operation;
+        }
+
+        public long getSrc() {
+            return src;
+        }
+
+        public short getSrcPort() {
+            return srcPort;
+        }
+
+        public long getDst() {
+            return dst;
+        }
+
+        public short getDstPort() {
+            return dstPort;
+        }
+
+        public SwitchType getSrcType() {
+            return srcType;
+        }
+
+        public LinkType getType() {
+            return type;
+        }
+
+        public UpdateOperation getOperation() {
+            return operation;
+        }
+
+        public void setOperation(UpdateOperation operation) {
+            this.operation = operation;
+        }
+        
+        @Override
+        public String toString() {
+            switch (operation) {
+            case LINK_REMOVED:
+            case LINK_UPDATED:
+                return "LDUpdate [operation=" + operation +
+                        ", src=" + HexString.toHexString(src)
+                        + ", srcPort=" + srcPort
+                        + ", dst=" + HexString.toHexString(dst) 
+                        + ", dstPort=" + dstPort
+                        + ", type=" + type + "]";
+            case PORT_DOWN:
+            case PORT_UP:
+                return "LDUpdate [operation=" + operation +
+                        ", src=" + HexString.toHexString(src)
+                        + ", srcPort=" + srcPort + "]";
+            case SWITCH_REMOVED:
+            case SWITCH_UPDATED:
+                return "LDUpdate [operation=" + operation +
+                        ", src=" + HexString.toHexString(src) + "]";
+            default:
+                return "LDUpdate: Unknown update.";
+            }
+        }
+    }
+
+    public enum SwitchType {
+        BASIC_SWITCH, CORE_SWITCH
+    };
+
+    public enum LinkType {
+        INVALID_LINK {
+        	@Override
+        	public String toString() {
+        		return "invalid";
+        	}
+        }, 
+        DIRECT_LINK{
+        	@Override
+        	public String toString() {
+        		return "internal";
+        	}
+        }, 
+        MULTIHOP_LINK {
+        	@Override
+        	public String toString() {
+        		return "external";
+        	}
+        }, 
+        TUNNEL {
+        	@Override
+        	public String toString() {
+        		return "tunnel";
+        	}
+        }
+    };
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/linkdiscovery/ILinkDiscoveryListener.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/linkdiscovery/ILinkDiscoveryListener.java
new file mode 100644
index 0000000..35779a2
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/linkdiscovery/ILinkDiscoveryListener.java
@@ -0,0 +1,23 @@
+/**
+ *    Copyright 2011, Big Switch Networks, Inc. 
+ *    Originally created by David Erickson, Stanford University
+ * 
+ *    Licensed under the Apache License, Version 2.0 (the "License"); you may
+ *    not use this file except in compliance with the License. You may obtain
+ *    a copy of the License at
+ *
+ *         http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *    Unless required by applicable law or agreed to in writing, software
+ *    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ *    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ *    License for the specific language governing permissions and limitations
+ *    under the License.
+ **/
+
+package net.floodlightcontroller.linkdiscovery;
+
+public interface ILinkDiscoveryListener extends ILinkDiscovery{
+
+    public void linkDiscoveryUpdate(LDUpdate update);
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/linkdiscovery/ILinkDiscoveryService.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/linkdiscovery/ILinkDiscoveryService.java
new file mode 100644
index 0000000..4145592
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/linkdiscovery/ILinkDiscoveryService.java
@@ -0,0 +1,84 @@
+/**
+*    Copyright 2011, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.linkdiscovery;
+
+import java.util.Map;
+import java.util.Set;
+
+import net.floodlightcontroller.core.module.IFloodlightService;
+import net.floodlightcontroller.routing.Link;
+import net.floodlightcontroller.topology.NodePortTuple;
+
+
+public interface ILinkDiscoveryService extends IFloodlightService {
+    /**
+     * Retrieves a map of all known link connections between OpenFlow switches
+     * and the associated info (valid time, port states) for the link.
+     */
+    public Map<Link, LinkInfo> getLinks();
+
+    /**
+     * Returns the link type of a given link
+     * @param lt the link
+     * @param info the link info associated with the link
+     * @return the link type
+     */
+    public ILinkDiscovery.LinkType getLinkType(Link lt, LinkInfo info);
+
+    /**
+     * Returns an unmodifiable map from switch id to a set of all links with it 
+     * as an endpoint.
+     */
+    public Map<Long, Set<Link>> getSwitchLinks();
+
+    /**
+     * Adds a listener to listen for ILinkDiscoveryService messages
+     * @param listener The listener that wants the notifications
+     */
+    public void addListener(ILinkDiscoveryListener listener);
+
+    /**
+     * Retrieves a set of all switch ports on which lldps are suppressed.
+     */
+    public Set<NodePortTuple> getSuppressLLDPsInfo();
+
+    /**
+     * Adds a switch port to suppress lldp set
+     */
+    public void AddToSuppressLLDPs(long sw, short port);
+
+    /**
+     * Removes a switch port from suppress lldp set
+     */
+    public void RemoveFromSuppressLLDPs(long sw, short port);
+
+    /**
+     * Get the set of quarantined ports on a switch
+     */
+    public Set<Short> getQuarantinedPorts(long sw);
+
+    /**
+     * Get the status of auto port fast feature.
+     */
+    public boolean isAutoPortFastFeature();
+
+    /**
+     * Set the state for auto port fast feature.
+     * @param autoPortFastFeature
+     */
+    public void setAutoPortFastFeature(boolean autoPortFastFeature);
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/linkdiscovery/LinkInfo.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/linkdiscovery/LinkInfo.java
new file mode 100644
index 0000000..9c0dd1a
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/linkdiscovery/LinkInfo.java
@@ -0,0 +1,182 @@
+/**
+*    Copyright 2011, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.linkdiscovery;
+
+import net.floodlightcontroller.linkdiscovery.ILinkDiscovery.LinkType;
+
+import org.openflow.protocol.OFPhysicalPort.OFPortState;
+
+public class LinkInfo {
+
+    public LinkInfo(Long firstSeenTime,
+                    Long lastLldpReceivedTime,
+                    Long lastBddpReceivedTime,
+                    int srcPortState,
+                    int dstPortState) {
+        super();
+        this.srcPortState = srcPortState;
+        this.dstPortState = dstPortState;
+        this.firstSeenTime = firstSeenTime;
+        this.lastLldpReceivedTime = lastLldpReceivedTime;
+        this.lastBddpReceivedTime = lastBddpReceivedTime;
+    }
+
+    protected Integer srcPortState;
+    protected Integer dstPortState;
+    protected Long firstSeenTime;
+    protected Long lastLldpReceivedTime; /* Standard LLDP received time */
+    protected Long lastBddpReceivedTime; /* Modified LLDP received time  */
+
+    /** The port states stored here are topology's last knowledge of
+     * the state of the port. This mostly mirrors the state
+     * maintained in the port list in IOFSwitch (i.e. the one returned
+     * from getPort), except that during a port status message the
+     * IOFSwitch port state will already have been updated with the
+     * new port state, so topology needs to keep its own copy so that
+     * it can determine if the port state has changed and therefore
+     * requires the new state to be written to storage.
+     */
+
+
+
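+    /** Returns true if either the source or destination port of the link
+     * is in the STP blocking state. */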
+    public boolean linkStpBlocked() {
+        return ((srcPortState & OFPortState.OFPPS_STP_MASK.getValue()) == OFPortState.OFPPS_STP_BLOCK.getValue()) ||
+            ((dstPortState & OFPortState.OFPPS_STP_MASK.getValue()) == OFPortState.OFPPS_STP_BLOCK.getValue());
+    }
+
+    public Long getFirstSeenTime() {
+        return firstSeenTime;
+    }
+
+    public void setFirstSeenTime(Long firstSeenTime) {
+        this.firstSeenTime = firstSeenTime;
+    }
+
+    public Long getUnicastValidTime() {
+        return lastLldpReceivedTime;
+    }
+
+    public void setUnicastValidTime(Long unicastValidTime) {
+        this.lastLldpReceivedTime = unicastValidTime;
+    }
+
+    public Long getMulticastValidTime() {
+        return lastBddpReceivedTime;
+    }
+
+    public void setMulticastValidTime(Long multicastValidTime) {
+        this.lastBddpReceivedTime = multicastValidTime;
+    }
+
+    public Integer getSrcPortState() {
+        return srcPortState;
+    }
+
+    public void setSrcPortState(Integer srcPortState) {
+        this.srcPortState = srcPortState;
+    }
+
+    public Integer getDstPortState() {
+        return dstPortState;
+    }
+
+    public void setDstPortState(int dstPortState) {
+        this.dstPortState = dstPortState;
+    }
+
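+    /** A link is direct if a standard (unicast) LLDP has been received on
+     * it; if only BDDPs have been seen, it is treated as a multihop
+     * (broadcast-domain) link. */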
+    public LinkType getLinkType() {
+        if (lastLldpReceivedTime != null) {
+            return LinkType.DIRECT_LINK;
+        } else if (lastBddpReceivedTime != null) {
+            return LinkType.MULTIHOP_LINK;
+        }
+        return LinkType.INVALID_LINK;
+    }
+
+    /* (non-Javadoc)
+     * @see java.lang.Object#hashCode()
+     */
+    @Override
+    public int hashCode() {
+        final int prime = 5557;
+        int result = 1;
+        result = prime * result + ((firstSeenTime == null) ? 0 : firstSeenTime.hashCode());
+        result = prime * result + ((lastLldpReceivedTime == null) ? 0 : lastLldpReceivedTime.hashCode());
+        result = prime * result + ((lastBddpReceivedTime == null) ? 0 : lastBddpReceivedTime.hashCode());
+        result = prime * result + ((srcPortState == null) ? 0 : srcPortState.hashCode());
+        result = prime * result + ((dstPortState == null) ? 0 : dstPortState.hashCode());
+        return result;
+    }
+
+    /* (non-Javadoc)
+     * @see java.lang.Object#equals(java.lang.Object)
+     */
+    @Override
+    public boolean equals(Object obj) {
+        if (this == obj)
+            return true;
+        if (obj == null)
+            return false;
+        if (!(obj instanceof LinkInfo))
+            return false;
+        LinkInfo other = (LinkInfo) obj;
+
+        if (firstSeenTime == null) {
+            if (other.firstSeenTime != null)
+                return false;
+        } else if (!firstSeenTime.equals(other.firstSeenTime))
+            return false;
+
+        if (lastLldpReceivedTime == null) {
+            if (other.lastLldpReceivedTime != null)
+                return false;
+        } else if (!lastLldpReceivedTime.equals(other.lastLldpReceivedTime))
+            return false;
+
+        if (lastBddpReceivedTime == null) {
+            if (other.lastBddpReceivedTime != null)
+                return false;
+        } else if (!lastBddpReceivedTime.equals(other.lastBddpReceivedTime))
+            return false;
+
+        if (srcPortState == null) {
+            if (other.srcPortState != null)
+                return false;
+        } else if (!srcPortState.equals(other.srcPortState))
+            return false;
+
+        if (dstPortState == null) {
+            if (other.dstPortState != null)
+                return false;
+        } else if (!dstPortState.equals(other.dstPortState))
+            return false;
+
+        return true;
+    }
+
+
+    /* (non-Javadoc)
+     * @see java.lang.Object#toString()
+     */
+    @Override
+    public String toString() {
+        return "LinkInfo [unicastValidTime=" + ((lastLldpReceivedTime == null) ? "null" : lastLldpReceivedTime)
+                + ", multicastValidTime=" + ((lastBddpReceivedTime == null) ? "null" : lastBddpReceivedTime)
+                + ", srcPortState=" + ((srcPortState == null) ? "null" : srcPortState)
+                + ", dstPortState=" + ((dstPortState == null) ? "null" : dstPortState)
+                + "]";
+    }
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/linkdiscovery/internal/EventHistoryTopologyCluster.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/linkdiscovery/internal/EventHistoryTopologyCluster.java
new file mode 100644
index 0000000..d57e987
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/linkdiscovery/internal/EventHistoryTopologyCluster.java
@@ -0,0 +1,43 @@
+package net.floodlightcontroller.linkdiscovery.internal;
+
+import net.floodlightcontroller.core.web.serializers.DPIDSerializer;
+
+import org.codehaus.jackson.annotate.JsonProperty;
+import org.codehaus.jackson.map.annotate.JsonSerialize;
+
+/***
+ * Topology Cluster merge/split event history related classes and members
+ * @author subrata
+ *
+ */
+public class EventHistoryTopologyCluster {
+    // The following fields are not stored as String to save memory
+    // They should be converted to appropriate human-readable strings by 
+    // the front end (e.g. in cli in Python)
+    public long     dpid;
+    public long     clusterIdOld; // Switch with dpid moved from cluster x to y
+    public long     clusterIdNew;
+    public String   reason;
+    
+    @JsonProperty("Switch")
+    @JsonSerialize(using=DPIDSerializer.class)
+    public long getDpid() {
+        return dpid;
+    }
+    @JsonProperty("OldClusterId")
+    @JsonSerialize(using=DPIDSerializer.class)
+    public long getClusterIdOld() {
+        return clusterIdOld;
+    }
+    @JsonProperty("NewClusterId")
+    @JsonSerialize(using=DPIDSerializer.class)
+    public long getClusterIdNew() {
+        return clusterIdNew;
+    }
+    @JsonProperty("Reason")
+    public String getReason() {
+        return reason;
+    }
+    
+    
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/linkdiscovery/internal/EventHistoryTopologyLink.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/linkdiscovery/internal/EventHistoryTopologyLink.java
new file mode 100644
index 0000000..98796ed
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/linkdiscovery/internal/EventHistoryTopologyLink.java
@@ -0,0 +1,62 @@
+package net.floodlightcontroller.linkdiscovery.internal;
+
+import net.floodlightcontroller.core.web.serializers.DPIDSerializer;
+
+import org.codehaus.jackson.annotate.JsonProperty;
+import org.codehaus.jackson.map.annotate.JsonSerialize;
+
+/***
+ * Topology link up/down event history related classes and members
+ * @author subrata
+ *
+ */
+public class EventHistoryTopologyLink {
+    // The following fields are not stored as String to save memory
+    // They should be converted to appropriate human-readable strings by 
+    // the front end (e.g. in cli in Python)
+    public long     srcSwDpid;
+    public long     dstSwDpid;
+    public int      srcPortState;
+    public int      dstPortState;
+    public int      srcSwport;
+    public int      dstSwport;
+    public String   linkType;
+    public String   reason;
+    
+    @JsonProperty("Source-Switch")
+    @JsonSerialize(using=DPIDSerializer.class)
+    public long getSrcSwDpid() {
+        return srcSwDpid;
+    }
+    @JsonProperty("Dest-Switch")
+    @JsonSerialize(using=DPIDSerializer.class)
+    public long getDstSwDpid() {
+        return dstSwDpid;
+    }
+    @JsonProperty("SrcPortState")
+    public int getSrcPortState() {
+        return srcPortState;
+    }
+    @JsonProperty("DstPortState")
+    public int getDstPortState() {
+        return dstPortState;
+    }
+    @JsonProperty("SrcPort")
+    public int getSrcSwport() {
+        return srcSwport;
+    }
+    @JsonProperty("DstPort")
+    public int getDstSwport() {
+        return dstSwport;
+    }
+    @JsonProperty("LinkType")
+    public String getLinkType() {
+        return linkType;
+    }
+    @JsonProperty("Reason")
+    public String getReason() {
+        return reason;
+    }
+    
+    
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/linkdiscovery/internal/EventHistoryTopologySwitch.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/linkdiscovery/internal/EventHistoryTopologySwitch.java
new file mode 100644
index 0000000..001942b
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/linkdiscovery/internal/EventHistoryTopologySwitch.java
@@ -0,0 +1,43 @@
+package net.floodlightcontroller.linkdiscovery.internal;
+
+import net.floodlightcontroller.core.web.serializers.DPIDSerializer;
+import net.floodlightcontroller.core.web.serializers.IPv4Serializer;
+
+import org.codehaus.jackson.annotate.JsonProperty;
+import org.codehaus.jackson.map.annotate.JsonSerialize;
+
+/***
+ * Topology Switch event history related classes and members
+ * @author subrata
+ *
+ */
+public class EventHistoryTopologySwitch {
+    // The following fields are not stored as String to save memory
+    // They should be converted to appropriate human-readable strings by 
+    // the front end (e.g. in cli in Python)
+    public long     dpid;
+    public int  ipv4Addr;
+    public int    l4Port;
+    public String   reason;
+    
+    @JsonProperty("Switch")
+    @JsonSerialize(using=DPIDSerializer.class)
+    public long getDpid() {
+        return dpid;
+    }
+    @JsonProperty("IpAddr")
+    @JsonSerialize(using=IPv4Serializer.class)
+    public int getIpv4Addr() {
+        return ipv4Addr;
+    }
+    @JsonProperty("Port")
+    public int getL4Port() {
+        return l4Port;
+    }
+    @JsonProperty("Reason")
+    public String getReason() {
+        return reason;
+    }
+    
+    
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/linkdiscovery/internal/LinkDiscoveryManager.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/linkdiscovery/internal/LinkDiscoveryManager.java
new file mode 100644
index 0000000..bcb8b35
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/linkdiscovery/internal/LinkDiscoveryManager.java
@@ -0,0 +1,2062 @@
+/**
+ *    Copyright 2011, Big Switch Networks, Inc.
+ *    Originally created by David Erickson, Stanford University
+ *
+ *    Licensed under the Apache License, Version 2.0 (the "License"); you may
+ *    not use this file except in compliance with the License. You may obtain
+ *    a copy of the License at
+ *
+ *         http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *    Unless required by applicable law or agreed to in writing, software
+ *    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ *    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ *    License for the specific language governing permissions and limitations
+ *    under the License.
+ **/
+
+package net.floodlightcontroller.linkdiscovery.internal;
+
+import java.io.IOException;
+import java.net.InetAddress;
+import java.net.InetSocketAddress;
+import java.net.NetworkInterface;
+import java.net.SocketAddress;
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+
+import net.floodlightcontroller.core.FloodlightContext;
+import net.floodlightcontroller.core.IFloodlightProviderService;
+import net.floodlightcontroller.core.IFloodlightProviderService.Role;
+import net.floodlightcontroller.core.IHAListener;
+import net.floodlightcontroller.core.IInfoProvider;
+import net.floodlightcontroller.core.IOFMessageListener;
+import net.floodlightcontroller.core.IOFSwitch;
+import net.floodlightcontroller.core.IOFSwitchListener;
+import net.floodlightcontroller.core.annotations.LogMessageCategory;
+import net.floodlightcontroller.core.annotations.LogMessageDoc;
+import net.floodlightcontroller.core.annotations.LogMessageDocs;
+import net.floodlightcontroller.core.module.FloodlightModuleContext;
+import net.floodlightcontroller.core.module.FloodlightModuleException;
+import net.floodlightcontroller.core.module.IFloodlightModule;
+import net.floodlightcontroller.core.module.IFloodlightService;
+import net.floodlightcontroller.core.util.SingletonTask;
+import net.floodlightcontroller.linkdiscovery.ILinkDiscovery;
+import net.floodlightcontroller.linkdiscovery.ILinkDiscovery.LinkType;
+import net.floodlightcontroller.linkdiscovery.ILinkDiscovery.SwitchType;
+import net.floodlightcontroller.linkdiscovery.ILinkDiscovery.LDUpdate;
+import net.floodlightcontroller.linkdiscovery.ILinkDiscovery.UpdateOperation;
+import net.floodlightcontroller.linkdiscovery.web.LinkDiscoveryWebRoutable;
+import net.floodlightcontroller.linkdiscovery.ILinkDiscoveryListener;
+import net.floodlightcontroller.linkdiscovery.ILinkDiscoveryService;
+import net.floodlightcontroller.linkdiscovery.LinkInfo;
+import net.floodlightcontroller.packet.BSN;
+import net.floodlightcontroller.packet.Ethernet;
+import net.floodlightcontroller.packet.IPv4;
+import net.floodlightcontroller.packet.LLDP;
+import net.floodlightcontroller.packet.LLDPTLV;
+import net.floodlightcontroller.restserver.IRestApiService;
+import net.floodlightcontroller.routing.Link;
+import net.floodlightcontroller.storage.IResultSet;
+import net.floodlightcontroller.storage.IStorageSourceService;
+import net.floodlightcontroller.storage.IStorageSourceListener;
+import net.floodlightcontroller.storage.OperatorPredicate;
+import net.floodlightcontroller.storage.StorageException;
+import net.floodlightcontroller.threadpool.IThreadPoolService;
+import net.floodlightcontroller.topology.NodePortTuple;
+import net.floodlightcontroller.util.EventHistory;
+import net.floodlightcontroller.util.EventHistory.EvAction;
+
+import org.openflow.protocol.OFMessage;
+import org.openflow.protocol.OFPacketIn;
+import org.openflow.protocol.OFPacketOut;
+import org.openflow.protocol.OFPhysicalPort;
+import org.openflow.protocol.OFPhysicalPort.OFPortConfig;
+import org.openflow.protocol.OFPhysicalPort.OFPortState;
+import org.openflow.protocol.OFPort;
+import org.openflow.protocol.OFPortStatus;
+import org.openflow.protocol.OFPortStatus.OFPortReason;
+import org.openflow.protocol.OFType;
+import org.openflow.protocol.action.OFAction;
+import org.openflow.protocol.action.OFActionOutput;
+import org.openflow.util.HexString;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * This class sends out LLDP messages containing the sending switch's datapath
+ * id as well as the outgoing port number.  Received LLDP messages that
+ * match a known switch cause a new LinkTuple to be created according to the
+ * invariant rules listed below.  This new LinkTuple is also passed to
+ * routing, if it exists, to trigger updates.
+ *
+ * This class also handles removing links that are associated to switch ports
+ * that go down, and switches that are disconnected.
+ *
+ * Invariants:
+ *  -portLinks and switchLinks will not contain empty Sets outside of
+ *   critical sections
+ *  -portLinks contains LinkTuples where one of the src or dst
+ *   SwitchPortTuple matches the map key
+ *  -switchLinks contains LinkTuples where one of the src or dst
+ *   SwitchPortTuple's id matches the switch id
+ *  -Each LinkTuple will be indexed into switchLinks for both
+ *   src.id and dst.id, and portLinks for each src and dst
+ *  -The updates queue is only added to from within a held write lock
+ */
+@LogMessageCategory("Network Topology")
+public class LinkDiscoveryManager
+implements IOFMessageListener, IOFSwitchListener, 
+IStorageSourceListener, ILinkDiscoveryService,
+IFloodlightModule, IInfoProvider, IHAListener {
+    protected static Logger log = LoggerFactory.getLogger(LinkDiscoveryManager.class);
+
+    // Names of table/fields for links in the storage API
+    private static final String LINK_TABLE_NAME = "controller_link";
+    private static final String LINK_ID = "id";
+    private static final String LINK_SRC_SWITCH = "src_switch_id";
+    private static final String LINK_SRC_PORT = "src_port";
+    private static final String LINK_SRC_PORT_STATE = "src_port_state";
+    private static final String LINK_DST_SWITCH = "dst_switch_id";
+    private static final String LINK_DST_PORT = "dst_port";
+    private static final String LINK_DST_PORT_STATE = "dst_port_state";
+    private static final String LINK_VALID_TIME = "valid_time";
+    private static final String LINK_TYPE = "link_type";
+    private static final String SWITCH_CONFIG_TABLE_NAME = "controller_switchconfig";
+    private static final String SWITCH_CONFIG_CORE_SWITCH = "core_switch";
+
+    protected IFloodlightProviderService floodlightProvider;
+    protected IStorageSourceService storageSource;
+    protected IThreadPoolService threadPool;
+    protected IRestApiService restApi;
+
+
+    // LLDP and BDDP fields
+    private static final byte[] LLDP_STANDARD_DST_MAC_STRING = 
+            HexString.fromHexString("01:80:c2:00:00:0e");
+    private static final long LINK_LOCAL_MASK  = 0xfffffffffff0L;
+    private static final long LINK_LOCAL_VALUE = 0x0180c2000000L;
+
+    // BigSwitch OUI is 5C:16:C7, so 5D:16:C7 is the multicast version
+    // private static final String LLDP_BSN_DST_MAC_STRING = "5d:16:c7:00:00:01";
+    private static final String LLDP_BSN_DST_MAC_STRING = "ff:ff:ff:ff:ff:ff";
+
+
+    // Direction TLVs are used to indicate if the LLDPs were sent 
+    // periodically or in response to a received LLDP
+    private static final byte TLV_DIRECTION_TYPE = 0x73;
+    private static final short TLV_DIRECTION_LENGTH = 1;  // 1 byte
+    private static final byte TLV_DIRECTION_VALUE_FORWARD[] = {0x01};
+    private static final byte TLV_DIRECTION_VALUE_REVERSE[] = {0x02};
+    private static final LLDPTLV forwardTLV 
+    = new LLDPTLV().
+    setType((byte)TLV_DIRECTION_TYPE).
+    setLength((short)TLV_DIRECTION_LENGTH).
+    setValue(TLV_DIRECTION_VALUE_FORWARD);
+
+    private static final LLDPTLV reverseTLV 
+    = new LLDPTLV().
+    setType((byte)TLV_DIRECTION_TYPE).
+    setLength((short)TLV_DIRECTION_LENGTH).
+    setValue(TLV_DIRECTION_VALUE_REVERSE);
+
+    // Link discovery task details.
+    protected SingletonTask discoveryTask;
+    protected final int DISCOVERY_TASK_INTERVAL = 1; 
+    protected final int LINK_TIMEOUT = 35; // timeout as part of LLDP process.
+    protected final int LLDP_TO_ALL_INTERVAL = 15 ; //15 seconds.
+    protected long lldpClock = 0;
+    // This value is intentionally kept higher than LLDP_TO_ALL_INTERVAL.
+    // If we want to identify link failures faster, we could decrease this
+    // value to a small number, say 1 or 2 sec.
+    protected final int LLDP_TO_KNOWN_INTERVAL= 20; // LLDP frequency for known links
+
+    protected LLDPTLV controllerTLV;
+    protected ReentrantReadWriteLock lock;
+    int lldpTimeCount = 0;
+
+    /**
+     * Flag to indicate if automatic port fast is enabled or not.
+     * Default is set to false -- Initialized in the init method as well.
+     */
+    boolean autoPortFastFeature = false;
+
+    /**
+     * Map from link to the most recent time it was verified functioning
+     */
+    protected Map<Link, LinkInfo> links;
+
+    /**
+     * Map from switch id to a set of all links with it as an endpoint
+     */
+    protected Map<Long, Set<Link>> switchLinks;
+
+    /**
+     * Map from a id:port to the set of links containing it as an endpoint
+     */
+    protected Map<NodePortTuple, Set<Link>> portLinks;
+
+    /**
+     * Set of link tuples over which multicast LLDPs are received
+     * and unicast LLDPs are not received.
+     */
+    protected Map<NodePortTuple, Set<Link>> portBroadcastDomainLinks;
+
+    protected volatile boolean shuttingDown = false;
+
+    /* topology aware components are called in the order they were added to the
+     * array */
+    protected ArrayList<ILinkDiscoveryListener> linkDiscoveryAware;
+    protected BlockingQueue<LDUpdate> updates;
+    protected Thread updatesThread;
+
+    /**
+     * List of ports through which LLDP/BDDPs are not sent.
+     */
+    protected Set<NodePortTuple> suppressLinkDiscovery;
+
+    /** A list of ports that are quarantined while links are discovered
+     * through them.  Data traffic from these ports is not allowed until
+     * the ports are released from quarantine.
+     */
+    protected LinkedBlockingQueue<NodePortTuple> quarantineQueue;
+    protected LinkedBlockingQueue<NodePortTuple> maintenanceQueue;
+    /**
+     * Quarantine task
+     */
+    protected SingletonTask bddpTask;
+    protected final int BDDP_TASK_INTERVAL = 100; // 100 ms.
+    protected final int BDDP_TASK_SIZE = 5;       // # of ports per iteration
+
+    /**
+     * Map of broadcast domain ports and the last time a BDDP was either
+     * sent or received on that port.
+     */
+    protected Map<NodePortTuple, Long> broadcastDomainPortTimeMap;
+
+    /** 
+     * Get the LLDP sending period in seconds.
+     * @return LLDP sending period in seconds.
+     */
+    public int getLldpFrequency() {
+        return LLDP_TO_KNOWN_INTERVAL;
+    }
+
+    /**
+     * Get the LLDP timeout value in seconds
+     * @return LLDP timeout value in seconds
+     */
+    public int getLldpTimeout() {
+        return LINK_TIMEOUT;
+    }
+
+    public Map<NodePortTuple, Set<Link>> getPortLinks() {
+        return portLinks;
+    }
+
+    public Set<NodePortTuple> getSuppressLLDPsInfo() {
+        return suppressLinkDiscovery;
+    }
+
+    /**
+     * Add a switch port to the suppressed LLDP list.
+     * Remove any known links on the switch port.
+     */
+    public void AddToSuppressLLDPs(long sw, short port)
+    {
+        NodePortTuple npt = new NodePortTuple(sw, port);
+        this.suppressLinkDiscovery.add(npt);
+        deleteLinksOnPort(npt, "LLDP suppressed.");
+    }
+
+    /**
+     * Remove a switch port from the suppressed LLDP list.
+     * Discover links on that switchport.
+     */
+    public void RemoveFromSuppressLLDPs(long sw, short port) 
+    {
+        NodePortTuple npt = new NodePortTuple(sw, port);
+        this.suppressLinkDiscovery.remove(npt);
+        discover(npt);
+    }
+
+    public boolean isShuttingDown() {
+        return shuttingDown;
+    }
+
+    public boolean isFastPort(long sw, short port) {
+        return false;
+    }
+
+    public ILinkDiscovery.LinkType getLinkType(Link lt, LinkInfo info) {
+        if (info.getUnicastValidTime() != null) {
+            return ILinkDiscovery.LinkType.DIRECT_LINK;
+        } else if (info.getMulticastValidTime() != null) {
+            return ILinkDiscovery.LinkType.MULTIHOP_LINK;
+        }
+        return ILinkDiscovery.LinkType.INVALID_LINK;
+    }
+
+    @LogMessageDoc(level="ERROR",
+            message="Error in link discovery updates loop",
+            explanation="An unknown error occurred while dispatching " +
+            		"link update notifications",
+            recommendation=LogMessageDoc.GENERIC_ACTION)
+    private void doUpdatesThread() throws InterruptedException {
+        do {
+            LDUpdate update = updates.take();
+
+            if (linkDiscoveryAware != null) {
+                if (log.isTraceEnabled()) {
+                    log.trace("Dispatching link discovery update {} {} {} {} {} for {}",
+                              new Object[]{update.getOperation(),
+                                           HexString.toHexString(update.getSrc()), update.getSrcPort(),
+                                           HexString.toHexString(update.getDst()), update.getDstPort(),
+                                           linkDiscoveryAware});
+                }
+                try {
+                    for (ILinkDiscoveryListener lda : linkDiscoveryAware) { // order maintained
+                        lda.linkDiscoveryUpdate(update);
+                    }
+                }
+                catch (Exception e) {
+                    log.error("Error in link discovery updates loop", e);
+                }
+            }
+        } while (updates.peek() != null);
+    }
+    private boolean isLinkDiscoverySuppressed(long sw, short portNumber) {
+        return this.suppressLinkDiscovery.contains(new NodePortTuple(sw, portNumber));
+    }
+
+    protected void discoverLinks() {
+
+        // timeout known links.
+        timeoutLinks();
+
+        //increment LLDP clock
+        lldpClock = (lldpClock + 1)% LLDP_TO_ALL_INTERVAL;
+
+        if (lldpClock == 0) {
+            log.debug("Sending LLDP out on all ports.");
+            discoverOnAllPorts();
+        }
+    }
+
+
+    /**
+     *  Quarantine Ports.
+     */
+    protected class QuarantineWorker implements Runnable {
+        @Override
+        public void run() {
+            try {
+                processBDDPLists();
+            }
+            catch (Exception e) {
+                log.error("Error in quarantine worker thread", e);
+            } finally {
+                    bddpTask.reschedule(BDDP_TASK_INTERVAL,
+                                              TimeUnit.MILLISECONDS);
+            }
+        }
+    }
+
+    /**
+     * Add a switch port to the quarantine queue. Schedule the
+     * quarantine task if the quarantine queue was empty before adding
+     * this switch port.
+     * @param npt
+     */
+    protected void addToQuarantineQueue(NodePortTuple npt) {
+        if (quarantineQueue.contains(npt) == false)
+            quarantineQueue.add(npt);
+    }
+
+    /**
+     * Remove a switch port from the quarantine queue.
+     */
+    protected void removeFromQuarantineQueue(NodePortTuple npt) {
+        // Remove all occurrences of the node port tuple from the list.
+        while (quarantineQueue.remove(npt));
+    }
+
+    /**
+     * Add a switch port to maintenance queue.
+     * @param npt
+     */
+    protected void addToMaintenanceQueue(NodePortTuple npt) {
+        // TODO We are not checking if the switch port tuple is already
+        // in the maintenance list or not.  This will be an issue for
+        // really large number of switch ports in the network.
+        if (maintenanceQueue.contains(npt) == false)
+            maintenanceQueue.add(npt);
+    }
+
+    /**
+     * Remove a switch port from maintenance queue.
+     * @param npt
+     */
+    protected void removeFromMaintenanceQueue(NodePortTuple npt) {
+        // Remove all occurrences of the node port tuple from the queue.
+        while (maintenanceQueue.remove(npt));
+    }
+
+    /**
+    * This method processes the quarantine list in bursts.  The task runs
+    * at most once per BDDP_TASK_INTERVAL.
+    * On each call, BDDP_TASK_SIZE switch ports are processed.
+    * Once the BDDP packets are sent out through the switch ports, the ports
+    * are removed from the quarantine list.
+    */
+
+    protected void processBDDPLists() {
+        int count = 0;
+        Set<NodePortTuple> nptList = new HashSet<NodePortTuple>();
+
+        while(count < BDDP_TASK_SIZE && quarantineQueue.peek() !=null) {
+            NodePortTuple npt;
+            npt = quarantineQueue.remove();
+            sendDiscoveryMessage(npt.getNodeId(), npt.getPortId(), false, false);
+            nptList.add(npt);
+            count++;
+        }
+
+        count = 0;
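+        // Ports on the maintenance queue are only re-probed with a BDDP;
+        // no port status update is generated for them.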
+        while (count < BDDP_TASK_SIZE && maintenanceQueue.peek() != null) {
+            NodePortTuple npt;
+            npt = maintenanceQueue.remove();
+            sendDiscoveryMessage(npt.getNodeId(), npt.getPortId(), false, false);
+            count++;
+        }
+
+        for(NodePortTuple npt:nptList) {
+            generateSwitchPortStatusUpdate(npt.getNodeId(), npt.getPortId());
+        }
+    }
+
+    public Set<Short> getQuarantinedPorts(long sw) {
+        Set<Short> qPorts = new HashSet<Short>();
+
+        Iterator<NodePortTuple> iter = quarantineQueue.iterator();
+        while (iter.hasNext()) {
+            NodePortTuple npt = iter.next();
+            if (npt.getNodeId() == sw) {
+                qPorts.add(npt.getPortId());
+            }
+        }
+        return qPorts;
+    }
+
+    private void generateSwitchPortStatusUpdate(long sw, short port) {
+        UpdateOperation operation;
+
+        IOFSwitch iofSwitch = floodlightProvider.getSwitches().get(sw);
+        if (iofSwitch == null) return;
+
+        OFPhysicalPort ofp = iofSwitch.getPort(port);
+        if (ofp == null) return;
+
+        int srcPortState = ofp.getState();
+        boolean portUp = ((srcPortState &
+                OFPortState.OFPPS_STP_MASK.getValue()) !=
+                OFPortState.OFPPS_STP_BLOCK.getValue());
+
+        if (portUp) operation = UpdateOperation.PORT_UP;
+        else operation = UpdateOperation.PORT_DOWN;
+
+        updates.add(new LDUpdate(sw, port, operation));
+    }
+
+    /** 
+     * Send LLDP on known ports
+     */
+    protected void discoverOnKnownLinkPorts() {
+        // Copy the port set.
+        Set<NodePortTuple> nptSet = new HashSet<NodePortTuple>();
+        nptSet.addAll(portLinks.keySet());
+
+        // Send LLDP from each of them.
+        for(NodePortTuple npt: nptSet) {
+            discover(npt);
+        }
+    }
+
+    protected void discover(NodePortTuple npt) {
+        discover(npt.getNodeId(), npt.getPortId());
+    }
+
+    protected void discover(long sw, short port) {
+        sendDiscoveryMessage(sw, port, true, false);
+    }
+
+    /**
+     * Send link discovery message out of a given switch port.
+     * The discovery message may be a standard LLDP or a modified
+     * LLDP, where the dst mac address is set to the broadcast address.
+     * 
+     * TODO: The modified LLDP will be updated in the future and may
+     * use a different eth-type.
+     * @param sw
+     * @param port
+     * @param isStandard   indicates standard or modified LLDP
+     * @param isReverse    indicates whether the LLDP was sent as a response
+     */
+    @LogMessageDoc(level="ERROR",
+            message="Failure sending LLDP out port {port} on switch {switch}",
+            explanation="An I/O error occurred while sending LLDP message " +
+            		"to the switch.",
+            recommendation=LogMessageDoc.CHECK_SWITCH)
+    protected void sendDiscoveryMessage(long sw, short port,
+                             boolean isStandard,
+                             boolean isReverse) {
+
+        IOFSwitch iofSwitch = floodlightProvider.getSwitches().get(sw);
+        if (iofSwitch == null) {
+            return;
+        }
+
+        if (port == OFPort.OFPP_LOCAL.getValue())
+            return;
+
+        OFPhysicalPort ofpPort = iofSwitch.getPort(port);
+
+        if (ofpPort == null) {
+            if (log.isTraceEnabled()) {
+                log.trace("Null physical port. sw={}, port={}", sw, port);
+            }
+            return;
+        }
+
+        if (isLinkDiscoverySuppressed(sw, port)) {
+            /* Don't send LLDPs out of this port, as it is in the
+             * suppressLLDPs set.
+             */
+            return;
+        }
+
+        // For fast ports, do not send forward LLDPs or BDDPs.
+        if (!isReverse && autoPortFastFeature && isFastPort(sw, port))
+            return;
+
+        if (log.isTraceEnabled()) {
+            log.trace("Sending LLDP packet out of switch: {}, port: {}",
+                      sw, port);
+        }
+
+        // using "nearest customer bridge" MAC address for broadest possible propagation
+        // through provider and TPMR bridges (see IEEE 802.1AB-2009 and 802.1Q-2011),
+        // in particular the Linux bridge which behaves mostly like a provider bridge
+        byte[] chassisId = new byte[] {4, 0, 0, 0, 0, 0, 0}; // filled in later
+        byte[] portId = new byte[] {2, 0, 0}; // filled in later
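+        // TTL TLV value of 0x0078 = 120 seconds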
+        byte[] ttlValue = new byte[] {0, 0x78};
+        // OpenFlow OUI - 00-26-E1
+        byte[] dpidTLVValue = new byte[] {0x0, 0x26, (byte) 0xe1, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+        LLDPTLV dpidTLV = new LLDPTLV().setType((byte) 127).setLength((short) dpidTLVValue.length).setValue(dpidTLVValue);
+
+        byte[] dpidArray = new byte[8];
+        ByteBuffer dpidBB = ByteBuffer.wrap(dpidArray);
+        ByteBuffer portBB = ByteBuffer.wrap(portId, 1, 2);
+
+        Long dpid = sw;
+        dpidBB.putLong(dpid);
+        // set the ethernet source mac to last 6 bytes of dpid
+        System.arraycopy(dpidArray, 2, ofpPort.getHardwareAddress(), 0, 6);
+        // set the chassis id's value to last 6 bytes of dpid
+        System.arraycopy(dpidArray, 2, chassisId, 1, 6);
+        // set the optional tlv to the full dpid
+        System.arraycopy(dpidArray, 0, dpidTLVValue, 4, 8);
+
+
+        // set the portId to the outgoing port
+        portBB.putShort(port);
+        if (log.isTraceEnabled()) {
+            log.trace("Sending LLDP out of interface: {}/{}",
+                      HexString.toHexString(sw), port);
+        }
+
+        LLDP lldp = new LLDP();
+        lldp.setChassisId(new LLDPTLV().setType((byte) 1).setLength((short) chassisId.length).setValue(chassisId));
+        lldp.setPortId(new LLDPTLV().setType((byte) 2).setLength((short) portId.length).setValue(portId));
+        lldp.setTtl(new LLDPTLV().setType((byte) 3).setLength((short) ttlValue.length).setValue(ttlValue));
+        lldp.getOptionalTLVList().add(dpidTLV);
+
+        // Add the controller identifier to the TLV value.
+        lldp.getOptionalTLVList().add(controllerTLV);
+        if (isReverse) {
+            lldp.getOptionalTLVList().add(reverseTLV);
+        }else {
+            lldp.getOptionalTLVList().add(forwardTLV);
+        }
+
+        Ethernet ethernet;
+        if (isStandard) {
+            ethernet = new Ethernet()
+            .setSourceMACAddress(ofpPort.getHardwareAddress())
+            .setDestinationMACAddress(LLDP_STANDARD_DST_MAC_STRING)
+            .setEtherType(Ethernet.TYPE_LLDP);
+            ethernet.setPayload(lldp);
+        } else {
+            BSN bsn = new BSN(BSN.BSN_TYPE_BDDP);
+            bsn.setPayload(lldp);
+
+            ethernet = new Ethernet()
+            .setSourceMACAddress(ofpPort.getHardwareAddress())
+            .setDestinationMACAddress(LLDP_BSN_DST_MAC_STRING)
+            .setEtherType(Ethernet.TYPE_BSN);
+            ethernet.setPayload(bsn);
+        }
+
+
+        // serialize and wrap in a packet out
+        byte[] data = ethernet.serialize();
+        OFPacketOut po = (OFPacketOut) floodlightProvider.getOFMessageFactory().getMessage(OFType.PACKET_OUT);
+        po.setBufferId(OFPacketOut.BUFFER_ID_NONE);
+        po.setInPort(OFPort.OFPP_NONE);
+
+        // set actions
+        List<OFAction> actions = new ArrayList<OFAction>();
+        actions.add(new OFActionOutput(port, (short) 0));
+        po.setActions(actions);
+        po.setActionsLength((short) OFActionOutput.MINIMUM_LENGTH);
+
+        // set data
+        po.setLengthU(OFPacketOut.MINIMUM_LENGTH + po.getActionsLength() + data.length);
+        po.setPacketData(data);
+
+        // send
+        try {
+            iofSwitch.write(po, null);
+            iofSwitch.flush();
+        } catch (IOException e) {
+            log.error("Failure sending LLDP out port {} on switch {}",
+                      new Object[]{ port, iofSwitch.getStringId() }, e);
+        }
+
+    }
+
+    /**
+     * Send LLDPs to all switch-ports
+     */
+    protected void discoverOnAllPorts() {
+        if (log.isTraceEnabled()) {
+            log.trace("Sending LLDP packets out of all the enabled ports");
+        }
+        Set<Long> switches = floodlightProvider.getSwitches().keySet();
+        // Send standard LLDPs
+        for (long sw: switches) {
+            IOFSwitch iofSwitch = floodlightProvider.getSwitches().get(sw);
+            if (iofSwitch == null) continue;
+            if (iofSwitch.getEnabledPorts() != null) {
+                for (OFPhysicalPort ofp: iofSwitch.getEnabledPorts()) {
+                    if (isLinkDiscoverySuppressed(sw, ofp.getPortNumber()))
+                        continue;
+                    if (autoPortFastFeature && isFastPort(sw, ofp.getPortNumber()))
+                        continue;
+
+                    // send forward LLDPs only on non-fast ports.
+                    sendDiscoveryMessage(sw, ofp.getPortNumber(), true, false);
+
+                    // If the switch port is not already in the maintenance
+                    // queue, add it.
+                    NodePortTuple npt = new NodePortTuple(sw, ofp.getPortNumber());
+                    addToMaintenanceQueue(npt);
+                }
+            }
+        }
+    }
+
+    protected void setControllerTLV() {
+        // Set the controllerTLVValue based on the current nano time, the
+        // controller's IP address, and the hash of the network interface
+        // object for that IP address.
+
+        final int prime = 7867;
+        InetAddress localIPAddress = null;
+        NetworkInterface localInterface = null;
+
+        byte[] controllerTLVValue = new byte[] {0, 0, 0, 0, 0, 0, 0, 0};  // 8 byte value.
+        ByteBuffer bb = ByteBuffer.allocate(10);
+
+        try{
+            localIPAddress = java.net.InetAddress.getLocalHost();
+            localInterface = NetworkInterface.getByInetAddress(localIPAddress);
+        } catch (Exception e) {
+            e.printStackTrace();
+        }
+
+        long result = System.nanoTime();
+        if (localIPAddress != null)
+            result = result * prime + IPv4.toIPv4Address(localIPAddress.getHostAddress());
+        if (localInterface != null)
+            result = result * prime + localInterface.hashCode();
+        // set the first 4 bits to 0.
+        result = result & (0x0fffffffffffffffL);
+
+        bb.putLong(result);
+
+        bb.rewind();
+        bb.get(controllerTLVValue, 0, 8);
+
+        this.controllerTLV = new LLDPTLV().setType((byte) 0x0c).setLength((short) controllerTLVValue.length).setValue(controllerTLVValue);
+    }
+
+    @Override
+    public String getName() {
+        return "linkdiscovery";
+    }
+
+    @Override
+    public Command receive(IOFSwitch sw, OFMessage msg, FloodlightContext cntx) {
+        switch (msg.getType()) {
+            case PACKET_IN:
+                return this.handlePacketIn(sw.getId(), (OFPacketIn) msg, cntx);
+            case PORT_STATUS:
+                return this.handlePortStatus(sw.getId(), (OFPortStatus) msg);
+            default:
+                break;
+        }
+        return Command.CONTINUE;
+    }
+
+    private Command handleLldp(LLDP lldp, long sw, OFPacketIn pi, boolean isStandard, FloodlightContext cntx) {
+        // If LLDP is suppressed on this port, ignore received packet as well
+        IOFSwitch iofSwitch = floodlightProvider.getSwitches().get(sw);
+        if (iofSwitch == null) {
+            return Command.STOP;
+        }
+
+        if (isLinkDiscoverySuppressed(sw, pi.getInPort()))
+            return Command.STOP;
+
+        // If this is a malformed LLDP, or not from us, exit
+        if (lldp.getPortId() == null || lldp.getPortId().getLength() != 3)
+            return Command.CONTINUE;
+
+        long myId = ByteBuffer.wrap(controllerTLV.getValue()).getLong();
+        long otherId = 0;
+        boolean myLLDP = false;
+        Boolean isReverse = null;
+
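+        // The port ID TLV value is three bytes: a subtype byte followed by
+        // the 16-bit port number, so skip the subtype byte before reading.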
+        ByteBuffer portBB = ByteBuffer.wrap(lldp.getPortId().getValue());
+        portBB.position(1);
+
+        Short remotePort = portBB.getShort();
+        IOFSwitch remoteSwitch = null;
+
+        // Verify this LLDP packet matches what we're looking for
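+        // Three optional TLVs are of interest: the dpid TLV (type 127 with
+        // the OpenFlow OUI 00-26-E1), the controller ID TLV (type 12), and
+        // the direction TLV (type 0x73).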
+        for (LLDPTLV lldptlv : lldp.getOptionalTLVList()) {
+            if (lldptlv.getType() == 127 && lldptlv.getLength() == 12 &&
+                    lldptlv.getValue()[0] == 0x0 && lldptlv.getValue()[1] == 0x26 &&
+                    lldptlv.getValue()[2] == (byte)0xe1 && lldptlv.getValue()[3] == 0x0) {
+                ByteBuffer dpidBB = ByteBuffer.wrap(lldptlv.getValue());
+                remoteSwitch = floodlightProvider.getSwitches().get(dpidBB.getLong(4));
+            } else if (lldptlv.getType() == 12 && lldptlv.getLength() == 8){
+                otherId = ByteBuffer.wrap(lldptlv.getValue()).getLong();
+                if (myId == otherId)
+                    myLLDP = true;
+            } else if (lldptlv.getType() == TLV_DIRECTION_TYPE &&
+                    lldptlv.getLength() == TLV_DIRECTION_LENGTH) {
+                if (lldptlv.getValue()[0] == TLV_DIRECTION_VALUE_FORWARD[0])
+                    isReverse = false;
+                else if (lldptlv.getValue()[0] == TLV_DIRECTION_VALUE_REVERSE[0])
+                    isReverse = true;
+            }
+        }
+
+        if (myLLDP == false) {
+            // This is not the LLDP sent by this controller.
+            // If the LLDP message has multicast bit set, then we need to broadcast
+            // the packet as a regular packet.
+            if (isStandard) {
+                if (log.isTraceEnabled()) {
+                    log.trace("Getting standard LLDP from a different controller and squelching it.");
+                }
+                return Command.STOP;
+            }
+            else if (myId < otherId)  {
+                if (log.isTraceEnabled()) {
+                    log.trace("Getting BDDP packets from a different controller " +
+                            "and letting it go through the normal processing chain.");
+                }
+                return Command.CONTINUE;
+            }
+        }
+
+
+        if (remoteSwitch == null) {
+            // Ignore LLDPs not generated by Floodlight, or from a switch that has recently
+            // disconnected, or from a switch connected to another Floodlight instance
+            if (log.isTraceEnabled()) {
+                log.trace("Received LLDP from remote switch not connected to the controller");
+            }
+            return Command.STOP;
+        }
+
+        if (!remoteSwitch.portEnabled(remotePort)) {
+            if (log.isTraceEnabled()) {
+                log.trace("Ignoring link with disabled source port: switch {} port {}", remoteSwitch, remotePort);
+            }
+            return Command.STOP;
+        }
+        if (suppressLinkDiscovery.contains(new NodePortTuple(remoteSwitch.getId(),
+                                                     remotePort))) {
+            if (log.isTraceEnabled()) {
+                log.trace("Ignoring link with suppressed src port: switch {} port {}",
+                      remoteSwitch, remotePort);
+            }
+            return Command.STOP;
+        }
+        if (!iofSwitch.portEnabled(pi.getInPort())) {
+            if (log.isTraceEnabled()) {
+                log.trace("Ignoring link with disabled dest port: switch {} port {}", sw, pi.getInPort());
+            }
+            return Command.STOP;
+        }
+
+        OFPhysicalPort physicalPort = remoteSwitch.getPort(remotePort);
+        int srcPortState = (physicalPort != null) ? physicalPort.getState() : 0;
+        physicalPort = iofSwitch.getPort(pi.getInPort());
+        int dstPortState = (physicalPort != null) ? physicalPort.getState() : 0;
+
+        // Store the time of update to this link, and push it out to routingEngine
+        Link lt = new Link(remoteSwitch.getId(), remotePort, iofSwitch.getId(), pi.getInPort());
+
+
+        Long lastLldpTime = null;
+        Long lastBddpTime = null;
+
+        Long firstSeenTime = System.currentTimeMillis();
+
+        if (isStandard)
+            lastLldpTime = System.currentTimeMillis();
+        else
+            lastBddpTime = System.currentTimeMillis();
+
+        LinkInfo newLinkInfo =
+                new LinkInfo(firstSeenTime, lastLldpTime, lastBddpTime,
+                             srcPortState, dstPortState);
+
+        addOrUpdateLink(lt, newLinkInfo);
+
+        // Check if the reverse link exists.
+        // If it doesn't exist and the forward link was first seen
+        // within a small interval, send a probe on the reverse link.
+
+        newLinkInfo = links.get(lt);
+        if (newLinkInfo != null && isStandard && Boolean.FALSE.equals(isReverse)) {
+            Link reverseLink = new Link(lt.getDst(), lt.getDstPort(),
+                                        lt.getSrc(), lt.getSrcPort());
+            LinkInfo reverseInfo = links.get(reverseLink);
+            if (reverseInfo == null) {
+                // the reverse link does not exist.
+                if (newLinkInfo.getFirstSeenTime() > System.currentTimeMillis() - LINK_TIMEOUT) {
+                    this.sendDiscoveryMessage(lt.getDst(), lt.getDstPort(), isStandard, true);
+                }
+            }
+        }
+
+        // If the received packet is a BDDP packet, then create a reverse BDDP
+        // link as well.
+        if (!isStandard) {
+            Link reverseLink = new Link(lt.getDst(), lt.getDstPort(),
+                                        lt.getSrc(), lt.getSrcPort());
+
+            // srcPortState and dstPort state are reversed.
+            LinkInfo reverseInfo =
+                    new LinkInfo(firstSeenTime, lastLldpTime, lastBddpTime,
+                                 dstPortState, srcPortState);
+
+            addOrUpdateLink(reverseLink, reverseInfo);
+        }
+
+        // Remove the node ports from the quarantine and maintenance queues.
+        NodePortTuple nptSrc = new NodePortTuple(lt.getSrc(), lt.getSrcPort());
+        NodePortTuple nptDst = new NodePortTuple(lt.getDst(), lt.getDstPort());
+        removeFromQuarantineQueue(nptSrc);
+        removeFromMaintenanceQueue(nptSrc);
+        removeFromQuarantineQueue(nptDst);
+        removeFromMaintenanceQueue(nptDst);
+
+        // Consume this message
+        return Command.STOP;
+    }
+
+    protected Command handlePacketIn(long sw, OFPacketIn pi,
+                                     FloodlightContext cntx) {
+        Ethernet eth = 
+                IFloodlightProviderService.bcStore.get(cntx, 
+                                                       IFloodlightProviderService.CONTEXT_PI_PAYLOAD);
+
+        if(eth.getEtherType() == Ethernet.TYPE_BSN) {
+            BSN bsn = (BSN) eth.getPayload();
+            if (bsn == null) return Command.STOP;
+            if (bsn.getPayload() == null) return Command.STOP;
+            // It could be a packet other than BSN LLDP, therefore
+            // continue with the regular processing.
+            if (bsn.getPayload() instanceof LLDP == false)
+                return Command.CONTINUE;
+            return handleLldp((LLDP) bsn.getPayload(), sw, pi, false, cntx);
+        } else if (eth.getEtherType() == Ethernet.TYPE_LLDP)  {
+            return handleLldp((LLDP) eth.getPayload(), sw, pi, true, cntx);
+        } else if (eth.getEtherType() < 1500) {
+            long destMac = eth.getDestinationMAC().toLong();
+            if ((destMac & LINK_LOCAL_MASK) == LINK_LOCAL_VALUE){
+                if (log.isTraceEnabled()) {
+                    log.trace("Ignoring packet addressed to 802.1D/Q " +
+                            "reserved address.");
+                }
+                return Command.STOP;
+            }
+        }
+
+        // If packet-in is from a quarantine port, stop processing.
+        NodePortTuple npt = new NodePortTuple(sw, pi.getInPort());
+        if (quarantineQueue.contains(npt)) return Command.STOP;
+
+        return Command.CONTINUE;
+    }
+
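+    // A link is reported as LINK_UPDATED only if neither endpoint port is
+    // STP-blocked; otherwise it is reported as LINK_REMOVED.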
+    protected UpdateOperation getUpdateOperation(int srcPortState,
+                                                 int dstPortState) {
+        boolean added =
+                (((srcPortState &
+                   OFPortState.OFPPS_STP_MASK.getValue()) !=
+                   OFPortState.OFPPS_STP_BLOCK.getValue()) &&
+                ((dstPortState &
+                  OFPortState.OFPPS_STP_MASK.getValue()) !=
+                  OFPortState.OFPPS_STP_BLOCK.getValue()));
+
+        if (added) return UpdateOperation.LINK_UPDATED;
+        return UpdateOperation.LINK_REMOVED;
+    }
+
+
+
+    protected UpdateOperation getUpdateOperation(int srcPortState) {
+        boolean portUp = ((srcPortState &
+                OFPortState.OFPPS_STP_MASK.getValue()) !=
+                OFPortState.OFPPS_STP_BLOCK.getValue());
+
+        if (portUp) return UpdateOperation.PORT_UP;
+        else return UpdateOperation.PORT_DOWN;
+    }
+
+    protected boolean addOrUpdateLink(Link lt, LinkInfo newInfo) {
+
+        NodePortTuple srcNpt, dstNpt;
+        boolean linkChanged = false;
+
+        lock.writeLock().lock();
+        try {
+            // put the new info.  if an old info exists, it will be returned.
+            LinkInfo oldInfo = links.put(lt, newInfo);
+            if (oldInfo != null &&
+                    oldInfo.getFirstSeenTime() < newInfo.getFirstSeenTime())
+                newInfo.setFirstSeenTime(oldInfo.getFirstSeenTime());
+
+            if (log.isTraceEnabled()) {
+                log.trace("addOrUpdateLink: {} {}", 
+                          lt, 
+                          (newInfo.getMulticastValidTime()!=null) ? "multicast" : "unicast");
+            }
+
+            UpdateOperation updateOperation = null;
+            linkChanged = false;
+
+            srcNpt = new NodePortTuple(lt.getSrc(), lt.getSrcPort());
+            dstNpt = new NodePortTuple(lt.getDst(), lt.getDstPort());
+
+            if (oldInfo == null) {
+                // index it by switch source
+                if (!switchLinks.containsKey(lt.getSrc()))
+                    switchLinks.put(lt.getSrc(), new HashSet<Link>());
+                switchLinks.get(lt.getSrc()).add(lt);
+
+                // index it by switch dest
+                if (!switchLinks.containsKey(lt.getDst()))
+                    switchLinks.put(lt.getDst(), new HashSet<Link>());
+                switchLinks.get(lt.getDst()).add(lt);
+
+                // index both ends by switch:port
+                if (!portLinks.containsKey(srcNpt))
+                    portLinks.put(srcNpt, new HashSet<Link>());
+                portLinks.get(srcNpt).add(lt);
+
+                if (!portLinks.containsKey(dstNpt))
+                    portLinks.put(dstNpt, new HashSet<Link>());
+                portLinks.get(dstNpt).add(lt);
+
+                // Add to the broadcast-domain link set if the unicast valid time is null
+                if (newInfo.getUnicastValidTime() == null)
+                    addLinkToBroadcastDomain(lt);
+
+                writeLinkToStorage(lt, newInfo);
+                updateOperation = UpdateOperation.LINK_UPDATED;
+                linkChanged = true;
+
+                // Add to event history
+                evHistTopoLink(lt.getSrc(),
+                               lt.getDst(),
+                               lt.getSrcPort(),
+                               lt.getDstPort(),
+                               newInfo.getSrcPortState(), newInfo.getDstPortState(),
+                               getLinkType(lt, newInfo),
+                               EvAction.LINK_ADDED, "LLDP Recvd");
+            } else {
+                // Since the link info is already there, we need to
+                // update the right fields.
+                if (newInfo.getUnicastValidTime() == null) {
+                    // This is due to a multicast LLDP, so copy the old unicast
+                    // value.
+                    if (oldInfo.getUnicastValidTime() != null) {
+                        newInfo.setUnicastValidTime(oldInfo.getUnicastValidTime());
+                    }
+                } else if (newInfo.getMulticastValidTime() == null) {
+                    // This is due to a unicast LLDP, so copy the old multicast
+                    // value.
+                    if (oldInfo.getMulticastValidTime() != null) {
+                        newInfo.setMulticastValidTime(oldInfo.getMulticastValidTime());
+                    }
+                }
+
+                Long oldTime = oldInfo.getUnicastValidTime();
+                Long newTime = newInfo.getUnicastValidTime();
+                // The link has switched between an OpenFlow and a non-OpenFlow
+                // link if exactly one of the unicast valid times is null.
+                if (oldTime != null && newTime == null) {
+                    // openflow -> non-openflow transition;
+                    // add the link to the broadcast-domain link set
+                    addLinkToBroadcastDomain(lt);
+                    linkChanged = true;
+                } else if (oldTime == null && newTime != null) {
+                    // non-openflow -> openflow transition;
+                    // remove the link from the broadcast-domain link set
+                    removeLinkFromBroadcastDomain(lt);
+                    linkChanged = true;
+                }
+
+                // Only update the port states if they've changed
+                if (newInfo.getSrcPortState().intValue() !=
+                        oldInfo.getSrcPortState().intValue() ||
+                        newInfo.getDstPortState().intValue() !=
+                        oldInfo.getDstPortState().intValue())
+                    linkChanged = true;
+
+                // Write changes to storage. This will always write the updated
+                // valid time, plus the port states if they've changed (i.e. if
+                // they weren't set to null in the previous block of code).
+                writeLinkToStorage(lt, newInfo);
+
+                if (linkChanged) {
+                    updateOperation = getUpdateOperation(newInfo.getSrcPortState(),
+                                                         newInfo.getDstPortState());
+                    if (log.isTraceEnabled()) {
+                        log.trace("Updated link {}", lt);
+                    }
+                    // Add to event history
+                    evHistTopoLink(lt.getSrc(),
+                                   lt.getDst(),
+                                   lt.getSrcPort(),
+                                   lt.getDstPort(),
+                                   newInfo.getSrcPortState(), newInfo.getDstPortState(),
+                                   getLinkType(lt, newInfo),
+                                   EvAction.LINK_PORT_STATE_UPDATED,
+                                   "LLDP Recvd");
+                }
+            }
+
+            if (linkChanged) {
+                // find out if the link was added or removed here.
+                updates.add(new LDUpdate(lt.getSrc(), lt.getSrcPort(),
+                                         lt.getDst(), lt.getDstPort(),
+                                         getLinkType(lt, newInfo),
+                                         updateOperation));
+            }
+        } finally {
+            lock.writeLock().unlock();
+        }
+
+        return linkChanged;
+    }
+
+    public Map<Long, Set<Link>> getSwitchLinks() {
+        return this.switchLinks;
+    }
+
+    /**
+     * Removes links from memory and storage.
+     * @param links the list of {@link Link} objects to delete
+     * @param reason a human-readable reason recorded in the event history
+     */
+    protected void deleteLinks(List<Link> links, String reason) {
+        NodePortTuple srcNpt, dstNpt;
+
+        lock.writeLock().lock();
+        try {
+            for (Link lt : links) {
+                srcNpt = new NodePortTuple(lt.getSrc(), lt.getSrcPort());
+                dstNpt = new NodePortTuple(lt.getDst(), lt.getDstPort());
+
+                switchLinks.get(lt.getSrc()).remove(lt);
+                switchLinks.get(lt.getDst()).remove(lt);
+                if (switchLinks.containsKey(lt.getSrc()) &&
+                        switchLinks.get(lt.getSrc()).isEmpty())
+                    this.switchLinks.remove(lt.getSrc());
+                if (this.switchLinks.containsKey(lt.getDst()) &&
+                        this.switchLinks.get(lt.getDst()).isEmpty())
+                    this.switchLinks.remove(lt.getDst());
+
+                if (this.portLinks.get(srcNpt) != null) {
+                    this.portLinks.get(srcNpt).remove(lt);
+                    if (this.portLinks.get(srcNpt).isEmpty())
+                        this.portLinks.remove(srcNpt);
+                }
+                if (this.portLinks.get(dstNpt) != null) {
+                    this.portLinks.get(dstNpt).remove(lt);
+                    if (this.portLinks.get(dstNpt).isEmpty())
+                        this.portLinks.remove(dstNpt);
+                }
+
+                LinkInfo info = this.links.remove(lt);
+                updates.add(new LDUpdate(lt.getSrc(), lt.getSrcPort(),
+                                         lt.getDst(), lt.getDstPort(),
+                                         getLinkType(lt, info),
+                                         UpdateOperation.LINK_REMOVED));
+
+                // Update Event History
+                evHistTopoLink(lt.getSrc(),
+                               lt.getDst(),
+                               lt.getSrcPort(),
+                               lt.getDstPort(),
+                               0, 0, // Port states
+                               ILinkDiscovery.LinkType.INVALID_LINK,
+                               EvAction.LINK_DELETED, reason);
+
+                // remove link from storage.
+                removeLinkFromStorage(lt);
+
+                // TODO: Whenever a link is removed, check whether the
+                // switch ports must be added to quarantine.
+
+                if (log.isTraceEnabled()) {
+                    log.trace("Deleted link {}", lt);
+                }
+            }
+        } finally {
+            lock.writeLock().unlock();
+        }
+    }
+
+    /**
+     * Handles an OFPortStatus message from a switch. We will add or
+     * delete links as well as re-compute the topology if needed.
+     * @param sw The IOFSwitch that sent the port status message
+     * @param ps The OFPortStatus message
+     * @return The Command to continue or stop after we process this message
+     */
+    protected Command handlePortStatus(long sw, OFPortStatus ps) {
+
+        IOFSwitch iofSwitch = floodlightProvider.getSwitches().get(sw);
+        if (iofSwitch == null) return Command.CONTINUE;
+
+        if (log.isTraceEnabled()) {
+            log.trace("handlePortStatus: Switch {} port #{} reason {}; " +
+                    "config is {} state is {}",
+                    new Object[] {iofSwitch.getStringId(),
+                                  ps.getDesc().getPortNumber(),
+                                  ps.getReason(),
+                                  ps.getDesc().getConfig(),
+                                  ps.getDesc().getState()});
+        }
+
+        short port = ps.getDesc().getPortNumber();
+        NodePortTuple npt = new NodePortTuple(sw, port);
+        boolean linkDeleted  = false;
+        boolean linkInfoChanged = false;
+
+        lock.writeLock().lock();
+        try {
+            // if ps is a delete, or a modify where the port is down or
+            // configured down
+            if ((byte)OFPortReason.OFPPR_DELETE.ordinal() == ps.getReason() ||
+                    ((byte)OFPortReason.OFPPR_MODIFY.ordinal() ==
+                    ps.getReason() && !portEnabled(ps.getDesc()))) {
+                deleteLinksOnPort(npt, "Port Status Changed");
+                LDUpdate update = new LDUpdate(sw, port, UpdateOperation.PORT_DOWN);
+                updates.add(update);
+                linkDeleted = true;
+            } else if (ps.getReason() ==
+                    (byte)OFPortReason.OFPPR_MODIFY.ordinal()) {
+                // If ps is a port modification and the port state has changed
+                // in a way that affects links in the topology
+
+                if (this.portLinks.containsKey(npt)) {
+                    for (Link lt: this.portLinks.get(npt)) {
+                        LinkInfo linkInfo = links.get(lt);
+                        assert(linkInfo != null);
+                        Integer updatedSrcPortState = null;
+                        Integer updatedDstPortState = null;
+                        if (lt.getSrc() == npt.getNodeId() && 
+                                lt.getSrcPort() == npt.getPortId() &&
+                                (linkInfo.getSrcPortState() !=
+                                ps.getDesc().getState())) {
+                            updatedSrcPortState = ps.getDesc().getState();
+                            linkInfo.setSrcPortState(updatedSrcPortState);
+                        }
+                        if (lt.getDst() == npt.getNodeId() &&
+                                lt.getDstPort() == npt.getPortId() &&
+                                (linkInfo.getDstPortState() !=
+                                ps.getDesc().getState())) {
+                            updatedDstPortState = ps.getDesc().getState();
+                            linkInfo.setDstPortState(updatedDstPortState);
+                        }
+                        if ((updatedSrcPortState != null) ||
+                                (updatedDstPortState != null)) {
+                            // The link is already known to link discovery
+                            // manager and the status has changed, therefore
+                            // send an LDUpdate.
+                            UpdateOperation operation =
+                                    getUpdateOperation(linkInfo.getSrcPortState(),
+                                                       linkInfo.getDstPortState());
+                            updates.add(new LDUpdate(lt.getSrc(), lt.getSrcPort(),
+                                                     lt.getDst(), lt.getDstPort(),
+                                                     getLinkType(lt, linkInfo),
+                                                     operation));
+                            writeLinkToStorage(lt, linkInfo);
+                            linkInfoChanged = true;
+                        }
+                    }
+                }
+
+                UpdateOperation operation =
+                        getUpdateOperation(ps.getDesc().getState());
+                updates.add(new LDUpdate(sw, port, operation));
+            }
+
+            if (!linkDeleted && !linkInfoChanged){
+                if (log.isTraceEnabled()) {
+                    log.trace("handlePortStatus: Switch {} port #{} reason {};"+
+                            " no links to update/remove",
+                            new Object[] {HexString.toHexString(sw),
+                                          ps.getDesc().getPortNumber(),
+                                          ps.getReason()});
+                }
+            }
+        } finally {
+            lock.writeLock().unlock();
+        }
+
+        if (!linkDeleted) {
+            // Send LLDP right away when the port state changes, for faster
+            // cluster merge. If it is a link delete, there is no need to send
+            // the LLDPs right away; instead we wait for the LLDPs to be sent
+            // on the timer as is normally done.
+            // Do this outside the write lock.
+            // sendLLDPTask.reschedule(1000, TimeUnit.MILLISECONDS);
+            processNewPort(npt.getNodeId(), npt.getPortId());
+        }
+        return Command.CONTINUE;
+    }
+
+    /**
+     * Process a new port.
+     * If link discovery is disabled on the port, then do nothing.
+     * If autoportfast feature is enabled and the port is a fast port, then
+     * do nothing.
+     * Otherwise, send an LLDP message and add the port to the quarantine
+     * queue (or to the maintenance queue if it is a fast port).
+     * @param sw the DPID of the switch
+     * @param p the port number
+     */
+    private void processNewPort(long sw, short p) {
+        if (isLinkDiscoverySuppressed(sw, p)) {
+            // Do nothing as link discovery is suppressed.
+        }
+        else if (autoPortFastFeature && isFastPort(sw, p)) {
+            // Do nothing as the port is a fast port.
+        }
+        else {
+            NodePortTuple npt = new NodePortTuple(sw, p);
+            discover(sw, p);
+            // if it is not a fast port, add it to quarantine.
+            if (!isFastPort(sw, p)) {
+                addToQuarantineQueue(npt);
+            } else {
+                // Add to maintenance queue to ensure that BDDP packets
+                // are sent out.
+                addToMaintenanceQueue(npt);
+            }
+        }
+    }
+
+    /**
+     * We send out LLDP messages when a switch is added to discover the topology
+     * @param sw The IOFSwitch that connected to the controller
+     */
+    @Override
+    public void addedSwitch(IOFSwitch sw) {
+
+        if (sw.getEnabledPorts() != null) {
+            for (Short p : sw.getEnabledPortNumbers()) {
+                processNewPort(sw.getId(), p);
+            }
+        }
+        // Update event history
+        evHistTopoSwitch(sw, EvAction.SWITCH_CONNECTED, "None");
+        LDUpdate update = new LDUpdate(sw.getId(), null,
+                                       UpdateOperation.SWITCH_UPDATED);
+        updates.add(update);
+    }
+
+    /**
+     * When a switch disconnects we remove any links from our map and notify.
+     * @param iofSwitch the switch that disconnected
+     */
+    @Override
+    public void removedSwitch(IOFSwitch iofSwitch) {
+        // Update event history
+        long sw = iofSwitch.getId();
+        evHistTopoSwitch(iofSwitch, EvAction.SWITCH_DISCONNECTED, "None");
+        List<Link> eraseList = new ArrayList<Link>();
+        lock.writeLock().lock();
+        try {
+            if (switchLinks.containsKey(sw)) {
+                if (log.isTraceEnabled()) {
+                    log.trace("Handle switchRemoved. Switch {}; removing links {}",
+                              HexString.toHexString(sw), switchLinks.get(sw));
+                }
+                // add all tuples with an endpoint on this switch to erase list
+                eraseList.addAll(switchLinks.get(sw));
+                deleteLinks(eraseList, "Switch Removed");
+
+                // Send a switch removed update
+                LDUpdate update = new LDUpdate(sw, null, UpdateOperation.SWITCH_REMOVED);
+                updates.add(update);
+            }
+        } finally {
+            lock.writeLock().unlock();
+        }
+    }
+    
+    /**
+     * We don't react to port-changed notifications here; we listen for
+     * OFPortStatus messages directly. We might consider using this notifier
+     * instead.
+     */
+    @Override
+    public void switchPortChanged(Long switchId) {
+        // no-op
+    }
+
+    /** 
+     * Delete links incident on a given switch port.
+     * @param npt
+     * @param reason
+     */
+    protected void deleteLinksOnPort(NodePortTuple npt, String reason) {
+        List<Link> eraseList = new ArrayList<Link>();
+        if (this.portLinks.containsKey(npt)) {
+            if (log.isTraceEnabled()) {
+                log.trace("handlePortStatus: Switch {} port #{} " +
+                        "removing links {}",
+                        new Object[] {HexString.toHexString(npt.getNodeId()),
+                                      npt.getPortId(),
+                                      this.portLinks.get(npt)});
+            }
+            eraseList.addAll(this.portLinks.get(npt));
+            deleteLinks(eraseList, reason);
+        }
+    }
+
+    /** 
+     * Iterates through the links and deletes those whose last discovery
+     * message was received longer ago than the timeout allows.
+     */
+    protected void timeoutLinks() {
+        List<Link> eraseList = new ArrayList<Link>();
+        Long curTime = System.currentTimeMillis();
+        boolean linkChanged = false;
+
+        // A reentrant lock is required here because deleteLinks() also acquires the write lock.
+        lock.writeLock().lock();
+        try {
+            Iterator<Entry<Link, LinkInfo>> it =
+                    this.links.entrySet().iterator();
+            while (it.hasNext()) {
+                Entry<Link, LinkInfo> entry = it.next();
+                Link lt = entry.getKey();
+                LinkInfo info = entry.getValue();
+
+                // Timeout the unicast and multicast LLDP valid times
+                // independently.
+                if ((info.getUnicastValidTime() != null) && 
+                        (info.getUnicastValidTime() + (this.LINK_TIMEOUT * 1000) < curTime)){
+                    info.setUnicastValidTime(null);
+
+                    if (info.getMulticastValidTime() != null)
+                        addLinkToBroadcastDomain(lt);
+                    // Note that even if mTime becomes null later on,
+                    // the link would be deleted, which would trigger updateClusters().
+                    linkChanged = true;
+                }
+                if ((info.getMulticastValidTime()!= null) && 
+                        (info.getMulticastValidTime()+ (this.LINK_TIMEOUT * 1000) < curTime)) {
+                    info.setMulticastValidTime(null);
+                    // if uTime is not null, then link will remain as openflow
+                    // link. If uTime is null, it will be deleted.  So, we
+                    // don't care about linkChanged flag here.
+                    removeLinkFromBroadcastDomain(lt);
+                    linkChanged = true;
+                }
+                // Add to the erase list only if the unicast
+                // time is null.
+                if (info.getUnicastValidTime() == null && 
+                        info.getMulticastValidTime() == null){
+                    eraseList.add(entry.getKey());
+                } else if (linkChanged) {
+                    UpdateOperation operation;
+                    operation = getUpdateOperation(info.getSrcPortState(),
+                                                   info.getDstPortState());
+                    updates.add(new LDUpdate(lt.getSrc(), lt.getSrcPort(),
+                                             lt.getDst(), lt.getDstPort(),
+                                             getLinkType(lt, info),
+                                             operation));
+                }
+            }
+
+            // if any link was deleted or any link was changed.
+            if ((eraseList.size() > 0) || linkChanged) {
+                deleteLinks(eraseList, "LLDP timeout");
+            }
+        } finally {
+            lock.writeLock().unlock();
+        }
+    }
+
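+    /**
+     * A port is considered enabled only if it exists and is neither
+     * administratively down (OFPPC_PORT_DOWN) nor link-down (OFPPS_LINK_DOWN).
+     */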
+    private boolean portEnabled(OFPhysicalPort port) {
+        if (port == null)
+            return false;
+        if ((OFPortConfig.OFPPC_PORT_DOWN.getValue() & port.getConfig()) > 0)
+            return false;
+        if ((OFPortState.OFPPS_LINK_DOWN.getValue() & port.getState()) > 0)
+            return false;
+        // Port STP state doesn't work with multiple VLANs, so ignore it for now
+        // if ((port.getState() & OFPortState.OFPPS_STP_MASK.getValue()) == OFPortState.OFPPS_STP_BLOCK.getValue())
+        //    return false;
+        return true;
+    }
+
+    public Map<NodePortTuple, Set<Link>> getPortBroadcastDomainLinks() {
+        return portBroadcastDomainLinks;
+    }
+
+    @Override
+    public Map<Link, LinkInfo> getLinks() {
+        lock.readLock().lock();
+        Map<Link, LinkInfo> result;
+        try {
+            result = new HashMap<Link, LinkInfo>(links);
+        } finally {
+            lock.readLock().unlock();
+        }
+        return result;
+    }
+
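+    /**
+     * Index a broadcast-domain (non-OpenFlow) link under both of its
+     * switch-port endpoints.
+     */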
+    protected void addLinkToBroadcastDomain(Link lt) {
+
+        NodePortTuple srcNpt, dstNpt;
+        srcNpt = new NodePortTuple(lt.getSrc(), lt.getSrcPort());
+        dstNpt = new NodePortTuple(lt.getDst(), lt.getDstPort());
+
+        if (!portBroadcastDomainLinks.containsKey(srcNpt))
+            portBroadcastDomainLinks.put(srcNpt, new HashSet<Link>());
+        portBroadcastDomainLinks.get(srcNpt).add(lt);
+
+        if (!portBroadcastDomainLinks.containsKey(dstNpt))
+            portBroadcastDomainLinks.put(dstNpt, new HashSet<Link>());
+        portBroadcastDomainLinks.get(dstNpt).add(lt);
+    }
+
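+    /**
+     * Remove a broadcast-domain link from both of its switch-port indexes,
+     * dropping index entries that become empty.
+     */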
+    protected void removeLinkFromBroadcastDomain(Link lt) {
+
+        NodePortTuple srcNpt, dstNpt;
+        srcNpt = new NodePortTuple(lt.getSrc(), lt.getSrcPort());
+        dstNpt = new NodePortTuple(lt.getDst(), lt.getDstPort());
+
+        if (portBroadcastDomainLinks.containsKey(srcNpt)) {
+            portBroadcastDomainLinks.get(srcNpt).remove(lt);
+            if (portBroadcastDomainLinks.get(srcNpt).isEmpty())
+                portBroadcastDomainLinks.remove(srcNpt);
+        }
+
+        if (portBroadcastDomainLinks.containsKey(dstNpt)) {
+            portBroadcastDomainLinks.get(dstNpt).remove(lt);
+            if (portBroadcastDomainLinks.get(dstNpt).isEmpty())
+                portBroadcastDomainLinks.remove(dstNpt);
+        }
+    }
+
+    // STORAGE METHODS
+    /**
+     * Deletes all links from storage
+     */
+    void clearAllLinks() {
+        storageSource.deleteRowsAsync(LINK_TABLE_NAME, null);
+    }
+
+    /**
+     * Gets the storage key for a LinkTuple
+     * @param lt The LinkTuple to get
+     * @return The storage key as a String
+     */
+    private String getLinkId(Link lt) {
+        return HexString.toHexString(lt.getSrc()) +
+                "-" + lt.getSrcPort() + "-" +
+                HexString.toHexString(lt.getDst())+
+                "-" + lt.getDstPort();
+    }
+
+    /**
+     * Writes a LinkTuple and corresponding LinkInfo to storage
+     * @param lt The LinkTuple to write
+     * @param linkInfo The LinkInfo to write
+     */
+    protected void writeLinkToStorage(Link lt, LinkInfo linkInfo) {
+        LinkType type = getLinkType(lt, linkInfo);
+
+        // Write only direct links.  Do not write links to external
+        // L2 network.
+        // if (type != LinkType.DIRECT_LINK && type != LinkType.TUNNEL) {
+        //    return;
+        // }
+
+        Map<String, Object> rowValues = new HashMap<String, Object>();
+        String id = getLinkId(lt);
+        rowValues.put(LINK_ID, id);
+        rowValues.put(LINK_VALID_TIME, linkInfo.getUnicastValidTime());
+        String srcDpid = HexString.toHexString(lt.getSrc());
+        rowValues.put(LINK_SRC_SWITCH, srcDpid);
+        rowValues.put(LINK_SRC_PORT, lt.getSrcPort());
+
+        if (type == LinkType.DIRECT_LINK)
+            rowValues.put(LINK_TYPE, "internal");
+        else if (type == LinkType.MULTIHOP_LINK) 
+            rowValues.put(LINK_TYPE, "external");
+        else if (type == LinkType.TUNNEL) 
+            rowValues.put(LINK_TYPE, "tunnel"); 
+        else rowValues.put(LINK_TYPE, "invalid");
+
+        if (linkInfo.linkStpBlocked()) {
+            if (log.isTraceEnabled()) {
+                log.trace("writeLink, link {}, info {}, srcPortState Blocked",
+                          lt, linkInfo);
+            }
+            rowValues.put(LINK_SRC_PORT_STATE,
+                          OFPhysicalPort.OFPortState.OFPPS_STP_BLOCK.getValue());
+        } else {
+            if (log.isTraceEnabled()) {
+                log.trace("writeLink, link {}, info {}, srcPortState {}",
+                          new Object[]{ lt, linkInfo, linkInfo.getSrcPortState() });
+            }
+            rowValues.put(LINK_SRC_PORT_STATE, linkInfo.getSrcPortState());
+        }
+        String dstDpid = HexString.toHexString(lt.getDst());
+        rowValues.put(LINK_DST_SWITCH, dstDpid);
+        rowValues.put(LINK_DST_PORT, lt.getDstPort());
+        if (linkInfo.linkStpBlocked()) {
+            if (log.isTraceEnabled()) {
+                log.trace("writeLink, link {}, info {}, dstPortState Blocked",
+                          lt, linkInfo);
+            }
+            rowValues.put(LINK_DST_PORT_STATE,
+                          OFPhysicalPort.OFPortState.OFPPS_STP_BLOCK.getValue());
+        } else {
+            if (log.isTraceEnabled()) {
+                log.trace("writeLink, link {}, info {}, dstPortState {}",
+                          new Object[]{ lt, linkInfo, linkInfo.getDstPortState() });
+            }
+            rowValues.put(LINK_DST_PORT_STATE, linkInfo.getDstPortState());
+        }
+        storageSource.updateRowAsync(LINK_TABLE_NAME, rowValues);
+    }
+
+    public Long readLinkValidTime(Link lt) {
+        // FIXME: We're not currently using this, but if we start to use it
+        // again we probably shouldn't use it in its current form, because it
+        // makes synchronous storage calls. Depending on the context this may
+        // still be OK, but if it's being called on the packet-in processing
+        // thread it should be reworked to use asynchronous storage calls.
+        Long validTime = null;
+        IResultSet resultSet = null;
+        try {
+            String[] columns = { LINK_VALID_TIME };
+            String id = getLinkId(lt);
+            resultSet = storageSource.executeQuery(LINK_TABLE_NAME, columns,
+                                                   new OperatorPredicate(LINK_ID, OperatorPredicate.Operator.EQ, id), null);
+            if (resultSet.next())
+                validTime = resultSet.getLong(LINK_VALID_TIME);
+        }
+        finally {
+            if (resultSet != null)
+                resultSet.close();
+        }
+        return validTime;
+    }
+
+    /**
+     * Removes a link from storage using an asynchronous call.
+     * @param lt The LinkTuple to delete.
+     */
+    protected void removeLinkFromStorage(Link lt) {
+        String id = getLinkId(lt);
+        storageSource.deleteRowAsync(LINK_TABLE_NAME, id);
+    }
+
+    @Override
+    public void addListener(ILinkDiscoveryListener listener) {
+        linkDiscoveryAware.add(listener);
+    }
+
+    /**
+     * Register a link discovery aware component
+     * @param linkDiscoveryAwareComponent
+     */
+    public void addLinkDiscoveryAware(ILinkDiscoveryListener linkDiscoveryAwareComponent) {
+        // TODO make this a copy on write set or lock it somehow
+        this.linkDiscoveryAware.add(linkDiscoveryAwareComponent);
+    }
+
+    /**
+     * Deregister a link discovery aware component
+     * @param linkDiscoveryAwareComponent
+     */
+    public void removeLinkDiscoveryAware(ILinkDiscoveryListener linkDiscoveryAwareComponent) {
+        // TODO make this a copy on write set or lock it somehow
+        this.linkDiscoveryAware.remove(linkDiscoveryAwareComponent);
+    }
+
+    /**
+     * Sets the IStorageSource to use for link discovery
+     * @param storageSource the storage source to use
+     */
+    public void setStorageSource(IStorageSourceService storageSource) {
+        this.storageSource = storageSource;
+    }
+
+    /**
+     * Gets the storage source used by link discovery
+     * @return the IStorageSourceService being written to
+     */
+    public IStorageSourceService getStorageSource() {
+        return storageSource;
+    }
+
+    @Override
+    public boolean isCallbackOrderingPrereq(OFType type, String name) {
+        return false;
+    }
+
+    @Override
+    public boolean isCallbackOrderingPostreq(OFType type, String name) {
+        return false;
+    }
+
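+    /**
+     * React to changes in the switch-config table: if a switch's core-switch
+     * flag was toggled in storage, flip the corresponding switch attribute
+     * and emit a SWITCH_UPDATED link-discovery update.
+     */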
+    @Override
+    public void rowsModified(String tableName, Set<Object> rowKeys) {
+        Map<Long, IOFSwitch> switches = floodlightProvider.getSwitches();
+        ArrayList<IOFSwitch> updated_switches = new ArrayList<IOFSwitch>();
+        for(Object key: rowKeys) {
+            Long swId = new Long(HexString.toLong((String)key));
+            if (switches.containsKey(swId)) {
+                IOFSwitch sw = switches.get(swId);
+                boolean curr_status = sw.hasAttribute(IOFSwitch.SWITCH_IS_CORE_SWITCH);
+                boolean new_status =  false;
+                IResultSet resultSet = null;
+
+                try {
+                    resultSet = storageSource.getRow(tableName, key);
+                    for (Iterator<IResultSet> it = resultSet.iterator(); it.hasNext();) {
+                        // In case of multiple rows, use the status in last row?
+                        Map<String, Object> row = it.next().getRow();
+                        if (row.containsKey(SWITCH_CONFIG_CORE_SWITCH)) {
+                            new_status = ((String)row.get(SWITCH_CONFIG_CORE_SWITCH)).equals("true");
+                        }
+                    }
+                }
+                finally {
+                    if (resultSet != null)
+                        resultSet.close();
+                }
+
+                if (curr_status != new_status) {
+                    updated_switches.add(sw);
+                }
+            } else {
+                if (log.isTraceEnabled()) {
+                    log.trace("Update for switch which has no entry in switch " +
+                            "list (dpid={}), a delete action.", (String)key);
+                }
+            }
+        }
+
+        for (IOFSwitch sw : updated_switches) {
+            // Set SWITCH_IS_CORE_SWITCH to its inverse value
+            if (sw.hasAttribute(IOFSwitch.SWITCH_IS_CORE_SWITCH)) {
+                sw.removeAttribute(IOFSwitch.SWITCH_IS_CORE_SWITCH);
+                if (log.isTraceEnabled()) {
+                    log.trace("SWITCH_IS_CORE_SWITCH set to False for {}", sw);
+                }
+                updates.add(new LDUpdate(sw.getId(), SwitchType.BASIC_SWITCH,
+                                         UpdateOperation.SWITCH_UPDATED));
+            }
+            else {
+                sw.setAttribute(IOFSwitch.SWITCH_IS_CORE_SWITCH, new Boolean(true));
+                if (log.isTraceEnabled()) {
+                    log.trace("SWITCH_IS_CORE_SWITCH set to True for {}", sw);
+                }
+                updates.add(new LDUpdate(sw.getId(), SwitchType.CORE_SWITCH,
+                                         UpdateOperation.SWITCH_UPDATED));
+            }
+        }
+    }
+
+    @Override
+    public void rowsDeleted(String tableName, Set<Object> rowKeys) {
+        // Ignore delete events; the switch delete will do the right thing on its own
+    }
+
+    // IFloodlightModule classes
+
+    @Override
+    public Collection<Class<? extends IFloodlightService>> getModuleServices() {
+        Collection<Class<? extends IFloodlightService>> l = 
+                new ArrayList<Class<? extends IFloodlightService>>();
+        l.add(ILinkDiscoveryService.class);
+        //l.add(ITopologyService.class);
+        return l;
+    }
+
+    @Override
+    public Map<Class<? extends IFloodlightService>, IFloodlightService>
+    getServiceImpls() {
+        Map<Class<? extends IFloodlightService>,
+        IFloodlightService> m = 
+        new HashMap<Class<? extends IFloodlightService>,
+        IFloodlightService>();
+        // We are the class that implements the service
+        m.put(ILinkDiscoveryService.class, this);
+        return m;
+    }
+
+    @Override
+    public Collection<Class<? extends IFloodlightService>> getModuleDependencies() {
+        Collection<Class<? extends IFloodlightService>> l = 
+                new ArrayList<Class<? extends IFloodlightService>>();
+        l.add(IFloodlightProviderService.class);
+        l.add(IStorageSourceService.class);
+        l.add(IThreadPoolService.class);
+        l.add(IRestApiService.class);
+        return l;
+    }
+
+    @Override
+    public void init(FloodlightModuleContext context)
+            throws FloodlightModuleException {
+        floodlightProvider = context.getServiceImpl(IFloodlightProviderService.class);
+        storageSource = context.getServiceImpl(IStorageSourceService.class);
+        threadPool = context.getServiceImpl(IThreadPoolService.class);
+        restApi = context.getServiceImpl(IRestApiService.class);
+
+        // Set the autoportfast feature to false.
+        this.autoPortFastFeature = false;
+
+        // We create this here because there is no ordering guarantee
+        this.linkDiscoveryAware = new ArrayList<ILinkDiscoveryListener>();
+        this.lock = new ReentrantReadWriteLock();
+        this.updates = new LinkedBlockingQueue<LDUpdate>();
+        this.links = new HashMap<Link, LinkInfo>();
+        this.portLinks = new HashMap<NodePortTuple, Set<Link>>();
+        this.suppressLinkDiscovery =
+                Collections.synchronizedSet(new HashSet<NodePortTuple>());
+        this.portBroadcastDomainLinks = new HashMap<NodePortTuple, Set<Link>>();
+        this.switchLinks = new HashMap<Long, Set<Link>>();
+        this.quarantineQueue = new LinkedBlockingQueue<NodePortTuple>();
+        this.maintenanceQueue = new LinkedBlockingQueue<NodePortTuple>();
+
+        this.evHistTopologySwitch =
+                new EventHistory<EventHistoryTopologySwitch>("Topology: Switch");
+        this.evHistTopologyLink =
+                new EventHistory<EventHistoryTopologyLink>("Topology: Link");
+        this.evHistTopologyCluster =
+                new EventHistory<EventHistoryTopologyCluster>("Topology: Cluster");
+    }
+
+    @Override
+    @LogMessageDocs({
+        @LogMessageDoc(level="ERROR",
+                message="No storage source found.",
+                explanation="Storage source was not initialized; cannot initialize " +
+                "link discovery.",
+                recommendation=LogMessageDoc.REPORT_CONTROLLER_BUG),
+        @LogMessageDoc(level="ERROR",
+                message="Error in installing listener for " +
+                        "switch config table {table}",
+                explanation="Failed to install storage notification for the " +
+                		"switch config table",
+                recommendation=LogMessageDoc.REPORT_CONTROLLER_BUG),
+        @LogMessageDoc(level="ERROR",
+                message="No storage source found.",
+                explanation="Storage source was not initialized; cannot initialize " +
+                "link discovery.",
+                recommendation=LogMessageDoc.REPORT_CONTROLLER_BUG),
+        @LogMessageDoc(level="ERROR",
+                message="Exception in LLDP send timer.",
+                explanation="An unknown error occured while sending LLDP " +
+                		"messages to switches.",
+                recommendation=LogMessageDoc.CHECK_SWITCH)
+    })
+    public void startUp(FloodlightModuleContext context) {
+        // Create our storage tables
+        if (storageSource == null) {
+            log.error("No storage source found.");
+            return;
+        }
+
+        storageSource.createTable(LINK_TABLE_NAME, null);
+        storageSource.setTablePrimaryKeyName(LINK_TABLE_NAME, LINK_ID);
+        storageSource.deleteMatchingRows(LINK_TABLE_NAME, null);
+        // Register for storage updates for the switch table
+        try {
+            storageSource.addListener(SWITCH_CONFIG_TABLE_NAME, this);
+        } catch (StorageException ex) {
+            log.error("Error in installing listener for " +
+            		  "switch table {}", SWITCH_CONFIG_TABLE_NAME);
+        }
+
+        ScheduledExecutorService ses = threadPool.getScheduledExecutor();
+
+        // To be started by the first switch connection
+        discoveryTask = new SingletonTask(ses, new Runnable() {
+            @Override
+            public void run() {
+                try {
+                    discoverLinks();
+                } catch (StorageException e) {
+                    log.error("Storage exception in LLDP send timer; " + 
+                            "terminating process", e);
+                    floodlightProvider.terminate();
+                } catch (Exception e) {
+                    log.error("Exception in LLDP send timer.", e);
+                } finally {
+                    if (!shuttingDown) {
+                        // null role implies HA mode is not enabled.
+                         Role role = floodlightProvider.getRole();
+                         if (role == null || role == Role.MASTER) {
+                             log.trace("Rescheduling discovery task as role = {}", role);
+                             discoveryTask.reschedule(DISCOVERY_TASK_INTERVAL,
+                                                TimeUnit.SECONDS);
+                         } else {
+                             log.trace("Stopped LLDP rescheduling due to role = {}.", role);
+                         }
+                    }
+                }
+            }
+        });
+
+        // null role implies HA mode is not enabled.
+        Role role = floodlightProvider.getRole();
+        if (role == null || role == Role.MASTER) {
+            log.trace("Setup: Rescheduling discovery task. role = {}", role);
+            discoveryTask.reschedule(DISCOVERY_TASK_INTERVAL, TimeUnit.SECONDS);
+        } else {
+                log.trace("Setup: Not scheduling LLDP as role = {}.", role);
+        }
+
+        // Setup the BDDP task.  It is invoked whenever switch port tuples
+        // are added to the quarantine list.
+        bddpTask = new SingletonTask(ses, new QuarantineWorker());
+        bddpTask.reschedule(BDDP_TASK_INTERVAL, TimeUnit.MILLISECONDS);
+
+        updatesThread = new Thread(new Runnable () {
+            @Override
+            public void run() {
+                while (true) {
+                    try {
+                        doUpdatesThread();
+                    } catch (InterruptedException e) {
+                        return;
+                    }
+                }
+            }}, "Topology Updates");
+        updatesThread.start();
+
+
+
+        // Register for the OpenFlow messages we want to receive
+        floodlightProvider.addOFMessageListener(OFType.PACKET_IN, this);
+        floodlightProvider.addOFMessageListener(OFType.PORT_STATUS, this);
+        // Register for switch updates
+        floodlightProvider.addOFSwitchListener(this);
+        floodlightProvider.addHAListener(this);
+        floodlightProvider.addInfoProvider("summary", this);
+        if (restApi != null)
+            restApi.addRestletRoutable(new LinkDiscoveryWebRoutable());
+        setControllerTLV();
+    }
+
+    // ****************************************************
+    // Topology Manager's Event History members and methods
+    // ****************************************************
+
+    // Topology Manager event history
+    public EventHistory<EventHistoryTopologySwitch>  evHistTopologySwitch;
+    public EventHistory<EventHistoryTopologyLink>    evHistTopologyLink;
+    public EventHistory<EventHistoryTopologyCluster> evHistTopologyCluster;
+    public EventHistoryTopologySwitch  evTopoSwitch;
+    public EventHistoryTopologyLink    evTopoLink;
+    public EventHistoryTopologyCluster evTopoCluster;
+
+    // Switch Added/Deleted
+    private void evHistTopoSwitch(IOFSwitch sw, EvAction actn, String reason) {
+        if (evTopoSwitch == null) {
+            evTopoSwitch = new EventHistoryTopologySwitch();
+        }
+        evTopoSwitch.dpid     = sw.getId();
+        if ((sw.getChannel() != null) &&
+                (SocketAddress.class.isInstance(
+                                                sw.getChannel().getRemoteAddress()))) {
+            evTopoSwitch.ipv4Addr = 
+                    IPv4.toIPv4Address(((InetSocketAddress)(sw.getChannel().
+                            getRemoteAddress())).getAddress().getAddress());
+            evTopoSwitch.l4Port   =
+                    ((InetSocketAddress)(sw.getChannel().
+                            getRemoteAddress())).getPort();
+        } else {
+            evTopoSwitch.ipv4Addr = 0;
+            evTopoSwitch.l4Port = 0;
+        }
+        evTopoSwitch.reason   = reason;
+        evTopoSwitch = evHistTopologySwitch.put(evTopoSwitch, actn);
+    }
+
+    private void evHistTopoLink(long srcDpid, long dstDpid, short srcPort,
+                                short dstPort, int srcPortState, int dstPortState,
+                                ILinkDiscovery.LinkType linkType,
+                                EvAction actn, String reason) {
+        if (evTopoLink == null) {
+            evTopoLink = new EventHistoryTopologyLink();
+        }
+        evTopoLink.srcSwDpid = srcDpid;
+        evTopoLink.dstSwDpid = dstDpid;
+        evTopoLink.srcSwport = srcPort & 0xffff;
+        evTopoLink.dstSwport = dstPort & 0xffff;
+        evTopoLink.srcPortState = srcPortState;
+        evTopoLink.dstPortState = dstPortState;
+        evTopoLink.reason    = reason;
+        switch (linkType) {
+            case DIRECT_LINK:
+                evTopoLink.linkType = "DIRECT_LINK";
+                break;
+            case MULTIHOP_LINK:
+                evTopoLink.linkType = "MULTIHOP_LINK";
+                break;
+            case TUNNEL:
+                evTopoLink.linkType = "TUNNEL";
+                break;
+            case INVALID_LINK:
+            default:
+                evTopoLink.linkType = "Unknown";
+                break;
+        }
+        evTopoLink = evHistTopologyLink.put(evTopoLink, actn);
+    }
+
+    public void evHistTopoCluster(long dpid, long clusterIdOld,
+                                  long clusterIdNew, EvAction action, String reason) {
+        if (evTopoCluster == null) {
+            evTopoCluster = new EventHistoryTopologyCluster();
+        }
+        evTopoCluster.dpid         = dpid;
+        evTopoCluster.clusterIdOld = clusterIdOld;
+        evTopoCluster.clusterIdNew = clusterIdNew;
+        evTopoCluster.reason       = reason;
+        evTopoCluster = evHistTopologyCluster.put(evTopoCluster, action);
+    }
+
+    @Override
+    public Map<String, Object> getInfo(String type) {
+        if (!"summary".equals(type)) return null;
+
+        Map<String, Object> info = new HashMap<String, Object>();
+
+        int num_links = 0;
+        for (Set<Link> links : switchLinks.values())
+            num_links += links.size();
+        info.put("# inter-switch links", num_links / 2);
+
+        return info;
+    }
+
+    // IHARoleListener
+    @Override
+    public void roleChanged(Role oldRole, Role newRole) {
+        switch(newRole) {
+            case MASTER:
+                if (oldRole == Role.SLAVE) {
+                    if (log.isTraceEnabled()) {
+                        log.trace("Sending LLDPs " +
+                                "to HA change from SLAVE->MASTER");
+                    }
+                    clearAllLinks();
+                    log.debug("Role Change to Master: Rescheduling discovery task.");
+                    discoveryTask.reschedule(1, TimeUnit.MICROSECONDS);
+                }
+                break;
+            case SLAVE:
+                if (log.isTraceEnabled()) {
+                    log.trace("Clearing links due to " +
+                            "HA change to SLAVE");
+                }
+                switchLinks.clear();
+                links.clear();
+                portLinks.clear();
+                portBroadcastDomainLinks.clear();
+                discoverOnAllPorts();
+                break;
+            default:
+                break;
+        }
+    }
+
+    @Override
+    public void controllerNodeIPsChanged(
+                                         Map<String, String> curControllerNodeIPs,
+                                         Map<String, String> addedControllerNodeIPs,
+                                         Map<String, String> removedControllerNodeIPs) {
+        // ignore
+    }
+
+    public boolean isAutoPortFastFeature() {
+        return autoPortFastFeature;
+    }
+
+    public void setAutoPortFastFeature(boolean autoPortFastFeature) {
+        this.autoPortFastFeature = autoPortFastFeature;
+    }
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/linkdiscovery/web/AutoPortFast.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/linkdiscovery/web/AutoPortFast.java
new file mode 100644
index 0000000..8f4f4ad
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/linkdiscovery/web/AutoPortFast.java
@@ -0,0 +1,31 @@
+package net.floodlightcontroller.linkdiscovery.web;
+
+import net.floodlightcontroller.linkdiscovery.ILinkDiscoveryService;
+
+import org.restlet.data.Status;
+import org.restlet.resource.Get;
+import org.restlet.resource.ServerResource;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class AutoPortFast extends ServerResource {
+    protected static Logger log = LoggerFactory.getLogger(AutoPortFast.class);
+
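+    // Handles GET /wm/linkdiscovery/autoportfast/{state}/json where {state}
+    // is enable/true or disable/false; returns the resulting feature state.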
+    @Get("json")
+    public String retrieve() {
+        ILinkDiscoveryService linkDiscovery;
+        linkDiscovery = (ILinkDiscoveryService)getContext().getAttributes().
+                get(ILinkDiscoveryService.class.getCanonicalName());
+
+        String param = ((String)getRequestAttributes().get("state")).toLowerCase();
+        if (param.equals("enable") || param.equals("true")) {
+            linkDiscovery.setAutoPortFastFeature(true);
+        } else if (param.equals("disable") || param.equals("false")) {
+            linkDiscovery.setAutoPortFastFeature(false);
+        }
+        setStatus(Status.SUCCESS_OK, "OK");
+        if (linkDiscovery.isAutoPortFastFeature())
+            return "enabled";
+        else return "disabled";
+    }
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/linkdiscovery/web/LinkDiscoveryWebRoutable.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/linkdiscovery/web/LinkDiscoveryWebRoutable.java
new file mode 100644
index 0000000..3990fba
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/linkdiscovery/web/LinkDiscoveryWebRoutable.java
@@ -0,0 +1,26 @@
+package net.floodlightcontroller.linkdiscovery.web;
+
+import net.floodlightcontroller.restserver.RestletRoutable;
+
+import org.restlet.Context;
+import org.restlet.routing.Router;
+
+public class LinkDiscoveryWebRoutable implements RestletRoutable {
+    /**
+     * Create the Restlet router and bind to the proper resources.
+     */
+    @Override
+    public Router getRestlet(Context context) {
+        Router router = new Router(context);
+        router.attach("/autoportfast/{state}/json", AutoPortFast.class); // enable/true or disable/false
+        return router;
+    }
+
+    /**
+     * Set the base path for link discovery
+     */
+    @Override
+    public String basePath() {
+        return "/wm/linkdiscovery";
+    }
+}
\ No newline at end of file
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/linkdiscovery/web/LinkWithType.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/linkdiscovery/web/LinkWithType.java
new file mode 100644
index 0000000..893e4ad
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/linkdiscovery/web/LinkWithType.java
@@ -0,0 +1,65 @@
+package net.floodlightcontroller.linkdiscovery.web;
+
+import java.io.IOException;
+
+import org.codehaus.jackson.JsonGenerator;
+import org.codehaus.jackson.JsonProcessingException;
+import org.codehaus.jackson.map.JsonSerializer;
+import org.codehaus.jackson.map.SerializerProvider;
+import org.codehaus.jackson.map.annotate.JsonSerialize;
+import org.openflow.util.HexString;
+
+import net.floodlightcontroller.linkdiscovery.ILinkDiscovery.LinkType;
+import net.floodlightcontroller.routing.Link;
+
+/**
+ * This class is both the data structure and the serializer
+ * for a link together with its link type.
+ * @author alexreimers
+ */
+@JsonSerialize(using=LinkWithType.class)
+public class LinkWithType extends JsonSerializer<LinkWithType> {
+    public long srcSwDpid;
+    public short srcPort;
+    public int srcPortState;
+    public long dstSwDpid;
+    public short dstPort;
+    public int dstPortState;
+    public LinkType type;
+
+    // Do NOT delete this, it's required for the serializer
+    public LinkWithType() {}
+    
+    public LinkWithType(Link link,
+                        int srcPortState,
+                        int dstPortState,
+                        LinkType type) {
+        this.srcSwDpid = link.getSrc();
+        this.srcPort = link.getSrcPort();
+        this.srcPortState = srcPortState;
+        this.dstSwDpid = link.getDst();
+        this.dstPort = link.getDstPort();
+        this.dstPortState = dstPortState;
+        this.type = type;
+    }
+
+    @Override
+    public void serialize(LinkWithType lwt, JsonGenerator jgen, SerializerProvider arg2)
+            throws IOException, JsonProcessingException {
+        // You MUST use lwt for the fields, as it's actually a different object.
+        jgen.writeStartObject();
+        jgen.writeStringField("src-switch", HexString.toHexString(lwt.srcSwDpid));
+        jgen.writeNumberField("src-port", lwt.srcPort);
+        jgen.writeNumberField("src-port-state", lwt.srcPortState);
+        jgen.writeStringField("dst-switch", HexString.toHexString(lwt.dstSwDpid));
+        jgen.writeNumberField("dst-port", lwt.dstPort);
+        jgen.writeNumberField("dst-port-state", lwt.dstPortState);
+        jgen.writeStringField("type", lwt.type.toString());
+        jgen.writeEndObject();
+    }
+
+    @Override
+    public Class<LinkWithType> handledType() {
+        return LinkWithType.class;
+    }
+}
\ No newline at end of file
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/linkdiscovery/web/LinksResource.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/linkdiscovery/web/LinksResource.java
new file mode 100644
index 0000000..4cad18e
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/linkdiscovery/web/LinksResource.java
@@ -0,0 +1,37 @@
+package net.floodlightcontroller.linkdiscovery.web;
+
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+
+import net.floodlightcontroller.linkdiscovery.ILinkDiscoveryService;
+import net.floodlightcontroller.linkdiscovery.LinkInfo;
+import net.floodlightcontroller.routing.Link;
+
+import org.restlet.resource.Get;
+import org.restlet.resource.ServerResource;
+
+public class LinksResource extends ServerResource {
+
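+    // Returns every link currently known to link discovery, wrapped in
+    // LinkWithType so the JSON output carries the link type and port states.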
+    @Get("json")
+    public Set<LinkWithType> retrieve() {
+        ILinkDiscoveryService ld = (ILinkDiscoveryService)getContext().getAttributes().
+                get(ILinkDiscoveryService.class.getCanonicalName());
+        Map<Link, LinkInfo> links = new HashMap<Link, LinkInfo>();
+        Set<LinkWithType> returnLinkSet = new HashSet<LinkWithType>();
+
+        if (ld != null) {
+            links.putAll(ld.getLinks());
+            for (Link link: links.keySet()) {
+                LinkInfo info = links.get(link);
+                LinkWithType lwt = new LinkWithType(link,
+                                                    info.getSrcPortState(),
+                                                    info.getDstPortState(),
+                                                    ld.getLinkType(link, info));
+                returnLinkSet.add(lwt);
+            }
+        }
+        return returnLinkSet;
+    }
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/packet/ARP.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/packet/ARP.java
new file mode 100644
index 0000000..e8428ea
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/packet/ARP.java
@@ -0,0 +1,316 @@
+/**
+*    Copyright 2011, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.packet;
+
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+
+/**
+ *
+ * @author David Erickson (daviderickson@cs.stanford.edu)
+ */
+public class ARP extends BasePacket {
+    public static short HW_TYPE_ETHERNET = 0x1;
+
+    public static short PROTO_TYPE_IP = 0x800;
+
+    public static short OP_REQUEST = 0x1;
+    public static short OP_REPLY = 0x2;
+    public static short OP_RARP_REQUEST = 0x3;
+    public static short OP_RARP_REPLY = 0x4;
+
+    protected short hardwareType;
+    protected short protocolType;
+    protected byte hardwareAddressLength;
+    protected byte protocolAddressLength;
+    protected short opCode;
+    protected byte[] senderHardwareAddress;
+    protected byte[] senderProtocolAddress;
+    protected byte[] targetHardwareAddress;
+    protected byte[] targetProtocolAddress;
+
+    /**
+     * @return the hardwareType
+     */
+    public short getHardwareType() {
+        return hardwareType;
+    }
+
+    /**
+     * @param hardwareType the hardwareType to set
+     */
+    public ARP setHardwareType(short hardwareType) {
+        this.hardwareType = hardwareType;
+        return this;
+    }
+
+    /**
+     * @return the protocolType
+     */
+    public short getProtocolType() {
+        return protocolType;
+    }
+
+    /**
+     * @param protocolType the protocolType to set
+     */
+    public ARP setProtocolType(short protocolType) {
+        this.protocolType = protocolType;
+        return this;
+    }
+
+    /**
+     * @return the hardwareAddressLength
+     */
+    public byte getHardwareAddressLength() {
+        return hardwareAddressLength;
+    }
+
+    /**
+     * @param hardwareAddressLength the hardwareAddressLength to set
+     */
+    public ARP setHardwareAddressLength(byte hardwareAddressLength) {
+        this.hardwareAddressLength = hardwareAddressLength;
+        return this;
+    }
+
+    /**
+     * @return the protocolAddressLength
+     */
+    public byte getProtocolAddressLength() {
+        return protocolAddressLength;
+    }
+
+    /**
+     * @param protocolAddressLength the protocolAddressLength to set
+     */
+    public ARP setProtocolAddressLength(byte protocolAddressLength) {
+        this.protocolAddressLength = protocolAddressLength;
+        return this;
+    }
+
+    /**
+     * @return the opCode
+     */
+    public short getOpCode() {
+        return opCode;
+    }
+
+    /**
+     * @param opCode the opCode to set
+     */
+    public ARP setOpCode(short opCode) {
+        this.opCode = opCode;
+        return this;
+    }
+
+    /**
+     * @return the senderHardwareAddress
+     */
+    public byte[] getSenderHardwareAddress() {
+        return senderHardwareAddress;
+    }
+
+    /**
+     * @param senderHardwareAddress the senderHardwareAddress to set
+     */
+    public ARP setSenderHardwareAddress(byte[] senderHardwareAddress) {
+        this.senderHardwareAddress = senderHardwareAddress;
+        return this;
+    }
+
+    /**
+     * @return the senderProtocolAddress
+     */
+    public byte[] getSenderProtocolAddress() {
+        return senderProtocolAddress;
+    }
+
+    /**
+     * @param senderProtocolAddress the senderProtocolAddress to set
+     */
+    public ARP setSenderProtocolAddress(byte[] senderProtocolAddress) {
+        this.senderProtocolAddress = senderProtocolAddress;
+        return this;
+    }
+    
+    public ARP setSenderProtocolAddress(int address) {
+        this.senderProtocolAddress = ByteBuffer.allocate(4).putInt(address).array();
+        return this;
+    }
+
+    /**
+     * @return the targetHardwareAddress
+     */
+    public byte[] getTargetHardwareAddress() {
+        return targetHardwareAddress;
+    }
+
+    /**
+     * @param targetHardwareAddress the targetHardwareAddress to set
+     */
+    public ARP setTargetHardwareAddress(byte[] targetHardwareAddress) {
+        this.targetHardwareAddress = targetHardwareAddress;
+        return this;
+    }
+
+    /**
+     * @return the targetProtocolAddress
+     */
+    public byte[] getTargetProtocolAddress() {
+        return targetProtocolAddress;
+    }
+
+    /**
+     * @return True if gratuitous ARP (SPA = TPA), false otherwise
+     */
+    public boolean isGratuitous() {        
+        assert(senderProtocolAddress.length == targetProtocolAddress.length);
+        
+        int indx = 0;
+        while (indx < senderProtocolAddress.length) {
+            if (senderProtocolAddress[indx] != targetProtocolAddress[indx]) {
+                return false;
+            }
+            indx++;
+        }
+        
+        return true;
+    }
+    
+    /**
+     * @param targetProtocolAddress the targetProtocolAddress to set
+     */
+    public ARP setTargetProtocolAddress(byte[] targetProtocolAddress) {
+        this.targetProtocolAddress = targetProtocolAddress;
+        return this;
+    }
+    
+    public ARP setTargetProtocolAddress(int address) {
+        this.targetProtocolAddress = ByteBuffer.allocate(4).putInt(address).array();
+        return this;
+    }
+
+    @Override
+    public byte[] serialize() {
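+        // total length: 8-byte fixed ARP header plus sender/target hardware and protocol addresses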
+        int length = 8 + (2 * (0xff & this.hardwareAddressLength))
+                + (2 * (0xff & this.protocolAddressLength));
+        byte[] data = new byte[length];
+        ByteBuffer bb = ByteBuffer.wrap(data);
+        bb.putShort(this.hardwareType);
+        bb.putShort(this.protocolType);
+        bb.put(this.hardwareAddressLength);
+        bb.put(this.protocolAddressLength);
+        bb.putShort(this.opCode);
+        bb.put(this.senderHardwareAddress, 0, 0xff & this.hardwareAddressLength);
+        bb.put(this.senderProtocolAddress, 0, 0xff & this.protocolAddressLength);
+        bb.put(this.targetHardwareAddress, 0, 0xff & this.hardwareAddressLength);
+        bb.put(this.targetProtocolAddress, 0, 0xff & this.protocolAddressLength);
+        return data;
+    }
+
+    @Override
+    public IPacket deserialize(byte[] data, int offset, int length) {
+        ByteBuffer bb = ByteBuffer.wrap(data, offset, length);
+        this.hardwareType = bb.getShort();
+        this.protocolType = bb.getShort();
+        this.hardwareAddressLength = bb.get();
+        this.protocolAddressLength = bb.get();
+        this.opCode = bb.getShort();
+        this.senderHardwareAddress = new byte[0xff & this.hardwareAddressLength];
+        bb.get(this.senderHardwareAddress, 0, this.senderHardwareAddress.length);
+        this.senderProtocolAddress = new byte[0xff & this.protocolAddressLength];
+        bb.get(this.senderProtocolAddress, 0, this.senderProtocolAddress.length);
+        this.targetHardwareAddress = new byte[0xff & this.hardwareAddressLength];
+        bb.get(this.targetHardwareAddress, 0, this.targetHardwareAddress.length);
+        this.targetProtocolAddress = new byte[0xff & this.protocolAddressLength];
+        bb.get(this.targetProtocolAddress, 0, this.targetProtocolAddress.length);
+        return this;
+    }
+
+    /* (non-Javadoc)
+     * @see java.lang.Object#hashCode()
+     */
+    @Override
+    public int hashCode() {
+        final int prime = 13121;
+        int result = super.hashCode();
+        result = prime * result + hardwareAddressLength;
+        result = prime * result + hardwareType;
+        result = prime * result + opCode;
+        result = prime * result + protocolAddressLength;
+        result = prime * result + protocolType;
+        result = prime * result + Arrays.hashCode(senderHardwareAddress);
+        result = prime * result + Arrays.hashCode(senderProtocolAddress);
+        result = prime * result + Arrays.hashCode(targetHardwareAddress);
+        result = prime * result + Arrays.hashCode(targetProtocolAddress);
+        return result;
+    }
+
+    /* (non-Javadoc)
+     * @see java.lang.Object#equals(java.lang.Object)
+     */
+    @Override
+    public boolean equals(Object obj) {
+        if (this == obj)
+            return true;
+        if (!super.equals(obj))
+            return false;
+        if (!(obj instanceof ARP))
+            return false;
+        ARP other = (ARP) obj;
+        if (hardwareAddressLength != other.hardwareAddressLength)
+            return false;
+        if (hardwareType != other.hardwareType)
+            return false;
+        if (opCode != other.opCode)
+            return false;
+        if (protocolAddressLength != other.protocolAddressLength)
+            return false;
+        if (protocolType != other.protocolType)
+            return false;
+        if (!Arrays.equals(senderHardwareAddress, other.senderHardwareAddress))
+            return false;
+        if (!Arrays.equals(senderProtocolAddress, other.senderProtocolAddress))
+            return false;
+        if (!Arrays.equals(targetHardwareAddress, other.targetHardwareAddress))
+            return false;
+        if (!Arrays.equals(targetProtocolAddress, other.targetProtocolAddress))
+            return false;
+        return true;
+    }
+
+    /* (non-Javadoc)
+     * @see java.lang.Object#toString()
+     */
+    @Override
+    public String toString() {
+        return "ARP [hardwareType=" + hardwareType + ", protocolType="
+                + protocolType + ", hardwareAddressLength="
+                + hardwareAddressLength + ", protocolAddressLength="
+                + protocolAddressLength + ", opCode=" + opCode
+                + ", senderHardwareAddress="
+                + Arrays.toString(senderHardwareAddress)
+                + ", senderProtocolAddress="
+                + Arrays.toString(senderProtocolAddress)
+                + ", targetHardwareAddress="
+                + Arrays.toString(targetHardwareAddress)
+                + ", targetProtocolAddress="
+                + Arrays.toString(targetProtocolAddress) + "]";
+    }
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/packet/BPDU.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/packet/BPDU.java
new file mode 100644
index 0000000..6c27216
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/packet/BPDU.java
@@ -0,0 +1,138 @@
+/**
+*    Copyright 2011, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.packet;
+
+import java.nio.ByteBuffer;
+
+/**
+ * This class represents a Rapid Spanning Tree Protocol (RSTP)
+ * Bridge Protocol Data Unit (BPDU).
+ * @author alexreimers
+ */
+public class BPDU extends BasePacket {
+    public enum BPDUType {
+        CONFIG,
+        TOPOLOGY_CHANGE;
+    }
+    
+    private final long destMac = 0x0180c2000000L; // 01-80-c2-00-00-00
+    
+    // TODO - check this for RSTP
+    private LLC llcHeader;
+    private short protocolId = 0;
+    private byte version = 0;
+    private byte type;
+    private byte flags;
+    private byte[] rootBridgeId;
+    private int rootPathCost;
+    private byte[] senderBridgeId; // switch cluster MAC
+    private short portId; // port it was transmitted from
+    private short messageAge; // 256ths of a second
+    private short maxAge; // 256ths of a second
+    private short helloTime; // 256ths of a second
+    private short forwardDelay; // 256ths of a second
+    
+    public BPDU(BPDUType type) {
+        rootBridgeId = new byte[8];
+        senderBridgeId = new byte[8];
+        
+        llcHeader = new LLC();
+        llcHeader.setDsap((byte) 0x42);
+        llcHeader.setSsap((byte) 0x42);
+        llcHeader.setCtrl((byte) 0x03);
+        
+        switch(type) {
+            case CONFIG:
+                this.type = 0x0;
+                break;
+            case TOPOLOGY_CHANGE:
+                this.type = (byte) 0x80; // 1000 0000
+                break;
+            default:
+                this.type = 0;
+                break;
+        }
+    }
+    
+    @Override
+    public byte[] serialize() {
+        byte[] data;
+        // TODO check these
+        if (type == 0x0) { 
+            // config
+            data = new byte[38];
+        } else {
+            // topology change
+            data = new byte[7]; // LLC + TC notification
+        }
+        
+        ByteBuffer bb = ByteBuffer.wrap(data);
+        // Serialize the LLC header
+        byte[] llc = llcHeader.serialize();
+        bb.put(llc, 0, llc.length);
+        bb.putShort(protocolId);
+        bb.put(version);
+        bb.put(type);
+        
+        if (type == 0x0) {
+            bb.put(flags);
+            bb.put(rootBridgeId, 0, rootBridgeId.length);
+            bb.putInt(rootPathCost);
+            bb.put(senderBridgeId, 0, senderBridgeId.length);
+            bb.putShort(portId);
+            bb.putShort(messageAge);
+            bb.putShort(maxAge);
+            bb.putShort(helloTime);
+            bb.putShort(forwardDelay);
+        }
+        
+        return data;
+    }
+
+    @Override
+    public IPacket deserialize(byte[] data, int offset, int length) {
+        ByteBuffer bb = ByteBuffer.wrap(data, offset, length);
+        
+        // LLC header
+        llcHeader.deserialize(data, offset, 3);
+        bb.position(bb.position() + 3); // advance past the 3-byte LLC header so the BPDU fields are read from the correct offset
+        
+        this.protocolId = bb.getShort();
+        this.version = bb.get();
+        this.type = bb.get();
+        
+        // These fields only exist if it's a configuration BPDU
+        if (this.type == 0x0) {
+            this.flags = bb.get();
+            bb.get(rootBridgeId, 0, 6);
+            this.rootPathCost = bb.getInt();
+            bb.get(this.senderBridgeId, 0, 6);
+            this.portId = bb.getShort();
+            this.messageAge = bb.getShort();
+            this.maxAge = bb.getShort();
+            this.helloTime = bb.getShort();
+            this.forwardDelay = bb.getShort();
+        }
+        // TODO should we set other fields to 0?
+        
+        return this;
+    }
+
+    public long getDestMac() {
+        return destMac;
+    }
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/packet/BSN.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/packet/BSN.java
new file mode 100644
index 0000000..27c8f70
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/packet/BSN.java
@@ -0,0 +1,172 @@
+/**
+*    Copyright 2012, Big Switch Networks, Inc. 
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+/**
+ * 
+ */
+package net.floodlightcontroller.packet;
+
+import java.nio.ByteBuffer;
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * @author Shudong Zhou (shudong.zhou@bigswitch.com)
+ *
+ */
+public class BSN extends BasePacket {
+	public static final int BSN_MAGIC = 0x20000604;
+	public static final short BSN_VERSION_CURRENT = 0x0;
+	public static final short BSN_TYPE_PROBE = 0x1;
+	public static final short BSN_TYPE_BDDP  = 0x2;
+	public static Map<Short, Class<? extends IPacket>> typeClassMap;
+	
+    static {
+        typeClassMap = new HashMap<Short, Class<? extends IPacket>>();
+        typeClassMap.put(BSN_TYPE_PROBE, BSNPROBE.class);
+        typeClassMap.put(BSN_TYPE_BDDP, LLDP.class);
+    }
+
+	protected short type;
+	protected short version;
+
+	public BSN() {
+    	version = BSN_VERSION_CURRENT;
+	}
+	
+    public BSN(short type) {
+    	this.type = type;
+    	version = BSN_VERSION_CURRENT;
+    }
+
+    public short getType() {
+		return type;
+	}
+
+	public BSN setType(short type) {
+		this.type = type;
+		return this;
+	}
+	
+    public short getVersion() {
+		return version;
+	}
+
+	public BSN setVersion(short version) {
+		this.version = version;
+		return this;
+	}
+
+    @Override
+    public byte[] serialize() {
+    	short length = 4 /* magic */ + 2 /* type */ + 2 /* version */;
+    	
+    	byte[] payloadData = null;
+    	if (this.payload != null) {
+            payload.setParent(this);
+            payloadData = payload.serialize();
+            length += payloadData.length;
+        }
+    
+        byte[] data = new byte[length];
+        ByteBuffer bb = ByteBuffer.wrap(data);
+        bb.putInt(BSN_MAGIC);
+        bb.putShort(this.type);
+        bb.putShort(this.version);
+        if (payloadData != null)
+        	bb.put(payloadData);
+
+        if (this.parent != null && this.parent instanceof Ethernet)
+            ((Ethernet)this.parent).setEtherType(Ethernet.TYPE_BSN);
+
+        return data;
+    }
+
+    @Override
+    public IPacket deserialize(byte[] data, int offset, int length) {
+        ByteBuffer bb = ByteBuffer.wrap(data, offset, length);
+        
+        int magic = bb.getInt();
+        if (magic != BSN_MAGIC) {
+        	throw new RuntimeException("Invalid BSN magic " + magic);
+        }
+        
+        this.type = bb.getShort();
+        this.version = bb.getShort();
+        if (this.version != BSN_VERSION_CURRENT) {
+        	throw new RuntimeException(
+        			"Invalid BSN packet version " + this.version + ", should be "
+        	        + BSN_VERSION_CURRENT);
+        }
+        
+        IPacket payload;
+        if (typeClassMap.containsKey(this.type)) {
+            Class<? extends IPacket> clazz = typeClassMap.get(this.type);
+            try {
+                payload = clazz.newInstance();
+            } catch (Exception e) {
+                throw new RuntimeException("Error parsing payload for BSN packet", e);
+            }
+        } else {
+            payload = new Data();
+        }
+        
+        this.payload = payload.deserialize(data, bb.position(), bb.limit() - bb.position());
+        this.payload.setParent(this);
+        
+        return this;
+    }
+
+    /* (non-Javadoc)
+     * @see java.lang.Object#hashCode()
+     */
+    @Override
+    public int hashCode() {
+        final int prime = 883;
+        int result = super.hashCode();
+        result = prime * result + version;
+        result = prime * result + type;
+        return result;
+    }
+
+    /* (non-Javadoc)
+     * @see java.lang.Object#equals(java.lang.Object)
+     */
+    @Override
+    public boolean equals(Object obj) {
+        if (this == obj)
+            return true;
+        if (!super.equals(obj))
+            return false;
+        if (!(obj instanceof BSN))
+            return false;
+        BSN other = (BSN) obj;
+        return (type == other.type &&
+        		version == other.version);
+    }
+    
+    public String toString() {
+    	StringBuffer sb = new StringBuffer("\n");
+    	sb.append("BSN packet");
+        if (typeClassMap.containsKey(this.type))
+        	sb.append(" type: " + typeClassMap.get(this.type).getCanonicalName());
+        else
+        	sb.append(" type: " + this.type);
+        
+    	return sb.toString();
+    }
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/packet/BSNPROBE.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/packet/BSNPROBE.java
new file mode 100644
index 0000000..720b45f
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/packet/BSNPROBE.java
@@ -0,0 +1,197 @@
+/**
+*    Copyright 2012, Big Switch Networks, Inc. 
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+/**
+ * 
+ */
+package net.floodlightcontroller.packet;
+
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import org.openflow.util.HexString;
+
+/**
+ * @author Shudong Zhou (shudong.zhou@bigswitch.com)
+ *
+ */
+public class BSNPROBE extends BasePacket {	
+	protected long controllerId;
+	protected int sequenceId;
+	protected byte[] srcMac;
+	protected byte[] dstMac;
+	protected long srcSwDpid;
+	protected int srcPortNo;
+
+    public BSNPROBE() {
+        srcMac = new byte[6];
+        dstMac = new byte[6];
+    }
+
+
+	public long getControllerId() {
+		return this.controllerId;
+	}
+
+	public BSNPROBE setControllerId(long controllerId) {
+		this.controllerId = controllerId;
+		return this;
+	}
+
+	public int getSequenceId() {
+		return sequenceId;
+	}
+
+	public BSNPROBE setSequenceId(int sequenceId) {
+		this.sequenceId = sequenceId;
+		return this;
+	}
+	
+    public byte[] getSrcMac() {
+        return this.srcMac;
+    }
+
+    public BSNPROBE setSrcMac(byte[] srcMac) {
+        this.srcMac = srcMac;
+        return this;
+    }
+    
+	public byte[] getDstMac() {
+		return dstMac;
+	}
+
+	public BSNPROBE setDstMac(byte[] dstMac) {
+		this.dstMac = dstMac;
+		return this;
+	}
+
+	public long getSrcSwDpid() {
+		return srcSwDpid;
+	}
+
+	public BSNPROBE setSrcSwDpid(long srcSwDpid) {
+		this.srcSwDpid = srcSwDpid;
+		return this;
+	}
+
+	public int getSrcPortNo() {
+		return srcPortNo;
+	}
+
+	public BSNPROBE setSrcPortNo(int srcPortNo) {
+		this.srcPortNo = srcPortNo;
+		return this;
+	}
+
+    @Override
+    public byte[] serialize() {
+    	short length = 8 /* controllerId */ + 4 /* seqId */
+    			+ 12 /* srcMac dstMac */ + 8 /* srcSwDpid */ + 4 /* srcPortNo */;
+    	
+    	byte[] payloadData = null;
+    	if (this.payload != null) {
+            payload.setParent(this);
+            payloadData = payload.serialize();
+            length += payloadData.length;
+        }
+    
+        byte[] data = new byte[length];
+        ByteBuffer bb = ByteBuffer.wrap(data);
+        bb.putLong(this.controllerId);
+        bb.putInt(this.sequenceId);
+        bb.put(this.srcMac);
+        bb.put(this.dstMac);
+        bb.putLong(this.srcSwDpid);
+        bb.putInt(this.srcPortNo);
+        if (payloadData != null)
+        	bb.put(payloadData);
+
+        if (this.parent != null && this.parent instanceof BSN)
+            ((BSN)this.parent).setType(BSN.BSN_TYPE_PROBE);
+
+        return data;
+    }
+
+    @Override
+    public IPacket deserialize(byte[] data, int offset, int length) {
+        ByteBuffer bb = ByteBuffer.wrap(data, offset, length);
+        
+        controllerId = bb.getLong();
+        sequenceId = bb.getInt();
+        bb.get(this.srcMac, 0, 6);
+        bb.get(this.dstMac, 0, 6);
+        this.srcSwDpid = bb.getLong();
+        this.srcPortNo = bb.getInt();
+        
+        if (bb.hasRemaining()) {
+        	this.payload = new Data();
+	        this.payload = payload.deserialize(data, bb.position(), bb.limit() - bb.position());
+	        this.payload.setParent(this);
+        }
+        
+        return this;
+    }
+
+    /* (non-Javadoc)
+     * @see java.lang.Object#hashCode()
+     */
+    @Override
+    public int hashCode() {
+        final int prime = 883;
+        int result = super.hashCode();
+        result = prime * result + Arrays.hashCode(srcMac);
+        result = prime * result + Arrays.hashCode(dstMac);
+        result = prime * result + (int) (srcSwDpid >> 32) + (int) srcSwDpid;
+        result = prime * result + srcPortNo;
+        return result;
+    }
+
+    /* (non-Javadoc)
+     * @see java.lang.Object#equals(java.lang.Object)
+     */
+    @Override
+    public boolean equals(Object obj) {
+        if (this == obj)
+            return true;
+        if (!super.equals(obj))
+            return false;
+        if (!(obj instanceof BSNPROBE))
+            return false;
+        BSNPROBE other = (BSNPROBE) obj;
+        if (!Arrays.equals(srcMac, other.srcMac))
+            return false;
+        if (!Arrays.equals(dstMac, other.dstMac))
+        	return false;
+        return (sequenceId == other.sequenceId &&
+        	    srcSwDpid == other.srcSwDpid &&
+        	    srcPortNo == other.srcPortNo
+        	    );
+    }
+    
+    public String toString() {
+    	StringBuffer sb = new StringBuffer("\n");
+    	sb.append("BSN Probe packet");
+    	sb.append("\nSource Mac: ");
+    	sb.append(HexString.toHexString(srcMac));
+    	sb.append("\nDestination Mac: ");
+    	sb.append(HexString.toHexString(dstMac));
+    	sb.append("\nSource Switch: ");
+    	sb.append(HexString.toHexString(srcSwDpid));
+    	sb.append(" port: " + srcPortNo);
+    	sb.append("\nSequence No.:" + sequenceId);
+    	
+    	return sb.toString();
+    }
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/packet/BasePacket.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/packet/BasePacket.java
new file mode 100644
index 0000000..4ecfded
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/packet/BasePacket.java
@@ -0,0 +1,116 @@
+/**
+*    Copyright 2011, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.packet;
+
+
+/**
+*
+* @author David Erickson (daviderickson@cs.stanford.edu)
+*/
+public abstract class BasePacket implements IPacket {
+    protected IPacket parent;
+    protected IPacket payload;
+
+    /**
+     * @return the parent
+     */
+    @Override
+    public IPacket getParent() {
+        return parent;
+    }
+
+    /**
+     * @param parent the parent to set
+     */
+    @Override
+    public IPacket setParent(IPacket parent) {
+        this.parent = parent;
+        return this;
+    }
+
+    /**
+     * @return the payload
+     */
+    @Override
+    public IPacket getPayload() {
+        return payload;
+    }
+
+    /**
+     * @param payload the payload to set
+     */
+    @Override
+    public IPacket setPayload(IPacket payload) {
+        this.payload = payload;
+        return this;
+    }
+    
+    @Override
+    public void resetChecksum() {
+        if (this.parent != null)
+            this.parent.resetChecksum();
+    }
+
+    /* (non-Javadoc)
+     * @see java.lang.Object#hashCode()
+     */
+    @Override
+    public int hashCode() {
+        final int prime = 6733;
+        int result = 1;
+        result = prime * result + ((payload == null) ? 0 : payload.hashCode());
+        return result;
+    }
+
+    /* (non-Javadoc)
+     * @see java.lang.Object#equals(java.lang.Object)
+     */
+    @Override
+    public boolean equals(Object obj) {
+        if (this == obj)
+            return true;
+        if (obj == null)
+            return false;
+        if (!(obj instanceof BasePacket))
+            return false;
+        BasePacket other = (BasePacket) obj;
+        if (payload == null) {
+            if (other.payload != null)
+                return false;
+        } else if (!payload.equals(other.payload))
+            return false;
+        return true;
+    }
+    
+    @Override
+    public Object clone() {
+        IPacket pkt;
+        try {
+            pkt = this.getClass().newInstance();
+        } catch (Exception e) {
+            throw new RuntimeException("Could not clone packet", e);
+        }
+        // TODO: we are using serialize()/deserialize() to perform the 
+        // cloning. Not the most efficient way but simple. We can revisit
+        // if we hit performance problems.
+        byte[] data = this.serialize();
+        pkt.deserialize(data, 0, data.length);
+        pkt.setParent(this.parent);
+        return pkt;
+    }
+}
\ No newline at end of file
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/packet/DHCP.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/packet/DHCP.java
new file mode 100644
index 0000000..f73d9c9
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/packet/DHCP.java
@@ -0,0 +1,517 @@
+/**
+*    Copyright 2011, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.packet;
+
+import java.io.UnsupportedEncodingException;
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.ListIterator;
+
+/**
+ *
+ * @author David Erickson (daviderickson@cs.stanford.edu)
+ */
+public class DHCP extends BasePacket {
+    /**
+     * ------------------------------------------
+     * |op (1)  | htype(1) | hlen(1) | hops(1)  |
+     * ------------------------------------------
+     * |        xid (4)                         |
+     * ------------------------------------------
+     * |  secs (2)          |   flags (2)       |
+     * ------------------------------------------
+     * |            ciaddr (4)                  |
+     * ------------------------------------------
+     * |            yiaddr (4)                  |
+     * ------------------------------------------
+     * |            siaddr (4)                  |
+     * ------------------------------------------
+     * |            giaddr (4)                  |
+     * ------------------------------------------
+     * |            chaddr (16)                 |
+     * ------------------------------------------
+     * |            sname (64)                  |
+     * ------------------------------------------
+     * |            file (128)                  |
+     * ------------------------------------------
+     * |            options (312)               |
+     * ------------------------------------------
+     * 
+     */
+    // Header + magic without options
+    public static int MIN_HEADER_LENGTH = 240;
+    public static byte OPCODE_REQUEST = 0x1;
+    public static byte OPCODE_REPLY = 0x2;
+
+    public static byte HWTYPE_ETHERNET = 0x1;
+    
+    public enum DHCPOptionCode {
+        OptionCode_SubnetMask           ((byte)1),
+        OptionCode_RequestedIP          ((byte)50),
+        OptionCode_LeaseTime            ((byte)51),
+        OptionCode_MessageType          ((byte)53),
+        OptionCode_DHCPServerIp         ((byte)54),
+        OptionCode_RequestedParameters  ((byte)55),
+        OptionCode_RenewalTime          ((byte)58),
+        OPtionCode_RebindingTime        ((byte)59),
+        OptionCode_ClientID             ((byte)61),
+        OptionCode_END                  ((byte)255);
+    
+        protected byte value;
+        
+        private DHCPOptionCode(byte value) {
+            this.value = value;
+        }
+        
+        public byte getValue() {
+            return value;
+        }
+    }
+    
+    protected byte opCode;
+    protected byte hardwareType;
+    protected byte hardwareAddressLength;
+    protected byte hops;
+    protected int transactionId;
+    protected short seconds;
+    protected short flags;
+    protected int clientIPAddress;
+    protected int yourIPAddress;
+    protected int serverIPAddress;
+    protected int gatewayIPAddress;
+    protected byte[] clientHardwareAddress;
+    protected String serverName;
+    protected String bootFileName;
+    protected List<DHCPOption> options = new ArrayList<DHCPOption>();
+
+    /**
+     * @return the opCode
+     */
+    public byte getOpCode() {
+        return opCode;
+    }
+
+    /**
+     * @param opCode the opCode to set
+     */
+    public DHCP setOpCode(byte opCode) {
+        this.opCode = opCode;
+        return this;
+    }
+
+    /**
+     * @return the hardwareType
+     */
+    public byte getHardwareType() {
+        return hardwareType;
+    }
+
+    /**
+     * @param hardwareType the hardwareType to set
+     */
+    public DHCP setHardwareType(byte hardwareType) {
+        this.hardwareType = hardwareType;
+        return this;
+    }
+
+    /**
+     * @return the hardwareAddressLength
+     */
+    public byte getHardwareAddressLength() {
+        return hardwareAddressLength;
+    }
+
+    /**
+     * @param hardwareAddressLength the hardwareAddressLength to set
+     */
+    public DHCP setHardwareAddressLength(byte hardwareAddressLength) {
+        this.hardwareAddressLength = hardwareAddressLength;
+        return this;
+    }
+
+    /**
+     * @return the hops
+     */
+    public byte getHops() {
+        return hops;
+    }
+
+    /**
+     * @param hops the hops to set
+     */
+    public DHCP setHops(byte hops) {
+        this.hops = hops;
+        return this;
+    }
+
+    /**
+     * @return the transactionId
+     */
+    public int getTransactionId() {
+        return transactionId;
+    }
+
+    /**
+     * @param transactionId the transactionId to set
+     */
+    public DHCP setTransactionId(int transactionId) {
+        this.transactionId = transactionId;
+        return this;
+    }
+
+    /**
+     * @return the seconds
+     */
+    public short getSeconds() {
+        return seconds;
+    }
+
+    /**
+     * @param seconds the seconds to set
+     */
+    public DHCP setSeconds(short seconds) {
+        this.seconds = seconds;
+        return this;
+    }
+
+    /**
+     * @return the flags
+     */
+    public short getFlags() {
+        return flags;
+    }
+
+    /**
+     * @param flags the flags to set
+     */
+    public DHCP setFlags(short flags) {
+        this.flags = flags;
+        return this;
+    }
+
+    /**
+     * @return the clientIPAddress
+     */
+    public int getClientIPAddress() {
+        return clientIPAddress;
+    }
+
+    /**
+     * @param clientIPAddress the clientIPAddress to set
+     */
+    public DHCP setClientIPAddress(int clientIPAddress) {
+        this.clientIPAddress = clientIPAddress;
+        return this;
+    }
+
+    /**
+     * @return the yourIPAddress
+     */
+    public int getYourIPAddress() {
+        return yourIPAddress;
+    }
+
+    /**
+     * @param yourIPAddress the yourIPAddress to set
+     */
+    public DHCP setYourIPAddress(int yourIPAddress) {
+        this.yourIPAddress = yourIPAddress;
+        return this;
+    }
+
+    /**
+     * @return the serverIPAddress
+     */
+    public int getServerIPAddress() {
+        return serverIPAddress;
+    }
+
+    /**
+     * @param serverIPAddress the serverIPAddress to set
+     */
+    public DHCP setServerIPAddress(int serverIPAddress) {
+        this.serverIPAddress = serverIPAddress;
+        return this;
+    }
+
+    /**
+     * @return the gatewayIPAddress
+     */
+    public int getGatewayIPAddress() {
+        return gatewayIPAddress;
+    }
+
+    /**
+     * @param gatewayIPAddress the gatewayIPAddress to set
+     */
+    public DHCP setGatewayIPAddress(int gatewayIPAddress) {
+        this.gatewayIPAddress = gatewayIPAddress;
+        return this;
+    }
+
+    /**
+     * @return the clientHardwareAddress
+     */
+    public byte[] getClientHardwareAddress() {
+        return clientHardwareAddress;
+    }
+
+    /**
+     * @param clientHardwareAddress the clientHardwareAddress to set
+     */
+    public DHCP setClientHardwareAddress(byte[] clientHardwareAddress) {
+        this.clientHardwareAddress = clientHardwareAddress;
+        return this;
+    }
+    
+    /**
+     * Gets a specific DHCP option parameter
+     * @param optionCode The option code to get
+     * @return The value of the option if it exists, null otherwise
+     */
+    public DHCPOption getOption(DHCPOptionCode optionCode) {
+        for (DHCPOption opt : options) {
+            if (opt.code == optionCode.value)
+                return opt;
+        }
+        return null;
+    }
+
+    /**
+     * @return the options
+     */
+    public List<DHCPOption> getOptions() {
+        return options;
+    }
+
+    /**
+     * @param options the options to set
+     */
+    public DHCP setOptions(List<DHCPOption> options) {
+        this.options = options;
+        return this;
+    }
+
+    /**
+     * @return the packet type, based on option 53 (the DHCP message type)
+     */
+    public DHCPPacketType getPacketType() {
+        ListIterator<DHCPOption> lit = options.listIterator();
+        while (lit.hasNext()) {
+            DHCPOption option = lit.next();
+            // only option 53 (DHCP message type) is of interest
+            if (option.getCode() == 53) {
+                return DHCPPacketType.getType(option.getData()[0]);
+            }
+        }
+        return null;
+    }
+    
+    /**
+     * @return the serverName
+     */
+    public String getServerName() {
+        return serverName;
+    }
+
+    /**
+     * @param serverName the serverName to set
+     */
+    public DHCP setServerName(String serverName) {
+        this.serverName = serverName;
+        return this;
+    }
+
+    /**
+     * @return the bootFileName
+     */
+    public String getBootFileName() {
+        return bootFileName;
+    }
+
+    /**
+     * @param bootFileName the bootFileName to set
+     */
+    public DHCP setBootFileName(String bootFileName) {
+        this.bootFileName = bootFileName;
+        return this;
+    }
+
+    @Override
+    public byte[] serialize() {
+        // not guaranteed to retain length/exact format
+        resetChecksum();
+
+        // minimum size 240 including magic cookie, options generally padded to 300
+        int optionsLength = 0;
+        for (DHCPOption option : this.options) {
+            if (option.getCode() == 0 || option.getCode() == 255) {
+                optionsLength += 1;
+            } else {
+                optionsLength += 2 + (int)(0xff & option.getLength());
+            }
+        }
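+        // pad the options field so the fixed 240-byte header plus options reaches the common 300-byte message size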
+        int optionsPadLength = 0;
+        if (optionsLength < 60)
+            optionsPadLength = 60 - optionsLength;
+
+        byte[] data = new byte[240+optionsLength+optionsPadLength];
+        ByteBuffer bb = ByteBuffer.wrap(data);
+        bb.put(this.opCode);
+        bb.put(this.hardwareType);
+        bb.put(this.hardwareAddressLength);
+        bb.put(this.hops);
+        bb.putInt(this.transactionId);
+        bb.putShort(this.seconds);
+        bb.putShort(this.flags);
+        bb.putInt(this.clientIPAddress);
+        bb.putInt(this.yourIPAddress);
+        bb.putInt(this.serverIPAddress);
+        bb.putInt(this.gatewayIPAddress);
+        bb.put(this.clientHardwareAddress);
+        if (this.clientHardwareAddress.length < 16) {
+            for (int i = 0; i < (16 - this.clientHardwareAddress.length); ++i) {
+                bb.put((byte) 0x0);
+            }
+        }
+        writeString(this.serverName, bb, 64);
+        writeString(this.bootFileName, bb, 128);
+        // DHCP magic cookie: 99.130.83.99 (0x63825363)
+        bb.put((byte) 0x63);
+        bb.put((byte) 0x82);
+        bb.put((byte) 0x53);
+        bb.put((byte) 0x63);
+        for (DHCPOption option : this.options) {
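+            // pad (0) and end (255) options are a single code byte; all others are code, length, data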
+            int code = option.getCode() & 0xff;
+            bb.put((byte) code);
+            if ((code != 0) && (code != 255)) {
+                bb.put(option.getLength());
+                bb.put(option.getData());
+            }
+        }
+        // assume the rest is padded out with zeroes
+        return data;
+    }
+
+    protected void writeString(String string, ByteBuffer bb, int maxLength) {
+        if (string == null) {
+            for (int i = 0; i < maxLength; ++i) {
+                bb.put((byte) 0x0);
+            }
+        } else {
+            byte[] bytes = null;
+            try {
+                 bytes = string.getBytes("ascii");
+            } catch (UnsupportedEncodingException e) {
+                throw new RuntimeException("Failure encoding server name", e);
+            }
+            int writeLength = bytes.length;
+            if (writeLength > maxLength) {
+                writeLength = maxLength;
+            }
+            bb.put(bytes, 0, writeLength);
+            for (int i = writeLength; i < maxLength; ++i) {
+                bb.put((byte) 0x0);
+            }
+        }
+    }
+
+    @Override
+    public IPacket deserialize(byte[] data, int offset, int length) {
+        ByteBuffer bb = ByteBuffer.wrap(data, offset, length);
+        if (bb.remaining() < MIN_HEADER_LENGTH) {
+            return this;
+        }
+        
+        this.opCode = bb.get();
+        this.hardwareType = bb.get();
+        this.hardwareAddressLength = bb.get();
+        this.hops = bb.get();
+        this.transactionId = bb.getInt();
+        this.seconds = bb.getShort();
+        this.flags = bb.getShort();
+        this.clientIPAddress = bb.getInt();
+        this.yourIPAddress = bb.getInt();
+        this.serverIPAddress = bb.getInt();
+        this.gatewayIPAddress = bb.getInt();
+        int hardwareAddressLength = 0xff & this.hardwareAddressLength;
+        this.clientHardwareAddress = new byte[hardwareAddressLength];
+
+        bb.get(this.clientHardwareAddress);
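+        // chaddr is a fixed 16-byte field; skip the bytes beyond the actual hardware address length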
+        for (int i = hardwareAddressLength; i < 16; ++i)
+            bb.get();
+        this.serverName = readString(bb, 64);
+        this.bootFileName = readString(bb, 128);
+        // skip the 4-byte DHCP magic cookie
+        bb.get();
+        bb.get();
+        bb.get();
+        bb.get();
+        // read options
+        while (bb.hasRemaining()) {
+            DHCPOption option = new DHCPOption();
+            int code = 0xff & bb.get(); // convert signed byte to int in range [0,255]
+            option.setCode((byte) code);
+            if (code == 0) {
+                // skip these
+                continue;
+            } else if (code != 255) {
+                if (bb.hasRemaining()) {
+                    int l = 0xff & bb.get(); // convert signed byte to int in range [0,255]
+                    option.setLength((byte) l);
+                    if (bb.remaining() >= l) {
+                        byte[] optionData = new byte[l];
+                        bb.get(optionData);
+                        option.setData(optionData);
+                    } else {
+                        // Skip the invalid option and set the END option
+                        code = 0xff;
+                        option.setCode((byte)code);
+                        option.setLength((byte) 0);
+                    }
+                } else {
+                    // Skip the invalid option and set the END option
+                    code = 0xff;
+                    option.setCode((byte)code);
+                    option.setLength((byte) 0);
+                }
+            }
+            this.options.add(option);
+            if (code == 255) {
+                // remaining bytes are supposed to be 0, but ignore them just in case
+                break;
+            }
+        }
+
+        return this;
+    }
+
+    protected String readString(ByteBuffer bb, int maxLength) {
+        byte[] bytes = new byte[maxLength];
+        bb.get(bytes);
+        String result = null;
+        try {
+            result = new String(bytes, "ascii").trim();
+        } catch (UnsupportedEncodingException e) {
+            throw new RuntimeException("Failure decoding string", e);
+        }
+        return result;
+    }
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/packet/DHCPOption.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/packet/DHCPOption.java
new file mode 100644
index 0000000..1fcc324
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/packet/DHCPOption.java
@@ -0,0 +1,118 @@
+/**
+*    Copyright 2011, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.packet;
+
+import java.util.Arrays;
+
+/**
+ *
+ * @author David Erickson (daviderickson@cs.stanford.edu)
+ */
+public class DHCPOption {
+    protected byte code;
+    protected byte length;
+    protected byte[] data;
+
+    /**
+     * @return the code
+     */
+    public byte getCode() {
+        return code;
+    }
+
+    /**
+     * @param code the code to set
+     */
+    public DHCPOption setCode(byte code) {
+        this.code = code;
+        return this;
+    }
+
+    /**
+     * @return the length
+     */
+    public byte getLength() {
+        return length;
+    }
+
+    /**
+     * @param length the length to set
+     */
+    public DHCPOption setLength(byte length) {
+        this.length = length;
+        return this;
+    }
+
+    /**
+     * @return the data
+     */
+    public byte[] getData() {
+        return data;
+    }
+
+    /**
+     * @param data the data to set
+     */
+    public DHCPOption setData(byte[] data) {
+        this.data = data;
+        return this;
+    }
+
+    /* (non-Javadoc)
+     * @see java.lang.Object#hashCode()
+     */
+    @Override
+    public int hashCode() {
+        final int prime = 31;
+        int result = 1;
+        result = prime * result + code;
+        result = prime * result + Arrays.hashCode(data);
+        result = prime * result + length;
+        return result;
+    }
+
+    /* (non-Javadoc)
+     * @see java.lang.Object#equals(java.lang.Object)
+     */
+    @Override
+    public boolean equals(Object obj) {
+        if (this == obj)
+            return true;
+        if (obj == null)
+            return false;
+        if (!(obj instanceof DHCPOption))
+            return false;
+        DHCPOption other = (DHCPOption) obj;
+        if (code != other.code)
+            return false;
+        if (!Arrays.equals(data, other.data))
+            return false;
+        if (length != other.length)
+            return false;
+        return true;
+    }
+
+    /* (non-Javadoc)
+     * @see java.lang.Object#toString()
+     */
+    @Override
+    public String toString() {
+        return "DHCPOption [code=" + code + ", length=" + length + ", data="
+                + Arrays.toString(data) + "]";
+    }
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/packet/DHCPPacketType.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/packet/DHCPPacketType.java
new file mode 100644
index 0000000..3417a18
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/packet/DHCPPacketType.java
@@ -0,0 +1,116 @@
+/**
+*    Copyright 2011, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.packet;
+
+public enum DHCPPacketType {
+    // From RFC 1533
+    DHCPDISCOVER        (1),
+    DHCPOFFER           (2),
+    DHCPREQUEST         (3),
+    DHCPDECLINE         (4),
+    DHCPACK             (5),
+    DHCPNAK             (6),
+    DHCPRELEASE         (7),
+    
+    // From RFC2132
+    DHCPINFORM          (8),
+    
+    // From RFC3203
+    DHCPFORCERENEW      (9),
+    
+    // From RFC4388
+    DHCPLEASEQUERY      (10),
+    DHCPLEASEUNASSIGNED (11),
+    DHCPLEASEUNKNOWN    (12),
+    DHCPLEASEACTIVE     (13);
+    
+    protected int value;
+    
+    private DHCPPacketType(int value) {
+        this.value = value;
+    }
+    
+    public int getValue() {
+        return value;
+    }
+    
+    public String toString(){
+        switch (value) {
+            case 1:
+                return "DHCPDISCOVER";
+            case 2:
+                return "DHCPOFFER";
+            case 3:
+                return "DHCPREQUEST";
+            case 4:
+                return "DHCPDECLINE";
+            case 5:
+                return "DHCPACK";
+            case 6:
+                return "DHCPNAK";
+            case 7:
+                return "DHCPRELEASE";
+            case 8:
+                return "DHCPINFORM";
+            case 9:
+                return "DHCPFORCERENEW";
+            case 10:
+                return "DHCPLEASEQUERY";
+            case 11:
+                return "DHCPLEASEUNASSIGNED";
+            case 12:
+                return "DHCPLEASEUNKNOWN";
+            case 13:
+                return "DHCPLEASEACTIVE";
+        }
+        
+        return null;
+    }
+    public static DHCPPacketType getType(int value) {
+        switch (value) {
+            case 1:
+                return DHCPDISCOVER;
+            case 2:
+                return DHCPOFFER;
+            case 3:
+                return DHCPREQUEST;
+            case 4:
+                return DHCPDECLINE;
+            case 5:
+                return DHCPACK;
+            case 6:
+                return DHCPNAK;
+            case 7:
+                return DHCPRELEASE;
+            case 8:
+                return DHCPINFORM;
+            case 9:
+                return DHCPFORCERENEW;
+            case 10:
+                return DHCPLEASEQUERY;
+            case 11:
+                return DHCPLEASEUNASSIGNED;
+            case 12:
+                return DHCPLEASEUNKNOWN;
+            case 13:
+                return DHCPLEASEACTIVE;
+        }
+        
+        return null;
+    }
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/packet/Data.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/packet/Data.java
new file mode 100644
index 0000000..47762da
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/packet/Data.java
@@ -0,0 +1,94 @@
+/**
+*    Copyright 2011, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.packet;
+
+import java.util.Arrays;
+
+/**
+ *
+ * @author David Erickson (daviderickson@cs.stanford.edu)
+ */
+public class Data extends BasePacket {
+    protected byte[] data;
+
+    /**
+     * 
+     */
+    public Data() {
+    }
+
+    /**
+     * @param data
+     */
+    public Data(byte[] data) {
+        this.data = data;
+    }
+
+    /**
+     * @return the data
+     */
+    public byte[] getData() {
+        return data;
+    }
+
+    /**
+     * @param data the data to set
+     */
+    public Data setData(byte[] data) {
+        this.data = data;
+        return this;
+    }
+
+    public byte[] serialize() {
+        return this.data;
+    }
+
+    @Override
+    public IPacket deserialize(byte[] data, int offset, int length) {
+        // copy only the declared length, not everything to the end of the backing array
+        this.data = Arrays.copyOfRange(data, offset, offset + length);
+        return this;
+    }
+
+    /* (non-Javadoc)
+     * @see java.lang.Object#hashCode()
+     */
+    @Override
+    public int hashCode() {
+        final int prime = 1571;
+        int result = super.hashCode();
+        result = prime * result + Arrays.hashCode(data);
+        return result;
+    }
+
+    /* (non-Javadoc)
+     * @see java.lang.Object#equals(java.lang.Object)
+     */
+    @Override
+    public boolean equals(Object obj) {
+        if (this == obj)
+            return true;
+        if (!super.equals(obj))
+            return false;
+        if (!(obj instanceof Data))
+            return false;
+        Data other = (Data) obj;
+        if (!Arrays.equals(data, other.data))
+            return false;
+        return true;
+    }
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/packet/Ethernet.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/packet/Ethernet.java
new file mode 100644
index 0000000..6bd627b
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/packet/Ethernet.java
@@ -0,0 +1,468 @@
+/**
+*    Copyright 2011, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.packet;
+
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Map;
+
+import net.floodlightcontroller.util.MACAddress;
+import org.openflow.util.HexString;
+
+/**
+ *
+ * @author David Erickson (daviderickson@cs.stanford.edu)
+ */
+public class Ethernet extends BasePacket {
+    private static String HEXES = "0123456789ABCDEF";
+    public static final short TYPE_ARP = 0x0806;
+    public static final short TYPE_RARP = (short) 0x8035;
+    public static final short TYPE_IPv4 = 0x0800;
+    public static final short TYPE_LLDP = (short) 0x88cc;
+    public static final short TYPE_BSN = (short) 0x8942;
+    public static final short VLAN_UNTAGGED = (short)0xffff;
+    public static final short DATALAYER_ADDRESS_LENGTH = 6; // bytes
+    public static Map<Short, Class<? extends IPacket>> etherTypeClassMap;
+
+    static {
+        etherTypeClassMap = new HashMap<Short, Class<? extends IPacket>>();
+        etherTypeClassMap.put(TYPE_ARP, ARP.class);
+        etherTypeClassMap.put(TYPE_RARP, ARP.class);
+        etherTypeClassMap.put(TYPE_IPv4, IPv4.class);
+        etherTypeClassMap.put(TYPE_LLDP, LLDP.class);
+        etherTypeClassMap.put(TYPE_BSN, BSN.class);
+    }
+
+    protected MACAddress destinationMACAddress;
+    protected MACAddress sourceMACAddress;
+    protected byte priorityCode;
+    protected short vlanID;
+    protected short etherType;
+    protected boolean pad = false;
+
+    /**
+     * By default, set Ethernet to untagged
+     */
+    public Ethernet() {
+        super();
+        this.vlanID = VLAN_UNTAGGED;
+    }
+    
+    /**
+     * @return the destination MAC as a byte array
+     */
+    public byte[] getDestinationMACAddress() {
+        return destinationMACAddress.toBytes();
+    }
+    
+    /**
+     * @return the destination MAC
+     */
+    public MACAddress getDestinationMAC() {
+        return destinationMACAddress;
+    }
+
+    /**
+     * @param destinationMACAddress the destination MAC to set
+     */
+    public Ethernet setDestinationMACAddress(byte[] destinationMACAddress) {
+        this.destinationMACAddress = MACAddress.valueOf(destinationMACAddress);
+        return this;
+    }
+
+    /**
+     * @param destinationMACAddress the destination MAC to set
+     */
+    public Ethernet setDestinationMACAddress(String destinationMACAddress) {
+        this.destinationMACAddress = MACAddress.valueOf(destinationMACAddress);
+        return this;
+    }
+
+    /**
+     * @return the source MACAddress as a byte array
+     */
+    public byte[] getSourceMACAddress() {
+        return sourceMACAddress.toBytes();
+    }
+    
+    /**
+     * @return the source MACAddress
+     */
+    public MACAddress getSourceMAC() {
+        return sourceMACAddress;
+    }
+
+    /**
+     * @param sourceMACAddress the source MAC to set
+     */
+    public Ethernet setSourceMACAddress(byte[] sourceMACAddress) {
+        this.sourceMACAddress = MACAddress.valueOf(sourceMACAddress);
+        return this;
+    }
+
+    /**
+     * @param sourceMACAddress the source MAC to set
+     */
+    public Ethernet setSourceMACAddress(String sourceMACAddress) {
+        this.sourceMACAddress = MACAddress.valueOf(sourceMACAddress);
+        return this;
+    }
+
+    /**
+     * @return the priorityCode
+     */
+    public byte getPriorityCode() {
+        return priorityCode;
+    }
+
+    /**
+     * @param priorityCode the priorityCode to set
+     */
+    public Ethernet setPriorityCode(byte priorityCode) {
+        this.priorityCode = priorityCode;
+        return this;
+    }
+
+    /**
+     * @return the vlanID
+     */
+    public short getVlanID() {
+        return vlanID;
+    }
+
+    /**
+     * @param vlanID the vlanID to set
+     */
+    public Ethernet setVlanID(short vlanID) {
+        this.vlanID = vlanID;
+        return this;
+    }
+
+    /**
+     * @return the etherType
+     */
+    public short getEtherType() {
+        return etherType;
+    }
+
+    /**
+     * @param etherType the etherType to set
+     */
+    public Ethernet setEtherType(short etherType) {
+        this.etherType = etherType;
+        return this;
+    }
+    
+    /**
+     * @return True if the Ethernet frame is broadcast, false otherwise
+     */
+    public boolean isBroadcast() {
+        assert(destinationMACAddress.length() == 6);
+        return destinationMACAddress.isBroadcast();
+    }
+    
+    /**
+     * @return True if the Ethernet frame is multicast, false otherwise
+     */
+    public boolean isMulticast() {
+        return destinationMACAddress.isMulticast();
+    }
+    /**
+     * Whether this packet is padded to the 60-byte Ethernet minimum, filled with zeros.
+     * @return the pad
+     */
+    public boolean isPad() {
+        return pad;
+    }
+
+    /**
+     * Set whether to pad this packet to the 60-byte Ethernet minimum, filling with zeros.
+     * @param pad the pad to set
+     */
+    public Ethernet setPad(boolean pad) {
+        this.pad = pad;
+        return this;
+    }
+
+    public byte[] serialize() {
+        byte[] payloadData = null;
+        if (payload != null) {
+            payload.setParent(this);
+            payloadData = payload.serialize();
+        }
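+        // 14-byte Ethernet header, plus 4 bytes for the 802.1Q tag when the
+        // frame is VLAN-tagged; optionally pad up to the 60-byte minimum
+        // frame size (excluding the FCS, which is not serialized here).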
+        int length = 14 + ((vlanID == VLAN_UNTAGGED) ? 0 : 4) +
+                          ((payloadData == null) ? 0 : payloadData.length);
+        if (pad && length < 60) {
+            length = 60;
+        }
+        byte[] data = new byte[length];
+        ByteBuffer bb = ByteBuffer.wrap(data);
+        bb.put(destinationMACAddress.toBytes());
+        bb.put(sourceMACAddress.toBytes());
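+        // 802.1Q tag: TPID 0x8100 followed by the TCI, with the 3-bit
+        // priority code point in the top bits and the 12-bit VLAN ID in the
+        // low bits (the CFI/DEI bit is left 0).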
+        if (vlanID != VLAN_UNTAGGED) {
+            bb.putShort((short) 0x8100);
+            bb.putShort((short) ((priorityCode << 13) | (vlanID & 0x0fff)));
+        }
+        bb.putShort(etherType);
+        if (payloadData != null)
+            bb.put(payloadData);
+        if (pad) {
+            Arrays.fill(data, bb.position(), data.length, (byte)0x0);
+        }
+        return data;
+    }
+
+    @Override
+    public IPacket deserialize(byte[] data, int offset, int length) {
+        if (length <= 0)
+            return null;
+        ByteBuffer bb = ByteBuffer.wrap(data, offset, length);
+        if (this.destinationMACAddress == null)
+            this.destinationMACAddress = MACAddress.valueOf(new byte[6]);
+        byte[] dstAddr = new byte[MACAddress.MAC_ADDRESS_LENGTH];
+        bb.get(dstAddr);
+        this.destinationMACAddress = MACAddress.valueOf(dstAddr);
+
+        if (this.sourceMACAddress == null)
+            this.sourceMACAddress = MACAddress.valueOf(new byte[6]);
+        byte[] srcAddr = new byte[MACAddress.MAC_ADDRESS_LENGTH];
+        bb.get(srcAddr);
+        this.sourceMACAddress = MACAddress.valueOf(srcAddr);
+
+        short etherType = bb.getShort();
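+        // A value of 0x8100 here is the 802.1Q TPID: read the TCI and then
+        // the real EtherType that follows the tag.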
+        if (etherType == (short) 0x8100) {
+            short tci = bb.getShort();
+            this.priorityCode = (byte) ((tci >> 13) & 0x07);
+            this.vlanID = (short) (tci & 0x0fff);
+            etherType = bb.getShort();
+        } else {
+            this.vlanID = VLAN_UNTAGGED;
+        }
+        this.etherType = etherType;
+        
+        IPacket payload;
+        if (Ethernet.etherTypeClassMap.containsKey(this.etherType)) {
+            Class<? extends IPacket> clazz = Ethernet.etherTypeClassMap.get(this.etherType);
+            try {
+                payload = clazz.newInstance();
+            } catch (Exception e) {
+                throw new RuntimeException("Error parsing payload for Ethernet packet", e);
+            }
+        } else {
+            payload = new Data();
+        }
+        this.payload = payload.deserialize(data, bb.position(), bb.limit()-bb.position());
+        this.payload.setParent(this);
+        return this;
+    }
+
+    /**
+     * Checks to see if a string is a valid MAC address.
+     * @param macAddress
+     * @return True if macAddress is a valid MAC address, false otherwise
+     */
+    public static boolean isMACAddress(String macAddress) {
+        String[] macBytes = macAddress.split(":");
+        if (macBytes.length != 6)
+            return false;
+        for (int i = 0; i < 6; ++i) {
+            // each group must be exactly two hex digits
+            if (macBytes[i].length() != 2 ||
+                HEXES.indexOf(macBytes[i].toUpperCase().charAt(0)) == -1 || 
+                HEXES.indexOf(macBytes[i].toUpperCase().charAt(1)) == -1) {
+                return false;
+            }
+        }
+        return true;
+    }
+
+    /**
+     * Accepts a MAC address of the form 00:aa:11:bb:22:cc, case does not
+     * matter, and returns a corresponding byte[].
+     * @param macAddress The MAC address to convert into a byte array
+     * @return The macAddress as a byte array 
+     */
+    public static byte[] toMACAddress(String macAddress) {
+        return MACAddress.valueOf(macAddress).toBytes();
+    }
+
+
+    /**
+     * Accepts a MAC address and returns the corresponding long, where the
+     * MAC bytes are set on the lower order bytes of the long.
+     * @param macAddress
+     * @return a long containing the mac address bytes
+     */
+    public static long toLong(byte[] macAddress) {
+        return MACAddress.valueOf(macAddress).toLong();
+    }
+
+    /**
+     * Convert a long MAC address to a byte array
+     * @param macAddress
+     * @return the bytes of the mac address
+     */
+    public static byte[] toByteArray(long macAddress) {
+        return MACAddress.valueOf(macAddress).toBytes();
+    }
+    
+    /* (non-Javadoc)
+     * @see java.lang.Object#hashCode()
+     */
+    @Override
+    public int hashCode() {
+        final int prime = 7867;
+        int result = super.hashCode();
+        result = prime * result + destinationMACAddress.hashCode();
+        result = prime * result + etherType;
+        result = prime * result + vlanID;
+        result = prime * result + priorityCode;
+        result = prime * result + (pad ? 1231 : 1237);
+        result = prime * result + sourceMACAddress.hashCode();
+        return result;
+    }
+
+    /* (non-Javadoc)
+     * @see java.lang.Object#equals(java.lang.Object)
+     */
+    @Override
+    public boolean equals(Object obj) {
+        if (this == obj)
+            return true;
+        if (!super.equals(obj))
+            return false;
+        if (!(obj instanceof Ethernet))
+            return false;
+        Ethernet other = (Ethernet) obj;
+        if (!destinationMACAddress.equals(other.destinationMACAddress))
+            return false;
+        if (priorityCode != other.priorityCode)
+            return false;
+        if (vlanID != other.vlanID)
+            return false;
+        if (etherType != other.etherType)
+            return false;
+        if (pad != other.pad)
+            return false;
+        if (!sourceMACAddress.equals(other.sourceMACAddress))
+            return false;
+        return true;
+    }
+    
+    /* (non-Javadoc)
+     * @see java.lang.Object#toString()
+     */
+    @Override
+    public String toString() {
+
+        StringBuffer sb = new StringBuffer("\n");
+
+        IPacket pkt = (IPacket) this.getPayload();
+
+        if (pkt instanceof ARP)
+            sb.append("arp");
+        else if (pkt instanceof LLDP)
+            sb.append("lldp");
+        else if (pkt instanceof ICMP)
+            sb.append("icmp");
+        else if (pkt instanceof IPv4)
+            sb.append("ip");
+        else if (pkt instanceof DHCP)
+            sb.append("dhcp");
+        else sb.append(this.getEtherType());
+
+        sb.append("\ndl_vlan: ");
+        if (this.getVlanID() == Ethernet.VLAN_UNTAGGED)
+            sb.append("untagged");
+        else
+            sb.append(this.getVlanID());
+        sb.append("\ndl_vlan_pcp: ");
+        sb.append(this.getPriorityCode());
+        sb.append("\ndl_src: ");
+        sb.append(HexString.toHexString(this.getSourceMACAddress()));
+        sb.append("\ndl_dst: ");
+        sb.append(HexString.toHexString(this.getDestinationMACAddress()));
+
+
+        if (pkt instanceof ARP) {
+            ARP p = (ARP) pkt;
+            sb.append("\nnw_src: ");
+            sb.append(IPv4.fromIPv4Address(IPv4.toIPv4Address(p.getSenderProtocolAddress())));
+            sb.append("\nnw_dst: ");
+            sb.append(IPv4.fromIPv4Address(IPv4.toIPv4Address(p.getTargetProtocolAddress())));
+        }
+        else if (pkt instanceof LLDP) {
+            sb.append("lldp packet");
+        }
+        else if (pkt instanceof ICMP) {
+            ICMP icmp = (ICMP) pkt;
+            sb.append("\nicmp_type: ");
+            sb.append(icmp.getIcmpType());
+            sb.append("\nicmp_code: ");
+            sb.append(icmp.getIcmpCode());
+        }
+        else if (pkt instanceof IPv4) {
+            IPv4 p = (IPv4) pkt;
+            sb.append("\nnw_src: ");
+            sb.append(IPv4.fromIPv4Address(p.getSourceAddress()));
+            sb.append("\nnw_dst: ");
+            sb.append(IPv4.fromIPv4Address(p.getDestinationAddress()));
+            sb.append("\nnw_tos: ");
+            sb.append(p.getDiffServ());
+            sb.append("\nnw_proto: ");
+            sb.append(p.getProtocol());
+
+            if (pkt instanceof TCP) {
+                sb.append("\ntp_src: ");
+                sb.append(((TCP) pkt).getSourcePort());
+                sb.append("\ntp_dst: ");
+                sb.append(((TCP) pkt).getDestinationPort());
+
+            } else if (pkt instanceof UDP) {
+                sb.append("\ntp_src: ");
+                sb.append(((UDP) pkt).getSourcePort());
+                sb.append("\ntp_dst: ");
+                sb.append(((UDP) pkt).getDestinationPort());
+            }
+
+            if (pkt instanceof ICMP) {
+                ICMP icmp = (ICMP) pkt;
+                sb.append("\nicmp_type: ");
+                sb.append(icmp.getIcmpType());
+                sb.append("\nicmp_code: ");
+                sb.append(icmp.getIcmpCode());
+            }
+
+        }
+        else if (pkt instanceof DHCP) {
+            sb.append("\ndhcp packet");
+        }
+        else if (pkt instanceof Data) {
+            sb.append("\ndata packet");
+        }
+        else if (pkt instanceof LLC) {
+            sb.append("\nllc packet");
+        }
+        else if (pkt instanceof BPDU) {
+            sb.append("\nbpdu packet");
+        }
+        else sb.append("\nunknown packet");
+
+        return sb.toString();
+    }
+
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/packet/ICMP.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/packet/ICMP.java
new file mode 100644
index 0000000..5431277
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/packet/ICMP.java
@@ -0,0 +1,170 @@
+/**
+*    Copyright 2011, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.packet;
+
+import java.nio.ByteBuffer;
+
+/**
+ * Implements ICMP packet format
+ * @author shudong.zhou@bigswitch.com
+ */
+public class ICMP extends BasePacket {
+    protected byte icmpType;
+    protected byte icmpCode;
+    protected short checksum;
+
+    /**
+     * @return the icmpType
+     */
+    public byte getIcmpType() {
+        return icmpType;
+    }
+
+    /**
+     * @param icmpType to set
+     */
+    public ICMP setIcmpType(byte icmpType) {
+        this.icmpType = icmpType;
+        return this;
+    }
+
+    /**
+     * @return the icmp code
+     */
+    public byte getIcmpCode() {
+        return icmpCode;
+    }
+
+    /**
+     * @param icmpCode code to set
+     */
+    public ICMP setIcmpCode(byte icmpCode) {
+        this.icmpCode = icmpCode;
+        return this;
+    }
+
+    /**
+     * @return the checksum
+     */
+    public short getChecksum() {
+        return checksum;
+    }
+
+    /**
+     * @param checksum the checksum to set
+     */
+    public ICMP setChecksum(short checksum) {
+        this.checksum = checksum;
+        return this;
+    }
+
+    /**
+     * Serializes the packet. Will compute and set the checksum if it is set
+     * to 0 (zero) at the time serialize is called.
+     */
+    public byte[] serialize() {
+        int length = 4;
+        byte[] payloadData = null;
+        if (payload != null) {
+            payload.setParent(this);
+            payloadData = payload.serialize();
+            length += payloadData.length;
+        }
+
+        byte[] data = new byte[length];
+        ByteBuffer bb = ByteBuffer.wrap(data);
+
+        bb.put(this.icmpType);
+        bb.put(this.icmpCode);
+        bb.putShort(this.checksum);
+        if (payloadData != null)
+            bb.put(payloadData);
+
+        if (this.parent != null && this.parent instanceof IPv4)
+            ((IPv4)this.parent).setProtocol(IPv4.PROTOCOL_ICMP);
+
+        // compute checksum if needed
+        if (this.checksum == 0) {
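+            // Standard Internet checksum (RFC 1071): sum the message as
+            // 16-bit words, fold the carries back in, then take the one's
+            // complement and patch it into the checksum field at offset 2.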
+            bb.rewind();
+            int accumulation = 0;
+
+            for (int i = 0; i < length / 2; ++i) {
+                accumulation += 0xffff & bb.getShort();
+            }
+            // pad to an even number of shorts
+            if (length % 2 > 0) {
+                accumulation += (bb.get() & 0xff) << 8;
+            }
+
+            accumulation = ((accumulation >> 16) & 0xffff)
+                    + (accumulation & 0xffff);
+            this.checksum = (short) (~accumulation & 0xffff);
+            bb.putShort(2, this.checksum);
+        }
+        return data;
+    }
+
+    /* (non-Javadoc)
+     * @see java.lang.Object#hashCode()
+     */
+    @Override
+    public int hashCode() {
+        final int prime = 5807;
+        int result = super.hashCode();
+        result = prime * result + icmpType;
+        result = prime * result + icmpCode;
+        result = prime * result + checksum;
+        return result;
+    }
+
+    /* (non-Javadoc)
+     * @see java.lang.Object#equals(java.lang.Object)
+     */
+    @Override
+    public boolean equals(Object obj) {
+        if (this == obj)
+            return true;
+        if (!super.equals(obj))
+            return false;
+        if (!(obj instanceof ICMP))
+            return false;
+        ICMP other = (ICMP) obj;
+        if (icmpType != other.icmpType)
+            return false;
+        if (icmpCode != other.icmpCode)
+            return false;
+        if (checksum != other.checksum)
+            return false;
+        return true;
+    }
+
+    @Override
+    public IPacket deserialize(byte[] data, int offset, int length) {
+        ByteBuffer bb = ByteBuffer.wrap(data, offset, length);
+        this.icmpType = bb.get();
+        this.icmpCode = bb.get();
+        this.checksum = bb.getShort();
+        
+        this.payload = new Data();
+        this.payload = payload.deserialize(data, bb.position(), bb.limit()-bb.position());
+        this.payload.setParent(this);
+        return this;
+    }
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/packet/IPacket.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/packet/IPacket.java
new file mode 100644
index 0000000..02376cd
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/packet/IPacket.java
@@ -0,0 +1,77 @@
+/**
+*    Copyright 2011, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.packet;
+
+/**
+*
+* @author David Erickson (daviderickson@cs.stanford.edu)
+*/
+public interface IPacket {
+    /**
+     * 
+     * @return
+     */
+    public IPacket getPayload();
+
+    /**
+     * 
+     * @param packet
+     * @return
+     */
+    public IPacket setPayload(IPacket packet);
+
+    /**
+     * 
+     * @return
+     */
+    public IPacket getParent();
+
+    /**
+     * 
+     * @param packet
+     * @return
+     */
+    public IPacket setParent(IPacket packet);
+
+    /**
+     * Reset any checksums as needed, and call resetChecksum on all parents
+     */
+    public void resetChecksum();
+    
+    /**
+     * Sets each payload's parent packet if applicable, then serializes this
+     * packet and all payloads
+     * @return a byte[] containing this packet and payloads
+     */
+    public byte[] serialize();
+
+    /**
+     * Deserializes this packet layer and all possible payloads
+     * @param data
+     * @param offset offset to start deserializing from
+     * @param length length of the data to deserialize
+     * @return the deserialized data
+     */
+    public IPacket deserialize(byte[] data, int offset, int length);
+    
+    /** Clone this packet and its payload packet but not its parent. 
+     * 
+     * @return
+     */
+    public Object clone();
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/packet/IPv4.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/packet/IPv4.java
new file mode 100644
index 0000000..01f886d
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/packet/IPv4.java
@@ -0,0 +1,558 @@
+/**
+*    Copyright 2011, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+/**
+ * 
+ */
+package net.floodlightcontroller.packet;
+
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * @author David Erickson (daviderickson@cs.stanford.edu)
+ *
+ */
+public class IPv4 extends BasePacket {
+    public static final byte PROTOCOL_ICMP = 0x1;
+    public static final byte PROTOCOL_TCP = 0x6;
+    public static final byte PROTOCOL_UDP = 0x11;
+    public static Map<Byte, Class<? extends IPacket>> protocolClassMap;
+
+    static {
+        protocolClassMap = new HashMap<Byte, Class<? extends IPacket>>();
+        protocolClassMap.put(PROTOCOL_ICMP, ICMP.class);
+        protocolClassMap.put(PROTOCOL_TCP, TCP.class);
+        protocolClassMap.put(PROTOCOL_UDP, UDP.class);
+    }
+
+    protected byte version;
+    protected byte headerLength;
+    protected byte diffServ;
+    protected short totalLength;
+    protected short identification;
+    protected byte flags;
+    protected short fragmentOffset;
+    protected byte ttl;
+    protected byte protocol;
+    protected short checksum;
+    protected int sourceAddress;
+    protected int destinationAddress;
+    protected byte[] options;
+
+    protected boolean isTruncated;
+
+    /**
+     * Default constructor that sets the version to 4.
+     */
+    public IPv4() {
+        super();
+        this.version = 4;
+        isTruncated = false;
+    }
+
+    /**
+     * @return the version
+     */
+    public byte getVersion() {
+        return version;
+    }
+
+    /**
+     * @param version the version to set
+     */
+    public IPv4 setVersion(byte version) {
+        this.version = version;
+        return this;
+    }
+
+    /**
+     * @return the headerLength
+     */
+    public byte getHeaderLength() {
+        return headerLength;
+    }
+
+    /**
+     * @return the diffServ
+     */
+    public byte getDiffServ() {
+        return diffServ;
+    }
+
+    /**
+     * @param diffServ the diffServ to set
+     */
+    public IPv4 setDiffServ(byte diffServ) {
+        this.diffServ = diffServ;
+        return this;
+    }
+
+    /**
+     * @return the totalLength
+     */
+    public short getTotalLength() {
+        return totalLength;
+    }
+
+    /**
+     * @return the identification
+     */
+    public short getIdentification() {
+        return identification;
+    }
+
+    public boolean isTruncated() {
+        return isTruncated;
+    }
+
+    public void setTruncated(boolean isTruncated) {
+        this.isTruncated = isTruncated;
+    }
+
+    /**
+     * @param identification the identification to set
+     */
+    public IPv4 setIdentification(short identification) {
+        this.identification = identification;
+        return this;
+    }
+
+    /**
+     * @return the flags
+     */
+    public byte getFlags() {
+        return flags;
+    }
+
+    /**
+     * @param flags the flags to set
+     */
+    public IPv4 setFlags(byte flags) {
+        this.flags = flags;
+        return this;
+    }
+
+    /**
+     * @return the fragmentOffset
+     */
+    public short getFragmentOffset() {
+        return fragmentOffset;
+    }
+
+    /**
+     * @param fragmentOffset the fragmentOffset to set
+     */
+    public IPv4 setFragmentOffset(short fragmentOffset) {
+        this.fragmentOffset = fragmentOffset;
+        return this;
+    }
+
+    /**
+     * @return the ttl
+     */
+    public byte getTtl() {
+        return ttl;
+    }
+
+    /**
+     * @param ttl the ttl to set
+     */
+    public IPv4 setTtl(byte ttl) {
+        this.ttl = ttl;
+        return this;
+    }
+
+    /**
+     * @return the protocol
+     */
+    public byte getProtocol() {
+        return protocol;
+    }
+
+    /**
+     * @param protocol the protocol to set
+     */
+    public IPv4 setProtocol(byte protocol) {
+        this.protocol = protocol;
+        return this;
+    }
+
+    /**
+     * @return the checksum
+     */
+    public short getChecksum() {
+        return checksum;
+    }
+
+    /**
+     * @param checksum the checksum to set
+     */
+    public IPv4 setChecksum(short checksum) {
+        this.checksum = checksum;
+        return this;
+    }
+    @Override
+    public void resetChecksum() {
+        this.checksum = 0;
+        super.resetChecksum();
+    }
+
+    /**
+     * @return the sourceAddress
+     */
+    public int getSourceAddress() {
+        return sourceAddress;
+    }
+
+    /**
+     * @param sourceAddress the sourceAddress to set
+     */
+    public IPv4 setSourceAddress(int sourceAddress) {
+        this.sourceAddress = sourceAddress;
+        return this;
+    }
+
+    /**
+     * @param sourceAddress the sourceAddress to set
+     */
+    public IPv4 setSourceAddress(String sourceAddress) {
+        this.sourceAddress = IPv4.toIPv4Address(sourceAddress);
+        return this;
+    }
+
+    /**
+     * @return the destinationAddress
+     */
+    public int getDestinationAddress() {
+        return destinationAddress;
+    }
+
+    /**
+     * @param destinationAddress the destinationAddress to set
+     */
+    public IPv4 setDestinationAddress(int destinationAddress) {
+        this.destinationAddress = destinationAddress;
+        return this;
+    }
+
+    /**
+     * @param destinationAddress the destinationAddress to set
+     */
+    public IPv4 setDestinationAddress(String destinationAddress) {
+        this.destinationAddress = IPv4.toIPv4Address(destinationAddress);
+        return this;
+    }
+
+    /**
+     * @return the options
+     */
+    public byte[] getOptions() {
+        return options;
+    }
+
+    /**
+     * @param options the options to set
+     */
+    public IPv4 setOptions(byte[] options) {
+        if (options != null && (options.length % 4) > 0)
+            throw new IllegalArgumentException(
+                    "Options length must be a multiple of 4");
+        this.options = options;
+        return this;
+    }
+
+    /**
+     * Serializes the packet. Will compute and set the following fields if they
+     * are set to specific values at the time serialize is called:
+     *      -checksum : 0
+     *      -headerLength : 0
+     *      -totalLength : 0
+     */
+    public byte[] serialize() {
+        byte[] payloadData = null;
+        if (payload != null) {
+            payload.setParent(this);
+            payloadData = payload.serialize();
+        }
+
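+        // IHL is expressed in 32-bit words: 5 words for the fixed header
+        // plus one word per 4 bytes of options.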
+        int optionsLength = 0;
+        if (this.options != null)
+            optionsLength = this.options.length / 4;
+        this.headerLength = (byte) (5 + optionsLength);
+
+        this.totalLength = (short) (this.headerLength * 4 + ((payloadData == null) ? 0
+                : payloadData.length));
+
+        byte[] data = new byte[this.totalLength];
+        ByteBuffer bb = ByteBuffer.wrap(data);
+
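+        // The first byte packs the version in the high nibble and the
+        // header length (in 32-bit words) in the low nibble.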
+        bb.put((byte) (((this.version & 0xf) << 4) | (this.headerLength & 0xf)));
+        bb.put(this.diffServ);
+        bb.putShort(this.totalLength);
+        bb.putShort(this.identification);
+        bb.putShort((short) (((this.flags & 0x7) << 13) | (this.fragmentOffset & 0x1fff)));
+        bb.put(this.ttl);
+        bb.put(this.protocol);
+        bb.putShort(this.checksum);
+        bb.putInt(this.sourceAddress);
+        bb.putInt(this.destinationAddress);
+        if (this.options != null)
+            bb.put(this.options);
+        if (payloadData != null)
+            bb.put(payloadData);
+
+        // compute checksum if needed
+        if (this.checksum == 0) {
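+            // RFC 1071 Internet checksum over the header only (headerLength
+            // 32-bit words = headerLength * 2 shorts), folded and
+            // complemented, then patched into bytes 10-11 of the header.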
+            bb.rewind();
+            int accumulation = 0;
+            for (int i = 0; i < this.headerLength * 2; ++i) {
+                accumulation += 0xffff & bb.getShort();
+            }
+            accumulation = ((accumulation >> 16) & 0xffff)
+                    + (accumulation & 0xffff);
+            this.checksum = (short) (~accumulation & 0xffff);
+            bb.putShort(10, this.checksum);
+        }
+        return data;
+    }
+
+    @Override
+    public IPacket deserialize(byte[] data, int offset, int length) {
+        ByteBuffer bb = ByteBuffer.wrap(data, offset, length);
+        short sscratch;
+
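+        // The first byte holds version (high nibble) and IHL (low nibble,
+        // in 32-bit words); unpack both from the single read.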
+        this.version = bb.get();
+        this.headerLength = (byte) (this.version & 0xf);
+        this.version = (byte) ((this.version >> 4) & 0xf);
+        this.diffServ = bb.get();
+        this.totalLength = bb.getShort();
+        this.identification = bb.getShort();
+        sscratch = bb.getShort();
+        this.flags = (byte) ((sscratch >> 13) & 0x7);
+        this.fragmentOffset = (short) (sscratch & 0x1fff);
+        this.ttl = bb.get();
+        this.protocol = bb.get();
+        this.checksum = bb.getShort();
+        this.sourceAddress = bb.getInt();
+        this.destinationAddress = bb.getInt();
+
+        if (this.headerLength > 5) {
+            int optionsLength = (this.headerLength - 5) * 4;
+            this.options = new byte[optionsLength];
+            bb.get(this.options);
+        }
+
+        IPacket payload;
+        if (IPv4.protocolClassMap.containsKey(this.protocol)) {
+            Class<? extends IPacket> clazz = IPv4.protocolClassMap.get(this.protocol);
+            try {
+                payload = clazz.newInstance();
+            } catch (Exception e) {
+                throw new RuntimeException("Error parsing payload for IPv4 packet", e);
+            }
+        } else {
+            payload = new Data();
+        }
+        this.payload = payload.deserialize(data, bb.position(), bb.limit()-bb.position());
+        this.payload.setParent(this);
+
+        if (this.totalLength != length)
+            this.isTruncated = true;
+        else
+            this.isTruncated = false;
+
+        return this;
+    }
+
+    /**
+     * Accepts an IPv4 address of the form xxx.xxx.xxx.xxx, e.g. 192.168.0.1, and
+     * returns the corresponding 32 bit integer.
+     * @param ipAddress
+     * @return
+     */
+    public static int toIPv4Address(String ipAddress) {
+        if (ipAddress == null)
+            throw new IllegalArgumentException("Specified IPv4 address must " +
+                "contain 4 sets of numerical digits separated by periods");
+        String[] octets = ipAddress.split("\\.");
+        if (octets.length != 4) 
+            throw new IllegalArgumentException("Specified IPv4 address must " +
+                "contain 4 sets of numerical digits separated by periods");
+
+        int result = 0;
+        for (int i = 0; i < 4; ++i) {
+            result |= Integer.valueOf(octets[i]) << ((3-i)*8);
+        }
+        return result;
+    }
+
+    /**
+     * Accepts an IPv4 address in a byte array and returns the corresponding
+     * 32-bit integer value.
+     * @param ipAddress
+     * @return
+     */
+    public static int toIPv4Address(byte[] ipAddress) {
+        int ip = 0;
+        for (int i = 0; i < 4; i++) {
+          int t = (ipAddress[i] & 0xff) << ((3-i)*8);
+          ip |= t;
+        }
+        return ip;
+    }
+
+    /**
+     * Accepts an IPv4 address and returns a string of the form xxx.xxx.xxx.xxx,
+     * e.g. 192.168.0.1
+     * 
+     * @param ipAddress
+     * @return
+     */
+    public static String fromIPv4Address(int ipAddress) {
+        StringBuffer sb = new StringBuffer();
+        int result = 0;
+        for (int i = 0; i < 4; ++i) {
+            result = (ipAddress >> ((3-i)*8)) & 0xff;
+            sb.append(Integer.valueOf(result).toString());
+            if (i != 3)
+                sb.append(".");
+        }
+        return sb.toString();
+    }
+
+    /**
+     * Accepts a collection of IPv4 addresses as integers and returns a single
+     * String, useful in toString methods that print collections of IP
+     * addresses.
+     * 
+     * @param ipAddresses collection
+     * @return
+     */
+    public static String fromIPv4AddressCollection(Collection<Integer> ipAddresses) {
+        if (ipAddresses == null)
+            return "null";
+        StringBuffer sb = new StringBuffer();
+        sb.append("[");
+        for (Integer ip : ipAddresses) {
+            sb.append(fromIPv4Address(ip));
+            sb.append(",");
+        }
+        sb.replace(sb.length()-1, sb.length(), "]");
+        return sb.toString();
+    }
+
+    /**
+     * Accepts an IPv4 address of the form xxx.xxx.xxx.xxx, e.g. 192.168.0.1,
+     * and returns the corresponding byte array.
+     * @param ipAddress The IP address in the form xxx.xxx.xxx.xxx.
+     * @return The IP address separated into bytes
+     */
+    public static byte[] toIPv4AddressBytes(String ipAddress) {
+        String[] octets = ipAddress.split("\\.");
+        if (octets.length != 4) 
+            throw new IllegalArgumentException("Specified IPv4 address must " +
+                "contain 4 sets of numerical digits separated by periods");
+
+        byte[] result = new byte[4];
+        for (int i = 0; i < 4; ++i) {
+            result[i] = Integer.valueOf(octets[i]).byteValue();
+        }
+        return result;
+    }
+    
+    /**
+     * Accepts an IPv4 address in the form of an integer and
+     * returns the corresponding byte array.
+     * @param ipAddress The IP address as an integer.
+     * @return The IP address separated into bytes.
+     */
+    public static byte[] toIPv4AddressBytes(int ipAddress) {
+        return new byte[] {
+                (byte)(ipAddress >>> 24),
+                (byte)(ipAddress >>> 16),
+                (byte)(ipAddress >>> 8),
+                (byte)ipAddress};
+    }
+
+    /* (non-Javadoc)
+     * @see java.lang.Object#hashCode()
+     */
+    @Override
+    public int hashCode() {
+        final int prime = 2521;
+        int result = super.hashCode();
+        result = prime * result + checksum;
+        result = prime * result + destinationAddress;
+        result = prime * result + diffServ;
+        result = prime * result + flags;
+        result = prime * result + fragmentOffset;
+        result = prime * result + headerLength;
+        result = prime * result + identification;
+        result = prime * result + Arrays.hashCode(options);
+        result = prime * result + protocol;
+        result = prime * result + sourceAddress;
+        result = prime * result + totalLength;
+        result = prime * result + ttl;
+        result = prime * result + version;
+        return result;
+    }
+
+    /* (non-Javadoc)
+     * @see java.lang.Object#equals(java.lang.Object)
+     */
+    @Override
+    public boolean equals(Object obj) {
+        if (this == obj)
+            return true;
+        if (!super.equals(obj))
+            return false;
+        if (!(obj instanceof IPv4))
+            return false;
+        IPv4 other = (IPv4) obj;
+        if (checksum != other.checksum)
+            return false;
+        if (destinationAddress != other.destinationAddress)
+            return false;
+        if (diffServ != other.diffServ)
+            return false;
+        if (flags != other.flags)
+            return false;
+        if (fragmentOffset != other.fragmentOffset)
+            return false;
+        if (headerLength != other.headerLength)
+            return false;
+        if (identification != other.identification)
+            return false;
+        if (!Arrays.equals(options, other.options))
+            return false;
+        if (protocol != other.protocol)
+            return false;
+        if (sourceAddress != other.sourceAddress)
+            return false;
+        if (totalLength != other.totalLength)
+            return false;
+        if (ttl != other.ttl)
+            return false;
+        if (version != other.version)
+            return false;
+        return true;
+    }
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/packet/LLC.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/packet/LLC.java
new file mode 100644
index 0000000..dc7d6d8
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/packet/LLC.java
@@ -0,0 +1,75 @@
+/**
+*    Copyright 2011, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.packet;
+
+import java.nio.ByteBuffer;
+
+/**
+ * This class represents a Logical Link Control (IEEE 802.2 LLC)
+ * header as used in 802.3 Ethernet frames.
+ * @author alexreimers
+ *
+ */
+public class LLC extends BasePacket {
+    private byte dsap = 0;
+    private byte ssap = 0;
+    private byte ctrl = 0;
+    
+    public byte getDsap() {
+        return dsap;
+    }
+
+    public void setDsap(byte dsap) {
+        this.dsap = dsap;
+    }
+
+    public byte getSsap() {
+        return ssap;
+    }
+
+    public void setSsap(byte ssap) {
+        this.ssap = ssap;
+    }
+
+    public byte getCtrl() {
+        return ctrl;
+    }
+
+    public void setCtrl(byte ctrl) {
+        this.ctrl = ctrl;
+    }
+
+    @Override
+    public byte[] serialize() {
+        byte[] data = new byte[3];
+        ByteBuffer bb = ByteBuffer.wrap(data);
+        bb.put(dsap);
+        bb.put(ssap);
+        bb.put(ctrl);
+        return data;
+    }
+
+    @Override
+    public IPacket deserialize(byte[] data, int offset, int length) {
+        ByteBuffer bb = ByteBuffer.wrap(data, offset, length);
+        dsap = bb.get();
+        ssap = bb.get();
+        ctrl = bb.get();
+        return this;
+    }
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/packet/LLDP.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/packet/LLDP.java
new file mode 100644
index 0000000..8c2c457
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/packet/LLDP.java
@@ -0,0 +1,204 @@
+/**
+*    Copyright 2011, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+/**
+ * 
+ */
+package net.floodlightcontroller.packet;
+
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * @author David Erickson (daviderickson@cs.stanford.edu)
+ *
+ */
+public class LLDP extends BasePacket {
+    protected LLDPTLV chassisId;
+    protected LLDPTLV portId;
+    protected LLDPTLV ttl;
+    protected List<LLDPTLV> optionalTLVList;
+    protected short ethType;
+
+    public LLDP() {
+        this.optionalTLVList = new ArrayList<LLDPTLV>();
+        this.ethType = Ethernet.TYPE_LLDP;
+    }
+
+    /**
+     * @return the chassisId
+     */
+    public LLDPTLV getChassisId() {
+        return chassisId;
+    }
+
+    /**
+     * @param chassisId the chassisId to set
+     */
+    public LLDP setChassisId(LLDPTLV chassisId) {
+        this.chassisId = chassisId;
+        return this;
+    }
+
+    /**
+     * @return the portId
+     */
+    public LLDPTLV getPortId() {
+        return portId;
+    }
+
+    /**
+     * @param portId the portId to set
+     */
+    public LLDP setPortId(LLDPTLV portId) {
+        this.portId = portId;
+        return this;
+    }
+
+    /**
+     * @return the ttl
+     */
+    public LLDPTLV getTtl() {
+        return ttl;
+    }
+
+    /**
+     * @param ttl the ttl to set
+     */
+    public LLDP setTtl(LLDPTLV ttl) {
+        this.ttl = ttl;
+        return this;
+    }
+
+    /**
+     * @return the optionalTLVList
+     */
+    public List<LLDPTLV> getOptionalTLVList() {
+        return optionalTLVList;
+    }
+
+    /**
+     * @param optionalTLVList the optionalTLVList to set
+     */
+    public LLDP setOptionalTLVList(List<LLDPTLV> optionalTLVList) {
+        this.optionalTLVList = optionalTLVList;
+        return this;
+    }
+
+    @Override
+    public byte[] serialize() {
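+        // Each TLV is serialized with a 2-byte type/length header in front
+        // of its value; the final 2 bytes are the mandatory End-of-LLDPDU TLV.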
+        int length = 2+this.chassisId.getLength() + 2+this.portId.getLength() +
+            2+this.ttl.getLength() + 2;
+        for (LLDPTLV tlv : this.optionalTLVList) {
+            length += 2 + tlv.getLength();
+        }
+
+        byte[] data = new byte[length];
+        ByteBuffer bb = ByteBuffer.wrap(data);
+        bb.put(this.chassisId.serialize());
+        bb.put(this.portId.serialize());
+        bb.put(this.ttl.serialize());
+        for (LLDPTLV tlv : this.optionalTLVList) {
+            bb.put(tlv.serialize());
+        }
+        bb.putShort((short) 0); // End of LLDPDU
+
+        if (this.parent != null && this.parent instanceof Ethernet)
+            ((Ethernet)this.parent).setEtherType(ethType);
+
+        return data;
+    }
+
+    @Override
+    public IPacket deserialize(byte[] data, int offset, int length) {
+        ByteBuffer bb = ByteBuffer.wrap(data, offset, length);
+        LLDPTLV tlv;
+        do {
+            tlv = new LLDPTLV().deserialize(bb);
+
+            // if there was a failure to deserialize stop processing TLVs
+            if (tlv == null)
+                break;
+            switch (tlv.getType()) {
+                case 0x0:
+                    // can throw this one away, it's just an end delimiter
+                    break;
+                case 0x1:
+                    this.chassisId = tlv;
+                    break;
+                case 0x2:
+                    this.portId = tlv;
+                    break;
+                case 0x3:
+                    this.ttl = tlv;
+                    break;
+                default:
+                    this.optionalTLVList.add(tlv);
+                    break;
+            }
+        } while (tlv.getType() != 0 && bb.hasRemaining());
+        return this;
+    }
+
+    /* (non-Javadoc)
+     * @see java.lang.Object#hashCode()
+     */
+    @Override
+    public int hashCode() {
+        final int prime = 883;
+        int result = super.hashCode();
+        result = prime * result
+                + ((chassisId == null) ? 0 : chassisId.hashCode());
+        result = prime * result + (optionalTLVList.hashCode());
+        result = prime * result + ((portId == null) ? 0 : portId.hashCode());
+        result = prime * result + ((ttl == null) ? 0 : ttl.hashCode());
+        return result;
+    }
+
+    /* (non-Javadoc)
+     * @see java.lang.Object#equals(java.lang.Object)
+     */
+    @Override
+    public boolean equals(Object obj) {
+        if (this == obj)
+            return true;
+        if (!super.equals(obj))
+            return false;
+        if (!(obj instanceof LLDP))
+            return false;
+        LLDP other = (LLDP) obj;
+        if (chassisId == null) {
+            if (other.chassisId != null)
+                return false;
+        } else if (!chassisId.equals(other.chassisId))
+            return false;
+        if (!optionalTLVList.equals(other.optionalTLVList))
+            return false;
+        if (portId == null) {
+            if (other.portId != null)
+                return false;
+        } else if (!portId.equals(other.portId))
+            return false;
+        if (ttl == null) {
+            if (other.ttl != null)
+                return false;
+        } else if (!ttl.equals(other.ttl))
+            return false;
+        return true;
+    }
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/packet/LLDPOrganizationalTLV.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/packet/LLDPOrganizationalTLV.java
new file mode 100644
index 0000000..a0930bd
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/packet/LLDPOrganizationalTLV.java
@@ -0,0 +1,181 @@
+/**
+ *    Licensed under the Apache License, Version 2.0 (the "License"); you may
+ *    not use this file except in compliance with the License. You may obtain
+ *    a copy of the License at
+ *
+ *         http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *    Unless required by applicable law or agreed to in writing, software
+ *    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ *    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ *    License for the specific language governing permissions and limitations
+ *    under the License.
+ **/
+
+package net.floodlightcontroller.packet;
+
+import java.nio.ByteBuffer;
+import java.nio.charset.Charset;
+import java.util.Arrays;
+
+/**
+ * The class representing LLDP Organizationally Specific TLV.
+ *
+ * @author Sho Shimizu (sho.shimizu@gmail.com)
+ */
+public class LLDPOrganizationalTLV extends LLDPTLV {
+    public static final int OUI_LENGTH = 3;
+    public static final int SUBTYPE_LENGTH = 1;
+    public static final byte ORGANIZATIONAL_TLV_TYPE = 127;
+    public static final int MAX_INFOSTRING_LENGTH = 507;
+
+    protected byte[] oui;
+    protected byte subType;
+    private byte[] infoString;
+
+    public LLDPOrganizationalTLV() {
+        type = ORGANIZATIONAL_TLV_TYPE;
+    }
+
+    /**
+     * Set the value of OUI.
+     * @param oui The value of OUI to be set.
+     * @return This LLDP Organizationally Specific TLV.
+     */
+    public LLDPOrganizationalTLV setOUI(byte[] oui) {
+        if (oui.length != OUI_LENGTH) {
+            throw new IllegalArgumentException("The length of OUI must be " + OUI_LENGTH +
+                ", but it is " + oui.length);
+        }
+        this.oui = Arrays.copyOf(oui, oui.length);
+        return this;
+    }
+
+    /**
+     * Returns the value of the OUI.
+     * @return The value of the OUI.
+     */
+    public byte[] getOUI() {
+        return Arrays.copyOf(oui, oui.length);
+    }
+
+    /**
+     * Set the value of sub type.
+     * @param subType The value of sub type to be set.
+     * @return This LLDP Organizationally Specific TLV.
+     */
+    public LLDPOrganizationalTLV setSubType(byte subType) {
+        this.subType = subType;
+        return this;
+    }
+
+    /**
+     * Returns the value of the sub type.
+     * @return The value of the sub type.
+     */
+    public byte getSubType() {
+        return subType;
+    }
+
+    /**
+     * Set the value of information string.
+     * @param infoString the byte array of the value of information string.
+     * @return This LLDP Organizationally Specific TLV.
+     */
+    public LLDPOrganizationalTLV setInfoString(byte[] infoString) {
+        if (infoString.length > MAX_INFOSTRING_LENGTH) {
+            throw new IllegalArgumentException("The length of infoString cannot exceed " + MAX_INFOSTRING_LENGTH);
+        }
+        this.infoString = Arrays.copyOf(infoString, infoString.length);
+        return this;
+    }
+
+    /**
+     * Set the value of information string.
+     * The String value is automatically converted into byte array with UTF-8 encoding.
+     * @param infoString the String value of information string.
+     * @return This LLDP Organizationally Specific TLV.
+     */
+    public LLDPOrganizationalTLV setInfoString(String infoString) {
+        byte[] infoStringBytes = infoString.getBytes(Charset.forName("UTF-8"));
+        return setInfoString(infoStringBytes);
+    }
+
+    /**
+     * Returns the value of information string.
+     * @return the value of information string.
+     */
+    public byte[] getInfoString() {
+        return Arrays.copyOf(infoString, infoString.length);
+    }
+
+    @Override
+    public byte[] serialize() {
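+        // The TLV value is the OUI (3 bytes) + subtype (1 byte) + info
+        // string; the base class then prepends the standard type/length header.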
+        int valueLength = OUI_LENGTH + SUBTYPE_LENGTH + infoString.length;
+        value = new byte[valueLength];
+        ByteBuffer bb = ByteBuffer.wrap(value);
+        bb.put(oui);
+        bb.put(subType);
+        bb.put(infoString);
+        return super.serialize();
+    }
+
+    @Override
+    public LLDPTLV deserialize(ByteBuffer bb) {
+        super.deserialize(bb);
+        ByteBuffer optionalField = ByteBuffer.wrap(value);
+
+        byte[] oui = new byte[OUI_LENGTH];
+        optionalField.get(oui);
+        setOUI(oui);
+
+        setSubType(optionalField.get());
+
+        byte[] infoString = new byte[getLength() - OUI_LENGTH - SUBTYPE_LENGTH];
+        optionalField.get(infoString);
+        setInfoString(infoString);
+        return this;
+    }
+
+    @Override
+    public int hashCode() {
+        final int prime = 1423;
+        int result = 1;
+        result = prime * result + type;
+        result = prime * result + length;
+        result = prime * result + Arrays.hashCode(oui);
+        result = prime * result + subType;
+        result = prime * result + Arrays.hashCode(infoString);
+        return result;
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (o == this) {
+            return true;
+        }
+
+        if (!(o instanceof LLDPOrganizationalTLV)) {
+            return false;
+        }
+
+        LLDPOrganizationalTLV other = (LLDPOrganizationalTLV)o;
+        if (this.type != other.type) {
+            return false;
+        }
+        if (this.length != other.length) {
+            return false;
+        }
+        if (!Arrays.equals(this.oui, other.oui)) {
+            return false;
+        }
+        if (this.subType != other.subType) {
+            return false;
+        }
+        if (!Arrays.equals(this.infoString, other.infoString)) {
+            return false;
+        }
+
+        return true;
+    }
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/packet/LLDPTLV.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/packet/LLDPTLV.java
new file mode 100644
index 0000000..0552321
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/packet/LLDPTLV.java
@@ -0,0 +1,140 @@
+/**
+*    Copyright 2011, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.packet;
+
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+
+/**
+ *
+ *
+ * @author David Erickson (daviderickson@cs.stanford.edu)
+ */
+public class LLDPTLV {
+    protected byte type;
+    protected short length;
+    protected byte[] value;
+
+    /**
+     * @return the type
+     */
+    public byte getType() {
+        return type;
+    }
+
+    /**
+     * @param type the type to set
+     */
+    public LLDPTLV setType(byte type) {
+        this.type = type;
+        return this;
+    }
+
+    /**
+     * @return the length
+     */
+    public short getLength() {
+        return length;
+    }
+
+    /**
+     * @param length the length to set
+     */
+    public LLDPTLV setLength(short length) {
+        this.length = length;
+        return this;
+    }
+
+    /**
+     * @return the value
+     */
+    public byte[] getValue() {
+        return value;
+    }
+
+    /**
+     * @param value the value to set
+     */
+    public LLDPTLV setValue(byte[] value) {
+        this.value = value;
+        return this;
+    }
+
+    public byte[] serialize() {
+        // TLV header: type in the top 7 bits, value length (in bytes) in the
+        // low 9 bits, followed by the value itself
+        short scratch = (short) (((0x7f & this.type) << 9) | (0x1ff & this.length));
+        byte[] data = new byte[2+this.length];
+        ByteBuffer bb = ByteBuffer.wrap(data);
+        bb.putShort(scratch);
+        if (this.value != null)
+            bb.put(this.value);
+        return data;
+    }
+
+    public LLDPTLV deserialize(ByteBuffer bb) {
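+        // The 2-byte TLV header mirrors serialize(): type in the top 7 bits,
+        // value length in bytes in the low 9 bits.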
+        short sscratch;
+        sscratch = bb.getShort();
+        this.type = (byte) ((sscratch >> 9) & 0x7f);
+        this.length = (short) (sscratch & 0x1ff);
+        if (this.length > 0) {
+            this.value = new byte[this.length];
+
+            // if there is an underrun just toss the TLV
+            if (bb.remaining() < this.length)
+                return null;
+            bb.get(this.value);
+        }
+        return this;
+    }
+
+    /* (non-Javadoc)
+     * @see java.lang.Object#hashCode()
+     */
+    @Override
+    public int hashCode() {
+        final int prime = 1423;
+        int result = 1;
+        result = prime * result + length;
+        result = prime * result + type;
+        result = prime * result + Arrays.hashCode(value);
+        return result;
+    }
+
+    /* (non-Javadoc)
+     * @see java.lang.Object#equals(java.lang.Object)
+     */
+    @Override
+    public boolean equals(Object obj) {
+        if (this == obj)
+            return true;
+        if (obj == null)
+            return false;
+        if (!(obj instanceof LLDPTLV))
+            return false;
+        LLDPTLV other = (LLDPTLV) obj;
+        if (length != other.length)
+            return false;
+        if (type != other.type)
+            return false;
+        if (!Arrays.equals(value, other.value))
+            return false;
+        return true;
+    }
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/packet/TCP.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/packet/TCP.java
new file mode 100644
index 0000000..889e4c6
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/packet/TCP.java
@@ -0,0 +1,290 @@
+/**
+*    Copyright 2011, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.packet;
+
+import java.nio.ByteBuffer;
+
+/**
+ *
+ * @author shudong.zhou@bigswitch.com
+ */
+public class TCP extends BasePacket {
+    protected short sourcePort;
+    protected short destinationPort;
+    protected int sequence;
+    protected int acknowledge;
+    protected byte dataOffset;
+    protected short flags;
+    protected short windowSize;
+    protected short checksum;
+    protected short urgentPointer;
+    protected byte[] options;
+
+    /**
+     * @return the sourcePort
+     */
+    public short getSourcePort() {
+        return sourcePort;
+    }
+
+    /**
+     * @param sourcePort the sourcePort to set
+     */
+    public TCP setSourcePort(short sourcePort) {
+        this.sourcePort = sourcePort;
+        return this;
+    }
+
+    /**
+     * @return the destinationPort
+     */
+    public short getDestinationPort() {
+        return destinationPort;
+    }
+
+    /**
+     * @param destinationPort the destinationPort to set
+     */
+    public TCP setDestinationPort(short destinationPort) {
+        this.destinationPort = destinationPort;
+        return this;
+    }
+
+    /**
+     * @return the checksum
+     */
+    public short getChecksum() {
+        return checksum;
+    }
+    
+    public int getSequence() {
+        return this.sequence;
+    }
+    public TCP setSequence(int seq) {
+        this.sequence = seq;
+        return this;
+    }
+    public int getAcknowledge() {
+        return this.acknowledge;
+    }
+    public TCP setAcknowledge(int ack) {
+        this.acknowledge = ack;
+        return this;
+    }
+    public byte getDataOffset() {
+        return this.dataOffset;
+    }
+    public TCP setDataOffset(byte offset) {
+        this.dataOffset = offset;
+        return this;
+    }
+    public short getFlags() {
+        return this.flags;
+    }
+    public TCP setFlags(short flags) {
+        this.flags = flags;
+        return this;
+    }
+    public short getWindowSize() {
+        return this.windowSize;
+    }
+    public TCP setWindowSize(short windowSize) {
+        this.windowSize = windowSize;
+        return this;
+    }
+    public short getTcpChecksum() {
+        return this.checksum;
+    }
+    public TCP setTcpChecksum(short checksum) {
+        this.checksum = checksum;
+        return this;
+    }
+    
+    @Override
+    public void resetChecksum() {
+        this.checksum = 0;
+        super.resetChecksum();
+    }
+    
+    public short getUrgentPointer(short urgentPointer) {
+        return this.urgentPointer;
+    }
+    public TCP setUrgentPointer(short urgentPointer) {
+        this.urgentPointer= urgentPointer;
+        return this;
+    }
+    public byte[] getOptions() {
+        return this.options;
+    }
+    public TCP setOptions(byte[] options) {
+        this.options = options;
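+        // Recompute the data offset in 32-bit words: 20-byte base header
+        // plus options, rounded up to a word boundary.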
+        this.dataOffset = (byte) ((20 + options.length + 3) >> 2);
+        return this;
+    }
+    /**
+     * @param checksum the checksum to set
+     */
+    public TCP setChecksum(short checksum) {
+        this.checksum = checksum;
+        return this;
+    }
+
+    /**
+     * Serializes the packet. Computes and sets the checksum if it is 0 at
+     * the time serialize() is called, and defaults the data offset to 5
+     * (a 20-byte header) when it is unset.
+     */
+    public byte[] serialize() {
+        int length;
+        if (dataOffset == 0)
+            dataOffset = 5;  // default header length
+        length = dataOffset << 2;
+        byte[] payloadData = null;
+        if (payload != null) {
+            payload.setParent(this);
+            payloadData = payload.serialize();
+            length += payloadData.length;
+        }
+
+        byte[] data = new byte[length];
+        ByteBuffer bb = ByteBuffer.wrap(data);
+
+        bb.putShort(this.sourcePort);
+        bb.putShort(this.destinationPort);
+        bb.putInt(this.sequence);
+        bb.putInt(this.acknowledge);
+        bb.putShort((short) (this.flags | (dataOffset << 12)));
+        bb.putShort(this.windowSize);
+        bb.putShort(this.checksum);
+        bb.putShort(this.urgentPointer);
+        if (dataOffset > 5) {
+            int padding;
+            bb.put(options);
+            padding = (dataOffset << 2) - 20 - options.length;
+            for (int i = 0; i < padding; i++)
+                bb.put((byte) 0);
+        }
+        if (payloadData != null)
+            bb.put(payloadData);
+
+        if (this.parent != null && this.parent instanceof IPv4)
+            ((IPv4)this.parent).setProtocol(IPv4.PROTOCOL_TCP);
+
+        // compute checksum if needed
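+        // The TCP checksum is the 16-bit one's complement of the one's
+        // complement sum over the IPv4 pseudo-header, the TCP header and
+        // the payload (RFC 793 / RFC 1071).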
+        if (this.checksum == 0) {
+            bb.rewind();
+            int accumulation = 0;
+
+            // add the IPv4 pseudo-header (addresses, protocol, TCP length) to the sum
+            if (this.parent != null && this.parent instanceof IPv4) {
+                IPv4 ipv4 = (IPv4) this.parent;
+                accumulation += ((ipv4.getSourceAddress() >> 16) & 0xffff)
+                        + (ipv4.getSourceAddress() & 0xffff);
+                accumulation += ((ipv4.getDestinationAddress() >> 16) & 0xffff)
+                        + (ipv4.getDestinationAddress() & 0xffff);
+                accumulation += ipv4.getProtocol() & 0xff;
+                accumulation += length & 0xffff;
+            }
+
+            for (int i = 0; i < length / 2; ++i) {
+                accumulation += 0xffff & bb.getShort();
+            }
+            // pad to an even number of shorts
+            if (length % 2 > 0) {
+                accumulation += (bb.get() & 0xff) << 8;
+            }
+
+            accumulation = ((accumulation >> 16) & 0xffff)
+                    + (accumulation & 0xffff);
+            this.checksum = (short) (~accumulation & 0xffff);
+            bb.putShort(16, this.checksum);
+        }
+        return data;
+    }
+
+    /* (non-Javadoc)
+     * @see java.lang.Object#hashCode()
+     */
+    @Override
+    public int hashCode() {
+        final int prime = 5807;
+        int result = super.hashCode();
+        result = prime * result + checksum;
+        result = prime * result + destinationPort;
+        result = prime * result + sourcePort;
+        return result;
+    }
+
+    /* (non-Javadoc)
+     * @see java.lang.Object#equals(java.lang.Object)
+     */
+    @Override
+    public boolean equals(Object obj) {
+        if (this == obj)
+            return true;
+        if (!super.equals(obj))
+            return false;
+        if (!(obj instanceof TCP))
+            return false;
+        TCP other = (TCP) obj;
+        // May want to compare fields based on the flags set
+        return (checksum == other.checksum) &&
+               (destinationPort == other.destinationPort) &&
+               (sourcePort == other.sourcePort) &&
+               (sequence == other.sequence) &&
+               (acknowledge == other.acknowledge) &&
+               (dataOffset == other.dataOffset) &&
+               (flags == other.flags) &&
+               (windowSize == other.windowSize) &&
+               (urgentPointer == other.urgentPointer) &&
+               (dataOffset == 5 || Arrays.equals(options, other.options));
+    }
+
+    @Override
+    public IPacket deserialize(byte[] data, int offset, int length) {
+        ByteBuffer bb = ByteBuffer.wrap(data, offset, length);
+        this.sourcePort = bb.getShort();
+        this.destinationPort = bb.getShort();
+        this.sequence = bb.getInt();
+        this.acknowledge = bb.getInt();
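+        // Data offset (4 bits), reserved (3 bits) and the 9 flag bits share
+        // one 16-bit field; split it into dataOffset and flags below.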
+        this.flags = bb.getShort();
+        this.dataOffset = (byte) ((this.flags >> 12) & 0xf);
+        this.flags = (short) (this.flags & 0x1ff);
+        this.windowSize = bb.getShort();
+        this.checksum = bb.getShort();
+        this.urgentPointer = bb.getShort();
+        if (this.dataOffset > 5) {
+            int optLength = (dataOffset << 2) - 20;
+            if (bb.limit() < bb.position()+optLength) {
+                optLength = bb.limit() - bb.position();
+            }
+            try {
+                this.options = new byte[optLength];
+                bb.get(this.options, 0, optLength);
+            } catch (IndexOutOfBoundsException e) {
+                this.options = null;
+            }
+        }
+        
+        this.payload = new Data();
+        this.payload = payload.deserialize(data, bb.position(), bb.limit()-bb.position());
+        this.payload.setParent(this);
+        return this;
+    }
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/packet/UDP.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/packet/UDP.java
new file mode 100644
index 0000000..cbeeedf
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/packet/UDP.java
@@ -0,0 +1,231 @@
+/**
+*    Copyright 2011, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.packet;
+
+import java.nio.ByteBuffer;
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ *
+ * @author David Erickson (daviderickson@cs.stanford.edu)
+ */
+public class UDP extends BasePacket {
+    public static Map<Short, Class<? extends IPacket>> decodeMap;
+    public static short DHCP_SERVER_PORT = (short)67;
+    public static short DHCP_CLIENT_PORT = (short)68;
+
+    static {
+        decodeMap = new HashMap<Short, Class<? extends IPacket>>();
+        /*
+         * Disable DHCP until the deserialize code is hardened to deal with garbage input
+         */
+        UDP.decodeMap.put(DHCP_SERVER_PORT, DHCP.class);
+        UDP.decodeMap.put(DHCP_CLIENT_PORT, DHCP.class);
+        
+    }
+
+    protected short sourcePort;
+    protected short destinationPort;
+    protected short length;
+    protected short checksum;
+
+    /**
+     * @return the sourcePort
+     */
+    public short getSourcePort() {
+        return sourcePort;
+    }
+
+    /**
+     * @param sourcePort the sourcePort to set
+     */
+    public UDP setSourcePort(short sourcePort) {
+        this.sourcePort = sourcePort;
+        return this;
+    }
+
+    /**
+     * @return the destinationPort
+     */
+    public short getDestinationPort() {
+        return destinationPort;
+    }
+
+    /**
+     * @param destinationPort the destinationPort to set
+     */
+    public UDP setDestinationPort(short destinationPort) {
+        this.destinationPort = destinationPort;
+        return this;
+    }
+
+    /**
+     * @return the length
+     */
+    public short getLength() {
+        return length;
+    }
+
+    /**
+     * @return the checksum
+     */
+    public short getChecksum() {
+        return checksum;
+    }
+
+    /**
+     * @param checksum the checksum to set
+     */
+    public UDP setChecksum(short checksum) {
+        this.checksum = checksum;
+        return this;
+    }
+
+    @Override
+    public void resetChecksum() {
+        this.checksum = 0;
+        super.resetChecksum();
+    }
+
+    /**
+     * Serializes the packet. Will compute and set the following fields if they
+     * are set to specific values at the time serialize is called:
+     *      -checksum : 0
+     *      -length : 0
+     */
+    public byte[] serialize() {
+        byte[] payloadData = null;
+        if (payload != null) {
+            payload.setParent(this);
+            payloadData = payload.serialize();
+        }
+
+        this.length = (short) (8 + ((payloadData == null) ? 0
+                : payloadData.length));
+
+        byte[] data = new byte[this.length];
+        ByteBuffer bb = ByteBuffer.wrap(data);
+
+        bb.putShort(this.sourcePort);
+        bb.putShort(this.destinationPort);
+        bb.putShort(this.length);
+        bb.putShort(this.checksum);
+        if (payloadData != null)
+            bb.put(payloadData);
+
+        if (this.parent != null && this.parent instanceof IPv4)
+            ((IPv4)this.parent).setProtocol(IPv4.PROTOCOL_UDP);
+
+        // compute checksum if needed
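+        // Same pseudo-header sum as TCP; note that for UDP over IPv4 a
+        // transmitted checksum of 0 means "no checksum" (RFC 768), so a
+        // zero value here is simply recomputed.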
+        if (this.checksum == 0) {
+            bb.rewind();
+            int accumulation = 0;
+
+            // add the IPv4 pseudo-header (addresses, protocol, UDP length) to the sum
+            if (this.parent != null && this.parent instanceof IPv4) {
+                IPv4 ipv4 = (IPv4) this.parent;
+                accumulation += ((ipv4.getSourceAddress() >> 16) & 0xffff)
+                        + (ipv4.getSourceAddress() & 0xffff);
+                accumulation += ((ipv4.getDestinationAddress() >> 16) & 0xffff)
+                        + (ipv4.getDestinationAddress() & 0xffff);
+                accumulation += ipv4.getProtocol() & 0xff;
+                accumulation += this.length & 0xffff;
+            }
+
+            for (int i = 0; i < this.length / 2; ++i) {
+                accumulation += 0xffff & bb.getShort();
+            }
+            // pad to an even number of shorts
+            if (this.length % 2 > 0) {
+                accumulation += (bb.get() & 0xff) << 8;
+            }
+
+            accumulation = ((accumulation >> 16) & 0xffff)
+                    + (accumulation & 0xffff);
+            this.checksum = (short) (~accumulation & 0xffff);
+            bb.putShort(6, this.checksum);
+        }
+        return data;
+    }
+
+    /* (non-Javadoc)
+     * @see java.lang.Object#hashCode()
+     */
+    @Override
+    public int hashCode() {
+        final int prime = 5807;
+        int result = super.hashCode();
+        result = prime * result + checksum;
+        result = prime * result + destinationPort;
+        result = prime * result + length;
+        result = prime * result + sourcePort;
+        return result;
+    }
+
+    /* (non-Javadoc)
+     * @see java.lang.Object#equals(java.lang.Object)
+     */
+    @Override
+    public boolean equals(Object obj) {
+        if (this == obj)
+            return true;
+        if (!super.equals(obj))
+            return false;
+        if (!(obj instanceof UDP))
+            return false;
+        UDP other = (UDP) obj;
+        if (checksum != other.checksum)
+            return false;
+        if (destinationPort != other.destinationPort)
+            return false;
+        if (length != other.length)
+            return false;
+        if (sourcePort != other.sourcePort)
+            return false;
+        return true;
+    }
+
+    @Override
+    public IPacket deserialize(byte[] data, int offset, int length) {
+        ByteBuffer bb = ByteBuffer.wrap(data, offset, length);
+        this.sourcePort = bb.getShort();
+        this.destinationPort = bb.getShort();
+        this.length = bb.getShort();
+        this.checksum = bb.getShort();
+
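+        // Choose the payload decoder from the well-known-port map (currently
+        // only the DHCP ports); otherwise fall back to an opaque Data payload.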
+        if (UDP.decodeMap.containsKey(this.destinationPort)) {
+            try {
+                this.payload = UDP.decodeMap.get(this.destinationPort).getConstructor().newInstance();
+            } catch (Exception e) {
+                throw new RuntimeException("Failure instantiating class", e);
+            }
+        } else if (UDP.decodeMap.containsKey(this.sourcePort)) {
+            try {
+                this.payload = UDP.decodeMap.get(this.sourcePort).getConstructor().newInstance();
+            } catch (Exception e) {
+                throw new RuntimeException("Failure instantiating class", e);
+            }
+        } else {
+            this.payload = new Data();
+        }
+        this.payload = payload.deserialize(data, bb.position(), bb.limit()-bb.position());
+        this.payload.setParent(this);
+        return this;
+    }
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/packetstreamer/PacketStreamerClient.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/packetstreamer/PacketStreamerClient.java
new file mode 100644
index 0000000..abed853
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/packetstreamer/PacketStreamerClient.java
@@ -0,0 +1,93 @@
+package net.floodlightcontroller.packetstreamer;
+
+import net.floodlightcontroller.packetstreamer.thrift.*;
+
+import java.util.List;
+import java.util.ArrayList;
+
+import org.apache.thrift.TException;
+import org.apache.thrift.transport.TFramedTransport;
+import org.apache.thrift.transport.TTransport;
+import org.apache.thrift.transport.TSocket;
+import org.apache.thrift.transport.TTransportException;
+import org.apache.thrift.protocol.TBinaryProtocol;
+import org.apache.thrift.protocol.TProtocol;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * The PacketStreamer Sample Client.
+ */
+public class PacketStreamerClient {
+    protected static Logger log = LoggerFactory.getLogger(PacketStreamerClient.class);
+
+    /** 
+     * Main function entry point.
+     * @param args
+     */
+    public static void main(String [] args) {
+        try {
+            int serverPort = Integer.parseInt(System.getProperty("net.floodlightcontroller.packetstreamer.port", "9090"));
+            TTransport transport;
+            transport = new TFramedTransport(new TSocket("localhost", serverPort));
+            transport.open();
+  
+
+            TProtocol protocol = new  TBinaryProtocol(transport);
+            PacketStreamer.Client client = new PacketStreamer.Client(protocol);
+
+            sendPackets(client, (short)2, OFMessageType.PACKET_IN, true);
+            log.debug("Terminate session1");
+            client.terminateSession("session1");
+
+            transport.close();
+        } catch (TException x) {
+            x.printStackTrace();
+        } 
+    }
+
+    /** 
+     * Sends test packets of the given OFMessageType to the packetstreamer server.
+     * @param client Packetstreamer client object
+     * @param numPackets number of test packets to be sent
+     * @param ofType OFMessageType of the test packets
+     * @param sync true if send with synchronous interface, false for asynchronous interface
+     * @throws TException
+     */
+    private static void sendPackets(PacketStreamer.Client client, short numPackets, OFMessageType ofType, boolean sync) 
+    throws TException {
+        while (numPackets-- > 0) {
+            Message msg = new Message();
+            Packet packet = new Packet();
+    
+            List<String> sids = new ArrayList<String>();
+            sids.add("session1");
+            sids.add("session2");
+            msg.setSessionIDs(sids);
+            packet.setMessageType(ofType);
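+            // Spread the test packets over fake switch DPIDs, 40 ports per switch.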
+            long sw_dpid = numPackets/40 + 1;
+            packet.setSwPortTuple(new SwitchPortTuple(sw_dpid, (short)(numPackets - (sw_dpid-1)*40)));
+    
+            String strData = "New data, sequence " + numPackets;
+            packet.setData(strData.getBytes());
+            msg.setPacket(packet);
+
+            try {
+                if (sync) {
+                      client.pushMessageSync(msg);
+                      log.debug("Send packet sync: " + msg.toString());
+                } else {
+                      client.pushMessageAsync(msg);
+                      log.debug("Send packet async: " + msg.toString());
+                }
+            } catch (TTransportException e) {
+                log.error(e.toString());
+            }
+            
+            try {
+                Thread.sleep(100);
+            } catch (Exception e) {}
+        }
+    }
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/packetstreamer/PacketStreamerHandler.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/packetstreamer/PacketStreamerHandler.java
new file mode 100644
index 0000000..903295e
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/packetstreamer/PacketStreamerHandler.java
@@ -0,0 +1,213 @@
+package net.floodlightcontroller.packetstreamer;
+
+import net.floodlightcontroller.core.annotations.LogMessageCategory;
+import net.floodlightcontroller.core.annotations.LogMessageDoc;
+import net.floodlightcontroller.core.annotations.LogMessageDocs;
+import net.floodlightcontroller.packetstreamer.thrift.*;
+
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.List;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.LinkedBlockingQueue;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * The PacketStreamer handler class that implements the service APIs.
+ */
+@LogMessageCategory("OpenFlow Message Tracing")
+public class PacketStreamerHandler implements PacketStreamer.Iface {
+
+    /**
+     * The queue wrapper class that contains the queue for the streamed packets.
+     */
+    protected class SessionQueue {
+        protected BlockingQueue<ByteBuffer> pQueue;
+
+        /**
+         * The queue wrapper constructor
+         */
+        public SessionQueue() {
+            this.pQueue = new LinkedBlockingQueue<ByteBuffer>();
+        }
+
+        /**
+         * The access method to get to the internal queue.
+         */
+        public BlockingQueue<ByteBuffer> getQueue() {
+            return this.pQueue;
+        }
+    }
+    
+    /**
+     * The class logger object
+     */
+    protected static Logger log = 
+            LoggerFactory.getLogger(PacketStreamerHandler.class);
+    
+    /**
+     * A sessionId-to-queue mapping
+     */
+    protected Map<String, SessionQueue> msgQueues;
+
+    /**
+     * The handler's constructor
+     */
+    public PacketStreamerHandler() {
+        this.msgQueues = new ConcurrentHashMap<String, SessionQueue>();
+    }
+
+    /**
+     * The implementation for getPackets() function.
+     * This is a blocking API.
+     * 
+     * @param sessionid
+     * @return A list of packets associated with the session
+     */
+    @Override
+    @LogMessageDocs({
+        @LogMessageDoc(level="ERROR",
+                message="Interrupted while waiting for session start",
+                explanation="The thread was interrupted waiting " +
+                     "for the packet streamer session to start",
+                recommendation=LogMessageDoc.CHECK_CONTROLLER),
+        @LogMessageDoc(level="ERROR",
+                message="Interrupted while waiting for packets",
+                explanation="The thread was interrupted waiting " +
+                        "for packets",
+                recommendation=LogMessageDoc.CHECK_CONTROLLER)
+    })
+    public List<ByteBuffer> getPackets(String sessionid)
+            throws org.apache.thrift.TException {
+        List<ByteBuffer> packets = new ArrayList<ByteBuffer>();
+        int count = 0;
+        
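+        // Poll for up to 100 * 100ms = ~10 seconds for the session queue to
+        // be created before giving up.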
+        while (!msgQueues.containsKey(sessionid) && count++ < 100) {
+            log.debug("Queue for session {} doesn't exist yet.", sessionid);
+            try {
+                Thread.sleep(100);    // Wait 100 ms to check again.
+            } catch (InterruptedException e) {
+                log.error("Interrupted while waiting for session start");
+            }
+        }
+
+        if (count < 100) {
+            SessionQueue pQueue = msgQueues.get(sessionid);
+            BlockingQueue<ByteBuffer> queue = pQueue.getQueue();
+            // Block if queue is empty
+            try {
+                packets.add(queue.take());
+                queue.drainTo(packets);
+            } catch (InterruptedException e) {
+                log.error("Interrupted while waiting for packets");
+            }
+        }
+
+        return packets;
+    }
+
+    /**
+     * The implementation for pushMessageSync() function.
+     * 
+     * @param msg
+     * @return 1 for success, 0 for failure
+     * @throws TException
+     */
+    @Override
+    @LogMessageDocs({
+        @LogMessageDoc(level="ERROR",
+                message="Could not push empty message",
+                explanation="An empty message was sent to the packet streamer",
+                recommendation=LogMessageDoc.REPORT_CONTROLLER_BUG),
+        @LogMessageDoc(level="ERROR",
+                message="queue for session {sessionId} is null",
+                explanation="The queue for the packet streamer session " +
+                		"is missing",
+                recommendation=LogMessageDoc.REPORT_CONTROLLER_BUG)
+    })
+
+    public int pushMessageSync(Message msg)
+            throws org.apache.thrift.TException {
+
+        if (msg == null) {
+            log.error("Could not push empty message");
+            return 0;
+        }
+
+        List<String> sessionids = msg.getSessionIDs();
+        for (String sid : sessionids) {
+            SessionQueue pQueue = null;
+
+            if (!msgQueues.containsKey(sid)) {
+                pQueue = new SessionQueue();
+                msgQueues.put(sid, pQueue);
+            } else {
+                pQueue = msgQueues.get(sid);
+            }
+
+            log.debug("pushMessageSync: SessionId: " + sid + 
+                      " Receive a message, " + msg.toString() + "\n");
+            ByteBuffer bb = ByteBuffer.wrap(msg.getPacket().getData());
+            //ByteBuffer dst = ByteBuffer.wrap(msg.getPacket().toString().getBytes());
+            BlockingQueue<ByteBuffer> queue = pQueue.getQueue();
+            if (queue != null) {
+                if (!queue.offer(bb)) {
+                    log.error("Failed to queue message for session: " + sid);
+                } else {
+                    log.debug("insert a message to session: " + sid);
+                }
+            } else {
+                log.error("queue for session {} is null", sid);
+            }
+        }
+
+        return 1;
+    }
+
+    /**
+     * The implementation for pushMessageAsync() function.
+     * 
+     * @param msg
+     * @throws TException
+     */
+    @Override
+    public void pushMessageAsync(Message msg)
+            throws org.apache.thrift.TException {
+        pushMessageSync(msg);
+        return;
+    }
+
+    /**
+     * The implementation for terminateSession() function.
+     * It removes the session to queue association.
+     * @param sessionid
+     * @throws TException
+     */
+    @Override
+    public void terminateSession(String sessionid)
+            throws org.apache.thrift.TException {
+        if (!msgQueues.containsKey(sessionid)) {
+            return;
+        }
+
+        SessionQueue pQueue = msgQueues.get(sessionid);
+
+        log.debug("terminateSession: SessionId: " + sessionid + "\n");
+        String data = "FilterTimeout";
+        ByteBuffer bb = ByteBuffer.wrap(data.getBytes());
+        BlockingQueue<ByteBuffer> queue = pQueue.getQueue();
+        if (queue != null) {
+            if (!queue.offer(bb)) {
+                log.error("Failed to queue message for session: " + sessionid);
+            }
+            msgQueues.remove(sessionid);
+        } else {
+            log.error("queue for session {} is null", sessionid);
+        }
+    }
+}
+
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/packetstreamer/PacketStreamerServer.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/packetstreamer/PacketStreamerServer.java
new file mode 100644
index 0000000..4a425e0
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/packetstreamer/PacketStreamerServer.java
@@ -0,0 +1,72 @@
+package net.floodlightcontroller.packetstreamer;
+
+import org.apache.thrift.protocol.TBinaryProtocol;
+import org.apache.thrift.server.TServer;
+import org.apache.thrift.server.THsHaServer;
+import org.apache.thrift.transport.TFramedTransport;
+import org.apache.thrift.transport.TNonblockingServerSocket;
+import org.apache.thrift.transport.TNonblockingServerTransport;
+
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import net.floodlightcontroller.core.annotations.LogMessageCategory;
+// Generated code
+import net.floodlightcontroller.packetstreamer.thrift.*;
+
+/**
+ * The PacketStreamer Server that brokers the packet streaming service.
+ */
+@LogMessageCategory("OpenFlow Message Tracing")
+public class PacketStreamerServer {
+    protected static Logger log = LoggerFactory.getLogger(PacketStreamerServer.class);
+    protected static int port = 9090;
+    protected static PacketStreamerHandler handler;
+    protected static PacketStreamer.Processor<PacketStreamerHandler> processor;
+
+    
+    /** 
+     * Main function entry point.
+     * @param args
+     */
+    public static void main(String [] args) {
+        try {
+            port = Integer.parseInt(System.getProperty("net.floodlightcontroller.packetstreamer.port", "9090"));
+            
+            handler = new PacketStreamerHandler();
+            processor = new PacketStreamer.Processor<PacketStreamerHandler>(handler);
+
+            Runnable simple = new Runnable() {
+                public void run() {
+                    hshaServer(processor);
+                }
+            };
+
+            new Thread(simple).start();
+        } catch (Exception x) {
+            x.printStackTrace();
+        }
+    }
+
+    
+    /** 
+     * The function to create a thrift Half-Sync and Half-Async Server.
+     * @param processor
+     */
+    public static void hshaServer(PacketStreamer.Processor<PacketStreamerHandler> processor) {
+        try {
+            TNonblockingServerTransport serverTransport = new TNonblockingServerSocket(port);
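+            // Thrift's nonblocking servers require framed transport; this matches
+            // the TFramedTransport used by PacketStreamerClient.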
+            THsHaServer.Args args = new THsHaServer.Args(serverTransport);
+            args.processor(processor);
+            args.transportFactory(new TFramedTransport.Factory());
+            args.protocolFactory(new TBinaryProtocol.Factory(true, true));
+            TServer server = new THsHaServer(args);
+    
+            log.info("Starting the packetstreamer hsha server on port {} ...", port);
+            server.serve();
+        } catch (Exception e) {
+            e.printStackTrace();
+        }
+    }
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/perfmon/CumulativeTimeBucket.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/perfmon/CumulativeTimeBucket.java
new file mode 100644
index 0000000..e76253d
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/perfmon/CumulativeTimeBucket.java
@@ -0,0 +1,122 @@
+package net.floodlightcontroller.perfmon;
+
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+
+import org.codehaus.jackson.map.annotate.JsonSerialize;
+
+import net.floodlightcontroller.core.IOFMessageListener;
+
+@JsonSerialize(using=CumulativeTimeBucketJSONSerializer.class)
+public class CumulativeTimeBucket {
+    private long startTime_ns; // First pkt time-stamp in this bucket
+    private Map<Integer, OneComponentTime> compStats;
+    private long totalPktCnt;
+    private long totalProcTimeNs; // total packet-in processing time accumulated in this bucket
+    private long sumSquaredProcTimeNs2;
+    private long maxTotalProcTimeNs;
+    private long minTotalProcTimeNs;
+    private long avgTotalProcTimeNs;
+    private long sigmaTotalProcTimeNs; // std. deviation
+
+    public long getStartTimeNs() {
+        return startTime_ns;
+    }
+
+    public long getTotalPktCnt() {
+        return totalPktCnt;
+    }
+    
+    public long getAverageProcTimeNs() {
+        return avgTotalProcTimeNs;
+    }
+
+    public long getMinTotalProcTimeNs() {
+        return minTotalProcTimeNs;
+    }
+    
+    public long getMaxTotalProcTimeNs() {
+        return maxTotalProcTimeNs;
+    }
+    
+    public long getTotalSigmaProcTimeNs() {
+        return sigmaTotalProcTimeNs;
+    }
+    
+    public int getNumComps() {
+        return compStats.values().size();
+    }
+    
+    public Collection<OneComponentTime> getModules() {
+        return compStats.values();
+    }
+
+    public CumulativeTimeBucket(List<IOFMessageListener> listeners) {
+        compStats = new ConcurrentHashMap<Integer, OneComponentTime>(listeners.size());
+        for (IOFMessageListener l : listeners) {
+            OneComponentTime oct = new OneComponentTime(l);
+            compStats.put(oct.hashCode(), oct);
+        }
+        startTime_ns = System.nanoTime();
+    }
+
+    private void updateSquaredProcessingTime(long curTimeNs) {
+        sumSquaredProcTimeNs2 += (Math.pow(curTimeNs, 2));
+    }
+    
+    /**
+     * Resets all bucket counters and the counters for each component
+     */
+    public void reset() {
+        startTime_ns = System.nanoTime();
+        totalPktCnt = 0;
+        totalProcTimeNs = 0;
+        avgTotalProcTimeNs = 0;
+        sumSquaredProcTimeNs2 = 0;
+        maxTotalProcTimeNs = Long.MIN_VALUE;
+        minTotalProcTimeNs = Long.MAX_VALUE;
+        sigmaTotalProcTimeNs = 0;
+        for (OneComponentTime oct : compStats.values()) {
+            oct.resetAllCounters();
+        }
+    }
+    
+    private void computeSigma() {
+        // Computes std. deviation from the sum of count numbers and from
+        // the sum of the squares of count numbers
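+        // i.e. the population standard deviation:
+        //   sigma = sqrt( (sum(x^2) - (sum x)^2 / n) / n )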
+        double temp = totalProcTimeNs;
+        temp = Math.pow(temp, 2) / totalPktCnt;
+        temp = (sumSquaredProcTimeNs2 - temp) / totalPktCnt;
+        sigmaTotalProcTimeNs = (long) Math.sqrt(temp);
+    }
+    
+    public void computeAverages() {
+        // Must be called last; it needs the latest counter values
+        computeSigma();
+        
+        for (OneComponentTime oct : compStats.values()) {
+            oct.computeSigma();
+        }
+    }
+    
+    public void updatePerPacketCounters(long procTimeNs) {
+        totalPktCnt++;
+        totalProcTimeNs += procTimeNs;
+        avgTotalProcTimeNs = totalProcTimeNs / totalPktCnt;
+        updateSquaredProcessingTime(procTimeNs);
+        
+        if (procTimeNs > maxTotalProcTimeNs) {
+            maxTotalProcTimeNs = procTimeNs;
+        }
+        
+        if (procTimeNs < minTotalProcTimeNs) {
+            minTotalProcTimeNs = procTimeNs;
+        }
+    }
+    
+    public void updateOneComponent(IOFMessageListener l, long procTimeNs) {
+        compStats.get(l.hashCode()).updatePerPacketCounters(procTimeNs);
+    }
+}
\ No newline at end of file
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/perfmon/CumulativeTimeBucketJSONSerializer.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/perfmon/CumulativeTimeBucketJSONSerializer.java
new file mode 100644
index 0000000..e492777
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/perfmon/CumulativeTimeBucketJSONSerializer.java
@@ -0,0 +1,47 @@
+package net.floodlightcontroller.perfmon;
+
+import java.io.IOException;
+import java.sql.Timestamp;
+
+
+import org.codehaus.jackson.JsonGenerator;
+import org.codehaus.jackson.JsonProcessingException;
+import org.codehaus.jackson.map.JsonSerializer;
+import org.codehaus.jackson.map.SerializerProvider;
+
+public class CumulativeTimeBucketJSONSerializer
+                                extends JsonSerializer<CumulativeTimeBucket> {
+    /**
+     * Performs the serialization of a CumulativeTimeBucket object
+     */
+   @Override
+   public void serialize(CumulativeTimeBucket ctb,
+                   JsonGenerator jGen,
+                   SerializerProvider serializer) 
+                   throws IOException, JsonProcessingException {
+       jGen.writeStartObject();
+       Timestamp ts = new Timestamp(ctb.getStartTimeNs()/1000000);
+       jGen.writeStringField("start-time", ts.toString());
+       jGen.writeStringField("current-time", 
+         new Timestamp(System.currentTimeMillis()).toString());
+       jGen.writeNumberField("total-packets", ctb.getTotalPktCnt());
+       jGen.writeNumberField("average", ctb.getAverageProcTimeNs());
+       jGen.writeNumberField("min", ctb.getMinTotalProcTimeNs());
+       jGen.writeNumberField("max", ctb.getMaxTotalProcTimeNs());
+       jGen.writeNumberField("std-dev", ctb.getTotalSigmaProcTimeNs());
+       jGen.writeArrayFieldStart("modules");
+       for (OneComponentTime oct : ctb.getModules()) {
+           serializer.defaultSerializeValue(oct, jGen);
+       }
+       jGen.writeEndArray();
+       jGen.writeEndObject();
+   }
+
+   /**
+    * Tells SimpleModule that we are the serializer for CumulativeTimeBucket
+    */
+   @Override
+   public Class<CumulativeTimeBucket> handledType() {
+       return CumulativeTimeBucket.class;
+   }
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/perfmon/IPktInProcessingTimeService.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/perfmon/IPktInProcessingTimeService.java
new file mode 100644
index 0000000..80dfda0
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/perfmon/IPktInProcessingTimeService.java
@@ -0,0 +1,37 @@
+package net.floodlightcontroller.perfmon;
+
+import java.util.List;
+
+import org.openflow.protocol.OFMessage;
+
+import net.floodlightcontroller.core.FloodlightContext;
+import net.floodlightcontroller.core.IOFMessageListener;
+import net.floodlightcontroller.core.IOFSwitch;
+import net.floodlightcontroller.core.module.IFloodlightService;
+
+public interface IPktInProcessingTimeService extends IFloodlightService {
+
+    /**
+     * Creates time buckets for a set of modules to measure their performance
+     * @param listeners The message listeners to create time buckets for
+     */
+    public void bootstrap(List<IOFMessageListener> listeners);
+    
+    /**
+     * Stores a timestamp in ns. Used right before a service handles an
+     * OF message. Only stores if the service is enabled.
+     */
+    public void recordStartTimeComp(IOFMessageListener listener);
+    
+    public void recordEndTimeComp(IOFMessageListener listener);
+    
+    public void recordStartTimePktIn();
+    
+    public void recordEndTimePktIn(IOFSwitch sw, OFMessage m, FloodlightContext cntx);
+    
+    public boolean isEnabled();
+    
+    public void setEnabled(boolean enabled);
+    
+    public CumulativeTimeBucket getCtb();
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/perfmon/NullPktInProcessingTime.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/perfmon/NullPktInProcessingTime.java
new file mode 100644
index 0000000..3d9504b
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/perfmon/NullPktInProcessingTime.java
@@ -0,0 +1,109 @@
+package net.floodlightcontroller.perfmon;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.openflow.protocol.OFMessage;
+
+import net.floodlightcontroller.core.FloodlightContext;
+import net.floodlightcontroller.core.IOFMessageListener;
+import net.floodlightcontroller.core.IOFSwitch;
+import net.floodlightcontroller.core.module.FloodlightModuleContext;
+import net.floodlightcontroller.core.module.FloodlightModuleException;
+import net.floodlightcontroller.core.module.IFloodlightModule;
+import net.floodlightcontroller.core.module.IFloodlightService;
+
+/**
+ * An IPktInProcessingTimeService implementation that does nothing.
+ * This is used mainly for performance testing or if you don't
+ * want to use the IPktInProcessingTimeService features.
+ * @author alexreimers
+ *
+ */
+public class NullPktInProcessingTime 
+    implements IFloodlightModule, IPktInProcessingTimeService {
+    
+    private CumulativeTimeBucket ctb;
+    private boolean inited = false;
+    
+    public Collection<Class<? extends IFloodlightService>> getModuleServices() {
+        Collection<Class<? extends IFloodlightService>> l = 
+                new ArrayList<Class<? extends IFloodlightService>>();
+        l.add(IPktInProcessingTimeService.class);
+        return l;
+    }
+    
+    @Override
+    public Map<Class<? extends IFloodlightService>, IFloodlightService>
+            getServiceImpls() {
+        Map<Class<? extends IFloodlightService>,
+        IFloodlightService> m = 
+            new HashMap<Class<? extends IFloodlightService>,
+                        IFloodlightService>();
+        // We are the class that implements the service
+        m.put(IPktInProcessingTimeService.class, this);
+        return m;
+    }
+    
+    @Override
+    public Collection<Class<? extends IFloodlightService>> getModuleDependencies() {
+        // We don't have any dependencies
+        return null;
+    }
+    
+    @Override
+    public void init(FloodlightModuleContext context)
+                             throws FloodlightModuleException {
+
+    }
+
+    @Override
+    public void startUp(FloodlightModuleContext context) {
+        // no-op
+    }
+
+    @Override
+    public boolean isEnabled() {
+        return false;
+    }
+
+    @Override
+    public void bootstrap(List<IOFMessageListener> listeners) {
+        if (!inited) {
+            ctb = new CumulativeTimeBucket(listeners);
+            inited = true;
+        }
+    }
+
+    @Override
+    public void recordStartTimeComp(IOFMessageListener listener) {
+
+    }
+
+    @Override
+    public void recordEndTimeComp(IOFMessageListener listener) {
+
+    }
+
+    @Override
+    public void recordStartTimePktIn() {
+
+    }
+
+    @Override
+    public void recordEndTimePktIn(IOFSwitch sw, OFMessage m,
+                                   FloodlightContext cntx) {
+        
+    }
+
+    @Override
+    public void setEnabled(boolean enabled) {
+    
+    }
+
+    @Override
+    public CumulativeTimeBucket getCtb() {
+        return ctb;
+    }
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/perfmon/OneComponentTime.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/perfmon/OneComponentTime.java
new file mode 100644
index 0000000..3e9734b
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/perfmon/OneComponentTime.java
@@ -0,0 +1,129 @@
+package net.floodlightcontroller.perfmon;
+
+import org.codehaus.jackson.annotate.JsonProperty;
+
+import net.floodlightcontroller.core.IOFMessageListener;
+
+/**
+ * Holds OF message processing time information for one IFloodlightModule.
+ * @author Subrata
+ */
+public class OneComponentTime {
+    private int compId; // hashCode of the IOFMessageListener
+    private String compName;
+    private int pktCnt;
+    // all times in nanoseconds
+    private long totalProcTimeNs;
+    private long sumSquaredProcTimeNs2; // squared
+    private long maxProcTimeNs;
+    private long minProcTimeNs;
+    private long avgProcTimeNs;
+    private long sigmaProcTimeNs;  // std. deviation
+
+    public OneComponentTime(IOFMessageListener module) {
+        compId = module.hashCode();
+        compName = module.getClass().getCanonicalName();
+        resetAllCounters();
+    }
+    
+    public void resetAllCounters() {
+        maxProcTimeNs = Long.MIN_VALUE;
+        minProcTimeNs = Long.MAX_VALUE;
+        pktCnt = 0;
+        totalProcTimeNs = 0;
+        sumSquaredProcTimeNs2 = 0;
+        avgProcTimeNs = 0;
+        sigmaProcTimeNs = 0;
+    }
+    
+    @JsonProperty("module-name")
+    public String getCompName() {
+        return compName;
+    }
+
+    @JsonProperty("num-packets")
+    public int getPktCnt() {
+        return pktCnt;
+    }
+
+    @JsonProperty("total")
+    public long getSumProcTimeNs() {
+        return totalProcTimeNs;
+    }
+
+    @JsonProperty("max")
+    public long getMaxProcTimeNs() {
+        return maxProcTimeNs;
+    }
+
+    @JsonProperty("min")
+    public long getMinProcTimeNs() {
+        return minProcTimeNs;
+    }
+
+    @JsonProperty("average")
+    public long getAvgProcTimeNs() {
+        return avgProcTimeNs;
+    }
+
+    @JsonProperty("std-dev")
+    public long getSigmaProcTimeNs() {
+        return sigmaProcTimeNs;
+    }
+    
+    @JsonProperty("average-squared")
+    public long getSumSquaredProcTimeNs() {
+        return sumSquaredProcTimeNs2;
+    }
+
+    // Methods used to update the counters
+    
+    private void increasePktCount() {
+        pktCnt++;
+    }
+    
+    private void updateTotalProcessingTime(long procTimeNs) {
+        totalProcTimeNs += procTimeNs;
+    }
+    
+    private void updateAvgProcessTime() {
+        avgProcTimeNs = totalProcTimeNs / pktCnt;
+    }
+    
+    private void updateSquaredProcessingTime(long procTimeNs) {
+        sumSquaredProcTimeNs2 += (Math.pow(procTimeNs, 2));
+    }
+    
+    private void calculateMinProcTime(long curTimeNs) {
+        if (curTimeNs < minProcTimeNs)
+            minProcTimeNs = curTimeNs;
+    }
+    
+    private void calculateMaxProcTime(long curTimeNs) {
+        if (curTimeNs > maxProcTimeNs)
+            maxProcTimeNs = curTimeNs;
+    }
+    
+    public void computeSigma() {
+        // Computes std. deviation from the sum of count numbers and from
+        // the sum of the squares of count numbers
+        double temp = totalProcTimeNs;
+        temp = Math.pow(temp, 2) / pktCnt;
+        temp = (sumSquaredProcTimeNs2 - temp) / pktCnt;
+        sigmaProcTimeNs = (long) Math.sqrt(temp);
+    }
+    
+    public void updatePerPacketCounters(long procTimeNs) {
+        increasePktCount();
+        updateTotalProcessingTime(procTimeNs);
+        calculateMinProcTime(procTimeNs);
+        calculateMaxProcTime(procTimeNs);
+        updateAvgProcessTime();
+        updateSquaredProcessingTime(procTimeNs);
+    }
+    
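+    // CumulativeTimeBucket keys its compStats map by this value, which is the
+    // listener's hashCode captured in the constructor.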
+    @Override
+    public int hashCode() {
+        return compId;
+    }
+}
\ No newline at end of file
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/perfmon/PerfMonDataResource.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/perfmon/PerfMonDataResource.java
new file mode 100644
index 0000000..c43708d
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/perfmon/PerfMonDataResource.java
@@ -0,0 +1,33 @@
+package net.floodlightcontroller.perfmon;
+
+import org.restlet.data.Status;
+import org.restlet.resource.Get;
+import org.restlet.resource.ServerResource;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+
+/**
+ * Return the performance monitoring data for the get rest api call
+ * @author subrata
+ */
+public class PerfMonDataResource extends ServerResource {
+    protected static Logger logger = LoggerFactory.getLogger(PerfMonDataResource.class);  
+    
+    @Get("json")
+    public CumulativeTimeBucket handleApiQuery() {        
+        IPktInProcessingTimeService pktinProcTime = 
+            (IPktInProcessingTimeService)getContext().getAttributes().
+                get(IPktInProcessingTimeService.class.getCanonicalName());
+        
+        setStatus(Status.SUCCESS_OK, "OK");
+        // Allocate output object
+        if (pktinProcTime.isEnabled()) {
+            CumulativeTimeBucket ctb = pktinProcTime.getCtb();
+            ctb.computeAverages();
+            return ctb;
+        }
+        
+        return null;
+    }
+}
\ No newline at end of file
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/perfmon/PerfMonToggleResource.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/perfmon/PerfMonToggleResource.java
new file mode 100644
index 0000000..9ea1876
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/perfmon/PerfMonToggleResource.java
@@ -0,0 +1,28 @@
+package net.floodlightcontroller.perfmon;
+
+import org.restlet.data.Status;
+import org.restlet.resource.Get;
+import org.restlet.resource.ServerResource;
+
+public class PerfMonToggleResource extends ServerResource {
+    
+    @Get("json")
+    public String retrieve() {
+        IPktInProcessingTimeService pktinProcTime = 
+                (IPktInProcessingTimeService)getContext().getAttributes().
+                    get(IPktInProcessingTimeService.class.getCanonicalName());
+        
+        String param = ((String)getRequestAttributes().get("perfmonstate")).toLowerCase();
+        if (param.equals("reset")) {
+            pktinProcTime.getCtb().reset();
+        } else {
+            if (param.equals("enable") || param.equals("true")) {
+                pktinProcTime.setEnabled(true);
+            } else if (param.equals("disable") || param.equals("false")) {
+                pktinProcTime.setEnabled(false);
+            }
+        }
+        setStatus(Status.SUCCESS_OK, "OK");
+        return "{ \"enabled\" : " + pktinProcTime.isEnabled() + " }";
+    }
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/perfmon/PerfWebRoutable.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/perfmon/PerfWebRoutable.java
new file mode 100644
index 0000000..ace0bc8
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/perfmon/PerfWebRoutable.java
@@ -0,0 +1,23 @@
+package net.floodlightcontroller.perfmon;
+
+import org.restlet.Context;
+import org.restlet.Restlet;
+import org.restlet.routing.Router;
+
+import net.floodlightcontroller.restserver.RestletRoutable;
+
+public class PerfWebRoutable implements RestletRoutable {
+
+    @Override
+    public Restlet getRestlet(Context context) {
+        Router router = new Router(context);
+        router.attach("/data/json", PerfMonDataResource.class);
+        router.attach("/{perfmonstate}/json", PerfMonToggleResource.class); // enable, disable, or reset
+        return router;
+    }
+
+    @Override
+    public String basePath() {
+        return "/wm/performance";
+    }
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/perfmon/PktInProcessingTime.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/perfmon/PktInProcessingTime.java
new file mode 100644
index 0000000..639623b
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/perfmon/PktInProcessingTime.java
@@ -0,0 +1,205 @@
+/**
+ * Performance monitoring package
+ */
+package net.floodlightcontroller.perfmon;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import net.floodlightcontroller.core.FloodlightContext;
+import net.floodlightcontroller.core.IOFMessageListener;
+import net.floodlightcontroller.core.IOFSwitch;
+import net.floodlightcontroller.core.annotations.LogMessageCategory;
+import net.floodlightcontroller.core.annotations.LogMessageDoc;
+import net.floodlightcontroller.core.module.FloodlightModuleContext;
+import net.floodlightcontroller.core.module.FloodlightModuleException;
+import net.floodlightcontroller.core.module.IFloodlightModule;
+import net.floodlightcontroller.core.module.IFloodlightService;
+import net.floodlightcontroller.restserver.IRestApiService;
+
+import org.openflow.protocol.OFMessage;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * This class contains a set of buckets (called time buckets, as they
+ * primarily contain 'times') that are used in a circular way to
+ * store information on packet-in processing time.
+ * Each bucket is meant to store the various processing time 
+ * related data for a fixed duration.
+ * Buckets are reused to reduce garbage generation! Once the
+ * last bucket is used up the LRU bucket is reused.
+ * 
+ * Naming convention for variable or constants
+ * variable_s : value in seconds
+ * variable_ms: value in milliseconds
+ * variable_us: value in microseconds
+ * variable_ns: value in nanoseconds
+ * 
+ * Key Constants:
+ * ONE_BUCKET_DURATION_SECONDS_INT:  time duration of each bucket
+ * BUCKET_SET_SIZE: Number of buckets
+ * TOT_PROC_TIME_WARN_THRESHOLD_US: if processing time for a packet
+ *    exceeds this threshold then a warning LOG message is generated
+ * TOT_PROC_TIME_ALERT_THRESHOLD_US: same as above but an alert level
+ *    syslog is generated instead
+ * 
+ */
+@LogMessageCategory("Performance Monitoring")
+public class PktInProcessingTime
+    implements IFloodlightModule, IPktInProcessingTimeService {
+
+    
+    // Our dependencies
+    private IRestApiService restApi;
+    
+    protected long ptWarningThresholdInNano;
+
+    // DB storage tables
+    protected static final String ControllerTableName = "controller_controller";
+    public static final String COLUMN_ID = "id";
+    public static final String COLUMN_PERF_MON = "performance_monitor_feature";
+    
+    protected static  Logger  logger = 
+        LoggerFactory.getLogger(PktInProcessingTime.class);
+    
+    protected boolean isEnabled = false;
+    protected boolean isInited = false;
+    // Maintains the time when the last packet was processed
+    protected long lastPktTime_ns;
+    private CumulativeTimeBucket ctb = null;
+
+    
+    /**
+     * BUCKET_SET_SIZE buckets, each holding 10s of processing time data;
+     * in total 30*10s = 5 minutes of processing time data is maintained
+     */
+    protected static final int ONE_BUCKET_DURATION_SECONDS = 10;// seconds
+    protected static final long ONE_BUCKET_DURATION_NANOSECONDS  =
+                                ONE_BUCKET_DURATION_SECONDS * 1000000000L;
+    
+    @Override
+    public void bootstrap(List<IOFMessageListener> listeners) {
+        if (!isInited) {
+            ctb = new CumulativeTimeBucket(listeners);
+            isInited = true;
+        }
+    }
+    
+    @Override
+    public boolean isEnabled() {
+        return isEnabled && isInited;
+    }
+    
+    @Override
+    public void setEnabled(boolean enabled) {
+        this.isEnabled = enabled;
+        logger.debug("Setting module to " + isEnabled);
+    }
+    
+    @Override
+    public CumulativeTimeBucket getCtb() {
+        return ctb;
+    }
+    
+    private long startTimePktNs;
+    private long startTimeCompNs;
+    @Override
+    public void recordStartTimeComp(IOFMessageListener listener) {
+        if (isEnabled()) {
+            startTimeCompNs = System.nanoTime();
+        }
+    }
+    
+    @Override
+    public void recordEndTimeComp(IOFMessageListener listener) {
+        if (isEnabled()) {
+            long procTime = System.nanoTime() - startTimeCompNs;
+            ctb.updateOneComponent(listener, procTime);
+        }
+    }
+    
+    @Override
+    public void recordStartTimePktIn() {
+        if (isEnabled()) {
+            startTimePktNs = System.nanoTime();
+        }
+    }
+    
+    @Override
+    @LogMessageDoc(level="WARN",
+            message="Time to process packet-in exceeded threshold: {}",
+            explanation="Time to process packet-in exceeded the configured " +
+            		"performance threshold",
+            recommendation=LogMessageDoc.CHECK_CONTROLLER)
+    public void recordEndTimePktIn(IOFSwitch sw, OFMessage m, FloodlightContext cntx) {
+        if (isEnabled()) {
+            long procTimeNs = System.nanoTime() - startTimePktNs;
+            ctb.updatePerPacketCounters(procTimeNs);
+            
+            if (ptWarningThresholdInNano > 0 && 
+                    procTimeNs > ptWarningThresholdInNano) {
+                logger.warn("Time to process packet-in exceeded threshold: {}", 
+                            procTimeNs/1000);
+            }
+        }
+    }
+    
+    // IFloodlightModule methods
+    
+    @Override
+    public Collection<Class<? extends IFloodlightService>> getModuleServices() {
+        Collection<Class<? extends IFloodlightService>> l = 
+                new ArrayList<Class<? extends IFloodlightService>>();
+        l.add(IPktInProcessingTimeService.class);
+        return l;
+    }
+    
+    @Override
+    public Map<Class<? extends IFloodlightService>, IFloodlightService>
+            getServiceImpls() {
+        Map<Class<? extends IFloodlightService>,
+        IFloodlightService> m = 
+            new HashMap<Class<? extends IFloodlightService>,
+                        IFloodlightService>();
+        // We are the class that implements the service
+        m.put(IPktInProcessingTimeService.class, this);
+        return m;
+    }
+    
+    @Override
+    public Collection<Class<? extends IFloodlightService>> getModuleDependencies() {
+        Collection<Class<? extends IFloodlightService>> l = 
+                new ArrayList<Class<? extends IFloodlightService>>();
+        l.add(IRestApiService.class);
+        return l;
+    }
+    
+    @Override
+    public void init(FloodlightModuleContext context)
+                                             throws FloodlightModuleException {
+        restApi = context.getServiceImpl(IRestApiService.class);
+    }
+    
+    @Override
+    @LogMessageDoc(level="INFO",
+        message="Packet processing time threshold for warning" +
+            " set to {time} ms.",
+        explanation="Performance monitoring will log a warning if " +
+    		"packet processing time exceeds the configured threshold")
+    public void startUp(FloodlightModuleContext context) {
+        // Add our REST API
+        restApi.addRestletRoutable(new PerfWebRoutable());
+        
+        // TODO - Alex - change this to a config option
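+        // The system property is specified in milliseconds and converted to
+        // nanoseconds so it can be compared against System.nanoTime() deltas.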
+        ptWarningThresholdInNano = Long.parseLong(System.getProperty(
+             "net.floodlightcontroller.core.PTWarningThresholdInMilli", "0")) * 1000000;
+        if (ptWarningThresholdInNano > 0) {
+            logger.info("Packet processing time threshold for warning" +
+            		" set to {} ms.", ptWarningThresholdInNano/1000000);
+        }
+    }
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/restserver/IRestApiService.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/restserver/IRestApiService.java
new file mode 100644
index 0000000..d906795
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/restserver/IRestApiService.java
@@ -0,0 +1,16 @@
+package net.floodlightcontroller.restserver;
+
+import net.floodlightcontroller.core.module.IFloodlightService;
+
+public interface IRestApiService extends IFloodlightService {
+    /**
+     * Adds a REST API
+     * @param routable
+     */
+    public void addRestletRoutable(RestletRoutable routable);
+
+    /**
+     * Runs the REST API server
+     */
+    public void run();
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/restserver/RestApiServer.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/restserver/RestApiServer.java
new file mode 100644
index 0000000..2ca8483
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/restserver/RestApiServer.java
@@ -0,0 +1,188 @@
+package net.floodlightcontroller.restserver;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.restlet.Application;
+import org.restlet.Component;
+import org.restlet.Context;
+import org.restlet.Request;
+import org.restlet.Response;
+import org.restlet.Restlet;
+import org.restlet.data.Protocol;
+import org.restlet.data.Reference;
+import org.restlet.data.Status;
+import org.restlet.ext.jackson.JacksonRepresentation;
+import org.restlet.representation.Representation;
+import org.restlet.routing.Filter;
+import org.restlet.routing.Router;
+import org.restlet.routing.Template;
+import org.restlet.service.StatusService;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import net.floodlightcontroller.core.module.FloodlightModuleContext;
+import net.floodlightcontroller.core.module.FloodlightModuleException;
+import net.floodlightcontroller.core.module.IFloodlightModule;
+import net.floodlightcontroller.core.module.IFloodlightService;
+
+public class RestApiServer
+    implements IFloodlightModule, IRestApiService {
+    protected static Logger logger = LoggerFactory.getLogger(RestApiServer.class);
+    protected List<RestletRoutable> restlets;
+    protected FloodlightModuleContext fmlContext;
+    protected int restPort = 8080;
+    
+    // ***********
+    // Application
+    // ***********
+    
+    protected class RestApplication extends Application {
+        protected Context context;
+        
+        public RestApplication() {
+            super(new Context());
+            this.context = getContext();
+        }
+        
+        @Override
+        public Restlet createInboundRoot() {
+            Router baseRouter = new Router(context);
+            baseRouter.setDefaultMatchingMode(Template.MODE_STARTS_WITH);
+            for (RestletRoutable rr : restlets) {
+                baseRouter.attach(rr.basePath(), rr.getRestlet(context));
+            }
+
+            Filter slashFilter = new Filter() {            
+                @Override
+                protected int beforeHandle(Request request, Response response) {
+                    Reference ref = request.getResourceRef();
+                    String originalPath = ref.getPath();
+                    if (originalPath.contains("//"))
+                    {
+                        String newPath = originalPath.replaceAll("/+", "/");
+                        ref.setPath(newPath);
+                    }
+                    return Filter.CONTINUE;
+                }
+
+            };
+            slashFilter.setNext(baseRouter);
+            
+            return slashFilter;
+        }
+        
+        public void run(FloodlightModuleContext fmlContext, int restPort) {
+            setStatusService(new StatusService() {
+                @Override
+                public Representation getRepresentation(Status status,
+                                                        Request request,
+                                                        Response response) {
+                    return new JacksonRepresentation<Status>(status);
+                }                
+            });
+            
+            // Add every service in the module context to the Restlet context attributes
+            for (Class<? extends IFloodlightService> s : fmlContext.getAllServices()) {
+                if (logger.isTraceEnabled()) {
+                    logger.trace("Adding {} for service {} into context",
+                                 s.getCanonicalName(), fmlContext.getServiceImpl(s));
+                }
+                context.getAttributes().put(s.getCanonicalName(), 
+                                            fmlContext.getServiceImpl(s));
+            }
+            
+            // Start listening for REST requests
+            try {
+                final Component component = new Component();
+                component.getServers().add(Protocol.HTTP, restPort);
+                component.getClients().add(Protocol.CLAP);
+                component.getDefaultHost().attach(this);
+                component.start();
+            } catch (Exception e) {
+                throw new RuntimeException(e);
+            }
+        }
+    }
+    
+    // ***************
+    // IRestApiService
+    // ***************
+    
+    @Override
+    public void addRestletRoutable(RestletRoutable routable) {
+        restlets.add(routable);
+    }
+
+    @Override
+    public void run() {
+        if (logger.isDebugEnabled()) {
+            StringBuffer sb = new StringBuffer();
+            sb.append("REST API routables: ");
+            for (RestletRoutable routable : restlets) {
+                sb.append(routable.getClass().getSimpleName());
+                sb.append(" (");
+                sb.append(routable.basePath());
+                sb.append("), ");
+            }
+            logger.debug(sb.toString());
+        }
+        
+        RestApplication restApp = new RestApplication();
+        restApp.run(fmlContext, restPort);
+    }
+    
+    // *****************
+    // IFloodlightModule
+    // *****************
+    
+    @Override
+    public Collection<Class<? extends IFloodlightService>> getModuleServices() {
+        Collection<Class<? extends IFloodlightService>> services =
+                new ArrayList<Class<? extends IFloodlightService>>(1);
+        services.add(IRestApiService.class);
+        return services;
+    }
+
+    @Override
+    public Map<Class<? extends IFloodlightService>, IFloodlightService>
+            getServiceImpls() {
+        Map<Class<? extends IFloodlightService>,
+        IFloodlightService> m = 
+            new HashMap<Class<? extends IFloodlightService>,
+                        IFloodlightService>();
+        m.put(IRestApiService.class, this);
+        return m;
+    }
+
+    @Override
+    public Collection<Class<? extends IFloodlightService>> getModuleDependencies() {
+        // We don't have any
+        return null;
+    }
+
+    @Override
+    public void init(FloodlightModuleContext context)
+            throws FloodlightModuleException {
+        // This has to be done here since we don't know what order the
+        // startUp methods will be called in
+        this.restlets = new ArrayList<RestletRoutable>();
+        this.fmlContext = context;
+        
+        // read our config options
+        Map<String, String> configOptions = context.getConfigParams(this);
+        String port = configOptions.get("port");
+        if (port != null) {
+            restPort = Integer.parseInt(port);
+        }
+        logger.debug("REST port set to {}", restPort);
+    }
+
+    @Override
+    public void startUp(FloodlightModuleContext context) {
+        // no-op
+    }
+}
\ No newline at end of file
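RestApplication.run() above copies every registered service implementation into the Restlet context attributes, keyed by the service interface's canonical class name; that is how individual resources reach controller state. A hedged sketch of a resource using that mechanism (ExampleResource is hypothetical and purely illustrative):

    package net.floodlightcontroller.restserver;

    import org.restlet.resource.Get;
    import org.restlet.resource.ServerResource;

    // Illustrative only: look up a Floodlight service from the Restlet context attributes.
    public class ExampleResource extends ServerResource {
        @Get("json")
        public String retrieve() {
            IRestApiService rest = (IRestApiService) getContext()
                    .getAttributes()
                    .get(IRestApiService.class.getCanonicalName());
            return rest != null ? "rest api service present" : "service missing";
        }
    }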
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/restserver/RestletRoutable.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/restserver/RestletRoutable.java
new file mode 100644
index 0000000..cb7dfce
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/restserver/RestletRoutable.java
@@ -0,0 +1,40 @@
+/**
+*    Copyright 2011, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.restserver;
+
+import org.restlet.Context;
+import org.restlet.Restlet;
+
+/**
+ * Register a set of REST resources with the central controller
+ * @author readams
+ */
+public interface RestletRoutable {
+    /**
+     * Get the restlet that will map to the resources
+     * @param context the context for constructing the restlet
+     * @return the restlet
+     */
+    Restlet getRestlet(Context context);
+    
+    /**
+     * Get the base path URL where the router should be registered
+     * @return the base path URL where the router should be registered
+     */
+    String basePath();
+}
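A hedged sketch of implementing this interface: a Router is built against the supplied context and resources are attached relative to basePath(). It reuses the hypothetical ExampleResource sketched after the RestApiServer diff above; the paths are made up.

    package net.floodlightcontroller.restserver;

    import org.restlet.Context;
    import org.restlet.Restlet;
    import org.restlet.routing.Router;

    // Illustrative RestletRoutable; not part of this import.
    public class ExampleWebRoutable implements RestletRoutable {
        @Override
        public Restlet getRestlet(Context context) {
            Router router = new Router(context);
            router.attach("/status/json", ExampleResource.class);
            return router;
        }

        @Override
        public String basePath() {
            // RestApiServer attaches this router under the returned prefix.
            return "/wm/example";
        }
    }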
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/routing/BroadcastTree.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/routing/BroadcastTree.java
new file mode 100644
index 0000000..0c3703c
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/routing/BroadcastTree.java
@@ -0,0 +1,67 @@
+/**
+*    Copyright 2011, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.routing;
+import java.util.HashMap;
+
+import net.floodlightcontroller.routing.Link;
+
+import org.openflow.util.HexString;
+
+public class BroadcastTree {
+    protected HashMap<Long, Link> links;
+    protected HashMap<Long, Integer> costs;
+
+    public BroadcastTree() {
+        links = new HashMap<Long, Link>();
+        costs = new HashMap<Long, Integer>();
+    }
+
+    public BroadcastTree(HashMap<Long, Link> links, HashMap<Long, Integer> costs) {
+        this.links = links;
+        this.costs = costs;
+    }
+
+    public Link getTreeLink(long node) {
+        return links.get(node);
+    }
+
+    public int getCost(long node) {
+        if (costs.get(node) == null) return -1;
+        return (costs.get(node));
+    }
+
+    public HashMap<Long, Link> getLinks() {
+        return links;
+    }
+
+    public void addTreeLink(long myNode, Link link) {
+        links.put(myNode, link);
+    }
+
+    public String toString() {
+        StringBuffer sb = new StringBuffer();
+        for(long n: links.keySet()) {
+            sb.append("[" + HexString.toHexString(n) + ": cost=" + costs.get(n) + ", " + links.get(n) + "]");
+        }
+        return sb.toString();
+    }
+
+    public HashMap<Long, Integer> getCosts() {
+        return costs;
+    }
+}
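A small, hypothetical usage sketch of this class (the DPIDs, ports, and costs are made up):

    import java.util.HashMap;

    import net.floodlightcontroller.routing.BroadcastTree;
    import net.floodlightcontroller.routing.Link;

    // Illustrative only: build a tiny broadcast tree and read it back.
    public class BroadcastTreeExample {
        public static void main(String[] args) {
            HashMap<Long, Link> links = new HashMap<Long, Link>();
            HashMap<Long, Integer> costs = new HashMap<Long, Integer>();

            // Associate node 2 with a tree link (sw1 port 1 -> sw2 port 1) and a cost of 1.
            links.put(2L, new Link(1L, (short) 1, 2L, (short) 1));
            costs.put(2L, 1);

            BroadcastTree tree = new BroadcastTree(links, costs);
            System.out.println(tree.getCost(2L));      // 1
            System.out.println(tree.getCost(99L));     // -1 for an unknown node
            System.out.println(tree.getTreeLink(2L));  // the Link stored above
        }
    }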
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/routing/ForwardingBase.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/routing/ForwardingBase.java
new file mode 100644
index 0000000..22312c1
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/routing/ForwardingBase.java
@@ -0,0 +1,692 @@
+/**
+ *    Copyright 2011, Big Switch Networks, Inc. 
+ *    Originally created by David Erickson, Stanford University
+ * 
+ *    Licensed under the Apache License, Version 2.0 (the "License"); you may
+ *    not use this file except in compliance with the License. You may obtain
+ *    a copy of the License at
+ *
+ *         http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *    Unless required by applicable law or agreed to in writing, software
+ *    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ *    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ *    License for the specific language governing permissions and limitations
+ *    under the License.
+ **/
+
+package net.floodlightcontroller.routing;
+
+import java.io.IOException;
+import java.util.EnumSet;
+import java.util.ArrayList;
+import java.util.Comparator;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Set;
+
+import net.floodlightcontroller.core.FloodlightContext;
+import net.floodlightcontroller.core.IFloodlightProviderService;
+import net.floodlightcontroller.core.IOFMessageListener;
+import net.floodlightcontroller.core.IOFSwitch;
+import net.floodlightcontroller.core.annotations.LogMessageCategory;
+import net.floodlightcontroller.core.annotations.LogMessageDoc;
+import net.floodlightcontroller.core.annotations.LogMessageDocs;
+import net.floodlightcontroller.core.util.AppCookie;
+import net.floodlightcontroller.counter.ICounterStoreService;
+import net.floodlightcontroller.devicemanager.IDevice;
+import net.floodlightcontroller.devicemanager.IDeviceListener;
+import net.floodlightcontroller.devicemanager.IDeviceService;
+import net.floodlightcontroller.devicemanager.SwitchPort;
+import net.floodlightcontroller.packet.Ethernet;
+import net.floodlightcontroller.packet.IPacket;
+import net.floodlightcontroller.routing.IRoutingService;
+import net.floodlightcontroller.routing.IRoutingDecision;
+import net.floodlightcontroller.routing.Route;
+import net.floodlightcontroller.topology.ITopologyService;
+import net.floodlightcontroller.topology.NodePortTuple;
+import net.floodlightcontroller.util.OFMessageDamper;
+import net.floodlightcontroller.util.TimedCache;
+
+import org.openflow.protocol.OFFlowMod;
+import org.openflow.protocol.OFMatch;
+import org.openflow.protocol.OFMessage;
+import org.openflow.protocol.OFPacketIn;
+import org.openflow.protocol.OFPacketOut;
+import org.openflow.protocol.OFType;
+import org.openflow.protocol.action.OFAction;
+import org.openflow.protocol.action.OFActionOutput;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Abstract base class for implementing a forwarding module.  Forwarding is
+ * responsible for programming flows to a switch in response to a policy
+ * decision.
+ */
+@LogMessageCategory("Flow Programming")
+public abstract class ForwardingBase 
+    implements IOFMessageListener, IDeviceListener {
+    
+    protected static Logger log =
+            LoggerFactory.getLogger(ForwardingBase.class);
+
+    protected static int OFMESSAGE_DAMPER_CAPACITY = 50000; // TODO: find sweet spot
+    protected static int OFMESSAGE_DAMPER_TIMEOUT = 250; // ms 
+
+    public static short FLOWMOD_DEFAULT_IDLE_TIMEOUT = 5; // in seconds
+    public static short FLOWMOD_DEFAULT_HARD_TIMEOUT = 0; // infinite
+    
+    protected IFloodlightProviderService floodlightProvider;
+    protected IDeviceService deviceManager;
+    protected IRoutingService routingEngine;
+    protected ITopologyService topology;
+    protected ICounterStoreService counterStore;
+    
+    protected OFMessageDamper messageDamper;
+    
+    // for broadcast loop suppression
+    protected boolean broadcastCacheFeature = true;
+    public final int prime1 = 2633;  // for hash calculation
+    public final static int prime2 = 4357;  // for hash calculation
+    public TimedCache<Long> broadcastCache =
+        new TimedCache<Long>(100, 5*1000);  // 5 seconds interval;
+
+    // flow-mod - for use in the cookie
+    public static final int FORWARDING_APP_ID = 2; // TODO: This must be managed
+                                                   // by a global APP_ID class
+    public long appCookie = AppCookie.makeCookie(FORWARDING_APP_ID, 0);
+    
+    // Comparator for sorting by SwitchCluster
+    public Comparator<SwitchPort> clusterIdComparator =
+            new Comparator<SwitchPort>() {
+                @Override
+                public int compare(SwitchPort d1, SwitchPort d2) {
+                    Long d1ClusterId = 
+                            topology.getL2DomainId(d1.getSwitchDPID());
+                    Long d2ClusterId = 
+                            topology.getL2DomainId(d2.getSwitchDPID());
+                    return d1ClusterId.compareTo(d2ClusterId);
+                }
+            };
+            
+    /**
+     * init data structures
+     * 
+     */
+    protected void init() {
+        messageDamper = new OFMessageDamper(OFMESSAGE_DAMPER_CAPACITY, 
+                                            EnumSet.of(OFType.FLOW_MOD),
+                                            OFMESSAGE_DAMPER_TIMEOUT);
+    }
+
+    /**
+     * Adds a listener for devicemanager and registers for PacketIns.
+     */
+    protected void startUp() {
+        deviceManager.addListener(this);
+        floodlightProvider.addOFMessageListener(OFType.PACKET_IN, this);
+    }
+
+    /**
+     * Returns the application name "forwarding".
+     */
+    @Override
+    public String getName() {
+        return "forwarding";
+    }
+
+    /**
+     * All subclasses must define this function if they want any specific
+     * forwarding action
+     * 
+     * @param sw
+     *            Switch that the packet came in from
+     * @param pi
+     *            The packet that came in
+     * @param decision
+     *            Any decision made by a policy engine
+     * @param cntx
+     *            The floodlight context for the packet-in
+     */
+    public abstract Command
+            processPacketInMessage(IOFSwitch sw, OFPacketIn pi,
+                                   IRoutingDecision decision,
+                                   FloodlightContext cntx);
+
+    @Override
+    public Command receive(IOFSwitch sw, OFMessage msg,
+                           FloodlightContext cntx) {
+        switch (msg.getType()) {
+            case PACKET_IN:
+                IRoutingDecision decision = null;
+                if (cntx != null)
+                     decision =
+                             IRoutingDecision.rtStore.get(cntx,
+                                                          IRoutingDecision.CONTEXT_DECISION);
+
+                return this.processPacketInMessage(sw,
+                                                   (OFPacketIn) msg,
+                                                   decision,
+                                                   cntx);
+            default:
+                break;
+        }
+        return Command.CONTINUE;
+    }
+
+    /**
+     * Pushes flow mods for a route to the switches along the path, working
+     * from the destination switch back to the source switch.
+     * @param route Route to push
+     * @param match OpenFlow fields to match on
+     * @param wildcard_hints wildcard bits to apply to the match, or null to
+     *        use the match unchanged
+     * @param pi the packet-in that triggered this route
+     * @param pinSwitch DPID of the switch that generated the packet-in
+     * @param cookie The cookie to set in each flow_mod
+     * @param cntx The floodlight context
+     * @param reqeustFlowRemovedNotifn if set to true then the source switch
+     *        will send a flow mod removal notification when the flow mod expires
+     * @param doFlush if set to true then the flow mod would be immediately
+     *        written to the switch
+     * @param flowModCommand flow mod command to use, e.g. OFFlowMod.OFPFC_ADD,
+     *        OFFlowMod.OFPFC_MODIFY etc.
+     * @return true if the source switch is included in this route
+     */
+    @LogMessageDocs({
+        @LogMessageDoc(level="WARN",
+            message="Unable to push route, switch at DPID {dpid} not available",
+            explanation="A switch along the calculated path for the " +
+                        "flow has disconnected.",
+            recommendation=LogMessageDoc.CHECK_SWITCH),
+        @LogMessageDoc(level="ERROR",
+            message="Failure writing flow mod",
+            explanation="An I/O error occurred while writing a " +
+                        "flow modification to a switch",
+            recommendation=LogMessageDoc.CHECK_SWITCH)            
+    })
+    public boolean pushRoute(Route route, OFMatch match, 
+                             Integer wildcard_hints,
+                             OFPacketIn pi,
+                             long pinSwitch,
+                             long cookie, 
+                             FloodlightContext cntx,
+                             boolean reqeustFlowRemovedNotifn,
+                             boolean doFlush,
+                             short   flowModCommand) {
+
+        boolean srcSwitchIncluded = false;
+        OFFlowMod fm =
+                (OFFlowMod) floodlightProvider.getOFMessageFactory()
+                                              .getMessage(OFType.FLOW_MOD);
+        OFActionOutput action = new OFActionOutput();
+        action.setMaxLength((short)0xffff);
+        List<OFAction> actions = new ArrayList<OFAction>();
+        actions.add(action);
+
+        fm.setIdleTimeout(FLOWMOD_DEFAULT_IDLE_TIMEOUT)
+            .setHardTimeout(FLOWMOD_DEFAULT_HARD_TIMEOUT)
+            .setBufferId(OFPacketOut.BUFFER_ID_NONE)
+            .setCookie(cookie)
+            .setCommand(flowModCommand)
+            .setMatch(match)
+            .setActions(actions)
+            .setLengthU(OFFlowMod.MINIMUM_LENGTH+OFActionOutput.MINIMUM_LENGTH);
+
+        List<NodePortTuple> switchPortList = route.getPath();
+
+        for (int indx = switchPortList.size()-1; indx > 0; indx -= 2) {
+            // indx and indx-1 will always have the same switch DPID.
+            long switchDPID = switchPortList.get(indx).getNodeId();
+            IOFSwitch sw = floodlightProvider.getSwitches().get(switchDPID);
+            if (sw == null) {
+                if (log.isWarnEnabled()) {
+                    log.warn("Unable to push route, switch at DPID {} " +
+                            "not available", switchDPID);
+                }
+                return srcSwitchIncluded;
+            }
+
+            // set the match.
+            fm.setMatch(wildcard(match, sw, wildcard_hints));
+
+            // set buffer id if it is the source switch
+            if (1 == indx) {
+                // Set the flag to request flow-mod removal notifications only for the
+                // source switch. The removal message is used to maintain the flow
+                // cache. Don't set the flag for ARP messages - TODO generalize check
+                if ((reqeustFlowRemovedNotifn)
+                        && (match.getDataLayerType() != Ethernet.TYPE_ARP)) {
+                    fm.setFlags(OFFlowMod.OFPFF_SEND_FLOW_REM);
+                    match.setWildcards(fm.getMatch().getWildcards());
+                }
+            }
+
+            short outPort = switchPortList.get(indx).getPortId();
+            short inPort = switchPortList.get(indx-1).getPortId();
+            // set input and output ports on the switch
+            fm.getMatch().setInputPort(inPort);
+            ((OFActionOutput)fm.getActions().get(0)).setPort(outPort);
+
+            try {
+                counterStore.updatePktOutFMCounterStore(sw, fm);
+                if (log.isTraceEnabled()) {
+                    log.trace("Pushing Route flowmod routeIndx={} " + 
+                            "sw={} inPort={} outPort={}",
+                            new Object[] {indx,
+                                          sw,
+                                          fm.getMatch().getInputPort(),
+                                          outPort });
+                }
+                messageDamper.write(sw, fm, cntx);
+                if (doFlush) {
+                    sw.flush();
+                }
+
+                // Push the packet out the source switch
+                if (sw.getId() == pinSwitch) {
+                    // TODO: Instead of doing a packetOut here we could also 
+                    // send a flowMod with bufferId set.... 
+                    pushPacket(sw, match, pi, outPort, cntx);
+                    srcSwitchIncluded = true;
+                }
+            } catch (IOException e) {
+                log.error("Failure writing flow mod", e);
+            }
+
+            try {
+                fm = fm.clone();
+            } catch (CloneNotSupportedException e) {
+                log.error("Failure cloning flow mod", e);
+            }
+        }
+
+        return srcSwitchIncluded;
+    }
+
+    protected OFMatch wildcard(OFMatch match, IOFSwitch sw,
+                               Integer wildcard_hints) {
+        if (wildcard_hints != null) {
+            return match.clone().setWildcards(wildcard_hints.intValue());
+        }
+        return match.clone();
+    }
+    
+    /**
+     * Pushes a packet-out to a switch. If bufferId != BUFFER_ID_NONE we 
+     * assume that the packetOut switch is the same as the packetIn switch
+     * and we will use the bufferId 
+     * Caller needs to make sure that inPort and outPort differ
+     * @param packet    packet data to send
+     * @param sw        switch from which packet-out is sent
+     * @param bufferId  bufferId
+     * @param inPort    input port
+     * @param outPort   output port
+     * @param cntx      context of the packet
+     * @param flush     force to flush the packet.
+     */
+    @LogMessageDocs({
+        @LogMessageDoc(level="ERROR",
+            message="BufferId is not and packet data is null. " +
+                    "Cannot send packetOut. " +
+                    "srcSwitch={dpid} inPort={port} outPort={port}",
+            explanation="The switch send a malformed packet-in." +
+                        "The packet will be dropped",
+            recommendation=LogMessageDoc.REPORT_SWITCH_BUG),
+        @LogMessageDoc(level="ERROR",
+            message="Failure writing packet out",
+            explanation="An I/O error occurred while writing a " +
+                    "packet out to a switch",
+            recommendation=LogMessageDoc.CHECK_SWITCH)            
+    })
+    public void pushPacket(IPacket packet, 
+                           IOFSwitch sw,
+                           int bufferId,
+                           short inPort,
+                           short outPort, 
+                           FloodlightContext cntx,
+                           boolean flush) {
+        
+        
+        if (log.isTraceEnabled()) {
+            log.trace("PacketOut srcSwitch={} inPort={} outPort={}", 
+                      new Object[] {sw, inPort, outPort});
+        }
+
+        OFPacketOut po =
+                (OFPacketOut) floodlightProvider.getOFMessageFactory()
+                                                .getMessage(OFType.PACKET_OUT);
+
+        // set actions
+        List<OFAction> actions = new ArrayList<OFAction>();
+        actions.add(new OFActionOutput(outPort, (short) 0xffff));
+
+        po.setActions(actions)
+          .setActionsLength((short) OFActionOutput.MINIMUM_LENGTH);
+        short poLength =
+                (short) (po.getActionsLength() + OFPacketOut.MINIMUM_LENGTH);
+
+        // set buffer_id, in_port
+        po.setBufferId(bufferId);
+        po.setInPort(inPort);
+
+        // set data - only if buffer_id == -1
+        if (po.getBufferId() == OFPacketOut.BUFFER_ID_NONE) {
+            if (packet == null) {
+                log.error("BufferId is not set and packet data is null. " +
+                          "Cannot send packetOut. " +
+                        "srcSwitch={} inPort={} outPort={}",
+                        new Object[] {sw, inPort, outPort});
+                return;
+            }
+            byte[] packetData = packet.serialize();
+            poLength += packetData.length;
+            po.setPacketData(packetData);
+        }
+
+        po.setLength(poLength);
+
+        try {
+            counterStore.updatePktOutFMCounterStore(sw, po);
+            messageDamper.write(sw, po, cntx, flush);
+        } catch (IOException e) {
+            log.error("Failure writing packet out", e);
+        }
+    }
+
+    /**
+     * Pushes a packet-out to a switch.  The assumption here is that
+     * the packet-in was also generated from the same switch.  Thus, if the input
+     * port of the packet-in and the outport are the same, the function will not 
+     * push the packet-out.
+     * @param sw        switch that generated the packet-in, and from which packet-out is sent
+     * @param match     OFmatch
+     * @param pi        packet-in
+     * @param outport   output port
+     * @param cntx      context of the packet
+     */
+    protected void pushPacket(IOFSwitch sw, OFMatch match, OFPacketIn pi, 
+                           short outport, FloodlightContext cntx) {
+
+        if (pi == null) {
+            return;
+        } else if (pi.getInPort() == outport){
+            log.warn("Packet out not sent as the outport matches inport. {}",
+                     pi);
+            return;
+        }
+
+        // The assumption here is (sw) is the switch that generated the 
+        // packet-in. If the input port is the same as output port, then
+        // the packet-out should be ignored.
+        if (pi.getInPort() == outport) {
+            if (log.isDebugEnabled()) {
+                log.debug("Attempting to do packet-out to the same " +
+                          "interface as packet-in. Dropping packet. " +
+                          " SrcSwitch={}, match = {}, pi={}",
+                          new Object[]{sw, match, pi});
+            }
+            return;
+        }
+
+        if (log.isTraceEnabled()) {
+            log.trace("PacketOut srcSwitch={} match={} pi={}", 
+                      new Object[] {sw, match, pi});
+        }
+
+        OFPacketOut po =
+                (OFPacketOut) floodlightProvider.getOFMessageFactory()
+                                                .getMessage(OFType.PACKET_OUT);
+
+        // set actions
+        List<OFAction> actions = new ArrayList<OFAction>();
+        actions.add(new OFActionOutput(outport, (short) 0xffff));
+
+        po.setActions(actions)
+          .setActionsLength((short) OFActionOutput.MINIMUM_LENGTH);
+        short poLength =
+                (short) (po.getActionsLength() + OFPacketOut.MINIMUM_LENGTH);
+
+        // If the switch doesn't support buffering, set the buffer id to none;
+        // otherwise it'll be the buffer id of the PacketIn
+        if (sw.getBuffers() == 0) {
+            // We set the PI buffer id here so we don't have to check again below
+            pi.setBufferId(OFPacketOut.BUFFER_ID_NONE);
+            po.setBufferId(OFPacketOut.BUFFER_ID_NONE);
+        } else {
+            po.setBufferId(pi.getBufferId());
+        }
+
+        po.setInPort(pi.getInPort());
+
+        // If the buffer id is none or the switch doesn't support buffering,
+        // we send the data with the packet out
+        if (pi.getBufferId() == OFPacketOut.BUFFER_ID_NONE) {
+            byte[] packetData = pi.getPacketData();
+            poLength += packetData.length;
+            po.setPacketData(packetData);
+        }
+
+        po.setLength(poLength);
+
+        try {
+            counterStore.updatePktOutFMCounterStore(sw, po);
+            messageDamper.write(sw, po, cntx);
+        } catch (IOException e) {
+            log.error("Failure writing packet out", e);
+        }
+    }
+
+    
+    /**
+     * Writes a packet-out message to sw with an output action for each of
+     * the given output ports, using inPort as the packet-out's input port.
+     * @param packetData raw packet bytes to send
+     * @param sw switch to send the packet-out to
+     * @param inPort input port to set on the packet-out
+     * @param outPorts ports to output the packet on
+     * @param cntx the floodlight context
+     */
+    public void packetOutMultiPort(byte[] packetData,
+                                   IOFSwitch sw,
+                                   short inPort,
+                                   Set<Integer> outPorts,
+                                   FloodlightContext cntx) {
+        //setting actions
+        List<OFAction> actions = new ArrayList<OFAction>();
+
+        Iterator<Integer> j = outPorts.iterator();
+
+        while (j.hasNext())
+        {
+            actions.add(new OFActionOutput(j.next().shortValue(), 
+                                           (short) 0));
+        }
+
+        OFPacketOut po = 
+                (OFPacketOut) floodlightProvider.getOFMessageFactory().
+                getMessage(OFType.PACKET_OUT);
+        po.setActions(actions);
+        po.setActionsLength((short) (OFActionOutput.MINIMUM_LENGTH * 
+                outPorts.size()));
+
+        // set buffer-id to BUFFER_ID_NONE and in-port to the given inPort
+        po.setBufferId(OFPacketOut.BUFFER_ID_NONE);
+        po.setInPort(inPort);
+
+        // data (note buffer_id is always BUFFER_ID_NONE) and length
+        short poLength = (short)(po.getActionsLength() + 
+                OFPacketOut.MINIMUM_LENGTH);
+        poLength += packetData.length;
+        po.setPacketData(packetData);
+        po.setLength(poLength);
+
+        try {
+            counterStore.updatePktOutFMCounterStore(sw, po);
+            if (log.isTraceEnabled()) {
+                log.trace("write broadcast packet on switch-id={} " + 
+                        "interfaces={} packet-out={}",
+                        new Object[] {sw.getId(), outPorts, po});
+            }
+            messageDamper.write(sw, po, cntx);
+
+        } catch (IOException e) {
+            log.error("Failure writing packet out", e);
+        }
+    }
+    
+    /** 
+     * @see packetOutMultiPort
+     * Accepts a PacketIn instead of raw packet data. Note that the inPort
+     * and switch can be different from the packet-in's switch/port
+     */
+    public void packetOutMultiPort(OFPacketIn pi,
+                                   IOFSwitch sw,
+                                   short inPort,
+                                   Set<Integer> outPorts,
+                                   FloodlightContext cntx) {
+        packetOutMultiPort(pi.getPacketData(), sw, inPort, outPorts, cntx);
+    }
+    
+    /** 
+     * @see packetOutMultiPort
+     * Accepts an IPacket instead of raw packet data. Note that the inPort
+     * and switch can be different from the packet-in's switch/port
+     */
+    public void packetOutMultiPort(IPacket packet,
+                                   IOFSwitch sw,
+                                   short inPort,
+                                   Set<Integer> outPorts,
+                                   FloodlightContext cntx) {
+        packetOutMultiPort(packet.serialize(), sw, inPort, outPorts, cntx);
+    }
+
+    protected boolean isInBroadcastCache(IOFSwitch sw, OFPacketIn pi,
+                        FloodlightContext cntx) {
+        // Get the cluster id of the switch.
+        // Get the hash of the Ethernet packet.
+        if (sw == null) return true;  
+        
+        // If the feature is disabled, always return false;
+        if (!broadcastCacheFeature) return false;
+
+        Ethernet eth = 
+            IFloodlightProviderService.bcStore.get(cntx,
+                IFloodlightProviderService.CONTEXT_PI_PAYLOAD);
+        
+        Long broadcastHash;
+        broadcastHash = topology.getL2DomainId(sw.getId()) * prime1 +
+                        pi.getInPort() * prime2 + eth.hashCode();
+        if (broadcastCache.update(broadcastHash)) {
+            sw.updateBroadcastCache(broadcastHash, pi.getInPort());
+            return true;
+        } else {
+            return false;
+        }
+    }
+
+    protected boolean isInSwitchBroadcastCache(IOFSwitch sw, OFPacketIn pi, FloodlightContext cntx) {
+        if (sw == null) return true;
+        
+        // If the feature is disabled, always return false;
+        if (!broadcastCacheFeature) return false;
+
+        // Get the hash of the Ethernet packet.
+        Ethernet eth =
+                IFloodlightProviderService.bcStore.get(cntx, IFloodlightProviderService.CONTEXT_PI_PAYLOAD);
+
+        long hash =  pi.getInPort() * prime2 + eth.hashCode();
+
+        // some FORWARD_OR_FLOOD packets are unicast with unknown destination mac
+        return sw.updateBroadcastCache(hash, pi.getInPort());
+    }
+
+    @LogMessageDocs({
+        @LogMessageDoc(level="ERROR",
+            message="Failure writing deny flow mod",
+            explanation="An I/O error occurred while writing a " +
+                    "deny flow mod to a switch",
+            recommendation=LogMessageDoc.CHECK_SWITCH)            
+    })
+    public static boolean
+            blockHost(IFloodlightProviderService floodlightProvider,
+                      SwitchPort sw_tup, long host_mac,
+                      short hardTimeout, long cookie) {
+
+        if (sw_tup == null) {
+            return false;
+        }
+
+        IOFSwitch sw = 
+                floodlightProvider.getSwitches().get(sw_tup.getSwitchDPID());
+        if (sw == null) return false;
+        int inputPort = sw_tup.getPort();
+        log.debug("blockHost sw={} port={} mac={}",
+                  new Object[] { sw, sw_tup.getPort(), new Long(host_mac) });
+
+        // Create flow-mod based on packet-in and src-switch
+        OFFlowMod fm =
+                (OFFlowMod) floodlightProvider.getOFMessageFactory()
+                                              .getMessage(OFType.FLOW_MOD);
+        OFMatch match = new OFMatch();
+        List<OFAction> actions = new ArrayList<OFAction>(); // Set no action to
+                                                            // drop
+        match.setDataLayerSource(Ethernet.toByteArray(host_mac))
+             .setInputPort((short)inputPort)
+             .setWildcards(OFMatch.OFPFW_ALL & ~OFMatch.OFPFW_DL_SRC
+                     & ~OFMatch.OFPFW_IN_PORT);
+        fm.setCookie(cookie)
+          .setHardTimeout((short) hardTimeout)
+          .setIdleTimeout(FLOWMOD_DEFAULT_IDLE_TIMEOUT)
+          .setHardTimeout(FLOWMOD_DEFAULT_HARD_TIMEOUT)
+          .setBufferId(OFPacketOut.BUFFER_ID_NONE)
+          .setMatch(match)
+          .setActions(actions)
+          .setLengthU(OFFlowMod.MINIMUM_LENGTH); // +OFActionOutput.MINIMUM_LENGTH);
+
+        try {
+            log.debug("write drop flow-mod sw={} match={} flow-mod={}",
+                      new Object[] { sw, match, fm });
+            // TODO: can't use the message damper since this method is static
+            sw.write(fm, null);
+        } catch (IOException e) {
+            log.error("Failure writing deny flow mod", e);
+            return false;
+        }
+        return true;
+
+    }
+
+    @Override
+    public void deviceAdded(IDevice device) {
+        // NOOP
+    }
+
+    @Override
+    public void deviceRemoved(IDevice device) {
+        // NOOP
+    }
+
+    @Override
+    public void deviceMoved(IDevice device) {
+    }
+
+    @Override
+    public void deviceIPV4AddrChanged(IDevice device) {
+
+    }
+
+    @Override
+    public void deviceVlanChanged(IDevice device) {
+
+    }
+
+    @Override
+    public boolean isCallbackOrderingPrereq(OFType type, String name) {
+        return (type.equals(OFType.PACKET_IN) && 
+                (name.equals("topology") || 
+                 name.equals("devicemanager")));
+    }
+
+    @Override
+    public boolean isCallbackOrderingPostreq(OFType type, String name) {
+        return false;
+    }
+
+}
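A hedged sketch of a concrete subclass (not part of this import): receive() above extracts any IRoutingDecision from the context and hands it to processPacketInMessage(), so a minimal subclass only has to decide what to do with the packet-in. Module wiring (init/startUp, service lookups) is omitted here.

    package net.floodlightcontroller.routing;

    import net.floodlightcontroller.core.FloodlightContext;
    import net.floodlightcontroller.core.IOFSwitch;

    import org.openflow.protocol.OFPacketIn;

    // Illustrative only: honor DROP decisions, otherwise let later listeners run.
    public class MinimalForwarding extends ForwardingBase {
        @Override
        public Command processPacketInMessage(IOFSwitch sw, OFPacketIn pi,
                                              IRoutingDecision decision,
                                              FloodlightContext cntx) {
            if (decision != null &&
                decision.getRoutingAction() == IRoutingDecision.RoutingAction.DROP) {
                return Command.STOP;       // consume the packet-in
            }
            return Command.CONTINUE;       // pass it down the listener chain
        }
    }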
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/routing/IRoutingDecision.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/routing/IRoutingDecision.java
new file mode 100644
index 0000000..ed72706
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/routing/IRoutingDecision.java
@@ -0,0 +1,58 @@
+/**
+*    Copyright 2011, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.routing;
+
+import java.util.List;
+
+import net.floodlightcontroller.core.FloodlightContext;
+import net.floodlightcontroller.core.FloodlightContextStore;
+import net.floodlightcontroller.devicemanager.IDevice;
+import net.floodlightcontroller.devicemanager.SwitchPort;
+
+public interface IRoutingDecision {
+    public enum RoutingAction {
+        /*
+         * NONE:                    NO-OP, continue with the packet processing chain
+         * DROP:                    Drop this packet and this flow
+         * FORWARD:                 Forward this packet, and this flow, to the first (and only) device in getDestinationDevices();
+         *                          if the destination is not known at this time, initiate a discovery action for it (e.g. ARP)
+         * FORWARD_OR_FLOOD:        Forward this packet, and this flow, to the first (and only) device in getDestinationDevices();
+         *                          if the destination is not known at this time, flood this packet on the source switch
+         * BROADCAST:               Broadcast this packet on all links                         
+         * MULTICAST:               Multicast this packet to all the interfaces and devices attached
+         */
+        NONE, DROP, FORWARD, FORWARD_OR_FLOOD, BROADCAST, MULTICAST
+    }
+    
+    public static final FloodlightContextStore<IRoutingDecision> rtStore =
+        new FloodlightContextStore<IRoutingDecision>();
+    public static final String CONTEXT_DECISION = 
+            "net.floodlightcontroller.routing.decision";
+
+    public void addToContext(FloodlightContext cntx);
+    public RoutingAction getRoutingAction();
+    public void setRoutingAction(RoutingAction action);
+    public SwitchPort getSourcePort();
+    public IDevice getSourceDevice();
+    public List<IDevice> getDestinationDevices();
+    public void addDestinationDevice(IDevice d);
+    public List<SwitchPort> getMulticastInterfaces();
+    public void setMulticastInterfaces(List<SwitchPort> lspt);
+    public Integer getWildcards();
+    public void setWildcards(Integer wildcards);
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/routing/IRoutingService.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/routing/IRoutingService.java
new file mode 100644
index 0000000..fcd70ad
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/routing/IRoutingService.java
@@ -0,0 +1,49 @@
+/**
+*    Copyright 2011, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.routing;
+
+import net.floodlightcontroller.core.module.IFloodlightService;
+import net.floodlightcontroller.routing.Route;
+
+public interface IRoutingService extends IFloodlightService {
+
+    /** Provides a route between src and dst that allows tunnels. */
+    public Route getRoute(long src, long dst);
+
+    /** Provides a route between src and dst, with option to allow or 
+     *  not allow tunnels in the path.*/
+    public Route getRoute(long src, long dst, boolean tunnelEnabled);
+
+
+    public Route getRoute(long srcId, short srcPort, 
+                             long dstId, short dstPort);
+
+    public Route getRoute(long srcId, short srcPort, 
+                             long dstId, short dstPort, 
+                             boolean tunnelEnabled);
+
+    /** Check if a route exists between src and dst, including tunnel links
+     *  in the path.
+     */
+    public boolean routeExists(long src, long dst);
+
+    /** Check if a route exists between src and dst, with option to have
+     *  or not have tunnels as part of the path.
+     */
+    public boolean routeExists(long src, long dst, boolean tunnelEnabled);
+}
\ No newline at end of file
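A hedged sketch of querying this service (the routing engine reference would be obtained from the module context, as ForwardingBase does; the DPIDs are made up):

    package net.floodlightcontroller.routing;

    // Illustrative only: look up a route between two switches if one exists.
    public class RouteLookupExample {
        public static Route lookup(IRoutingService routingEngine,
                                   long srcDpid, long dstDpid) {
            if (!routingEngine.routeExists(srcDpid, dstDpid)) {
                return null;   // no path between the two switches right now
            }
            // The returned Route's path is a list of NodePortTuples; see
            // ForwardingBase.pushRoute() for how such a path becomes flow mods.
            return routingEngine.getRoute(srcDpid, dstDpid);
        }
    }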
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/routing/Link.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/routing/Link.java
new file mode 100755
index 0000000..7958596
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/routing/Link.java
@@ -0,0 +1,122 @@
+/**
+*    Copyright 2011, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.routing;
+
+import net.floodlightcontroller.core.web.serializers.DPIDSerializer;
+import net.floodlightcontroller.core.web.serializers.UShortSerializer;
+
+import org.codehaus.jackson.annotate.JsonProperty;
+import org.codehaus.jackson.map.annotate.JsonSerialize;
+import org.openflow.util.HexString;
+
+public class Link {
+    private long src;
+    private short srcPort;
+    private long dst;
+    private short dstPort;
+
+
+    public Link(long srcId, short srcPort, long dstId, short dstPort) {
+        this.src = srcId;
+        this.srcPort = srcPort;
+        this.dst = dstId;
+        this.dstPort = dstPort;
+    }
+
+    // Convenience method
+    public Link(long srcId, int srcPort, long dstId, int dstPort) {
+        this.src = srcId;
+        this.srcPort = (short) srcPort;
+        this.dst = dstId;
+        this.dstPort = (short) dstPort;
+    }
+
+    @JsonProperty("src-switch")
+    @JsonSerialize(using=DPIDSerializer.class)
+    public long getSrc() {
+        return src;
+    }
+
+    @JsonProperty("src-port")
+    @JsonSerialize(using=UShortSerializer.class)
+    public short getSrcPort() {
+        return srcPort;
+    }
+
+    @JsonProperty("dst-switch")
+    @JsonSerialize(using=DPIDSerializer.class)
+    public long getDst() {
+        return dst;
+    }
+    @JsonProperty("dst-port")
+    @JsonSerialize(using=UShortSerializer.class)
+    public short getDstPort() {
+        return dstPort;
+    }
+
+    @Override
+    public int hashCode() {
+        final int prime = 31;
+        int result = 1;
+        result = prime * result + (int) (dst ^ (dst >>> 32));
+        result = prime * result + dstPort;
+        result = prime * result + (int) (src ^ (src >>> 32));
+        result = prime * result + srcPort;
+        return result;
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (this == obj)
+            return true;
+        if (obj == null)
+            return false;
+        if (getClass() != obj.getClass())
+            return false;
+        Link other = (Link) obj;
+        if (dst != other.dst)
+            return false;
+        if (dstPort != other.dstPort)
+            return false;
+        if (src != other.src)
+            return false;
+        if (srcPort != other.srcPort)
+            return false;
+        return true;
+    }
+
+
+    @Override
+    public String toString() {
+        return "Link [src=" + HexString.toHexString(this.src) 
+                + " outPort="
+                + (srcPort & 0xffff)
+                + ", dst=" + HexString.toHexString(this.dst)
+                + ", inPort="
+                + (dstPort & 0xffff)
+                + "]";
+    }
+    
+    public String toKeyString() {
+    	return (HexString.toHexString(this.src) + "|" +
+    			(this.srcPort & 0xffff) + "|" +
+    			HexString.toHexString(this.dst) + "|" +
+    		    (this.dstPort & 0xffff) );
+    }
+}
+
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/routing/Route.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/routing/Route.java
new file mode 100755
index 0000000..211a924
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/routing/Route.java
@@ -0,0 +1,117 @@
+/**
+*    Copyright 2011, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.routing;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import net.floodlightcontroller.topology.NodePortTuple;
+
+/**
+ * Represents a route between two switches
+ *
+ * @author David Erickson (daviderickson@cs.stanford.edu)
+ */
+public class Route implements Comparable<Route> {
+    protected RouteId id;
+    protected List<NodePortTuple> switchPorts;
+
+    public Route(RouteId id, List<NodePortTuple> switchPorts) {
+        super();
+        this.id = id;
+        this.switchPorts = switchPorts;
+    }
+
+    public Route(Long src, Long dst) {
+        super();
+        this.id = new RouteId(src, dst);
+        this.switchPorts = new ArrayList<NodePortTuple>();
+    }
+
+    /**
+     * @return the id
+     */
+    public RouteId getId() {
+        return id;
+    }
+
+    /**
+     * @param id the id to set
+     */
+    public void setId(RouteId id) {
+        this.id = id;
+    }
+
+    /**
+     * @return the path
+     */
+    public List<NodePortTuple> getPath() {
+        return switchPorts;
+    }
+
+    /**
+     * @param path the path to set
+     */
+    public void setPath(List<NodePortTuple> switchPorts) {
+        this.switchPorts = switchPorts;
+    }
+
+    @Override
+    public int hashCode() {
+        final int prime = 5791;
+        int result = 1;
+        result = prime * result + ((id == null) ? 0 : id.hashCode());
+        result = prime * result + ((switchPorts == null) ? 0 : switchPorts.hashCode());
+        return result;
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (this == obj)
+            return true;
+        if (obj == null)
+            return false;
+        if (getClass() != obj.getClass())
+            return false;
+        Route other = (Route) obj;
+        if (id == null) {
+            if (other.id != null)
+                return false;
+        } else if (!id.equals(other.id))
+            return false;
+        if (switchPorts == null) {
+            if (other.switchPorts != null)
+                return false;
+        } else if (!switchPorts.equals(other.switchPorts))
+            return false;
+        return true;
+    }
+
+    @Override
+    public String toString() {
+        return "Route [id=" + id + ", switchPorts=" + switchPorts + "]";
+    }
+
+    /**
+     * Compares the path lengths between Routes.
+     */
+    @Override
+    public int compareTo(Route o) {
+        return ((Integer)switchPorts.size()).compareTo(o.switchPorts.size());
+    }
+}
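A hedged sketch of building a Route by hand (DPIDs and ports are made up). Per ForwardingBase.pushRoute(), the path is an alternating list of (in-port, out-port) NodePortTuples per switch, source switch first; the NodePortTuple(long, short) constructor is assumed from its usage elsewhere in this import.

    package net.floodlightcontroller.routing;

    import net.floodlightcontroller.topology.NodePortTuple;

    // Illustrative only: a two-switch route from DPID 1 to DPID 2.
    public class RouteBuildExample {
        public static Route twoSwitchRoute() {
            Route route = new Route(1L, 2L);
            route.getPath().add(new NodePortTuple(1L, (short) 1));  // sw1 in-port
            route.getPath().add(new NodePortTuple(1L, (short) 2));  // sw1 out-port
            route.getPath().add(new NodePortTuple(2L, (short) 3));  // sw2 in-port
            route.getPath().add(new NodePortTuple(2L, (short) 4));  // sw2 out-port
            return route;
        }
    }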
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/routing/RouteId.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/routing/RouteId.java
new file mode 100755
index 0000000..a550961
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/routing/RouteId.java
@@ -0,0 +1,102 @@
+/**
+*    Copyright 2011, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.routing;
+
+import org.openflow.util.HexString;
+
+/**
+ * Stores the endpoints of a route, in this case datapath ids
+ *
+ * @author David Erickson (daviderickson@cs.stanford.edu)
+ */
+public class RouteId implements Cloneable, Comparable<RouteId> {
+    protected Long src;
+    protected Long dst;
+
+    public RouteId(Long src, Long dst) {
+        super();
+        this.src = src;
+        this.dst = dst;
+    }
+
+    public Long getSrc() {
+        return src;
+    }
+
+    public void setSrc(Long src) {
+        this.src = src;
+    }
+
+    public Long getDst() {
+        return dst;
+    }
+
+    public void setDst(Long dst) {
+        this.dst = dst;
+    }
+
+    @Override
+    public int hashCode() {
+        final int prime = 2417;
+        int result = 1;
+        result = prime * result + ((dst == null) ? 0 : dst.hashCode());
+        result = prime * result + ((src == null) ? 0 : src.hashCode());
+        return result;
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (this == obj)
+            return true;
+        if (obj == null)
+            return false;
+        if (getClass() != obj.getClass())
+            return false;
+        RouteId other = (RouteId) obj;
+        if (dst == null) {
+            if (other.dst != null)
+                return false;
+        } else if (!dst.equals(other.dst))
+            return false;
+        if (src == null) {
+            if (other.src != null)
+                return false;
+        } else if (!src.equals(other.src))
+            return false;
+        return true;
+    }
+
+    @Override
+    public String toString() {
+        return "RouteId [src=" + HexString.toHexString(this.src) + " dst="
+                + HexString.toHexString(this.dst) + "]";
+    }
+
+    @Override
+    protected Object clone() throws CloneNotSupportedException {
+        return super.clone();
+    }
+
+    @Override
+    public int compareTo(RouteId o) {
+        int result = src.compareTo(o.getSrc());
+        if (result != 0)
+            return result;
+        return dst.compareTo(o.getDst());
+    }
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/routing/RoutingDecision.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/routing/RoutingDecision.java
new file mode 100644
index 0000000..5b32b23
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/routing/RoutingDecision.java
@@ -0,0 +1,97 @@
+package net.floodlightcontroller.routing;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+
+import net.floodlightcontroller.core.FloodlightContext;
+import net.floodlightcontroller.devicemanager.IDevice;
+import net.floodlightcontroller.devicemanager.SwitchPort;
+
+
+public class RoutingDecision implements IRoutingDecision {
+
+    protected RoutingAction action;
+    protected Integer wildcards;
+    protected SwitchPort srcPort;
+    protected IDevice srcDevice;
+    protected List<IDevice> destDevices;
+    protected List<SwitchPort> broadcastIntertfaces;
+
+    public RoutingDecision(long swDipd,
+                                  short inPort,
+                                  IDevice srcDevice,
+                                  RoutingAction action) {
+        this.srcPort = new SwitchPort(swDipd, inPort);
+        this.srcDevice = srcDevice;
+        this.destDevices = 
+                Collections.synchronizedList(new ArrayList<IDevice>());
+        this.broadcastIntertfaces = 
+                Collections.synchronizedList(new ArrayList<SwitchPort>());
+        this.action = action;
+        this.wildcards = null;
+    }
+    
+    @Override
+    public RoutingAction getRoutingAction() {
+        return this.action;
+    }
+    
+    @Override
+    public void setRoutingAction(RoutingAction action) {
+        this.action = action;
+    }
+    
+    @Override
+    public SwitchPort getSourcePort() {
+        return this.srcPort;
+    }
+    
+    @Override
+    public IDevice getSourceDevice() {
+        return this.srcDevice;
+    }
+    
+    @Override
+    public List<IDevice> getDestinationDevices() {
+        return this.destDevices;
+    }
+    
+    @Override
+    public void addDestinationDevice(IDevice d) {
+        if (!destDevices.contains(d)) {
+            destDevices.add(d);
+        }
+    }
+    
+    @Override
+    public void setMulticastInterfaces(List<SwitchPort> lspt) {
+        this.broadcastIntertfaces = lspt;
+    }
+    
+    @Override
+    public List<SwitchPort> getMulticastInterfaces() {
+        return this.broadcastIntertfaces;
+    }
+    
+    @Override
+    public Integer getWildcards() {
+        return this.wildcards;
+    }
+    
+    @Override
+    public void setWildcards(Integer wildcards) {
+        this.wildcards = wildcards;
+    }
+   
+    @Override
+    public void addToContext(FloodlightContext cntx) {
+        rtStore.put(cntx, IRoutingDecision.CONTEXT_DECISION, this);
+    }
+    
+    public String toString() {
+        return "action " + action +
+               " wildcard " +
+               ((wildcards == null) ? null : "0x"+Integer.toHexString(wildcards.intValue()));
+    }
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/staticflowentry/IStaticFlowEntryPusherService.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/staticflowentry/IStaticFlowEntryPusherService.java
new file mode 100644
index 0000000..66e02dd
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/staticflowentry/IStaticFlowEntryPusherService.java
@@ -0,0 +1,44 @@
+package net.floodlightcontroller.staticflowentry;
+
+import java.util.Map;
+
+import org.openflow.protocol.OFFlowMod;
+
+import net.floodlightcontroller.core.module.IFloodlightService;
+
+public interface IStaticFlowEntryPusherService extends IFloodlightService {
+    /**
+     * Adds a static flow.
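+     *
+     * A minimal usage sketch (hypothetical caller; "pusher" is assumed to be the
+     * IStaticFlowEntryPusherService obtained from the module context, and "log"
+     * an org.slf4j.Logger):
+     * <pre>
+     *   OFFlowMod fm = new OFFlowMod();
+     *   StaticFlowEntries.initDefaultFlowMod(fm, "flow-mod-1");
+     *   StaticFlowEntries.parseActionString(fm, "output=2", log);
+     *   pusher.addFlow("flow-mod-1", fm, "00:00:00:00:00:00:00:01");
+     * </pre>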
+     * @param name Name of the flow mod. Must be unique.
+     * @param fm The flow to push.
+     * @param swDpid The switch DPID to push it to, in 00:00:00:00:00:00:00:01 notation.
+     */
+    public void addFlow(String name, OFFlowMod fm, String swDpid);
+    
+    /**
+     * Deletes a static flow
+     * @param name The name of the static flow to delete.
+     */
+    public void deleteFlow(String name);
+    
+    /**
+     * Deletes all static flows for a particular switch
+     * @param dpid The DPID of the switch to delete flows for.
+     */
+    public void deleteFlowsForSwitch(long dpid);
+    
+    /**
+     * Deletes all flows.
+     */
+    public void deleteAllFlows();
+    
+    /**
+     * Gets a map of all static flows, keyed by switch DPID and then by flow name.
+     */
+    public Map<String, Map<String, OFFlowMod>> getFlows();
+    
+    /**
+     * Gets all static flows for a particular switch, keyed by flow name.
+     */
+    public Map<String, OFFlowMod> getFlows(String dpid);
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/staticflowentry/StaticFlowEntries.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/staticflowentry/StaticFlowEntries.java
new file mode 100644
index 0000000..ba28619
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/staticflowentry/StaticFlowEntries.java
@@ -0,0 +1,831 @@
+package net.floodlightcontroller.staticflowentry;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import net.floodlightcontroller.core.annotations.LogMessageCategory;
+import net.floodlightcontroller.core.annotations.LogMessageDoc;
+import net.floodlightcontroller.core.util.AppCookie;
+import net.floodlightcontroller.packet.IPv4;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+
+import org.codehaus.jackson.JsonParseException;
+import org.codehaus.jackson.JsonParser;
+import org.codehaus.jackson.JsonToken;
+import org.codehaus.jackson.map.MappingJsonFactory;
+import org.openflow.protocol.OFFlowMod;
+import org.openflow.protocol.OFMatch;
+import org.openflow.protocol.OFPacketOut;
+import org.openflow.protocol.OFPort;
+import org.openflow.protocol.action.OFAction;
+import org.openflow.protocol.action.OFActionDataLayerDestination;
+import org.openflow.protocol.action.OFActionDataLayerSource;
+import org.openflow.protocol.action.OFActionEnqueue;
+import org.openflow.protocol.action.OFActionNetworkLayerDestination;
+import org.openflow.protocol.action.OFActionNetworkLayerSource;
+import org.openflow.protocol.action.OFActionNetworkTypeOfService;
+import org.openflow.protocol.action.OFActionOutput;
+import org.openflow.protocol.action.OFActionStripVirtualLan;
+import org.openflow.protocol.action.OFActionTransportLayerDestination;
+import org.openflow.protocol.action.OFActionTransportLayerSource;
+import org.openflow.protocol.action.OFActionVirtualLanIdentifier;
+import org.openflow.protocol.action.OFActionVirtualLanPriorityCodePoint;
+import org.openflow.util.HexString;
+
+/**
+ * Represents static flow entries to be maintained by the controller on the 
+ * switches. 
+ */
+@LogMessageCategory("Static Flow Pusher")
+public class StaticFlowEntries {
+    protected static Logger log = LoggerFactory.getLogger(StaticFlowEntries.class);
+    
+    private static class SubActionStruct {
+        OFAction action;
+        int      len;
+    }
+    
+    private static byte[] zeroMac = new byte[] {0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
+    
+    /**
+     * Generates the cookie for a static flow entry: the application ID in the
+     * upper bits and a deterministic hash of the entry name in the lower bits.
+     * 
+     * @param fm
+     * @param userCookie
+     * @param name
+     * @return A cookie that encodes the application ID and a hash
+     */
+    public static long computeEntryCookie(OFFlowMod fm, int userCookie, String name) {
+        // flow-specific hash for the lower bits of the cookie, derived from the entry name
+        int prime = 211;
+        int flowHash = 2311;
+        for (int i=0; i < name.length(); i++)
+            flowHash = flowHash * prime + (int)name.charAt(i);
+        
+        return AppCookie.makeCookie(StaticFlowEntryPusher.STATIC_FLOW_APP_ID, flowHash);
+    }
+    
+    /**
+     * Sets defaults for an OFFlowMod
+     * @param fm The OFFlowMod to set defaults for
+     * @param entryName The name of the entry. Used to compute the cookie.
+     */
+    public static void initDefaultFlowMod(OFFlowMod fm, String entryName) {
+        fm.setIdleTimeout((short) 0);   // infinite
+        fm.setHardTimeout((short) 0);   // infinite
+        fm.setBufferId(OFPacketOut.BUFFER_ID_NONE);
+        fm.setCommand((short) 0);
+        fm.setFlags((short) 0);
+        fm.setOutPort(OFPort.OFPP_NONE.getValue());
+        fm.setCookie(computeEntryCookie(fm, 0, entryName));  
+        fm.setPriority(Short.MAX_VALUE);
+    }
+    
+    /**
+     * Gets the entry name of a flow mod
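+     * For example, given {"name": "flow-mod-1", "switch": "00:00:00:00:00:00:00:01"}
+     * this returns "flow-mod-1"; any other fields are ignored (illustrative sketch).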
+     * @param fmJson The OFFlowMod in a JSON representation
+     * @return The name of the OFFlowMod, null if not found
+     * @throws IOException If there was an error parsing the JSON
+     */
+    public static String getEntryNameFromJson(String fmJson) throws IOException{
+        MappingJsonFactory f = new MappingJsonFactory();
+        JsonParser jp;
+        
+        try {
+            jp = f.createJsonParser(fmJson);
+        } catch (JsonParseException e) {
+            throw new IOException(e);
+        }
+        
+        jp.nextToken();
+        if (jp.getCurrentToken() != JsonToken.START_OBJECT) {
+            throw new IOException("Expected START_OBJECT");
+        }
+        
+        while (jp.nextToken() != JsonToken.END_OBJECT) {
+            if (jp.getCurrentToken() != JsonToken.FIELD_NAME) {
+                throw new IOException("Expected FIELD_NAME");
+            }
+            
+            String n = jp.getCurrentName();
+            jp.nextToken();
+            if (jp.getText().equals("")) 
+                continue;
+            
+            if (n == "name")
+                return jp.getText();
+        }
+        
+        return null;
+    }
+    
+    /**
+     * Parses an OFFlowMod (and its inner OFMatch) to the storage entry format.
+     * @param fm The FlowMod to parse
+     * @param sw The switch the FlowMod is going to be installed on
+     * @param name The name of this static flow entry
+     * @return A Map representation of the storage entry 
+     */
+    public static Map<String, Object> flowModToStorageEntry(OFFlowMod fm, String sw, String name) {
+        Map<String, Object> entry = new HashMap<String, Object>();
+        OFMatch match = fm.getMatch();
+        entry.put(StaticFlowEntryPusher.COLUMN_NAME, name);
+        entry.put(StaticFlowEntryPusher.COLUMN_SWITCH, sw);
+        entry.put(StaticFlowEntryPusher.COLUMN_ACTIVE, Boolean.toString(true));
+        entry.put(StaticFlowEntryPusher.COLUMN_PRIORITY, Short.toString(fm.getPriority()));
+        entry.put(StaticFlowEntryPusher.COLUMN_WILDCARD, Integer.toString(match.getWildcards()));
+        
+        if ((fm.getActions() != null) && (fm.getActions().size() > 0))
+        	entry.put(StaticFlowEntryPusher.COLUMN_ACTIONS, StaticFlowEntries.flowModActionsToString(fm.getActions()));
+        
+        if (match.getInputPort() != 0)
+        	entry.put(StaticFlowEntryPusher.COLUMN_IN_PORT, Short.toString(match.getInputPort()));
+        
+        if (!Arrays.equals(match.getDataLayerSource(), zeroMac))
+        	entry.put(StaticFlowEntryPusher.COLUMN_DL_SRC, HexString.toHexString(match.getDataLayerSource()));
+
+        if (!Arrays.equals(match.getDataLayerDestination(), zeroMac))
+        	entry.put(StaticFlowEntryPusher.COLUMN_DL_DST, HexString.toHexString(match.getDataLayerDestination()));
+        
+        if (match.getDataLayerVirtualLan() != -1)
+        	entry.put(StaticFlowEntryPusher.COLUMN_DL_VLAN, Short.toString(match.getDataLayerVirtualLan()));
+        
+        if (match.getDataLayerVirtualLanPriorityCodePoint() != 0)
+        	entry.put(StaticFlowEntryPusher.COLUMN_DL_VLAN_PCP, Short.toString(match.getDataLayerVirtualLanPriorityCodePoint()));
+        
+        if (match.getDataLayerType() != 0)
+        	entry.put(StaticFlowEntryPusher.COLUMN_DL_TYPE, Short.toString(match.getDataLayerType()));
+        
+        if (match.getNetworkTypeOfService() != 0)
+        	entry.put(StaticFlowEntryPusher.COLUMN_NW_TOS, Short.toString(match.getNetworkTypeOfService()));
+        
+        if (match.getNetworkProtocol() != 0)
+        	entry.put(StaticFlowEntryPusher.COLUMN_NW_PROTO, Short.toString(match.getNetworkProtocol()));
+        
+        if (match.getNetworkSource() != 0)
+        	entry.put(StaticFlowEntryPusher.COLUMN_NW_SRC, IPv4.fromIPv4Address(match.getNetworkSource()));
+        
+        if (match.getNetworkDestination() != 0)
+        	entry.put(StaticFlowEntryPusher.COLUMN_NW_DST, IPv4.fromIPv4Address(match.getNetworkDestination()));
+        
+        if (match.getTransportSource() != 0)
+        	entry.put(StaticFlowEntryPusher.COLUMN_TP_SRC, Short.toString(match.getTransportSource()));
+        
+        if (match.getTransportDestination() != 0)
+        	entry.put(StaticFlowEntryPusher.COLUMN_TP_DST, Short.toString(match.getTransportDestination()));
+        
+        return entry;
+    }
+    
+    /**
+     * Returns a String representation of all the OpenFlow actions.
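+     *
+     * For example (a sketch based on the cases handled below), an output to
+     * port 2 followed by a set-vlan-id of 10 is encoded as:
+     * <pre>
+     *   "output=2,set-vlan-id=10"
+     * </pre>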
+     * @param fmActions A list of OFActions to encode into one string
+     * @return A string of the actions encoded for our database
+     */
+    @LogMessageDoc(level="ERROR",
+            message="Could not decode action {action}",
+            explanation="A static flow entry contained an invalid action",
+            recommendation=LogMessageDoc.REPORT_CONTROLLER_BUG)
+    private static String flowModActionsToString(List<OFAction> fmActions) {
+        StringBuilder sb = new StringBuilder();
+        for (OFAction a : fmActions) {
+            if (sb.length() > 0) {
+                sb.append(',');
+            }
+            switch(a.getType()) {
+                case OUTPUT:
+                    sb.append("output=" + Short.toString(((OFActionOutput)a).getPort()));
+                    break;
+                case OPAQUE_ENQUEUE:
+                    int queue = ((OFActionEnqueue)a).getQueueId();
+                    short port = ((OFActionEnqueue)a).getPort();
+                    sb.append("enqueue=" + Short.toString(port) + ":0x" + String.format("%02x", queue));
+                    break;
+                case STRIP_VLAN:
+                    sb.append("strip-vlan");
+                    break;
+                case SET_VLAN_ID:
+                    sb.append("set-vlan-id=" + 
+                        Short.toString(((OFActionVirtualLanIdentifier)a).getVirtualLanIdentifier()));
+                    break;
+                case SET_VLAN_PCP:
+                    sb.append("set-vlan-priority=" +
+                        Byte.toString(((OFActionVirtualLanPriorityCodePoint)a).getVirtualLanPriorityCodePoint()));
+                    break;
+                case SET_DL_SRC:
+                    sb.append("set-src-mac=" + 
+                        HexString.toHexString(((OFActionDataLayerSource)a).getDataLayerAddress()));
+                    break;
+                case SET_DL_DST:
+                    sb.append("set-dst-mac=" + 
+                        HexString.toHexString(((OFActionDataLayerDestination)a).getDataLayerAddress()));
+                    break;
+                case SET_NW_TOS:
+                    sb.append("set-tos-bits=" +
+                        Byte.toString(((OFActionNetworkTypeOfService)a).getNetworkTypeOfService()));
+                    break;
+                case SET_NW_SRC:
+                    sb.append("set-src-ip=" +
+                        IPv4.fromIPv4Address(((OFActionNetworkLayerSource)a).getNetworkAddress()));
+                    break;
+                case SET_NW_DST:
+                    sb.append("set-dst-ip=" +
+                        IPv4.fromIPv4Address(((OFActionNetworkLayerDestination)a).getNetworkAddress()));
+                    break;
+                case SET_TP_SRC:
+                    sb.append("set-src-port=" +
+                        Short.toString(((OFActionTransportLayerSource)a).getTransportPort()));
+                    break;
+                case SET_TP_DST:
+                    sb.append("set-dst-port=" +
+                        Short.toString(((OFActionTransportLayerDestination)a).getTransportPort()));
+                    break;
+                default:
+                    log.error("Could not decode action: {}", a);
+                    break;
+            }
+                
+        }
+        return sb.toString();
+    }
+    
+    /**
+     * Turns a JSON formatted Static Flow Pusher string into a storage entry
+     * Expects a string in JSON along the lines of:
+     *        {
+     *            "switch":       "AA:BB:CC:DD:EE:FF:00:11",
+     *            "name":         "flow-mod-1",
+     *            "cookie":       "0",
+     *            "priority":     "32768",
+     *            "ingress-port": "1",
+     *            "actions":      "output=2",
+     *        }
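+     *
+     * For the example above, the returned map would (roughly) contain:
+     * <pre>
+     *   entry.get(StaticFlowEntryPusher.COLUMN_SWITCH)   -> "AA:BB:CC:DD:EE:FF:00:11"
+     *   entry.get(StaticFlowEntryPusher.COLUMN_NAME)     -> "flow-mod-1"
+     *   entry.get(StaticFlowEntryPusher.COLUMN_PRIORITY) -> "32768"
+     *   entry.get(StaticFlowEntryPusher.COLUMN_IN_PORT)  -> "1"
+     *   entry.get(StaticFlowEntryPusher.COLUMN_ACTIONS)  -> "output=2"
+     * </pre>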
+     * @param fmJson The JSON formatted static flow pusher entry
+     * @return The map of the storage entry
+     * @throws IOException If there was an error parsing the JSON
+     */
+    public static Map<String, Object> jsonToStorageEntry(String fmJson) throws IOException {
+        Map<String, Object> entry = new HashMap<String, Object>();
+        MappingJsonFactory f = new MappingJsonFactory();
+        JsonParser jp;
+        
+        try {
+            jp = f.createJsonParser(fmJson);
+        } catch (JsonParseException e) {
+            throw new IOException(e);
+        }
+        
+        jp.nextToken();
+        if (jp.getCurrentToken() != JsonToken.START_OBJECT) {
+            throw new IOException("Expected START_OBJECT");
+        }
+        
+        while (jp.nextToken() != JsonToken.END_OBJECT) {
+            if (jp.getCurrentToken() != JsonToken.FIELD_NAME) {
+                throw new IOException("Expected FIELD_NAME");
+            }
+            
+            String n = jp.getCurrentName();
+            jp.nextToken();
+            if (jp.getText().equals("")) 
+                continue;
+            
+            if (n == "name")
+                entry.put(StaticFlowEntryPusher.COLUMN_NAME, jp.getText());
+            else if (n == "switch")
+                entry.put(StaticFlowEntryPusher.COLUMN_SWITCH, jp.getText());
+            else if (n == "actions")
+                entry.put(StaticFlowEntryPusher.COLUMN_ACTIONS, jp.getText());
+            else if (n == "priority")
+                entry.put(StaticFlowEntryPusher.COLUMN_PRIORITY, jp.getText());
+            else if (n == "active")
+                entry.put(StaticFlowEntryPusher.COLUMN_ACTIVE, jp.getText());
+            else if (n == "wildcards")
+                entry.put(StaticFlowEntryPusher.COLUMN_WILDCARD, jp.getText());
+            else if (n == "ingress-port")
+                entry.put(StaticFlowEntryPusher.COLUMN_IN_PORT, jp.getText());
+            else if (n == "src-mac")
+                entry.put(StaticFlowEntryPusher.COLUMN_DL_SRC, jp.getText());
+            else if (n == "dst-mac")
+                entry.put(StaticFlowEntryPusher.COLUMN_DL_DST, jp.getText());
+            else if (n == "vlan-id")
+                entry.put(StaticFlowEntryPusher.COLUMN_DL_VLAN, jp.getText());
+            else if (n == "vlan-priority")
+                entry.put(StaticFlowEntryPusher.COLUMN_DL_VLAN_PCP, jp.getText());
+            else if (n == "ether-type")
+                entry.put(StaticFlowEntryPusher.COLUMN_DL_TYPE, jp.getText());
+            else if (n == "tos-bits")
+                entry.put(StaticFlowEntryPusher.COLUMN_NW_TOS, jp.getText());
+            else if (n == "protocol")
+                entry.put(StaticFlowEntryPusher.COLUMN_NW_PROTO, jp.getText());
+            else if (n == "src-ip")
+                entry.put(StaticFlowEntryPusher.COLUMN_NW_SRC, jp.getText());
+            else if (n == "dst-ip")
+                entry.put(StaticFlowEntryPusher.COLUMN_NW_DST, jp.getText());
+            else if (n == "src-port")
+                entry.put(StaticFlowEntryPusher.COLUMN_TP_SRC, jp.getText());
+            else if (n == "dst-port")
+                entry.put(StaticFlowEntryPusher.COLUMN_TP_DST, jp.getText());
+        }
+        
+        return entry;
+    }
+    
+    /**
+     * Parses OFFlowMod actions from strings.
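+     *
+     * A minimal sketch of the expected format (the action names correspond to
+     * the decode_* helpers below):
+     * <pre>
+     *   StaticFlowEntries.parseActionString(flowMod,
+     *           "output=1,set-vlan-id=10,set-dst-mac=00:11:22:33:44:55", log);
+     * </pre>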
+     * @param flowMod The OFFlowMod to set the actions for
+     * @param actionstr The string containing all the actions
+     * @param log A logger to log for errors.
+     */
+    @LogMessageDoc(level="ERROR",
+            message="Unexpected action '{action}', '{subaction}'",
+            explanation="A static flow entry contained an invalid action",
+            recommendation=LogMessageDoc.REPORT_CONTROLLER_BUG)
+    public static void parseActionString(OFFlowMod flowMod, String actionstr, Logger log) {
+        List<OFAction> actions = new LinkedList<OFAction>();
+        int actionsLength = 0;
+        if (actionstr != null) {
+            actionstr = actionstr.toLowerCase();
+            for (String subaction : actionstr.split(",")) {
+                String action = subaction.split("[=:]")[0];
+                SubActionStruct subaction_struct = null;
+                
+                if (action.equals("output")) {
+                    subaction_struct = StaticFlowEntries.decode_output(subaction, log);
+                }
+                else if (action.equals("enqueue")) {
+                    subaction_struct = decode_enqueue(subaction, log);
+                }
+                else if (action.equals("strip-vlan")) {
+                    subaction_struct = decode_strip_vlan(subaction, log);
+                }
+                else if (action.equals("set-vlan-id")) {
+                    subaction_struct = decode_set_vlan_id(subaction, log);
+                }
+                else if (action.equals("set-vlan-priority")) {
+                    subaction_struct = decode_set_vlan_priority(subaction, log);
+                }
+                else if (action.equals("set-src-mac")) {
+                    subaction_struct = decode_set_src_mac(subaction, log);
+                }
+                else if (action.equals("set-dst-mac")) {
+                    subaction_struct = decode_set_dst_mac(subaction, log);
+                }
+                else if (action.equals("set-tos-bits")) {
+                    subaction_struct = decode_set_tos_bits(subaction, log);
+                }
+                else if (action.equals("set-src-ip")) {
+                    subaction_struct = decode_set_src_ip(subaction, log);
+                }
+                else if (action.equals("set-dst-ip")) {
+                    subaction_struct = decode_set_dst_ip(subaction, log);
+                }
+                else if (action.equals("set-src-port")) {
+                    subaction_struct = decode_set_src_port(subaction, log);
+                }
+                else if (action.equals("set-dst-port")) {
+                    subaction_struct = decode_set_dst_port(subaction, log);
+                }
+                else {
+                    log.error("Unexpected action '{}', '{}'", action, subaction);
+                }
+                
+                if (subaction_struct != null) {
+                    actions.add(subaction_struct.action);
+                    actionsLength += subaction_struct.len;
+                }
+            }
+        }
+        log.debug("action {}", actions);
+        
+        flowMod.setActions(actions);
+        flowMod.setLengthU(OFFlowMod.MINIMUM_LENGTH + actionsLength);
+    } 
+    
+    @LogMessageDoc(level="ERROR",
+            message="Invalid subaction: '{subaction}'",
+            explanation="A static flow entry contained an invalid subaction",
+            recommendation=LogMessageDoc.REPORT_CONTROLLER_BUG)
+    private static SubActionStruct decode_output(String subaction, Logger log) {
+        SubActionStruct sa = null;
+        Matcher n;
+        
+        n = Pattern.compile("output=(?:((?:0x)?\\d+)|(all)|(controller)|(local)|(ingress-port)|(normal)|(flood))").matcher(subaction);
+        if (n.matches()) {
+            OFActionOutput action = new OFActionOutput();
+            action.setMaxLength((short) Short.MAX_VALUE);
+            short port = OFPort.OFPP_NONE.getValue();
+            if (n.group(1) != null) {
+                try {
+                    port = get_short(n.group(1));
+                }
+                catch (NumberFormatException e) {
+                    log.debug("Invalid port in: '{}' (error ignored)", subaction);
+                    return null;
+                }
+            }
+            else if (n.group(2) != null)
+                port = OFPort.OFPP_ALL.getValue();
+            else if (n.group(3) != null)
+                port = OFPort.OFPP_CONTROLLER.getValue();
+            else if (n.group(4) != null)
+                port = OFPort.OFPP_LOCAL.getValue();
+            else if (n.group(5) != null)
+                port = OFPort.OFPP_IN_PORT.getValue();
+            else if (n.group(6) != null)
+                port = OFPort.OFPP_NORMAL.getValue();
+            else if (n.group(7) != null)
+                port = OFPort.OFPP_FLOOD.getValue();
+            action.setPort(port);
+            log.debug("action {}", action);
+            
+            sa = new SubActionStruct();
+            sa.action = action;
+            sa.len = OFActionOutput.MINIMUM_LENGTH;
+        }
+        else {
+            log.error("Invalid subaction: '{}'", subaction);
+            return null;
+        }
+        
+        return sa;
+    }
+    
+    private static SubActionStruct decode_enqueue(String subaction, Logger log) {
+        SubActionStruct sa = null;
+        Matcher n;
+        
+        n = Pattern.compile("enqueue=(?:((?:0x)?\\d+)\\:((?:0x)?\\d+))").matcher(subaction);
+        if (n.matches()) {
+            short portnum = 0;
+            if (n.group(1) != null) {
+                try {
+                    portnum = get_short(n.group(1));
+                }
+                catch (NumberFormatException e) {
+                    log.debug("Invalid port-num in: '{}' (error ignored)", subaction);
+                    return null;
+                }
+            }
+
+            int queueid = 0;
+            if (n.group(2) != null) {
+                try {
+                    queueid = get_int(n.group(2));
+                }
+                catch (NumberFormatException e) {
+                    log.debug("Invalid queue-id in: '{}' (error ignored)", subaction);
+                    return null;
+               }
+            }
+            
+            OFActionEnqueue action = new OFActionEnqueue();
+            action.setPort(portnum);
+            action.setQueueId(queueid);
+            log.debug("action {}", action);
+            
+            sa = new SubActionStruct();
+            sa.action = action;
+            sa.len = OFActionEnqueue.MINIMUM_LENGTH;
+        }
+        else {
+            log.debug("Invalid action: '{}'", subaction);
+            return null;
+        }
+        
+        return sa;
+    }
+    
+    private static SubActionStruct decode_strip_vlan(String subaction, Logger log) {
+        SubActionStruct sa = null;
+        Matcher n = Pattern.compile("strip-vlan").matcher(subaction);
+        
+        if (n.matches()) {
+            OFActionStripVirtualLan action = new OFActionStripVirtualLan();
+            log.debug("action {}", action);
+            
+            sa = new SubActionStruct();
+            sa.action = action;
+            sa.len = OFActionStripVirtualLan.MINIMUM_LENGTH;
+        }
+        else {
+            log.debug("Invalid action: '{}'", subaction);
+            return null;
+        }
+
+        return sa;
+    }
+    
+    private static SubActionStruct decode_set_vlan_id(String subaction, Logger log) {
+        SubActionStruct sa = null;
+        Matcher n = Pattern.compile("set-vlan-id=((?:0x)?\\d+)").matcher(subaction);
+        
+        if (n.matches()) {            
+            if (n.group(1) != null) {
+                try {
+                    short vlanid = get_short(n.group(1));
+                    OFActionVirtualLanIdentifier action = new OFActionVirtualLanIdentifier();
+                    action.setVirtualLanIdentifier(vlanid);
+                    log.debug("  action {}", action);
+
+                    sa = new SubActionStruct();
+                    sa.action = action;
+                    sa.len = OFActionVirtualLanIdentifier.MINIMUM_LENGTH;
+                }
+                catch (NumberFormatException e) {
+                    log.debug("Invalid VLAN in: {} (error ignored)", subaction);
+                    return null;
+                }
+            }          
+        }
+        else {
+            log.debug("Invalid action: '{}'", subaction);
+            return null;
+        }
+
+        return sa;
+    }
+    
+    private static SubActionStruct decode_set_vlan_priority(String subaction, Logger log) {
+        SubActionStruct sa = null;
+        Matcher n = Pattern.compile("set-vlan-priority=((?:0x)?\\d+)").matcher(subaction); 
+        
+        if (n.matches()) {            
+            if (n.group(1) != null) {
+                try {
+                    byte prior = get_byte(n.group(1));
+                    OFActionVirtualLanPriorityCodePoint action = new OFActionVirtualLanPriorityCodePoint();
+                    action.setVirtualLanPriorityCodePoint(prior);
+                    log.debug("  action {}", action);
+                    
+                    sa = new SubActionStruct();
+                    sa.action = action;
+                    sa.len = OFActionVirtualLanPriorityCodePoint.MINIMUM_LENGTH;
+                }
+                catch (NumberFormatException e) {
+                    log.debug("Invalid VLAN priority in: {} (error ignored)", subaction);
+                    return null;
+                }
+            }
+        }
+        else {
+            log.debug("Invalid action: '{}'", subaction);
+            return null;
+        }
+
+        return sa;
+    }
+    
+    private static SubActionStruct decode_set_src_mac(String subaction, Logger log) {
+        SubActionStruct sa = null;
+        Matcher n = Pattern.compile("set-src-mac=(?:(\\p{XDigit}+)\\:(\\p{XDigit}+)\\:(\\p{XDigit}+)\\:(\\p{XDigit}+)\\:(\\p{XDigit}+)\\:(\\p{XDigit}+))").matcher(subaction); 
+
+        if (n.matches()) {
+            byte[] macaddr = get_mac_addr(n, subaction, log);
+            if (macaddr != null) {
+                OFActionDataLayerSource action = new OFActionDataLayerSource();
+                action.setDataLayerAddress(macaddr);
+                log.debug("action {}", action);
+
+                sa = new SubActionStruct();
+                sa.action = action;
+                sa.len = OFActionDataLayerSource.MINIMUM_LENGTH;
+            }            
+        }
+        else {
+            log.debug("Invalid action: '{}'", subaction);
+            return null;
+        }
+
+        return sa;
+    }
+
+    private static SubActionStruct decode_set_dst_mac(String subaction, Logger log) {
+        SubActionStruct sa = null;
+        Matcher n = Pattern.compile("set-dst-mac=(?:(\\p{XDigit}+)\\:(\\p{XDigit}+)\\:(\\p{XDigit}+)\\:(\\p{XDigit}+)\\:(\\p{XDigit}+)\\:(\\p{XDigit}+))").matcher(subaction);
+        
+        if (n.matches()) {
+            byte[] macaddr = get_mac_addr(n, subaction, log);            
+            if (macaddr != null) {
+                OFActionDataLayerDestination action = new OFActionDataLayerDestination();
+                action.setDataLayerAddress(macaddr);
+                log.debug("  action {}", action);
+                
+                sa = new SubActionStruct();
+                sa.action = action;
+                sa.len = OFActionDataLayerDestination.MINIMUM_LENGTH;
+            }
+        }
+        else {
+            log.debug("Invalid action: '{}'", subaction);
+            return null;
+        }
+
+        return sa;
+    }
+    
+    private static SubActionStruct decode_set_tos_bits(String subaction, Logger log) {
+        SubActionStruct sa = null;
+        Matcher n = Pattern.compile("set-tos-bits=((?:0x)?\\d+)").matcher(subaction); 
+
+        if (n.matches()) {
+            if (n.group(1) != null) {
+                try {
+                    byte tosbits = get_byte(n.group(1));
+                    OFActionNetworkTypeOfService action = new OFActionNetworkTypeOfService();
+                    action.setNetworkTypeOfService(tosbits);
+                    log.debug("  action {}", action);
+                    
+                    sa = new SubActionStruct();
+                    sa.action = action;
+                    sa.len = OFActionNetworkTypeOfService.MINIMUM_LENGTH;
+                }
+                catch (NumberFormatException e) {
+                    log.debug("Invalid dst-port in: {} (error ignored)", subaction);
+                    return null;
+                }
+            }
+        }
+        else {
+            log.debug("Invalid action: '{}'", subaction);
+            return null;
+        }
+
+        return sa;
+    }
+    
+    private static SubActionStruct decode_set_src_ip(String subaction, Logger log) {
+        SubActionStruct sa = null;
+        Matcher n = Pattern.compile("set-src-ip=(?:(\\d+)\\.(\\d+)\\.(\\d+)\\.(\\d+))").matcher(subaction);
+
+        if (n.matches()) {
+            int ipaddr = get_ip_addr(n, subaction, log);
+            OFActionNetworkLayerSource action = new OFActionNetworkLayerSource();
+            action.setNetworkAddress(ipaddr);
+            log.debug("  action {}", action);
+
+            sa = new SubActionStruct();
+            sa.action = action;
+            sa.len = OFActionNetworkLayerSource.MINIMUM_LENGTH;
+        }
+        else {
+            log.debug("Invalid action: '{}'", subaction);
+            return null;
+        }
+
+        return sa;
+    }
+
+    private static SubActionStruct decode_set_dst_ip(String subaction, Logger log) {
+        SubActionStruct sa = null;
+        Matcher n = Pattern.compile("set-dst-ip=(?:(\\d+)\\.(\\d+)\\.(\\d+)\\.(\\d+))").matcher(subaction);
+
+        if (n.matches()) {
+            int ipaddr = get_ip_addr(n, subaction, log);
+            OFActionNetworkLayerDestination action = new OFActionNetworkLayerDestination();
+            action.setNetworkAddress(ipaddr);
+            log.debug("action {}", action);
+ 
+            sa = new SubActionStruct();
+            sa.action = action;
+            sa.len = OFActionNetworkLayerDestination.MINIMUM_LENGTH;
+        }
+        else {
+            log.debug("Invalid action: '{}'", subaction);
+            return null;
+        }
+
+        return sa;
+    }
+
+    private static SubActionStruct decode_set_src_port(String subaction, Logger log) {
+        SubActionStruct sa = null;
+        Matcher n = Pattern.compile("set-src-port=((?:0x)?\\d+)").matcher(subaction); 
+
+        if (n.matches()) {
+            if (n.group(1) != null) {
+                try {
+                    short portnum = get_short(n.group(1));
+                    OFActionTransportLayerSource action = new OFActionTransportLayerSource();
+                    action.setTransportPort(portnum);
+                    log.debug("action {}", action);
+                    
+                    sa = new SubActionStruct();
+                    sa.action = action;
+                    sa.len = OFActionTransportLayerSource.MINIMUM_LENGTH;
+                }
+                catch (NumberFormatException e) {
+                    log.debug("Invalid src-port in: {} (error ignored)", subaction);
+                    return null;
+                }
+            }
+        }
+        else {
+            log.debug("Invalid action: '{}'", subaction);
+            return null;
+        }
+
+        return sa;
+    }
+
+    private static SubActionStruct decode_set_dst_port(String subaction, Logger log) {
+        SubActionStruct sa = null;
+        Matcher n = Pattern.compile("set-dst-port=((?:0x)?\\d+)").matcher(subaction);
+
+        if (n.matches()) {
+            if (n.group(1) != null) {
+                try {
+                    short portnum = get_short(n.group(1));
+                    OFActionTransportLayerDestination action = new OFActionTransportLayerDestination();
+                    action.setTransportPort(portnum);
+                    log.debug("action {}", action);
+                    
+                    sa = new SubActionStruct();
+                    sa.action = action;
+                    sa.len = OFActionTransportLayerDestination.MINIMUM_LENGTH;
+                }
+                catch (NumberFormatException e) {
+                    log.debug("Invalid dst-port in: {} (error ignored)", subaction);
+                    return null;
+                }
+            }
+        }
+        else {
+            log.debug("Invalid action: '{}'", subaction);
+            return null;
+        }
+
+        return sa;
+    }
+
+    private static byte[] get_mac_addr(Matcher n, String subaction, Logger log) {
+        byte[] macaddr = new byte[6];
+        
+        for (int i=0; i<6; i++) {
+            if (n.group(i+1) != null) {
+                try {
+                    macaddr[i] = get_byte("0x" + n.group(i+1));
+                }
+                catch (NumberFormatException e) {
+                    log.debug("Invalid src-mac in: '{}' (error ignored)", subaction);
+                    return null;
+                }
+            }
+            else { 
+                log.debug("Invalid src-mac in: '{}' (null, error ignored)", subaction);
+                return null;
+            }
+        }
+        
+        return macaddr;
+    }
+    
+    private static int get_ip_addr(Matcher n, String subaction, Logger log) {
+        int ipaddr = 0;
+
+        for (int i=0; i<4; i++) {
+            if (n.group(i+1) != null) {
+                try {
+                    ipaddr = ipaddr<<8;
+                    ipaddr = ipaddr | get_int(n.group(i+1));
+                }
+                catch (NumberFormatException e) {
+                    log.debug("Invalid src-ip in: '{}' (error ignored)", subaction);
+                    return 0;
+                }
+            }
+            else {
+                log.debug("Invalid src-ip in: '{}' (null, error ignored)", subaction);
+                return 0;
+            }
+        }
+        
+        return ipaddr;
+    }
+    
+    // Parse int as decimal, hex (start with 0x or #) or octal (starts with 0)
+    private static int get_int(String str) {
+        return (int)Integer.decode(str);
+    }
+   
+    // Parse short as decimal, hex (start with 0x or #) or octal (starts with 0)
+    private static short get_short(String str) {
+        return (short)(int)Integer.decode(str);
+    }
+   
+    // Parse byte as decimal, hex (start with 0x or #) or octal (starts with 0)
+    private static byte get_byte(String str) {
+        return Integer.decode(str).byteValue();
+    }
+
+}
+
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/staticflowentry/StaticFlowEntryPusher.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/staticflowentry/StaticFlowEntryPusher.java
new file mode 100644
index 0000000..4ed59d7
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/staticflowentry/StaticFlowEntryPusher.java
@@ -0,0 +1,679 @@
+package net.floodlightcontroller.staticflowentry;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.Iterator;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+
+import net.floodlightcontroller.core.FloodlightContext;
+import net.floodlightcontroller.core.IFloodlightProviderService;
+import net.floodlightcontroller.core.IFloodlightProviderService.Role;
+import net.floodlightcontroller.core.IHAListener;
+import net.floodlightcontroller.core.IOFMessageListener;
+import net.floodlightcontroller.core.IOFSwitch;
+import net.floodlightcontroller.core.IOFSwitchListener;
+import net.floodlightcontroller.core.annotations.LogMessageCategory;
+import net.floodlightcontroller.core.annotations.LogMessageDoc;
+import net.floodlightcontroller.core.module.FloodlightModuleContext;
+import net.floodlightcontroller.core.module.FloodlightModuleException;
+import net.floodlightcontroller.core.module.IFloodlightModule;
+import net.floodlightcontroller.core.module.IFloodlightService;
+import net.floodlightcontroller.core.util.AppCookie;
+import net.floodlightcontroller.restserver.IRestApiService;
+import net.floodlightcontroller.staticflowentry.web.StaticFlowEntryWebRoutable;
+import net.floodlightcontroller.staticflowentry.IStaticFlowEntryPusherService;
+import net.floodlightcontroller.storage.IResultSet;
+import net.floodlightcontroller.storage.IStorageSourceService;
+import net.floodlightcontroller.storage.IStorageSourceListener;
+
+import net.floodlightcontroller.storage.StorageException;
+import org.openflow.protocol.OFFlowMod;
+import org.openflow.protocol.OFFlowRemoved;
+import org.openflow.protocol.OFMatch;
+import org.openflow.protocol.OFMessage;
+import org.openflow.protocol.OFType;
+import org.openflow.protocol.factory.BasicFactory;
+import org.openflow.util.HexString;
+import org.openflow.util.U16;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@LogMessageCategory("Static Flow Pusher")
+/**
+ * This module is responsible for maintaining a set of static flows on
+ * switches. This is just a big 'ol dumb list of flows and something external
+ * is responsible for ensuring they make sense for the network.
+ */
+public class StaticFlowEntryPusher 
+    implements IOFSwitchListener, IFloodlightModule, IStaticFlowEntryPusherService,
+        IStorageSourceListener, IOFMessageListener, IHAListener {
+    protected static Logger log = LoggerFactory.getLogger(StaticFlowEntryPusher.class);
+    public static final String StaticFlowName = "staticflowentry";
+    
+    public static final int STATIC_FLOW_APP_ID = 10;
+
+    public static final String TABLE_NAME = "controller_staticflowtableentry";
+    public static final String COLUMN_NAME = "name";
+    public static final String COLUMN_SWITCH = "switch_id";
+    public static final String COLUMN_ACTIVE = "active";
+    public static final String COLUMN_IDLE_TIMEOUT = "idle_timeout";
+    public static final String COLUMN_HARD_TIMEOUT = "hard_timeout";
+    public static final String COLUMN_PRIORITY = "priority";
+    public static final String COLUMN_COOKIE = "cookie";
+    public static final String COLUMN_WILDCARD = "wildcards";
+    public static final String COLUMN_IN_PORT = "in_port";
+    public static final String COLUMN_DL_SRC = "dl_src";
+    public static final String COLUMN_DL_DST = "dl_dst";
+    public static final String COLUMN_DL_VLAN = "dl_vlan";
+    public static final String COLUMN_DL_VLAN_PCP = "dl_vlan_pcp";
+    public static final String COLUMN_DL_TYPE = "dl_type";
+    public static final String COLUMN_NW_TOS = "nw_tos";
+    public static final String COLUMN_NW_PROTO = "nw_proto";
+    public static final String COLUMN_NW_SRC = "nw_src"; // includes CIDR-style
+                                                         // netmask, e.g.
+                                                         // "128.8.128.0/24"
+    public static final String COLUMN_NW_DST = "nw_dst";
+    public static final String COLUMN_TP_DST = "tp_dst";
+    public static final String COLUMN_TP_SRC = "tp_src";
+    public static final String COLUMN_ACTIONS = "actions";
+    public static String ColumnNames[] = { COLUMN_NAME, COLUMN_SWITCH,
+            COLUMN_ACTIVE, COLUMN_IDLE_TIMEOUT, COLUMN_HARD_TIMEOUT,
+            COLUMN_PRIORITY, COLUMN_COOKIE, COLUMN_WILDCARD, COLUMN_IN_PORT,
+            COLUMN_DL_SRC, COLUMN_DL_DST, COLUMN_DL_VLAN, COLUMN_DL_VLAN_PCP,
+            COLUMN_DL_TYPE, COLUMN_NW_TOS, COLUMN_NW_PROTO, COLUMN_NW_SRC,
+            COLUMN_NW_DST, COLUMN_TP_DST, COLUMN_TP_SRC, COLUMN_ACTIONS };
+ 
+
+    protected IFloodlightProviderService floodlightProvider;
+    protected IStorageSourceService storageSource;
+    protected IRestApiService restApi;
+
+    // Map<DPID, Map<Name, FlowMod>> ; FlowMod can be null to indicate non-active
+    protected Map<String, Map<String, OFFlowMod>> entriesFromStorage;
+    // Entry Name -> DPID of Switch it's on
+    protected Map<String, String> entry2dpid;
+
+    private BasicFactory ofMessageFactory;
+
+    // Class to sort FlowMod's by priority, from lowest to highest
+    class FlowModSorter implements Comparator<String> {
+        private String dpid;
+        public FlowModSorter(String dpid) {
+            this.dpid = dpid;
+        }
+        @Override
+        public int compare(String o1, String o2) {
+            OFFlowMod f1 = entriesFromStorage.get(dpid).get(o1);
+            OFFlowMod f2 = entriesFromStorage.get(dpid).get(o2);
+            if (f1 == null || f2 == null) // sort active=false flows by key
+                return o1.compareTo(o2);
+            return U16.f(f1.getPriority()) - U16.f(f2.getPriority());
+        }
+    };
+
+    /**
+     * Used for debugging and unit tests.
+     * @return the number of static flow entries as cached from storage
+     */
+    public int countEntries() {
+        int size = 0;
+        if (entriesFromStorage == null)
+            return 0;
+        for (String ofswitch : entriesFromStorage.keySet())
+            size += entriesFromStorage.get(ofswitch).size();
+        return size;
+    }
+
+    public IFloodlightProviderService getFloodlightProvider() {
+        return floodlightProvider;
+    }
+
+    public void setFloodlightProvider(IFloodlightProviderService floodlightProvider) {
+        this.floodlightProvider = floodlightProvider;
+    }
+
+    public void setStorageSource(IStorageSourceService storageSource) {
+        this.storageSource = storageSource;
+    }
+
+    /**
+     * Reads from our entriesFromStorage for the specified switch and
+     * sends the FlowMods down to the controller in <b>sorted</b> order.
+     *
+     * Sorting is important to maintain correctness on the switch:
+     * if a packet would match both a lower- and a higher-priority
+     * rule, then we want it to match the higher-priority one or nothing,
+     * but never just the lower-priority one.  Inserting from high to
+     * low priority fixes this.
+     *
+     * TODO consider adding a "block all" flow mod and then removing it
+     * while starting up.
+     *
+     * @param sw The switch to send entries to
+     */
+    protected void sendEntriesToSwitch(IOFSwitch sw) {
+        String dpid = sw.getStringId();
+
+        if ((entriesFromStorage != null) && (entriesFromStorage.containsKey(dpid))) {
+            Map<String, OFFlowMod> entries = entriesFromStorage.get(dpid);
+            List<String> sortedList = new ArrayList<String>(entries.keySet());
+            // weird that Collections.sort() returns void
+            Collections.sort( sortedList, new FlowModSorter(dpid));
+            for (String entryName : sortedList) {
+                OFFlowMod flowMod = entries.get(entryName);
+                if (flowMod != null) {
+                    if (log.isDebugEnabled()) {
+                        log.debug("Pushing static entry {} for {}", dpid, entryName);
+                    }
+                    writeFlowModToSwitch(sw, flowMod);
+                }
+            }
+        }
+    }
+    
+    /**
+     * Used only for bundle-local indexing
+     * 
+     * @param map
+     * @return a map from entry name to the DPID of the switch the entry lives on
+     */
+
+    protected Map<String, String> computeEntry2DpidMap(
+                Map<String, Map<String, OFFlowMod>> map) {
+        Map<String, String> ret = new HashMap<String, String>();
+        for(String dpid : map.keySet()) {
+            for( String entry: map.get(dpid).keySet())
+                ret.put(entry, dpid);
+        }
+        return ret;
+    }
+    
+    /**
+     * Read entries from storageSource, and store them in a hash
+     * 
+     * @return a map of switch DPID to (entry name -> OFFlowMod)
+     */
+    @LogMessageDoc(level="ERROR",
+            message="failed to access storage: {reason}",
+            explanation="Could not retrieve static flows from the system " +
+            		"database",
+            recommendation=LogMessageDoc.CHECK_CONTROLLER)
+    private Map<String, Map<String, OFFlowMod>> readEntriesFromStorage() {
+        Map<String, Map<String, OFFlowMod>> entries = new ConcurrentHashMap<String, Map<String, OFFlowMod>>();
+        try {
+            Map<String, Object> row;
+            // null1=no predicate, null2=no ordering
+            IResultSet resultSet = storageSource.executeQuery(TABLE_NAME,
+                    ColumnNames, null, null);
+            for (Iterator<IResultSet> it = resultSet.iterator(); it.hasNext();) {
+                row = it.next().getRow();
+                parseRow(row, entries);
+            }
+        } catch (StorageException e) {
+            log.error("failed to access storage: {}", e.getMessage());
+            // if the table doesn't exist, then wait to populate later via
+            // setStorageSource()
+        }
+        return entries;
+    }
+
+    /**
+     * Take a single row, turn it into a flowMod, and add it to the
+     * entries{$dpid}.{$entryName}=FlowMod 
+     * 
+     * If an entry is inactive, mark it with FlowMod = null
+     * 
+     * @param row
+     * @param entries
+     */
+
+    void parseRow(Map<String, Object> row,
+            Map<String, Map<String, OFFlowMod>> entries) {
+        String switchName = null;
+        String entryName = null;
+
+        StringBuffer matchString = new StringBuffer();
+        if (ofMessageFactory == null) // lazy init
+            ofMessageFactory = new BasicFactory();
+
+        OFFlowMod flowMod = (OFFlowMod) ofMessageFactory
+                .getMessage(OFType.FLOW_MOD);
+
+        if (!row.containsKey(COLUMN_SWITCH) || !row.containsKey(COLUMN_NAME)) {
+            log.debug(
+                    "skipping entry with missing required 'switch' or 'name' entry: {}",
+                    row);
+            return;
+        }
+        // most error checking done with ClassCastException
+        try {
+            // first, snag the required entries, for debugging info
+            switchName = (String) row.get(COLUMN_SWITCH);
+            entryName = (String) row.get(COLUMN_NAME);
+            if (!entries.containsKey(switchName))
+                entries.put(switchName, new HashMap<String, OFFlowMod>());
+            StaticFlowEntries.initDefaultFlowMod(flowMod, entryName);
+            
+            for (String key : row.keySet()) {
+                if (row.get(key) == null)
+                    continue;
+                if ( key.equals(COLUMN_SWITCH) || key.equals(COLUMN_NAME)
+                        || key.equals("id"))
+                    continue; // already handled
+                // explicitly ignore timeouts and wildcards
+                if ( key.equals(COLUMN_HARD_TIMEOUT) || key.equals(COLUMN_IDLE_TIMEOUT) ||
+                        key.equals(COLUMN_WILDCARD))
+                    continue;
+                if ( key.equals(COLUMN_ACTIVE)) {
+                    if  (! Boolean.valueOf((String) row.get(COLUMN_ACTIVE))) {
+                        log.debug("skipping inactive entry {} for switch {}",
+                                entryName, switchName);
+                        entries.get(switchName).put(entryName, null);  // mark this as inactive
+                        return;
+                    }
+                } else if ( key.equals(COLUMN_ACTIONS)){
+                    StaticFlowEntries.parseActionString(flowMod, (String) row.get(COLUMN_ACTIONS), log);
+                } else if ( key.equals(COLUMN_COOKIE)) {
+                    flowMod.setCookie(
+                            StaticFlowEntries.computeEntryCookie(flowMod, 
+                                    Integer.valueOf((String) row.get(COLUMN_COOKIE)), 
+                                    entryName)
+                        );
+                } else if ( key.equals(COLUMN_PRIORITY)) {
+                    flowMod.setPriority(U16.t(Integer.valueOf((String) row.get(COLUMN_PRIORITY))));
+                } else { // the rest of the keys are for OFMatch().fromString()
+                    if (matchString.length() > 0)
+                        matchString.append(",");
+                    matchString.append(key + "=" + row.get(key).toString());
+                }
+            }
+        } catch (ClassCastException e) {
+            if (entryName != null && switchName != null)
+                log.debug(
+                        "skipping entry {} on switch {} with bad data : "
+                                + e.getMessage(), entryName, switchName);
+            else
+                log.debug("skipping entry with bad data: {} :: {} ",
+                        e.getMessage(), e.getStackTrace());
+        }
+
+        OFMatch ofMatch = new OFMatch();
+        String match = matchString.toString();
+        try {
+            ofMatch.fromString(match);
+        } catch (IllegalArgumentException e) {
+            log.debug(
+                    "ignoring flow entry {} on switch {} with illegal OFMatch() key: "
+                            + match, entryName, switchName);
+            return;
+        }
+        flowMod.setMatch(ofMatch);
+
+        entries.get(switchName).put(entryName, flowMod);
+    }
+    
+    @Override
+    public void addedSwitch(IOFSwitch sw) {
+        log.debug("addedSwitch {}; processing its static entries", sw);
+        sendEntriesToSwitch(sw);
+    }
+
+    @Override
+    public void removedSwitch(IOFSwitch sw) {
+        log.debug("removedSwitch {}", sw);
+        // do NOT delete from our internal state; we're tracking the rules,
+        // not the switches
+    }
+    
+    @Override
+    public void switchPortChanged(Long switchId) {
+        // no-op
+    }
+
+    /**
+     * This handles both rowInsert() and rowUpdate()
+     */
+    
+    @Override
+    public void rowsModified(String tableName, Set<Object> rowKeys) {
+        log.debug("Modifying Table {}", tableName);
+
+        HashMap<String, Map<String, OFFlowMod>> entriesToAdd = 
+            new HashMap<String, Map<String, OFFlowMod>>();
+        // build up list of what was added 
+        for(Object key: rowKeys) {
+            IResultSet resultSet = storageSource.getRow(tableName, key);
+            for (Iterator<IResultSet> it = resultSet.iterator(); it.hasNext();) {
+                Map<String, Object> row = it.next().getRow();
+                parseRow(row, entriesToAdd);
+            }            
+        }
+        // batch updates by switch and blast them out
+        for (String dpid : entriesToAdd.keySet()) {
+            if (!entriesFromStorage.containsKey(dpid))
+                entriesFromStorage.put(dpid, new HashMap<String, OFFlowMod>());
+            List<OFMessage> outQueue = new ArrayList<OFMessage>();
+            for(String entry : entriesToAdd.get(dpid).keySet()) {
+                OFFlowMod newFlowMod = entriesToAdd.get(dpid).get(entry);
+                OFFlowMod oldFlowMod = entriesFromStorage.get(dpid).get(entry);
+                if (oldFlowMod != null) {  // remove any pre-existing rule
+                    oldFlowMod.setCommand(OFFlowMod.OFPFC_DELETE_STRICT);
+                    outQueue.add(oldFlowMod);
+                }
+                if (newFlowMod != null) {
+                    entriesFromStorage.get(dpid).put(entry, newFlowMod);
+                    outQueue.add(newFlowMod);
+                    entry2dpid.put(entry, dpid);
+                } else {
+                    entriesFromStorage.get(dpid).remove(entry);
+                    entry2dpid.remove(entry);
+                }
+            }
+            
+            writeOFMessagesToSwitch(HexString.toLong(dpid), outQueue);
+        }
+    }
+
+    @Override
+    public void rowsDeleted(String tableName, Set<Object> rowKeys) {
+        if (log.isDebugEnabled()) {
+            log.debug("deleting from Table {}", tableName);
+        }
+        
+        for(Object obj : rowKeys) {
+            if (!(obj instanceof String)) {
+                log.debug("tried to delete non-string key {}; ignoring", obj);
+                continue;
+            }
+            deleteStaticFlowEntry((String) obj);
+        }
+    }
+    
+    @LogMessageDoc(level="ERROR",
+            message="inconsistent internal state: no switch has rule {rule}",
+            explanation="Inconsistent internat state discovered while " +
+            		"deleting a static flow rule",
+            recommendation=LogMessageDoc.REPORT_CONTROLLER_BUG)
+    private boolean deleteStaticFlowEntry(String entryName) {
+        String dpid = entry2dpid.get(entryName);
+        if (log.isDebugEnabled()) {
+            log.debug("Deleting flow {} for switch {}", entryName, dpid);
+        }
+        if (dpid == null) {
+            log.error("inconsistent internal state: no switch has rule {}",
+                    entryName);
+            return false;
+        }
+        
+        // send flow_mod delete
+        OFFlowMod flowMod = entriesFromStorage.get(dpid).get(entryName);
+        flowMod.setCommand(OFFlowMod.OFPFC_DELETE_STRICT);
+
+        if (entriesFromStorage.containsKey(dpid) && 
+                entriesFromStorage.get(dpid).containsKey(entryName)) {
+            entriesFromStorage.get(dpid).remove(entryName);
+        } else { 
+            log.debug("Tried to delete non-existent entry {} for switch {}", 
+                    entryName, dpid);
+            return false;
+        }
+        
+        writeFlowModToSwitch(HexString.toLong(dpid), flowMod);
+        return true;
+    }
+    
+    /**
+     * Writes a list of OFMessages to a switch
+     * @param dpid The datapath ID of the switch to write to
+     * @param messages The list of OFMessages to write.
+     */
+    @LogMessageDoc(level="ERROR",
+            message="Tried to write to switch {switch} but got {error}",
+            explanation="An I/O error occured while trying to write a " +
+            		"static flow to a switch",
+            recommendation=LogMessageDoc.CHECK_SWITCH)
+    private void writeOFMessagesToSwitch(long dpid, List<OFMessage> messages) {
+        IOFSwitch ofswitch = floodlightProvider.getSwitches().get(dpid);
+        if (ofswitch != null) {  // is the switch connected
+            try {
+                if (log.isDebugEnabled()) {
+                    log.debug("Sending {} new entries to {}", messages.size(), dpid);
+                }
+                ofswitch.write(messages, null);
+                ofswitch.flush();
+            } catch (IOException e) {
+                log.error("Tried to write to switch {} but got {}", dpid, e.getMessage());
+            }
+        }
+    }
+    
+    /**
+     * Writes an OFFlowMod to a switch. It checks that the switch is
+     * connected before it sends.
+     * @param dpid The datapath ID of the switch to write the flow mod to
+     * @param flowMod The OFFlowMod to write
+     */
+    private void writeFlowModToSwitch(long dpid, OFFlowMod flowMod) {
+        Map<Long,IOFSwitch> switches = floodlightProvider.getSwitches();
+        IOFSwitch ofSwitch = switches.get(dpid);
+        if (ofSwitch == null) {
+            if (log.isDebugEnabled()) {
+                log.debug("Not deleting key {} :: switch {} not connected", 
+                          dpid);
+            }
+            return;
+        }
+        writeFlowModToSwitch(ofSwitch, flowMod);
+    }
+    
+    /**
+     * Writes an OFFlowMod to a switch
+     * @param sw The IOFSwitch to write to
+     * @param flowMod The OFFlowMod to write
+     */
+    @LogMessageDoc(level="ERROR",
+            message="Tried to write OFFlowMod to {switch} but got {error}",
+            explanation="An I/O error occured while trying to write a " +
+                    "static flow to a switch",
+            recommendation=LogMessageDoc.CHECK_SWITCH)
+    private void writeFlowModToSwitch(IOFSwitch sw, OFFlowMod flowMod) {
+        try {
+            sw.write(flowMod, null);
+            sw.flush();
+        } catch (IOException e) {
+            log.error("Tried to write OFFlowMod to {} but failed: {}", 
+                    HexString.toHexString(sw.getId()), e.getMessage());
+        }
+    }
+
+    @Override
+    public String getName() {
+        return StaticFlowName;
+    }
+    
+    @Override
+    @LogMessageDoc(level="ERROR",
+        message="Got a FlowRemove message for a infinite " +
+                "timeout flow: {flow} from switch {switch}",
+        explanation="Flows with infinite timeouts should not expire. " +
+        		"The switch has expired the flow anyway.",
+        recommendation=LogMessageDoc.REPORT_SWITCH_BUG)
+    public Command receive(IOFSwitch sw, OFMessage msg, FloodlightContext cntx) {
+        switch (msg.getType()) {
+        case FLOW_REMOVED:
+            break;
+        default:
+            return Command.CONTINUE;
+        }
+        OFFlowRemoved flowRemoved = (OFFlowRemoved) msg;
+        long cookie = flowRemoved.getCookie();
+        /**
+         * This is just to sanity check our assumption that static flows 
+         * never expire.
+         */
+        if( AppCookie.extractApp(cookie) == STATIC_FLOW_APP_ID) {
+            if (flowRemoved.getReason() != 
+                    OFFlowRemoved.OFFlowRemovedReason.OFPRR_DELETE)
+                log.error("Got a FlowRemove message for a infinite " +
+                		  "timeout flow: {} from switch {}", msg, sw);
+            return Command.STOP;    // only for us
+        } else
+            return Command.CONTINUE;
+    }
+
+    @Override
+    public boolean isCallbackOrderingPrereq(OFType type, String name) {
+        return false;  // no dependency for non-packet in
+    }
+
+    @Override
+    public boolean isCallbackOrderingPostreq(OFType type, String name) {
+        return false;  // no dependency for non-packet in
+    }
+
+    // IFloodlightModule
+    
+    @Override
+    public Collection<Class<? extends IFloodlightService>> getModuleServices() {
+        Collection<Class<? extends IFloodlightService>> l = 
+                new ArrayList<Class<? extends IFloodlightService>>();
+        l.add(IStaticFlowEntryPusherService.class);
+        return l;
+    }
+
+    @Override
+    public Map<Class<? extends IFloodlightService>, IFloodlightService> getServiceImpls() {
+        Map<Class<? extends IFloodlightService>,
+            IFloodlightService> m = 
+                new HashMap<Class<? extends IFloodlightService>,
+                    IFloodlightService>();
+        m.put(IStaticFlowEntryPusherService.class, this);
+        return m;
+    }
+
+    @Override
+    public Collection<Class<? extends IFloodlightService>> getModuleDependencies() {
+        Collection<Class<? extends IFloodlightService>> l = 
+                new ArrayList<Class<? extends IFloodlightService>>();
+        l.add(IFloodlightProviderService.class);
+        l.add(IStorageSourceService.class);
+        l.add(IRestApiService.class);
+        return l;
+    }
+
+    @Override
+    public void init(FloodlightModuleContext context)
+            throws FloodlightModuleException {
+        floodlightProvider =
+            context.getServiceImpl(IFloodlightProviderService.class);
+        storageSource =
+            context.getServiceImpl(IStorageSourceService.class);
+        restApi =
+            context.getServiceImpl(IRestApiService.class);
+    }
+
+    @Override
+    public void startUp(FloodlightModuleContext context) {        
+        floodlightProvider.addOFMessageListener(OFType.FLOW_REMOVED, this);
+        floodlightProvider.addOFSwitchListener(this);
+        floodlightProvider.addHAListener(this);
+        
+        // assumes no switches connected at startup()
+        storageSource.createTable(TABLE_NAME, null);
+        storageSource.setTablePrimaryKeyName(TABLE_NAME, COLUMN_NAME);
+        storageSource.addListener(TABLE_NAME, this);
+        entriesFromStorage = readEntriesFromStorage(); 
+        entry2dpid = computeEntry2DpidMap(entriesFromStorage);
+        restApi.addRestletRoutable(new StaticFlowEntryWebRoutable());
+    }
+
+    // IStaticFlowEntryPusherService methods
+    
+    @Override
+    public void addFlow(String name, OFFlowMod fm, String swDpid) {
+        Map<String, Object> fmMap = StaticFlowEntries.flowModToStorageEntry(fm, swDpid, name);
+        entry2dpid.put(name, swDpid);
+        Map<String, OFFlowMod> switchEntries = entriesFromStorage.get(swDpid);
+        if (switchEntries == null) {
+            switchEntries = new HashMap<String, OFFlowMod>();
+            entriesFromStorage.put(swDpid, switchEntries);
+        }
+        switchEntries.put(name, fm);
+        storageSource.insertRowAsync(TABLE_NAME, fmMap);
+    }
+
+    @Override
+    public void deleteFlow(String name) {
+        storageSource.deleteRowAsync(TABLE_NAME, name);
+        // TODO - What if there is a delay in storage?
+    }
+    
+    @Override
+    public void deleteAllFlows() {
+        for (String entry : entry2dpid.keySet()) {
+            deleteFlow(entry);
+        }
+    }
+    
+    @Override
+    public void deleteFlowsForSwitch(long dpid) {
+        String sDpid = HexString.toHexString(dpid);
+        
+        for (Entry<String, String> e : entry2dpid.entrySet()) {
+            if (e.getValue().equals(sDpid))
+                deleteFlow(e.getKey());
+        }
+    }
+    
+    @Override
+    public Map<String, Map<String, OFFlowMod>> getFlows() {
+        return entriesFromStorage;
+    }
+    
+    @Override
+    public Map<String, OFFlowMod> getFlows(String dpid) {
+        return entriesFromStorage.get(dpid);
+    }
+
+    
+    // IHAListener
+    
+    @Override
+    public void roleChanged(Role oldRole, Role newRole) {
+        switch(newRole) {
+            case MASTER:
+                if (oldRole == Role.SLAVE) {
+                    log.debug("Re-reading static flows from storage due " +
+                            "to HA change from SLAVE->MASTER");
+                    entriesFromStorage = readEntriesFromStorage(); 
+                    entry2dpid = computeEntry2DpidMap(entriesFromStorage);
+                }
+                break;
+            case SLAVE:
+                log.debug("Clearing in-memory flows due to " +
+                        "HA change to SLAVE");
+                entry2dpid.clear();
+                entriesFromStorage.clear();
+                break;
+            default:
+                break;
+        }
+    }
+    
+    @Override
+    public void controllerNodeIPsChanged(
+            Map<String, String> curControllerNodeIPs,
+            Map<String, String> addedControllerNodeIPs,
+            Map<String, String> removedControllerNodeIPs) {
+        // ignore
+    }
+     
+}
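
A minimal sketch of how another module could drive the IStaticFlowEntryPusherService API above; the class name, entry name, and OFFlowMod field values are illustrative assumptions, not part of the imported module.

    import java.util.ArrayList;

    import org.openflow.protocol.OFFlowMod;
    import org.openflow.protocol.OFMatch;
    import org.openflow.protocol.action.OFAction;

    import net.floodlightcontroller.staticflowentry.IStaticFlowEntryPusherService;

    public class StaticFlowPushExample {
        /** Pushes a low-priority drop-all flow to the given switch, then removes it. */
        public static void pushAndRemove(IStaticFlowEntryPusherService pusher, String dpid) {
            OFFlowMod fm = new OFFlowMod();
            fm.setMatch(new OFMatch());                  // default wildcard match, purely illustrative
            fm.setActions(new ArrayList<OFAction>());    // no actions => drop
            fm.setPriority((short) 1);
            fm.setLengthU(OFFlowMod.MINIMUM_LENGTH);

            pusher.addFlow("example-drop-all", fm, dpid);   // updates the maps and inserts the row asynchronously
            pusher.deleteFlow("example-drop-all");          // deletion flows back through the storage listener path
        }
    }

Because addFlow() also inserts the entry into storage, a flow pushed this way survives the SLAVE->MASTER re-read shown in roleChanged().
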
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/staticflowentry/web/ClearStaticFlowEntriesResource.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/staticflowentry/web/ClearStaticFlowEntriesResource.java
new file mode 100644
index 0000000..c1d826a
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/staticflowentry/web/ClearStaticFlowEntriesResource.java
@@ -0,0 +1,38 @@
+package net.floodlightcontroller.staticflowentry.web;
+
+import net.floodlightcontroller.core.web.ControllerSwitchesResource;
+import net.floodlightcontroller.staticflowentry.IStaticFlowEntryPusherService;
+
+import org.openflow.util.HexString;
+import org.restlet.data.Status;
+import org.restlet.resource.Get;
+import org.restlet.resource.ServerResource;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class ClearStaticFlowEntriesResource extends ServerResource {
+    protected static Logger log = LoggerFactory.getLogger(ClearStaticFlowEntriesResource.class);
+    
+    @Get
+    public void ClearStaticFlowEntries() {
+        IStaticFlowEntryPusherService sfpService =
+                (IStaticFlowEntryPusherService)getContext().getAttributes().
+                    get(IStaticFlowEntryPusherService.class.getCanonicalName());
+        
+        String param = (String) getRequestAttributes().get("switch");
+        if (log.isDebugEnabled())
+            log.debug("Clearing all static flow entires for switch: " + param);
+        
+        if (param.toLowerCase().equals("all")) {
+            sfpService.deleteAllFlows();
+        } else {
+            try {
+                sfpService.deleteFlowsForSwitch(HexString.toLong(param));
+            } catch (NumberFormatException e){
+                setStatus(Status.CLIENT_ERROR_BAD_REQUEST, 
+                          ControllerSwitchesResource.DPID_ERROR);
+                return;
+            }
+        }
+    }
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/staticflowentry/web/ListStaticFlowEntriesResource.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/staticflowentry/web/ListStaticFlowEntriesResource.java
new file mode 100644
index 0000000..0ad778f
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/staticflowentry/web/ListStaticFlowEntriesResource.java
@@ -0,0 +1,45 @@
+package net.floodlightcontroller.staticflowentry.web;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import net.floodlightcontroller.core.web.ControllerSwitchesResource;
+import net.floodlightcontroller.staticflowentry.IStaticFlowEntryPusherService;
+
+import org.openflow.protocol.OFFlowMod;
+import org.restlet.data.Status;
+import org.restlet.resource.Get;
+import org.restlet.resource.ServerResource;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class ListStaticFlowEntriesResource extends ServerResource {
+    protected static Logger log = LoggerFactory.getLogger(ListStaticFlowEntriesResource.class);
+    
+    @Get
+    public Map<String, Map<String, OFFlowMod>> ListStaticFlowEntries() {
+        IStaticFlowEntryPusherService sfpService =
+                (IStaticFlowEntryPusherService)getContext().getAttributes().
+                    get(IStaticFlowEntryPusherService.class.getCanonicalName());
+        
+        String param = (String) getRequestAttributes().get("switch");
+        if (log.isDebugEnabled())
+            log.debug("Listing all static flow entires for switch: " + param);
+        
+        if (param.toLowerCase().equals("all")) {
+            return sfpService.getFlows();
+        } else {
+            try {
+                Map<String, Map<String, OFFlowMod>> retMap = 
+                        new HashMap<String, Map<String, OFFlowMod>>();
+                retMap.put(param, sfpService.getFlows(param));
+                return retMap;
+                
+            } catch (NumberFormatException e){
+                setStatus(Status.CLIENT_ERROR_BAD_REQUEST, 
+                          ControllerSwitchesResource.DPID_ERROR);
+            }
+        }
+        return null;
+    }
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/staticflowentry/web/StaticFlowEntryPusherResource.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/staticflowentry/web/StaticFlowEntryPusherResource.java
new file mode 100644
index 0000000..3b750ae
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/staticflowentry/web/StaticFlowEntryPusherResource.java
@@ -0,0 +1,142 @@
+/**
+*    Copyright 2011, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.staticflowentry.web;
+
+import java.io.IOException;
+import java.util.Map;
+
+import org.restlet.resource.Delete;
+import org.restlet.resource.Post;
+import org.restlet.resource.ServerResource;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+
+import net.floodlightcontroller.core.annotations.LogMessageCategory;
+import net.floodlightcontroller.core.annotations.LogMessageDoc;
+import net.floodlightcontroller.staticflowentry.StaticFlowEntries;
+import net.floodlightcontroller.staticflowentry.StaticFlowEntryPusher;
+import net.floodlightcontroller.storage.IStorageSourceService;
+
+/**
+ * Pushes a static flow entry to the storage source
+ * @author alexreimers
+ *
+ */
+@LogMessageCategory("Static Flow Pusher")
+public class StaticFlowEntryPusherResource extends ServerResource {
+    protected static Logger log = LoggerFactory.getLogger(StaticFlowEntryPusherResource.class);
+    
+    /**
+     * Checks whether the user matches on IP fields without also
+     * matching the correct ether-type (2048).
+     * @param rows The Map that is a string representation of
+     * the static flow.
+     * @return True if the match is consistent (the ether-type is set
+     * whenever IP fields are matched), false otherwise
+     */
+    private boolean checkMatchIp(Map<String, Object> rows) {
+        boolean matchEther = false;
+        String val = (String) rows.get(StaticFlowEntryPusher.COLUMN_DL_TYPE);
+        if (val != null) {
+            int type = 0;
+            // check both hex and decimal
+            if (val.startsWith("0x")) {
+                type = Integer.parseInt(val.substring(2), 16);
+            } else {
+                try {
+                    type = Integer.parseInt(val);
+                } catch (NumberFormatException e) { /* fail silently */}
+            }
+            if (type == 2048) matchEther = true;
+        }
+        
+        if ((rows.containsKey(StaticFlowEntryPusher.COLUMN_NW_DST) || 
+                rows.containsKey(StaticFlowEntryPusher.COLUMN_NW_SRC) ||
+                rows.containsKey(StaticFlowEntryPusher.COLUMN_NW_PROTO) ||
+                rows.containsKey(StaticFlowEntryPusher.COLUMN_NW_TOS)) &&
+                (matchEther == false))
+            return false;
+        
+        return true;
+    }
+    
+    /**
+     * Takes a Static Flow Pusher string in JSON format and parses it into
+     * our database schema then pushes it to the database.
+     * @param fmJson The Static Flow Pusher entry in JSON format.
+     * @return A string status message
+     */
+    @Post
+    @LogMessageDoc(level="ERROR",
+        message="Error parsing push flow mod request: {request}",
+        explanation="An invalid request was sent to static flow pusher",
+        recommendation="Fix the format of the static flow mod request")
+    public String store(String fmJson) {
+        IStorageSourceService storageSource =
+                (IStorageSourceService)getContext().getAttributes().
+                    get(IStorageSourceService.class.getCanonicalName());
+        
+        Map<String, Object> rowValues;
+        try {
+            rowValues = StaticFlowEntries.jsonToStorageEntry(fmJson);
+            String status = null;
+            if (!checkMatchIp(rowValues)) {
+                status = "Warning! Pushing a static flow entry that matches IP " +
+                        "fields without matching for IP payload (ether-type 2048) will cause " +
+                        "the switch to wildcard higher level fields.";
+                log.error(status);
+            } else {
+                status = "Entry pushed";
+            }
+            storageSource.insertRowAsync(StaticFlowEntryPusher.TABLE_NAME, rowValues);
+            return ("{\"status\" : \"" + status + "\"}");
+        } catch (IOException e) {
+            log.error("Error parsing push flow mod request: " + fmJson, e);
+            e.printStackTrace();
+            return "{\"status\" : \"Error! Could not parse flod mod, see log for details.\"}";
+        }
+    }
+    
+    @Delete
+    @LogMessageDoc(level="ERROR",
+        message="Error deleting flow mod request: {request}",
+        explanation="An invalid delete request was sent to static flow pusher",
+        recommendation="Fix the format of the static flow mod request")
+    public String del(String fmJson) {
+        IStorageSourceService storageSource =
+                (IStorageSourceService)getContext().getAttributes().
+                    get(IStorageSourceService.class.getCanonicalName());
+        String fmName = null;
+        if (fmJson == null) {
+            return "{\"status\" : \"Error! No data posted.\"}";
+        }
+        try {
+            fmName = StaticFlowEntries.getEntryNameFromJson(fmJson);
+            if (fmName == null) {
+                return "{\"status\" : \"Error deleting entry, no name provided\"}";
+            }
+        } catch (IOException e) {
+            log.error("Error deleting flow mod request: " + fmJson, e);
+            e.printStackTrace();
+            return "{\"status\" : \"Error deleting entry, see log for details\"}";
+        }
+        
+        storageSource.deleteRowAsync(StaticFlowEntryPusher.TABLE_NAME, fmName);
+        return "{\"status\" : \"Entry " + fmName + " deleted\"}";
+    }
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/staticflowentry/web/StaticFlowEntryWebRoutable.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/staticflowentry/web/StaticFlowEntryWebRoutable.java
new file mode 100644
index 0000000..b5a6fe1
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/staticflowentry/web/StaticFlowEntryWebRoutable.java
@@ -0,0 +1,29 @@
+package net.floodlightcontroller.staticflowentry.web;
+
+import net.floodlightcontroller.restserver.RestletRoutable;
+
+import org.restlet.Context;
+import org.restlet.Restlet;
+import org.restlet.routing.Router;
+
+public class StaticFlowEntryWebRoutable implements RestletRoutable {
+    /**
+     * Create the Restlet router and bind to the proper resources.
+     */
+    @Override
+    public Restlet getRestlet(Context context) {
+        Router router = new Router(context);
+        router.attach("/json", StaticFlowEntryPusherResource.class);
+        router.attach("/clear/{switch}/json", ClearStaticFlowEntriesResource.class);
+        router.attach("/list/{switch}/json", ListStaticFlowEntriesResource.class);
+        return router;
+    }
+
+    /**
+     * Set the base path for the static flow entry pusher
+     */
+    @Override
+    public String basePath() {
+        return "/wm/staticflowentrypusher";
+    }
+}
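
These routes give the Static Flow Pusher a small REST surface under /wm/staticflowentrypusher. A client-side sketch follows, assuming the controller's REST API listens on localhost:8080 and the usual Static Flow Pusher JSON field names (name, switch, priority, ingress-port, active, actions); both of those details are assumptions here.

    import java.io.OutputStream;
    import java.net.HttpURLConnection;
    import java.net.URL;

    public class StaticFlowRestExample {
        public static void main(String[] args) throws Exception {
            // Field names follow the Static Flow Pusher JSON schema; values are examples only.
            String body = "{\"name\":\"flow-mod-1\", \"switch\":\"00:00:00:00:00:00:00:01\", "
                        + "\"priority\":\"32768\", \"ingress-port\":\"1\", \"active\":\"true\", "
                        + "\"actions\":\"output=2\"}";

            URL url = new URL("http://localhost:8080/wm/staticflowentrypusher/json");
            HttpURLConnection conn = (HttpURLConnection) url.openConnection();
            conn.setRequestMethod("POST");
            conn.setDoOutput(true);
            conn.setRequestProperty("Content-Type", "application/json");
            OutputStream out = conn.getOutputStream();
            out.write(body.getBytes("UTF-8"));
            out.close();
            System.out.println("HTTP " + conn.getResponseCode());

            // GET /wm/staticflowentrypusher/list/all/json lists the stored entries;
            // GET /wm/staticflowentrypusher/clear/all/json removes them.
        }
    }
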
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/storage/AbstractStorageSource.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/storage/AbstractStorageSource.java
new file mode 100644
index 0000000..aae3962
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/storage/AbstractStorageSource.java
@@ -0,0 +1,534 @@
+/**
+*    Copyright 2011, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.storage;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.CopyOnWriteArraySet;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+
+import net.floodlightcontroller.core.annotations.LogMessageCategory;
+import net.floodlightcontroller.core.annotations.LogMessageDoc;
+import net.floodlightcontroller.core.module.FloodlightModuleContext;
+import net.floodlightcontroller.core.module.FloodlightModuleException;
+import net.floodlightcontroller.core.module.IFloodlightModule;
+import net.floodlightcontroller.core.module.IFloodlightService;
+import net.floodlightcontroller.counter.ICounter;
+import net.floodlightcontroller.counter.CounterStore;
+import net.floodlightcontroller.counter.ICounterStoreService;
+import net.floodlightcontroller.counter.CounterValue.CounterType;
+import net.floodlightcontroller.restserver.IRestApiService;
+import net.floodlightcontroller.storage.web.StorageWebRoutable;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@LogMessageCategory("System Database")
+public abstract class AbstractStorageSource 
+    implements IStorageSourceService, IFloodlightModule {
+    protected static Logger logger = LoggerFactory.getLogger(AbstractStorageSource.class);
+
+    // Shared instance of the executor to use to execute the storage tasks.
+    // We make this a single threaded executor, because if we used a thread pool
+    // then storage operations could be executed out of order which would cause
+    // problems in some cases (e.g. delete and update of a row getting reordered).
+    // If we wanted to make this more multi-threaded we could have multiple
+    // worker threads/executors with affinity of operations on a given table
+    // to a single worker thread. But for now, we'll keep it simple and just have
+    // a single thread for all operations.
+    protected static ExecutorService defaultExecutorService = Executors.newSingleThreadExecutor();
+
+    protected final static String STORAGE_QUERY_COUNTER_NAME = "StorageQuery";
+    protected final static String STORAGE_UPDATE_COUNTER_NAME = "StorageUpdate";
+    protected final static String STORAGE_DELETE_COUNTER_NAME = "StorageDelete";
+    
+    protected Set<String> allTableNames = new CopyOnWriteArraySet<String>();
+    protected ICounterStoreService counterStore;
+    protected ExecutorService executorService = defaultExecutorService;
+    protected IStorageExceptionHandler exceptionHandler;
+
+    private Map<String, Set<IStorageSourceListener>> listeners =
+        new ConcurrentHashMap<String, Set<IStorageSourceListener>>();
+
+    // Our dependencies
+    protected IRestApiService restApi = null;
+    
+    protected static final String DB_ERROR_EXPLANATION =
+            "An unknown error occurred while executing asynchronous " +
+            "database operation";
+    
+    @LogMessageDoc(level="ERROR",
+            message="Failure in asynchronous call to executeQuery",
+            explanation=DB_ERROR_EXPLANATION,
+            recommendation=LogMessageDoc.GENERIC_ACTION)
+    abstract class StorageCallable<V> implements Callable<V> {
+        public V call() {
+            try {
+                return doStorageOperation();
+            }
+            catch (StorageException e) {
+                logger.error("Failure in asynchronous call to executeQuery", e);
+                if (exceptionHandler != null)
+                    exceptionHandler.handleException(e);
+                throw e;
+            }
+        }
+        abstract protected V doStorageOperation();
+    }
+    
+    @LogMessageDoc(level="ERROR",
+            message="Failure in asynchronous call to updateRows",
+            explanation=DB_ERROR_EXPLANATION,
+            recommendation=LogMessageDoc.GENERIC_ACTION)
+    abstract class StorageRunnable implements Runnable {
+        public void run() {
+            try {
+                doStorageOperation();
+            }
+            catch (StorageException e) {
+                logger.error("Failure in asynchronous call to updateRows", e);
+                if (exceptionHandler != null)
+                    exceptionHandler.handleException(e);
+                throw e;
+            }
+        }
+        abstract void doStorageOperation();
+    }
+    
+    public AbstractStorageSource() {
+        this.executorService = defaultExecutorService;
+    }
+
+    public void setExecutorService(ExecutorService executorService) {
+        this.executorService = (executorService != null) ?
+                executorService : defaultExecutorService;
+    }
+    
+    @Override
+    public void setExceptionHandler(IStorageExceptionHandler exceptionHandler) {
+        this.exceptionHandler = exceptionHandler;
+    }
+    
+    @Override
+    public abstract void setTablePrimaryKeyName(String tableName, String primaryKeyName);
+
+    @Override
+    public void createTable(String tableName, Set<String> indexedColumns) {
+        allTableNames.add(tableName);
+    }
+
+    @Override
+    public Set<String> getAllTableNames() {
+        return allTableNames;
+    }
+    
+    public void setCounterStore(CounterStore counterStore) {
+        this.counterStore = counterStore;
+    }
+    
+    protected void updateCounters(String baseName, String tableName) {
+        if (counterStore != null) {
+            String counterName;
+            if (tableName != null) {
+                updateCounters(baseName, null);
+                counterName = baseName + CounterStore.TitleDelimitor + tableName;
+            } else {
+                counterName = baseName;
+            }
+            ICounter counter = counterStore.getCounter(counterName);
+            if (counter == null) {
+                counter = counterStore.createCounter(counterName, CounterType.LONG);
+            }
+            counter.increment();
+        }
+    }
+    
+    @Override
+    public abstract IQuery createQuery(String tableName, String[] columnNames,
+            IPredicate predicate, RowOrdering ordering);
+
+    @Override
+    public IResultSet executeQuery(IQuery query) {
+        updateCounters(STORAGE_QUERY_COUNTER_NAME, query.getTableName());
+        return executeQueryImpl(query);
+    }
+    
+    protected abstract IResultSet executeQueryImpl(IQuery query);
+
+    @Override
+    public IResultSet executeQuery(String tableName, String[] columnNames,
+            IPredicate predicate, RowOrdering ordering) {
+        IQuery query = createQuery(tableName, columnNames, predicate, ordering);
+        IResultSet resultSet = executeQuery(query);
+        return resultSet;
+    }
+
+    @Override
+    public Object[] executeQuery(String tableName, String[] columnNames,
+            IPredicate predicate, RowOrdering ordering, IRowMapper rowMapper) {
+        List<Object> objectList = new ArrayList<Object>();
+        IResultSet resultSet = executeQuery(tableName, columnNames, predicate, ordering);
+        while (resultSet.next()) {
+            Object object = rowMapper.mapRow(resultSet);
+            objectList.add(object);
+        }
+        return objectList.toArray();
+    }
+    
+    @Override
+    public Future<IResultSet> executeQueryAsync(final IQuery query) {
+        Future<IResultSet> future = executorService.submit(
+            new StorageCallable<IResultSet>() {
+                public IResultSet doStorageOperation() {
+                    return executeQuery(query);
+                }
+            });
+        return future;
+    }
+
+    @Override
+    public Future<IResultSet> executeQueryAsync(final String tableName,
+            final String[] columnNames,  final IPredicate predicate,
+            final RowOrdering ordering) {
+        Future<IResultSet> future = executorService.submit(
+            new StorageCallable<IResultSet>() {
+                public IResultSet doStorageOperation() {
+                    return executeQuery(tableName, columnNames,
+                            predicate, ordering);
+                }
+            });
+        return future;
+    }
+
+    @Override
+    public Future<Object[]> executeQueryAsync(final String tableName,
+            final String[] columnNames,  final IPredicate predicate,
+            final RowOrdering ordering, final IRowMapper rowMapper) {
+        Future<Object[]> future = executorService.submit(
+            new StorageCallable<Object[]>() {
+                public Object[] doStorageOperation() {
+                    return executeQuery(tableName, columnNames, predicate,
+                            ordering, rowMapper);
+                }
+            });
+        return future;
+    }
+
+    @Override
+    public Future<?> insertRowAsync(final String tableName,
+            final Map<String,Object> values) {
+        Future<?> future = executorService.submit(
+            new StorageRunnable() {
+                public void doStorageOperation() {
+                    insertRow(tableName, values);
+                }
+            }, null);
+        return future;
+    }
+
+    @Override
+    public Future<?> updateRowsAsync(final String tableName, final List<Map<String,Object>> rows) {
+        Future<?> future = executorService.submit(    
+            new StorageRunnable() {
+                public void doStorageOperation() {
+                    updateRows(tableName, rows);
+                }
+            }, null);
+        return future;
+    }
+
+    @Override
+    public Future<?> updateMatchingRowsAsync(final String tableName,
+            final IPredicate predicate, final Map<String,Object> values) {
+        Future<?> future = executorService.submit(    
+            new StorageRunnable() {
+                public void doStorageOperation() {
+                    updateMatchingRows(tableName, predicate, values);
+                }
+            }, null);
+        return future;
+    }
+
+    @Override
+    public Future<?> updateRowAsync(final String tableName,
+            final Object rowKey, final Map<String,Object> values) {
+        Future<?> future = executorService.submit(
+            new StorageRunnable() {
+                public void doStorageOperation() {
+                    updateRow(tableName, rowKey, values);
+                }
+            }, null);
+        return future;
+    }
+
+    @Override
+    public Future<?> updateRowAsync(final String tableName,
+            final Map<String,Object> values) {
+        Future<?> future = executorService.submit(
+            new StorageRunnable() {
+                public void doStorageOperation() {
+                    updateRow(tableName, values);
+                }
+            }, null);
+        return future;
+    }
+
+    @Override
+    public Future<?> deleteRowAsync(final String tableName, final Object rowKey) {
+        Future<?> future = executorService.submit(
+            new StorageRunnable() {
+                public void doStorageOperation() {
+                    deleteRow(tableName, rowKey);
+                }
+            }, null);
+        return future;
+    }
+
+    @Override
+    public Future<?> deleteRowsAsync(final String tableName, final Set<Object> rowKeys) {
+        Future<?> future = executorService.submit(
+                new StorageRunnable() {
+                    public void doStorageOperation() {
+                        deleteRows(tableName, rowKeys);
+                    }
+                }, null);
+        return future;
+    }
+
+    @Override
+    public Future<?> deleteMatchingRowsAsync(final String tableName, final IPredicate predicate) {
+        Future<?> future = executorService.submit(
+                new StorageRunnable() {
+                    public void doStorageOperation() {
+                        deleteMatchingRows(tableName, predicate);
+                    }
+                }, null);
+        return future;
+    }
+
+    @Override
+    public Future<?> getRowAsync(final String tableName, final Object rowKey) {
+        Future<?> future = executorService.submit(
+            new StorageRunnable() {
+                public void doStorageOperation() {
+                    getRow(tableName, rowKey);
+                }
+            }, null);
+        return future;
+    }
+    
+    @Override
+    public Future<?> saveAsync(final IResultSet resultSet) {
+        Future<?> future = executorService.submit(
+            new StorageRunnable() {
+                public void doStorageOperation() {
+                    resultSet.save();
+                }
+            }, null);
+        return future;
+    }
+
+    @Override
+    public void insertRow(String tableName, Map<String, Object> values) {
+        updateCounters(STORAGE_UPDATE_COUNTER_NAME, tableName);
+        insertRowImpl(tableName, values);
+    }
+
+    protected abstract void insertRowImpl(String tableName, Map<String, Object> values);
+
+    
+    @Override
+    public void updateRows(String tableName, List<Map<String,Object>> rows) {
+        updateCounters(STORAGE_UPDATE_COUNTER_NAME, tableName);
+        updateRowsImpl(tableName, rows);
+    }
+
+    protected abstract void updateRowsImpl(String tableName, List<Map<String,Object>> rows);
+
+    @Override
+    public void updateMatchingRows(String tableName, IPredicate predicate,
+            Map<String, Object> values) {
+        updateCounters(STORAGE_UPDATE_COUNTER_NAME, tableName);
+        updateMatchingRowsImpl(tableName, predicate, values);
+    }
+    
+    protected abstract void updateMatchingRowsImpl(String tableName, IPredicate predicate,
+                                    Map<String, Object> values);
+
+    @Override
+    public void updateRow(String tableName, Object rowKey,
+            Map<String, Object> values) {
+        updateCounters(STORAGE_UPDATE_COUNTER_NAME, tableName);
+        updateRowImpl(tableName, rowKey, values);
+    }
+    
+    protected abstract void updateRowImpl(String tableName, Object rowKey,
+                                   Map<String, Object> values);
+
+    @Override
+    public void updateRow(String tableName, Map<String, Object> values) {
+        updateCounters(STORAGE_UPDATE_COUNTER_NAME, tableName);
+        updateRowImpl(tableName, values);
+    }
+    
+    protected abstract void updateRowImpl(String tableName, Map<String, Object> values);
+
+    @Override
+    public void deleteRow(String tableName, Object rowKey) {
+        updateCounters(STORAGE_DELETE_COUNTER_NAME, tableName);
+        deleteRowImpl(tableName, rowKey);
+    }
+    
+    protected abstract void deleteRowImpl(String tableName, Object rowKey);
+
+    @Override
+    public void deleteRows(String tableName, Set<Object> rowKeys) {
+        updateCounters(STORAGE_DELETE_COUNTER_NAME, tableName);
+        deleteRowsImpl(tableName, rowKeys);
+    }
+
+    protected abstract void deleteRowsImpl(String tableName, Set<Object> rowKeys);
+
+    @Override
+    public void deleteMatchingRows(String tableName, IPredicate predicate) {
+        IResultSet resultSet = null;
+        try {
+            resultSet = executeQuery(tableName, null, predicate, null);
+            while (resultSet.next()) {
+                resultSet.deleteRow();
+            }
+            resultSet.save();
+        }
+        finally {
+            if (resultSet != null)
+                resultSet.close();
+        }
+    }
+    
+    @Override
+    public IResultSet getRow(String tableName, Object rowKey) {
+        updateCounters(STORAGE_QUERY_COUNTER_NAME, tableName);
+        return getRowImpl(tableName, rowKey);
+    }
+
+    protected abstract IResultSet getRowImpl(String tableName, Object rowKey);
+
+    @Override
+    public synchronized void addListener(String tableName, IStorageSourceListener listener) {
+        Set<IStorageSourceListener> tableListeners = listeners.get(tableName);
+        if (tableListeners == null) {
+            tableListeners = new CopyOnWriteArraySet<IStorageSourceListener>();
+            listeners.put(tableName, tableListeners);
+        }
+        tableListeners.add(listener);
+    }
+  
+    @Override
+    public synchronized void removeListener(String tableName, IStorageSourceListener listener) {
+        Set<IStorageSourceListener> tableListeners = listeners.get(tableName);
+        if (tableListeners != null) {
+            tableListeners.remove(listener);
+        }
+    }
+
+    @LogMessageDoc(level="ERROR",
+            message="Exception caught handling storage notification",
+            explanation="An unknown error occured while trying to notify" +
+            		" storage listeners",
+            recommendation=LogMessageDoc.GENERIC_ACTION)
+    protected synchronized void notifyListeners(StorageSourceNotification notification) {
+        String tableName = notification.getTableName();
+        Set<Object> keys = notification.getKeys();
+        Set<IStorageSourceListener> tableListeners = listeners.get(tableName);
+        if (tableListeners != null) {
+            for (IStorageSourceListener listener : tableListeners) {
+                try {
+                    switch (notification.getAction()) {
+                        case MODIFY:
+                            listener.rowsModified(tableName, keys);
+                            break;
+                        case DELETE:
+                            listener.rowsDeleted(tableName, keys);
+                            break;
+                    }
+                }
+                catch (Exception e) {
+                    logger.error("Exception caught handling storage notification", e);
+                }
+            }
+        }
+    }
+    
+    @Override
+    public void notifyListeners(List<StorageSourceNotification> notifications) {
+        for (StorageSourceNotification notification : notifications)
+            notifyListeners(notification);
+    }
+    
+    // IFloodlightModule
+
+    @Override
+    public Collection<Class<? extends IFloodlightService>> getModuleServices() {
+        Collection<Class<? extends IFloodlightService>> l = 
+                new ArrayList<Class<? extends IFloodlightService>>();
+        l.add(IStorageSourceService.class);
+        return l;
+    }
+    
+    @Override
+    public Map<Class<? extends IFloodlightService>,
+               IFloodlightService> getServiceImpls() {
+        Map<Class<? extends IFloodlightService>,
+            IFloodlightService> m = 
+                new HashMap<Class<? extends IFloodlightService>,
+                            IFloodlightService>();
+        m.put(IStorageSourceService.class, this);
+        return m;
+    }
+    
+    @Override
+    public Collection<Class<? extends IFloodlightService>> getModuleDependencies() {
+        Collection<Class<? extends IFloodlightService>> l = 
+                new ArrayList<Class<? extends IFloodlightService>>();
+        l.add(IRestApiService.class);
+        l.add(ICounterStoreService.class);
+        return l;
+    }
+
+    @Override
+    public void init(FloodlightModuleContext context)
+            throws FloodlightModuleException {
+        restApi =
+           context.getServiceImpl(IRestApiService.class);
+        counterStore =
+            context.getServiceImpl(ICounterStoreService.class);
+    }
+
+    @Override
+    public void startUp(FloodlightModuleContext context) {
+        restApi.addRestletRoutable(new StorageWebRoutable());
+    }
+}
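
Because all asynchronous operations funnel through the single-threaded executor above, callers can rely on submission order and simply block on the returned Future when they need a write to be visible. A minimal sketch, using a hypothetical table and column names:

    import java.util.HashMap;
    import java.util.Map;
    import java.util.concurrent.Future;

    import net.floodlightcontroller.storage.IResultSet;
    import net.floodlightcontroller.storage.IStorageSourceService;

    public class StorageAsyncExample {
        /** Inserts one row asynchronously, waits for it, then reads it back. */
        public static void insertAndRead(IStorageSourceService storage) throws Exception {
            storage.createTable("example_table", null);               // hypothetical table
            storage.setTablePrimaryKeyName("example_table", "id");

            Map<String, Object> row = new HashMap<String, Object>();
            row.put("id", "row-1");
            row.put("value", "hello");

            Future<?> future = storage.insertRowAsync("example_table", row);
            future.get();   // operations run in order on the single worker thread

            IResultSet rs = storage.getRow("example_table", "row-1");
            try {
                while (rs.next()) {
                    System.out.println(rs.getString("value"));
                }
            } finally {
                rs.close();
            }
        }
    }
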
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/storage/CompoundPredicate.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/storage/CompoundPredicate.java
new file mode 100644
index 0000000..a23e560
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/storage/CompoundPredicate.java
@@ -0,0 +1,52 @@
+/**
+*    Copyright 2011, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.storage;
+
+/** Predicate class to handle AND and OR combinations of a number
+ * of child predicates. The result of the logical combination of the
+ * child predicates can also be negated to support a NOT operation.
+ * 
+ * @author rob
+ *
+ */
+public class CompoundPredicate implements IPredicate {
+
+    public enum Operator { AND, OR };
+    
+    private Operator operator;
+    private boolean negated;
+    private IPredicate[] predicateList;
+    
+    public CompoundPredicate(Operator operator, boolean negated, IPredicate... predicateList) {
+        this.operator = operator;
+        this.negated = negated;
+        this.predicateList = predicateList;
+    }
+    
+    public Operator getOperator() {
+        return operator;
+    }
+    
+    public boolean isNegated() {
+        return negated;
+    }
+    
+    public IPredicate[] getPredicateList() {
+        return predicateList;
+    }
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/storage/IPredicate.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/storage/IPredicate.java
new file mode 100644
index 0000000..291edff
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/storage/IPredicate.java
@@ -0,0 +1,26 @@
+/**
+*    Copyright 2011, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.storage;
+
+/** Common base interface for the OperatorPredicate and CompoundPredicate classes.
+ * It's necessary so that you can use either type of predicate as child
+ * predicates of a CompoundPredicate.
+ * @author rob
+ */
+public interface IPredicate {
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/storage/IQuery.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/storage/IQuery.java
new file mode 100644
index 0000000..b75b8ae
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/storage/IQuery.java
@@ -0,0 +1,39 @@
+/**
+*    Copyright 2011, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.storage;
+
+/** Representation of a database query. For SQL queries this maps to
+ * a prepared statement, so it will be more efficient than if you use the
+ * methods in IStorageSource that bypass the IQuery. For many NoSQL
+ * storage sources there won't be any performance improvement from keeping
+ * around the query.
+ * 
+ * The query interface also supports parameterized queries (the equivalent of
+ * ? placeholders in a SQL query). The values of the parameters are set
+ * using the setParameter method. In the storage source API the parameters
+ * are named rather than positional. The format of the parameterized values
+ * in the query predicates is the parameter name bracketed with question marks
+ * (e.g. ?MinimumSalary? ).
+ * 
+ * @author rob
+ *
+ */
+public interface IQuery {
+    String getTableName();
+    void setParameter(String name, Object value);
+}
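
A short sketch of the named-parameter pattern described above. It assumes the companion OperatorPredicate class (referenced from IPredicate's javadoc) with a (columnName, operator, value) constructor and a GTE operator; those details are assumptions here, while IQuery, IStorageSourceService, and IResultSet come from this import. The table and column names are illustrative.

    import net.floodlightcontroller.storage.IQuery;
    import net.floodlightcontroller.storage.IResultSet;
    import net.floodlightcontroller.storage.IStorageSourceService;
    import net.floodlightcontroller.storage.OperatorPredicate;

    public class ParameterizedQueryExample {
        /** Builds one query with a ?MinimumSalary? placeholder and reuses it with two values. */
        public static void run(IStorageSourceService storage) {
            IQuery query = storage.createQuery(
                    "employee_table",
                    new String[] {"name", "salary"},
                    new OperatorPredicate("salary",
                            OperatorPredicate.Operator.GTE, "?MinimumSalary?"),
                    null);   // null ordering: rows come back in an implementation-specific order

            for (int minimum : new int[] {50000, 100000}) {
                query.setParameter("MinimumSalary", minimum);
                IResultSet rs = storage.executeQuery(query);
                try {
                    while (rs.next()) {
                        System.out.println(rs.getString("name") + " " + rs.getInt("salary"));
                    }
                } finally {
                    rs.close();
                }
            }
        }
    }
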
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/storage/IResultSet.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/storage/IResultSet.java
new file mode 100644
index 0000000..fbd2a4a
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/storage/IResultSet.java
@@ -0,0 +1,106 @@
+/**
+*    Copyright 2011, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.storage;
+
+import java.util.Date;
+import java.util.Map;
+
+/** Interface to iterate over the results from a storage query.
+ * 
+ * @author rob
+ *
+ */
+public interface IResultSet extends Iterable<IResultSet> {
+    
+    /** This should be called when the client is done using the result set.
+     * This releases any underlying resources (e.g. a database connection);
+     * callers should not rely on finalizers to release them.
+     */
+    public void close();
+    
+    /** Advance to the next row in the result set. 
+     * @return Returns true if there are more rows to process
+     * (i.e. if there's a valid current row) and false if there are no more
+     * rows in the result set.
+     */
+    public boolean next();
+    
+    /** Save/commit any pending updates to the data in the result set.
+     * This must be called after any calls to the set methods or deleting rows
+     * for the changes to be applied/committed to the storage source. Note that
+     * this doesn't need to be called after each set method or even after each
+     * row. It is typically called at the end after updating all of the
+     * rows in the result set.
+     */
+    public void save();
+    
+    /** Get the current row in the result set. This returns all of the
+     * columns in the current row.
+     * @return Map containing all of the columns in the current row, indexed
+     * by the column name.
+     */
+    public Map<String,Object> getRow();
+    
+    /** Delete the current row in the result set.
+     */
+    public void deleteRow();
+    
+    public boolean containsColumn(String columnName);
+    
+    public String getString(String columnName);
+    public short getShort(String columnName);
+    public int getInt(String columnName);
+    public long getLong(String columnName);
+    public float getFloat(String columnName);
+    public double getDouble(String columnName);
+    public boolean getBoolean(String columnName);
+    public byte getByte(String columnName);
+    public byte[] getByteArray(String columnName);
+    public Date getDate(String columnName);
+    
+    public Short getShortObject(String columnName);
+    public Integer getIntegerObject(String columnName);
+    public Long getLongObject(String columnName);
+    public Float getFloatObject(String columnName);
+    public Double getDoubleObject(String columnName);
+    public Boolean getBooleanObject(String columnName);
+    public Byte getByteObject(String columnName);
+    
+    public boolean isNull(String columnName);
+    
+    public void setString(String columnName, String value);
+    public void setShort(String columnName, short value);
+    public void setInt(String columnName, int value);
+    public void setLong(String columnName, long value);
+    public void setFloat(String columnName, float value);
+    public void setDouble(String columnName, double value);
+    public void setBoolean(String columnName, boolean value);
+    public void setByte(String columnName, byte value);
+    public void setByteArray(String columnName, byte[] byteArray);
+    public void setDate(String columnName, Date date);
+    
+    public void setShortObject(String columnName, Short value);
+    public void setIntegerObject(String columnName, Integer value);
+    public void setLongObject(String columnName, Long value);
+    public void setFloatObject(String columnName, Float value);
+    public void setDoubleObject(String columnName, Double value);
+    public void setBooleanObject(String columnName, Boolean value);
+    public void setByteObject(String columnName, Byte value);
+    
+    public void setNull(String columnName);
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/storage/IRowMapper.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/storage/IRowMapper.java
new file mode 100644
index 0000000..6c4502b
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/storage/IRowMapper.java
@@ -0,0 +1,35 @@
+/**
+*    Copyright 2011, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.storage;
+
+/**
+ * Interface for mapping the current row in a result set to an object.
+ * This is based on the Spring JDBC support.
+ * 
+ * @author rob
+ */
+public interface IRowMapper {
+
+    /** This method must be implemented by the client of the storage API
+     * to map the current row in the result set to a Java object.
+     * 
+     * @param resultSet The result set obtained from a storage source query
+     * @return The object created from the data in the result set
+     */
+    Object mapRow(IResultSet resultSet);
+}
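
A minimal IRowMapper sketch in the Spring-JDBC style the comment mentions; the Employee value object, table, and column names are illustrative, and a null predicate is assumed to select every row.

    import net.floodlightcontroller.storage.IResultSet;
    import net.floodlightcontroller.storage.IRowMapper;
    import net.floodlightcontroller.storage.IStorageSourceService;

    public class RowMapperExample {
        /** Simple value object built from each row. */
        static class Employee {
            final String name;
            final int salary;
            Employee(String name, int salary) { this.name = name; this.salary = salary; }
        }

        static final IRowMapper EMPLOYEE_MAPPER = new IRowMapper() {
            public Object mapRow(IResultSet resultSet) {
                return new Employee(resultSet.getString("name"), resultSet.getInt("salary"));
            }
        };

        public static Object[] loadAll(IStorageSourceService storage) {
            // Assumes a null predicate selects every row; null ordering is implementation-specific.
            return storage.executeQuery("employee_table", new String[] {"name", "salary"},
                                        null, null, EMPLOYEE_MAPPER);
        }
    }
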
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/storage/IStorageExceptionHandler.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/storage/IStorageExceptionHandler.java
new file mode 100644
index 0000000..e3c8e94
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/storage/IStorageExceptionHandler.java
@@ -0,0 +1,22 @@
+/**
+*    Copyright 2011, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.storage;
+
+public interface IStorageExceptionHandler {
+    public void handleException(Exception exc);
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/storage/IStorageSourceListener.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/storage/IStorageSourceListener.java
new file mode 100644
index 0000000..ea3764d
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/storage/IStorageSourceListener.java
@@ -0,0 +1,39 @@
+/**
+*    Copyright 2011, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.storage;
+
+import java.util.Set;
+
+public interface IStorageSourceListener {
+
+    /**
+     * Called when rows are inserted or updated in the table.
+     * 
+     * @param tableName The table where the rows were inserted
+     * @param rowKeys The keys of the rows that were inserted
+     */
+    public void rowsModified(String tableName, Set<Object> rowKeys);
+    
+    /**
+     * Called when rows are deleted from the table.
+     * 
+     * @param tableName The table where the rows were deleted
+     * @param rowKeys The keys of the rows that were deleted
+     */
+    public void rowsDeleted(String tableName, Set<Object> rowKeys);
+}
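
A small listener sketch for this interface; the class name and logging are illustrative. Registration goes through IStorageSourceService.addListener, and the callbacks are dispatched from AbstractStorageSource.notifyListeners above.

    import java.util.Set;

    import net.floodlightcontroller.storage.IStorageSourceListener;
    import net.floodlightcontroller.storage.IStorageSourceService;

    public class TableChangeLogger implements IStorageSourceListener {

        @Override
        public void rowsModified(String tableName, Set<Object> rowKeys) {
            System.out.println("rows inserted/updated in " + tableName + ": " + rowKeys);
        }

        @Override
        public void rowsDeleted(String tableName, Set<Object> rowKeys) {
            System.out.println("rows deleted from " + tableName + ": " + rowKeys);
        }

        /** Registers this listener for change notifications on one table. */
        public void register(IStorageSourceService storage, String tableName) {
            storage.addListener(tableName, this);
        }
    }
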
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/storage/IStorageSourceService.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/storage/IStorageSourceService.java
new file mode 100644
index 0000000..b8a1be8
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/storage/IStorageSourceService.java
@@ -0,0 +1,331 @@
+/**
+*    Copyright 2011, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.storage;
+
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.Future;
+
+import net.floodlightcontroller.core.module.IFloodlightService;
+
+public interface IStorageSourceService extends IFloodlightService {
+
+    /** Set the column to be used as the primary key for a table. This should
+     * be guaranteed to be unique for all of the rows in the table, although the
+     * storage API does not necessarily enforce this requirement. If no primary
+     * key name is specified for a table then the storage API assumes there is
+     * a column named "id" that is used as the primary key. In this case when
+     * a new row is inserted using the storage API and no id is specified
+     * explicitly in the row data, the storage API automatically generates a
+     * unique ID (typically a UUID) for the id column. To work across all
+     * possible implementations of the storage API it is safest, though, to
+     * specify the primary key column explicitly.
+     * FIXME: It's sort of a kludge to have to specify the primary key column
+     * here. Ideally there would be some sort of metadata -- perhaps stored
+     * directly in the table, at least in the NoSQL case -- that the
+     * storage API could query to obtain the primary key info.
+     * @param tableName The name of the table for which we're setting the key
+     * @param primaryKeyName The name of column to be used as the primary key
+     */
+    public void setTablePrimaryKeyName(String tableName, String primaryKeyName);
+
+    /** Create a new table if one does not already exist with the given name.
+     * 
+     * @param tableName The name of the table to create.
+     * @param indexedColumns Which columns should be indexed
+     */
+    void createTable(String tableName, Set<String> indexedColumns);
+    
+    /**
+     * @return the set of all tables that have been created via createTable
+     */
+    Set<String> getAllTableNames();
+    
+    /** Create a query object representing the given query parameters. The query
+     * object can be passed to executeQuery to actually perform the query and obtain
+     * a result set.
+     * 
+     * @param tableName The name of the table to query.
+     * @param columnNames The list of columns to return in the result set.
+     * @param predicate The predicate that specifies which rows to return in the result set.
+     * @param ordering Specification of the order in which rows are returned in the
+     * result set produced by executing the query. If the ordering is null, then rows
+     * are returned in an implementation-specific order.
+     * @return Query object to be passed to executeQuery.
+     */
+    IQuery createQuery(String tableName, String[] columnNames, IPredicate predicate, RowOrdering ordering);
+    
+    /** Execute a query created with createQuery.
+     * 
+     * @param query The query to execute
+     * @return The result set containing the rows/columns specified in the query.
+     */
+    IResultSet executeQuery(IQuery query);
+
+    /** Execute a query created with the given query parameters.
+     *
+     * @param tableName The name of the table to query.
+     * @param columnNames The list of columns to return in the result set.
+     * @param predicate The predicate that specifies which rows to return in the result set.
+     * @param ordering Specification of the order in which rows are returned in the
+     * result set produced by executing the query. If the ordering is null, then rows
+     * are returned in an implementation-specific order.
+     * @return The result set containing the rows/columns specified in the query.
+     */
+    IResultSet executeQuery(String tableName, String[] columnNames, IPredicate predicate,
+            RowOrdering ordering);
+    
+    /** Execute a query and call the row mapper to map the results to Java objects.
+     * 
+     * @param tableName The name of the table to query.
+     * @param columnNames The list of columns to return in the result set.
+     * @param predicate The predicate that specifies which rows to return in the result set.
+     * @param ordering Specification of the order in which rows are returned in the
+     * result set produced by executing the query. If the ordering is null, then rows
+     * are returned in an implementation-specific order.
+     * @param rowMapper The client-supplied object that maps the data in a row in the result
+     * set to a client object.
+     * @return The result set containing the rows/columns specified in the query.
+     */
+    Object[] executeQuery(String tableName, String[] columnNames, IPredicate predicate,
+            RowOrdering ordering, IRowMapper rowMapper);
+    
+    /** Insert a new row in the table with the given column data.
+     * If the primary key is the default "id" column and is not specified in the
+     * values map, then a unique id will be automatically assigned to the row.
+     * @param tableName The name of the table to which to add the row
+     * @param values The map of column names/values to add to the table.
+     */
+    void insertRow(String tableName, Map<String,Object> values);
+
+    /** Update or insert a list of rows in the table.
+     * The primary key must be included in the map of values for each row.
+     * @param tableName The table to update or insert into
+     * @param rows The list of rows (maps of column names/values) to update or insert
+     */
+    void updateRows(String tableName, List<Map<String,Object>> rows);
+    
+    /** Update the rows in the given table. Any rows matching the predicate
+     * are updated with the column names/values specified in the values map.
+     * (The values map should not contain the special column "id".)
+     * @param tableName The table to update
+     * @param predicate The predicate to use to select which rows to update
+     * @param values The map of column names/values to update the rows.
+     */
+    void updateMatchingRows(String tableName, IPredicate predicate, Map<String,Object> values);
+    
+    /** Update or insert a row in the table with the given row key (primary
+     * key) and column names/values. (If the values map contains the special
+     * column "id", its value must match rowId.)
+     * @param tableName The table to update or insert into
+     * @param rowKey The ID (primary key) of the row to update
+     * @param values The map of column names/values to update the rows
+     */
+    void updateRow(String tableName, Object rowKey, Map<String,Object> values);
+    
+    /** Update or insert a row in the table with the given column data.
+     * The primary key must be included in the map of values.
+     * @param tableName The table to update or insert into
+     * @param values The map of column names/values to update the rows
+     */
+    void updateRow(String tableName, Map<String,Object> values);
+    
+    /** Delete the row with the given primary key.
+     * 
+     * @param tableName The table from which to delete the row
+     * @param rowKey The primary key of the row to delete.
+     */
+    void deleteRow(String tableName, Object rowKey);
+
+    /** Delete the rows with the given keys.
+     * 
+     * @param tableName The table from which to delete the rows
+     * @param rowKeys The set of primary keys of the rows to delete.
+     */
+    void deleteRows(String tableName, Set<Object> rowKeys);
+    
+    /**
+     * Delete the rows that match the predicate
+     * @param tableName
+     * @param predicate
+     */
+    void deleteMatchingRows(String tableName, IPredicate predicate);
+    
+    /** Query for a row with the given ID (primary key).
+     * 
+     * @param tableName The name of the table to query
+     * @param rowKey The primary key of the row
+     * @return The result set containing the row with the given ID
+     */
+    IResultSet getRow(String tableName, Object rowKey);
+    
+    /**
+     * Set exception handler to use for asynchronous operations.
+     * @param exceptionHandler
+     */
+    void setExceptionHandler(IStorageExceptionHandler exceptionHandler);
+    
+    /**
+     * Asynchronous variant of executeQuery.
+     * 
+     * @param query
+     * @return
+     */
+    public Future<IResultSet> executeQueryAsync(final IQuery query);
+    
+    /**
+     * Asynchronous variant of executeQuery.
+     * 
+     * @param tableName
+     * @param columnNames
+     * @param predicate
+     * @param ordering
+     * @return
+     */
+    public Future<IResultSet> executeQueryAsync(final String tableName,
+            final String[] columnNames,  final IPredicate predicate,
+            final RowOrdering ordering);
+    
+    /**
+     * Asynchronous variant of executeQuery
+     * 
+     * @param tableName
+     * @param columnNames
+     * @param predicate
+     * @param ordering
+     * @param rowMapper
+     * @return
+     */
+    public Future<Object[]> executeQueryAsync(final String tableName,
+            final String[] columnNames,  final IPredicate predicate,
+            final RowOrdering ordering, final IRowMapper rowMapper);
+    
+    /**
+     * Asynchronous variant of insertRow.
+     * 
+     * @param tableName
+     * @param values
+     * @return
+     */
+    public Future<?> insertRowAsync(final String tableName, final Map<String,Object> values);
+
+    /**
+     * Asynchronous variant of updateRows
+     * @param tableName
+     * @param rows
+     */
+    public Future<?> updateRowsAsync(final String tableName, final List<Map<String,Object>> rows);
+
+    /**
+     * Asynchronous variant of updateMatchingRows
+     * 
+     * @param tableName
+     * @param predicate
+     * @param values
+     * @return
+     */
+    public Future<?> updateMatchingRowsAsync(final String tableName, final IPredicate predicate,
+            final Map<String,Object> values);
+
+    /**
+     * Asynchronous variant of updateRow
+     * 
+     * @param tableName
+     * @param rowKey
+     * @param values
+     * @return
+     */
+    public Future<?> updateRowAsync(final String tableName, final Object rowKey,
+            final Map<String,Object> values);
+            
+    /**
+     * Asynchronous version of updateRow
+     * 
+     * @param tableName
+     * @param values
+     * @return
+     */
+    public Future<?> updateRowAsync(final String tableName, final Map<String,Object> values);
+    
+    /**
+     * Asynchronous version of deleteRow
+     * 
+     * @param tableName
+     * @param rowKey
+     * @return
+     */
+    public Future<?> deleteRowAsync(final String tableName, final Object rowKey);
+
+    /**
+     * Asynchronous version of deleteRows
+     * 
+     * @param tableName
+     * @param rowKeys
+     * @return
+     */
+    public Future<?> deleteRowsAsync(final String tableName, final Set<Object> rowKeys);
+
+    /**
+     * Asynchronous version of deleteRows
+     * 
+     * @param tableName
+     * @param predicate
+     * @return
+     */
+    public Future<?> deleteMatchingRowsAsync(final String tableName, final IPredicate predicate);
+    
+    /**
+     * Asynchronous version of getRow
+     * 
+     * @param tableName
+     * @param rowKey
+     * @return
+     */
+    public Future<?> getRowAsync(final String tableName, final Object rowKey);
+    
+    /**
+     * Asynchronous version of save
+     * 
+     * @param resultSet
+     * @return
+     */
+    public Future<?> saveAsync(final IResultSet resultSet);
+    
+    /** Add a listener to the specified table. The listener is called
+     * when any modifications are made to the table. You can add the same
+     * listener instance to multiple tables, since the table name is
+     * included as a parameter in the listener methods.
+     * @param tableName The name of the table to listen for modifications
+     * @param listener The listener instance to call
+     */
+    public void addListener(String tableName, IStorageSourceListener listener);
+    
+    /** Remove a listener from the specified table. The listener should
+     * have been previously added to the table with addListener.
+     * @param tableName The name of the table with the listener
+     * @param listener The previously installed listener instance
+     */
+    public void removeListener(String tableName, IStorageSourceListener listener);
+    
+    /** This is logically a private method and should not be called by
+     * clients of this interface.
+     * @param notifications the notifications to dispatch
+     */
+    public void notifyListeners(List<StorageSourceNotification> notifications);
+}
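
For orientation, here is a minimal client-side sketch of the interface above. It is illustrative only and not part of the imported source: the "switch_config" table, its columns, and the key value are invented, and it assumes createTable accepts a null set of indexed columns.

    import java.util.HashMap;
    import java.util.Map;

    import net.floodlightcontroller.storage.IResultSet;
    import net.floodlightcontroller.storage.IStorageSourceService;
    import net.floodlightcontroller.storage.OperatorPredicate;
    import net.floodlightcontroller.storage.RowOrdering;

    public class StorageUsageSketch {
        // Hypothetical table and columns, used only for illustration.
        static void example(IStorageSourceService storage) {
            storage.createTable("switch_config", null);
            storage.setTablePrimaryKeyName("switch_config", "dpid");

            Map<String, Object> row = new HashMap<String, Object>();
            row.put("dpid", "00:00:00:00:00:00:00:01");
            row.put("core_switch", Boolean.TRUE);
            storage.insertRow("switch_config", row);

            // Query for core switches, ordered by the primary key column.
            IResultSet rs = storage.executeQuery("switch_config",
                    new String[] { "dpid", "core_switch" },
                    new OperatorPredicate("core_switch",
                            OperatorPredicate.Operator.EQ, Boolean.TRUE),
                    new RowOrdering("dpid"));
            while (rs.next()) {
                String dpid = rs.getString("dpid");
                System.out.println("core switch: " + dpid);
            }
            rs.close();
        }
    }
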
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/storage/NullValueStorageException.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/storage/NullValueStorageException.java
new file mode 100644
index 0000000..0c148b8
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/storage/NullValueStorageException.java
@@ -0,0 +1,44 @@
+/**
+*    Copyright 2011, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.storage;
+
+public class NullValueStorageException extends StorageException {
+
+    private static final long serialVersionUID = 897572085681189926L;
+
+    private static String makeExceptionMessage(String columnName) {
+        String message = "Null column value could not be converted to built-in type";
+        if (columnName != null) {
+            message += ": column name = ";
+            message += columnName;
+        }
+        return message;
+    }
+    
+    public NullValueStorageException() {
+        super(makeExceptionMessage(null));
+    }
+    
+    public NullValueStorageException(String columnName) {
+        super(makeExceptionMessage(columnName));
+    }
+    
+    public NullValueStorageException(String columnName, Throwable exc) {
+        super(makeExceptionMessage(columnName), exc);
+    }
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/storage/OperatorPredicate.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/storage/OperatorPredicate.java
new file mode 100644
index 0000000..dc78260
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/storage/OperatorPredicate.java
@@ -0,0 +1,51 @@
+/**
+*    Copyright 2011, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.storage;
+
+/** Predicate class to specify rows by equality or comparison operations
+ * of column values. The Storage API uses the special column name of "id"
+ * to specify the primary key values for the row.
+ * 
+ * @author rob
+ */
+public class OperatorPredicate implements IPredicate {
+    
+    public enum Operator { EQ, LT, LTE, GT, GTE };
+    
+    private String columnName;
+    private Operator operator;
+    private Comparable<?> value;
+    
+    public OperatorPredicate(String columnName, Operator operator, Comparable<?> value) {
+        this.columnName = columnName;
+        this.operator = operator;
+        this.value = value;
+    }
+    
+    public String getColumnName() {
+        return columnName;
+    }
+    
+    public Operator getOperator() {
+        return operator;
+    }
+    
+    public Comparable<?> getValue() {
+        return value;
+    }
+}
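
As a quick, hypothetical illustration of the comparison operators above (table and column names invented, and `storage` assumed to be an IStorageSourceService):

    // Select rows whose "priority" column is greater than or equal to 100.
    static IResultSet highPriorityRows(IStorageSourceService storage) {
        IPredicate atLeast100 = new OperatorPredicate("priority",
                OperatorPredicate.Operator.GTE, Integer.valueOf(100));
        return storage.executeQuery("flow_entries",
                new String[] { "name", "priority" }, atLeast100, null);
    }

Passing null for the RowOrdering argument leaves the row order implementation-specific, as documented on executeQuery.
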
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/storage/ResultSetIterator.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/storage/ResultSetIterator.java
new file mode 100644
index 0000000..669833d
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/storage/ResultSetIterator.java
@@ -0,0 +1,64 @@
+/**
+*    Copyright 2011, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.storage;
+
+import java.util.Iterator;
+import java.util.NoSuchElementException;
+
+/** Iterator wrapper for an IResultSet, useful for iterating through query
+ * results in an enhanced for (foreach) loop.
+ * 
+ * Note that the iterator manipulates the state of the underlying IResultSet.
+ */
+public class ResultSetIterator implements Iterator<IResultSet> {
+    private IResultSet resultSet;
+    private boolean hasAnother;
+    private boolean peekedAtNext;
+    
+    public ResultSetIterator(IResultSet resultSet) {
+        this.resultSet = resultSet;
+        this.peekedAtNext = false;
+    }
+    
+    @Override
+    public IResultSet next() {
+        if (!peekedAtNext) {
+            hasAnother = resultSet.next();
+        }
+        peekedAtNext = false;
+        if (!hasAnother)
+            throw new NoSuchElementException();
+        return resultSet;
+    }
+    
+    @Override
+    public boolean hasNext() {
+        if (!peekedAtNext) {
+            hasAnother = resultSet.next();
+            peekedAtNext = true;
+        }
+        return hasAnother;
+    }
+    
+    /** Row removal is not supported; use IResultSet.deleteRow instead.
+     */
+    @Override
+    public void remove() {
+        throw new UnsupportedOperationException();
+    }
+}
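
The peek-ahead contract above (hasNext() advances the underlying result set once, and the following next() consumes that peek) can be exercised directly. A hypothetical fragment, assuming the query produced a "name" column:

    static void printNames(IResultSet resultSet) {
        // Every next() call yields the same IResultSet, positioned on the next row.
        Iterator<IResultSet> it = new ResultSetIterator(resultSet);
        while (it.hasNext()) {
            IResultSet row = it.next();
            System.out.println(row.getString("name"));
        }
    }
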
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/storage/RowOrdering.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/storage/RowOrdering.java
new file mode 100644
index 0000000..f9e61ed
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/storage/RowOrdering.java
@@ -0,0 +1,119 @@
+/**
+*    Copyright 2011, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.storage;
+
+import java.util.ArrayList;
+import java.util.List;
+
+public class RowOrdering {
+    
+    public enum Direction { ASCENDING, DESCENDING };
+    
+    public class Item {
+        
+        private String column;
+        private Direction direction;
+        
+        public Item(String column, Direction direction) {
+            assert(column != null);
+            assert(direction != null);
+            this.column = column;
+            this.direction = direction;
+        }
+        
+        public String getColumn() {
+            return column;
+        }
+        
+        public Direction getDirection() {
+            return direction;
+        }
+    }
+    
+    private List<Item> itemList = new ArrayList<Item>();
+    
+    public RowOrdering() {
+    }
+    
+    public RowOrdering(String column) {
+        add(column);
+    }
+    
+    public RowOrdering(String column, Direction direction) {
+        add(column, direction);
+    }
+    
+    public RowOrdering(Item item) {
+        add(item);
+    }
+    
+    public RowOrdering(Item[] itemArray) {
+        add(itemArray);
+    }
+    
+    public RowOrdering(List<Item> itemList) {
+        add(itemList);
+    }
+    
+    public void add(String column) {
+        itemList.add(new Item(column, Direction.ASCENDING));
+    }
+    
+    public void add(String column, Direction direction) {
+        itemList.add(new Item(column, direction));
+    }
+    
+    public void add(Item item) {
+        assert(item != null);
+        itemList.add(item);
+    }
+    
+    public void add(Item[] itemArray) {
+        for (Item item: itemArray) {
+            itemList.add(item);
+        }
+    }
+    
+    public void add(List<Item> itemList) {
+        this.itemList.addAll(itemList);
+    }
+    
+    public List<Item> getItemList() {
+        return itemList;
+    }
+    
+    public boolean equals(RowOrdering rowOrdering) {
+        if (rowOrdering == null)
+            return false;
+        
+        int len1 = itemList.size();
+        int len2 = rowOrdering.getItemList().size();
+        if (len1 != len2)
+            return false;
+        
+        for (int i = 0; i < len1; i++) {
+            Item item1 = itemList.get(i);
+            Item item2 = rowOrdering.getItemList().get(i);
+            if (!item1.getColumn().equals(item2.getColumn()) ||
+                    item1.getDirection() != item2.getDirection())
+                return false;
+        }
+        
+        return true;
+    }
+}
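
For example, a two-column ordering (column names invented) composes with the add() overloads above; handing the object to createQuery or executeQuery requests rows in that order:

    // Ascending by switch DPID, then descending by port number.
    RowOrdering ordering = new RowOrdering("switch_dpid");
    ordering.add("port_number", RowOrdering.Direction.DESCENDING);
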
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/storage/StorageException.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/storage/StorageException.java
new file mode 100644
index 0000000..f5dea23
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/storage/StorageException.java
@@ -0,0 +1,44 @@
+/**
+*    Copyright 2011, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.storage;
+
+public class StorageException extends RuntimeException {
+
+    static final long serialVersionUID = 7839989010156155681L;
+    
+    static private String makeExceptionMessage(String s) {
+        String message = "Storage Exception";
+        if (s != null) {
+            message += ": ";
+            message += s;
+        }
+        return message;
+    }
+
+    public StorageException() {
+        super(makeExceptionMessage(null));
+    }
+    
+    public StorageException(String s) {
+        super(makeExceptionMessage(s));
+    }
+    
+    public StorageException(String s, Throwable exc) {
+        super(makeExceptionMessage(s), exc);
+    }
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/storage/StorageNotificationFormatException.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/storage/StorageNotificationFormatException.java
new file mode 100644
index 0000000..f6ce565
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/storage/StorageNotificationFormatException.java
@@ -0,0 +1,26 @@
+/**
+*    Copyright 2011, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.storage;
+
+public class StorageNotificationFormatException extends StorageException {
+    private static final long serialVersionUID = 504758477518283156L;
+
+    public StorageNotificationFormatException() {
+        super("Invalid storage notification format");
+    }
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/storage/StorageSourceNotification.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/storage/StorageSourceNotification.java
new file mode 100644
index 0000000..c9a5450
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/storage/StorageSourceNotification.java
@@ -0,0 +1,108 @@
+/**
+*    Copyright 2011, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.storage;
+
+import java.util.Set;
+
+public class StorageSourceNotification {
+    
+    public enum Action { MODIFY, DELETE };
+    
+    private String tableName;
+    private Action action;
+    private Set<Object> keys;
+    
+    public StorageSourceNotification() {
+    }
+    
+    public StorageSourceNotification(String tableName, Action action, Set<Object> keys) {
+        this.tableName = tableName;
+        this.action = action;
+        this.keys = keys;
+    }
+    
+    public String getTableName() {
+        return tableName;
+    }
+    
+    public Action getAction() {
+        return action;
+    }
+    
+    public Set<Object> getKeys() {
+        return keys;
+    }
+    
+    public void setTableName(String tableName) {
+        this.tableName = tableName;
+    }
+    
+    public void setAction(Action action) {
+        this.action = action;
+    }
+    
+    public void setKeys(Set<Object> keys) {
+        this.keys = keys;
+    }
+    
+    /* (non-Javadoc)
+     * @see java.lang.Object#hashCode()
+     */
+    @Override
+    public int hashCode() {
+        final int prime = 7867;
+        int result = 1;
+        result = prime * result + tableName.hashCode();
+        result = prime * result + action.hashCode();
+        result = prime * result + keys.hashCode();
+        return result;
+    }
+    
+    @Override
+    public boolean equals(Object obj) {
+        if (this == obj)
+            return true;
+        if (obj == null)
+            return false;
+        if (!(obj instanceof StorageSourceNotification))
+            return false;
+        StorageSourceNotification other = (StorageSourceNotification) obj;
+        if (tableName == null) {
+            if (other.tableName != null)
+                return false;
+        } else if (!tableName.equals(other.tableName))
+            return false;
+        if (action == null) {
+            if (other.action != null)
+                return false;
+        } else if (action != other.action)
+            return false;
+        if (keys == null) {
+            if (other.keys != null)
+                return false;
+        } else if (!keys.equals(other.keys))
+            return false;
+        return true;
+    }
+    
+    @Override
+    public String toString() {
+        return ("StorageNotification[table=" + tableName + "; action=" +
+                 action.toString() + "; keys=" + keys.toString() + "]");
+    }
+}
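
To make the notification path concrete, this hedged sketch pairs a trivial listener with the notification a storage source would dispatch to it; the table name and key value are invented:

    import java.util.Collections;
    import java.util.Set;

    import net.floodlightcontroller.storage.IStorageSourceListener;
    import net.floodlightcontroller.storage.StorageSourceNotification;

    public class LoggingListener implements IStorageSourceListener {
        @Override
        public void rowsModified(String tableName, Set<Object> rowKeys) {
            System.out.println("rows modified in " + tableName + ": " + rowKeys);
        }

        @Override
        public void rowsDeleted(String tableName, Set<Object> rowKeys) {
            System.out.println("rows deleted from " + tableName + ": " + rowKeys);
        }

        // The notification notifyListeners() would fan out for one modified row.
        static StorageSourceNotification exampleNotification() {
            return new StorageSourceNotification("switch_config",
                    StorageSourceNotification.Action.MODIFY,
                    Collections.<Object>singleton("00:00:00:00:00:00:00:01"));
        }
    }
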
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/storage/SynchronousExecutorService.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/storage/SynchronousExecutorService.java
new file mode 100644
index 0000000..f1e7cd3
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/storage/SynchronousExecutorService.java
@@ -0,0 +1,177 @@
+/**
+*    Copyright 2011, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.storage;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+
+public class SynchronousExecutorService implements ExecutorService {
+
+    class SynchronousFuture<T> implements Future<T> {
+
+        T result;
+        Exception exc;
+        
+        public SynchronousFuture() {
+        }
+        
+        public SynchronousFuture(T result) {
+            this.result = result;
+        }
+        
+        public SynchronousFuture(Exception exc) {
+            this.exc = exc;
+        }
+        
+        @Override
+        public boolean cancel(boolean mayInterruptIfRunning) {
+            return false;
+        }
+
+        @Override
+        public boolean isCancelled() {
+            return false;
+        }
+
+        @Override
+        public boolean isDone() {
+            return true;
+        }
+
+        @Override
+        public T get() throws InterruptedException, ExecutionException {
+            if (exc != null)
+                throw new ExecutionException(exc);
+            return result;
+        }
+
+        @Override
+        public T get(long timeout, TimeUnit unit) throws InterruptedException,
+                ExecutionException, TimeoutException {
+            return get();
+        }
+    }
+    
+    @Override
+    public void shutdown() {
+    }
+
+    @Override
+    public List<Runnable> shutdownNow() {
+        return null;
+    }
+
+    @Override
+    public boolean isShutdown() {
+        return false;
+    }
+
+    @Override
+    public boolean isTerminated() {
+        return false;
+    }
+
+    @Override
+    public boolean awaitTermination(long timeout, TimeUnit unit)
+            throws InterruptedException {
+        return false;
+    }
+
+    @Override
+    public <T> List<Future<T>> invokeAll(Collection<? extends Callable<T>> tasks)
+            throws InterruptedException {
+        List<Future<T>> l = new ArrayList<Future<T>>();
+        for (Callable<T> task : tasks) {
+            Future<T> future = submit(task);
+            l.add(future);
+        }
+        return l;
+    }
+
+    @Override
+    public <T> List<Future<T>> invokeAll(
+            Collection<? extends Callable<T>> tasks, long timeout, TimeUnit units)
+            throws InterruptedException {
+        return invokeAll(tasks);
+    }
+
+    @Override
+    public <T> T invokeAny(Collection<? extends Callable<T>> tasks)
+            throws InterruptedException, ExecutionException {
+        for (Callable<T> task : tasks) {
+            try {
+                return task.call(); // return the first successful result
+            } catch (Exception e) {
+                // ignore the failure and try the next task
+            }
+        }
+        throw new ExecutionException(new Exception("no task completed successfully"));
+    }
+
+    @Override
+    public <T> T invokeAny(Collection<? extends Callable<T>> tasks, long timeout,
+            TimeUnit units) throws InterruptedException, ExecutionException,
+            TimeoutException {
+        return invokeAny(tasks);
+    }
+
+    @Override
+    public <T> Future<T> submit(Callable<T> callable) {
+        try {
+            T result = callable.call();
+            return new SynchronousFuture<T>(result);
+        }
+        catch (Exception exc) {
+            return new SynchronousFuture<T>(exc);
+        }
+    }
+    
+    @Override
+    public Future<?> submit(Runnable runnable) {
+        try {
+            runnable.run();
+            return new SynchronousFuture<Void>();
+        }
+        catch (Exception exc) {
+            return new SynchronousFuture<Void>(exc);
+        }
+    }
+    
+    @Override
+    public <T> Future<T> submit(Runnable runnable, T result) {
+        try {
+            runnable.run();
+            return new SynchronousFuture<T>(result);
+        }
+        catch (Exception exc) {
+            return new SynchronousFuture<T>(exc);
+        }
+    }
+    
+    @Override
+    public void execute(Runnable command) {
+        command.run();
+    }
+}
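
Because every task runs inline on the calling thread, a Future obtained from this executor is already complete when submit() returns, so get() never blocks. A small illustrative sketch (not part of the import):

    import java.util.concurrent.Callable;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Future;

    import net.floodlightcontroller.storage.SynchronousExecutorService;

    public class SynchronousExecutorDemo {
        public static void main(String[] args) throws Exception {
            ExecutorService exec = new SynchronousExecutorService();
            Future<Integer> f = exec.submit(new Callable<Integer>() {
                @Override
                public Integer call() {
                    return 42;   // executed immediately, before submit() returns
                }
            });
            System.out.println(f.isDone());   // true: the work has already run
            System.out.println(f.get());      // 42, returned without blocking
        }
    }
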
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/storage/TypeMismatchStorageException.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/storage/TypeMismatchStorageException.java
new file mode 100644
index 0000000..5643140
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/storage/TypeMismatchStorageException.java
@@ -0,0 +1,42 @@
+/**
+*    Copyright 2011, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.storage;
+
+public class TypeMismatchStorageException extends StorageException {
+
+    private static final long serialVersionUID = -7923586656854871345L;
+
+    private static String makeExceptionMessage(String requestedType, String actualType, String columnName) {
+        if (requestedType == null)
+            requestedType = "???";
+        if (actualType == null)
+            actualType = "???";
+        if (columnName == null)
+            columnName = "???";
+        String message = "The requested type (" + requestedType + ") does not match the actual type (" + actualType + ") of the value for column \"" + columnName + "\".";
+        return message;
+    }
+    
+    public TypeMismatchStorageException() {
+        super(makeExceptionMessage(null, null, null));
+    }
+    
+    public TypeMismatchStorageException(String requestedType, String actualType, String columnName) {
+        super(makeExceptionMessage(requestedType, actualType, columnName));
+    }
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/storage/memory/MemoryStorageSource.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/storage/memory/MemoryStorageSource.java
new file mode 100644
index 0000000..8a69eca
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/storage/memory/MemoryStorageSource.java
@@ -0,0 +1,198 @@
+/**
+*    Copyright 2011, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.storage.memory;
+
+import net.floodlightcontroller.core.module.FloodlightModuleContext;
+import net.floodlightcontroller.perfmon.IPktInProcessingTimeService;
+import net.floodlightcontroller.storage.nosql.NoSqlStorageSource;
+import net.floodlightcontroller.storage.SynchronousExecutorService;
+import net.floodlightcontroller.storage.IStorageSourceService;
+import net.floodlightcontroller.core.module.IFloodlightService;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import net.floodlightcontroller.storage.StorageException;
+
+public class MemoryStorageSource extends NoSqlStorageSource {
+    
+    private Map<String, MemoryTable> tableMap = new HashMap<String,MemoryTable>();
+    IPktInProcessingTimeService pktinProcessingTime;
+    
+    synchronized private MemoryTable getTable(String tableName, boolean create) {
+        MemoryTable table = tableMap.get(tableName);
+        if (table == null) {
+            if (!create)
+                throw new StorageException("Table " + tableName + " does not exist");
+            table = new MemoryTable(tableName);
+            tableMap.put(tableName, table);
+        }
+        return table;
+    }
+    
+    @Override
+    protected Collection<Map<String,Object>> getAllRows(String tableName, String[] columnNameList) {
+        MemoryTable table = getTable(tableName, false);
+        return table.getAllRows();
+    }
+    
+    @Override
+    protected Map<String,Object> getRow(String tableName, String[] columnNameList, Object rowKey) {
+        MemoryTable table = getTable(tableName, false);
+        return table.getRow(rowKey);
+    }
+    
+    @Override
+    protected List<Map<String,Object>> executeEqualityQuery(String tableName,
+            String[] columnNameList, String predicateColumnName, Comparable<?> value) {
+        MemoryTable table = getTable(tableName, false);
+        List<Map<String,Object>> result = new ArrayList<Map<String,Object>>();
+        synchronized (table) {
+            Collection<Map<String,Object>> allRows = table.getAllRows();
+            for (Map<String,Object> row : allRows) {
+                Object v = row.get(predicateColumnName);
+                if (value.equals(v)) {
+                    result.add(row);
+                }
+            }
+        }
+        return result;
+    }
+    
+    @SuppressWarnings({ "unchecked", "rawtypes" })
+    @Override
+    protected List<Map<String,Object>> executeRangeQuery(String tableName,
+            String[] columnNameList, String predicateColumnName,
+            Comparable<?> startValue, boolean startInclusive, Comparable<?> endValue, boolean endInclusive) {
+        MemoryTable table = getTable(tableName, false);
+        List<Map<String,Object>> result = new ArrayList<Map<String,Object>>();
+        synchronized (table) {
+            Collection<Map<String,Object>> allRows = table.getAllRows();
+            for (Map<String,Object> row : allRows) {
+                Comparable value = (Comparable) row.get(predicateColumnName);
+                if (value != null) {
+                    int compareResult = value.compareTo(startValue);
+                    if ((compareResult > 0) || (startInclusive && (compareResult >= 0))) {
+                        compareResult = value.compareTo(endValue);
+                        if ((compareResult < 0) || (endInclusive && (compareResult <= 0))) {
+                            result.add(row);
+                        }
+                    }
+                }
+            }
+        }
+        return result;
+    }
+    
+    @Override
+    protected void insertRows(String tableName, List<Map<String,Object>> insertRowList) {
+        MemoryTable table = getTable(tableName, false);
+        String primaryKeyName = getTablePrimaryKeyName(tableName);
+        synchronized (table) {
+            for (Map<String,Object> row : insertRowList) {
+                Object primaryKey = row.get(primaryKeyName);
+                if (primaryKey == null) {
+                    if (primaryKeyName.equals(DEFAULT_PRIMARY_KEY_NAME)) {
+                        row = new HashMap<String,Object>(row);
+                        primaryKey = table.getNextId();
+                        row.put(primaryKeyName, primaryKey);
+                    }
+                }
+                table.insertRow(primaryKey, row);
+            }
+        }
+    }
+    
+    @Override
+    protected void updateRows(String tableName, Set<Object> rowKeys, Map<String,Object> updateRowList) {
+        MemoryTable table = getTable(tableName, false);
+        synchronized (table) {
+            for (Object rowKey : rowKeys) {
+                Map<String,Object> row = table.getRow(rowKey);
+                if (row == null)
+                    row = table.newRow(rowKey);
+                for (Map.Entry<String,Object> entry: updateRowList.entrySet()) {
+                    row.put(entry.getKey(), entry.getValue());
+                }
+            }
+        }
+    }
+    
+    @Override
+    protected void updateRowsImpl(String tableName, List<Map<String,Object>> updateRowList) {
+        MemoryTable table = getTable(tableName, false);
+        String primaryKeyName = getTablePrimaryKeyName(tableName);
+        synchronized (table) {
+            for (Map<String,Object> updateRow : updateRowList) {
+                Object rowKey = updateRow.get(primaryKeyName);
+                if (rowKey == null)
+                    throw new StorageException("Primary key not found.");
+                Map<String,Object> row = table.getRow(rowKey);
+                if (row == null)
+                    row = table.newRow(rowKey);
+                for (Map.Entry<String,Object> entry: updateRow.entrySet()) {
+                    row.put(entry.getKey(), entry.getValue());
+                }
+            }
+        }
+    }
+    
+    @Override
+    protected void deleteRowsImpl(String tableName, Set<Object> rowKeys) {
+        MemoryTable table = getTable(tableName, false);
+        synchronized (table) {
+            for (Object rowKey : rowKeys) {
+                table.deleteRow(rowKey);
+            }
+        }
+    }
+    
+    @Override
+    public void createTable(String tableName, Set<String> indexedColumnNames) {
+        super.createTable(tableName, indexedColumnNames);
+        getTable(tableName, true);
+    }
+    
+    public void setPktinProcessingTime(
+            IPktInProcessingTimeService pktinProcessingTime) {
+        this.pktinProcessingTime = pktinProcessingTime;
+    }
+
+    // IFloodlightModule methods
+
+    @Override
+    public void startUp(FloodlightModuleContext context) {
+        super.startUp(context);
+        executorService = new SynchronousExecutorService();
+    }
+
+    @Override
+    public Map<Class<? extends IFloodlightService>,
+               IFloodlightService> getServiceImpls() {
+        Map<Class<? extends IFloodlightService>,
+            IFloodlightService> m =
+                new HashMap<Class<? extends IFloodlightService>,
+                            IFloodlightService>();
+        m.put(IStorageSourceService.class, this);
+        return m;
+    }
+}
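
In a module-based controller this class is what other modules see behind IStorageSourceService. A hedged sketch of obtaining and exercising it through the module context (assuming the usual FloodlightModuleContext.getServiceImpl lookup; the table name is invented):

    static void useStorage(FloodlightModuleContext context) {
        IStorageSourceService storage =
                context.getServiceImpl(IStorageSourceService.class);
        storage.createTable("controller_state", null);
        // startUp() installs SynchronousExecutorService, so the *Async
        // variants of the storage calls also complete inline.
    }
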
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/storage/memory/MemoryTable.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/storage/memory/MemoryTable.java
new file mode 100644
index 0000000..f87ee45
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/storage/memory/MemoryTable.java
@@ -0,0 +1,72 @@
+/**
+*    Copyright 2011, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.storage.memory;
+
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.TreeMap;
+
+public class MemoryTable {
+
+    private String tableName;
+    private Map<Object,Map<String,Object>> rowMap;
+    private int nextId;
+    
+    MemoryTable(String tableName) {
+        this.tableName = tableName;
+        rowMap = new TreeMap<Object,Map<String,Object>>();
+        nextId = 0;
+    }
+    
+    String getTableName() {
+        return tableName;
+    }
+    
+    Collection<Map<String,Object>> getAllRows() {
+        return rowMap.values();
+    }
+    
+    Map<String,Object> getRow(Object key) {
+        Map<String,Object> row = rowMap.get(key);
+        return row;
+    }
+    
+    // rkv: Do we still need this? Probably needs to be tweaked a bit
+    // to work with the support for specifying which column to use as the
+    // primary key
+    Map<String,Object> newRow(Object key) {
+        Map<String,Object> row = new HashMap<String, Object>();
+        row.put("id", key);
+        rowMap.put(key, row);
+        return row;
+    }
+    
+    void insertRow(Object key, Map<String,Object> rowValues) {
+        assert(key != null);
+        rowMap.put(key, rowValues);
+    }
+    
+    void deleteRow(Object rowKey) {
+        rowMap.remove(rowKey);
+    }
+    
+    Integer getNextId() {
+        return Integer.valueOf(++nextId);
+    }
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/storage/nosql/NoSqlQuery.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/storage/nosql/NoSqlQuery.java
new file mode 100644
index 0000000..05f8fc7
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/storage/nosql/NoSqlQuery.java
@@ -0,0 +1,77 @@
+/**
+*    Copyright 2011, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.storage.nosql;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import net.floodlightcontroller.storage.IPredicate;
+import net.floodlightcontroller.storage.IQuery;
+import net.floodlightcontroller.storage.RowOrdering;
+
+public class NoSqlQuery implements IQuery {
+
+    private String tableName;
+    private String[] columnNameList;
+    private IPredicate predicate;
+    private RowOrdering rowOrdering;
+    private Map<String,Comparable<?>> parameterMap;
+    
+    NoSqlQuery(String tableName, String[] columnNameList, IPredicate predicate, RowOrdering rowOrdering) {
+        this.tableName = tableName;
+        this.columnNameList = columnNameList;
+        this.predicate = predicate;
+        this.rowOrdering = rowOrdering;
+    }
+    
+    @Override
+    public void setParameter(String name, Object value) {
+        if (parameterMap == null)
+            parameterMap = new HashMap<String,Comparable<?>>();
+        parameterMap.put(name, (Comparable<?>)value);
+    }
+
+    @Override
+    public String getTableName() {
+        return tableName;
+    }
+    
+    String[] getColumnNameList() {
+        return columnNameList;
+    }
+    
+    IPredicate getPredicate() {
+        return predicate;
+    }
+    
+    RowOrdering getRowOrdering() {
+        return rowOrdering;
+    }
+    
+    Comparable<?> getParameter(String name) {
+        Comparable<?> value = null;
+        if (parameterMap != null) {
+            value = parameterMap.get(name);
+        }
+        return value;
+    }
+    
+    Map<String,Comparable<?>> getParameterMap() {
+        return parameterMap;
+    }
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/storage/nosql/NoSqlResultSet.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/storage/nosql/NoSqlResultSet.java
new file mode 100644
index 0000000..b3a8c20
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/storage/nosql/NoSqlResultSet.java
@@ -0,0 +1,487 @@
+/**
+*    Copyright 2011, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.storage.nosql;
+
+import java.text.ParseException;
+import java.text.SimpleDateFormat;
+import java.util.Date;
+import java.util.Iterator;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Set;
+import java.util.TimeZone;
+
+import net.floodlightcontroller.storage.IResultSet;
+import net.floodlightcontroller.storage.NullValueStorageException;
+import net.floodlightcontroller.storage.ResultSetIterator;
+import net.floodlightcontroller.storage.StorageException;
+import net.floodlightcontroller.storage.TypeMismatchStorageException;
+
+public class NoSqlResultSet implements IResultSet {
+
+    NoSqlStorageSource storageSource;
+    String tableName;
+    String primaryKeyName;
+    List<Map<String,Object>> rowList;
+    int currentIndex;
+    Map<String,Object> currentRowUpdate;
+    List<Map<String,Object>> rowUpdateList;
+    Set<Object> rowDeleteSet;
+    Iterator<IResultSet> resultSetIterator;
+    
+    NoSqlResultSet(NoSqlStorageSource storageSource, String tableName, List<Map<String,Object>> rowList) {
+        this.storageSource = storageSource;
+        this.primaryKeyName = storageSource.getTablePrimaryKeyName(tableName);
+        this.tableName = tableName;
+        if (rowList == null)
+            rowList = new ArrayList<Map<String,Object>>();
+        this.rowList = rowList;
+        currentIndex = -1;
+    }
+    
+    void addRow(Map<String,Object> row) {
+        rowList.add(row);
+    }
+    
+    @Override
+    public Map<String,Object> getRow() {
+        if ((currentIndex < 0) || (currentIndex >= rowList.size())) {
+            throw new StorageException("No current row in result set.");
+        }
+        
+        return rowList.get(currentIndex);
+    }
+
+    @Override
+    public boolean containsColumn(String columnName) {
+        return getObject(columnName) != null;
+    }
+        
+    @Override
+    public void close() {
+    }
+
+    private void endCurrentRowUpdate() {
+        if (currentRowUpdate != null) {
+            if (rowUpdateList == null)
+                rowUpdateList = new ArrayList<Map<String,Object>>();
+            rowUpdateList.add(currentRowUpdate);
+            currentRowUpdate = null;
+        }
+    }
+    
+    @Override
+    public boolean next() {
+        endCurrentRowUpdate();
+        currentIndex++;
+        return currentIndex < rowList.size();
+    }
+
+    @Override
+    public void save() {
+        endCurrentRowUpdate();
+        
+        if (rowUpdateList != null) {
+            storageSource.updateRows(tableName, rowUpdateList);
+            rowUpdateList = null;
+        }
+        
+        if (rowDeleteSet != null) {
+            storageSource.deleteRows(tableName, rowDeleteSet);
+            rowDeleteSet = null;
+        }
+    }
+
+    Object getObject(String columnName) {
+        Map<String,Object> row = rowList.get(currentIndex);
+        Object value = row.get(columnName);
+        return value;
+    }
+    
+    @Override
+    public boolean getBoolean(String columnName) {
+        Boolean b = getBooleanObject(columnName);
+        if (b == null)
+            throw new NullValueStorageException(columnName);
+        return b.booleanValue();
+    }
+
+    @Override
+    public byte getByte(String columnName) {
+        Byte b = getByteObject(columnName);
+        if (b == null)
+            throw new NullValueStorageException(columnName);
+        return b.byteValue();
+    }
+
+    @Override
+    public byte[] getByteArray(String columnName) {
+        byte[] b = null;
+        Object obj = getObject(columnName);
+        if (obj != null) {
+            if (!(obj instanceof byte[]))
+                throw new StorageException("Invalid byte array value");
+            b = (byte[])obj;
+        }
+        return b;
+    }
+
+    @Override
+    public double getDouble(String columnName) {
+        Double d = getDoubleObject(columnName);
+        if (d == null)
+            throw new NullValueStorageException(columnName);
+        return d.doubleValue();
+    }
+
+    @Override
+    public float getFloat(String columnName) {
+        Float f = getFloatObject(columnName);
+        if (f == null)
+            throw new NullValueStorageException(columnName);
+        return f.floatValue();
+    }
+
+    @Override
+    public int getInt(String columnName) {
+        Integer i = getIntegerObject(columnName);
+        if (i == null)
+            throw new NullValueStorageException(columnName);
+        return i.intValue();
+    }
+
+    @Override
+    public long getLong(String columnName) {
+        Long l = getLongObject(columnName);
+        if (l == null)
+            throw new NullValueStorageException(columnName);
+        return l.longValue();
+    }
+
+    @Override
+    public short getShort(String columnName) {
+        Short s = getShortObject(columnName);
+        if (s == null)
+            throw new NullValueStorageException(columnName);
+        return s.shortValue();
+    }
+
+    @Override
+    public String getString(String columnName) {
+        Object obj = getObject(columnName);
+        if (obj == null)
+            return null;
+        return obj.toString();
+    }
+
+    @Override
+    public Date getDate(String column) {
+        Date d;
+        Object obj = getObject(column);
+        if (obj == null) {
+            d = null;
+        } else if (obj instanceof Date) {
+            d = (Date) obj;
+        } else {
+            SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS");
+            dateFormat.setTimeZone(TimeZone.getTimeZone("GMT"));
+            try {
+                d = dateFormat.parse(obj.toString());
+            }
+            catch (ParseException exc) {
+                throw new TypeMismatchStorageException(Date.class.getName(), obj.getClass().getName(), column);
+            }
+        }
+        return d;
+    }
+
+
+    @Override
+    public Short getShortObject(String columnName)
+    {
+        Short s;
+        Object obj = getObject(columnName);
+        if (obj instanceof Short) {
+            s = (Short)obj;
+        } else if (obj != null) {
+            try {
+                s = Short.parseShort(obj.toString());
+            }
+            catch (NumberFormatException exc) {
+                throw new TypeMismatchStorageException(Short.class.getName(), obj.getClass().getName(), columnName);
+            }
+        } else {
+            s = null;
+        }
+        return s;
+    }
+    
+    @Override
+    public Integer getIntegerObject(String columnName)
+    {
+        Integer i;
+        Object obj = getObject(columnName);
+        if (obj instanceof Integer) {
+            i = (Integer)obj;
+        } else if (obj != null) {
+            try {
+                i = Integer.parseInt(obj.toString());
+            }
+            catch (NumberFormatException exc) {
+                throw new TypeMismatchStorageException(Integer.class.getName(), obj.getClass().getName(), columnName);
+            }
+        } else {
+            i = null;
+        }
+        return i;
+    }
+
+    @Override
+    public Long getLongObject(String columnName)
+    {
+        Long l;
+        Object obj = getObject(columnName);
+        if (obj instanceof Long) {
+            l = (Long)obj;
+        } else if (obj != null) {
+            try {
+                l = Long.parseLong(obj.toString());
+            }
+            catch (NumberFormatException exc) {
+                throw new TypeMismatchStorageException(Long.class.getName(), obj.getClass().getName(), columnName);
+            }
+        } else {
+            l = null;
+        }
+        return l;
+    }
+
+    @Override
+    public Float getFloatObject(String columnName)
+    {
+        Float f;
+        Object obj = getObject(columnName);
+        if (obj instanceof Float) {
+            f = (Float)obj;
+        } else if (obj != null) {
+            try {
+                f = Float.parseFloat(obj.toString());
+            }
+            catch (NumberFormatException exc) {
+                throw new TypeMismatchStorageException(Float.class.getName(), obj.getClass().getName(), columnName);
+            }
+        } else {
+            f = null;
+        }
+        return f;
+    }
+
+    @Override
+    public Double getDoubleObject(String columnName)
+    {
+        Double d;
+        Object obj = getObject(columnName);
+        if (obj instanceof Double) {
+            d = (Double)obj;
+        } else if (obj != null) {
+            try {
+                d = Double.parseDouble(obj.toString());
+            }
+            catch (NumberFormatException exc) {
+                throw new TypeMismatchStorageException(Double.class.getName(), obj.getClass().getName(), columnName);
+            }
+        } else {
+            d = null;
+        }
+        return d;
+    }
+
+    @Override
+    public Boolean getBooleanObject(String columnName)
+    {
+        Boolean b;
+        Object obj = getObject(columnName);
+        if (obj instanceof Boolean) {
+            b = (Boolean)obj;
+        } else if (obj != null) {
+            try {
+                b = Boolean.parseBoolean(obj.toString());
+            }
+            catch (NumberFormatException exc) {
+                throw new TypeMismatchStorageException(Boolean.class.getName(), obj.getClass().getName(), columnName);
+            }
+        } else {
+            b = null;
+        }
+        return b;
+    }
+
+    @Override
+    public Byte getByteObject(String columnName)
+    {
+        Byte b;
+        Object obj = getObject(columnName);
+        if (obj instanceof Byte) {
+            b = (Byte)obj;
+        } else if (obj != null) {
+            try {
+                b = Byte.parseByte(obj.toString());
+            }
+            catch (NumberFormatException exc) {
+                throw new TypeMismatchStorageException(Byte.class.getName(), obj.getClass().getName(), columnName);
+            }
+        } else {
+            b = null;
+        }
+        return b;
+    }
+
+    
+    @Override
+    public boolean isNull(String columnName)
+    {
+        Object obj = getObject(columnName);
+        return (obj == null);
+    }
+
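+    // Lazily creates the pending update map for the current row, seeding it with
+    // the row's primary key so the storage source can locate the row on save().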
+    private void addRowUpdate(String column, Object value) {
+        if (currentRowUpdate == null) {
+            currentRowUpdate = new HashMap<String,Object>();
+            Object key = rowList.get(currentIndex).get(primaryKeyName);
+            currentRowUpdate.put(primaryKeyName, key);
+        }
+        currentRowUpdate.put(column, value);
+    }
+    
+    @Override
+    public void setBoolean(String columnName, boolean value) {
+        addRowUpdate(columnName, value);
+    }
+
+    @Override
+    public void setByte(String columnName, byte value) {
+        addRowUpdate(columnName, value);
+    }
+
+    @Override
+    public void setByteArray(String columnName, byte[] byteArray) {
+        addRowUpdate(columnName, byteArray);
+    }
+
+    @Override
+    public void setDouble(String columnName, double value) {
+        addRowUpdate(columnName, value);
+    }
+
+    @Override
+    public void setFloat(String columnName, float value) {
+        addRowUpdate(columnName, value);
+    }
+
+    @Override
+    public void setInt(String columnName, int value) {
+        addRowUpdate(columnName, value);
+    }
+
+    @Override
+    public void setLong(String columnName, long value) {
+        addRowUpdate(columnName, value);
+    }
+
+    @Override
+    public void setShort(String columnName, short value) {
+        addRowUpdate(columnName, value);
+    }
+
+    @Override
+    public void setString(String columnName, String value) {
+        addRowUpdate(columnName, value);
+    }
+
+    @Override
+    public void setShortObject(String columnName, Short value)
+    {
+        addRowUpdate(columnName, value);
+    }
+    
+    @Override
+    public void setIntegerObject(String columnName, Integer value)
+    {
+        addRowUpdate(columnName, value);
+    }
+
+    @Override
+    public void setLongObject(String columnName, Long value)
+    {
+        addRowUpdate(columnName, value);
+    }
+
+    @Override
+    public void setFloatObject(String columnName, Float value)
+    {
+        addRowUpdate(columnName, value);
+    }
+
+    @Override
+    public void setDoubleObject(String columnName, Double value)
+    {
+        addRowUpdate(columnName, value);
+    }
+
+    @Override
+    public void setBooleanObject(String columnName, Boolean value)
+    {
+        addRowUpdate(columnName, value);
+    }
+
+    @Override
+    public void setByteObject(String columnName, Byte value)
+    {
+        addRowUpdate(columnName, value);
+    }
+
+    @Override
+    public void setDate(String column, Date value) {
+        addRowUpdate(column, value);
+    }
+
+    
+    public void setNull(String columnName)
+    {
+        addRowUpdate(columnName, null);
+    }
+
+    
+    @Override
+    public void deleteRow() {
+        Object key = rowList.get(currentIndex).get(primaryKeyName);
+        if (rowDeleteSet == null)
+            rowDeleteSet = new HashSet<Object>();
+        rowDeleteSet.add(key);
+    }
+    
+    @Override
+    public Iterator<IResultSet> iterator() {
+        if (resultSetIterator == null)
+            resultSetIterator = new ResultSetIterator(this);
+        return resultSetIterator;
+    }
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/storage/nosql/NoSqlStorageSource.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/storage/nosql/NoSqlStorageSource.java
new file mode 100644
index 0000000..d7e5f95
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/storage/nosql/NoSqlStorageSource.java
@@ -0,0 +1,823 @@
+/**
+*    Copyright 2011, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.storage.nosql;
+
+import java.lang.Class;
+
+import java.text.ParseException;
+import java.text.SimpleDateFormat;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.TimeZone;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import net.floodlightcontroller.storage.AbstractStorageSource;
+import net.floodlightcontroller.storage.CompoundPredicate;
+import net.floodlightcontroller.storage.IPredicate;
+import net.floodlightcontroller.storage.IQuery;
+import net.floodlightcontroller.storage.IResultSet;
+import net.floodlightcontroller.storage.OperatorPredicate;
+import net.floodlightcontroller.storage.RowOrdering;
+import net.floodlightcontroller.storage.StorageException;
+import net.floodlightcontroller.storage.StorageSourceNotification;
+import net.floodlightcontroller.storage.TypeMismatchStorageException;
+
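+/**
+ * Base class for NoSQL-style storage sources. Subclasses supply the row-level
+ * operations declared as abstract methods at the bottom of this class; this
+ * class layers predicate evaluation, query planning, and change notifications
+ * on top of them.
+ */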
+public abstract class NoSqlStorageSource extends AbstractStorageSource {
+    protected static Logger log = LoggerFactory.getLogger(NoSqlStorageSource.class);
+
+    public enum ColumnIndexMode { NOT_INDEXED, RANGE_INDEXED, EQUALITY_INDEXED };
+    
+    protected static final String DEFAULT_PRIMARY_KEY_NAME = "id";
+    
+    private Map<String,String> tablePrimaryKeyMap = new HashMap<String,String>();
+    private Map<String, Map<String,ColumnIndexMode>> tableIndexedColumnMap =
+        new HashMap<String,Map<String,ColumnIndexMode>>();
+    
+    abstract class NoSqlPredicate {
+
+        public boolean incorporateComparison(String columnName,
+                OperatorPredicate.Operator operator, Comparable<?> value,
+                CompoundPredicate.Operator parentOperator) {
+            return false;
+        }
+        
+        public boolean canExecuteEfficiently() {
+            return false;
+        }
+        
+        public List<Map<String,Object>> execute(String[] columnNames) {
+            assert(false);
+            return null;
+        }
+        
+        abstract public boolean matchesRow(Map<String,Object> row);
+    }
+    
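+    // Predicate over a single column expressed as a (possibly open-ended) value
+    // range; an equality comparison is represented as a range whose start and
+    // end are the same value.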
+    @SuppressWarnings({ "unchecked", "rawtypes" })
+    class NoSqlRangePredicate extends NoSqlPredicate {
+        NoSqlStorageSource storageSource;
+        String tableName;
+        String columnName;
+        Comparable<?> startValue;
+        boolean startInclusive;
+        Comparable<?> endValue;
+        boolean endInclusive;
+        
+        NoSqlRangePredicate(NoSqlStorageSource storageSource, String tableName,
+                String columnName, Comparable<?> startValue, boolean startInclusive,
+                Comparable<?> endValue, boolean endInclusive) {
+            this.storageSource = storageSource;
+            this.tableName = tableName;
+            this.columnName = columnName;
+            this.startValue = startValue;
+            this.startInclusive = startInclusive;
+            this.endValue = endValue;
+            this.endInclusive = endInclusive;
+        }
+        
+        public boolean incorporateComparison(String columnName,
+                OperatorPredicate.Operator operator, Comparable<?> value,
+                CompoundPredicate.Operator parentOperator) {
+            
+            assert(operator != null);
+            assert(parentOperator != null);
+            
+            // Must be the same column to incorporate
+            if (!this.columnName.equals(columnName))
+                return false;
+            
+            // The only time we allow a null value is if it's an EQ operator.
+            // In that case we can only incorporate if this predicate is also
+            // a null equality predicate.
+            if (value == null) {
+                return ((operator == OperatorPredicate.Operator.EQ) &&
+                        (startValue == null) && (endValue == null) &&
+                        startInclusive && endInclusive);
+            }
+            
+            // Don't incorporate parameterized values
+            if (value instanceof String) {
+                String s = (String)value;
+                if (s.startsWith("?") && s.endsWith("?")) {
+                    return false;
+                }
+            }
+            
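+            // Under an AND parent the comparison can only tighten the existing
+            // range; under an OR parent it may widen it instead.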
+            if (parentOperator == CompoundPredicate.Operator.AND) {
+                switch (operator) {
+                case EQ:
+                    if (matchesValue(value)) {
+                        startValue = endValue = value;
+                        startInclusive = endInclusive = true;
+                        return true;
+                    }
+                    break;
+                case LT:
+                    if ((endValue == null) || (((Comparable)value).compareTo(endValue) <= 0)) {
+                        endValue = value;
+                        endInclusive = false;
+                        return true;
+                    }
+                    break;
+                case LTE:
+                    if ((endValue == null) || (((Comparable)value).compareTo(endValue) < 0)) {
+                        endValue = value;
+                        endInclusive = true;
+                        return true;
+                    }
+                    break;
+                case GT:
+                    if ((startValue == null) || (((Comparable)value).compareTo(startValue) >= 0)) {
+                        startValue = value;
+                        startInclusive = false;
+                        return true;
+                    }
+                    break;
+                case GTE:
+                    if ((startValue == null) || (((Comparable)value).compareTo(startValue) > 0)) {
+                        startValue = value;
+                        startInclusive = true;
+                        return true;
+                    }
+                    break;
+                }
+            } else {
+                switch (operator) {
+                case EQ:
+                    if (matchesValue(value))
+                        return true;
+                    break;
+                case LT:
+                    if ((endValue == null) || (((Comparable)value).compareTo(endValue) > 0)) {
+                        endValue = value;
+                        endInclusive = false;
+                        return true;
+                    }
+                    break;
+                case LTE:
+                    if ((endValue == null) || (((Comparable)value).compareTo(endValue) >= 0)) {
+                        endValue = value;
+                        endInclusive = true;
+                        return true;
+                    }
+                    break;
+                case GT:
+                    if ((startValue == null) || (((Comparable)value).compareTo(startValue) < 0)) {
+                        startValue = value;
+                        startInclusive = false;
+                        return true;
+                    }
+                    break;
+                case GTE:
+                    if ((startValue == null) || (((Comparable)value).compareTo(startValue) <= 0)) {
+                        startValue = value;
+                        startInclusive = true;
+                        return true;
+                    }
+                    break;
+                }
+            }
+            
+            return false;
+        }
+
+        private boolean isEqualityRange() {
+            return (startValue == endValue) && startInclusive && endInclusive;
+        }
+        
+        public boolean canExecuteEfficiently() {
+            ColumnIndexMode indexMode = storageSource.getColumnIndexMode(tableName, columnName);
+            switch (indexMode) {
+            case NOT_INDEXED:
+                return false;
+            case RANGE_INDEXED:
+                return true;
+            case EQUALITY_INDEXED:
+                return isEqualityRange();
+            }
+            return true;
+        }
+
+        public List<Map<String,Object>> execute(String columnNameList[]) {
+            List<Map<String,Object>> rowList;
+            if (isEqualityRange())
+                rowList = storageSource.executeEqualityQuery(tableName, columnNameList, columnName, startValue);
+            else
+                rowList = storageSource.executeRangeQuery(tableName, columnNameList, columnName,
+                        startValue, startInclusive, endValue, endInclusive);
+                
+            return rowList;
+        }
+        
+        Comparable<?> coerceValue(Comparable<?> value, Class targetClass) {
+            
+            if (value == null)
+                return null;
+            
+            if (value.getClass() == targetClass)
+                return value;
+            
+            // FIXME: For now we convert by first converting the source value to a
+            // string and then converting to the target type. This logic probably needs
+            // another pass to make it more robust/optimized.
+            
+            String s = value.toString();
+            Comparable<?> obj = null;
+            
+            try {
+                if (targetClass == Integer.class) {
+                    obj = new Integer(s);
+                } else if (targetClass == Long.class) {
+                    obj = new Long(s);
+                } else if (targetClass == Short.class) {
+                    obj = new Short(s);
+                } else if (targetClass == Boolean.class) {
+                    obj = new Boolean(s);
+                } else if (targetClass == Float.class) {
+                    obj = new Float(s);
+                } else if (targetClass == Double.class) {
+                    obj = new Double(s);
+                } else if (targetClass == Byte.class) {
+                    obj = new Byte(s);
+                } else if (targetClass == String.class) {
+                    obj = s;
+                } else if (targetClass == Date.class) {
+                    SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS");
+                    dateFormat.setTimeZone(TimeZone.getTimeZone("GMT"));
+                    try {
+                        obj = dateFormat.parse(s);
+                    }
+                    catch (ParseException exc) {
+                        throw new TypeMismatchStorageException(Date.class.getName(), value.getClass().getName(), "???");
+                    }
+                }
+            }
+            catch (Exception exc) {
+                // Ignore the exception here. In this case obj will not be set, so we'll
+                // throw the StorageException below when we check for a null obj.
+            }
+            
+            if (obj == null)
+                throw new StorageException("Column value could not be coerced to the correct type");
+            
+            return obj;
+        }
+        
+        boolean matchesValue(Comparable<?> value) {
+            boolean isNullEqPredicate = (startValue == null) && (endValue == null) && startInclusive && endInclusive;
+            if (value == null)
+                return isNullEqPredicate;
+
+            if (isNullEqPredicate)
+                return false;
+            
+            int result;
+            Comparable<?> coercedValue;
+            if (startValue != null) {
+                coercedValue = coerceValue(value, startValue.getClass());
+                result = ((Comparable)coercedValue).compareTo(startValue);
+                if ((result < 0) || (!startInclusive && (result == 0)))
+                    return false;
+            }
+            if (endValue != null) {
+                coercedValue = coerceValue(value, endValue.getClass());
+                result = ((Comparable)coercedValue).compareTo(endValue);
+                if ((result > 0) || (!endInclusive && (result == 0)))
+                    return false;
+            }
+            return true;
+        }
+        
+        public boolean matchesRow(Map<String,Object> row) {
+            Comparable value = (Comparable)row.get(columnName);
+            return matchesValue(value);
+        }
+    }
+    
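+    // Fallback for operator predicates that cannot be expressed as a range.
+    // Such predicates are currently unsupported: they never match a row and
+    // cannot be executed against the backing store.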
+    class NoSqlOperatorPredicate extends NoSqlPredicate {
+        
+        NoSqlStorageSource storageSource;
+        String columnName;
+        OperatorPredicate.Operator operator;
+        Object value;
+        
+        NoSqlOperatorPredicate(NoSqlStorageSource storageSource, String columnName,
+                OperatorPredicate.Operator operator, Object value) {
+            this.storageSource = storageSource;
+            this.columnName = columnName;
+            this.operator = operator;
+            this.value = value;
+        }
+
+        public boolean incorporateComparison(String columnName,
+                OperatorPredicate.Operator operator, Comparable<?> value,
+                CompoundPredicate.Operator parentOperator) {
+            return false;
+        }
+
+        public boolean canExecuteEfficiently() {
+            return false;
+        }
+
+        public List<Map<String,Object>> execute(String columnNames[]) {
+            throw new StorageException("Unimplemented predicate.");
+        }
+        
+        public boolean matchesRow(Map<String,Object> row) {
+            return false;
+        }
+    }
+    
+    class NoSqlCompoundPredicate extends NoSqlPredicate {
+        
+        NoSqlStorageSource storageSource;
+        CompoundPredicate.Operator operator;
+        boolean negated;
+        List<NoSqlPredicate> predicateList;
+        
+        NoSqlCompoundPredicate(NoSqlStorageSource storageSource, CompoundPredicate.Operator operator,
+                boolean negated, List<NoSqlPredicate> predicateList) {
+            this.storageSource = storageSource;
+            this.operator = operator;
+            this.negated = negated;
+            this.predicateList = predicateList;
+        }
+
+        public boolean incorporateComparison(String columnName,
+                OperatorPredicate.Operator operator, Comparable<?> value,
+                CompoundPredicate.Operator parentOperator) {
+            // It may be possible to incorporate other operator predicates into this
+            // one, but that would need to take the negated attribute into account and
+            // requires more analysis to be sure it is correct, so for now we disallow
+            // incorporation.
+            //if (parentOperator == this.operator) {
+            //    for (NoSqlPredicate predicate: predicateList) {
+            //        if (predicate.incorporateComparison(columnName, operator, value, parentOperator))
+            //            return true;
+            //    }
+            //}
+            return false;
+        }
+
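+        // An AND query can use the index of any one child predicate (the remaining
+        // predicates are applied in memory), so a single efficient child suffices.
+        // An OR query must be able to execute every child efficiently.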
+        public boolean canExecuteEfficiently() {
+            if (operator == CompoundPredicate.Operator.AND) {
+                for (NoSqlPredicate predicate: predicateList) {
+                    if (predicate.canExecuteEfficiently()) {
+                        return true;
+                    }
+                }
+                return false;
+            } else {
+                for (NoSqlPredicate predicate: predicateList) {
+                    if (!predicate.canExecuteEfficiently()) {
+                        return false;
+                    }
+                }
+                return true;
+            }
+        }
+
+        @SuppressWarnings({ "unchecked", "rawtypes" })
+        class RowComparator implements Comparator<Map<String,Object>> {
+            private String primaryKeyName;
+            
+            public RowComparator(String primaryKeyName) {
+                this.primaryKeyName = primaryKeyName;
+            }
+            
+            public int compare(Map<String,Object> row1, Map<String,Object> row2) {
+                Comparable key1 = (Comparable)row1.get(primaryKeyName);
+                Comparable key2 = (Comparable)row2.get(primaryKeyName);
+                return key1.compareTo(key2);
+            }
+            
+            public boolean equals(Object obj) {
+                if (!(obj instanceof RowComparator))
+                    return false;
+                RowComparator rc = (RowComparator)obj;
+                if (rc.primaryKeyName == null)
+                    return this.primaryKeyName == null;
+                return rc.primaryKeyName.equals(this.primaryKeyName);
+            }
+        }
+
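+        // Merges two row lists keyed by the primary key: for AND the result is the
+        // intersection of the two lists, for OR their union. Both lists are sorted
+        // by key first so the merge can be done in a single pass. (Assumes primary
+        // keys are unique within each list.)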
+        @SuppressWarnings({ "unchecked", "rawtypes" })
+        private List<Map<String,Object>> combineRowLists(String primaryKeyName,
+                List<Map<String,Object>> list1, List<Map<String,Object>> list2,
+                CompoundPredicate.Operator operator) {
+            ArrayList<Map<String,Object>> combinedRowList = new ArrayList<Map<String,Object>>();
+            RowComparator rc = new RowComparator(primaryKeyName);
+            Collections.sort(list1, rc);
+            Collections.sort(list2,rc);
+            
+            Iterator<Map<String,Object>> iterator1 = list1.iterator();
+            Iterator<Map<String,Object>> iterator2 = list2.iterator();
+            boolean update1 = true;
+            boolean update2 = true;
+            Map<String,Object> row1 = null;
+            Map<String,Object> row2 = null;
+            Comparable<?> key1 = null;
+            Comparable<?> key2 = null;
+            
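+            // Classic merge: advance whichever side currently has the smaller key
+            // (both sides when the keys are equal), emitting rows according to the
+            // AND/OR semantics above.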
+            while (true) {
+                if (update1) {
+                    if (iterator1.hasNext()) {
+                        row1 = iterator1.next();
+                        key1 = (Comparable<?>)row1.get(primaryKeyName);
+                    } else {
+                        row1 = null;
+                    }
+                }
+                if (update2) {
+                    if (iterator2.hasNext()) {
+                        row2 = iterator2.next();
+                        key2 = (Comparable<?>)row2.get(primaryKeyName);
+                    } else {
+                        row2 = null;
+                    }
+                }
+                if (operator == CompoundPredicate.Operator.AND) {
+                    if ((row1 == null) || (row2 == null))
+                        break;
+                    if (key1.equals(key2))
+                        combinedRowList.add(row1);
+                } else {
+                    if (row1 == null) {
+                        if (row2 == null)
+                            break;
+                        combinedRowList.add(row2);
+                    } else if ((row2 == null) || (((Comparable)key1).compareTo(key2) <= 0)) {
+                        combinedRowList.add(row1);
+                    } else {
+                        combinedRowList.add(row2);
+                    }
+                }
+                
+                update1 = (key2 == null) || (((Comparable)key1).compareTo(key2) <= 0);
+                update2 = (key1 == null) || (((Comparable)key2).compareTo(key1) <= 0);
+            }
+            
+            return combinedRowList;
+        }
+        
+        public List<Map<String,Object>> execute(String columnNames[]) {
+            List<Map<String,Object>> combinedRowList = null;
+            for (NoSqlPredicate predicate: predicateList) {
+                List<Map<String,Object>> rowList = predicate.execute(columnNames);
+                if (combinedRowList != null) {
+                    combinedRowList = combineRowLists("id", combinedRowList, rowList, operator);
+                } else {
+                    combinedRowList = rowList;
+                }
+            }
+            return combinedRowList;
+        }
+
+        public boolean matchesRow(Map<String,Object> row) {
+            if (operator == CompoundPredicate.Operator.AND) {
+                for (NoSqlPredicate predicate : predicateList) {
+                    if (!predicate.matchesRow(row))  {
+                        return false;
+                    }
+                }
+                return true;
+            } else {
+                for (NoSqlPredicate predicate : predicateList) {
+                    if (predicate.matchesRow(row))  {
+                        return true;
+                    }
+                }
+                return false;
+                
+            }
+        }
+    }
+    
+    public NoSqlStorageSource() {
+        super();
+    }
+    
+    @Override
+    public void createTable(String tableName, Set<String> indexedColumns) {
+        super.createTable(tableName, indexedColumns);
+        if (indexedColumns == null) return;
+        for (String columnName : indexedColumns) {
+            setColumnIndexMode(tableName, columnName,
+                               ColumnIndexMode.EQUALITY_INDEXED);
+        }
+    }
+
+    public void setTablePrimaryKeyName(String tableName, String primaryKeyName) {
+        if ((tableName == null) || (primaryKeyName == null))
+            throw new NullPointerException();
+        tablePrimaryKeyMap.put(tableName, primaryKeyName);
+    }
+    
+    protected String getTablePrimaryKeyName(String tableName) {
+        String primaryKeyName = tablePrimaryKeyMap.get(tableName);
+        if (primaryKeyName == null)
+            primaryKeyName = DEFAULT_PRIMARY_KEY_NAME;
+        return primaryKeyName;
+    }
+    
+    protected ColumnIndexMode getColumnIndexMode(String tableName, String columnName) {
+        ColumnIndexMode columnIndexMode = null;
+        Map<String, ColumnIndexMode> indexedColumnMap = tableIndexedColumnMap.get(tableName);
+        if (indexedColumnMap != null)
+            columnIndexMode = indexedColumnMap.get(columnName);
+        if (columnIndexMode == null)
+            return ColumnIndexMode.NOT_INDEXED;
+        return columnIndexMode;
+    }
+    
+    public void setColumnIndexMode(String tableName, String columnName, ColumnIndexMode indexMode) {
+        Map<String, ColumnIndexMode> indexedColumnMap = tableIndexedColumnMap.get(tableName);
+        if (indexedColumnMap == null) {
+            indexedColumnMap = new HashMap<String,ColumnIndexMode>();
+            tableIndexedColumnMap.put(tableName, indexedColumnMap);
+        }
+        indexedColumnMap.put(columnName, indexMode);
+    }
+    
+    Comparable<?> getOperatorPredicateValue(OperatorPredicate predicate, Map<String,Comparable<?>> parameterMap) {
+        Comparable<?> value = predicate.getValue();
+        if (value instanceof String) {
+            String stringValue = (String) value;
+            if ((stringValue.charAt(0) == '?') && (stringValue.charAt(stringValue.length()-1) == '?')) {
+                String parameterName = stringValue.substring(1,stringValue.length()-1);
+                value = parameterMap.get(parameterName);
+            }
+        }
+        return value;
+    }
+    
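+    // Recursively translates the generic IPredicate tree of a query into
+    // NoSqlPredicate objects, folding simple operator comparisons into an
+    // existing range predicate on the same column where possible.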
+    NoSqlPredicate convertPredicate(IPredicate predicate, String tableName, Map<String,Comparable<?>> parameterMap) {
+        if (predicate == null)
+            return null;
+        NoSqlPredicate convertedPredicate = null;
+        if (predicate instanceof CompoundPredicate) {
+            CompoundPredicate compoundPredicate = (CompoundPredicate)predicate;
+            ArrayList<NoSqlPredicate> noSqlPredicateList = new ArrayList<NoSqlPredicate>();
+            for (IPredicate childPredicate: compoundPredicate.getPredicateList()) {
+                boolean incorporated = false;
+                if (childPredicate instanceof OperatorPredicate) {
+                    OperatorPredicate childOperatorPredicate = (OperatorPredicate)childPredicate;
+                    for (NoSqlPredicate childNoSqlPredicate: noSqlPredicateList) {
+                        incorporated = childNoSqlPredicate.incorporateComparison(
+                                childOperatorPredicate.getColumnName(), childOperatorPredicate.getOperator(),
+                                getOperatorPredicateValue(childOperatorPredicate, parameterMap),
+                                compoundPredicate.getOperator());
+                        if (incorporated)
+                            break;
+                    }
+                }
+                if (!incorporated) {
+                    NoSqlPredicate noSqlPredicate = convertPredicate(childPredicate, tableName, parameterMap);
+                    noSqlPredicateList.add(noSqlPredicate);
+                }
+            }
+            convertedPredicate = new NoSqlCompoundPredicate(this, compoundPredicate.getOperator(),
+                    compoundPredicate.isNegated(), noSqlPredicateList);
+        } else if (predicate instanceof OperatorPredicate) {
+            OperatorPredicate operatorPredicate = (OperatorPredicate) predicate;
+            Comparable<?> value = getOperatorPredicateValue(operatorPredicate, parameterMap);
+            switch (operatorPredicate.getOperator()) {
+            case EQ:
+                convertedPredicate = new NoSqlRangePredicate(this, tableName,
+                        operatorPredicate.getColumnName(), value, true, value, true);
+                break;
+            case LT:
+                convertedPredicate = new NoSqlRangePredicate(this, tableName,
+                        operatorPredicate.getColumnName(), null, false, value, false);
+                break;
+            case LTE:
+                convertedPredicate = new NoSqlRangePredicate(this, tableName,
+                        operatorPredicate.getColumnName(), null, false, value, true);
+                break;
+            case GT:
+                convertedPredicate = new NoSqlRangePredicate(this, tableName,
+                        operatorPredicate.getColumnName(), value, false, null, false);
+                break;
+            case GTE:
+                convertedPredicate = new NoSqlRangePredicate(this, tableName,
+                        operatorPredicate.getColumnName(), value, true, null, false);
+                break;
+            default:
+                convertedPredicate = new NoSqlOperatorPredicate(this, operatorPredicate.getColumnName(),
+                        operatorPredicate.getOperator(), value);
+            }
+        } else {
+            throw new StorageException("Unknown predicate type");
+        }
+        
+        return convertedPredicate;
+    }
+    
+    @SuppressWarnings({ "unchecked", "rawtypes" })
+    class RowComparator implements Comparator<Map<String,Object>> {
+        private RowOrdering rowOrdering;
+        
+        public RowComparator(RowOrdering rowOrdering) {
+            this.rowOrdering = rowOrdering;
+        }
+        
+        public int compare(Map<String,Object> row1, Map<String,Object> row2) {
+            if (rowOrdering == null)
+                return 0;
+            
+            for (RowOrdering.Item item: rowOrdering.getItemList()) {
+                Comparable key1 = (Comparable)row1.get(item.getColumn());
+                Comparable key2 = (Comparable)row2.get(item.getColumn());
+                int result = key1.compareTo(key2);
+                if (result != 0) {
+                    if (item.getDirection() == RowOrdering.Direction.DESCENDING)
+                        result = -result;
+                    return result;
+                }
+            }
+            
+            return 0;
+        }
+        
+        public boolean equals(Object obj) {
+            if (!(obj instanceof RowComparator))
+                return false;
+            RowComparator rc = (RowComparator)obj;
+            if (rc.rowOrdering == null)
+                return this.rowOrdering == null;
+            return rc.rowOrdering.equals(this.rowOrdering);
+        }
+    }
+    
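+    // Executes a query: if the predicate can be answered from an index it is
+    // pushed down to the storage back end; otherwise all rows are fetched and
+    // filtered in memory. Results are sorted afterwards if an ordering was given.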
+    private NoSqlResultSet executeParameterizedQuery(String tableName, String[] columnNameList,
+            IPredicate predicate, RowOrdering rowOrdering, Map<String,Comparable<?>> parameterMap) {
+        NoSqlPredicate noSqlPredicate = convertPredicate(predicate, tableName, parameterMap);
+        List<Map<String,Object>> rowList;
+        if ((noSqlPredicate != null) && noSqlPredicate.canExecuteEfficiently()) {
+            rowList = noSqlPredicate.execute(columnNameList);
+        } else {
+            rowList = new ArrayList<Map<String,Object>>();
+            Collection<Map<String,Object>> allRowList = getAllRows(tableName, columnNameList);
+            for (Map<String,Object> row: allRowList) {
+                if ((noSqlPredicate == null) || noSqlPredicate.matchesRow(row)) {
+                    rowList.add(row);
+                }
+            }
+        }
+        if (rowOrdering != null)
+            Collections.sort(rowList, new RowComparator(rowOrdering));
+            
+        return new NoSqlResultSet(this, tableName, rowList);
+    }
+    
+    @Override
+    public IQuery createQuery(String tableName, String[] columnNameList,
+            IPredicate predicate, RowOrdering rowOrdering) {
+        return new NoSqlQuery(tableName, columnNameList, predicate, rowOrdering);
+    }
+
+    @Override
+    public IResultSet executeQueryImpl(IQuery query) {
+        NoSqlQuery noSqlQuery = (NoSqlQuery) query;
+        return executeParameterizedQuery(noSqlQuery.getTableName(),
+                noSqlQuery.getColumnNameList(), noSqlQuery.getPredicate(),
+                noSqlQuery.getRowOrdering(), noSqlQuery.getParameterMap());
+    }
+
+    protected void sendNotification(String tableName, StorageSourceNotification.Action action,
+            List<Map<String,Object>> rows) {
+        Set<Object> rowKeys = new HashSet<Object>();
+        String primaryKeyName = getTablePrimaryKeyName(tableName);
+        for (Map<String,Object> row : rows) {
+            Object rowKey = row.get(primaryKeyName);
+            rowKeys.add(rowKey);
+        }
+        StorageSourceNotification notification =
+            new StorageSourceNotification(tableName, action, rowKeys);
+        notifyListeners(notification);
+    }
+    
+    protected void sendNotification(String tableName,
+            StorageSourceNotification.Action action, Set<Object> rowKeys) {
+        StorageSourceNotification notification =
+            new StorageSourceNotification(tableName, action, rowKeys);
+        notifyListeners(notification);
+    }
+    
+    protected void insertRowsAndNotify(String tableName, List<Map<String,Object>> insertRowList) {
+        insertRows(tableName, insertRowList);
+        sendNotification(tableName, StorageSourceNotification.Action.MODIFY, insertRowList);
+    }
+
+    @Override
+    public void insertRowImpl(String tableName, Map<String, Object> values) {
+        ArrayList<Map<String,Object>> rowList = new ArrayList<Map<String,Object>>();
+        rowList.add(values);
+        insertRowsAndNotify(tableName, rowList);
+    }
+
+    protected void updateRowsAndNotify(String tableName, Set<Object> rowKeys, Map<String,Object> updateRowList) {
+        updateRows(tableName, rowKeys, updateRowList);
+        sendNotification(tableName, StorageSourceNotification.Action.MODIFY, rowKeys);
+    }
+
+    protected void updateRowsAndNotify(String tableName, List<Map<String,Object>> updateRowList) {
+        updateRows(tableName, updateRowList);
+        sendNotification(tableName, StorageSourceNotification.Action.MODIFY, updateRowList);
+    }
+
+    @Override
+    public void updateMatchingRowsImpl(String tableName, IPredicate predicate, Map<String,Object> values) {
+        String primaryKeyName = getTablePrimaryKeyName(tableName);
+        String[] columnNameList = {primaryKeyName};
+        IResultSet resultSet = executeQuery(tableName, columnNameList, predicate, null);
+        Set<Object> rowKeys = new HashSet<Object>();
+        while (resultSet.next()) {
+            String rowKey = resultSet.getString(primaryKeyName);
+            rowKeys.add(rowKey);
+        }
+        updateRowsAndNotify(tableName, rowKeys, values);
+    }
+    
+    @Override
+    public void updateRowImpl(String tableName, Object rowKey, Map<String,Object> values) {
+        Map<String,Object> valuesWithKey = new HashMap<String,Object>(values);
+        String primaryKeyName = getTablePrimaryKeyName(tableName);
+        valuesWithKey.put(primaryKeyName, rowKey);
+        List<Map<String,Object>> rowList = new ArrayList<Map<String,Object>>();
+        rowList.add(valuesWithKey);
+        updateRowsAndNotify(tableName, rowList);
+    }
+
+    @Override
+    public void updateRowImpl(String tableName, Map<String,Object> values) {
+        List<Map<String,Object>> rows = new ArrayList<Map<String,Object>>();
+        rows.add(values);
+        updateRowsAndNotify(tableName, rows);
+    }
+
+    protected void deleteRowsAndNotify(String tableName, Set<Object> rowKeyList) {
+        deleteRows(tableName, rowKeyList);
+        sendNotification(tableName, StorageSourceNotification.Action.DELETE, rowKeyList);
+    }
+
+    @Override
+    public void deleteRowImpl(String tableName, Object key) {
+        HashSet<Object> keys = new HashSet<Object>();
+        keys.add(key);
+        deleteRowsAndNotify(tableName, keys);
+    }
+
+    @Override
+    public IResultSet getRowImpl(String tableName, Object rowKey) {
+        List<Map<String,Object>> rowList = new ArrayList<Map<String,Object>>();
+        Map<String,Object> row = getRow(tableName, null, rowKey);
+        if (row != null)
+            rowList.add(row);
+        NoSqlResultSet resultSet = new NoSqlResultSet(this, tableName, rowList);
+        return resultSet;
+    }
+   
+    // Below are the methods that must be implemented by the subclasses
+    
+    protected abstract Collection<Map<String,Object>> getAllRows(String tableName, String[] columnNameList);
+    
+    protected abstract Map<String,Object> getRow(String tableName, String[] columnNameList, Object rowKey);
+    
+    protected abstract List<Map<String,Object>> executeEqualityQuery(String tableName,
+            String[] columnNameList, String predicateColumnName, Comparable<?> value);
+    
+    protected abstract List<Map<String,Object>> executeRangeQuery(String tableName,
+            String[] columnNameList, String predicateColumnName,
+            Comparable<?> startValue, boolean startInclusive, Comparable<?> endValue, boolean endInclusive);
+    
+    protected abstract void insertRows(String tableName, List<Map<String,Object>> insertRowList);
+    
+    protected abstract void updateRows(String tableName, Set<Object> rowKeys, Map<String,Object> updateColumnMap);
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/storage/web/StorageNotifyResource.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/storage/web/StorageNotifyResource.java
new file mode 100644
index 0000000..fcfa96f
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/storage/web/StorageNotifyResource.java
@@ -0,0 +1,55 @@
+/**
+*    Copyright 2011, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.storage.web;
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import net.floodlightcontroller.storage.IStorageSourceService;
+import net.floodlightcontroller.storage.StorageSourceNotification;
+
+import org.codehaus.jackson.map.ObjectMapper;
+import org.codehaus.jackson.type.TypeReference;
+import org.restlet.resource.Post;
+import org.restlet.resource.ServerResource;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class StorageNotifyResource extends ServerResource {
+    protected static Logger log = LoggerFactory.getLogger(StorageNotifyResource.class);
+    
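+    // Receives a JSON-encoded list of StorageSourceNotification objects and
+    // forwards them to the local storage source, which notifies its listeners.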
+    @Post("json")
+    public Map<String,Object> notify(String entity) throws Exception {
+        List<StorageSourceNotification> notifications = null;
+        ObjectMapper mapper = new ObjectMapper();
+        notifications = 
+            mapper.readValue(entity, 
+                    new TypeReference<List<StorageSourceNotification>>(){});
+        
+        IStorageSourceService storageSource = 
+            (IStorageSourceService)getContext().getAttributes().
+                get(IStorageSourceService.class.getCanonicalName());
+        storageSource.notifyListeners(notifications);
+        
+        HashMap<String, Object> model = new HashMap<String,Object>();
+        model.put("output", "OK");
+        return model;
+    }
+    
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/storage/web/StorageWebRoutable.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/storage/web/StorageWebRoutable.java
new file mode 100644
index 0000000..681847d
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/storage/web/StorageWebRoutable.java
@@ -0,0 +1,45 @@
+/**
+*    Copyright 2011, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.storage.web;
+
+import org.restlet.Context;
+import org.restlet.Restlet;
+import org.restlet.routing.Router;
+
+import net.floodlightcontroller.restserver.RestletRoutable;
+
+/**
+ * Creates a router to handle the storage web URIs
+ * @author readams
+ *
+ */
+public class StorageWebRoutable implements RestletRoutable {
+
+    @Override
+    public String basePath() {
+        return "/wm/storage";
+    }
+
+    @Override
+    public Restlet getRestlet(Context context) {
+        Router router = new Router(context);
+        router.attach("/notify/json", StorageNotifyResource.class);
+        return router;
+    }
+
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/threadpool/IThreadPoolService.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/threadpool/IThreadPoolService.java
new file mode 100644
index 0000000..a537a3a
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/threadpool/IThreadPoolService.java
@@ -0,0 +1,15 @@
+package net.floodlightcontroller.threadpool;
+
+import java.util.concurrent.ScheduledExecutorService;
+
+import net.floodlightcontroller.core.module.IFloodlightService;
+
+public interface IThreadPoolService extends IFloodlightService {
+    /**
+     * Get the master scheduled thread pool executor maintained by the
+     * ThreadPool provider.  This can be used by other modules as a centralized
+     * way to schedule tasks.
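+     * For example, a module might schedule work with (illustrative only):
+     * <pre>
+     *   threadPoolService.getScheduledExecutor()
+     *       .schedule(task, 5, TimeUnit.SECONDS);
+     * </pre>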
+     * @return the controller's shared ScheduledExecutorService
+     */
+    public ScheduledExecutorService getScheduledExecutor();
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/threadpool/ThreadPool.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/threadpool/ThreadPool.java
new file mode 100644
index 0000000..aa426a7
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/threadpool/ThreadPool.java
@@ -0,0 +1,64 @@
+package net.floodlightcontroller.threadpool;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+
+import net.floodlightcontroller.core.module.FloodlightModuleContext;
+import net.floodlightcontroller.core.module.FloodlightModuleException;
+import net.floodlightcontroller.core.module.IFloodlightModule;
+import net.floodlightcontroller.core.module.IFloodlightService;
+
+public class ThreadPool implements IThreadPoolService, IFloodlightModule {
+    protected ScheduledExecutorService executor = null;
+    
+    // IThreadPoolService
+
+    @Override
+    public ScheduledExecutorService getScheduledExecutor() {
+        return executor;
+    }
+    
+    // IFloodlightModule
+    
+    @Override
+    public Collection<Class<? extends IFloodlightService>> getModuleServices() {
+        Collection<Class<? extends IFloodlightService>> l = 
+                new ArrayList<Class<? extends IFloodlightService>>();
+        l.add(IThreadPoolService.class);
+        return l;
+    }
+
+    @Override
+    public Map<Class<? extends IFloodlightService>, IFloodlightService>
+            getServiceImpls() {
+        Map<Class<? extends IFloodlightService>,
+            IFloodlightService> m = 
+                new HashMap<Class<? extends IFloodlightService>,
+                    IFloodlightService>();
+        m.put(IThreadPoolService.class, this);
+        // We are the class that implements the service
+        return m;
+    }
+
+    @Override
+    public Collection<Class<? extends IFloodlightService>>
+            getModuleDependencies() {
+        // No dependencies
+        return null;
+    }
+
+    @Override
+    public void init(FloodlightModuleContext context)
+                                 throws FloodlightModuleException {
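+        // Single shared pool (15 threads) backing all scheduled tasks in the controller.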
+        executor = Executors.newScheduledThreadPool(15);
+    }
+
+    @Override
+    public void startUp(FloodlightModuleContext context) {
+        // no-op
+    }
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/topology/Cluster.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/topology/Cluster.java
new file mode 100644
index 0000000..606b079
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/topology/Cluster.java
@@ -0,0 +1,79 @@
+package net.floodlightcontroller.topology;
+
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+
+import net.floodlightcontroller.routing.Link;
+
+import org.openflow.util.HexString;
+
+public class Cluster {
+    protected long id; // the lowest id of the nodes
+    protected Map<Long, Set<Link>> links; // set of links connected to a node.
+
+    public Cluster() {
+        id = Long.MAX_VALUE;
+        links = new HashMap<Long, Set<Link>>();
+    }
+
+    public long getId() {
+        return id;
+    }
+
+    public void setId(long id) {
+        this.id = id;
+    }
+
+    public Map<Long, Set<Link>> getLinks() {
+        return links;
+    }
+
+    public Set<Long> getNodes() {
+        return links.keySet();
+    }
+
+    void add(long n) {
+        if (links.containsKey(n) == false) {
+            links.put(n, new HashSet<Link>());
+            if (n < id) id = n;
+        }
+    }
+
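+    // Registers the link under both of its endpoint switches, creating the
+    // per-switch link sets as needed and keeping id at the lowest DPID seen.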
+    void addLink(Link l) {
+        if (links.containsKey(l.getSrc()) == false) {
+            links.put(l.getSrc(), new HashSet<Link>());
+            if (l.getSrc() < id) id = l.getSrc();
+        }
+        links.get(l.getSrc()).add(l);
+
+        if (links.containsKey(l.getDst()) == false) {
+            links.put(l.getDst(), new HashSet<Link>());
+            if (l.getDst() < id) id = l.getDst();
+        }
+        links.get(l.getDst()).add(l);
+     }
+
+    @Override 
+    public int hashCode() {
+        return (int) (id + id >>>32);
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (this == obj)
+            return true;
+        if (obj == null)
+            return false;
+        if (getClass() != obj.getClass())
+            return false;
+
+        Cluster other = (Cluster) obj;
+        return (this.id == other.id);
+    }
+    
+    public String toString() {
+        return "[Cluster id=" + HexString.toHexString(id) + ", " + links.keySet() + "]";
+    }
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/topology/ITopologyListener.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/topology/ITopologyListener.java
new file mode 100644
index 0000000..9f06992
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/topology/ITopologyListener.java
@@ -0,0 +1,8 @@
+package net.floodlightcontroller.topology;
+
+public interface ITopologyListener {
+    /**
+     * Called when the switch clusters are recomputed.
+     */
+    void topologyChanged();
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/topology/ITopologyService.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/topology/ITopologyService.java
new file mode 100644
index 0000000..cc62e82
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/topology/ITopologyService.java
@@ -0,0 +1,198 @@
+package net.floodlightcontroller.topology;
+
+import java.util.Date;
+import java.util.List;
+import java.util.Set;
+
+import net.floodlightcontroller.core.module.IFloodlightService;
+import net.floodlightcontroller.linkdiscovery.ILinkDiscovery.LDUpdate;
+
+public interface ITopologyService extends IFloodlightService  {
+
+    public void addListener(ITopologyListener listener);
+
+    public Date getLastUpdateTime();
+
+    /**
+     * Query to determine if devices must be learned on a given switch port.
+     */
+    public boolean isAttachmentPointPort(long switchid, short port);
+    public boolean isAttachmentPointPort(long switchid, short port,
+                                         boolean tunnelEnabled);
+
+    public long getOpenflowDomainId(long switchId);
+    public long getOpenflowDomainId(long switchId, boolean tunnelEnabled);
+
+    /**
+     * Returns the identifier of the L2 domain of a given switch.
+     * @param switchId The DPID of the switch in long form
+     * @return The DPID of the switch that is the key for the cluster
+     */
+    public long getL2DomainId(long switchId);
+    public long getL2DomainId(long switchId, boolean tunnelEnabled);
+
+    /**
+     * Queries whether two switches are in the same cluster.
+     * @param switch1
+     * @param switch2
+     * @return true if the switches are in the same cluster
+     */
+    public boolean inSameOpenflowDomain(long switch1, long switch2);
+    public boolean inSameOpenflowDomain(long switch1, long switch2, 
+                                        boolean tunnelEnabled);
+
+    /**
+     * Queries whether two switches are in the same island.
+     * Currently, an island and a cluster are the same. In the future,
+     * islands could be different from clusters.
+     * @param switch1
+     * @param switch2
+     * @return true if they are in the same island, false otherwise
+     */
+    public boolean inSameL2Domain(long switch1, long switch2);
+    public boolean inSameL2Domain(long switch1, long switch2, 
+                                  boolean tunnelEnabled);
+
+    public boolean isBroadcastDomainPort(long sw, short port);
+    public boolean isBroadcastDomainPort(long sw, short port, 
+                                         boolean tunnelEnabled);
+
+
+    public boolean isAllowed(long sw, short portId);
+    public boolean isAllowed(long sw, short portId, boolean tunnelEnabled);
+
+    /**
+     * Indicates if an attachment point on the new switch port is consistent
+     * with the attachment point on the old switch port or not.
+     */
+    public boolean isConsistent(long oldSw, short oldPort,
+                                long newSw, short newPort);
+    public boolean isConsistent(long oldSw, short oldPort,
+                                long newSw, short newPort,
+                                boolean tunnelEnabled);
+
+    /**
+     * Indicates if the two switch ports are connected to the same
+     * broadcast domain or not.
+     * @param s1
+     * @param p1
+     * @param s2
+     * @param p2
+     * @return
+     */
+    public boolean isInSameBroadcastDomain(long s1, short p1, 
+                                           long s2, short p2);
+    public boolean isInSameBroadcastDomain(long s1, short p1,
+                                           long s2, short p2,
+                                           boolean tunnelEnabled);
+
+    /**
+     * Gets a list of ports on a given switch that are known to topology.
+     * @param sw The switch DPID in long
+     * @return The set of ports on this switch
+     */
+    public Set<Short> getPortsWithLinks(long sw);
+    public Set<Short> getPortsWithLinks(long sw, boolean tunnelEnabled);
+
+    /** Get the broadcast ports on a target switch for a given
+     * attachment point port.
+     */
+    public Set<Short> getBroadcastPorts(long targetSw, long src, short srcPort);
+
+    public Set<Short> getBroadcastPorts(long targetSw, long src, short srcPort,
+                                        boolean tunnelEnabled);
+
+    /**
+     * Queries whether incoming broadcast traffic is allowed on the given
+     * switch port.
+     */
+    public boolean isIncomingBroadcastAllowed(long sw, short portId);
+    public boolean isIncomingBroadcastAllowed(long sw, short portId,
+                                              boolean tunnelEnabled);
+
+
+    /** Get the proper outgoing switchport for a given pair of src-dst
+     * switchports.
+     */
+    public NodePortTuple getOutgoingSwitchPort(long src, short srcPort,
+                                               long dst, short dstPort);
+
+
+    public NodePortTuple getOutgoingSwitchPort(long src, short srcPort,
+                                               long dst, short dstPort,
+                                               boolean tunnelEnabled);
+
+
+    public NodePortTuple getIncomingSwitchPort(long src, short srcPort,
+                                               long dst, short dstPort);
+    public NodePortTuple getIncomingSwitchPort(long src, short srcPort,
+                                               long dst, short dstPort,
+                                               boolean tunnelEnabled);
+
+    /**
+     * If the dst is not allowed by the higher-level topology,
+     * this method provides the topologically equivalent broadcast port.
+     * @param src
+     * @param dst
+     * @return the allowed broadcast port
+     */
+    public NodePortTuple 
+    getAllowedOutgoingBroadcastPort(long src,
+                                    short srcPort,
+                                    long dst,
+                                    short dstPort);
+
+    public NodePortTuple 
+    getAllowedOutgoingBroadcastPort(long src,
+                                    short srcPort,
+                                    long dst,
+                                    short dstPort,
+                                    boolean tunnelEnabled);
+
+    /**
+     * If the src broadcast domain port is not allowed for incoming
+     * broadcast, this method provides the topologically equivalent,
+     * broadcast-allowed incoming src port.
+     * @param src
+     * @param srcPort
+     * @return the allowed broadcast port
+     */
+    public NodePortTuple
+    getAllowedIncomingBroadcastPort(long src,
+                                    short srcPort);
+
+    public NodePortTuple
+    getAllowedIncomingBroadcastPort(long src,
+                                    short srcPort,
+                                    boolean tunnelEnabled);
+
+
+    /**
+     * Gets the set of ports that belong to a broadcast domain.
+     * @return The set of ports that belong to a broadcast domain.
+     */
+    public Set<NodePortTuple> getBroadcastDomainPorts();
+    public Set<NodePortTuple> getTunnelPorts();
+
+
+    /**
+     * Returns a set of blocked ports.  The set of blocked
+     * ports is the union of all the blocked ports across all
+     * instances.
+     * @return
+     */
+    public Set<NodePortTuple> getBlockedPorts();
+
+    /**
+     * ITopologyListener provides topologyChanged notification, 
+     * but not *what* the changes were.  
+     * This method returns the delta in the linkUpdates between the current and the previous topology instance.
+     * @return
+     */
+    public List<LDUpdate> getLastLinkUpdates();
+
+    /**
+     * Switch methods
+     */
+    public Set<Short> getPorts(long sw);
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/topology/NodePair.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/topology/NodePair.java
new file mode 100644
index 0000000..ff954a0
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/topology/NodePair.java
@@ -0,0 +1,53 @@
+package net.floodlightcontroller.topology;
+
+public class NodePair {
+    private long min;
+    private long max;
+
+    public NodePair(long a, long b) {
+        if (a < b) {
+            min = a; 
+            max = b;
+        } else {
+            min = b;
+            max = a;
+        }
+    }
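+
+    /*
+     * Editorial note, not part of the imported source: because the
+     * constructor orders the endpoints, NodePair acts as a canonical key for
+     * an unordered switch pair.  For example, new NodePair(5L, 3L) and
+     * new NodePair(3L, 5L) are equal and hash identically, whereas
+     * OrderedNodePair in this package keeps the src/dst direction distinct.
+     */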
+
+    public long getNode() {
+        return min;
+    }
+
+    public long getOtherNode() {
+        return max;
+    }
+
+    public String toString() {
+        return "[" + new Long(min) + ", " + new Long(max) + "]";
+    }
+
+    @Override
+    public int hashCode() {
+        final int prime = 31;
+        int result = 1;
+        result = prime * result + (int) (max ^ (max >>> 32));
+        result = prime * result + (int) (min ^ (min >>> 32));
+        return result;
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (this == obj)
+            return true;
+        if (obj == null)
+            return false;
+        if (getClass() != obj.getClass())
+            return false;
+        NodePair other = (NodePair) obj;
+        if (max != other.max)
+            return false;
+        if (min != other.min)
+            return false;
+        return true;
+    }
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/topology/NodePortTuple.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/topology/NodePortTuple.java
new file mode 100644
index 0000000..4983529
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/topology/NodePortTuple.java
@@ -0,0 +1,90 @@
+package net.floodlightcontroller.topology;
+
+import net.floodlightcontroller.core.web.serializers.DPIDSerializer;
+import net.floodlightcontroller.core.web.serializers.UShortSerializer;
+
+import org.codehaus.jackson.annotate.JsonProperty;
+import org.codehaus.jackson.map.annotate.JsonSerialize;
+import org.openflow.util.HexString;
+
+/**
+ * A NodePortTuple is similar to a SwitchPortTuple
+ * but it only stores IDs instead of references
+ * to the actual objects.
+ * @author srini
+ */
+public class NodePortTuple {
+    protected long nodeId; // switch DPID
+    protected short portId; // switch port id
+
+    /**
+     * Creates a NodePortTuple
+     * @param nodeId The DPID of the switch
+     * @param portId The port of the switch
+     */
+    public NodePortTuple(long nodeId, short portId) {
+        this.nodeId = nodeId;
+        this.portId = portId;
+    }
+
+    public NodePortTuple(long nodeId, int portId) {
+        this.nodeId = nodeId;
+        this.portId = (short) portId;
+    }
+
+    @JsonProperty("switch")
+    @JsonSerialize(using=DPIDSerializer.class)
+    public long getNodeId() {
+        return nodeId;
+    }
+    public void setNodeId(long nodeId) {
+        this.nodeId = nodeId;
+    }
+    @JsonProperty("port")
+    @JsonSerialize(using=UShortSerializer.class)
+    public short getPortId() {
+        return portId;
+    }
+    public void setPortId(short portId) {
+        this.portId = portId;
+    }
+    
+    public String toString() {
+        return "[id=" + HexString.toHexString(nodeId) + ", port=" + new Short(portId) + "]";
+    }
+
+    @Override
+    public int hashCode() {
+        final int prime = 31;
+        int result = 1;
+        result = prime * result + (int) (nodeId ^ (nodeId >>> 32));
+        result = prime * result + portId;
+        return result;
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (this == obj)
+            return true;
+        if (obj == null)
+            return false;
+        if (getClass() != obj.getClass())
+            return false;
+        NodePortTuple other = (NodePortTuple) obj;
+        if (nodeId != other.nodeId)
+            return false;
+        if (portId != other.portId)
+            return false;
+        return true;
+    }
+    
+    /**
+     * Returns a String key formed from the NodeID and PortID.
+     * The portID is a 16-bit field, so it is masked as an integer to get
+     * its full positive value.
+     * @return the key string
+     */
+    public String toKeyString() {
+        return (HexString.toHexString(nodeId)+ "|" + (portId & 0xffff));
+    }
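+
+    /*
+     * Editorial example, not part of the imported source: for nodeId 0x1 and
+     * portId (short) 0xfffe, (portId & 0xffff) evaluates to 65534, so
+     * toKeyString() returns "00:00:00:00:00:00:00:01|65534" instead of a
+     * negative port number.
+     */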
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/topology/OrderedNodePair.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/topology/OrderedNodePair.java
new file mode 100644
index 0000000..af9e677
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/topology/OrderedNodePair.java
@@ -0,0 +1,49 @@
+package net.floodlightcontroller.topology;
+
+public class OrderedNodePair {
+    private long src;
+    private long dst;
+
+    public OrderedNodePair(long s, long d) {
+        src = s;
+        dst = d;
+    }
+
+    public long getSrc() {
+        return src;
+    }
+
+    public long getDst() {
+        return dst;
+    }
+
+    @Override
+    public int hashCode() {
+        final int prime = 2417;
+        int result = 1;
+        result = prime * result + (int) (dst ^ (dst >>> 32));
+        result = prime * result + (int) (src ^ (src >>> 32));
+        return result;
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (this == obj)
+            return true;
+        if (obj == null)
+            return false;
+        if (getClass() != obj.getClass())
+            return false;
+        OrderedNodePair other = (OrderedNodePair) obj;
+        if (dst != other.dst)
+            return false;
+        if (src != other.src)
+            return false;
+        return true;
+    }
+
+    @Override
+    public String toString() {
+        return "OrderedNodePair [src=" + src + ", dst=" + dst + "]";
+    }
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/topology/TopologyInstance.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/topology/TopologyInstance.java
new file mode 100644
index 0000000..85ac6b8
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/topology/TopologyInstance.java
@@ -0,0 +1,782 @@
+package net.floodlightcontroller.topology;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.PriorityQueue;
+import java.util.Set;
+
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import net.floodlightcontroller.util.ClusterDFS;
+import net.floodlightcontroller.core.annotations.LogMessageCategory;
+import net.floodlightcontroller.core.annotations.LogMessageDoc;
+import net.floodlightcontroller.routing.BroadcastTree;
+import net.floodlightcontroller.routing.Link;
+import net.floodlightcontroller.routing.Route;
+import net.floodlightcontroller.routing.RouteId;
+import net.floodlightcontroller.util.LRUHashMap;
+
+/**
+ * A representation of a network topology.  Used internally by 
+ * {@link TopologyManager}
+ */
+@LogMessageCategory("Network Topology")
+public class TopologyInstance {
+
+    public static final short LT_SH_LINK = 1;
+    public static final short LT_BD_LINK = 2;
+    public static final short LT_TUNNEL  = 3; 
+
+    public static final int MAX_LINK_WEIGHT = 10000;
+    public static final int MAX_PATH_WEIGHT = Integer.MAX_VALUE - MAX_LINK_WEIGHT - 1;
+    public static final int PATH_CACHE_SIZE = 1000;
+
+    protected static Logger log = LoggerFactory.getLogger(TopologyInstance.class);
+
+    protected Map<Long, Set<Short>> switchPorts; // Set of ports for each switch
+    /** Set of switch ports that are marked as blocked.  A set of blocked
+     * switch ports may be provided at the time of instantiation. Further
+     * ports may be added to this set later.
+     */
+    protected Set<NodePortTuple> blockedPorts;  
+    protected Map<NodePortTuple, Set<Link>> switchPortLinks; // Set of links organized by node port tuple
+    /** Set of links that are blocked. */
+    protected Set<Link> blockedLinks;
+
+    protected Set<Long> switches;
+    protected Set<NodePortTuple> broadcastDomainPorts;
+    protected Set<NodePortTuple> tunnelPorts;
+
+    protected Set<Cluster> clusters;  // set of openflow domains
+    protected Map<Long, Cluster> switchClusterMap; // switch to OF domain map
+
+    // States for routing
+    protected Map<Long, BroadcastTree> destinationRootedTrees;
+    protected Map<Long, Set<NodePortTuple>> clusterBroadcastNodePorts;
+    protected Map<Long, BroadcastTree> clusterBroadcastTrees;
+    protected LRUHashMap<RouteId, Route> pathcache;
+
+    public TopologyInstance() {
+        this.switches = new HashSet<Long>();
+        this.switchPorts = new HashMap<Long, Set<Short>>();
+        this.switchPortLinks = new HashMap<NodePortTuple, Set<Link>>();
+        this.broadcastDomainPorts = new HashSet<NodePortTuple>();
+        this.tunnelPorts = new HashSet<NodePortTuple>();
+        this.blockedPorts = new HashSet<NodePortTuple>();
+        this.blockedLinks = new HashSet<Link>();
+    }
+    
+    public TopologyInstance(Map<Long, Set<Short>> switchPorts,
+                            Map<NodePortTuple, Set<Link>> switchPortLinks)
+    {
+        this.switches = new HashSet<Long>(switchPorts.keySet());
+        this.switchPorts = new HashMap<Long, Set<Short>>(switchPorts);
+        this.switchPortLinks = new HashMap<NodePortTuple, 
+                                           Set<Link>>(switchPortLinks);
+        this.broadcastDomainPorts = new HashSet<NodePortTuple>();
+        this.tunnelPorts = new HashSet<NodePortTuple>();
+        this.blockedPorts = new HashSet<NodePortTuple>();
+        this.blockedLinks = new HashSet<Link>();
+        
+        clusters = new HashSet<Cluster>();
+        switchClusterMap = new HashMap<Long, Cluster>();
+    }
+    public TopologyInstance(Map<Long, Set<Short>> switchPorts,
+                            Set<NodePortTuple> blockedPorts,
+                            Map<NodePortTuple, Set<Link>> switchPortLinks,
+                            Set<NodePortTuple> broadcastDomainPorts,
+                            Set<NodePortTuple> tunnelPorts){
+
+        // copy these structures
+        this.switches = new HashSet<Long>(switchPorts.keySet());
+        this.switchPorts = new HashMap<Long, Set<Short>>();
+        for(long sw: switchPorts.keySet()) {
+            this.switchPorts.put(sw, new HashSet<Short>(switchPorts.get(sw)));
+        }
+
+        this.blockedPorts = new HashSet<NodePortTuple>(blockedPorts);
+        this.switchPortLinks = new HashMap<NodePortTuple, Set<Link>>();
+        for(NodePortTuple npt: switchPortLinks.keySet()) {
+            this.switchPortLinks.put(npt, 
+                                     new HashSet<Link>(switchPortLinks.get(npt)));
+        }
+        this.broadcastDomainPorts = new HashSet<NodePortTuple>(broadcastDomainPorts);
+        this.tunnelPorts = new HashSet<NodePortTuple>(tunnelPorts);
+
+        blockedLinks = new HashSet<Link>();
+        clusters = new HashSet<Cluster>();
+        switchClusterMap = new HashMap<Long, Cluster>();
+        destinationRootedTrees = new HashMap<Long, BroadcastTree>();
+        clusterBroadcastTrees = new HashMap<Long, BroadcastTree>();
+        clusterBroadcastNodePorts = new HashMap<Long, Set<NodePortTuple>>();
+        pathcache = new LRUHashMap<RouteId, Route>(PATH_CACHE_SIZE);
+    }
+
+    public void compute() {
+
+        // Step 1: Compute clusters ignoring broadcast domain links
+        // Create nodes for clusters in the higher level topology
+        // Must ignore blocked links.
+        identifyOpenflowDomains();
+
+        // Step 0: Remove all links connected to blocked ports.
+        // removeLinksOnBlockedPorts();
+
+
+        // Step 1.1: Add links to clusters
+        // Avoid adding blocked links to clusters
+        addLinksToOpenflowDomains();
+
+        // Step 2. Compute shortest path trees in each cluster for
+        // unicast routing.  The trees are rooted at the destination.
+        // Costs for tunnel links and direct links are the same.
+        calculateShortestPathTreeInClusters();
+
+        // Step 3. Compute broadcast tree in each cluster.
+        // Costs for tunnel links are high to discourage their use.
+        // The cost is set to the number of nodes in the cluster + 1,
+        // so that tunnel links are used as little as possible.
+        calculateBroadcastNodePortsInClusters();
+
+        // Step 4. print topology.
+        // printTopology();
+    }
+
+    public void printTopology() {
+        log.trace("-----------------------------------------------");
+        log.trace("Links: {}",this.switchPortLinks);
+        log.trace("broadcastDomainPorts: {}", broadcastDomainPorts);
+        log.trace("tunnelPorts: {}", tunnelPorts);
+        log.trace("clusters: {}", clusters);
+        log.trace("destinationRootedTrees: {}", destinationRootedTrees);
+        log.trace("clusterBroadcastNodePorts: {}", clusterBroadcastNodePorts);
+        log.trace("-----------------------------------------------");
+    }
+
+    protected void addLinksToOpenflowDomains() {
+        for(long s: switches) {
+            if (switchPorts.get(s) == null) continue;
+            for (short p: switchPorts.get(s)) {
+                NodePortTuple np = new NodePortTuple(s, p);
+                if (switchPortLinks.get(np) == null) continue;
+                if (isBroadcastDomainPort(np)) continue;
+                for(Link l: switchPortLinks.get(np)) {
+                    if (isBlockedLink(l)) continue;
+                    if (isBroadcastDomainLink(l)) continue;
+                    Cluster c1 = switchClusterMap.get(l.getSrc());
+                    Cluster c2 = switchClusterMap.get(l.getDst());
+                    if (c1 ==c2) {
+                        c1.addLink(l);
+                    }
+                }
+            }
+        }
+    }
+
+    /**
+     * @author Srinivasan Ramasubramanian
+     *
+     * This function divides the network into clusters. Every cluster is
+     * a strongly connected component. The network may contain unidirectional
+     * links.  The function calls dfsTraverse for performing depth first
+     * search and cluster formation.
+     *
+     * The computation of strongly connected components is based on
+     * Tarjan's algorithm.  For more details, please see the Wikipedia
+     * link below.
+     *
+     * http://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm
+     */
+    @LogMessageDoc(level="ERROR",
+            message="No DFS object for switch {} found.",
+            explanation="The internal state of the topology module is corrupt",
+            recommendation=LogMessageDoc.REPORT_CONTROLLER_BUG)
+    public void identifyOpenflowDomains() {
+        Map<Long, ClusterDFS> dfsList = new HashMap<Long, ClusterDFS>();
+
+        if (switches == null) return;
+
+        for (Long key: switches) {
+            ClusterDFS cdfs = new ClusterDFS();
+            dfsList.put(key, cdfs);
+        }
+        Set<Long> currSet = new HashSet<Long>();
+
+        for (Long sw: switches) {
+            ClusterDFS cdfs = dfsList.get(sw);
+            if (cdfs == null) {
+                log.error("No DFS object for switch {} found.", sw);
+            }else if (!cdfs.isVisited()) {
+                dfsTraverse(0, 1, sw, dfsList, currSet);
+            }
+        }
+    }
+
+
+    /**
+     * @author Srinivasan Ramasubramanian
+     *
+     * This algorithm computes the depth first search (DFS) traversal of the
+     * switches in the network, computes the lowpoint, and creates clusters
+     * (of strongly connected components).
+     *
+     * The computation of strongly connected components is based on
+     * Tarjan's algorithm.  For more details, please see the Wikipedia
+     * link below.
+     *
+     * http://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm
+     *
+     * The initialization of lowpoint and the check condition for when a
+     * cluster should be formed are modified, as we do not remove switches that
+     * are already part of a cluster.
+     *
+     * A return value of -1 indicates that dfsTraverse failed somewhere in the middle
+     * of computation.  This could happen when a switch is removed during the cluster
+     * computation procedure.
+     *
+     * @param parentIndex: DFS index of the parent node
+     * @param currIndex: DFS index to be assigned to a newly visited node
+     * @param currSw: ID of the current switch
+     * @param dfsList: HashMap of DFS data structure for each switch
+     * @param currSet: Set of nodes in the current cluster in formation
+     * @return long: DFS index to be used when a new node is visited
+     */
+
+    private long dfsTraverse (long parentIndex, long currIndex, long currSw,
+                              Map<Long, ClusterDFS> dfsList, Set <Long> currSet) {
+
+        //Get the DFS object corresponding to the current switch
+        ClusterDFS currDFS = dfsList.get(currSw);
+        // Get all the links corresponding to this switch
+
+
+        // Assign the DFS object the right values.
+        currDFS.setVisited(true);
+        currDFS.setDfsIndex(currIndex);
+        currDFS.setParentDFSIndex(parentIndex);
+        currIndex++;
+
+        // Traverse the graph through every outgoing link.
+        if (switchPorts.get(currSw) != null){
+            for(Short p: switchPorts.get(currSw)) {
+                Set<Link> lset = switchPortLinks.get(new NodePortTuple(currSw, p));
+                if (lset == null) continue;
+                for(Link l:lset) {
+                    long dstSw = l.getDst();
+
+                    // ignore incoming links.
+                    if (dstSw == currSw) continue;
+
+                    // ignore if the destination is already added to 
+                    // another cluster
+                    if (switchClusterMap.get(dstSw) != null) continue;
+
+                    // ignore the link if it is blocked.
+                    if (isBlockedLink(l)) continue;
+
+                    // ignore this link if it is in broadcast domain
+                    if (isBroadcastDomainLink(l)) continue;
+
+                    // Get the DFS object corresponding to the dstSw
+                    ClusterDFS dstDFS = dfsList.get(dstSw);
+
+                    if (dstDFS.getDfsIndex() < currDFS.getDfsIndex()) {
+                        // could be a potential lowpoint
+                        if (dstDFS.getDfsIndex() < currDFS.getLowpoint())
+                            currDFS.setLowpoint(dstDFS.getDfsIndex());
+
+                    } else if (!dstDFS.isVisited()) {
+                        // make a DFS visit
+                        currIndex = dfsTraverse(currDFS.getDfsIndex(), currIndex, dstSw,
+                                                dfsList, currSet);
+
+                        if (currIndex < 0) return -1;
+
+                        // update lowpoint after the visit
+                        if (dstDFS.getLowpoint() < currDFS.getLowpoint())
+                            currDFS.setLowpoint(dstDFS.getLowpoint());
+                    }
+                    // else, it is a node already visited with a higher
+                    // dfs index, just ignore.
+                }
+            }
+        }
+
+        // Add current node to currSet.
+        currSet.add(currSw);
+
+        // Cluster computation.
+        // If the node's lowpoint is greater than its parent's DFS index,
+        // we need to form a new cluster with all the switches in the
+        // currSet.
+        if (currDFS.getLowpoint() > currDFS.getParentDFSIndex()) {
+            // The cluster thus far forms a strongly connected component.
+            // Create a new switch cluster and add the switches in the
+            // current set to it.
+            Cluster sc = new Cluster();
+            for(long sw: currSet){
+                sc.add(sw);
+                switchClusterMap.put(sw, sc);
+            }
+            // delete all the nodes in the current set.
+            currSet.clear();
+            // add the newly formed switch clusters to the cluster set.
+            clusters.add(sc);
+        }
+
+        return currIndex;
+    }
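+
+    /*
+     * Editorial worked example, not part of the imported source: for three
+     * switches 1, 2, 3 joined by bidirectional links 1<->2 and 2<->3, a
+     * traversal rooted at switch 1 assigns DFS indices 1, 2 and 3.  The back
+     * links keep every non-root lowpoint at or below the parent's DFS index,
+     * so no cluster is emitted until the recursion unwinds to the root, where
+     * lowpoint (1) exceeds the root's parent index (0) and currSet {1, 2, 3}
+     * becomes a single Cluster.  If the link from 3 back to 2 were missing,
+     * switch 3's lowpoint would stay above its parent's index and {3} would
+     * be emitted as its own cluster.
+     */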
+
+    /**
+     *  Go through every link and identify whether it is a blocked link.
+     *  If blocked, remove it from the switchport links and put it in the
+     *  blocked-link category.
+     *
+     *  Note that we do not update the tunnel port and broadcast domain
+     *  port structures.  We still need those to answer whether the
+     *  ports are tunnel or broadcast domain ports.
+     *
+     *  If we add additional ports to blocked ports later on, we may simply
+     *  call this method again to remove the links on the newly blocked ports.
+     */
+    protected void removeLinksOnBlockedPorts() {
+        Iterator<NodePortTuple> nptIter;
+        Iterator<Link> linkIter;
+
+        // Iterate through all the links and all the switch ports
+        // and move the links on blocked switch ports to blocked links
+        nptIter = this.switchPortLinks.keySet().iterator();
+        while (nptIter.hasNext()) {
+            NodePortTuple npt = nptIter.next();
+            linkIter = switchPortLinks.get(npt).iterator();
+            while (linkIter.hasNext()) {
+                Link link = linkIter.next();
+                if (isBlockedLink(link)) {
+                    this.blockedLinks.add(link);
+                    linkIter.remove();
+                }
+            }
+            // Note that at this point, the switchport may have
+            // no links in it.  We could delete the switch port, 
+            // but we will leave it as is.
+        }
+    }
+
+    public Set<NodePortTuple> getBlockedPorts() {
+        return this.blockedPorts;
+    }
+
+    protected Set<Link> getBlockedLinks() {
+        return this.blockedLinks;
+    }
+
+    /** Returns true if a link has either one of its switch ports 
+     * blocked.
+     * @param l
+     * @return
+     */
+    protected boolean isBlockedLink(Link l) {
+        NodePortTuple n1 = new NodePortTuple(l.getSrc(), l.getSrcPort());
+        NodePortTuple n2 = new NodePortTuple(l.getDst(), l.getDstPort());
+        return (isBlockedPort(n1) || isBlockedPort(n2));
+    }
+
+    protected boolean isBlockedPort(NodePortTuple npt) {
+        return blockedPorts.contains(npt);
+    }
+
+    protected boolean isTunnelPort(NodePortTuple npt) {
+        return tunnelPorts.contains(npt);
+    }
+
+    protected boolean isTunnelLink(Link l) {
+        NodePortTuple n1 = new NodePortTuple(l.getSrc(), l.getSrcPort());
+        NodePortTuple n2 = new NodePortTuple(l.getDst(), l.getDstPort());
+        return (isTunnelPort(n1) || isTunnelPort(n2));
+    }
+
+    public boolean isBroadcastDomainLink(Link l) {
+        NodePortTuple n1 = new NodePortTuple(l.getSrc(), l.getSrcPort());
+        NodePortTuple n2 = new NodePortTuple(l.getDst(), l.getDstPort());
+        return (isBroadcastDomainPort(n1) || isBroadcastDomainPort(n2));
+    }
+
+    public boolean isBroadcastDomainPort(NodePortTuple npt) {
+        return broadcastDomainPorts.contains(npt);
+    }
+
+    class NodeDist implements Comparable<NodeDist> {
+        private Long node;
+        public Long getNode() {
+            return node;
+        }
+
+        private int dist; 
+        public int getDist() {
+            return dist;
+        }
+
+        public NodeDist(Long node, int dist) {
+            this.node = node;
+            this.dist = dist;
+        }
+
+        public int compareTo(NodeDist o) {
+            if (o.dist == this.dist) {
+                return (int)(o.node - this.node);
+            }
+            return o.dist - this.dist;
+        }
+    }
+
+    protected BroadcastTree dijkstra(Cluster c, Long root, 
+                                     Map<Link, Integer> linkCost,
+                                     boolean isDstRooted) {
+        HashMap<Long, Link> nexthoplinks = new HashMap<Long, Link>();
+        //HashMap<Long, Long> nexthopnodes = new HashMap<Long, Long>();
+        HashMap<Long, Integer> cost = new HashMap<Long, Integer>();
+        int w;
+
+        for (Long node: c.links.keySet()) {
+            nexthoplinks.put(node, null);
+            //nexthopnodes.put(node, null);
+            cost.put(node, MAX_PATH_WEIGHT);
+        }
+
+        HashMap<Long, Boolean> seen = new HashMap<Long, Boolean>();
+        PriorityQueue<NodeDist> nodeq = new PriorityQueue<NodeDist>();
+        nodeq.add(new NodeDist(root, 0));
+        cost.put(root, 0);
+        while (nodeq.peek() != null) {
+            NodeDist n = nodeq.poll();
+            Long cnode = n.getNode();
+            int cdist = n.getDist();
+            if (cdist >= MAX_PATH_WEIGHT) break;
+            if (seen.containsKey(cnode)) continue;
+            seen.put(cnode, true);
+
+            for (Link link: c.links.get(cnode)) {
+                Long neighbor;
+                
+                if (isDstRooted == true) neighbor = link.getSrc();
+                else neighbor = link.getDst();
+                
+                // links directed toward cnode will result in this condition
+                // if (neighbor == cnode) continue;
+                
+                if (linkCost == null || linkCost.get(link)==null) w = 1;
+                else w = linkCost.get(link);
+
+                int ndist = cdist + w; // distance via this link (weight defaults to 1 when no link cost is given)
+                if (ndist < cost.get(neighbor)) {
+                    cost.put(neighbor, ndist);
+                    nexthoplinks.put(neighbor, link);
+                    //nexthopnodes.put(neighbor, cnode);
+                    nodeq.add(new NodeDist(neighbor, ndist));
+                }
+            }
+        }
+
+        BroadcastTree ret = new BroadcastTree(nexthoplinks, cost);
+        return ret;
+    }
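+
+    /*
+     * Editorial note, not part of the imported source: with isDstRooted set
+     * to true, the returned BroadcastTree maps each node to the first link on
+     * its shortest path toward the root, which is exactly what buildroute()
+     * below follows hop by hop when assembling a Route.
+     */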
+
+    protected void calculateShortestPathTreeInClusters() {
+        pathcache.clear();
+        destinationRootedTrees.clear();
+
+        Map<Link, Integer> linkCost = new HashMap<Link, Integer>();
+        int tunnel_weight = switchPorts.size() + 1;
+
+        for(NodePortTuple npt: tunnelPorts) {
+            if (switchPortLinks.get(npt) == null) continue;
+            for(Link link: switchPortLinks.get(npt)) {
+                if (link == null) continue;
+                linkCost.put(link, tunnel_weight);
+            }
+        }
+
+        for(Cluster c: clusters) {
+            for (Long node : c.links.keySet()) {
+                BroadcastTree tree = dijkstra(c, node, linkCost, true);
+                destinationRootedTrees.put(node, tree);
+            }
+        }
+    }
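+
+    /*
+     * Editorial note, not part of the imported source: tunnel_weight is the
+     * number of switches plus one, while every other link costs 1.  Since a
+     * loop-free path inside a cluster traverses at most (number of
+     * switches - 1) unit-cost links, a single tunnel link is always more
+     * expensive than any tunnel-free alternative, so Dijkstra only picks a
+     * tunnel when no other path exists.
+     */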
+
+    protected void calculateBroadcastTreeInClusters() {
+        for(Cluster c: clusters) {
+            // c.id is the smallest node that's in the cluster
+            BroadcastTree tree = destinationRootedTrees.get(c.id);
+            clusterBroadcastTrees.put(c.id, tree);
+        }
+    }
+
+    protected void calculateBroadcastNodePortsInClusters() {
+
+        clusterBroadcastTrees.clear();
+
+        calculateBroadcastTreeInClusters();
+
+        for(Cluster c: clusters) {
+            // c.id is the smallest node that's in the cluster
+            BroadcastTree tree = clusterBroadcastTrees.get(c.id);
+            //log.info("Broadcast Tree {}", tree);
+
+            Set<NodePortTuple> nptSet = new HashSet<NodePortTuple>();
+            Map<Long, Link> links = tree.getLinks();
+            if (links == null) continue;
+            for(long nodeId: links.keySet()) {
+                Link l = links.get(nodeId);
+                if (l == null) continue;
+                NodePortTuple npt1 = new NodePortTuple(l.getSrc(), l.getSrcPort());
+                NodePortTuple npt2 = new NodePortTuple(l.getDst(), l.getDstPort());
+                nptSet.add(npt1);
+                nptSet.add(npt2);
+            }
+            clusterBroadcastNodePorts.put(c.id, nptSet);
+        }
+    }
+
+    protected Route buildroute(RouteId id, long srcId, long dstId) {
+        NodePortTuple npt;
+
+        LinkedList<NodePortTuple> switchPorts =
+                new LinkedList<NodePortTuple>();
+
+        if (destinationRootedTrees == null) return null;
+        if (destinationRootedTrees.get(dstId) == null) return null;
+
+        Map<Long, Link> nexthoplinks =
+                destinationRootedTrees.get(dstId).getLinks();
+
+        if (!switches.contains(srcId) || !switches.contains(dstId)) {
+            // This is a switch that is not connected to any other switch
+            // hence there was no update for links (and hence it is not
+            // in the network)
+            log.debug("buildroute: Standalone switch: {}", srcId);
+
+            // The only possible non-null path for this case is
+            // if srcId equals dstId --- and that too is an 'empty' path []
+
+        } else if ((nexthoplinks!=null) && (nexthoplinks.get(srcId)!=null)) {
+            while (srcId != dstId) {
+                Link l = nexthoplinks.get(srcId);
+
+                npt = new NodePortTuple(l.getSrc(), l.getSrcPort());
+                switchPorts.addLast(npt);
+                npt = new NodePortTuple(l.getDst(), l.getDstPort());
+                switchPorts.addLast(npt);
+                srcId = nexthoplinks.get(srcId).getDst();
+            }
+        }
+        // else, no path exists, and path equals null
+
+        Route result = null;
+        if (switchPorts != null && !switchPorts.isEmpty()) 
+            result = new Route(id, switchPorts);
+        if (log.isTraceEnabled()) {
+            log.trace("buildroute: {}", result);
+        }
+        return result;
+    }
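+
+    /*
+     * Editorial example, not part of the imported source: for a chain of
+     * switches 1 -> 2 -> 3 where switch 1 reaches 2 via ports p12/p21 and
+     * switch 2 reaches 3 via ports p23/p32, buildroute(id, 1, 3) yields the
+     * NodePortTuple sequence [(1,p12), (2,p21), (2,p23), (3,p32)];
+     * getRoute(1, srcPort, 3, dstPort) then prepends (1,srcPort) and appends
+     * (3,dstPort) to that path.
+     */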
+
+    protected int getCost(long srcId, long dstId) {
+        BroadcastTree bt = destinationRootedTrees.get(dstId);
+        if (bt == null) return -1;
+        return (bt.getCost(srcId));
+    }
+
+    /* 
+     * Getter Functions
+     */
+
+    protected Set<Cluster> getClusters() {
+        return clusters;
+    }
+
+    // IRoutingEngineService interfaces
+    protected boolean routeExists(long srcId, long dstId) {
+        BroadcastTree bt = destinationRootedTrees.get(dstId);
+        if (bt == null) return false;
+        Link link = bt.getLinks().get(srcId);
+        if (link == null) return false;
+        return true;
+    }
+
+    protected Route getRoute(long srcId, short srcPort,
+                             long dstId, short dstPort) {
+
+
+        // Return null if the route source and destination are the
+        // same switch ports.
+        if (srcId == dstId && srcPort == dstPort)
+            return null;
+
+        List<NodePortTuple> nptList;
+        NodePortTuple npt;
+        Route r = getRoute(srcId, dstId);
+        if (r == null && srcId != dstId) return null;
+
+        if (r != null) {
+            nptList= new ArrayList<NodePortTuple>(r.getPath());
+        } else {
+            nptList = new ArrayList<NodePortTuple>();
+        }
+        npt = new NodePortTuple(srcId, srcPort);
+        nptList.add(0, npt); // add src port to the front
+        npt = new NodePortTuple(dstId, dstPort);
+        nptList.add(npt); // add dst port to the end
+
+        RouteId id = new RouteId(srcId, dstId);
+        r = new Route(id, nptList);
+        return r;
+    }
+
+    protected Route getRoute(long srcId, long dstId) {
+        RouteId id = new RouteId(srcId, dstId);
+        Route result = null;
+        if (pathcache.containsKey(id)) {
+            result = pathcache.get(id);
+        } else {
+            result = buildroute(id, srcId, dstId);
+            pathcache.put(id, result);
+        }
+        if (log.isTraceEnabled()) {
+            log.trace("getRoute: {} -> {}", id, result);
+        }
+        return result;
+    }
+
+    protected BroadcastTree getBroadcastTreeForCluster(long clusterId){
+        Cluster c = switchClusterMap.get(clusterId);
+        if (c == null) return null;
+        return clusterBroadcastTrees.get(c.id);
+    }
+
+    // 
+    //  ITopologyService interface method helpers.
+    // 
+
+    protected boolean isInternalToOpenflowDomain(long switchid, short port) {
+        return !isAttachmentPointPort(switchid, port);
+    }
+
+    public boolean isAttachmentPointPort(long switchid, short port) {
+        NodePortTuple npt = new NodePortTuple(switchid, port);
+        if (switchPortLinks.containsKey(npt)) return false;
+        return true;
+    }
+
+    protected long getOpenflowDomainId(long switchId) {
+        Cluster c = switchClusterMap.get(switchId);
+        if (c == null) return switchId;
+        return c.getId();
+    }
+
+    protected long getL2DomainId(long switchId) {
+        return getOpenflowDomainId(switchId);
+    }
+
+    protected Set<Long> getSwitchesInOpenflowDomain(long switchId) {
+        Cluster c = switchClusterMap.get(switchId);
+        if (c == null) return null;
+        return (c.getNodes());
+    }
+
+    protected boolean inSameOpenflowDomain(long switch1, long switch2) {
+        Cluster c1 = switchClusterMap.get(switch1);
+        Cluster c2 = switchClusterMap.get(switch2);
+        if (c1 != null && c2 != null)
+            return (c1.getId() == c2.getId());
+        return (switch1 == switch2);
+    }
+
+    public boolean isAllowed(long sw, short portId) {
+        return true;
+    }
+
+    protected boolean
+    isIncomingBroadcastAllowedOnSwitchPort(long sw, short portId) {
+        if (isInternalToOpenflowDomain(sw, portId)) {
+            long clusterId = getOpenflowDomainId(sw);
+            NodePortTuple npt = new NodePortTuple(sw, portId);
+            if (clusterBroadcastNodePorts.get(clusterId).contains(npt))
+                return true;
+            else return false;
+        }
+        return true;
+    }
+
+    public boolean isConsistent(long oldSw, short oldPort, long newSw,
+                                short newPort) {
+        if (isInternalToOpenflowDomain(newSw, newPort)) return true;
+        return (oldSw == newSw && oldPort == newPort);
+    }
+
+    protected Set<NodePortTuple>
+    getBroadcastNodePortsInCluster(long sw) {
+        long clusterId = getOpenflowDomainId(sw);
+        return clusterBroadcastNodePorts.get(clusterId);
+    }
+
+    public boolean inSameBroadcastDomain(long s1, short p1, long s2, short p2) {
+        return false;
+    }
+
+    public boolean inSameL2Domain(long switch1, long switch2) {
+        return inSameOpenflowDomain(switch1, switch2);
+    }
+
+    public NodePortTuple getOutgoingSwitchPort(long src, short srcPort,
+                                               long dst, short dstPort) {
+        // Use this function to redirect traffic if needed.
+        return new NodePortTuple(dst, dstPort);
+    }
+
+    public NodePortTuple getIncomingSwitchPort(long src, short srcPort,
+                                               long dst, short dstPort) {
+        // Use this function to reinject traffic from a different port if needed.
+        return new NodePortTuple(src, srcPort);
+    }
+
+    public Set<Long> getSwitches() {
+        return switches;
+    }
+
+    public Set<Short> getPortsWithLinks(long sw) {
+        return switchPorts.get(sw);
+    }
+
+    public Set<Short> getBroadcastPorts(long targetSw, long src, short srcPort) {
+        Set<Short> result = new HashSet<Short>();
+        long clusterId = getOpenflowDomainId(targetSw);
+        for(NodePortTuple npt: clusterBroadcastNodePorts.get(clusterId)) {
+            if (npt.getNodeId() == targetSw) {
+                result.add(npt.getPortId());
+            }
+        }
+        return result;
+    }
+
+    public NodePortTuple
+            getAllowedOutgoingBroadcastPort(long src, short srcPort, long dst,
+                                            short dstPort) {
+        // TODO Auto-generated method stub
+        return null;
+    }
+
+    public NodePortTuple
+    getAllowedIncomingBroadcastPort(long src, short srcPort) {
+        // TODO Auto-generated method stub
+        return null;
+    }
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/topology/TopologyManager.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/topology/TopologyManager.java
new file mode 100644
index 0000000..ba17483
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/topology/TopologyManager.java
@@ -0,0 +1,1169 @@
+package net.floodlightcontroller.topology;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+
+import net.floodlightcontroller.core.FloodlightContext;
+import net.floodlightcontroller.core.IFloodlightProviderService;
+import net.floodlightcontroller.core.IFloodlightProviderService.Role;
+import net.floodlightcontroller.core.IOFMessageListener;
+import net.floodlightcontroller.core.IOFSwitch;
+import net.floodlightcontroller.core.IHAListener;
+import net.floodlightcontroller.core.annotations.LogMessageCategory;
+import net.floodlightcontroller.core.annotations.LogMessageDoc;
+import net.floodlightcontroller.core.module.FloodlightModuleContext;
+import net.floodlightcontroller.core.module.FloodlightModuleException;
+import net.floodlightcontroller.core.module.IFloodlightModule;
+import net.floodlightcontroller.core.module.IFloodlightService;
+import net.floodlightcontroller.core.util.SingletonTask;
+import net.floodlightcontroller.counter.ICounterStoreService;
+import net.floodlightcontroller.linkdiscovery.ILinkDiscoveryListener;
+import net.floodlightcontroller.linkdiscovery.ILinkDiscoveryService;
+import net.floodlightcontroller.packet.BSN;
+import net.floodlightcontroller.packet.Ethernet;
+import net.floodlightcontroller.packet.LLDP;
+import net.floodlightcontroller.restserver.IRestApiService;
+import net.floodlightcontroller.routing.IRoutingService;
+import net.floodlightcontroller.routing.Link;
+import net.floodlightcontroller.routing.Route;
+import net.floodlightcontroller.threadpool.IThreadPoolService;
+import net.floodlightcontroller.topology.web.TopologyWebRoutable;
+
+import org.openflow.protocol.OFMessage;
+import org.openflow.protocol.OFPacketIn;
+import org.openflow.protocol.OFPacketOut;
+import org.openflow.protocol.OFPort;
+import org.openflow.protocol.action.OFAction;
+import org.openflow.protocol.action.OFActionOutput;
+import org.openflow.protocol.OFType;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Topology manager is responsible for maintaining the controller's notion
+ * of the network graph, as well as implementing tools for finding routes 
+ * through the topology.
+ */
+@LogMessageCategory("Network Topology")
+public class TopologyManager implements 
+        IFloodlightModule, ITopologyService, 
+        IRoutingService, ILinkDiscoveryListener,
+        IOFMessageListener, IHAListener {
+
+    protected static Logger log = LoggerFactory.getLogger(TopologyManager.class);
+
+    public static final String CONTEXT_TUNNEL_ENABLED = 
+            "com.bigswitch.floodlight.topologymanager.tunnelEnabled";
+
+    /** 
+     * Set of ports for each switch
+     */
+    protected Map<Long, Set<Short>> switchPorts;
+
+    /**
+     * Set of links organized by node port tuple
+     */
+    protected Map<NodePortTuple, Set<Link>> switchPortLinks;
+
+    /**
+     * Set of direct links
+     */
+    protected Map<NodePortTuple, Set<Link>> directLinks;
+
+    /**
+     * set of links that are broadcast domain links.
+     */
+    protected Map<NodePortTuple, Set<Link>> portBroadcastDomainLinks;
+
+    /**
+     * set of tunnel links
+     */
+    protected Map<NodePortTuple, Set<Link>> tunnelLinks; 
+
+    protected ILinkDiscoveryService linkDiscovery;
+    protected IThreadPoolService threadPool;
+    protected IFloodlightProviderService floodlightProvider;
+    protected IRestApiService restApi;
+
+    // Modules that listen to our updates
+    protected ArrayList<ITopologyListener> topologyAware;
+
+    protected BlockingQueue<LDUpdate> ldUpdates;
+    protected List<LDUpdate> appliedUpdates;
+    
+    // These must be accessed using getCurrentInstance(), not directly
+    protected TopologyInstance currentInstance;
+    protected TopologyInstance currentInstanceWithoutTunnels;
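+
+    /*
+     * Editorial note, not part of the imported source: two instances are kept
+     * so that every ITopologyService query can be answered either with or
+     * without tunnel links taken into account, as selected by the
+     * tunnelEnabled argument passed to getCurrentInstance().
+     */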
+    
+    protected SingletonTask newInstanceTask;
+    private Date lastUpdateTime;
+
+    /**
+     * Flag that indicates if links (direct/tunnel/multihop links) were
+     * updated as part of LDUpdate.
+     */
+    protected boolean linksUpdated;
+    /**
+     * Flag that indicates if direct or tunnel links were updated as
+     * part of LDUpdate.
+     */
+    protected boolean dtLinksUpdated;
+
+    /**
+     * Thread for recomputing topology.  The thread is always running, 
+     * however the function applyUpdates() has a blocking call.
+     */
+    @LogMessageDoc(level="ERROR",
+            message="Error in topology instance task thread",
+            explanation="An unknown error occured in the topology " +
+            		"discovery module.",
+            recommendation=LogMessageDoc.CHECK_CONTROLLER)
+    protected class UpdateTopologyWorker implements Runnable {
+        @Override 
+        public void run() {
+            try {
+                updateTopology();
+            }
+            catch (Exception e) {
+                log.error("Error in topology instance task thread", e);
+            }
+        }
+    }
+
+    public boolean updateTopology() {
+        boolean newInstanceFlag;
+        linksUpdated = false;
+        dtLinksUpdated = false;
+        applyUpdates();
+        newInstanceFlag = createNewInstance();
+        lastUpdateTime = new Date();
+        informListeners();
+        return newInstanceFlag;
+    }
+
+    // **********************
+    // ILinkDiscoveryListener
+    // **********************
+
+    @Override
+    public void linkDiscoveryUpdate(LDUpdate update) {
+        boolean scheduleFlag = false;
+        // if there are no updates in the queue, then
+        // we need to schedule an update.
+        if (ldUpdates.peek() == null)
+            scheduleFlag = true;
+
+        if (log.isTraceEnabled()) {
+            log.trace("Queuing update: {}", update);
+        }
+        ldUpdates.add(update);
+
+        if (scheduleFlag) {
+            newInstanceTask.reschedule(1, TimeUnit.MICROSECONDS);
+        }
+    }
+    
+    // ****************
+    // ITopologyService
+    // ****************
+
+    //
+    // ITopologyService interface methods
+    //
+    @Override
+    public Date getLastUpdateTime() {
+        return lastUpdateTime;
+    }
+
+    @Override
+    public void addListener(ITopologyListener listener) {
+        topologyAware.add(listener);
+    }
+
+    @Override 
+    public boolean isAttachmentPointPort(long switchid, short port) {
+        return isAttachmentPointPort(switchid, port, true);
+    }
+
+    @Override
+    public boolean isAttachmentPointPort(long switchid, short port, 
+                                         boolean tunnelEnabled) {
+        TopologyInstance ti = getCurrentInstance(tunnelEnabled);
+
+        // if the port is not an attachment point port according to
+        // the topology instance, then return false
+        if (ti.isAttachmentPointPort(switchid, port) == false)
+                return false;
+
+        // Check whether the port is a physical port. We should not learn
+        // attachment points on "special" ports.
+        if ((port & 0xff00) == 0xff00 && port != (short)0xfffe) return false;
+
+        // Make sure that the port is enabled.
+        IOFSwitch sw = floodlightProvider.getSwitches().get(switchid);
+        if (sw == null) return false;
+        return (sw.portEnabled(port));
+    }
+
+    public long getOpenflowDomainId(long switchId) {
+        return getOpenflowDomainId(switchId, true);
+    }
+
+    public long getOpenflowDomainId(long switchId, boolean tunnelEnabled) {
+        TopologyInstance ti = getCurrentInstance(tunnelEnabled);
+        return ti.getOpenflowDomainId(switchId);
+    }
+
+    @Override
+    public long getL2DomainId(long switchId) {
+        return getL2DomainId(switchId, true);
+    }
+
+    @Override
+    public long getL2DomainId(long switchId, boolean tunnelEnabled) {
+        TopologyInstance ti = getCurrentInstance(tunnelEnabled);
+        return ti.getL2DomainId(switchId);
+    }
+
+    @Override
+    public boolean inSameOpenflowDomain(long switch1, long switch2) {
+        return inSameOpenflowDomain(switch1, switch2, true);
+    }
+
+    @Override
+    public boolean inSameOpenflowDomain(long switch1, long switch2,
+                                        boolean tunnelEnabled) {
+        TopologyInstance ti = getCurrentInstance(tunnelEnabled);
+        return ti.inSameOpenflowDomain(switch1, switch2);
+    }
+
+    @Override
+    public boolean isAllowed(long sw, short portId) {
+        return isAllowed(sw, portId, true);
+    }
+
+    @Override
+    public boolean isAllowed(long sw, short portId, boolean tunnelEnabled) {
+        TopologyInstance ti = getCurrentInstance(tunnelEnabled);
+        return ti.isAllowed(sw, portId);
+    }
+
+    ////////////////////////////////////////////////////////////////////////
+    ////////////////////////////////////////////////////////////////////////
+    @Override
+    public boolean isIncomingBroadcastAllowed(long sw, short portId) {
+        return isIncomingBroadcastAllowed(sw, portId, true);
+    }
+
+    public boolean isIncomingBroadcastAllowed(long sw, short portId,
+                                              boolean tunnelEnabled) {
+        TopologyInstance ti = getCurrentInstance(tunnelEnabled);
+        return ti.isIncomingBroadcastAllowedOnSwitchPort(sw, portId);
+    }
+
+    ////////////////////////////////////////////////////////////////////////
+    ////////////////////////////////////////////////////////////////////////
+    /** Get all the ports on the switch that are known to topology */
+    @Override
+    public Set<Short> getPortsWithLinks(long sw) {
+        return getPortsWithLinks(sw, true);
+    }
+
+    /** Get all the ports on the switch that are known to topology */
+    @Override
+    public Set<Short> getPortsWithLinks(long sw, boolean tunnelEnabled) {
+        TopologyInstance ti = getCurrentInstance(tunnelEnabled);
+        return ti.getPortsWithLinks(sw);
+    }
+
+    ////////////////////////////////////////////////////////////////////////
+    ////////////////////////////////////////////////////////////////////////
+    /** Get all the ports on the target switch (targetSw) on which a 
+     * broadcast packet must be sent from a host whose attachment point
+     * is on switch port (src, srcPort).
+     */
+    public Set<Short> getBroadcastPorts(long targetSw, 
+                                        long src, short srcPort) {
+        return getBroadcastPorts(targetSw, src, srcPort, true);
+    }
+
+    /** Get all the ports on the target switch (targetSw) on which a 
+     * broadcast packet must be sent from a host whose attachment point
+     * is on switch port (src, srcPort).
+     */
+    public Set<Short> getBroadcastPorts(long targetSw, 
+                                        long src, short srcPort,
+                                        boolean tunnelEnabled) {
+        TopologyInstance ti = getCurrentInstance(tunnelEnabled);
+        return ti.getBroadcastPorts(targetSw, src, srcPort);
+    }
+
+    ////////////////////////////////////////////////////////////////////////
+    ////////////////////////////////////////////////////////////////////////
+    @Override
+    public NodePortTuple getOutgoingSwitchPort(long src, short srcPort,
+                                               long dst, short dstPort) {
+        // Use this function to redirect traffic if needed.
+        return getOutgoingSwitchPort(src, srcPort, dst, dstPort, true);
+    }
+    
+    @Override
+    public NodePortTuple getOutgoingSwitchPort(long src, short srcPort,
+                                               long dst, short dstPort,
+                                               boolean tunnelEnabled) {
+        // Use this function to redirect traffic if needed.
+        TopologyInstance ti = getCurrentInstance(tunnelEnabled);
+        return ti.getOutgoingSwitchPort(src, srcPort,
+                                                     dst, dstPort);
+    }
+
+    ////////////////////////////////////////////////////////////////////////
+    ////////////////////////////////////////////////////////////////////////
+    @Override
+    public NodePortTuple getIncomingSwitchPort(long src, short srcPort,
+                                               long dst, short dstPort) {
+        return getIncomingSwitchPort(src, srcPort, dst, dstPort, true);
+    }
+
+    @Override
+    public NodePortTuple getIncomingSwitchPort(long src, short srcPort,
+                                               long dst, short dstPort,
+                                               boolean tunnelEnabled) {
+        TopologyInstance ti = getCurrentInstance(tunnelEnabled);
+        return ti.getIncomingSwitchPort(src, srcPort,
+                                                     dst, dstPort);
+    }
+
+    ////////////////////////////////////////////////////////////////////////
+    ////////////////////////////////////////////////////////////////////////
+    /**
+     * Checks if the two switchports belong to the same broadcast domain.
+     */
+    @Override
+    public boolean isInSameBroadcastDomain(long s1, short p1, long s2,
+                                           short p2) {
+        return isInSameBroadcastDomain(s1, p1, s2, p2, true);
+
+    }
+
+    @Override
+    public boolean isInSameBroadcastDomain(long s1, short p1,
+                                           long s2, short p2,
+                                           boolean tunnelEnabled) {
+        TopologyInstance ti = getCurrentInstance(tunnelEnabled);
+        return ti.inSameBroadcastDomain(s1, p1, s2, p2);
+
+    }
+
+
+    /**
+     * Checks if the switchport is a broadcast domain port or not.
+     */
+    @Override
+    public boolean isBroadcastDomainPort(long sw, short port) {
+        return isBroadcastDomainPort(sw, port, true);
+    }
+
+    @Override
+    public boolean isBroadcastDomainPort(long sw, short port,
+                                         boolean tunnelEnabled) {
+        TopologyInstance ti = getCurrentInstance(tunnelEnabled);
+        return ti.isBroadcastDomainPort(new NodePortTuple(sw, port));
+    }
+
+
+    /**
+     * Checks if the new attachment point port is consistent with the
+     * old attachment point port.
+     */
+    @Override
+    public boolean isConsistent(long oldSw, short oldPort,
+                                long newSw, short newPort) {
+        return isConsistent(oldSw, oldPort,
+                                            newSw, newPort, true);
+    }
+
+    @Override
+    public boolean isConsistent(long oldSw, short oldPort,
+                                long newSw, short newPort,
+                                boolean tunnelEnabled) {
+        TopologyInstance ti = getCurrentInstance(tunnelEnabled);
+        return ti.isConsistent(oldSw, oldPort, newSw, newPort);
+    }
+
+    ////////////////////////////////////////////////////////////////////////
+    ////////////////////////////////////////////////////////////////////////
+    /**
+     * Checks if the two switches are in the same Layer 2 domain.
+     */
+    @Override
+    public boolean inSameL2Domain(long switch1, long switch2) {
+        return inSameL2Domain(switch1, switch2, true);
+    }
+
+    @Override
+    public boolean inSameL2Domain(long switch1, long switch2,
+                                  boolean tunnelEnabled) {
+        TopologyInstance ti = getCurrentInstance(tunnelEnabled);
+        return ti.inSameL2Domain(switch1, switch2);
+    }
+
+    ////////////////////////////////////////////////////////////////////////
+    ////////////////////////////////////////////////////////////////////////
+    @Override
+    public NodePortTuple getAllowedOutgoingBroadcastPort(long src,
+                                                         short srcPort,
+                                                         long dst,
+                                                         short dstPort) {
+        return getAllowedOutgoingBroadcastPort(src, srcPort,
+                                               dst, dstPort, true);
+    }
+
+    @Override
+    public NodePortTuple getAllowedOutgoingBroadcastPort(long src,
+                                                         short srcPort,
+                                                         long dst,
+                                                         short dstPort,
+                                                         boolean tunnelEnabled){
+        TopologyInstance ti = getCurrentInstance(tunnelEnabled);
+        return ti.getAllowedOutgoingBroadcastPort(src, srcPort,
+                                                  dst, dstPort);
+    }
+    ////////////////////////////////////////////////////////////////////////
+    ////////////////////////////////////////////////////////////////////////
+    @Override
+    public NodePortTuple 
+    getAllowedIncomingBroadcastPort(long src, short srcPort) {
+        return getAllowedIncomingBroadcastPort(src,srcPort, true);
+    }
+
+    @Override
+    public NodePortTuple 
+    getAllowedIncomingBroadcastPort(long src, short srcPort,
+                                    boolean tunnelEnabled) {
+        TopologyInstance ti = getCurrentInstance(tunnelEnabled);
+        return ti.getAllowedIncomingBroadcastPort(src,srcPort);
+    }
+
+    @Override
+    public Set<NodePortTuple> getBroadcastDomainPorts() {
+        return portBroadcastDomainLinks.keySet();
+    }
+
+    @Override
+    public Set<NodePortTuple> getTunnelPorts() {
+        return tunnelLinks.keySet();
+    }
+
+    @Override
+    public Set<NodePortTuple> getBlockedPorts() {
+        Set<NodePortTuple> bp;
+        Set<NodePortTuple> blockedPorts =
+                new HashSet<NodePortTuple>();
+
+        // As we might have two topologies, simply get the union of
+        // both of them and send it.
+        bp = getCurrentInstance(true).getBlockedPorts();
+        if (bp != null)
+            blockedPorts.addAll(bp);
+
+        bp = getCurrentInstance(false).getBlockedPorts();
+        if (bp != null)
+            blockedPorts.addAll(bp);
+
+        return blockedPorts;
+    }
+
+    @Override
+    public List<LDUpdate> getLastLinkUpdates() {
+    	return appliedUpdates;
+    }
+    ////////////////////////////////////////////////////////////////////////
+    ////////////////////////////////////////////////////////////////////////
+
+    // ***************
+    // IRoutingService
+    // ***************
+
+    @Override
+    public Route getRoute(long src, long dst) {
+        return getRoute(src, dst, true);
+    }
+
+    @Override
+    public Route getRoute(long src, long dst, boolean tunnelEnabled) {
+        TopologyInstance ti = getCurrentInstance(tunnelEnabled);
+        return ti.getRoute(src, dst);
+    }
+
+    @Override
+    public Route getRoute(long src, short srcPort, long dst, short dstPort) {
+        return getRoute(src, srcPort, dst, dstPort, true);
+    }
+
+    @Override
+    public Route getRoute(long src, short srcPort, long dst, short dstPort, 
+                          boolean tunnelEnabled) {
+        TopologyInstance ti = getCurrentInstance(tunnelEnabled);
+        return ti.getRoute(src, srcPort, dst, dstPort);
+    }
+
+    @Override
+    public boolean routeExists(long src, long dst) {
+        return routeExists(src, dst, true);
+    }
+
+    @Override
+    public boolean routeExists(long src, long dst, boolean tunnelEnabled) {
+        TopologyInstance ti = getCurrentInstance(tunnelEnabled);
+        return ti.routeExists(src, dst);
+    }
+
+
+    // ******************
+    // IOFMessageListener
+    // ******************
+
+    @Override
+    public String getName() {
+        return "topology";
+    }
+
+    @Override
+    public boolean isCallbackOrderingPrereq(OFType type, String name) {
+        return "linkdiscovery".equals(name);
+    }
+
+    @Override
+    public boolean isCallbackOrderingPostreq(OFType type, String name) {
+        return false;
+    }
+
+    @Override
+    public Command receive(IOFSwitch sw, OFMessage msg,
+                           FloodlightContext cntx) {
+        switch (msg.getType()) {
+            case PACKET_IN:
+                return this.processPacketInMessage(sw, 
+                                                   (OFPacketIn) msg, cntx);
+            default:
+            	break;
+        }
+
+        return Command.CONTINUE;
+    }
+
+    // ***************
+    // IHAListener
+    // ***************
+
+    @Override
+    public void roleChanged(Role oldRole, Role newRole) {
+        switch(newRole) {
+            case MASTER:
+                if (oldRole == Role.SLAVE) {
+                    log.debug("Re-computing topology due " +
+                            "to HA change from SLAVE->MASTER");
+                    newInstanceTask.reschedule(1, TimeUnit.MILLISECONDS);
+                }
+                break;
+            case SLAVE:
+                log.debug("Clearing topology due to " +
+                        "HA change to SLAVE");
+                clearCurrentTopology();
+                break;
+            default:
+            	break;
+        }
+    }
+
+    @Override
+    public void controllerNodeIPsChanged(
+                          Map<String, String> curControllerNodeIPs,
+                          Map<String, String> addedControllerNodeIPs,
+                          Map<String, String> removedControllerNodeIPs) {
+        // no-op
+    }
+
+    // *****************
+    // IFloodlightModule
+    // *****************
+
+    @Override
+    public Collection<Class<? extends IFloodlightService>> getModuleServices() {
+        Collection<Class<? extends IFloodlightService>> l = 
+                new ArrayList<Class<? extends IFloodlightService>>();
+        l.add(ITopologyService.class);
+        l.add(IRoutingService.class);
+        return l;
+    }
+
+    @Override
+    public Map<Class<? extends IFloodlightService>, IFloodlightService>
+            getServiceImpls() {
+        Map<Class<? extends IFloodlightService>,
+        IFloodlightService> m = 
+            new HashMap<Class<? extends IFloodlightService>,
+                IFloodlightService>();
+        // We are the class that implements the service
+        m.put(ITopologyService.class, this);
+        m.put(IRoutingService.class, this);
+        return m;
+    }
+
+    @Override
+    public Collection<Class<? extends IFloodlightService>> 
+            getModuleDependencies() {
+        Collection<Class<? extends IFloodlightService>> l = 
+                new ArrayList<Class<? extends IFloodlightService>>();
+        l.add(ILinkDiscoveryService.class);
+        l.add(IThreadPoolService.class);
+        l.add(IFloodlightProviderService.class);
+        l.add(ICounterStoreService.class);
+        l.add(IRestApiService.class);
+        return l;
+    }
+
+    @Override
+    public void init(FloodlightModuleContext context)
+            throws FloodlightModuleException {
+        linkDiscovery = context.getServiceImpl(ILinkDiscoveryService.class);
+        threadPool = context.getServiceImpl(IThreadPoolService.class);
+        floodlightProvider = 
+                context.getServiceImpl(IFloodlightProviderService.class);
+        restApi = context.getServiceImpl(IRestApiService.class);
+
+        switchPorts = new HashMap<Long,Set<Short>>();
+        switchPortLinks = new HashMap<NodePortTuple, Set<Link>>();
+        directLinks = new HashMap<NodePortTuple, Set<Link>>();
+        portBroadcastDomainLinks = new HashMap<NodePortTuple, Set<Link>>();
+        tunnelLinks = new HashMap<NodePortTuple, Set<Link>>();
+        topologyAware = new ArrayList<ITopologyListener>();
+        ldUpdates = new LinkedBlockingQueue<LDUpdate>();
+        appliedUpdates = new ArrayList<LDUpdate>();
+        clearCurrentTopology();
+    }
+
+    @Override
+    public void startUp(FloodlightModuleContext context) {
+        ScheduledExecutorService ses = threadPool.getScheduledExecutor();
+        newInstanceTask = new SingletonTask(ses, new UpdateTopologyWorker());
+        linkDiscovery.addListener(this);
+        floodlightProvider.addOFMessageListener(OFType.PACKET_IN, this);
+        floodlightProvider.addHAListener(this);
+        addRestletRoutable();
+    }
+
+    protected void addRestletRoutable() {
+        restApi.addRestletRoutable(new TopologyWebRoutable());
+    }
+
+    // ****************
+    // Internal methods
+    // ****************
+    /**
+     * If the packet-in switch port is disabled for all data traffic, then
+     * the packet will be dropped.  Otherwise, the packet will follow the
+     * normal processing chain.
+     * @param sw
+     * @param pi
+     * @param cntx
+     * @return Command.STOP if the packet must be dropped, Command.CONTINUE otherwise
+     */
+    protected Command dropFilter(long sw, OFPacketIn pi,
+                                             FloodlightContext cntx) {
+        Command result = Command.CONTINUE;
+        short port = pi.getInPort();
+
+        // If the input port is not allowed for data traffic, drop everything.
+        // BDDP packets will not reach this stage.
+        if (isAllowed(sw, port) == false) {
+            if (log.isTraceEnabled()) {
+                log.trace("Ignoring packet because of topology " +
+                        "restriction on switch={}, port={}", sw, port);
+            }
+            result = Command.STOP;
+        }
+
+        // if sufficient information is available, then drop broadcast
+        // packets here as well.
+        return result;
+    }
+
+    /** 
+     * TODO This method must be moved to a layer below forwarding
+     * so that anyone can use it.
+     * @param packetData
+     * @param sw
+     * @param ports
+     * @param cntx
+     */
+    @LogMessageDoc(level="ERROR",
+            message="Failure writing packet out",
+            explanation="An I/O error occurred while trying to send a " +
+                    "packet out on the switch",
+            recommendation=LogMessageDoc.CHECK_SWITCH)
+    public void doMultiActionPacketOut(byte[] packetData, IOFSwitch sw, 
+                                       Set<Short> ports,
+                                       FloodlightContext cntx) {
+
+        if (ports == null) return;
+        if (packetData == null || packetData.length <= 0) return;
+
+        OFPacketOut po = 
+                (OFPacketOut) floodlightProvider.getOFMessageFactory().
+                getMessage(OFType.PACKET_OUT);
+
+        List<OFAction> actions = new ArrayList<OFAction>();
+        for(short p: ports) {
+            actions.add(new OFActionOutput(p, (short) 0));
+        }
+
+        // set actions
+        po.setActions(actions);
+        // set action length
+        po.setActionsLength((short) (OFActionOutput.MINIMUM_LENGTH * 
+                ports.size()));
+        // set buffer-id to BUFFER_ID_NONE
+        po.setBufferId(OFPacketOut.BUFFER_ID_NONE);
+        // set in-port to OFPP_NONE
+        po.setInPort(OFPort.OFPP_NONE.getValue());
+
+        // set packet data
+        po.setPacketData(packetData);
+
+        // compute and set packet length.
+        short poLength = (short)(OFPacketOut.MINIMUM_LENGTH + 
+                po.getActionsLength() + 
+                packetData.length);
+
+        po.setLength(poLength);
+
+        try {
+            //counterStore.updatePktOutFMCounterStore(sw, po);
+            if (log.isTraceEnabled()) {
+                log.trace("write broadcast packet on switch-id={} " + 
+                        "interfaces={} packet-data={} packet-out={}",
+                        new Object[] {sw.getId(), ports, packetData, po});
+            }
+            sw.write(po, cntx);
+
+        } catch (IOException e) {
+            log.error("Failure writing packet out", e);
+        }
+    }
+
+
+    /**
+     * BDDP packets are flooded out of all the external ports of an
+     * OpenFlow domain.  Get all the switches in the same OpenFlow
+     * domain as the packet-in switch (with tunnels disabled), then
+     * send the packet out of every external switch port.
+     * @param pinSwitch
+     * @param pi
+     * @param cntx
+     */
+    protected void doFloodBDDP(long pinSwitch, OFPacketIn pi, 
+                               FloodlightContext cntx) {
+
+        TopologyInstance ti = getCurrentInstance(false);
+
+        Set<Long> switches = ti.getSwitchesInOpenflowDomain(pinSwitch);
+
+        if (switches == null)
+        {
+            // indicates no links are connected to the switches
+            switches = new HashSet<Long>();
+            switches.add(pinSwitch);
+        }
+
+        for(long sid: switches) {
+            IOFSwitch sw = floodlightProvider.getSwitches().get(sid);
+            if (sw == null) continue;
+            Collection<Short> enabledPorts = sw.getEnabledPortNumbers();
+            if (enabledPorts == null)
+                continue;
+            Set<Short> ports = new HashSet<Short>();
+            ports.addAll(enabledPorts);
+
+            // All the ports known to topology (tunnels excluded).  Out of
+            // these, keep only the broadcast domain ports in the flood set;
+            // the rest are eliminated.
+            Set<Short> portsKnownToTopo = ti.getPortsWithLinks(sid);
+
+            if (portsKnownToTopo != null) {
+                for(short p: portsKnownToTopo) {
+                    NodePortTuple npt = 
+                            new NodePortTuple(sid, p);
+                    if (ti.isBroadcastDomainPort(npt) == false) {
+                        ports.remove(p);
+                    }
+                }
+            }
+
+            // remove the incoming switch port
+            if (pinSwitch == sid) {
+                ports.remove(pi.getInPort());
+            }
+
+            // we have all the switch ports to which we need to broadcast.
+            doMultiActionPacketOut(pi.getPacketData(), sw, ports, cntx);
+        }
+
+    }
+
+    protected Command processPacketInMessage(IOFSwitch sw, OFPacketIn pi, 
+                                             FloodlightContext cntx) {
+
+        // Get the packet-in payload (the parsed Ethernet frame) from the context.
+        Ethernet eth = 
+                IFloodlightProviderService.bcStore.
+                get(cntx,IFloodlightProviderService.CONTEXT_PI_PAYLOAD);
+
+        if (eth.getEtherType() == Ethernet.TYPE_BSN) {
+            BSN bsn = (BSN) eth.getPayload();
+            if (bsn == null) return Command.STOP;
+            if (bsn.getPayload() == null) return Command.STOP;
+
+            // It could be a packet other than BSN LLDP, therefore
+            // continue with the regular processing.
+            if (bsn.getPayload() instanceof LLDP == false)
+                return Command.CONTINUE;
+
+            doFloodBDDP(sw.getId(), pi, cntx);
+        } else {
+            return dropFilter(sw.getId(), pi, cntx);
+        }
+        return Command.STOP;
+    }
+
+
+    /**
+     * Updates concerning switch disconnect and port down are not processed
+     * here.  LinkDiscoveryManager is expected to translate those events into
+     * individual link-removed updates.  All updates received from
+     * LinkDiscoveryManager, however, are propagated to the topology listeners.
+     */
+    @LogMessageDoc(level="ERROR",
+            message="Error reading link discovery update.",
+            explanation="Unable to process link discovery update",
+            recommendation=LogMessageDoc.REPORT_CONTROLLER_BUG)
+    public void applyUpdates() {
+        appliedUpdates.clear();
+        LDUpdate update = null;
+        while (ldUpdates.peek() != null) {
+            try {
+                update = ldUpdates.take();
+            } catch (Exception e) {
+                log.error("Error reading link discovery update.", e);
+            }
+            if (log.isTraceEnabled()) {
+                log.trace("Applying update: {}", update);
+            }
+            if (update.getOperation() == UpdateOperation.LINK_UPDATED) {
+                addOrUpdateLink(update.getSrc(), update.getSrcPort(),
+                                update.getDst(), update.getDstPort(),
+                                update.getType());
+            } else if (update.getOperation() == UpdateOperation.LINK_REMOVED){
+                removeLink(update.getSrc(), update.getSrcPort(), 
+                           update.getDst(), update.getDstPort());
+            }
+            // Add to the list of applied updates.
+            appliedUpdates.add(update);
+        }
+    }
+
+    /**
+     * Computes a new topology instance.  It ignores links connected to
+     * broadcast domain ports and tunnel ports.  Returns whether a new
+     * topology instance was created.
+     */
+    protected boolean createNewInstance() {
+        Set<NodePortTuple> blockedPorts = new HashSet<NodePortTuple>();
+
+        if (!linksUpdated) return false;
+
+        Map<NodePortTuple, Set<Link>> openflowLinks;
+        openflowLinks = 
+                new HashMap<NodePortTuple, Set<Link>>(switchPortLinks);
+
+        // Remove all tunnel links.
+        for(NodePortTuple npt: tunnelLinks.keySet()) {
+            if (openflowLinks.get(npt) != null)
+                openflowLinks.remove(npt);
+        }
+
+        // Remove all broadcast domain links.
+        for(NodePortTuple npt: portBroadcastDomainLinks.keySet()) {
+            if (openflowLinks.get(npt) != null)
+                openflowLinks.remove(npt);
+        }
+
+        TopologyInstance nt = new TopologyInstance(switchPorts, 
+                                                   blockedPorts,
+                                                   openflowLinks, 
+                                                   portBroadcastDomainLinks.keySet(), 
+                                                   tunnelLinks.keySet());
+        nt.compute();
+        // We set the instances with and without tunnels to be identical.
+        // If needed, we may compute them differently.
+        currentInstance = nt;
+        currentInstanceWithoutTunnels = nt;
+        return true;
+    }
+
+
+    public void informListeners() {
+        for(int i=0; i<topologyAware.size(); ++i) {
+            ITopologyListener listener = topologyAware.get(i);
+            listener.topologyChanged();
+        }
+    }
+
+    public void addSwitch(long sid) {
+        if (switchPorts.containsKey(sid) == false) {
+            switchPorts.put(sid, new HashSet<Short>());
+        }
+    }
+
+    private void addPortToSwitch(long s, short p) {
+        addSwitch(s);
+        switchPorts.get(s).add(p);
+    }
+
+    public boolean removeSwitchPort(long sw, short port) {
+
+        Set<Link> linksToRemove = new HashSet<Link>();
+        NodePortTuple npt = new NodePortTuple(sw, port);
+        if (switchPortLinks.containsKey(npt) == false) return false;
+
+        linksToRemove.addAll(switchPortLinks.get(npt));
+        for(Link link: linksToRemove) {
+            removeLink(link);
+        }
+        return true;
+    }
+
+    public boolean removeSwitch(long sid) {
+        // Delete all the links in the switch, switch and all 
+        // associated data should be deleted.
+        if (switchPorts.containsKey(sid) == false) return false;
+
+        Set<Link> linksToRemove = new HashSet<Link>();
+        for(Short p: switchPorts.get(sid)) {
+            NodePortTuple n1 = new NodePortTuple(sid, p);
+            linksToRemove.addAll(switchPortLinks.get(n1));
+        }
+
+        if (linksToRemove.isEmpty()) return false;
+
+        for(Link link: linksToRemove) {
+            removeLink(link);
+        }
+        return true;
+    }
+
+    /**
+     * Add the given link to the data structure.  Returns true if a link was
+     * added.
+     * @param s
+     * @param l
+     * @return
+     */
+    private boolean addLinkToStructure(Map<NodePortTuple, 
+                                       Set<Link>> s, Link l) {
+        boolean result1 = false, result2 = false; 
+
+        NodePortTuple n1 = new NodePortTuple(l.getSrc(), l.getSrcPort());
+        NodePortTuple n2 = new NodePortTuple(l.getDst(), l.getDstPort());
+
+        if (s.get(n1) == null) {
+            s.put(n1, new HashSet<Link>());
+        }
+        if (s.get(n2) == null) {
+            s.put(n2, new HashSet<Link>());
+        }
+        result1 = s.get(n1).add(l);
+        result2 = s.get(n2).add(l);
+
+        return (result1 || result2);
+    }
+
+    /**
+     * Delete the given link from the data structure.  Returns true if the
+     * link was deleted.
+     * @param s
+     * @param l
+     * @return
+     */
+    private boolean removeLinkFromStructure(Map<NodePortTuple, 
+                                            Set<Link>> s, Link l) {
+
+        boolean result1 = false, result2 = false;
+        NodePortTuple n1 = new NodePortTuple(l.getSrc(), l.getSrcPort());
+        NodePortTuple n2 = new NodePortTuple(l.getDst(), l.getDstPort());
+
+        if (s.get(n1) != null) {
+            result1 = s.get(n1).remove(l);
+            if (s.get(n1).isEmpty()) s.remove(n1);
+        }
+        if (s.get(n2) != null) {
+            result2 = s.get(n2).remove(l);
+            if (s.get(n2).isEmpty()) s.remove(n2);
+        }
+        return result1 || result2;
+    }
+
+    public void addOrUpdateLink(long srcId, short srcPort, long dstId, 
+                                short dstPort, LinkType type) {
+        boolean flag1 = false, flag2 = false;
+
+        Link link = new Link(srcId, srcPort, dstId, dstPort);
+        addPortToSwitch(srcId, srcPort);
+        addPortToSwitch(dstId, dstPort);
+
+        addLinkToStructure(switchPortLinks, link);
+
+        if (type.equals(LinkType.MULTIHOP_LINK)) {
+            addLinkToStructure(portBroadcastDomainLinks, link);
+            flag1 = removeLinkFromStructure(tunnelLinks, link);
+            flag2 = removeLinkFromStructure(directLinks, link);
+            dtLinksUpdated = flag1 || flag2;
+        } else if (type.equals(LinkType.TUNNEL)) {
+            addLinkToStructure(tunnelLinks, link);
+            removeLinkFromStructure(portBroadcastDomainLinks, link);
+            removeLinkFromStructure(directLinks, link);
+            dtLinksUpdated = true;
+        } else if (type.equals(LinkType.DIRECT_LINK)) {
+            addLinkToStructure(directLinks, link);
+            removeLinkFromStructure(tunnelLinks, link);
+            removeLinkFromStructure(portBroadcastDomainLinks, link);
+            dtLinksUpdated = true;
+        }
+        linksUpdated = true;
+    }
+
+    public void removeLink(Link link)  {
+        boolean flag1 = false, flag2 = false;
+
+        flag1 = removeLinkFromStructure(directLinks, link);
+        flag2 = removeLinkFromStructure(tunnelLinks, link);
+
+        linksUpdated = true;
+        dtLinksUpdated = flag1 || flag2;
+
+        removeLinkFromStructure(portBroadcastDomainLinks, link);
+        removeLinkFromStructure(switchPortLinks, link);
+
+        NodePortTuple srcNpt = 
+                new NodePortTuple(link.getSrc(), link.getSrcPort());
+        NodePortTuple dstNpt = 
+                new NodePortTuple(link.getDst(), link.getDstPort());
+
+        // Remove switch ports if there are no links through those switch ports
+        if (switchPortLinks.get(srcNpt) == null) {
+            if (switchPorts.get(srcNpt.getNodeId()) != null)
+                switchPorts.get(srcNpt.getNodeId()).remove(srcNpt.getPortId());
+        }
+        if (switchPortLinks.get(dstNpt) == null) {
+            if (switchPorts.get(dstNpt.getNodeId()) != null)
+                switchPorts.get(dstNpt.getNodeId()).remove(dstNpt.getPortId());
+        }
+
+        // Remove the node if no ports are present
+        if (switchPorts.get(srcNpt.getNodeId())!=null && 
+                switchPorts.get(srcNpt.getNodeId()).isEmpty()) {
+            switchPorts.remove(srcNpt.getNodeId());
+        }
+        if (switchPorts.get(dstNpt.getNodeId())!=null && 
+                switchPorts.get(dstNpt.getNodeId()).isEmpty()) {
+            switchPorts.remove(dstNpt.getNodeId());
+        }
+    }
+
+    public void removeLink(long srcId, short srcPort,
+                           long dstId, short dstPort) {
+        Link link = new Link(srcId, srcPort, dstId, dstPort);
+        removeLink(link);
+    }
+
+    public void clear() {
+        switchPorts.clear();
+        switchPortLinks.clear();
+        portBroadcastDomainLinks.clear();
+        tunnelLinks.clear();
+        directLinks.clear();
+        appliedUpdates.clear();
+    }
+
+    /**
+    * Clears the current topology. Note that this does NOT
+    * send out updates.
+    */
+    public void clearCurrentTopology() {
+        this.clear();
+        linksUpdated = true;
+        dtLinksUpdated = true;
+        createNewInstance();
+        lastUpdateTime = new Date();
+    }
+
+    /**
+     * Getters.  No Setters.
+     */
+    public Map<Long, Set<Short>> getSwitchPorts() {
+        return switchPorts;
+    }
+
+    public Map<NodePortTuple, Set<Link>> getSwitchPortLinks() {
+        return switchPortLinks;
+    }
+
+    public Map<NodePortTuple, Set<Link>> getPortBroadcastDomainLinks() {
+        return portBroadcastDomainLinks;
+    }
+
+    public TopologyInstance getCurrentInstance(boolean tunnelEnabled) {
+        if (tunnelEnabled)
+            return currentInstance;
+        else return this.currentInstanceWithoutTunnels;
+    }
+
+    public TopologyInstance getCurrentInstance() {
+        return this.getCurrentInstance(true);
+    }
+
+    /**
+     *  Switch methods
+     */
+    public Set<Short> getPorts(long sw) {
+        Set<Short> ports = new HashSet<Short>();
+        IOFSwitch iofSwitch = floodlightProvider.getSwitches().get(sw);
+        if (iofSwitch == null) return null;
+
+        Collection<Short> ofpList = iofSwitch.getEnabledPortNumbers();
+        if (ofpList == null) return null;
+
+        Set<Short> qPorts = linkDiscovery.getQuarantinedPorts(sw);
+        if (qPorts != null)
+            ofpList.removeAll(qPorts);
+
+        ports.addAll(ofpList);
+        return ports;
+    }
+}
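The TopologyManager above registers both ITopologyService and IRoutingService. A minimal sketch (not part of the imported source) of how a dependent module might consume those services once it has obtained the references from a FloodlightModuleContext; the class and method names below are hypothetical, and only methods shown in this file are used:

import net.floodlightcontroller.routing.IRoutingService;
import net.floodlightcontroller.routing.Route;
import net.floodlightcontroller.topology.ITopologyService;
import net.floodlightcontroller.topology.NodePortTuple;

// Hypothetical consumer of the services registered by TopologyManager.
public class TopologyQueryExample {
    private final ITopologyService topology;
    private final IRoutingService routingEngine;

    public TopologyQueryExample(ITopologyService topology,
                                IRoutingService routingEngine) {
        this.topology = topology;
        this.routingEngine = routingEngine;
    }

    // Prints the switch/port hops between two switches, if a route exists.
    // getRoute(src, dst) uses the tunnel-enabled topology instance.
    public void printRoute(long srcDpid, long dstDpid) {
        Route route = routingEngine.getRoute(srcDpid, dstDpid);
        if (route == null) return;
        for (NodePortTuple npt : route.getPath()) {
            System.out.println(npt);
        }
    }

    // True if the switch port faces a broadcast domain.
    public boolean facesBroadcastDomain(long dpid, short port) {
        return topology.isBroadcastDomainPort(dpid, port);
    }
}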
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/topology/web/BlockedPortsResource.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/topology/web/BlockedPortsResource.java
new file mode 100644
index 0000000..dc4ac61
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/topology/web/BlockedPortsResource.java
@@ -0,0 +1,20 @@
+package net.floodlightcontroller.topology.web;
+
+import java.util.Set;
+
+import net.floodlightcontroller.topology.ITopologyService;
+import net.floodlightcontroller.topology.NodePortTuple;
+
+import org.restlet.resource.Get;
+import org.restlet.resource.ServerResource;
+
+public class BlockedPortsResource extends ServerResource {
+    @Get("json")
+    public Set<NodePortTuple> retrieve() {
+        ITopologyService topology = 
+                (ITopologyService)getContext().getAttributes().
+                    get(ITopologyService.class.getCanonicalName());
+        
+        return topology.getBlockedPorts();
+    }
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/topology/web/BroadcastDomainPortsResource.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/topology/web/BroadcastDomainPortsResource.java
new file mode 100644
index 0000000..61b4338
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/topology/web/BroadcastDomainPortsResource.java
@@ -0,0 +1,20 @@
+package net.floodlightcontroller.topology.web;
+
+import java.util.Set;
+
+import net.floodlightcontroller.topology.ITopologyService;
+import net.floodlightcontroller.topology.NodePortTuple;
+
+import org.restlet.resource.Get;
+import org.restlet.resource.ServerResource;
+
+public class BroadcastDomainPortsResource extends ServerResource {
+    @Get("json")
+    public Set<NodePortTuple> retrieve() {
+        ITopologyService topology = 
+                (ITopologyService)getContext().getAttributes().
+                    get(ITopologyService.class.getCanonicalName());
+        
+        return topology.getBroadcastDomainPorts();
+    }
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/topology/web/EnabledPortsResource.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/topology/web/EnabledPortsResource.java
new file mode 100644
index 0000000..aa75321
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/topology/web/EnabledPortsResource.java
@@ -0,0 +1,42 @@
+package net.floodlightcontroller.topology.web;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Set;
+
+import net.floodlightcontroller.core.IFloodlightProviderService;
+import net.floodlightcontroller.topology.ITopologyService;
+import net.floodlightcontroller.topology.NodePortTuple;
+
+import org.restlet.resource.Get;
+import org.restlet.resource.ServerResource;
+
+public class EnabledPortsResource extends ServerResource {
+    @Get("json")
+    public List<NodePortTuple> retrieve() {
+        List<NodePortTuple> result = new ArrayList<NodePortTuple>();
+
+        IFloodlightProviderService floodlightProvider =
+                (IFloodlightProviderService)getContext().getAttributes().
+                get(IFloodlightProviderService.class.getCanonicalName());
+
+        ITopologyService topology= 
+                (ITopologyService)getContext().getAttributes().
+                get(ITopologyService.class.getCanonicalName());
+
+        if (floodlightProvider == null || topology == null)
+            return result;
+
+        Set<Long> switches = floodlightProvider.getSwitches().keySet();
+        if (switches == null) return result;
+
+        for(long sw: switches) {
+            Set<Short> ports = topology.getPorts(sw);
+            if (ports == null) continue;
+            for(short p: ports) {
+                result.add(new NodePortTuple(sw, p));
+            }
+        }
+        return result;
+    }
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/topology/web/RouteResource.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/topology/web/RouteResource.java
new file mode 100644
index 0000000..70e406f
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/topology/web/RouteResource.java
@@ -0,0 +1,47 @@
+package net.floodlightcontroller.topology.web;
+
+import java.util.List;
+
+import net.floodlightcontroller.routing.IRoutingService;
+import net.floodlightcontroller.routing.Route;
+import net.floodlightcontroller.topology.NodePortTuple;
+
+import org.openflow.util.HexString;
+import org.restlet.resource.Get;
+import org.restlet.resource.ServerResource;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class RouteResource extends ServerResource {
+
+    protected static Logger log = LoggerFactory.getLogger(RouteResource.class);
+
+    @Get("json")
+    public List<NodePortTuple> retrieve() {
+        IRoutingService routing = 
+                (IRoutingService)getContext().getAttributes().
+                    get(IRoutingService.class.getCanonicalName());
+        
+        String srcDpid = (String) getRequestAttributes().get("src-dpid");
+        String srcPort = (String) getRequestAttributes().get("src-port");
+        String dstDpid = (String) getRequestAttributes().get("dst-dpid");
+        String dstPort = (String) getRequestAttributes().get("dst-port");
+
+        log.debug( srcDpid + "--" + srcPort + "--" + dstDpid + "--" + dstPort);
+
+        long longSrcDpid = HexString.toLong(srcDpid);
+        short shortSrcPort = Short.parseShort(srcPort);
+        long longDstDpid = HexString.toLong(dstDpid);
+        short shortDstPort = Short.parseShort(dstPort);
+        
+        Route result = routing.getRoute(longSrcDpid, shortSrcPort, longDstDpid, shortDstPort);
+        
+        if (result != null) {
+            return result.getPath();
+        }
+        else {
+            log.debug("ERROR! no route found");
+            return null;
+        }
+    }
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/topology/web/SwitchClustersResource.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/topology/web/SwitchClustersResource.java
new file mode 100644
index 0000000..f52d27a
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/topology/web/SwitchClustersResource.java
@@ -0,0 +1,72 @@
+/**
+*    Copyright 2011, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.topology.web;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+
+import net.floodlightcontroller.core.IFloodlightProviderService;
+import net.floodlightcontroller.core.IOFSwitch;
+import net.floodlightcontroller.topology.ITopologyService;
+
+import org.openflow.util.HexString;
+import org.restlet.data.Form;
+import org.restlet.resource.Get;
+import org.restlet.resource.ServerResource;
+
+/**
+ * Returns a JSON map of <ClusterId, List<SwitchDpids>>
+ */
+public class SwitchClustersResource extends ServerResource {
+    @Get("json")
+    public Map<String, List<String>> retrieve() {
+        IFloodlightProviderService floodlightProvider = 
+                (IFloodlightProviderService)getContext().getAttributes().
+                    get(IFloodlightProviderService.class.getCanonicalName());
+        ITopologyService topology = 
+                (ITopologyService)getContext().getAttributes().
+                    get(ITopologyService.class.getCanonicalName());
+
+        Form form = getQuery();
+        String queryType = form.getFirstValue("type", true);
+        boolean openflowDomain = true;
+        if (queryType != null && "l2".equals(queryType)) {
+            openflowDomain = false;
+        }
+        
+        Map<String, List<String>> switchClusterMap = new HashMap<String, List<String>>();
+        for (Entry<Long, IOFSwitch> entry : floodlightProvider.getSwitches().entrySet()) {
+            Long clusterDpid = 
+                    (openflowDomain
+                     ? topology.getOpenflowDomainId(entry.getValue().getId())
+                     :topology.getL2DomainId(entry.getValue().getId()));
+            List<String> switchesInCluster = switchClusterMap.get(HexString.toHexString(clusterDpid));
+            if (switchesInCluster != null) {
+                switchesInCluster.add(HexString.toHexString(entry.getKey()));              
+            } else {
+                List<String> l = new ArrayList<String>();
+                l.add(HexString.toHexString(entry.getKey()));
+                switchClusterMap.put(HexString.toHexString(clusterDpid), l);
+            }
+        }
+        return switchClusterMap;
+    }
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/topology/web/TopologyWebRoutable.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/topology/web/TopologyWebRoutable.java
new file mode 100644
index 0000000..7989413
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/topology/web/TopologyWebRoutable.java
@@ -0,0 +1,33 @@
+package net.floodlightcontroller.topology.web;
+
+import org.restlet.Context;
+import org.restlet.routing.Router;
+
+import net.floodlightcontroller.linkdiscovery.web.LinksResource;
+import net.floodlightcontroller.restserver.RestletRoutable;
+
+public class TopologyWebRoutable implements RestletRoutable {
+    /**
+     * Create the Restlet router and bind to the proper resources.
+     */
+    @Override
+    public Router getRestlet(Context context) {
+        Router router = new Router(context);
+        router.attach("/links/json", LinksResource.class);
+        router.attach("/tunnellinks/json", TunnelLinksResource.class);
+        router.attach("/switchclusters/json", SwitchClustersResource.class);
+        router.attach("/broadcastdomainports/json", BroadcastDomainPortsResource.class);
+        router.attach("/enabledports/json", EnabledPortsResource.class);
+        router.attach("/blockedports/json", BlockedPortsResource.class);
+        router.attach("/route/{src-dpid}/{src-port}/{dst-dpid}/{dst-port}/json", RouteResource.class);
+        return router;
+    }
+
+    /**
+     * Set the base path for the Topology
+     */
+    @Override
+    public String basePath() {
+        return "/wm/topology";
+    }
+}
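With basePath() returning /wm/topology, the resources attached above are served at URLs such as /wm/topology/blockedports/json and /wm/topology/route/{src-dpid}/{src-port}/{dst-dpid}/{dst-port}/json. A rough client-side sketch of querying the route endpoint, assuming the controller's REST API listens on localhost:8080 (the default); the host, port, and DPID values are placeholders:

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;

// Hypothetical client for the route endpoint exposed by TopologyWebRoutable.
public class RouteQueryExample {
    public static void main(String[] args) throws Exception {
        // DPIDs are colon-separated hex strings, as parsed by HexString.toLong
        // in RouteResource; ports are plain decimal numbers.
        String url = "http://localhost:8080/wm/topology/route/"
                + "00:00:00:00:00:00:00:01/1/"
                + "00:00:00:00:00:00:00:02/1/json";
        HttpURLConnection conn =
                (HttpURLConnection) new URL(url).openConnection();
        BufferedReader in = new BufferedReader(
                new InputStreamReader(conn.getInputStream()));
        String line;
        while ((line = in.readLine()) != null) {
            System.out.println(line); // JSON list of NodePortTuple hops
        }
        in.close();
    }
}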
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/topology/web/TunnelLinksResource.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/topology/web/TunnelLinksResource.java
new file mode 100644
index 0000000..71c3f12
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/topology/web/TunnelLinksResource.java
@@ -0,0 +1,20 @@
+package net.floodlightcontroller.topology.web;
+
+import java.util.Set;
+
+import net.floodlightcontroller.topology.ITopologyService;
+import net.floodlightcontroller.topology.NodePortTuple;
+
+import org.restlet.resource.Get;
+import org.restlet.resource.ServerResource;
+
+public class TunnelLinksResource extends ServerResource {
+    @Get("json")
+    public Set<NodePortTuple> retrieve() {
+        ITopologyService topology = 
+                (ITopologyService)getContext().getAttributes().
+                    get(ITopologyService.class.getCanonicalName());
+        
+        return topology.getTunnelPorts();
+    }
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/ui/web/StaticWebRoutable.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/ui/web/StaticWebRoutable.java
new file mode 100644
index 0000000..c1d5b5f
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/ui/web/StaticWebRoutable.java
@@ -0,0 +1,70 @@
+package net.floodlightcontroller.ui.web;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Map;
+
+import org.restlet.Client;
+import org.restlet.Context;
+import org.restlet.Restlet;
+import org.restlet.data.Protocol;
+import org.restlet.resource.Directory;
+import org.restlet.routing.Router;
+
+import net.floodlightcontroller.core.module.FloodlightModuleContext;
+import net.floodlightcontroller.core.module.FloodlightModuleException;
+import net.floodlightcontroller.core.module.IFloodlightModule;
+import net.floodlightcontroller.core.module.IFloodlightService;
+import net.floodlightcontroller.restserver.IRestApiService;
+import net.floodlightcontroller.restserver.RestletRoutable;
+
+public class StaticWebRoutable implements RestletRoutable, IFloodlightModule {
+
+	private IRestApiService restApi;
+	
+    @Override
+    public Collection<Class<? extends IFloodlightService>> getModuleDependencies() {
+        Collection<Class<? extends IFloodlightService>> l = 
+                new ArrayList<Class<? extends IFloodlightService>>();
+        l.add(IRestApiService.class);
+        return l;
+    }
+    
+    @Override
+    public Collection<Class<? extends IFloodlightService>> getModuleServices() {
+        return null;
+    }
+    
+    @Override
+    public Map<Class<? extends IFloodlightService>, IFloodlightService>
+            getServiceImpls() {
+        return null;
+    }
+
+    @Override
+    public void init(FloodlightModuleContext context)
+                                             throws FloodlightModuleException {
+        restApi = context.getServiceImpl(IRestApiService.class);
+    }
+    
+    @Override
+    public void startUp(FloodlightModuleContext context) {
+        // Add our REST API
+        restApi.addRestletRoutable(this);
+        
+    }
+
+	@Override
+	public Restlet getRestlet(Context context) {
+        Router router = new Router(context);
+        router.attach("", new Directory(context, "clap://classloader/web/"));
+        context.setClientDispatcher(new Client(context, Protocol.CLAP));
+        return router;
+	}
+
+	@Override
+	public String basePath() {
+		return "/ui/";
+	}
+
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/util/BundleAction.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/util/BundleAction.java
new file mode 100644
index 0000000..0d82275
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/util/BundleAction.java
@@ -0,0 +1,54 @@
+/**
+*    Copyright 2011, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.util;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+/**
+ *
+ * @author David Erickson (daviderickson@cs.stanford.edu)
+ */
+public enum BundleAction {
+    START,
+    STOP,
+    UNINSTALL,
+    REFRESH;
+
+    public static List<BundleAction> getAvailableActions(BundleState state) {
+        List<BundleAction> actions = new ArrayList<BundleAction>();
+        if (Arrays.binarySearch(new BundleState[] {
+                BundleState.ACTIVE, BundleState.STARTING,
+                BundleState.UNINSTALLED }, state) < 0) {
+            actions.add(START);
+        }
+        if (Arrays.binarySearch(new BundleState[] {
+                BundleState.ACTIVE}, state) >= 0) {
+            actions.add(STOP);
+        }
+        if (Arrays.binarySearch(new BundleState[] {
+                BundleState.UNINSTALLED}, state) < 0) {
+            actions.add(UNINSTALL);
+        }
+
+        // Always capable of refresh?
+        actions.add(REFRESH);
+        return actions;
+    }
+}
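A short sketch of how getAvailableActions is meant to be used, mapping a bundle's current state to the actions a caller (for example a management view) could offer; the class name is illustrative:

import java.util.List;

import net.floodlightcontroller.util.BundleAction;
import net.floodlightcontroller.util.BundleState;

// Illustrative use of BundleAction.getAvailableActions.
public class BundleActionExample {
    public static void main(String[] args) {
        // An ACTIVE bundle can be stopped, uninstalled, or refreshed,
        // but not started again.
        List<BundleAction> actions =
                BundleAction.getAvailableActions(BundleState.ACTIVE);
        System.out.println(actions); // [STOP, UNINSTALL, REFRESH]
    }
}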
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/util/BundleState.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/util/BundleState.java
new file mode 100644
index 0000000..f89bc0b
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/util/BundleState.java
@@ -0,0 +1,62 @@
+/**
+*    Copyright 2011, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.util;
+
+/**
+ *
+ * @author David Erickson (daviderickson@cs.stanford.edu)
+ */
+public enum BundleState {
+    ACTIVE              (32),
+    INSTALLED           (2),
+    RESOLVED            (4),
+    STARTING            (8),
+    STOPPING            (16),
+    UNINSTALLED         (1);
+
+    protected int value;
+
+    private BundleState(int value) {
+        this.value = value;
+    }
+
+    /**
+     * @return the value
+     */
+    public int getValue() {
+        return value;
+    }
+
+    public static BundleState getState(int value) {
+        switch (value) {
+            case 32:
+                return ACTIVE;
+            case 2:
+                return INSTALLED;
+            case 4:
+                return RESOLVED;
+            case 8:
+                return STARTING;
+            case 16:
+                return STOPPING;
+            case 1:
+                return UNINSTALLED;
+        }
+        return null;
+    }
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/util/ClusterDFS.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/util/ClusterDFS.java
new file mode 100644
index 0000000..3efd5bc
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/util/ClusterDFS.java
@@ -0,0 +1,47 @@
+package net.floodlightcontroller.util;
+
+public class ClusterDFS {
+    long dfsIndex;
+    long parentDFSIndex;
+    long lowpoint;
+    boolean visited;
+
+    public ClusterDFS() {
+        visited = false;
+        dfsIndex = Long.MAX_VALUE;
+        parentDFSIndex = Long.MAX_VALUE;
+        lowpoint = Long.MAX_VALUE;
+    }
+
+    public long getDfsIndex() {
+        return dfsIndex;
+    }
+
+    public void setDfsIndex(long dfsIndex) {
+        this.dfsIndex = dfsIndex;
+    }
+
+    public long getParentDFSIndex() {
+        return parentDFSIndex;
+    }
+
+    public void setParentDFSIndex(long parentDFSIndex) {
+        this.parentDFSIndex = parentDFSIndex;
+    }
+
+    public long getLowpoint() {
+        return lowpoint;
+    }
+
+    public void setLowpoint(long lowpoint) {
+        this.lowpoint = lowpoint;
+    }
+
+    public boolean isVisited() {
+        return visited;
+    }
+
+    public void setVisited(boolean visited) {
+        this.visited = visited;
+    }
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/util/EventHistory.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/util/EventHistory.java
new file mode 100644
index 0000000..69031ba
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/util/EventHistory.java
@@ -0,0 +1,179 @@
+/**
+ * 
+ */
+package net.floodlightcontroller.util;
+
+import java.util.ArrayList;
+
+/**
+ * @author subrata
+ *
+ */
+
+public class EventHistory<T> {
+    public static final int EV_HISTORY_DEFAULT_SIZE = 1024;
+
+    public String  description;
+    public int     event_history_size;
+    public int     current_index;
+    public boolean full; // true if all are in use
+    public ArrayList<Event> events;
+
+    public String getDescription() {
+        return description;
+    }
+    public int getEvent_history_size() {
+        return event_history_size;
+    }
+    public int getCurrent_index() {
+        return current_index;
+    }
+    public boolean isFull() {
+        return full;
+    }
+    public ArrayList<Event> getEvents() {
+        return events;
+    }
+
+    public class Event {
+        public EventHistoryBaseInfo base_info;
+        public T info;
+    }
+
+    public enum EvState {
+        FREE,             // no valid event written yet
+        BEING_MODIFIED,   // event is being updated with new value, skip
+        ACTIVE,           // event is active and can be displayed
+    }
+
+    public enum EvAction {
+        ADDED,   // specific entry added
+        REMOVED, // specific entry removed
+        UPDATED, // Entry updated
+        BLOCKED, // Blocked - used for Attachment Points
+        UNBLOCKED,
+        CLEARED,  // All entries are removed
+        PKT_IN,
+        PKT_OUT,
+        SWITCH_CONNECTED,
+        SWITCH_DISCONNECTED,
+        LINK_ADDED,
+        LINK_DELETED,
+        LINK_PORT_STATE_UPDATED,
+        CLUSTER_ID_CHANGED_FOR_CLUSTER,
+        CLUSTER_ID_CHANGED_FOR_A_SWITCH,
+    }
+
+    // Constructor
+    public EventHistory(int maxEvents, String desc) {
+        events = new ArrayList<Event>(maxEvents);
+
+        for (int idx = 0; idx < maxEvents; idx++) {
+            Event evH     = new Event();
+            evH.base_info = new EventHistoryBaseInfo();
+            evH.info      = null;
+            evH.base_info.state = EvState.FREE;
+            evH.base_info.idx   = idx;
+            events.add(idx, evH);
+        }
+
+        description = "Event-History:" + desc;
+        event_history_size   = maxEvents;
+        current_index        = 0;
+        full                 = false;
+    }
+
+    // Constructor for default size
+    public EventHistory(String desc) {
+        this(EV_HISTORY_DEFAULT_SIZE, desc);
+    }
+
+    // Copy constructor - copy latest k items of the event history
+    public EventHistory(EventHistory<T> eventHist, int latestK) {
+
+        if (eventHist == null) {
+            description = "No event found";
+            return;
+        }
+        int curSize = (eventHist.full)?eventHist.event_history_size:
+                                                    eventHist.current_index;
+        int size  = (latestK < curSize)?latestK:curSize;
+        int evIdx = eventHist.current_index;
+        int topSz = (evIdx >= size)?size:evIdx;
+
+        // Need to create a new one since size is different
+        events = new ArrayList<Event>(size);
+
+        // Get the top part
+        int origIdx = evIdx;
+        for (int idx = 0; idx < topSz; idx++) {
+            Event evH         = eventHist.events.get(--origIdx);
+            evH.base_info.idx = idx;
+            events.add(idx, evH);
+        }
+
+        // Get the bottom part
+        origIdx = eventHist.event_history_size;
+        for (int idx = topSz; idx < size; idx++) {
+            Event evH = eventHist.events.get(--origIdx);
+            evH.base_info.idx = idx;
+            events.add(idx, evH);
+        }
+
+        description = eventHist.description;
+        event_history_size   = size;
+        current_index        = 0; // since it is full
+        full                 = true;
+    }
+
+    // Get an index for writing a new event.  This method is synchronized so
+    // the event history infrastructure stays thread-safe.  Once the caller has
+    // obtained the index, the event at that index is updated without a lock.
+    public synchronized int NextIdx() {
+        // curIdx should be in the 0 to evArraySz-1
+        if (current_index == (event_history_size-1)) {
+            current_index = 0;
+            full = true;
+            return (event_history_size-1);
+        } else {
+            current_index++;
+            return (current_index-1);
+        }
+    }
+
+    /**
+     * Add an event to the event history.
+     * Java garbage creation is avoided by reusing event objects: the
+     * supplied object t is stored in the event history array, and the
+     * object previously held at that array location is returned to the
+     * caller so that it can be reused for the next event of the same
+     * type.
+     * @param t
+     * @param action
+     * @return the previously stored event object, which may be reused
+     */
+
+    public T put(T t, EvAction action) {
+        int idx = NextIdx();
+        Event evH = events.get(idx);
+        evH.base_info.state = EvState.BEING_MODIFIED;
+        evH.base_info.time_ms = System.currentTimeMillis();
+        evH.base_info.action = action;
+        T temp = evH.info;
+        evH.info = t;
+        evH.base_info.state = EvState.ACTIVE;
+        return temp;
+    }
+
+    /***
+     * Clear the event history, needs to be done under lock
+     */
+    public void clear() {
+        for (int idx = 0; idx < event_history_size; idx++) {
+            Event evH = events.get(idx);
+            evH.base_info.state = EvState.FREE;
+            current_index = 0;
+            full = false;
+        }
+    }
+}
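EventHistory is a fixed-size ring buffer: put() hands back the object that previously occupied the slot it just overwrote, so callers can recycle that instance for their next event instead of allocating a new one. A minimal sketch, assuming String payloads; the names and values are illustrative only:

import net.floodlightcontroller.util.EventHistory;
import net.floodlightcontroller.util.EventHistory.EvAction;

// Illustrative use of the EventHistory ring buffer with String payloads.
public class EventHistoryExample {
    public static void main(String[] args) {
        EventHistory<String> history =
                new EventHistory<String>(4, "Example");

        // put() returns whatever previously occupied the slot (null while
        // the buffer is still filling), so the caller can reuse it.
        String recycled = history.put("link added", EvAction.LINK_ADDED);
        System.out.println(recycled);                   // null
        System.out.println(history.getCurrent_index()); // 1
    }
}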
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/util/EventHistoryBaseInfo.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/util/EventHistoryBaseInfo.java
new file mode 100644
index 0000000..74fc973
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/util/EventHistoryBaseInfo.java
@@ -0,0 +1,26 @@
+package net.floodlightcontroller.util;
+
+
+import org.codehaus.jackson.map.annotate.JsonSerialize;
+
+@JsonSerialize(using=EventHistoryBaseInfoJSONSerializer.class)
+public class EventHistoryBaseInfo {
+    public int              idx;
+    public long             time_ms; // timestamp in milliseconds
+    public EventHistory.EvState          state;
+    public EventHistory.EvAction         action;
+
+    // Getters
+    public int getIdx() {
+        return idx;
+    }
+    public long getTime_ms() {
+        return time_ms;
+    }
+    public EventHistory.EvState getState() {
+        return state;
+    }
+    public EventHistory.EvAction getAction() {
+        return action;
+    }
+}
\ No newline at end of file
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/util/EventHistoryBaseInfoJSONSerializer.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/util/EventHistoryBaseInfoJSONSerializer.java
new file mode 100644
index 0000000..6f1d1ff
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/util/EventHistoryBaseInfoJSONSerializer.java
@@ -0,0 +1,69 @@
+/**
+*    Copyright 2011, Big Switch Networks, Inc. 
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.util;
+
+import java.io.IOException;
+
+import java.sql.Timestamp;
+
+
+import org.codehaus.jackson.JsonGenerator;
+import org.codehaus.jackson.JsonProcessingException;
+import org.codehaus.jackson.map.JsonSerializer;
+import org.codehaus.jackson.map.SerializerProvider;
+
+
+/**
+ * @author subrata
+ *
+ */
+
+public class EventHistoryBaseInfoJSONSerializer extends 
+                                    JsonSerializer<EventHistoryBaseInfo> {
+
+ 
+    /**
+     * Performs the serialization of an EventHistoryBaseInfo object
+     */
+    @Override
+    public void serialize(EventHistoryBaseInfo base_info, JsonGenerator jGen,
+                    SerializerProvider serializer) 
+                    throws IOException, JsonProcessingException {
+        jGen.writeStartObject();
+        jGen.writeNumberField("Idx",    base_info.getIdx());
+        Timestamp ts = new Timestamp(base_info.getTime_ms());
+        String tsStr = ts.toString();
+        while (tsStr.length() < 23) {
+            tsStr = tsStr.concat("0");
+        }
+        jGen.writeStringField("Time", tsStr);
+        jGen.writeStringField("State",  base_info.getState().name());
+        String acStr = base_info.getAction().name().toLowerCase();
+        // Capitalize the first letter
+        acStr = acStr.substring(0,1).toUpperCase().concat(acStr.substring(1));
+        jGen.writeStringField("Action", acStr);
+        jGen.writeEndObject();
+    }
+
+    /**
+     * Tells SimpleModule that we are the serializer for EventHistoryBaseInfo
+     */
+    @Override
+    public Class<EventHistoryBaseInfo> handledType() {
+        return EventHistoryBaseInfo.class;
+    }
+}
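Because EventHistoryBaseInfo is annotated with @JsonSerialize(using=EventHistoryBaseInfoJSONSerializer.class), a Jackson 1.x ObjectMapper from the same org.codehaus.jackson tree picks this serializer up automatically. A small sketch of what that produces; the field values are made up and the exact output is only indicative:

import org.codehaus.jackson.map.ObjectMapper;

import net.floodlightcontroller.util.EventHistory;
import net.floodlightcontroller.util.EventHistoryBaseInfo;

// Illustrative serialization of EventHistoryBaseInfo via Jackson 1.x.
public class EventHistorySerializationExample {
    public static void main(String[] args) throws Exception {
        EventHistoryBaseInfo info = new EventHistoryBaseInfo();
        info.idx = 7;
        info.time_ms = System.currentTimeMillis();
        info.state = EventHistory.EvState.ACTIVE;
        info.action = EventHistory.EvAction.LINK_ADDED;

        ObjectMapper mapper = new ObjectMapper();
        // Prints something like:
        // {"Idx":7,"Time":"...","State":"ACTIVE","Action":"Link_added"}
        System.out.println(mapper.writeValueAsString(info));
    }
}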
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/util/FilterIterator.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/util/FilterIterator.java
new file mode 100644
index 0000000..47cd5c9
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/util/FilterIterator.java
@@ -0,0 +1,80 @@
+/**
+*    Copyright 2012, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.util;
+
+import java.util.Iterator;
+import java.util.NoSuchElementException;
+
+/**
+ * An iterator that will filter values from an iterator and return only
+ * those values that match the predicate.
+ */
+public abstract class FilterIterator<T> implements Iterator<T> {
+    protected Iterator<T> subIterator;
+    protected T next;
+
+    /**
+     * Construct a filter iterator from the given sub iterator
+     * @param subIterator the sub iterator over which we'll filter
+     */
+    public FilterIterator(Iterator<T> subIterator) {
+        super();
+        this.subIterator = subIterator;
+    }
+
+    /**
+     * Check whether the given value should be returned by the
+     * filter
+     * @param value the value to check
+     * @return true if the value should be included
+     */
+    protected abstract boolean matches(T value);
+    
+    // ***********
+    // Iterator<T>
+    // ***********
+
+    @Override
+    public boolean hasNext() {
+        if (next != null) return true;
+        
+        while (subIterator.hasNext()) {
+            next = subIterator.next();
+            if (matches(next))
+                return true;
+        }
+        next = null;
+        return false;
+    }
+
+    @Override
+    public T next() {
+        if (hasNext()) {
+            T cur = next;
+            next = null;
+            return cur;
+        }
+        throw new NoSuchElementException();
+    }
+
+    @Override
+    public void remove() {
+        throw new UnsupportedOperationException();
+    }
+
+}
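
A minimal usage sketch of FilterIterator: subclass it and implement matches() to decide which elements pass through. The class name EvenFilterDemo and the sample data are made up for illustration; only FilterIterator itself is assumed from the code above.

import java.util.Arrays;
import java.util.Iterator;

import net.floodlightcontroller.util.FilterIterator;

public class EvenFilterDemo {
    public static void main(String[] args) {
        Iterator<Integer> base = Arrays.asList(1, 2, 3, 4, 5, 6).iterator();

        // Anonymous subclass: keep only even values.
        Iterator<Integer> evens = new FilterIterator<Integer>(base) {
            @Override
            protected boolean matches(Integer value) {
                return value % 2 == 0;
            }
        };

        while (evens.hasNext())
            System.out.println(evens.next());   // prints 2, 4, 6
    }
}
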
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/util/IterableIterator.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/util/IterableIterator.java
new file mode 100644
index 0000000..584de08
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/util/IterableIterator.java
@@ -0,0 +1,66 @@
+/**
+*    Copyright 2012 Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.util;
+
+import java.util.Iterator;
+import java.util.NoSuchElementException;
+
+/**
+ * Iterator over all values in an iterator of iterators
+ *
+ * @param <T> the type of elements returned by this iterator
+ */
+public class IterableIterator<T> implements Iterator<T> {
+    Iterator<? extends Iterable<T>> subIterator;
+    Iterator<T> current = null;
+    
+    public IterableIterator(Iterator<? extends Iterable<T>> subIterator) {
+        super();
+        this.subIterator = subIterator;
+    }
+
+    @Override
+    public boolean hasNext() {
+        if (current == null) {
+            if (subIterator.hasNext()) {
+                current = subIterator.next().iterator();
+            } else {
+                return false;
+            }
+        }
+        while (!current.hasNext() && subIterator.hasNext()) {
+            current = subIterator.next().iterator();
+        }
+        
+        return current.hasNext();
+    }
+
+    @Override
+    public T next() {
+        if (hasNext())
+            return current.next();
+        throw new NoSuchElementException();
+    }
+
+    @Override
+    public void remove() {
+        if (!hasNext())
+            throw new NoSuchElementException();
+        current.remove();
+    }
+}
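
A minimal sketch showing how IterableIterator flattens an iterator of Iterables; empty inner collections are skipped by the hasNext() loop above. FlattenDemo and the sample lists are made up for illustration.

import java.util.Arrays;
import java.util.Iterator;
import java.util.List;

import net.floodlightcontroller.util.IterableIterator;

public class FlattenDemo {
    public static void main(String[] args) {
        List<List<String>> nested = Arrays.asList(
                Arrays.asList("a", "b"),
                Arrays.<String>asList(),      // empty inner list is skipped
                Arrays.asList("c"));

        Iterator<String> flat = new IterableIterator<String>(nested.iterator());
        while (flat.hasNext())
            System.out.println(flat.next()); // prints a, b, c
    }
}
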
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/util/LRUHashMap.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/util/LRUHashMap.java
new file mode 100644
index 0000000..477e886
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/util/LRUHashMap.java
@@ -0,0 +1,38 @@
+/**
+*    Copyright 2011, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.util;
+
+import java.util.LinkedHashMap;
+import java.util.Map;
+
+public class LRUHashMap<K, V> extends LinkedHashMap<K, V> {
+    
+    private static final long serialVersionUID = 1L;
+    
+    private final int capacity;
+    public LRUHashMap(int capacity)
+    {
+        super(capacity+1, 0.75f, true);
+        this.capacity = capacity;
+    }
+    
+    protected boolean removeEldestEntry(Map.Entry<K,V> eldest) {
+       return size() > capacity;
+    }
+
+}
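
A small sketch of the access-order eviction this map gets from LinkedHashMap(capacity+1, 0.75f, true) plus removeEldestEntry(): once size() exceeds capacity, the least recently accessed entry is dropped. The class name LRUDemo and the sample keys are made up.

import net.floodlightcontroller.util.LRUHashMap;

public class LRUDemo {
    public static void main(String[] args) {
        LRUHashMap<String, Integer> cache = new LRUHashMap<String, Integer>(2);
        cache.put("a", 1);
        cache.put("b", 2);
        cache.get("a");          // touch "a" so it becomes most recently used
        cache.put("c", 3);       // capacity exceeded: evicts "b", the least recently used
        System.out.println(cache.keySet()); // prints [a, c]
    }
}
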
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/util/MACAddress.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/util/MACAddress.java
new file mode 100644
index 0000000..4ba9dad
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/util/MACAddress.java
@@ -0,0 +1,157 @@
+package net.floodlightcontroller.util;
+
+import java.util.Arrays;
+
+/**
+ * The class representing MAC address.
+ *
+ * @author Sho Shimizu (sho.shimizu@gmail.com)
+ */
+public class MACAddress {
+    public static final int MAC_ADDRESS_LENGTH = 6;
+    private byte[] address = new byte[MAC_ADDRESS_LENGTH];
+
+    public MACAddress(byte[] address) {
+        this.address = Arrays.copyOf(address, MAC_ADDRESS_LENGTH);
+    }
+
+    /**
+     * Returns a MAC address instance representing the value of the specified {@code String}.
+     * @param address the String representation of the MAC Address to be parsed.
+     * @return a MAC Address instance representing the value of the specified {@code String}.
+     * @throws IllegalArgumentException if the string cannot be parsed as a MAC address.
+     */
+    public static MACAddress valueOf(String address) {
+        String[] elements = address.split(":");
+        if (elements.length != MAC_ADDRESS_LENGTH) {
+            throw new IllegalArgumentException(
+                    "Specified MAC Address must contain 12 hex digits" +
+                    " separated pairwise by :'s.");
+        }
+
+        byte[] addressInBytes = new byte[MAC_ADDRESS_LENGTH];
+        for (int i = 0; i < MAC_ADDRESS_LENGTH; i++) {
+            String element = elements[i];
+            addressInBytes[i] = (byte)Integer.parseInt(element, 16);
+        }
+
+        return new MACAddress(addressInBytes);
+    }
+
+    /**
+     * Returns a MAC address instance representing the specified {@code byte} array.
+     * @param address the byte array to be parsed.
+     * @return a MAC address instance representing the specified {@code byte} array.
+     * @throws IllegalArgumentException if the byte array cannot be parsed as a MAC address.
+     */
+    public static MACAddress valueOf(byte[] address) {
+        if (address.length != MAC_ADDRESS_LENGTH) {
+            throw new IllegalArgumentException("the length is not " + MAC_ADDRESS_LENGTH);
+        }
+
+        return new MACAddress(address);
+    }
+
+    /**
+     * Returns a MAC address instance representing the specified {@code long} value.
+     * The lower 48 bits of the long value are used to parse as a MAC address.
+     * @param address the long value to be parsed. The lower 48 bits are used for a MAC address.
+     * @return a MAC address instance representing the specified {@code long} value.
+     * @throws IllegalArgumentException if the long value cannot be parsed as a MAC address.
+     */
+    public static MACAddress valueOf(long address) {
+        byte[] addressInBytes = new byte[] {
+                (byte)((address >> 40) & 0xff),
+                (byte)((address >> 32) & 0xff),
+                (byte)((address >> 24) & 0xff),
+                (byte)((address >> 16) & 0xff),
+                (byte)((address >> 8 ) & 0xff),
+                (byte)((address >> 0) & 0xff)
+        };
+
+        return new MACAddress(addressInBytes);
+    }
+
+    /**
+     * Returns the length of the {@code MACAddress}.
+     * @return the length of the {@code MACAddress}.
+     */
+    public int length() {
+        return address.length;
+    }
+
+    /**
+     * Returns the value of the {@code MACAddress} as a {@code byte} array.
+     * @return the numeric value represented by this object after conversion to type {@code byte} array.
+     */
+    public byte[] toBytes() {
+        return Arrays.copyOf(address, address.length);
+    }
+
+    /**
+     * Returns the value of the {@code MACAddress} as a {@code long}.
+     * @return the numeric value represented by this object after conversion to type {@code long}.
+     */
+    public long toLong() {
+        long mac = 0;
+        for (int i = 0; i < 6; i++) {
+            long t = (address[i] & 0xffL) << ((5 - i) * 8);
+            mac |= t;
+        }
+        return mac;
+    }
+
+    /**
+     * Returns {@code true} if the MAC address is the broadcast address.
+     * @return {@code true} if the MAC address is the broadcast address.
+     */
+    public boolean isBroadcast() {
+        for (byte b : address) {
+            if (b != -1) // checks if equal to 0xff
+                return false;
+        }
+        return true;
+    }
+
+    /**
+     * Returns {@code true} if the MAC address is the multicast address.
+     * @return {@code true} if the MAC address is the multicast address.
+     */
+    public boolean isMulticast() {
+        if (isBroadcast()) {
+            return false;
+        }
+        return (address[0] & 0x01) != 0;
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (o == this) {
+            return true;
+        }
+
+        if (!(o instanceof MACAddress)) {
+            return false;
+        }
+
+        MACAddress other = (MACAddress)o;
+        return Arrays.equals(this.address, other.address);
+    }
+
+    @Override
+    public int hashCode() {
+        return Arrays.hashCode(this.address);
+    }
+
+    @Override
+    public String toString() {
+        StringBuilder builder = new StringBuilder();
+        for (byte b: address) {
+            if (builder.length() > 0) {
+                builder.append(":");
+            }
+            builder.append(String.format("%02X", b & 0xFF));
+        }
+        return builder.toString();
+    }
+}
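
A short round-trip sketch for MACAddress using the valueOf() overloads and predicates defined above; the sample addresses are arbitrary.

import net.floodlightcontroller.util.MACAddress;

public class MacDemo {
    public static void main(String[] args) {
        MACAddress mac = MACAddress.valueOf("00:1B:21:3C:9D:F8");
        System.out.println(mac.toLong());                     // numeric form of the 48-bit address
        System.out.println(MACAddress.valueOf(mac.toLong())); // 00:1B:21:3C:9D:F8
        System.out.println(mac.isBroadcast());                // false
        System.out.println(MACAddress.valueOf("FF:FF:FF:FF:FF:FF").isBroadcast()); // true
    }
}
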
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/util/MultiIterator.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/util/MultiIterator.java
new file mode 100644
index 0000000..bcbc916
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/util/MultiIterator.java
@@ -0,0 +1,66 @@
+/**
+*    Copyright 2012 Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.util;
+
+import java.util.Iterator;
+import java.util.NoSuchElementException;
+
+/**
+ * Iterator over all values in an iterator of iterators
+ *
+ * @param <T> the type of elements returned by this iterator
+ */
+public class MultiIterator<T> implements Iterator<T> {
+    Iterator<Iterator<T>> subIterator;
+    Iterator<T> current = null;
+    
+    public MultiIterator(Iterator<Iterator<T>> subIterator) {
+        super();
+        this.subIterator = subIterator;
+    }
+
+    @Override
+    public boolean hasNext() {
+        if (current == null) {
+            if (subIterator.hasNext()) {
+                current = subIterator.next();
+            } else {
+                return false;
+            }
+        }
+        while (!current.hasNext() && subIterator.hasNext()) {
+            current = subIterator.next();
+        }
+        
+        return current.hasNext();
+    }
+
+    @Override
+    public T next() {
+        if (hasNext())
+            return current.next();
+        throw new NoSuchElementException();
+    }
+
+    @Override
+    public void remove() {
+        if (!hasNext())
+            throw new NoSuchElementException();
+        current.remove();
+    }
+}
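
MultiIterator is the same idea as IterableIterator, but it takes an iterator of already-created iterators. A brief sketch (ChainDemo and the sample data are made up):

import java.util.Arrays;
import java.util.Iterator;

import net.floodlightcontroller.util.MultiIterator;

public class ChainDemo {
    public static void main(String[] args) {
        Iterator<Iterator<String>> parts = Arrays.asList(
                Arrays.asList("x", "y").iterator(),
                Arrays.asList("z").iterator()).iterator();

        Iterator<String> all = new MultiIterator<String>(parts);
        while (all.hasNext())
            System.out.println(all.next()); // prints x, y, z
    }
}
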
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/util/OFMessageDamper.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/util/OFMessageDamper.java
new file mode 100644
index 0000000..4dfb60b
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/util/OFMessageDamper.java
@@ -0,0 +1,144 @@
+/*
+ * Copyright Big Switch Networks 2012
+ */
+
+package net.floodlightcontroller.util;
+
+import java.io.IOException;
+import java.util.EnumSet;
+import java.util.Set;
+
+import net.floodlightcontroller.core.FloodlightContext;
+import net.floodlightcontroller.core.IOFSwitch;
+
+import org.openflow.protocol.OFMessage;
+import org.openflow.protocol.OFType;
+
+/**
+ * Dampens OFMessages sent to an OF switch. A message is only written to 
+ * a switch if the same message (as defined by .equals()) has not been written
+ * in the last n milliseconds. Timer granularity is based on TimedCache
+ * @author gregor
+ *
+ */
+public class OFMessageDamper {
+    /**
+     * An entry in the TimedCache. A cache entry consists of the sent message
+     * as well as the switch to which the message was sent. 
+     * 
+     * NOTE: We currently use the full OFMessage object. To save space, we 
+     * could use a cryptographic hash (e.g., SHA-1). However, this would 
+     * obviously be more time-consuming.... 
+     * 
+     * We also store a reference to the actual IOFSwitch object and /not/
+     * the switch DPID. This way we are guaranteed not to dampen messages if
+     * a switch disconnects and then reconnects.
+     * 
+     * @author gregor
+     */
+    protected static class DamperEntry {
+        OFMessage msg;
+        IOFSwitch sw;
+        public DamperEntry(OFMessage msg, IOFSwitch sw) {
+            super();
+            this.msg = msg;
+            this.sw = sw;
+        }
+        /* (non-Javadoc)
+         * @see java.lang.Object#hashCode()
+         */
+        @Override
+        public int hashCode() {
+            final int prime = 31;
+            int result = 1;
+            result = prime * result + ((msg == null) ? 0 : msg.hashCode());
+            result = prime * result + ((sw == null) ? 0 : sw.hashCode());
+            return result;
+        }
+        /* (non-Javadoc)
+         * @see java.lang.Object#equals(java.lang.Object)
+         */
+        @Override
+        public boolean equals(Object obj) {
+            if (this == obj) return true;
+            if (obj == null) return false;
+            if (getClass() != obj.getClass()) return false;
+            DamperEntry other = (DamperEntry) obj;
+            if (msg == null) {
+                if (other.msg != null) return false;
+            } else if (!msg.equals(other.msg)) return false;
+            if (sw == null) {
+                if (other.sw != null) return false;
+            } else if (!sw.equals(other.sw)) return false;
+            return true;
+        }
+        
+      
+    }
+    TimedCache<DamperEntry> cache;
+    EnumSet<OFType> msgTypesToCache;
+    /**
+     * 
+     * @param capacity the maximum number of messages that should be 
+     * kept
+     * @param typesToDampen The set of OFMessageTypes that should be 
+     * dampened by this instance. Other types will be passed through
+     * @param timeout The dampening timeout. A message will only be
+     * written if the last write of an equal message happened more than
+     * timeout ms ago. 
+     */
+    public OFMessageDamper(int capacity, 
+                           Set<OFType> typesToDampen,  
+                           int timeout) {
+        cache = new TimedCache<DamperEntry>(capacity, timeout);
+        msgTypesToCache = EnumSet.copyOf(typesToDampen);
+    }        
+    
+    /**
+     * Write the message to the switch according to our dampening settings
+     * @param sw
+     * @param msg
+     * @param cntx
+     * @return true if the message was written to the switch, false if
+     * the message was dampened. 
+     * @throws IOException
+     */
+    public boolean write(IOFSwitch sw, OFMessage msg, FloodlightContext cntx)
+                    throws IOException {
+        return write(sw, msg, cntx, false);
+    }
+    
+    /**
+     * Write the message to the switch according to our dampening settings
+     * @param sw
+     * @param msg
+     * @param cntx
+     * @param flush true to flush the packet immediately
+     * @return true if the message was written to the switch, false if
+     * the message was dampened. 
+     * @throws IOException
+     */
+    public boolean write(IOFSwitch sw, OFMessage msg,
+                        FloodlightContext cntx, boolean flush) 
+            throws IOException {
+        if (! msgTypesToCache.contains(msg.getType())) {
+            sw.write(msg, cntx);
+            if (flush) {
+                sw.flush();
+            }
+            return true;
+        }
+        
+        DamperEntry entry = new DamperEntry(msg, sw);
+        if (cache.update(entry)) {
+            // entry exists in cache. Dampening.
+            return false; 
+        } else {
+            sw.write(msg, cntx);
+            if (flush) {
+                sw.flush();
+            }
+            return true;
+        }
+    }
+}
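
A hedged usage sketch of OFMessageDamper as it might appear inside a module: dampen FLOW_MODs repeated to the same switch within 250 ms. The class DamperUsageSketch and the capacity/timeout values are illustrative; the IOFSwitch, OFMessage and FloodlightContext arguments would come from the controller at runtime.

import java.io.IOException;
import java.util.EnumSet;

import net.floodlightcontroller.core.FloodlightContext;
import net.floodlightcontroller.core.IOFSwitch;
import net.floodlightcontroller.util.OFMessageDamper;

import org.openflow.protocol.OFMessage;
import org.openflow.protocol.OFType;

public class DamperUsageSketch {
    // Keep up to 10000 cache entries; dampen FLOW_MODs repeated within 250 ms.
    private final OFMessageDamper damper =
            new OFMessageDamper(10000, EnumSet.of(OFType.FLOW_MOD), 250);

    // sw, msg and cntx are supplied by the controller at runtime.
    public boolean send(IOFSwitch sw, OFMessage msg, FloodlightContext cntx)
            throws IOException {
        boolean written = damper.write(sw, msg, cntx, true /* flush */);
        // false means an equal message went to this switch within the timeout.
        return written;
    }
}
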
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/util/TimedCache.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/util/TimedCache.java
new file mode 100644
index 0000000..7341df7
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/util/TimedCache.java
@@ -0,0 +1,79 @@
+/**
+*    Copyright 2011, Big Switch Networks, Inc. 
+*    Originally created by David Erickson, Stanford University
+* 
+*    Licensed under the Apache License, Version 2.0 (the "License"); you may
+*    not use this file except in compliance with the License. You may obtain
+*    a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+*    Unless required by applicable law or agreed to in writing, software
+*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+*    License for the specific language governing permissions and limitations
+*    under the License.
+**/
+
+package net.floodlightcontroller.util;
+
+import com.googlecode.concurrentlinkedhashmap.ConcurrentLinkedHashMap;
+import java.util.concurrent.ConcurrentMap;
+
+/**
+ * The key is any object/hash-code.
+ * The value is the timestamp in milliseconds at which the key was last updated.
+ * The timeout interval denotes how long an entry counts as recently seen.
+ * If an entry is present in the underlying map, that alone does not mean it is valid (recently seen).
+ * 
+ * @param <K> Type of the keys in this cache
+ */
+public class TimedCache<K> {    
+    private final long timeoutInterval;    //specified in milliseconds.
+	private ConcurrentMap<K, Long> cache;
+    
+    /**
+     * 
+     * @param capacity the maximum number of entries in the cache before the 
+     * oldest entry is evicted. 
+     * @param timeToLive specified in milliseconds
+     */
+	public TimedCache(int capacity, int timeToLive) {
+        cache = new ConcurrentLinkedHashMap.Builder<K, Long>()
+        	    .maximumWeightedCapacity(capacity)
+            .build();
+        this.timeoutInterval = timeToLive;
+    }
+    
+    public long getTimeoutInterval() {
+        return this.timeoutInterval;
+    }
+    
+    /**
+     * Always try to update the cache and set the last-seen value for this key.
+     * 
+     * Returns true if a valid existing entry was updated, else returns false.
+     * (note: if multiple threads update simultaneously, one of them will succeed
+     *  and the others will return false)
+     * 
+     * @param key
+     * @return boolean
+     */
+    public boolean update(K key)
+    {
+        Long curr = new Long(System.currentTimeMillis());
+        Long prev = cache.putIfAbsent(key, curr);
+        
+        if (prev == null) {
+        		return false;
+        }
+
+        if (curr - prev > this.timeoutInterval) {
+            if (cache.replace(key, prev, curr)) {
+            		return false;
+            }
+        }
+        
+        return true;
+    }
+}
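
A minimal sketch of the update() contract described above: the first sighting of a key returns false, a repeat within the timeout returns true, and a repeat after the timeout returns false again (the entry is treated as new). TimedCacheDemo and the 100-entry/50 ms values are made up.

import net.floodlightcontroller.util.TimedCache;

public class TimedCacheDemo {
    public static void main(String[] args) throws InterruptedException {
        TimedCache<String> cache = new TimedCache<String>(100, 50); // 100 entries, 50 ms timeout
        System.out.println(cache.update("flow-1")); // false: first time this key is seen
        System.out.println(cache.update("flow-1")); // true: seen again within 50 ms
        Thread.sleep(60);
        System.out.println(cache.update("flow-1")); // false: previous entry has expired
    }
}
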
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/virtualnetwork/HostResource.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/virtualnetwork/HostResource.java
new file mode 100644
index 0000000..6021e3d
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/virtualnetwork/HostResource.java
@@ -0,0 +1,95 @@
+package net.floodlightcontroller.virtualnetwork;
+
+import java.io.IOException;
+
+import net.floodlightcontroller.util.MACAddress;
+
+import org.codehaus.jackson.JsonParseException;
+import org.codehaus.jackson.JsonParser;
+import org.codehaus.jackson.JsonToken;
+import org.codehaus.jackson.map.MappingJsonFactory;
+import org.restlet.data.Status;
+import org.restlet.resource.Delete;
+import org.restlet.resource.Put;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class HostResource extends org.restlet.resource.ServerResource {
+    protected static Logger log = LoggerFactory.getLogger(HostResource.class);
+    
+    public class HostDefinition {
+        String port = null; // Logical port name
+        String guid = null; // Network ID
+        String mac = null; // MAC Address
+        String attachment = null; // Attachment name
+    }
+    
+    protected void jsonToHostDefinition(String json, HostDefinition host) throws IOException {
+        MappingJsonFactory f = new MappingJsonFactory();
+        JsonParser jp;
+        
+        try {
+            jp = f.createJsonParser(json);
+        } catch (JsonParseException e) {
+            throw new IOException(e);
+        }
+        
+        jp.nextToken();
+        if (jp.getCurrentToken() != JsonToken.START_OBJECT) {
+            throw new IOException("Expected START_OBJECT");
+        }
+        
+        while (jp.nextToken() != JsonToken.END_OBJECT) {
+            if (jp.getCurrentToken() != JsonToken.FIELD_NAME) {
+                throw new IOException("Expected FIELD_NAME");
+            }
+            
+            String n = jp.getCurrentName();
+            jp.nextToken();
+            if (jp.getText().equals("")) 
+                continue;
+            else if (n.equals("attachment")) {
+                while (jp.nextToken() != JsonToken.END_OBJECT) {
+                    String field = jp.getCurrentName();
+                    if (field.equals("id")) {
+                        host.attachment = jp.getText();
+                    } else if (field.equals("mac")) {
+                        host.mac = jp.getText();
+                    }
+                }
+            }
+        }
+        
+        jp.close();
+    }
+    
+    @Put
+    public String addHost(String postData) {
+        IVirtualNetworkService vns =
+                (IVirtualNetworkService)getContext().getAttributes().
+                    get(IVirtualNetworkService.class.getCanonicalName());
+        HostDefinition host = new HostDefinition();
+        host.port = (String) getRequestAttributes().get("port");
+        host.guid = (String) getRequestAttributes().get("network");
+        try {
+            jsonToHostDefinition(postData, host);
+        } catch (IOException e) {
+            log.error("Could not parse JSON {}", e.getMessage());
+        }
+        vns.addHost(MACAddress.valueOf(host.mac), host.guid, host.port);
+        setStatus(Status.SUCCESS_OK);
+        return "{\"status\":\"ok\"}";
+    }
+    
+    
+    @Delete
+    public String deleteHost() {
+        String port = (String) getRequestAttributes().get("port");
+        IVirtualNetworkService vns =
+                (IVirtualNetworkService)getContext().getAttributes().
+                    get(IVirtualNetworkService.class.getCanonicalName());
+        vns.deleteHost(null, port);
+        setStatus(Status.SUCCESS_OK);
+        return "{\"status\":\"ok\"}";
+    }
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/virtualnetwork/IVirtualNetworkService.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/virtualnetwork/IVirtualNetworkService.java
new file mode 100644
index 0000000..4304a33
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/virtualnetwork/IVirtualNetworkService.java
@@ -0,0 +1,46 @@
+package net.floodlightcontroller.virtualnetwork;
+
+import java.util.Collection;
+import net.floodlightcontroller.core.module.IFloodlightService;
+import net.floodlightcontroller.util.MACAddress;
+
+public interface IVirtualNetworkService extends IFloodlightService {
+    /**
+     * Creates a new virtual network. This can also be called
+     * to modify a virtual network. To update a network you specify the GUID
+     * and the fields you want to update.
+     * @param network The network name. Must be unique.
+     * @param guid The ID of the network. Must be unique.
+     * @param gateway The IP address of the network gateway, null if none.
+     */
+    public void createNetwork(String guid, String network, Integer gateway);
+    
+    /**
+     * Deletes a virtual network.
+     * @param guid The ID (not name) of virtual network to delete.
+     */
+    public void deleteNetwork(String guid);
+    
+    /**
+     * Adds a host to a virtual network. If a mapping already exists the
+     * new one will override the old mapping.
+     * @param mac The MAC address of the host to add.
+     * @param network The network to add the host to.
+     * @param port The logical port name to attach the host to. Must be unique.
+     */
+    public void addHost(MACAddress mac, String network, String port); 
+    
+    /**
+     * Deletes a host from a virtual network. Either the MAC or Port must
+     * be specified.
+     * @param mac The MAC address to delete.
+     * @param port The logical port the host is attached to.
+     */
+    public void deleteHost(MACAddress mac, String port);
+    
+    /**
+     * Return list of all virtual networks.
+     * @return Collection <VirtualNetwork>
+     */
+    public Collection <VirtualNetwork> listNetworks();
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/virtualnetwork/NetworkResource.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/virtualnetwork/NetworkResource.java
new file mode 100644
index 0000000..2efe52a
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/virtualnetwork/NetworkResource.java
@@ -0,0 +1,133 @@
+package net.floodlightcontroller.virtualnetwork;
+
+import java.io.IOException;
+import java.util.Collection;
+
+import net.floodlightcontroller.packet.IPv4;
+
+import org.codehaus.jackson.JsonParseException;
+import org.codehaus.jackson.JsonParser;
+import org.codehaus.jackson.JsonToken;
+import org.codehaus.jackson.map.MappingJsonFactory;
+import org.restlet.data.Status;
+import org.restlet.resource.Delete;
+import org.restlet.resource.Get;
+import org.restlet.resource.Post;
+import org.restlet.resource.Put;
+import org.restlet.resource.ServerResource;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class NetworkResource extends ServerResource {
+    protected static Logger log = LoggerFactory.getLogger(NetworkResource.class);
+    
+    public class NetworkDefinition {
+        public String name = null;
+        public String guid = null;
+        public String gateway = null;
+    }
+    
+    protected void jsonToNetworkDefinition(String json, NetworkDefinition network) throws IOException {
+        MappingJsonFactory f = new MappingJsonFactory();
+        JsonParser jp;
+        
+        try {
+            jp = f.createJsonParser(json);
+        } catch (JsonParseException e) {
+            throw new IOException(e);
+        }
+        
+        jp.nextToken();
+        if (jp.getCurrentToken() != JsonToken.START_OBJECT) {
+            throw new IOException("Expected START_OBJECT");
+        }
+        
+        while (jp.nextToken() != JsonToken.END_OBJECT) {
+            if (jp.getCurrentToken() != JsonToken.FIELD_NAME) {
+                throw new IOException("Expected FIELD_NAME");
+            }
+            
+            String n = jp.getCurrentName();
+            jp.nextToken();
+            if (jp.getText().equals("")) 
+                continue;
+            else if (n.equals("network")) {
+                while (jp.nextToken() != JsonToken.END_OBJECT) {
+                    String field = jp.getCurrentName();
+                    if (field.equals("name")) {
+                        network.name = jp.getText();
+                    } else if (field.equals("gateway")) {
+                    	String gw = jp.getText();
+                    	if ((gw != null) && (!gw.equals("null")))
+                    		network.gateway = gw;
+                    } else if (field.equals("id")) {
+                    	network.guid = jp.getText();
+                    } else {
+                        log.warn("Unrecognized field {} in " +
+                        		"parsing network definition", 
+                        		jp.getText());
+                    }
+                }
+            }
+        }
+        
+        jp.close();
+    }
+    
+    @Get("json")
+    public Collection <VirtualNetwork> retrieve() {
+        IVirtualNetworkService vns =
+                (IVirtualNetworkService)getContext().getAttributes().
+                    get(IVirtualNetworkService.class.getCanonicalName());
+        
+        return vns.listNetworks();               
+    }
+    
+    @Put
+    @Post
+    public String createNetwork(String postData) {        
+        NetworkDefinition network = new NetworkDefinition();
+        try {
+            jsonToNetworkDefinition(postData, network);
+        } catch (IOException e) {
+            log.error("Could not parse JSON {}", e.getMessage());
+        }
+        
+        // We try to get the ID from the URI only if it's not
+        // in the POST data 
+        if (network.guid == null) {
+	        String guid = (String) getRequestAttributes().get("network");
+	        if ((guid != null) && (!guid.equals("null")))
+	        	network.guid = guid;
+        }
+        
+        IVirtualNetworkService vns =
+                (IVirtualNetworkService)getContext().getAttributes().
+                    get(IVirtualNetworkService.class.getCanonicalName());
+        
+        Integer gw = null;
+        if (network.gateway != null) {
+            try {
+                gw = IPv4.toIPv4Address(network.gateway);
+            } catch (IllegalArgumentException e) {
+                log.warn("Could not parse gateway {} as IP for network {}, setting as null",
+                         network.gateway, network.name);
+                network.gateway = null;
+            }
+        }
+        vns.createNetwork(network.guid, network.name, gw);
+        setStatus(Status.SUCCESS_OK);
+        return "{\"status\":\"ok\"}";
+    }
+    
+    @Delete
+    public String deleteNetwork() {
+        IVirtualNetworkService vns =
+                (IVirtualNetworkService)getContext().getAttributes().
+                    get(IVirtualNetworkService.class.getCanonicalName());
+        String guid = (String) getRequestAttributes().get("network");
+        vns.deleteNetwork(guid);
+        setStatus(Status.SUCCESS_OK);
+        return "{\"status\":\"ok\"}";
+    }
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/virtualnetwork/NoOp.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/virtualnetwork/NoOp.java
new file mode 100644
index 0000000..a184a95
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/virtualnetwork/NoOp.java
@@ -0,0 +1,21 @@
+package net.floodlightcontroller.virtualnetwork;
+
+import org.restlet.data.Status;
+import org.restlet.resource.Get;
+import org.restlet.resource.Post;
+import org.restlet.resource.Put;
+import org.restlet.resource.ServerResource;
+
+public class NoOp extends ServerResource {
+	/**
+	 * Does nothing and returns 200 OK with a status message
+	 * @return status: ok
+	 */
+	@Get
+	@Put
+	@Post
+	public String noOp(String postdata) {
+		setStatus(Status.SUCCESS_OK);
+        return "{\"status\":\"ok\"}";
+	}
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/virtualnetwork/VirtualNetwork.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/virtualnetwork/VirtualNetwork.java
new file mode 100644
index 0000000..f5dfb21
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/virtualnetwork/VirtualNetwork.java
@@ -0,0 +1,88 @@
+package net.floodlightcontroller.virtualnetwork;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Iterator;
+import org.codehaus.jackson.map.annotate.JsonSerialize;
+
+import net.floodlightcontroller.util.MACAddress;
+
+/**
+ * Data structure for storing and outputting information about a virtual network created
+ * by VirtualNetworkFilter
+ * 
+ * @author KC Wang
+ */
+
+@JsonSerialize(using=VirtualNetworkSerializer.class)
+public class VirtualNetwork{
+    protected String name; // network name
+    protected String guid; // network id
+    protected String gateway; // network gateway
+    protected Collection<MACAddress> hosts; // array of hosts explicitly added to this network
+
+    /**
+     * Constructor requires network name and id
+     * @param name: network name
+     * @param guid: network id 
+     */
+    public VirtualNetwork(String name, String guid) {
+        this.name = name;
+        this.guid = guid;
+        this.gateway = null;
+        this.hosts = new ArrayList<MACAddress>();
+        return;        
+    }
+
+    /**
+     * Sets network name
+     * @param name: network name as String
+     */
+    public void setName(String name){
+        this.name = name;
+        return;                
+    }
+    
+    /**
+     * Sets network gateway IP address
+     * @param gateway: IP address as String
+     */
+    public void setGateway(String gateway){
+        this.gateway = gateway;
+        return;                
+    }
+    
+    /**
+     * Adds a host to this network record
+     * @param host: MAC address as MACAddress
+     */
+    public void addHost(MACAddress host){
+        this.hosts.add(host);
+        return;        
+    }
+    
+    /**
+     * Removes a host from this network record
+     * @param host: MAC address as MACAddress
+     * @return boolean: true: removed, false: host not found
+     */
+    public boolean removeHost(MACAddress host){
+        Iterator<MACAddress> iter = this.hosts.iterator();
+        while(iter.hasNext()){
+            MACAddress element = iter.next();
+            if(element.equals(host) ){
+                //assuming MAC address for host is unique
+                iter.remove();
+                return true;
+            }                
+        }
+        return false;
+    }
+    
+    /**
+     * Removes all hosts from this network record
+     */
+    public void clearHosts(){
+        this.hosts.clear();
+    }
+}
\ No newline at end of file
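
A small sketch exercising the VirtualNetwork record directly; the network name, GUID, gateway and MAC values are made up.

import net.floodlightcontroller.util.MACAddress;
import net.floodlightcontroller.virtualnetwork.VirtualNetwork;

public class VirtualNetworkDemo {
    public static void main(String[] args) {
        VirtualNetwork net = new VirtualNetwork("demo-net", "guid-1");
        net.setGateway("10.0.0.1");

        MACAddress host = MACAddress.valueOf("00:00:00:00:00:01");
        net.addHost(host);
        System.out.println(net.removeHost(host)); // true: host was attached
        System.out.println(net.removeHost(MACAddress.valueOf("00:00:00:00:00:02"))); // false: unknown host
    }
}
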
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/virtualnetwork/VirtualNetworkFilter.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/virtualnetwork/VirtualNetworkFilter.java
new file mode 100644
index 0000000..012dfb6
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/virtualnetwork/VirtualNetworkFilter.java
@@ -0,0 +1,521 @@
+package net.floodlightcontroller.virtualnetwork;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+
+import org.openflow.protocol.OFFlowMod;
+import org.openflow.protocol.OFMatch;
+import org.openflow.protocol.OFMessage;
+import org.openflow.protocol.OFPacketIn;
+import org.openflow.protocol.OFPacketOut;
+import org.openflow.protocol.OFType;
+import org.openflow.protocol.action.OFAction;
+import org.openflow.util.HexString;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import net.floodlightcontroller.core.FloodlightContext;
+import net.floodlightcontroller.core.IFloodlightProviderService;
+import net.floodlightcontroller.core.IOFMessageListener;
+import net.floodlightcontroller.core.IOFSwitch;
+import net.floodlightcontroller.core.module.FloodlightModuleContext;
+import net.floodlightcontroller.core.module.FloodlightModuleException;
+import net.floodlightcontroller.core.module.IFloodlightModule;
+import net.floodlightcontroller.core.module.IFloodlightService;
+import net.floodlightcontroller.core.util.AppCookie;
+import net.floodlightcontroller.devicemanager.IDevice;
+import net.floodlightcontroller.devicemanager.IDeviceListener;
+import net.floodlightcontroller.devicemanager.IDeviceService;
+import net.floodlightcontroller.packet.DHCP;
+import net.floodlightcontroller.packet.Ethernet;
+import net.floodlightcontroller.packet.IPacket;
+import net.floodlightcontroller.packet.IPv4;
+import net.floodlightcontroller.restserver.IRestApiService;
+import net.floodlightcontroller.routing.ForwardingBase;
+import net.floodlightcontroller.util.MACAddress;
+
+/**
+ * A simple Layer 2 (MAC based) network virtualization module. This module allows
+ * you to create simple L2 networks (host + gateway) and will drop traffic if
+ * they are not on the same virtual network.
+ * 
+ * LIMITATIONS
+ * - This module does not allow overlapping of IPs or MACs
+ * - You can only have 1 gateway per virtual network (can be shared)
+ * - There is no filtering of multicast/broadcast traffic
+ * - All DHCP traffic will be allowed, regardless of unicast/broadcast
+ * 
+ * @author alexreimers
+ */
+public class VirtualNetworkFilter 
+    implements IFloodlightModule, IVirtualNetworkService, IOFMessageListener, IDeviceListener {
+    protected static Logger log = LoggerFactory.getLogger(VirtualNetworkFilter.class);
+    
+    private final short APP_ID = 20;
+    
+    // Our dependencies
+    IFloodlightProviderService floodlightProvider;
+    IRestApiService restApi;
+    IDeviceService deviceService;
+    
+    // Our internal state
+    protected Map<String, VirtualNetwork> vNetsByGuid; // List of all created virtual networks 
+    protected Map<String, String> nameToGuid; // Logical name -> Network ID
+    protected Map<String, Integer> guidToGateway; // Network ID -> Gateway IP
+    protected Map<Integer, Set<String>> gatewayToGuid; // Gateway IP -> Network ID
+    protected Map<MACAddress, Integer> macToGateway; // Gateway MAC -> Gateway IP
+    protected Map<MACAddress, String> macToGuid; // Host MAC -> Network ID
+    protected Map<String, MACAddress> portToMac; // Logical port name -> host MAC
+    
+    /**
+     * Adds a gateway to a virtual network.
+     * @param guid The ID (not name) of the network.
+     * @param ip The IP address of the gateway.
+     */
+    protected void addGateway(String guid, Integer ip) {
+        if (ip.intValue() != 0) {
+        	if (log.isDebugEnabled())
+        		log.debug("Adding {} as gateway for GUID {}",
+        				IPv4.fromIPv4Address(ip), guid);
+        	
+            guidToGateway.put(guid, ip);
+            if (vNetsByGuid.get(guid) != null)
+                vNetsByGuid.get(guid).setGateway(IPv4.fromIPv4Address(ip));
+            if (gatewayToGuid.containsKey(ip)) {
+                Set<String> gSet = gatewayToGuid.get(ip);
+                gSet.add(guid);
+            } else {
+                Set<String> gSet = Collections.synchronizedSet(new HashSet<String>());
+                gSet.add(guid);
+                gatewayToGuid.put(ip, gSet);
+            }
+        }
+    }
+    
+    /**
+     * Deletes a gateway for a virtual network.
+     * @param guid The ID (not name) of the network to delete
+     * the gateway for.
+     */
+    protected void deleteGateway(String guid) {
+        Integer gwIp = guidToGateway.remove(guid);
+        if (gwIp == null) return;
+        Set<String> gSet = gatewayToGuid.get(gwIp);
+        gSet.remove(guid);
+        if(vNetsByGuid.get(guid)!=null)
+            vNetsByGuid.get(guid).setGateway(null);
+    }
+    
+    // IVirtualNetworkService
+    
+    @Override
+    public void createNetwork(String guid, String network, Integer gateway) {
+        if (log.isDebugEnabled()) {
+            String gw = null;
+            try {
+                gw = IPv4.fromIPv4Address(gateway);
+            } catch (Exception e) {
+                // fail silently
+            }
+            log.debug("Creating network {} with ID {} and gateway {}", 
+                      new Object[] {network, guid, gw});
+        }
+        
+        if (!nameToGuid.isEmpty()) {
+            // We have to iterate all the networks to handle name/gateway changes
+            for (Entry<String, String> entry : nameToGuid.entrySet()) {
+                if (entry.getValue().equals(guid)) {
+                    nameToGuid.remove(entry.getKey());
+                    break;
+                }
+            }
+        }
+        nameToGuid.put(network, guid);
+        if (vNetsByGuid.containsKey(guid))
+            vNetsByGuid.get(guid).setName(network); //network already exists, just updating name
+        else
+            vNetsByGuid.put(guid, new VirtualNetwork(network, guid)); //new network
+        
+        // If they don't specify a new gateway the old one will be preserved
+        if ((gateway != null) && (gateway != 0)) {
+            addGateway(guid, gateway);
+            if(vNetsByGuid.get(guid)!=null)
+                vNetsByGuid.get(guid).setGateway(IPv4.fromIPv4Address(gateway));
+        }
+    }
+
+    @Override
+    public void deleteNetwork(String guid) {
+        String name = null;
+        if (nameToGuid.isEmpty()) {
+            log.warn("Could not delete network with ID {}, network doesn't exist",
+                     guid);
+            return;
+        }
+        for (Entry<String, String> entry : nameToGuid.entrySet()) {
+            if (entry.getValue().equals(guid)) {
+                name = entry.getKey();
+                break;
+            }
+        }
+        if (name == null) {
+            log.warn("Could not delete network with ID {}, network doesn't exist",
+                     guid);
+            return;
+        }
+        
+        if (log.isDebugEnabled()) 
+            log.debug("Deleting network with name {} ID {}", name, guid);
+        
+        nameToGuid.remove(name);
+        deleteGateway(guid);
+        if(vNetsByGuid.get(guid)!=null){
+            vNetsByGuid.get(guid).clearHosts();
+            vNetsByGuid.remove(guid);
+        }
+        Collection<MACAddress> deleteList = new ArrayList<MACAddress>();
+        for (MACAddress host : macToGuid.keySet()) {
+            if (macToGuid.get(host).equals(guid)) {
+                deleteList.add(host);
+            }
+        }
+        for (MACAddress mac : deleteList) {
+            if (log.isDebugEnabled()) {
+                log.debug("Removing host {} from network {}", 
+                          HexString.toHexString(mac.toBytes()), guid);
+            }
+            macToGuid.remove(mac);
+            for (Entry<String, MACAddress> entry : portToMac.entrySet()) {
+                if (entry.getValue().equals(mac)) {
+                    portToMac.remove(entry.getKey());
+                    break;
+                }
+            }
+        }
+    }
+
+    @Override
+    public void addHost(MACAddress mac, String guid, String port) {
+        if (guid != null) {
+            if (log.isDebugEnabled()) {
+                log.debug("Adding {} to network ID {} on port {}",
+                          new Object[] {mac, guid, port});
+            }
+            // We ignore old mappings
+            macToGuid.put(mac, guid);
+            portToMac.put(port, mac);
+            if(vNetsByGuid.get(guid)!=null)
+                vNetsByGuid.get(guid).addHost(new MACAddress(mac.toBytes()));
+        } else {
+            log.warn("Could not add MAC {} to network ID {} on port {}, the network does not exist",
+                     new Object[] {mac, guid, port});
+        }
+    }
+
+    @Override
+    public void deleteHost(MACAddress mac, String port) {
+        if (log.isDebugEnabled()) {
+            log.debug("Removing host {} from port {}", mac, port);
+        }
+        if (mac == null && port == null) return;
+        if (port != null) {
+            MACAddress host = portToMac.remove(port);
+            if(vNetsByGuid.get(macToGuid.get(host)) != null)
+                vNetsByGuid.get(macToGuid.get(host)).removeHost(host);
+            macToGuid.remove(host);
+        } else if (mac != null) {
+            if (!portToMac.isEmpty()) {
+                for (Entry<String, MACAddress> entry : portToMac.entrySet()) {
+                    if (entry.getValue().equals(mac)) {
+                        if(vNetsByGuid.get(macToGuid.get(entry.getValue())) != null)
+                            vNetsByGuid.get(macToGuid.get(entry.getValue())).removeHost(entry.getValue());
+                        portToMac.remove(entry.getKey());
+                        macToGuid.remove(entry.getValue());
+                        return;
+                    }
+                }
+            }
+        }
+    }
+    
+    // IFloodlightModule
+    
+    @Override
+    public Collection<Class<? extends IFloodlightService>> getModuleServices() {
+        Collection<Class<? extends IFloodlightService>> l = 
+                new ArrayList<Class<? extends IFloodlightService>>();
+        l.add(IVirtualNetworkService.class);
+        return l;
+    }
+
+    @Override
+    public Map<Class<? extends IFloodlightService>, IFloodlightService>
+            getServiceImpls() {
+        Map<Class<? extends IFloodlightService>,
+            IFloodlightService> m = 
+                new HashMap<Class<? extends IFloodlightService>,
+                    IFloodlightService>();
+        m.put(IVirtualNetworkService.class, this);
+        return m;
+    }
+
+    @Override
+    public Collection<Class<? extends IFloodlightService>> getModuleDependencies() {
+        Collection<Class<? extends IFloodlightService>> l = 
+                new ArrayList<Class<? extends IFloodlightService>>();
+        l.add(IFloodlightProviderService.class);
+        l.add(IRestApiService.class);
+        l.add(IDeviceService.class);
+        return l;
+    }
+
+    @Override
+    public void init(FloodlightModuleContext context)  
+                                 throws FloodlightModuleException {
+        floodlightProvider = context.getServiceImpl(IFloodlightProviderService.class);
+        restApi = context.getServiceImpl(IRestApiService.class);
+        deviceService = context.getServiceImpl(IDeviceService.class);
+        
+        vNetsByGuid = new ConcurrentHashMap<String, VirtualNetwork>();
+        nameToGuid = new ConcurrentHashMap<String, String>();
+        guidToGateway = new ConcurrentHashMap<String, Integer>();
+        gatewayToGuid = new ConcurrentHashMap<Integer, Set<String>>();
+        macToGuid = new ConcurrentHashMap<MACAddress, String>();
+        portToMac = new ConcurrentHashMap<String, MACAddress>();
+        macToGateway = new ConcurrentHashMap<MACAddress, Integer>();
+    }
+
+    @Override
+    public void startUp(FloodlightModuleContext context) {
+        floodlightProvider.addOFMessageListener(OFType.PACKET_IN, this);
+        restApi.addRestletRoutable(new VirtualNetworkWebRoutable());
+        deviceService.addListener(this);
+    }
+
+    // IOFMessageListener
+    
+    @Override
+    public String getName() {
+        return "virtualizer";
+    }
+
+    @Override
+    public boolean isCallbackOrderingPrereq(OFType type, String name) {
+        // Link discovery should go before us so we don't block LLDPs
+        return (type.equals(OFType.PACKET_IN) && 
+        		(name.equals("linkdiscovery") || (name.equals("devicemanager"))));
+    }
+
+    @Override
+    public boolean isCallbackOrderingPostreq(OFType type, String name) {
+        // We need to go before forwarding
+        return (type.equals(OFType.PACKET_IN) && name.equals("forwarding"));
+    }
+
+    @Override
+    public Command receive(IOFSwitch sw, OFMessage msg, FloodlightContext cntx) {
+        switch (msg.getType()) {
+            case PACKET_IN:
+                return processPacketIn(sw, (OFPacketIn)msg, cntx);
+            default:
+            	break;
+        }
+        log.warn("Received unexpected message {}", msg);
+        return Command.CONTINUE;
+    }
+    
+    /**
+     * Checks whether the frame is destined to or from a gateway.
+     * @param frame The ethernet frame to check.
+     * @return True if it is to/from a gateway, false otherwise.
+     */
+    protected boolean isDefaultGateway(Ethernet frame) {
+    	if (macToGateway.containsKey(frame.getSourceMAC()))
+    		return true;
+    	
+    	Integer gwIp = macToGateway.get(frame.getDestinationMAC());
+    	if (gwIp != null) {
+    		MACAddress host = frame.getSourceMAC();
+    		String srcNet = macToGuid.get(host);
+    		if (srcNet != null) {
+	    		Integer gwIpSrcNet = guidToGateway.get(srcNet);
+	    		if ((gwIpSrcNet != null) && (gwIp.equals(gwIpSrcNet)))
+	    			return true;
+    		}
+    	}
+
+    	return false;
+    }
+    
+    /**
+     * Checks to see if two MAC Addresses are on the same network.
+     * @param m1 The first MAC.
+     * @param m2 The second MAC.
+     * @return True if they are on the same virtual network,
+     * 		   false otherwise.
+     */
+    protected boolean oneSameNetwork(MACAddress m1, MACAddress m2) {
+        String net1 = macToGuid.get(m1);
+        String net2 = macToGuid.get(m2);
+        if (net1 == null) return false;
+        if (net2 == null) return false;
+        return net1.equals(net2);
+    }
+    
+    /**
+     * Checks to see if an Ethernet frame is a DHCP packet.
+     * @param frame The Ethernet frame.
+     * @return True if it is a DHCP frame, false otherwise.
+     */
+    protected boolean isDhcpPacket(Ethernet frame) {
+        IPacket payload = frame.getPayload(); // IP
+        if (payload == null) return false;
+        IPacket p2 = payload.getPayload(); // TCP or UDP
+        if (p2 == null) return false;
+        IPacket p3 = p2.getPayload(); // Application
+        if ((p3 != null) && (p3 instanceof DHCP)) return true;
+        return false;
+    }
+    
+    /**
+     * Processes an OFPacketIn message and decides if the OFPacketIn should be dropped
+     * or the processing should continue.
+     * @param sw The switch the PacketIn came from.
+     * @param msg The OFPacketIn message from the switch.
+     * @param cntx The FloodlightContext for this message.
+     * @return Command.CONTINUE if processing should be continued, Command.STOP otherwise.
+     */
+    protected Command processPacketIn(IOFSwitch sw, OFPacketIn msg, FloodlightContext cntx) {
+        Ethernet eth = IFloodlightProviderService.bcStore.get(cntx, 
+                                              IFloodlightProviderService.CONTEXT_PI_PAYLOAD);
+        Command ret = Command.STOP;
+        String srcNetwork = macToGuid.get(eth.getSourceMAC());
+        // If the host is on an unknown network we deny it.
+        // We make exceptions for broadcast/multicast traffic, gateway traffic, and DHCP.
+        if (eth.isBroadcast() || eth.isMulticast() || isDefaultGateway(eth) || isDhcpPacket(eth)) {
+        	ret = Command.CONTINUE;
+        } else if (srcNetwork == null) {
+            log.trace("Blocking traffic from host {} because it is not attached to any network.",
+                      HexString.toHexString(eth.getSourceMACAddress()));
+            ret = Command.STOP;
+        } else if (oneSameNetwork(eth.getSourceMAC(), eth.getDestinationMAC())) {
+            // if they are on the same network continue
+            ret = Command.CONTINUE;
+        }
+        
+        if (log.isTraceEnabled())
+        	log.trace("Results for flow between {} and {} is {}",
+        			new Object[] {eth.getSourceMAC(), eth.getDestinationMAC(), ret});
+        /*
+         * TODO - figure out how to still detect gateways while using
+         * drop mods 
+        if (ret == Command.STOP) {
+            if (!(eth.getPayload() instanceof ARP))
+            	doDropFlow(sw, msg, cntx);
+        }
+        */
+        return ret;
+    }
+    
+    /**
+     * Writes a FlowMod to a switch that inserts a drop flow.
+     * @param sw The switch to write the FlowMod to.
+     * @param pi The corresponding OFPacketIn. Used to create the OFMatch structure.
+     * @param cntx The FloodlightContext that gets passed to the switch.
+     */
+    protected void doDropFlow(IOFSwitch sw, OFPacketIn pi, FloodlightContext cntx) {
+        if (log.isTraceEnabled()) {
+            log.trace("doDropFlow pi={} srcSwitch={}",
+                    new Object[] { pi, sw });
+        }
+
+        if (sw == null) {
+            log.warn("Switch is null, not installing drop flowmod for PacketIn {}", pi);
+            return;
+        }
+
+        // Create flow-mod based on packet-in and src-switch
+        OFFlowMod fm = 
+            (OFFlowMod) floodlightProvider.getOFMessageFactory().getMessage(OFType.FLOW_MOD);
+        OFMatch match = new OFMatch();
+        match.loadFromPacket(pi.getPacketData(), pi.getInPort());
+        List<OFAction> actions = new ArrayList<OFAction>(); // no actions = drop
+        long cookie = AppCookie.makeCookie(APP_ID, 0);
+        fm.setCookie(cookie)
+        .setIdleTimeout(ForwardingBase.FLOWMOD_DEFAULT_IDLE_TIMEOUT)
+        .setHardTimeout(ForwardingBase.FLOWMOD_DEFAULT_HARD_TIMEOUT)
+        .setBufferId(OFPacketOut.BUFFER_ID_NONE)
+        .setMatch(match)
+        .setActions(actions)
+        .setLengthU(OFFlowMod.MINIMUM_LENGTH);
+        fm.setFlags(OFFlowMod.OFPFF_SEND_FLOW_REM);
+        try {
+            if (log.isTraceEnabled()) {
+                log.trace("write drop flow-mod srcSwitch={} match={} " + 
+                          "pi={} flow-mod={}",
+                          new Object[] {sw, match, pi, fm});
+            }
+            sw.write(fm, cntx);
+        } catch (IOException e) {
+            log.error("Failure writing drop flow mod", e);
+        }
+        return;
+    }
+
+    // IDeviceListener
+    
+    @Override
+    public void deviceAdded(IDevice device) {
+        if (device.getIPv4Addresses() == null) return;
+        for (Integer i : device.getIPv4Addresses()) {
+            if (gatewayToGuid.containsKey(i)) {
+                MACAddress mac = MACAddress.valueOf(device.getMACAddress());
+                if (log.isDebugEnabled())
+                    log.debug("Adding MAC {} with IP {} as a gateway",
+                              HexString.toHexString(mac.toBytes()),
+                              IPv4.fromIPv4Address(i));
+                macToGateway.put(mac, i);
+            }
+        }
+    }
+
+    @Override
+    public void deviceRemoved(IDevice device) {
+        // if the device is a gateway, remove it
+        MACAddress mac = MACAddress.valueOf(device.getMACAddress());
+        if (macToGateway.containsKey(mac)) {
+            if (log.isDebugEnabled())
+                log.debug("Removing MAC {} as a gateway",
+                          HexString.toHexString(mac.toBytes()));
+            macToGateway.remove(mac);
+        }
+    }
+
+    @Override
+    public void deviceIPV4AddrChanged(IDevice device) {
+        // add or remove entry as gateway
+        deviceAdded(device);
+    }
+
+    @Override
+    public void deviceMoved(IDevice device) {
+        // ignore
+    }
+
+    @Override
+    public void deviceVlanChanged(IDevice device) {
+        // ignore
+    }
+
+    @Override
+    public Collection<VirtualNetwork> listNetworks() {
+        return vNetsByGuid.values();
+    }
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/virtualnetwork/VirtualNetworkSerializer.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/virtualnetwork/VirtualNetworkSerializer.java
new file mode 100644
index 0000000..6902f6c
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/virtualnetwork/VirtualNetworkSerializer.java
@@ -0,0 +1,38 @@
+package net.floodlightcontroller.virtualnetwork;
+
+import java.io.IOException;
+import java.util.Iterator;
+
+import net.floodlightcontroller.util.MACAddress;
+
+import org.codehaus.jackson.JsonGenerator;
+import org.codehaus.jackson.JsonProcessingException;
+import org.codehaus.jackson.map.JsonSerializer;
+import org.codehaus.jackson.map.SerializerProvider;
+
+/**
+ * Serialize a VirtualNetwork object
+ * @author KC Wang
+ */
+public class VirtualNetworkSerializer extends JsonSerializer<VirtualNetwork> {
+
+    @Override
+    public void serialize(VirtualNetwork vNet, JsonGenerator jGen,
+            SerializerProvider serializer) throws IOException,
+            JsonProcessingException {
+        jGen.writeStartObject();
+        
+        jGen.writeStringField("name", vNet.name);
+        jGen.writeStringField("guid", vNet.guid);
+        jGen.writeStringField("gateway", vNet.gateway);
+
+        jGen.writeArrayFieldStart("mac");
+        Iterator<MACAddress> hit = vNet.hosts.iterator();
+        while (hit.hasNext())
+            jGen.writeString(hit.next().toString());
+        jGen.writeEndArray();
+        
+        jGen.writeEndObject();
+    }
+
+}
diff --git a/src/ext/floodlight/src/main/java/net/floodlightcontroller/virtualnetwork/VirtualNetworkWebRoutable.java b/src/ext/floodlight/src/main/java/net/floodlightcontroller/virtualnetwork/VirtualNetworkWebRoutable.java
new file mode 100644
index 0000000..61769ec
--- /dev/null
+++ b/src/ext/floodlight/src/main/java/net/floodlightcontroller/virtualnetwork/VirtualNetworkWebRoutable.java
@@ -0,0 +1,26 @@
+package net.floodlightcontroller.virtualnetwork;
+
+import org.restlet.Context;
+import org.restlet.Restlet;
+import org.restlet.routing.Router;
+
+import net.floodlightcontroller.restserver.RestletRoutable;
+
+public class VirtualNetworkWebRoutable implements RestletRoutable {
+
+    @Override
+    public Restlet getRestlet(Context context) {
+        Router router = new Router(context);
+        router.attach("/tenants/{tenant}/networks", NetworkResource.class); // GET
+        router.attach("/tenants/{tenant}/networks/{network}", NetworkResource.class); // PUT, DELETE
+        router.attach("/tenants/{tenant}/networks", NetworkResource.class); // POST
+        router.attach("/tenants/{tenant}/networks/{network}/ports/{port}/attachment", HostResource.class);
+        router.attachDefault(NoOp.class);
+        return router;
+    }
+
+    @Override
+    public String basePath() {
+        return "/quantum/v1.0";
+    }
+}
\ No newline at end of file