Merge from upstream/master
diff --git a/.gitignore b/.gitignore
index dd8d359..7e971f7 100644
--- a/.gitignore
+++ b/.gitignore
@@ -3,6 +3,7 @@
 .classpath
 .project
 .pydevproject
+.settings
 target
 onos-logs
 onos.log
diff --git a/LICENSE.txt b/LICENSE.txt
new file mode 100644
index 0000000..bf95ecc
--- /dev/null
+++ b/LICENSE.txt
@@ -0,0 +1,202 @@
+Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "{}"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright 2013 Open Networking Laboratory, USA
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+
diff --git a/NOTICE.txt b/NOTICE.txt
new file mode 100644
index 0000000..5be37b5
--- /dev/null
+++ b/NOTICE.txt
@@ -0,0 +1,12 @@
+INDEMNITY AND DISCLAIMER OF WARRANTIES
+
+SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
+BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
+CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/PARTNERS.txt b/PARTNERS.txt
new file mode 100644
index 0000000..df868d6
--- /dev/null
+++ b/PARTNERS.txt
@@ -0,0 +1 @@
+Add special notes for partnership relationship.
diff --git a/README.md b/README.md
index 9f2c040..cbe9097 100644
--- a/README.md
+++ b/README.md
@@ -5,6 +5,11 @@
 SDN OS. Currently, it is under active development. ONOS was announced
 and demonstrated at ONS'13.
 
+License
+=======
+Apache 2.0
+
+
 Steps to download and setup a development Virtual Machine
 ==========================================
 
diff --git a/conf/cassandra.titan b/conf/cassandra.titan
index c36ecc0..9a9b00f 100644
--- a/conf/cassandra.titan
+++ b/conf/cassandra.titan
@@ -2,6 +2,6 @@
 storage.hostname=localhost
 storage.keyspace=onos
 storage.connection-pool-size=4096
-storage.replication-factor=3
+storage.replication-factor=1
 storage.write-consistency-level=ALL
 storage.read-consistency-level=ONE
diff --git a/conf/hazelcast.titan b/conf/hazelcast.titan
new file mode 100644
index 0000000..d4719fa
--- /dev/null
+++ b/conf/hazelcast.titan
@@ -0,0 +1,2 @@
+storage.backend=hazelcastcache
+storage.directory=/tmp/cache
diff --git a/conf/hazelcast.xml b/conf/hazelcast.xml
index 11bef59..84c7354 100644
--- a/conf/hazelcast.xml
+++ b/conf/hazelcast.xml
@@ -101,5 +101,6 @@
 
   <properties>
     <property name="hazelcast.logging.type">slf4j</property>
+    <property name="hazelcast.version.check.enabled">false</property>
   </properties>
 </hazelcast>
diff --git a/conf/onos-embedded.properties b/conf/onos-embedded.properties
index e280e41..8ae143e 100644
--- a/conf/onos-embedded.properties
+++ b/conf/onos-embedded.properties
@@ -1,11 +1,7 @@
-floodlight.modules = net.floodlightcontroller.storage.memory.MemoryStorageSource,\
-net.floodlightcontroller.core.FloodlightProvider,\
+floodlight.modules = net.floodlightcontroller.core.FloodlightProvider,\
 net.floodlightcontroller.threadpool.ThreadPool,\
 net.onrc.onos.ofcontroller.floodlightlistener.NetworkGraphPublisher, \
 net.floodlightcontroller.devicemanager.internal.DeviceManagerImpl,\
-net.floodlightcontroller.staticflowentry.StaticFlowEntryPusher,\
-net.floodlightcontroller.counter.CounterStore,\
-net.floodlightcontroller.perfmon.PktInProcessingTime,\
 net.floodlightcontroller.ui.web.StaticWebRoutable,\
 net.onrc.onos.datagrid.HazelcastDatagrid,\
 net.onrc.onos.ofcontroller.flowmanager.FlowManager,\
diff --git a/conf/onos.properties b/conf/onos.properties
index d93f594..ec04622 100644
--- a/conf/onos.properties
+++ b/conf/onos.properties
@@ -1,11 +1,7 @@
-floodlight.modules = net.floodlightcontroller.storage.memory.MemoryStorageSource,\
-net.floodlightcontroller.core.FloodlightProvider,\
+floodlight.modules = net.floodlightcontroller.core.FloodlightProvider,\
 net.floodlightcontroller.threadpool.ThreadPool,\
 net.onrc.onos.ofcontroller.floodlightlistener.NetworkGraphPublisher, \
 net.floodlightcontroller.devicemanager.internal.DeviceManagerImpl,\
-net.floodlightcontroller.staticflowentry.StaticFlowEntryPusher,\
-net.floodlightcontroller.counter.CounterStore,\
-net.floodlightcontroller.perfmon.PktInProcessingTime,\
 net.floodlightcontroller.ui.web.StaticWebRoutable,\
 net.onrc.onos.datagrid.HazelcastDatagrid,\
 net.onrc.onos.ofcontroller.flowmanager.FlowManager,\
diff --git a/curator/curator-client-1.3.5-SNAPSHOT.jar b/curator/curator-client-1.3.5-SNAPSHOT.jar
new file mode 100644
index 0000000..c22d602
--- /dev/null
+++ b/curator/curator-client-1.3.5-SNAPSHOT.jar
Binary files differ
diff --git a/curator/curator-framework-1.3.5-SNAPSHOT.jar b/curator/curator-framework-1.3.5-SNAPSHOT.jar
new file mode 100644
index 0000000..1b89270
--- /dev/null
+++ b/curator/curator-framework-1.3.5-SNAPSHOT.jar
Binary files differ
diff --git a/curator/curator-recipes-1.3.5-SNAPSHOT.jar b/curator/curator-recipes-1.3.5-SNAPSHOT.jar
new file mode 100644
index 0000000..30efe51
--- /dev/null
+++ b/curator/curator-recipes-1.3.5-SNAPSHOT.jar
Binary files differ
diff --git a/curator/curator-x-discovery-1.3.5-SNAPSHOT.jar b/curator/curator-x-discovery-1.3.5-SNAPSHOT.jar
new file mode 100644
index 0000000..147417e
--- /dev/null
+++ b/curator/curator-x-discovery-1.3.5-SNAPSHOT.jar
Binary files differ
diff --git a/lib/jamm-0.2.5.jar b/lib/jamm-0.2.5.jar
index e9baf75..ef8750d 100644
--- a/lib/jamm-0.2.5.jar
+++ b/lib/jamm-0.2.5.jar
Binary files differ
diff --git a/perf-scripts/flow-sync-perf.py b/perf-scripts/flow-sync-perf.py
new file mode 100755
index 0000000..f0af050
--- /dev/null
+++ b/perf-scripts/flow-sync-perf.py
@@ -0,0 +1,210 @@
+#!/usr/bin/python
+'''
+ Script that tests Flow Synchronizer performance
+ Author: Brian O'Connor <bocon@onlab.us>
+
+ Usage: 
+   1. Ensure that ONOS is running
+   2. sudo ./flow-sync-perf.py <list of tests>
+      e.g. sudo ./flow-sync-perf.py 1 10 100 1000 
+      or to run the default tests:
+      sudo ./flow-sync-perf.py
+   3. Results are CSV files in a date stamped directory
+'''
+
+import csv
+import os
+import sys
+from time import sleep, strftime
+from subprocess import Popen, call, check_output, PIPE
+from mininet.net import Mininet
+from mininet.topo import SingleSwitchTopo
+from mininet.node import RemoteController
+from mininet.cli import CLI
+from mininet.log import setLogLevel
+try:
+  import pexpect
+except:
+  # install pexpect if it cannot be found and re-import
+  print '* Installing Pexpect'
+  call( 'apt-get install -y python-pexpect', stdout=PIPE, shell=True )
+  import pexpect
+
+ONOS_HOME = '..'
+ONOS_LOG = '%s/onos-logs/onos.%s.log' % ( ONOS_HOME, check_output( 'hostname').strip() )
+print "ONOS Log File:", ONOS_LOG
+
+# Verify that tcpkill is installed
+if not Popen( 'which tcpkill', stdout=PIPE, shell=True).communicate():
+  print '* Installing tcpkill'
+  call( 'apt-get install -y dsniff', stdout=PIPE, shell=True )
+
+# ----------------- Tests scenarios -------------------------
+def doNothing(n):
+  print "Doing nothing with %d flows..." % n
+
+def addFakeFlows(n):
+  print "Adding %d random flows to switch..." % n
+  for i in range( 1, (n+1) ):
+    a = i / (256*256) % 256
+    b = i / 256 % 256
+    c = i % 256
+    ip = '10.%d.%d.%d' % (a,b,c)
+    call( 'ovs-ofctl add-flow s1 "ip, nw_src=%s/32, idle_timeout=0, hard_timeout=0, cookie=%d, actions=output:2"' % ( ip, i ), shell=True )
+
+def delFlowsFromSwitch(n):
+  print "Removing all %d flows from switch..." % n
+  call( 'ovs-ofctl del-flows s1', shell=True )
+
+
+# ----------------- Utility Functions -------------------------
+def wait(time, msg=None):
+  if msg:
+    print msg,
+  for i in range(time):
+    sys.stdout.write('.')
+    sys.stdout.flush()
+    sleep(1)
+  print ". done"
+
+def startNet(net):
+  tail = pexpect.spawn( 'tail -0f %s' % ONOS_LOG )
+  sleep(1) 
+  net.start()
+  print "Waiting for ONOS to detect the switch..."
+  index = tail.expect(['Sync time \(ms\)', pexpect.EOF, pexpect.TIMEOUT])
+  if index >= 1:
+    print '* ONOS not started'
+    net.stop()
+    exit(1)
+  tail.terminate()
+
+def dumpFlows():
+  return check_output( 'ovs-ofctl dump-flows s1', shell=True )
+
+def addFlowsToONOS(n):
+  print "Adding %d flows to ONOS" % n,
+  call( './generate_flows.py 1 %d > /tmp/flows.txt' % n, shell=True )
+  #call( '%s/web/add_flow.py -m onos -f /tmp/flows.txt' % ONOS_HOME, shell=True )
+  p = Popen( '%s/web/add_flow.py -m onos -f /tmp/flows.txt' % ONOS_HOME, shell=True )
+  while p.poll() is None:
+    sys.stdout.write('.')
+    sys.stdout.flush()
+    sleep(1)
+  print ". done\nWaiting for flow entries to be added to switch",
+  while True:
+    output = check_output( 'ovs-ofctl dump-flows s1', shell=True )
+    lines = len(output.split('\n'))
+    if lines >= (n+2):
+      break
+    sys.stdout.write('.')
+    sys.stdout.flush()
+    sleep(1)
+  print ". done\nWaiting for flow entries to be visible in network graph",
+  while True:
+    output = pexpect.spawn( '%s/web/get_flow.py all' % ONOS_HOME )
+    count = 0
+    while count < n:
+      if output.expect(['FlowEntry', pexpect.EOF], timeout=2000) == 1:
+        break
+      count += 1 
+      print '. done'
+      return
+    sys.stdout.write('.')
+    sys.stdout.flush()
+    sleep(5)
+
+def removeFlowsFromONOS(checkSwitch=True):
+  print "Removing all flows from ONOS",
+  #call( '%s/web/delete_flow.py all' % ONOS_HOME, shell=True )
+  p = Popen( '%s/web/delete_flow.py all' % ONOS_HOME, shell=True )
+  while p.poll() is None:
+    sys.stdout.write('.')
+    sys.stdout.flush()
+    sleep(1)
+  print ". done"
+  if checkSwitch:
+    print "Waiting for flow entries to be removed from switch",
+    while True:
+      output = check_output( 'ovs-ofctl dump-flows s1', shell=True )
+      lines = len(output.split('\n'))
+      if lines == 2:
+        break
+      sys.stdout.write('.')
+      sys.stdout.flush()
+      sleep(1)
+    print ". done"
+  print "Waiting for flow entries to be removed from network graph",
+  while True:
+    output = pexpect.spawn( '%s/web/get_flow.py all' % ONOS_HOME )
+    if output.expect(['FlowEntry', pexpect.EOF], timeout=2000) == 1:
+      break
+    sys.stdout.write('.')
+    sys.stdout.flush()
+    sleep(5)
+  print '. done'
+
+# ----------------- Running the test and output  -------------------------
+def test(i, fn):
+  # Start tailing the onos log
+  tail = pexpect.spawn( "tail -0f %s" % ONOS_LOG )
+  # disconnect the switch from the controller using tcpkill
+  tcp  = Popen( 'exec tcpkill -i lo -9 port 6633 > /dev/null 2>&1', shell=True )
+  # wait until the switch has been disconnected
+  tail.expect( 'Switch removed' )
+  # call the test function
+  fn(i) 
+  # dump to flows to ensure they have all made it to ovs
+  dumpFlows() 
+  # end tcpkill process to reconnect the switch to the controller
+  tcp.terminate()
+  tail.expect('Sync time \(ms\):', timeout=6000)
+  tail.expect('([\d.]+,?)+\s')
+  print "* Results:", tail.match.group(0)
+  tail.terminate()
+  wait(3, "Waiting for 3 seconds between tests")
+  return tail.match.group(0).strip().split(',')
+
+def initResults(files):
+  headers = ['# of FEs', 'Flow IDs from Graph', 'FEs from Switch', 'Compare', 
+             'Read FE from graph', 'Extract FE', 'Push', 'Total' ]
+  for filename in files.values():
+    with open(filename, 'w') as csvfile:
+      writer = csv.writer(csvfile)
+      writer.writerow(headers)
+
+def outputResults(filename, n, results):
+  results.insert(0, n)
+  with open(filename, 'a') as csvfile:
+    writer = csv.writer(csvfile)
+    writer.writerow(results)
+
+def runPerf( resultDir, tests):
+  fileMap = { 'add':    os.path.join(resultDir, 'add.csv'),
+              'delete': os.path.join(resultDir, 'delete.csv'),
+              'sync':   os.path.join(resultDir, 'sync.csv') }
+  initResults(fileMap)
+  removeFlowsFromONOS(checkSwitch=False) # clear ONOS before starting
+  # start Mininet
+  topo = SingleSwitchTopo()
+  net = Mininet(topo=topo, controller=RemoteController)
+  print "Starting Mininet"
+  startNet(net)
+  wait(30, "Give ONOS 30 seconds to warm up") # let ONOS "warm-up"
+  for i in tests:
+    addFlowsToONOS(i)
+    outputResults(fileMap['sync'],   i, test(i, doNothing))
+    outputResults(fileMap['delete'], i, test(i, delFlowsFromSwitch))
+    removeFlowsFromONOS()
+    outputResults(fileMap['add'],    i, test(i, addFakeFlows)) # test needs empty DB
+  net.stop()
+
+if __name__ == '__main__':
+  setLogLevel( 'output' )
+  resultDir = strftime( '%Y%m%d-%H%M%S' )
+  os.mkdir( resultDir )
+  tests = sys.argv[1:]
+  if not tests:
+    tests = [1, 10, 100, 1000, 10000]
+  runPerf( resultDir, tests )
+
diff --git a/perf-scripts/generate_flows.py b/perf-scripts/generate_flows.py
new file mode 100755
index 0000000..11d9c19
--- /dev/null
+++ b/perf-scripts/generate_flows.py
@@ -0,0 +1,90 @@
+#! /usr/bin/env python
+# -*- Mode: python; py-indent-offset: 4; tab-width: 8; indent-tabs-mode: t; -*-
+
+#
+# A script for generating a number of flows.
+#
+# The output of the script should be saved to a file, and the flows from
+# that file should be added by the following command:
+#
+#   web/add_flow.py -f filename
+# 
+# NOTE: Currently, some of the parameters of the flows are hard-coded,
+# and all flows are between same source and destination DPID and ports
+# (differentiated by different matchSrcMac and matchDstMac).
+#
+
+import copy
+import pprint
+import os
+import sys
+import subprocess
+import json
+import argparse
+import io
+import time
+
+## Global Var ##
+
+DEBUG=0
+pp = pprint.PrettyPrinter(indent=4)
+
+## Worker Functions ##
+def log_error(txt):
+  print '%s' % (txt)
+
+def debug(txt):
+  if DEBUG:
+    print '%s' % (txt)
+
+
+if __name__ == "__main__":
+  usage_msg = "Generate a number of flows by using a pre-defined template.\n"
+  usage_msg = usage_msg + "\n"
+  usage_msg = usage_msg + "NOTE: This script is work-in-progress. Currently all flows are within same\n"
+  usage_msg = usage_msg + "pair of switch ports and contain auto-generated MAC-based matching conditions.\n"
+  usage_msg = usage_msg + "\n"
+  usage_msg = usage_msg + "Usage: %s <begin-flow-id> <end-flow-id>\n" % (sys.argv[0])
+  usage_msg = usage_msg + "\n"
+  usage_msg = usage_msg + "    The output should be saved to a file, and the flows should be installed\n"
+  usage_msg = usage_msg + "    by using the command './add_flow.py -f filename'\n"
+
+
+  # app.debug = False;
+
+  # Usage info
+  if len(sys.argv) > 1 and (sys.argv[1] == "-h" or sys.argv[1] == "--help"):
+    print(usage_msg)
+    exit(0)
+
+  # Check arguments
+  if len(sys.argv) < 3:
+    log_error(usage_msg)
+    exit(1)
+
+  # Extract the arguments
+  begin_flow_id = int(sys.argv[1], 0)
+  end_flow_id = int(sys.argv[2], 0)
+  if begin_flow_id > end_flow_id:
+    log_error(usage_msg)
+    exit(1)
+
+  #
+  # Do the work
+  #
+  # NOTE: Currently, up to 65536 flows are supported.
+  # More flows can be supported by, say, iterating over some of
+  # the other bytes of the autogenerated source/destination MAC addresses.
+  #
+  flow_id = begin_flow_id
+  idx = 0
+  while flow_id <= end_flow_id:
+    mac3 = idx / 255
+    mac4 = idx % 255
+    str_mac3 = "%0.2x" % mac3
+    str_mac4 = "%0.2x" % mac4
+    src_mac = "00:00:" + str_mac3 + ":" + str_mac4 + ":00:00";
+    dst_mac = "00:01:" + str_mac3 + ":" + str_mac4 + ":00:00";
+    print "%s FOOBAR 00:00:00:00:00:00:00:01 1 00:00:00:00:00:00:00:01 2 matchSrcMac %s matchDstMac %s" % (flow_id, src_mac, dst_mac)
+    flow_id = flow_id + 1
+    idx = idx + 1
diff --git a/pom.xml b/pom.xml
index 3cd33e9..af6c709 100644
--- a/pom.xml
+++ b/pom.xml
@@ -31,7 +31,7 @@
   <properties>
     <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
     <powermock.version>1.5.1</powermock.version>
-    <restlet.version>2.1-RC1</restlet.version>
+    <restlet.version>2.1.4</restlet.version>
     <github.global.server>github</github.global.server>
   </properties>
   <build>
@@ -251,13 +251,11 @@
       <artifactId>frames</artifactId>
       <version>2.3.1</version>
     </dependency>
-    <!--
     <dependency>
       <groupId>com.tinkerpop.blueprints</groupId>
       <artifactId>blueprints-core</artifactId>
       <version>2.4.0</version>
     </dependency>
-    -->
     <dependency>
       <groupId>com.hazelcast</groupId>
       <artifactId>hazelcast</artifactId>
@@ -297,7 +295,7 @@
     <dependency>
       <groupId>ch.qos.logback</groupId>
       <artifactId>logback-classic</artifactId>
-      <version>1.0.0</version>
+      <version>1.0.13</version>
       <scope>runtime</scope>
     </dependency>
     <!-- Floodlight's dependencies -->
@@ -310,13 +308,13 @@
     <dependency>
       <groupId>com.google.guava</groupId>
       <artifactId>guava</artifactId>
-      <version>13.0.1</version>
+      <version>14.0.1</version>
     </dependency>
     -->
     <dependency>
       <groupId>org.slf4j</groupId>
       <artifactId>slf4j-api</artifactId>
-      <version>1.6.4</version>
+      <version>1.7.5</version>
     </dependency>
     <dependency>
       <groupId>org.restlet.jse</groupId>
@@ -341,12 +339,12 @@
     <dependency>
       <groupId>args4j</groupId>
       <artifactId>args4j</artifactId>
-      <version>2.0.16</version>
+      <version>2.0.25</version>
     </dependency>
     <dependency>
       <groupId>com.googlecode.concurrentlinkedhashmap</groupId>
       <artifactId>concurrentlinkedhashmap-lru</artifactId>
-      <version>1.3</version>
+      <version>1.4</version>
     </dependency>
     <!--<dependency>
       <groupId>org.python</groupId>
diff --git a/setup-local-maven.sh b/setup-local-maven.sh
index 371d50f..f8e00f8 100755
--- a/setup-local-maven.sh
+++ b/setup-local-maven.sh
@@ -7,10 +7,10 @@
 # Kryo2 workaround
 ${MVN} -f kryo2/pom.xml package exec:exec
 
-${MVN} install:install-file -Dfile=./lib/curator-framework-1.3.5-SNAPSHOT.jar -DgroupId=com.netflix.curator -DartifactId=curator-framework -Dversion=1.3.5-SNAPSHOT -Dpackaging=jar -DgeneratePom=true
-${MVN} install:install-file -Dfile=./lib/curator-client-1.3.5-SNAPSHOT.jar -DgroupId=com.netflix.curator -DartifactId=curator-client -Dversion=1.3.5-SNAPSHOT -Dpackaging=jar -DgeneratePom=true
-${MVN} install:install-file -Dfile=./lib/curator-recipes-1.3.5-SNAPSHOT.jar -DgroupId=com.netflix.curator -DartifactId=curator-recipes -Dversion=1.3.5-SNAPSHOT -Dpackaging=jar -DgeneratePom=true
-${MVN} install:install-file -Dfile=./lib/curator-x-discovery-1.3.5-SNAPSHOT.jar -DgroupId=com.netflix.curator -DartifactId=curator-x-discovery -Dversion=1.3.5-SNAPSHOT -Dpackaging=jar -DgeneratePom=true
+${MVN} install:install-file -Dfile=./curator/curator-framework-1.3.5-SNAPSHOT.jar -DgroupId=com.netflix.curator -DartifactId=curator-framework -Dversion=1.3.5-SNAPSHOT -Dpackaging=jar -DgeneratePom=true
+${MVN} install:install-file -Dfile=./curator/curator-client-1.3.5-SNAPSHOT.jar -DgroupId=com.netflix.curator -DartifactId=curator-client -Dversion=1.3.5-SNAPSHOT -Dpackaging=jar -DgeneratePom=true
+${MVN} install:install-file -Dfile=./curator/curator-recipes-1.3.5-SNAPSHOT.jar -DgroupId=com.netflix.curator -DartifactId=curator-recipes -Dversion=1.3.5-SNAPSHOT -Dpackaging=jar -DgeneratePom=true
+${MVN} install:install-file -Dfile=./curator/curator-x-discovery-1.3.5-SNAPSHOT.jar -DgroupId=com.netflix.curator -DartifactId=curator-x-discovery -Dversion=1.3.5-SNAPSHOT -Dpackaging=jar -DgeneratePom=true
 
 # download package dependencies
 ${MVN} dependency:go-offline
diff --git a/src/main/java/net/floodlightcontroller/core/FloodlightProvider.java b/src/main/java/net/floodlightcontroller/core/FloodlightProvider.java
index 4d85b7d..41676c4 100644
--- a/src/main/java/net/floodlightcontroller/core/FloodlightProvider.java
+++ b/src/main/java/net/floodlightcontroller/core/FloodlightProvider.java
@@ -10,10 +10,7 @@
 import net.floodlightcontroller.core.module.FloodlightModuleException;
 import net.floodlightcontroller.core.module.IFloodlightModule;
 import net.floodlightcontroller.core.module.IFloodlightService;
-import net.floodlightcontroller.counter.ICounterStoreService;
-import net.floodlightcontroller.perfmon.IPktInProcessingTimeService;
 import net.floodlightcontroller.restserver.IRestApiService;
-import net.floodlightcontroller.storage.IStorageSourceService;
 import net.floodlightcontroller.threadpool.IThreadPoolService;
 import net.onrc.onos.ofcontroller.linkdiscovery.ILinkDiscoveryService;
 import net.onrc.onos.registry.controller.IControllerRegistryService;
@@ -46,10 +43,7 @@
     public Collection<Class<? extends IFloodlightService>> getModuleDependencies() {
         Collection<Class<? extends IFloodlightService>> dependencies =
             new ArrayList<Class<? extends IFloodlightService>>(4);
-        dependencies.add(IStorageSourceService.class);
-        dependencies.add(IPktInProcessingTimeService.class);
         dependencies.add(IRestApiService.class);
-        dependencies.add(ICounterStoreService.class);
         dependencies.add(IThreadPoolService.class);
         // Following added by ONOS
         dependencies.add(IControllerRegistryService.class);
@@ -60,12 +54,6 @@
 
     @Override
     public void init(FloodlightModuleContext context) throws FloodlightModuleException {
-       controller.setStorageSourceService(
-           context.getServiceImpl(IStorageSourceService.class));
-       controller.setPktInProcessingService(
-           context.getServiceImpl(IPktInProcessingTimeService.class));
-       controller.setCounterStore(
-           context.getServiceImpl(ICounterStoreService.class));
        controller.setRestApiService(
            context.getServiceImpl(IRestApiService.class));
        controller.setThreadPoolService(
diff --git a/src/main/java/net/floodlightcontroller/core/IFloodlightProviderService.java b/src/main/java/net/floodlightcontroller/core/IFloodlightProviderService.java
index 22ff029..436af3d 100644
--- a/src/main/java/net/floodlightcontroller/core/IFloodlightProviderService.java
+++ b/src/main/java/net/floodlightcontroller/core/IFloodlightProviderService.java
@@ -83,11 +83,6 @@
     public Map<Long, IOFSwitch> getSwitches();
     
     /**
-     * Get the current role of the controller
-     */
-    public Role getRole();
-    
-    /**
      * Get the current mapping of controller IDs to their IP addresses
      * Returns a copy of the current mapping. 
      * @see IHAListener
@@ -100,11 +95,6 @@
     public String getControllerId();
     
     /**
-     * Set the role of the controller
-     */
-    public void setRole(Role role);
-    
-    /**
      * Add a switch listener
      * @param listener The module that wants to listen for events
      */
@@ -117,18 +107,6 @@
     public void removeOFSwitchListener(IOFSwitchListener listener);
     
     /**
-     * Adds a listener for HA role events
-     * @param listener The module that wants to listen for events
-     */
-    public void addHAListener(IHAListener listener);
-    
-    /**
-     * Removes a listener for HA role events
-     * @param listener The module that no longer wants to listen for events
-     */
-    public void removeHAListener(IHAListener listener);
-
-    /**
      * Terminate the process
      */
     public void terminate();
@@ -170,28 +148,6 @@
      * Run the main I/O loop of the Controller.
      */
     public void run();
-
-    /**
-     * Add an info provider of a particular type
-     * @param type
-     * @param provider
-     */
-    public void addInfoProvider(String type, IInfoProvider provider);
-
-   /**
-    * Remove an info provider of a particular type
-    * @param type
-    * @param provider
-    */
-   public void removeInfoProvider(String type, IInfoProvider provider);
-   
-   /**
-    * Return information of a particular type (for rest services)
-    * @param type
-    * @return
-    */
-   public Map<String, Object> getControllerInfo(String type);
-   
    
    /**
     * Return the controller start time in  milliseconds
diff --git a/src/main/java/net/floodlightcontroller/core/IHAListener.java b/src/main/java/net/floodlightcontroller/core/IHAListener.java
deleted file mode 100644
index c76f46a..0000000
--- a/src/main/java/net/floodlightcontroller/core/IHAListener.java
+++ /dev/null
@@ -1,30 +0,0 @@
-package net.floodlightcontroller.core;
-
-import java.util.Map;
-
-import net.floodlightcontroller.core.IFloodlightProviderService.Role;
-
-public interface IHAListener {
-    /**
-     * Gets called when the controller changes role (i.e. Master -> Slave).
-     * Note that oldRole CAN be null.
-     * @param oldRole The controller's old role
-     * @param newRole The controller's new role
-     */
-    public void roleChanged(Role oldRole, Role newRole);
-    
-    /**
-     * Gets called when the IP addresses of the controller nodes in the 
-     * controller cluster change. All parameters map controller ID to
-     * the controller's IP.
-     *  
-     * @param curControllerNodeIPs The current mapping of controller IDs to IP
-     * @param addedControllerNodeIPs These IPs were added since the last update
-     * @param removedControllerNodeIPs These IPs were removed since the last update
-     */
-    public void controllerNodeIPsChanged(
-    		Map<String, String> curControllerNodeIPs,  
-    		Map<String, String> addedControllerNodeIPs,  
-    		Map<String, String> removedControllerNodeIPs
-    		);
-}
diff --git a/src/main/java/net/floodlightcontroller/core/IInfoProvider.java b/src/main/java/net/floodlightcontroller/core/IInfoProvider.java
deleted file mode 100644
index 8bfae0d..0000000
--- a/src/main/java/net/floodlightcontroller/core/IInfoProvider.java
+++ /dev/null
@@ -1,34 +0,0 @@
-/**
-*    Copyright 2011, Big Switch Networks, Inc. 
-* 
-*    Licensed under the Apache License, Version 2.0 (the "License"); you may
-*    not use this file except in compliance with the License. You may obtain
-*    a copy of the License at
-*
-*         http://www.apache.org/licenses/LICENSE-2.0
-*
-*    Unless required by applicable law or agreed to in writing, software
-*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-*    License for the specific language governing permissions and limitations
-*    under the License.
-**/
-
-package net.floodlightcontroller.core;
-
-import java.util.Map;
-
-/**
- *
- *
- * @author Shudong Zhou
- */
-public interface IInfoProvider {
-
-    /**
-     * Called when rest API requests information of a particular type
-     * @param type
-     * @return
-     */
-    public Map<String, Object> getInfo(String type);
-}
diff --git a/src/main/java/net/floodlightcontroller/core/internal/Controller.java b/src/main/java/net/floodlightcontroller/core/internal/Controller.java
index 31f80cc..39bdf3c 100644
--- a/src/main/java/net/floodlightcontroller/core/internal/Controller.java
+++ b/src/main/java/net/floodlightcontroller/core/internal/Controller.java
@@ -19,19 +19,14 @@
 
 import java.io.FileInputStream;
 import java.io.IOException;
-import java.net.InetAddress;
 import java.net.InetSocketAddress;
-import java.net.SocketAddress;
 import java.net.UnknownHostException;
 import java.nio.channels.ClosedChannelException;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
-import java.util.Date;
 import java.util.HashMap;
 import java.util.HashSet;
-import java.util.Iterator;
-import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
@@ -51,8 +46,6 @@
 
 import net.floodlightcontroller.core.FloodlightContext;
 import net.floodlightcontroller.core.IFloodlightProviderService;
-import net.floodlightcontroller.core.IHAListener;
-import net.floodlightcontroller.core.IInfoProvider;
 import net.floodlightcontroller.core.IListener.Command;
 import net.floodlightcontroller.core.IOFMessageListener;
 import net.floodlightcontroller.core.IOFSwitch;
@@ -64,15 +57,8 @@
 import net.floodlightcontroller.core.internal.OFChannelState.HandshakeState;
 import net.floodlightcontroller.core.util.ListenerDispatcher;
 import net.floodlightcontroller.core.web.CoreWebRoutable;
-import net.floodlightcontroller.counter.ICounterStoreService;
 import net.floodlightcontroller.packet.Ethernet;
-import net.floodlightcontroller.perfmon.IPktInProcessingTimeService;
 import net.floodlightcontroller.restserver.IRestApiService;
-import net.floodlightcontroller.storage.IResultSet;
-import net.floodlightcontroller.storage.IStorageSourceListener;
-import net.floodlightcontroller.storage.IStorageSourceService;
-import net.floodlightcontroller.storage.OperatorPredicate;
-import net.floodlightcontroller.storage.StorageException;
 import net.floodlightcontroller.threadpool.IThreadPoolService;
 import net.onrc.onos.ofcontroller.core.IOFSwitchPortListener;
 import net.onrc.onos.ofcontroller.linkdiscovery.ILinkDiscoveryService;
@@ -83,7 +69,6 @@
 import org.jboss.netty.bootstrap.ServerBootstrap;
 import org.jboss.netty.buffer.ChannelBuffer;
 import org.jboss.netty.buffer.ChannelBuffers;
-import org.jboss.netty.channel.Channel;
 import org.jboss.netty.channel.ChannelHandlerContext;
 import org.jboss.netty.channel.ChannelPipelineFactory;
 import org.jboss.netty.channel.ChannelStateEvent;
@@ -129,8 +114,6 @@
 import org.openflow.protocol.vendor.OFBasicVendorId;
 import org.openflow.protocol.vendor.OFVendorId;
 import org.openflow.util.HexString;
-import org.openflow.util.U16;
-import org.openflow.util.U32;
 import org.openflow.vendor.nicira.OFNiciraVendorData;
 import org.openflow.vendor.nicira.OFRoleReplyVendorData;
 import org.openflow.vendor.nicira.OFRoleRequestVendorData;
@@ -150,8 +133,7 @@
  * - Additional DEBUG logs
  * - Try using hostname as controller ID, when ID was not explicitly given.
  */
-public class Controller implements IFloodlightProviderService, 
-            IStorageSourceListener {
+public class Controller implements IFloodlightProviderService {
     
     protected final static Logger log = LoggerFactory.getLogger(Controller.class);
 
@@ -178,15 +160,10 @@
     protected HashMap<String, String> controllerNodeIPsCache;
     
     protected Set<IOFSwitchListener> switchListeners;
-    protected Set<IHAListener> haListeners;
-    protected Map<String, List<IInfoProvider>> providerMap;
     protected BlockingQueue<IUpdate> updates;
     
     // Module dependencies
     protected IRestApiService restApi;
-    protected ICounterStoreService counterStore = null;
-    protected IStorageSourceService storageSource;
-    protected IPktInProcessingTimeService pktinProcTime;
     protected IThreadPoolService threadPool;
     protected IControllerRegistryService registryService;
     
@@ -211,47 +188,6 @@
     // Flag to always flush flow table on switch reconnect (HA or otherwise)
     protected boolean alwaysClearFlowsOnSwAdd = false;
     
-    // Storage table names
-    protected static final String CONTROLLER_TABLE_NAME = "controller_controller";
-    protected static final String CONTROLLER_ID = "id";
-    
-    protected static final String SWITCH_TABLE_NAME = "controller_switch";
-    protected static final String SWITCH_DATAPATH_ID = "dpid";
-    protected static final String SWITCH_SOCKET_ADDRESS = "socket_address";
-    protected static final String SWITCH_IP = "ip";
-    protected static final String SWITCH_CONTROLLER_ID = "controller_id";
-    protected static final String SWITCH_ACTIVE = "active";
-    protected static final String SWITCH_CONNECTED_SINCE = "connected_since";
-    protected static final String SWITCH_CAPABILITIES = "capabilities";
-    protected static final String SWITCH_BUFFERS = "buffers";
-    protected static final String SWITCH_TABLES = "tables";
-    protected static final String SWITCH_ACTIONS = "actions";
-
-    protected static final String SWITCH_CONFIG_TABLE_NAME = "controller_switchconfig";
-    protected static final String SWITCH_CONFIG_CORE_SWITCH = "core_switch";
-    
-    protected static final String PORT_TABLE_NAME = "controller_port";
-    protected static final String PORT_ID = "id";
-    protected static final String PORT_SWITCH = "switch_id";
-    protected static final String PORT_NUMBER = "number";
-    protected static final String PORT_HARDWARE_ADDRESS = "hardware_address";
-    protected static final String PORT_NAME = "name";
-    protected static final String PORT_CONFIG = "config";
-    protected static final String PORT_STATE = "state";
-    protected static final String PORT_CURRENT_FEATURES = "current_features";
-    protected static final String PORT_ADVERTISED_FEATURES = "advertised_features";
-    protected static final String PORT_SUPPORTED_FEATURES = "supported_features";
-    protected static final String PORT_PEER_FEATURES = "peer_features";
-    
-    protected static final String CONTROLLER_INTERFACE_TABLE_NAME = "controller_controllerinterface";
-    protected static final String CONTROLLER_INTERFACE_ID = "id";
-    protected static final String CONTROLLER_INTERFACE_CONTROLLER_ID = "controller_id";
-    protected static final String CONTROLLER_INTERFACE_TYPE = "type";
-    protected static final String CONTROLLER_INTERFACE_NUMBER = "number";
-    protected static final String CONTROLLER_INTERFACE_DISCOVERED_IP = "discovered_ip";
-    
-    
-    
     // Perf. related configuration
     protected static final int SEND_BUFFER_SIZE = 4 * 1024 * 1024;
     protected static final int BATCH_MAX_SIZE = 100;
@@ -317,86 +253,9 @@
         }
     }
     
-    /**
-     * Update message indicating controller's role has changed
-     */
-    protected class HARoleUpdate implements IUpdate {
-        public Role oldRole;
-        public Role newRole;
-        public HARoleUpdate(Role newRole, Role oldRole) {
-            this.oldRole = oldRole;
-            this.newRole = newRole;
-        }
-        public void dispatch() {
-            // Make sure that old and new roles are different.
-            if (oldRole == newRole) {
-                if (log.isTraceEnabled()) {
-                    log.trace("HA role update ignored as the old and " +
-                              "new roles are the same. newRole = {}" +
-                              "oldRole = {}", newRole, oldRole);
-                }
-                return;
-            }
-            if (log.isTraceEnabled()) {
-                log.trace("Dispatching HA Role update newRole = {}, oldRole = {}",
-                          newRole, oldRole);
-            }
-            if (haListeners != null) {
-                for (IHAListener listener : haListeners) {
-                        listener.roleChanged(oldRole, newRole);
-                }
-            }
-        }
-    }
-    
-    /**
-     * Update message indicating
-     * IPs of controllers in controller cluster have changed.
-     */
-    protected class HAControllerNodeIPUpdate implements IUpdate {
-        public Map<String,String> curControllerNodeIPs;
-        public Map<String,String> addedControllerNodeIPs;
-        public Map<String,String> removedControllerNodeIPs;
-        public HAControllerNodeIPUpdate(
-                HashMap<String,String> curControllerNodeIPs,  
-                HashMap<String,String> addedControllerNodeIPs,  
-                HashMap<String,String> removedControllerNodeIPs) {
-            this.curControllerNodeIPs = curControllerNodeIPs;
-            this.addedControllerNodeIPs = addedControllerNodeIPs;
-            this.removedControllerNodeIPs = removedControllerNodeIPs;
-        }
-        public void dispatch() {
-            if (log.isTraceEnabled()) {
-                log.trace("Dispatching HA Controller Node IP update "
-                        + "curIPs = {}, addedIPs = {}, removedIPs = {}",
-                        new Object[] { curControllerNodeIPs, addedControllerNodeIPs,
-                            removedControllerNodeIPs }
-                        );
-            }
-            if (haListeners != null) {
-                for (IHAListener listener: haListeners) {
-                    listener.controllerNodeIPsChanged(curControllerNodeIPs,
-                            addedControllerNodeIPs, removedControllerNodeIPs);
-                }
-            }
-        }
-    }
-    
     // ***************
     // Getters/Setters
-    // ***************
-    
-    public void setStorageSourceService(IStorageSourceService storageSource) {
-        this.storageSource = storageSource;
-    }
-    
-    public void setCounterStore(ICounterStoreService counterStore) {
-        this.counterStore = counterStore;
-    }
-    
-    public void setPktInProcessingService(IPktInProcessingTimeService pits) {
-        this.pktinProcTime = pits;
-    }
+    // *************** 
     
     public void setRestApiService(IRestApiService restApi) {
         this.restApi = restApi;
@@ -413,47 +272,6 @@
 	public void setLinkDiscoveryService(ILinkDiscoveryService linkDiscovery) {
 		this.linkDiscovery = linkDiscovery;
 	}
-	
-    @Override
-    public Role getRole() {
-        synchronized(roleChanger) {
-            return role;
-        }
-    }
-    
-    @Override
-    public void setRole(Role role) {
-        if (role == null) throw new NullPointerException("Role can not be null.");
-        if (role == Role.MASTER && this.role == Role.SLAVE) {
-            // Reset db state to Inactive for all switches. 
-            updateAllInactiveSwitchInfo();
-        }
-        
-        // Need to synchronize to ensure a reliable ordering on role request
-        // messages send and to ensure the list of connected switches is stable
-        // RoleChanger will handle the actual sending of the message and 
-        // timeout handling
-        // @see RoleChanger
-        synchronized(roleChanger) {
-            if (role.equals(this.role)) {
-                log.debug("Ignoring role change: role is already {}", role);
-                return;
-            }
-
-            Role oldRole = this.role;
-            this.role = role;
-            
-            log.debug("Submitting role change request to role {}", role);
-            roleChanger.submitRequest(connectedSwitches, role);
-            
-            // Enqueue an update for our listeners.
-            try {
-                this.updates.put(new HARoleUpdate(role, oldRole));
-            } catch (InterruptedException e) {
-                log.error("Failure adding update to queue", e);
-            }
-        }
-    }
     
     public void publishUpdate(IUpdate update) {
     	try {
@@ -607,10 +425,6 @@
                     explanation="Could not parse a message from the switch",
                     recommendation=LogMessageDoc.CHECK_SWITCH),
             @LogMessageDoc(level="ERROR",
-                    message="Terminating controller due to storage exception",
-                    explanation=ERROR_DATABASE,
-                    recommendation=LogMessageDoc.CHECK_CONTROLLER),
-            @LogMessageDoc(level="ERROR",
                     message="Could not process message: queue full",
                     explanation="OpenFlow messages are arriving faster than " +
                                 " the controller can process them.",
@@ -646,10 +460,6 @@
                           " due to message parse failure", 
                           e.getCause());
                 ctx.getChannel().close();
-            } else if (e.getCause() instanceof StorageException) {
-                log.error("Terminating controller due to storage exception", 
-                          e.getCause());
-                terminate();
             } else if (e.getCause() instanceof RejectedExecutionException) {
                 log.warn("Could not process message: queue full");
             } else {
@@ -722,40 +532,8 @@
                                     description);
                     sw.setSwitchProperties(description);
                     data = null;
-
-                    // At this time, also set other switch properties from storage
-                    boolean is_core_switch = false;
-                    IResultSet resultSet = null;
-                    try {
-                        String swid = sw.getStringId();
-                        resultSet = 
-                                storageSource.getRow(SWITCH_CONFIG_TABLE_NAME, swid);
-                        for (Iterator<IResultSet> it = 
-                                resultSet.iterator(); it.hasNext();) {
-                            // In case of multiple rows, use the status
-                            // in last row?
-                            Map<String, Object> row = it.next().getRow();
-                            if (row.containsKey(SWITCH_CONFIG_CORE_SWITCH)) {
-                                if (log.isDebugEnabled()) {
-                                    log.debug("Reading SWITCH_IS_CORE_SWITCH " + 
-                                              "config for switch={}, is-core={}",
-                                              sw, row.get(SWITCH_CONFIG_CORE_SWITCH));
-                                }
-                                String ics = 
-                                        (String)row.get(SWITCH_CONFIG_CORE_SWITCH);
-                                is_core_switch = ics.equals("true");
-                            }
-                        }
-                    }
-                    finally {
-                        if (resultSet != null)
-                            resultSet.close();
-                    }
-                    if (is_core_switch) {
-                        sw.setAttribute(IOFSwitch.SWITCH_IS_CORE_SWITCH, 
-                                        true);
-                    }
                 }
+                
                 sw.removeAttribute(IOFSwitch.SWITCH_DESCRIPTION_FUTURE);
                 state.hasDescription = true;
                 checkSwitchReady();
@@ -968,7 +746,7 @@
                 
                 // Some switches don't seem to update us with port
                 // status messages while in slave role.
-                readSwitchPortStateFromStorage(sw);                
+                //readSwitchPortStateFromStorage(sw);                
                 
                 // Only add the switch to the active switch list if 
                 // we're not in the slave role. Note that if the role 
@@ -1086,7 +864,7 @@
                         // return results to rest api caller
                         sw.deliverOFFeaturesReply(m);
                         // update database */
-                        updateActiveSwitchInfo(sw);
+                        //updateActiveSwitchInfo(sw);
                     }
                     break;
                 case GET_CONFIG_REPLY:
@@ -1301,8 +1079,8 @@
                    log.error("Failure adding update to queue", e);
                }
            }
-            if (updateStorage)
-                updatePortInfo(sw, port);
+            //if (updateStorage)
+                //updatePortInfo(sw, port);
             log.debug("Port #{} modified for {}", portNumber, sw);
         } else if (m.getReason() == (byte)OFPortReason.OFPPR_ADD.ordinal()) {
         	// XXX Workaround to prevent race condition where a link is detected
@@ -1318,8 +1096,8 @@
             } catch (InterruptedException e) {
                 log.error("Failure adding update to queue", e);
             }
-            if (updateStorage)
-                updatePortInfo(sw, port);
+            //if (updateStorage)
+                //updatePortInfo(sw, port);
             log.debug("Port #{} added for {}", portNumber, sw);
         } else if (m.getReason() == 
                    (byte)OFPortReason.OFPPR_DELETE.ordinal()) {
@@ -1330,8 +1108,8 @@
             } catch (InterruptedException e) {
                 log.error("Failure adding update to queue", e);
             }
-            if (updateStorage)
-                removePortInfo(sw, portNumber);
+            //if (updateStorage)
+                //removePortInfo(sw, portNumber);
             log.debug("Port #{} deleted for {}", portNumber, sw);
         }
         SwitchUpdate update = new SwitchUpdate(sw, SwitchUpdateType.PORTCHANGED);
@@ -1418,7 +1196,6 @@
                     eth = new Ethernet();
                     eth.deserialize(pi.getPacketData(), 0,
                             pi.getPacketData().length);
-                    counterStore.updatePacketInCounters(sw, m, eth);
                 }
                 // fall through to default case...
 
@@ -1449,8 +1226,7 @@
                     // Get the starting time (overall and per-component) of 
                     // the processing chain for this packet if performance
                     // monitoring is turned on
-                    pktinProcTime.bootstrap(listeners);
-                    pktinProcTime.recordStartTimePktIn();                     
+                    
                     Command cmd;
                     for (IOFMessageListener listener : listeners) {
                         if (listener instanceof IOFSwitchFilter) {
@@ -1459,15 +1235,15 @@
                             }
                         }
 
-                        pktinProcTime.recordStartTimeComp(listener);
+
                         cmd = listener.receive(sw, m, bc);
-                        pktinProcTime.recordEndTimeComp(listener);
+
                         
                         if (Command.STOP.equals(cmd)) {
                             break;
                         }
                     }
-                    pktinProcTime.recordEndTimePktIn(sw, m, bc);
+
                 } else {
                     log.warn("Unhandled OF Message: {} from {}", m, sw);
                 }
@@ -1582,7 +1358,7 @@
                 
                 oldSw.cancelAllStatisticsReplies();
                 
-                updateInactiveSwitchInfo(oldSw);
+                //updateInactiveSwitchInfo(oldSw);
     
                 // we need to clean out old switch state definitively 
                 // before adding the new switch
@@ -1610,7 +1386,7 @@
             }
         }
         
-        updateActiveSwitchInfo(sw);
+        //updateActiveSwitchInfo(sw);
         SwitchUpdate update = new SwitchUpdate(sw, SwitchUpdateType.ADDED);
         try {
             this.updates.put(update);
@@ -1649,7 +1425,7 @@
         // written out by the new master. Maybe need to revisit how we handle all
         // of the switch state that's written to storage.
         
-        updateInactiveSwitchInfo(sw);
+        //updateInactiveSwitchInfo(sw);
         SwitchUpdate update = new SwitchUpdate(sw, SwitchUpdateType.REMOVED);
         try {
             this.updates.put(update);
@@ -1833,213 +1609,6 @@
     // Initialization
     // **************
 
-    protected void updateAllInactiveSwitchInfo() {
-        if (role == Role.SLAVE) {
-            return;
-        }
-        String controllerId = getControllerId();
-        String[] switchColumns = { SWITCH_DATAPATH_ID,
-                                   SWITCH_CONTROLLER_ID,
-                                   SWITCH_ACTIVE };
-        String[] portColumns = { PORT_ID, PORT_SWITCH };
-        IResultSet switchResultSet = null;
-        try {
-            OperatorPredicate op = 
-                    new OperatorPredicate(SWITCH_CONTROLLER_ID,
-                                          OperatorPredicate.Operator.EQ,
-                                          controllerId);
-            switchResultSet = 
-                    storageSource.executeQuery(SWITCH_TABLE_NAME,
-                                               switchColumns,
-                                               op, null);
-            while (switchResultSet.next()) {
-                IResultSet portResultSet = null;
-                try {
-                    String datapathId =
-                            switchResultSet.getString(SWITCH_DATAPATH_ID);
-                    switchResultSet.setBoolean(SWITCH_ACTIVE, Boolean.FALSE);
-                    op = new OperatorPredicate(PORT_SWITCH, 
-                                               OperatorPredicate.Operator.EQ,
-                                               datapathId);
-                    portResultSet = 
-                            storageSource.executeQuery(PORT_TABLE_NAME,
-                                                       portColumns,
-                                                       op, null);
-                    while (portResultSet.next()) {
-                        portResultSet.deleteRow();
-                    }
-                    portResultSet.save();
-                }
-                finally {
-                    if (portResultSet != null)
-                        portResultSet.close();
-                }
-            }
-            switchResultSet.save();
-        }
-        finally {
-            if (switchResultSet != null)
-                switchResultSet.close();
-        }
-    }
-    
-    protected void updateControllerInfo() {
-        updateAllInactiveSwitchInfo();
-        
-        // Write out the controller info to the storage source
-        Map<String, Object> controllerInfo = new HashMap<String, Object>();
-        String id = getControllerId();
-        controllerInfo.put(CONTROLLER_ID, id);
-        storageSource.updateRow(CONTROLLER_TABLE_NAME, controllerInfo);
-    }
-    
-    protected void updateActiveSwitchInfo(IOFSwitch sw) {
-        if (role == Role.SLAVE) {
-            return;
-        }
-        // Obtain the row info for the switch
-        Map<String, Object> switchInfo = new HashMap<String, Object>();
-        String datapathIdString = sw.getStringId();
-        switchInfo.put(SWITCH_DATAPATH_ID, datapathIdString);
-        String controllerId = getControllerId();
-        switchInfo.put(SWITCH_CONTROLLER_ID, controllerId);
-        Date connectedSince = sw.getConnectedSince();
-        switchInfo.put(SWITCH_CONNECTED_SINCE, connectedSince);
-        Channel channel = sw.getChannel();
-        SocketAddress socketAddress = channel.getRemoteAddress();
-        if (socketAddress != null) {
-            String socketAddressString = socketAddress.toString();
-            switchInfo.put(SWITCH_SOCKET_ADDRESS, socketAddressString);
-            if (socketAddress instanceof InetSocketAddress) {
-                InetSocketAddress inetSocketAddress =
-                        (InetSocketAddress)socketAddress;
-                InetAddress inetAddress = inetSocketAddress.getAddress();
-                String ip = inetAddress.getHostAddress();
-                switchInfo.put(SWITCH_IP, ip);
-            }
-        }
-        
-        // Write out the switch features info
-        long capabilities = U32.f(sw.getCapabilities());
-        switchInfo.put(SWITCH_CAPABILITIES, capabilities);
-        long buffers = U32.f(sw.getBuffers());
-        switchInfo.put(SWITCH_BUFFERS, buffers);
-        long tables = U32.f(sw.getTables());
-        switchInfo.put(SWITCH_TABLES, tables);
-        long actions = U32.f(sw.getActions());
-        switchInfo.put(SWITCH_ACTIONS, actions);
-        switchInfo.put(SWITCH_ACTIVE, Boolean.TRUE);
-        
-        // Update the switch
-        storageSource.updateRowAsync(SWITCH_TABLE_NAME, switchInfo);
-        
-        // Update the ports
-        for (OFPhysicalPort port: sw.getPorts()) {
-            updatePortInfo(sw, port);
-        }
-    }
-    
-    protected void updateInactiveSwitchInfo(IOFSwitch sw) {
-        if (role == Role.SLAVE) {
-            return;
-        }
-        log.debug("Update DB with inactiveSW {}", sw);
-        // Update the controller info in the storage source to be inactive
-        Map<String, Object> switchInfo = new HashMap<String, Object>();
-        String datapathIdString = sw.getStringId();
-        switchInfo.put(SWITCH_DATAPATH_ID, datapathIdString);
-        //switchInfo.put(SWITCH_CONNECTED_SINCE, null);
-        switchInfo.put(SWITCH_ACTIVE, Boolean.FALSE);
-        storageSource.updateRowAsync(SWITCH_TABLE_NAME, switchInfo);
-    }
-
-    protected void updatePortInfo(IOFSwitch sw, OFPhysicalPort port) {
-        if (role == Role.SLAVE) {
-            return;
-        }
-        String datapathIdString = sw.getStringId();
-        Map<String, Object> portInfo = new HashMap<String, Object>();
-        int portNumber = U16.f(port.getPortNumber());
-        String id = datapathIdString + "|" + portNumber;
-        portInfo.put(PORT_ID, id);
-        portInfo.put(PORT_SWITCH, datapathIdString);
-        portInfo.put(PORT_NUMBER, portNumber);
-        byte[] hardwareAddress = port.getHardwareAddress();
-        String hardwareAddressString = HexString.toHexString(hardwareAddress);
-        portInfo.put(PORT_HARDWARE_ADDRESS, hardwareAddressString);
-        String name = port.getName();
-        portInfo.put(PORT_NAME, name);
-        long config = U32.f(port.getConfig());
-        portInfo.put(PORT_CONFIG, config);
-        long state = U32.f(port.getState());
-        portInfo.put(PORT_STATE, state);
-        long currentFeatures = U32.f(port.getCurrentFeatures());
-        portInfo.put(PORT_CURRENT_FEATURES, currentFeatures);
-        long advertisedFeatures = U32.f(port.getAdvertisedFeatures());
-        portInfo.put(PORT_ADVERTISED_FEATURES, advertisedFeatures);
-        long supportedFeatures = U32.f(port.getSupportedFeatures());
-        portInfo.put(PORT_SUPPORTED_FEATURES, supportedFeatures);
-        long peerFeatures = U32.f(port.getPeerFeatures());
-        portInfo.put(PORT_PEER_FEATURES, peerFeatures);
-        storageSource.updateRowAsync(PORT_TABLE_NAME, portInfo);
-    }
-    
-    /**
-     * Read switch port data from storage and write it into a switch object
-     * @param sw the switch to update
-     */
-    protected void readSwitchPortStateFromStorage(OFSwitchImpl sw) {
-        OperatorPredicate op = 
-                new OperatorPredicate(PORT_SWITCH, 
-                                      OperatorPredicate.Operator.EQ,
-                                      sw.getStringId());
-        IResultSet portResultSet = 
-                storageSource.executeQuery(PORT_TABLE_NAME,
-                                           null, op, null);
-        //Map<Short, OFPhysicalPort> oldports = 
-        //        new HashMap<Short, OFPhysicalPort>();
-        //oldports.putAll(sw.getPorts());
-
-        while (portResultSet.next()) {
-            try {
-                OFPhysicalPort p = new OFPhysicalPort();
-                p.setPortNumber((short)portResultSet.getInt(PORT_NUMBER));
-                p.setName(portResultSet.getString(PORT_NAME));
-                p.setConfig((int)portResultSet.getLong(PORT_CONFIG));
-                p.setState((int)portResultSet.getLong(PORT_STATE));
-                String portMac = portResultSet.getString(PORT_HARDWARE_ADDRESS);
-                p.setHardwareAddress(HexString.fromHexString(portMac));
-                p.setCurrentFeatures((int)portResultSet.
-                                     getLong(PORT_CURRENT_FEATURES));
-                p.setAdvertisedFeatures((int)portResultSet.
-                                        getLong(PORT_ADVERTISED_FEATURES));
-                p.setSupportedFeatures((int)portResultSet.
-                                       getLong(PORT_SUPPORTED_FEATURES));
-                p.setPeerFeatures((int)portResultSet.
-                                  getLong(PORT_PEER_FEATURES));
-                //oldports.remove(Short.valueOf(p.getPortNumber()));
-                sw.setPort(p);
-            } catch (NullPointerException e) {
-                // ignore
-            }
-        }
-        SwitchUpdate update = new SwitchUpdate(sw, SwitchUpdateType.PORTCHANGED);
-        try {
-            this.updates.put(update);
-        } catch (InterruptedException e) {
-            log.error("Failure adding update to queue", e);
-        }
-    }
-    
-    protected void removePortInfo(IOFSwitch sw, short portNumber) {
-        if (role == Role.SLAVE) {
-            return;
-        }
-        String datapathIdString = sw.getStringId();
-        String id = datapathIdString + "|" + portNumber;
-        storageSource.deleteRowAsync(PORT_TABLE_NAME, id);
-    }
-
     /**
      * Sets the initial role based on properties in the config params.
      * It looks for two different properties.
@@ -2155,10 +1724,6 @@
                 update.dispatch();
             } catch (InterruptedException e) {
                 return;
-            } catch (StorageException e) {
-                log.error("Storage exception in controller " + 
-                          "updates loop; terminating process", e);
-                return;
             } catch (Exception e) {
                 log.error("Exception in controller updates loop", e);
             }
@@ -2236,15 +1801,12 @@
                                       ListenerDispatcher<OFType, 
                                                          IOFMessageListener>>();
         this.switchListeners = new CopyOnWriteArraySet<IOFSwitchListener>();
-        this.haListeners = new CopyOnWriteArraySet<IHAListener>();
         this.activeSwitches = new ConcurrentHashMap<Long, IOFSwitch>();
         this.connectedSwitches = new HashSet<OFSwitchImpl>();
         this.controllerNodeIPsCache = new HashMap<String, String>();
         this.updates = new LinkedBlockingQueue<IUpdate>();
         this.factory = new BasicFactory();
-        this.providerMap = new HashMap<String, List<IInfoProvider>>();
         setConfigParams(configParams);
-        //this.role = getInitialRole(configParams);
         //Set the controller's role to MASTER so it always tries to do role requests.
         this.role = Role.MASTER;
         this.roleChanger = new RoleChanger();
@@ -2263,144 +1825,13 @@
     public void startupComponents() {
     	try {
 			registryService.registerController(controllerId);
-		} catch (RegistryException e2) {
-			log.warn("Registry service error: {}", e2.getMessage());
+		} catch (RegistryException e) {
+			log.warn("Registry service error: {}", e.getMessage());
 		}
-    	
-        // Create the table names we use
-        storageSource.createTable(CONTROLLER_TABLE_NAME, null);
-        storageSource.createTable(SWITCH_TABLE_NAME, null);
-        storageSource.createTable(PORT_TABLE_NAME, null);
-        storageSource.createTable(CONTROLLER_INTERFACE_TABLE_NAME, null);
-        storageSource.createTable(SWITCH_CONFIG_TABLE_NAME, null);
-        storageSource.setTablePrimaryKeyName(CONTROLLER_TABLE_NAME,
-                                             CONTROLLER_ID);
-        storageSource.setTablePrimaryKeyName(SWITCH_TABLE_NAME,
-                                             SWITCH_DATAPATH_ID);
-        storageSource.setTablePrimaryKeyName(PORT_TABLE_NAME, PORT_ID);
-        storageSource.setTablePrimaryKeyName(CONTROLLER_INTERFACE_TABLE_NAME, 
-                                             CONTROLLER_INTERFACE_ID);
-        storageSource.addListener(CONTROLLER_INTERFACE_TABLE_NAME, this);
-        
-        while (true) {
-            try {
-                updateControllerInfo();
-                break;
-            }
-            catch (StorageException e) {
-                log.info("Waiting for storage source");
-                try {
-                    Thread.sleep(1000);
-                } catch (InterruptedException e1) {
-                }
-            }
-        }
        
         // Add our REST API
         restApi.addRestletRoutable(new CoreWebRoutable());
     }
-
-    @Override
-    public void addInfoProvider(String type, IInfoProvider provider) {
-        if (!providerMap.containsKey(type)) {
-            providerMap.put(type, new ArrayList<IInfoProvider>());
-        }
-        providerMap.get(type).add(provider);
-    }
-
-    @Override
-    public void removeInfoProvider(String type, IInfoProvider provider) {
-        if (!providerMap.containsKey(type)) {
-            log.debug("Provider type {} doesn't exist.", type);
-            return;
-        }
-        
-        providerMap.get(type).remove(provider);
-    }
-    
-    public Map<String, Object> getControllerInfo(String type) {
-        if (!providerMap.containsKey(type)) return null;
-        
-        Map<String, Object> result = new LinkedHashMap<String, Object>();
-        for (IInfoProvider provider : providerMap.get(type)) {
-            result.putAll(provider.getInfo(type));
-        }
-        
-        return result;
-    }
-
-    @Override
-    public void addHAListener(IHAListener listener) {
-        this.haListeners.add(listener);
-    }
-
-    @Override
-    public void removeHAListener(IHAListener listener) {
-        this.haListeners.remove(listener);
-    }
-    
-    
-    /**
-     * Handle changes to the controller nodes IPs and dispatch update. 
-     */
-    @SuppressWarnings("unchecked")
-    protected void handleControllerNodeIPChanges() {
-        HashMap<String,String> curControllerNodeIPs = new HashMap<String,String>();
-        HashMap<String,String> addedControllerNodeIPs = new HashMap<String,String>();
-        HashMap<String,String> removedControllerNodeIPs =new HashMap<String,String>();
-        String[] colNames = { CONTROLLER_INTERFACE_CONTROLLER_ID, 
-                           CONTROLLER_INTERFACE_TYPE, 
-                           CONTROLLER_INTERFACE_NUMBER, 
-                           CONTROLLER_INTERFACE_DISCOVERED_IP };
-        synchronized(controllerNodeIPsCache) {
-            // We currently assume that interface Ethernet0 is the relevant
-            // controller interface. Might change.
-            // We could (should?) implement this using 
-            // predicates, but creating the individual and compound predicate
-            // seems more overhead then just checking every row. Particularly, 
-            // since the number of rows is small and changes infrequent
-            IResultSet res = storageSource.executeQuery(CONTROLLER_INTERFACE_TABLE_NAME,
-                    colNames,null, null);
-            while (res.next()) {
-                if (res.getString(CONTROLLER_INTERFACE_TYPE).equals("Ethernet") &&
-                        res.getInt(CONTROLLER_INTERFACE_NUMBER) == 0) {
-                    String controllerID = res.getString(CONTROLLER_INTERFACE_CONTROLLER_ID);
-                    String discoveredIP = res.getString(CONTROLLER_INTERFACE_DISCOVERED_IP);
-                    String curIP = controllerNodeIPsCache.get(controllerID);
-                    
-                    curControllerNodeIPs.put(controllerID, discoveredIP);
-                    if (curIP == null) {
-                        // new controller node IP
-                        addedControllerNodeIPs.put(controllerID, discoveredIP);
-                    } 
-                    else if (!curIP.equals(discoveredIP)) {
-                        // IP changed                    
-                        removedControllerNodeIPs.put(controllerID, curIP);
-                        addedControllerNodeIPs.put(controllerID, discoveredIP);
-                    }
-                }
-            }
-            // Now figure out if rows have been deleted. We can't use the
-            // rowKeys from rowsDeleted directly, since the tables primary
-            // key is a compound that we can't disassemble
-            Set<String> curEntries = curControllerNodeIPs.keySet();
-            Set<String> removedEntries = controllerNodeIPsCache.keySet();
-            removedEntries.removeAll(curEntries);
-            for (String removedControllerID : removedEntries)
-                removedControllerNodeIPs.put(removedControllerID, controllerNodeIPsCache.get(removedControllerID));
-            controllerNodeIPsCache = (HashMap<String, String>) curControllerNodeIPs.clone();
-            HAControllerNodeIPUpdate update = new HAControllerNodeIPUpdate(
-                                curControllerNodeIPs, addedControllerNodeIPs,
-                                removedControllerNodeIPs);
-            if (!removedControllerNodeIPs.isEmpty() || !addedControllerNodeIPs.isEmpty()) {
-                try {
-                    this.updates.put(update);
-                } catch (InterruptedException e) {
-                    log.error("Failure adding update to queue", e);
-                }
-            }
-        }
-    }
     
     @Override
     public Map<String, String> getControllerNodeIPs() {
@@ -2415,21 +1846,6 @@
     }
 
     @Override
-    public void rowsModified(String tableName, Set<Object> rowKeys) {
-        if (tableName.equals(CONTROLLER_INTERFACE_TABLE_NAME)) {
-            handleControllerNodeIPChanges();
-        }
-        
-    }
-
-    @Override
-    public void rowsDeleted(String tableName, Set<Object> rowKeys) {
-        if (tableName.equals(CONTROLLER_INTERFACE_TABLE_NAME)) {
-            handleControllerNodeIPChanges();
-        }
-    }
-
-    @Override
     public long getSystemStartTime() {
         return (this.systemStartTime);
     }
diff --git a/src/main/java/net/floodlightcontroller/core/internal/OFSwitchImpl.java b/src/main/java/net/floodlightcontroller/core/internal/OFSwitchImpl.java
index 5810967..752675a 100644
--- a/src/main/java/net/floodlightcontroller/core/internal/OFSwitchImpl.java
+++ b/src/main/java/net/floodlightcontroller/core/internal/OFSwitchImpl.java
@@ -36,8 +36,8 @@
 
 import net.floodlightcontroller.core.FloodlightContext;
 import net.floodlightcontroller.core.IFloodlightProviderService;
-import net.floodlightcontroller.core.IOFMessageListener;
 import net.floodlightcontroller.core.IFloodlightProviderService.Role;
+import net.floodlightcontroller.core.IOFMessageListener;
 import net.floodlightcontroller.core.IOFSwitch;
 import net.floodlightcontroller.core.annotations.LogMessageDoc;
 import net.floodlightcontroller.core.annotations.LogMessageDocs;
@@ -49,7 +49,7 @@
 import org.codehaus.jackson.annotate.JsonIgnore;
 import org.codehaus.jackson.annotate.JsonProperty;
 import org.codehaus.jackson.map.annotate.JsonSerialize;
-import org.codehaus.jackson.map.ser.std.ToStringSerializer;
+import org.codehaus.jackson.map.ser.ToStringSerializer;
 import org.jboss.netty.channel.Channel;
 import org.openflow.protocol.OFFeaturesReply;
 import org.openflow.protocol.OFFeaturesRequest;
@@ -57,12 +57,12 @@
 import org.openflow.protocol.OFMatch;
 import org.openflow.protocol.OFMessage;
 import org.openflow.protocol.OFPhysicalPort;
-import org.openflow.protocol.OFPort;
-import org.openflow.protocol.OFType;
-import org.openflow.protocol.OFVendor;
 import org.openflow.protocol.OFPhysicalPort.OFPortConfig;
 import org.openflow.protocol.OFPhysicalPort.OFPortState;
+import org.openflow.protocol.OFPort;
 import org.openflow.protocol.OFStatisticsRequest;
+import org.openflow.protocol.OFType;
+import org.openflow.protocol.OFVendor;
 import org.openflow.protocol.statistics.OFDescriptionStatistics;
 import org.openflow.protocol.statistics.OFStatistics;
 import org.openflow.util.HexString;
@@ -73,6 +73,7 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+
 /**
  * This is the internal representation of an openflow switch.
  */
diff --git a/src/main/java/net/floodlightcontroller/core/types/MacVlanPair.java b/src/main/java/net/floodlightcontroller/core/types/MacVlanPair.java
deleted file mode 100644
index 7a44f1d..0000000
--- a/src/main/java/net/floodlightcontroller/core/types/MacVlanPair.java
+++ /dev/null
@@ -1,44 +0,0 @@
-/**
-*    Copyright 2011, Big Switch Networks, Inc. 
-*    Originally created by David Erickson, Stanford University
-* 
-*    Licensed under the Apache License, Version 2.0 (the "License"); you may
-*    not use this file except in compliance with the License. You may obtain
-*    a copy of the License at
-*
-*         http://www.apache.org/licenses/LICENSE-2.0
-*
-*    Unless required by applicable law or agreed to in writing, software
-*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-*    License for the specific language governing permissions and limitations
-*    under the License.
-**/
-
-package net.floodlightcontroller.core.types;
-
-public class MacVlanPair {
-    public Long mac;
-    public Short vlan;
-    public MacVlanPair(Long mac, Short vlan) {
-        this.mac = mac;
-        this.vlan = vlan;
-    }
-    
-    public long getMac() {
-        return mac.longValue();
-    }
-    
-    public short getVlan() {
-        return vlan.shortValue();
-    }
-    
-    public boolean equals(Object o) {
-        return (o instanceof MacVlanPair) && (mac.equals(((MacVlanPair) o).mac))
-            && (vlan.equals(((MacVlanPair) o).vlan));
-    }
-    
-    public int hashCode() {
-        return mac.hashCode() ^ vlan.hashCode();
-    }
-}
\ No newline at end of file
diff --git a/src/main/java/net/floodlightcontroller/core/types/SwitchMessagePair.java b/src/main/java/net/floodlightcontroller/core/types/SwitchMessagePair.java
deleted file mode 100644
index 0e91bc9..0000000
--- a/src/main/java/net/floodlightcontroller/core/types/SwitchMessagePair.java
+++ /dev/null
@@ -1,40 +0,0 @@
-/**
-*    Copyright 2011, Big Switch Networks, Inc. 
-*    Originally created by David Erickson, Stanford University
-* 
-*    Licensed under the Apache License, Version 2.0 (the "License"); you may
-*    not use this file except in compliance with the License. You may obtain
-*    a copy of the License at
-*
-*         http://www.apache.org/licenses/LICENSE-2.0
-*
-*    Unless required by applicable law or agreed to in writing, software
-*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-*    License for the specific language governing permissions and limitations
-*    under the License.
-**/
-
-package net.floodlightcontroller.core.types;
-
-import org.openflow.protocol.OFMessage;
-
-import net.floodlightcontroller.core.IOFSwitch;
-
-public class SwitchMessagePair {
-    private final IOFSwitch sw;
-    private final OFMessage msg;
-    
-    public SwitchMessagePair(IOFSwitch sw, OFMessage msg) {
-        this.sw = sw;
-        this.msg = msg;
-    }
-    
-    public IOFSwitch getSwitch() {
-        return this.sw;
-    }
-    
-    public OFMessage getMessage() {
-        return this.msg;
-    }
-}
diff --git a/src/main/java/net/floodlightcontroller/core/web/AllSwitchStatisticsResource.java b/src/main/java/net/floodlightcontroller/core/web/AllSwitchStatisticsResource.java
index a014795..2eeec70 100644
--- a/src/main/java/net/floodlightcontroller/core/web/AllSwitchStatisticsResource.java
+++ b/src/main/java/net/floodlightcontroller/core/web/AllSwitchStatisticsResource.java
@@ -24,8 +24,6 @@
 import java.util.Map;
 
 import net.floodlightcontroller.core.IFloodlightProviderService;
-import net.floodlightcontroller.core.types.MacVlanPair;
-
 import org.openflow.protocol.OFFeaturesReply;
 import org.openflow.protocol.statistics.OFStatistics;
 import org.openflow.protocol.statistics.OFStatisticsType;
@@ -136,7 +134,6 @@
         private OFStatisticsType statType;
         private REQUESTTYPE requestType;
         private OFFeaturesReply featuresReply;
-        private Map<MacVlanPair, Short> switchTable;
         
         public GetConcurrentStatsThread(long switchId, REQUESTTYPE requestType, OFStatisticsType statType) {
             this.switchId = switchId;
@@ -144,7 +141,6 @@
             this.statType = statType;
             this.switchReply = null;
             this.featuresReply = null;
-            this.switchTable = null;
         }
         
         public List<OFStatistics> getStatisticsReply() {
@@ -155,10 +151,6 @@
             return featuresReply;
         }
         
-        public Map<MacVlanPair, Short> getSwitchTable() {
-            return switchTable;
-        }
-        
         public long getSwitchId() {
             return switchId;
         }
diff --git a/src/main/java/net/floodlightcontroller/core/web/ControllerRoleResource.java b/src/main/java/net/floodlightcontroller/core/web/ControllerRoleResource.java
deleted file mode 100644
index 2ed87cb..0000000
--- a/src/main/java/net/floodlightcontroller/core/web/ControllerRoleResource.java
+++ /dev/null
@@ -1,57 +0,0 @@
-package net.floodlightcontroller.core.web;
-
-import org.restlet.data.Status;
-import org.restlet.resource.ServerResource;
-
-import net.floodlightcontroller.core.IFloodlightProviderService;
-import net.floodlightcontroller.core.IFloodlightProviderService.Role;
-import net.floodlightcontroller.core.annotations.LogMessageDoc;
-
-import org.restlet.resource.Get;
-import org.restlet.resource.Post;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class ControllerRoleResource extends ServerResource {
-
-    protected final static Logger log = LoggerFactory.getLogger(ControllerRoleResource.class);
-
-    @Get("json")
-    public RoleInfo getRole() {
-        IFloodlightProviderService floodlightProvider = 
-                (IFloodlightProviderService)getContext().getAttributes().
-                    get(IFloodlightProviderService.class.getCanonicalName());
-        return new RoleInfo(floodlightProvider.getRole());
-    }
-    
-    @Post("json")
-    @LogMessageDoc(level="WARN",
-                   message="Invalid role value specified in REST API to " +
-                      "set controller role",
-                   explanation="An HA role change request was malformed.",
-                   recommendation=LogMessageDoc.CHECK_CONTROLLER)
-    public void setRole(RoleInfo roleInfo) {
-        //Role role = Role.lookupRole(roleInfo.getRole());
-        Role role = null;
-        try {
-            role = Role.valueOf(roleInfo.getRole().toUpperCase());
-        }
-        catch (IllegalArgumentException e) {
-            // The role value in the REST call didn't match a valid
-            // role name, so just leave the role as null and handle
-            // the error below.
-        }
-        if (role == null) {
-            log.warn ("Invalid role value specified in REST API to " +
-            		  "set controller role");
-            setStatus(Status.CLIENT_ERROR_BAD_REQUEST, "Invalid role value");
-            return;
-        }
-        
-        IFloodlightProviderService floodlightProvider = 
-                (IFloodlightProviderService)getContext().getAttributes().
-                    get(IFloodlightProviderService.class.getCanonicalName());
-        
-        floodlightProvider.setRole(role);
-    }
-}
diff --git a/src/main/java/net/floodlightcontroller/core/web/ControllerSummaryResource.java b/src/main/java/net/floodlightcontroller/core/web/ControllerSummaryResource.java
deleted file mode 100644
index 20fbf85..0000000
--- a/src/main/java/net/floodlightcontroller/core/web/ControllerSummaryResource.java
+++ /dev/null
@@ -1,40 +0,0 @@
-/**
-*    Copyright 2012, Big Switch Networks, Inc. 
-*    Originally created by Shudong Zhou, Big Switch Networks
-* 
-*    Licensed under the Apache License, Version 2.0 (the "License"); you may
-*    not use this file except in compliance with the License. You may obtain
-*    a copy of the License at
-*
-*         http://www.apache.org/licenses/LICENSE-2.0
-*
-*    Unless required by applicable law or agreed to in writing, software
-*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-*    License for the specific language governing permissions and limitations
-*    under the License.
-**/
-
-package net.floodlightcontroller.core.web;
-
-import java.util.Map;
-
-import org.restlet.resource.Get;
-import org.restlet.resource.ServerResource;
-
-import net.floodlightcontroller.core.IFloodlightProviderService;
-
-/**
- * Get summary counters registered by all modules
- * @author shudongz
- */
-public class ControllerSummaryResource extends ServerResource {
-    @Get("json")
-    public Map<String, Object> retrieve() {
-        IFloodlightProviderService floodlightProvider = 
-            (IFloodlightProviderService)getContext().getAttributes().
-                get(IFloodlightProviderService.class.getCanonicalName());
-        return floodlightProvider.getControllerInfo("summary");
-    }
-
-}
diff --git a/src/main/java/net/floodlightcontroller/core/web/CoreWebRoutable.java b/src/main/java/net/floodlightcontroller/core/web/CoreWebRoutable.java
index 7604d7c..9b22617 100644
--- a/src/main/java/net/floodlightcontroller/core/web/CoreWebRoutable.java
+++ b/src/main/java/net/floodlightcontroller/core/web/CoreWebRoutable.java
@@ -47,9 +47,6 @@
         router.attach("/switch/all/{statType}/json", AllSwitchStatisticsResource.class);
         router.attach("/switch/{switchId}/{statType}/json", SwitchStatisticsResource.class);
         router.attach("/controller/switches/json", ControllerSwitchesResource.class);
-        router.attach("/counter/{counterTitle}/json", CounterResource.class);
-        router.attach("/counter/{switchId}/{counterName}/json", SwitchCounterResource.class);
-        router.attach("/counter/categories/{switchId}/{counterName}/{layer}/json", SwitchCounterCategoriesResource.class);
         router.attach("/memory/json", ControllerMemoryResource.class);
         // Get the last {count} events from the event histories
         router.attach("/event-history/topology-switch/{count}/json",
@@ -58,9 +55,6 @@
                 EventHistoryTopologyLinkResource.class);
         router.attach("/event-history/topology-cluster/{count}/json",
                 EventHistoryTopologyClusterResource.class);
-        router.attach("/storage/tables/json", StorageSourceTablesResource.class);
-        router.attach("/controller/summary/json", ControllerSummaryResource.class);
-        router.attach("/role/json", ControllerRoleResource.class);
         router.attach("/health/json", HealthCheckResource.class);
         router.attach("/system/uptime/json", SystemUptimeResource.class);
         // Following added by ONOS
diff --git a/src/main/java/net/floodlightcontroller/core/web/CounterResource.java b/src/main/java/net/floodlightcontroller/core/web/CounterResource.java
deleted file mode 100644
index fb680d7..0000000
--- a/src/main/java/net/floodlightcontroller/core/web/CounterResource.java
+++ /dev/null
@@ -1,70 +0,0 @@
-/**
-*    Copyright 2011, Big Switch Networks, Inc. 
-*    Originally created by David Erickson, Stanford University
-* 
-*    Licensed under the Apache License, Version 2.0 (the "License"); you may
-*    not use this file except in compliance with the License. You may obtain
-*    a copy of the License at
-*
-*         http://www.apache.org/licenses/LICENSE-2.0
-*
-*    Unless required by applicable law or agreed to in writing, software
-*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-*    License for the specific language governing permissions and limitations
-*    under the License.
-**/
-
-package net.floodlightcontroller.core.web;
-
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.Map;
-import java.util.Map.Entry;
-
-import net.floodlightcontroller.counter.CounterValue;
-import net.floodlightcontroller.counter.ICounter;
-
-import org.restlet.resource.Get;
-
-public class CounterResource extends CounterResourceBase {
-    @Get("json")
-    public Map<String, Object> retrieve() {
-        String counterTitle = 
-            (String) getRequestAttributes().get("counterTitle");
-        Map<String, Object> model = new HashMap<String,Object>();
-        CounterValue v;
-        if (counterTitle.equalsIgnoreCase("all")) {
-            Map<String, ICounter> counters = this.counterStore.getAll();
-            if (counters != null) {
-                Iterator<Map.Entry<String, ICounter>> it = 
-                    counters.entrySet().iterator();
-                while (it.hasNext()) {
-                    Entry<String, ICounter> entry = it.next();
-                    String counterName = entry.getKey();
-                    v = entry.getValue().getCounterValue();
-
-                    if (CounterValue.CounterType.LONG == v.getType()) {
-                        model.put(counterName, v.getLong());
-                    } else if (v.getType() == CounterValue.CounterType.DOUBLE) {
-                        model.put(counterName, v.getDouble());
-                    }   
-                }   
-            }   
-        } else {
-            ICounter counter = this.counterStore.getCounter(counterTitle);
-            if (counter != null) {
-                v = counter.getCounterValue();
-            } else {
-                v = new CounterValue(CounterValue.CounterType.LONG);
-            }   
-
-            if (CounterValue.CounterType.LONG == v.getType()) {
-                model.put(counterTitle, v.getLong());
-            } else if (v.getType() == CounterValue.CounterType.DOUBLE) {
-                model.put(counterTitle, v.getDouble());
-            }   
-        }
-        return model;
-    }
-}
diff --git a/src/main/java/net/floodlightcontroller/core/web/CounterResourceBase.java b/src/main/java/net/floodlightcontroller/core/web/CounterResourceBase.java
deleted file mode 100644
index 70e90ed..0000000
--- a/src/main/java/net/floodlightcontroller/core/web/CounterResourceBase.java
+++ /dev/null
@@ -1,35 +0,0 @@
-/**
-*    Copyright 2011, Big Switch Networks, Inc. 
-*    Originally created by David Erickson, Stanford University
-* 
-*    Licensed under the Apache License, Version 2.0 (the "License"); you may
-*    not use this file except in compliance with the License. You may obtain
-*    a copy of the License at
-*
-*         http://www.apache.org/licenses/LICENSE-2.0
-*
-*    Unless required by applicable law or agreed to in writing, software
-*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-*    License for the specific language governing permissions and limitations
-*    under the License.
-**/
-
-package net.floodlightcontroller.core.web;
-
-import net.floodlightcontroller.counter.ICounterStoreService;
-
-import org.restlet.resource.ResourceException;
-import org.restlet.resource.ServerResource;
-
-public class CounterResourceBase extends ServerResource {
-    protected ICounterStoreService counterStore;
-    
-    @Override
-    protected void doInit() throws ResourceException {
-        super.doInit();
-        counterStore = 
-            (ICounterStoreService)getContext().getAttributes().
-                get(ICounterStoreService.class.getCanonicalName());
-    }
-}
diff --git a/src/main/java/net/floodlightcontroller/core/web/StorageSourceTablesResource.java b/src/main/java/net/floodlightcontroller/core/web/StorageSourceTablesResource.java
deleted file mode 100644
index 51f514f..0000000
--- a/src/main/java/net/floodlightcontroller/core/web/StorageSourceTablesResource.java
+++ /dev/null
@@ -1,18 +0,0 @@
-package net.floodlightcontroller.core.web;
-
-import java.util.Set;
-
-import net.floodlightcontroller.storage.IStorageSourceService;
-
-import org.restlet.resource.Get;
-import org.restlet.resource.ServerResource;
-
-public class StorageSourceTablesResource extends ServerResource {
-    @Get("json")
-    public Set<String> retrieve() {
-        IStorageSourceService storageSource = (IStorageSourceService)getContext().
-                getAttributes().get(IStorageSourceService.class.getCanonicalName());
-        Set<String> allTableNames = storageSource.getAllTableNames();
-        return allTableNames;
-    }
-}
diff --git a/src/main/java/net/floodlightcontroller/core/web/SwitchCounterCategoriesResource.java b/src/main/java/net/floodlightcontroller/core/web/SwitchCounterCategoriesResource.java
deleted file mode 100644
index f14d706..0000000
--- a/src/main/java/net/floodlightcontroller/core/web/SwitchCounterCategoriesResource.java
+++ /dev/null
@@ -1,87 +0,0 @@
-/**
-*    Copyright 2011, Big Switch Networks, Inc. 
-*    Originally created by David Erickson, Stanford University
-* 
-*    Licensed under the Apache License, Version 2.0 (the "License"); you may
-*    not use this file except in compliance with the License. You may obtain
-*    a copy of the License at
-*
-*         http://www.apache.org/licenses/LICENSE-2.0
-*
-*    Unless required by applicable law or agreed to in writing, software
-*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-*    License for the specific language governing permissions and limitations
-*    under the License.
-**/
-
-package net.floodlightcontroller.core.web;
-
-import java.io.UnsupportedEncodingException;
-import java.net.URLDecoder;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-import org.openflow.util.HexString;
-import org.restlet.resource.Get;
-
-import net.floodlightcontroller.core.IFloodlightProviderService;
-import net.floodlightcontroller.counter.CounterStore.NetworkLayer;
-import net.floodlightcontroller.counter.ICounterStoreService;
-
-/**
- * Get the counter categories for a particular switch
- * @author readams
- */
-public class SwitchCounterCategoriesResource extends CounterResourceBase {
-    @Get("json")
-    public Map<String, Object> retrieve() {
-        IFloodlightProviderService floodlightProvider = 
-                (IFloodlightProviderService)getContext().getAttributes().
-                    get(IFloodlightProviderService.class.getCanonicalName());
-        HashMap<String,Object> model = new HashMap<String,Object>();
-        
-        String switchID = (String) getRequestAttributes().get("switchId");
-        String counterName = (String) getRequestAttributes().get("counterName");
-        String layer = (String) getRequestAttributes().get("layer");
-
-        Long[] switchDpids;
-        if (switchID.equalsIgnoreCase("all")) {
-            switchDpids = floodlightProvider.getSwitches().keySet().toArray(new Long[0]);
-            for (Long dpid : switchDpids) {
-                switchID = HexString.toHexString(dpid);
-
-                getOneSwitchCounterCategoriesJson(model, switchID, counterName, layer);
-            }
-        } else {
-            getOneSwitchCounterCategoriesJson(model, switchID, counterName, layer);
-        }
-        
-        return model;
-    }
-    
-    protected void getOneSwitchCounterCategoriesJson(Map<String, Object> model,
-                                                     String switchID,
-                                                     String counterName, 
-                                                     String layer) {
-        String fullCounterName = "";      
-        NetworkLayer nl = NetworkLayer.L3;
-        
-        try {
-            counterName = URLDecoder.decode(counterName, "UTF-8");
-            layer = URLDecoder.decode(layer, "UTF-8");
-            fullCounterName = switchID + ICounterStoreService.TitleDelimitor + counterName;
-        } catch (UnsupportedEncodingException e) {
-            //Just leave counterTitle undecoded if there is an issue - fail silently
-        }
-
-        if (layer.compareToIgnoreCase("4") == 0) {
-            nl = NetworkLayer.L4;
-        }
-        List<String> categories = this.counterStore.getAllCategories(fullCounterName, nl);
-        if (categories != null) {
-            model.put(fullCounterName + "." + layer, categories);
-        }
-    }
-}
diff --git a/src/main/java/net/floodlightcontroller/core/web/SwitchCounterResource.java b/src/main/java/net/floodlightcontroller/core/web/SwitchCounterResource.java
deleted file mode 100644
index 188836d..0000000
--- a/src/main/java/net/floodlightcontroller/core/web/SwitchCounterResource.java
+++ /dev/null
@@ -1,83 +0,0 @@
-/**
-*    Copyright 2011, Big Switch Networks, Inc. 
-*    Originally created by David Erickson, Stanford University
-* 
-*    Licensed under the Apache License, Version 2.0 (the "License"); you may
-*    not use this file except in compliance with the License. You may obtain
-*    a copy of the License at
-*
-*         http://www.apache.org/licenses/LICENSE-2.0
-*
-*    Unless required by applicable law or agreed to in writing, software
-*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-*    License for the specific language governing permissions and limitations
-*    under the License.
-**/
-
-package net.floodlightcontroller.core.web;
-
-import java.io.UnsupportedEncodingException;
-import java.net.URLDecoder;
-import java.util.HashMap;
-import java.util.Map;
-
-import org.openflow.util.HexString;
-import org.restlet.resource.Get;
-
-import net.floodlightcontroller.core.IFloodlightProviderService;
-import net.floodlightcontroller.counter.ICounter;
-import net.floodlightcontroller.counter.ICounterStoreService;
-
-/**
- * Get counters for a particular switch 
- * @author readams
- */
-public class SwitchCounterResource extends CounterResourceBase {
-    @Get("json")
-    public Map<String, Object> retrieve() {
-        IFloodlightProviderService floodlightProvider = 
-                (IFloodlightProviderService)getContext().getAttributes().
-                    get(IFloodlightProviderService.class.getCanonicalName());
-        HashMap<String,Object> model = new HashMap<String,Object>();
-        
-        String switchID = (String) getRequestAttributes().get("switchId");
-        String counterName = (String) getRequestAttributes().get("counterName");
-
-        Long[] switchDpids;
-        if (switchID.equalsIgnoreCase("all")) {
-            switchDpids = floodlightProvider.getSwitches().keySet().toArray(new Long[0]);
-            getOneSwitchCounterJson(model, ICounterStoreService.CONTROLLER_NAME, counterName);
-            for (Long dpid : switchDpids) {
-                switchID = HexString.toHexString(dpid);
-
-                getOneSwitchCounterJson(model, switchID, counterName);
-            }
-        } else {
-            getOneSwitchCounterJson(model, switchID, counterName);
-        }
-        return model;
-    }
-    
-    protected void getOneSwitchCounterJson(Map<String, Object> model, 
-                                           String switchID, String counterName) {
-        String fullCounterName = "";      
-        
-        try {
-            counterName = URLDecoder.decode(counterName, "UTF-8");
-            fullCounterName = 
-                switchID + ICounterStoreService.TitleDelimitor + counterName;
-        } catch (UnsupportedEncodingException e) {
-            //Just leave counterTitle undecoded if there is an issue - fail silently
-        }
-
-        ICounter counter = this.counterStore.getCounter(fullCounterName);
-        Map<String, Long> sample = new HashMap<String, Long> ();
-        if (counter != null) {
-            sample.put(counter.getCounterDate().toString(), 
-                       counter.getCounterValue().getLong());
-            model.put(switchID, sample);
-        }
-    }
-    
-}
diff --git a/src/main/java/net/floodlightcontroller/counter/ConcurrentCounter.java b/src/main/java/net/floodlightcontroller/counter/ConcurrentCounter.java
deleted file mode 100644
index cdec1e0..0000000
--- a/src/main/java/net/floodlightcontroller/counter/ConcurrentCounter.java
+++ /dev/null
@@ -1,205 +0,0 @@
-/**
-*    Copyright 2011, Big Switch Networks, Inc. 
-*    Originally created by David Erickson, Stanford University
-* 
-*    Licensed under the Apache License, Version 2.0 (the "License"); you may
-*    not use this file except in compliance with the License. You may obtain
-*    a copy of the License at
-*
-*         http://www.apache.org/licenses/LICENSE-2.0
-*
-*    Unless required by applicable law or agreed to in writing, software
-*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-*    License for the specific language governing permissions and limitations
-*    under the License.
-**/
-
-/**
- * 
- */
-package net.floodlightcontroller.counter;
-
-import java.util.Collections;
-import java.util.Date;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Queue;
-import java.util.Set;
-import java.util.concurrent.ConcurrentLinkedQueue;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.Executors;
-import java.util.concurrent.TimeUnit;
-
-import net.floodlightcontroller.counter.CounterValue.CounterType;
-
-
-/**
- * This module needs to be updated with CounterValue.
- * 
- * This is a crumby attempt at a highly concurrent implementation of the Counter interface.
- * 
- * (Help! Help!  Someone please re-write me!  This will almost certainly break at high loads.)
- * 
- * The gist is that this class, ConcurrentCounter, keeps an internal highly transient buffer that is occasionally flushed
- * in to a set of CountBuffers (circular buffers) which store a longer term historical view of the count values at different
- * moments in time.
- * 
- * This Counter implementation may be a bit over-engineered...  The goal here was to present an implementation that is very
- * predictable with respect to memory and CPU time and, at the same time, present a very fast increment() method.  The reasoning
- * here is that this will be a go-to class when it comes to debugging, particularly in high-load situations where logging
- * may introduce so much variability to the system that it foils the results.
- * 
- * @author kyle
- *
- */
-public class ConcurrentCounter implements ICounter {
-
-  protected static final Map<DateSpan, Integer> MAX_HISTORY = new HashMap<DateSpan, Integer>();
-  static {
-    MAX_HISTORY.put(DateSpan.REALTIME, new Integer(1));
-    MAX_HISTORY.put(DateSpan.SECONDS, new Integer(120));
-    MAX_HISTORY.put(DateSpan.MINUTES, new Integer(60));
-    MAX_HISTORY.put(DateSpan.HOURS, new Integer(48));
-    MAX_HISTORY.put(DateSpan.DAYS, new Integer(60));
-    MAX_HISTORY.put(DateSpan.WEEKS, new Integer(2)); 
-  }
-  
-  protected static Set<ConcurrentCounter> liveCounters;
-  
-  static {
-    liveCounters = Collections.newSetFromMap(new ConcurrentHashMap<ConcurrentCounter, Boolean>()); //nifty way to get concurrent hash set
-    //Set a background thread to flush any liveCounters every 100 milliseconds
-    Executors.newScheduledThreadPool(1).scheduleAtFixedRate(new Runnable() {
-        public void run() {
-            for(ConcurrentCounter c : liveCounters) {
-                c.flush();
-            }
-        }}, 100, 100, TimeUnit.MILLISECONDS);
-  }
-
-  /**
-   * Very simple data structure to store off a single count entry at a single point in time
-   * @author kyle
-   *
-   */
-  protected static final class CountAtom {
-    protected Date date;
-    protected Long delta;
-    
-    protected CountAtom(Date date, Long delta) {
-      this.date = date;
-      this.delta = delta;
-    }
-    
-    public String toString() {
-      return "[" + this.date + ": " + this.delta + "]";
-    }
-  }
-
-  
-  protected Queue<CountAtom> unprocessedCountBuffer;
-  protected Map<DateSpan, CountBuffer> counts;
-  protected Date startDate;
-  
-  /**
-   * Factory method to create a new counter instance.  (Design note - 
-   * use a factory pattern here as it may be necessary to hook in other
-   * registrations around counter objects as they are created.)
-   * 
-   * @param startDate
-   * @return
-   */
-  public static ICounter createCounter(Date startDate) {
-    ConcurrentCounter cc = new ConcurrentCounter(startDate);
-    ConcurrentCounter.liveCounters.add(cc);
-    return cc;
-    
-  }
-  
-  /**
-   * Protected constructor - use createCounter factory method instead
-   * @param startDate
-   */
-  protected ConcurrentCounter(Date startDate) {
-    init(startDate);
-  }
-  
-  protected void init(Date startDate) {
-    this.startDate = startDate;
-    this.unprocessedCountBuffer = new ConcurrentLinkedQueue<CountAtom>();
-    this.counts = new HashMap<DateSpan, CountBuffer>();
-      
-    for(DateSpan ds : DateSpan.values()) {
-      CountBuffer cb = new CountBuffer(startDate, ds, MAX_HISTORY.get(ds));
-      counts.put(ds, cb);
-    }
-  }
-  /**
-   * This is the key method that has to be both fast and very thread-safe.
-   */
-  @Override
-  public void increment() {
-    this.increment(new Date(), (long)1);
-  }
-  
-  @Override
-  public void increment(Date d, long delta) {
-    this.unprocessedCountBuffer.add(new CountAtom(d, delta));
-  }
-  
-  @Override
-  public void setCounter(Date d, CounterValue value) {
-      // To be done later
-  }
-  
-  /**
-   * Reset the value.
-   */
-  @Override
-  public void reset(Date startDate) {
-    init(startDate);
-  }
-  
-  /**
-   * Flushes values out of the internal buffer and in to structures
-   * that can be fetched with a call to snapshot()
-   */
-  public synchronized void flush() {
-    for(CountAtom c = this.unprocessedCountBuffer.poll(); c != null; c = this.unprocessedCountBuffer.poll()) {
-      for(DateSpan ds : DateSpan.values()) {
-        CountBuffer cb = counts.get(ds);
-        cb.increment(c.date, c.delta);
-      }
-    }
-  }
-  
-  @Override
-  public CounterValue getCounterValue() {
-      // To be done later
-      //CountSeries cs = counts.get(DateSpan.REALTIME).snapshot();
-      //return cs.getSeries()[0];
-      return new CounterValue(CounterType.LONG);
-  }
-  
-  @Override
-  public Date getCounterDate() {
-      // To be done later
-      //CountSeries cs = counts.get(DateSpan.REALTIME).snapshot();
-      //return cs.getSeries()[0];
-      return new Date();
-  }
-  
-  @Override
-  /**
-   * This method returns a disconnected copy of the underlying CountSeries corresponding to dateSpan.
-   */
-  public CountSeries snapshot(DateSpan dateSpan) {
-    flush();
-    CountSeries cs = counts.get(dateSpan).snapshot();
-    return cs;
-  }
-
-  
-  
-}
diff --git a/src/main/java/net/floodlightcontroller/counter/CountBuffer.java b/src/main/java/net/floodlightcontroller/counter/CountBuffer.java
deleted file mode 100644
index fa45862..0000000
--- a/src/main/java/net/floodlightcontroller/counter/CountBuffer.java
+++ /dev/null
@@ -1,125 +0,0 @@
-/**
-*    Copyright 2011, Big Switch Networks, Inc. 
-*    Originally created by David Erickson, Stanford University
-* 
-*    Licensed under the Apache License, Version 2.0 (the "License"); you may
-*    not use this file except in compliance with the License. You may obtain
-*    a copy of the License at
-*
-*         http://www.apache.org/licenses/LICENSE-2.0
-*
-*    Unless required by applicable law or agreed to in writing, software
-*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-*    License for the specific language governing permissions and limitations
-*    under the License.
-**/
-
-package net.floodlightcontroller.counter;
-
-import java.util.Date;
-
-import net.floodlightcontroller.counter.ICounter.DateSpan;
-
-
-/**
- * Implements a circular buffer to store the last x time-based counter values.  This is pretty crumby
- * implementation, basically wrapping everything with synchronized blocks, in order to ensure that threads
- * which will be updating the series don't result in a thread which is reading the series getting stuck with
- * a start date which does not correspond to the count values in getSeries.
- * 
- * This could probably use a re-think...
- * 
- * @author kyle
- *
- */
-public class CountBuffer {
-  protected long[] counterValues;
-  protected Date startDate;
-  protected DateSpan dateSpan;
-  protected int currentIndex;
-  protected int seriesLength;
-
-
-  public CountBuffer(Date startDate, DateSpan dateSpan, int seriesLength) {
-    this.seriesLength = seriesLength;
-    this.counterValues = new long[seriesLength];
-    this.dateSpan = dateSpan;
-    
-    this.startDate = startDate;
-    this.currentIndex = 0;
-  }
-  
-  /**
-   * Increment the count associated with Date d, forgetting some of the older count values if necessary to ensure
-   * that the total span of time covered by this series corresponds to DateSpan * seriesLength (circular buffer).
-   * 
-   * Note - fails silently if the Date falls prior to the start of the tracked count values.
-   * 
-   * Note - this should be a reasonably fast method, though it will have to block if there is another thread reading the
-   * series at the same time.
-   * 
-   * @param d
-   * @param delta
-   */
-  public synchronized void increment(Date d, long delta) {
-
-    long dsMillis = CountSeries.dateSpanToMilliseconds(this.dateSpan);
-    Date endDate = new Date(startDate.getTime() + seriesLength * dsMillis - 1);
-
-    if(d.getTime() < startDate.getTime()) {
-      return; //silently fail rather than insert a count at a time older than the history buffer we're keeping
-    }
-    else if (d.getTime() >= startDate.getTime() && d.getTime() <= endDate.getTime()) {
-        int index = (int)  (( d.getTime() - startDate.getTime() ) / dsMillis); // java rounds down on long/long
-        int modIndex = (index + currentIndex) % seriesLength;
-        long currentValue = counterValues[modIndex];
-        counterValues[modIndex] = currentValue + delta;
-    }
-    else if (d.getTime() > endDate.getTime()) {
-      //Initialize new buckets
-      int newBuckets = (int)((d.getTime() - endDate.getTime()) / dsMillis) + 1; // java rounds down on long/long
-      for(int i = 0; i < newBuckets; i++) {
-        int modIndex = (i + currentIndex) % seriesLength;
-        counterValues[modIndex] = 0;
-      }
-      //Update internal vars
-      this.startDate = new Date(startDate.getTime() + dsMillis * newBuckets);
-      this.currentIndex = (currentIndex + newBuckets) % this.seriesLength;    
-
-      //Call again (date should be in the range this time)
-      this.increment(d, delta);
-    }
-  }
-  
-  /**
-   * Relatively slow method, expected to be called primarily from UI rather than from in-packet-path.
-   * 
-   * @return the count values associated with each time interval starting with startDate and demarc'ed by dateSpan
-   */
-  public long[] getSeries() { //synchronized here should lock on 'this', implying that it shares the lock with increment
-    long[] ret = new long[this.seriesLength];
-    for(int i = 0; i < this.seriesLength; i++) {
-      int modIndex = (currentIndex + i) % this.seriesLength;
-      ret[i] = this.counterValues[modIndex];
-    }
-    return ret;
-  }
-
-  
-  /**
-   * Returns an immutable count series that represents a snapshot of this
-   * series at a specific moment in time.
-   * @return
-   */
-  public synchronized CountSeries snapshot() {
-    long[] cvs = new long[this.seriesLength];
-    for(int i = 0; i < this.seriesLength; i++) {
-      int modIndex = (this.currentIndex + i) % this.seriesLength;
-      cvs[i] = this.counterValues[modIndex];
-    }
-
-    return new CountSeries(this.startDate, this.dateSpan, cvs);
-  }
-  
-}
diff --git a/src/main/java/net/floodlightcontroller/counter/CountSeries.java b/src/main/java/net/floodlightcontroller/counter/CountSeries.java
deleted file mode 100644
index e8a547a..0000000
--- a/src/main/java/net/floodlightcontroller/counter/CountSeries.java
+++ /dev/null
@@ -1,88 +0,0 @@
-/**
-*    Copyright 2011, Big Switch Networks, Inc. 
-*    Originally created by David Erickson, Stanford University
-* 
-*    Licensed under the Apache License, Version 2.0 (the "License"); you may
-*    not use this file except in compliance with the License. You may obtain
-*    a copy of the License at
-*
-*         http://www.apache.org/licenses/LICENSE-2.0
-*
-*    Unless required by applicable law or agreed to in writing, software
-*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-*    License for the specific language governing permissions and limitations
-*    under the License.
-**/
-
-package net.floodlightcontroller.counter;
-
-import java.util.Arrays;
-import java.util.Date;
-
-import net.floodlightcontroller.counter.ICounter.DateSpan;
-
-/**
- * Simple immutable class to store a series of historic counter values
- * 
- * This could probably use a re-think...
- * 
- * @author kyle
- *
- */
-public class CountSeries {  
-  protected long[] counterValues;
-  protected Date startDate;
-  protected DateSpan dateSpan;
-  
-  public CountSeries(Date startDate, DateSpan dateSpan, long[] counterValues) {
-    this.counterValues = counterValues.clone();
-    this.dateSpan = dateSpan;    
-    this.startDate = startDate;
-  }
-  
-
-  public long[] getSeries() { //synchronized here should lock on 'this', implying that it shares the lock with increment
-    return this.counterValues.clone();
-  }
-  
-  /**
-   * Returns the startDate of this series.  The first long in getSeries represents the sum of deltas from increment calls with dates
-   * that correspond to >= startDate and < startDate + DateSpan.
-   * @return
-   */
-  public Date getStartDate() {//synchronized here should lock on 'this', implying that it shares the lock with increment
-    return this.startDate;
-  }
-  
-  public String toString() {
-    String ret = "{start: " + this.startDate + ", span: " + this.dateSpan + ", series: " + Arrays.toString(getSeries()) + "}";
-    return ret;
-  }
-  
-  /**
-   * Return a long that is the number of milliseconds in a ds (second/minute/hour/day/week).  (Utility method.)
-   * 
-   * @param ds
-   * @return
-   */
-  public static final long dateSpanToMilliseconds(DateSpan ds) {
-    long delta = 1;
-    switch(ds) {
-	    case WEEKS:
-	    	delta *= 7;
-	    case DAYS:
-	    	delta *= 24;
-	    case HOURS:
-	    	delta *= 60;
-	    case MINUTES:
-	    	delta *= 60;
-	    case SECONDS:
-	    	delta *= 1000;
-	    default:
-	    	break;
-    }
-    return delta;
-  }
-
-}
diff --git a/src/main/java/net/floodlightcontroller/counter/CounterStore.java b/src/main/java/net/floodlightcontroller/counter/CounterStore.java
deleted file mode 100644
index 15ce53b..0000000
--- a/src/main/java/net/floodlightcontroller/counter/CounterStore.java
+++ /dev/null
@@ -1,461 +0,0 @@
-/**
- *    Copyright 2011, Big Switch Networks, Inc. 
- *    Originally created by David Erickson, Stanford University
- * 
- *    Licensed under the Apache License, Version 2.0 (the "License"); you may
- *    not use this file except in compliance with the License. You may obtain
- *    a copy of the License at
- *
- *         http://www.apache.org/licenses/LICENSE-2.0
- *
- *    Unless required by applicable law or agreed to in writing, software
- *    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- *    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- *    License for the specific language governing permissions and limitations
- *    under the License.
- **/
-
-/**
- * Implements a very simple central store for system counters
- */
-package net.floodlightcontroller.counter;
-
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Date;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.Executors;
-import java.util.concurrent.TimeUnit;
-
-import javax.annotation.PostConstruct;
-
-import net.floodlightcontroller.core.IOFSwitch;
-import net.floodlightcontroller.core.module.FloodlightModuleContext;
-import net.floodlightcontroller.core.module.FloodlightModuleException;
-import net.floodlightcontroller.core.module.IFloodlightModule;
-import net.floodlightcontroller.core.module.IFloodlightService;
-import net.floodlightcontroller.counter.CounterValue.CounterType;
-import net.floodlightcontroller.packet.Ethernet;
-import net.floodlightcontroller.packet.IPv4;
-
-import org.openflow.protocol.OFMessage;
-import org.openflow.protocol.OFPacketIn;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-
-/**
- * @author kyle
- *
- */
-public class CounterStore implements IFloodlightModule, ICounterStoreService {
-    protected final static Logger log = LoggerFactory.getLogger(CounterStore.class);
-
-    public enum NetworkLayer {
-        L2, L3, L4
-    }
-
-    protected class CounterEntry {
-        protected ICounter counter;
-        String title;
-    }
-
-    /**
-     * A map of counterName --> Counter
-     */
-    protected ConcurrentHashMap<String, CounterEntry> nameToCEIndex = 
-            new ConcurrentHashMap<String, CounterEntry>();
-
-    protected ICounter heartbeatCounter;
-    protected ICounter randomCounter;
-
-    /**
-     * Counter Categories grouped by network layers
-     * NetworkLayer -> CounterToCategories
-     */
-    protected static Map<NetworkLayer, Map<String, List<String>>> layeredCategories = 
-            new ConcurrentHashMap<NetworkLayer, Map<String, List<String>>> ();
-
-    public void updatePacketInCounters(IOFSwitch sw, OFMessage m, Ethernet eth) {
-        OFPacketIn packet = (OFPacketIn)m;
-        
-        // Make sure there is data
-        if (packet.getPacketData().length <= 0) return;
-        
-        /* Extract the etherType and protocol field for IPv4 packet.
-         */
-        String etherType = String.format("%04x", eth.getEtherType());
-        
-        /*
-         * Valid EtherType must be greater than or equal to 0x0600
-         * It is V1 Ethernet Frame if EtherType < 0x0600
-         */
-        if (eth.getEtherType() < 0x0600) {
-            etherType = "0599";
-        }
-
-        if (TypeAliases.l3TypeAliasMap != null && 
-            TypeAliases.l3TypeAliasMap.containsKey(etherType)) {
-            etherType = TypeAliases.l3TypeAliasMap.get(etherType);
-        } else {
-            etherType = "L3_" + etherType;
-        }
-        String switchIdHex = sw.getStringId();
-   
-        String packetName = m.getType().toClass().getName();
-        packetName = packetName.substring(packetName.lastIndexOf('.')+1); 
-        
-        // Construct controller counter for the packet_in
-        String controllerCounterName =
-            CounterStore.createCounterName(CONTROLLER_NAME, 
-                                           -1,
-                                           packetName);
-    
-        String controllerL3CategoryCounterName = 
-            CounterStore.createCounterName(CONTROLLER_NAME, 
-                                           -1,
-                                           packetName, 
-                                           etherType, 
-                                           NetworkLayer.L3);
-
-        String l2Type = null;
-        if (eth.isBroadcast()) {
-        	l2Type = BROADCAST;
-        } else if (eth.isMulticast()) {
-        	l2Type = MULTICAST;
-        } else {
-        	l2Type = UNICAST;
-        }
-        
-        // Construct both port and switch L3 counter for the packet_in
-    	String controllerL2CategoryCounterName = CounterStore.createCounterName(CONTROLLER_NAME, 
-                -1,
-                packetName, 
-                l2Type, 
-                NetworkLayer.L2);
-    	String switchL2CategoryCounterName = CounterStore.createCounterName(switchIdHex, 
-                -1, 
-                packetName, 
-                l2Type, 
-                NetworkLayer.L2);
-    	String portL2CategoryCounterName = CounterStore.createCounterName(switchIdHex, 
-                packet.getInPort(),
-                packetName, 
-                l2Type, 
-                NetworkLayer.L2);
-        
-        // Construct both port and switch L3 counter for the packet_in
-        String portCounterName =
-                CounterStore.createCounterName(switchIdHex, 
-                                               packet.getInPort(),
-                                               packetName);
-        String switchCounterName =
-                CounterStore.createCounterName(switchIdHex, 
-                                               -1,
-                                               packetName);
-        
-        String portL3CategoryCounterName = 
-                CounterStore.createCounterName(switchIdHex, 
-                                               packet.getInPort(),
-                                               packetName, 
-                                               etherType, 
-                                               NetworkLayer.L3);
-        String switchL3CategoryCounterName =
-                CounterStore.createCounterName(switchIdHex, 
-                                               -1, 
-                                               packetName, 
-                                               etherType, 
-                                               NetworkLayer.L3);
-
-        // Controller counters
-        ICounter controllerCounter = getCounter(controllerCounterName);
-        if (controllerCounter == null) {
-            controllerCounter = createCounter(controllerCounterName, 
-                                              CounterType.LONG);
-        }
-        controllerCounter.increment();
-        ICounter portCounter = getCounter(portCounterName);
-        if (portCounter == null) {
-            portCounter = createCounter(portCounterName, 
-                                        CounterType.LONG);
-        }
-        portCounter.increment();
-        ICounter switchCounter = getCounter(switchCounterName);
-        if (switchCounter == null) {
-            switchCounter = createCounter(switchCounterName, 
-                                          CounterType.LONG);
-        }
-        switchCounter.increment();
-
-        // L2 counters
-        ICounter controllerL2Counter = getCounter(controllerL2CategoryCounterName);
-        if (controllerL2Counter == null) {
-            controllerL2Counter = createCounter(controllerL2CategoryCounterName,
-                                                CounterType.LONG);
-        }
-        controllerL2Counter.increment();
-        ICounter switchL2Counter = getCounter(switchL2CategoryCounterName);
-        if (switchL2Counter == null) {
-            switchL2Counter = createCounter(switchL2CategoryCounterName,
-                                            CounterType.LONG);
-        }
-        switchL2Counter.increment();
-        ICounter portL2Counter = getCounter(portL2CategoryCounterName);
-        if (portL2Counter == null) {
-            portL2Counter = createCounter(portL2CategoryCounterName,
-                                          CounterType.LONG);
-        }
-        portL2Counter.increment();
-
-        // L3 counters
-        ICounter controllerL3Counter = getCounter(controllerL3CategoryCounterName);
-        if (controllerL3Counter == null) {
-            controllerL3Counter = createCounter(controllerL3CategoryCounterName,
-                                                CounterType.LONG);
-        }
-        controllerL3Counter.increment();
-        ICounter portL3Counter = getCounter(portL3CategoryCounterName);
-        if (portL3Counter == null) {
-            portL3Counter = createCounter(portL3CategoryCounterName,
-                                          CounterType.LONG);
-        }
-        portL3Counter.increment();
-        ICounter switchL3Counter = getCounter(switchL3CategoryCounterName);
-        if (switchL3Counter == null) {
-            switchL3Counter = createCounter(switchL3CategoryCounterName,
-                                            CounterType.LONG);
-        }
-        switchL3Counter.increment();
-
-        // L4 counters
-        if (etherType.compareTo(CounterStore.L3ET_IPV4) == 0) {
-            IPv4 ipV4 = (IPv4)eth.getPayload();
-            String l4Type = String.format("%02x", ipV4.getProtocol());
-            if (TypeAliases.l4TypeAliasMap != null && 
-                    TypeAliases.l4TypeAliasMap.containsKey(l4Type)) {
-                l4Type = TypeAliases.l4TypeAliasMap.get(l4Type);
-            } else {
-                l4Type = "L4_" + l4Type;
-            }
-            String controllerL4CategoryCounterName = 
-                    CounterStore.createCounterName(CONTROLLER_NAME, 
-                                                   -1, 
-                                                   packetName, 
-                                                   l4Type, 
-                                                   NetworkLayer.L4);
-            String portL4CategoryCounterName =
-                    CounterStore.createCounterName(switchIdHex, 
-                                                   packet.getInPort(), 
-                                                   packetName, 
-                                                   l4Type, 
-                                                   NetworkLayer.L4);
-            String switchL4CategoryCounterName = 
-                    CounterStore.createCounterName(switchIdHex, 
-                                                   -1, 
-                                                   packetName, 
-                                                   l4Type, 
-                                                   NetworkLayer.L4);
-            ICounter controllerL4Counter = getCounter(controllerL4CategoryCounterName);
-            if (controllerL4Counter == null) {
-                controllerL4Counter = createCounter(controllerL4CategoryCounterName, 
-                                                    CounterType.LONG);
-            }
-            controllerL4Counter.increment();
-            ICounter portL4Counter = getCounter(portL4CategoryCounterName);
-            if (portL4Counter == null) {
-                portL4Counter = createCounter(portL4CategoryCounterName, 
-                                              CounterType.LONG);
-            }
-            portL4Counter.increment();
-            ICounter switchL4Counter = getCounter(switchL4CategoryCounterName);
-            if (switchL4Counter == null) {
-                switchL4Counter = createCounter(switchL4CategoryCounterName, 
-                                                CounterType.LONG);
-            }
-            switchL4Counter.increment();
-        }
-    }
-    
-    /**
-     * This method can only be used to update packetOut and flowmod counters
-     * 
-     * @param sw
-     * @param ofMsg
-     */
-    public void updatePktOutFMCounterStore(IOFSwitch sw, OFMessage ofMsg) {
-        String packetName = ofMsg.getType().toClass().getName();
-        packetName = packetName.substring(packetName.lastIndexOf('.')+1);
-        // flowmod is per switch and controller. portid = -1
-        String controllerFMCounterName = CounterStore.createCounterName(CONTROLLER_NAME, -1, packetName);  
-        ICounter counter = getCounter(controllerFMCounterName);
-        if (counter == null) {
-            counter = createCounter(controllerFMCounterName, CounterValue.CounterType.LONG);
-        }
-        counter.increment();
-
-        String switchFMCounterName = CounterStore.createCounterName(sw.getStringId(), -1, packetName);
-        counter = getCounter(switchFMCounterName);
-        if (counter == null) {
-            counter = createCounter(switchFMCounterName, CounterValue.CounterType.LONG);
-        }
-        counter.increment();
-    }
-
-
-    /**
-     * Create a title based on switch ID, portID, vlanID, and counterName
-     * If portID is -1, the title represents the given switch only
-     * If portID is a non-negative number, the title represents the port on the given switch
-     */
-    public static String createCounterName(String switchID, int portID, String counterName) {
-        if (portID < 0) {
-            return switchID + TitleDelimitor + counterName;
-        } else {
-            return switchID + TitleDelimitor + portID + TitleDelimitor + counterName;
-        }
-    }
-
-    /**
-     * Create a title based on switch ID, portID, vlanID, counterName, and subCategory
-     * If portID is -1, the title represents the given switch only
-     * If portID is a non-negative number, the title represents the port on the given switch
-     * For example: PacketIns can be further categorized based on L2 etherType or L3 protocol
-     */
-    public static String createCounterName(String switchID, int portID, String counterName,
-            String subCategory, NetworkLayer layer) {
-        String fullCounterName = "";
-        String groupCounterName = "";
-
-        if (portID < 0) {
-            groupCounterName = switchID + TitleDelimitor + counterName;
-            fullCounterName = groupCounterName + TitleDelimitor + subCategory;
-        } else {
-            groupCounterName = switchID + TitleDelimitor + portID + TitleDelimitor + counterName;
-            fullCounterName = groupCounterName + TitleDelimitor + subCategory;
-        }
-
-        Map<String, List<String>> counterToCategories;      
-        if (layeredCategories.containsKey(layer)) {
-            counterToCategories = layeredCategories.get(layer);
-        } else {
-            counterToCategories = new ConcurrentHashMap<String, List<String>> ();
-            layeredCategories.put(layer, counterToCategories);
-        }
-
-        List<String> categories;
-        if (counterToCategories.containsKey(groupCounterName)) {
-            categories = counterToCategories.get(groupCounterName);
-        } else {
-            categories = new ArrayList<String>();
-            counterToCategories.put(groupCounterName, categories);
-        }
-
-        if (!categories.contains(subCategory)) {
-            categories.add(subCategory);
-        }
-        return fullCounterName;
-    }
-
-    @Override
-    public List<String> getAllCategories(String counterName, NetworkLayer layer) {
-        if (layeredCategories.containsKey(layer)) {
-            Map<String, List<String>> counterToCategories = layeredCategories.get(layer);
-            if (counterToCategories.containsKey(counterName)) {
-                return counterToCategories.get(counterName);
-            }
-        }
-        return null;
-    }
-    
-    @Override
-    public ICounter createCounter(String key, CounterValue.CounterType type) {
-        CounterEntry ce;
-        ICounter c;
-
-        c = SimpleCounter.createCounter(new Date(), type);
-        ce = new CounterEntry();
-        ce.counter = c;
-        ce.title = key;
-        nameToCEIndex.putIfAbsent(key, ce);
-        
-        return nameToCEIndex.get(key).counter;
-    }
-
-    /**
-     * Post construction init method to kick off the health check and random (test) counter threads
-     */
-    @PostConstruct
-    public void startUp() {
-        this.heartbeatCounter = this.createCounter("CounterStore heartbeat", CounterValue.CounterType.LONG);
-        this.randomCounter = this.createCounter("CounterStore random", CounterValue.CounterType.LONG);
-        //Set a background thread to flush any liveCounters every 100 milliseconds
-        Executors.newScheduledThreadPool(1).scheduleAtFixedRate(new Runnable() {
-            public void run() {
-                heartbeatCounter.increment();
-                randomCounter.increment(new Date(), (long) (Math.random() * 100)); //TODO - pull this in to random timing
-            }}, 100, 100, TimeUnit.MILLISECONDS);
-    }
-    
-    @Override
-    public ICounter getCounter(String key) {
-        CounterEntry counter = nameToCEIndex.get(key);
-        if (counter != null) {
-            return counter.counter;
-        } else {
-            return null;
-        }
-    }
-
-    /* (non-Javadoc)
-     * @see net.floodlightcontroller.counter.ICounterStoreService#getAll()
-     */
-    @Override
-    public Map<String, ICounter> getAll() {
-        Map<String, ICounter> ret = new ConcurrentHashMap<String, ICounter>();
-        for(Map.Entry<String, CounterEntry> counterEntry : this.nameToCEIndex.entrySet()) {
-            String key = counterEntry.getKey();
-            ICounter counter = counterEntry.getValue().counter;
-            ret.put(key, counter);
-        }
-        return ret;
-    }
-
-    @Override
-    public Collection<Class<? extends IFloodlightService>> getModuleServices() {
-        Collection<Class<? extends IFloodlightService>> services =
-                new ArrayList<Class<? extends IFloodlightService>>(1);
-        services.add(ICounterStoreService.class);
-        return services;
-    }
-
-    @Override
-    public Map<Class<? extends IFloodlightService>, IFloodlightService>
-            getServiceImpls() {
-        Map<Class<? extends IFloodlightService>,
-            IFloodlightService> m = 
-                new HashMap<Class<? extends IFloodlightService>,
-                    IFloodlightService>();
-        m.put(ICounterStoreService.class, this);
-        return m;
-    }
-
-    @Override
-    public Collection<Class<? extends IFloodlightService>> getModuleDependencies() {
-        // no-op, no dependencies
-        return null;
-    }
-
-    @Override
-    public void init(FloodlightModuleContext context)
-                                 throws FloodlightModuleException {
-        // no-op for now
-    }
-
-    @Override
-    public void startUp(FloodlightModuleContext context) {
-        // no-op for now
-    }
-}
diff --git a/src/main/java/net/floodlightcontroller/counter/CounterValue.java b/src/main/java/net/floodlightcontroller/counter/CounterValue.java
deleted file mode 100644
index 1852d5c..0000000
--- a/src/main/java/net/floodlightcontroller/counter/CounterValue.java
+++ /dev/null
@@ -1,102 +0,0 @@
-/**
-*    Copyright 2011, Big Switch Networks, Inc. 
-*    Originally created by David Erickson, Stanford University
-* 
-*    Licensed under the Apache License, Version 2.0 (the "License"); you may
-*    not use this file except in compliance with the License. You may obtain
-*    a copy of the License at
-*
-*         http://www.apache.org/licenses/LICENSE-2.0
-*
-*    Unless required by applicable law or agreed to in writing, software
-*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-*    License for the specific language governing permissions and limitations
-*    under the License.
-**/
-
-package net.floodlightcontroller.counter;
-
-/**
- * The class defines the counter value type and value
- * 
- * @author Kanzhe
- *
- */
-public class CounterValue { 
-  public enum CounterType {
-      LONG,
-      DOUBLE
-  }
-  
-  protected CounterType type; 
-  protected long longValue;
-  protected double doubleValue;
-  
-  public CounterValue(CounterType type) {
-    this.type = CounterType.LONG;
-    this.longValue = 0;    
-    this.doubleValue = 0.0;
-  }
-  
-  /**
-   * This method is only applicable to type long.
-   * Setter() should be used for type double
-   */
-  public void increment(long delta) {
-      if (this.type == CounterType.LONG) {
-          this.longValue += delta;
-      } else {
-          throw new IllegalArgumentException("Invalid counter type. This counter is not a long type.");
-      }
-  }
-  
-  public void setLongValue(long value) {
-      if (this.type == CounterType.LONG) {
-          this.longValue = value;
-      } else {
-          throw new IllegalArgumentException("Invalid counter type. This counter is not a long type.");
-      }
-  }
-  
-  public void setDoubleValue(double value) {
-      if (this.type == CounterType.DOUBLE) {
-          this.doubleValue = value;
-      } else {
-          throw new IllegalArgumentException("Invalid counter type. This counter is not a double type.");
-      }
-  }
-  
-  public long getLong() {
-      if (this.type == CounterType.LONG) {
-          return this.longValue;
-      } else {
-          throw new IllegalArgumentException("Invalid counter type. This counter is not a long type.");
-      }
-  }
-  
-  public double getDouble() {
-      if (this.type == CounterType.DOUBLE) {
-          return this.doubleValue;
-      } else {
-          throw new IllegalArgumentException("Invalid counter type. This counter is not a double type.");
-      }
-  }
-  
-
-  public CounterType getType() {
-    return this.type;
-  }
-  
-  public String toString() {
-    String ret = "{type: ";
-    if (this.type == CounterType.DOUBLE) {
-        ret += "Double" + ", value: " + this.doubleValue + "}";
-    } else {
-        ret += "Long" + ", value: " + this.longValue + "}";
-    }
-    return ret;
-  }
-
-
-}
diff --git a/src/main/java/net/floodlightcontroller/counter/ICounter.java b/src/main/java/net/floodlightcontroller/counter/ICounter.java
deleted file mode 100644
index 625bebd..0000000
--- a/src/main/java/net/floodlightcontroller/counter/ICounter.java
+++ /dev/null
@@ -1,80 +0,0 @@
-/**
-*    Copyright 2011, Big Switch Networks, Inc. 
-*    Originally created by David Erickson, Stanford University
-* 
-*    Licensed under the Apache License, Version 2.0 (the "License"); you may
-*    not use this file except in compliance with the License. You may obtain
-*    a copy of the License at
-*
-*         http://www.apache.org/licenses/LICENSE-2.0
-*
-*    Unless required by applicable law or agreed to in writing, software
-*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-*    License for the specific language governing permissions and limitations
-*    under the License.
-**/
-
-/**
- * Simple interface for a counter whose value can be retrieved in several different
- * time increments (last x seconds, minutes, hours, days)
- */
-package net.floodlightcontroller.counter;
-
-import java.util.Date;
-
-/**
- * @author kyle
- *
- */
-public interface ICounter {
-  
-  /**
-   * Most commonly used method
-   */
-  public void increment();
-  
-  /**
-   * Used primarily for testing - no performance guarantees
-   */
-  public void increment(Date d, long delta);
-  
-  /**
-   * Counter value setter
-   */
-  public void setCounter(Date d, CounterValue value);
-  
-  /**
-   * Return the most current value
-   */
-  public Date getCounterDate();
-  
-  /**
-   * Return the most current value
-   */
-  public CounterValue getCounterValue();
-  
-  /**
-   * Reset the value
-   */
-  public void reset(Date d);
-  
-  /**
-   * Returns a CountSeries that is a snapshot of the counter's values for the given dateSpan.  (Further changes
-   * to this counter won't be reflected in the CountSeries that comes  back.)
-   * 
-   * @param dateSpan
-   * @return
-   */
-  public CountSeries snapshot(DateSpan dateSpan);
-  
-
-  public static enum DateSpan {
-    REALTIME,
-    SECONDS,
-    MINUTES,
-    HOURS,
-    DAYS,
-    WEEKS
-  }
-}
diff --git a/src/main/java/net/floodlightcontroller/counter/ICounterStoreService.java b/src/main/java/net/floodlightcontroller/counter/ICounterStoreService.java
deleted file mode 100644
index c89eee0..0000000
--- a/src/main/java/net/floodlightcontroller/counter/ICounterStoreService.java
+++ /dev/null
@@ -1,71 +0,0 @@
-package net.floodlightcontroller.counter;
-
-import java.util.List;
-import java.util.Map;
-
-import org.openflow.protocol.OFMessage;
-
-import net.floodlightcontroller.core.IOFSwitch;
-import net.floodlightcontroller.core.module.IFloodlightService;
-import net.floodlightcontroller.counter.CounterStore.NetworkLayer;
-import net.floodlightcontroller.packet.Ethernet;
-
-public interface ICounterStoreService extends IFloodlightService {
-
-	public final static String CONTROLLER_NAME = "controller";
-    public final static String TitleDelimitor = "__";
-
-    /** Broadcast and multicast */
-    public final static String BROADCAST = "broadcast";
-    public final static String MULTICAST = "multicast";
-    public final static String UNICAST = "unicast";
-    
-    /** L2 EtherType subCategories */
-    public final static String L3ET_IPV4 = "L3_IPv4";
-
-    /**
-     * Update packetIn counters
-     * 
-     * @param sw
-     * @param m
-     * @param eth
-     */
-    public void updatePacketInCounters(IOFSwitch sw, OFMessage m, Ethernet eth);
-    
-    /**
-     * This method can only be used to update packetOut and flowmod counters
-     * 
-     * @param sw
-     * @param ofMsg
-     */
-    public void updatePktOutFMCounterStore(IOFSwitch sw, OFMessage ofMsg);
-    
-    /**
-     * Retrieve a list of subCategories by counterName.
-     * null if nothing.
-     */
-    public List<String> getAllCategories(String counterName,
-                                         NetworkLayer layer);
-
-    /**
-     * Create a new ICounter and set the title.  Note that the title must be 
-     * unique, otherwise this will throw an IllegalArgumentException.
-     * 
-     * @param key
-     * @param type
-     * @return
-     */
-    public ICounter createCounter(String key, CounterValue.CounterType type);
-
-    /**
-     * Retrieves a counter with the given title, or null if none can be found.
-     */
-    public ICounter getCounter(String key);
-
-    /**
-     * Returns an immutable map of title:counter with all of the counters in the store.
-     * 
-     * (Note - this method may be slow - primarily for debugging/UI)
-     */
-    public Map<String, ICounter> getAll();
-}
diff --git a/src/main/java/net/floodlightcontroller/counter/NullCounterStore.java b/src/main/java/net/floodlightcontroller/counter/NullCounterStore.java
deleted file mode 100644
index fed8c1e..0000000
--- a/src/main/java/net/floodlightcontroller/counter/NullCounterStore.java
+++ /dev/null
@@ -1,104 +0,0 @@
-package net.floodlightcontroller.counter;
-
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Date;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-import org.openflow.protocol.OFMessage;
-
-import net.floodlightcontroller.core.IOFSwitch;
-import net.floodlightcontroller.core.module.FloodlightModuleContext;
-import net.floodlightcontroller.core.module.FloodlightModuleException;
-import net.floodlightcontroller.core.module.IFloodlightModule;
-import net.floodlightcontroller.core.module.IFloodlightService;
-import net.floodlightcontroller.counter.CounterStore.NetworkLayer;
-import net.floodlightcontroller.counter.CounterValue.CounterType;
-import net.floodlightcontroller.packet.Ethernet;
-
-/**
- * An ICounsterStoreService implementation that does nothing.
- * This is used mainly for performance testing or if you don't
- * want to use the counterstore.
- * @author alexreimers
- *
- */
-public class NullCounterStore implements IFloodlightModule,
-        ICounterStoreService {
-
-    private ICounter emptyCounter;
-    private List<String> emptyList;
-    private Map<String, ICounter> emptyMap;
-    
-    @Override
-    public void updatePacketInCounters(IOFSwitch sw, OFMessage m, Ethernet eth) {
-        // no-op
-    }
-
-    @Override
-    public void updatePktOutFMCounterStore(IOFSwitch sw, OFMessage ofMsg) {
-        // no-op
-    }
-
-    @Override
-    public List<String>
-            getAllCategories(String counterName, NetworkLayer layer) {
-        return emptyList;
-    }
-
-    @Override
-    public ICounter createCounter(String key, CounterType type) {
-        return emptyCounter;
-    }
-
-    @Override
-    public ICounter getCounter(String key) {
-        return emptyCounter;
-    }
-
-    @Override
-    public Map<String, ICounter> getAll() {
-        return emptyMap;
-    }
-
-    @Override
-    public Collection<Class<? extends IFloodlightService>> getModuleServices() {
-        Collection<Class<? extends IFloodlightService>> services =
-                new ArrayList<Class<? extends IFloodlightService>>(1);
-        services.add(ICounterStoreService.class);
-        return services;
-    }
-
-    @Override
-    public Map<Class<? extends IFloodlightService>, IFloodlightService>
-            getServiceImpls() {
-        Map<Class<? extends IFloodlightService>,
-            IFloodlightService> m = 
-                new HashMap<Class<? extends IFloodlightService>,
-                        IFloodlightService>();
-        m.put(ICounterStoreService.class, this);
-        return m;
-    }
-
-    @Override
-    public Collection<Class<? extends IFloodlightService>>
-            getModuleDependencies() {
-        // None, return null
-        return null;
-    }
-
-    @Override
-    public void init(FloodlightModuleContext context)
-                             throws FloodlightModuleException {
-        emptyCounter = new SimpleCounter(new Date(), CounterType.LONG);
-        emptyList = new ArrayList<String>();
-        emptyMap = new HashMap<String, ICounter>();
-    }
-
-    @Override
-    public void startUp(FloodlightModuleContext context) {
-        // no-op
-    }
-}
diff --git a/src/main/java/net/floodlightcontroller/counter/SimpleCounter.java b/src/main/java/net/floodlightcontroller/counter/SimpleCounter.java
deleted file mode 100644
index 01a0428..0000000
--- a/src/main/java/net/floodlightcontroller/counter/SimpleCounter.java
+++ /dev/null
@@ -1,137 +0,0 @@
-/**
-*    Copyright 2011, Big Switch Networks, Inc. 
-*    Originally created by David Erickson, Stanford University
-* 
-*    Licensed under the Apache License, Version 2.0 (the "License"); you may
-*    not use this file except in compliance with the License. You may obtain
-*    a copy of the License at
-*
-*         http://www.apache.org/licenses/LICENSE-2.0
-*
-*    Unless required by applicable law or agreed to in writing, software
-*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-*    License for the specific language governing permissions and limitations
-*    under the License.
-**/
-
-/**
- * 
- */
-package net.floodlightcontroller.counter;
-
-import java.util.Date;
-
-
-
-/**
- * This is a simple counter implementation that doesn't support data series.
- * The idea is that floodlight only keeps the realtime value for each counter,
- * statd, a statistics collection daemon, samples counters at a user-defined interval
- * and pushes the values to a database, which keeps time-based data series. 
- * @author Kanzhe
- *
- */
-public class SimpleCounter implements ICounter {
-
-  protected CounterValue counter;
-  protected Date samplingTime;
-  protected Date startDate;
-  
-  /**
-   * Factory method to create a new counter instance.  
-   * 
-   * @param startDate
-   * @return
-   */
-  public static ICounter createCounter(Date startDate, CounterValue.CounterType type) {
-    SimpleCounter cc = new SimpleCounter(startDate, type);
-    return cc;
-  }
-  
-  /**
-   * Factory method to create a copy of a counter instance.  
-   * 
-   * @param startDate
-   * @return
-   */
-  public static ICounter createCounter(ICounter copy) {
-    if (copy == null ||
-        copy.getCounterDate() == null ||
-        copy.getCounterValue() == null) {
-        return null;
-    }
-
-     SimpleCounter cc = new SimpleCounter(copy.getCounterDate(),
-            copy.getCounterValue().getType());
-     cc.setCounter(copy.getCounterDate(), copy.getCounterValue());
-     return cc;
-  }
-  
-  /**
-   * Protected constructor - use createCounter factory method instead
-   * @param startDate
-   */
-  protected SimpleCounter(Date startDate, CounterValue.CounterType type) {
-    init(startDate, type);
-  }
-  
-  protected void init(Date startDate, CounterValue.CounterType type) {
-    this.startDate = startDate;
-    this.samplingTime = new Date();
-    this.counter = new CounterValue(type);
-  }
-  
-  /**
-   * This is the key method that has to be both fast and very thread-safe.
-   */
-  @Override
-  synchronized public void increment() {
-    this.increment(new Date(), (long)1);
-  }
-  
-  @Override
-  synchronized public void increment(Date d, long delta) {
-    this.samplingTime = d;
-    this.counter.increment(delta);
-  }
-  
-  synchronized public void setCounter(Date d, CounterValue value) {
-      this.samplingTime = d;
-      this.counter = value;
-  }
-  
-  /**
-   * This is the method to retrieve the current value.
-   */
-  @Override
-  synchronized public CounterValue getCounterValue() {
-    return this.counter;
-  }
-
-  /**
-   * This is the method to retrieve the last sampling time.
-   */
-  @Override
-  synchronized public Date getCounterDate() {
-    return this.samplingTime;
-  }
-  
-  /**
-   * Reset value.
-   */
-  @Override
-  synchronized public void reset(Date startDate) {
-    init(startDate, this.counter.getType());
-  }
-  
-  @Override
-  /**
-   * This method only returns the real-time value.
-   */
-  synchronized public CountSeries snapshot(DateSpan dateSpan) {
-    long[] values = new long[1];
-    values[0] = this.counter.getLong();
-    return new CountSeries(this.samplingTime, DateSpan.DAYS, values);
-  }
-}
diff --git a/src/main/java/net/floodlightcontroller/counter/TypeAliases.java b/src/main/java/net/floodlightcontroller/counter/TypeAliases.java
deleted file mode 100644
index 0d7e2b5..0000000
--- a/src/main/java/net/floodlightcontroller/counter/TypeAliases.java
+++ /dev/null
@@ -1,190 +0,0 @@
-package net.floodlightcontroller.counter;
-
-import java.util.HashMap;
-import java.util.Map;
-
-/**
- * Class to contain some statically initialized data
- * @author readams
- *
- */
-public class TypeAliases {
-    protected static final Map<String,String> l3TypeAliasMap = 
-            new HashMap<String, String>();
-    static {
-        l3TypeAliasMap.put("0599", "L3_V1Ether");
-        l3TypeAliasMap.put("0800", "L3_IPv4");
-        l3TypeAliasMap.put("0806", "L3_ARP");
-        l3TypeAliasMap.put("8035", "L3_RARP");
-        l3TypeAliasMap.put("809b", "L3_AppleTalk");
-        l3TypeAliasMap.put("80f3", "L3_AARP");
-        l3TypeAliasMap.put("8100", "L3_802_1Q");
-        l3TypeAliasMap.put("8137", "L3_Novell_IPX");
-        l3TypeAliasMap.put("8138", "L3_Novell");
-        l3TypeAliasMap.put("86dd", "L3_IPv6");
-        l3TypeAliasMap.put("8847", "L3_MPLS_uni");
-        l3TypeAliasMap.put("8848", "L3_MPLS_multi");
-        l3TypeAliasMap.put("8863", "L3_PPPoE_DS");
-        l3TypeAliasMap.put("8864", "L3_PPPoE_SS");
-        l3TypeAliasMap.put("886f", "L3_MSFT_NLB");
-        l3TypeAliasMap.put("8870", "L3_Jumbo");
-        l3TypeAliasMap.put("889a", "L3_HyperSCSI");
-        l3TypeAliasMap.put("88a2", "L3_ATA_Ethernet");
-        l3TypeAliasMap.put("88a4", "L3_EtherCAT");
-        l3TypeAliasMap.put("88a8", "L3_802_1ad");
-        l3TypeAliasMap.put("88ab", "L3_Ether_Powerlink");
-        l3TypeAliasMap.put("88cc", "L3_LLDP");
-        l3TypeAliasMap.put("88cd", "L3_SERCOS_III");
-        l3TypeAliasMap.put("88e5", "L3_802_1ae");
-        l3TypeAliasMap.put("88f7", "L3_IEEE_1588");
-        l3TypeAliasMap.put("8902", "L3_802_1ag_CFM");
-        l3TypeAliasMap.put("8906", "L3_FCoE");
-        l3TypeAliasMap.put("9000", "L3_Loop");
-        l3TypeAliasMap.put("9100", "L3_Q_in_Q");
-        l3TypeAliasMap.put("cafe", "L3_LLT");
-    }
-    
-    protected static final Map<String,String> l4TypeAliasMap = 
-            new HashMap<String, String>();
-    static {
-        l4TypeAliasMap.put("00", "L4_HOPOPT");
-        l4TypeAliasMap.put("01", "L4_ICMP");
-        l4TypeAliasMap.put("02", "L4_IGAP_IGMP_RGMP");
-        l4TypeAliasMap.put("03", "L4_GGP");
-        l4TypeAliasMap.put("04", "L4_IP");
-        l4TypeAliasMap.put("05", "L4_ST");
-        l4TypeAliasMap.put("06", "L4_TCP");
-        l4TypeAliasMap.put("07", "L4_UCL");
-        l4TypeAliasMap.put("08", "L4_EGP");
-        l4TypeAliasMap.put("09", "L4_IGRP");
-        l4TypeAliasMap.put("0a", "L4_BBN");
-        l4TypeAliasMap.put("0b", "L4_NVP");
-        l4TypeAliasMap.put("0c", "L4_PUP");
-        l4TypeAliasMap.put("0d", "L4_ARGUS");
-        l4TypeAliasMap.put("0e", "L4_EMCON");
-        l4TypeAliasMap.put("0f", "L4_XNET");
-        l4TypeAliasMap.put("10", "L4_Chaos");
-        l4TypeAliasMap.put("11", "L4_UDP");
-        l4TypeAliasMap.put("12", "L4_TMux");
-        l4TypeAliasMap.put("13", "L4_DCN");
-        l4TypeAliasMap.put("14", "L4_HMP");
-        l4TypeAliasMap.put("15", "L4_Packet_Radio");
-        l4TypeAliasMap.put("16", "L4_XEROX_NS_IDP");
-        l4TypeAliasMap.put("17", "L4_Trunk_1");
-        l4TypeAliasMap.put("18", "L4_Trunk_2");
-        l4TypeAliasMap.put("19", "L4_Leaf_1");
-        l4TypeAliasMap.put("1a", "L4_Leaf_2");
-        l4TypeAliasMap.put("1b", "L4_RDP");
-        l4TypeAliasMap.put("1c", "L4_IRTP");
-        l4TypeAliasMap.put("1d", "L4_ISO_TP4");
-        l4TypeAliasMap.put("1e", "L4_NETBLT");
-        l4TypeAliasMap.put("1f", "L4_MFE");
-        l4TypeAliasMap.put("20", "L4_MERIT");
-        l4TypeAliasMap.put("21", "L4_DCCP");
-        l4TypeAliasMap.put("22", "L4_Third_Party_Connect");
-        l4TypeAliasMap.put("23", "L4_IDPR");
-        l4TypeAliasMap.put("24", "L4_XTP");
-        l4TypeAliasMap.put("25", "L4_Datagram_Delivery");
-        l4TypeAliasMap.put("26", "L4_IDPR");
-        l4TypeAliasMap.put("27", "L4_TP");
-        l4TypeAliasMap.put("28", "L4_ILTP");
-        l4TypeAliasMap.put("29", "L4_IPv6_over_IPv4");
-        l4TypeAliasMap.put("2a", "L4_SDRP");
-        l4TypeAliasMap.put("2b", "L4_IPv6_RH");
-        l4TypeAliasMap.put("2c", "L4_IPv6_FH");
-        l4TypeAliasMap.put("2d", "L4_IDRP");
-        l4TypeAliasMap.put("2e", "L4_RSVP");
-        l4TypeAliasMap.put("2f", "L4_GRE");
-        l4TypeAliasMap.put("30", "L4_DSR");
-        l4TypeAliasMap.put("31", "L4_BNA");
-        l4TypeAliasMap.put("32", "L4_ESP");
-        l4TypeAliasMap.put("33", "L4_AH");
-        l4TypeAliasMap.put("34", "L4_I_NLSP");
-        l4TypeAliasMap.put("35", "L4_SWIPE");
-        l4TypeAliasMap.put("36", "L4_NARP");
-        l4TypeAliasMap.put("37", "L4_Minimal_Encapsulation");
-        l4TypeAliasMap.put("38", "L4_TLSP");
-        l4TypeAliasMap.put("39", "L4_SKIP");
-        l4TypeAliasMap.put("3a", "L4_ICMPv6");
-        l4TypeAliasMap.put("3b", "L4_IPv6_No_Next_Header");
-        l4TypeAliasMap.put("3c", "L4_IPv6_Destination_Options");
-        l4TypeAliasMap.put("3d", "L4_Any_host_IP");
-        l4TypeAliasMap.put("3e", "L4_CFTP");
-        l4TypeAliasMap.put("3f", "L4_Any_local");
-        l4TypeAliasMap.put("40", "L4_SATNET");
-        l4TypeAliasMap.put("41", "L4_Kryptolan");
-        l4TypeAliasMap.put("42", "L4_MIT_RVDP");
-        l4TypeAliasMap.put("43", "L4_Internet_Pluribus");
-        l4TypeAliasMap.put("44", "L4_Distributed_FS");
-        l4TypeAliasMap.put("45", "L4_SATNET");
-        l4TypeAliasMap.put("46", "L4_VISA");
-        l4TypeAliasMap.put("47", "L4_IP_Core");
-        l4TypeAliasMap.put("4a", "L4_Wang_Span");
-        l4TypeAliasMap.put("4b", "L4_Packet_Video");
-        l4TypeAliasMap.put("4c", "L4_Backroom_SATNET");
-        l4TypeAliasMap.put("4d", "L4_SUN_ND");
-        l4TypeAliasMap.put("4e", "L4_WIDEBAND_Monitoring");
-        l4TypeAliasMap.put("4f", "L4_WIDEBAND_EXPAK");
-        l4TypeAliasMap.put("50", "L4_ISO_IP");
-        l4TypeAliasMap.put("51", "L4_VMTP");
-        l4TypeAliasMap.put("52", "L4_SECURE_VMTP");
-        l4TypeAliasMap.put("53", "L4_VINES");
-        l4TypeAliasMap.put("54", "L4_TTP");
-        l4TypeAliasMap.put("55", "L4_NSFNET_IGP");
-        l4TypeAliasMap.put("56", "L4_Dissimilar_GP");
-        l4TypeAliasMap.put("57", "L4_TCF");
-        l4TypeAliasMap.put("58", "L4_EIGRP");
-        l4TypeAliasMap.put("59", "L4_OSPF");
-        l4TypeAliasMap.put("5a", "L4_Sprite_RPC");
-        l4TypeAliasMap.put("5b", "L4_Locus_ARP");
-        l4TypeAliasMap.put("5c", "L4_MTP");
-        l4TypeAliasMap.put("5d", "L4_AX");
-        l4TypeAliasMap.put("5e", "L4_IP_within_IP");
-        l4TypeAliasMap.put("5f", "L4_Mobile_ICP");
-        l4TypeAliasMap.put("61", "L4_EtherIP");
-        l4TypeAliasMap.put("62", "L4_Encapsulation_Header");
-        l4TypeAliasMap.put("64", "L4_GMTP");
-        l4TypeAliasMap.put("65", "L4_IFMP");
-        l4TypeAliasMap.put("66", "L4_PNNI");
-        l4TypeAliasMap.put("67", "L4_PIM");
-        l4TypeAliasMap.put("68", "L4_ARIS");
-        l4TypeAliasMap.put("69", "L4_SCPS");
-        l4TypeAliasMap.put("6a", "L4_QNX");
-        l4TypeAliasMap.put("6b", "L4_Active_Networks");
-        l4TypeAliasMap.put("6c", "L4_IPPCP");
-        l4TypeAliasMap.put("6d", "L4_SNP");
-        l4TypeAliasMap.put("6e", "L4_Compaq_Peer_Protocol");
-        l4TypeAliasMap.put("6f", "L4_IPX_in_IP");
-        l4TypeAliasMap.put("70", "L4_VRRP");
-        l4TypeAliasMap.put("71", "L4_PGM");
-        l4TypeAliasMap.put("72", "L4_0_hop");
-        l4TypeAliasMap.put("73", "L4_L2TP");
-        l4TypeAliasMap.put("74", "L4_DDX");
-        l4TypeAliasMap.put("75", "L4_IATP");
-        l4TypeAliasMap.put("76", "L4_ST");
-        l4TypeAliasMap.put("77", "L4_SRP");
-        l4TypeAliasMap.put("78", "L4_UTI");
-        l4TypeAliasMap.put("79", "L4_SMP");
-        l4TypeAliasMap.put("7a", "L4_SM");
-        l4TypeAliasMap.put("7b", "L4_PTP");
-        l4TypeAliasMap.put("7c", "L4_ISIS");
-        l4TypeAliasMap.put("7d", "L4_FIRE");
-        l4TypeAliasMap.put("7e", "L4_CRTP");
-        l4TypeAliasMap.put("7f", "L4_CRUDP");
-        l4TypeAliasMap.put("80", "L4_SSCOPMCE");
-        l4TypeAliasMap.put("81", "L4_IPLT");
-        l4TypeAliasMap.put("82", "L4_SPS");
-        l4TypeAliasMap.put("83", "L4_PIPE");
-        l4TypeAliasMap.put("84", "L4_SCTP");
-        l4TypeAliasMap.put("85", "L4_Fibre_Channel");
-        l4TypeAliasMap.put("86", "L4_RSVP_E2E_IGNORE");
-        l4TypeAliasMap.put("87", "L4_Mobility_Header");
-        l4TypeAliasMap.put("88", "L4_UDP_Lite");
-        l4TypeAliasMap.put("89", "L4_MPLS");
-        l4TypeAliasMap.put("8a", "L4_MANET");
-        l4TypeAliasMap.put("8b", "L4_HIP");
-        l4TypeAliasMap.put("8c", "L4_Shim6");
-        l4TypeAliasMap.put("8d", "L4_WESP");
-        l4TypeAliasMap.put("8e", "L4_ROHC");
-    }
-}
diff --git a/src/main/java/net/floodlightcontroller/devicemanager/SwitchPort.java b/src/main/java/net/floodlightcontroller/devicemanager/SwitchPort.java
index 725d699..7426163 100644
--- a/src/main/java/net/floodlightcontroller/devicemanager/SwitchPort.java
+++ b/src/main/java/net/floodlightcontroller/devicemanager/SwitchPort.java
@@ -20,7 +20,7 @@
 import net.floodlightcontroller.core.web.serializers.DPIDSerializer;
 
 import org.codehaus.jackson.map.annotate.JsonSerialize;
-import org.codehaus.jackson.map.ser.std.ToStringSerializer;
+import org.codehaus.jackson.map.ser.ToStringSerializer;
 
 /**
  * A simple switch DPID/port pair
diff --git a/src/main/java/net/floodlightcontroller/devicemanager/internal/DeviceManagerImpl.java b/src/main/java/net/floodlightcontroller/devicemanager/internal/DeviceManagerImpl.java
index 087756c..9956a29 100755
--- a/src/main/java/net/floodlightcontroller/devicemanager/internal/DeviceManagerImpl.java
+++ b/src/main/java/net/floodlightcontroller/devicemanager/internal/DeviceManagerImpl.java
@@ -29,7 +29,6 @@
 import java.util.Iterator;
 import java.util.LinkedList;
 import java.util.List;
-import java.util.ListIterator;
 import java.util.Map;
 import java.util.Queue;
 import java.util.Set;
@@ -39,9 +38,6 @@
 
 import net.floodlightcontroller.core.FloodlightContext;
 import net.floodlightcontroller.core.IFloodlightProviderService;
-import net.floodlightcontroller.core.IFloodlightProviderService.Role;
-import net.floodlightcontroller.core.IHAListener;
-import net.floodlightcontroller.core.IInfoProvider;
 import net.floodlightcontroller.core.IOFMessageListener;
 import net.floodlightcontroller.core.IOFSwitch;
 import net.floodlightcontroller.core.IUpdate;
@@ -57,16 +53,12 @@
 import net.floodlightcontroller.devicemanager.IEntityClassifierService;
 import net.floodlightcontroller.devicemanager.SwitchPort;
 import net.floodlightcontroller.devicemanager.web.DeviceRoutable;
-import net.floodlightcontroller.flowcache.IFlowReconcileListener;
-import net.floodlightcontroller.flowcache.IFlowReconcileService;
-import net.floodlightcontroller.flowcache.OFMatchReconcile;
 import net.floodlightcontroller.packet.ARP;
 import net.floodlightcontroller.packet.DHCP;
 import net.floodlightcontroller.packet.Ethernet;
 import net.floodlightcontroller.packet.IPv4;
 import net.floodlightcontroller.packet.UDP;
 import net.floodlightcontroller.restserver.IRestApiService;
-import net.floodlightcontroller.storage.IStorageSourceService;
 import net.floodlightcontroller.threadpool.IThreadPoolService;
 import net.floodlightcontroller.topology.ITopologyListener;
 import net.floodlightcontroller.topology.ITopologyService;
@@ -88,17 +80,15 @@
  */
 public class DeviceManagerImpl implements
 IDeviceService, IOFMessageListener, ITopologyListener,
-IFloodlightModule, IEntityClassListener,
-IFlowReconcileListener, IInfoProvider, IHAListener {
+IFloodlightModule, IEntityClassListener {
     protected final static Logger logger =
             LoggerFactory.getLogger(DeviceManagerImpl.class);
 
     protected IFloodlightProviderService floodlightProvider;
     protected ITopologyService topology;
-    protected IStorageSourceService storageSource;
     protected IRestApiService restApi;
     protected IThreadPoolService threadPool;
-    protected IFlowReconcileService flowReconcileMgr;
+
 
     /**
      * Time in milliseconds before entities will expire
@@ -554,20 +544,6 @@
         deviceListeners.add(listener);
     }
 
-    // *************
-    // IInfoProvider
-    // *************
-
-    @Override
-    public Map<String, Object> getInfo(String type) {
-        if (!"summary".equals(type))
-            return null;
-
-        Map<String, Object> info = new HashMap<String, Object>();
-        info.put("# hosts", deviceMap.size());
-        return info;
-    }
-
     // ******************
     // IOFMessageListener
     // ******************
@@ -601,62 +577,7 @@
         return Command.CONTINUE;
     }
 
-    // ***************
-    // IFlowReconcileListener
-    // ***************
-    @Override
-    public Command reconcileFlows(ArrayList<OFMatchReconcile> ofmRcList) {
-        ListIterator<OFMatchReconcile> iter = ofmRcList.listIterator();
-        while (iter.hasNext()) {
-            OFMatchReconcile ofm = iter.next();
-            
-            // Remove the STOPPed flow.
-            if (Command.STOP == reconcileFlow(ofm)) {
-                iter.remove();
-            }
-        }
-        
-        if (ofmRcList.size() > 0) {
-            return Command.CONTINUE;
-        } else {
-            return Command.STOP;
-        }
-    }
-
-    protected Command reconcileFlow(OFMatchReconcile ofm) {
-        // Extract source entity information
-        Entity srcEntity =
-                getEntityFromFlowMod(ofm.ofmWithSwDpid, true);
-        if (srcEntity == null)
-            return Command.STOP;
-
-        // Find the device by source entity
-        Device srcDevice = findDeviceByEntity(srcEntity);
-        if (srcDevice == null)
-            return Command.STOP;
-
-        // Store the source device in the context
-        fcStore.put(ofm.cntx, CONTEXT_SRC_DEVICE, srcDevice);
-
-        // Find the device matching the destination from the entity
-        // classes of the source.
-        Entity dstEntity = getEntityFromFlowMod(ofm.ofmWithSwDpid, false);
-        Device dstDevice = null;
-        if (dstEntity != null) {
-            dstDevice = findDestByEntity(srcDevice, dstEntity);
-            if (dstDevice != null)
-                fcStore.put(ofm.cntx, CONTEXT_DST_DEVICE, dstDevice);
-        }
-        if (logger.isTraceEnabled()) {
-            logger.trace("Reconciling flow: match={}, srcEntity={}, srcDev={}, " 
-                         + "dstEntity={}, dstDev={}",
-                         new Object[] {ofm.ofmWithSwDpid.getOfMatch(),
-                                       srcEntity, srcDevice, 
-                                       dstEntity, dstDevice } );
-        }
-        return Command.CONTINUE;
-    }
-
+ 
     // *****************
     // IFloodlightModule
     // *****************
@@ -686,11 +607,9 @@
         Collection<Class<? extends IFloodlightService>> l =
                 new ArrayList<Class<? extends IFloodlightService>>();
         l.add(IFloodlightProviderService.class);
-        l.add(IStorageSourceService.class);
         l.add(ITopologyService.class);
         l.add(IRestApiService.class);
         l.add(IThreadPoolService.class);
-        l.add(IFlowReconcileService.class);
         l.add(IEntityClassifierService.class);
         return l;
     }
@@ -707,13 +626,10 @@
 
         this.floodlightProvider =
                 fmc.getServiceImpl(IFloodlightProviderService.class);
-        this.storageSource =
-                fmc.getServiceImpl(IStorageSourceService.class);
         this.topology =
                 fmc.getServiceImpl(ITopologyService.class);
         this.restApi = fmc.getServiceImpl(IRestApiService.class);
         this.threadPool = fmc.getServiceImpl(IThreadPoolService.class);
-        this.flowReconcileMgr = fmc.getServiceImpl(IFlowReconcileService.class);
         this.entityClassifier = fmc.getServiceImpl(IEntityClassifierService.class);
     }
 
@@ -728,10 +644,8 @@
         apComparator = new AttachmentPointComparator();
 
         floodlightProvider.addOFMessageListener(OFType.PACKET_IN, this);
-        floodlightProvider.addHAListener(this);
         if (topology != null)
             topology.addListener(this);
-        flowReconcileMgr.addFlowReconcileListener(this);
         entityClassifier.addListener(this);
 
         Runnable ecr = new Runnable() {
@@ -754,30 +668,6 @@
         }
     }
 
-    // ***************
-    // IHAListener
-    // ***************
-
-    @Override
-    public void roleChanged(Role oldRole, Role newRole) {
-        switch(newRole) {
-            case SLAVE:
-                logger.debug("Resetting device state because of role change");
-                startUp(null);
-                break;
-            default:
-                break;
-        }
-    }
-
-    @Override
-    public void controllerNodeIPsChanged(
-                                         Map<String, String> curControllerNodeIPs,
-                                         Map<String, String> addedControllerNodeIPs,
-                                         Map<String, String> removedControllerNodeIPs) {
-        // no-op
-    }
-
     // ****************
     // Internal methods
     // ****************
diff --git a/src/main/java/net/floodlightcontroller/flowcache/FCQueryObj.java b/src/main/java/net/floodlightcontroller/flowcache/FCQueryObj.java
deleted file mode 100644
index cce3401..0000000
--- a/src/main/java/net/floodlightcontroller/flowcache/FCQueryObj.java
+++ /dev/null
@@ -1,117 +0,0 @@
-package net.floodlightcontroller.flowcache;
-
-import java.util.Arrays;
-
-import net.floodlightcontroller.devicemanager.IDevice;
-import net.floodlightcontroller.flowcache.IFlowCacheService.FCQueryEvType;
-
-
-/**
- * The Class FCQueryObj.
- */
-public class FCQueryObj {
-
-    /** The caller of the flow cache query. */
-    public IFlowQueryHandler fcQueryHandler;
-    /** The application instance name. */
-    public String applInstName;
-    /** The vlan Id. */
-    public Short[] vlans;
-    /** The destination device. */
-    public IDevice dstDevice;
-    /** The source device. */
-    public IDevice srcDevice;
-    /** The caller name */
-    public String callerName;
-    /** Event type that triggered this flow query submission */
-    public FCQueryEvType evType;
-    /** The caller opaque data. Returned unchanged in the query response
-     * via the callback. The type of this object could be different for
-     * different callers */
-    public Object callerOpaqueObj;
-
-    /**
-     * Instantiates a new flow cache query object
-     */
-    public FCQueryObj(IFlowQueryHandler fcQueryHandler,
-            String        applInstName,
-            Short         vlan,
-            IDevice       srcDevice,
-            IDevice       dstDevice,
-            String        callerName,
-            FCQueryEvType evType,
-            Object        callerOpaqueObj) {
-        this.fcQueryHandler    = fcQueryHandler;
-        this.applInstName     = applInstName;
-        this.srcDevice        = srcDevice;
-        this.dstDevice        = dstDevice;
-        this.callerName       = callerName;
-        this.evType           = evType;
-        this.callerOpaqueObj  = callerOpaqueObj;
-        
-        if (vlan != null) {
-        	this.vlans = new Short[] { vlan };
-        } else {
-	        if (srcDevice != null) {
-	        	this.vlans = srcDevice.getVlanId();
-	        } else if (dstDevice != null) {
-	            this.vlans = dstDevice.getVlanId();
-	        }
-        }
-    }
-
-    @Override
-    public String toString() {
-        return "FCQueryObj [fcQueryCaller=" + fcQueryHandler
-                + ", applInstName="
-                + applInstName + ", vlans=" + Arrays.toString(vlans)
-                + ", dstDevice=" + dstDevice + ", srcDevice="
-                + srcDevice + ", callerName=" + callerName + ", evType="
-                + evType + ", callerOpaqueObj=" + callerOpaqueObj + "]";
-    }
-
-    @Override
-    public boolean equals(Object obj) {
-        if (this == obj)
-            return true;
-        if (obj == null)
-            return false;
-        if (getClass() != obj.getClass())
-            return false;
-        FCQueryObj other = (FCQueryObj) obj;
-        if (applInstName == null) {
-            if (other.applInstName != null)
-                return false;
-        } else if (!applInstName.equals(other.applInstName))
-            return false;
-        if (callerName == null) {
-            if (other.callerName != null)
-                return false;
-        } else if (!callerName.equals(other.callerName))
-            return false;
-        if (callerOpaqueObj == null) {
-            if (other.callerOpaqueObj != null)
-                return false;
-        } else if (!callerOpaqueObj.equals(other.callerOpaqueObj))
-            return false;
-        if (dstDevice == null) {
-            if (other.dstDevice != null)
-                return false;
-        } else if (!dstDevice.equals(other.dstDevice))
-            return false;
-        if (evType != other.evType)
-            return false;
-        if (fcQueryHandler != other.fcQueryHandler)
-            return false;
-        if (srcDevice == null) {
-            if (other.srcDevice != null)
-                return false;
-        } else if (!srcDevice.equals(other.srcDevice))
-            return false;
-        if (!Arrays.equals(vlans, other.vlans))
-            return false;
-        return true;
-    }
-    
-    
-}
diff --git a/src/main/java/net/floodlightcontroller/flowcache/FlowCacheQueryResp.java b/src/main/java/net/floodlightcontroller/flowcache/FlowCacheQueryResp.java
deleted file mode 100644
index b01aedf..0000000
--- a/src/main/java/net/floodlightcontroller/flowcache/FlowCacheQueryResp.java
+++ /dev/null
@@ -1,54 +0,0 @@
-package net.floodlightcontroller.flowcache;
-
-import java.util.ArrayList;
-
-/**
- * Object to return flows in response to a query message to BigFlowCache.
- * This object is passed in the flowQueryRespHandler() callback.
- */
-public class FlowCacheQueryResp {
-
-    /** query object provided by the caller, returned unchanged. */
-    public FCQueryObj  queryObj;
-    /** 
-     * Set to true if more flows could be returned for this query in
-     * additional callbacks. Set of false in the last callback for the
-     * query. 
-     */
-    public boolean     moreFlag;
-    
-    /**
-     * Set to true if the response has been sent to handler
-     */
-    public boolean     hasSent;
-    
-    /** 
-     * The flow list. If there are large number of flows to be returned
-     * then they may be returned in multiple callbacks.
-     */
-    public ArrayList<QRFlowCacheObj> qrFlowCacheObjList;
-
-    /**
-     * Instantiates a new big flow cache query response.
-     *
-     * @param query the flow cache query object as given by the caller of
-     * flow cache submit query API.
-     */
-    public FlowCacheQueryResp(FCQueryObj query) {
-        qrFlowCacheObjList = new ArrayList<QRFlowCacheObj>();
-        queryObj    = query;
-        moreFlag    = false;
-        hasSent     = false;
-    }
-
-    /* (non-Javadoc)
-     * @see java.lang.Object#toString()
-     */
-    @Override
-    public String toString() {
-        String s = queryObj.toString() + "; moreFlasg=" + moreFlag +
-                   "; hasSent=" + hasSent;
-        s += "; FlowCount=" + Integer.toString(qrFlowCacheObjList.size());
-        return s;
-    }
-}
diff --git a/src/main/java/net/floodlightcontroller/flowcache/FlowReconcileManager.java b/src/main/java/net/floodlightcontroller/flowcache/FlowReconcileManager.java
deleted file mode 100644
index b221b84..0000000
--- a/src/main/java/net/floodlightcontroller/flowcache/FlowReconcileManager.java
+++ /dev/null
@@ -1,440 +0,0 @@
-package net.floodlightcontroller.flowcache;
-
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Date;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Queue;
-import java.util.concurrent.ConcurrentLinkedQueue;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.TimeUnit;
-
-import net.floodlightcontroller.core.module.FloodlightModuleContext;
-import net.floodlightcontroller.core.module.FloodlightModuleException;
-import net.floodlightcontroller.core.module.IFloodlightModule;
-import net.floodlightcontroller.core.module.IFloodlightService;
-import net.floodlightcontroller.core.util.ListenerDispatcher;
-import net.floodlightcontroller.core.util.SingletonTask;
-import net.floodlightcontroller.counter.CounterStore;
-import net.floodlightcontroller.counter.ICounter;
-import net.floodlightcontroller.counter.ICounterStoreService;
-import net.floodlightcontroller.counter.SimpleCounter;
-import net.floodlightcontroller.devicemanager.IDevice;
-import net.floodlightcontroller.flowcache.IFlowCacheService.FCQueryEvType;
-import net.floodlightcontroller.flowcache.IFlowReconcileListener;
-import net.floodlightcontroller.flowcache.OFMatchReconcile;
-import net.floodlightcontroller.threadpool.IThreadPoolService;
-
-import org.openflow.protocol.OFType;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class FlowReconcileManager 
-        implements IFloodlightModule, IFlowReconcileService {
-
-    /** The logger. */
-    private final static Logger logger =
-                        LoggerFactory.getLogger(FlowReconcileManager.class);
-    
-    /** Reference to dependent modules */
-    protected IThreadPoolService threadPool;
-    protected ICounterStoreService counterStore;
-
-    /**
-     * The list of flow reconcile listeners that have registered to get
-     * flow reconcile callbacks. Such callbacks are invoked, for example, when
-     * a switch with existing flow-mods joins this controller and those flows
-     * need to be reconciled with the current configuration of the controller.
-     */
-    protected ListenerDispatcher<OFType, IFlowReconcileListener>
-                                               flowReconcileListeners;
-
-    /** A FIFO queue to keep all outstanding flows for reconciliation */
-    Queue<OFMatchReconcile> flowQueue;
-    
-    /** Asynchronous task to feed the flowReconcile pipeline */
-    protected SingletonTask flowReconcileTask;
-    
-    String controllerPktInCounterName;
-    protected SimpleCounter lastPacketInCounter;
-    
-    protected static int MAX_SYSTEM_LOAD_PER_SECOND = 50000;
-    /** a minimum flow reconcile rate so that it won't stave */
-    protected static int MIN_FLOW_RECONCILE_PER_SECOND = 1000;
-    
-    /** once per second */
-    protected static int FLOW_RECONCILE_DELAY_MILLISEC = 10;
-    protected Date lastReconcileTime;
-    
-    /** Config to enable or disable flowReconcile */
-    protected static final String EnableConfigKey = "enable";
-    protected boolean flowReconcileEnabled;
-    
-    public int flowReconcileThreadRunCount;
-    
-    @Override
-    public synchronized void addFlowReconcileListener(
-                IFlowReconcileListener listener) {
-        flowReconcileListeners.addListener(OFType.FLOW_MOD, listener);
-
-        if (logger.isTraceEnabled()) {
-            StringBuffer sb = new StringBuffer();
-            sb.append("FlowMod listeners: ");
-            for (IFlowReconcileListener l :
-                flowReconcileListeners.getOrderedListeners()) {
-                sb.append(l.getName());
-                sb.append(",");
-            }
-            logger.trace(sb.toString());
-        }
-    }
-
-    @Override
-    public synchronized void removeFlowReconcileListener(
-                IFlowReconcileListener listener) {
-        flowReconcileListeners.removeListener(listener);
-    }
-    
-    @Override
-    public synchronized void clearFlowReconcileListeners() {
-        flowReconcileListeners.clearListeners();
-    }
-    
-    /**
-     * Add to-be-reconciled flow to the queue.
-     *
-     * @param ofmRcIn the ofm rc in
-     */
-    public void reconcileFlow(OFMatchReconcile ofmRcIn) {
-        if (ofmRcIn == null) return;
-        
-        // Make a copy before putting on the queue.
-        OFMatchReconcile myOfmRc = new OFMatchReconcile(ofmRcIn);
-    
-        flowQueue.add(myOfmRc);
-    
-        Date currTime = new Date();
-        long delay = 0;
-
-        /** schedule reconcile task immidiately if it has been more than 1 sec
-         *  since the last run. Otherwise, schedule the reconcile task in
-         *  DELAY_MILLISEC.
-         */
-        if (currTime.after(new Date(lastReconcileTime.getTime() + 1000))) {
-            delay = 0;
-        } else {
-            delay = FLOW_RECONCILE_DELAY_MILLISEC;
-        }
-        flowReconcileTask.reschedule(delay, TimeUnit.MILLISECONDS);
-    
-        if (logger.isTraceEnabled()) {
-            logger.trace("Reconciling flow: {}, total: {}",
-                myOfmRc.toString(), flowQueue.size());
-        }
-    }
-    
-    @Override
-    public void updateFlowForDestinationDevice(IDevice device,
-                                            IFlowQueryHandler handler,
-                                            FCQueryEvType fcEvType) {
-        // NO-OP
-    }
-
-    @Override
-    public void updateFlowForSourceDevice(IDevice device,
-                                          IFlowQueryHandler handler,
-                                          FCQueryEvType fcEvType) {
-        // NO-OP
-    }
-    
-    @Override
-    public void flowQueryGenericHandler(FlowCacheQueryResp flowResp) {
-        if (flowResp.queryObj.evType != FCQueryEvType.GET) {
-            OFMatchReconcile ofmRc = new OFMatchReconcile();;
-            /* Re-provision these flows */
-            for (QRFlowCacheObj entry : flowResp.qrFlowCacheObjList) {
-                /* reconcile the flows in entry */
-                entry.toOFMatchReconcile(ofmRc,
-                        flowResp.queryObj.applInstName,
-                        OFMatchReconcile.ReconcileAction.UPDATE_PATH);
-                reconcileFlow(ofmRc);
-            }
-        }
-        return;
-    }
-    
-    // IFloodlightModule
-
-    @Override
-    public Collection<Class<? extends IFloodlightService>> getModuleServices() {
-        Collection<Class<? extends IFloodlightService>> l = 
-            new ArrayList<Class<? extends IFloodlightService>>();
-        l.add(IFlowReconcileService.class);
-        return l;
-    }
-
-    @Override
-    public Map<Class<? extends IFloodlightService>, IFloodlightService> 
-                                                            getServiceImpls() {
-        Map<Class<? extends IFloodlightService>,
-        IFloodlightService> m = 
-            new HashMap<Class<? extends IFloodlightService>,
-                IFloodlightService>();
-        m.put(IFlowReconcileService.class, this);
-        return m;
-    }
-
-    @Override
-    public Collection<Class<? extends IFloodlightService>> 
-                                                    getModuleDependencies() {
-        Collection<Class<? extends IFloodlightService>> l = 
-                new ArrayList<Class<? extends IFloodlightService>>();
-        l.add(IThreadPoolService.class);
-        l.add(ICounterStoreService.class);
-        return null;
-    }
-
-    @Override
-    public void init(FloodlightModuleContext context)
-            throws FloodlightModuleException {
-        threadPool = context.getServiceImpl(IThreadPoolService.class);
-        counterStore = context.getServiceImpl(ICounterStoreService.class);
-    
-        flowQueue = new ConcurrentLinkedQueue<OFMatchReconcile>();
-        flowReconcileListeners = 
-                new ListenerDispatcher<OFType, IFlowReconcileListener>();
-        
-        Map<String, String> configParam = context.getConfigParams(this);
-        String enableValue = configParam.get(EnableConfigKey);
-        // Set flowReconcile default to true
-        flowReconcileEnabled = true;
-        if (enableValue != null &&
-            enableValue.equalsIgnoreCase("false")) {
-            flowReconcileEnabled = false;
-        }
-        
-        flowReconcileThreadRunCount = 0;
-        lastReconcileTime = new Date(0);
-        logger.debug("FlowReconcile is {}", flowReconcileEnabled);
-    }
-
-    @Override
-    public void startUp(FloodlightModuleContext context) {
-        // thread to do flow reconcile
-        ScheduledExecutorService ses = threadPool.getScheduledExecutor();
-        flowReconcileTask = new SingletonTask(ses, new Runnable() {
-            @Override
-            public void run() {
-                try {
-                    if (doReconcile()) {
-                        flowReconcileTask.reschedule(
-                            FLOW_RECONCILE_DELAY_MILLISEC,
-                            TimeUnit.MILLISECONDS);
-                    }
-                } catch (Exception e) {
-                    logger.warn("Exception in doReconcile(): {}",
-                                e.getMessage());
-                    e.printStackTrace();
-                }
-            }
-        });
-        
-        String packetInName = OFType.PACKET_IN.toClass().getName();
-        packetInName = packetInName.substring(packetInName.lastIndexOf('.')+1); 
-        
-        // Construct controller counter for the packet_in
-        controllerPktInCounterName =
-            CounterStore.createCounterName(ICounterStoreService.CONTROLLER_NAME, 
-                                           -1,
-                                           packetInName);
-    }
-    
-    /**
-     * Feed the flows into the flow reconciliation pipeline.
-     * @return true if more flows to be reconciled
-     *         false if no more flows to be reconciled.
-     */
-    protected boolean doReconcile() {
-        if (!flowReconcileEnabled) {
-            return false;
-        }
-    
-        // Record the execution time.
-        lastReconcileTime = new Date();
-    
-        ArrayList<OFMatchReconcile> ofmRcList =
-                        new ArrayList<OFMatchReconcile>();
-        
-        // Get the maximum number of flows that can be reconciled.
-        int reconcileCapacity = getCurrentCapacity();
-        if (logger.isTraceEnabled()) {
-            logger.trace("Reconcile capacity {} flows", reconcileCapacity);
-        }
-        while (!flowQueue.isEmpty() && reconcileCapacity > 0) {
-            OFMatchReconcile ofmRc = flowQueue.poll();
-            reconcileCapacity--;
-            if (ofmRc != null) {
-                ofmRcList.add(ofmRc);
-                if (logger.isTraceEnabled()) {
-                    logger.trace("Add flow {} to be the reconcileList", ofmRc.cookie);
-                }
-            } else {
-                break;
-            }
-        }
-        
-        // Run the flow through all the flow reconcile listeners
-        IFlowReconcileListener.Command retCmd;
-        if (ofmRcList.size() > 0) {
-            List<IFlowReconcileListener> listeners =
-                flowReconcileListeners.getOrderedListeners();
-            if (listeners == null) {
-                if (logger.isTraceEnabled()) {
-                    logger.trace("No flowReconcile listener");
-                }
-                return false;
-            }
-        
-            for (IFlowReconcileListener flowReconciler :
-                flowReconcileListeners.getOrderedListeners()) {
-                if (logger.isTraceEnabled()) {
-                    logger.trace("Reconciling flow: call listener {}",
-                            flowReconciler.getName());
-                }
-                retCmd = flowReconciler.reconcileFlows(ofmRcList);
-                if (retCmd == IFlowReconcileListener.Command.STOP) {
-                    break;
-                }
-            }
-            flowReconcileThreadRunCount++;
-        } else {
-            if (logger.isTraceEnabled()) {
-                logger.trace("No flow to be reconciled.");
-            }
-        }
-        
-        // Return true if there are more flows to be reconciled
-        if (flowQueue.isEmpty()) {
-            return false;
-        } else {
-            if (logger.isTraceEnabled()) {
-                logger.trace("{} more flows to be reconciled.",
-                            flowQueue.size());
-            }
-            return true;
-        }
-    }
-    
-    /**
-     * Compute the maximum number of flows to be reconciled.
-     * 
-     * It computes the packetIn increment from the counter values in
-     * the counter store;
-     * Then computes the rate based on the elapsed time
-     * from the last query;
-     * Then compute the max flow reconcile rate by subtracting the packetIn
-     * rate from the hard-coded max system rate.
-     * If the system rate is reached or less than MIN_FLOW_RECONCILE_PER_SECOND,
-     * set the maximum flow reconcile rate to the MIN_FLOW_RECONCILE_PER_SECOND
-     * to prevent starvation.
-     * Then convert the rate to an absolute number for the
-     * FLOW_RECONCILE_PERIOD.
-     * @return
-     */
-    protected int getCurrentCapacity() {
-        ICounter pktInCounter =
-            counterStore.getCounter(controllerPktInCounterName);
-        int minFlows = MIN_FLOW_RECONCILE_PER_SECOND *
-                        FLOW_RECONCILE_DELAY_MILLISEC / 1000;
-        
-        // If no packetInCounter, then there shouldn't be any flow.
-        if (pktInCounter == null ||
-            pktInCounter.getCounterDate() == null ||
-            pktInCounter.getCounterValue() == null) {
-            logger.debug("counter {} doesn't exist",
-                        controllerPktInCounterName);
-            return minFlows;
-        }
-        
-        // Haven't get any counter yet.
-        if (lastPacketInCounter == null) {
-            logger.debug("First time get the count for {}",
-                        controllerPktInCounterName);
-            lastPacketInCounter = (SimpleCounter)
-            SimpleCounter.createCounter(pktInCounter);
-            return minFlows;
-        }
-        
-        int pktInRate = getPktInRate(pktInCounter, new Date());
-        
-        // Update the last packetInCounter
-        lastPacketInCounter = (SimpleCounter)
-        SimpleCounter.createCounter(pktInCounter);
-        int capacity = minFlows;
-        if ((pktInRate + MIN_FLOW_RECONCILE_PER_SECOND) <=
-                               MAX_SYSTEM_LOAD_PER_SECOND) {
-            capacity = (MAX_SYSTEM_LOAD_PER_SECOND - pktInRate)
-                    * FLOW_RECONCILE_DELAY_MILLISEC / 1000;
-        }
-        
-        if (logger.isTraceEnabled()) {
-            logger.trace("Capacity is {}", capacity);
-        }
-        return capacity;
-    }
-    
-    protected int getPktInRate(ICounter newCnt, Date currentTime) {
-        if (newCnt == null ||
-            newCnt.getCounterDate() == null ||
-            newCnt.getCounterValue() == null) {
-            return 0;
-        }
-    
-        // Somehow the system time is messed up. return max packetIn rate
-        // to reduce the system load.
-        if (newCnt.getCounterDate().before(
-                lastPacketInCounter.getCounterDate())) {
-            logger.debug("Time is going backward. new {}, old {}",
-                    newCnt.getCounterDate(),
-                    lastPacketInCounter.getCounterDate());
-            return MAX_SYSTEM_LOAD_PER_SECOND;
-        }
-    
-        long elapsedTimeInSecond = (currentTime.getTime() -
-                    lastPacketInCounter.getCounterDate().getTime()) / 1000;
-        if (elapsedTimeInSecond == 0) {
-            // This should never happen. Check to avoid division by zero.
-            return 0;
-        }
-    
-        long diff = 0;
-        switch (newCnt.getCounterValue().getType()) {
-            case LONG:
-                long newLong = newCnt.getCounterValue().getLong();
-                long oldLong = lastPacketInCounter.getCounterValue().getLong();
-                if (newLong < oldLong) {
-                    // Roll over event
-                    diff = Long.MAX_VALUE - oldLong + newLong;
-                } else {
-                    diff = newLong - oldLong;
-                }
-                break;
-    
-            case DOUBLE:
-                double newDouble = newCnt.getCounterValue().getDouble();
-                double oldDouble = lastPacketInCounter.getCounterValue().getDouble();
-                if (newDouble < oldDouble) {
-                    // Roll over event
-                    diff = (long)(Double.MAX_VALUE - oldDouble + newDouble);
-                } else {
-                    diff = (long)(newDouble - oldDouble);
-                }
-                break;
-        }
-    
-        return (int)(diff/elapsedTimeInSecond);
-    }
-}
-
diff --git a/src/main/java/net/floodlightcontroller/flowcache/IFlowCacheService.java b/src/main/java/net/floodlightcontroller/flowcache/IFlowCacheService.java
deleted file mode 100644
index 8e44ed3..0000000
--- a/src/main/java/net/floodlightcontroller/flowcache/IFlowCacheService.java
+++ /dev/null
@@ -1,185 +0,0 @@
-package net.floodlightcontroller.flowcache;
-
-import org.openflow.protocol.OFMatchWithSwDpid;
-
-import net.floodlightcontroller.core.FloodlightContext;
-import net.floodlightcontroller.core.FloodlightContextStore;
-import net.floodlightcontroller.core.IOFSwitch;
-import net.floodlightcontroller.devicemanager.SwitchPort;
-import net.floodlightcontroller.core.module.IFloodlightService;
-
-/**
- * The Interface IFlowCache.
- * <p>
- * public interface APIs to Big Switch Flow-Cache Service. Flow-Cache maintains
- * the network-level flows that are currently deployed in the underlying 
- * network. The flow cache can be queried using various filters by using the
- * corresponding APIs.
- * 
- * @author subrata
- *
- */
-public interface IFlowCacheService extends IFloodlightService {
-
-    public static final String FLOWCACHE_APP_NAME = 
-        "net.floodlightcontroller.flowcache.appName";
-    public static final String FLOWCACHE_APP_INSTANCE_NAME = 
-        "net.floodlightcontroller.flowcache.appInstanceName";
-
-    /**
-     * The flow cache query event type indicating the event that triggered the
-     * query. The callerOpaqueObj can be keyed based on this event type
-     */
-    public static enum FCQueryEvType {
-        /** The GET query. Flows need not be reconciled for this query type */
-        GET,
-        /** A new App was added. */
-        APP_ADDED,
-        /** An App was deleted. */
-        APP_DELETED,
-        /** Interface rule of an app was modified */
-        APP_INTERFACE_RULE_CHANGED,
-        /** Some App configuration was changed */
-        APP_CONFIG_CHANGED,
-        /** An ACL was added */
-        ACL_ADDED,
-        /** An ACL was deleted */
-        ACL_DELETED,
-        /** An ACL rule was added */
-        ACL_RULE_ADDED,
-        /** An ACL rule was deleted */
-        ACL_RULE_DELETED,
-        /** ACL configuration was changed */
-        ACL_CONFIG_CHANGED,
-        /** device had moved to a different port in the network */
-        DEVICE_MOVED,
-        /** device's property had changed, such as tag assignment */
-        DEVICE_PROPERTY_CHANGED,
-        /** Link down */
-        LINK_DOWN,
-        /** Periodic scan of switch flow table */
-        PERIODIC_SCAN,
-    }
-    
-    /**
-     * A FloodlightContextStore object that can be used to interact with the 
-     * FloodlightContext information about flowCache.
-     */
-    public static final FloodlightContextStore<String> fcStore = 
-        new FloodlightContextStore<String>();
-    
-    /**
-     * Submit a flow cache query with query parameters specified in FCQueryObj
-     * object. The query object can be created using one of the newFCQueryObj 
-     * helper functions in IFlowCache interface. 
-     * <p>
-     * The queried flows are returned via the flowQueryRespHandler() callback 
-     * that the caller must implement. The caller can match the query with
-     * the response using unique callerOpaqueData which remains unchanged
-     * in the request and response callback.
-     *
-     * @see  com.bigswitch.floodlight.flowcache#flowQueryRespHandler
-     * @param query the flow cache query object as input
-     * 
-     */
-    public void submitFlowCacheQuery(FCQueryObj query);
-
-    /**
-     * Deactivates all flows in the flow cache for which the source switch
-     * matches the given switchDpid. Note that the flows are NOT deleted
-     * from the cache.
-     *
-     * @param switchDpid Data-path identifier of the source switch
-     */
-    public void deactivateFlowCacheBySwitch(long switchDpid);
-
-    /**
-     * Deletes all flows in the flow cache for which the source switch
-     * matches the given switchDpid. 
-     * 
-     * @param switchDpid Data-path identifier of the source switch
-     */
-    public void deleteFlowCacheBySwitch(long switchDpid);
-
-    /**
-     * Add a flow to the flow-cache - called when a flow-mod is about to be
-     * written to a set of switches. If it returns false then it should not
-     * be written to the switches. If it returns true then the cookie returned
-     * should be used for the flow mod sent to the switches.
-     *
-     * @param appInstName Application instance name
-     * @param ofm openflow match object
-     * @param cookie openflow-mod cookie
-     * @param swPort SwitchPort object
-     * @param priority openflow match priority
-     * @param action action taken on the matched packets (PERMIT or DENY)
-     * @return true:  flow should be written to the switch(es)
-     *         false: flow should not be written to the switch(es). false is
-     *                returned, for example, when the flow was recently
-     *                written to the flow-cache and hence it is dampened to
-     *                avoid frequent writes of the same flow to the switches
-     *                This case can typically arise for the flows written at the
-     *                internal ports as they are heavily wild-carded.
-     */
-    public boolean addFlow(String appInstName, OFMatchWithSwDpid ofm, 
-                           Long cookie, long srcSwDpid, 
-                           short inPort, short priority, byte action);
-
-    /**
-     * Add a flow to the flow-cache - called when a flow-mod is about to be
-     * written to a set of switches. If it returns false then it should not
-     * be written to the switches. If it returns true then the cookie returned
-     * should be used for the flow mod sent to the switches.
-     *
-     * @param cntx the cntx
-     * @param ofm the ofm
-     * @param cookie the cookie
-     * @param swPort the sw port
-     * @param priority the priority
-     * @param action the action
-     * @return true:  flow should be written to the switch(es)
-     * false: flow should not be written to the switch(es). false is
-     * returned, for example, when the flow was recently
-     * written to the flow-cache and hence it is dampened to
-     * avoid frequent writes of the same flow to the switches
-     * This case can typically arise for the flows written at the
-     * internal ports as they are heavily wild-carded.
-     */
-    public boolean addFlow(FloodlightContext cntx, OFMatchWithSwDpid ofm, 
-                           Long cookie, SwitchPort swPort, 
-                           short priority, byte action);
-
-    /**
-     * Move the specified flow from its current application instance to a 
-     * different application instance. This API can be used when a flow moves
-     * to a different application instance when the application instance
-     * configuration changes or when a device moves to a different part in
-     * the network that belongs to a different application instance.
-     * <p>
-     * Note that, if the flow was not found in the current application 
-     * instance then the flow is not moved to the new application instance.
-     * 
-     * @param ofMRc the object containing the flow match and new application
-     * instance name.
-     * @return true is the flow was found in the flow cache in the current 
-     * application instance; false if the flow was not found in the flow-cache
-     * in the current application instance.
-     */
-    public boolean moveFlowToDifferentApplInstName(OFMatchReconcile ofMRc);
-
-    /**
-     * Delete all flow from the specified switch
-     * @param sw
-     */
-    public void deleteAllFlowsAtASourceSwitch(IOFSwitch sw);
-    
-    /**
-     * Post a request to update flowcache from a switch.
-     * This is an asynchronous operation.
-     * It queries the switch for stats and updates the flowcache asynchronously
-     * with the response.
-     * @param swDpid
-     * @param delay_ms
-     */
-    public void querySwitchFlowTable(long swDpid);
-}
diff --git a/src/main/java/net/floodlightcontroller/flowcache/IFlowQueryHandler.java b/src/main/java/net/floodlightcontroller/flowcache/IFlowQueryHandler.java
deleted file mode 100644
index 5d1b1a9..0000000
--- a/src/main/java/net/floodlightcontroller/flowcache/IFlowQueryHandler.java
+++ /dev/null
@@ -1,21 +0,0 @@
-package net.floodlightcontroller.flowcache;
-
-public interface IFlowQueryHandler {
-    /**
-     * This callback function is called in response to a flow query request
-     * submitted to the flow cache service. The module handling this callback
-     * can be different from the one that submitted the query. In the flow
-     * query object used for submitting the flow query, the identity of the
-     * callback handler is passed. When flow cache service has all or some
-     * of the flows that needs to be returned then this callback is called
-     * for the appropriate module. The respone contains a boolean more flag 
-     * that indicates if there are additional flows that may be returned
-     * via additional callback calls.
-     *
-     * @param resp the response object containing the original flow query 
-     * object, partial or complete list of flows that we queried and some 
-     * metadata such as the more flag described aboce.
-     *
-     */
-    public void flowQueryRespHandler(FlowCacheQueryResp resp);
-}
diff --git a/src/main/java/net/floodlightcontroller/flowcache/IFlowReconcileListener.java b/src/main/java/net/floodlightcontroller/flowcache/IFlowReconcileListener.java
deleted file mode 100644
index f1100ed..0000000
--- a/src/main/java/net/floodlightcontroller/flowcache/IFlowReconcileListener.java
+++ /dev/null
@@ -1,40 +0,0 @@
-package net.floodlightcontroller.flowcache;
-
-import java.util.ArrayList;
-
-import net.floodlightcontroller.core.IListener;
-import org.openflow.protocol.OFType;
-
-/**
- * The Interface IFlowReconciler.
- *
- * @author subrata
- */
-public interface IFlowReconcileListener extends IListener<OFType> {
-    /**
-     * Given an input OFMatch, this method applies the policy of the reconciler
-     * and returns a the same input OFMatch structure modified. Additional
-     * OFMatches, if needed, are returned in OFMatch-list. All the OFMatches
-     * are assumed to have "PERMIT" action.
-     *
-     * @param ofmRcList  input flow matches, to be updated to be consistent with
-     *                   the policies of this reconciler 
-     *                   Additional OFMatch-es can be added to the "list" as
-     *                   needed. 
-     *                   For example after a new ACL application, one flow-match
-     *                   may result in multiple flow-matches
-     *                   The method must also update the ReconcileAction
-     *                   member in ofmRcList entries to indicate if the
-     *                   flow needs to be modified, deleted or left unchanged
-     *                   OR of a new entry is to be added after flow 
-     *                   reconciliation
-     *
-     *
-     * @return   Command.CONTINUE if the OFMatch should be sent to the
-     *           next flow reconciler. 
-     *           Command.STOP if the OFMatch shouldn't be processed
-     *           further. In this case the no reconciled flow-mods would 
-     *           be programmed
-     */
-    public Command reconcileFlows(ArrayList<OFMatchReconcile> ofmRcList);
-}
diff --git a/src/main/java/net/floodlightcontroller/flowcache/IFlowReconcileService.java b/src/main/java/net/floodlightcontroller/flowcache/IFlowReconcileService.java
deleted file mode 100644
index f48c4e0..0000000
--- a/src/main/java/net/floodlightcontroller/flowcache/IFlowReconcileService.java
+++ /dev/null
@@ -1,75 +0,0 @@
-/**
- * Provides Flow Reconcile service to other modules that need to reconcile
- * flows.
- */
-package net.floodlightcontroller.flowcache;
-
-import net.floodlightcontroller.core.module.IFloodlightService;
-import net.floodlightcontroller.devicemanager.IDevice;
-import net.floodlightcontroller.flowcache.IFlowCacheService.FCQueryEvType;
-
-public interface IFlowReconcileService extends IFloodlightService {
-    /**
-     * Add a flow reconcile listener
-     * @param listener The module that can reconcile flows
-     */
-    public void addFlowReconcileListener(IFlowReconcileListener listener);
-
-    /**
-     * Remove a flow reconcile listener
-     * @param listener The module that no longer reconcile flows
-     */
-    public void removeFlowReconcileListener(IFlowReconcileListener listener);
-    
-    /**
-     * Remove all flow reconcile listeners
-     */
-    public void clearFlowReconcileListeners();
-    
-    /**
-     * Reconcile flow. Returns false if no modified flow-mod need to be 
-     * programmed if cluster ID is providced then pnly flows in the given 
-     * cluster are reprogrammed
-     *
-     * @param ofmRcIn the ofm rc in
-     */
-    public void reconcileFlow(OFMatchReconcile ofmRcIn);
-    
-    /**
-     * Updates the flows to a device after the device moved to a new location
-     * <p>
-     * Queries the flow-cache to get all the flows destined to the given device.
-     * Reconciles each of these flows by potentially reprogramming them to its
-     * new attachment point
-     *
-     * @param device      device that has moved
-     * @param handler	  handler to process the flows
-     * @param fcEvType    Event type that triggered the update
-     *
-     */
-    public void updateFlowForDestinationDevice(IDevice device,
-            IFlowQueryHandler handler,
-    		FCQueryEvType fcEvType);
-    
-    /**
-     * Updates the flows from a device
-     * <p>
-     * Queries the flow-cache to get all the flows source from the given device.
-     * Reconciles each of these flows by potentially reprogramming them to its
-     * new attachment point
-     *
-     * @param device      device where the flow originates
-     * @param handler	  handler to process the flows
-     * @param fcEvType    Event type that triggered the update
-     *
-     */
-    public void updateFlowForSourceDevice(IDevice device,
-            IFlowQueryHandler handler,
-    		FCQueryEvType fcEvType);
-
-    /**
-     * Generic flow query handler to insert FlowMods into the reconcile pipeline.
-     * @param flowResp
-     */
-    public void flowQueryGenericHandler(FlowCacheQueryResp flowResp);
-}
diff --git a/src/main/java/net/floodlightcontroller/flowcache/OFMatchReconcile.java b/src/main/java/net/floodlightcontroller/flowcache/OFMatchReconcile.java
deleted file mode 100644
index 68831f4..0000000
--- a/src/main/java/net/floodlightcontroller/flowcache/OFMatchReconcile.java
+++ /dev/null
@@ -1,84 +0,0 @@
-package net.floodlightcontroller.flowcache;
-
-import net.floodlightcontroller.core.FloodlightContext;
-import org.openflow.protocol.OFMatchWithSwDpid;
-
-/**
- * OFMatchReconcile class to indicate result of a flow-reconciliation.
- */
-public class OFMatchReconcile  {
- 
-    /**
-     * The enum ReconcileAction. Specifies the result of reconciliation of a 
-     * flow.
-     */
-    public enum ReconcileAction {
-
-        /** Delete the flow-mod from the switch */
-        DROP,
-        /** Leave the flow-mod as-is. */
-        NO_CHANGE,
-        /** Program this new flow mod. */
-        NEW_ENTRY,
-        /** 
-         * Reprogram the flow mod as the path of the flow might have changed,
-         * for example when a host is moved or when a link goes down. */
-        UPDATE_PATH,
-        /* Flow is now in a different BVS */
-        APP_INSTANCE_CHANGED,
-        /* Delete the flow-mod - used to delete, for example, drop flow-mods
-         * when the source and destination are in the same BVS after a 
-         * configuration change */
-        DELETE
-    }
-
-    /** The open flow match after reconciliation. */
-    public OFMatchWithSwDpid ofmWithSwDpid;
-    /** flow mod. priority */
-    public short priority;
-    /** Action of this flow-mod PERMIT or DENY */
-    public byte action;
-    /** flow mod. cookie */
-    public long cookie;
-    /** The application instance name. */
-    public String appInstName;
-    /**
-     * The new application instance name. This is null unless the flow
-     * has moved to a different BVS due to BVS config change or device
-     * move to a different switch port etc.*/
-    public String newAppInstName;
-    /** The reconcile action. */
-    public ReconcileAction rcAction;
-
-    // The context for the reconcile action
-    public FloodlightContext cntx;
-    
-    /**
-     * Instantiates a new oF match reconcile object.
-     */
-    public OFMatchReconcile() {
-        ofmWithSwDpid      = new OFMatchWithSwDpid();
-        rcAction = ReconcileAction.NO_CHANGE;
-        cntx = new FloodlightContext();
-    }
-    
-    public OFMatchReconcile(OFMatchReconcile copy) {
-        ofmWithSwDpid =
-            new OFMatchWithSwDpid(copy.ofmWithSwDpid.getOfMatch(),
-                    copy.ofmWithSwDpid.getSwitchDataPathId());
-        priority = copy.priority;
-        action = copy.action;
-        cookie = copy.cookie;
-        appInstName = copy.appInstName;
-        newAppInstName = copy.newAppInstName;
-        rcAction = copy.rcAction;
-        cntx = new FloodlightContext();
-    }
-    
-    @Override
-    public String toString() {
-        return "OFMatchReconcile [" + ofmWithSwDpid + " priority=" + priority + " action=" + action + 
-                " cookie=" + cookie + " appInstName=" + appInstName + " newAppInstName=" + newAppInstName + 
-                " ReconcileAction=" + rcAction + "]";
-    }
-}
diff --git a/src/main/java/net/floodlightcontroller/flowcache/PendingSwRespKey.java b/src/main/java/net/floodlightcontroller/flowcache/PendingSwRespKey.java
deleted file mode 100644
index 767ce94..0000000
--- a/src/main/java/net/floodlightcontroller/flowcache/PendingSwRespKey.java
+++ /dev/null
@@ -1,42 +0,0 @@
-package net.floodlightcontroller.flowcache;
-
-public class PendingSwRespKey {
-    long swDpid;
-    int  transId;
-
-    public PendingSwRespKey(long swDpid, int transId) {
-        this.swDpid  = swDpid;
-        this.transId = transId;
-    }
-
-    @Override
-    public int hashCode() {
-        final int prime = 97;
-        Long dpid   = swDpid;
-        Integer tid = transId;
-        return (tid.hashCode()*prime + dpid.hashCode());
-    }
-
-    @Override
-    public boolean equals(Object obj) {
-        if (this == obj) {
-            return true;
-        }
-        if (obj == null) {
-            return false;
-        }
-        if (!(obj instanceof PendingSwRespKey)) {
-            return false;
-        }
-        PendingSwRespKey other = (PendingSwRespKey) obj;
-        if ((swDpid != other.swDpid) || (transId != other.transId)) {
-            return false;
-        }
-        return true;
-    }
-
-    @Override
-    public String toString() {
-        return Long.toHexString(swDpid)+","+Integer.toString(transId);
-    }
-}
diff --git a/src/main/java/net/floodlightcontroller/flowcache/PendingSwitchResp.java b/src/main/java/net/floodlightcontroller/flowcache/PendingSwitchResp.java
deleted file mode 100644
index d6f264f..0000000
--- a/src/main/java/net/floodlightcontroller/flowcache/PendingSwitchResp.java
+++ /dev/null
@@ -1,24 +0,0 @@
-package net.floodlightcontroller.flowcache;
-
-import net.floodlightcontroller.flowcache.IFlowCacheService.FCQueryEvType;
-
-/**
- * The Class PendingSwitchResp. This object is used to track the pending
- * responses to switch flow table queries.
- */
-public class PendingSwitchResp {
-    protected FCQueryEvType evType;
-
-    public PendingSwitchResp(
-            FCQueryEvType evType) {
-        this.evType      = evType;
-    }
-    
-    public FCQueryEvType getEvType() {
-        return evType;
-    }
-
-    public void setEvType(FCQueryEvType evType) {
-        this.evType = evType;
-    }
-}
diff --git a/src/main/java/net/floodlightcontroller/flowcache/QRFlowCacheObj.java b/src/main/java/net/floodlightcontroller/flowcache/QRFlowCacheObj.java
deleted file mode 100644
index 5121f8b..0000000
--- a/src/main/java/net/floodlightcontroller/flowcache/QRFlowCacheObj.java
+++ /dev/null
@@ -1,67 +0,0 @@
-package net.floodlightcontroller.flowcache;
-
-
-import org.openflow.protocol.OFMatchWithSwDpid;
-
-/**
- * Used in BigFlowCacheQueryResp as query result.
- * Used to return one flow when queried by one of the big flow cache APIs.
- * One of these QRFlowCacheObj is returned for each combination of
- * priority and action.
- *
- * @author subrata
- */
-public class QRFlowCacheObj {
-
-    /** The open flow match object. */
-    public OFMatchWithSwDpid ofmWithSwDpid;
-    /** The flow-mod priority. */
-    public short   priority;
-    /** flow-mod cookie */
-    public long    cookie;
-    /** The action - PERMIT or DENY. */
-    public byte    action;
-    /** The reserved byte to align with 8 bytes. */
-    public byte    reserved;
-
-    /**
-     * Instantiates a new flow cache query object.
-     *
-     * @param priority the priority
-     * @param action the action
-     */
-    public QRFlowCacheObj(short priority, byte action, long cookie) {
-        ofmWithSwDpid = new OFMatchWithSwDpid();
-        this.action   = action;
-        this.priority = priority;
-        this.cookie   = cookie;
-    }
-
-    /**
-     * Populate a given OFMatchReconcile object from the values of this
-     * class.
-     *
-     * @param ofmRc the given OFMatchReconcile object
-     * @param appInstName the application instance name
-     * @param rcAction the reconcile action
-     */
-    public   void toOFMatchReconcile(OFMatchReconcile ofmRc,
-                            String appInstName, OFMatchReconcile.ReconcileAction rcAction) {
-        ofmRc.ofmWithSwDpid   = ofmWithSwDpid; // not copying
-        ofmRc.appInstName     = appInstName;
-        ofmRc.rcAction        = rcAction;
-        ofmRc.priority        = priority;
-        ofmRc.cookie          = cookie;
-        ofmRc.action          = action;
-    }
-    
-    @Override
-    public String toString() {
-        String str = "ofmWithSwDpid: " + this.ofmWithSwDpid.toString() + " ";
-        str += "priority: " + this.priority + " ";
-        str += "cookie: " + this.cookie + " ";
-        str += "action: " + this.action + " ";
-        str += "reserved: " + this.reserved + " ";
-        return str;
-    }
-}
diff --git a/src/main/java/net/floodlightcontroller/forwarding/Forwarding.java b/src/main/java/net/floodlightcontroller/forwarding/Forwarding.java
index 28369eb..a245c02 100644
--- a/src/main/java/net/floodlightcontroller/forwarding/Forwarding.java
+++ b/src/main/java/net/floodlightcontroller/forwarding/Forwarding.java
@@ -27,9 +27,6 @@
 import net.floodlightcontroller.core.FloodlightContext;
 import net.floodlightcontroller.core.IFloodlightProviderService;
 import net.floodlightcontroller.core.IOFSwitch;
-import net.floodlightcontroller.devicemanager.IDevice;
-import net.floodlightcontroller.devicemanager.IDeviceService;
-import net.floodlightcontroller.devicemanager.SwitchPort;
 import net.floodlightcontroller.core.annotations.LogMessageCategory;
 import net.floodlightcontroller.core.annotations.LogMessageDoc;
 import net.floodlightcontroller.core.annotations.LogMessageDocs;
@@ -38,7 +35,9 @@
 import net.floodlightcontroller.core.module.IFloodlightModule;
 import net.floodlightcontroller.core.module.IFloodlightService;
 import net.floodlightcontroller.core.util.AppCookie;
-import net.floodlightcontroller.counter.ICounterStoreService;
+import net.floodlightcontroller.devicemanager.IDevice;
+import net.floodlightcontroller.devicemanager.IDeviceService;
+import net.floodlightcontroller.devicemanager.SwitchPort;
 import net.floodlightcontroller.packet.Ethernet;
 import net.floodlightcontroller.routing.ForwardingBase;
 import net.floodlightcontroller.routing.IRoutingDecision;
@@ -389,7 +388,6 @@
         l.add(IDeviceService.class);
         l.add(IRoutingService.class);
         l.add(ITopologyService.class);
-        l.add(ICounterStoreService.class);
         return l;
     }
 
@@ -416,7 +414,6 @@
         this.deviceManager = context.getServiceImpl(IDeviceService.class);
         this.routingEngine = context.getServiceImpl(IRoutingService.class);
         this.topology = context.getServiceImpl(ITopologyService.class);
-        this.counterStore = context.getServiceImpl(ICounterStoreService.class);
         
         // read our config options
         Map<String, String> configOptions = context.getConfigParams(this);
diff --git a/src/main/java/net/floodlightcontroller/perfmon/CumulativeTimeBucket.java b/src/main/java/net/floodlightcontroller/perfmon/CumulativeTimeBucket.java
deleted file mode 100644
index e76253d..0000000
--- a/src/main/java/net/floodlightcontroller/perfmon/CumulativeTimeBucket.java
+++ /dev/null
@@ -1,122 +0,0 @@
-package net.floodlightcontroller.perfmon;
-
-import java.util.Collection;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.ConcurrentHashMap;
-
-import org.codehaus.jackson.map.annotate.JsonSerialize;
-
-import net.floodlightcontroller.core.IOFMessageListener;
-
-@JsonSerialize(using=CumulativeTimeBucketJSONSerializer.class)
-public class CumulativeTimeBucket {
-    private long startTime_ns; // First pkt time-stamp in this bucket
-    private Map<Integer, OneComponentTime> compStats;
-    private long totalPktCnt;
-    private long totalProcTimeNs; // total processing time for one pkt in
-    private long sumSquaredProcTimeNs2;
-    private long maxTotalProcTimeNs;
-    private long minTotalProcTimeNs;
-    private long avgTotalProcTimeNs;
-    private long sigmaTotalProcTimeNs; // std. deviation
-
-    public long getStartTimeNs() {
-        return startTime_ns;
-    }
-
-    public long getTotalPktCnt() {
-        return totalPktCnt;
-    }
-    
-    public long getAverageProcTimeNs() {
-        return avgTotalProcTimeNs;
-    }
-
-    public long getMinTotalProcTimeNs() {
-        return minTotalProcTimeNs;
-    }
-    
-    public long getMaxTotalProcTimeNs() {
-        return maxTotalProcTimeNs;
-    }
-    
-    public long getTotalSigmaProcTimeNs() {
-        return sigmaTotalProcTimeNs;
-    }
-    
-    public int getNumComps() {
-        return compStats.values().size();
-    }
-    
-    public Collection<OneComponentTime> getModules() {
-        return compStats.values();
-    }
-
-    public CumulativeTimeBucket(List<IOFMessageListener> listeners) {
-        compStats = new ConcurrentHashMap<Integer, OneComponentTime>(listeners.size());
-        for (IOFMessageListener l : listeners) {
-            OneComponentTime oct = new OneComponentTime(l);
-            compStats.put(oct.hashCode(), oct);
-        }
-        startTime_ns = System.nanoTime();
-    }
-
-    private void updateSquaredProcessingTime(long curTimeNs) {
-        sumSquaredProcTimeNs2 += (Math.pow(curTimeNs, 2));
-    }
-    
-    /**
-     * Resets all counters and counters for each component time
-     */
-    public void reset() {
-        startTime_ns = System.nanoTime();
-        totalPktCnt = 0;
-        totalProcTimeNs = 0;
-        avgTotalProcTimeNs = 0;
-        sumSquaredProcTimeNs2 = 0;
-        maxTotalProcTimeNs = Long.MIN_VALUE;
-        minTotalProcTimeNs = Long.MAX_VALUE;
-        sigmaTotalProcTimeNs = 0;
-        for (OneComponentTime oct : compStats.values()) {
-            oct.resetAllCounters();
-        }
-    }
-    
-    private void computeSigma() {
-        // Computes std. deviation from the sum of count numbers and from
-        // the sum of the squares of count numbers
-        double temp = totalProcTimeNs;
-        temp = Math.pow(temp, 2) / totalPktCnt;
-        temp = (sumSquaredProcTimeNs2 - temp) / totalPktCnt;
-        sigmaTotalProcTimeNs = (long) Math.sqrt(temp);
-    }
-    
-    public void computeAverages() {
-        // Must be called last to, needs latest info
-        computeSigma();
-        
-        for (OneComponentTime oct : compStats.values()) {
-            oct.computeSigma();
-        }
-    }
-    
-    public void updatePerPacketCounters(long procTimeNs) {
-        totalPktCnt++;
-        totalProcTimeNs += procTimeNs;
-        avgTotalProcTimeNs = totalProcTimeNs / totalPktCnt;
-        updateSquaredProcessingTime(procTimeNs);
-        
-        if (procTimeNs > maxTotalProcTimeNs) {
-            maxTotalProcTimeNs = procTimeNs;
-        }
-        
-        if (procTimeNs < minTotalProcTimeNs) {
-            minTotalProcTimeNs = procTimeNs;
-        }
-    }
-    
-    public void updateOneComponent(IOFMessageListener l, long procTimeNs) {
-        compStats.get(l.hashCode()).updatePerPacketCounters(procTimeNs);
-    }
-}
\ No newline at end of file
diff --git a/src/main/java/net/floodlightcontroller/perfmon/CumulativeTimeBucketJSONSerializer.java b/src/main/java/net/floodlightcontroller/perfmon/CumulativeTimeBucketJSONSerializer.java
deleted file mode 100644
index e492777..0000000
--- a/src/main/java/net/floodlightcontroller/perfmon/CumulativeTimeBucketJSONSerializer.java
+++ /dev/null
@@ -1,47 +0,0 @@
-package net.floodlightcontroller.perfmon;
-
-import java.io.IOException;
-import java.sql.Timestamp;
-
-
-import org.codehaus.jackson.JsonGenerator;
-import org.codehaus.jackson.JsonProcessingException;
-import org.codehaus.jackson.map.JsonSerializer;
-import org.codehaus.jackson.map.SerializerProvider;
-
-public class CumulativeTimeBucketJSONSerializer
-                                extends JsonSerializer<CumulativeTimeBucket> {
-    /**
-     * Performs the serialization of a OneComponentTime object
-     */
-   @Override
-   public void serialize(CumulativeTimeBucket ctb,
-                   JsonGenerator jGen,
-                   SerializerProvider serializer) 
-                   throws IOException, JsonProcessingException {
-       jGen.writeStartObject();
-       Timestamp ts = new Timestamp(ctb.getStartTimeNs()/1000000);
-       jGen.writeStringField("start-time", ts.toString());
-       jGen.writeStringField("current-time", 
-         new Timestamp(System.currentTimeMillis()).toString());
-       jGen.writeNumberField("total-packets", ctb.getTotalPktCnt());
-       jGen.writeNumberField("average", ctb.getAverageProcTimeNs());
-       jGen.writeNumberField("min", ctb.getMinTotalProcTimeNs());
-       jGen.writeNumberField("max", ctb.getMaxTotalProcTimeNs());
-       jGen.writeNumberField("std-dev", ctb.getTotalSigmaProcTimeNs());
-       jGen.writeArrayFieldStart("modules");
-       for (OneComponentTime oct : ctb.getModules()) {
-           serializer.defaultSerializeValue(oct, jGen);
-       }
-       jGen.writeEndArray();
-       jGen.writeEndObject();
-   }
-
-   /**
-    * Tells SimpleModule that we are the serializer for OFMatch
-    */
-   @Override
-   public Class<CumulativeTimeBucket> handledType() {
-       return CumulativeTimeBucket.class;
-   }
-}
diff --git a/src/main/java/net/floodlightcontroller/perfmon/IPktInProcessingTimeService.java b/src/main/java/net/floodlightcontroller/perfmon/IPktInProcessingTimeService.java
deleted file mode 100644
index 80dfda0..0000000
--- a/src/main/java/net/floodlightcontroller/perfmon/IPktInProcessingTimeService.java
+++ /dev/null
@@ -1,37 +0,0 @@
-package net.floodlightcontroller.perfmon;
-
-import java.util.List;
-
-import org.openflow.protocol.OFMessage;
-
-import net.floodlightcontroller.core.FloodlightContext;
-import net.floodlightcontroller.core.IOFMessageListener;
-import net.floodlightcontroller.core.IOFSwitch;
-import net.floodlightcontroller.core.module.IFloodlightService;
-
-public interface IPktInProcessingTimeService extends IFloodlightService {
-
-    /**
-     * Creates time buckets for a set of modules to measure their performance
-     * @param listeners The message listeners to create time buckets for
-     */
-    public void bootstrap(List<IOFMessageListener> listeners);
-    
-    /**
-     * Stores a timestamp in ns. Used right before a service handles an
-     * OF message. Only stores if the service is enabled.
-     */
-    public void recordStartTimeComp(IOFMessageListener listener);
-    
-    public void recordEndTimeComp(IOFMessageListener listener);
-    
-    public void recordStartTimePktIn();
-    
-    public void recordEndTimePktIn(IOFSwitch sw, OFMessage m, FloodlightContext cntx);
-    
-    public boolean isEnabled();
-    
-    public void setEnabled(boolean enabled);
-    
-    public CumulativeTimeBucket getCtb();
-}
diff --git a/src/main/java/net/floodlightcontroller/perfmon/NullPktInProcessingTime.java b/src/main/java/net/floodlightcontroller/perfmon/NullPktInProcessingTime.java
deleted file mode 100644
index 3d9504b..0000000
--- a/src/main/java/net/floodlightcontroller/perfmon/NullPktInProcessingTime.java
+++ /dev/null
@@ -1,109 +0,0 @@
-package net.floodlightcontroller.perfmon;
-
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-import org.openflow.protocol.OFMessage;
-
-import net.floodlightcontroller.core.FloodlightContext;
-import net.floodlightcontroller.core.IOFMessageListener;
-import net.floodlightcontroller.core.IOFSwitch;
-import net.floodlightcontroller.core.module.FloodlightModuleContext;
-import net.floodlightcontroller.core.module.FloodlightModuleException;
-import net.floodlightcontroller.core.module.IFloodlightModule;
-import net.floodlightcontroller.core.module.IFloodlightService;
-
-/**
- * An IPktInProcessingTimeService implementation that does nothing.
- * This is used mainly for performance testing or if you don't
- * want to use the IPktInProcessingTimeService features.
- * @author alexreimers
- *
- */
-public class NullPktInProcessingTime 
-    implements IFloodlightModule, IPktInProcessingTimeService {
-    
-    private CumulativeTimeBucket ctb;
-    private boolean inited = false;
-    
-    public Collection<Class<? extends IFloodlightService>> getModuleServices() {
-        Collection<Class<? extends IFloodlightService>> l = 
-                new ArrayList<Class<? extends IFloodlightService>>();
-        l.add(IPktInProcessingTimeService.class);
-        return l;
-    }
-    
-    @Override
-    public Map<Class<? extends IFloodlightService>, IFloodlightService>
-            getServiceImpls() {
-        Map<Class<? extends IFloodlightService>,
-        IFloodlightService> m = 
-            new HashMap<Class<? extends IFloodlightService>,
-                        IFloodlightService>();
-        // We are the class that implements the service
-        m.put(IPktInProcessingTimeService.class, this);
-        return m;
-    }
-    
-    @Override
-    public Collection<Class<? extends IFloodlightService>> getModuleDependencies() {
-        // We don't have any dependencies
-        return null;
-    }
-    
-    @Override
-    public void init(FloodlightModuleContext context)
-                             throws FloodlightModuleException {
-
-    }
-
-    @Override
-    public void startUp(FloodlightModuleContext context) {
-        // no-op
-    }
-
-    @Override
-    public boolean isEnabled() {
-        return false;
-    }
-
-    @Override
-    public void bootstrap(List<IOFMessageListener> listeners) {
-        if (!inited)
-            ctb = new CumulativeTimeBucket(listeners);
-    }
-
-    @Override
-    public void recordStartTimeComp(IOFMessageListener listener) {
-
-    }
-
-    @Override
-    public void recordEndTimeComp(IOFMessageListener listener) {
-
-    }
-
-    @Override
-    public void recordStartTimePktIn() {
-
-    }
-
-    @Override
-    public void recordEndTimePktIn(IOFSwitch sw, OFMessage m,
-                                   FloodlightContext cntx) {
-        
-    }
-
-    @Override
-    public void setEnabled(boolean enabled) {
-    
-    }
-
-    @Override
-    public CumulativeTimeBucket getCtb() {
-        return ctb;
-    }
-}
diff --git a/src/main/java/net/floodlightcontroller/perfmon/OneComponentTime.java b/src/main/java/net/floodlightcontroller/perfmon/OneComponentTime.java
deleted file mode 100644
index 3e9734b..0000000
--- a/src/main/java/net/floodlightcontroller/perfmon/OneComponentTime.java
+++ /dev/null
@@ -1,129 +0,0 @@
-package net.floodlightcontroller.perfmon;
-
-import org.codehaus.jackson.annotate.JsonProperty;
-
-import net.floodlightcontroller.core.IOFMessageListener;
-
-/**
- * Holds OF message processing time information for one IFloodlightModule.
- * @author Subrata
- */
-public class OneComponentTime {
-    private int compId; // hascode of IOFMessageListener
-    private String compName;
-    private int pktCnt;
-    // all times in nanoseconds
-    private long totalProcTimeNs;
-    private long sumSquaredProcTimeNs2; // squared
-    private long maxProcTimeNs;
-    private long minProcTimeNs;
-    private long avgProcTimeNs;
-    private long sigmaProcTimeNs;  // std. deviation
-
-    public OneComponentTime(IOFMessageListener module) {
-        compId = module.hashCode();
-        compName = module.getClass().getCanonicalName();
-        resetAllCounters();
-    }
-    
-    public void resetAllCounters() {
-        maxProcTimeNs = Long.MIN_VALUE;
-        minProcTimeNs = Long.MAX_VALUE;
-        pktCnt = 0;
-        totalProcTimeNs = 0;
-        sumSquaredProcTimeNs2 = 0;
-        avgProcTimeNs = 0;
-        sigmaProcTimeNs = 0;
-    }
-    
-    @JsonProperty("module-name")
-    public String getCompName() {
-        return compName;
-    }
-
-    @JsonProperty("num-packets")
-    public int getPktCnt() {
-        return pktCnt;
-    }
-
-    @JsonProperty("total")
-    public long getSumProcTimeNs() {
-        return totalProcTimeNs;
-    }
-
-    @JsonProperty("max")
-    public long getMaxProcTimeNs() {
-        return maxProcTimeNs;
-    }
-
-    @JsonProperty("min")
-    public long getMinProcTimeNs() {
-        return minProcTimeNs;
-    }
-
-    @JsonProperty("average")
-    public long getAvgProcTimeNs() {
-        return avgProcTimeNs;
-    }
-
-    @JsonProperty("std-dev")
-    public long getSigmaProcTimeNs() {
-        return sigmaProcTimeNs;
-    }
-    
-    @JsonProperty("average-squared")
-    public long getSumSquaredProcTimeNs() {
-        return sumSquaredProcTimeNs2;
-    }
-
-    // Methods used to update the counters
-    
-    private void increasePktCount() {
-        pktCnt++;
-    }
-    
-    private void updateTotalProcessingTime(long procTimeNs) {
-        totalProcTimeNs += procTimeNs;
-    }
-    
-    private void updateAvgProcessTime() {
-        avgProcTimeNs = totalProcTimeNs / pktCnt;
-    }
-    
-    private void updateSquaredProcessingTime(long procTimeNs) {
-        sumSquaredProcTimeNs2 += (Math.pow(procTimeNs, 2));
-    }
-    
-    private void calculateMinProcTime(long curTimeNs) {
-        if (curTimeNs < minProcTimeNs)
-            minProcTimeNs = curTimeNs;
-    }
-    
-    private void calculateMaxProcTime(long curTimeNs) {
-        if (curTimeNs > maxProcTimeNs)
-            maxProcTimeNs = curTimeNs;
-    }
-    
-    public void computeSigma() {
-        // Computes std. deviation from the sum of count numbers and from
-        // the sum of the squares of count numbers
-        double temp = totalProcTimeNs;
-        temp = Math.pow(temp, 2) / pktCnt;
-        temp = (sumSquaredProcTimeNs2 - temp) / pktCnt;
-        sigmaProcTimeNs = (long) Math.sqrt(temp);
-    }
-    
-    public void updatePerPacketCounters(long procTimeNs) {
-        increasePktCount();
-        updateTotalProcessingTime(procTimeNs);
-        calculateMinProcTime(procTimeNs);
-        calculateMaxProcTime(procTimeNs);
-        updateAvgProcessTime();
-        updateSquaredProcessingTime(procTimeNs);
-    }
-    
-    @Override
-    public int hashCode() {
-        return compId;
-    }
-}
\ No newline at end of file
diff --git a/src/main/java/net/floodlightcontroller/perfmon/PerfMonDataResource.java b/src/main/java/net/floodlightcontroller/perfmon/PerfMonDataResource.java
deleted file mode 100644
index 297c44e..0000000
--- a/src/main/java/net/floodlightcontroller/perfmon/PerfMonDataResource.java
+++ /dev/null
@@ -1,33 +0,0 @@
-package net.floodlightcontroller.perfmon;
-
-import org.restlet.data.Status;
-import org.restlet.resource.Get;
-import org.restlet.resource.ServerResource;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-
-/**
- * Return the performance monitoring data for the get rest api call
- * @author subrata
- */
-public class PerfMonDataResource extends ServerResource {
-    protected final static Logger logger = LoggerFactory.getLogger(PerfMonDataResource.class);  
-    
-    @Get("json")
-    public CumulativeTimeBucket handleApiQuery() {        
-        IPktInProcessingTimeService pktinProcTime = 
-            (IPktInProcessingTimeService)getContext().getAttributes().
-                get(IPktInProcessingTimeService.class.getCanonicalName());
-        
-        setStatus(Status.SUCCESS_OK, "OK");
-        // Allocate output object
-        if (pktinProcTime.isEnabled()) {
-            CumulativeTimeBucket ctb = pktinProcTime.getCtb();
-            ctb.computeAverages();
-            return ctb;
-        }
-        
-        return null;
-    }
-}
\ No newline at end of file
diff --git a/src/main/java/net/floodlightcontroller/perfmon/PerfMonToggleResource.java b/src/main/java/net/floodlightcontroller/perfmon/PerfMonToggleResource.java
deleted file mode 100644
index 9ea1876..0000000
--- a/src/main/java/net/floodlightcontroller/perfmon/PerfMonToggleResource.java
+++ /dev/null
@@ -1,28 +0,0 @@
-package net.floodlightcontroller.perfmon;
-
-import org.restlet.data.Status;
-import org.restlet.resource.Get;
-import org.restlet.resource.ServerResource;
-
-public class PerfMonToggleResource extends ServerResource {
-    
-    @Get("json")
-    public String retrieve() {
-        IPktInProcessingTimeService pktinProcTime = 
-                (IPktInProcessingTimeService)getContext().getAttributes().
-                    get(IPktInProcessingTimeService.class.getCanonicalName());
-        
-        String param = ((String)getRequestAttributes().get("perfmonstate")).toLowerCase();
-        if (param.equals("reset")) {
-            pktinProcTime.getCtb().reset();
-        } else {
-            if (param.equals("enable") || param.equals("true")) {
-                pktinProcTime.setEnabled(true);
-            } else if (param.equals("disable") || param.equals("false")) {
-                pktinProcTime.setEnabled(false);
-            }
-        }
-        setStatus(Status.SUCCESS_OK, "OK");
-        return "{ \"enabled\" : " + pktinProcTime.isEnabled() + " }";
-    }
-}
diff --git a/src/main/java/net/floodlightcontroller/perfmon/PerfWebRoutable.java b/src/main/java/net/floodlightcontroller/perfmon/PerfWebRoutable.java
deleted file mode 100644
index ace0bc8..0000000
--- a/src/main/java/net/floodlightcontroller/perfmon/PerfWebRoutable.java
+++ /dev/null
@@ -1,23 +0,0 @@
-package net.floodlightcontroller.perfmon;
-
-import org.restlet.Context;
-import org.restlet.Restlet;
-import org.restlet.routing.Router;
-
-import net.floodlightcontroller.restserver.RestletRoutable;
-
-public class PerfWebRoutable implements RestletRoutable {
-
-    @Override
-    public Restlet getRestlet(Context context) {
-        Router router = new Router(context);
-        router.attach("/data/json", PerfMonDataResource.class);
-        router.attach("/{perfmonstate}/json", PerfMonToggleResource.class); // enable, disable, or reset
-        return router;
-    }
-
-    @Override
-    public String basePath() {
-        return "/wm/performance";
-    }
-}
diff --git a/src/main/java/net/floodlightcontroller/perfmon/PktInProcessingTime.java b/src/main/java/net/floodlightcontroller/perfmon/PktInProcessingTime.java
deleted file mode 100644
index 639623b..0000000
--- a/src/main/java/net/floodlightcontroller/perfmon/PktInProcessingTime.java
+++ /dev/null
@@ -1,205 +0,0 @@
-/**
- * Performance monitoring package
- */
-package net.floodlightcontroller.perfmon;
-
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-import net.floodlightcontroller.core.FloodlightContext;
-import net.floodlightcontroller.core.IOFMessageListener;
-import net.floodlightcontroller.core.IOFSwitch;
-import net.floodlightcontroller.core.annotations.LogMessageCategory;
-import net.floodlightcontroller.core.annotations.LogMessageDoc;
-import net.floodlightcontroller.core.module.FloodlightModuleContext;
-import net.floodlightcontroller.core.module.FloodlightModuleException;
-import net.floodlightcontroller.core.module.IFloodlightModule;
-import net.floodlightcontroller.core.module.IFloodlightService;
-import net.floodlightcontroller.restserver.IRestApiService;
-
-import org.openflow.protocol.OFMessage;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * This class contains a set of buckets (called time buckets as the
- * primarily contain 'times' that are used in a circular way to 
- * store information on packet in processing time.
- * Each bucket is meant to store the various processing time 
- * related data for a fixed duration.
- * Buckets are reused to reduce garbage generation! Once the
- * last bucket is used up the LRU bucket is reused.
- * 
- * Naming convention for variable or constants
- * variable_s : value in seconds
- * variable_ms: value in milliseconds
- * variable_us: value in microseconds
- * variable_ns: value in nanoseconds
- * 
- * Key Constants:
- * ONE_BUCKET_DURATION_SECONDS_INT:  time duration of each bucket
- * BUCKET_SET_SIZE: Number of buckets
- * TOT_PROC_TIME_WARN_THRESHOLD_US: if processing time for a packet
- *    exceeds this threshold then a warning LOG message is generated
- * TOT_PROC_TIME_ALERT_THRESHOLD_US: same as above but an alert level
- *    syslog is generated instead
- * 
- */
-@LogMessageCategory("Performance Monitoring")
-public class PktInProcessingTime
-    implements IFloodlightModule, IPktInProcessingTimeService {
-
-    
-    // Our dependencies
-    private IRestApiService restApi;
-    
-    protected long ptWarningThresholdInNano;
-
-    // DB storage tables
-    protected static final String ControllerTableName = "controller_controller";
-    public static final String COLUMN_ID = "id";
-    public static final String COLUMN_PERF_MON = "performance_monitor_feature";
-    
-    protected static  Logger  logger = 
-        LoggerFactory.getLogger(PktInProcessingTime.class);
-    
-    protected boolean isEnabled = false;
-    protected boolean isInited = false;
-    // Maintains the time when the last packet was processed
-    protected long lastPktTime_ns;
-    private CumulativeTimeBucket ctb = null;
-
-    
-    /***
-     * BUCKET_SET_SIZE buckets each holding 10s of processing time data, a total
-     * of 30*10s = 5mins of processing time data is maintained
-     */
-    protected static final int ONE_BUCKET_DURATION_SECONDS = 10;// seconds
-    protected static final long ONE_BUCKET_DURATION_NANOSECONDS  =
-                                ONE_BUCKET_DURATION_SECONDS * 1000000000;
-    
-    @Override
-    public void bootstrap(List<IOFMessageListener> listeners) {
-        if (!isInited) {
-            ctb = new CumulativeTimeBucket(listeners);
-            isInited = true;
-        }
-    }
-    
-    @Override
-    public boolean isEnabled() {
-        return isEnabled && isInited;
-    }
-    
-    @Override
-    public void setEnabled(boolean enabled) {
-        this.isEnabled = enabled;
-        logger.debug("Setting module to " + isEnabled);
-    }
-    
-    @Override
-    public CumulativeTimeBucket getCtb() {
-        return ctb;
-    }
-    
-    private long startTimePktNs;
-    private long startTimeCompNs;
-    @Override
-    public void recordStartTimeComp(IOFMessageListener listener) {
-        if (isEnabled()) {
-            startTimeCompNs = System.nanoTime();
-        }
-    }
-    
-    @Override
-    public void recordEndTimeComp(IOFMessageListener listener) {
-        if (isEnabled()) {
-            long procTime = System.nanoTime() - startTimeCompNs;
-            ctb.updateOneComponent(listener, procTime);
-        }
-    }
-    
-    @Override
-    public void recordStartTimePktIn() {
-        if (isEnabled()) {
-            startTimePktNs = System.nanoTime();
-        }
-    }
-    
-    @Override
-    @LogMessageDoc(level="WARN",
-            message="Time to process packet-in exceeded threshold: {}",
-            explanation="Time to process packet-in exceeded the configured " +
-            		"performance threshold",
-            recommendation=LogMessageDoc.CHECK_CONTROLLER)
-    public void recordEndTimePktIn(IOFSwitch sw, OFMessage m, FloodlightContext cntx) {
-        if (isEnabled()) {
-            long procTimeNs = System.nanoTime() - startTimePktNs;
-            ctb.updatePerPacketCounters(procTimeNs);
-            
-            if (ptWarningThresholdInNano > 0 && 
-                    procTimeNs > ptWarningThresholdInNano) {
-                logger.warn("Time to process packet-in exceeded threshold: {}", 
-                            procTimeNs/1000);
-            }
-        }
-    }
-    
-    // IFloodlightModule methods
-    
-    @Override
-    public Collection<Class<? extends IFloodlightService>> getModuleServices() {
-        Collection<Class<? extends IFloodlightService>> l = 
-                new ArrayList<Class<? extends IFloodlightService>>();
-        l.add(IPktInProcessingTimeService.class);
-        return l;
-    }
-    
-    @Override
-    public Map<Class<? extends IFloodlightService>, IFloodlightService>
-            getServiceImpls() {
-        Map<Class<? extends IFloodlightService>,
-        IFloodlightService> m = 
-            new HashMap<Class<? extends IFloodlightService>,
-                        IFloodlightService>();
-        // We are the class that implements the service
-        m.put(IPktInProcessingTimeService.class, this);
-        return m;
-    }
-    
-    @Override
-    public Collection<Class<? extends IFloodlightService>> getModuleDependencies() {
-        Collection<Class<? extends IFloodlightService>> l = 
-                new ArrayList<Class<? extends IFloodlightService>>();
-        l.add(IRestApiService.class);
-        return l;
-    }
-    
-    @Override
-    public void init(FloodlightModuleContext context)
-                                             throws FloodlightModuleException {
-        restApi = context.getServiceImpl(IRestApiService.class);
-    }
-    
-    @Override
-    @LogMessageDoc(level="INFO",
-        message="Packet processing time threshold for warning" +
-            " set to {time} ms.",
-        explanation="Performance monitoring will log a warning if " +
-    		"packet processing time exceeds the configured threshold")
-    public void startUp(FloodlightModuleContext context) {
-        // Add our REST API
-        restApi.addRestletRoutable(new PerfWebRoutable());
-        
-        // TODO - Alex - change this to a config option
-        ptWarningThresholdInNano = Long.parseLong(System.getProperty(
-             "net.floodlightcontroller.core.PTWarningThresholdInMilli", "0")) * 1000000;
-        if (ptWarningThresholdInNano > 0) {
-            logger.info("Packet processing time threshold for warning" +
-            		" set to {} ms.", ptWarningThresholdInNano/1000000);
-        }
-    }
-}
diff --git a/src/main/java/net/floodlightcontroller/routing/ForwardingBase.java b/src/main/java/net/floodlightcontroller/routing/ForwardingBase.java
index 348a7af..c71ff87 100644
--- a/src/main/java/net/floodlightcontroller/routing/ForwardingBase.java
+++ b/src/main/java/net/floodlightcontroller/routing/ForwardingBase.java
@@ -18,9 +18,9 @@
 package net.floodlightcontroller.routing;
 
 import java.io.IOException;
-import java.util.EnumSet;
 import java.util.ArrayList;
 import java.util.Comparator;
+import java.util.EnumSet;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Set;
@@ -33,16 +33,12 @@
 import net.floodlightcontroller.core.annotations.LogMessageDoc;
 import net.floodlightcontroller.core.annotations.LogMessageDocs;
 import net.floodlightcontroller.core.util.AppCookie;
-import net.floodlightcontroller.counter.ICounterStoreService;
 import net.floodlightcontroller.devicemanager.IDevice;
 import net.floodlightcontroller.devicemanager.IDeviceListener;
 import net.floodlightcontroller.devicemanager.IDeviceService;
 import net.floodlightcontroller.devicemanager.SwitchPort;
 import net.floodlightcontroller.packet.Ethernet;
 import net.floodlightcontroller.packet.IPacket;
-import net.floodlightcontroller.routing.IRoutingService;
-import net.floodlightcontroller.routing.IRoutingDecision;
-import net.floodlightcontroller.routing.Route;
 import net.floodlightcontroller.topology.ITopologyService;
 import net.floodlightcontroller.topology.NodePortTuple;
 import net.floodlightcontroller.util.OFMessageDamper;
@@ -81,7 +77,6 @@
     protected IDeviceService deviceManager;
     protected IRoutingService routingEngine;
     protected ITopologyService topology;
-    protected ICounterStoreService counterStore;
     
     protected OFMessageDamper messageDamper;
     
@@ -265,7 +260,6 @@
             ((OFActionOutput)fm.getActions().get(0)).setPort(outPort);
 
             try {
-                counterStore.updatePktOutFMCounterStore(sw, fm);
                 if (log.isTraceEnabled()) {
                     log.trace("Pushing Route flowmod routeIndx={} " + 
                             "sw={} inPort={} outPort={}",
@@ -383,7 +377,6 @@
         po.setLength(poLength);
 
         try {
-            counterStore.updatePktOutFMCounterStore(sw, po);
             messageDamper.write(sw, po, cntx, flush);
         } catch (IOException e) {
             log.error("Failure writing packet out", e);
@@ -466,7 +459,6 @@
         po.setLength(poLength);
 
         try {
-            counterStore.updatePktOutFMCounterStore(sw, po);
             messageDamper.write(sw, po, cntx);
         } catch (IOException e) {
             log.error("Failure writing packet out", e);
@@ -518,7 +510,6 @@
         po.setLength(poLength);
 
         try {
-            counterStore.updatePktOutFMCounterStore(sw, po);
             if (log.isTraceEnabled()) {
                 log.trace("write broadcast packet on switch-id={} " + 
                         "interfaces={} packet-out={}",
diff --git a/src/main/java/net/floodlightcontroller/staticflowentry/IStaticFlowEntryPusherService.java b/src/main/java/net/floodlightcontroller/staticflowentry/IStaticFlowEntryPusherService.java
deleted file mode 100644
index 66e02dd..0000000
--- a/src/main/java/net/floodlightcontroller/staticflowentry/IStaticFlowEntryPusherService.java
+++ /dev/null
@@ -1,44 +0,0 @@
-package net.floodlightcontroller.staticflowentry;
-
-import java.util.Map;
-
-import org.openflow.protocol.OFFlowMod;
-
-import net.floodlightcontroller.core.module.IFloodlightService;
-
-public interface IStaticFlowEntryPusherService extends IFloodlightService {
-    /**
-     * Adds a static flow.
-     * @param name Name of the flow mod. Must be unique.
-     * @param fm The flow to push.
-     * @param swDpid The switch DPID to push it to, in 00:00:00:00:00:00:00:01 notation.
-     */
-    public void addFlow(String name, OFFlowMod fm, String swDpid);
-    
-    /**
-     * Deletes a static flow
-     * @param name The name of the static flow to delete.
-     */
-    public void deleteFlow(String name);
-    
-    /**
-     * Deletes all static flows for a practicular switch
-     * @param dpid The DPID of the switch to delete flows for.
-     */
-    public void deleteFlowsForSwitch(long dpid);
-    
-    /**
-     * Deletes all flows.
-     */
-    public void deleteAllFlows();
-    
-    /**
-     * Gets all list of all flows
-     */
-    public Map<String, Map<String, OFFlowMod>> getFlows();
-    
-    /**
-     * Gets a list of flows by switch
-     */
-    public Map<String, OFFlowMod> getFlows(String dpid);
-}
diff --git a/src/main/java/net/floodlightcontroller/staticflowentry/StaticFlowEntries.java b/src/main/java/net/floodlightcontroller/staticflowentry/StaticFlowEntries.java
deleted file mode 100644
index e733843..0000000
--- a/src/main/java/net/floodlightcontroller/staticflowentry/StaticFlowEntries.java
+++ /dev/null
@@ -1,831 +0,0 @@
-package net.floodlightcontroller.staticflowentry;
-
-import java.io.IOException;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-
-import net.floodlightcontroller.core.annotations.LogMessageCategory;
-import net.floodlightcontroller.core.annotations.LogMessageDoc;
-import net.floodlightcontroller.core.util.AppCookie;
-import net.floodlightcontroller.packet.IPv4;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-
-import org.codehaus.jackson.JsonParseException;
-import org.codehaus.jackson.JsonParser;
-import org.codehaus.jackson.JsonToken;
-import org.codehaus.jackson.map.MappingJsonFactory;
-import org.openflow.protocol.OFFlowMod;
-import org.openflow.protocol.OFMatch;
-import org.openflow.protocol.OFPacketOut;
-import org.openflow.protocol.OFPort;
-import org.openflow.protocol.action.OFAction;
-import org.openflow.protocol.action.OFActionDataLayerDestination;
-import org.openflow.protocol.action.OFActionDataLayerSource;
-import org.openflow.protocol.action.OFActionEnqueue;
-import org.openflow.protocol.action.OFActionNetworkLayerDestination;
-import org.openflow.protocol.action.OFActionNetworkLayerSource;
-import org.openflow.protocol.action.OFActionNetworkTypeOfService;
-import org.openflow.protocol.action.OFActionOutput;
-import org.openflow.protocol.action.OFActionStripVirtualLan;
-import org.openflow.protocol.action.OFActionTransportLayerDestination;
-import org.openflow.protocol.action.OFActionTransportLayerSource;
-import org.openflow.protocol.action.OFActionVirtualLanIdentifier;
-import org.openflow.protocol.action.OFActionVirtualLanPriorityCodePoint;
-import org.openflow.util.HexString;
-
-/**
- * Represents static flow entries to be maintained by the controller on the 
- * switches. 
- */
-@LogMessageCategory("Static Flow Pusher")
-public class StaticFlowEntries {
-    protected final static Logger log = LoggerFactory.getLogger(StaticFlowEntries.class);
-    
-    private static class SubActionStruct {
-        OFAction action;
-        int      len;
-    }
-    
-    private static byte[] zeroMac = new byte[] {0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
-    
-    /**
-     * This function generates a random hash for the bottom half of the cookie
-     * 
-     * @param fm
-     * @param userCookie
-     * @param name
-     * @return A cookie that encodes the application ID and a hash
-     */
-    public static long computeEntryCookie(OFFlowMod fm, int userCookie, String name) {
-        // flow-specific hash is next 20 bits LOOK! who knows if this 
-        int prime = 211;
-        int flowHash = 2311;
-        for (int i=0; i < name.length(); i++)
-            flowHash = flowHash * prime + (int)name.charAt(i);
-        
-        return AppCookie.makeCookie(StaticFlowEntryPusher.STATIC_FLOW_APP_ID, flowHash);
-    }
-    
-    /**
-     * Sets defaults for an OFFlowMod
-     * @param fm The OFFlowMod to set defaults for
-     * @param entryName The name of the entry. Used to compute the cookie.
-     */
-    public static void initDefaultFlowMod(OFFlowMod fm, String entryName) {
-        fm.setIdleTimeout((short) 0);   // infinite
-        fm.setHardTimeout((short) 0);   // infinite
-        fm.setBufferId(OFPacketOut.BUFFER_ID_NONE);
-        fm.setCommand((short) 0);
-        fm.setFlags((short) 0);
-        fm.setOutPort(OFPort.OFPP_NONE.getValue());
-        fm.setCookie(computeEntryCookie(fm, 0, entryName));  
-        fm.setPriority(Short.MAX_VALUE);
-    }
-    
-    /**
-     * Gets the entry name of a flow mod
-     * @param fmJson The OFFlowMod in a JSON representation
-     * @return The name of the OFFlowMod, null if not found
-     * @throws IOException If there was an error parsing the JSON
-     */
-    public static String getEntryNameFromJson(String fmJson) throws IOException{
-        MappingJsonFactory f = new MappingJsonFactory();
-        JsonParser jp;
-        
-        try {
-            jp = f.createJsonParser(fmJson);
-        } catch (JsonParseException e) {
-            throw new IOException(e);
-        }
-        
-        jp.nextToken();
-        if (jp.getCurrentToken() != JsonToken.START_OBJECT) {
-            throw new IOException("Expected START_OBJECT");
-        }
-        
-        while (jp.nextToken() != JsonToken.END_OBJECT) {
-            if (jp.getCurrentToken() != JsonToken.FIELD_NAME) {
-                throw new IOException("Expected FIELD_NAME");
-            }
-            
-            String n = jp.getCurrentName();
-            jp.nextToken();
-            if (jp.getText().equals("")) 
-                continue;
-            
-            if (n == "name")
-                return jp.getText();
-        }
-        
-        return null;
-    }
-    
-    /**
-     * Parses an OFFlowMod (and it's inner OFMatch) to the storage entry format.
-     * @param fm The FlowMod to parse
-     * @param sw The switch the FlowMod is going to be installed on
-     * @param name The name of this static flow entry
-     * @return A Map representation of the storage entry 
-     */
-    public static Map<String, Object> flowModToStorageEntry(OFFlowMod fm, String sw, String name) {
-        Map<String, Object> entry = new HashMap<String, Object>();
-        OFMatch match = fm.getMatch();
-        entry.put(StaticFlowEntryPusher.COLUMN_NAME, name);
-        entry.put(StaticFlowEntryPusher.COLUMN_SWITCH, sw);
-        entry.put(StaticFlowEntryPusher.COLUMN_ACTIVE, Boolean.toString(true));
-        entry.put(StaticFlowEntryPusher.COLUMN_PRIORITY, Short.toString(fm.getPriority()));
-        entry.put(StaticFlowEntryPusher.COLUMN_WILDCARD, Integer.toString(match.getWildcards()));
-        
-        if ((fm.getActions() != null) && (fm.getActions().size() > 0))
-        	entry.put(StaticFlowEntryPusher.COLUMN_ACTIONS, StaticFlowEntries.flowModActionsToString(fm.getActions()));
-        
-        if (match.getInputPort() != 0)
-        	entry.put(StaticFlowEntryPusher.COLUMN_IN_PORT, Short.toString(match.getInputPort()));
-        
-        if (!Arrays.equals(match.getDataLayerSource(), zeroMac))
-        	entry.put(StaticFlowEntryPusher.COLUMN_DL_SRC, HexString.toHexString(match.getDataLayerSource()));
-
-        if (!Arrays.equals(match.getDataLayerDestination(), zeroMac))
-        	entry.put(StaticFlowEntryPusher.COLUMN_DL_DST, HexString.toHexString(match.getDataLayerDestination()));
-        
-        if (match.getDataLayerVirtualLan() != -1)
-        	entry.put(StaticFlowEntryPusher.COLUMN_DL_VLAN, Short.toString(match.getDataLayerVirtualLan()));
-        
-        if (match.getDataLayerVirtualLanPriorityCodePoint() != 0)
-        	entry.put(StaticFlowEntryPusher.COLUMN_DL_VLAN_PCP, Short.toString(match.getDataLayerVirtualLanPriorityCodePoint()));
-        
-        if (match.getDataLayerType() != 0)
-        	entry.put(StaticFlowEntryPusher.COLUMN_DL_TYPE, Short.toString(match.getDataLayerType()));
-        
-        if (match.getNetworkTypeOfService() != 0)
-        	entry.put(StaticFlowEntryPusher.COLUMN_NW_TOS, Short.toString(match.getNetworkTypeOfService()));
-        
-        if (match.getNetworkProtocol() != 0)
-        	entry.put(StaticFlowEntryPusher.COLUMN_NW_PROTO, Short.toString(match.getNetworkProtocol()));
-        
-        if (match.getNetworkSource() != 0)
-        	entry.put(StaticFlowEntryPusher.COLUMN_NW_SRC, IPv4.fromIPv4Address(match.getNetworkSource()));
-        
-        if (match.getNetworkDestination() != 0)
-        	entry.put(StaticFlowEntryPusher.COLUMN_NW_DST, IPv4.fromIPv4Address(match.getNetworkDestination()));
-        
-        if (match.getTransportSource() != 0)
-        	entry.put(StaticFlowEntryPusher.COLUMN_TP_SRC, Short.toString(match.getTransportSource()));
-        
-        if (match.getTransportDestination() != 0)
-        	entry.put(StaticFlowEntryPusher.COLUMN_TP_DST, Short.toString(match.getTransportDestination()));
-        
-        return entry;
-    }
-    
-    /**
-     * Returns a String representation of all the openflow actions.
-     * @param fmActions A list of OFActions to encode into one string
-     * @return A string of the actions encoded for our database
-     */
-    @LogMessageDoc(level="ERROR",
-            message="Could not decode action {action}",
-            explanation="A static flow entry contained an invalid action",
-            recommendation=LogMessageDoc.REPORT_CONTROLLER_BUG)
-    private static String flowModActionsToString(List<OFAction> fmActions) {
-        StringBuilder sb = new StringBuilder();
-        for (OFAction a : fmActions) {
-            if (sb.length() > 0) {
-                sb.append(',');
-            }
-            switch(a.getType()) {
-                case OUTPUT:
-                    sb.append("output=" + Short.toString(((OFActionOutput)a).getPort()));
-                    break;
-                case OPAQUE_ENQUEUE:
-                    int queue = ((OFActionEnqueue)a).getQueueId();
-                    short port = ((OFActionEnqueue)a).getPort();
-                    sb.append("enqueue=" + Short.toString(port) + ":0x" + String.format("%02x", queue));
-                    break;
-                case STRIP_VLAN:
-                    sb.append("strip-vlan");
-                    break;
-                case SET_VLAN_ID:
-                    sb.append("set-vlan-id=" + 
-                        Short.toString(((OFActionVirtualLanIdentifier)a).getVirtualLanIdentifier()));
-                    break;
-                case SET_VLAN_PCP:
-                    sb.append("set-vlan-priority=" +
-                        Byte.toString(((OFActionVirtualLanPriorityCodePoint)a).getVirtualLanPriorityCodePoint()));
-                    break;
-                case SET_DL_SRC:
-                    sb.append("set-src-mac=" + 
-                        HexString.toHexString(((OFActionDataLayerSource)a).getDataLayerAddress()));
-                    break;
-                case SET_DL_DST:
-                    sb.append("set-dst-mac=" + 
-                        HexString.toHexString(((OFActionDataLayerDestination)a).getDataLayerAddress()));
-                    break;
-                case SET_NW_TOS:
-                    sb.append("set-tos-bits=" +
-                        Byte.toString(((OFActionNetworkTypeOfService)a).getNetworkTypeOfService()));
-                    break;
-                case SET_NW_SRC:
-                    sb.append("set-src-ip=" +
-                        IPv4.fromIPv4Address(((OFActionNetworkLayerSource)a).getNetworkAddress()));
-                    break;
-                case SET_NW_DST:
-                    sb.append("set-dst-ip=" +
-                        IPv4.fromIPv4Address(((OFActionNetworkLayerDestination)a).getNetworkAddress()));
-                    break;
-                case SET_TP_SRC:
-                    sb.append("set-src-port=" +
-                        Short.toString(((OFActionTransportLayerSource)a).getTransportPort()));
-                    break;
-                case SET_TP_DST:
-                    sb.append("set-dst-port=" +
-                        Short.toString(((OFActionTransportLayerDestination)a).getTransportPort()));
-                    break;
-                default:
-                    log.error("Could not decode action: {}", a);
-                    break;
-            }
-                
-        }
-        return sb.toString();
-    }
-    
-    /**
-     * Turns a JSON formatted Static Flow Pusher string into a storage entry
-     * Expects a string in JSON along the lines of:
-     *        {
-     *            "switch":       "AA:BB:CC:DD:EE:FF:00:11",
-     *            "name":         "flow-mod-1",
-     *            "cookie":       "0",
-     *            "priority":     "32768",
-     *            "ingress-port": "1",
-     *            "actions":      "output=2",
-     *        }
-     * @param fmJson The JSON formatted static flow pusher entry
-     * @return The map of the storage entry
-     * @throws IOException If there was an error parsing the JSON
-     */
-    public static Map<String, Object> jsonToStorageEntry(String fmJson) throws IOException {
-        Map<String, Object> entry = new HashMap<String, Object>();
-        MappingJsonFactory f = new MappingJsonFactory();
-        JsonParser jp;
-        
-        try {
-            jp = f.createJsonParser(fmJson);
-        } catch (JsonParseException e) {
-            throw new IOException(e);
-        }
-        
-        jp.nextToken();
-        if (jp.getCurrentToken() != JsonToken.START_OBJECT) {
-            throw new IOException("Expected START_OBJECT");
-        }
-        
-        while (jp.nextToken() != JsonToken.END_OBJECT) {
-            if (jp.getCurrentToken() != JsonToken.FIELD_NAME) {
-                throw new IOException("Expected FIELD_NAME");
-            }
-            
-            String n = jp.getCurrentName();
-            jp.nextToken();
-            if (jp.getText().equals("")) 
-                continue;
-            
-            if (n == "name")
-                entry.put(StaticFlowEntryPusher.COLUMN_NAME, jp.getText());
-            else if (n == "switch")
-                entry.put(StaticFlowEntryPusher.COLUMN_SWITCH, jp.getText());
-            else if (n == "actions")
-                entry.put(StaticFlowEntryPusher.COLUMN_ACTIONS, jp.getText());
-            else if (n == "priority")
-                entry.put(StaticFlowEntryPusher.COLUMN_PRIORITY, jp.getText());
-            else if (n == "active")
-                entry.put(StaticFlowEntryPusher.COLUMN_ACTIVE, jp.getText());
-            else if (n == "wildcards")
-                entry.put(StaticFlowEntryPusher.COLUMN_WILDCARD, jp.getText());
-            else if (n == "ingress-port")
-                entry.put(StaticFlowEntryPusher.COLUMN_IN_PORT, jp.getText());
-            else if (n == "src-mac")
-                entry.put(StaticFlowEntryPusher.COLUMN_DL_SRC, jp.getText());
-            else if (n == "dst-mac")
-                entry.put(StaticFlowEntryPusher.COLUMN_DL_DST, jp.getText());
-            else if (n == "vlan-id")
-                entry.put(StaticFlowEntryPusher.COLUMN_DL_VLAN, jp.getText());
-            else if (n == "vlan-priority")
-                entry.put(StaticFlowEntryPusher.COLUMN_DL_VLAN_PCP, jp.getText());
-            else if (n == "ether-type")
-                entry.put(StaticFlowEntryPusher.COLUMN_DL_TYPE, jp.getText());
-            else if (n == "tos-bits")
-                entry.put(StaticFlowEntryPusher.COLUMN_NW_TOS, jp.getText());
-            else if (n == "protocol")
-                entry.put(StaticFlowEntryPusher.COLUMN_NW_PROTO, jp.getText());
-            else if (n == "src-ip")
-                entry.put(StaticFlowEntryPusher.COLUMN_NW_SRC, jp.getText());
-            else if (n == "dst-ip")
-                entry.put(StaticFlowEntryPusher.COLUMN_NW_DST, jp.getText());
-            else if (n == "src-port")
-                entry.put(StaticFlowEntryPusher.COLUMN_TP_SRC, jp.getText());
-            else if (n == "dst-port")
-                entry.put(StaticFlowEntryPusher.COLUMN_TP_DST, jp.getText());
-        }
-        
-        return entry;
-    }
-    
-    /**
-     * Parses OFFlowMod actions from strings.
-     * @param flowMod The OFFlowMod to set the actions for
-     * @param actionstr The string containing all the actions
-     * @param log A logger to log for errors.
-     */
-    @LogMessageDoc(level="ERROR",
-            message="Unexpected action '{action}', '{subaction}'",
-            explanation="A static flow entry contained an invalid action",
-            recommendation=LogMessageDoc.REPORT_CONTROLLER_BUG)
-    public static void parseActionString(OFFlowMod flowMod, String actionstr, Logger log) {
-        List<OFAction> actions = new LinkedList<OFAction>();
-        int actionsLength = 0;
-        if (actionstr != null) {
-            actionstr = actionstr.toLowerCase();
-            for (String subaction : actionstr.split(",")) {
-                String action = subaction.split("[=:]")[0];
-                SubActionStruct subaction_struct = null;
-                
-                if (action.equals("output")) {
-                    subaction_struct = StaticFlowEntries.decode_output(subaction, log);
-                }
-                else if (action.equals("enqueue")) {
-                    subaction_struct = decode_enqueue(subaction, log);
-                }
-                else if (action.equals("strip-vlan")) {
-                    subaction_struct = decode_strip_vlan(subaction, log);
-                }
-                else if (action.equals("set-vlan-id")) {
-                    subaction_struct = decode_set_vlan_id(subaction, log);
-                }
-                else if (action.equals("set-vlan-priority")) {
-                    subaction_struct = decode_set_vlan_priority(subaction, log);
-                }
-                else if (action.equals("set-src-mac")) {
-                    subaction_struct = decode_set_src_mac(subaction, log);
-                }
-                else if (action.equals("set-dst-mac")) {
-                    subaction_struct = decode_set_dst_mac(subaction, log);
-                }
-                else if (action.equals("set-tos-bits")) {
-                    subaction_struct = decode_set_tos_bits(subaction, log);
-                }
-                else if (action.equals("set-src-ip")) {
-                    subaction_struct = decode_set_src_ip(subaction, log);
-                }
-                else if (action.equals("set-dst-ip")) {
-                    subaction_struct = decode_set_dst_ip(subaction, log);
-                }
-                else if (action.equals("set-src-port")) {
-                    subaction_struct = decode_set_src_port(subaction, log);
-                }
-                else if (action.equals("set-dst-port")) {
-                    subaction_struct = decode_set_dst_port(subaction, log);
-                }
-                else {
-                    log.error("Unexpected action '{}', '{}'", action, subaction);
-                }
-                
-                if (subaction_struct != null) {
-                    actions.add(subaction_struct.action);
-                    actionsLength += subaction_struct.len;
-                }
-            }
-        }
-        log.debug("action {}", actions);
-        
-        flowMod.setActions(actions);
-        flowMod.setLengthU(OFFlowMod.MINIMUM_LENGTH + actionsLength);
-    } 
-    
-    @LogMessageDoc(level="ERROR",
-            message="Invalid subaction: '{subaction}'",
-            explanation="A static flow entry contained an invalid subaction",
-            recommendation=LogMessageDoc.REPORT_CONTROLLER_BUG)
-    private static SubActionStruct decode_output(String subaction, Logger log) {
-        SubActionStruct sa = null;
-        Matcher n;
-        
-        n = Pattern.compile("output=(?:((?:0x)?\\d+)|(all)|(controller)|(local)|(ingress-port)|(normal)|(flood))").matcher(subaction);
-        if (n.matches()) {
-            OFActionOutput action = new OFActionOutput();
-            action.setMaxLength((short) Short.MAX_VALUE);
-            short port = OFPort.OFPP_NONE.getValue();
-            if (n.group(1) != null) {
-                try {
-                    port = get_short(n.group(1));
-                }
-                catch (NumberFormatException e) {
-                    log.debug("Invalid port in: '{}' (error ignored)", subaction);
-                    return null;
-                }
-            }
-            else if (n.group(2) != null)
-                port = OFPort.OFPP_ALL.getValue();
-            else if (n.group(3) != null)
-                port = OFPort.OFPP_CONTROLLER.getValue();
-            else if (n.group(4) != null)
-                port = OFPort.OFPP_LOCAL.getValue();
-            else if (n.group(5) != null)
-                port = OFPort.OFPP_IN_PORT.getValue();
-            else if (n.group(6) != null)
-                port = OFPort.OFPP_NORMAL.getValue();
-            else if (n.group(7) != null)
-                port = OFPort.OFPP_FLOOD.getValue();
-            action.setPort(port);
-            log.debug("action {}", action);
-            
-            sa = new SubActionStruct();
-            sa.action = action;
-            sa.len = OFActionOutput.MINIMUM_LENGTH;
-        }
-        else {
-            log.error("Invalid subaction: '{}'", subaction);
-            return null;
-        }
-        
-        return sa;
-    }
-    
-    private static SubActionStruct decode_enqueue(String subaction, Logger log) {
-        SubActionStruct sa = null;
-        Matcher n;
-        
-        n = Pattern.compile("enqueue=(?:((?:0x)?\\d+)\\:((?:0x)?\\d+))").matcher(subaction);
-        if (n.matches()) {
-            short portnum = 0;
-            if (n.group(1) != null) {
-                try {
-                    portnum = get_short(n.group(1));
-                }
-                catch (NumberFormatException e) {
-                    log.debug("Invalid port-num in: '{}' (error ignored)", subaction);
-                    return null;
-                }
-            }
-
-            int queueid = 0;
-            if (n.group(2) != null) {
-                try {
-                    queueid = get_int(n.group(2));
-                }
-                catch (NumberFormatException e) {
-                    log.debug("Invalid queue-id in: '{}' (error ignored)", subaction);
-                    return null;
-               }
-            }
-            
-            OFActionEnqueue action = new OFActionEnqueue();
-            action.setPort(portnum);
-            action.setQueueId(queueid);
-            log.debug("action {}", action);
-            
-            sa = new SubActionStruct();
-            sa.action = action;
-            sa.len = OFActionEnqueue.MINIMUM_LENGTH;
-        }
-        else {
-            log.debug("Invalid action: '{}'", subaction);
-            return null;
-        }
-        
-        return sa;
-    }
-    
-    private static SubActionStruct decode_strip_vlan(String subaction, Logger log) {
-        SubActionStruct sa = null;
-        Matcher n = Pattern.compile("strip-vlan").matcher(subaction);
-        
-        if (n.matches()) {
-            OFActionStripVirtualLan action = new OFActionStripVirtualLan();
-            log.debug("action {}", action);
-            
-            sa = new SubActionStruct();
-            sa.action = action;
-            sa.len = OFActionStripVirtualLan.MINIMUM_LENGTH;
-        }
-        else {
-            log.debug("Invalid action: '{}'", subaction);
-            return null;
-        }
-
-        return sa;
-    }
-    
-    private static SubActionStruct decode_set_vlan_id(String subaction, Logger log) {
-        SubActionStruct sa = null;
-        Matcher n = Pattern.compile("set-vlan-id=((?:0x)?\\d+)").matcher(subaction);
-        
-        if (n.matches()) {            
-            if (n.group(1) != null) {
-                try {
-                    short vlanid = get_short(n.group(1));
-                    OFActionVirtualLanIdentifier action = new OFActionVirtualLanIdentifier();
-                    action.setVirtualLanIdentifier(vlanid);
-                    log.debug("  action {}", action);
-
-                    sa = new SubActionStruct();
-                    sa.action = action;
-                    sa.len = OFActionVirtualLanIdentifier.MINIMUM_LENGTH;
-                }
-                catch (NumberFormatException e) {
-                    log.debug("Invalid VLAN in: {} (error ignored)", subaction);
-                    return null;
-                }
-            }          
-        }
-        else {
-            log.debug("Invalid action: '{}'", subaction);
-            return null;
-        }
-
-        return sa;
-    }
-    
-    private static SubActionStruct decode_set_vlan_priority(String subaction, Logger log) {
-        SubActionStruct sa = null;
-        Matcher n = Pattern.compile("set-vlan-priority=((?:0x)?\\d+)").matcher(subaction); 
-        
-        if (n.matches()) {            
-            if (n.group(1) != null) {
-                try {
-                    byte prior = get_byte(n.group(1));
-                    OFActionVirtualLanPriorityCodePoint action = new OFActionVirtualLanPriorityCodePoint();
-                    action.setVirtualLanPriorityCodePoint(prior);
-                    log.debug("  action {}", action);
-                    
-                    sa = new SubActionStruct();
-                    sa.action = action;
-                    sa.len = OFActionVirtualLanPriorityCodePoint.MINIMUM_LENGTH;
-                }
-                catch (NumberFormatException e) {
-                    log.debug("Invalid VLAN priority in: {} (error ignored)", subaction);
-                    return null;
-                }
-            }
-        }
-        else {
-            log.debug("Invalid action: '{}'", subaction);
-            return null;
-        }
-
-        return sa;
-    }
-    
-    private static SubActionStruct decode_set_src_mac(String subaction, Logger log) {
-        SubActionStruct sa = null;
-        Matcher n = Pattern.compile("set-src-mac=(?:(\\p{XDigit}+)\\:(\\p{XDigit}+)\\:(\\p{XDigit}+)\\:(\\p{XDigit}+)\\:(\\p{XDigit}+)\\:(\\p{XDigit}+))").matcher(subaction); 
-
-        if (n.matches()) {
-            byte[] macaddr = get_mac_addr(n, subaction, log);
-            if (macaddr != null) {
-                OFActionDataLayerSource action = new OFActionDataLayerSource();
-                action.setDataLayerAddress(macaddr);
-                log.debug("action {}", action);
-
-                sa = new SubActionStruct();
-                sa.action = action;
-                sa.len = OFActionDataLayerSource.MINIMUM_LENGTH;
-            }            
-        }
-        else {
-            log.debug("Invalid action: '{}'", subaction);
-            return null;
-        }
-
-        return sa;
-    }
-
-    private static SubActionStruct decode_set_dst_mac(String subaction, Logger log) {
-        SubActionStruct sa = null;
-        Matcher n = Pattern.compile("set-dst-mac=(?:(\\p{XDigit}+)\\:(\\p{XDigit}+)\\:(\\p{XDigit}+)\\:(\\p{XDigit}+)\\:(\\p{XDigit}+)\\:(\\p{XDigit}+))").matcher(subaction);
-        
-        if (n.matches()) {
-            byte[] macaddr = get_mac_addr(n, subaction, log);            
-            if (macaddr != null) {
-                OFActionDataLayerDestination action = new OFActionDataLayerDestination();
-                action.setDataLayerAddress(macaddr);
-                log.debug("  action {}", action);
-                
-                sa = new SubActionStruct();
-                sa.action = action;
-                sa.len = OFActionDataLayerDestination.MINIMUM_LENGTH;
-            }
-        }
-        else {
-            log.debug("Invalid action: '{}'", subaction);
-            return null;
-        }
-
-        return sa;
-    }
-    
-    private static SubActionStruct decode_set_tos_bits(String subaction, Logger log) {
-        SubActionStruct sa = null;
-        Matcher n = Pattern.compile("set-tos-bits=((?:0x)?\\d+)").matcher(subaction); 
-
-        if (n.matches()) {
-            if (n.group(1) != null) {
-                try {
-                    byte tosbits = get_byte(n.group(1));
-                    OFActionNetworkTypeOfService action = new OFActionNetworkTypeOfService();
-                    action.setNetworkTypeOfService(tosbits);
-                    log.debug("  action {}", action);
-                    
-                    sa = new SubActionStruct();
-                    sa.action = action;
-                    sa.len = OFActionNetworkTypeOfService.MINIMUM_LENGTH;
-                }
-                catch (NumberFormatException e) {
-                    log.debug("Invalid dst-port in: {} (error ignored)", subaction);
-                    return null;
-                }
-            }
-        }
-        else {
-            log.debug("Invalid action: '{}'", subaction);
-            return null;
-        }
-
-        return sa;
-    }
-    
-    private static SubActionStruct decode_set_src_ip(String subaction, Logger log) {
-        SubActionStruct sa = null;
-        Matcher n = Pattern.compile("set-src-ip=(?:(\\d+)\\.(\\d+)\\.(\\d+)\\.(\\d+))").matcher(subaction);
-
-        if (n.matches()) {
-            int ipaddr = get_ip_addr(n, subaction, log);
-            OFActionNetworkLayerSource action = new OFActionNetworkLayerSource();
-            action.setNetworkAddress(ipaddr);
-            log.debug("  action {}", action);
-
-            sa = new SubActionStruct();
-            sa.action = action;
-            sa.len = OFActionNetworkLayerSource.MINIMUM_LENGTH;
-        }
-        else {
-            log.debug("Invalid action: '{}'", subaction);
-            return null;
-        }
-
-        return sa;
-    }
-
-    private static SubActionStruct decode_set_dst_ip(String subaction, Logger log) {
-        SubActionStruct sa = null;
-        Matcher n = Pattern.compile("set-dst-ip=(?:(\\d+)\\.(\\d+)\\.(\\d+)\\.(\\d+))").matcher(subaction);
-
-        if (n.matches()) {
-            int ipaddr = get_ip_addr(n, subaction, log);
-            OFActionNetworkLayerDestination action = new OFActionNetworkLayerDestination();
-            action.setNetworkAddress(ipaddr);
-            log.debug("action {}", action);
- 
-            sa = new SubActionStruct();
-            sa.action = action;
-            sa.len = OFActionNetworkLayerDestination.MINIMUM_LENGTH;
-        }
-        else {
-            log.debug("Invalid action: '{}'", subaction);
-            return null;
-        }
-
-        return sa;
-    }
-
-    private static SubActionStruct decode_set_src_port(String subaction, Logger log) {
-        SubActionStruct sa = null;
-        Matcher n = Pattern.compile("set-src-port=((?:0x)?\\d+)").matcher(subaction); 
-
-        if (n.matches()) {
-            if (n.group(1) != null) {
-                try {
-                    short portnum = get_short(n.group(1));
-                    OFActionTransportLayerSource action = new OFActionTransportLayerSource();
-                    action.setTransportPort(portnum);
-                    log.debug("action {}", action);
-                    
-                    sa = new SubActionStruct();
-                    sa.action = action;
-                    sa.len = OFActionTransportLayerSource.MINIMUM_LENGTH;;
-                }
-                catch (NumberFormatException e) {
-                    log.debug("Invalid src-port in: {} (error ignored)", subaction);
-                    return null;
-                }
-            }
-        }
-        else {
-            log.debug("Invalid action: '{}'", subaction);
-            return null;
-        }
-
-        return sa;
-    }
-
-    private static SubActionStruct decode_set_dst_port(String subaction, Logger log) {
-        SubActionStruct sa = null;
-        Matcher n = Pattern.compile("set-dst-port=((?:0x)?\\d+)").matcher(subaction);
-
-        if (n.matches()) {
-            if (n.group(1) != null) {
-                try {
-                    short portnum = get_short(n.group(1));
-                    OFActionTransportLayerDestination action = new OFActionTransportLayerDestination();
-                    action.setTransportPort(portnum);
-                    log.debug("action {}", action);
-                    
-                    sa = new SubActionStruct();
-                    sa.action = action;
-                    sa.len = OFActionTransportLayerDestination.MINIMUM_LENGTH;;
-                }
-                catch (NumberFormatException e) {
-                    log.debug("Invalid dst-port in: {} (error ignored)", subaction);
-                    return null;
-                }
-            }
-        }
-        else {
-            log.debug("Invalid action: '{}'", subaction);
-            return null;
-        }
-
-        return sa;
-    }
-
-    private static byte[] get_mac_addr(Matcher n, String subaction, Logger log) {
-        byte[] macaddr = new byte[6];
-        
-        for (int i=0; i<6; i++) {
-            if (n.group(i+1) != null) {
-                try {
-                    macaddr[i] = get_byte("0x" + n.group(i+1));
-                }
-                catch (NumberFormatException e) {
-                    log.debug("Invalid src-mac in: '{}' (error ignored)", subaction);
-                    return null;
-                }
-            }
-            else { 
-                log.debug("Invalid src-mac in: '{}' (null, error ignored)", subaction);
-                return null;
-            }
-        }
-        
-        return macaddr;
-    }
-    
-    private static int get_ip_addr(Matcher n, String subaction, Logger log) {
-        int ipaddr = 0;
-
-        for (int i=0; i<4; i++) {
-            if (n.group(i+1) != null) {
-                try {
-                    ipaddr = ipaddr<<8;
-                    ipaddr = ipaddr | get_int(n.group(i+1));
-                }
-                catch (NumberFormatException e) {
-                    log.debug("Invalid src-ip in: '{}' (error ignored)", subaction);
-                    return 0;
-                }
-            }
-            else {
-                log.debug("Invalid src-ip in: '{}' (null, error ignored)", subaction);
-                return 0;
-            }
-        }
-        
-        return ipaddr;
-    }
-    
-    // Parse int as decimal, hex (start with 0x or #) or octal (starts with 0)
-    private static int get_int(String str) {
-        return (int)Integer.decode(str);
-    }
-   
-    // Parse short as decimal, hex (start with 0x or #) or octal (starts with 0)
-    private static short get_short(String str) {
-        return (short)(int)Integer.decode(str);
-    }
-   
-    // Parse byte as decimal, hex (start with 0x or #) or octal (starts with 0)
-    private static byte get_byte(String str) {
-        return Integer.decode(str).byteValue();
-    }
-
-}
-
diff --git a/src/main/java/net/floodlightcontroller/staticflowentry/StaticFlowEntryPusher.java b/src/main/java/net/floodlightcontroller/staticflowentry/StaticFlowEntryPusher.java
deleted file mode 100644
index d816d66..0000000
--- a/src/main/java/net/floodlightcontroller/staticflowentry/StaticFlowEntryPusher.java
+++ /dev/null
@@ -1,679 +0,0 @@
-package net.floodlightcontroller.staticflowentry;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.Comparator;
-import java.util.Iterator;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Set;
-import java.util.concurrent.ConcurrentHashMap;
-
-import net.floodlightcontroller.core.FloodlightContext;
-import net.floodlightcontroller.core.IFloodlightProviderService;
-import net.floodlightcontroller.core.IFloodlightProviderService.Role;
-import net.floodlightcontroller.core.IHAListener;
-import net.floodlightcontroller.core.IOFMessageListener;
-import net.floodlightcontroller.core.IOFSwitch;
-import net.floodlightcontroller.core.IOFSwitchListener;
-import net.floodlightcontroller.core.annotations.LogMessageCategory;
-import net.floodlightcontroller.core.annotations.LogMessageDoc;
-import net.floodlightcontroller.core.module.FloodlightModuleContext;
-import net.floodlightcontroller.core.module.FloodlightModuleException;
-import net.floodlightcontroller.core.module.IFloodlightModule;
-import net.floodlightcontroller.core.module.IFloodlightService;
-import net.floodlightcontroller.core.util.AppCookie;
-import net.floodlightcontroller.restserver.IRestApiService;
-import net.floodlightcontroller.staticflowentry.web.StaticFlowEntryWebRoutable;
-import net.floodlightcontroller.staticflowentry.IStaticFlowEntryPusherService;
-import net.floodlightcontroller.storage.IResultSet;
-import net.floodlightcontroller.storage.IStorageSourceService;
-import net.floodlightcontroller.storage.IStorageSourceListener;
-
-import net.floodlightcontroller.storage.StorageException;
-import org.openflow.protocol.OFFlowMod;
-import org.openflow.protocol.OFFlowRemoved;
-import org.openflow.protocol.OFMatch;
-import org.openflow.protocol.OFMessage;
-import org.openflow.protocol.OFType;
-import org.openflow.protocol.factory.BasicFactory;
-import org.openflow.util.HexString;
-import org.openflow.util.U16;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-@LogMessageCategory("Static Flow Pusher")
-/**
- * This module is responsible for maintaining a set of static flows on
- * switches. This is just a big 'ol dumb list of flows and something external
- * is responsible for ensuring they make sense for the network.
- */
-public class StaticFlowEntryPusher 
-    implements IOFSwitchListener, IFloodlightModule, IStaticFlowEntryPusherService,
-        IStorageSourceListener, IOFMessageListener, IHAListener {
-    protected final static Logger log = LoggerFactory.getLogger(StaticFlowEntryPusher.class);
-    public static final String StaticFlowName = "staticflowentry";
-    
-    public static final int STATIC_FLOW_APP_ID = 10;
-
-    public static final String TABLE_NAME = "controller_staticflowtableentry";
-    public static final String COLUMN_NAME = "name";
-    public static final String COLUMN_SWITCH = "switch_id";
-    public static final String COLUMN_ACTIVE = "active";
-    public static final String COLUMN_IDLE_TIMEOUT = "idle_timeout";
-    public static final String COLUMN_HARD_TIMEOUT = "hard_timeout";
-    public static final String COLUMN_PRIORITY = "priority";
-    public static final String COLUMN_COOKIE = "cookie";
-    public static final String COLUMN_WILDCARD = "wildcards";
-    public static final String COLUMN_IN_PORT = "in_port";
-    public static final String COLUMN_DL_SRC = "dl_src";
-    public static final String COLUMN_DL_DST = "dl_dst";
-    public static final String COLUMN_DL_VLAN = "dl_vlan";
-    public static final String COLUMN_DL_VLAN_PCP = "dl_vlan_pcp";
-    public static final String COLUMN_DL_TYPE = "dl_type";
-    public static final String COLUMN_NW_TOS = "nw_tos";
-    public static final String COLUMN_NW_PROTO = "nw_proto";
-    public static final String COLUMN_NW_SRC = "nw_src"; // includes CIDR-style
-                                                         // netmask, e.g.
-                                                         // "128.8.128.0/24"
-    public static final String COLUMN_NW_DST = "nw_dst";
-    public static final String COLUMN_TP_DST = "tp_dst";
-    public static final String COLUMN_TP_SRC = "tp_src";
-    public static final String COLUMN_ACTIONS = "actions";
-    public static String ColumnNames[] = { COLUMN_NAME, COLUMN_SWITCH,
-            COLUMN_ACTIVE, COLUMN_IDLE_TIMEOUT, COLUMN_HARD_TIMEOUT,
-            COLUMN_PRIORITY, COLUMN_COOKIE, COLUMN_WILDCARD, COLUMN_IN_PORT,
-            COLUMN_DL_SRC, COLUMN_DL_DST, COLUMN_DL_VLAN, COLUMN_DL_VLAN_PCP,
-            COLUMN_DL_TYPE, COLUMN_NW_TOS, COLUMN_NW_PROTO, COLUMN_NW_SRC,
-            COLUMN_NW_DST, COLUMN_TP_DST, COLUMN_TP_SRC, COLUMN_ACTIONS };
- 
-
-    protected IFloodlightProviderService floodlightProvider;
-    protected IStorageSourceService storageSource;
-    protected IRestApiService restApi;
-
-    // Map<DPID, Map<Name, FlowMod>> ; FlowMod can be null to indicate non-active
-    protected Map<String, Map<String, OFFlowMod>> entriesFromStorage;
-    // Entry Name -> DPID of Switch it's on
-    protected Map<String, String> entry2dpid;
-
-    private BasicFactory ofMessageFactory;
-
-    // Class to sort FlowMod's by priority, from lowest to highest
-    class FlowModSorter implements Comparator<String> {
-        private String dpid;
-        public FlowModSorter(String dpid) {
-            this.dpid = dpid;
-        }
-        @Override
-        public int compare(String o1, String o2) {
-            OFFlowMod f1 = entriesFromStorage.get(dpid).get(o1);
-            OFFlowMod f2 = entriesFromStorage.get(dpid).get(o2);
-            if (f1 == null || f2 == null) // sort active=false flows by key
-                return o1.compareTo(o2);
-            return U16.f(f1.getPriority()) - U16.f(f2.getPriority());
-        }
-    };
-
-    /**
-     * used for debugging and unittests
-     * @return the number of static flow entries as cached from storage
-     */
-    public int countEntries() {
-        int size = 0;
-        if (entriesFromStorage == null)
-            return 0;
-        for (String ofswitch : entriesFromStorage.keySet())
-            size += entriesFromStorage.get(ofswitch).size();
-        return size;
-    }
-
-    public IFloodlightProviderService getFloodlightProvider() {
-        return floodlightProvider;
-    }
-
-    public void setFloodlightProvider(IFloodlightProviderService floodlightProvider) {
-        this.floodlightProvider = floodlightProvider;
-    }
-
-    public void setStorageSource(IStorageSourceService storageSource) {
-        this.storageSource = storageSource;
-    }
-
-    /**
-     * Reads from our entriesFromStorage for the specified switch and
-     * sends the FlowMods down to the controller in <b>sorted</b> order.
-     *
-     * Sorted is important to maintain correctness of the switch:
-     * if a packet would match both a lower and a higher priority
-     * rule, then we want it to match the higher priority or nothing,
-     * but never just the lower priority one.  Inserting from high to
-     * low priority fixes this.
-     *
-     * TODO consider adding a "block all" flow mod and then removing it
-     * while starting up.
-     *
-     * @param sw The switch to send entries to
-     */
-    protected void sendEntriesToSwitch(IOFSwitch sw) {
-        String dpid = sw.getStringId();
-
-        if ((entriesFromStorage != null) && (entriesFromStorage.containsKey(dpid))) {
-            Map<String, OFFlowMod> entries = entriesFromStorage.get(dpid);
-            List<String> sortedList = new ArrayList<String>(entries.keySet());
-            // weird that Collections.sort() returns void
-            Collections.sort( sortedList, new FlowModSorter(dpid));
-            for (String entryName : sortedList) {
-                OFFlowMod flowMod = entries.get(entryName);
-                if (flowMod != null) {
-                    if (log.isDebugEnabled()) {
-                        log.debug("Pushing static entry {} for {}", dpid, entryName);
-                    }
-                    writeFlowModToSwitch(sw, flowMod);
-                }
-            }
-        }
-    }
-    
-    /**
-     * Used only for bundle-local indexing
-     * 
-     * @param map
-     * @return
-     */
-
-    protected Map<String, String> computeEntry2DpidMap(
-                Map<String, Map<String, OFFlowMod>> map) {
-        Map<String, String> ret = new HashMap<String, String>();
-        for(String dpid : map.keySet()) {
-            for( String entry: map.get(dpid).keySet())
-                ret.put(entry, dpid);
-        }
-        return ret;
-    }
-    
-    /**
-     * Read entries from storageSource, and store them in a hash
-     * 
-     * @return
-     */
-    @LogMessageDoc(level="ERROR",
-            message="failed to access storage: {reason}",
-            explanation="Could not retrieve static flows from the system " +
-            		"database",
-            recommendation=LogMessageDoc.CHECK_CONTROLLER)
-    private Map<String, Map<String, OFFlowMod>> readEntriesFromStorage() {
-        Map<String, Map<String, OFFlowMod>> entries = new ConcurrentHashMap<String, Map<String, OFFlowMod>>();
-        try {
-            Map<String, Object> row;
-            // null1=no predicate, null2=no ordering
-            IResultSet resultSet = storageSource.executeQuery(TABLE_NAME,
-                    ColumnNames, null, null);
-            for (Iterator<IResultSet> it = resultSet.iterator(); it.hasNext();) {
-                row = it.next().getRow();
-                parseRow(row, entries);
-            }
-        } catch (StorageException e) {
-            log.error("failed to access storage: {}", e.getMessage());
-            // if the table doesn't exist, then wait to populate later via
-            // setStorageSource()
-        }
-        return entries;
-    }
-
-    /**
-     * Take a single row, turn it into a flowMod, and add it to the
-     * entries{$dpid}.{$entryName}=FlowMod 
-     * 
-     * IF an entry is in active, mark it with FlowMod = null
-     * 
-     * @param row
-     * @param entries
-     */
-
-    void parseRow(Map<String, Object> row,
-            Map<String, Map<String, OFFlowMod>> entries) {
-        String switchName = null;
-        String entryName = null;
-
-        StringBuffer matchString = new StringBuffer();
-        if (ofMessageFactory == null) // lazy init
-            ofMessageFactory = new BasicFactory();
-
-        OFFlowMod flowMod = (OFFlowMod) ofMessageFactory
-                .getMessage(OFType.FLOW_MOD);
-
-        if (!row.containsKey(COLUMN_SWITCH) || !row.containsKey(COLUMN_NAME)) {
-            log.debug(
-                    "skipping entry with missing required 'switch' or 'name' entry: {}",
-                    row);
-            return;
-        }
-        // most error checking done with ClassCastException
-        try {
-            // first, snag the required entries, for debugging info
-            switchName = (String) row.get(COLUMN_SWITCH);
-            entryName = (String) row.get(COLUMN_NAME);
-            if (!entries.containsKey(switchName))
-                entries.put(switchName, new HashMap<String, OFFlowMod>());
-            StaticFlowEntries.initDefaultFlowMod(flowMod, entryName);
-            
-            for (String key : row.keySet()) {
-                if (row.get(key) == null)
-                    continue;
-                if ( key.equals(COLUMN_SWITCH) || key.equals(COLUMN_NAME)
-                        || key.equals("id"))
-                    continue; // already handled
-                // explicitly ignore timeouts and wildcards
-                if ( key.equals(COLUMN_HARD_TIMEOUT) || key.equals(COLUMN_IDLE_TIMEOUT) ||
-                        key.equals(COLUMN_WILDCARD))
-                    continue;
-                if ( key.equals(COLUMN_ACTIVE)) {
-                    if  (! Boolean.valueOf((String) row.get(COLUMN_ACTIVE))) {
-                        log.debug("skipping inactive entry {} for switch {}",
-                                entryName, switchName);
-                        entries.get(switchName).put(entryName, null);  // mark this an inactive
-                        return;
-                    }
-                } else if ( key.equals(COLUMN_ACTIONS)){
-                    StaticFlowEntries.parseActionString(flowMod, (String) row.get(COLUMN_ACTIONS), log);
-                } else if ( key.equals(COLUMN_COOKIE)) {
-                    flowMod.setCookie(
-                            StaticFlowEntries.computeEntryCookie(flowMod, 
-                                    Integer.valueOf((String) row.get(COLUMN_COOKIE)), 
-                                    entryName)
-                        );
-                } else if ( key.equals(COLUMN_PRIORITY)) {
-                    flowMod.setPriority(U16.t(Integer.valueOf((String) row.get(COLUMN_PRIORITY))));
-                } else { // the rest of the keys are for OFMatch().fromString()
-                    if (matchString.length() > 0)
-                        matchString.append(",");
-                    matchString.append(key + "=" + row.get(key).toString());
-                }
-            }
-        } catch (ClassCastException e) {
-            if (entryName != null && switchName != null)
-                log.debug(
-                        "skipping entry {} on switch {} with bad data : "
-                                + e.getMessage(), entryName, switchName);
-            else
-                log.debug("skipping entry with bad data: {} :: {} ",
-                        e.getMessage(), e.getStackTrace());
-        }
-
-        OFMatch ofMatch = new OFMatch();
-        String match = matchString.toString();
-        try {
-            ofMatch.fromString(match);
-        } catch (IllegalArgumentException e) {
-            log.debug(
-                    "ignoring flow entry {} on switch {} with illegal OFMatch() key: "
-                            + match, entryName, switchName);
-            return;
-        }
-        flowMod.setMatch(ofMatch);
-
-        entries.get(switchName).put(entryName, flowMod);
-    }
-    
-    @Override
-    public void addedSwitch(IOFSwitch sw) {
-        log.debug("addedSwitch {}; processing its static entries", sw);
-        sendEntriesToSwitch(sw);
-    }
-
-    @Override
-    public void removedSwitch(IOFSwitch sw) {
-        log.debug("removedSwitch {}", sw);
-        // do NOT delete from our internal state; we're tracking the rules,
-        // not the switches
-    }
-    
-    @Override
-    public void switchPortChanged(Long switchId) {
-        // no-op
-    }
-
-    /**
-     * This handles both rowInsert() and rowUpdate()
-     */
-    
-    @Override
-    public void rowsModified(String tableName, Set<Object> rowKeys) {
-        log.debug("Modifying Table {}", tableName);
-
-        HashMap<String, Map<String, OFFlowMod>> entriesToAdd = 
-            new HashMap<String, Map<String, OFFlowMod>>();
-        // build up list of what was added 
-        for(Object key: rowKeys) {
-            IResultSet resultSet = storageSource.getRow(tableName, key);
-            for (Iterator<IResultSet> it = resultSet.iterator(); it.hasNext();) {
-                Map<String, Object> row = it.next().getRow();
-                parseRow(row, entriesToAdd);
-            }            
-        }
-        // batch updates by switch and blast them out
-        for (String dpid : entriesToAdd.keySet()) {
-            if (!entriesFromStorage.containsKey(dpid))
-                entriesFromStorage.put(dpid, new HashMap<String, OFFlowMod>());
-            List<OFMessage> outQueue = new ArrayList<OFMessage>();
-            for(String entry : entriesToAdd.get(dpid).keySet()) {
-                OFFlowMod newFlowMod = entriesToAdd.get(dpid).get(entry);
-                OFFlowMod oldFlowMod = entriesFromStorage.get(dpid).get(entry);
-                if (oldFlowMod != null) {  // remove any pre-existing rule
-                    oldFlowMod.setCommand(OFFlowMod.OFPFC_DELETE_STRICT);
-                    outQueue.add(oldFlowMod);
-                }
-                if (newFlowMod != null) {
-                    entriesFromStorage.get(dpid).put(entry, newFlowMod);
-                    outQueue.add(newFlowMod);
-                    entry2dpid.put(entry, dpid);
-                } else {
-                    entriesFromStorage.get(dpid).remove(entry);
-                    entry2dpid.remove(entry);
-                }
-            }
-            
-            writeOFMessagesToSwitch(HexString.toLong(dpid), outQueue);
-        }
-    }
-
-    @Override
-    public void rowsDeleted(String tableName, Set<Object> rowKeys) {
-        if (log.isDebugEnabled()) {
-            log.debug("deleting from Table {}", tableName);
-        }
-        
-        for(Object obj : rowKeys) {
-            if (!(obj instanceof String)) {
-                log.debug("tried to delete non-string key {}; ignoring", obj);
-                continue;
-            }
-            deleteStaticFlowEntry((String) obj);
-        }
-    }
-    
-    @LogMessageDoc(level="ERROR",
-            message="inconsistent internal state: no switch has rule {rule}",
-            explanation="Inconsistent internat state discovered while " +
-            		"deleting a static flow rule",
-            recommendation=LogMessageDoc.REPORT_CONTROLLER_BUG)
-    private boolean deleteStaticFlowEntry(String entryName) {
-        String dpid = entry2dpid.get(entryName);
-        if (log.isDebugEnabled()) {
-            log.debug("Deleting flow {} for switch {}", entryName, dpid);
-        }
-        if (dpid == null) {
-            log.error("inconsistent internal state: no switch has rule {}",
-                    entryName);
-            return false;
-        }
-        
-        // send flow_mod delete
-        OFFlowMod flowMod = entriesFromStorage.get(dpid).get(entryName);
-        flowMod.setCommand(OFFlowMod.OFPFC_DELETE_STRICT);
-
-        if (entriesFromStorage.containsKey(dpid) && 
-                entriesFromStorage.get(dpid).containsKey(entryName)) {
-            entriesFromStorage.get(dpid).remove(entryName);
-        } else { 
-            log.debug("Tried to delete non-existent entry {} for switch {}", 
-                    entryName, dpid);
-            return false;
-        }
-        
-        writeFlowModToSwitch(HexString.toLong(dpid), flowMod);
-        return true;
-    }
-    
-    /**
-     * Writes a list of OFMessages to a switch
-     * @param dpid The datapath ID of the switch to write to
-     * @param messages The list of OFMessages to write.
-     */
-    @LogMessageDoc(level="ERROR",
-            message="Tried to write to switch {switch} but got {error}",
-            explanation="An I/O error occured while trying to write a " +
-            		"static flow to a switch",
-            recommendation=LogMessageDoc.CHECK_SWITCH)
-    private void writeOFMessagesToSwitch(long dpid, List<OFMessage> messages) {
-        IOFSwitch ofswitch = floodlightProvider.getSwitches().get(dpid);
-        if (ofswitch != null) {  // is the switch connected
-            try {
-                if (log.isDebugEnabled()) {
-                    log.debug("Sending {} new entries to {}", messages.size(), dpid);
-                }
-                ofswitch.write(messages, null);
-                ofswitch.flush();
-            } catch (IOException e) {
-                log.error("Tried to write to switch {} but got {}", dpid, e.getMessage());
-            }
-        }
-    }
-    
-    /**
-     * Writes an OFFlowMod to a switch. It checks to make sure the switch
-     * exists before it sends
-     * @param dpid The data  to write the flow mod to
-     * @param flowMod The OFFlowMod to write
-     */
-    private void writeFlowModToSwitch(long dpid, OFFlowMod flowMod) {
-        Map<Long,IOFSwitch> switches = floodlightProvider.getSwitches();
-        IOFSwitch ofSwitch = switches.get(dpid);
-        if (ofSwitch == null) {
-            if (log.isDebugEnabled()) {
-                log.debug("Not deleting key {} :: switch {} not connected", 
-                          dpid);
-            }
-            return;
-        }
-        writeFlowModToSwitch(ofSwitch, flowMod);
-    }
-    
-    /**
-     * Writes an OFFlowMod to a switch
-     * @param sw The IOFSwitch to write to
-     * @param flowMod The OFFlowMod to write
-     */
-    @LogMessageDoc(level="ERROR",
-            message="Tried to write OFFlowMod to {switch} but got {error}",
-            explanation="An I/O error occured while trying to write a " +
-                    "static flow to a switch",
-            recommendation=LogMessageDoc.CHECK_SWITCH)
-    private void writeFlowModToSwitch(IOFSwitch sw, OFFlowMod flowMod) {
-        try {
-            sw.write(flowMod, null);
-            sw.flush();
-        } catch (IOException e) {
-            log.error("Tried to write OFFlowMod to {} but failed: {}", 
-                    HexString.toHexString(sw.getId()), e.getMessage());
-        }
-    }
-
-    @Override
-    public String getName() {
-        return StaticFlowName;
-    }
-    
-    @Override
-    @LogMessageDoc(level="ERROR",
-        message="Got a FlowRemove message for a infinite " +
-                "timeout flow: {flow} from switch {switch}",
-        explanation="Flows with infinite timeouts should not expire. " +
-        		"The switch has expired the flow anyway.",
-        recommendation=LogMessageDoc.REPORT_SWITCH_BUG)
-    public Command receive(IOFSwitch sw, OFMessage msg, FloodlightContext cntx) {
-        switch (msg.getType()) {
-        case FLOW_REMOVED:
-            break;
-        default:
-            return Command.CONTINUE;
-        }
-        OFFlowRemoved flowRemoved = (OFFlowRemoved) msg;
-        long cookie = flowRemoved.getCookie();
-        /**
-         * This is just to sanity check our assumption that static flows 
-         * never expire.
-         */
-        if( AppCookie.extractApp(cookie) == STATIC_FLOW_APP_ID) {
-            if (flowRemoved.getReason() != 
-                    OFFlowRemoved.OFFlowRemovedReason.OFPRR_DELETE)
-                log.error("Got a FlowRemove message for a infinite " +
-                		  "timeout flow: {} from switch {}", msg, sw);
-            return Command.STOP;    // only for us
-        } else
-            return Command.CONTINUE;
-    }
-
-    @Override
-    public boolean isCallbackOrderingPrereq(OFType type, String name) {
-        return false;  // no dependency for non-packet in
-    }
-
-    @Override
-    public boolean isCallbackOrderingPostreq(OFType type, String name) {
-        return false;  // no dependency for non-packet in
-    }
-
-    // IFloodlightModule
-    
-    @Override
-    public Collection<Class<? extends IFloodlightService>> getModuleServices() {
-        Collection<Class<? extends IFloodlightService>> l = 
-                new ArrayList<Class<? extends IFloodlightService>>();
-        l.add(IStaticFlowEntryPusherService.class);
-        return l;
-    }
-
-    @Override
-    public Map<Class<? extends IFloodlightService>, IFloodlightService> getServiceImpls() {
-        Map<Class<? extends IFloodlightService>,
-            IFloodlightService> m = 
-                new HashMap<Class<? extends IFloodlightService>,
-                    IFloodlightService>();
-        m.put(IStaticFlowEntryPusherService.class, this);
-        return m;
-    }
-
-    @Override
-    public Collection<Class<? extends IFloodlightService>> getModuleDependencies() {
-        Collection<Class<? extends IFloodlightService>> l = 
-                new ArrayList<Class<? extends IFloodlightService>>();
-        l.add(IFloodlightProviderService.class);
-        l.add(IStorageSourceService.class);
-        l.add(IRestApiService.class);
-        return l;
-    }
-
-    @Override
-    public void init(FloodlightModuleContext context)
-            throws FloodlightModuleException {
-        floodlightProvider =
-            context.getServiceImpl(IFloodlightProviderService.class);
-        storageSource =
-            context.getServiceImpl(IStorageSourceService.class);
-        restApi =
-            context.getServiceImpl(IRestApiService.class);
-    }
-
-    @Override
-    public void startUp(FloodlightModuleContext context) {        
-        floodlightProvider.addOFMessageListener(OFType.FLOW_REMOVED, this);
-        floodlightProvider.addOFSwitchListener(this);
-        floodlightProvider.addHAListener(this);
-        
-        // assumes no switches connected at startup()
-        storageSource.createTable(TABLE_NAME, null);
-        storageSource.setTablePrimaryKeyName(TABLE_NAME, COLUMN_NAME);
-        storageSource.addListener(TABLE_NAME, this);
-        entriesFromStorage = readEntriesFromStorage(); 
-        entry2dpid = computeEntry2DpidMap(entriesFromStorage);
-        restApi.addRestletRoutable(new StaticFlowEntryWebRoutable());
-    }
-
-    // IStaticFlowEntryPusherService methods
-    
-    @Override
-    public void addFlow(String name, OFFlowMod fm, String swDpid) {
-        Map<String, Object> fmMap = StaticFlowEntries.flowModToStorageEntry(fm, swDpid, name);
-        entry2dpid.put(name, swDpid);
-        Map<String, OFFlowMod> switchEntries = entriesFromStorage.get(swDpid);
-        if (switchEntries == null) {
-            switchEntries = new HashMap<String, OFFlowMod>();
-            entriesFromStorage.put(swDpid, switchEntries);
-        }
-        switchEntries.put(name, fm);
-        storageSource.insertRowAsync(TABLE_NAME, fmMap);
-    }
-
-    @Override
-    public void deleteFlow(String name) {
-        storageSource.deleteRowAsync(TABLE_NAME, name);
-        // TODO - What if there is a delay in storage?
-    }
-    
-    @Override
-    public void deleteAllFlows() {
-        for (String entry : entry2dpid.keySet()) {
-            deleteFlow(entry);
-        }
-    }
-    
-    @Override
-    public void deleteFlowsForSwitch(long dpid) {
-        String sDpid = HexString.toHexString(dpid);
-        
-        for (Entry<String, String> e : entry2dpid.entrySet()) {
-            if (e.getValue().equals(sDpid))
-                deleteFlow(e.getKey());
-        }
-    }
-    
-    @Override
-    public Map<String, Map<String, OFFlowMod>> getFlows() {
-        return entriesFromStorage;
-    }
-    
-    @Override
-    public Map<String, OFFlowMod> getFlows(String dpid) {
-        return entriesFromStorage.get(dpid);
-    }
-
-    
-    // IHAListener
-    
-    @Override
-    public void roleChanged(Role oldRole, Role newRole) {
-        switch(newRole) {
-            case MASTER:
-                if (oldRole == Role.SLAVE) {
-                    log.debug("Re-reading static flows from storage due " +
-                            "to HA change from SLAVE->MASTER");
-                    entriesFromStorage = readEntriesFromStorage(); 
-                    entry2dpid = computeEntry2DpidMap(entriesFromStorage);
-                }
-                break;
-            case SLAVE:
-                log.debug("Clearing in-memory flows due to " +
-                        "HA change to SLAVE");
-                entry2dpid.clear();
-                entriesFromStorage.clear();
-                break;
-            default:
-            	break;
-        }
-    }
-    
-    @Override
-    public void controllerNodeIPsChanged(
-            Map<String, String> curControllerNodeIPs,
-            Map<String, String> addedControllerNodeIPs,
-            Map<String, String> removedControllerNodeIPs) {
-        // ignore
-    }
-     
-}
diff --git a/src/main/java/net/floodlightcontroller/staticflowentry/web/ClearStaticFlowEntriesResource.java b/src/main/java/net/floodlightcontroller/staticflowentry/web/ClearStaticFlowEntriesResource.java
deleted file mode 100644
index f103e99..0000000
--- a/src/main/java/net/floodlightcontroller/staticflowentry/web/ClearStaticFlowEntriesResource.java
+++ /dev/null
@@ -1,38 +0,0 @@
-package net.floodlightcontroller.staticflowentry.web;
-
-import net.floodlightcontroller.core.web.ControllerSwitchesResource;
-import net.floodlightcontroller.staticflowentry.IStaticFlowEntryPusherService;
-
-import org.openflow.util.HexString;
-import org.restlet.data.Status;
-import org.restlet.resource.Get;
-import org.restlet.resource.ServerResource;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class ClearStaticFlowEntriesResource extends ServerResource {
-    protected final static Logger log = LoggerFactory.getLogger(ClearStaticFlowEntriesResource.class);
-    
-    @Get
-    public void ClearStaticFlowEntries() {
-        IStaticFlowEntryPusherService sfpService =
-                (IStaticFlowEntryPusherService)getContext().getAttributes().
-                    get(IStaticFlowEntryPusherService.class.getCanonicalName());
-        
-        String param = (String) getRequestAttributes().get("switch");
-        if (log.isDebugEnabled())
-            log.debug("Clearing all static flow entires for switch: " + param);
-        
-        if (param.toLowerCase().equals("all")) {
-            sfpService.deleteAllFlows();
-        } else {
-            try {
-                sfpService.deleteFlowsForSwitch(HexString.toLong(param));
-            } catch (NumberFormatException e){
-                setStatus(Status.CLIENT_ERROR_BAD_REQUEST, 
-                          ControllerSwitchesResource.DPID_ERROR);
-                return;
-            }
-        }
-    }
-}
diff --git a/src/main/java/net/floodlightcontroller/staticflowentry/web/ListStaticFlowEntriesResource.java b/src/main/java/net/floodlightcontroller/staticflowentry/web/ListStaticFlowEntriesResource.java
deleted file mode 100644
index 2bb53ba..0000000
--- a/src/main/java/net/floodlightcontroller/staticflowentry/web/ListStaticFlowEntriesResource.java
+++ /dev/null
@@ -1,45 +0,0 @@
-package net.floodlightcontroller.staticflowentry.web;
-
-import java.util.HashMap;
-import java.util.Map;
-
-import net.floodlightcontroller.core.web.ControllerSwitchesResource;
-import net.floodlightcontroller.staticflowentry.IStaticFlowEntryPusherService;
-
-import org.openflow.protocol.OFFlowMod;
-import org.restlet.data.Status;
-import org.restlet.resource.Get;
-import org.restlet.resource.ServerResource;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class ListStaticFlowEntriesResource extends ServerResource {
-    protected final static Logger log = LoggerFactory.getLogger(ListStaticFlowEntriesResource.class);
-    
-    @Get
-    public Map<String, Map<String, OFFlowMod>> ListStaticFlowEntries() {
-        IStaticFlowEntryPusherService sfpService =
-                (IStaticFlowEntryPusherService)getContext().getAttributes().
-                    get(IStaticFlowEntryPusherService.class.getCanonicalName());
-        
-        String param = (String) getRequestAttributes().get("switch");
-        if (log.isDebugEnabled())
-            log.debug("Listing all static flow entires for switch: " + param);
-        
-        if (param.toLowerCase().equals("all")) {
-            return sfpService.getFlows();
-        } else {
-            try {
-                Map<String, Map<String, OFFlowMod>> retMap = 
-                        new HashMap<String, Map<String, OFFlowMod>>();
-                retMap.put(param, sfpService.getFlows(param));
-                return retMap;
-                
-            } catch (NumberFormatException e){
-                setStatus(Status.CLIENT_ERROR_BAD_REQUEST, 
-                          ControllerSwitchesResource.DPID_ERROR);
-            }
-        }
-        return null;
-    }
-}
diff --git a/src/main/java/net/floodlightcontroller/staticflowentry/web/StaticFlowEntryPusherResource.java b/src/main/java/net/floodlightcontroller/staticflowentry/web/StaticFlowEntryPusherResource.java
deleted file mode 100644
index 2886a58..0000000
--- a/src/main/java/net/floodlightcontroller/staticflowentry/web/StaticFlowEntryPusherResource.java
+++ /dev/null
@@ -1,142 +0,0 @@
-/**
-*    Copyright 2011, Big Switch Networks, Inc. 
-*    Originally created by David Erickson, Stanford University
-* 
-*    Licensed under the Apache License, Version 2.0 (the "License"); you may
-*    not use this file except in compliance with the License. You may obtain
-*    a copy of the License at
-*
-*         http://www.apache.org/licenses/LICENSE-2.0
-*
-*    Unless required by applicable law or agreed to in writing, software
-*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-*    License for the specific language governing permissions and limitations
-*    under the License.
-**/
-
-package net.floodlightcontroller.staticflowentry.web;
-
-import java.io.IOException;
-import java.util.Map;
-
-import org.restlet.resource.Delete;
-import org.restlet.resource.Post;
-import org.restlet.resource.ServerResource;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-
-import net.floodlightcontroller.core.annotations.LogMessageCategory;
-import net.floodlightcontroller.core.annotations.LogMessageDoc;
-import net.floodlightcontroller.staticflowentry.StaticFlowEntries;
-import net.floodlightcontroller.staticflowentry.StaticFlowEntryPusher;
-import net.floodlightcontroller.storage.IStorageSourceService;
-
-/**
- * Pushes a static flow entry to the storage source
- * @author alexreimers
- *
- */
-@LogMessageCategory("Static Flow Pusher")
-public class StaticFlowEntryPusherResource extends ServerResource {
-    protected final static Logger log = LoggerFactory.getLogger(StaticFlowEntryPusherResource.class);
-    
-    /**
-     * Checks to see if the user matches IP information without
-     * checking for the correct ether-type (2048).
-     * @param rows The Map that is a string representation of
-     * the static flow.
-     * @reutrn True if they checked the ether-type, false otherwise
-     */
-    private boolean checkMatchIp(Map<String, Object> rows) {
-        boolean matchEther = false;
-        String val = (String) rows.get(StaticFlowEntryPusher.COLUMN_DL_TYPE);
-        if (val != null) {
-            int type = 0;
-            // check both hex and decimal
-            if (val.startsWith("0x")) {
-                type = Integer.parseInt(val.substring(2), 16);
-            } else {
-                try {
-                    type = Integer.parseInt(val);
-                } catch (NumberFormatException e) { /* fail silently */}
-            }
-            if (type == 2048) matchEther = true;
-        }
-        
-        if ((rows.containsKey(StaticFlowEntryPusher.COLUMN_NW_DST) || 
-                rows.containsKey(StaticFlowEntryPusher.COLUMN_NW_SRC) ||
-                rows.containsKey(StaticFlowEntryPusher.COLUMN_NW_PROTO) ||
-                rows.containsKey(StaticFlowEntryPusher.COLUMN_NW_TOS)) &&
-                (matchEther == false))
-            return false;
-        
-        return true;
-    }
-    
-    /**
-     * Takes a Static Flow Pusher string in JSON format and parses it into
-     * our database schema then pushes it to the database.
-     * @param fmJson The Static Flow Pusher entry in JSON format.
-     * @return A string status message
-     */
-    @Post
-    @LogMessageDoc(level="ERROR",
-        message="Error parsing push flow mod request: {request}",
-        explanation="An invalid request was sent to static flow pusher",
-        recommendation="Fix the format of the static flow mod request")
-    public String store(String fmJson) {
-        IStorageSourceService storageSource =
-                (IStorageSourceService)getContext().getAttributes().
-                    get(IStorageSourceService.class.getCanonicalName());
-        
-        Map<String, Object> rowValues;
-        try {
-            rowValues = StaticFlowEntries.jsonToStorageEntry(fmJson);
-            String status = null;
-            if (!checkMatchIp(rowValues)) {
-                status = "Warning! Pushing a static flow entry that matches IP " +
-                        "fields without matching for IP payload (ether-type 2048) will cause " +
-                        "the switch to wildcard higher level fields.";
-                log.error(status);
-            } else {
-                status = "Entry pushed";
-            }
-            storageSource.insertRowAsync(StaticFlowEntryPusher.TABLE_NAME, rowValues);
-            return ("{\"status\" : \"" + status + "\"}");
-        } catch (IOException e) {
-            log.error("Error parsing push flow mod request: " + fmJson, e);
-            e.printStackTrace();
-            return "{\"status\" : \"Error! Could not parse flod mod, see log for details.\"}";
-        }
-    }
-    
-    @Delete
-    @LogMessageDoc(level="ERROR",
-        message="Error deleting flow mod request: {request}",
-        explanation="An invalid delete request was sent to static flow pusher",
-        recommendation="Fix the format of the static flow mod request")
-    public String del(String fmJson) {
-        IStorageSourceService storageSource =
-                (IStorageSourceService)getContext().getAttributes().
-                    get(IStorageSourceService.class.getCanonicalName());
-        String fmName = null;
-        if (fmJson == null) {
-            return "{\"status\" : \"Error! No data posted.\"}";
-        }
-        try {
-            fmName = StaticFlowEntries.getEntryNameFromJson(fmJson);
-            if (fmName == null) {
-                return "{\"status\" : \"Error deleting entry, no name provided\"}";
-            }
-        } catch (IOException e) {
-            log.error("Error deleting flow mod request: " + fmJson, e);
-            e.printStackTrace();
-            return "{\"status\" : \"Error deleting entry, see log for details\"}";
-        }
-        
-        storageSource.deleteRowAsync(StaticFlowEntryPusher.TABLE_NAME, fmName);
-        return "{\"status\" : \"Entry " + fmName + " deleted\"}";
-    }
-}
diff --git a/src/main/java/net/floodlightcontroller/staticflowentry/web/StaticFlowEntryWebRoutable.java b/src/main/java/net/floodlightcontroller/staticflowentry/web/StaticFlowEntryWebRoutable.java
deleted file mode 100644
index b5a6fe1..0000000
--- a/src/main/java/net/floodlightcontroller/staticflowentry/web/StaticFlowEntryWebRoutable.java
+++ /dev/null
@@ -1,29 +0,0 @@
-package net.floodlightcontroller.staticflowentry.web;
-
-import net.floodlightcontroller.restserver.RestletRoutable;
-
-import org.restlet.Context;
-import org.restlet.Restlet;
-import org.restlet.routing.Router;
-
-public class StaticFlowEntryWebRoutable implements RestletRoutable {
-    /**
-     * Create the Restlet router and bind to the proper resources.
-     */
-    @Override
-    public Restlet getRestlet(Context context) {
-        Router router = new Router(context);
-        router.attach("/json", StaticFlowEntryPusherResource.class);
-        router.attach("/clear/{switch}/json", ClearStaticFlowEntriesResource.class);
-        router.attach("/list/{switch}/json", ListStaticFlowEntriesResource.class);
-        return router;
-    }
-
-    /**
-     * Set the base path for the Topology
-     */
-    @Override
-    public String basePath() {
-        return "/wm/staticflowentrypusher";
-    }
-}
diff --git a/src/main/java/net/floodlightcontroller/storage/AbstractStorageSource.java b/src/main/java/net/floodlightcontroller/storage/AbstractStorageSource.java
deleted file mode 100644
index 20d6599..0000000
--- a/src/main/java/net/floodlightcontroller/storage/AbstractStorageSource.java
+++ /dev/null
@@ -1,534 +0,0 @@
-/**
-*    Copyright 2011, Big Switch Networks, Inc. 
-*    Originally created by David Erickson, Stanford University
-* 
-*    Licensed under the Apache License, Version 2.0 (the "License"); you may
-*    not use this file except in compliance with the License. You may obtain
-*    a copy of the License at
-*
-*         http://www.apache.org/licenses/LICENSE-2.0
-*
-*    Unless required by applicable law or agreed to in writing, software
-*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-*    License for the specific language governing permissions and limitations
-*    under the License.
-**/
-
-package net.floodlightcontroller.storage;
-
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.Callable;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.CopyOnWriteArraySet;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.Future;
-
-import net.floodlightcontroller.core.annotations.LogMessageCategory;
-import net.floodlightcontroller.core.annotations.LogMessageDoc;
-import net.floodlightcontroller.core.module.FloodlightModuleContext;
-import net.floodlightcontroller.core.module.FloodlightModuleException;
-import net.floodlightcontroller.core.module.IFloodlightModule;
-import net.floodlightcontroller.core.module.IFloodlightService;
-import net.floodlightcontroller.counter.ICounter;
-import net.floodlightcontroller.counter.CounterStore;
-import net.floodlightcontroller.counter.ICounterStoreService;
-import net.floodlightcontroller.counter.CounterValue.CounterType;
-import net.floodlightcontroller.restserver.IRestApiService;
-import net.floodlightcontroller.storage.web.StorageWebRoutable;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-@LogMessageCategory("System Database")
-public abstract class AbstractStorageSource 
-    implements IStorageSourceService, IFloodlightModule {
-    protected final static Logger logger = LoggerFactory.getLogger(AbstractStorageSource.class);
-
-    // Shared instance of the executor to use to execute the storage tasks.
-    // We make this a single threaded executor, because if we used a thread pool
-    // then storage operations could be executed out of order which would cause
-    // problems in some cases (e.g. delete and update of a row getting reordered).
-    // If we wanted to make this more multi-threaded we could have multiple
-    // worker threads/executors with affinity of operations on a given table
-    // to a single worker thread. But for now, we'll keep it simple and just have
-    // a single thread for all operations.
-    protected static ExecutorService defaultExecutorService = Executors.newSingleThreadExecutor();
-
-    protected final static String STORAGE_QUERY_COUNTER_NAME = "StorageQuery";
-    protected final static String STORAGE_UPDATE_COUNTER_NAME = "StorageUpdate";
-    protected final static String STORAGE_DELETE_COUNTER_NAME = "StorageDelete";
-    
-    protected Set<String> allTableNames = new CopyOnWriteArraySet<String>();
-    protected ICounterStoreService counterStore;
-    protected ExecutorService executorService = defaultExecutorService;
-    protected IStorageExceptionHandler exceptionHandler;
-
-    private Map<String, Set<IStorageSourceListener>> listeners =
-        new ConcurrentHashMap<String, Set<IStorageSourceListener>>();
-
-    // Our dependencies
-    protected IRestApiService restApi = null;
-    
-    protected static final String DB_ERROR_EXPLANATION =
-            "An unknown error occurred while executing asynchronous " +
-            "database operation";
-    
-    @LogMessageDoc(level="ERROR",
-            message="Failure in asynchronous call to executeQuery",
-            explanation=DB_ERROR_EXPLANATION,
-            recommendation=LogMessageDoc.GENERIC_ACTION)
-    abstract class StorageCallable<V> implements Callable<V> {
-        public V call() {
-            try {
-                return doStorageOperation();
-            }
-            catch (StorageException e) {
-                logger.error("Failure in asynchronous call to executeQuery", e);
-                if (exceptionHandler != null)
-                    exceptionHandler.handleException(e);
-                throw e;
-            }
-        }
-        abstract protected V doStorageOperation();
-    }
-    
-    @LogMessageDoc(level="ERROR",
-            message="Failure in asynchronous call to updateRows",
-            explanation=DB_ERROR_EXPLANATION,
-            recommendation=LogMessageDoc.GENERIC_ACTION)
-    abstract class StorageRunnable implements Runnable {
-        public void run() {
-            try {
-                doStorageOperation();
-            }
-            catch (StorageException e) {
-                logger.error("Failure in asynchronous call to updateRows", e);
-                if (exceptionHandler != null)
-                    exceptionHandler.handleException(e);
-                throw e;
-            }
-        }
-        abstract void doStorageOperation();
-    }
-    
-    public AbstractStorageSource() {
-        this.executorService = defaultExecutorService;
-    }
-
-    public void setExecutorService(ExecutorService executorService) {
-        this.executorService = (executorService != null) ?
-                executorService : defaultExecutorService;
-    }
-    
-    @Override
-    public void setExceptionHandler(IStorageExceptionHandler exceptionHandler) {
-        this.exceptionHandler = exceptionHandler;
-    }
-    
-    @Override
-    public abstract void setTablePrimaryKeyName(String tableName, String primaryKeyName);
-
-    @Override
-    public void createTable(String tableName, Set<String> indexedColumns) {
-        allTableNames.add(tableName);
-    }
-
-    @Override
-    public Set<String> getAllTableNames() {
-        return allTableNames;
-    }
-    
-    public void setCounterStore(CounterStore counterStore) {
-        this.counterStore = counterStore;
-    }
-    
-    protected void updateCounters(String baseName, String tableName) {
-        if (counterStore != null) {
-            String counterName;
-            if (tableName != null) {
-                updateCounters(baseName, null);
-                counterName = baseName + CounterStore.TitleDelimitor + tableName;
-            } else {
-                counterName = baseName;
-            }
-            ICounter counter = counterStore.getCounter(counterName);
-            if (counter == null) {
-                counter = counterStore.createCounter(counterName, CounterType.LONG);
-            }
-            counter.increment();
-        }
-    }
-    
-    @Override
-    public abstract IQuery createQuery(String tableName, String[] columnNames,
-            IPredicate predicate, RowOrdering ordering);
-
-    @Override
-    public IResultSet executeQuery(IQuery query) {
-        updateCounters(STORAGE_QUERY_COUNTER_NAME, query.getTableName());
-        return executeQueryImpl(query);
-    }
-    
-    protected abstract IResultSet executeQueryImpl(IQuery query);
-
-    @Override
-    public IResultSet executeQuery(String tableName, String[] columnNames,
-            IPredicate predicate, RowOrdering ordering) {
-        IQuery query = createQuery(tableName, columnNames, predicate, ordering);
-        IResultSet resultSet = executeQuery(query);
-        return resultSet;
-    }
-
-    @Override
-    public Object[] executeQuery(String tableName, String[] columnNames,
-            IPredicate predicate, RowOrdering ordering, IRowMapper rowMapper) {
-        List<Object> objectList = new ArrayList<Object>();
-        IResultSet resultSet = executeQuery(tableName, columnNames, predicate, ordering);
-        while (resultSet.next()) {
-            Object object = rowMapper.mapRow(resultSet);
-            objectList.add(object);
-        }
-        return objectList.toArray();
-    }
-    
-    @Override
-    public Future<IResultSet> executeQueryAsync(final IQuery query) {
-        Future<IResultSet> future = executorService.submit(
-            new StorageCallable<IResultSet>() {
-                public IResultSet doStorageOperation() {
-                    return executeQuery(query);
-                }
-            });
-        return future;
-    }
-
-    @Override
-    public Future<IResultSet> executeQueryAsync(final String tableName,
-            final String[] columnNames,  final IPredicate predicate,
-            final RowOrdering ordering) {
-        Future<IResultSet> future = executorService.submit(
-            new StorageCallable<IResultSet>() {
-                public IResultSet doStorageOperation() {
-                    return executeQuery(tableName, columnNames,
-                            predicate, ordering);
-                }
-            });
-        return future;
-    }
-
-    @Override
-    public Future<Object[]> executeQueryAsync(final String tableName,
-            final String[] columnNames,  final IPredicate predicate,
-            final RowOrdering ordering, final IRowMapper rowMapper) {
-        Future<Object[]> future = executorService.submit(
-            new StorageCallable<Object[]>() {
-                public Object[] doStorageOperation() {
-                    return executeQuery(tableName, columnNames, predicate,
-                            ordering, rowMapper);
-                }
-            });
-        return future;
-    }
-
-    @Override
-    public Future<?> insertRowAsync(final String tableName,
-            final Map<String,Object> values) {
-        Future<?> future = executorService.submit(
-            new StorageRunnable() {
-                public void doStorageOperation() {
-                    insertRow(tableName, values);
-                }
-            }, null);
-        return future;
-    }
-
-    @Override
-    public Future<?> updateRowsAsync(final String tableName, final List<Map<String,Object>> rows) {
-        Future<?> future = executorService.submit(    
-            new StorageRunnable() {
-                public void doStorageOperation() {
-                    updateRows(tableName, rows);
-                }
-            }, null);
-        return future;
-    }
-
-    @Override
-    public Future<?> updateMatchingRowsAsync(final String tableName,
-            final IPredicate predicate, final Map<String,Object> values) {
-        Future<?> future = executorService.submit(    
-            new StorageRunnable() {
-                public void doStorageOperation() {
-                    updateMatchingRows(tableName, predicate, values);
-                }
-            }, null);
-        return future;
-    }
-
-    @Override
-    public Future<?> updateRowAsync(final String tableName,
-            final Object rowKey, final Map<String,Object> values) {
-        Future<?> future = executorService.submit(
-            new StorageRunnable() {
-                public void doStorageOperation() {
-                    updateRow(tableName, rowKey, values);
-                }
-            }, null);
-        return future;
-    }
-
-    @Override
-    public Future<?> updateRowAsync(final String tableName,
-            final Map<String,Object> values) {
-        Future<?> future = executorService.submit(
-            new StorageRunnable() {
-                public void doStorageOperation() {
-                    updateRow(tableName, values);
-                }
-            }, null);
-        return future;
-    }
-
-    @Override
-    public Future<?> deleteRowAsync(final String tableName, final Object rowKey) {
-        Future<?> future = executorService.submit(
-            new StorageRunnable() {
-                public void doStorageOperation() {
-                    deleteRow(tableName, rowKey);
-                }
-            }, null);
-        return future;
-    }
-
-    @Override
-    public Future<?> deleteRowsAsync(final String tableName, final Set<Object> rowKeys) {
-        Future<?> future = executorService.submit(
-                new StorageRunnable() {
-                    public void doStorageOperation() {
-                        deleteRows(tableName, rowKeys);
-                    }
-                }, null);
-        return future;
-    }
-
-    @Override
-    public Future<?> deleteMatchingRowsAsync(final String tableName, final IPredicate predicate) {
-        Future<?> future = executorService.submit(
-                new StorageRunnable() {
-                    public void doStorageOperation() {
-                        deleteMatchingRows(tableName, predicate);
-                    }
-                }, null);
-        return future;
-    }
-
-    @Override
-    public Future<?> getRowAsync(final String tableName, final Object rowKey) {
-        Future<?> future = executorService.submit(
-            new StorageRunnable() {
-                public void doStorageOperation() {
-                    getRow(tableName, rowKey);
-                }
-            }, null);
-        return future;
-    }
-    
-    @Override
-    public Future<?> saveAsync(final IResultSet resultSet) {
-        Future<?> future = executorService.submit(
-            new StorageRunnable() {
-                public void doStorageOperation() {
-                    resultSet.save();
-                }
-            }, null);
-        return future;
-    }
-
-    @Override
-    public void insertRow(String tableName, Map<String, Object> values) {
-        updateCounters(STORAGE_UPDATE_COUNTER_NAME, tableName);
-        insertRowImpl(tableName, values);
-    }
-
-    protected abstract void insertRowImpl(String tableName, Map<String, Object> values);
-
-    
-    @Override
-    public void updateRows(String tableName, List<Map<String,Object>> rows) {
-        updateCounters(STORAGE_UPDATE_COUNTER_NAME, tableName);
-        updateRowsImpl(tableName, rows);
-    }
-
-    protected abstract void updateRowsImpl(String tableName, List<Map<String,Object>> rows);
-
-    @Override
-    public void updateMatchingRows(String tableName, IPredicate predicate,
-            Map<String, Object> values) {
-        updateCounters(STORAGE_UPDATE_COUNTER_NAME, tableName);
-        updateMatchingRowsImpl(tableName, predicate, values);
-    }
-    
-    protected abstract void updateMatchingRowsImpl(String tableName, IPredicate predicate,
-                                    Map<String, Object> values);
-
-    @Override
-    public void updateRow(String tableName, Object rowKey,
-            Map<String, Object> values) {
-        updateCounters(STORAGE_UPDATE_COUNTER_NAME, tableName);
-        updateRowImpl(tableName, rowKey, values);
-    }
-    
-    protected abstract void updateRowImpl(String tableName, Object rowKey,
-                                   Map<String, Object> values);
-
-    @Override
-    public void updateRow(String tableName, Map<String, Object> values) {
-        updateCounters(STORAGE_UPDATE_COUNTER_NAME, tableName);
-        updateRowImpl(tableName, values);
-    }
-    
-    protected abstract void updateRowImpl(String tableName, Map<String, Object> values);
-
-    @Override
-    public void deleteRow(String tableName, Object rowKey) {
-        updateCounters(STORAGE_DELETE_COUNTER_NAME, tableName);
-        deleteRowImpl(tableName, rowKey);
-    }
-    
-    protected abstract void deleteRowImpl(String tableName, Object rowKey);
-
-    @Override
-    public void deleteRows(String tableName, Set<Object> rowKeys) {
-        updateCounters(STORAGE_DELETE_COUNTER_NAME, tableName);
-        deleteRowsImpl(tableName, rowKeys);
-    }
-
-    protected abstract void deleteRowsImpl(String tableName, Set<Object> rowKeys);
-
-    @Override
-    public void deleteMatchingRows(String tableName, IPredicate predicate) {
-        IResultSet resultSet = null;
-        try {
-            resultSet = executeQuery(tableName, null, predicate, null);
-            while (resultSet.next()) {
-                resultSet.deleteRow();
-            }
-            resultSet.save();
-        }
-        finally {
-            if (resultSet != null)
-                resultSet.close();
-        }
-    }
-    
-    @Override
-    public IResultSet getRow(String tableName, Object rowKey) {
-        updateCounters(STORAGE_QUERY_COUNTER_NAME, tableName);
-        return getRowImpl(tableName, rowKey);
-    }
-
-    protected abstract IResultSet getRowImpl(String tableName, Object rowKey);
-
-    @Override
-    public synchronized void addListener(String tableName, IStorageSourceListener listener) {
-        Set<IStorageSourceListener> tableListeners = listeners.get(tableName);
-        if (tableListeners == null) {
-            tableListeners = new CopyOnWriteArraySet<IStorageSourceListener>();
-            listeners.put(tableName, tableListeners);
-        }
-        tableListeners.add(listener);
-    }
-  
-    @Override
-    public synchronized void removeListener(String tableName, IStorageSourceListener listener) {
-        Set<IStorageSourceListener> tableListeners = listeners.get(tableName);
-        if (tableListeners != null) {
-            tableListeners.remove(listener);
-        }
-    }
-
-    @LogMessageDoc(level="ERROR",
-            message="Exception caught handling storage notification",
-            explanation="An unknown error occured while trying to notify" +
-            		" storage listeners",
-            recommendation=LogMessageDoc.GENERIC_ACTION)
-    protected synchronized void notifyListeners(StorageSourceNotification notification) {
-        String tableName = notification.getTableName();
-        Set<Object> keys = notification.getKeys();
-        Set<IStorageSourceListener> tableListeners = listeners.get(tableName);
-        if (tableListeners != null) {
-            for (IStorageSourceListener listener : tableListeners) {
-                try {
-                    switch (notification.getAction()) {
-                        case MODIFY:
-                            listener.rowsModified(tableName, keys);
-                            break;
-                        case DELETE:
-                            listener.rowsDeleted(tableName, keys);
-                            break;
-                    }
-                }
-                catch (Exception e) {
-                    logger.error("Exception caught handling storage notification", e);
-                }
-            }
-        }
-    }
-    
-    @Override
-    public void notifyListeners(List<StorageSourceNotification> notifications) {
-        for (StorageSourceNotification notification : notifications)
-            notifyListeners(notification);
-    }
-    
-    // IFloodlightModule
-
-    @Override
-    public Collection<Class<? extends IFloodlightService>> getModuleServices() {
-        Collection<Class<? extends IFloodlightService>> l = 
-                new ArrayList<Class<? extends IFloodlightService>>();
-        l.add(IStorageSourceService.class);
-        return l;
-    }
-    
-    @Override
-    public Map<Class<? extends IFloodlightService>,
-               IFloodlightService> getServiceImpls() {
-        Map<Class<? extends IFloodlightService>,
-            IFloodlightService> m = 
-                new HashMap<Class<? extends IFloodlightService>,
-                            IFloodlightService>();
-        m.put(IStorageSourceService.class, this);
-        return m;
-    }
-    
-    @Override
-    public Collection<Class<? extends IFloodlightService>> getModuleDependencies() {
-        Collection<Class<? extends IFloodlightService>> l = 
-                new ArrayList<Class<? extends IFloodlightService>>();
-        l.add(IRestApiService.class);
-        l.add(ICounterStoreService.class);
-        return l;
-    }
-
-    @Override
-    public void init(FloodlightModuleContext context)
-            throws FloodlightModuleException {
-        restApi =
-           context.getServiceImpl(IRestApiService.class);
-        counterStore =
-            context.getServiceImpl(ICounterStoreService.class);
-    }
-
-    @Override
-    public void startUp(FloodlightModuleContext context) {
-        restApi.addRestletRoutable(new StorageWebRoutable());
-    }
-}
diff --git a/src/main/java/net/floodlightcontroller/storage/CompoundPredicate.java b/src/main/java/net/floodlightcontroller/storage/CompoundPredicate.java
deleted file mode 100644
index a23e560..0000000
--- a/src/main/java/net/floodlightcontroller/storage/CompoundPredicate.java
+++ /dev/null
@@ -1,52 +0,0 @@
-/**
-*    Copyright 2011, Big Switch Networks, Inc. 
-*    Originally created by David Erickson, Stanford University
-* 
-*    Licensed under the Apache License, Version 2.0 (the "License"); you may
-*    not use this file except in compliance with the License. You may obtain
-*    a copy of the License at
-*
-*         http://www.apache.org/licenses/LICENSE-2.0
-*
-*    Unless required by applicable law or agreed to in writing, software
-*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-*    License for the specific language governing permissions and limitations
-*    under the License.
-**/
-
-package net.floodlightcontroller.storage;
-
-/** Predicate class to handle AND and OR combinations of a number
- * of child predicates. The result of the logical combination of the
- * child predicates can also be negated to support a NOT operation.
- * 
- * @author rob
- *
- */
-public class CompoundPredicate implements IPredicate {
-
-    public enum Operator { AND, OR };
-    
-    private Operator operator;
-    private boolean negated;
-    private IPredicate[] predicateList;
-    
-    public CompoundPredicate(Operator operator, boolean negated, IPredicate... predicateList) {
-        this.operator = operator;
-        this.negated = negated;
-        this.predicateList = predicateList;
-    }
-    
-    public Operator getOperator() {
-        return operator;
-    }
-    
-    public boolean isNegated() {
-        return negated;
-    }
-    
-    public IPredicate[] getPredicateList() {
-        return predicateList;
-    }
-}
diff --git a/src/main/java/net/floodlightcontroller/storage/IPredicate.java b/src/main/java/net/floodlightcontroller/storage/IPredicate.java
deleted file mode 100644
index 291edff..0000000
--- a/src/main/java/net/floodlightcontroller/storage/IPredicate.java
+++ /dev/null
@@ -1,26 +0,0 @@
-/**
-*    Copyright 2011, Big Switch Networks, Inc. 
-*    Originally created by David Erickson, Stanford University
-* 
-*    Licensed under the Apache License, Version 2.0 (the "License"); you may
-*    not use this file except in compliance with the License. You may obtain
-*    a copy of the License at
-*
-*         http://www.apache.org/licenses/LICENSE-2.0
-*
-*    Unless required by applicable law or agreed to in writing, software
-*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-*    License for the specific language governing permissions and limitations
-*    under the License.
-**/
-
-package net.floodlightcontroller.storage;
-
-/** Common base interface for the OperatorPredicate and CompoundPredicate classes.
- * It's necessary so that you can use either type of predicate as child
- * predicates of a CompoundPredicate.
- * @author rob
- */
-public interface IPredicate {
-}
diff --git a/src/main/java/net/floodlightcontroller/storage/IQuery.java b/src/main/java/net/floodlightcontroller/storage/IQuery.java
deleted file mode 100644
index b75b8ae..0000000
--- a/src/main/java/net/floodlightcontroller/storage/IQuery.java
+++ /dev/null
@@ -1,39 +0,0 @@
-/**
-*    Copyright 2011, Big Switch Networks, Inc. 
-*    Originally created by David Erickson, Stanford University
-* 
-*    Licensed under the Apache License, Version 2.0 (the "License"); you may
-*    not use this file except in compliance with the License. You may obtain
-*    a copy of the License at
-*
-*         http://www.apache.org/licenses/LICENSE-2.0
-*
-*    Unless required by applicable law or agreed to in writing, software
-*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-*    License for the specific language governing permissions and limitations
-*    under the License.
-**/
-
-package net.floodlightcontroller.storage;
-
-/** Representation of a database query. For SQL queries this maps to
- * a prepared statement, so it will be more efficient than if you use the
- * methods in IStorageSource that bypass the IQuery. For many NoSQL
- * storage sources there won't be any performance improvement from keeping
- * around the query.
- * 
- * The query interface also supports parameterized queries (i.e. which maps
- * to using ? values in a SQL query). The values of the parameters are set
- * using the setParameter method. In the storage source API the parameters
- * are named rather than positional. The format of the parameterized values
- * in the query predicates is the parameter name bracketed with question marks
- * (e.g. ?MinimumSalary? ).
- * 
- * @author rob
- *
- */
-public interface IQuery {
-    String getTableName();
-    void setParameter(String name, Object value);
-}
diff --git a/src/main/java/net/floodlightcontroller/storage/IResultSet.java b/src/main/java/net/floodlightcontroller/storage/IResultSet.java
deleted file mode 100644
index fbd2a4a..0000000
--- a/src/main/java/net/floodlightcontroller/storage/IResultSet.java
+++ /dev/null
@@ -1,106 +0,0 @@
-/**
-*    Copyright 2011, Big Switch Networks, Inc. 
-*    Originally created by David Erickson, Stanford University
-* 
-*    Licensed under the Apache License, Version 2.0 (the "License"); you may
-*    not use this file except in compliance with the License. You may obtain
-*    a copy of the License at
-*
-*         http://www.apache.org/licenses/LICENSE-2.0
-*
-*    Unless required by applicable law or agreed to in writing, software
-*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-*    License for the specific language governing permissions and limitations
-*    under the License.
-**/
-
-package net.floodlightcontroller.storage;
-
-import java.util.Date;
-import java.util.Map;
-
-/** Interface to iterate over the results from a storage query.
- * 
- * @author rob
- *
- */
-public interface IResultSet extends Iterable<IResultSet> {
-    
-    /** This should be called when the client is done using the result set.
-     * This will release any underlying resources (e.g. a database connection),
-     * which you don't want to wait for or rely on finalizers to release.
-     */
-    public void close();
-    
-    /** Advance to the next row in the result set. 
-     * @return Returns true if there are more rows to process
-     * (i.e. if there's a valid current row) and false if there are no more
-     * rows in the result set.
-     */
-    public boolean next();
-    
-    /** Save/commit any pending updates to the data in the result set.
-     * This must be called after any calls to the set methods or deleting rows
-     * for the changes to be applied/committed to the storage source. Note that
-     * this doesn't need to be called after each set method or even after each
-     * row. It is typically called at the end after updating all of the
-     * rows in the result set.
-     */
-    public void save();
-    
-    /** Get the current row in the result set. This returns all of the
-     * columns in the current row.
-     * @return Map containing all of the columns in the current row, indexed
-     * by the column name.
-     */
-    public Map<String,Object> getRow();
-    
-    /** Delete the current row in the result set.
-     */
-    public void deleteRow();
-    
-    public boolean containsColumn(String columnName);
-    
-    public String getString(String columnName);
-    public short getShort(String columnName);
-    public int getInt(String columnName);
-    public long getLong(String columnName);
-    public float getFloat(String columnName);
-    public double getDouble(String columnName);
-    public boolean getBoolean(String columnName);
-    public byte getByte(String columnName);
-    public byte[] getByteArray(String columnName);
-    public Date getDate(String columnName);
-    
-    public Short getShortObject(String columnName);
-    public Integer getIntegerObject(String columnName);
-    public Long getLongObject(String columnName);
-    public Float getFloatObject(String columnName);
-    public Double getDoubleObject(String columnName);
-    public Boolean getBooleanObject(String columnName);
-    public Byte getByteObject(String columnName);
-    
-    public boolean isNull(String columnName);
-    
-    public void setString(String columnName, String value);
-    public void setShort(String columnName, short value);
-    public void setInt(String columnName, int value);
-    public void setLong(String columnName, long value);
-    public void setFloat(String columnName, float value);
-    public void setDouble(String columnName, double value);
-    public void setBoolean(String columnName, boolean value);
-    public void setByte(String columnName, byte value);
-    public void setByteArray(String columnName, byte[] byteArray);
-    public void setDate(String columnName, Date date);
-    
-    public void setShortObject(String columnName, Short value);
-    public void setIntegerObject(String columnName, Integer value);
-    public void setLongObject(String columnName, Long value);
-    public void setFloatObject(String columnName, Float value);
-    public void setDoubleObject(String columnName, Double value);
-    public void setBooleanObject(String columnName, Boolean value);
-    public void setByteObject(String columnName, Byte value);
-    
-    public void setNull(String columnName);
-}
diff --git a/src/main/java/net/floodlightcontroller/storage/IRowMapper.java b/src/main/java/net/floodlightcontroller/storage/IRowMapper.java
deleted file mode 100644
index 6c4502b..0000000
--- a/src/main/java/net/floodlightcontroller/storage/IRowMapper.java
+++ /dev/null
@@ -1,35 +0,0 @@
-/**
-*    Copyright 2011, Big Switch Networks, Inc. 
-*    Originally created by David Erickson, Stanford University
-* 
-*    Licensed under the Apache License, Version 2.0 (the "License"); you may
-*    not use this file except in compliance with the License. You may obtain
-*    a copy of the License at
-*
-*         http://www.apache.org/licenses/LICENSE-2.0
-*
-*    Unless required by applicable law or agreed to in writing, software
-*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-*    License for the specific language governing permissions and limitations
-*    under the License.
-**/
-
-package net.floodlightcontroller.storage;
-
-/**
- * Interface for mapping the current row in a result set to an object.
- * This is based on the Spring JDBC support.
- * 
- * @author rob
- */
-public interface IRowMapper {
-
-    /** This method must be implemented by the client of the storage API
-     * to map the current row in the result set to a Java object.
-     * 
-     * @param resultSet The result set obtained from a storage source query
-     * @return The object created from the data in the result set
-     */
-    Object mapRow(IResultSet resultSet);
-}
diff --git a/src/main/java/net/floodlightcontroller/storage/IStorageExceptionHandler.java b/src/main/java/net/floodlightcontroller/storage/IStorageExceptionHandler.java
deleted file mode 100644
index e3c8e94..0000000
--- a/src/main/java/net/floodlightcontroller/storage/IStorageExceptionHandler.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/**
-*    Copyright 2011, Big Switch Networks, Inc. 
-*    Originally created by David Erickson, Stanford University
-* 
-*    Licensed under the Apache License, Version 2.0 (the "License"); you may
-*    not use this file except in compliance with the License. You may obtain
-*    a copy of the License at
-*
-*         http://www.apache.org/licenses/LICENSE-2.0
-*
-*    Unless required by applicable law or agreed to in writing, software
-*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-*    License for the specific language governing permissions and limitations
-*    under the License.
-**/
-
-package net.floodlightcontroller.storage;
-
-public interface IStorageExceptionHandler {
-    public void handleException(Exception exc);
-}
diff --git a/src/main/java/net/floodlightcontroller/storage/IStorageSourceListener.java b/src/main/java/net/floodlightcontroller/storage/IStorageSourceListener.java
deleted file mode 100644
index ea3764d..0000000
--- a/src/main/java/net/floodlightcontroller/storage/IStorageSourceListener.java
+++ /dev/null
@@ -1,39 +0,0 @@
-/**
-*    Copyright 2011, Big Switch Networks, Inc. 
-*    Originally created by David Erickson, Stanford University
-* 
-*    Licensed under the Apache License, Version 2.0 (the "License"); you may
-*    not use this file except in compliance with the License. You may obtain
-*    a copy of the License at
-*
-*         http://www.apache.org/licenses/LICENSE-2.0
-*
-*    Unless required by applicable law or agreed to in writing, software
-*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-*    License for the specific language governing permissions and limitations
-*    under the License.
-**/
-
-package net.floodlightcontroller.storage;
-
-import java.util.Set;
-
-public interface IStorageSourceListener {
-
-    /**
-     * Called when rows are inserted or updated in the table.
-     * 
-     * @param tableName The table where the rows were inserted
-     * @param rowKeys The keys of the rows that were inserted
-     */
-    public void rowsModified(String tableName, Set<Object> rowKeys);
-    
-    /**
-     * Called when a new row is deleted from the table.
-     * 
-     * @param tableName The table where the rows were deleted
-     * @param rowKeys The keys of the rows that were deleted
-     */
-    public void rowsDeleted(String tableName, Set<Object> rowKeys);
-}
diff --git a/src/main/java/net/floodlightcontroller/storage/IStorageSourceService.java b/src/main/java/net/floodlightcontroller/storage/IStorageSourceService.java
deleted file mode 100644
index b8a1be8..0000000
--- a/src/main/java/net/floodlightcontroller/storage/IStorageSourceService.java
+++ /dev/null
@@ -1,331 +0,0 @@
-/**
-*    Copyright 2011, Big Switch Networks, Inc. 
-*    Originally created by David Erickson, Stanford University
-* 
-*    Licensed under the Apache License, Version 2.0 (the "License"); you may
-*    not use this file except in compliance with the License. You may obtain
-*    a copy of the License at
-*
-*         http://www.apache.org/licenses/LICENSE-2.0
-*
-*    Unless required by applicable law or agreed to in writing, software
-*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-*    License for the specific language governing permissions and limitations
-*    under the License.
-**/
-
-package net.floodlightcontroller.storage;
-
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.Future;
-
-import net.floodlightcontroller.core.module.IFloodlightService;
-
-public interface IStorageSourceService extends IFloodlightService {
-
-    /** Set the column to be used as the primary key for a table. This should
-     * be guaranteed to be unique for all of the rows in the table, although the
-     * storage API does not necessarily enforce this requirement. If no primary
-     * key name is specified for a table then the storage API assumes there is
-     * a column named "id" that is used as the primary key. In this case when
-     * a new row is inserted using the storage API and no id is specified
-     * explictly in the row data, the storage API automatically generates a
-     * unique ID (typically a UUID) for the id column. To work across all
-     * possible implementations of the storage API it is safest, though, to
-     * specify the primary key column explicitly.
-     * FIXME: It's sort of a kludge to have to specify the primary key column
-     * here. Ideally there would be some sort of metadata -- perhaps stored
-     * directly in the table, at least in the NoSQL case -- that the
-     * storage API could query to obtain the primary key info.
-     * @param tableName The name of the table for which we're setting the key
-     * @param primaryKeyName The name of column to be used as the primary key
-     */
-    public void setTablePrimaryKeyName(String tableName, String primaryKeyName);
-
-    /** Create a new table if one does not already exist with the given name.
-     * 
-     * @param tableName The name of the table to create.
-     * @param indexedColumns Which columns should be indexed
-     */
-    void createTable(String tableName, Set<String> indexedColumns);
-    
-    /**
-     * @return the set of all tables that have been created via createTable
-     */
-    Set<String> getAllTableNames();
-    
-    /** Create a query object representing the given query parameters. The query
-     * object can be passed to executeQuery to actually perform the query and obtain
-     * a result set.
-     * 
-     * @param tableName The name of the table to query.
-     * @param columnNames The list of columns to return in the result set.
-     * @param predicate The predicate that specifies which rows to return in the result set.
-     * @param ordering Specification of order that rows are returned from the result set
-     * returned from executing the query. If the ordering is null, then rows are returned
-     * in an implementation-specific order.
-     * @return Query object to be passed to executeQuery.
-     */
-    IQuery createQuery(String tableName, String[] columnNames, IPredicate predicate, RowOrdering ordering);
-    
-    /** Execute a query created with createQuery.
-     * 
-     * @param query The query to execute
-     * @return The result set containing the rows/columns specified in the query.
-     */
-    IResultSet executeQuery(IQuery query);
-
-    /** Execute a query created with the given query parameters.
-     *
-     * @param tableName The name of the table to query.
-     * @param columnNames The list of columns to return in the result set.
-     * @param predicate The predicate that specifies which rows to return in the result set.
-     * @param ordering Specification of order that rows are returned from the result set
-     * returned from executing the query. If the ordering is null, then rows are returned
-     * in an implementation-specific order.
-     * @return The result set containing the rows/columns specified in the query.
-     */
-    IResultSet executeQuery(String tableName, String[] columnNames, IPredicate predicate,
-            RowOrdering ordering);
-    
-    /** Execute a query and call the row mapper to map the results to Java objects.
-     * 
-     * @param tableName The name of the table to query.
-     * @param columnNames The list of columns to return in the result set.
-     * @param predicate The predicate that specifies which rows to return in the result set.
-     * @param ordering Specification of order that rows are returned from the result set
-     * returned from executing the query. If the ordering is null, then rows are returned
-     * in an implementation-specific order.
-     * @param rowMapper The client-supplied object that maps the data in a row in the result
-     * set to a client object.
-     * @return The result set containing the rows/columns specified in the query.
-     */
-    Object[] executeQuery(String tableName, String[] columnNames, IPredicate predicate,
-            RowOrdering ordering, IRowMapper rowMapper);
-    
-    /** Insert a new row in the table with the given column data.
-     * If the primary key is the default value of "id" and is not specified in the
-     * then a unique id will be automatically assigned to the row.
-     * @param tableName The name of the table to which to add the row
-     * @param values The map of column names/values to add to the table.
-     */
-    void insertRow(String tableName, Map<String,Object> values);
-
-    /** Update or insert a list of rows in the table.
-     * The primary key must be included in the map of values for each row.
-     * @param tableName The table to update or insert into
-     * @param values The map of column names/values to update the rows
-     */
-    void updateRows(String tableName, List<Map<String,Object>> rows);
-    
-    /** Update the rows in the given table. Any rows matching the predicate
-     * are updated with the column names/values specified in the values map.
-     * (The values map should not contain the special column "id".)
-     * @param tableName The table to update
-     * @param predicate The predicate to use to select which rows to update
-     * @param values The map of column names/values to update the rows.
-     */
-    void updateMatchingRows(String tableName, IPredicate predicate, Map<String,Object> values);
-    
-    /** Update or insert a row in the table with the given row key (primary
-     * key) and column names/values. (If the values map contains the special
-     * column "id", its value must match rowId.)
-     * @param tableName The table to update or insert into
-     * @param rowKey The ID (primary key) of the row to update
-     * @param values The map of column names/values to update the rows
-     */
-    void updateRow(String tableName, Object rowKey, Map<String,Object> values);
-    
-    /** Update or insert a row in the table with the given column data.
-     * The primary key must be included in the map of values.
-     * @param tableName The table to update or insert into
-     * @param values The map of column names/values to update the rows
-     */
-    void updateRow(String tableName, Map<String,Object> values);
-    
-    /** Delete the row with the given primary key.
-     * 
-     * @param tableName The table from which to delete the row
-     * @param rowKey The primary key of the row to delete.
-     */
-    void deleteRow(String tableName, Object rowKey);
-
-    /** Delete the rows with the given keys.
-     * 
-     * @param tableName The table from which to delete the rows
-     * @param rowKeys The set of primary keys of the rows to delete.
-     */
-    void deleteRows(String tableName, Set<Object> rowKeys);
-    
-    /**
-     * Delete the rows that match the predicate
-     * @param tableName
-     * @param predicate
-     */
-    void deleteMatchingRows(String tableName, IPredicate predicate);
-    
-    /** Query for a row with the given ID (primary key).
-     * 
-     * @param tableName The name of the table to query
-     * @param rowKey The primary key of the row
-     * @return The result set containing the row with the given ID
-     */
-    IResultSet getRow(String tableName, Object rowKey);
-    
-    /**
-     * Set exception handler to use for asynchronous operations.
-     * @param exceptionHandler
-     */
-    void setExceptionHandler(IStorageExceptionHandler exceptionHandler);
-    
-    /**
-     * Asynchronous variant of executeQuery.
-     * 
-     * @param query
-     * @return
-     */
-    public Future<IResultSet> executeQueryAsync(final IQuery query);
-    
-    /**
-     * Asynchronous variant of executeQuery.
-     * 
-     * @param tableName
-     * @param columnNames
-     * @param predicate
-     * @param ordering
-     * @return
-     */
-    public Future<IResultSet> executeQueryAsync(final String tableName,
-            final String[] columnNames,  final IPredicate predicate,
-            final RowOrdering ordering);
-    
-    /**
-     * Asynchronous variant of executeQuery
-     * 
-     * @param tableName
-     * @param columnNames
-     * @param predicate
-     * @param ordering
-     * @param rowMapper
-     * @return
-     */
-    public Future<Object[]> executeQueryAsync(final String tableName,
-            final String[] columnNames,  final IPredicate predicate,
-            final RowOrdering ordering, final IRowMapper rowMapper);
-    
-    /**
-     * Asynchronous variant of insertRow.
-     * 
-     * @param tableName
-     * @param values
-     * @return
-     */
-    public Future<?> insertRowAsync(final String tableName, final Map<String,Object> values);
-
-    /**
-     * Asynchronous variant of updateRows
-     * @param tableName
-     * @param rows
-     */
-    public Future<?> updateRowsAsync(final String tableName, final List<Map<String,Object>> rows);
-
-    /**
-     * Asynchronous variant of updateMatchingRows
-     * 
-     * @param tableName
-     * @param predicate
-     * @param values
-     * @return
-     */
-    public Future<?> updateMatchingRowsAsync(final String tableName, final IPredicate predicate,
-            final Map<String,Object> values);
-
-    /**
-     * Asynchronous variant of updateRow
-     * 
-     * @param tableName
-     * @param rowKey
-     * @param values
-     * @return
-     */
-    public Future<?> updateRowAsync(final String tableName, final Object rowKey,
-            final Map<String,Object> values);
-            
-    /**
-     * Asynchronous version of updateRow
-     * 
-     * @param tableName
-     * @param values
-     * @return
-     */
-    public Future<?> updateRowAsync(final String tableName, final Map<String,Object> values);
-    
-    /**
-     * Asynchronous version of deleteRow
-     * 
-     * @param tableName
-     * @param rowKey
-     * @return
-     */
-    public Future<?> deleteRowAsync(final String tableName, final Object rowKey);
-
-    /**
-     * Asynchronous version of deleteRows
-     * 
-     * @param tableName
-     * @param rowKeys
-     * @return
-     */
-    public Future<?> deleteRowsAsync(final String tableName, final Set<Object> rowKeys);
-
-    /**
-     * Asynchronous version of deleteRows
-     * 
-     * @param tableName
-     * @param predicate
-     * @return
-     */
-    public Future<?> deleteMatchingRowsAsync(final String tableName, final IPredicate predicate);
-    
-    /**
-     * Asynchronous version of getRow
-     * 
-     * @param tableName
-     * @param rowKey
-     * @return
-     */
-    public Future<?> getRowAsync(final String tableName, final Object rowKey);
-    
-    /**
-     * Asynchronous version of save
-     * 
-     * @param resultSet
-     * @return
-     */
-    public Future<?> saveAsync(final IResultSet resultSet);
-    
-    /** Add a listener to the specified table. The listener is called
-     * when any modifications are made to the table. You can add the same
-     * listener instance to multiple tables, since the table name is
-     * included as a parameter in the listener methods.
-     * @param tableName The name of the table to listen for modifications
-     * @param listener The listener instance to call
-     */
-    public void addListener(String tableName, IStorageSourceListener listener);
-    
-    /** Remove a listener from the specified table. The listener should
-     * have been previously added to the table with addListener.
-     * @param tableName The name of the table with the listener
-     * @param listener The previously installed listener instance
-     */
-    public void removeListener(String tableName, IStorageSourceListener listener);
-    
-    /** This is logically a private method and should not be called by
-     * clients of this interface.
-     * @param notifications the notifications to dispatch
-     */
-    public void notifyListeners(List<StorageSourceNotification> notifications);
-}
diff --git a/src/main/java/net/floodlightcontroller/storage/NullValueStorageException.java b/src/main/java/net/floodlightcontroller/storage/NullValueStorageException.java
deleted file mode 100644
index 0c148b8..0000000
--- a/src/main/java/net/floodlightcontroller/storage/NullValueStorageException.java
+++ /dev/null
@@ -1,44 +0,0 @@
-/**
-*    Copyright 2011, Big Switch Networks, Inc. 
-*    Originally created by David Erickson, Stanford University
-* 
-*    Licensed under the Apache License, Version 2.0 (the "License"); you may
-*    not use this file except in compliance with the License. You may obtain
-*    a copy of the License at
-*
-*         http://www.apache.org/licenses/LICENSE-2.0
-*
-*    Unless required by applicable law or agreed to in writing, software
-*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-*    License for the specific language governing permissions and limitations
-*    under the License.
-**/
-
-package net.floodlightcontroller.storage;
-
-public class NullValueStorageException extends StorageException {
-
-    private static final long serialVersionUID = 897572085681189926L;
-
-    private static String makeExceptionMessage(String columnName) {
-        String message = "Null column value could not be converted to built-in type";
-        if (columnName != null) {
-            message += ": column name = ";
-            message += columnName;
-        }
-        return message;
-    }
-    
-    public NullValueStorageException() {
-        super(makeExceptionMessage(null));
-    }
-    
-    public NullValueStorageException(String columnName) {
-        super(makeExceptionMessage(columnName));
-    }
-    
-    public NullValueStorageException(String columnName, Throwable exc) {
-        super(makeExceptionMessage(columnName), exc);
-    }
-}
diff --git a/src/main/java/net/floodlightcontroller/storage/OperatorPredicate.java b/src/main/java/net/floodlightcontroller/storage/OperatorPredicate.java
deleted file mode 100644
index dc78260..0000000
--- a/src/main/java/net/floodlightcontroller/storage/OperatorPredicate.java
+++ /dev/null
@@ -1,51 +0,0 @@
-/**
-*    Copyright 2011, Big Switch Networks, Inc. 
-*    Originally created by David Erickson, Stanford University
-* 
-*    Licensed under the Apache License, Version 2.0 (the "License"); you may
-*    not use this file except in compliance with the License. You may obtain
-*    a copy of the License at
-*
-*         http://www.apache.org/licenses/LICENSE-2.0
-*
-*    Unless required by applicable law or agreed to in writing, software
-*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-*    License for the specific language governing permissions and limitations
-*    under the License.
-**/
-
-package net.floodlightcontroller.storage;
-
-/** Predicate class to specify rows by equality or comparison operations
- * of column values. The Storage API uses the special column name of "id"
- * to specify the primary key values for the row.
- * 
- * @author rob
- */
-public class OperatorPredicate implements IPredicate {
-    
-    public enum Operator { EQ, LT, LTE, GT, GTE };
-    
-    private String columnName;
-    private Operator operator;
-    private Comparable<?> value;
-    
-    public OperatorPredicate(String columnName, Operator operator, Comparable<?> value) {
-        this.columnName = columnName;
-        this.operator = operator;
-        this.value = value;
-    }
-    
-    public String getColumnName() {
-        return columnName;
-    }
-    
-    public Operator getOperator() {
-        return operator;
-    }
-    
-    public Comparable<?> getValue() {
-        return value;
-    }
-}
diff --git a/src/main/java/net/floodlightcontroller/storage/ResultSetIterator.java b/src/main/java/net/floodlightcontroller/storage/ResultSetIterator.java
deleted file mode 100644
index 669833d..0000000
--- a/src/main/java/net/floodlightcontroller/storage/ResultSetIterator.java
+++ /dev/null
@@ -1,64 +0,0 @@
-/**
-*    Copyright 2011, Big Switch Networks, Inc. 
-*    Originally created by David Erickson, Stanford University
-* 
-*    Licensed under the Apache License, Version 2.0 (the "License"); you may
-*    not use this file except in compliance with the License. You may obtain
-*    a copy of the License at
-*
-*         http://www.apache.org/licenses/LICENSE-2.0
-*
-*    Unless required by applicable law or agreed to in writing, software
-*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-*    License for the specific language governing permissions and limitations
-*    under the License.
-**/
-
-package net.floodlightcontroller.storage;
-
-import java.util.Iterator;
-import java.util.NoSuchElementException;
-
-/** Iterator wrapper for an IResultSet, useful for iterating through query
- * results in an enhanced for (foreach) loop.
- * 
- * Note that the iterator manipulates the state of the underlying IResultSet.
- */
-public class ResultSetIterator implements Iterator<IResultSet> {
-    private IResultSet resultSet;
-    private boolean hasAnother;
-    private boolean peekedAtNext;
-    
-    public ResultSetIterator(IResultSet resultSet) {
-        this.resultSet = resultSet;
-        this.peekedAtNext = false;
-    }
-    
-    @Override
-    public IResultSet next() {
-        if (!peekedAtNext) {
-            hasAnother = resultSet.next();
-        }
-        peekedAtNext = false;
-        if (!hasAnother)
-            throw new NoSuchElementException();
-        return resultSet;
-    }
-    
-    @Override
-    public boolean hasNext() {
-        if (!peekedAtNext) {
-            hasAnother = resultSet.next();
-            peekedAtNext = true;
-        }
-        return hasAnother;
-    }
-    
-    /** Row removal is not supported; use IResultSet.deleteRow instead.
-     */
-    @Override
-    public void remove() {
-        throw new UnsupportedOperationException();
-    }
-}
diff --git a/src/main/java/net/floodlightcontroller/storage/RowOrdering.java b/src/main/java/net/floodlightcontroller/storage/RowOrdering.java
deleted file mode 100644
index f9e61ed..0000000
--- a/src/main/java/net/floodlightcontroller/storage/RowOrdering.java
+++ /dev/null
@@ -1,119 +0,0 @@
-/**
-*    Copyright 2011, Big Switch Networks, Inc. 
-*    Originally created by David Erickson, Stanford University
-* 
-*    Licensed under the Apache License, Version 2.0 (the "License"); you may
-*    not use this file except in compliance with the License. You may obtain
-*    a copy of the License at
-*
-*         http://www.apache.org/licenses/LICENSE-2.0
-*
-*    Unless required by applicable law or agreed to in writing, software
-*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-*    License for the specific language governing permissions and limitations
-*    under the License.
-**/
-
-package net.floodlightcontroller.storage;
-
-import java.util.ArrayList;
-import java.util.List;
-
-public class RowOrdering {
-    
-    public enum Direction { ASCENDING, DESCENDING };
-    
-    public class Item {
-        
-        private String column;
-        private Direction direction;
-        
-        public Item(String column, Direction direction) {
-            assert(column != null);
-            assert(direction != null);
-            this.column = column;
-            this.direction = direction;
-        }
-        
-        public String getColumn() {
-            return column;
-        }
-        
-        public Direction getDirection() {
-            return direction;
-        }
-    }
-    
-    private List<Item> itemList = new ArrayList<Item>();
-    
-    public RowOrdering() {
-    }
-    
-    public RowOrdering(String column) {
-        add(column);
-    }
-    
-    public RowOrdering(String column, Direction direction) {
-        add(column, direction);
-    }
-    
-    public RowOrdering(Item item) {
-        add(item);
-    }
-    
-    public RowOrdering(Item[] itemArray) {
-        add(itemArray);
-    }
-    
-    public RowOrdering(List<Item> itemList) {
-        add(itemList);
-    }
-    
-    public void add(String column) {
-        itemList.add(new Item(column, Direction.ASCENDING));
-    }
-    
-    public void add(String column, Direction direction) {
-        itemList.add(new Item(column, direction));
-    }
-    
-    public void add(Item item) {
-        assert(item != null);
-        itemList.add(item);
-    }
-    
-    public void add(Item[] itemArray) {
-        for (Item item: itemArray) {
-            itemList.add(item);
-        }
-    }
-    
-    public void add(List<Item> itemList) {
-        this.itemList.addAll(itemList);
-    }
-    
-    public List<Item> getItemList() {
-        return itemList;
-    }
-    
-    public boolean equals(RowOrdering rowOrdering) {
-        if (rowOrdering == null)
-            return false;
-        
-        int len1 = itemList.size();
-        int len2 = rowOrdering.getItemList().size();
-        if (len1 != len2)
-            return false;
-        
-        for (int i = 0; i < len1; i++) {
-            Item item1 = itemList.get(i);
-            Item item2 = rowOrdering.getItemList().get(i);
-            if (!item1.getColumn().equals(item2.getColumn()) ||
-                    item1.getDirection() != item2.getDirection())
-                return false;
-        }
-        
-        return true;
-    }
-}
diff --git a/src/main/java/net/floodlightcontroller/storage/StorageException.java b/src/main/java/net/floodlightcontroller/storage/StorageException.java
deleted file mode 100644
index f5dea23..0000000
--- a/src/main/java/net/floodlightcontroller/storage/StorageException.java
+++ /dev/null
@@ -1,44 +0,0 @@
-/**
-*    Copyright 2011, Big Switch Networks, Inc. 
-*    Originally created by David Erickson, Stanford University
-* 
-*    Licensed under the Apache License, Version 2.0 (the "License"); you may
-*    not use this file except in compliance with the License. You may obtain
-*    a copy of the License at
-*
-*         http://www.apache.org/licenses/LICENSE-2.0
-*
-*    Unless required by applicable law or agreed to in writing, software
-*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-*    License for the specific language governing permissions and limitations
-*    under the License.
-**/
-
-package net.floodlightcontroller.storage;
-
-public class StorageException extends RuntimeException {
-
-    static final long serialVersionUID = 7839989010156155681L;
-    
-    static private String makeExceptionMessage(String s) {
-        String message = "Storage Exception";
-        if (s != null) {
-            message += ": ";
-            message += s;
-        }
-        return message;
-    }
-
-    public StorageException() {
-        super(makeExceptionMessage(null));
-    }
-    
-    public StorageException(String s) {
-        super(makeExceptionMessage(s));
-    }
-    
-    public StorageException(String s, Throwable exc) {
-        super(makeExceptionMessage(s), exc);
-    }
-}
diff --git a/src/main/java/net/floodlightcontroller/storage/StorageNotificationFormatException.java b/src/main/java/net/floodlightcontroller/storage/StorageNotificationFormatException.java
deleted file mode 100644
index f6ce565..0000000
--- a/src/main/java/net/floodlightcontroller/storage/StorageNotificationFormatException.java
+++ /dev/null
@@ -1,26 +0,0 @@
-/**
-*    Copyright 2011, Big Switch Networks, Inc. 
-*    Originally created by David Erickson, Stanford University
-* 
-*    Licensed under the Apache License, Version 2.0 (the "License"); you may
-*    not use this file except in compliance with the License. You may obtain
-*    a copy of the License at
-*
-*         http://www.apache.org/licenses/LICENSE-2.0
-*
-*    Unless required by applicable law or agreed to in writing, software
-*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-*    License for the specific language governing permissions and limitations
-*    under the License.
-**/
-
-package net.floodlightcontroller.storage;
-
-public class StorageNotificationFormatException extends StorageException {
-    private static final long serialVersionUID = 504758477518283156L;
-
-    public StorageNotificationFormatException() {
-        super("Invalid storage notification format");
-    }
-}
diff --git a/src/main/java/net/floodlightcontroller/storage/StorageSourceNotification.java b/src/main/java/net/floodlightcontroller/storage/StorageSourceNotification.java
deleted file mode 100644
index c9a5450..0000000
--- a/src/main/java/net/floodlightcontroller/storage/StorageSourceNotification.java
+++ /dev/null
@@ -1,108 +0,0 @@
-/**
-*    Copyright 2011, Big Switch Networks, Inc. 
-*    Originally created by David Erickson, Stanford University
-* 
-*    Licensed under the Apache License, Version 2.0 (the "License"); you may
-*    not use this file except in compliance with the License. You may obtain
-*    a copy of the License at
-*
-*         http://www.apache.org/licenses/LICENSE-2.0
-*
-*    Unless required by applicable law or agreed to in writing, software
-*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-*    License for the specific language governing permissions and limitations
-*    under the License.
-**/
-
-package net.floodlightcontroller.storage;
-
-import java.util.Set;
-
-public class StorageSourceNotification {
-    
-    public enum Action { MODIFY, DELETE };
-    
-    private String tableName;
-    private Action action;
-    private Set<Object> keys;
-    
-    public StorageSourceNotification() {
-    }
-    
-    public StorageSourceNotification(String tableName, Action action, Set<Object> keys) {
-        this.tableName = tableName;
-        this.action = action;
-        this.keys = keys;
-    }
-    
-    public String getTableName() {
-        return tableName;
-    }
-    
-    public Action getAction() {
-        return action;
-    }
-    
-    public Set<Object> getKeys() {
-        return keys;
-    }
-    
-    public void setTableName(String tableName) {
-        this.tableName = tableName;
-    }
-    
-    public void setAction(Action action) {
-        this.action = action;
-    }
-    
-    public void setKeys(Set<Object> keys) {
-        this.keys = keys;
-    }
-    
-    /* (non-Javadoc)
-     * @see java.lang.Object#hashCode()
-     */
-    @Override
-    public int hashCode() {
-        final int prime = 7867;
-        int result = 1;
-        result = prime * result + tableName.hashCode();
-        result = prime * result + action.hashCode();
-        result = prime * result + keys.hashCode();
-        return result;
-    }
-    
-    @Override
-    public boolean equals(Object obj) {
-        if (this == obj)
-            return true;
-        if (obj == null)
-            return false;
-        if (!(obj instanceof StorageSourceNotification))
-            return false;
-        StorageSourceNotification other = (StorageSourceNotification) obj;
-        if (tableName == null) {
-            if (other.tableName != null)
-                return false;
-        } else if (!tableName.equals(other.tableName))
-            return false;
-        if (action == null) {
-            if (other.action != null)
-                return false;
-        } else if (action != other.action)
-            return false;
-        if (keys == null) {
-            if (other.keys != null)
-                return false;
-        } else if (!keys.equals(other.keys))
-            return false;
-        return true;
-    }
-    
-    @Override
-    public String toString() {
-        return ("StorageNotification[table=" + tableName + "; action=" +
-                 action.toString() + "; keys=" + keys.toString() + "]");
-    }
-}
diff --git a/src/main/java/net/floodlightcontroller/storage/SynchronousExecutorService.java b/src/main/java/net/floodlightcontroller/storage/SynchronousExecutorService.java
deleted file mode 100644
index f1e7cd3..0000000
--- a/src/main/java/net/floodlightcontroller/storage/SynchronousExecutorService.java
+++ /dev/null
@@ -1,177 +0,0 @@
-/**
-*    Copyright 2011, Big Switch Networks, Inc. 
-*    Originally created by David Erickson, Stanford University
-* 
-*    Licensed under the Apache License, Version 2.0 (the "License"); you may
-*    not use this file except in compliance with the License. You may obtain
-*    a copy of the License at
-*
-*         http://www.apache.org/licenses/LICENSE-2.0
-*
-*    Unless required by applicable law or agreed to in writing, software
-*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-*    License for the specific language governing permissions and limitations
-*    under the License.
-**/
-
-package net.floodlightcontroller.storage;
-
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.List;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Callable;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.Future;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-
-public class SynchronousExecutorService implements ExecutorService {
-
-    class SynchronousFuture<T> implements Future<T> {
-
-        T result;
-        Exception exc;
-        
-        public SynchronousFuture() {
-        }
-        
-        public SynchronousFuture(T result) {
-            this.result = result;
-        }
-        
-        public SynchronousFuture(Exception exc) {
-            this.exc = exc;
-        }
-        
-        @Override
-        public boolean cancel(boolean mayInterruptIfRunning) {
-            return false;
-        }
-
-        @Override
-        public boolean isCancelled() {
-            return false;
-        }
-
-        @Override
-        public boolean isDone() {
-            return true;
-        }
-
-        @Override
-        public T get() throws InterruptedException, ExecutionException {
-            if (exc != null)
-                throw new ExecutionException(exc);
-            return result;
-        }
-
-        @Override
-        public T get(long timeout, TimeUnit unit) throws InterruptedException,
-                ExecutionException, TimeoutException {
-            return get();
-        }
-    }
-    
-    @Override
-    public void shutdown() {
-    }
-
-    @Override
-    public List<Runnable> shutdownNow() {
-        return null;
-    }
-
-    @Override
-    public boolean isShutdown() {
-        return false;
-    }
-
-    @Override
-    public boolean isTerminated() {
-        return false;
-    }
-
-    @Override
-    public boolean awaitTermination(long timeout, TimeUnit unit)
-            throws InterruptedException {
-        return false;
-    }
-
-    @Override
-    public <T> List<Future<T>> invokeAll(Collection<? extends Callable<T>> tasks)
-            throws InterruptedException {
-        List<Future<T>> l = new ArrayList<Future<T>>();
-        for (Callable<T> task : tasks) {
-            Future<T> future = submit(task);
-            l.add(future);
-        }
-        return l;
-    }
-
-    @Override
-    public <T> List<Future<T>> invokeAll(
-            Collection<? extends Callable<T>> tasks, long timeout, TimeUnit units)
-            throws InterruptedException {
-        return invokeAll(tasks);
-    }
-
-    @Override
-    public <T> T invokeAny(Collection<? extends Callable<T>> tasks)
-            throws InterruptedException, ExecutionException {
-        for (Callable<T> task : tasks) {
-            try {
-                task.call();
-            } catch (Exception e) {
-
-            }
-        }
-        throw new ExecutionException(new Exception("no task completed successfully"));
-    }
-
-    @Override
-    public <T> T invokeAny(Collection<? extends Callable<T>> tasks, long timeout,
-            TimeUnit units) throws InterruptedException, ExecutionException,
-            TimeoutException {
-        return invokeAny(tasks);
-    }
-
-    @Override
-    public <T> Future<T> submit(Callable<T> callable) {
-        try {
-            T result = callable.call();
-            return new SynchronousFuture<T>(result);
-        }
-        catch (Exception exc) {
-            return new SynchronousFuture<T>(exc);
-        }
-    }
-    
-    @Override
-    public Future<?> submit(Runnable runnable) {
-        try {
-            runnable.run();
-            return new SynchronousFuture<Void>();
-        }
-        catch (Exception exc) {
-            return new SynchronousFuture<Void>(exc);
-        }
-    }
-    
-    @Override
-    public <T> Future<T> submit(Runnable runnable, T result) {
-        try {
-            runnable.run();
-            return new SynchronousFuture<T>(result);
-        }
-        catch (Exception exc) {
-            return new SynchronousFuture<T>(exc);
-        }
-    }
-    
-    @Override
-    public void execute(Runnable command) {
-        command.run();
-    }
-}
diff --git a/src/main/java/net/floodlightcontroller/storage/TypeMismatchStorageException.java b/src/main/java/net/floodlightcontroller/storage/TypeMismatchStorageException.java
deleted file mode 100644
index 5643140..0000000
--- a/src/main/java/net/floodlightcontroller/storage/TypeMismatchStorageException.java
+++ /dev/null
@@ -1,42 +0,0 @@
-/**
-*    Copyright 2011, Big Switch Networks, Inc. 
-*    Originally created by David Erickson, Stanford University
-* 
-*    Licensed under the Apache License, Version 2.0 (the "License"); you may
-*    not use this file except in compliance with the License. You may obtain
-*    a copy of the License at
-*
-*         http://www.apache.org/licenses/LICENSE-2.0
-*
-*    Unless required by applicable law or agreed to in writing, software
-*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-*    License for the specific language governing permissions and limitations
-*    under the License.
-**/
-
-package net.floodlightcontroller.storage;
-
-public class TypeMismatchStorageException extends StorageException {
-
-    private static final long serialVersionUID = -7923586656854871345L;
-
-    private static String makeExceptionMessage(String requestedType, String actualType, String columnName) {
-        if (requestedType == null)
-            requestedType = "???";
-        if (actualType == null)
-            actualType = "???";
-        if (columnName == null)
-            columnName = "???";
-        String message = "The requested type (" + requestedType + ") does not match the actual type (" + actualType + ") of the value for column \"" + columnName + "\".";
-        return message;
-    }
-    
-    public TypeMismatchStorageException() {
-        super(makeExceptionMessage(null, null, null));
-    }
-    
-    public TypeMismatchStorageException(String requestedType, String actualType, String columnName) {
-        super(makeExceptionMessage(requestedType, actualType, columnName));
-    }
-}
diff --git a/src/main/java/net/floodlightcontroller/storage/memory/MemoryStorageSource.java b/src/main/java/net/floodlightcontroller/storage/memory/MemoryStorageSource.java
deleted file mode 100644
index 8a69eca..0000000
--- a/src/main/java/net/floodlightcontroller/storage/memory/MemoryStorageSource.java
+++ /dev/null
@@ -1,198 +0,0 @@
-/**
-*    Copyright 2011, Big Switch Networks, Inc. 
-*    Originally created by David Erickson, Stanford University
-* 
-*    Licensed under the Apache License, Version 2.0 (the "License"); you may
-*    not use this file except in compliance with the License. You may obtain
-*    a copy of the License at
-*
-*         http://www.apache.org/licenses/LICENSE-2.0
-*
-*    Unless required by applicable law or agreed to in writing, software
-*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-*    License for the specific language governing permissions and limitations
-*    under the License.
-**/
-
-package net.floodlightcontroller.storage.memory;
-
-import net.floodlightcontroller.core.module.FloodlightModuleContext;
-import net.floodlightcontroller.perfmon.IPktInProcessingTimeService;
-import net.floodlightcontroller.storage.nosql.NoSqlStorageSource;
-import net.floodlightcontroller.storage.SynchronousExecutorService;
-import net.floodlightcontroller.storage.IStorageSourceService;
-import net.floodlightcontroller.core.module.IFloodlightService;
-
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import net.floodlightcontroller.storage.StorageException;
-
-public class MemoryStorageSource extends NoSqlStorageSource {
-    
-    private Map<String, MemoryTable> tableMap = new HashMap<String,MemoryTable>();
-    IPktInProcessingTimeService pktinProcessingTime;
-    
-    synchronized private MemoryTable getTable(String tableName, boolean create) {
-        MemoryTable table = tableMap.get(tableName);
-        if (table == null) {
-            if (!create)
-                throw new StorageException("Table " + tableName + " does not exist");
-            table = new MemoryTable(tableName);
-            tableMap.put(tableName, table);
-        }
-        return table;
-    }
-    
-    @Override
-    protected Collection<Map<String,Object>> getAllRows(String tableName, String[] columnNameList) {
-        MemoryTable table = getTable(tableName, false);
-        return table.getAllRows();
-    }
-    
-    @Override
-    protected Map<String,Object> getRow(String tableName, String[] columnNameList, Object rowKey) {
-        MemoryTable table = getTable(tableName, false);
-        return table.getRow(rowKey);
-    }
-    
-    @Override
-    protected List<Map<String,Object>> executeEqualityQuery(String tableName,
-            String[] columnNameList, String predicateColumnName, Comparable<?> value) {
-        MemoryTable table = getTable(tableName, false);
-        List<Map<String,Object>> result = new ArrayList<Map<String,Object>>();
-        synchronized (table) {
-            Collection<Map<String,Object>> allRows = table.getAllRows();
-            for (Map<String,Object> row : allRows) {
-                Object v = row.get(predicateColumnName);
-                if (value.equals(v)) {
-                    result.add(row);
-                }
-            }
-        }
-        return result;
-    }
-    
-    @SuppressWarnings({ "unchecked", "rawtypes" })
-    @Override
-    protected List<Map<String,Object>> executeRangeQuery(String tableName,
-            String[] columnNameList, String predicateColumnName,
-            Comparable<?> startValue, boolean startInclusive, Comparable<?> endValue, boolean endInclusive) {
-        MemoryTable table = getTable(tableName, false);
-        List<Map<String,Object>> result = new ArrayList<Map<String,Object>>();
-        synchronized (table) {
-            Collection<Map<String,Object>> allRows = table.getAllRows();
-            for (Map<String,Object> row : allRows) {
-                Comparable value = (Comparable) row.get(predicateColumnName);
-                if (value != null) {
-                    int compareResult = value.compareTo(startValue);
-                    if ((compareResult > 0) || (startInclusive && (compareResult >= 0))) {
-                        compareResult = value.compareTo(endValue);
-                        if ((compareResult < 0) || (startInclusive && (compareResult <= 0))) {
-                            result.add(row);
-                        }
-                    }
-                }
-            }
-        }
-        return result;
-    }
-    
-    @Override
-    protected void insertRows(String tableName, List<Map<String,Object>> insertRowList) {
-        MemoryTable table = getTable(tableName, false);
-        String primaryKeyName = getTablePrimaryKeyName(tableName);
-        synchronized (table) {
-            for (Map<String,Object> row : insertRowList) {
-                Object primaryKey = row.get(primaryKeyName);
-                if (primaryKey == null) {
-                    if (primaryKeyName.equals(DEFAULT_PRIMARY_KEY_NAME)) {
-                        row = new HashMap<String,Object>(row);
-                        primaryKey = table.getNextId();
-                        row.put(primaryKeyName, primaryKey);
-                    }
-                }
-                table.insertRow(primaryKey, row);
-            }
-        }
-    }
-    
-    @Override
-    protected void updateRows(String tableName, Set<Object> rowKeys, Map<String,Object> updateRowList) {
-        MemoryTable table = getTable(tableName, false);
-        synchronized (table) {
-            for (Object rowKey : rowKeys) {
-                Map<String,Object> row = table.getRow(rowKey);
-                if (row == null)
-                    row = table.newRow(rowKey);
-                for (Map.Entry<String,Object> entry: updateRowList.entrySet()) {
-                    row.put(entry.getKey(), entry.getValue());
-                }
-            }
-        }
-    }
-    
-    @Override
-    protected void updateRowsImpl(String tableName, List<Map<String,Object>> updateRowList) {
-        MemoryTable table = getTable(tableName, false);
-        String primaryKeyName = getTablePrimaryKeyName(tableName);
-        synchronized (table) {
-            for (Map<String,Object> updateRow : updateRowList) {
-                Object rowKey = updateRow.get(primaryKeyName);
-                if (rowKey == null)
-                    throw new StorageException("Primary key not found.");
-                Map<String,Object> row = table.getRow(rowKey);
-                if (row == null)
-                    row = table.newRow(rowKey);
-                for (Map.Entry<String,Object> entry: updateRow.entrySet()) {
-                    row.put(entry.getKey(), entry.getValue());
-                }
-            }
-        }
-    }
-    
-    @Override
-    protected void deleteRowsImpl(String tableName, Set<Object> rowKeys) {
-        MemoryTable table = getTable(tableName, false);
-        synchronized (table) {
-            for (Object rowKey : rowKeys) {
-                table.deleteRow(rowKey);
-            }
-        }
-    }
-    
-    @Override
-    public void createTable(String tableName, Set<String> indexedColumnNames) {
-        super.createTable(tableName, indexedColumnNames);
-        getTable(tableName, true);
-    }
-    
-    public void setPktinProcessingTime(
-            IPktInProcessingTimeService pktinProcessingTime) {
-        this.pktinProcessingTime = pktinProcessingTime;
-    }
-
-    // IFloodlightModule methods
-
-    @Override
-    public void startUp(FloodlightModuleContext context) {
-        super.startUp(context);
-        executorService = new SynchronousExecutorService();
-    }
-
-    @Override
-    public Map<Class<? extends IFloodlightService>,
-               IFloodlightService> getServiceImpls() {
-        Map<Class<? extends IFloodlightService>,
-            IFloodlightService> m =
-                new HashMap<Class<? extends IFloodlightService>,
-                            IFloodlightService>();
-        m.put(IStorageSourceService.class, this);
-        return m;
-    }
-}
diff --git a/src/main/java/net/floodlightcontroller/storage/memory/MemoryTable.java b/src/main/java/net/floodlightcontroller/storage/memory/MemoryTable.java
deleted file mode 100644
index f87ee45..0000000
--- a/src/main/java/net/floodlightcontroller/storage/memory/MemoryTable.java
+++ /dev/null
@@ -1,72 +0,0 @@
-/**
-*    Copyright 2011, Big Switch Networks, Inc. 
-*    Originally created by David Erickson, Stanford University
-* 
-*    Licensed under the Apache License, Version 2.0 (the "License"); you may
-*    not use this file except in compliance with the License. You may obtain
-*    a copy of the License at
-*
-*         http://www.apache.org/licenses/LICENSE-2.0
-*
-*    Unless required by applicable law or agreed to in writing, software
-*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-*    License for the specific language governing permissions and limitations
-*    under the License.
-**/
-
-package net.floodlightcontroller.storage.memory;
-
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.TreeMap;
-
-public class MemoryTable {
-
-    private String tableName;
-    private Map<Object,Map<String,Object>> rowMap;
-    private int nextId;
-    
-    MemoryTable(String tableName) {
-        this.tableName = tableName;
-        rowMap = new TreeMap<Object,Map<String,Object>>();
-        nextId = 0;
-    }
-    
-    String getTableName() {
-        return tableName;
-    }
-    
-    Collection<Map<String,Object>> getAllRows() {
-        return rowMap.values();
-    }
-    
-    Map<String,Object> getRow(Object key) {
-        Map<String,Object> row = rowMap.get(key);
-        return row;
-    }
-    
-    // rkv: Do we still need this? Probably needs to be tweaked a bit
-    // to work with the support for specifying which column to use as the
-    // primary key
-    Map<String,Object> newRow(Object key) {
-        Map<String,Object> row = new HashMap<String, Object>();
-        row.put("id", key);
-        rowMap.put(key, row);
-        return row;
-    }
-    
-    void insertRow(Object key, Map<String,Object> rowValues) {
-        assert(key != null);
-        rowMap.put(key, rowValues);
-    }
-    
-    void deleteRow(Object rowKey) {
-        rowMap.remove(rowKey);
-    }
-    
-    Integer getNextId() {
-        return new Integer(++nextId);
-    }
-}
diff --git a/src/main/java/net/floodlightcontroller/storage/nosql/NoSqlQuery.java b/src/main/java/net/floodlightcontroller/storage/nosql/NoSqlQuery.java
deleted file mode 100644
index 05f8fc7..0000000
--- a/src/main/java/net/floodlightcontroller/storage/nosql/NoSqlQuery.java
+++ /dev/null
@@ -1,77 +0,0 @@
-/**
-*    Copyright 2011, Big Switch Networks, Inc. 
-*    Originally created by David Erickson, Stanford University
-* 
-*    Licensed under the Apache License, Version 2.0 (the "License"); you may
-*    not use this file except in compliance with the License. You may obtain
-*    a copy of the License at
-*
-*         http://www.apache.org/licenses/LICENSE-2.0
-*
-*    Unless required by applicable law or agreed to in writing, software
-*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-*    License for the specific language governing permissions and limitations
-*    under the License.
-**/
-
-package net.floodlightcontroller.storage.nosql;
-
-import java.util.HashMap;
-import java.util.Map;
-
-import net.floodlightcontroller.storage.IPredicate;
-import net.floodlightcontroller.storage.IQuery;
-import net.floodlightcontroller.storage.RowOrdering;
-
-public class NoSqlQuery implements IQuery {
-
-    private String tableName;
-    private String[] columnNameList;
-    private IPredicate predicate;
-    private RowOrdering rowOrdering;
-    private Map<String,Comparable<?>> parameterMap;
-    
-    NoSqlQuery(String className, String[] columnNameList, IPredicate predicate, RowOrdering rowOrdering) {
-        this.tableName = className;
-        this.columnNameList = columnNameList;
-        this.predicate = predicate;
-        this.rowOrdering = rowOrdering;
-    }
-    
-    @Override
-    public void setParameter(String name, Object value) {
-        if (parameterMap == null)
-            parameterMap = new HashMap<String,Comparable<?>>();
-        parameterMap.put(name, (Comparable<?>)value);
-    }
-
-    @Override
-    public String getTableName() {
-        return tableName;
-    }
-    
-    String[] getColumnNameList() {
-        return columnNameList;
-    }
-    
-    IPredicate getPredicate() {
-        return predicate;
-    }
-    
-    RowOrdering getRowOrdering() {
-        return rowOrdering;
-    }
-    
-    Comparable<?> getParameter(String name) {
-        Comparable<?> value = null;
-        if (parameterMap != null) {
-            value = parameterMap.get(name);
-        }
-        return value;
-    }
-    
-    Map<String,Comparable<?>> getParameterMap() {
-        return parameterMap;
-    }
-}
diff --git a/src/main/java/net/floodlightcontroller/storage/nosql/NoSqlResultSet.java b/src/main/java/net/floodlightcontroller/storage/nosql/NoSqlResultSet.java
deleted file mode 100644
index b3a8c20..0000000
--- a/src/main/java/net/floodlightcontroller/storage/nosql/NoSqlResultSet.java
+++ /dev/null
@@ -1,487 +0,0 @@
-/**
-*    Copyright 2011, Big Switch Networks, Inc. 
-*    Originally created by David Erickson, Stanford University
-* 
-*    Licensed under the Apache License, Version 2.0 (the "License"); you may
-*    not use this file except in compliance with the License. You may obtain
-*    a copy of the License at
-*
-*         http://www.apache.org/licenses/LICENSE-2.0
-*
-*    Unless required by applicable law or agreed to in writing, software
-*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-*    License for the specific language governing permissions and limitations
-*    under the License.
-**/
-
-package net.floodlightcontroller.storage.nosql;
-
-import java.text.ParseException;
-import java.text.SimpleDateFormat;
-import java.util.Date;
-import java.util.Iterator;
-import java.util.List;
-import java.util.ArrayList;
-import java.util.Map;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Set;
-import java.util.TimeZone;
-
-import net.floodlightcontroller.storage.IResultSet;
-import net.floodlightcontroller.storage.NullValueStorageException;
-import net.floodlightcontroller.storage.ResultSetIterator;
-import net.floodlightcontroller.storage.StorageException;
-import net.floodlightcontroller.storage.TypeMismatchStorageException;
-
-public class NoSqlResultSet implements IResultSet {
-
-    NoSqlStorageSource storageSource;
-    String tableName;
-    String primaryKeyName;
-    List<Map<String,Object>> rowList;
-    int currentIndex;
-    Map<String,Object> currentRowUpdate;
-    List<Map<String,Object>> rowUpdateList;
-    Set<Object> rowDeleteSet;
-    Iterator<IResultSet> resultSetIterator;
-    
-    NoSqlResultSet(NoSqlStorageSource storageSource, String tableName, List<Map<String,Object>> rowList) {
-        this.storageSource = storageSource;
-        this.primaryKeyName = storageSource.getTablePrimaryKeyName(tableName);
-        this.tableName = tableName;
-        if (rowList == null)
-            rowList = new ArrayList<Map<String,Object>>();
-        this.rowList = rowList;
-        currentIndex = -1;
-    }
-    
-    void addRow(Map<String,Object> row) {
-        rowList.add(row);
-    }
-    
-    @Override
-    public Map<String,Object> getRow() {
-        if ((currentIndex < 0) || (currentIndex >= rowList.size())) {
-            throw new StorageException("No current row in result set.");
-        }
-        
-        return rowList.get(currentIndex);
-    }
-
-    @Override
-    public boolean containsColumn(String columnName) {
-        return getObject(columnName) != null;
-    }
-        
-    @Override
-    public void close() {
-    }
-
-    private void endCurrentRowUpdate() {
-        if (currentRowUpdate != null) {
-            if (rowUpdateList == null)
-                rowUpdateList = new ArrayList<Map<String,Object>>();
-            rowUpdateList.add(currentRowUpdate);
-            currentRowUpdate = null;
-        }
-    }
-    
-    @Override
-    public boolean next() {
-        endCurrentRowUpdate();
-        currentIndex++;
-        return currentIndex < rowList.size();
-    }
-
-    @Override
-    public void save() {
-        endCurrentRowUpdate();
-        
-        if (rowUpdateList != null) {
-            storageSource.updateRows(tableName, rowUpdateList);
-            rowUpdateList = null;
-        }
-        
-        if (rowDeleteSet != null) {
-            storageSource.deleteRows(tableName, rowDeleteSet);
-            rowDeleteSet = null;
-        }
-    }
-
-    Object getObject(String columnName) {
-        Map<String,Object> row = rowList.get(currentIndex);
-        Object value = row.get(columnName);
-        return value;
-    }
-    
-    @Override
-    public boolean getBoolean(String columnName) {
-        Boolean b = getBooleanObject(columnName);
-        if (b == null)
-            throw new NullValueStorageException(columnName);
-        return b.booleanValue();
-    }
-
-    @Override
-    public byte getByte(String columnName) {
-        Byte b = getByteObject(columnName);
-        if (b == null)
-            throw new NullValueStorageException(columnName);
-        return b.byteValue();
-    }
-
-    @Override
-    public byte[] getByteArray(String columnName) {
-        byte[] b = null;
-        Object obj = getObject(columnName);
-        if (obj != null) {
-            if (!(obj instanceof byte[]))
-                throw new StorageException("Invalid byte array value");
-            b = (byte[])obj;
-        }
-        return b;
-    }
-
-    @Override
-    public double getDouble(String columnName) {
-        Double d = getDoubleObject(columnName);
-        if (d == null)
-            throw new NullValueStorageException(columnName);
-        return d.doubleValue();
-    }
-
-    @Override
-    public float getFloat(String columnName) {
-        Float f = getFloatObject(columnName);
-        if (f == null)
-            throw new NullValueStorageException(columnName);
-        return f.floatValue();
-    }
-
-    @Override
-    public int getInt(String columnName) {
-        Integer i = getIntegerObject(columnName);
-        if (i == null)
-            throw new NullValueStorageException(columnName);
-        return i.intValue();
-    }
-
-    @Override
-    public long getLong(String columnName) {
-        Long l = getLongObject(columnName);
-        if (l == null)
-            throw new NullValueStorageException(columnName);
-        return l.longValue();
-    }
-
-    @Override
-    public short getShort(String columnName) {
-        Short s = getShortObject(columnName);
-        if (s == null)
-            throw new NullValueStorageException(columnName);
-        return s.shortValue();
-    }
-
-    @Override
-    public String getString(String columnName) {
-        Object obj = getObject(columnName);
-        if (obj == null)
-            return null;
-        return obj.toString();
-    }
-
-    @Override
-    public Date getDate(String column) {
-        Date d;
-        Object obj = getObject(column);
-        if (obj == null) {
-            d = null;
-        } else if (obj instanceof Date) {
-            d = (Date) obj;
-        } else {
-            SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS");
-            dateFormat.setTimeZone(TimeZone.getTimeZone("GMT"));
-            try {
-                d = dateFormat.parse(obj.toString());
-            }
-            catch (ParseException exc) {
-                throw new TypeMismatchStorageException(Date.class.getName(), obj.getClass().getName(), column);
-            }
-        }
-        return d;
-    }
-
-
-    @Override
-    public Short getShortObject(String columnName)
-    {
-        Short s;
-        Object obj = getObject(columnName);
-        if (obj instanceof Short) {
-            s = (Short)obj;
-        } else if (obj != null) {
-            try {
-                s = Short.parseShort(obj.toString());
-            }
-            catch (NumberFormatException exc) {
-                throw new TypeMismatchStorageException(Short.class.getName(), obj.getClass().getName(), columnName);
-            }
-        } else {
-            s = null;
-        }
-        return s;
-    }
-    
-    @Override
-    public Integer getIntegerObject(String columnName)
-    {
-        Integer i;
-        Object obj = getObject(columnName);
-        if (obj instanceof Integer) {
-            i = (Integer)obj;
-        } else if (obj != null) {
-            try {
-                i = Integer.parseInt(obj.toString());
-            }
-            catch (NumberFormatException exc) {
-                throw new TypeMismatchStorageException(Integer.class.getName(), obj.getClass().getName(), columnName);
-            }
-        } else {
-            i = null;
-        }
-        return i;
-    }
-
-    @Override
-    public Long getLongObject(String columnName)
-    {
-        Long l;
-        Object obj = getObject(columnName);
-        if (obj instanceof Long) {
-            l = (Long)obj;
-        } else if (obj != null) {
-            try {
-                l = Long.parseLong(obj.toString());
-            }
-            catch (NumberFormatException exc) {
-                throw new TypeMismatchStorageException(Long.class.getName(), obj.getClass().getName(), columnName);
-            }
-        } else {
-            l = null;
-        }
-        return l;
-    }
-
-    @Override
-    public Float getFloatObject(String columnName)
-    {
-        Float f;
-        Object obj = getObject(columnName);
-        if (obj instanceof Float) {
-            f = (Float)obj;
-        } else if (obj != null) {
-            try {
-                f = Float.parseFloat(obj.toString());
-            }
-            catch (NumberFormatException exc) {
-                throw new TypeMismatchStorageException(Float.class.getName(), obj.getClass().getName(), columnName);
-            }
-        } else {
-            f = null;
-        }
-        return f;
-    }
-
-    @Override
-    public Double getDoubleObject(String columnName)
-    {
-        Double d;
-        Object obj = getObject(columnName);
-        if (obj instanceof Double) {
-            d = (Double)obj;
-        } else if (obj != null) {
-            try {
-                d = Double.parseDouble(obj.toString());
-            }
-            catch (NumberFormatException exc) {
-                throw new TypeMismatchStorageException(Double.class.getName(), obj.getClass().getName(), columnName);
-            }
-        } else {
-            d = null;
-        }
-        return d;
-    }
-
-    @Override
-    public Boolean getBooleanObject(String columnName)
-    {
-        Boolean b;
-        Object obj = getObject(columnName);
-        if (obj instanceof Boolean) {
-            b = (Boolean)obj;
-        } else if (obj != null) {
-            try {
-                b = Boolean.parseBoolean(obj.toString());
-            }
-            catch (NumberFormatException exc) {
-                throw new TypeMismatchStorageException(Boolean.class.getName(), obj.getClass().getName(), columnName);
-            }
-        } else {
-            b = null;
-        }
-        return b;
-    }
-
-    @Override
-    public Byte getByteObject(String columnName)
-    {
-        Byte b;
-        Object obj = getObject(columnName);
-        if (obj instanceof Byte) {
-            b = (Byte)obj;
-        } else if (obj != null) {
-            try {
-                b = Byte.parseByte(obj.toString());
-            }
-            catch (NumberFormatException exc) {
-                throw new TypeMismatchStorageException(Byte.class.getName(), obj.getClass().getName(), columnName);
-            }
-        } else {
-            b = null;
-        }
-        return b;
-    }
-
-    
-    @Override
-    public boolean isNull(String columnName)
-    {
-        Object obj = getObject(columnName);
-        return (obj == null);
-    }
-
-    private void addRowUpdate(String column, Object value) {
-        if (currentRowUpdate == null) {
-            currentRowUpdate = new HashMap<String,Object>();
-            Object key = rowList.get(currentIndex).get(primaryKeyName);
-            currentRowUpdate.put(primaryKeyName, key);
-        }
-        currentRowUpdate.put(column, value);
-    }
-    
-    @Override
-    public void setBoolean(String columnName, boolean value) {
-        addRowUpdate(columnName, value);
-    }
-
-    @Override
-    public void setByte(String columnName, byte value) {
-        addRowUpdate(columnName, value);
-    }
-
-    @Override
-    public void setByteArray(String columnName, byte[] byteArray) {
-        addRowUpdate(columnName, byteArray);
-    }
-
-    @Override
-    public void setDouble(String columnName, double value) {
-        addRowUpdate(columnName, value);
-    }
-
-    @Override
-    public void setFloat(String columnName, float value) {
-        addRowUpdate(columnName, value);
-    }
-
-    @Override
-    public void setInt(String columnName, int value) {
-        addRowUpdate(columnName, value);
-    }
-
-    @Override
-    public void setLong(String columnName, long value) {
-        addRowUpdate(columnName, value);
-    }
-
-    @Override
-    public void setShort(String columnName, short value) {
-        addRowUpdate(columnName, value);
-    }
-
-    @Override
-    public void setString(String columnName, String value) {
-        addRowUpdate(columnName, value);
-    }
-
-    @Override
-    public void setShortObject(String columnName, Short value)
-    {
-        addRowUpdate(columnName, value);
-    }
-    
-    @Override
-    public void setIntegerObject(String columnName, Integer value)
-    {
-        addRowUpdate(columnName, value);
-    }
-
-    @Override
-    public void setLongObject(String columnName, Long value)
-    {
-        addRowUpdate(columnName, value);
-    }
-
-    @Override
-    public void setFloatObject(String columnName, Float value)
-    {
-        addRowUpdate(columnName, value);
-    }
-
-    @Override
-    public void setDoubleObject(String columnName, Double value)
-    {
-        addRowUpdate(columnName, value);
-    }
-
-    @Override
-    public void setBooleanObject(String columnName, Boolean value)
-    {
-        addRowUpdate(columnName, value);
-    }
-
-    @Override
-    public void setByteObject(String columnName, Byte value)
-    {
-        addRowUpdate(columnName, value);
-    }
-
-    @Override
-    public void setDate(String column, Date value) {
-        addRowUpdate(column, value);
-    }
-
-    
-    public void setNull(String columnName)
-    {
-        addRowUpdate(columnName, null);
-    }
-
-    
-    @Override
-    public void deleteRow() {
-        Object key = (String) rowList.get(currentIndex).get(primaryKeyName);
-        if (rowDeleteSet == null)
-            rowDeleteSet = new HashSet<Object>();
-        rowDeleteSet.add(key);
-    }
-    
-    @Override
-    public Iterator<IResultSet> iterator() {
-        if (resultSetIterator == null)
-            resultSetIterator = new ResultSetIterator(this);
-        return resultSetIterator;
-    }
-}
diff --git a/src/main/java/net/floodlightcontroller/storage/nosql/NoSqlStorageSource.java b/src/main/java/net/floodlightcontroller/storage/nosql/NoSqlStorageSource.java
deleted file mode 100644
index 6624932..0000000
--- a/src/main/java/net/floodlightcontroller/storage/nosql/NoSqlStorageSource.java
+++ /dev/null
@@ -1,823 +0,0 @@
-/**
-*    Copyright 2011, Big Switch Networks, Inc. 
-*    Originally created by David Erickson, Stanford University
-* 
-*    Licensed under the Apache License, Version 2.0 (the "License"); you may
-*    not use this file except in compliance with the License. You may obtain
-*    a copy of the License at
-*
-*         http://www.apache.org/licenses/LICENSE-2.0
-*
-*    Unless required by applicable law or agreed to in writing, software
-*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-*    License for the specific language governing permissions and limitations
-*    under the License.
-**/
-
-package net.floodlightcontroller.storage.nosql;
-
-import java.lang.Class;
-
-import java.text.ParseException;
-import java.text.SimpleDateFormat;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.Comparator;
-import java.util.Date;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.TimeZone;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import net.floodlightcontroller.storage.AbstractStorageSource;
-import net.floodlightcontroller.storage.CompoundPredicate;
-import net.floodlightcontroller.storage.IPredicate;
-import net.floodlightcontroller.storage.IQuery;
-import net.floodlightcontroller.storage.IResultSet;
-import net.floodlightcontroller.storage.OperatorPredicate;
-import net.floodlightcontroller.storage.RowOrdering;
-import net.floodlightcontroller.storage.StorageException;
-import net.floodlightcontroller.storage.StorageSourceNotification;
-import net.floodlightcontroller.storage.TypeMismatchStorageException;
-
-public abstract class NoSqlStorageSource extends AbstractStorageSource {
-    protected final static Logger log = LoggerFactory.getLogger(NoSqlStorageSource.class);
-
-    public enum ColumnIndexMode { NOT_INDEXED, RANGE_INDEXED, EQUALITY_INDEXED };
-    
-    protected static final String DEFAULT_PRIMARY_KEY_NAME = "id";
-    
-    private Map<String,String> tablePrimaryKeyMap = new HashMap<String,String>();
-    private Map<String, Map<String,ColumnIndexMode>> tableIndexedColumnMap =
-        new HashMap<String,Map<String,ColumnIndexMode>>();
-    
-    abstract class NoSqlPredicate {
-
-        public boolean incorporateComparison(String columnName,
-                OperatorPredicate.Operator operator, Comparable<?> value,
-                CompoundPredicate.Operator parentOperator) {
-            return false;
-        }
-        
-        public boolean canExecuteEfficiently() {
-            return false;
-        }
-        
-        public List<Map<String,Object>> execute(String[] columnNames) {
-            assert(false);
-            return null;
-        }
-        
-        abstract public boolean matchesRow(Map<String,Object> row);
-    }
-    
-    @SuppressWarnings({ "unchecked", "rawtypes" })
-    class NoSqlRangePredicate extends NoSqlPredicate {
-        NoSqlStorageSource storageSource;
-        String tableName;
-        String columnName;
-        Comparable<?> startValue;
-        boolean startInclusive;
-        Comparable<?> endValue;
-        boolean endInclusive;
-        
-        NoSqlRangePredicate(NoSqlStorageSource storageSource, String tableName,
-                String columnName, Comparable<?> startValue, boolean startInclusive,
-                Comparable<?> endValue, boolean endInclusive) {
-            this.storageSource = storageSource;
-            this.tableName = tableName;
-            this.columnName = columnName;
-            this.startValue = startValue;
-            this.startInclusive = startInclusive;
-            this.endValue = endValue;
-            this.endInclusive = endInclusive;
-        }
-        
-        public boolean incorporateComparison(String columnName,
-                OperatorPredicate.Operator operator, Comparable<?> value,
-                CompoundPredicate.Operator parentOperator) {
-            
-            assert(operator != null);
-            assert(parentOperator != null);
-            
-            // Must be the same column to incorporate
-            if (!this.columnName.equals(columnName))
-                return false;
-            
-            // The only time we allow a null value is if it's an EQ operator.
-            // In that case we can only incorporate if this predicate is also
-            // a null equality predicate.
-            if (value == null) {
-                return ((operator == OperatorPredicate.Operator.EQ) &&
-                        (startValue == null) && (endValue == null) &&
-                        startInclusive && endInclusive);
-            }
-            
-            // Don't incorporate parameterized values
-            if (value instanceof String) {
-                String s = (String)value;
-                if (s.startsWith("?") && s.endsWith("?")) {
-                    return false;
-                }
-            }
-            
-            if (parentOperator == CompoundPredicate.Operator.AND) {
-                switch (operator) {
-                case EQ:
-                    if (matchesValue(value)) {
-                        startValue = endValue = value;
-                        startInclusive = endInclusive = true;
-                        return true;
-                    }
-                    break;
-                case LT:
-                    if ((endValue == null) || (((Comparable)value).compareTo(endValue) <= 0)) {
-                        endValue = value;
-                        endInclusive = false;
-                        return true;
-                    }
-                    break;
-                case LTE:
-                    if ((endValue == null) || (((Comparable)value).compareTo(endValue) < 0)) {
-                        endValue = value;
-                        endInclusive = true;
-                        return true;
-                    }
-                    break;
-                case GT:
-                    if ((startValue == null) || (((Comparable)value).compareTo(startValue) >= 0)) {
-                        startValue = value;
-                        startInclusive = false;
-                        return true;
-                    }
-                    break;
-                case GTE:
-                    if ((startValue == null) || (((Comparable)value).compareTo(startValue) > 0)) {
-                        startValue = value;
-                        startInclusive = true;
-                        return true;
-                    }
-                    break;
-                }
-            } else {
-                switch (operator) {
-                case EQ:
-                    if (matchesValue(value))
-                        return true;
-                    break;
-                case LT:
-                    if ((endValue == null) || (((Comparable)value).compareTo(endValue) > 0)) {
-                        endValue = value;
-                        endInclusive = false;
-                        return true;
-                    }
-                    break;
-                case LTE:
-                    if ((endValue == null) || (((Comparable)value).compareTo(endValue) >= 0)) {
-                        endValue = value;
-                        endInclusive = true;
-                        return true;
-                    }
-                    break;
-                case GT:
-                    if ((startValue == null) || (((Comparable)value).compareTo(startValue) < 0)) {
-                        startValue = value;
-                        startInclusive = false;
-                        return true;
-                    }
-                    break;
-                case GTE:
-                    if ((startValue == null) || (((Comparable)value).compareTo(startValue) <= 0)) {
-                        startValue = value;
-                        startInclusive = true;
-                        return true;
-                    }
-                    break;
-                }
-            }
-            
-            return false;
-        }
-
-        private boolean isEqualityRange() {
-            return (startValue == endValue) && startInclusive && endInclusive;
-        }
-        
-        public boolean canExecuteEfficiently() {
-            ColumnIndexMode indexMode = storageSource.getColumnIndexMode(tableName, columnName);
-            switch (indexMode) {
-            case NOT_INDEXED:
-                return false;
-            case RANGE_INDEXED:
-                return true;
-            case EQUALITY_INDEXED:
-                return isEqualityRange();
-            }
-            return true;
-        }
-
-        public List<Map<String,Object>> execute(String columnNameList[]) {
-            List<Map<String,Object>> rowList;
-            if (isEqualityRange())
-                rowList = storageSource.executeEqualityQuery(tableName, columnNameList, columnName, startValue);
-            else
-                rowList = storageSource.executeRangeQuery(tableName, columnNameList, columnName,
-                        startValue, startInclusive, endValue, endInclusive);
-                
-            return rowList;
-        }
-        
-        Comparable<?> coerceValue(Comparable<?> value, Class targetClass) {
-            
-            if (value == null)
-                return null;
-            
-            if (value.getClass() == targetClass)
-                return value;
-            
-            // FIXME: For now we convert by first converting the source value to a
-            // string and then converting to the target type. This logic probably needs
-            // another pass to make it more robust/optimized.
-            
-            String s = value.toString();
-            Comparable<?> obj = null;
-            
-            try {
-                if (targetClass == Integer.class) {
-                    obj = new Integer(s);
-                } else if (targetClass == Long.class) {
-                    obj = new Long(s);
-                } else if (targetClass == Short.class) {
-                    obj = new Short(s);
-                } else if (targetClass == Boolean.class) {
-                    obj = new Boolean(s);
-                } else if (targetClass == Float.class) {
-                    obj = new Float(s);
-                } else if (targetClass == Double.class) {
-                    obj = new Double(s);
-                } else if (targetClass == Byte.class) {
-                    obj = new Byte(s);
-                } else if (targetClass == String.class) {
-                    obj = s;
-                } else if (targetClass == Date.class) {
-                    SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS");
-                    dateFormat.setTimeZone(TimeZone.getTimeZone("GMT"));
-                    try {
-                        obj = dateFormat.parse(s);
-                    }
-                    catch (ParseException exc) {
-                        throw new TypeMismatchStorageException(Date.class.getName(), value.getClass().getName(), "???");
-                    }
-                }
-            }
-            catch (Exception exc) {
-                // Ignore the exception here. In this case obj will not be set, so we'll
-                // throw the StorageException below when we check for a null obj.
-            }
-            
-            if (obj == null)
-                throw new StorageException("Column value could not be coerced to the correct type");
-            
-            return obj;
-        }
-        
-        boolean matchesValue(Comparable<?> value) {
-            boolean isNullEqPredicate = (startValue == null) && (endValue == null) && startInclusive && endInclusive;
-            if (value == null)
-                return isNullEqPredicate;
-
-            if (isNullEqPredicate)
-                return false;
-            
-            int result;
-            Comparable<?> coercedValue;
-            if (startValue != null) {
-                coercedValue = coerceValue(value, startValue.getClass());
-                result = ((Comparable)coercedValue).compareTo(startValue);
-                if ((result < 0) || (!startInclusive && (result == 0)))
-                    return false;
-            }
-            if (endValue != null) {
-                coercedValue = coerceValue(value, endValue.getClass());
-                result = ((Comparable)coercedValue).compareTo(endValue);
-                if ((result > 0) || (!endInclusive && (result == 0)))
-                    return false;
-            }
-            return true;
-        }
-        
-        public boolean matchesRow(Map<String,Object> row) {
-            Comparable value = (Comparable)row.get(columnName);
-            return matchesValue(value);
-        }
-    }
-    
-    class NoSqlOperatorPredicate extends NoSqlPredicate {
-        
-        NoSqlStorageSource storageSource;
-        String columnName;
-        OperatorPredicate.Operator operator;
-        Object value;
-        
-        NoSqlOperatorPredicate(NoSqlStorageSource storageSource, String columnName,
-                OperatorPredicate.Operator operator, Object value) {
-            this.storageSource = storageSource;
-            this.columnName = columnName;
-            this.operator = operator;
-            this.value = value;
-        }
-
-        public boolean incorporateComparison(String columnName,
-                OperatorPredicate.Operator operator, Comparable<?> value,
-                CompoundPredicate.Operator parentOperator) {
-            return false;
-        }
-
-        public boolean canExecuteEfficiently() {
-            return false;
-        }
-
-        public List<Map<String,Object>> execute(String columnNames[]) {
-            throw new StorageException("Unimplemented predicate.");
-        }
-        
-        public boolean matchesRow(Map<String,Object> row) {
-            return false;
-        }
-    }
-    
-    class NoSqlCompoundPredicate extends NoSqlPredicate {
-        
-        NoSqlStorageSource storageSource;
-        CompoundPredicate.Operator operator;
-        boolean negated;
-        List<NoSqlPredicate> predicateList;
-        
-        NoSqlCompoundPredicate(NoSqlStorageSource storageSource, CompoundPredicate.Operator operator,
-                boolean negated, List<NoSqlPredicate> predicateList) {
-            this.storageSource = storageSource;
-            this.operator = operator;
-            this.negated = negated;
-            this.predicateList = predicateList;
-        }
-
-        public boolean incorporateComparison(String columnName,
-                OperatorPredicate.Operator operator, Comparable<?> value,
-                CompoundPredicate.Operator parentOperator) {
-            // It may be possible to incorporate other operator predicate into this one,
-            // but it would need to take into account the negated attribute and I'd need
-            // to think about it some more to make sure it was correct, so for now we just
-            // disallow incorporation
-            //if (parentOperator == this.operator) {
-            //    for (NoSqlPredicate predicate: predicateList) {
-            //        if (predicate.incorporateComparison(columnName, operator, value, parentOperator))
-            //            return true;
-            //    }
-            //}
-            return false;
-        }
-
-        public boolean canExecuteEfficiently() {
-            if (operator == CompoundPredicate.Operator.AND) {
-                for (NoSqlPredicate predicate: predicateList) {
-                    if (predicate.canExecuteEfficiently()) {
-                        return true;
-                    }
-                }
-                return false;
-            } else {
-                for (NoSqlPredicate predicate: predicateList) {
-                    if (!predicate.canExecuteEfficiently()) {
-                        return false;
-                    }
-                }
-                return true;
-            }
-        }
-
-        @SuppressWarnings({ "unchecked", "rawtypes" })
-        class RowComparator implements Comparator<Map<String,Object>> {
-            private String primaryKeyName;
-            
-            public RowComparator(String primaryKeyName) {
-                this.primaryKeyName = primaryKeyName;
-            }
-            
-            public int compare(Map<String,Object> row1, Map<String,Object> row2) {
-                Comparable key1 = (Comparable)row1.get(primaryKeyName);
-                Comparable key2 = (Comparable)row2.get(primaryKeyName);
-                return key1.compareTo(key2);
-            }
-            
-            public boolean equals(Object obj) {
-                if (!(obj instanceof RowComparator))
-                    return false;
-                RowComparator rc = (RowComparator)obj;
-                if (rc.primaryKeyName == null)
-                    return this.primaryKeyName == null;
-                return rc.primaryKeyName.equals(this.primaryKeyName);
-            }
-        }
-
-        @SuppressWarnings({ "unchecked", "rawtypes" })
-        private List<Map<String,Object>> combineRowLists(String primaryKeyName,
-                List<Map<String,Object>> list1, List<Map<String,Object>> list2,
-                CompoundPredicate.Operator operator) {
-            ArrayList<Map<String,Object>> combinedRowList = new ArrayList<Map<String,Object>>();
-            RowComparator rc = new RowComparator(primaryKeyName);
-            Collections.sort(list1, rc);
-            Collections.sort(list2,rc);
-            
-            Iterator<Map<String,Object>> iterator1 = list1.iterator();
-            Iterator<Map<String,Object>> iterator2 = list2.iterator();
-            boolean update1 = true;
-            boolean update2 = true;
-            Map<String,Object> row1 = null;
-            Map<String,Object> row2 = null;
-            Comparable<?> key1 = null;
-            Comparable<?> key2 = null;
-            
-            while (true) {
-                if (update1) {
-                    if (iterator1.hasNext()) {
-                        row1 = iterator1.next();
-                        key1 = (Comparable<?>)row1.get(primaryKeyName);
-                    } else {
-                        row1 = null;
-                    }
-                }
-                if (update2) {
-                    if (iterator2.hasNext()) {
-                        row2 = iterator1.next();
-                        key2 = (Comparable<?>)row2.get(primaryKeyName);
-                    } else {
-                        row2 = null;
-                    }
-                }
-                if (operator == CompoundPredicate.Operator.AND) {
-                    if ((row1 == null) || (row2 == null))
-                        break;
-                    if (key1.equals(key2))
-                        combinedRowList.add(row1);
-                } else {
-                    if (row1 == null) {
-                        if (row2 == null)
-                            break;
-                        combinedRowList.add(row2);
-                    } else if ((row2 == null) || (((Comparable)key1).compareTo(key2) <= 0)) {
-                        combinedRowList.add(row2);
-                    } else {
-                        combinedRowList.add(row1);
-                    }
-                }
-                
-                update1 = (key2 == null) || (((Comparable)key1).compareTo(key2) <= 0);
-                update2 = (key1 == null) || (((Comparable)key2).compareTo(key1) <= 0);
-            }
-            
-            return combinedRowList;
-        }
-        
-        public List<Map<String,Object>> execute(String columnNames[]) {
-            List<Map<String,Object>> combinedRowList = null;
-            for (NoSqlPredicate predicate: predicateList) {
-                List<Map<String,Object>> rowList = predicate.execute(columnNames);
-                if (combinedRowList != null) {
-                    combinedRowList = combineRowLists("id", combinedRowList, rowList, operator);
-                } else {
-                    combinedRowList = rowList;
-                }
-            }
-            return combinedRowList;
-        }
-
-        public boolean matchesRow(Map<String,Object> row) {
-            if (operator == CompoundPredicate.Operator.AND) {
-                for (NoSqlPredicate predicate : predicateList) {
-                    if (!predicate.matchesRow(row))  {
-                        return false;
-                    }
-                }
-                return true;
-            } else {
-                for (NoSqlPredicate predicate : predicateList) {
-                    if (predicate.matchesRow(row))  {
-                        return true;
-                    }
-                }
-                return false;
-                
-            }
-        }
-    }
-    
-    public NoSqlStorageSource() {
-        super();
-    }
-    
-    @Override
-    public void createTable(String tableName, Set<String> indexedColumns) {
-        super.createTable(tableName, indexedColumns);
-        if (indexedColumns == null) return;
-        for (String columnName : indexedColumns) {
-            setColumnIndexMode(tableName, columnName,
-                               ColumnIndexMode.EQUALITY_INDEXED);
-        }
-    }
-
-    public void setTablePrimaryKeyName(String tableName, String primaryKeyName) {
-        if ((tableName == null) || (primaryKeyName == null))
-            throw new NullPointerException();
-        tablePrimaryKeyMap.put(tableName, primaryKeyName);
-    }
-    
-    protected String getTablePrimaryKeyName(String tableName) {
-        String primaryKeyName = tablePrimaryKeyMap.get(tableName);
-        if (primaryKeyName == null)
-            primaryKeyName = DEFAULT_PRIMARY_KEY_NAME;
-        return primaryKeyName;
-    }
-    
-    protected ColumnIndexMode getColumnIndexMode(String tableName, String columnName) {
-        ColumnIndexMode columnIndexMode = null;
-        Map<String, ColumnIndexMode> indexedColumnMap = tableIndexedColumnMap.get(tableName);
-        if (indexedColumnMap != null)
-            columnIndexMode = indexedColumnMap.get(columnName);
-        if (columnIndexMode == null)
-            return ColumnIndexMode.NOT_INDEXED;
-        return columnIndexMode;
-    }
-    
-    public void setColumnIndexMode(String tableName, String columnName, ColumnIndexMode indexMode) {
-        Map<String, ColumnIndexMode> indexedColumnMap = tableIndexedColumnMap.get(tableName);
-        if (indexedColumnMap == null) {
-            indexedColumnMap = new HashMap<String,ColumnIndexMode>();
-            tableIndexedColumnMap.put(tableName, indexedColumnMap);
-        }
-        indexedColumnMap.put(columnName, indexMode);
-    }
-    
-    Comparable<?> getOperatorPredicateValue(OperatorPredicate predicate, Map<String,Comparable<?>> parameterMap) {
-        Comparable<?> value = predicate.getValue();
-        if (value instanceof String) {
-            String stringValue = (String) value;
-            if ((stringValue.charAt(0) == '?') && (stringValue.charAt(stringValue.length()-1) == '?')) {
-                String parameterName = stringValue.substring(1,stringValue.length()-1);
-                value = parameterMap.get(parameterName);
-            }
-        }
-        return value;
-    }
-    
-    NoSqlPredicate convertPredicate(IPredicate predicate, String tableName, Map<String,Comparable<?>> parameterMap) {
-        if (predicate == null)
-            return null;
-        NoSqlPredicate convertedPredicate = null;
-        if (predicate instanceof CompoundPredicate) {
-            CompoundPredicate compoundPredicate = (CompoundPredicate)predicate;
-            ArrayList<NoSqlPredicate> noSqlPredicateList = new ArrayList<NoSqlPredicate>();
-            for (IPredicate childPredicate: compoundPredicate.getPredicateList()) {
-                boolean incorporated = false;
-                if (childPredicate instanceof OperatorPredicate) {
-                    OperatorPredicate childOperatorPredicate = (OperatorPredicate)childPredicate;
-                    for (NoSqlPredicate childNoSqlPredicate: noSqlPredicateList) {
-                        incorporated = childNoSqlPredicate.incorporateComparison(
-                                childOperatorPredicate.getColumnName(), childOperatorPredicate.getOperator(),
-                                getOperatorPredicateValue(childOperatorPredicate, parameterMap),
-                                compoundPredicate.getOperator());
-                        if (incorporated)
-                            break;
-                    }
-                }
-                if (!incorporated) {
-                    NoSqlPredicate noSqlPredicate = convertPredicate(childPredicate, tableName, parameterMap);
-                    noSqlPredicateList.add(noSqlPredicate);
-                }
-            }
-            convertedPredicate = new NoSqlCompoundPredicate(this, compoundPredicate.getOperator(),
-                    compoundPredicate.isNegated(), noSqlPredicateList);
-        } else if (predicate instanceof OperatorPredicate) {
-            OperatorPredicate operatorPredicate = (OperatorPredicate) predicate;
-            Comparable<?> value = getOperatorPredicateValue(operatorPredicate, parameterMap);
-            switch (operatorPredicate.getOperator()) {
-            case EQ:
-                convertedPredicate = new NoSqlRangePredicate(this, tableName,
-                        operatorPredicate.getColumnName(), value, true, value, true);
-                break;
-            case LT:
-                convertedPredicate = new NoSqlRangePredicate(this, tableName,
-                        operatorPredicate.getColumnName(), null, false, value, false);
-                break;
-            case LTE:
-                convertedPredicate = new NoSqlRangePredicate(this, tableName,
-                        operatorPredicate.getColumnName(), null, false, value, true);
-                break;
-            case GT:
-                convertedPredicate = new NoSqlRangePredicate(this, tableName,
-                        operatorPredicate.getColumnName(), value, false, null, false);
-                break;
-            case GTE:
-                convertedPredicate = new NoSqlRangePredicate(this, tableName,
-                        operatorPredicate.getColumnName(), value, true, null, false);
-                break;
-            default:
-                convertedPredicate = new NoSqlOperatorPredicate(this, operatorPredicate.getColumnName(),
-                        operatorPredicate.getOperator(), value);
-            }
-        } else {
-            throw new StorageException("Unknown predicate type");
-        }
-        
-        return convertedPredicate;
-    }
-    
-    @SuppressWarnings({ "unchecked", "rawtypes" })
-    class RowComparator implements Comparator<Map<String,Object>> {
-        private RowOrdering rowOrdering;
-        
-        public RowComparator(RowOrdering rowOrdering) {
-            this.rowOrdering = rowOrdering;
-        }
-        
-        public int compare(Map<String,Object> row1, Map<String,Object> row2) {
-            if (rowOrdering == null)
-                return 0;
-            
-            for (RowOrdering.Item item: rowOrdering.getItemList()) {
-                Comparable key1 = (Comparable)row1.get(item.getColumn());
-                Comparable key2 = (Comparable)row2.get(item.getColumn());
-                int result = key1.compareTo(key2);
-                if (result != 0) {
-                    if (item.getDirection() == RowOrdering.Direction.DESCENDING)
-                        result = -result;
-                    return result;
-                }
-            }
-            
-            return 0;
-        }
-        
-        public boolean equals(Object obj) {
-            if (!(obj instanceof RowComparator))
-                return false;
-            RowComparator rc = (RowComparator)obj;
-            if (rc.rowOrdering == null)
-                return this.rowOrdering == null;
-            return rc.rowOrdering.equals(this.rowOrdering);
-        }
-    }
-    
-    private NoSqlResultSet executeParameterizedQuery(String tableName, String[] columnNameList,
-            IPredicate predicate, RowOrdering rowOrdering, Map<String,Comparable<?>> parameterMap) {
-        NoSqlPredicate noSqlPredicate = convertPredicate(predicate, tableName, parameterMap);
-        List<Map<String,Object>> rowList;
-        if ((noSqlPredicate != null) && noSqlPredicate.canExecuteEfficiently()) {
-            rowList = noSqlPredicate.execute(columnNameList);
-        } else {
-            rowList = new ArrayList<Map<String,Object>>();
-            Collection<Map<String,Object>> allRowList = getAllRows(tableName, columnNameList);
-            for (Map<String,Object> row: allRowList) {
-                if ((noSqlPredicate == null) || noSqlPredicate.matchesRow(row)) {
-                    rowList.add(row);
-                }
-            }
-        }
-        if (rowOrdering != null)
-            Collections.sort(rowList, new RowComparator(rowOrdering));
-            
-        return new NoSqlResultSet(this, tableName, rowList);
-    }
-    
-    @Override
-    public IQuery createQuery(String tableName, String[] columnNameList,
-            IPredicate predicate, RowOrdering rowOrdering) {
-        return new NoSqlQuery(tableName, columnNameList, predicate, rowOrdering);
-    }
-
-    @Override
-    public IResultSet executeQueryImpl(IQuery query) {
-        NoSqlQuery noSqlQuery = (NoSqlQuery) query;
-        return executeParameterizedQuery(noSqlQuery.getTableName(),
-                noSqlQuery.getColumnNameList(), noSqlQuery.getPredicate(),
-                noSqlQuery.getRowOrdering(), noSqlQuery.getParameterMap());
-    }
-
-    protected void sendNotification(String tableName, StorageSourceNotification.Action action,
-            List<Map<String,Object>> rows) {
-        Set<Object> rowKeys = new HashSet<Object>();
-        String primaryKeyName = getTablePrimaryKeyName(tableName);
-        for (Map<String,Object> row : rows) {
-            Object rowKey = row.get(primaryKeyName);
-            rowKeys.add(rowKey);
-        }
-        StorageSourceNotification notification =
-            new StorageSourceNotification(tableName, action, rowKeys);
-        notifyListeners(notification);
-    }
-    
-    protected void sendNotification(String tableName,
-            StorageSourceNotification.Action action, Set<Object> rowKeys) {
-        StorageSourceNotification notification =
-            new StorageSourceNotification(tableName, action, rowKeys);
-        notifyListeners(notification);
-    }
-    
-    protected void insertRowsAndNotify(String tableName, List<Map<String,Object>> insertRowList) {
-        insertRows(tableName, insertRowList);
-        sendNotification(tableName, StorageSourceNotification.Action.MODIFY, insertRowList);
-    }
-
-    @Override
-    public void insertRowImpl(String tableName, Map<String, Object> values) {
-        ArrayList<Map<String,Object>> rowList = new ArrayList<Map<String,Object>>();
-        rowList.add(values);
-        insertRowsAndNotify(tableName, rowList);
-    }
-
-    protected void updateRowsAndNotify(String tableName, Set<Object> rowKeys, Map<String,Object> updateRowList) {
-        updateRows(tableName, rowKeys, updateRowList);
-        sendNotification(tableName, StorageSourceNotification.Action.MODIFY, rowKeys);
-    }
-
-    protected void updateRowsAndNotify(String tableName, List<Map<String,Object>> updateRowList) {
-        updateRows(tableName, updateRowList);
-        sendNotification(tableName, StorageSourceNotification.Action.MODIFY, updateRowList);
-    }
-
-    @Override
-    public void updateMatchingRowsImpl(String tableName, IPredicate predicate, Map<String,Object> values) {
-        String primaryKeyName = getTablePrimaryKeyName(tableName);
-        String[] columnNameList = {primaryKeyName};
-        IResultSet resultSet = executeQuery(tableName, columnNameList, predicate, null);
-        Set<Object> rowKeys = new HashSet<Object>();
-        while (resultSet.next()) {
-            String rowKey = resultSet.getString(primaryKeyName);
-            rowKeys.add(rowKey);
-        }
-        updateRowsAndNotify(tableName, rowKeys, values);
-    }
-    
-    @Override
-    public void updateRowImpl(String tableName, Object rowKey, Map<String,Object> values) {
-        Map<String,Object> valuesWithKey = new HashMap<String,Object>(values);
-        String primaryKeyName = getTablePrimaryKeyName(tableName);
-        valuesWithKey.put(primaryKeyName, rowKey);
-        List<Map<String,Object>> rowList = new ArrayList<Map<String,Object>>();
-        rowList.add(valuesWithKey);
-        updateRowsAndNotify(tableName, rowList);
-    }
-
-   @Override
-    public void updateRowImpl(String tableName, Map<String,Object> values) {
-        List<Map<String,Object>> rowKeys = new ArrayList<Map<String,Object>>();
-        rowKeys.add(values);
-        updateRowsAndNotify(tableName, rowKeys);
-    }
-
-   protected void deleteRowsAndNotify(String tableName, Set<Object> rowKeyList) {
-       deleteRows(tableName, rowKeyList);
-       sendNotification(tableName, StorageSourceNotification.Action.DELETE, rowKeyList);
-   }
-
-    @Override
-    public void deleteRowImpl(String tableName, Object key) {
-        HashSet<Object> keys = new HashSet<Object>();
-        keys.add(key);
-        deleteRowsAndNotify(tableName, keys);
-    }
-
-    @Override
-    public IResultSet getRowImpl(String tableName, Object rowKey) {
-        List<Map<String,Object>> rowList = new ArrayList<Map<String,Object>>();
-        Map<String,Object> row = getRow(tableName, null, rowKey);
-        if (row != null)
-            rowList.add(row);
-        NoSqlResultSet resultSet = new NoSqlResultSet(this, tableName, rowList);
-        return resultSet;
-    }
-   
-    // Below are the methods that must be implemented by the subclasses
-    
-    protected abstract Collection<Map<String,Object>> getAllRows(String tableName, String[] columnNameList);
-    
-    protected abstract Map<String,Object> getRow(String tableName, String[] columnNameList, Object rowKey);
-    
-    protected abstract List<Map<String,Object>> executeEqualityQuery(String tableName,
-            String[] columnNameList, String predicateColumnName, Comparable<?> value);
-    
-    protected abstract List<Map<String,Object>> executeRangeQuery(String tableName,
-            String[] columnNameList, String predicateColumnName,
-            Comparable<?> startValue, boolean startInclusive, Comparable<?> endValue, boolean endInclusive);
-    
-    protected abstract void insertRows(String tableName, List<Map<String,Object>> insertRowList);
-    
-    protected abstract void updateRows(String tableName, Set<Object> rowKeys, Map<String,Object> updateColumnMap);
-}
diff --git a/src/main/java/net/floodlightcontroller/storage/web/StorageNotifyResource.java b/src/main/java/net/floodlightcontroller/storage/web/StorageNotifyResource.java
deleted file mode 100644
index 081c7f9..0000000
--- a/src/main/java/net/floodlightcontroller/storage/web/StorageNotifyResource.java
+++ /dev/null
@@ -1,55 +0,0 @@
-/**
-*    Copyright 2011, Big Switch Networks, Inc. 
-*    Originally created by David Erickson, Stanford University
-* 
-*    Licensed under the Apache License, Version 2.0 (the "License"); you may
-*    not use this file except in compliance with the License. You may obtain
-*    a copy of the License at
-*
-*         http://www.apache.org/licenses/LICENSE-2.0
-*
-*    Unless required by applicable law or agreed to in writing, software
-*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-*    License for the specific language governing permissions and limitations
-*    under the License.
-**/
-
-package net.floodlightcontroller.storage.web;
-
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-import net.floodlightcontroller.storage.IStorageSourceService;
-import net.floodlightcontroller.storage.StorageSourceNotification;
-
-import org.codehaus.jackson.map.ObjectMapper;
-import org.codehaus.jackson.type.TypeReference;
-import org.restlet.resource.Post;
-import org.restlet.resource.ServerResource;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class StorageNotifyResource extends ServerResource {
-    protected final static Logger log = LoggerFactory.getLogger(StorageNotifyResource.class);
-    
-    @Post("json")
-    public Map<String,Object> notify(String entity) throws Exception {
-        List<StorageSourceNotification> notifications = null;
-        ObjectMapper mapper = new ObjectMapper();
-        notifications = 
-            mapper.readValue(entity, 
-                    new TypeReference<List<StorageSourceNotification>>(){});
-        
-        IStorageSourceService storageSource = 
-            (IStorageSourceService)getContext().getAttributes().
-                get(IStorageSourceService.class.getCanonicalName());
-        storageSource.notifyListeners(notifications);
-        
-        HashMap<String, Object> model = new HashMap<String,Object>();
-        model.put("output", "OK");
-        return model;
-    }
-    
-}
diff --git a/src/main/java/net/floodlightcontroller/storage/web/StorageWebRoutable.java b/src/main/java/net/floodlightcontroller/storage/web/StorageWebRoutable.java
deleted file mode 100644
index 681847d..0000000
--- a/src/main/java/net/floodlightcontroller/storage/web/StorageWebRoutable.java
+++ /dev/null
@@ -1,45 +0,0 @@
-/**
-*    Copyright 2011, Big Switch Networks, Inc. 
-*    Originally created by David Erickson, Stanford University
-* 
-*    Licensed under the Apache License, Version 2.0 (the "License"); you may
-*    not use this file except in compliance with the License. You may obtain
-*    a copy of the License at
-*
-*         http://www.apache.org/licenses/LICENSE-2.0
-*
-*    Unless required by applicable law or agreed to in writing, software
-*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-*    License for the specific language governing permissions and limitations
-*    under the License.
-**/
-
-package net.floodlightcontroller.storage.web;
-
-import org.restlet.Context;
-import org.restlet.Restlet;
-import org.restlet.routing.Router;
-
-import net.floodlightcontroller.restserver.RestletRoutable;
-
-/**
- * Creates a router to handle the storage web URIs
- * @author readams
- *
- */
-public class StorageWebRoutable implements RestletRoutable {
-
-    @Override
-    public String basePath() {
-        return "/wm/storage";
-    }
-
-    @Override
-    public Restlet getRestlet(Context context) {
-        Router router = new Router(context);
-        router.attach("/notify/json", StorageNotifyResource.class);
-        return router;
-    }
-
-}
diff --git a/src/main/java/net/floodlightcontroller/topology/TopologyManager.java b/src/main/java/net/floodlightcontroller/topology/TopologyManager.java
index 3e624e7..103cc4d 100644
--- a/src/main/java/net/floodlightcontroller/topology/TopologyManager.java
+++ b/src/main/java/net/floodlightcontroller/topology/TopologyManager.java
@@ -16,10 +16,8 @@
 
 import net.floodlightcontroller.core.FloodlightContext;
 import net.floodlightcontroller.core.IFloodlightProviderService;
-import net.floodlightcontroller.core.IFloodlightProviderService.Role;
 import net.floodlightcontroller.core.IOFMessageListener;
 import net.floodlightcontroller.core.IOFSwitch;
-import net.floodlightcontroller.core.IHAListener;
 import net.floodlightcontroller.core.annotations.LogMessageCategory;
 import net.floodlightcontroller.core.annotations.LogMessageDoc;
 import net.floodlightcontroller.core.module.FloodlightModuleContext;
@@ -27,7 +25,6 @@
 import net.floodlightcontroller.core.module.IFloodlightModule;
 import net.floodlightcontroller.core.module.IFloodlightService;
 import net.floodlightcontroller.core.util.SingletonTask;
-import net.floodlightcontroller.counter.ICounterStoreService;
 import net.floodlightcontroller.packet.BSN;
 import net.floodlightcontroller.packet.Ethernet;
 import net.floodlightcontroller.packet.LLDP;
@@ -44,9 +41,9 @@
 import org.openflow.protocol.OFPacketIn;
 import org.openflow.protocol.OFPacketOut;
 import org.openflow.protocol.OFPort;
+import org.openflow.protocol.OFType;
 import org.openflow.protocol.action.OFAction;
 import org.openflow.protocol.action.OFActionOutput;
-import org.openflow.protocol.OFType;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -59,7 +56,7 @@
 public class TopologyManager implements 
         IFloodlightModule, ITopologyService, 
         IRoutingService, ILinkDiscoveryListener,
-        IOFMessageListener, IHAListener {
+        IOFMessageListener {
 
     protected final static Logger log = LoggerFactory.getLogger(TopologyManager.class);
 
@@ -564,38 +561,6 @@
         return Command.CONTINUE;
     }
 
-    // ***************
-    // IHAListener
-    // ***************
-
-    @Override
-    public void roleChanged(Role oldRole, Role newRole) {
-        switch(newRole) {
-            case MASTER:
-                if (oldRole == Role.SLAVE) {
-                    log.debug("Re-computing topology due " +
-                            "to HA change from SLAVE->MASTER");
-                    newInstanceTask.reschedule(1, TimeUnit.MILLISECONDS);
-                }
-                break;
-            case SLAVE:
-                log.debug("Clearing topology due to " +
-                        "HA change to SLAVE");
-                clearCurrentTopology();
-                break;
-            default:
-            	break;
-        }
-    }
-
-    @Override
-    public void controllerNodeIPsChanged(
-                          Map<String, String> curControllerNodeIPs,
-                          Map<String, String> addedControllerNodeIPs,
-                          Map<String, String> removedControllerNodeIPs) {
-        // no-op
-    }
-
     // *****************
     // IFloodlightModule
     // *****************
@@ -630,7 +595,6 @@
         l.add(ILinkDiscoveryService.class);
         l.add(IThreadPoolService.class);
         l.add(IFloodlightProviderService.class);
-        l.add(ICounterStoreService.class);
         l.add(IRestApiService.class);
         return l;
     }
@@ -661,7 +625,6 @@
         newInstanceTask = new SingletonTask(ses, new UpdateTopologyWorker());
         linkDiscovery.addListener(this);
         floodlightProvider.addOFMessageListener(OFType.PACKET_IN, this);
-        floodlightProvider.addHAListener(this);
         addRestletRoutable();
     }
 
diff --git a/src/main/java/net/floodlightcontroller/util/MACAddress.java b/src/main/java/net/floodlightcontroller/util/MACAddress.java
index b77d4cc..b143bda 100644
--- a/src/main/java/net/floodlightcontroller/util/MACAddress.java
+++ b/src/main/java/net/floodlightcontroller/util/MACAddress.java
@@ -1,5 +1,6 @@
 package net.floodlightcontroller.util;
 
+import java.io.Serializable;
 import java.util.Arrays;
 
 import net.onrc.onos.ofcontroller.util.serializers.MACAddressDeserializer;
@@ -15,7 +16,8 @@
  */
 @JsonDeserialize(using=MACAddressDeserializer.class)
 @JsonSerialize(using=MACAddressSerializer.class)
-public class MACAddress {
+public class MACAddress implements Serializable{
+    private static final long serialVersionUID = 10000L;
     public static final int MAC_ADDRESS_LENGTH = 6;
     private byte[] address = new byte[MAC_ADDRESS_LENGTH];
 
diff --git a/src/main/java/net/onrc/onos/datagrid/HazelcastDatagrid.java b/src/main/java/net/onrc/onos/datagrid/HazelcastDatagrid.java
index 775f952..6483121 100644
--- a/src/main/java/net/onrc/onos/datagrid/HazelcastDatagrid.java
+++ b/src/main/java/net/onrc/onos/datagrid/HazelcastDatagrid.java
@@ -27,7 +27,6 @@
 import net.onrc.onos.ofcontroller.util.FlowPath;
 import net.onrc.onos.ofcontroller.util.serializers.KryoFactory;
 
-import org.openflow.util.HexString;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -99,8 +98,7 @@
 	 *
 	 * @param event the notification event for the entry.
 	 */
-	public void entryAdded(EntryEvent event) {
-	    Long keyLong = (Long)event.getKey();
+	public void entryAdded(EntryEvent<Long, byte[]> event) {
 	    byte[] valueBytes = (byte[])event.getValue();
 
 	    //
@@ -118,8 +116,7 @@
 	 *
 	 * @param event the notification event for the entry.
 	 */
-	public void entryRemoved(EntryEvent event) {
-	    Long keyLong = (Long)event.getKey();
+	public void entryRemoved(EntryEvent<Long, byte[]> event) {
 	    byte[] valueBytes = (byte[])event.getValue();
 
 	    //
@@ -137,8 +134,7 @@
 	 *
 	 * @param event the notification event for the entry.
 	 */
-	public void entryUpdated(EntryEvent event) {
-	    Long keyLong = (Long)event.getKey();
+	public void entryUpdated(EntryEvent<Long, byte[]> event) {
 	    byte[] valueBytes = (byte[])event.getValue();
 
 	    //
@@ -156,7 +152,7 @@
 	 *
 	 * @param event the notification event for the entry.
 	 */
-	public void entryEvicted(EntryEvent event) {
+	public void entryEvicted(EntryEvent<Long, byte[]> event) {
 	    // NOTE: We don't use eviction for this map
 	}
     }
@@ -174,14 +170,7 @@
 	 *
 	 * @param event the notification event for the entry.
 	 */
-	public void entryAdded(EntryEvent event) {
-	    //
-	    // NOTE: Ignore Flow Entries Events originated by this instance
-	    //
-	    if (event.getMember().localMember())
-		return;
-
-	    Long keyLong = (Long)event.getKey();
+	public void entryAdded(EntryEvent<Long, byte[]> event) {
 	    byte[] valueBytes = (byte[])event.getValue();
 
 	    //
@@ -199,14 +188,7 @@
 	 *
 	 * @param event the notification event for the entry.
 	 */
-	public void entryRemoved(EntryEvent event) {
-	    //
-	    // NOTE: Ignore Flow Entries Events originated by this instance
-	    //
-	    if (event.getMember().localMember())
-		return;
-
-	    Long keyLong = (Long)event.getKey();
+	public void entryRemoved(EntryEvent<Long, byte[]> event) {
 	    byte[] valueBytes = (byte[])event.getValue();
 
 	    //
@@ -224,14 +206,7 @@
 	 *
 	 * @param event the notification event for the entry.
 	 */
-	public void entryUpdated(EntryEvent event) {
-	    //
-	    // NOTE: Ignore Flow Entries Events originated by this instance
-	    //
-	    if (event.getMember().localMember())
-		return;
-
-	    Long keyLong = (Long)event.getKey();
+	public void entryUpdated(EntryEvent<Long, byte[]> event) {
 	    byte[] valueBytes = (byte[])event.getValue();
 
 	    //
@@ -249,7 +224,7 @@
 	 *
 	 * @param event the notification event for the entry.
 	 */
-	public void entryEvicted(EntryEvent event) {
+	public void entryEvicted(EntryEvent<Long, byte[]> event) {
 	    // NOTE: We don't use eviction for this map
 	}
     }
@@ -267,8 +242,7 @@
 	 *
 	 * @param event the notification event for the entry.
 	 */
-	public void entryAdded(EntryEvent event) {
-	    String keyString = (String)event.getKey();
+	public void entryAdded(EntryEvent<String, byte[]> event) {
 	    byte[] valueBytes = (byte[])event.getValue();
 
 	    //
@@ -287,8 +261,7 @@
 	 *
 	 * @param event the notification event for the entry.
 	 */
-	public void entryRemoved(EntryEvent event) {
-	    String keyString = (String)event.getKey();
+	public void entryRemoved(EntryEvent<String, byte[]> event) {
 	    byte[] valueBytes = (byte[])event.getValue();
 
 	    //
@@ -307,8 +280,7 @@
 	 *
 	 * @param event the notification event for the entry.
 	 */
-	public void entryUpdated(EntryEvent event) {
-	    String keyString = (String)event.getKey();
+	public void entryUpdated(EntryEvent<String, byte[]> event) {
 	    byte[] valueBytes = (byte[])event.getValue();
 
 	    //
@@ -327,7 +299,7 @@
 	 *
 	 * @param event the notification event for the entry.
 	 */
-	public void entryEvicted(EntryEvent event) {
+	public void entryEvicted(EntryEvent<String, byte[]> event) {
 	    // NOTE: We don't use eviction for this map
 	}
     }
diff --git a/src/main/java/net/onrc/onos/datagrid/IDatagridService.java b/src/main/java/net/onrc/onos/datagrid/IDatagridService.java
index 034fe25..0f03d77 100644
--- a/src/main/java/net/onrc/onos/datagrid/IDatagridService.java
+++ b/src/main/java/net/onrc/onos/datagrid/IDatagridService.java
@@ -170,5 +170,5 @@
      * Send an ARP request to other ONOS instances
      * @param arpRequest The request packet to send
      */
-    public void sendArpRequest(ArpMessage arpMessage);
+    public void sendArpRequest(ArpMessage arpMessage);  
 }
diff --git a/src/main/java/net/onrc/onos/graph/GraphDBOperation.java b/src/main/java/net/onrc/onos/graph/GraphDBOperation.java
index bfd9046..03b4c96 100644
--- a/src/main/java/net/onrc/onos/graph/GraphDBOperation.java
+++ b/src/main/java/net/onrc/onos/graph/GraphDBOperation.java
@@ -176,9 +176,14 @@
 	 */
 	public IPortObject searchPort(String dpid, Short number) {
 		FramedGraph<TitanGraph> fg = conn.getFramedGraph();
+		if ( fg == null ) return null;
 		String id = dpid + number.toString();
-		return (fg != null && fg.getVertices("port_id",id).iterator().hasNext()) ? 
-				fg.getVertices("port_id",id,IPortObject.class).iterator().next() : null;
+		Iterator<IPortObject> ports =  fg.getVertices("port_id",id,IPortObject.class).iterator();
+		if ( ports.hasNext() ) {
+			return ports.next();
+		} else {
+			return null;
+		}
 	}
 
 	/**
@@ -206,10 +211,14 @@
 	 * @param macAddr MAC address to search and get
 	 */
 	public IDeviceObject searchDevice(String macAddr) {
-		// TODO Auto-generated method stub
-		FramedGraph<TitanGraph> fg = conn.getFramedGraph();	
-		return (fg != null && fg.getVertices("dl_addr",macAddr).iterator().hasNext()) ?
-			fg.getVertices("dl_addr",macAddr, IDeviceObject.class).iterator().next() : null;
+		FramedGraph<TitanGraph> fg = conn.getFramedGraph();
+		if ( fg == null ) return null;
+		Iterator<IDeviceObject> devices =  fg.getVertices("dl_addr",macAddr, IDeviceObject.class).iterator();
+		if ( devices.hasNext() ) {
+			return devices.next();
+		} else {
+			return null;
+		}
 	}
 
 	/**
@@ -288,10 +297,13 @@
 	 */
 	public IFlowPath searchFlowPath(FlowId flowId) {
 		FramedGraph<TitanGraph> fg = conn.getFramedGraph();
-		
-		return fg.getVertices("flow_id", flowId.toString()).iterator().hasNext() ? 
-		    fg.getVertices("flow_id", flowId.toString(),
-				   IFlowPath.class).iterator().next() : null;
+		if ( fg == null ) return null;
+		Iterator<IFlowPath> flowpaths = fg.getVertices("flow_id", flowId.toString(), IFlowPath.class).iterator();
+		if ( flowpaths.hasNext() ) {
+			return flowpaths.next();
+		} else {
+			return null;
+		}
 	}
 
 	/**
@@ -299,10 +311,10 @@
 	 * @param flowEntry flow entry object
 	 */
 	public IFlowPath getFlowPathByFlowEntry(IFlowEntry flowEntry) {
-		GremlinPipeline<Vertex, IFlowPath> pipe = new GremlinPipeline<Vertex, IFlowPath>();
-		pipe.start(flowEntry.asVertex());
-		pipe.out("flow");
-		FramedVertexIterable<IFlowPath> r = new FramedVertexIterable(conn.getFramedGraph(), (Iterable) pipe, IFlowPath.class);
+		GremlinPipeline<Vertex, Vertex> pipe = new GremlinPipeline<Vertex, Vertex>();
+		pipe.start(flowEntry.asVertex()).out("flow");
+		FramedVertexIterable<IFlowPath> r = new FramedVertexIterable<IFlowPath>(conn.getFramedGraph(),
+				(Iterable<Vertex>) pipe, IFlowPath.class);
 		return r.iterator().hasNext() ? r.iterator().next() : null;
 	}
 
@@ -348,10 +360,13 @@
 	 */
 	public IFlowEntry searchFlowEntry(FlowEntryId flowEntryId) {
 		FramedGraph<TitanGraph> fg = conn.getFramedGraph();
-		
-		return fg.getVertices("flow_entry_id", flowEntryId.toString()).iterator().hasNext() ? 
-		    fg.getVertices("flow_entry_id", flowEntryId.toString(),
-				   IFlowEntry.class).iterator().next() : null;
+		if ( fg == null ) return null;
+		Iterator<IFlowEntry> flowentries = fg.getVertices("flow_entry_id", flowEntryId.toString(), IFlowEntry.class).iterator();
+		if ( flowentries.hasNext() ) {
+			return flowentries.next();
+		} else {
+			return null;
+		}
 	}
 
 	/**
diff --git a/src/main/java/net/onrc/onos/ofcontroller/bgproute/Interface.java b/src/main/java/net/onrc/onos/ofcontroller/bgproute/Interface.java
index 5db8f0a..cc454ec 100644
--- a/src/main/java/net/onrc/onos/ofcontroller/bgproute/Interface.java
+++ b/src/main/java/net/onrc/onos/ofcontroller/bgproute/Interface.java
@@ -14,7 +14,6 @@
 
 public class Interface {
 	private final String name;
-	private final SwitchPort switchPort;
 	private final long dpid;
 	private final short port;
 	private final InetAddress ipAddress;
@@ -31,7 +30,6 @@
 		this.port = port;
 		this.ipAddress = InetAddresses.forString(ipAddress);
 		this.prefixLength = prefixLength;
-		this.switchPort = new SwitchPort(new Dpid(this.dpid), new Port(this.port));
 	}
 	
 	public String getName() {
diff --git a/src/main/java/net/onrc/onos/ofcontroller/bgproute/Ptree.java b/src/main/java/net/onrc/onos/ofcontroller/bgproute/Ptree.java
index 041061c..c80d055 100644
--- a/src/main/java/net/onrc/onos/ofcontroller/bgproute/Ptree.java
+++ b/src/main/java/net/onrc/onos/ofcontroller/bgproute/Ptree.java
@@ -244,7 +244,6 @@
 		add.parent = node;
 	}
 	
-	@SuppressWarnings("unused")
     private PtreeNode node_common(PtreeNode node, byte [] key, int key_bits) {
 		int i;
 		int limit = Math.min(node.keyBits, key_bits) / 8;
@@ -275,8 +274,6 @@
 		}
 		
 		PtreeNode add = new PtreeNode(null, common_len, maxKeyOctets);
-		if (add == null)
-			return null;
 		
 		int j;
 		for (j = 0; j < i; j++)
diff --git a/src/main/java/net/onrc/onos/ofcontroller/core/IDeviceStorage.java b/src/main/java/net/onrc/onos/ofcontroller/core/IDeviceStorage.java
index be495b9..13b9182 100644
--- a/src/main/java/net/onrc/onos/ofcontroller/core/IDeviceStorage.java
+++ b/src/main/java/net/onrc/onos/ofcontroller/core/IDeviceStorage.java
@@ -8,6 +8,7 @@
 	public IDeviceObject addDevice(IDevice device);
 	public IDeviceObject updateDevice(IDevice device);
 	public void removeDevice(IDevice device);
+	public void removeDevice(IDeviceObject deviceObject);
 	public IDeviceObject getDeviceByMac(String mac);
 	public IDeviceObject getDeviceByIP(int ipv4Address);
 	public void changeDeviceAttachments(IDevice device);
diff --git a/src/main/java/net/onrc/onos/ofcontroller/core/INetMapTopologyObjects.java b/src/main/java/net/onrc/onos/ofcontroller/core/INetMapTopologyObjects.java
index 49ffd4e..29c4377 100644
--- a/src/main/java/net/onrc/onos/ofcontroller/core/INetMapTopologyObjects.java
+++ b/src/main/java/net/onrc/onos/ofcontroller/core/INetMapTopologyObjects.java
@@ -1,7 +1,5 @@
 package net.onrc.onos.ofcontroller.core;
 
-import net.onrc.onos.ofcontroller.flowmanager.web.DatapathSummarySerializer;
-
 import org.codehaus.jackson.annotate.JsonIgnore;
 import org.codehaus.jackson.annotate.JsonProperty;
 import org.codehaus.jackson.map.annotate.JsonSerialize;
@@ -13,6 +11,9 @@
 import com.tinkerpop.frames.annotations.gremlin.GremlinParam;
 import com.tinkerpop.frames.VertexFrame;
 
+import net.onrc.onos.ofcontroller.flowmanager.web.DatapathSummarySerializer;
+import net.floodlightcontroller.core.web.serializers.IPv4Serializer;
+
 /*
  * This is the interfaces to make the objects for Cassandra DB.
  * They are interfaces, but it is also implementation,
@@ -205,6 +206,7 @@
 		
 		@JsonProperty("ipv4")
 		@Property("ipv4_address")
+		@JsonSerialize(using=IPv4Serializer.class)
 		public int getIpv4Address();
 		
 		@Property("ipv4_address")
@@ -251,6 +253,20 @@
 		@Property("flow_path_flags")
 		public void setFlowPathFlags(Long flowPathFlags);
 
+		@JsonProperty("idleTimeout")
+		@Property("idle_timeout")
+		public Integer getIdleTimeout();
+
+		@Property("idle_timeout")
+		public void setIdleTimeout(Integer idleTimeout);
+
+		@JsonProperty("hardTimeout")
+		@Property("hard_timeout")
+		public Integer getHardTimeout();
+
+		@Property("hard_timeout")
+		public void setHardTimeout(Integer hardTimeout);
+
 		@JsonProperty("srcDpid")
 		@Property("src_switch")
 		public String getSrcSwitch();
@@ -405,6 +421,20 @@
 		@Property("flow_entry_id")
 		public void setFlowEntryId(String flowEntryId);
 
+		@JsonProperty("idleTimeout")
+		@Property("idle_timeout")
+		public Integer getIdleTimeout();
+
+		@Property("idle_timeout")
+		public void setIdleTimeout(Integer idleTimeout);
+
+		@JsonProperty("hardTimeout")
+		@Property("hard_timeout")
+		public Integer getHardTimeout();
+
+		@Property("hard_timeout")
+		public void setHardTimeout(Integer hardTimeout);
+
 		@Property("switch_dpid")
 		public String getSwitchDpid();
 
diff --git a/src/main/java/net/onrc/onos/ofcontroller/core/config/DefaultConfiguration.java b/src/main/java/net/onrc/onos/ofcontroller/core/config/DefaultConfiguration.java
index d9a291f..c406a91 100644
--- a/src/main/java/net/onrc/onos/ofcontroller/core/config/DefaultConfiguration.java
+++ b/src/main/java/net/onrc/onos/ofcontroller/core/config/DefaultConfiguration.java
@@ -1,13 +1,21 @@
 package net.onrc.onos.ofcontroller.core.config;
 
 import java.net.InetAddress;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Map;
 
+import net.floodlightcontroller.core.module.FloodlightModuleContext;
+import net.floodlightcontroller.core.module.FloodlightModuleException;
+import net.floodlightcontroller.core.module.IFloodlightModule;
+import net.floodlightcontroller.core.module.IFloodlightService;
 import net.floodlightcontroller.util.MACAddress;
 import net.onrc.onos.ofcontroller.bgproute.Interface;
 
 import org.openflow.util.HexString;
 
-public class DefaultConfiguration implements IConfigInfoService {
+public class DefaultConfiguration implements IConfigInfoService, IFloodlightModule {
 
 	@Override
 	public boolean isInterfaceAddress(InetAddress address) {
@@ -44,4 +52,36 @@
 		return 0;
 	}
 
+	@Override
+	public Collection<Class<? extends IFloodlightService>> getModuleServices() {
+		Collection<Class<? extends IFloodlightService>> l 
+			= new ArrayList<Class<? extends IFloodlightService>>();
+		l.add(IConfigInfoService.class);
+		return l;
+	}
+
+	@Override
+	public Map<Class<? extends IFloodlightService>, IFloodlightService> getServiceImpls() {
+		Map<Class<? extends IFloodlightService>, IFloodlightService> m 
+			= new HashMap<Class<? extends IFloodlightService>, IFloodlightService>();
+		m.put(IConfigInfoService.class, this);
+		return m;
+	}
+
+	@Override
+	public Collection<Class<? extends IFloodlightService>> getModuleDependencies() {
+		return null;
+	}
+
+	@Override
+	public void init(FloodlightModuleContext context)
+			throws FloodlightModuleException {
+		// no-op
+	}
+
+	@Override
+	public void startUp(FloodlightModuleContext context) {
+		// no-op
+	}
+
 }
diff --git a/src/main/java/net/onrc/onos/ofcontroller/core/config/IConfigInfoService.java b/src/main/java/net/onrc/onos/ofcontroller/core/config/IConfigInfoService.java
index 7bbf483..39e0f7c 100644
--- a/src/main/java/net/onrc/onos/ofcontroller/core/config/IConfigInfoService.java
+++ b/src/main/java/net/onrc/onos/ofcontroller/core/config/IConfigInfoService.java
@@ -2,16 +2,16 @@
 
 import java.net.InetAddress;
 
+import net.floodlightcontroller.core.module.IFloodlightService;
 import net.floodlightcontroller.util.MACAddress;
 import net.onrc.onos.ofcontroller.bgproute.Interface;
-import net.onrc.onos.ofcontroller.core.module.IOnosService;
 
 /**
  * Provides information about the layer 3 properties of the network.
  * This is based on IP addresses configured on ports in the network.
  *
  */
-public interface IConfigInfoService extends IOnosService {
+public interface IConfigInfoService extends IFloodlightService {
 	public boolean isInterfaceAddress(InetAddress address);
 	public boolean inConnectedNetwork(InetAddress address);
 	public boolean fromExternalNetwork(long inDpid, short inPort);
diff --git a/src/main/java/net/onrc/onos/ofcontroller/core/internal/DeviceStorageImpl.java b/src/main/java/net/onrc/onos/ofcontroller/core/internal/DeviceStorageImpl.java
index 182a20a..3eaf79d 100644
--- a/src/main/java/net/onrc/onos/ofcontroller/core/internal/DeviceStorageImpl.java
+++ b/src/main/java/net/onrc/onos/ofcontroller/core/internal/DeviceStorageImpl.java
@@ -89,6 +89,10 @@
 	                log.debug("Adding device {}: creating new device", device.getMACAddressString());
 	            }
 	 			
+	            if (obj == null) {
+	            	return null;
+	            }
+	            
 	            changeDeviceAttachments(device, obj);
 		        
 	            changeDeviceIpv4Addresses(device, obj);
diff --git a/src/main/java/net/onrc/onos/ofcontroller/core/internal/LinkStorageImpl.java b/src/main/java/net/onrc/onos/ofcontroller/core/internal/LinkStorageImpl.java
index 458d533..36e1c5e 100644
--- a/src/main/java/net/onrc/onos/ofcontroller/core/internal/LinkStorageImpl.java
+++ b/src/main/java/net/onrc/onos/ofcontroller/core/internal/LinkStorageImpl.java
@@ -490,31 +490,4 @@
 	    
 	 	return success;
 	}
-
-	// TODO should be moved to TopoLinkServiceImpl (never used in this class)
-	static class ExtractLink implements PipeFunction<PathPipe<Vertex>, Link> {
-	
-		@SuppressWarnings("unchecked")
-		@Override
-		public Link compute(PathPipe<Vertex> pipe ) {
-			long s_dpid = 0;
-			long d_dpid = 0;
-			short s_port = 0;
-			short d_port = 0;
-			List<Vertex> V = new ArrayList<Vertex>();
-			V = (List<Vertex>)pipe.next();
-			Vertex src_sw = V.get(0);
-			Vertex dest_sw = V.get(3);
-			Vertex src_port = V.get(1);
-			Vertex dest_port = V.get(2);
-			s_dpid = HexString.toLong((String) src_sw.getProperty("dpid"));
-			d_dpid = HexString.toLong((String) dest_sw.getProperty("dpid"));
-			s_port = (Short) src_port.getProperty("number");
-			d_port = (Short) dest_port.getProperty("number");
-			
-			Link l = new Link(s_dpid,s_port,d_dpid,d_port);
-			
-			return l;
-		}
-	}
 }
diff --git a/src/main/java/net/onrc/onos/ofcontroller/core/internal/SwitchStorageImpl.java b/src/main/java/net/onrc/onos/ofcontroller/core/internal/SwitchStorageImpl.java
index 15c41f3..6599366 100644
--- a/src/main/java/net/onrc/onos/ofcontroller/core/internal/SwitchStorageImpl.java
+++ b/src/main/java/net/onrc/onos/ofcontroller/core/internal/SwitchStorageImpl.java
@@ -353,7 +353,6 @@
 	        	IPortObject p = sw.getPort(port);
 	            if (p != null) {
 	        		log.info("SwitchStorage:deletePort dpid:{} port:{} found and set INACTIVE", dpid, port);
-	        		//deletePortImpl(p);
 	        		p.setState("INACTIVE");
 	        		
 	        		// XXX for now delete devices when we change a port to prevent
@@ -497,13 +496,4 @@
 	    			new Object[] {port.getPortId(), state, desc});
 		}
 	}
-	
-	private void deletePortImpl(IPortObject port) {
-		if (port != null) {
-			op.removePort(port);
-	    	log.info("SwitchStorage:deletePortImpl port:{} done",
-	    			port.getPortId());
-		}
-	}
-
 }
\ No newline at end of file
diff --git a/src/main/java/net/onrc/onos/ofcontroller/core/internal/TopoLinkServiceImpl.java b/src/main/java/net/onrc/onos/ofcontroller/core/internal/TopoLinkServiceImpl.java
index 5c00806..cf73c9c 100644
--- a/src/main/java/net/onrc/onos/ofcontroller/core/internal/TopoLinkServiceImpl.java
+++ b/src/main/java/net/onrc/onos/ofcontroller/core/internal/TopoLinkServiceImpl.java
@@ -7,14 +7,16 @@
 import net.onrc.onos.graph.DBOperation;
 import net.onrc.onos.ofcontroller.core.INetMapTopologyObjects.ISwitchObject;
 import net.onrc.onos.ofcontroller.core.INetMapTopologyService.ITopoLinkService;
-import net.onrc.onos.ofcontroller.core.internal.LinkStorageImpl.ExtractLink;
 
+import org.openflow.util.HexString;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import com.tinkerpop.blueprints.Vertex;
 import com.tinkerpop.gremlin.java.GremlinPipeline;
 import net.onrc.onos.graph.GraphDBManager;
+import com.tinkerpop.pipes.PipeFunction;
+import com.tinkerpop.pipes.transform.PathPipe;
 
 public class TopoLinkServiceImpl implements ITopoLinkService {
 	
@@ -58,7 +60,6 @@
 
 	@Override
 	public List<Link> getLinksOnSwitch(String dpid) {
-		// TODO Auto-generated method stub
 		List<Link> links = new ArrayList<Link>(); 
 		ISwitchObject sw = dbop.searchSwitch(dpid);
 		GremlinPipeline<Vertex, Link> pipe = new GremlinPipeline<Vertex, Link>();
@@ -75,5 +76,28 @@
 		return links;
 
 	}
-	
+
+	private class ExtractLink implements PipeFunction<PathPipe<Vertex>, Link> {
+		@Override
+		public Link compute(PathPipe<Vertex> pipe) {
+			long s_dpid = 0;
+			long d_dpid = 0;
+			short s_port = 0;
+			short d_port = 0;
+			
+			List<?> V = pipe.next();
+			Vertex src_sw = (Vertex)V.get(0);
+			Vertex dest_sw = (Vertex)V.get(3);
+			Vertex src_port = (Vertex)V.get(1);
+			Vertex dest_port = (Vertex)V.get(2);
+			s_dpid = HexString.toLong((String) src_sw.getProperty("dpid"));
+			d_dpid = HexString.toLong((String) dest_sw.getProperty("dpid"));
+			s_port = (Short) src_port.getProperty("number");
+			d_port = (Short) dest_port.getProperty("number");
+			
+			Link l = new Link(s_dpid,s_port,d_dpid,d_port);
+			
+			return l;
+		}
+	}
 }
diff --git a/src/main/java/net/onrc/onos/ofcontroller/core/module/IOnosService.java b/src/main/java/net/onrc/onos/ofcontroller/core/module/IOnosService.java
deleted file mode 100644
index 5828366..0000000
--- a/src/main/java/net/onrc/onos/ofcontroller/core/module/IOnosService.java
+++ /dev/null
@@ -1,7 +0,0 @@
-package net.onrc.onos.ofcontroller.core.module;
-
-import net.floodlightcontroller.core.module.IFloodlightService;
-
-public interface IOnosService extends IFloodlightService {
-
-}
diff --git a/src/main/java/net/onrc/onos/ofcontroller/core/module/OnosModuleLoader.java b/src/main/java/net/onrc/onos/ofcontroller/core/module/OnosModuleLoader.java
deleted file mode 100644
index 6b8b514..0000000
--- a/src/main/java/net/onrc/onos/ofcontroller/core/module/OnosModuleLoader.java
+++ /dev/null
@@ -1,94 +0,0 @@
-package net.onrc.onos.ofcontroller.core.module;
-
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-import net.floodlightcontroller.core.IFloodlightProviderService;
-import net.floodlightcontroller.core.module.FloodlightModuleContext;
-import net.floodlightcontroller.core.module.FloodlightModuleException;
-import net.floodlightcontroller.core.module.IFloodlightModule;
-import net.floodlightcontroller.core.module.IFloodlightService;
-import net.floodlightcontroller.restserver.IRestApiService;
-import net.floodlightcontroller.topology.ITopologyService;
-import net.onrc.onos.datagrid.IDatagridService;
-import net.onrc.onos.ofcontroller.core.config.DefaultConfiguration;
-import net.onrc.onos.ofcontroller.core.config.IConfigInfoService;
-import net.onrc.onos.ofcontroller.flowmanager.IFlowService;
-import net.onrc.onos.ofcontroller.forwarding.Forwarding;
-import net.onrc.onos.ofcontroller.proxyarp.IProxyArpService;
-import net.onrc.onos.ofcontroller.proxyarp.ProxyArpManager;
-
-public class OnosModuleLoader implements IFloodlightModule {
-	private IFloodlightProviderService floodlightProvider;
-	private ITopologyService topology;
-	private IConfigInfoService config;
-	private IRestApiService restApi;
-	private IFlowService flowService;
-	private IDatagridService datagrid;
-
-	private ProxyArpManager arpManager;
-	private Forwarding forwarding;
-	
-	public OnosModuleLoader() {
-		arpManager = new ProxyArpManager();
-		forwarding = new Forwarding();
-	}
-	
-	@Override
-	public Collection<Class<? extends IFloodlightService>> getModuleServices() {
-		List<Class<? extends IFloodlightService>> services = 
-				new ArrayList<Class<? extends IFloodlightService>>();
-		services.add(IProxyArpService.class);
-		return services;
-	}
-
-	@Override
-	public Map<Class<? extends IFloodlightService>, IFloodlightService> getServiceImpls() {
-		Map<Class<? extends IFloodlightService>, IFloodlightService> impls = 
-				new HashMap<Class<? extends IFloodlightService>, IFloodlightService>();
-		impls.put(IProxyArpService.class, arpManager);
-		return impls;
-	}
-
-	@Override
-	public Collection<Class<? extends IFloodlightService>> getModuleDependencies() {
-		List<Class<? extends IFloodlightService>> dependencies = 
-				new ArrayList<Class<? extends IFloodlightService>>();
-		dependencies.add(IFloodlightProviderService.class);
-		dependencies.add(ITopologyService.class);
-		dependencies.add(IRestApiService.class);
-		dependencies.add(IFlowService.class);
-		dependencies.add(IDatagridService.class);
-		return dependencies;
-	}
-
-	@Override
-	public void init(FloodlightModuleContext context)
-			throws FloodlightModuleException {
-		floodlightProvider = context.getServiceImpl(IFloodlightProviderService.class);
-		topology = context.getServiceImpl(ITopologyService.class);
-		restApi = context.getServiceImpl(IRestApiService.class);
-		flowService = context.getServiceImpl(IFlowService.class);
-		datagrid = context.getServiceImpl(IDatagridService.class);
-		
-		//This could be null because it's not mandatory to have an
-		//IConfigInfoService loaded.
-		config = context.getServiceImpl(IConfigInfoService.class);
-		if (config == null) {
-			config = new DefaultConfiguration();
-		}
-
-		arpManager.init(floodlightProvider, topology, datagrid, config, restApi);
-		forwarding.init(floodlightProvider, flowService, datagrid);
-	}
-
-	@Override
-	public void startUp(FloodlightModuleContext context) {
-		arpManager.startUp();
-		forwarding.startUp();
-	}
-
-}
diff --git a/src/main/java/net/onrc/onos/ofcontroller/floodlightlistener/NetworkGraphPublisher.java b/src/main/java/net/onrc/onos/ofcontroller/floodlightlistener/NetworkGraphPublisher.java
index 723fc4e..f458603 100644
--- a/src/main/java/net/onrc/onos/ofcontroller/floodlightlistener/NetworkGraphPublisher.java
+++ b/src/main/java/net/onrc/onos/ofcontroller/floodlightlistener/NetworkGraphPublisher.java
@@ -40,7 +40,6 @@
 import net.onrc.onos.ofcontroller.core.ISwitchStorage;
 import net.onrc.onos.ofcontroller.core.INetMapStorage.DM_OPERATION;
 import net.onrc.onos.ofcontroller.core.INetMapTopologyObjects.ISwitchObject;
-import net.onrc.onos.ofcontroller.core.ISwitchStorage.SwitchState;
 import net.onrc.onos.ofcontroller.core.internal.DeviceStorageImpl;
 import net.onrc.onos.ofcontroller.core.internal.LinkStorageImpl;
 import net.onrc.onos.ofcontroller.core.internal.SwitchStorageImpl;
diff --git a/src/main/java/net/onrc/onos/ofcontroller/flowmanager/FlowDatabaseOperation.java b/src/main/java/net/onrc/onos/ofcontroller/flowmanager/FlowDatabaseOperation.java
index da40bef..95185b9 100644
--- a/src/main/java/net/onrc/onos/ofcontroller/flowmanager/FlowDatabaseOperation.java
+++ b/src/main/java/net/onrc/onos/ofcontroller/flowmanager/FlowDatabaseOperation.java
@@ -3,10 +3,8 @@
 import java.io.PrintWriter;
 import java.io.StringWriter;
 import java.util.ArrayList;
-import java.util.Comparator;
 import java.util.LinkedList;
 import java.util.List;
-import java.util.concurrent.ConcurrentLinkedQueue;
 
 import net.floodlightcontroller.util.MACAddress;
 import net.onrc.onos.graph.DBOperation;
@@ -90,6 +88,8 @@
 	// - flowPath.flowPathType()
 	// - flowPath.flowPathUserState()
 	// - flowPath.flowPathFlags()
+	// - flowPath.idleTimeout()
+	// - flowPath.hardTimeout()
 	// - flowPath.dataPath().srcPort()
 	// - flowPath.dataPath().dstPort()
 	// - flowPath.matchSrcMac()
@@ -109,6 +109,8 @@
 	flowObj.setFlowPathType(flowPath.flowPathType().toString());
 	flowObj.setFlowPathUserState(flowPath.flowPathUserState().toString());
 	flowObj.setFlowPathFlags(flowPath.flowPathFlags().flags());
+	flowObj.setIdleTimeout(flowPath.idleTimeout());
+	flowObj.setHardTimeout(flowPath.hardTimeout());
 	flowObj.setSrcSwitch(flowPath.dataPath().srcPort().dpid().toString());
 	flowObj.setSrcPort(flowPath.dataPath().srcPort().port().value());
 	flowObj.setDstSwitch(flowPath.dataPath().dstPort().dpid().toString());
@@ -225,6 +227,8 @@
 	// - InPort edge
 	// - OutPort edge
 	//
+	// - flowEntry.idleTimeout()
+	// - flowEntry.hardTimeout()
 	// - flowEntry.dpid()
 	// - flowEntry.flowEntryUserState()
 	// - flowEntry.flowEntrySwitchState()
@@ -245,6 +249,8 @@
 	// - flowEntry.actions()
 	//
 	ISwitchObject sw = dbHandler.searchSwitch(flowEntry.dpid().toString());
+	flowEntryObj.setIdleTimeout(flowEntry.idleTimeout());
+	flowEntryObj.setHardTimeout(flowEntry.hardTimeout());
 	flowEntryObj.setSwitchDpid(flowEntry.dpid().toString());
 	flowEntryObj.setSwitch(sw);
 	if (flowEntry.flowEntryMatch().matchInPort()) {
@@ -509,6 +515,8 @@
 	String flowPathType = flowObj.getFlowPathType();
 	String flowPathUserState = flowObj.getFlowPathUserState();
 	Long flowPathFlags = flowObj.getFlowPathFlags();
+	Integer idleTimeout = flowObj.getIdleTimeout();
+	Integer hardTimeout = flowObj.getHardTimeout();
 	String srcSwitchStr = flowObj.getSrcSwitch();
 	Short srcPortShort = flowObj.getSrcPort();
 	String dstSwitchStr = flowObj.getDstSwitch();
@@ -519,6 +527,8 @@
 	    (flowPathType == null) ||
 	    (flowPathUserState == null) ||
 	    (flowPathFlags == null) ||
+	    (idleTimeout == null) ||
+	    (hardTimeout == null) ||
 	    (srcSwitchStr == null) ||
 	    (srcPortShort == null) ||
 	    (dstSwitchStr == null) ||
@@ -533,6 +543,8 @@
 	flowPath.setFlowPathType(FlowPathType.valueOf(flowPathType));
 	flowPath.setFlowPathUserState(FlowPathUserState.valueOf(flowPathUserState));
 	flowPath.setFlowPathFlags(new FlowPathFlags(flowPathFlags));
+	flowPath.setIdleTimeout(idleTimeout);
+	flowPath.setHardTimeout(hardTimeout);
 	flowPath.dataPath().srcPort().setDpid(new Dpid(srcSwitchStr));
 	flowPath.dataPath().srcPort().setPort(new Port(srcPortShort));
 	flowPath.dataPath().dstPort().setDpid(new Dpid(dstSwitchStr));
@@ -611,11 +623,15 @@
      */
     public static FlowEntry extractFlowEntry(IFlowEntry flowEntryObj) {
 	String flowEntryIdStr = flowEntryObj.getFlowEntryId();
+	Integer idleTimeout = flowEntryObj.getIdleTimeout();
+	Integer hardTimeout = flowEntryObj.getHardTimeout();
 	String switchDpidStr = flowEntryObj.getSwitchDpid();
 	String userState = flowEntryObj.getUserState();
 	String switchState = flowEntryObj.getSwitchState();
 
 	if ((flowEntryIdStr == null) ||
+	    (idleTimeout == null) ||
+	    (hardTimeout == null) ||
 	    (switchDpidStr == null) ||
 	    (userState == null) ||
 	    (switchState == null)) {
diff --git a/src/main/java/net/onrc/onos/ofcontroller/flowmanager/FlowEventHandler.java b/src/main/java/net/onrc/onos/ofcontroller/flowmanager/FlowEventHandler.java
index 6c200fa..3538eb4 100644
--- a/src/main/java/net/onrc/onos/ofcontroller/flowmanager/FlowEventHandler.java
+++ b/src/main/java/net/onrc/onos/ofcontroller/flowmanager/FlowEventHandler.java
@@ -75,11 +75,14 @@
     // Transient state for processing the Flow Paths:
     //  - The Flow Paths that should be recomputed
     //  - The Flow Paths with modified Flow Entries
+    //  - The Flow Paths that we should check if installed in all switches
     //
     private Map<Long, FlowPath> shouldRecomputeFlowPaths =
 	new HashMap<Long, FlowPath>();
     private Map<Long, FlowPath> modifiedFlowPaths =
 	new HashMap<Long, FlowPath>();
+    private Map<Long, FlowPath> checkIfInstalledFlowPaths =
+	new HashMap<Long, FlowPath>();
 
     /**
      * Constructor for a given Flow Manager and Datagrid Service.
@@ -239,6 +242,12 @@
 	for (FlowPath flowPath : modifiedFlowPaths.values())
 	    flowPath.dataPath().removeDeletedFlowEntries();
 
+	//
+	// Check if Flow Paths have been installed into all switches,
+	// and generate the appropriate events.
+	//
+	checkInstalledFlowPaths(checkIfInstalledFlowPaths.values());
+
 	// Cleanup
 	topologyEvents.clear();
 	flowPathEvents.clear();
@@ -246,6 +255,44 @@
 	//
 	shouldRecomputeFlowPaths.clear();
 	modifiedFlowPaths.clear();
+	checkIfInstalledFlowPaths.clear();
+    }
+
+    /**
+     * Check if Flow Paths have been installed into all switches,
+     * and generate the appropriate events.
+     *
+     * @param flowPaths the flowPaths to process.
+     */
+    private void checkInstalledFlowPaths(Collection<FlowPath> flowPaths) {
+	List<FlowPath> installedFlowPaths = new LinkedList<FlowPath>();
+
+	Kryo kryo = kryoFactory.newKryo();
+
+	for (FlowPath flowPath : flowPaths) {
+	    boolean isInstalled = true;
+
+	    //
+	    // Check whether all Flow Entries have been installed
+	    //
+	    for (FlowEntry flowEntry : flowPath.flowEntries()) {
+		if (flowEntry.flowEntrySwitchState() !=
+		    FlowEntrySwitchState.FE_SWITCH_UPDATED) {
+		    isInstalled = false;
+		    break;
+		}
+	    }
+
+	    if (isInstalled) {
+		// Create a copy and add it to the list
+		FlowPath copyFlowPath = kryo.copy(flowPath);
+		installedFlowPaths.add(copyFlowPath);
+	    }
+	}
+	kryoFactory.deleteKryo(kryo);
+
+	// Generate an event for the installed Flow Path.
+	flowManager.notificationFlowPathsInstalled(installedFlowPaths);
     }
 
     /**
@@ -340,6 +387,9 @@
 		    }
 		    modifiedFlowPaths.put(flowPath.flowId().value(), flowPath);
 		    break;
+		case FP_TYPE_UNKNOWN:
+		    log.error("FlowPath event with unknown type");
+		    break;
 		}
 		allFlowPaths.put(flowPath.flowId().value(), flowPath);
 
@@ -526,10 +576,12 @@
 	    }
 
 	    //
-	    // Update the local Flow Entry.
+	    // Update the local Flow Entry, and keep state to check
+	    // if the Flow Path has been installed.
 	    //
 	    localFlowEntry.setFlowEntryUserState(flowEntry.flowEntryUserState());
 	    localFlowEntry.setFlowEntrySwitchState(flowEntry.flowEntrySwitchState());
+	    checkIfInstalledFlowPaths.put(flowPath.flowId().value(), flowPath);
 	    return localFlowEntry;
 	}
 
@@ -700,6 +752,12 @@
 	    newFlowEntry.setFlowId(new FlowId(flowPath.flowId().value()));
 
 	    //
+	    // Copy the Flow timeouts
+	    //
+	    newFlowEntry.setIdleTimeout(flowPath.idleTimeout());
+	    newFlowEntry.setHardTimeout(flowPath.hardTimeout());
+
+	    //
 	    // Allocate the FlowEntryMatch by copying the default one
 	    // from the FlowPath (if set).
 	    //
diff --git a/src/main/java/net/onrc/onos/ofcontroller/flowmanager/FlowManager.java b/src/main/java/net/onrc/onos/ofcontroller/flowmanager/FlowManager.java
index 9e56797..0f9ed91 100644
--- a/src/main/java/net/onrc/onos/ofcontroller/flowmanager/FlowManager.java
+++ b/src/main/java/net/onrc/onos/ofcontroller/flowmanager/FlowManager.java
@@ -10,8 +10,6 @@
 import java.util.SortedMap;
 import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.LinkedBlockingQueue;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.TimeUnit;
 
 import net.floodlightcontroller.core.IFloodlightProviderService;
 import net.floodlightcontroller.core.IOFSwitch;
@@ -25,15 +23,24 @@
 import net.onrc.onos.graph.GraphDBManager;
 import net.onrc.onos.datagrid.IDatagridService;
 import net.onrc.onos.ofcontroller.core.INetMapStorage;
-import net.onrc.onos.ofcontroller.core.INetMapTopologyObjects.IFlowEntry;
-import net.onrc.onos.ofcontroller.core.INetMapTopologyObjects.IFlowPath;
 import net.onrc.onos.ofcontroller.floodlightlistener.INetworkGraphService;
 import net.onrc.onos.ofcontroller.flowmanager.web.FlowWebRoutable;
 import net.onrc.onos.ofcontroller.flowprogrammer.IFlowPusherService;
+import net.onrc.onos.ofcontroller.forwarding.IForwardingService;
 import net.onrc.onos.ofcontroller.topology.Topology;
-import net.onrc.onos.ofcontroller.util.*;
+import net.onrc.onos.ofcontroller.util.Dpid;
+import net.onrc.onos.ofcontroller.util.FlowEntry;
+import net.onrc.onos.ofcontroller.util.FlowEntrySwitchState;
+import net.onrc.onos.ofcontroller.util.FlowEntryUserState;
+import net.onrc.onos.ofcontroller.util.FlowEntryId;
+import net.onrc.onos.ofcontroller.util.FlowId;
+import net.onrc.onos.ofcontroller.util.FlowPath;
+import net.onrc.onos.ofcontroller.util.FlowPathUserState;
+import net.onrc.onos.ofcontroller.util.Pair;
+import net.onrc.onos.ofcontroller.util.serializers.KryoFactory;
 
-import org.openflow.protocol.OFType;
+import com.esotericsoftware.kryo2.Kryo;
+
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -53,7 +60,10 @@
     protected FlowEventHandler flowEventHandler;
 
     protected IFlowPusherService pusher;
-    
+    protected IForwardingService forwardingService;
+
+    private KryoFactory kryoFactory = new KryoFactory();
+
     // Flow Entry ID generation state
     private static Random randomGenerator = new Random();
     private static int nextFlowEntryIdPrefix = 0;
@@ -142,6 +152,8 @@
 	l.add(INetworkGraphService.class);
 	l.add(IDatagridService.class);
 	l.add(IRestApiService.class);
+	l.add(IFlowPusherService.class);
+	l.add(IForwardingService.class);
         return l;
     }
 
@@ -158,6 +170,7 @@
 	datagridService = context.getServiceImpl(IDatagridService.class);
 	restApi = context.getServiceImpl(IRestApiService.class);
 	pusher = context.getServiceImpl(IFlowPusherService.class);
+	forwardingService = context.getServiceImpl(IForwardingService.class);
 
 	this.init("","");
     }
@@ -359,8 +372,25 @@
      * @param sw the switch the Flow Entry expired on.
      * @param flowEntryId the Flow Entry ID of the expired Flow Entry.
      */
-    public void flowEntryOnSwitchExpired(IOFSwitch sw, FlowEntryId flowEntryId) {
-	// TODO: Not implemented yet
+    public void flowEntryOnSwitchExpired(IOFSwitch sw,
+					 FlowEntryId flowEntryId) {
+	// Find the Flow Entry (null if not present in the datagrid)
+	FlowEntry flowEntry = datagridService.getFlowEntry(flowEntryId);
+	if (flowEntry == null)
+	    return;		// Flow Entry not found
+
+	// Find the Flow Path
+	FlowPath flowPath = datagridService.getFlow(flowEntry.flowId());
+	if (flowPath == null)
+	    return;		// Flow Path not found
+
+	//
+	// Remove the Flow if the Flow Entry expired on the first switch
+	//
+	Dpid srcDpid = flowPath.dataPath().srcPort().dpid();
+	if (srcDpid.value() != sw.getId())
+	    return;
+	deleteFlow(flowPath.flowId());
     }
 
     /**
@@ -377,7 +407,6 @@
 	// Process all entries
 	//
 	for (Pair<IOFSwitch, FlowEntry> entry : entries) {
-	    IOFSwitch sw = entry.first;
 	    FlowEntry flowEntry = entry.second;
 
 	    //
@@ -398,11 +427,24 @@
 	    case FE_USER_DELETE:
 		datagridService.notificationSendFlowEntryRemoved(flowEntry.flowEntryId());
 		break;
+	    case FE_USER_UNKNOWN:
+		assert(false);
+		break;
 	    }
 	}
     }
 
     /**
+     * Generate a notification that a collection of Flow Paths has been
+     * installed in the network.
+     *
+     * @param flowPaths the collection of installed Flow Paths.
+     */
+    void notificationFlowPathsInstalled(Collection<FlowPath> flowPaths) {
+	forwardingService.flowsInstalled(flowPaths);
+    }
+
+    /**
      * Push modified Flow-related state as appropriate.
      *
      * @param modifiedFlowPaths the collection of modified Flow Paths.
@@ -566,11 +608,24 @@
      */
     private void pushModifiedFlowPathsToDatabase(
 		Collection<FlowPath> modifiedFlowPaths) {
+	List<FlowPath> copiedFlowPaths = new LinkedList<FlowPath>();
+
+	//
+	// Create a copy of the Flow Paths to push, because the pushing
+	// itself will happen on a separate thread.
+	//
+	Kryo kryo = kryoFactory.newKryo();
+	for (FlowPath flowPath : modifiedFlowPaths) {
+	    FlowPath copyFlowPath = kryo.copy(flowPath);
+	    copiedFlowPaths.add(copyFlowPath);
+	}
+	kryoFactory.deleteKryo(kryo);
+
 	//
 	// We only add the Flow Paths to the Database Queue.
 	// The FlowDatabaseWriter thread is responsible for the actual writing.
 	//
-	flowPathsToDatabaseQueue.addAll(modifiedFlowPaths);
+	flowPathsToDatabaseQueue.addAll(copiedFlowPaths);
     }
 
     /**
diff --git a/src/main/java/net/onrc/onos/ofcontroller/flowmanager/IFlowService.java b/src/main/java/net/onrc/onos/ofcontroller/flowmanager/IFlowService.java
index a25602d..549a0fc 100644
--- a/src/main/java/net/onrc/onos/ofcontroller/flowmanager/IFlowService.java
+++ b/src/main/java/net/onrc/onos/ofcontroller/flowmanager/IFlowService.java
@@ -6,8 +6,6 @@
 import net.floodlightcontroller.core.IOFSwitch;
 import net.floodlightcontroller.core.module.IFloodlightService;
 import net.onrc.onos.ofcontroller.topology.Topology;
-import net.onrc.onos.ofcontroller.util.CallerId;
-import net.onrc.onos.ofcontroller.util.DataPathEndpoints;
 import net.onrc.onos.ofcontroller.util.FlowEntry;
 import net.onrc.onos.ofcontroller.util.FlowEntryId;
 import net.onrc.onos.ofcontroller.util.FlowId;
diff --git a/src/main/java/net/onrc/onos/ofcontroller/flowprogrammer/FlowProgrammer.java b/src/main/java/net/onrc/onos/ofcontroller/flowprogrammer/FlowProgrammer.java
index 461d231..a4f0a8c 100644
--- a/src/main/java/net/onrc/onos/ofcontroller/flowprogrammer/FlowProgrammer.java
+++ b/src/main/java/net/onrc/onos/ofcontroller/flowprogrammer/FlowProgrammer.java
@@ -42,9 +42,8 @@
 public class FlowProgrammer implements IFloodlightModule, 
 				       IOFMessageListener,
 				       IOFSwitchListener {
-    @SuppressWarnings("unused")
     // flag to enable FlowSynchronizer
-    private static final boolean enableFlowSync = false;
+    private static final boolean enableFlowSync = true;
     protected static Logger log = LoggerFactory.getLogger(FlowProgrammer.class);
     protected volatile IFloodlightProviderService floodlightProvider;
     protected volatile IControllerRegistryService registryService;
@@ -155,8 +154,10 @@
     public void addedSwitch(IOFSwitch sw) {
 	log.debug("Switch added: {}", sw.getId());
 
-	if (enableFlowSync && registryService.hasControl(sw.getId())) {
-	    synchronizer.synchronize(sw);
+	if (enableFlowSync) {
+		if (registryService.hasControl(sw.getId())) {
+		    synchronizer.synchronize(sw);
+		}
 	}
     }
 
diff --git a/src/main/java/net/onrc/onos/ofcontroller/flowprogrammer/FlowPusher.java b/src/main/java/net/onrc/onos/ofcontroller/flowprogrammer/FlowPusher.java
index c3c7107..3f61248 100644
--- a/src/main/java/net/onrc/onos/ofcontroller/flowprogrammer/FlowPusher.java
+++ b/src/main/java/net/onrc/onos/ofcontroller/flowprogrammer/FlowPusher.java
@@ -34,7 +34,6 @@
 import net.onrc.onos.ofcontroller.util.FlowEntryAction.*;
 import net.onrc.onos.ofcontroller.util.FlowEntry;
 import net.onrc.onos.ofcontroller.util.FlowEntryActions;
-import net.onrc.onos.ofcontroller.util.FlowEntryId;
 import net.onrc.onos.ofcontroller.util.FlowEntryMatch;
 import net.onrc.onos.ofcontroller.util.FlowEntryUserState;
 import net.onrc.onos.ofcontroller.util.IPv4Net;
@@ -68,8 +67,6 @@
     protected static final int MAX_MESSAGE_SEND = 100;
 
     public static final short PRIORITY_DEFAULT = 100;
-    public static final short FLOWMOD_DEFAULT_IDLE_TIMEOUT = 0;	// infinity
-    public static final short FLOWMOD_DEFAULT_HARD_TIMEOUT = 0;	// infinite
 
 	public enum QueueState {
 		READY,
@@ -82,8 +79,9 @@
 	 * @author Naoki Shiota
 	 *
 	 */
-	@SuppressWarnings("serial")
 	private class SwitchQueue extends ArrayDeque<OFMessage> {
+		private static final long serialVersionUID = 1L;
+
 		QueueState state;
 		
 		// Max rate of sending message (bytes/ms). 0 implies no limitation.
@@ -722,8 +720,8 @@
 			}
 		}
 
-		fm.setIdleTimeout(FLOWMOD_DEFAULT_IDLE_TIMEOUT)
-				.setHardTimeout(FLOWMOD_DEFAULT_HARD_TIMEOUT)
+		fm.setIdleTimeout((short)flowEntry.idleTimeout())
+				.setHardTimeout((short)flowEntry.hardTimeout())
 				.setPriority(PRIORITY_DEFAULT)
 				.setBufferId(OFPacketOut.BUFFER_ID_NONE).setCookie(cookie)
 				.setCommand(flowModCommand).setMatch(match)
diff --git a/src/main/java/net/onrc/onos/ofcontroller/flowprogrammer/FlowSynchronizer.java b/src/main/java/net/onrc/onos/ofcontroller/flowprogrammer/FlowSynchronizer.java
index 351df84..04d3bbc 100644
--- a/src/main/java/net/onrc/onos/ofcontroller/flowprogrammer/FlowSynchronizer.java
+++ b/src/main/java/net/onrc/onos/ofcontroller/flowprogrammer/FlowSynchronizer.java
@@ -7,8 +7,10 @@
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
+import java.util.concurrent.Callable;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.Future;
+import java.util.concurrent.FutureTask;
 
 import org.openflow.protocol.OFFlowMod;
 import org.openflow.protocol.OFMatch;
@@ -45,26 +47,26 @@
 
     private DBOperation dbHandler;
     protected IFlowPusherService pusher;
-    private Map<IOFSwitch, Thread> switchThreads; 
+    private Map<IOFSwitch, FutureTask<SyncResult>> switchThreads; 
 
     public FlowSynchronizer() {
 	dbHandler = GraphDBManager.getDBOperation("ramcloud", "/tmp/ramcloudconf");
-	switchThreads = new HashMap<IOFSwitch, Thread>();
-    }
+	switchThreads = new HashMap<IOFSwitch, FutureTask<SyncResult>>();    }
 
     @Override
-    public void synchronize(IOFSwitch sw) {
+    public Future<SyncResult> synchronize(IOFSwitch sw) {
 	Synchronizer sync = new Synchronizer(sw);
-	Thread t = new Thread(sync);
-	switchThreads.put(sw, t);
-	t.start();
+	FutureTask<SyncResult> task = new FutureTask<SyncResult>(sync);
+	switchThreads.put(sw, task);
+	task.run();
+	return task;
     }
     
     @Override
     public void interrupt(IOFSwitch sw) {
-	Thread t = switchThreads.remove(sw);
+	FutureTask<SyncResult> t = switchThreads.remove(sw);
 	if(t != null) {
-	    t.interrupt();
+		t.cancel(true);
 	}	
     }
 
@@ -81,7 +83,7 @@
      * @author Brian
      *
      */
-	protected class Synchronizer implements Runnable {
+	protected class Synchronizer implements Callable<SyncResult> {
 	IOFSwitch sw;
 	ISwitchObject swObj;
 
@@ -91,14 +93,45 @@
 	    this.swObj = dbHandler.searchSwitch(dpid.toString());
 	}
 
+	double graphIDTime, switchTime, compareTime, graphEntryTime, extractTime, pushTime, totalTime;
 	@Override
-	public void run() {
+	public SyncResult call() {
 	    // TODO: stop adding other flow entries while synchronizing
 	    //pusher.suspend(sw);
+	    long start = System.nanoTime();
 	    Set<FlowEntryWrapper> graphEntries = getFlowEntriesFromGraph();
+	    long step1 = System.nanoTime();
 	    Set<FlowEntryWrapper> switchEntries = getFlowEntriesFromSwitch();
-	    compare(graphEntries, switchEntries);
+	    long step2 = System.nanoTime();
+	    SyncResult result = compare(graphEntries, switchEntries);
+	    long step3 = System.nanoTime();
+	    graphIDTime = (step1 - start); 
+	    switchTime = (step2 - step1);
+	    compareTime = (step3 - step2);
+	    totalTime = (step3 - start);
+	    outputTime();
 	    //pusher.resume(sw);
+	    
+	    return result;
+	}
+	
+	private void outputTime() {
+	    double div = Math.pow(10, 6); //convert nanoseconds to ms
+	    graphIDTime /= div;
+	    switchTime /= div;
+	    compareTime = (compareTime - graphEntryTime - extractTime - pushTime) / div;
+	    graphEntryTime /= div;
+	    extractTime /= div;
+	    pushTime /= div;
+	    totalTime /= div;
+	    log.debug("Sync time (ms):" +
+	    		  graphIDTime + "," +
+	     		  switchTime + "," + 
+	    		  compareTime + "," +
+	     		  graphEntryTime + "," +
+	    		  extractTime + "," + 
+	     		  pushTime + "," +
+	              totalTime);
 	}
 
 	/**
@@ -108,7 +141,7 @@
 	 * @param graphEntries Flow entries in GraphDB.
 	 * @param switchEntries Flow entries in switch.
 	 */
-	private void compare(Set<FlowEntryWrapper> graphEntries, Set<FlowEntryWrapper> switchEntries) {
+	private SyncResult compare(Set<FlowEntryWrapper> graphEntries, Set<FlowEntryWrapper> switchEntries) {
 	    int added = 0, removed = 0, skipped = 0;
 	    for(FlowEntryWrapper entry : switchEntries) {
 		if(graphEntries.contains(entry)) {
@@ -124,11 +157,16 @@
 	    for(FlowEntryWrapper entry : graphEntries) {
 		// add flow entry to switch
 		entry.addToSwitch(sw);
+		graphEntryTime += entry.dbTime;
+		extractTime += entry.extractTime;
+		pushTime += entry.pushTime;
 		added++;
 	    }	  
 	    log.debug("Flow entries added "+ added + ", " +
 		      "Flow entries removed "+ removed + ", " +
 		      "Flow entries skipped " + skipped);
+	    
+	    return new SyncResult(added, removed, skipped);
 	}
 
 	/**
@@ -217,6 +255,7 @@
 	 * Install this FlowEntry to a switch via FlowPusher.
 	 * @param sw Switch to which flow will be installed.
 	 */
+	double dbTime, extractTime, pushTime;
 	public void addToSwitch(IOFSwitch sw) {
 	    if (statisticsReply != null) {
 		log.error("Error adding existing flow entry {} to sw {}", 
@@ -224,6 +263,7 @@
 		return;
 	    }
 
+	    long startDB = System.nanoTime();
 	    // Get the Flow Entry state from the Network Graph
 	    IFlowEntry iFlowEntry = null;
 	    try {
@@ -238,7 +278,9 @@
 			  flowEntryId, sw.getId());
 		return;
 	    }
+	    dbTime = System.nanoTime() - startDB;
 
+	    long startExtract = System.nanoTime();
 	    FlowEntry flowEntry =
 		FlowDatabaseOperation.extractFlowEntry(iFlowEntry);
 	    if (flowEntry == null) {
@@ -246,8 +288,11 @@
 			  flowEntryId, sw.getId());
 		return;
 	    }
-
+	    extractTime = System.nanoTime() - startExtract;
+	    
+	    long startPush = System.nanoTime();
 	    pusher.pushFlowEntry(sw, flowEntry);
+	    pushTime = System.nanoTime() - startPush;
 	}
 	
 	/**
diff --git a/src/main/java/net/onrc/onos/ofcontroller/flowprogrammer/IFlowSyncService.java b/src/main/java/net/onrc/onos/ofcontroller/flowprogrammer/IFlowSyncService.java
index 4e6efaf..4fe0857 100644
--- a/src/main/java/net/onrc/onos/ofcontroller/flowprogrammer/IFlowSyncService.java
+++ b/src/main/java/net/onrc/onos/ofcontroller/flowprogrammer/IFlowSyncService.java
@@ -1,7 +1,8 @@
 package net.onrc.onos.ofcontroller.flowprogrammer;
 
+import java.util.concurrent.Future;
+
 import net.floodlightcontroller.core.IOFSwitch;
-import net.floodlightcontroller.core.IOFSwitchListener;
 import net.floodlightcontroller.core.module.IFloodlightService;
 
 /**
@@ -11,7 +12,19 @@
  *
  */
 public interface IFlowSyncService extends IFloodlightService {
-    public void synchronize(IOFSwitch sw);
+    public Future<SyncResult> synchronize(IOFSwitch sw);
     
     public void interrupt(IOFSwitch sw);
+    
+    public class SyncResult {
+    	public final int flowAdded;
+    	public final int flowRemoved;
+    	public final int flowSkipped;
+    	
+    	public SyncResult(int added, int removed, int skipped) {
+    		flowAdded = added;
+    		flowRemoved = removed;
+    		flowSkipped = skipped;
+    	}
+    }
 }
diff --git a/src/main/java/net/onrc/onos/ofcontroller/forwarding/Forwarding.java b/src/main/java/net/onrc/onos/ofcontroller/forwarding/Forwarding.java
index 05de13e..b062e2b 100644
--- a/src/main/java/net/onrc/onos/ofcontroller/forwarding/Forwarding.java
+++ b/src/main/java/net/onrc/onos/ofcontroller/forwarding/Forwarding.java
@@ -1,15 +1,23 @@
 package net.onrc.onos.ofcontroller.forwarding;
 
-import java.io.IOException;
+import java.net.InetAddress;
 import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
 
 import net.floodlightcontroller.core.FloodlightContext;
 import net.floodlightcontroller.core.IFloodlightProviderService;
 import net.floodlightcontroller.core.IOFMessageListener;
 import net.floodlightcontroller.core.IOFSwitch;
+import net.floodlightcontroller.core.module.FloodlightModuleContext;
+import net.floodlightcontroller.core.module.IFloodlightModule;
+import net.floodlightcontroller.core.module.IFloodlightService;
 import net.floodlightcontroller.packet.Ethernet;
+import net.floodlightcontroller.packet.IPv4;
 import net.floodlightcontroller.util.MACAddress;
 import net.onrc.onos.datagrid.IDatagridService;
 import net.onrc.onos.ofcontroller.core.IDeviceStorage;
@@ -18,6 +26,8 @@
 import net.onrc.onos.ofcontroller.core.INetMapTopologyObjects.ISwitchObject;
 import net.onrc.onos.ofcontroller.core.internal.DeviceStorageImpl;
 import net.onrc.onos.ofcontroller.flowmanager.IFlowService;
+import net.onrc.onos.ofcontroller.flowprogrammer.IFlowPusherService;
+import net.onrc.onos.ofcontroller.proxyarp.ArpMessage;
 import net.onrc.onos.ofcontroller.topology.TopologyManager;
 import net.onrc.onos.ofcontroller.util.CallerId;
 import net.onrc.onos.ofcontroller.util.DataPath;
@@ -41,35 +51,120 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-public class Forwarding implements IOFMessageListener {
+import com.google.common.collect.HashMultimap;
+import com.google.common.collect.Multimap;
+import com.google.common.net.InetAddresses;
+
+public class Forwarding implements IOFMessageListener, IFloodlightModule,
+									IForwardingService {
 	private final static Logger log = LoggerFactory.getLogger(Forwarding.class);
 
+	private final int IDLE_TIMEOUT = 5; // seconds
+	private final int HARD_TIMEOUT = 0; // seconds
+	
 	private IFloodlightProviderService floodlightProvider;
 	private IFlowService flowService;
-	private IDatagridService datagridService;
+	private IFlowPusherService flowPusher;
+	private IDatagridService datagrid;
 	
 	private IDeviceStorage deviceStorage;
 	private TopologyManager topologyService;
 	
-	public Forwarding() {
+	private Map<Path, Long> pendingFlows;
+	private Multimap<Long, PacketToPush> waitingPackets;
+	
+	private final Object lock = new Object();
+	
+	public class PacketToPush {
+		public final OFPacketOut packet;
+		public final long dpid;
 		
+		public PacketToPush(OFPacketOut packet, long dpid) {
+			this.packet = packet;
+			this.dpid = dpid;
+		}
 	}
 	
-	public void init(IFloodlightProviderService floodlightProvider, 
-			IFlowService flowService, IDatagridService datagridService) {
-		this.floodlightProvider = floodlightProvider;
-		this.flowService = flowService;
-		this.datagridService = datagridService;
+	public final class Path {
+		public final SwitchPort srcPort;
+		public final SwitchPort dstPort;
+		
+		public Path(SwitchPort src, SwitchPort dst) {
+			srcPort = new SwitchPort(new Dpid(src.dpid().value()), 
+					new Port(src.port().value()));
+			dstPort = new SwitchPort(new Dpid(dst.dpid().value()), 
+					new Port(dst.port().value()));
+		}
+		
+		@Override
+		public boolean equals(Object other) {
+			if (!(other instanceof Path)) {
+				return false;
+			}
+			
+			Path otherPath = (Path) other;
+			return srcPort.equals(otherPath.srcPort) && 
+					dstPort.equals(otherPath.dstPort);
+		}
+		
+		@Override
+		public int hashCode() {
+			int hash = 17;
+			hash = 31 * hash + srcPort.hashCode();
+			hash = 31 * hash + dstPort.hashCode();
+			return hash;
+		}
+	}
+	
+	@Override
+	public Collection<Class<? extends IFloodlightService>> getModuleServices() {
+		List<Class<? extends IFloodlightService>> services = 
+				new ArrayList<Class<? extends IFloodlightService>>(1);
+		services.add(IForwardingService.class);
+		return services;
+	}
+
+	@Override
+	public Map<Class<? extends IFloodlightService>, IFloodlightService> getServiceImpls() {
+		Map<Class<? extends IFloodlightService>, IFloodlightService> impls = 
+				new HashMap<Class<? extends IFloodlightService>, IFloodlightService>(1);
+		impls.put(IForwardingService.class, this);
+		return impls;
+	}
+
+	@Override
+	public Collection<Class<? extends IFloodlightService>> getModuleDependencies() {
+		List<Class<? extends IFloodlightService>> dependencies = 
+				new ArrayList<Class<? extends IFloodlightService>>();
+		dependencies.add(IFloodlightProviderService.class);
+		dependencies.add(IFlowService.class);
+		dependencies.add(IFlowPusherService.class);
+		return dependencies;
+	}
+	
+	@Override
+	public void init(FloodlightModuleContext context) {
+		floodlightProvider = 
+				context.getServiceImpl(IFloodlightProviderService.class);
+		flowService = context.getServiceImpl(IFlowService.class);
+		flowPusher = context.getServiceImpl(IFlowPusherService.class);
+		datagrid = context.getServiceImpl(IDatagridService.class);
 		
 		floodlightProvider.addOFMessageListener(OFType.PACKET_IN, this);
 		
+		pendingFlows = new ConcurrentHashMap<Path, Long>();
+		//waitingPackets = Multimaps.synchronizedSetMultimap(
+				//HashMultimap.<Long, PacketToPush>create());
+		waitingPackets = HashMultimap.create();
+		
 		deviceStorage = new DeviceStorageImpl();
 		deviceStorage.init("","");
 		topologyService = new TopologyManager();
 		topologyService.init("","");
 	}
 	
-	public void startUp() {
+	@Override
+	public void startUp(FloodlightModuleContext context) {
 		// no-op
 	}
 
@@ -102,19 +197,48 @@
 		Ethernet eth = IFloodlightProviderService.bcStore.
 				get(cntx, IFloodlightProviderService.CONTEXT_PI_PAYLOAD);
 		
-		// We only want to handle unicast IPv4
-		if (eth.isBroadcast() || eth.isMulticast() || 
-				eth.getEtherType() != Ethernet.TYPE_IPv4) {
+		if (eth.getEtherType() != Ethernet.TYPE_IPv4) {
 			return Command.CONTINUE;
 		}
 		
-		handlePacketIn(sw, pi, eth);
+		if (eth.isBroadcast() || eth.isMulticast()) {
+			handleBroadcast(sw, pi, eth);
+			//return Command.CONTINUE;
+		}
+		else {
+			// Unicast
+			handlePacketIn(sw, pi, eth);
+		}
 		
 		return Command.STOP;
 	}
 	
+	private void handleBroadcast(IOFSwitch sw, OFPacketIn pi, Ethernet eth) {
+		if (log.isTraceEnabled()) {
+			log.trace("Sending broadcast packet to other ONOS instances");
+		}
+		
+		IPv4 ipv4Packet = (IPv4) eth.getPayload();
+		
+		// TODO We'll put the destination address here, because the current
+		// architecture needs an address. Addresses are only used for replies
+		// however, which don't apply to non-ARP packets. The ArpMessage class
+		// has become a bit too overloaded and should be refactored to 
+		// handle all use cases nicely.
+		 InetAddress targetAddress = 
+				InetAddresses.fromInteger(ipv4Packet.getDestinationAddress());
+		
+		// Piggy-back on the ARP mechanism to broadcast this packet out the
+		// edge. Luckily the ARP module doesn't check that the packet is
+		// actually ARP before broadcasting, so we can trick it into sending
+		// our non-ARP packets.
+		// TODO This should be refactored later to account for the new use case.
+		datagrid.sendArpRequest(ArpMessage.newRequest(targetAddress, eth.serialize()));
+	}
+	
 	private void handlePacketIn(IOFSwitch sw, OFPacketIn pi, Ethernet eth) {
-		String destinationMac = HexString.toHexString(eth.getDestinationMACAddress()); 
+		String destinationMac = 
+				HexString.toHexString(eth.getDestinationMACAddress()); 
 		
 		IDeviceObject deviceObject = deviceStorage.getDeviceByMac(
 				destinationMac);
@@ -124,7 +248,7 @@
 			return;
 		}
 		
-		Iterator<IPortObject> ports = deviceObject.getAttachedPorts().iterator();
+		Iterator<IPortObject> ports = deviceObject.getAttachedPorts().iterator();	
 		if (!ports.hasNext()) {
 			log.debug("No attachment point found for device {}", destinationMac);
 			return;
@@ -144,95 +268,91 @@
 		MACAddress srcMacAddress = MACAddress.valueOf(eth.getSourceMACAddress());
 		MACAddress dstMacAddress = MACAddress.valueOf(eth.getDestinationMACAddress());
 		
-		if (flowExists(srcSwitchPort, srcMacAddress, 
-				dstSwitchPort, dstMacAddress)) {
-			log.debug("Not adding flow because it already exists");
+		
+		FlowPath flowPath, reverseFlowPath;
+		
+		Path pathspec = new Path(srcSwitchPort, dstSwitchPort);
+		// TODO check concurrency
+		synchronized (lock) {
+			Long existingFlowId = pendingFlows.get(pathspec);
 			
-			// TODO check reverse flow as well
-			
-			DataPath shortestPath = 
-					topologyService.getDatabaseShortestPath(srcSwitchPort, dstSwitchPort);
-			
-			if (shortestPath == null || shortestPath.flowEntries().isEmpty()) {
-				log.warn("No path found between {} and {} - not handling packet",
-						srcSwitchPort, dstSwitchPort);
+			if (existingFlowId != null) {
+				log.debug("Found existing flow {}", 
+						HexString.toHexString(existingFlowId));
+				
+				OFPacketOut po = constructPacketOut(pi, sw);
+				waitingPackets.put(existingFlowId, new PacketToPush(po, sw.getId()));
 				return;
 			}
 			
-			Port outPort = shortestPath.flowEntries().get(0).outPort();
-			forwardPacket(pi, sw, outPort.value());
-			return;
-		}
-		
-		// Calculate a shortest path before pushing flow mods.
-		// This will be used later by the packet-out processing, but it uses
-		// the database so will be slow, and we should do it before flow mods.
-		DataPath shortestPath = 
-				topologyService.getDatabaseShortestPath(srcSwitchPort, dstSwitchPort);
-		
-		if (shortestPath == null || shortestPath.flowEntries().isEmpty()) {
-			log.warn("No path found between {} and {} - not handling packet",
-					srcSwitchPort, dstSwitchPort);
-			return;
-		}
-		
-		log.debug("Adding new flow between {} at {} and {} at {}",
-				new Object[]{srcMacAddress, srcSwitchPort, dstMacAddress, dstSwitchPort});
-		
-		
-		DataPath dataPath = new DataPath();
-		dataPath.setSrcPort(srcSwitchPort);
-		dataPath.setDstPort(dstSwitchPort);
-		
-		CallerId callerId = new CallerId("Forwarding");
-		
-		//FlowId flowId = new FlowId(flowService.getNextFlowEntryId());
-		FlowPath flowPath = new FlowPath();
-		//flowPath.setFlowId(flowId);
-		flowPath.setInstallerId(callerId);
-
-		flowPath.setFlowPathType(FlowPathType.FP_TYPE_SHORTEST_PATH);
-		flowPath.setFlowPathUserState(FlowPathUserState.FP_USER_ADD);
-		flowPath.setFlowEntryMatch(new FlowEntryMatch());
-		flowPath.flowEntryMatch().enableSrcMac(srcMacAddress);
-		flowPath.flowEntryMatch().enableDstMac(dstMacAddress);
-		// For now just forward IPv4 packets. This prevents accidentally
-		// forwarding other stuff like ARP.
-		flowPath.flowEntryMatch().enableEthernetFrameType(Ethernet.TYPE_IPv4);
-		flowPath.setDataPath(dataPath);
+			log.debug("Adding new flow between {} at {} and {} at {}",
+					new Object[]{srcMacAddress, srcSwitchPort, dstMacAddress, dstSwitchPort});
 			
-		FlowId flowId = flowService.addFlow(flowPath);
-		//flowService.addFlow(flowPath, flowId);
+			
+			CallerId callerId = new CallerId("Forwarding");
+			
+			DataPath datapath = new DataPath();
+			datapath.setSrcPort(srcSwitchPort);
+			datapath.setDstPort(dstSwitchPort);
+			
+			flowPath = new FlowPath();
+			flowPath.setInstallerId(callerId);
+	
+			flowPath.setFlowPathType(FlowPathType.FP_TYPE_SHORTEST_PATH);
+			flowPath.setFlowPathUserState(FlowPathUserState.FP_USER_ADD);
+			flowPath.setFlowEntryMatch(new FlowEntryMatch());
+			flowPath.setIdleTimeout(IDLE_TIMEOUT);
+			flowPath.setHardTimeout(HARD_TIMEOUT);
+			flowPath.flowEntryMatch().enableSrcMac(srcMacAddress);
+			flowPath.flowEntryMatch().enableDstMac(dstMacAddress);
+			flowPath.flowEntryMatch().enableEthernetFrameType(Ethernet.TYPE_IPv4);
+			flowPath.setDataPath(datapath);
+			
+			
+			DataPath reverseDataPath = new DataPath();
+			// Reverse the ports for the reverse path
+			reverseDataPath.setSrcPort(dstSwitchPort);
+			reverseDataPath.setDstPort(srcSwitchPort);
+			
+			// TODO implement copy constructor for FlowPath
+			reverseFlowPath = new FlowPath();
+			reverseFlowPath.setInstallerId(callerId);
+			reverseFlowPath.setFlowPathType(FlowPathType.FP_TYPE_SHORTEST_PATH);
+			reverseFlowPath.setFlowPathUserState(FlowPathUserState.FP_USER_ADD);
+			reverseFlowPath.setIdleTimeout(IDLE_TIMEOUT);
+			reverseFlowPath.setHardTimeout(HARD_TIMEOUT);
+			reverseFlowPath.setFlowEntryMatch(new FlowEntryMatch());
+			// Reverse the MAC addresses for the reverse path
+			reverseFlowPath.flowEntryMatch().enableSrcMac(dstMacAddress);
+			reverseFlowPath.flowEntryMatch().enableDstMac(srcMacAddress);
+			reverseFlowPath.flowEntryMatch().enableEthernetFrameType(Ethernet.TYPE_IPv4);
+			reverseFlowPath.setDataPath(reverseDataPath);
+			reverseFlowPath.dataPath().srcPort().dpid().toString();
+			
+			// TODO what happens if no path exists? cleanup
+			
+			FlowId flowId = new FlowId(flowService.getNextFlowEntryId());
+			FlowId reverseFlowId = new FlowId(flowService.getNextFlowEntryId());
+			
+			flowPath.setFlowId(flowId);
+			reverseFlowPath.setFlowId(reverseFlowId);
+			
+			OFPacketOut po = constructPacketOut(pi, sw);
+			Path reversePathSpec = new Path(dstSwitchPort, srcSwitchPort);
+			
+			// Add to waiting lists
+			pendingFlows.put(pathspec, flowId.value());
+			pendingFlows.put(reversePathSpec, reverseFlowId.value());
+			waitingPackets.put(flowId.value(), new PacketToPush(po, sw.getId()));
 		
+		}
 		
-		DataPath reverseDataPath = new DataPath();
-		// Reverse the ports for the reverse path
-		reverseDataPath.setSrcPort(dstSwitchPort);
-		reverseDataPath.setDstPort(srcSwitchPort);
+		flowService.addFlow(reverseFlowPath);
+		flowService.addFlow(flowPath);
 		
-		//FlowId reverseFlowId = new FlowId(flowService.getNextFlowEntryId());
-		// TODO implement copy constructor for FlowPath
-		FlowPath reverseFlowPath = new FlowPath();
-		//reverseFlowPath.setFlowId(reverseFlowId);
-		reverseFlowPath.setInstallerId(callerId);
-		reverseFlowPath.setFlowPathType(FlowPathType.FP_TYPE_SHORTEST_PATH);
-		reverseFlowPath.setFlowPathUserState(FlowPathUserState.FP_USER_ADD);
-		reverseFlowPath.setFlowEntryMatch(new FlowEntryMatch());
-		// Reverse the MAC addresses for the reverse path
-		reverseFlowPath.flowEntryMatch().enableSrcMac(dstMacAddress);
-		reverseFlowPath.flowEntryMatch().enableDstMac(srcMacAddress);
-		reverseFlowPath.flowEntryMatch().enableEthernetFrameType(Ethernet.TYPE_IPv4);
-		reverseFlowPath.setDataPath(reverseDataPath);
-		reverseFlowPath.dataPath().srcPort().dpid().toString();
-		
-		// TODO what happens if no path exists?
-		//flowService.addFlow(reverseFlowPath, reverseFlowId);
-		FlowId reverseFlowId = flowService.addFlow(reverseFlowPath);
-		
-		Port outPort = shortestPath.flowEntries().get(0).outPort();
-		forwardPacket(pi, sw, outPort.value());
 	}
 	
+	/*
 	private boolean flowExists(SwitchPort srcPort, MACAddress srcMac, 
 			SwitchPort dstPort, MACAddress dstMac) {
 		for (FlowPath flow : datagridService.getAllFlows()) {
@@ -258,17 +378,14 @@
 		
 		return false;
 	}
+	*/
 
-	private void forwardPacket(OFPacketIn pi, IOFSwitch sw, short port) {
-		List<OFAction> actions = new ArrayList<OFAction>(1);
-		actions.add(new OFActionOutput(port));
-		
+	private OFPacketOut constructPacketOut(OFPacketIn pi, IOFSwitch sw) {	
 		OFPacketOut po = new OFPacketOut();
 		po.setInPort(OFPort.OFPP_NONE)
 		.setInPort(pi.getInPort())
-		.setActions(actions)
-		.setActionsLength((short)OFActionOutput.MINIMUM_LENGTH)
-		.setLengthU(OFPacketOut.MINIMUM_LENGTH + OFActionOutput.MINIMUM_LENGTH);
+		.setActions(new ArrayList<OFAction>())
+		.setLengthU(OFPacketOut.MINIMUM_LENGTH);
 		
 		if (sw.getBuffers() == 0) {
 			po.setBufferId(OFPacketOut.BUFFER_ID_NONE)
@@ -279,11 +396,44 @@
 			po.setBufferId(pi.getBufferId());
 		}
 		
-		try {
-			sw.write(po, null);
-			sw.flush();
-		} catch (IOException e) {
-			log.error("Error writing packet out to switch: {}", e);
+		return po;
+	}
+
+	@Override
+	public void flowsInstalled(Collection<FlowPath> installedFlowPaths) {
+		for (FlowPath flowPath : installedFlowPaths) {
+			flowInstalled(flowPath);
+		}
+	}
+
+	private void flowInstalled(FlowPath installedFlowPath) {
+		// TODO check concurrency
+		// will need to sync and access both collections at once.
+		long flowId = installedFlowPath.flowId().value();
+		
+		Collection<PacketToPush> packets;
+		synchronized (lock) {
+			packets = waitingPackets.removeAll(flowId);
+			
+			//remove pending flows entry
+			Path pathToRemove = new Path(installedFlowPath.dataPath().srcPort(),
+					installedFlowPath.dataPath().dstPort());
+			pendingFlows.remove(pathToRemove);
+			
+		}
+		
+		for (PacketToPush packet : packets) {
+			IOFSwitch sw = floodlightProvider.getSwitches().get(packet.dpid); // NOTE(review): may be null if the switch disconnected — confirm
+			
+			OFPacketOut po = packet.packet;
+			short outPort = 
+					installedFlowPath.flowEntries().get(0).outPort().value(); // assumes installed path has >= 1 flow entry — TODO confirm
+			po.getActions().add(new OFActionOutput(outPort));
+			po.setActionsLength((short)
+					(po.getActionsLength() + OFActionOutput.MINIMUM_LENGTH));
+			po.setLengthU(po.getLengthU() + OFActionOutput.MINIMUM_LENGTH);
+			
+			flowPusher.add(sw, po);
 		}
 	}
 }
diff --git a/src/main/java/net/onrc/onos/ofcontroller/forwarding/IForwardingService.java b/src/main/java/net/onrc/onos/ofcontroller/forwarding/IForwardingService.java
new file mode 100644
index 0000000..e5bd714
--- /dev/null
+++ b/src/main/java/net/onrc/onos/ofcontroller/forwarding/IForwardingService.java
@@ -0,0 +1,25 @@
+package net.onrc.onos.ofcontroller.forwarding;
+
+import java.util.Collection;
+
+import net.floodlightcontroller.core.module.IFloodlightService;
+import net.onrc.onos.ofcontroller.util.FlowPath;
+
+/**
+ * Temporary interface that allows the Forwarding module to be
+ * notified when a flow has been installed by the FlowManager.
+ * 
+ * This should be refactored to a listener framework in the future.
+ * @author jono
+ *
+ */
+public interface IForwardingService extends IFloodlightService {
+	/**
+	 * Notify the Forwarding module that a collection of flows has been
+	 * installed in the network.
+	 *
+	 * @param installedFlowPaths the collection of FlowPaths that have
+	 * been installed in the network.
+	 */
+	public void flowsInstalled(Collection<FlowPath> installedFlowPaths);
+}
diff --git a/src/main/java/net/onrc/onos/ofcontroller/linkdiscovery/internal/LinkDiscoveryManager.java b/src/main/java/net/onrc/onos/ofcontroller/linkdiscovery/internal/LinkDiscoveryManager.java
index c03b266..8077201 100644
--- a/src/main/java/net/onrc/onos/ofcontroller/linkdiscovery/internal/LinkDiscoveryManager.java
+++ b/src/main/java/net/onrc/onos/ofcontroller/linkdiscovery/internal/LinkDiscoveryManager.java
@@ -18,7 +18,6 @@
 package net.onrc.onos.ofcontroller.linkdiscovery.internal;
 
 import java.io.IOException;
-
 import java.net.InetAddress;
 import java.net.InetSocketAddress;
 import java.net.NetworkInterface;
@@ -41,9 +40,6 @@
 
 import net.floodlightcontroller.core.FloodlightContext;
 import net.floodlightcontroller.core.IFloodlightProviderService;
-import net.floodlightcontroller.core.IFloodlightProviderService.Role;
-import net.floodlightcontroller.core.IHAListener;
-import net.floodlightcontroller.core.IInfoProvider;
 import net.floodlightcontroller.core.IOFMessageListener;
 import net.floodlightcontroller.core.IOFSwitch;
 import net.floodlightcontroller.core.IOFSwitchListener;
@@ -63,24 +59,17 @@
 import net.floodlightcontroller.packet.LLDPTLV;
 import net.floodlightcontroller.restserver.IRestApiService;
 import net.floodlightcontroller.routing.Link;
-import net.floodlightcontroller.storage.IResultSet;
-import net.floodlightcontroller.storage.IStorageSourceListener;
-import net.floodlightcontroller.storage.IStorageSourceService;
-import net.floodlightcontroller.storage.OperatorPredicate;
-import net.floodlightcontroller.storage.StorageException;
 import net.floodlightcontroller.threadpool.IThreadPoolService;
 import net.floodlightcontroller.topology.NodePortTuple;
 import net.floodlightcontroller.util.EventHistory;
 import net.floodlightcontroller.util.EventHistory.EvAction;
 import net.onrc.onos.ofcontroller.core.IOnosRemoteSwitch;
 import net.onrc.onos.ofcontroller.linkdiscovery.ILinkDiscovery;
+import net.onrc.onos.ofcontroller.linkdiscovery.ILinkDiscovery.LDUpdate;
+import net.onrc.onos.ofcontroller.linkdiscovery.ILinkDiscovery.UpdateOperation;
 import net.onrc.onos.ofcontroller.linkdiscovery.ILinkDiscoveryListener;
 import net.onrc.onos.ofcontroller.linkdiscovery.ILinkDiscoveryService;
 import net.onrc.onos.ofcontroller.linkdiscovery.LinkInfo;
-import net.onrc.onos.ofcontroller.linkdiscovery.ILinkDiscovery.LDUpdate;
-import net.onrc.onos.ofcontroller.linkdiscovery.ILinkDiscovery.LinkType;
-import net.onrc.onos.ofcontroller.linkdiscovery.ILinkDiscovery.SwitchType;
-import net.onrc.onos.ofcontroller.linkdiscovery.ILinkDiscovery.UpdateOperation;
 import net.onrc.onos.ofcontroller.linkdiscovery.web.LinkDiscoveryWebRoutable;
 import net.onrc.onos.registry.controller.IControllerRegistryService;
 
@@ -124,27 +113,11 @@
 @LogMessageCategory("Network Topology")
 public class LinkDiscoveryManager
 implements IOFMessageListener, IOFSwitchListener, 
-IStorageSourceListener, ILinkDiscoveryService,
-IFloodlightModule, IInfoProvider, IHAListener {
+ILinkDiscoveryService, IFloodlightModule {
 	protected IFloodlightProviderService controller;
     protected final static Logger log = LoggerFactory.getLogger(LinkDiscoveryManager.class);
 
-    // Names of table/fields for links in the storage API
-    private static final String LINK_TABLE_NAME = "controller_link";
-    private static final String LINK_ID = "id";
-    private static final String LINK_SRC_SWITCH = "src_switch_id";
-    private static final String LINK_SRC_PORT = "src_port";
-    private static final String LINK_SRC_PORT_STATE = "src_port_state";
-    private static final String LINK_DST_SWITCH = "dst_switch_id";
-    private static final String LINK_DST_PORT = "dst_port";
-    private static final String LINK_DST_PORT_STATE = "dst_port_state";
-    private static final String LINK_VALID_TIME = "valid_time";
-    private static final String LINK_TYPE = "link_type";
-    private static final String SWITCH_CONFIG_TABLE_NAME = "controller_switchconfig";
-    private static final String SWITCH_CONFIG_CORE_SWITCH = "core_switch";
-
     protected IFloodlightProviderService floodlightProvider;
-    protected IStorageSourceService storageSource;
     protected IThreadPoolService threadPool;
     protected IRestApiService restApi;
     // Registry Service for ONOS
@@ -246,26 +219,24 @@
 	            recommendation=LogMessageDoc.GENERIC_ACTION)
 		@Override
 		public void dispatch() {
-			 if (linkDiscoveryAware != null) {
-	                if (log.isTraceEnabled()) {
-	                    log.trace("Dispatching link discovery update {} {} {} {} {} for {}",
-	                              new Object[]{this.getOperation(),
-	                                           HexString.toHexString(this.getSrc()), this.getSrcPort(),
-	                                           HexString.toHexString(this.getDst()), this.getDstPort(),
-	                                           linkDiscoveryAware});
-	                }
-	                try {
-	                    for (ILinkDiscoveryListener lda : linkDiscoveryAware) { // order maintained
-	                        lda.linkDiscoveryUpdate(this);
-	                    }
-	                }
-	                catch (Exception e) {
-	                    log.error("Error in link discovery updates loop", e);
-	                }
-	            }
-			
+			if (linkDiscoveryAware != null) {
+                if (log.isTraceEnabled()) {
+                    log.trace("Dispatching link discovery update {} {} {} {} {} for {}",
+                              new Object[]{this.getOperation(),
+                                           HexString.toHexString(this.getSrc()), this.getSrcPort(),
+                                           HexString.toHexString(this.getDst()), this.getDstPort(),
+                                           linkDiscoveryAware});
+                }
+                try {
+                    for (ILinkDiscoveryListener lda : linkDiscoveryAware) { // order maintained
+                        lda.linkDiscoveryUpdate(this);
+                    }
+                }
+                catch (Exception e) {
+                    log.error("Error in link discovery updates loop", e);
+                }
+            }
 		}
-    	
     }
 
     /**
@@ -1063,8 +1034,6 @@
                 // Add to portNOFLinks if the unicast valid time is null
                 if (newInfo.getUnicastValidTime() == null)
                     addLinkToBroadcastDomain(lt);
-
-                writeLinkToStorage(lt, newInfo);
                 
                 // ONOS: Distinguish added event separately from updated event
                 updateOperation = UpdateOperation.LINK_ADDED;
@@ -1118,11 +1087,6 @@
                         oldInfo.getDstPortState().intValue())
                     linkChanged = true;
 
-                // Write changes to storage. This will always write the updated
-                // valid time, plus the port states if they've changed (i.e. if
-                // they weren't set to null in the previous block of code.
-                writeLinkToStorage(lt, newInfo);
-
                 if (linkChanged) {
                     updateOperation = getUpdateOperation(newInfo.getSrcPortState(),
                                                          newInfo.getDstPortState());
@@ -1209,9 +1173,6 @@
                                ILinkDiscovery.LinkType.INVALID_LINK,
                                EvAction.LINK_DELETED, reason);
 
-                // remove link from storage.
-                removeLinkFromStorage(lt);
-
                 // TODO  Whenever link is removed, it has to checked if
                 // the switchports must be added to quarantine.
 
@@ -1304,7 +1265,7 @@
                                                      getLinkType(lt, linkInfo),
                                                      operation));
                             controller.publishUpdate(update);
-                            writeLinkToStorage(lt, linkInfo);
+                            
                             linkInfoChanged = true;
                         }
                     }
@@ -1578,123 +1539,6 @@
         }
     }
 
-    // STORAGE METHODS
-    /**
-     * Deletes all links from storage
-     */
-    void clearAllLinks() {
-        storageSource.deleteRowsAsync(LINK_TABLE_NAME, null);
-    }
-
-    /**
-     * Gets the storage key for a LinkTuple
-     * @param lt The LinkTuple to get
-     * @return The storage key as a String
-     */
-    private String getLinkId(Link lt) {
-        return HexString.toHexString(lt.getSrc()) +
-                "-" + lt.getSrcPort() + "-" +
-                HexString.toHexString(lt.getDst())+
-                "-" + lt.getDstPort();
-    }
-
-    /**
-     * Writes a LinkTuple and corresponding LinkInfo to storage
-     * @param lt The LinkTuple to write
-     * @param linkInfo The LinkInfo to write
-     */
-    protected void writeLinkToStorage(Link lt, LinkInfo linkInfo) {
-        LinkType type = getLinkType(lt, linkInfo);
-
-        // Write only direct links.  Do not write links to external
-        // L2 network.
-        // if (type != LinkType.DIRECT_LINK && type != LinkType.TUNNEL) {
-        //    return;
-        // }
-
-        Map<String, Object> rowValues = new HashMap<String, Object>();
-        String id = getLinkId(lt);
-        rowValues.put(LINK_ID, id);
-        rowValues.put(LINK_VALID_TIME, linkInfo.getUnicastValidTime());
-        String srcDpid = HexString.toHexString(lt.getSrc());
-        rowValues.put(LINK_SRC_SWITCH, srcDpid);
-        rowValues.put(LINK_SRC_PORT, lt.getSrcPort());
-
-        if (type == LinkType.DIRECT_LINK)
-            rowValues.put(LINK_TYPE, "internal");
-        else if (type == LinkType.MULTIHOP_LINK) 
-            rowValues.put(LINK_TYPE, "external");
-        else if (type == LinkType.TUNNEL) 
-            rowValues.put(LINK_TYPE, "tunnel"); 
-        else rowValues.put(LINK_TYPE, "invalid");
-
-        if (linkInfo.linkStpBlocked()) {
-            if (log.isTraceEnabled()) {
-                log.trace("writeLink, link {}, info {}, srcPortState Blocked",
-                          lt, linkInfo);
-            }
-            rowValues.put(LINK_SRC_PORT_STATE,
-                          OFPhysicalPort.OFPortState.OFPPS_STP_BLOCK.getValue());
-        } else {
-            if (log.isTraceEnabled()) {
-                log.trace("writeLink, link {}, info {}, srcPortState {}",
-                          new Object[]{ lt, linkInfo, linkInfo.getSrcPortState() });
-            }
-            rowValues.put(LINK_SRC_PORT_STATE, linkInfo.getSrcPortState());
-        }
-        String dstDpid = HexString.toHexString(lt.getDst());
-        rowValues.put(LINK_DST_SWITCH, dstDpid);
-        rowValues.put(LINK_DST_PORT, lt.getDstPort());
-        if (linkInfo.linkStpBlocked()) {
-            if (log.isTraceEnabled()) {
-                log.trace("writeLink, link {}, info {}, dstPortState Blocked",
-                          lt, linkInfo);
-            }
-            rowValues.put(LINK_DST_PORT_STATE,
-                          OFPhysicalPort.OFPortState.OFPPS_STP_BLOCK.getValue());
-        } else {
-            if (log.isTraceEnabled()) {
-                log.trace("writeLink, link {}, info {}, dstPortState {}",
-                          new Object[]{ lt, linkInfo, linkInfo.getDstPortState() });
-            }
-            rowValues.put(LINK_DST_PORT_STATE, linkInfo.getDstPortState());
-        }
-        storageSource.updateRowAsync(LINK_TABLE_NAME, rowValues);
-    }
-
-    public Long readLinkValidTime(Link lt) {
-        // FIXME: We're not currently using this right now, but if we start
-        // to use this again, we probably shouldn't use it in its current
-        // form, because it's doing synchronous storage calls. Depending
-        // on the context this may still be OK, but if it's being called
-        // on the packet in processing thread it should be reworked to
-        // use asynchronous storage calls.
-        Long validTime = null;
-        IResultSet resultSet = null;
-        try {
-            String[] columns = { LINK_VALID_TIME };
-            String id = getLinkId(lt);
-            resultSet = storageSource.executeQuery(LINK_TABLE_NAME, columns,
-                                                   new OperatorPredicate(LINK_ID, OperatorPredicate.Operator.EQ, id), null);
-            if (resultSet.next())
-                validTime = resultSet.getLong(LINK_VALID_TIME);
-        }
-        finally {
-            if (resultSet != null)
-                resultSet.close();
-        }
-        return validTime;
-    }
-
-    /**
-     * Removes a link from storage using an asynchronous call.
-     * @param lt The LinkTuple to delete.
-     */
-    protected void removeLinkFromStorage(Link lt) {
-        String id = getLinkId(lt);
-        storageSource.deleteRowAsync(LINK_TABLE_NAME, id);
-    }
-
     @Override
     public void addListener(ILinkDiscoveryListener listener) {
         linkDiscoveryAware.add(listener);
@@ -1718,22 +1562,6 @@
         this.linkDiscoveryAware.remove(linkDiscoveryAwareComponent);
     }
 
-    /**
-     * Sets the IStorageSource to use for ITology
-     * @param storageSource the storage source to use
-     */
-    public void setStorageSource(IStorageSourceService storageSource) {
-        this.storageSource = storageSource;
-    }
-
-    /**
-     * Gets the storage source for this ITopology
-     * @return The IStorageSource ITopology is writing to
-     */
-    public IStorageSourceService getStorageSource() {
-        return storageSource;
-    }
-
     @Override
     public boolean isCallbackOrderingPrereq(OFType type, String name) {
         return false;
@@ -1744,72 +1572,6 @@
         return false;
     }
 
-    @Override
-    public void rowsModified(String tableName, Set<Object> rowKeys) {
-        Map<Long, IOFSwitch> switches = floodlightProvider.getSwitches();
-        ArrayList<IOFSwitch> updated_switches = new ArrayList<IOFSwitch>();
-        for(Object key: rowKeys) {
-            Long swId = new Long(HexString.toLong((String)key));
-            if (switches.containsKey(swId)) {
-                IOFSwitch sw = switches.get(swId);
-                boolean curr_status = sw.hasAttribute(IOFSwitch.SWITCH_IS_CORE_SWITCH);
-                boolean new_status =  false;
-                IResultSet resultSet = null;
-
-                try {
-                    resultSet = storageSource.getRow(tableName, key);
-                    for (Iterator<IResultSet> it = resultSet.iterator(); it.hasNext();) {
-                        // In case of multiple rows, use the status in last row?
-                        Map<String, Object> row = it.next().getRow();
-                        if (row.containsKey(SWITCH_CONFIG_CORE_SWITCH)) {
-                            new_status = ((String)row.get(SWITCH_CONFIG_CORE_SWITCH)).equals("true");
-                        }
-                    }
-                }
-                finally {
-                    if (resultSet != null)
-                        resultSet.close();
-                }
-
-                if (curr_status != new_status) {
-                    updated_switches.add(sw);
-                }
-            } else {
-                if (log.isTraceEnabled()) {
-                    log.trace("Update for switch which has no entry in switch " +
-                            "list (dpid={}), a delete action.", (String)key);
-                }
-            }
-        }
-
-        for (IOFSwitch sw : updated_switches) {
-            // Set SWITCH_IS_CORE_SWITCH to it's inverse value
-            if (sw.hasAttribute(IOFSwitch.SWITCH_IS_CORE_SWITCH)) {
-                sw.removeAttribute(IOFSwitch.SWITCH_IS_CORE_SWITCH);
-                if (log.isTraceEnabled()) {
-                    log.trace("SWITCH_IS_CORE_SWITCH set to False for {}", sw);
-                }
-                LinkUpdate update = new LinkUpdate(new LDUpdate(sw.getId(), SwitchType.BASIC_SWITCH,
-                                         UpdateOperation.SWITCH_UPDATED));
-                controller.publishUpdate(update);
-            }
-            else {
-                sw.setAttribute(IOFSwitch.SWITCH_IS_CORE_SWITCH, new Boolean(true));
-                if (log.isTraceEnabled()) {
-                    log.trace("SWITCH_IS_CORE_SWITCH set to True for {}", sw);
-                }
-                LinkUpdate update = new LinkUpdate(new LDUpdate(sw.getId(), SwitchType.CORE_SWITCH,
-                                         UpdateOperation.SWITCH_UPDATED));
-                controller.publishUpdate(update);
-            }
-        }
-    }
-
-    @Override
-    public void rowsDeleted(String tableName, Set<Object> rowKeys) {
-        // Ignore delete events, the switch delete will do the right thing on it's own
-    }
-
     // IFloodlightModule classes
 
     @Override
@@ -1838,7 +1600,6 @@
         Collection<Class<? extends IFloodlightService>> l = 
                 new ArrayList<Class<? extends IFloodlightService>>();
         l.add(IFloodlightProviderService.class);
-        l.add(IStorageSourceService.class);
         l.add(IThreadPoolService.class);
         l.add(IRestApiService.class);
         // Added by ONOS
@@ -1850,7 +1611,6 @@
     public void init(FloodlightModuleContext context)
             throws FloodlightModuleException {
         floodlightProvider = context.getServiceImpl(IFloodlightProviderService.class);
-        storageSource = context.getServiceImpl(IStorageSourceService.class);
         threadPool = context.getServiceImpl(IThreadPoolService.class);
         restApi = context.getServiceImpl(IRestApiService.class);
         // Added by ONOS
@@ -1906,23 +1666,6 @@
                 recommendation=LogMessageDoc.CHECK_SWITCH)
     })
     public void startUp(FloodlightModuleContext context) {
-        // Create our storage tables
-        if (storageSource == null) {
-            log.error("No storage source found.");
-            return;
-        }
-
-        storageSource.createTable(LINK_TABLE_NAME, null);
-        storageSource.setTablePrimaryKeyName(LINK_TABLE_NAME, LINK_ID);
-        storageSource.deleteMatchingRows(LINK_TABLE_NAME, null);
-        // Register for storage updates for the switch table
-        try {
-            storageSource.addListener(SWITCH_CONFIG_TABLE_NAME, this);
-        } catch (StorageException ex) {
-            log.error("Error in installing listener for " +
-            		  "switch table {}", SWITCH_CONFIG_TABLE_NAME);
-        }
-
         ScheduledExecutorService ses = threadPool.getScheduledExecutor();
         controller =
                 context.getServiceImpl(IFloodlightProviderService.class);
@@ -1933,36 +1676,22 @@
             public void run() {
                 try {
                     discoverLinks();
-                } catch (StorageException e) {
-                    log.error("Storage exception in LLDP send timer; " + 
-                            "terminating process", e);
-                    floodlightProvider.terminate();
                 } catch (Exception e) {
                     log.error("Exception in LLDP send timer.", e);
                 } finally {
                     if (!shuttingDown) {
-                        // null role implies HA mode is not enabled.
-                         Role role = floodlightProvider.getRole();
-                         if (role == null || role == Role.MASTER) {
-                             log.trace("Rescheduling discovery task as role = {}", role);
-                             discoveryTask.reschedule(DISCOVERY_TASK_INTERVAL,
-                                                TimeUnit.SECONDS);
-                         } else {
-                             log.trace("Stopped LLDP rescheduling due to role = {}.", role);
-                         }
+                    	// Always reschedule link discovery if we're not 
+                    	// shutting down (no chance of SLAVE role now)
+                        log.trace("Rescheduling discovery task");
+                        discoveryTask.reschedule(DISCOVERY_TASK_INTERVAL,
+                        					TimeUnit.SECONDS);
                     }
                 }
             }
         });
 
-        // null role implies HA mode is not enabled.
-        Role role = floodlightProvider.getRole();
-        if (role == null || role == Role.MASTER) {
-            log.trace("Setup: Rescheduling discovery task. role = {}", role);
-            discoveryTask.reschedule(DISCOVERY_TASK_INTERVAL, TimeUnit.SECONDS);
-        } else {
-                log.trace("Setup: Not scheduling LLDP as role = {}.", role);
-        }
+        // Always reschedule link discovery as we are never in SLAVE role now
+        discoveryTask.reschedule(DISCOVERY_TASK_INTERVAL, TimeUnit.SECONDS);
 
         // Setup the BDDP task.  It is invoked whenever switch port tuples
         // are added to the quarantine list.
@@ -1975,8 +1704,6 @@
         floodlightProvider.addOFMessageListener(OFType.PORT_STATUS, this);
         // Register for switch updates
         floodlightProvider.addOFSwitchListener(this);
-        floodlightProvider.addHAListener(this);
-        floodlightProvider.addInfoProvider("summary", this);
         if (restApi != null)
             restApi.addRestletRoutable(new LinkDiscoveryWebRoutable());
         setControllerTLV();
@@ -2061,59 +1788,6 @@
         evTopoCluster = evHistTopologyCluster.put(evTopoCluster, action);
     }
 
-    @Override
-    public Map<String, Object> getInfo(String type) {
-        if (!"summary".equals(type)) return null;
-
-        Map<String, Object> info = new HashMap<String, Object>();
-
-        int num_links = 0;
-        for (Set<Link> links : switchLinks.values())
-            num_links += links.size();
-        info.put("# inter-switch links", num_links / 2);
-
-        return info;
-    }
-
-    // IHARoleListener
-    @Override
-    public void roleChanged(Role oldRole, Role newRole) {
-        switch(newRole) {
-            case MASTER:
-                if (oldRole == Role.SLAVE) {
-                    if (log.isTraceEnabled()) {
-                        log.trace("Sending LLDPs " +
-                                "to HA change from SLAVE->MASTER");
-                    }
-                    clearAllLinks();
-                    log.debug("Role Change to Master: Rescheduling discovery task.");
-                    discoveryTask.reschedule(1, TimeUnit.MICROSECONDS);
-                }
-                break;
-            case SLAVE:
-                if (log.isTraceEnabled()) {
-                    log.trace("Clearing links due to " +
-                            "HA change to SLAVE");
-                }
-                switchLinks.clear();
-                links.clear();
-                portLinks.clear();
-                portBroadcastDomainLinks.clear();
-                discoverOnAllPorts();
-                break;
-            default:
-                break;
-        }
-    }
-
-    @Override
-    public void controllerNodeIPsChanged(
-                                         Map<String, String> curControllerNodeIPs,
-                                         Map<String, String> addedControllerNodeIPs,
-                                         Map<String, String> removedControllerNodeIPs) {
-        // ignore
-    }
-
     public boolean isAutoPortFastFeature() {
         return autoPortFastFeature;
     }
diff --git a/src/main/java/net/onrc/onos/ofcontroller/proxyarp/ArpMessage.java b/src/main/java/net/onrc/onos/ofcontroller/proxyarp/ArpMessage.java
index 53891ef..ee8f23d 100644
--- a/src/main/java/net/onrc/onos/ofcontroller/proxyarp/ArpMessage.java
+++ b/src/main/java/net/onrc/onos/ofcontroller/proxyarp/ArpMessage.java
@@ -2,10 +2,7 @@
 
 import java.io.Serializable;
 import java.net.InetAddress;
-import java.util.ArrayList;
-import java.util.List;
-
-import net.onrc.onos.ofcontroller.util.SwitchPort;
+import net.floodlightcontroller.util.MACAddress;
 
 public class ArpMessage implements Serializable {
 
@@ -18,8 +15,14 @@
 	private final InetAddress forAddress;
 	private final byte[] packetData;
 	
-	private final List<SwitchPort> switchPorts = new ArrayList<SwitchPort>();
+	//ARP reply message needs MAC info
+	private final MACAddress mac;
+	//Sending the ARP request only to the device's attachment point requires the attachment switch and port.
+	private final long outSwitch; 
+	private final short outPort;
 	
+
+
 	public enum Type {
 		REQUEST,
 		REPLY
@@ -30,14 +33,41 @@
 		this.type = type;
 		this.forAddress = address;
 		this.packetData = eth;
+		this.mac = null;
+		this.outSwitch = -1;
+		this.outPort = -1;
 	}
 	
 	private ArpMessage(Type type, InetAddress address) {
 		this.type = type;
 		this.forAddress = address;
 		this.packetData = null;
+		this.mac = null;
+		this.outSwitch = -1;
+		this.outPort = -1;
+		
+	}
+	// Constructs an ARP reply message carrying the MAC address
+	private ArpMessage(Type type, InetAddress address, MACAddress mac) {
+		this.type = type;
+		this.forAddress = address;
+		this.packetData = null;
+		this.mac = mac;
+		this.outSwitch = -1;
+		this.outPort = -1;
 	}
 	
+	// construct ARP request message with attachment switch and port
+	private ArpMessage(Type type, InetAddress address, byte[] arpRequest,
+			long outSwitch, short outPort) {
+		this.type = type;
+		this.forAddress = address;
+		this.packetData = arpRequest; 	
+		this.mac = null;
+		this.outSwitch = outSwitch; 
+		this.outPort = outPort;	
+	}
+
 	public static ArpMessage newRequest(InetAddress forAddress, byte[] arpRequest) {
 		return new ArpMessage(Type.REQUEST, forAddress, arpRequest);
 	}
@@ -45,6 +75,16 @@
 	public static ArpMessage newReply(InetAddress forAddress) {
 		return new ArpMessage(Type.REPLY, forAddress);
 	}
+	//ARP reply message with MAC
+	public static ArpMessage newReply(InetAddress forAddress, MACAddress mac) {
+		return new ArpMessage(Type.REPLY, forAddress, mac);
+
+	}
+	//ARP request message with attachment switch and port
+	public static ArpMessage newRequest(InetAddress forAddress, byte[] arpRequest, long outSwitch, short outPort ) {
+		return new ArpMessage(Type.REQUEST, forAddress, arpRequest, outSwitch, outPort);
+
+	}
 
 	public Type getType() {
 		return type;
@@ -57,4 +97,16 @@
 	public byte[] getPacket() {
 		return packetData;
 	}
+	public MACAddress getMAC() {
+		return mac;
+	}
+
+	public long getOutSwitch() {
+		return outSwitch;
+	}
+
+	public short getOutPort() {
+		return outPort;
+	}
+
 }
diff --git a/src/main/java/net/onrc/onos/ofcontroller/proxyarp/IProxyArpService.java b/src/main/java/net/onrc/onos/ofcontroller/proxyarp/IProxyArpService.java
index 71546a1..2029513 100644
--- a/src/main/java/net/onrc/onos/ofcontroller/proxyarp/IProxyArpService.java
+++ b/src/main/java/net/onrc/onos/ofcontroller/proxyarp/IProxyArpService.java
@@ -3,11 +3,11 @@
 import java.net.InetAddress;
 import java.util.List;
 
+import net.floodlightcontroller.core.module.IFloodlightService;
 import net.floodlightcontroller.util.MACAddress;
-import net.onrc.onos.ofcontroller.core.module.IOnosService;
 
 //Extends IFloodlightService so we can access it from REST API resources
-public interface IProxyArpService extends IOnosService{
+public interface IProxyArpService extends IFloodlightService {
 	/**
 	 * Returns the MAC address if there is a valid entry in the cache.
 	 * Otherwise returns null.
diff --git a/src/main/java/net/onrc/onos/ofcontroller/proxyarp/ProxyArpManager.java b/src/main/java/net/onrc/onos/ofcontroller/proxyarp/ProxyArpManager.java
index f6d8c9c..5e0b752 100644
--- a/src/main/java/net/onrc/onos/ofcontroller/proxyarp/ProxyArpManager.java
+++ b/src/main/java/net/onrc/onos/ofcontroller/proxyarp/ProxyArpManager.java
@@ -5,6 +5,7 @@
 import java.net.UnknownHostException;
 import java.util.ArrayList;
 import java.util.Collection;
+import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
 import java.util.List;
@@ -17,6 +18,9 @@
 import net.floodlightcontroller.core.IFloodlightProviderService;
 import net.floodlightcontroller.core.IOFMessageListener;
 import net.floodlightcontroller.core.IOFSwitch;
+import net.floodlightcontroller.core.module.FloodlightModuleContext;
+import net.floodlightcontroller.core.module.IFloodlightModule;
+import net.floodlightcontroller.core.module.IFloodlightService;
 import net.floodlightcontroller.packet.ARP;
 import net.floodlightcontroller.packet.Ethernet;
 import net.floodlightcontroller.packet.IPv4;
@@ -28,11 +32,10 @@
 import net.onrc.onos.ofcontroller.core.IDeviceStorage;
 import net.onrc.onos.ofcontroller.core.INetMapTopologyObjects.IDeviceObject;
 import net.onrc.onos.ofcontroller.core.INetMapTopologyObjects.IPortObject;
-import net.onrc.onos.ofcontroller.core.INetMapTopologyService.ITopoLinkService;
+import net.onrc.onos.ofcontroller.core.INetMapTopologyObjects.ISwitchObject;
 import net.onrc.onos.ofcontroller.core.INetMapTopologyService.ITopoSwitchService;
 import net.onrc.onos.ofcontroller.core.config.IConfigInfoService;
 import net.onrc.onos.ofcontroller.core.internal.DeviceStorageImpl;
-import net.onrc.onos.ofcontroller.core.internal.TopoLinkServiceImpl;
 import net.onrc.onos.ofcontroller.core.internal.TopoSwitchServiceImpl;
 import net.onrc.onos.ofcontroller.util.Dpid;
 import net.onrc.onos.ofcontroller.util.Port;
@@ -55,12 +58,12 @@
 import com.google.common.net.InetAddresses;
 
 public class ProxyArpManager implements IProxyArpService, IOFMessageListener,
-										IArpEventHandler {
+										IArpEventHandler, IFloodlightModule {
 	private final static Logger log = LoggerFactory.getLogger(ProxyArpManager.class);
 	
-	private final long ARP_TIMER_PERIOD = 60000; //ms (== 1 min) 
-	
-	private static final int ARP_REQUEST_TIMEOUT = 2000; //ms
+	private final long ARP_TIMER_PERIOD = 100; //ms  
+
+	private static final int ARP_REQUEST_TIMEOUT = 500; //ms
 			
 	private IFloodlightProviderService floodlightProvider;
 	private ITopologyService topology;
@@ -70,7 +73,6 @@
 	
 	private IDeviceStorage deviceStorage;
 	private volatile ITopoSwitchService topoSwitchService;
-	private ITopoLinkService topoLinkService;
 	
 	private short vlan;
 	private static final short NO_VLAN = 0;
@@ -126,22 +128,42 @@
 		}
 	}
 	
-	/*
-	public ProxyArpManager(IFloodlightProviderService floodlightProvider,
-				ITopologyService topology, IConfigInfoService configService,
-				IRestApiService restApi){
-
+	@Override
+	public Collection<Class<? extends IFloodlightService>> getModuleServices() {
+		Collection<Class<? extends IFloodlightService>> l 
+			= new ArrayList<Class<? extends IFloodlightService>>();
+		l.add(IProxyArpService.class);
+		return l;
 	}
-	*/
+
+	@Override
+	public Map<Class<? extends IFloodlightService>, IFloodlightService> getServiceImpls() {
+		Map<Class<? extends IFloodlightService>, IFloodlightService> m 
+			= new HashMap<Class<? extends IFloodlightService>, IFloodlightService>();
+		m.put(IProxyArpService.class, this);
+		return m;
+	}
+
+	@Override
+	public Collection<Class<? extends IFloodlightService>> getModuleDependencies() {
+		Collection<Class<? extends IFloodlightService>> dependencies 
+			= new ArrayList<Class<? extends IFloodlightService>>();
+		dependencies.add(IFloodlightProviderService.class);
+		dependencies.add(ITopologyService.class);
+		dependencies.add(IRestApiService.class);
+		dependencies.add(IDatagridService.class);
+		dependencies.add(IConfigInfoService.class);
+		return dependencies;
+	}
 	
-	public void init(IFloodlightProviderService floodlightProvider,
-			ITopologyService topology, IDatagridService datagrid,
-			IConfigInfoService config, IRestApiService restApi){
-		this.floodlightProvider = floodlightProvider;
-		this.topology = topology;
-		this.datagrid = datagrid;
-		this.configService = config;
-		this.restApi = restApi;
+	@Override
+	public void init(FloodlightModuleContext context){
+		this.floodlightProvider = 
+				context.getServiceImpl(IFloodlightProviderService.class);
+		this.topology = context.getServiceImpl(ITopologyService.class);
+		this.datagrid = context.getServiceImpl(IDatagridService.class);
+		this.configService = context.getServiceImpl(IConfigInfoService.class);
+		this.restApi = context.getServiceImpl(IRestApiService.class);
 		
 		arpCache = new ArpCache();
 
@@ -149,10 +171,10 @@
 				HashMultimap.<InetAddress, ArpRequest>create());
 		
 		topoSwitchService = new TopoSwitchServiceImpl();
-		topoLinkService = new TopoLinkServiceImpl();
 	}
 	
-	public void startUp() {
+	@Override
+	public void startUp(FloodlightModuleContext context) {
 		this.vlan = configService.getVlan();
 		log.info("vlan set to {}", this.vlan);
 		
@@ -186,8 +208,8 @@
 		//Have to synchronize externally on the Multimap while using an iterator,
 		//even though it's a synchronizedMultimap
 		synchronized (arpRequests) {
-			log.debug("Current have {} outstanding requests", 
-					arpRequests.size());
+			//log.debug("Current have {} outstanding requests", 
+					//arpRequests.size());
 			
 			Iterator<Map.Entry<InetAddress, ArpRequest>> it 
 				= arpRequests.entries().iterator();
@@ -200,6 +222,15 @@
 					log.debug("Cleaning expired ARP request for {}", 
 							entry.getKey().getHostAddress());
 		
+					//if the ARP request has expired, delete the corresponding device from the device store
+					IDeviceObject targetDevice = 
+							deviceStorage.getDeviceByIP(InetAddresses.coerceToInteger(entry.getKey()));
+					
+					if(targetDevice!=null)
+					{deviceStorage.removeDevice(targetDevice);
+					 log.debug("RemoveDevice: {} due to no have not recieve the ARP reply", targetDevice.toString());
+					}				
+					
 					it.remove();
 					
 					if (request.shouldRetry()) {
@@ -256,6 +287,7 @@
 		if (eth.getEtherType() == Ethernet.TYPE_ARP){
 			ARP arp = (ARP) eth.getPayload();	
 			if (arp.getOpCode() == ARP.OP_REQUEST) {
+				log.debug("receive ARP request");
 				//TODO check what the DeviceManager does about propagating
 				//or swallowing ARPs. We want to go after DeviceManager in the
 				//chain but we really need it to CONTINUE ARP packets so we can
@@ -263,12 +295,16 @@
 				handleArpRequest(sw, pi, arp, eth);
 			}
 			else if (arp.getOpCode() == ARP.OP_REPLY) {
-				//handleArpReply(sw, pi, arp);
+					log.debug("receive ARP reply");
+					handleArpReply(sw, pi, arp);
+					sendToOtherNodesReply(eth, pi);
 			}
+			
+			// Stop ARP packets here
+			return Command.STOP;
 		}
 		
-		//TODO should we propagate ARP or swallow it?
-		//Always propagate for now so DeviceManager can learn the host location
+		// Propagate everything else
 		return Command.CONTINUE;
 	}
 	
@@ -280,7 +316,7 @@
 
 		InetAddress target;
 		try {
-			 target = InetAddress.getByAddress(arp.getTargetProtocolAddress());
+			target = InetAddress.getByAddress(arp.getTargetProtocolAddress());
 		} catch (UnknownHostException e) {
 			log.debug("Invalid address in ARP request", e);
 			return;
@@ -292,66 +328,72 @@
 			if (configService.isInterfaceAddress(target)) {
 				log.trace("ARP request for our interface. Sending reply {} => {}",
 						target.getHostAddress(), configService.getRouterMacAddress());
-				
+
 				sendArpReply(arp, sw.getId(), pi.getInPort(), 
 						configService.getRouterMacAddress());
 			}
-			
+
 			return;
 		}
 		
 		//MACAddress macAddress = arpCache.lookup(target);
-		
+				
 		IDeviceObject targetDevice = 
 				deviceStorage.getDeviceByIP(InetAddresses.coerceToInteger(target));
-		
 		log.debug("targetDevice: {}", targetDevice);
-		
+
+		arpRequests.put(target, new ArpRequest(
+				new HostArpRequester(arp, sw.getId(), pi.getInPort()), false));
+
 		if (targetDevice != null) {
-			// We have the device in our database, so send a reply
+			// Even if the device in our database is not null, we do not reply to the request directly; instead we first check whether the device record is still valid
 			MACAddress macAddress = MACAddress.valueOf(targetDevice.getMACAddress());
-			
+
 			if (log.isTraceEnabled()) {
-				log.trace("Sending reply: {} => {} to host at {}/{}", new Object [] {
+				log.trace("The target Device Record in DB is: {} => {} from ARP request host at {}/{}", new Object [] {
 						inetAddressToString(arp.getTargetProtocolAddress()),
 						macAddress.toString(),
 						HexString.toHexString(sw.getId()), pi.getInPort()});
 			}
-			
-			sendArpReply(arp, sw.getId(), pi.getInPort(), macAddress);
-		}
-		else {
+
+			// sendArpReply(arp, sw.getId(), pi.getInPort(), macAddress);
+
+			log.trace("Checking the device info from DB is still valid or not");
+			Iterable<IPortObject> outPorts=targetDevice.getAttachedPorts();	
+
+			if(!outPorts.iterator().hasNext()){
+				log.debug("outPort : null");
+				sendToOtherNodes(eth, pi);
+			}else{
+
+				for (IPortObject portObject : outPorts) {
+					long outSwitch=0;
+					short outPort=0;   
+
+
+					if (!portObject.getLinkedPorts().iterator().hasNext()) {
+						outPort=portObject.getNumber();					
+						log.debug("outPort:{} ", outPort);
+					}   
+
+					Iterable<ISwitchObject>  outSwitches= targetDevice.getSwitch(); 
+
+					for (ISwitchObject outswitch : outSwitches) {
+
+						outSwitch= HexString.toLong(outswitch.getDPID());
+						log.debug("outSwitch.DPID:{}; outPort: {}", outswitch.getDPID(), outPort );
+						sendToOtherNodes( eth, pi, outSwitch, outPort);
+					}
+				}
+			}
+
+		}else {
+			log.debug("The Device info in DB is {} for IP {}", targetDevice, inetAddressToString(arp.getTargetProtocolAddress()));
+
 			// We don't know the device so broadcast the request out
-			// the edge of the network
-			
-			//Record where the request came from so we know where to send the reply
-			arpRequests.put(target, new ArpRequest(
-					new HostArpRequester(arp, sw.getId(), pi.getInPort()), false));
-			
 			sendToOtherNodes(eth, pi);
 		}
-		
-		/*if (macAddress == null){
-			//MAC address is not in our ARP cache.
-			
-			//Record where the request came from so we know where to send the reply
-			//arpRequests.put(target, new ArpRequest(
-					//new HostArpRequester(arp, sw.getId(), pi.getInPort()), false));
-						
-			//Flood the request out edge ports
-			//sendArpRequestToSwitches(target, pi.getPacketData(), sw.getId(), pi.getInPort());
-		}
-		else {
-			//We know the address, so send a reply
-			if (log.isTraceEnabled()) {
-				log.trace("Sending reply: {} => {} to host at {}/{}", new Object [] {
-						inetAddressToString(arp.getTargetProtocolAddress()),
-						macAddress.toString(),
-						HexString.toHexString(sw.getId()), pi.getInPort()});
-			}
-			
-			sendArpReply(arp, sw.getId(), pi.getInPort(), macAddress);
-		}*/
+ 
 	}
 	
 	private void handleArpReply(IOFSwitch sw, OFPacketIn pi, ARP arp){
@@ -372,7 +414,7 @@
 		
 		MACAddress senderMacAddress = MACAddress.valueOf(arp.getSenderHardwareAddress());
 		
-		arpCache.update(senderIpAddress, senderMacAddress);
+		//arpCache.update(senderIpAddress, senderMacAddress);
 		
 		//See if anyone's waiting for this ARP reply
 		Set<ArpRequest> requests = arpRequests.get(senderIpAddress);
@@ -492,6 +534,51 @@
 		
 		datagrid.sendArpRequest(ArpMessage.newRequest(targetAddress, eth.serialize()));
 	}
+	// Notify other ONOS instances via Hazelcast to send the ARP packet out on outPort of outSwitch
+	private void sendToOtherNodes(Ethernet eth, OFPacketIn pi, long outSwitch, short outPort) {
+		ARP arp = (ARP) eth.getPayload();
+		
+		if (log.isTraceEnabled()) {
+				log.trace("Sending ARP request for {} to other ONOS instances with outSwitch {} ",
+				inetAddressToString(arp.getTargetProtocolAddress()), String.valueOf(outSwitch));
+			
+		}
+		
+		InetAddress targetAddress;
+		try {
+				targetAddress = InetAddress.getByAddress(arp.getTargetProtocolAddress());
+		} catch (UnknownHostException e) {
+				log.error("Unknown host", e);
+				return;
+		}
+		
+		datagrid.sendArpRequest(ArpMessage.newRequest(targetAddress, eth.serialize(), outSwitch, outPort)); 
+		//datagrid.sendArpRequest(ArpMessage.newRequest(targetAddress, eth.serialize()));
+		
+		
+	}
+	private void sendToOtherNodesReply(Ethernet eth, OFPacketIn pi) {
+		ARP arp = (ARP) eth.getPayload();
+		
+		if (log.isTraceEnabled()) {
+				log.trace("Sending ARP reply for {} to other ONOS instances",
+				inetAddressToString(arp.getSenderProtocolAddress()));
+		}
+		
+		InetAddress targetAddress;		
+		MACAddress mac = new MACAddress(arp.getSenderHardwareAddress());
+		
+		try {
+				targetAddress = InetAddress.getByAddress(arp.getSenderProtocolAddress());
+		} catch (UnknownHostException e) {
+				log.error("Unknown host", e);
+				return;
+		}
+		
+		datagrid.sendArpRequest(ArpMessage.newReply(targetAddress,mac));
+		//datagrid.sendArpReply(ArpMessage.newRequest(targetAddress, eth.serialize()));
+	
+	}
 	
 	private void broadcastArpRequestOutEdge(byte[] arpRequest, long inSwitch, short inPort) {
 		for (IOFSwitch sw : floodlightProvider.getSwitches().values()){
@@ -582,7 +669,7 @@
 			}
 		}
 		
-		log.debug("Broadcast ARP request for to: {}", switchPorts);
+		log.debug("Broadcast ARP request to: {}", switchPorts);
 	}
 	
 	private void sendArpRequestOutPort(byte[] arpRequest, long dpid, short port) {
@@ -722,23 +809,26 @@
 	
 	@Override
 	public void arpRequestNotification(ArpMessage arpMessage) {
-		//log.debug("Received ARP notification from other instances");
-		
+		log.debug("Received ARP notification from other instances");
+
 		switch (arpMessage.getType()){
 		case REQUEST:
-			log.debug("Received ARP request notification for {}", 
-					arpMessage.getAddress());
-			broadcastArpRequestOutMyEdge(arpMessage.getPacket());
+			if(arpMessage.getOutSwitch() == -1 || arpMessage.getOutPort() == -1){	
+				broadcastArpRequestOutMyEdge(arpMessage.getPacket());					
+			}else{					
+				sendArpRequestOutPort(arpMessage.getPacket(),arpMessage.getOutSwitch(),arpMessage.getOutPort());
+				log.debug("OutSwitch in ARP request message is: {}; OutPort in ARP request message is: {}",arpMessage.getOutSwitch(),arpMessage.getOutPort());
+			}
 			break;
 		case REPLY:
 			log.debug("Received ARP reply notification for {}",
 					arpMessage.getAddress());
-			sendArpReplyToWaitingRequesters(arpMessage.getAddress());
+			sendArpReplyToWaitingRequesters(arpMessage.getAddress(),arpMessage.getMAC());
 			break;
 		}
 	}
 	
-	private void sendArpReplyToWaitingRequesters(InetAddress address) {
+	private void sendArpReplyToWaitingRequesters(InetAddress address, MACAddress mac) {
 		log.debug("Sending ARP reply for {} to requesters", 
 				address.getHostAddress());
 		
@@ -756,13 +846,13 @@
 			}
 		}
 		
-		IDeviceObject deviceObject = deviceStorage.getDeviceByIP(
+		/*IDeviceObject deviceObject = deviceStorage.getDeviceByIP(
 				InetAddresses.coerceToInteger(address));
 		
 		MACAddress mac = MACAddress.valueOf(deviceObject.getMACAddress());
 		
 		log.debug("Found {} at {} in network map", 
-				address.getHostAddress(), mac);
+				address.getHostAddress(), mac);*/
 		
 		//Don't hold an ARP lock while dispatching requests
 		for (ArpRequest request : requestsToSend) {
diff --git a/src/main/java/net/onrc/onos/ofcontroller/topology/ITopologyNetService.java b/src/main/java/net/onrc/onos/ofcontroller/topology/ITopologyNetService.java
index 9585366..4269eac 100644
--- a/src/main/java/net/onrc/onos/ofcontroller/topology/ITopologyNetService.java
+++ b/src/main/java/net/onrc/onos/ofcontroller/topology/ITopologyNetService.java
@@ -1,7 +1,5 @@
 package net.onrc.onos.ofcontroller.topology;
 
-import java.util.Map;
-
 import net.floodlightcontroller.core.module.IFloodlightService;
 import net.onrc.onos.ofcontroller.util.DataPath;
 import net.onrc.onos.ofcontroller.util.SwitchPort;
diff --git a/src/main/java/net/onrc/onos/ofcontroller/topology/Topology.java b/src/main/java/net/onrc/onos/ofcontroller/topology/Topology.java
index c914bbe..761189c 100644
--- a/src/main/java/net/onrc/onos/ofcontroller/topology/Topology.java
+++ b/src/main/java/net/onrc/onos/ofcontroller/topology/Topology.java
@@ -266,6 +266,9 @@
 
 	    break;
 	}
+	case ELEMENT_UNKNOWN:
+	    // TODO: Adding "assert(false);" here can be dangerous
+	    break;
 	}
 
 	return isModified;
@@ -316,6 +319,9 @@
 	    }
 	    break;
 	}
+	case ELEMENT_UNKNOWN:
+	    // TODO: Adding "assert(false);" here can be dangerous
+	    break;
 	}
 
 	return isModified;
diff --git a/src/main/java/net/onrc/onos/ofcontroller/topology/TopologyElement.java b/src/main/java/net/onrc/onos/ofcontroller/topology/TopologyElement.java
index b01c7d3..0fefa3a 100644
--- a/src/main/java/net/onrc/onos/ofcontroller/topology/TopologyElement.java
+++ b/src/main/java/net/onrc/onos/ofcontroller/topology/TopologyElement.java
@@ -1,8 +1,5 @@
 package net.onrc.onos.ofcontroller.topology;
 
-import java.util.Map;
-import java.util.TreeMap;
-
 /**
  * Class for storing information about a Topology Element: Switch, Port or
  * Link.
@@ -165,6 +162,8 @@
 	    return "Link=" +
 		Long.toHexString(fromSwitchDpid) + "/" + fromSwitchPort + "/" +
 		Long.toHexString(toSwitchDpid) + "/" + toSwitchPort;
+	case ELEMENT_UNKNOWN:
+	    return "Element=UNKNOWN";
 	}
 
 	assert(false);
diff --git a/src/main/java/net/onrc/onos/ofcontroller/topology/TopologyManager.java b/src/main/java/net/onrc/onos/ofcontroller/topology/TopologyManager.java
index bc82bc0..6b2ab99 100644
--- a/src/main/java/net/onrc/onos/ofcontroller/topology/TopologyManager.java
+++ b/src/main/java/net/onrc/onos/ofcontroller/topology/TopologyManager.java
@@ -235,8 +235,12 @@
 	    SwitchPort dest = flowPath.dataPath().dstPort();
 	    return ShortestPath.getTopologyShortestPath(topology, src, dest);
 	}
+
 	case FP_TYPE_EXPLICIT_PATH:
 	    return flowPath.dataPath();
+
+	case FP_TYPE_UNKNOWN:
+	    return null;
 	}
 
 	return null;
diff --git a/src/main/java/net/onrc/onos/ofcontroller/topology/web/RouteResource.java b/src/main/java/net/onrc/onos/ofcontroller/topology/web/RouteResource.java
index 0d33b27..d8997dc 100644
--- a/src/main/java/net/onrc/onos/ofcontroller/topology/web/RouteResource.java
+++ b/src/main/java/net/onrc/onos/ofcontroller/topology/web/RouteResource.java
@@ -2,7 +2,6 @@
 
 import net.onrc.onos.ofcontroller.flowmanager.IFlowService;
 import net.onrc.onos.ofcontroller.topology.ITopologyNetService;
-import net.onrc.onos.ofcontroller.topology.TopologyManager;
 import net.onrc.onos.ofcontroller.util.DataPath;
 import net.onrc.onos.ofcontroller.util.Dpid;
 import net.onrc.onos.ofcontroller.util.Port;
diff --git a/src/main/java/net/onrc/onos/ofcontroller/util/FlowEntry.java b/src/main/java/net/onrc/onos/ofcontroller/util/FlowEntry.java
index 98dbd88..c8b206f 100644
--- a/src/main/java/net/onrc/onos/ofcontroller/util/FlowEntry.java
+++ b/src/main/java/net/onrc/onos/ofcontroller/util/FlowEntry.java
@@ -13,6 +13,8 @@
 public class FlowEntry {
     private FlowId flowId;			// FlowID of the Flow Entry
     private FlowEntryId flowEntryId;		// The Flow Entry ID
+    private int		idleTimeout;		// The Flow idle timeout
+    private int		hardTimeout;		// The Flow hard timeout
     private FlowEntryMatch flowEntryMatch;	// The Flow Entry Match
     private FlowEntryActions flowEntryActions;	// The Flow Entry Actions
     private Dpid dpid;				// The Switch DPID
@@ -174,6 +176,54 @@
     }
 
     /**
+     * Get the flow idle timeout in seconds.
+     *
+     * It should be an unsigned integer in the interval [0, 65535].
+     * If zero, the timeout is not set.
+     *
+     * @return the flow idle timeout.
+     */
+    @JsonProperty("idleTimeout")
+    public int idleTimeout() { return idleTimeout; }
+
+    /**
+     * Set the flow idle timeout in seconds.
+     *
+     * It should be an unsigned integer in the interval [0, 65535].
+     * If zero, the timeout is not set.
+     *
+     * @param idleTimeout the flow idle timeout to set.
+     */
+    @JsonProperty("idleTimeout")
+    public void setIdleTimeout(int idleTimeout) {
+	this.idleTimeout = 0xffff & idleTimeout;
+    }
+
+    /**
+     * Get the flow hard timeout in seconds.
+     *
+     * It should be an unsigned integer in the interval [0, 65535].
+     * If zero, the timeout is not set.
+     *
+     * @return the flow hard timeout.
+     */
+    @JsonProperty("hardTimeout")
+    public int hardTimeout() { return hardTimeout; }
+
+    /**
+     * Set the flow hard timeout in seconds.
+     *
+     * It should be an unsigned integer in the interval [0, 65535].
+     * If zero, the timeout is not set.
+     *
+     * @param hardTimeout the flow hard timeout to set.
+     */
+    @JsonProperty("hardTimeout")
+    public void setHardTimeout(int hardTimeout) {
+	this.hardTimeout = 0xffff & hardTimeout;
+    }
+
+    /**
      * Get the Flow Entry Match.
      *
      * @return the Flow Entry Match.
@@ -343,7 +393,8 @@
      * Convert the flow entry to a string.
      *
      * The string has the following form:
-     *  [flowEntryId=XXX flowEntryMatch=XXX flowEntryActions=XXX dpid=XXX
+     *  [flowEntryId=XXX idleTimeout=XXX hardTimeout=XXX
+     *   flowEntryMatch=XXX flowEntryActions=XXX dpid=XXX
      *   inPort=XXX outPort=XXX flowEntryUserState=XXX flowEntrySwitchState=XXX
      *   flowEntryErrorState=XXX]
      * @return the flow entry as a string.
@@ -359,10 +410,12 @@
 	if ( flowId != null ) {
 		ret.append(" flowId=" + this.flowId.toString());
 	}
+	ret.append(" idleTimeout=" + this.idleTimeout);
+	ret.append(" hardTimeout=" + this.hardTimeout);
 	if ( flowEntryMatch != null ) {
 		ret.append(" flowEntryMatch=" + this.flowEntryMatch.toString());
 	}
-	ret.append( " flowEntryActions=" + this.flowEntryActions.toString() );
+	ret.append(" flowEntryActions=" + this.flowEntryActions.toString() );
 	if ( dpid != null ) {
 		ret.append(" dpid=" + this.dpid.toString());
 	}
diff --git a/src/main/java/net/onrc/onos/ofcontroller/util/FlowEntryAction.java b/src/main/java/net/onrc/onos/ofcontroller/util/FlowEntryAction.java
index a1163c8..e431f8a 100644
--- a/src/main/java/net/onrc/onos/ofcontroller/util/FlowEntryAction.java
+++ b/src/main/java/net/onrc/onos/ofcontroller/util/FlowEntryAction.java
@@ -43,6 +43,13 @@
 	private ActionValues(short value) {
 	    this.value = value;
 	}
+
+	/**
+	 * Get the value.
+	 *
+	 * @return the value.
+	 */
+	public short getValue() { return value; }
     }
 
     /**
@@ -1564,6 +1571,9 @@
 	case ACTION_ENQUEUE:
 	    ret += " action=" + actionEnqueue.toString();
 	    break;
+	case ACTION_VENDOR:
+	    ret += " action=VENDOR";
+	    break;
 	}
 	ret += "]";
 
@@ -1656,6 +1666,9 @@
 	    case ACTION_ENQUEUE:
 		actionEnqueue = new ActionEnqueue(decode);
 		break;
+	    case ACTION_VENDOR:
+		// TODO: Handle it as appropriate
+		break;
 	    }
 	} catch (IllegalArgumentException e) {
 	    throw new IllegalArgumentException("Invalid action string");
diff --git a/src/main/java/net/onrc/onos/ofcontroller/util/FlowPath.java b/src/main/java/net/onrc/onos/ofcontroller/util/FlowPath.java
index ab3edb1..7c87a10 100644
--- a/src/main/java/net/onrc/onos/ofcontroller/util/FlowPath.java
+++ b/src/main/java/net/onrc/onos/ofcontroller/util/FlowPath.java
@@ -18,7 +18,9 @@
     private FlowPathType flowPathType;	// The Flow Path type
     private FlowPathUserState flowPathUserState; // The Flow Path User state
     private FlowPathFlags flowPathFlags; // The Flow Path flags
-    private DataPath dataPath;		// The data path
+    private int		idleTimeout;	// The Flow idle timeout
+    private int		hardTimeout;	// The Flow hard timeout
+    private DataPath	dataPath;	// The data path
     private FlowEntryMatch flowEntryMatch; // Common Flow Entry Match for all
 					// Flow Entries
     private FlowEntryActions flowEntryActions; // The Flow Entry Actions for
@@ -45,6 +47,8 @@
 	this.setFlowPathType(FlowPathType.valueOf(flowObj.getFlowPathType()));
 	this.setFlowPathUserState(FlowPathUserState.valueOf(flowObj.getFlowPathUserState()));
 	this.setFlowPathFlags(new FlowPathFlags(flowObj.getFlowPathFlags()));
+	this.setIdleTimeout(flowObj.getIdleTimeout());
+	this.setHardTimeout(flowObj.getHardTimeout());
     	this.dataPath().srcPort().setDpid(new Dpid(flowObj.getSrcSwitch()));
     	this.dataPath().srcPort().setPort(new Port(flowObj.getSrcPort()));
     	this.dataPath().dstPort().setDpid(new Dpid(flowObj.getDstSwitch()));
@@ -295,6 +299,54 @@
     }
 
     /**
+     * Get the flow idle timeout in seconds.
+     *
+     * It should be an unsigned integer in the interval [0, 65535].
+     * If zero, the timeout is not set.
+     *
+     * @return the flow idle timeout.
+     */
+    @JsonProperty("idleTimeout")
+    public int idleTimeout() { return idleTimeout; }
+
+    /**
+     * Set the flow idle timeout in seconds.
+     *
+     * It should be an unsigned integer in the interval [0, 65535].
+     * If zero, the timeout is not set.
+     *
+     * @param idleTimeout the flow idle timeout to set.
+     */
+    @JsonProperty("idleTimeout")
+    public void setIdleTimeout(int idleTimeout) {
+	this.idleTimeout = 0xffff & idleTimeout;
+    }
+
+    /**
+     * Get the flow hard timeout in seconds.
+     *
+     * It should be an unsigned integer in the interval [0, 65535].
+     * If zero, the timeout is not set.
+     *
+     * @return the flow hard timeout.
+     */
+    @JsonProperty("hardTimeout")
+    public int hardTimeout() { return hardTimeout; }
+
+    /**
+     * Set the flow hard timeout.
+     *
+     * It should be an unsigned integer in the interval [0, 65535].
+     * If zero, the timeout is not set.
+     *
+     * @param hardTimeout the flow hard timeout to set.
+     */
+    @JsonProperty("hardTimeout")
+    public void setHardTimeout(int hardTimeout) {
+	this.hardTimeout = 0xffff & hardTimeout;
+    }
+
+    /**
      * Get the flow path's data path.
      *
      * @return the flow path's data path.
@@ -366,8 +418,8 @@
      *
      * The string has the following form:
      *  [flowId=XXX installerId=XXX flowPathType = XXX flowPathUserState = XXX
-     *   flowPathFlags=XXX dataPath=XXX flowEntryMatch=XXX
-     *   flowEntryActions=XXX]
+     *   flowPathFlags=XXX idleTimeout=XXX hardTimeout=XXX dataPath=XXX
+     *   flowEntryMatch=XXX flowEntryActions=XXX]
      *
      * @return the flow path as a string.
      */
@@ -378,6 +430,8 @@
 	ret += " flowPathType=" + this.flowPathType;
 	ret += " flowPathUserState=" + this.flowPathUserState;
 	ret += " flowPathFlags=" + this.flowPathFlags.toString();
+	ret += " idleTimeout=" + this.idleTimeout;
+	ret += " hardTimeout=" + this.hardTimeout;
 	if (dataPath != null)
 	    ret += " dataPath=" + this.dataPath.toString();
 	if (flowEntryMatch != null)
diff --git a/src/main/java/net/onrc/onos/ofcontroller/util/FlowPathFlags.java b/src/main/java/net/onrc/onos/ofcontroller/util/FlowPathFlags.java
index 4bbd399..595eb5f 100644
--- a/src/main/java/net/onrc/onos/ofcontroller/util/FlowPathFlags.java
+++ b/src/main/java/net/onrc/onos/ofcontroller/util/FlowPathFlags.java
@@ -112,8 +112,6 @@
 
 	// Test all flags
 	if ((this.flags & DISCARD_FIRST_HOP_ENTRY) != 0) {
-	    if (flagsStr != null)
-		flagsStr += ",";
 	    flagsStr += "DISCARD_FIRST_HOP_ENTRY";
 	}
 	if ((this.flags & KEEP_ONLY_FIRST_HOP_ENTRY) != 0) {
@@ -121,6 +119,8 @@
 		flagsStr += ",";
 	    flagsStr += "KEEP_ONLY_FIRST_HOP_ENTRY";
 	}
+	if (flagsStr != null)
+	    ret += flagsStr;
 	ret += "]";
 
 	return ret;
diff --git a/src/main/resources/META-INF/services/net.floodlightcontroller.core.module.IFloodlightModule b/src/main/resources/META-INF/services/net.floodlightcontroller.core.module.IFloodlightModule
index fcdd6b5..a842665 100644
--- a/src/main/resources/META-INF/services/net.floodlightcontroller.core.module.IFloodlightModule
+++ b/src/main/resources/META-INF/services/net.floodlightcontroller.core.module.IFloodlightModule
@@ -1,17 +1,10 @@
 net.floodlightcontroller.core.FloodlightProvider
-net.floodlightcontroller.storage.memory.MemoryStorageSource
 net.onrc.onos.ofcontroller.floodlightlistener.NetworkGraphPublisher
 net.floodlightcontroller.devicemanager.internal.DeviceManagerImpl
 net.onrc.onos.ofcontroller.linkdiscovery.internal.LinkDiscoveryManager
 net.floodlightcontroller.topology.TopologyManager
 net.floodlightcontroller.forwarding.Forwarding
-net.floodlightcontroller.flowcache.FlowReconcileManager
-net.floodlightcontroller.staticflowentry.StaticFlowEntryPusher
-net.floodlightcontroller.perfmon.PktInProcessingTime
-net.floodlightcontroller.perfmon.NullPktInProcessingTime
 net.floodlightcontroller.restserver.RestApiServer
-net.floodlightcontroller.counter.CounterStore
-net.floodlightcontroller.counter.NullCounterStore
 net.floodlightcontroller.threadpool.ThreadPool
 net.floodlightcontroller.ui.web.StaticWebRoutable
 net.floodlightcontroller.devicemanager.internal.DefaultEntityClassifier
@@ -25,4 +18,6 @@
 net.onrc.onos.ofcontroller.bgproute.BgpRoute
 net.onrc.onos.registry.controller.ZookeeperRegistry
 net.onrc.onos.registry.controller.StandaloneRegistry
-net.onrc.onos.ofcontroller.core.module.OnosModuleLoader
+net.onrc.onos.ofcontroller.forwarding.Forwarding
+net.onrc.onos.ofcontroller.proxyarp.ProxyArpManager
+net.onrc.onos.ofcontroller.core.config.DefaultConfiguration
diff --git a/src/main/resources/hazelcast.xml b/src/main/resources/hazelcast.xml
new file mode 120000
index 0000000..f8f4972
--- /dev/null
+++ b/src/main/resources/hazelcast.xml
@@ -0,0 +1 @@
+/home/mininet/ONOS/conf/hazelcast.xml
\ No newline at end of file
diff --git a/src/test/java/net/floodlightcontroller/core/internal/ControllerTest.java b/src/test/java/net/floodlightcontroller/core/internal/ControllerTest.java
index 21788af..cbb4b17 100644
--- a/src/test/java/net/floodlightcontroller/core/internal/ControllerTest.java
+++ b/src/test/java/net/floodlightcontroller/core/internal/ControllerTest.java
@@ -32,10 +32,7 @@
 
 import java.util.ArrayList;
 import java.util.Collection;
-import java.util.Date;
-import java.util.HashMap;
 import java.util.List;
-import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.Future;
 import java.util.concurrent.TimeUnit;
@@ -44,7 +41,6 @@
 import net.floodlightcontroller.core.FloodlightProvider;
 import net.floodlightcontroller.core.IFloodlightProviderService;
 import net.floodlightcontroller.core.IFloodlightProviderService.Role;
-import net.floodlightcontroller.core.IHAListener;
 import net.floodlightcontroller.core.IListener.Command;
 import net.floodlightcontroller.core.IOFMessageListener;
 import net.floodlightcontroller.core.IOFSwitch;
@@ -55,23 +51,19 @@
 import net.floodlightcontroller.core.internal.OFChannelState.HandshakeState;
 import net.floodlightcontroller.core.module.FloodlightModuleContext;
 import net.floodlightcontroller.core.test.MockThreadPoolService;
-import net.floodlightcontroller.counter.CounterStore;
-import net.floodlightcontroller.counter.ICounterStoreService;
 import net.floodlightcontroller.packet.ARP;
 import net.floodlightcontroller.packet.Ethernet;
 import net.floodlightcontroller.packet.IPacket;
 import net.floodlightcontroller.packet.IPv4;
-import net.floodlightcontroller.perfmon.IPktInProcessingTimeService;
-import net.floodlightcontroller.perfmon.PktInProcessingTime;
 import net.floodlightcontroller.restserver.IRestApiService;
 import net.floodlightcontroller.restserver.RestApiServer;
-import net.floodlightcontroller.storage.IStorageSourceService;
-import net.floodlightcontroller.storage.memory.MemoryStorageSource;
 import net.floodlightcontroller.test.FloodlightTestCase;
 import net.floodlightcontroller.threadpool.IThreadPoolService;
 import net.onrc.onos.ofcontroller.core.IOFSwitchPortListener;
 import net.onrc.onos.ofcontroller.flowmanager.FlowManager;
 import net.onrc.onos.ofcontroller.flowmanager.IFlowService;
+import net.onrc.onos.ofcontroller.linkdiscovery.ILinkDiscoveryService;
+import net.onrc.onos.ofcontroller.linkdiscovery.internal.LinkDiscoveryManager;
 import net.onrc.onos.ofcontroller.topology.ITopologyNetService;
 import net.onrc.onos.ofcontroller.topology.TopologyManager;
 import net.onrc.onos.registry.controller.IControllerRegistryService;
@@ -121,18 +113,9 @@
         controller = (Controller)cm.getServiceImpls().get(IFloodlightProviderService.class);
         fmc.addService(IFloodlightProviderService.class, controller);
         
-        MemoryStorageSource memstorage = new MemoryStorageSource();
-        fmc.addService(IStorageSourceService.class, memstorage);
-        
         RestApiServer restApi = new RestApiServer();
         fmc.addService(IRestApiService.class, restApi);
-        
-        CounterStore cs = new CounterStore();
-        fmc.addService(ICounterStoreService.class, cs);
-        
-        PktInProcessingTime ppt = new PktInProcessingTime();
-        fmc.addService(IPktInProcessingTimeService.class, ppt);
-        
+   
         tp = new MockThreadPoolService();
         fmc.addService(IThreadPoolService.class, tp);
         
@@ -142,20 +125,20 @@
         fmc.addService(ITopologyNetService.class, new TopologyManager() );
         StandaloneRegistry sr = new StandaloneRegistry();
         fmc.addService(IControllerRegistryService.class, sr );
+        LinkDiscoveryManager linkDiscovery = new LinkDiscoveryManager();
+        fmc.addService(ILinkDiscoveryService.class, linkDiscovery);
 
         
-        ppt.init(fmc);
         restApi.init(fmc);
-        memstorage.init(fmc);
         cm.init(fmc);
         tp.init(fmc);
         sr.init(fmc);
-        ppt.startUp(fmc);
+        linkDiscovery.init(fmc);
         restApi.startUp(fmc);
-        memstorage.startUp(fmc);
         cm.startUp(fmc);
         tp.startUp(fmc);
         sr.startUp(fmc);
+        linkDiscovery.startUp(fmc);
     }
 
     public Controller getController() {
@@ -183,10 +166,12 @@
                 
         expect(sw.getId()).andReturn(dpid).anyTimes();
         expect(sw.getStringId()).andReturn(dpidString).anyTimes();
-        expect(sw.getConnectedSince()).andReturn(new Date());
-        Channel channel = createMock(Channel.class);
-        expect(sw.getChannel()).andReturn(channel);
-        expect(channel.getRemoteAddress()).andReturn(null);
+        
+        //Now that we don't write to storage, these methods aren't called
+        //expect(sw.getConnectedSince()).andReturn(new Date());
+        //Channel channel = createMock(Channel.class);
+        //expect(sw.getChannel()).andReturn(channel);
+        //expect(channel.getRemoteAddress()).andReturn(null);
 
         expect(sw.getCapabilities()).andReturn(0).anyTimes();
         expect(sw.getBuffers()).andReturn(0).anyTimes();
@@ -403,21 +388,22 @@
         IOFSwitch newsw = createMock(IOFSwitch.class);
         expect(newsw.getId()).andReturn(0L).anyTimes();
         expect(newsw.getStringId()).andReturn("00:00:00:00:00:00:00").anyTimes();
-        expect(newsw.getConnectedSince()).andReturn(new Date());
-        Channel channel2 = createMock(Channel.class);
-        expect(newsw.getChannel()).andReturn(channel2);
-        expect(channel2.getRemoteAddress()).andReturn(null);
+        //Now we don't write to storage, these methods aren't called
+        //expect(newsw.getConnectedSince()).andReturn(new Date());
+        //Channel channel2 = createMock(Channel.class);
+        //expect(newsw.getChannel()).andReturn(channel2);
+        //expect(channel2.getRemoteAddress()).andReturn(null);
         expect(newsw.getPorts()).andReturn(new ArrayList<OFPhysicalPort>());
         expect(newsw.getCapabilities()).andReturn(0).anyTimes();
         expect(newsw.getBuffers()).andReturn(0).anyTimes();
         expect(newsw.getTables()).andReturn((byte)0).anyTimes();
         expect(newsw.getActions()).andReturn(0).anyTimes();
         controller.activeSwitches.put(0L, oldsw);
-        replay(newsw, channel, channel2);
+        replay(newsw, channel);//, channel2);
 
         controller.addSwitch(newsw);
 
-        verify(newsw, channel, channel2);
+        verify(newsw, channel);//, channel2);
     }
     
     @Test
@@ -460,6 +446,10 @@
         }
         DummySwitchListener switchListener = new DummySwitchListener();
         IOFSwitch sw = createMock(IOFSwitch.class);
+        expect(sw.getId()).andReturn(1L).anyTimes();
+        expect(sw.getEnabledPorts()).andReturn(null);
+        expect(sw.getChannel()).andReturn(null).anyTimes();
+        replay(sw);
         ControllerRunThread t = new ControllerRunThread();
         t.start();
         
@@ -482,18 +472,6 @@
                     switchListener.nPortChanged == 1);
         }
     }
-    
-
-    private Map<String,Object> getFakeControllerIPRow(String id, String controllerId, 
-            String type, int number, String discoveredIP ) {
-        HashMap<String, Object> row = new HashMap<String,Object>();
-        row.put(Controller.CONTROLLER_INTERFACE_ID, id);
-        row.put(Controller.CONTROLLER_INTERFACE_CONTROLLER_ID, controllerId);
-        row.put(Controller.CONTROLLER_INTERFACE_TYPE, type);
-        row.put(Controller.CONTROLLER_INTERFACE_NUMBER, number);
-        row.put(Controller.CONTROLLER_INTERFACE_DISCOVERED_IP, discoveredIP);
-        return row;
-    }
 
     /**
      * Test notifications for controller node IP changes. This requires
@@ -507,6 +485,7 @@
      * 
      * @throws Exception
      */
+    /*
     @Test
     public void testControllerNodeIPChanges() throws Exception {
         class DummyHAListener implements IHAListener {
@@ -617,7 +596,9 @@
             listener.do_assert(4, expectedCurMap, expectedAddedMap, expectedRemovedMap);
         }
     }
+    */
     
+    /*
     @Test
     public void testGetControllerNodeIPs() {
         HashMap<String,String> expectedCurMap = new HashMap<String, String>();
@@ -633,40 +614,7 @@
         assertEquals("expectedControllerNodeIPs is not as expected", 
                 expectedCurMap, controller.getControllerNodeIPs());
     }
-    
-    @Test
-    public void testSetRoleNull() {
-        try {
-            controller.setRole(null);
-            fail("Should have thrown an Exception");
-        }
-        catch (NullPointerException e) {
-            //exptected
-        }
-    }
-    
-    @Test 
-    public void testSetRole() {
-        controller.connectedSwitches.add(new OFSwitchImpl());
-        RoleChanger roleChanger = createMock(RoleChanger.class); 
-        roleChanger.submitRequest(controller.connectedSwitches, Role.SLAVE);
-        controller.roleChanger = roleChanger;
-        
-        assertEquals("Check that update queue is empty", 0, 
-                    controller.updates.size());
-        
-        replay(roleChanger);
-        controller.setRole(Role.SLAVE);
-        verify(roleChanger);
-        
-        IUpdate upd = controller.updates.poll();
-        assertNotNull("Check that update queue has an update", upd);
-        assertTrue("Check that update is HARoleUpdate", 
-                   upd instanceof Controller.HARoleUpdate);
-        Controller.HARoleUpdate roleUpd = (Controller.HARoleUpdate)upd;
-        assertSame(Role.MASTER, roleUpd.oldRole);
-        assertSame(Role.SLAVE, roleUpd.newRole);
-    }
+    */
     
     @Test
     public void testCheckSwitchReady() {
@@ -1153,6 +1101,7 @@
     @Test 
     public void testHandlePortStatus() throws Exception {
         IOFSwitch sw = createMock(IOFSwitch.class);
+        expect(sw.getId()).andReturn(1L).anyTimes();
         OFPhysicalPort port = new OFPhysicalPort();
         port.setName("myPortName1");
         port.setPortNumber((short)42);
diff --git a/src/test/java/net/floodlightcontroller/core/module/FloodlightTestModuleLoader.java b/src/test/java/net/floodlightcontroller/core/module/FloodlightTestModuleLoader.java
index be43a8b..89a3591 100644
--- a/src/test/java/net/floodlightcontroller/core/module/FloodlightTestModuleLoader.java
+++ b/src/test/java/net/floodlightcontroller/core/module/FloodlightTestModuleLoader.java
@@ -4,40 +4,30 @@
 import java.util.Collection;
 import java.util.Iterator;
 
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import net.floodlightcontroller.core.module.FloodlightModuleLoader;
-import net.floodlightcontroller.core.module.IFloodlightModule;
 import net.floodlightcontroller.core.test.MockFloodlightProvider;
 import net.floodlightcontroller.core.test.MockThreadPoolService;
-import net.floodlightcontroller.counter.NullCounterStore;
 import net.floodlightcontroller.devicemanager.internal.DefaultEntityClassifier;
 import net.floodlightcontroller.devicemanager.test.MockDeviceManager;
-import net.floodlightcontroller.perfmon.NullPktInProcessingTime;
-import net.floodlightcontroller.storage.memory.MemoryStorageSource;
 import net.floodlightcontroller.topology.TopologyManager;
 
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 public class FloodlightTestModuleLoader extends FloodlightModuleLoader {
 	protected final static Logger log = LoggerFactory.getLogger(FloodlightTestModuleLoader.class);
 	
 	// List of default modules to use unless specified otherwise
-	public static final Class<? extends IFloodlightModule> DEFAULT_STORAGE_SOURCE =
-			MemoryStorageSource.class;
 	public static final Class<? extends IFloodlightModule> DEFAULT_FLOODLIGHT_PRPOVIDER =
 			MockFloodlightProvider.class;
 	public static final Class<? extends IFloodlightModule> DEFAULT_TOPOLOGY_PROVIDER =
 			TopologyManager.class;
 	public static final Class<? extends IFloodlightModule> DEFAULT_DEVICE_SERVICE =
 			MockDeviceManager.class;
-	public static final Class<? extends IFloodlightModule> DEFAULT_COUNTER_STORE =
-			NullCounterStore.class;
 	public static final Class<? extends IFloodlightModule> DEFAULT_THREADPOOL =
 			MockThreadPoolService.class;
 	public static final Class<? extends IFloodlightModule> DEFAULT_ENTITY_CLASSIFIER =
 			DefaultEntityClassifier.class;
-	public static final Class<? extends IFloodlightModule> DEFAULT_PERFMON =
-			NullPktInProcessingTime.class;
+	
 	
 	protected static final Collection<Class<? extends IFloodlightModule>> DEFAULT_MODULE_LIST;
 	
@@ -45,12 +35,10 @@
 		DEFAULT_MODULE_LIST = new ArrayList<Class<? extends IFloodlightModule>>();
 		DEFAULT_MODULE_LIST.add(DEFAULT_DEVICE_SERVICE);
 		DEFAULT_MODULE_LIST.add(DEFAULT_FLOODLIGHT_PRPOVIDER);
-		DEFAULT_MODULE_LIST.add(DEFAULT_STORAGE_SOURCE);
 		DEFAULT_MODULE_LIST.add(DEFAULT_TOPOLOGY_PROVIDER);
-		DEFAULT_MODULE_LIST.add(DEFAULT_COUNTER_STORE);
 		DEFAULT_MODULE_LIST.add(DEFAULT_THREADPOOL);
 		DEFAULT_MODULE_LIST.add(DEFAULT_ENTITY_CLASSIFIER);
-		DEFAULT_MODULE_LIST.add(DEFAULT_PERFMON);
+
 	}
 	
 	protected IFloodlightModuleContext fmc;
diff --git a/src/test/java/net/floodlightcontroller/core/test/MockFloodlightProvider.java b/src/test/java/net/floodlightcontroller/core/test/MockFloodlightProvider.java
index 2a158e5..f3abae8 100644
--- a/src/test/java/net/floodlightcontroller/core/test/MockFloodlightProvider.java
+++ b/src/test/java/net/floodlightcontroller/core/test/MockFloodlightProvider.java
@@ -31,13 +31,11 @@
 
 import net.floodlightcontroller.core.FloodlightContext;
 import net.floodlightcontroller.core.IFloodlightProviderService;
-import net.floodlightcontroller.core.IHAListener;
-import net.floodlightcontroller.core.IInfoProvider;
+import net.floodlightcontroller.core.IListener.Command;
 import net.floodlightcontroller.core.IOFMessageListener;
 import net.floodlightcontroller.core.IOFSwitch;
 import net.floodlightcontroller.core.IOFSwitchFilter;
 import net.floodlightcontroller.core.IOFSwitchListener;
-import net.floodlightcontroller.core.IListener.Command;
 import net.floodlightcontroller.core.IUpdate;
 import net.floodlightcontroller.core.module.FloodlightModuleContext;
 import net.floodlightcontroller.core.module.FloodlightModuleException;
@@ -61,7 +59,6 @@
     protected final static Logger log = LoggerFactory.getLogger(MockFloodlightProvider.class);
     protected ConcurrentMap<OFType, ListenerDispatcher<OFType,IOFMessageListener>> listeners;
     protected List<IOFSwitchListener> switchListeners;
-    protected List<IHAListener> haListeners;
     protected Map<Long, IOFSwitch> switches;
     protected BasicFactory factory;
 
@@ -73,7 +70,6 @@
                                    IOFMessageListener>>();
         switches = new ConcurrentHashMap<Long, IOFSwitch>();
         switchListeners = new CopyOnWriteArrayList<IOFSwitchListener>();
-        haListeners = new CopyOnWriteArrayList<IHAListener>();
         factory = new BasicFactory();
     }
 
@@ -256,55 +252,6 @@
     }
 
     @Override
-    public void addInfoProvider(String type, IInfoProvider provider) {
-        // TODO Auto-generated method stub
-        
-    }
-
-    @Override
-    public void removeInfoProvider(String type, IInfoProvider provider) {
-        // TODO Auto-generated method stub
-        
-    }
-
-    @Override
-    public Map<String, Object> getControllerInfo(String type) {
-        // TODO Auto-generated method stub
-        return null;
-    }
-
-    @Override
-    public void addHAListener(IHAListener listener) {
-        haListeners.add(listener);
-    }
-
-    @Override
-    public void removeHAListener(IHAListener listener) {
-        haListeners.remove(listener);
-    }
-    
-    @Override
-    public Role getRole() {
-        return null;
-    }
-    
-    @Override
-    public void setRole(Role role) {
-        
-    }
-    
-    /**
-     * Dispatches a new role change notification
-     * @param oldRole
-     * @param newRole
-     */
-    public void dispatchRoleChanged(Role oldRole, Role newRole) {
-        for (IHAListener rl : haListeners) {
-            rl.roleChanged(oldRole, newRole);
-        }
-    }
-
-    @Override
     public String getControllerId() {
         return "localhost";
     }
diff --git a/src/test/java/net/floodlightcontroller/devicemanager/internal/DeviceManagerImplTest.java b/src/test/java/net/floodlightcontroller/devicemanager/internal/DeviceManagerImplTest.java
index 7afb78a..b37efe3 100644
--- a/src/test/java/net/floodlightcontroller/devicemanager/internal/DeviceManagerImplTest.java
+++ b/src/test/java/net/floodlightcontroller/devicemanager/internal/DeviceManagerImplTest.java
@@ -18,7 +18,17 @@
 package net.floodlightcontroller.devicemanager.internal;
 
 
-import static org.easymock.EasyMock.*;
+import static org.easymock.EasyMock.anyLong;
+import static org.easymock.EasyMock.anyShort;
+import static org.easymock.EasyMock.createMock;
+import static org.easymock.EasyMock.createStrictMock;
+import static org.easymock.EasyMock.expect;
+import static org.easymock.EasyMock.expectLastCall;
+import static org.easymock.EasyMock.isA;
+import static org.easymock.EasyMock.replay;
+import static org.easymock.EasyMock.reset;
+import static org.easymock.EasyMock.verify;
+import static org.junit.Assert.assertArrayEquals;
 
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -31,46 +41,40 @@
 import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
 
-import static org.easymock.EasyMock.expectLastCall;
 import net.floodlightcontroller.core.IFloodlightProviderService;
 import net.floodlightcontroller.core.IOFSwitch;
 import net.floodlightcontroller.core.module.FloodlightModuleContext;
 import net.floodlightcontroller.core.test.MockFloodlightProvider;
 import net.floodlightcontroller.core.test.MockThreadPoolService;
-import net.floodlightcontroller.devicemanager.IDeviceListener;
 import net.floodlightcontroller.devicemanager.IDevice;
+import net.floodlightcontroller.devicemanager.IDeviceListener;
+import net.floodlightcontroller.devicemanager.IDeviceService;
 import net.floodlightcontroller.devicemanager.IEntityClass;
 import net.floodlightcontroller.devicemanager.IEntityClassifierService;
 import net.floodlightcontroller.devicemanager.SwitchPort;
-import net.floodlightcontroller.devicemanager.IDeviceService;
 import net.floodlightcontroller.devicemanager.SwitchPort.ErrorStatus;
 import net.floodlightcontroller.devicemanager.internal.DeviceManagerImpl.ClassState;
 import net.floodlightcontroller.devicemanager.test.MockEntityClassifier;
 import net.floodlightcontroller.devicemanager.test.MockEntityClassifierMac;
 import net.floodlightcontroller.devicemanager.test.MockFlexEntityClassifier;
-import net.floodlightcontroller.flowcache.FlowReconcileManager;
-import net.floodlightcontroller.flowcache.IFlowReconcileService;
 import net.floodlightcontroller.packet.ARP;
 import net.floodlightcontroller.packet.Ethernet;
 import net.floodlightcontroller.packet.IPacket;
 import net.floodlightcontroller.packet.IPv4;
 import net.floodlightcontroller.restserver.IRestApiService;
 import net.floodlightcontroller.restserver.RestApiServer;
-import net.floodlightcontroller.storage.IStorageSourceService;
-import net.floodlightcontroller.storage.memory.MemoryStorageSource;
 import net.floodlightcontroller.test.FloodlightTestCase;
 import net.floodlightcontroller.threadpool.IThreadPoolService;
 import net.floodlightcontroller.topology.ITopologyService;
-import static org.junit.Assert.*;
 
 import org.easymock.EasyMock;
 import org.junit.Before;
 import org.junit.Ignore;
 import org.junit.Test;
 import org.openflow.protocol.OFPacketIn;
+import org.openflow.protocol.OFPacketIn.OFPacketInReason;
 import org.openflow.protocol.OFPhysicalPort;
 import org.openflow.protocol.OFType;
-import org.openflow.protocol.OFPacketIn.OFPacketInReason;
 import org.openflow.util.HexString;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -89,8 +93,6 @@
     private byte[] testARPReplyPacket_3_Serialized;
     MockFloodlightProvider mockFloodlightProvider;
     DeviceManagerImpl deviceManager;
-    MemoryStorageSource storageSource;
-    FlowReconcileManager flowReconcileMgr;
 
     private IOFSwitch makeSwitchMock(long id) {
         IOFSwitch mockSwitch = createMock(IOFSwitch.class);
@@ -115,25 +117,25 @@
         fmc.addService(IThreadPoolService.class, tp);
         mockFloodlightProvider = getMockFloodlightProvider();
         deviceManager = new DeviceManagerImpl();
-        flowReconcileMgr = new FlowReconcileManager();
+       
         DefaultEntityClassifier entityClassifier = new DefaultEntityClassifier();
         fmc.addService(IDeviceService.class, deviceManager);
-        storageSource = new MemoryStorageSource();
-        fmc.addService(IStorageSourceService.class, storageSource);
+        //storageSource = new MemoryStorageSource();
+        //fmc.addService(IStorageSourceService.class, storageSource);
         fmc.addService(IFloodlightProviderService.class, mockFloodlightProvider);
         fmc.addService(IRestApiService.class, restApi);
-        fmc.addService(IFlowReconcileService.class, flowReconcileMgr);
+ 
         fmc.addService(IEntityClassifierService.class, entityClassifier);
         fmc.addService(ITopologyService.class, topology);
         tp.init(fmc);
         restApi.init(fmc);
-        storageSource.init(fmc);
+        //storageSource.init(fmc);
         deviceManager.init(fmc);
-        flowReconcileMgr.init(fmc);
+
         entityClassifier.init(fmc);
-        storageSource.startUp(fmc);
+        //storageSource.startUp(fmc);
         deviceManager.startUp(fmc);
-        flowReconcileMgr.startUp(fmc);
+
         tp.startUp(fmc);
         entityClassifier.startUp(fmc);
 
diff --git a/src/test/java/net/floodlightcontroller/flowcache/FlowReconcileMgrTest.java b/src/test/java/net/floodlightcontroller/flowcache/FlowReconcileMgrTest.java
deleted file mode 100644
index 0427828..0000000
--- a/src/test/java/net/floodlightcontroller/flowcache/FlowReconcileMgrTest.java
+++ /dev/null
@@ -1,500 +0,0 @@
-package net.floodlightcontroller.flowcache;
-
-import static org.easymock.EasyMock.*;
-
-import java.util.ArrayList;
-import java.util.Date;
-import java.util.ListIterator;
-
-import net.floodlightcontroller.core.IListener.Command;
-import net.floodlightcontroller.core.module.FloodlightModuleContext;
-import net.floodlightcontroller.core.test.MockFloodlightProvider;
-import net.floodlightcontroller.core.test.MockThreadPoolService;
-import net.floodlightcontroller.counter.ICounterStoreService;
-import net.floodlightcontroller.counter.SimpleCounter;
-import net.floodlightcontroller.counter.CounterValue.CounterType;
-import net.floodlightcontroller.flowcache.IFlowReconcileListener;
-import net.floodlightcontroller.flowcache.OFMatchReconcile;
-import net.floodlightcontroller.test.FloodlightTestCase;
-import net.floodlightcontroller.threadpool.IThreadPoolService;
-
-import org.easymock.EasyMock;
-import org.easymock.IAnswer;
-import org.junit.Before;
-import org.junit.Test;
-import org.openflow.protocol.OFStatisticsRequest;
-import org.openflow.protocol.OFType;
-
-public class FlowReconcileMgrTest extends FloodlightTestCase {
-
-    protected MockFloodlightProvider mockFloodlightProvider;
-    protected FlowReconcileManager flowReconcileMgr;
-    protected MockThreadPoolService threadPool;
-    protected ICounterStoreService counterStore;
-    protected FloodlightModuleContext fmc;
-    
-    OFStatisticsRequest ofStatsRequest;
-
-    protected int NUM_FLOWS_PER_THREAD = 100;
-    protected int NUM_THREADS = 100;
-    
-    @Before
-    public void setUp() throws Exception {
-        super.setUp();
-
-        fmc = new FloodlightModuleContext();
-        flowReconcileMgr = new FlowReconcileManager();
-        threadPool = new MockThreadPoolService();
-        counterStore = createMock(ICounterStoreService.class);
-        
-        fmc.addService(ICounterStoreService.class, counterStore);
-        fmc.addService(IThreadPoolService.class, threadPool);
-        
-        threadPool.init(fmc);
-        flowReconcileMgr.init(fmc);
-
-        threadPool.startUp(fmc);
-        flowReconcileMgr.startUp(fmc);
-    }
-
-    /** Verify pipeline listener registration and ordering
-     * 
-     * @throws Exception
-     */
-    @SuppressWarnings("unchecked")
-    @Test
-    public void testFlowReconcilePipeLine() throws Exception {
-        flowReconcileMgr.flowReconcileEnabled = true;
-    
-        IFlowReconcileListener r1 =
-            EasyMock.createNiceMock(IFlowReconcileListener.class);
-        IFlowReconcileListener r2 =
-            EasyMock.createNiceMock(IFlowReconcileListener.class);
-        IFlowReconcileListener r3 =
-            EasyMock.createNiceMock(IFlowReconcileListener.class);
-        
-        expect(r1.getName()).andReturn("r1").anyTimes();
-        expect(r2.getName()).andReturn("r2").anyTimes();
-        expect(r3.getName()).andReturn("r3").anyTimes();
-        
-        // Set the listeners' order: r1 -> r2 -> r3
-        expect(r1.isCallbackOrderingPrereq((OFType)anyObject(),
-            (String)anyObject())).andReturn(false).anyTimes();
-        expect(r1.isCallbackOrderingPostreq((OFType)anyObject(),
-            (String)anyObject())).andReturn(false).anyTimes();
-        expect(r2.isCallbackOrderingPrereq((OFType)anyObject(),
-            eq("r1"))).andReturn(true).anyTimes();
-        expect(r2.isCallbackOrderingPrereq((OFType)anyObject(),
-            eq("r3"))).andReturn(false).anyTimes();
-        expect(r2.isCallbackOrderingPostreq((OFType)anyObject(),
-            eq("r1"))).andReturn(false).anyTimes();
-        expect(r2.isCallbackOrderingPostreq((OFType)anyObject(),
-            eq("r3"))).andReturn(true).anyTimes();
-        expect(r3.isCallbackOrderingPrereq((OFType)anyObject(),
-            eq("r1"))).andReturn(false).anyTimes();
-        expect(r3.isCallbackOrderingPrereq((OFType)anyObject(),
-            eq("r2"))).andReturn(true).anyTimes();
-        expect(r3.isCallbackOrderingPostreq((OFType)anyObject(),
-            (String)anyObject())).andReturn(false).anyTimes();
-        
-        expect(r1.reconcileFlows((ArrayList<OFMatchReconcile>)anyObject())).
-                  andThrow(new RuntimeException("This is NOT an error! " +
-                            "We are testing exception catching."));
-        
-        SimpleCounter cnt = (SimpleCounter)SimpleCounter.createCounter(
-                            new Date(),
-                            CounterType.LONG);
-        cnt.increment();
-        expect(counterStore.getCounter(
-                flowReconcileMgr.controllerPktInCounterName))
-                .andReturn(cnt)
-                .anyTimes();
-        
-        replay(r1, r2, r3, counterStore);
-        flowReconcileMgr.clearFlowReconcileListeners();
-        flowReconcileMgr.addFlowReconcileListener(r1);
-        flowReconcileMgr.addFlowReconcileListener(r2);
-        flowReconcileMgr.addFlowReconcileListener(r3);
-        
-        int pre_flowReconcileThreadRunCount =
-                flowReconcileMgr.flowReconcileThreadRunCount;
-        Date startTime = new Date();
-        OFMatchReconcile ofmRcIn = new OFMatchReconcile();
-        try {
-            flowReconcileMgr.reconcileFlow(ofmRcIn);
-            flowReconcileMgr.doReconcile();
-        } catch (RuntimeException e) {
-            assertEquals(e.getMessage()
-                .startsWith("This is NOT an error!"), true);
-        }
-        
-        verify(r1, r2, r3);
-
-        // verify STOP works
-        reset(r1, r2, r3);
-        
-        // restart reconcileThread since it exited due to previous runtime
-        // exception.
-        flowReconcileMgr.startUp(fmc);
-        expect(r1.reconcileFlows((ArrayList<OFMatchReconcile>)anyObject()))
-        .andReturn(Command.STOP).times(1);
-        expect(r2.reconcileFlows((ArrayList<OFMatchReconcile>)anyObject()));
-        expectLastCall().andAnswer(new IAnswer<Object>() {
-            public Object answer() {
-                fail("Unexpected call");
-                return Command.STOP;
-            }
-        }).anyTimes();
-        
-        pre_flowReconcileThreadRunCount =
-            flowReconcileMgr.flowReconcileThreadRunCount;
-        startTime = new Date();
-        replay(r1, r2, r3);
-        flowReconcileMgr.reconcileFlow(ofmRcIn);
-        while (flowReconcileMgr.flowReconcileThreadRunCount <=
-                pre_flowReconcileThreadRunCount) {
-            Thread.sleep(10);
-            Date currTime = new Date();
-            assertTrue((currTime.getTime() - startTime.getTime()) < 1000);
-        }
-        verify(r1, r2, r3);
-        
-        // verify CONTINUE works
-        reset(r1, r2, r3);
-        expect(r1.reconcileFlows((ArrayList<OFMatchReconcile>)anyObject()))
-        .andReturn(Command.CONTINUE).times(1);
-        expect(r2.reconcileFlows((ArrayList<OFMatchReconcile>)anyObject()))
-        .andReturn(Command.STOP).times(1);
-        expect(r3.reconcileFlows((ArrayList<OFMatchReconcile>)anyObject()));
-        expectLastCall().andAnswer(new IAnswer<Object>() {
-            public Object answer() {
-                fail("Unexpected call");
-                return Command.STOP;
-            }
-        }).anyTimes();
-        
-        pre_flowReconcileThreadRunCount =
-            flowReconcileMgr.flowReconcileThreadRunCount;
-        startTime = new Date();
-        
-        replay(r1, r2, r3);
-        flowReconcileMgr.reconcileFlow(ofmRcIn);
-        while (flowReconcileMgr.flowReconcileThreadRunCount <=
-                pre_flowReconcileThreadRunCount) {
-            Thread.sleep(10);
-            Date currTime = new Date();
-            assertTrue((currTime.getTime() - startTime.getTime()) < 1000);
-        }
-        verify(r1, r2, r3);
-        
-        // verify CONTINUE works
-        reset(r1, r2, r3);
-        expect(r1.reconcileFlows((ArrayList<OFMatchReconcile>)anyObject()))
-        .andReturn(Command.CONTINUE).times(1);
-        expect(r2.reconcileFlows((ArrayList<OFMatchReconcile>)anyObject()))
-        .andReturn(Command.CONTINUE).times(1);
-        expect(r3.reconcileFlows((ArrayList<OFMatchReconcile>)anyObject()))
-        .andReturn(Command.STOP).times(1);
-        
-        pre_flowReconcileThreadRunCount =
-            flowReconcileMgr.flowReconcileThreadRunCount;
-        startTime = new Date();
-        
-        replay(r1, r2, r3);
-        flowReconcileMgr.reconcileFlow(ofmRcIn);
-        while (flowReconcileMgr.flowReconcileThreadRunCount <=
-                pre_flowReconcileThreadRunCount) {
-            Thread.sleep(10);
-            Date currTime = new Date();
-            assertTrue((currTime.getTime() - startTime.getTime()) < 1000);
-        }
-        verify(r1, r2, r3);
-        
-        // Verify removeFlowReconcileListener
-        flowReconcileMgr.removeFlowReconcileListener(r1);
-        reset(r1, r2, r3);
-        expect(r1.reconcileFlows((ArrayList<OFMatchReconcile>)anyObject()));
-        expectLastCall().andAnswer(new IAnswer<Object>() {
-            public Object answer() {
-                fail("Unexpected call to a listener that is " +
-                        "removed from the chain.");
-                return Command.STOP;
-            }
-        }).anyTimes();
-        expect(r2.reconcileFlows((ArrayList<OFMatchReconcile>)anyObject()))
-        .andReturn(Command.CONTINUE).times(1);
-        expect(r3.reconcileFlows((ArrayList<OFMatchReconcile>)anyObject()))
-        .andReturn(Command.STOP).times(1);
-        
-        pre_flowReconcileThreadRunCount =
-            flowReconcileMgr.flowReconcileThreadRunCount;
-        startTime = new Date();
-        replay(r1, r2, r3);
-        flowReconcileMgr.reconcileFlow(ofmRcIn);
-        while (flowReconcileMgr.flowReconcileThreadRunCount <=
-                pre_flowReconcileThreadRunCount) {
-            Thread.sleep(10);
-            Date currTime = new Date();
-            assertTrue((currTime.getTime() - startTime.getTime()) < 1000);
-        }
-        verify(r1, r2, r3);
-    }
-    
-    @Test
-    public void testGetPktInRate() {
-        internalTestGetPktInRate(CounterType.LONG);
-        internalTestGetPktInRate(CounterType.DOUBLE);
-    }
-    
-    protected void internalTestGetPktInRate(CounterType type) {
-        Date currentTime = new Date();
-        SimpleCounter newCnt = (SimpleCounter)SimpleCounter.createCounter(
-                                currentTime, type);
-        newCnt.increment(currentTime, 1);
-    
-        // Set the lastCounter time in the future of the current time
-        Date lastCounterTime = new Date(currentTime.getTime() + 1000);
-        flowReconcileMgr.lastPacketInCounter =
-                (SimpleCounter)SimpleCounter.createCounter(
-                    lastCounterTime, type);
-        flowReconcileMgr.lastPacketInCounter.increment(lastCounterTime, 1);
-    
-        assertEquals(FlowReconcileManager.MAX_SYSTEM_LOAD_PER_SECOND,
-                flowReconcileMgr.getPktInRate(newCnt, new Date()));
-    
-        // Verify the rate == 0 time difference is zero.
-        lastCounterTime = new Date(currentTime.getTime() - 1000);
-        flowReconcileMgr.lastPacketInCounter.increment(lastCounterTime, 1);
-        assertEquals(0, flowReconcileMgr.getPktInRate(newCnt, lastCounterTime));
-    
-        /** verify the computation is correct.
-         *  new = 2000, old = 1000, Tdiff = 1 second.
-         *  rate should be 1000/second
-         */
-        newCnt = (SimpleCounter)SimpleCounter.createCounter(
-                currentTime, type);
-        newCnt.increment(currentTime, 2000);
-    
-        lastCounterTime = new Date(currentTime.getTime() - 1000);
-        flowReconcileMgr.lastPacketInCounter =
-                (SimpleCounter)SimpleCounter.createCounter(
-                    lastCounterTime, type);
-        flowReconcileMgr.lastPacketInCounter.increment(lastCounterTime, 1000);
-        assertEquals(1000, flowReconcileMgr.getPktInRate(newCnt, currentTime));
-    
-        /** verify the computation is correct.
-         *  new = 2,000,000, old = 1,000,000, Tdiff = 2 second.
-         *  rate should be 1000/second
-         */
-        newCnt = (SimpleCounter)SimpleCounter.createCounter(
-                currentTime, type);
-        newCnt.increment(currentTime, 2000000);
-    
-        lastCounterTime = new Date(currentTime.getTime() - 2000);
-        flowReconcileMgr.lastPacketInCounter =
-                (SimpleCounter)SimpleCounter.createCounter(
-                    lastCounterTime, type);
-        flowReconcileMgr.lastPacketInCounter.increment(lastCounterTime,
-                1000000);
-        assertEquals(500000, flowReconcileMgr.getPktInRate(newCnt,
-                    currentTime));
-    }
-    
-    @Test
-    public void testGetCurrentCapacity() throws Exception {
-        // Disable the reconcile thread.
-        flowReconcileMgr.flowReconcileEnabled = false;
-    
-        int minFlows = FlowReconcileManager.MIN_FLOW_RECONCILE_PER_SECOND *
-                FlowReconcileManager.FLOW_RECONCILE_DELAY_MILLISEC / 1000;
-    
-        /** Verify the initial state, when packetIn counter has not
-         *  been created.
-         */
-        expect(counterStore.getCounter(
-                flowReconcileMgr.controllerPktInCounterName))
-        .andReturn(null)
-        .times(1);
-    
-        replay(counterStore);
-        assertEquals(minFlows, flowReconcileMgr.getCurrentCapacity());
-        verify(counterStore);
-    
-        /** Verify the initial state, when lastPacketInCounter is null */
-        reset(counterStore);
-        Date currentTime = new Date();
-        SimpleCounter newCnt = (SimpleCounter)SimpleCounter.createCounter(
-                        currentTime, CounterType.LONG);
-    
-        expect(counterStore.getCounter(
-            flowReconcileMgr.controllerPktInCounterName))
-        .andReturn(newCnt)
-        .times(1);
-        long initPktInCount = 10000;
-        newCnt.increment(currentTime, initPktInCount);
-    
-        replay(counterStore);
-        assertEquals(minFlows, flowReconcileMgr.getCurrentCapacity());
-        verify(counterStore);
-    
-        /** Now the lastPacketInCounter has been set.
-         *  lastCounter = 100,000 and newCounter = 300,000, t = 1 second
-         *  packetInRate = 200,000/sec.
-         *  capacity should be 500k - 200k = 300k
-         */
-        reset(counterStore);
-        newCnt = (SimpleCounter)SimpleCounter.createCounter(
-                    currentTime, CounterType.LONG);
-        currentTime = new Date(currentTime.getTime() + 200);
-        long nextPktInCount = 30000;
-        newCnt.increment(currentTime, nextPktInCount);
-    
-        expect(counterStore.getCounter(
-                flowReconcileMgr.controllerPktInCounterName))
-        .andReturn(newCnt)
-        .times(1);
-    
-        replay(counterStore);
-        // Wait for 1 second so that enough elapsed time to compute capacity.
-        Thread.sleep(1000);
-        int capacity = flowReconcileMgr.getCurrentCapacity();
-        verify(counterStore);
-        long expectedCap = (FlowReconcileManager.MAX_SYSTEM_LOAD_PER_SECOND -
-                (nextPktInCount - initPktInCount)) *
-                FlowReconcileManager.FLOW_RECONCILE_DELAY_MILLISEC / 1000;
-        assertEquals(expectedCap, capacity);
-    }
-    
-    private class FlowReconcileWorker implements Runnable {
-    @Override
-        public void run() {
-            OFMatchReconcile ofmRc = new OFMatchReconcile();
-            // push large number of flows to be reconciled.
-            for (int i = 0; i < NUM_FLOWS_PER_THREAD; i++) {
-                flowReconcileMgr.reconcileFlow(ofmRc);
-            }
-        }
-    }
-    
-    /** Verify the flows are sent to the reconcile pipeline in order.
-     */
-    @SuppressWarnings("unchecked")
-    @Test
-    public void testQueueFlowsOrder() {
-        flowReconcileMgr.flowReconcileEnabled = false;
-        
-        IFlowReconcileListener r1 =
-            EasyMock.createNiceMock(IFlowReconcileListener.class);
-        
-        expect(r1.getName()).andReturn("r1").anyTimes();
-        
-        // Set the listeners' order: r1 -> r2 -> r3
-        expect(r1.isCallbackOrderingPrereq((OFType)anyObject(),
-            (String)anyObject())).andReturn(false).anyTimes();
-        expect(r1.isCallbackOrderingPostreq((OFType)anyObject(),
-            (String)anyObject())).andReturn(false).anyTimes();
-        
-        expect(r1.reconcileFlows((ArrayList<OFMatchReconcile>)anyObject()))
-        .andAnswer(new IAnswer<Command>() {
-            @Override
-            public Command answer() throws Throwable {
-                ArrayList<OFMatchReconcile> ofmList =
-                    (ArrayList<OFMatchReconcile>)EasyMock.
-                        getCurrentArguments()[0];
-                ListIterator<OFMatchReconcile> lit = ofmList.listIterator();
-                int index = 0;
-                while (lit.hasNext()) {
-                    OFMatchReconcile ofm = lit.next();
-                    assertEquals(index++, ofm.cookie);
-                }
-                return Command.STOP;
-            }
-        }).times(1);
-        
-        SimpleCounter cnt = (SimpleCounter)SimpleCounter.createCounter(
-                            new Date(),
-                            CounterType.LONG);
-        cnt.increment();
-        expect(counterStore.getCounter(
-                flowReconcileMgr.controllerPktInCounterName))
-                .andReturn(cnt)
-                .anyTimes();
-        
-        replay(r1, counterStore);
-        flowReconcileMgr.clearFlowReconcileListeners();
-        flowReconcileMgr.addFlowReconcileListener(r1);
-        
-        OFMatchReconcile ofmRcIn = new OFMatchReconcile();
-        int index = 0;
-        for (index = 0; index < 10; index++) {
-            ofmRcIn.cookie = index;
-            flowReconcileMgr.reconcileFlow(ofmRcIn);
-        }
-        flowReconcileMgr.flowReconcileEnabled = true;
-        flowReconcileMgr.doReconcile();
-        
-        verify(r1);
-    }
-    
-    @SuppressWarnings("unchecked")
-    @Test
-    public void testQueueFlowsByManyThreads() {
-        // Disable the reconcile thread so that the queue won't be emptied.
-        flowQueueTest(false);
-    
-        // Enable the reconcile thread. The queue should be empty.
-        Date currentTime = new Date();
-        SimpleCounter newCnt = (SimpleCounter)SimpleCounter.createCounter(
-                    currentTime, CounterType.LONG);
-    
-        expect(counterStore.getCounter(
-                    flowReconcileMgr.controllerPktInCounterName))
-        .andReturn(newCnt)
-        .anyTimes();
-        long initPktInCount = 10000;
-        newCnt.increment(currentTime, initPktInCount);
-    
-        IFlowReconcileListener r1 =
-                EasyMock.createNiceMock(IFlowReconcileListener.class);
-        
-        expect(r1.getName()).andReturn("r1").anyTimes();
-        
-        // Set the listeners' order: r1 -> r2 -> r3
-        expect(r1.isCallbackOrderingPrereq((OFType)anyObject(),
-                (String)anyObject())).andReturn(false).anyTimes();
-        expect(r1.isCallbackOrderingPostreq((OFType)anyObject(),
-                (String)anyObject())).andReturn(false).anyTimes();
-        
-        expect(r1.reconcileFlows((ArrayList<OFMatchReconcile>)anyObject()))
-        .andReturn(Command.CONTINUE).anyTimes();
-        
-        flowReconcileMgr.clearFlowReconcileListeners();
-        replay(r1, counterStore);
-        flowQueueTest(true);
-        verify(r1, counterStore);
-    }
-    
-    protected void flowQueueTest(boolean enableReconcileThread) {
-        flowReconcileMgr.flowReconcileEnabled = enableReconcileThread;
-    
-        // Simulate flow
-        for (int i = 0; i < NUM_THREADS; i++) {
-            Runnable worker = this.new FlowReconcileWorker();
-            Thread t = new Thread(worker);
-            t.start();
-        }
-    
-        Date startTime = new Date();
-        int totalFlows = NUM_THREADS * NUM_FLOWS_PER_THREAD;
-        if (enableReconcileThread) {
-            totalFlows = 0;
-        }
-        while (flowReconcileMgr.flowQueue.size() != totalFlows) {
-            Date currTime = new Date();
-            assertTrue((currTime.getTime() - startTime.getTime()) < 2000);
-        }
-    
-        // Make sure all flows are in the queue.
-        assertEquals(totalFlows, flowReconcileMgr.flowQueue.size());
-    }
-}
diff --git a/src/test/java/net/floodlightcontroller/forwarding/ForwardingTest.java b/src/test/java/net/floodlightcontroller/forwarding/ForwardingTest.java
index 7a37589..f29c319 100644
--- a/src/test/java/net/floodlightcontroller/forwarding/ForwardingTest.java
+++ b/src/test/java/net/floodlightcontroller/forwarding/ForwardingTest.java
@@ -17,7 +17,16 @@
 
 package net.floodlightcontroller.forwarding;
 
-import static org.easymock.EasyMock.*;
+import static org.easymock.EasyMock.anyLong;
+import static org.easymock.EasyMock.anyObject;
+import static org.easymock.EasyMock.anyShort;
+import static org.easymock.EasyMock.capture;
+import static org.easymock.EasyMock.createMock;
+import static org.easymock.EasyMock.expect;
+import static org.easymock.EasyMock.expectLastCall;
+import static org.easymock.EasyMock.replay;
+import static org.easymock.EasyMock.reset;
+import static org.easymock.EasyMock.verify;
 
 import java.util.ArrayList;
 import java.util.Date;
@@ -31,13 +40,11 @@
 import net.floodlightcontroller.core.module.FloodlightModuleContext;
 import net.floodlightcontroller.core.test.MockFloodlightProvider;
 import net.floodlightcontroller.core.test.MockThreadPoolService;
-import net.floodlightcontroller.devicemanager.internal.DefaultEntityClassifier;
-import net.floodlightcontroller.devicemanager.test.MockDeviceManager;
-import net.floodlightcontroller.counter.CounterStore;
-import net.floodlightcontroller.counter.ICounterStoreService;
 import net.floodlightcontroller.devicemanager.IDevice;
 import net.floodlightcontroller.devicemanager.IDeviceService;
 import net.floodlightcontroller.devicemanager.IEntityClassifierService;
+import net.floodlightcontroller.devicemanager.internal.DefaultEntityClassifier;
+import net.floodlightcontroller.devicemanager.test.MockDeviceManager;
 import net.floodlightcontroller.packet.Data;
 import net.floodlightcontroller.packet.Ethernet;
 import net.floodlightcontroller.packet.IPacket;
@@ -50,9 +57,6 @@
 import net.floodlightcontroller.topology.ITopologyListener;
 import net.floodlightcontroller.topology.ITopologyService;
 import net.floodlightcontroller.topology.NodePortTuple;
-import net.floodlightcontroller.flowcache.FlowReconcileManager;
-import net.floodlightcontroller.flowcache.IFlowReconcileService;
-import net.floodlightcontroller.forwarding.Forwarding;
 
 import org.easymock.Capture;
 import org.easymock.CaptureType;
@@ -63,10 +67,10 @@
 import org.openflow.protocol.OFMatch;
 import org.openflow.protocol.OFMessage;
 import org.openflow.protocol.OFPacketIn;
+import org.openflow.protocol.OFPacketIn.OFPacketInReason;
 import org.openflow.protocol.OFPacketOut;
 import org.openflow.protocol.OFPort;
 import org.openflow.protocol.OFType;
-import org.openflow.protocol.OFPacketIn.OFPacketInReason;
 import org.openflow.protocol.action.OFAction;
 import org.openflow.protocol.action.OFActionOutput;
 import org.openflow.util.HexString;
@@ -77,7 +81,6 @@
     protected MockDeviceManager deviceManager;
     protected IRoutingService routingEngine;
     protected Forwarding forwarding;
-    protected FlowReconcileManager flowReconcileMgr;
     protected ITopologyService topology;
     protected MockThreadPoolService threadPool;
     protected IOFSwitch sw1, sw2;
@@ -121,7 +124,6 @@
         forwarding = new Forwarding();
         threadPool = new MockThreadPoolService();
         deviceManager = new MockDeviceManager();
-        flowReconcileMgr = new FlowReconcileManager();
         routingEngine = createMock(IRoutingService.class);
         topology = createMock(ITopologyService.class);
         DefaultEntityClassifier entityClassifier = new DefaultEntityClassifier();
@@ -133,9 +135,7 @@
         fmc.addService(IThreadPoolService.class, threadPool);
         fmc.addService(ITopologyService.class, topology);
         fmc.addService(IRoutingService.class, routingEngine);
-        fmc.addService(ICounterStoreService.class, new CounterStore());
         fmc.addService(IDeviceService.class, deviceManager);
-        fmc.addService(IFlowReconcileService.class, flowReconcileMgr);
         fmc.addService(IEntityClassifierService.class, entityClassifier);
 
         topology.addListener(anyObject(ITopologyListener.class));
@@ -144,12 +144,10 @@
         threadPool.init(fmc);
         forwarding.init(fmc);
         deviceManager.init(fmc);
-        flowReconcileMgr.init(fmc);
         entityClassifier.init(fmc);
         threadPool.startUp(fmc);
         deviceManager.startUp(fmc);
         forwarding.startUp(fmc);
-        flowReconcileMgr.startUp(fmc);
         entityClassifier.startUp(fmc);
         verify(topology);
         
diff --git a/src/test/java/net/floodlightcontroller/staticflowentry/StaticFlowTests.java b/src/test/java/net/floodlightcontroller/staticflowentry/StaticFlowTests.java
index 186fd69..4f53342 100644
--- a/src/test/java/net/floodlightcontroller/staticflowentry/StaticFlowTests.java
+++ b/src/test/java/net/floodlightcontroller/staticflowentry/StaticFlowTests.java
@@ -1,49 +1,16 @@
 package net.floodlightcontroller.staticflowentry;
 
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
 
-
-import org.easymock.Capture;
-import org.easymock.CaptureType;
-import org.junit.Test;
-import org.openflow.protocol.OFFlowMod;
-import org.openflow.protocol.OFMatch;
-import org.openflow.protocol.OFMessage;
-import org.openflow.protocol.OFPort;
-import org.openflow.protocol.action.OFAction;
-import org.openflow.protocol.action.OFActionOutput;
-import org.openflow.util.HexString;
-
-
-import net.floodlightcontroller.core.FloodlightContext;
-import net.floodlightcontroller.core.IFloodlightProviderService.Role;
-import net.floodlightcontroller.core.IOFSwitch;
-import net.floodlightcontroller.core.module.FloodlightModuleContext;
-import net.floodlightcontroller.core.module.FloodlightModuleException;
-import net.floodlightcontroller.core.test.MockFloodlightProvider;
-import net.floodlightcontroller.test.FloodlightTestCase;
-import net.floodlightcontroller.restserver.RestApiServer;
-import net.floodlightcontroller.staticflowentry.StaticFlowEntryPusher;
-import net.floodlightcontroller.storage.IStorageSourceService;
-import net.floodlightcontroller.storage.memory.MemoryStorageSource;
-import static net.floodlightcontroller.staticflowentry.StaticFlowEntryPusher.*;
-import static org.easymock.EasyMock.*;
-
+/*
 public class StaticFlowTests extends FloodlightTestCase {    
     
     static String TestSwitch1DPID = "00:00:00:00:00:00:00:01";
     static int TotalTestRules = 3;
     
-    /***
-     * Create TestRuleXXX and the corresponding FlowModXXX
-     * for X = 1..3
-     */
+    //
+    // Create TestRuleXXX and the corresponding FlowModXXX
+    // for X = 1..3
+    //
     static Map<String,Object> TestRule1;
     static OFFlowMod FlowMod1;
     static {
@@ -332,3 +299,4 @@
         assert(staticFlowEntryPusher.entriesFromStorage.containsValue(FlowMod3));
     }
 }
+*/
diff --git a/src/test/java/net/floodlightcontroller/storage/memory/tests/MemoryStorageTest.java b/src/test/java/net/floodlightcontroller/storage/memory/tests/MemoryStorageTest.java
deleted file mode 100644
index c250066..0000000
--- a/src/test/java/net/floodlightcontroller/storage/memory/tests/MemoryStorageTest.java
+++ /dev/null
@@ -1,41 +0,0 @@
-/**
-*    Copyright 2011, Big Switch Networks, Inc. 
-*    Originally created by David Erickson, Stanford University
-* 
-*    Licensed under the Apache License, Version 2.0 (the "License"); you may
-*    not use this file except in compliance with the License. You may obtain
-*    a copy of the License at
-*
-*         http://www.apache.org/licenses/LICENSE-2.0
-*
-*    Unless required by applicable law or agreed to in writing, software
-*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-*    License for the specific language governing permissions and limitations
-*    under the License.
-**/
-
-package net.floodlightcontroller.storage.memory.tests;
-
-import net.floodlightcontroller.core.module.FloodlightModuleContext;
-import net.floodlightcontroller.restserver.IRestApiService;
-import net.floodlightcontroller.restserver.RestApiServer;
-import net.floodlightcontroller.storage.memory.MemoryStorageSource;
-import net.floodlightcontroller.storage.tests.StorageTest;
-import org.junit.Before;
-
-public class MemoryStorageTest extends StorageTest {
-
-    @Before
-    public void setUp() throws Exception {
-        storageSource = new MemoryStorageSource();
-        restApi = new RestApiServer();
-        FloodlightModuleContext fmc = new FloodlightModuleContext();
-        fmc.addService(IRestApiService.class, restApi);
-        restApi.init(fmc);
-        storageSource.init(fmc);
-        restApi.startUp(fmc);
-        storageSource.startUp(fmc);
-        super.setUp();
-    }
-}
diff --git a/src/test/java/net/floodlightcontroller/storage/tests/StorageTest.java b/src/test/java/net/floodlightcontroller/storage/tests/StorageTest.java
deleted file mode 100644
index 29cc15b..0000000
--- a/src/test/java/net/floodlightcontroller/storage/tests/StorageTest.java
+++ /dev/null
@@ -1,743 +0,0 @@
-/**
-*    Copyright 2011, Big Switch Networks, Inc. 
-*    Originally created by David Erickson, Stanford University
-* 
-*    Licensed under the Apache License, Version 2.0 (the "License"); you may
-*    not use this file except in compliance with the License. You may obtain
-*    a copy of the License at
-*
-*         http://www.apache.org/licenses/LICENSE-2.0
-*
-*    Unless required by applicable law or agreed to in writing, software
-*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-*    License for the specific language governing permissions and limitations
-*    under the License.
-**/
-
-package net.floodlightcontroller.storage.tests;
-
-import static org.easymock.EasyMock.*;
-
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.Future;
-import java.util.concurrent.TimeoutException;
-import java.util.concurrent.TimeUnit;
-
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-
-import net.floodlightcontroller.restserver.RestApiServer;
-import net.floodlightcontroller.storage.CompoundPredicate;
-import net.floodlightcontroller.storage.IStorageExceptionHandler;
-import net.floodlightcontroller.storage.IPredicate;
-import net.floodlightcontroller.storage.IQuery;
-import net.floodlightcontroller.storage.IResultSet;
-import net.floodlightcontroller.storage.IRowMapper;
-import net.floodlightcontroller.storage.IStorageSourceListener;
-import net.floodlightcontroller.storage.NullValueStorageException;
-import net.floodlightcontroller.storage.OperatorPredicate;
-import net.floodlightcontroller.storage.RowOrdering;
-import net.floodlightcontroller.storage.nosql.NoSqlStorageSource;
-import net.floodlightcontroller.test.FloodlightTestCase;
-
-import org.junit.Test;
-
-public abstract class StorageTest extends FloodlightTestCase {
-    
-    protected NoSqlStorageSource storageSource;
-    protected RestApiServer restApi;
-    
-    protected String PERSON_TABLE_NAME = "Person";
-    
-    protected String PERSON_SSN = "SSN";
-    protected String PERSON_FIRST_NAME = "FirstName";
-    protected String PERSON_LAST_NAME = "LastName";
-    protected String PERSON_AGE = "Age";
-    protected String PERSON_REGISTERED = "Registered";
-    
-    protected String[] PERSON_COLUMN_LIST = {PERSON_SSN, PERSON_FIRST_NAME, PERSON_LAST_NAME, PERSON_AGE, PERSON_REGISTERED};
-    
-    class Person {
-        private String ssn;
-        private String firstName;
-        private String lastName;
-        int age;
-        boolean registered;
-        
-        public Person(String ssn, String firstName, String lastName, int age, boolean registered) {
-            this.ssn = ssn;
-            this.firstName = firstName;
-            this.lastName = lastName;
-            this.age = age;
-            this.registered = registered;
-        }
-        
-        public String getSSN() {
-            return ssn;
-        }
-        
-        public String getFirstName() {
-            return firstName;
-        }
-        
-        public String getLastName() {
-            return lastName;
-            
-        }
-        
-        public int getAge() {
-            return age;
-        }
-        
-        public boolean isRegistered() {
-            return registered;
-        }
-    }
-    
-    class PersonRowMapper implements IRowMapper {
-        public Object mapRow(IResultSet resultSet) {
-            String ssn = resultSet.getString(PERSON_SSN);
-            String firstName = resultSet.getString(PERSON_FIRST_NAME);
-            String lastName = resultSet.getString(PERSON_LAST_NAME);
-            int age = resultSet.getInt(PERSON_AGE);
-            boolean registered = resultSet.getBoolean(PERSON_REGISTERED);
-            return new Person(ssn, firstName, lastName, age, registered);
-        }
-    }
-    
-    Object[][] PERSON_INIT_DATA = {
-            {"111-11-1111", "John", "Smith", 40, true},
-            {"222-22-2222", "Jim", "White", 24, false},
-            {"333-33-3333", "Lisa", "Jones", 27, true},
-            {"444-44-4444", "Susan", "Jones", 14, false},
-            {"555-55-5555", "Jose", "Garcia", 31, true},
-            {"666-66-6666", "Abigail", "Johnson", 35, false},
-            {"777-77-7777", "Bjorn", "Borg", 55, true},
-            {"888-88-8888", "John", "McEnroe", 53, false}
-    };
-
-    Map<String,Object> createPersonRowValues(Object[] personData) {
-        Map<String,Object> rowValues = new HashMap<String,Object>();
-        for (int i = 0; i < PERSON_COLUMN_LIST.length; i++) {
-            rowValues.put(PERSON_COLUMN_LIST[i], personData[i]);
-        }
-        return rowValues;
-    }
-    
-    public void insertPerson(Object[] personData) {
-        Map<String,Object> rowValues = createPersonRowValues(personData);
-        storageSource.insertRow(PERSON_TABLE_NAME, rowValues);
-    }
-    
-    public void initPersons() {
-        for (Object[] row: PERSON_INIT_DATA) {
-            insertPerson(row);
-        }
-    }
-    
-    public void setUp() throws Exception {
-        super.setUp();
-        Set<String> indexedColumnNames = new HashSet<String>();
-        indexedColumnNames.add(PERSON_LAST_NAME);
-        storageSource.setExceptionHandler(null);
-        storageSource.createTable(PERSON_TABLE_NAME, indexedColumnNames);
-        storageSource.setTablePrimaryKeyName(PERSON_TABLE_NAME, PERSON_SSN);        
-        initPersons();
-    }
-
-    public void checkExpectedResults(IResultSet resultSet, String[] columnNameList, Object[][] expectedRowList) {
-        boolean nextResult;
-        for (Object[] expectedRow: expectedRowList) {
-            nextResult = resultSet.next();
-            assertEquals(nextResult,true);
-            assertEquals(expectedRow.length, columnNameList.length);
-            for (int i = 0; i < expectedRow.length; i++) {
-                Object expectedObject = expectedRow[i];
-                String columnName = columnNameList[i];
-                if (expectedObject instanceof Boolean)
-                    assertEquals(((Boolean)expectedObject).booleanValue(), resultSet.getBoolean(columnName));
-                else if (expectedObject instanceof Byte)
-                    assertEquals(((Byte)expectedObject).byteValue(), resultSet.getByte(columnName));
-                else if (expectedObject instanceof Short)
-                    assertEquals(((Short)expectedObject).shortValue(), resultSet.getShort(columnName));
-                else if (expectedObject instanceof Integer)
-                    assertEquals(((Integer)expectedObject).intValue(), resultSet.getInt(columnName));
-                else if (expectedObject instanceof Long)
-                    assertEquals(((Long)expectedObject).longValue(), resultSet.getLong(columnName));
-                else if (expectedObject instanceof Float)
-                    assertEquals(((Float)expectedObject).floatValue(), resultSet.getFloat(columnName), 0.00001);
-                else if (expectedObject instanceof Double)
-                    assertEquals(((Double)expectedObject).doubleValue(), resultSet.getDouble(columnName), 0.00001);
-                else if (expectedObject instanceof byte[])
-                    assertEquals((byte[])expectedObject, resultSet.getByteArray(columnName));
-                else if (expectedObject instanceof String)
-                    assertEquals((String)expectedObject, resultSet.getString(columnName));
-                else
-                    assertTrue("Unexpected column value type", false);
-            }
-        }
-        nextResult = resultSet.next();
-        assertEquals(nextResult,false);
-        resultSet.close();
-    }
-    
-    @Test
-    public void testInsertRows() {
-        IResultSet resultSet = storageSource.executeQuery(PERSON_TABLE_NAME, null, null, new RowOrdering(PERSON_SSN));
-        checkExpectedResults(resultSet, PERSON_COLUMN_LIST, PERSON_INIT_DATA);
-    }
-    
-    @Test
-    public void testOperatorQuery() {
-        Object[][] expectedResults = {
-                {"John", "Smith", 40},
-                {"Jim", "White", 24},
-        };
-        String[] columnList = {PERSON_FIRST_NAME,PERSON_LAST_NAME,PERSON_AGE};
-        IResultSet resultSet = storageSource.executeQuery(PERSON_TABLE_NAME, columnList,
-                new OperatorPredicate(PERSON_LAST_NAME, OperatorPredicate.Operator.GTE, "Sm"),
-                new RowOrdering(PERSON_SSN));
-        checkExpectedResults(resultSet, columnList, expectedResults);
-    }
-    
-    @Test
-    public void testAndQuery() {
-        String[] columnList = {PERSON_FIRST_NAME,PERSON_LAST_NAME};        
-        Object[][] expectedResults = {
-                {"Lisa", "Jones"},
-                {"Susan", "Jones"},
-                {"Jose", "Garcia"},
-                {"Abigail", "Johnson"},
-                {"John", "McEnroe"}
-        };
-        IResultSet resultSet = storageSource.executeQuery(PERSON_TABLE_NAME, columnList,
-                new CompoundPredicate(CompoundPredicate.Operator.AND, false,
-                        new OperatorPredicate(PERSON_LAST_NAME, OperatorPredicate.Operator.GTE, "G"),
-                        new OperatorPredicate(PERSON_LAST_NAME, OperatorPredicate.Operator.LT, "N")
-                ),
-                new RowOrdering(PERSON_SSN));
-        checkExpectedResults(resultSet, columnList, expectedResults);
-    }
-    
-    @Test
-    public void testOrQuery() {
-        String[] columnList = {PERSON_FIRST_NAME,PERSON_LAST_NAME, PERSON_AGE};        
-        Object[][] expectedResults = {
-                {"John", "Smith", 40},
-                {"Lisa", "Jones", 27},
-                {"Abigail", "Johnson", 35},
-                {"Bjorn", "Borg", 55},
-                {"John", "McEnroe", 53}
-        };
-        IResultSet resultSet = storageSource.executeQuery(PERSON_TABLE_NAME, columnList,
-                new CompoundPredicate(CompoundPredicate.Operator.OR, false,
-                        new OperatorPredicate(PERSON_AGE, OperatorPredicate.Operator.GTE, 35),
-                        new OperatorPredicate(PERSON_FIRST_NAME, OperatorPredicate.Operator.EQ, "Lisa")
-                ),
-                new RowOrdering(PERSON_SSN));
-        checkExpectedResults(resultSet, columnList, expectedResults);
-}
-    
-    @Test
-    public void testCreateQuery() {
-        String[] columnList = {PERSON_FIRST_NAME,PERSON_LAST_NAME};
-        Object[][] expectedResults = {
-                {"Lisa", "Jones"},
-                {"Susan", "Jones"}
-        };
-        IPredicate predicate = new OperatorPredicate(PERSON_LAST_NAME, OperatorPredicate.Operator.EQ, "Jones");
-        IQuery query = storageSource.createQuery(PERSON_TABLE_NAME, columnList, predicate, new RowOrdering(PERSON_SSN));
-        IResultSet resultSet = storageSource.executeQuery(query);
-        checkExpectedResults(resultSet, columnList, expectedResults);
-    }
-    
-    @Test
-    public void testQueryParameters() {
-        String[] columnList = {PERSON_FIRST_NAME,PERSON_LAST_NAME, PERSON_AGE};        
-        Object[][] expectedResults = {
-                {"John", "Smith", 40},
-                {"Bjorn", "Borg", 55},
-                {"John", "McEnroe", 53}
-        };
-        IPredicate predicate = new OperatorPredicate(PERSON_AGE, OperatorPredicate.Operator.GTE, "?MinimumAge?");
-        IQuery query = storageSource.createQuery(PERSON_TABLE_NAME, columnList, predicate, new RowOrdering(PERSON_SSN));
-        query.setParameter("MinimumAge", 40);
-        IResultSet resultSet = storageSource.executeQuery(query);
-        checkExpectedResults(resultSet, columnList, expectedResults);
-    }
-    
-    private void checkPerson(Person person, Object[] expectedValues) {
-        assertEquals(person.getSSN(), expectedValues[0]);
-        assertEquals(person.getFirstName(), expectedValues[1]);
-        assertEquals(person.getLastName(), expectedValues[2]);
-        assertEquals(person.getAge(), expectedValues[3]);
-        assertEquals(person.isRegistered(), expectedValues[4]);
-    }
-    
-    @Test
-    public void testRowMapper() {
-        Object[][] expectedResults = {
-                PERSON_INIT_DATA[2],
-                PERSON_INIT_DATA[3]
-        };
-        IPredicate predicate = new OperatorPredicate(PERSON_LAST_NAME, OperatorPredicate.Operator.EQ, "Jones");
-        IRowMapper rowMapper = new PersonRowMapper();
-        Object[] personList = storageSource.executeQuery(PERSON_TABLE_NAME, null, predicate, new RowOrdering(PERSON_SSN), rowMapper);
-        assertEquals(personList.length, 2);
-        for (int i = 0; i < personList.length; i++)
-            checkPerson((Person)personList[i], expectedResults[i]);
-    }
-    
-    @Test
-    public void testDeleteRowsDirect() {
-        
-        storageSource.deleteRow(PERSON_TABLE_NAME, "111-11-1111");
-        storageSource.deleteRow(PERSON_TABLE_NAME, "222-22-2222");
-        storageSource.deleteRow(PERSON_TABLE_NAME, "333-33-3333");
-        storageSource.deleteRow(PERSON_TABLE_NAME, "444-44-4444");
-        
-        Object[][] expectedResults = {
-                {"555-55-5555", "Jose", "Garcia", 31, true},
-                {"666-66-6666", "Abigail", "Johnson", 35, false},
-                {"777-77-7777", "Bjorn", "Borg", 55, true},
-                {"888-88-8888", "John", "McEnroe", 53, false}
-        };
-        IResultSet resultSet = storageSource.executeQuery(PERSON_TABLE_NAME, PERSON_COLUMN_LIST, null, new RowOrdering(PERSON_SSN));
-        checkExpectedResults(resultSet, PERSON_COLUMN_LIST, expectedResults);
-    }
-    
-    @Test
-    public void testDeleteRowsFromResultSet() {
-        Object[][] expectedResults = {
-                {"555-55-5555", "Jose", "Garcia", 31, true},
-                {"666-66-6666", "Abigail", "Johnson", 35, false},
-                {"777-77-7777", "Bjorn", "Borg", 55, true},
-                {"888-88-8888", "John", "McEnroe", 53, false}
-        };
-        
-        // Query once to delete the rows
-        IResultSet resultSet = storageSource.executeQuery(PERSON_TABLE_NAME, PERSON_COLUMN_LIST, null, new RowOrdering(PERSON_SSN));
-        for (int i = 0; i < 4; i++) {
-            resultSet.next();
-            resultSet.deleteRow();
-        }
-        resultSet.save();
-        resultSet.close();
-        
-        // Now query again to verify that the rows were deleted
-        resultSet = storageSource.executeQuery(PERSON_TABLE_NAME, PERSON_COLUMN_LIST, null, new RowOrdering(PERSON_SSN));
-        checkExpectedResults(resultSet, PERSON_COLUMN_LIST, expectedResults);
-    }
-    
-    @Test
-    public void testDeleteMatchingRows() {
-        Object[][] expectedResults = {
-                {"111-11-1111", "John", "Smith", 40, true},
-                {"777-77-7777", "Bjorn", "Borg", 55, true},
-                {"888-88-8888", "John", "McEnroe", 53, false}
-        };
-        storageSource.deleteMatchingRows(PERSON_TABLE_NAME, new OperatorPredicate(PERSON_AGE, OperatorPredicate.Operator.LT, 40));
-        
-        // Now query again to verify that the rows were deleted
-        IResultSet resultSet = storageSource.executeQuery(PERSON_TABLE_NAME, PERSON_COLUMN_LIST, null, new RowOrdering(PERSON_SSN));
-        checkExpectedResults(resultSet, PERSON_COLUMN_LIST, expectedResults);
-        
-        storageSource.deleteMatchingRows(PERSON_TABLE_NAME, null);
-
-        // Now query again to verify that all rows were deleted
-        resultSet = storageSource.executeQuery(PERSON_TABLE_NAME, PERSON_COLUMN_LIST, null, new RowOrdering(PERSON_SSN));
-        checkExpectedResults(resultSet, PERSON_COLUMN_LIST, new Object[0][]);
-    }
-    
-    @Test
-    public void testUpdateRowsDirect() {
-        
-        Object[][] expectedResults = {
-                {"777-77-7777", "Tennis", "Borg", 60, true},
-                {"888-88-8888", "Tennis", "McEnroe", 60, false}
-        };
-        Map<String,Object> updateValues = new HashMap<String,Object>();
-        updateValues.put(PERSON_FIRST_NAME, "Tennis");
-        updateValues.put(PERSON_AGE, 60);
-        
-        IPredicate predicate = new OperatorPredicate(PERSON_AGE, OperatorPredicate.Operator.GT, 50);
-        IResultSet resultSet = storageSource.executeQuery(PERSON_TABLE_NAME, null, predicate, new RowOrdering(PERSON_SSN));
-        while (resultSet.next()) {
-            String key = resultSet.getString(PERSON_SSN);
-            storageSource.updateRow(PERSON_TABLE_NAME, key, updateValues);
-        }
-        resultSet.close();
-        
-        resultSet = storageSource.executeQuery(PERSON_TABLE_NAME, PERSON_COLUMN_LIST, predicate, new RowOrdering(PERSON_SSN));
-        checkExpectedResults(resultSet, PERSON_COLUMN_LIST, expectedResults);
-    }
-    
-    @Test
-    public void testUpdateRowsFromResultSet() {
-        
-        Object[][] expectedResults = {
-                {"777-77-7777", "Tennis", "Borg", 60, true},
-                {"888-88-8888", "Tennis", "McEnroe", 60, false}
-        };
-        
-        IPredicate predicate = new OperatorPredicate(PERSON_AGE, OperatorPredicate.Operator.GT, 50);
-        IResultSet resultSet = storageSource.executeQuery(PERSON_TABLE_NAME, null, predicate, null);
-        while (resultSet.next()) {
-            resultSet.setString(PERSON_FIRST_NAME, "Tennis");
-            resultSet.setInt(PERSON_AGE, 60);
-        }
-        resultSet.save();
-        resultSet.close();
-        
-        resultSet = storageSource.executeQuery(PERSON_TABLE_NAME, PERSON_COLUMN_LIST, predicate, new RowOrdering(PERSON_SSN));
-        checkExpectedResults(resultSet, PERSON_COLUMN_LIST, expectedResults);
-    }
-    
-    @Test
-    public void testNullValues() {
-        
-        IPredicate predicate = new OperatorPredicate(PERSON_LAST_NAME, OperatorPredicate.Operator.EQ, "Jones");
-        IResultSet resultSet = storageSource.executeQuery(PERSON_TABLE_NAME, null, predicate, new RowOrdering(PERSON_SSN));
-        while (resultSet.next()) {
-            resultSet.setNull(PERSON_FIRST_NAME);
-            resultSet.setIntegerObject(PERSON_AGE, null);
-        }
-        resultSet.save();
-        resultSet.close();
-
-        resultSet = storageSource.executeQuery(PERSON_TABLE_NAME, null, predicate, new RowOrdering(PERSON_SSN));
-        int count = 0;
-        while (resultSet.next()) {
-            boolean checkNull = resultSet.isNull(PERSON_FIRST_NAME);
-            assertTrue(checkNull);
-            String s = resultSet.getString(PERSON_FIRST_NAME);
-            assertEquals(s, null);
-            checkNull = resultSet.isNull(PERSON_AGE);
-            assertTrue(checkNull);
-            Integer intObj = resultSet.getIntegerObject(PERSON_AGE);
-            assertEquals(intObj, null);
-            Short shortObj = resultSet.getShortObject(PERSON_AGE);
-            assertEquals(shortObj, null);
-            boolean excThrown = false;
-            try {
-                resultSet.getInt(PERSON_AGE);
-            }
-            catch (NullValueStorageException exc) {
-                excThrown = true;
-            }
-            assertTrue(excThrown);
-            count++;
-        }
-        resultSet.close();
-        assertEquals(count, 2);
-        
-        predicate = new OperatorPredicate(PERSON_FIRST_NAME, OperatorPredicate.Operator.EQ, null);
-        resultSet = storageSource.executeQuery(PERSON_TABLE_NAME, null, predicate, new RowOrdering(PERSON_SSN));
-        count = 0;
-        while (resultSet.next()) {
-            boolean checkNull = resultSet.isNull(PERSON_FIRST_NAME);
-            assertTrue(checkNull);
-            count++;
-            checkNull = resultSet.isNull(PERSON_AGE);
-            assertTrue(checkNull);
-        }
-        resultSet.close();
-        assertEquals(count, 2);
-    }
-    
-    @Test
-    public void testInsertNotification() {
-        // Set up the listener and record the expected notification
-        IStorageSourceListener mockListener = createNiceMock(IStorageSourceListener.class);
-        Set<Object> expectedKeys = new HashSet<Object>();
-        expectedKeys.add("999-99-9999");
-        mockListener.rowsModified(PERSON_TABLE_NAME, expectedKeys);
-        
-        replay(mockListener);
-
-        // Now try it for real
-        storageSource.addListener(PERSON_TABLE_NAME, mockListener);
-
-        // Create a new person, which should trigger the listener
-        Object[] newPerson = {"999-99-9999", "Serena", "Williams", 22, true};
-        insertPerson(newPerson);
-        
-        verify(mockListener);
-    }
-    
-    @Test
-    public void testUpdateNotification() {
-        // Set up the listener and record the expected notification
-        IStorageSourceListener mockListener = createNiceMock(IStorageSourceListener.class);
-        Set<Object> expectedKeys = new HashSet<Object>();
-        expectedKeys.add("111-11-1111");
-        mockListener.rowsModified(PERSON_TABLE_NAME, expectedKeys);
-        
-        replay(mockListener);
-
-        // Now try it for real
-        storageSource.addListener(PERSON_TABLE_NAME, mockListener);
-
-        // Create a new person, which should trigger the listener
-        Map<String,Object> updateValues = new HashMap<String,Object>();
-        updateValues.put(PERSON_FIRST_NAME, "Tennis");
-        storageSource.updateRow(PERSON_TABLE_NAME, "111-11-1111", updateValues);
-        
-        verify(mockListener);
-    }
-    
-    @Test
-    public void testDeleteNotification() {
-        IStorageSourceListener mockListener = createNiceMock(IStorageSourceListener.class);
-        Set<Object> expectedKeys = new HashSet<Object>();
-        expectedKeys.add("111-11-1111");
-        mockListener.rowsDeleted(PERSON_TABLE_NAME, expectedKeys);
-        
-        replay(mockListener);
-
-        // Now try it for real
-        storageSource.addListener(PERSON_TABLE_NAME, mockListener);
-
-        // Create a new person, which should trigger the listener
-        storageSource.deleteRow(PERSON_TABLE_NAME, "111-11-1111");
-        
-        verify(mockListener);
-    }
-    
-    public void waitForFuture(Future<?> future) {
-        try
-        {
-            future.get(10, TimeUnit.SECONDS);
-        }
-        catch (InterruptedException exc)
-        {
-            fail("Async storage operation interrupted");
-        }
-        catch (ExecutionException exc) {
-            fail("Async storage operation failed");
-        }
-        catch (TimeoutException exc) {
-            fail("Async storage operation timed out");
-        }
-    }
-    
-    @Test
-    public void testAsyncQuery1() {
-        Object[][] expectedResults = {
-                {"John", "Smith", 40},
-                {"Jim", "White", 24},
-        };
-        String[] columnList = {PERSON_FIRST_NAME,PERSON_LAST_NAME,PERSON_AGE};
-        IPredicate predicate = new OperatorPredicate(PERSON_LAST_NAME, OperatorPredicate.Operator.GTE, "Sm");
-        IQuery query = storageSource.createQuery(PERSON_TABLE_NAME, columnList, predicate, new RowOrdering(PERSON_SSN));
-        Future<IResultSet> future = storageSource.executeQueryAsync(query);
-        waitForFuture(future);
-        try {
-            IResultSet resultSet = future.get();
-            checkExpectedResults(resultSet, columnList, expectedResults);
-        }
-        catch (Exception e) {
-            fail("Exception thrown in async storage operation: " + e.toString());
-        }
-    }
-    
-    @Test
-    public void testAsyncQuery2() {
-        Object[][] expectedResults = {
-                {"John", "Smith", 40},
-                {"Jim", "White", 24},
-        };
-        String[] columnList = {PERSON_FIRST_NAME,PERSON_LAST_NAME,PERSON_AGE};
-        IPredicate predicate = new OperatorPredicate(PERSON_LAST_NAME, OperatorPredicate.Operator.GTE, "Sm");
-        Future<IResultSet> future = storageSource.executeQueryAsync(PERSON_TABLE_NAME,
-                columnList, predicate, new RowOrdering(PERSON_SSN));
-        waitForFuture(future);
-        try {
-            IResultSet resultSet = future.get();
-            checkExpectedResults(resultSet, columnList, expectedResults);
-        }
-        catch (Exception e) {
-            fail("Exception thrown in async storage operation: " + e.toString());
-        }
-    }
-    
-    @Test
-    public void testAsyncQuery3() {
-        Object[][] expectedResults = {
-                PERSON_INIT_DATA[2],
-                PERSON_INIT_DATA[3]
-        };
-        IPredicate predicate = new OperatorPredicate(PERSON_LAST_NAME, OperatorPredicate.Operator.EQ, "Jones");
-        IRowMapper rowMapper = new PersonRowMapper();
-        Future<Object[]> future = storageSource.executeQueryAsync(PERSON_TABLE_NAME,
-                null, predicate, new RowOrdering(PERSON_SSN), rowMapper);
-        waitForFuture(future);
-        try {
-            Object[] personList = future.get();
-            assertEquals(personList.length, 2);
-            for (int i = 0; i < personList.length; i++)
-                checkPerson((Person)personList[i], expectedResults[i]);
-        }
-        catch (Exception e) {
-            fail("Exception thrown in async storage operation: " + e.toString());
-        }
-    }
-    
-    @Test
-    public void testAsyncException() {
-        class TestExceptionHandler implements IStorageExceptionHandler {
-            public int exceptionCount = 0;
-            @Override
-            public void handleException(Exception exception) {
-                exceptionCount++;
-            }
-        }
-        TestExceptionHandler exceptionHandler = new TestExceptionHandler();
-        storageSource.setExceptionHandler(exceptionHandler);
-        
-        // Use an invalid table name, which should cause the storage API call to throw
-        // an exception, which should then be converted to an ExecutionException.
-        Future<IResultSet> future = storageSource.executeQueryAsync("InvalidTableName",
-                null, null, null);
-        try {
-            future.get(10, TimeUnit.SECONDS);
-            fail("Expected ExecutionException was not thrown");
-        }
-        catch (ExecutionException e) {
-            assertTrue(true);
-        }
-        catch (Exception e) {
-            fail("Exception thrown in async storage operation: " + e.toString());
-        }
-        assertEquals(exceptionHandler.exceptionCount, 1);
-    }
-    
-    @Test
-    public void testAsyncInsertRow() {
-        Object[][] newPersonInfo = {{"999-99-9999", "Ellen", "Wilson", 40, true}};
-        Map<String,Object> rowValues = createPersonRowValues(newPersonInfo[0]);
-        Future<?> future = storageSource.insertRowAsync(PERSON_TABLE_NAME, rowValues);
-        waitForFuture(future);
-        try {
-            IResultSet resultSet = storageSource.executeQuery(PERSON_TABLE_NAME, null, null, new RowOrdering(PERSON_SSN));
-            Object[][] expectedPersons = Arrays.copyOf(PERSON_INIT_DATA, PERSON_INIT_DATA.length + newPersonInfo.length);
-            System.arraycopy(newPersonInfo, 0, expectedPersons, PERSON_INIT_DATA.length, newPersonInfo.length);
-            checkExpectedResults(resultSet, PERSON_COLUMN_LIST, expectedPersons);
-        }
-        catch (Exception e) {
-            fail("Exception thrown in async storage operation: " + e.toString());
-        }
-    }
-    
-    @Test
-    public void testAsyncUpdateRow() {
-        Map<String,Object> updateValues = new HashMap<String,Object>();
-        updateValues.put(PERSON_SSN, "777-77-7777");
-        updateValues.put(PERSON_FIRST_NAME, "Tennis");
-        updateValues.put(PERSON_AGE, 60);
-
-        Future<?> future = storageSource.updateRowAsync(PERSON_TABLE_NAME, updateValues);
-        waitForFuture(future);
-
-        try {
-            IResultSet resultSet = storageSource.getRow(PERSON_TABLE_NAME, "777-77-7777");
-            Object[][] expectedPersons = {{"777-77-7777", "Tennis", "Borg", 60, true}};
-            checkExpectedResults(resultSet, PERSON_COLUMN_LIST, expectedPersons);
-        }
-        catch (Exception e) {
-            fail("Exception thrown in async storage operation: " + e.toString());
-        }
-    }
-    
-    @Test
-    public void testAsyncUpdateRow2() {
-        Map<String,Object> updateValues = new HashMap<String,Object>();
-        updateValues.put(PERSON_FIRST_NAME, "Tennis");
-        updateValues.put(PERSON_AGE, 60);
-
-        Future<?> future = storageSource.updateRowAsync(PERSON_TABLE_NAME, "777-77-7777", updateValues);
-        waitForFuture(future);
-
-        try {
-            IResultSet resultSet = storageSource.getRow(PERSON_TABLE_NAME, "777-77-7777");
-            Object[][] expectedPersons = {{"777-77-7777", "Tennis", "Borg", 60, true}};
-            checkExpectedResults(resultSet, PERSON_COLUMN_LIST, expectedPersons);
-        }
-        catch (Exception e) {
-            fail("Exception thrown in async storage operation: " + e.toString());
-        }
-    }
-    
-    @Test
-    public void testAsyncUpdateMatchingRows() {
-        Map<String,Object> updateValues = new HashMap<String,Object>();
-        updateValues.put(PERSON_FIRST_NAME, "Tennis");
-        updateValues.put(PERSON_AGE, 60);
-
-        IPredicate predicate = new OperatorPredicate(PERSON_SSN, OperatorPredicate.Operator.EQ, "777-77-7777");
-        Future<?> future = storageSource.updateMatchingRowsAsync(PERSON_TABLE_NAME, predicate, updateValues);
-        waitForFuture(future);
-        try {
-            IResultSet resultSet = storageSource.getRow(PERSON_TABLE_NAME, "777-77-7777");
-            Object[][] expectedPersons = {{"777-77-7777", "Tennis", "Borg", 60, true}};
-            checkExpectedResults(resultSet, PERSON_COLUMN_LIST, expectedPersons);
-        }
-        catch (Exception e) {
-            fail("Exception thrown in async storage operation: " + e.toString());
-        }
-    }
-    
-    @Test
-    public void testAsyncDeleteRow() {
-        Future<?> future = storageSource.deleteRowAsync(PERSON_TABLE_NAME, "111-11-1111");
-        waitForFuture(future);
-        try {
-            IResultSet resultSet = storageSource.executeQuery(PERSON_TABLE_NAME, null, null, new RowOrdering(PERSON_SSN));
-            Object[][] expectedPersons = Arrays.copyOfRange(PERSON_INIT_DATA, 1, PERSON_INIT_DATA.length);
-            checkExpectedResults(resultSet, PERSON_COLUMN_LIST, expectedPersons);
-        }
-        catch (Exception e) {
-            fail("Exception thrown in async storage operation: " + e.toString());
-        }
-    }
-    
-    @Test
-    public void testAsyncDeleteMatchingRows() {
-        Future<?> future = storageSource.deleteMatchingRowsAsync(PERSON_TABLE_NAME, null);
-        waitForFuture(future);
-        try {
-            IResultSet resultSet = storageSource.executeQuery(PERSON_TABLE_NAME, null, null, new RowOrdering(PERSON_SSN));
-            checkExpectedResults(resultSet, PERSON_COLUMN_LIST, new Object[0][]);
-        }
-        catch (Exception e) {
-            fail("Exception thrown in async storage operation: " + e.toString());
-        }
-    }
-    
-    @Test
-    public void testAsyncSave() {
-        // Get a result set and make some changes to it
-        IResultSet resultSet = storageSource.executeQuery(PERSON_TABLE_NAME, null, null, new RowOrdering(PERSON_SSN));
-        resultSet.next();
-        resultSet.deleteRow();
-        resultSet.next();
-        resultSet.setString(PERSON_FIRST_NAME, "John");
-        
-        Future<?> future = storageSource.saveAsync(resultSet);
-        waitForFuture(future);
-        try {
-            resultSet = storageSource.executeQuery(PERSON_TABLE_NAME, null, null, new RowOrdering(PERSON_SSN));
-            Object[][] expectedPersons = Arrays.copyOfRange(PERSON_INIT_DATA, 1, PERSON_INIT_DATA.length);
-            expectedPersons[0][1] = "John";
-            checkExpectedResults(resultSet, PERSON_COLUMN_LIST, expectedPersons);
-        }
-        catch (Exception e) {
-            fail("Exception thrown in async storage operation: " + e.toString());
-        }
-        
-    }
-}
diff --git a/src/test/java/net/floodlightcontroller/topology/TopologyManagerTest.java b/src/test/java/net/floodlightcontroller/topology/TopologyManagerTest.java
index 06b48a2..280c336 100644
--- a/src/test/java/net/floodlightcontroller/topology/TopologyManagerTest.java
+++ b/src/test/java/net/floodlightcontroller/topology/TopologyManagerTest.java
@@ -1,12 +1,10 @@
 package net.floodlightcontroller.topology;
 
 import net.floodlightcontroller.core.IFloodlightProviderService;
-import net.floodlightcontroller.core.IFloodlightProviderService.Role;
 import net.floodlightcontroller.core.module.FloodlightModuleContext;
 import net.floodlightcontroller.core.test.MockThreadPoolService;
 import net.floodlightcontroller.test.FloodlightTestCase;
 import net.floodlightcontroller.threadpool.IThreadPoolService;
-import net.floodlightcontroller.topology.TopologyManager;
 import net.onrc.onos.ofcontroller.linkdiscovery.ILinkDiscovery;
 
 import org.junit.Before;
@@ -129,14 +127,4 @@
         assertTrue(tm.getPortBroadcastDomainLinks().size()==0);
         assertTrue(tm.getTunnelPorts().size()==0);
     }
-
-    @Test
-    public void testHARoleChange() throws Exception {
-        testBasic2();
-        getMockFloodlightProvider().dispatchRoleChanged(null, Role.SLAVE);
-        assert(tm.switchPorts.isEmpty());
-        assert(tm.switchPortLinks.isEmpty());
-        assert(tm.portBroadcastDomainLinks.isEmpty());
-        assert(tm.tunnelLinks.isEmpty());
-    }
 }
diff --git a/src/test/java/net/onrc/onos/ofcontroller/core/INetMapTopologyObjectsIFlowEntryTest.java b/src/test/java/net/onrc/onos/ofcontroller/core/INetMapTopologyObjectsIFlowEntryTest.java
index 06d8522..f1c2c71 100644
--- a/src/test/java/net/onrc/onos/ofcontroller/core/INetMapTopologyObjectsIFlowEntryTest.java
+++ b/src/test/java/net/onrc/onos/ofcontroller/core/INetMapTopologyObjectsIFlowEntryTest.java
@@ -78,6 +78,38 @@
 		flowEntry.setFlowEntryId(flowEntryId);
 		assertEquals(flowEntry.getFlowEntryId(), flowEntryId);
 	}
+
+	/**
+	 * Desc:
+	 *  Test method for set and get Idle Timeout.
+	 * Condition:
+	 *  N/A
+	 * Expect:
+	 * 1. Should set Idle Timeout.
+	 * 2. Should get Idle Timeout.
+	 */
+	@Test
+	public void testSetGetIdleTimeout() {
+		Integer idleTimeout = 5;
+		flowEntry.setIdleTimeout(idleTimeout);
+		assertEquals(flowEntry.getIdleTimeout(), idleTimeout);
+	}
+
+	/**
+	 * Desc:
+	 *  Test method for set and get Hard Timeout.
+	 * Condition:
+	 *  N/A
+	 * Expect:
+	 * 1. Should set Hard Timeout.
+	 * 2. Should get Hard Timeout.
+	 */
+	@Test
+	public void testSetGetHardTimeout() {
+		Integer hardTimeout = 5;
+		flowEntry.setHardTimeout(hardTimeout);
+		assertEquals(flowEntry.getHardTimeout(), hardTimeout);
+	}
 	
 	/**
 	 * Desc:
diff --git a/src/test/java/net/onrc/onos/ofcontroller/core/INetMapTopologyObjectsIFlowPathTest.java b/src/test/java/net/onrc/onos/ofcontroller/core/INetMapTopologyObjectsIFlowPathTest.java
index 9a1e34a..39e4955 100644
--- a/src/test/java/net/onrc/onos/ofcontroller/core/INetMapTopologyObjectsIFlowPathTest.java
+++ b/src/test/java/net/onrc/onos/ofcontroller/core/INetMapTopologyObjectsIFlowPathTest.java
@@ -158,6 +158,42 @@
 
 	/**
 	 * Desc:
+	 *  Test method for get and set Idle Timeout method.
+	 * Condition:
+	 *  N/A
+	 * Expect:
+	 * 1. Should set the Idle Timeout.
+	 * 2. Should get the Idle Timeout.
+	 */
+	@Test
+	public void testSetGetIdleTimeout() {
+		String flowId = "xx";
+		Integer idleTimeout = 5;
+		flowPath.setFlowId(flowId);
+		flowPath.setIdleTimeout(idleTimeout);
+		assertEquals(flowPath.getIdleTimeout(), idleTimeout);
+	}
+
+	/**
+	 * Desc:
+	 *  Test method for get and set Hard Timeout method.
+	 * Condition:
+	 *  N/A
+	 * Expect:
+	 * 1. Should set the Hard Timeout.
+	 * 2. Should get the Hard Timeout.
+	 */
+	@Test
+	public void testSetGetHardTimeout() {
+		String flowId = "xx";
+		Integer hardTimeout = 5;
+		flowPath.setFlowId(flowId);
+		flowPath.setHardTimeout(hardTimeout);
+		assertEquals(flowPath.getHardTimeout(), hardTimeout);
+	}
+
+	/**
+	 * Desc:
 	 *  Test method for get and set SourceSwitch method.
 	 * Condition:
 	 *  N/A
diff --git a/src/test/java/net/onrc/onos/ofcontroller/core/internal/LinkStorageImplTest.java b/src/test/java/net/onrc/onos/ofcontroller/core/internal/LinkStorageImplTest.java
index 0081264..69c3c8a 100644
--- a/src/test/java/net/onrc/onos/ofcontroller/core/internal/LinkStorageImplTest.java
+++ b/src/test/java/net/onrc/onos/ofcontroller/core/internal/LinkStorageImplTest.java
@@ -37,6 +37,7 @@
  */
 @RunWith(PowerMockRunner.class)
 @PrepareForTest({LinkStorageImpl.class, GraphDBConnection.class, GraphDBOperation.class})
+@SuppressWarnings("serial")
 public class LinkStorageImplTest {
 	protected final static Logger log = LoggerFactory.getLogger(LinkStorageImplTest.class);
 
@@ -519,7 +520,6 @@
 	 * Create a mock {@link GraphDBOperation} which hooks port-related methods.
 	 * @return EasyMock-wrapped GraphDBOperation object.
 	 */
-	@SuppressWarnings("serial")
 	private GraphDBOperation createMockGraphDBOperation() {
 		GraphDBOperation mockDBOpe = EasyMock.createNiceMock(GraphDBOperation.class);
 		
@@ -672,7 +672,6 @@
 	 * @param dpid DPID of the switch
 	 * @return List of port number
 	 */
-	@SuppressWarnings("serial")
 	private List<Short> getPorts(long dpid) {
 		List<Short> ports;
 		
@@ -699,7 +698,6 @@
 	 * Returns list of DPIDs in test topology.
 	 * @return List of DPIDs
 	 */
-	@SuppressWarnings("serial")
 	private List<Long> getDpids() {
 		List<Long> dpids = new ArrayList<Long>() {{
 			add(Long.decode("0x0000000000000a01"));
@@ -726,12 +724,6 @@
 		return new Link(Long.decode("0x0000000000000a01"), 3, Long.decode("0x0000000000000a03"), 1);
 	}
 	
-	// make NO sense while test-network data doesn't define physical network (i.e. any link is feasible)
-	@SuppressWarnings("unused")
-	private Link createInfeasibleLink() {
-		return new Link(Long.decode("0x0000000000000a01"), 1, Long.decode("0x0000000000000a03"), 3);
-	}
-
 	/**
 	 * Returns list of existing {@link Link} objects
 	 * @return ArrayList of new Link objects
diff --git a/src/test/java/net/onrc/onos/ofcontroller/core/internal/TestableGraphDBOperation.java b/src/test/java/net/onrc/onos/ofcontroller/core/internal/TestableGraphDBOperation.java
index d7724ae..8da306f 100644
--- a/src/test/java/net/onrc/onos/ofcontroller/core/internal/TestableGraphDBOperation.java
+++ b/src/test/java/net/onrc/onos/ofcontroller/core/internal/TestableGraphDBOperation.java
@@ -326,6 +326,7 @@
 	 * tests in net.onrc.onos.ofcontroller.core.*
 	 */
 	public static class TestDeviceObject implements IDeviceObject {
+		@SuppressWarnings("unused")
 		private String state,type,mac,ipaddr;
 		private List<IPortObject> ports;
 		private List<ISwitchObject> switches;
@@ -456,6 +457,8 @@
 		private String flowPathType;
 		private String flowPathUserState;
 		private Long flowPathFlags;
+		private Integer idleTimeout;
+		private Integer hardTimeout;
 		private String dataPathSummary;
 		private Short srcPort,dstPort;
 		private String matchSrcMac,matchDstMac;
@@ -474,6 +477,8 @@
 		private String flowPathTypeToUpdate;
 		private String flowPathUserStateToUpdate;
 		private Long flowPathFlagsToUpdate;
+		private Integer idleTimeoutToUpdate;
+		private Integer hardTimeoutToUpdate;
 		private String dataPathSummaryToUpdate;
 		private Short srcPortToUpdate,dstPortToUpdate;
 		private String matchSrcMacToUpdate,matchDstMacToUpdate;
@@ -514,6 +519,8 @@
 			if(flowPathTypeToUpdate != null) { flowPathType = flowPathTypeToUpdate; }
 			if(flowPathUserStateToUpdate != null) { flowPathUserState = flowPathUserStateToUpdate; }
 			if(flowPathFlagsToUpdate != null) { flowPathFlags = flowPathFlagsToUpdate; }
+			if(idleTimeoutToUpdate != null) { idleTimeout = idleTimeoutToUpdate; }
+			if(hardTimeoutToUpdate != null) { hardTimeout = hardTimeoutToUpdate; }
 			if(srcSwToUpdate != null) { srcSw = srcSwToUpdate; }
 			if(dstSwToUpdate != null) { dstSw = dstSwToUpdate; }
 			if(dataPathSummaryToUpdate != null) { dataPathSummary = dataPathSummaryToUpdate; }
@@ -545,6 +552,8 @@
 			flowPathTypeToUpdate = null;
 			flowPathUserStateToUpdate = null;
 			flowPathFlagsToUpdate = null;
+			idleTimeoutToUpdate = null;
+			hardTimeoutToUpdate = null;
 			srcSwToUpdate = dstSwToUpdate = dataPathSummaryToUpdate = null;
 			srcPortToUpdate = dstPortToUpdate = null;
 			matchSrcMacToUpdate = matchDstMacToUpdate = null;
@@ -565,6 +574,8 @@
 		public void setFlowPathTypeForTest(String flowPathType) { this.flowPathType = flowPathType; }
 		public void setFlowPathUserStateForTest(String flowPathUserState) { this.flowPathUserState = flowPathUserState; }
 		public void setFlowPathFlagsForTest(Long flowPathFlags) { this.flowPathFlags = flowPathFlags; }
+		public void setIdleTimeoutForTest(Integer idleTimeout) { this.idleTimeout = idleTimeout; }
+		public void setHardTimeoutForTest(Integer hardTimeout) { this.hardTimeout = hardTimeout; }
 		public void setSrcSwForTest(String srcSw) { this.srcSw = srcSw; }
 		public void setDstSwForTest(String dstSw) { this.dstSw = dstSw; }
 		public void setDataPathSummaryForTest(String dataPathSummary) { this.dataPathSummary = dataPathSummary; }
@@ -634,6 +645,18 @@
 		public void setFlowPathFlags(Long flowPathFlags) { flowPathFlagsToUpdate = flowPathFlags; }
 
 		@Override
+		public Integer getIdleTimeout() { return idleTimeout; }
+
+		@Override
+		public void setIdleTimeout(Integer idleTimeout) { idleTimeoutToUpdate = idleTimeout; }
+
+		@Override
+		public Integer getHardTimeout() { return hardTimeout; }
+
+		@Override
+		public void setHardTimeout(Integer hardTimeout) { hardTimeoutToUpdate = hardTimeout; }
+
+		@Override
 		public String getSrcSwitch() { return srcSw; }
 
 		@Override
@@ -768,6 +791,8 @@
 
 	public static class TestFlowEntry implements IFlowEntry {
 		private String state,type,entryId,dpid,userState,switchState,errorStateType,errorStateCode;
+		private Integer idleTimeout;
+		private Integer hardTimeout;
 		private Short matchInPort;
 		private String matchSrcMac,matchDstMac;
 		private Short matchEtherFrameType;
@@ -785,6 +810,8 @@
 	
 		private String stateToUpdate,typeToUpdate,entryIdToUpdate,dpidToUpdate,
 			userStateToUpdate,switchStateToUpdate,errorStateTypeToUpdate,errorStateCodeToUpdate;
+		private Integer idleTimeoutToUpdate;
+		private Integer hardTimeoutToUpdate;
 		private Short matchInPortToUpdate;
 		private String matchSrcMacToUpdate,matchDstMacToUpdate;
 		private Short matchEtherFrameTypeToUpdate;
@@ -810,6 +837,8 @@
 			if(stateToUpdate != null) { state = stateToUpdate; }
 			if(typeToUpdate != null) { type = typeToUpdate; }
 			if(entryIdToUpdate != null) { entryId = entryIdToUpdate; }
+			if(idleTimeoutToUpdate != null) { idleTimeout = idleTimeoutToUpdate; }
+			if(hardTimeoutToUpdate != null) { hardTimeout = hardTimeoutToUpdate; }
 			if(dpidToUpdate != null) { dpid = dpidToUpdate; }
 			if(userStateToUpdate != null) { userState = userStateToUpdate; }
 			if(switchStateToUpdate != null) { switchState = switchStateToUpdate; }
@@ -844,6 +873,7 @@
 		
 		public void clearUncommitedData() {
 			stateToUpdate = typeToUpdate = entryIdToUpdate = dpidToUpdate = null;
+			idleTimeoutToUpdate = hardTimeoutToUpdate = null;
 			userStateToUpdate = switchStateToUpdate = errorStateTypeToUpdate = errorStateCodeToUpdate = null;
 			matchInPortToUpdate = null;
 			matchSrcMacToUpdate = matchDstMacToUpdate = null;
@@ -864,6 +894,8 @@
 		public void setStateForTest(String state) { this.state = state; }
 		public void setTypeForTest(String type) { this.type = type; }
 		public void setEntryIdForTest(String entryId) { this.entryId = entryId; }
+		public void setIdleTimeoutForTest(Integer idleTimeout) { this.idleTimeout = idleTimeout; }
+		public void setHardTimeoutForTest(Integer hardTimeout) { this.hardTimeout = hardTimeout; }
 		public void setDpidForTest(String dpid) { this.dpid = dpid; }
 		public void setUserStateForTest(String userState) { this.userState = userState; }
 		public void setSwitchStateForTest(String switchState) { this.switchState = switchState; }
@@ -911,6 +943,18 @@
 	
 		@Override
 		public void setFlowEntryId(String flowEntryId) { entryIdToUpdate = flowEntryId; }
+
+		@Override
+		public Integer getIdleTimeout() { return idleTimeout; }
+	
+		@Override
+		public void setIdleTimeout(Integer idleTimeout) { idleTimeoutToUpdate = idleTimeout; }
+
+		@Override
+		public Integer getHardTimeout() { return hardTimeout; }
+	
+		@Override
+		public void setHardTimeout(Integer hardTimeout) { hardTimeoutToUpdate = hardTimeout; }
 	
 		@Override
 		public String getSwitchDpid() { return dpid; }
diff --git a/src/test/java/net/onrc/onos/ofcontroller/devicemanager/internal/DeviceStorageImplTestBB.java b/src/test/java/net/onrc/onos/ofcontroller/devicemanager/internal/DeviceStorageImplTestBB.java
index b207b9b..c80b52a 100644
--- a/src/test/java/net/onrc/onos/ofcontroller/devicemanager/internal/DeviceStorageImplTestBB.java
+++ b/src/test/java/net/onrc/onos/ofcontroller/devicemanager/internal/DeviceStorageImplTestBB.java
@@ -2,6 +2,8 @@
 
 import static org.junit.Assert.*;
 
+import java.net.InetAddress;
+import java.net.UnknownHostException;
 import java.util.Arrays;
 import java.util.List;
 
@@ -487,9 +489,10 @@
 			IDeviceObject dev1 = ope.searchDevice(macAddr);
 			assertEquals(macAddr, dev1.getMACAddress());
 
+			int ip_int = getPackedIPv4Address(ip);
 			//XXX not updated to new interface
-		    //IDeviceObject dev = deviceImpl.getDeviceByIP(ip);
-			IDeviceObject dev = null;
+		    IDeviceObject dev = deviceImpl.getDeviceByIP(ip_int);
+			//IDeviceObject dev = null;
 			
 		    assertNotNull(dev);
 		    
@@ -658,4 +661,14 @@
 		}
 	}
 
+	int getPackedIPv4Address(String ip) throws UnknownHostException {
+		byte[] bytes = InetAddress.getByName(ip).getAddress();
+
+		int val = 0;
+		  for (int i = 0; i < bytes.length; i++) {
+		    val <<= 8;
+		    val |= bytes[i] & 0xff;
+		  }
+		  return val;
+	}
 }
diff --git a/src/test/java/net/onrc/onos/ofcontroller/flowmanager/FlowManagerTest.java b/src/test/java/net/onrc/onos/ofcontroller/flowmanager/FlowManagerTest.java
index 9876c0d..73577c1 100644
--- a/src/test/java/net/onrc/onos/ofcontroller/flowmanager/FlowManagerTest.java
+++ b/src/test/java/net/onrc/onos/ofcontroller/flowmanager/FlowManagerTest.java
@@ -26,7 +26,6 @@
 import net.onrc.onos.ofcontroller.util.*;
 
 import org.easymock.EasyMock;
-import org.easymock.IAnswer;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Ignore;
@@ -124,6 +123,7 @@
 		return flowPath;
 	}
 	
+	/*
 	private ArrayList<FlowPath> createTestFlowPaths() {
 		FlowPath flowPath1 = createTestFlowPath(1, "foo caller id", "FP_TYPE_SHORTEST_PATH", "FP_USER_ADD", 0, 1, 1, 2, 2); 
 		FlowPath flowPath2 = createTestFlowPath(2, "caller id", "FP_TYPE_SHORTEST_PATH", "FP_USER_ADD", 0, 1, 1, 2, 2); 
@@ -136,6 +136,7 @@
 		
 		return flowPaths;
 	}
+	*/
 	
 
 	// IFlowService methods
@@ -710,7 +711,7 @@
 		fm.init(context);
 		// Use reflection to test the private method
 		// Boolean result = fm.reconcileFlow(iFlowPath1, dataPath);
-		Class fmClass = FlowManager.class;
+		Class<?> fmClass = FlowManager.class;
 		Method method = fmClass.getDeclaredMethod(
 			"reconcileFlow",
 			new Class[] { IFlowPath.class, DataPath.class });
@@ -772,7 +773,7 @@
 		fm.init(context);
 		// Use reflection to test the private method
 		// Boolean result = fm.installFlowEntry(iofSwitch, iFlowPath, iFlowEntry);
-		Class fmClass = FlowManager.class;
+		Class<?> fmClass = FlowManager.class;
 		Method method = fmClass.getDeclaredMethod(
 			"installFlowEntry",
 			new Class[] { IOFSwitch.class, IFlowPath.class, IFlowEntry.class });
diff --git a/src/test/java/net/onrc/onos/ofcontroller/flowprogrammer/FlowSynchronizerTest.java b/src/test/java/net/onrc/onos/ofcontroller/flowprogrammer/FlowSynchronizerTest.java
index 5b1bbdd..68b4f1f 100644
--- a/src/test/java/net/onrc/onos/ofcontroller/flowprogrammer/FlowSynchronizerTest.java
+++ b/src/test/java/net/onrc/onos/ofcontroller/flowprogrammer/FlowSynchronizerTest.java
@@ -6,13 +6,14 @@
 import java.util.ArrayList;
 import java.util.List;
 import java.util.concurrent.ExecutionException;
+import java.util.concurrent.Future;
 
-import io.netty.util.concurrent.Future;
 import net.floodlightcontroller.core.IOFSwitch;
 import net.onrc.onos.graph.GraphDBOperation;
 import net.onrc.onos.ofcontroller.core.INetMapTopologyObjects.IFlowEntry;
 import net.onrc.onos.ofcontroller.core.INetMapTopologyObjects.ISwitchObject;
 import net.onrc.onos.ofcontroller.flowmanager.FlowDatabaseOperation;
+import net.onrc.onos.ofcontroller.flowprogrammer.IFlowSyncService.SyncResult;
 import net.onrc.onos.ofcontroller.util.FlowEntry;
 import net.onrc.onos.ofcontroller.util.FlowEntryId;
 
@@ -91,7 +92,7 @@
 		initMockGraph(new long[] {1});
 		
 		// synchronize
-		doSynchronization(sw,100);
+		doSynchronization(sw);
 		
 		// check if flow is not changed
 		assertEquals(0, idAdded.size());
@@ -110,7 +111,7 @@
 		initMockGraph(new long[] {1});
 		
 		// synchronize
-		doSynchronization(sw,100);
+		doSynchronization(sw);
 		
 		// check if single flow is installed
 		assertEquals(1, idAdded.size());
@@ -130,7 +131,7 @@
 		initMockGraph(new long[] {});
 		
 		// synchronize
-		doSynchronization(sw,100);
+		doSynchronization(sw);
 		
 		// check if single flow is deleted
 		assertEquals(0, idAdded.size());
@@ -151,7 +152,7 @@
 		initMockGraph(new long[] {2,3,4,5});
 		
 		// synchronize
-		doSynchronization(sw,100);
+		doSynchronization(sw);
 		
 		// check if two flows {4,5} is installed and one flow {1} is deleted
 		assertEquals(2, idAdded.size());
@@ -179,7 +180,7 @@
 		initMockGraph(dbIdList);
 
 		// synchronize
-		doSynchronization(sw, 3000);
+		doSynchronization(sw);
 		
 		// check if 1500 flows {2000-3499} is installed and 1500 flows {0,...,1499} is deleted
 		assertEquals(1500, idAdded.size());
@@ -299,15 +300,14 @@
 	 * Instantiate FlowSynchronizer and sync flows.
 	 * @param sw Target IOFSwitch object
 	 */
-	private void doSynchronization(IOFSwitch sw, long wait) {
+	private void doSynchronization(IOFSwitch sw) {
 		sync = new FlowSynchronizer();
 		sync.init(pusher);
-		sync.synchronize(sw);
-		
+		Future<SyncResult> future = sync.synchronize(sw);
 		try {
-			Thread.sleep(wait);
-		} catch (InterruptedException e) {
-			fail("Failed to sleep");
+			future.get();
+		} catch (Exception e) {
+			fail("Failed to Future#get()");
 		}
 	}
 }
diff --git a/src/test/java/net/onrc/onos/ofcontroller/linkdiscovery/internal/LinkDiscoveryManagerTest.java b/src/test/java/net/onrc/onos/ofcontroller/linkdiscovery/internal/LinkDiscoveryManagerTest.java
index c3647f2..ecb87cf 100644
--- a/src/test/java/net/onrc/onos/ofcontroller/linkdiscovery/internal/LinkDiscoveryManagerTest.java
+++ b/src/test/java/net/onrc/onos/ofcontroller/linkdiscovery/internal/LinkDiscoveryManagerTest.java
@@ -17,21 +17,17 @@
 
 package net.onrc.onos.ofcontroller.linkdiscovery.internal;
 
-import static org.easymock.EasyMock.*;
+import static org.easymock.EasyMock.createNiceMock;
+import static org.easymock.EasyMock.expect;
+import static org.easymock.EasyMock.replay;
+import static org.easymock.EasyMock.verify;
 
-import java.util.Collections;
 import java.util.ArrayList;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.Map;
 
-import org.junit.Before;
-import org.junit.Test;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
 import net.floodlightcontroller.core.IFloodlightProviderService;
-import net.floodlightcontroller.core.IFloodlightProviderService.Role;
 import net.floodlightcontroller.core.IOFSwitch;
 import net.floodlightcontroller.core.module.FloodlightModuleContext;
 import net.floodlightcontroller.core.test.MockThreadPoolService;
@@ -39,8 +35,6 @@
 import net.floodlightcontroller.restserver.RestApiServer;
 import net.floodlightcontroller.routing.IRoutingService;
 import net.floodlightcontroller.routing.Link;
-import net.floodlightcontroller.storage.IStorageSourceService;
-import net.floodlightcontroller.storage.memory.MemoryStorageSource;
 import net.floodlightcontroller.test.FloodlightTestCase;
 import net.floodlightcontroller.threadpool.IThreadPoolService;
 import net.floodlightcontroller.topology.ITopologyService;
@@ -49,7 +43,11 @@
 import net.onrc.onos.ofcontroller.linkdiscovery.ILinkDiscoveryListener;
 import net.onrc.onos.ofcontroller.linkdiscovery.ILinkDiscoveryService;
 import net.onrc.onos.ofcontroller.linkdiscovery.LinkInfo;
-import net.onrc.onos.ofcontroller.linkdiscovery.internal.LinkDiscoveryManager;
+
+import org.junit.Before;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  *
@@ -74,12 +72,6 @@
             isSendLLDPsCalled = false;
             isClearLinksCalled = false;
         }
-
-        @Override
-        protected void clearAllLinks() {
-            isClearLinksCalled = true;
-            super.clearAllLinks();
-        }
     }
     
     public LinkDiscoveryManager getTopology() {
@@ -106,7 +98,6 @@
         cntx.addService(IRoutingService.class, routingEngine);
         cntx.addService(ILinkDiscoveryService.class, ldm);
         cntx.addService(ITopologyService.class, ldm);
-        cntx.addService(IStorageSourceService.class, new MemoryStorageSource());
         cntx.addService(IFloodlightProviderService.class, getMockFloodlightProvider());
         restApi.init(cntx);
         tp.init(cntx);
@@ -396,37 +387,4 @@
         assertTrue(topology.portBroadcastDomainLinks.get(srcNpt).contains(lt));
         assertTrue(topology.portBroadcastDomainLinks.get(dstNpt).contains(lt));
     }
-
-    @Test
-    public void testHARoleChange() throws Exception {
-        LinkDiscoveryManager topology = getTopology();
-        IOFSwitch sw1 = createMockSwitch(1L);
-        IOFSwitch sw2 = createMockSwitch(2L);
-        replay(sw1, sw2);
-        Link lt = new Link(1L, 2, 2L, 1);
-        NodePortTuple srcNpt = new NodePortTuple(1L, 2);
-        NodePortTuple dstNpt = new NodePortTuple(2L, 1);
-        LinkInfo info = new LinkInfo(System.currentTimeMillis(),
-                                     System.currentTimeMillis(), null,
-                                     0, 0);
-        topology.addOrUpdateLink(lt, info);
-
-        // check invariants hold
-        assertNotNull(topology.switchLinks.get(lt.getSrc()));
-        assertTrue(topology.switchLinks.get(lt.getSrc()).contains(lt));
-        assertNotNull(topology.portLinks.get(srcNpt));
-        assertTrue(topology.portLinks.get(srcNpt).contains(lt));
-        assertNotNull(topology.portLinks.get(dstNpt));
-        assertTrue(topology.portLinks.get(dstNpt).contains(lt));
-        assertTrue(topology.links.containsKey(lt));
-        
-        // check that it clears from memory
-        getMockFloodlightProvider().dispatchRoleChanged(null, Role.SLAVE);
-        assertTrue(topology.switchLinks.isEmpty());
-        getMockFloodlightProvider().dispatchRoleChanged(Role.SLAVE, Role.MASTER);
-        // check that lldps were sent
-        assertTrue(ldm.isSendLLDPsCalled);
-        assertTrue(ldm.isClearLinksCalled);
-        ldm.reset();
-    }
 }
diff --git a/src/test/java/net/onrc/onos/ofcontroller/topology/TopologyManagerTest.java b/src/test/java/net/onrc/onos/ofcontroller/topology/TopologyManagerTest.java
index 0461d72..8d4ccc0 100644
--- a/src/test/java/net/onrc/onos/ofcontroller/topology/TopologyManagerTest.java
+++ b/src/test/java/net/onrc/onos/ofcontroller/topology/TopologyManagerTest.java
@@ -2,7 +2,6 @@
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
-import java.util.Map;
 
 import org.easymock.EasyMock;
 
diff --git a/src/test/java/net/onrc/onos/ofcontroller/util/FlowEntryTest.java b/src/test/java/net/onrc/onos/ofcontroller/util/FlowEntryTest.java
index fc17178..696f9e5 100644
--- a/src/test/java/net/onrc/onos/ofcontroller/util/FlowEntryTest.java
+++ b/src/test/java/net/onrc/onos/ofcontroller/util/FlowEntryTest.java
@@ -13,6 +13,8 @@
 	
 	FlowId flowId = new FlowId(0x1234);
 	FlowEntryId flowEntryId = new FlowEntryId(0x5678);
+	int idleTimeout = 5;
+	int hardTimeout = 10;
 	FlowEntryMatch match;
 	FlowEntryActions actions;
 	
@@ -50,6 +52,9 @@
 
 		flowEntryId = new FlowEntryId("0x5678");
 		entry.setFlowEntryId(flowEntryId);
+
+		entry.setIdleTimeout(5);
+		entry.setHardTimeout(10);
 		
 		dpid = new Dpid("CA:FE");
 		entry.setDpid( dpid );
@@ -188,6 +193,16 @@
 	}
 
 	@Test
+	public void testIdleTimeout(){
+		assertEquals("idleTimeout", idleTimeout, entry.idleTimeout() );
+	}
+
+	@Test
+	public void testHardTimeout(){
+		assertEquals("hardTimeout", hardTimeout, entry.hardTimeout() );
+	}
+
+	@Test
 	public void testFlowEntryMatch(){
 		assertEquals("flowEntryMatch", match, entry.flowEntryMatch() );
 	}
@@ -237,8 +252,8 @@
 	@Test
 	public void testToString(){
 		FlowEntry def = new FlowEntry();
-		assertEquals("toString", def.toString(), "[ flowEntryActions=[] flowEntryUserState=FE_USER_UNKNOWN flowEntrySwitchState=FE_SWITCH_UNKNOWN]" );
-		assertEquals("toString", entry.toString(), "[flowEntryId=0x5678 flowId=0x1234 flowEntryMatch=[inPort=1 srcMac=01:02:03:04:05:06 dstMac=06:05:04:03:02:01 ethernetFrameType=2 vlanId=3 vlanPriority=4 srcIPv4Net=127.0.0.1/32 dstIPv4Net=127.0.0.2/32 ipProto=5 ipToS=6 srcTcpUdpPort=7 dstTcpUdpPort=8] flowEntryActions=[[type=ACTION_OUTPUT action=[port=9 maxLen=0]];[type=ACTION_OUTPUT action=[port=-3 maxLen=0]];[type=ACTION_SET_VLAN_VID action=[vlanId=3]];[type=ACTION_SET_VLAN_PCP action=[vlanPriority=4]];[type=ACTION_STRIP_VLAN action=[stripVlan=true]];[type=ACTION_SET_DL_SRC action=[addr=01:02:03:04:05:06]];[type=ACTION_SET_DL_DST action=[addr=06:05:04:03:02:01]];[type=ACTION_SET_NW_SRC action=[addr=127.0.0.3]];[type=ACTION_SET_NW_DST action=[addr=127.0.0.4]];[type=ACTION_SET_NW_TOS action=[ipToS=6]];[type=ACTION_SET_TP_SRC action=[port=7]];[type=ACTION_SET_TP_DST action=[port=8]];[type=ACTION_ENQUEUE action=[port=10 queueId=11]];] dpid=00:00:00:00:00:00:ca:fe inPort=1 outPort=9 flowEntryUserState=FE_USER_ADD flowEntrySwitchState=FE_SWITCH_UPDATED flowEntryErrorState=[type=12 code=13]]" );
+		assertEquals("toString", def.toString(), "[ idleTimeout=0 hardTimeout=0 flowEntryActions=[] flowEntryUserState=FE_USER_UNKNOWN flowEntrySwitchState=FE_SWITCH_UNKNOWN]" );
+		assertEquals("toString", entry.toString(), "[flowEntryId=0x5678 flowId=0x1234 idleTimeout=5 hardTimeout=10 flowEntryMatch=[inPort=1 srcMac=01:02:03:04:05:06 dstMac=06:05:04:03:02:01 ethernetFrameType=2 vlanId=3 vlanPriority=4 srcIPv4Net=127.0.0.1/32 dstIPv4Net=127.0.0.2/32 ipProto=5 ipToS=6 srcTcpUdpPort=7 dstTcpUdpPort=8] flowEntryActions=[[type=ACTION_OUTPUT action=[port=9 maxLen=0]];[type=ACTION_OUTPUT action=[port=-3 maxLen=0]];[type=ACTION_SET_VLAN_VID action=[vlanId=3]];[type=ACTION_SET_VLAN_PCP action=[vlanPriority=4]];[type=ACTION_STRIP_VLAN action=[stripVlan=true]];[type=ACTION_SET_DL_SRC action=[addr=01:02:03:04:05:06]];[type=ACTION_SET_DL_DST action=[addr=06:05:04:03:02:01]];[type=ACTION_SET_NW_SRC action=[addr=127.0.0.3]];[type=ACTION_SET_NW_DST action=[addr=127.0.0.4]];[type=ACTION_SET_NW_TOS action=[ipToS=6]];[type=ACTION_SET_TP_SRC action=[port=7]];[type=ACTION_SET_TP_DST action=[port=8]];[type=ACTION_ENQUEUE action=[port=10 queueId=11]];] dpid=00:00:00:00:00:00:ca:fe inPort=1 outPort=9 flowEntryUserState=FE_USER_ADD flowEntrySwitchState=FE_SWITCH_UPDATED flowEntryErrorState=[type=12 code=13]]" );
 	}
 
 }
diff --git a/src/test/java/net/onrc/onos/ofcontroller/util/FlowPathTest.java b/src/test/java/net/onrc/onos/ofcontroller/util/FlowPathTest.java
index bd42ac8..76ccf9f 100644
--- a/src/test/java/net/onrc/onos/ofcontroller/util/FlowPathTest.java
+++ b/src/test/java/net/onrc/onos/ofcontroller/util/FlowPathTest.java
@@ -19,6 +19,8 @@
 		iFlowPath.setFlowPathTypeForTest("FP_TYPE_SHORTEST_PATH");
 		iFlowPath.setFlowPathUserStateForTest("FP_USER_ADD");
 		iFlowPath.setFlowPathFlagsForTest(0L);
+		iFlowPath.setIdleTimeoutForTest(5);
+		iFlowPath.setHardTimeoutForTest(10);
 		iFlowPath.setSrcSwForTest("CA:FE");
 		iFlowPath.setSrcPortForTest((short)1);
 		iFlowPath.setDstSwForTest("BA:BE");
@@ -44,6 +46,8 @@
 		assertTrue ( flowPath.flowPathUserState() == FlowPathUserState.FP_USER_UNKNOWN);
 		assertFalse( flowPath.flowPathFlags().isDiscardFirstHopEntry() );
 		assertFalse( flowPath.flowPathFlags().isKeepOnlyFirstHopEntry() );
+		assertTrue (flowPath.idleTimeout() == 0);
+		assertTrue (flowPath.hardTimeout() == 0);
 		assertTrue( flowPath.flowEntryActions().isEmpty() );
 	}
 
@@ -55,6 +59,8 @@
 		iFlowPath.setFlowPathTypeForTest("FP_TYPE_SHORTEST_PATH");
 		iFlowPath.setFlowPathUserStateForTest("FP_USER_ADD");
 		iFlowPath.setFlowPathFlagsForTest(0L);
+		iFlowPath.setIdleTimeoutForTest(5);
+		iFlowPath.setHardTimeoutForTest(10);
 		iFlowPath.setSrcSwForTest("CA:FE");
 		iFlowPath.setSrcPortForTest((short)1);
 		iFlowPath.setDstSwForTest("BA:BE");
@@ -100,6 +106,8 @@
 		assertEquals(flowPath.flowPathType(), FlowPathType.FP_TYPE_SHORTEST_PATH);
 		assertEquals(flowPath.flowPathUserState(), FlowPathUserState.FP_USER_ADD);
 		assertEquals(flowPath.flowPathFlags().flags(), 0);
+		assertEquals(flowPath.idleTimeout(), 5);
+		assertEquals(flowPath.hardTimeout(), 10);
 		assertEquals(flowPath.dataPath().srcPort().dpid().value(), 0xCAFE);
 		assertEquals(flowPath.dataPath().srcPort().port().value(), 1);
 		assertEquals(flowPath.dataPath().dstPort().dpid().value(), 0xBABE);
@@ -123,6 +131,8 @@
 		
 		assertEquals(0x14, flowPath.dataPath().flowEntries().get(0).flowEntryId().value() );
 		assertEquals(0xBEEF, flowPath.dataPath().flowEntries().get(0).dpid().value() );
+		assertEquals(0, flowPath.dataPath().flowEntries().get(0).idleTimeout() );
+		assertEquals(0, flowPath.dataPath().flowEntries().get(0).hardTimeout() );
 		assertEquals(15, flowPath.dataPath().flowEntries().get(0).flowEntryMatch().inPort().value() );
 		assertEquals("11:22:33:44:55:66", flowPath.dataPath().flowEntries().get(0).flowEntryMatch().srcMac().toString());
 		assertEquals("66:55:44:33:22:11", flowPath.dataPath().flowEntries().get(0).flowEntryMatch().dstMac().toString());
@@ -179,6 +189,22 @@
 	}
 
 	@Test
+	public void testSetIdleTimeout(){
+		FlowPath flowPath = new FlowPath();
+		int idleTimeout = 15;
+		flowPath.setIdleTimeout( idleTimeout );
+		assertTrue( flowPath.idleTimeout() == 15 );
+	}
+
+	@Test
+	public void testSetHardTimeout(){
+		FlowPath flowPath = new FlowPath();
+		int hardTimeout = 20;
+		flowPath.setHardTimeout( hardTimeout );
+		assertTrue( flowPath.hardTimeout() == 20 );
+	}
+
+	@Test
 	public void testSetDataPath(){
 		FlowPath flowPath = new FlowPath();
 		DataPath dataPath = new DataPath();
@@ -189,7 +215,7 @@
 	@Test
 	public void testToString(){
 
-		assertEquals("[flowId=0x1234 installerId=installerId flowPathType=FP_TYPE_SHORTEST_PATH flowPathUserState=FP_USER_ADD flowPathFlags=[flags=] dataPath=[src=00:00:00:00:00:00:ca:fe/1 flowEntry=[flowEntryId=0x14 flowEntryMatch=[] flowEntryActions=[[type=ACTION_OUTPUT action=[port=23 maxLen=24]];[type=ACTION_OUTPUT action=[port=25 maxLen=26]];] dpid=00:00:00:00:00:00:be:ef flowEntryUserState=FE_USER_MODIFY flowEntrySwitchState=FE_SWITCH_UPDATE_IN_PROGRESS] dst=00:00:00:00:00:00:ba:be/2] flowEntryMatch=[] flowEntryActions=[[type=ACTION_OUTPUT action=[port=10 maxLen=11]];[type=ACTION_OUTPUT action=[port=12 maxLen=13]];]]", flowPath.toString());
+		assertEquals("[flowId=0x1234 installerId=installerId flowPathType=FP_TYPE_SHORTEST_PATH flowPathUserState=FP_USER_ADD flowPathFlags=[flags=] idleTimeout=5 hardTimeout=10 dataPath=[src=00:00:00:00:00:00:ca:fe/1 flowEntry=[flowEntryId=0x14 idleTimeout=0 hardTimeout=0 flowEntryMatch=[] flowEntryActions=[[type=ACTION_OUTPUT action=[port=23 maxLen=24]];[type=ACTION_OUTPUT action=[port=25 maxLen=26]];] dpid=00:00:00:00:00:00:be:ef flowEntryUserState=FE_USER_MODIFY flowEntrySwitchState=FE_SWITCH_UPDATE_IN_PROGRESS] dst=00:00:00:00:00:00:ba:be/2] flowEntryMatch=[] flowEntryActions=[[type=ACTION_OUTPUT action=[port=10 maxLen=11]];[type=ACTION_OUTPUT action=[port=12 maxLen=13]];]]", flowPath.toString());
 	}
 
 	@Test
diff --git a/start-onos-embedded.sh b/start-onos-embedded.sh
index 8688f69..6fc9362 100755
--- a/start-onos-embedded.sh
+++ b/start-onos-embedded.sh
@@ -17,7 +17,7 @@
 #JVM_OPTS="$JVM_OPTS -XX:+UseParallelGC -XX:+AggressiveOpts -XX:+UseFastAccessorMethods"
 JVM_OPTS="$JVM_OPTS -XX:+UseConcMarkSweepGC -XX:+UseAdaptiveSizePolicy -XX:+AggressiveOpts -XX:+UseFastAccessorMethods"
 JVM_OPTS="$JVM_OPTS -XX:MaxInlineSize=8192 -XX:FreqInlineSize=8192"
-JVM_OPTS="$JVM_OPTS -javaagent:lib/jamm-0.2.5.jar"
+JVM_OPTS="$JVM_OPTS -javaagent:$ONOS_HOME/lib/jamm-0.2.5.jar"
 JVM_OPTS="$JVM_OPTS -XX:CompileThreshold=1500 -XX:PreBlockSpin=8 \
 		-XX:+UseThreadPriorities \
 		-XX:ThreadPriorityPolicy=42 \
@@ -90,7 +90,7 @@
 </configuration>
 EOF_LOGBACK
 
-  # Run floodlight
+  # Run ONOS
   echo "Starting ONOS controller ..."
   echo 
 
diff --git a/start-onos-jacoco.sh b/start-onos-jacoco.sh
new file mode 100755
index 0000000..2e04216
--- /dev/null
+++ b/start-onos-jacoco.sh
@@ -0,0 +1,184 @@
+#!/bin/bash
+
+# Set paths
+if [ -z "${ONOS_HOME}" ]; then
+        ONOS_HOME=`dirname $0`
+fi
+
+## Because the script changes directory to $ONOS_HOME, we can set ONOS_LOGBACK and LOGDIR relative to $ONOS_HOME
+#ONOS_LOGBACK="${ONOS_HOME}/logback.`hostname`.xml"
+#LOGDIR=${ONOS_HOME}/onos-logs
+ONOS_LOGBACK="./logback.`hostname`.xml"
+LOGDIR=./onos-logs
+ONOS_LOG="${LOGDIR}/onos.`hostname`.log"
+PCAP_LOG="${LOGDIR}/onos.`hostname`.pcap"
+LOGS="$ONOS_LOG $PCAP_LOG"
+
+# Set JVM options
+JVM_OPTS=""
+## JaCoCo Code Coverage agent is enabled on the next line (this is the coverage variant of start-onos.sh)
+JVM_OPTS="$JVM_OPTS -javaagent:${ONOS_HOME}/lib/jacocoagent.jar=dumponexit=true,output=file,destfile=${LOGDIR}/jacoco.exec"
+JVM_OPTS="$JVM_OPTS -server -d64"
+#JVM_OPTS="$JVM_OPTS -Xmx2g -Xms2g -Xmn800m"
+JVM_OPTS="$JVM_OPTS -Xmx1g -Xms1g -Xmn800m"
+#JVM_OPTS="$JVM_OPTS -XX:+UseParallelGC -XX:+AggressiveOpts -XX:+UseFastAccessorMethods"
+JVM_OPTS="$JVM_OPTS -XX:+UseConcMarkSweepGC -XX:+UseAdaptiveSizePolicy -XX:+AggressiveOpts -XX:+UseFastAccessorMethods"
+JVM_OPTS="$JVM_OPTS -XX:MaxInlineSize=8192 -XX:FreqInlineSize=8192"
+JVM_OPTS="$JVM_OPTS -XX:CompileThreshold=1500 -XX:PreBlockSpin=8"
+JVM_OPTS="$JVM_OPTS -XX:OnError=crash-logger" ;# For dumping core
+#JVM_OPTS="$JVM_OPTS -Dpython.security.respectJavaAccessibility=false"
+JVM_OPTS="$JVM_OPTS -XX:CompileThreshold=1500 -XX:PreBlockSpin=8 \
+		-XX:+UseThreadPriorities \
+		-XX:ThreadPriorityPolicy=42 \
+		-XX:+UseCompressedOops \
+		-Dcom.sun.management.jmxremote.port=7189 \
+		-Dcom.sun.management.jmxremote.ssl=false \
+		-Dcom.sun.management.jmxremote.authenticate=false"
+JVM_OPTS="$JVM_OPTS -Dhazelcast.logging.type=slf4j"
+
+# Set ONOS core main class
+MAIN_CLASS="net.onrc.onos.ofcontroller.core.Main"
+
+if [ -z "${MVN}" ]; then
+    MVN="mvn -o"
+fi
+
+#<logger name="net.floodlightcontroller.linkdiscovery.internal" level="TRACE"/>
+#<appender-ref ref="STDOUT" />
+
+function lotate {
+    logfile=$1
+    nr_max=${2:-10}
+    if [ -f $logfile ]; then
+	for i in `seq $(expr $nr_max - 1) -1 1`; do
+	    if [ -f ${logfile}.${i} ]; then
+		mv -f ${logfile}.${i} ${logfile}.`expr $i + 1`
+	    fi
+	done
+	mv $logfile $logfile.1
+    fi
+}
+
+function start {
+  if [ ! -d ${LOGDIR} ]; then
+    mkdir -p ${LOGDIR}
+  fi
+  # Backup log files
+  for log in ${LOGS}; do
+    echo "rotate log: $log"
+    if [ -f ${log} ]; then
+      lotate ${log}
+    fi
+  done
+
+# Create a logback file if required
+  if [ ! -f ${ONOS_LOGBACK} ]; then
+    cat <<EOF_LOGBACK >${ONOS_LOGBACK}
+<configuration scan="true" debug="true">
+<appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
+<encoder>
+<pattern>%level [%logger:%thread] %msg%n</pattern>
+</encoder>
+</appender>
+
+<appender name="FILE" class="ch.qos.logback.core.FileAppender">
+<file>${ONOS_LOG}</file>
+<encoder>
+<pattern>%date %level [%thread] %logger{10} [%file:%line] %msg%n</pattern>
+</encoder>
+</appender>
+
+<logger name="org" level="WARN"/>
+<logger name="LogService" level="WARN"/> <!-- Restlet access logging -->
+<logger name="net.floodlightcontroller.logging" level="WARN"/>
+
+<root level="DEBUG">
+<appender-ref ref="FILE" />
+</root>
+</configuration>
+EOF_LOGBACK
+  fi
+
+  # Run ONOS
+  echo "Starting ONOS controller ..."
+  echo 
+
+  # XXX : MVN has to run at the project top dir 
+  echo $ONOS_HOME
+  cd ${ONOS_HOME}
+  pwd 
+  echo "${MVN} exec:exec -Dexec.executable=\"java\" -Dexec.args=\"${JVM_OPTS} -Dlogback.configurationFile=${ONOS_LOGBACK} -cp %classpath ${MAIN_CLASS} -cf ./conf/onos.properties\""
+
+  ${MVN} exec:exec -Dexec.executable="java" -Dexec.args="${JVM_OPTS} -Dlogback.configurationFile=${ONOS_LOGBACK} -cp %classpath ${MAIN_CLASS} -cf ./conf/onos.properties" > ${LOGDIR}/onos.`hostname`.stdout 2>${LOGDIR}/onos.`hostname`.stderr &
+
+  echo "Waiting for ONOS to start..."
+  COUNT=0
+  ESTATE=0
+  while [ "$COUNT" != "10" ]; do
+    echo -n "."
+    sleep 1
+#    COUNT=$((COUNT + 1))
+#    sleep $COUNT
+    n=`jps -l |grep "${MAIN_CLASS}" | wc -l`
+    if [ "$n" -ge "1" ]; then
+      echo ""
+      exit 0
+    fi
+  done
+  echo "Timed out"
+  exit 1
+
+#  echo "java ${JVM_OPTS} -Dlogback.configurationFile=${ONOS_LOGBACK} -jar ${ONOS_JAR} -cf ./onos.properties > /dev/null 2>&1 &"
+#  sudo -b /usr/sbin/tcpdump -n -i eth0 -s0 -w ${PCAP_LOG} 'tcp port 6633' > /dev/null  2>&1
+}
+
+function stop {
+  # Kill the existing processes
+  flpid=`jps -l |grep ${MAIN_CLASS} | awk '{print $1}'`
+  tdpid=`ps -edalf |grep tcpdump |grep ${PCAP_LOG} | awk '{print $4}'`
+  pids="$flpid $tdpid"
+  for p in ${pids}; do
+    if [ x$p != "x" ]; then
+      kill -TERM $p
+      echo "Killed existing process (pid: $p)"
+    fi
+  done
+}
+
+function check_db {
+   if [ -d "/tmp/cassandra.titan" ]; then
+      echo "Cassandra is running on local berkely db. Exitting"
+      exit
+   fi
+   n=`ps -edalf |grep java |grep apache-cassandra | wc -l`
+   if [ x$n == "x0" ]; then
+      echo "Cassandra is not running. Exitting"
+      exit
+   fi
+}
+
+case "$1" in
+  start)
+    stop
+    check_db
+    start 
+    ;;
+  startifdown)
+    n=`jps -l |grep "${MAIN_CLASS}" | wc -l`
+    if [ $n == 0 ]; then
+      start
+    else 
+      echo "$n instance of onos running"
+    fi
+    ;;
+  stop)
+    stop
+    ;;
+  status)
+    n=`jps -l |grep "${MAIN_CLASS}" | wc -l`
+    echo "$n instance of onos running"
+    ;;
+  *)
+    echo "Usage: $0 {start|stop|restart|status|startifdown}"
+    exit 1
+esac
diff --git a/start-onos.sh b/start-onos.sh
index 59913d8..1f67368 100755
--- a/start-onos.sh
+++ b/start-onos.sh
@@ -17,7 +17,7 @@
 # Set JVM options
 JVM_OPTS=""
 ## If you want JaCoCo Code Coverage reports... uncomment line below
-JVM_OPTS="$JVM_OPTS -javaagent:${ONOS_HOME}/lib/jacocoagent.jar=dumponexit=true,output=file,destfile=${LOGDIR}/jacoco.exec"
+#JVM_OPTS="$JVM_OPTS -javaagent:${ONOS_HOME}/lib/jacocoagent.jar=dumponexit=true,output=file,destfile=${LOGDIR}/jacoco.exec"
 JVM_OPTS="$JVM_OPTS -server -d64"
 #JVM_OPTS="$JVM_OPTS -Xmx2g -Xms2g -Xmn800m"
 JVM_OPTS="$JVM_OPTS -Xmx1g -Xms1g -Xmn800m"
@@ -99,7 +99,7 @@
 EOF_LOGBACK
   fi
 
-  # Run floodlight
+  # Run ONOS
   echo "Starting ONOS controller ..."
   echo 
 
diff --git a/web/add_flow.py b/web/add_flow.py
index c621c30..9690024 100755
--- a/web/add_flow.py
+++ b/web/add_flow.py
@@ -129,9 +129,12 @@
   my_dst_port = my_args[5]
 
   #
-  # Extract the "flowPathFlags", "match" and "action" arguments
+  # Extract the "flowPathFlags", "idleTimeout", "hardTimeout",
+  # "match" and "action" arguments.
   #
   flowPathFlags = 0L
+  idleTimeout = 0
+  hardTimeout = 0
   match = {}
   matchInPortEnabled = True		# NOTE: Enabled by default
   actions = []
@@ -155,6 +158,10 @@
 	flowPathFlags = flowPathFlags + 0x1
       if "KEEP_ONLY_FIRST_HOP_ENTRY" in arg2:
 	flowPathFlags = flowPathFlags + 0x2
+    elif arg1 == "idleTimeout":
+     idleTimeout = arg2
+    elif arg1 == "hardTimeout":
+     hardTimeout = arg2
     elif arg1 == "matchInPort":
       # Just mark whether inPort matching is enabled
       matchInPortEnabled = arg2 in ['True', 'true']
@@ -310,6 +317,8 @@
     'my_dst_dpid' : my_dst_dpid,
     'my_dst_port' : my_dst_port,
     'flowPathFlags' : flowPathFlags,
+    'idleTimeout' : idleTimeout,
+    'hardTimeout' : hardTimeout,
     'match' : match,
     'matchInPortEnabled' : matchInPortEnabled,
     'actions' : actions,
@@ -334,6 +343,8 @@
   my_flow_id = parsed_args['my_flow_id']
   my_installer_id = parsed_args['my_installer_id']
   myFlowPathFlags = parsed_args['flowPathFlags']
+  myIdleTimeout = parsed_args['idleTimeout']
+  myHardTimeout = parsed_args['hardTimeout']
   match = parsed_args['match']
   matchInPortEnabled = parsed_args['matchInPortEnabled']
   actions = parsed_args['actions']
@@ -356,6 +367,8 @@
   flow_path['flowPathType'] = 'FP_TYPE_EXPLICIT_PATH'
   flow_path['flowPathUserState'] = 'FP_USER_ADD'
   flow_path['flowPathFlags'] = flowPathFlags
+  flow_path['idleTimeout'] = myIdleTimeout
+  flow_path['hardTimeout'] = myHardTimeout
 
   if (len(match) > 0):
     flow_path['flowEntryMatch'] = copy.deepcopy(match)
@@ -506,6 +519,10 @@
   usage_msg = usage_msg + "            DISCARD_FIRST_HOP_ENTRY    : Discard the first-hop flow entry\n"
   usage_msg = usage_msg + "            KEEP_ONLY_FIRST_HOP_ENTRY  : Keep only the first-hop flow entry\n"
   usage_msg = usage_msg + "\n"
+  usage_msg = usage_msg + "    Timeouts (in seconds in the [0, 65535] interval):\n"
+  usage_msg = usage_msg + "        idleTimeout <idleTimeoutInSeconds> (default to 0: no timeout)\n"
+  usage_msg = usage_msg + "        hardTimeout <hardTimeoutInSeconds> (default to 0: no timeout)\n"
+  usage_msg = usage_msg + "\n"
   usage_msg = usage_msg + "    Match Conditions:\n"
   usage_msg = usage_msg + "        matchInPort <True|False> (default to True)\n"
   usage_msg = usage_msg + "        matchSrcMac <source MAC address>\n"
@@ -516,7 +533,7 @@
   usage_msg = usage_msg + "        matchSrcIPv4Net <source IPv4 network address>\n"
   usage_msg = usage_msg + "        matchDstIPv4Net <destination IPv4 network address>\n"
   usage_msg = usage_msg + "        matchIpProto <IP protocol>\n"
-  usage_msg = usage_msg + "        matchIpToS <IP ToS (DSCP field, 6 bits)>\n"
+  usage_msg = usage_msg + "        matchIpToS <IP ToS> (DSCP field, 6 bits)\n"
   usage_msg = usage_msg + "        matchSrcTcpUdpPort <source TCP/UDP port>\n"
   usage_msg = usage_msg + "        matchDstTcpUdpPort <destination TCP/UDP port>\n"
   usage_msg = usage_msg + "\n"
@@ -529,7 +546,7 @@
   usage_msg = usage_msg + "        actionSetEthernetDstAddr <destination MAC address>\n"
   usage_msg = usage_msg + "        actionSetIPv4SrcAddr <source IPv4 address>\n"
   usage_msg = usage_msg + "        actionSetIPv4DstAddr <destination IPv4 address>\n"
-  usage_msg = usage_msg + "        actionSetIpToS <IP ToS (DSCP field, 6 bits)>\n"
+  usage_msg = usage_msg + "        actionSetIpToS <IP ToS> (DSCP field, 6 bits)\n"
   usage_msg = usage_msg + "        actionSetTcpUdpSrcPort <source TCP/UDP port>\n"
   usage_msg = usage_msg + "        actionSetTcpUdpDstPort <destination TCP/UDP port>\n"
   usage_msg = usage_msg + "    Actions (not implemented yet):\n"
diff --git a/web/get_flow.py b/web/get_flow.py
index 72fbd4a..94b9a61 100755
--- a/web/get_flow.py
+++ b/web/get_flow.py
@@ -164,6 +164,8 @@
   flowPathType = parsedResult['flowPathType']
   flowPathUserState = parsedResult['flowPathUserState']
   flowPathFlags = parsedResult['flowPathFlags']['flags']
+  idleTimeout = parsedResult['idleTimeout']
+  hardTimeout = parsedResult['hardTimeout']
   srcSwitch = parsedResult['dataPath']['srcPort']['dpid']['value']
   srcPort = parsedResult['dataPath']['srcPort']['port']['value']
   dstSwitch = parsedResult['dataPath']['dstPort']['dpid']['value']
@@ -181,7 +183,7 @@
       flowPathFlagsStr += ","
     flowPathFlagsStr += "KEEP_ONLY_FIRST_HOP_ENTRY"
 
-  print "FlowPath: (flowId = %s installerId = %s flowPathType = %s flowPathUserState = %s flowPathFlags = 0x%x(%s) src = %s/%s dst = %s/%s)" % (flowId, installerId, flowPathType, flowPathUserState, flowPathFlags, flowPathFlagsStr, srcSwitch, srcPort, dstSwitch, dstPort)
+  print "FlowPath: (flowId = %s installerId = %s flowPathType = %s flowPathUserState = %s flowPathFlags = 0x%x(%s) src = %s/%s dst = %s/%s idleTimeout = %s hardTimeout = %s)" % (flowId, installerId, flowPathType, flowPathUserState, flowPathFlags, flowPathFlagsStr, srcSwitch, srcPort, dstSwitch, dstPort, idleTimeout, hardTimeout)
 
   #
   # Print the common match conditions
@@ -205,13 +207,15 @@
   #
   for f in parsedResult['dataPath']['flowEntries']:
     flowEntryId = f['flowEntryId']
+    idleTimeout = f['idleTimeout']
+    hardTimeout = f['hardTimeout']
     dpid = f['dpid']['value']
     userState = f['flowEntryUserState']
     switchState = f['flowEntrySwitchState']
     match = f['flowEntryMatch'];
     actions = f['flowEntryActions']['actions']
 
-    print "  FlowEntry: (%s, %s, %s, %s)" % (flowEntryId, dpid, userState, switchState)
+    print "  FlowEntry: (%s, %s, %s, %s, idleTimeout = %s, hardTimeout = %s)" % (flowEntryId, dpid, userState, switchState, idleTimeout, hardTimeout)
 
     #
     # Print the match conditions