Merge pull request #488 from n-shiota/debug
Removed one SuppressWarnings annotation.
diff --git a/.gitignore b/.gitignore
index dd8d359..7e971f7 100644
--- a/.gitignore
+++ b/.gitignore
@@ -3,6 +3,7 @@
.classpath
.project
.pydevproject
+.settings
target
onos-logs
onos.log
diff --git a/LICENSE.txt b/LICENSE.txt
new file mode 100644
index 0000000..e06d208
--- /dev/null
+++ b/LICENSE.txt
@@ -0,0 +1,202 @@
+Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
diff --git a/NOTICE.txt b/NOTICE.txt
new file mode 100644
index 0000000..5be37b5
--- /dev/null
+++ b/NOTICE.txt
@@ -0,0 +1,12 @@
+INDEMNITY AND DISCLAIMER OF WARRANTIES
+
+SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
+BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
+CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/PARTNERS.txt b/PARTNERS.txt
new file mode 100644
index 0000000..df868d6
--- /dev/null
+++ b/PARTNERS.txt
@@ -0,0 +1 @@
+Add special notes for partnership relationship.
diff --git a/conf/hazelcast.xml b/conf/hazelcast.xml
index 11bef59..84c7354 100644
--- a/conf/hazelcast.xml
+++ b/conf/hazelcast.xml
@@ -101,5 +101,6 @@
<properties>
<property name="hazelcast.logging.type">slf4j</property>
+ <property name="hazelcast.version.check.enabled">false</property>
</properties>
</hazelcast>
diff --git a/conf/onos-embedded.properties b/conf/onos-embedded.properties
index 8ec84f4..8ae143e 100644
--- a/conf/onos-embedded.properties
+++ b/conf/onos-embedded.properties
@@ -1,9 +1,7 @@
-floodlight.modules = net.floodlightcontroller.storage.memory.MemoryStorageSource,\
-net.floodlightcontroller.core.FloodlightProvider,\
+floodlight.modules = net.floodlightcontroller.core.FloodlightProvider,\
net.floodlightcontroller.threadpool.ThreadPool,\
net.onrc.onos.ofcontroller.floodlightlistener.NetworkGraphPublisher, \
net.floodlightcontroller.devicemanager.internal.DeviceManagerImpl,\
-net.floodlightcontroller.counter.CounterStore,\
net.floodlightcontroller.ui.web.StaticWebRoutable,\
net.onrc.onos.datagrid.HazelcastDatagrid,\
net.onrc.onos.ofcontroller.flowmanager.FlowManager,\
diff --git a/conf/onos.properties b/conf/onos.properties
index 174df20..b748b40 100644
--- a/conf/onos.properties
+++ b/conf/onos.properties
@@ -1,9 +1,7 @@
-floodlight.modules = net.floodlightcontroller.storage.memory.MemoryStorageSource,\
-net.floodlightcontroller.core.FloodlightProvider,\
+floodlight.modules = net.floodlightcontroller.core.FloodlightProvider,\
net.floodlightcontroller.threadpool.ThreadPool,\
net.onrc.onos.ofcontroller.floodlightlistener.NetworkGraphPublisher, \
net.floodlightcontroller.devicemanager.internal.DeviceManagerImpl,\
-net.floodlightcontroller.counter.CounterStore,\
net.floodlightcontroller.ui.web.StaticWebRoutable,\
net.onrc.onos.datagrid.HazelcastDatagrid,\
net.onrc.onos.ofcontroller.flowmanager.FlowManager,\
diff --git a/perf-scripts/flow-sync-perf.py b/perf-scripts/flow-sync-perf.py
index d552404..f0af050 100755
--- a/perf-scripts/flow-sync-perf.py
+++ b/perf-scripts/flow-sync-perf.py
@@ -31,6 +31,8 @@
import pexpect
ONOS_HOME = '..'
+ONOS_LOG = '%s/onos-logs/onos.%s.log' % ( ONOS_HOME, check_output( 'hostname').strip() )
+print "ONOS Log File:", ONOS_LOG
# Verify that tcpkill is installed
if not Popen( 'which tcpkill', stdout=PIPE, shell=True).communicate():
@@ -42,7 +44,7 @@
print "Doing nothing with %d flows..." % n
def addFakeFlows(n):
- print "Adding %d random flows..." % n
+ print "Adding %d random flows to switch..." % n
for i in range( 1, (n+1) ):
a = i / (256*256) % 256
b = i / 256 % 256
@@ -56,19 +58,20 @@
# ----------------- Utility Functions -------------------------
-def disconnect():
- tail = Popen( "exec tail -0f ../onos-logs/onos.onosdev1.log", stdout=PIPE, shell=True )
- tcp = Popen( 'exec tcpkill -i lo -9 port 6633 > /dev/null 2>&1', shell=True )
- tcp = Popen( 'exec tcpkill -i lo -9 port 6633 > /tmp/tcp 2>&1', shell=True )
- sleep(1)
- tcp.kill()
- results = waitForResult(tail)
- tail.kill()
- return results
+def wait(time, msg=None):
+ if msg:
+ print msg,
+ for i in range(time):
+ sys.stdout.write('.')
+ sys.stdout.flush()
+ sleep(1)
+ print ". done"
def startNet(net):
- tail = pexpect.spawn( 'tail -0f %s/onos-logs/onos.onosdev1.log' % ONOS_HOME )
+ tail = pexpect.spawn( 'tail -0f %s' % ONOS_LOG )
+ sleep(1)
net.start()
+ print "Waiting for ONOS to detect the switch..."
index = tail.expect(['Sync time \(ms\)', pexpect.EOF, pexpect.TIMEOUT])
if index >= 1:
print '* ONOS not started'
@@ -80,43 +83,71 @@
return check_output( 'ovs-ofctl dump-flows s1', shell=True )
def addFlowsToONOS(n):
+ print "Adding %d flows to ONOS" % n,
call( './generate_flows.py 1 %d > /tmp/flows.txt' % n, shell=True )
- call( '%s/web/add_flow.py -m onos -f /tmp/flows.txt' % ONOS_HOME, shell=True )
+ #call( '%s/web/add_flow.py -m onos -f /tmp/flows.txt' % ONOS_HOME, shell=True )
+ p = Popen( '%s/web/add_flow.py -m onos -f /tmp/flows.txt' % ONOS_HOME, shell=True )
+ while p.poll() is None:
+ sys.stdout.write('.')
+ sys.stdout.flush()
+ sleep(1)
+ print ". done\nWaiting for flow entries to be added to switch",
while True:
output = check_output( 'ovs-ofctl dump-flows s1', shell=True )
lines = len(output.split('\n'))
if lines >= (n+2):
break
+ sys.stdout.write('.')
+ sys.stdout.flush()
sleep(1)
- count = 0
+ print ". done\nWaiting for flow entries to be visible in network graph",
while True:
output = pexpect.spawn( '%s/web/get_flow.py all' % ONOS_HOME )
+ count = 0
while count < n:
if output.expect(['FlowEntry', pexpect.EOF], timeout=2000) == 1:
break
count += 1
+ print '. done'
return
+ sys.stdout.write('.')
+ sys.stdout.flush()
sleep(5)
-def removeFlowsFromONOS():
- call( '%s/web/delete_flow.py all' % ONOS_HOME, shell=True )
- while True:
- output = check_output( 'ovs-ofctl dump-flows s1', shell=True )
- lines = len(output.split('\n'))
- if lines == 2:
- break
+def removeFlowsFromONOS(checkSwitch=True):
+ print "Removing all flows from ONOS",
+ #call( '%s/web/delete_flow.py all' % ONOS_HOME, shell=True )
+ p = Popen( '%s/web/delete_flow.py all' % ONOS_HOME, shell=True )
+ while p.poll() is None:
+ sys.stdout.write('.')
+ sys.stdout.flush()
sleep(1)
+ print ". done"
+ if checkSwitch:
+ print "Waiting for flow entries to be removed from switch",
+ while True:
+ output = check_output( 'ovs-ofctl dump-flows s1', shell=True )
+ lines = len(output.split('\n'))
+ if lines == 2:
+ break
+ sys.stdout.write('.')
+ sys.stdout.flush()
+ sleep(1)
+ print ". done"
+ print "Waiting for flow entries to be removed from network graph",
while True:
output = pexpect.spawn( '%s/web/get_flow.py all' % ONOS_HOME )
if output.expect(['FlowEntry', pexpect.EOF], timeout=2000) == 1:
break
+ sys.stdout.write('.')
+ sys.stdout.flush()
sleep(5)
-
+ print '. done'
# ----------------- Running the test and output -------------------------
def test(i, fn):
# Start tailing the onos log
- tail = pexpect.spawn( "tail -0f %s/onos-logs/onos.onosdev1.log" % ONOS_HOME )
+ tail = pexpect.spawn( "tail -0f %s" % ONOS_LOG )
# disconnect the switch from the controller using tcpkill
tcp = Popen( 'exec tcpkill -i lo -9 port 6633 > /dev/null 2>&1', shell=True )
# wait until the switch has been disconnected
@@ -129,9 +160,9 @@
tcp.terminate()
tail.expect('Sync time \(ms\):', timeout=6000)
tail.expect('([\d.]+,?)+\s')
- print tail.match.group(0)
+ print "* Results:", tail.match.group(0)
tail.terminate()
- sleep(3)
+ wait(3, "Waiting for 3 seconds between tests")
return tail.match.group(0).strip().split(',')
def initResults(files):
@@ -144,7 +175,6 @@
def outputResults(filename, n, results):
results.insert(0, n)
- print results
with open(filename, 'a') as csvfile:
writer = csv.writer(csvfile)
writer.writerow(results)
@@ -154,12 +184,13 @@
'delete': os.path.join(resultDir, 'delete.csv'),
'sync': os.path.join(resultDir, 'sync.csv') }
initResults(fileMap)
+ removeFlowsFromONOS(checkSwitch=False) # clear ONOS before starting
# start Mininet
topo = SingleSwitchTopo()
net = Mininet(topo=topo, controller=RemoteController)
+ print "Starting Mininet"
startNet(net)
- removeFlowsFromONOS() # clear ONOS before starting
- sleep(30) # let ONOS "warm-up"
+ wait(30, "Give ONOS 30 seconds to warm up") # let ONOS "warm-up"
for i in tests:
addFlowsToONOS(i)
outputResults(fileMap['sync'], i, test(i, doNothing))
@@ -168,18 +199,6 @@
outputResults(fileMap['add'], i, test(i, addFakeFlows)) # test needs empty DB
net.stop()
-def waitForResult(tail):
- while True:
- line = tail.stdout.readline()
- index = line.find('n.o.o.o.f.FlowSynchronizer')
- if index > 0:
- print line,
- index = line.find('Sync time (ms):')
- if index > 0:
- line = line[index + 15:].strip()
- line = line.replace('-->', '')
- return line.split() # graph, switch, compare, total
-
if __name__ == '__main__':
setLogLevel( 'output' )
resultDir = strftime( '%Y%m%d-%H%M%S' )
@@ -189,6 +208,3 @@
tests = [1, 10, 100, 1000, 10000]
runPerf( resultDir, tests )
-exit()
-
-# ---------------------------
diff --git a/src/main/java/net/floodlightcontroller/core/FloodlightProvider.java b/src/main/java/net/floodlightcontroller/core/FloodlightProvider.java
index 67fcabb..41676c4 100644
--- a/src/main/java/net/floodlightcontroller/core/FloodlightProvider.java
+++ b/src/main/java/net/floodlightcontroller/core/FloodlightProvider.java
@@ -10,9 +10,7 @@
import net.floodlightcontroller.core.module.FloodlightModuleException;
import net.floodlightcontroller.core.module.IFloodlightModule;
import net.floodlightcontroller.core.module.IFloodlightService;
-import net.floodlightcontroller.counter.ICounterStoreService;
import net.floodlightcontroller.restserver.IRestApiService;
-import net.floodlightcontroller.storage.IStorageSourceService;
import net.floodlightcontroller.threadpool.IThreadPoolService;
import net.onrc.onos.ofcontroller.linkdiscovery.ILinkDiscoveryService;
import net.onrc.onos.registry.controller.IControllerRegistryService;
@@ -45,10 +43,7 @@
public Collection<Class<? extends IFloodlightService>> getModuleDependencies() {
Collection<Class<? extends IFloodlightService>> dependencies =
new ArrayList<Class<? extends IFloodlightService>>(4);
- dependencies.add(IStorageSourceService.class);
-
dependencies.add(IRestApiService.class);
- dependencies.add(ICounterStoreService.class);
dependencies.add(IThreadPoolService.class);
// Following added by ONOS
dependencies.add(IControllerRegistryService.class);
@@ -59,11 +54,6 @@
@Override
public void init(FloodlightModuleContext context) throws FloodlightModuleException {
- controller.setStorageSourceService(
- context.getServiceImpl(IStorageSourceService.class));
-
- controller.setCounterStore(
- context.getServiceImpl(ICounterStoreService.class));
controller.setRestApiService(
context.getServiceImpl(IRestApiService.class));
controller.setThreadPoolService(
diff --git a/src/main/java/net/floodlightcontroller/core/IFloodlightProviderService.java b/src/main/java/net/floodlightcontroller/core/IFloodlightProviderService.java
index 22ff029..436af3d 100644
--- a/src/main/java/net/floodlightcontroller/core/IFloodlightProviderService.java
+++ b/src/main/java/net/floodlightcontroller/core/IFloodlightProviderService.java
@@ -83,11 +83,6 @@
public Map<Long, IOFSwitch> getSwitches();
/**
- * Get the current role of the controller
- */
- public Role getRole();
-
- /**
* Get the current mapping of controller IDs to their IP addresses
* Returns a copy of the current mapping.
* @see IHAListener
@@ -100,11 +95,6 @@
public String getControllerId();
/**
- * Set the role of the controller
- */
- public void setRole(Role role);
-
- /**
* Add a switch listener
* @param listener The module that wants to listen for events
*/
@@ -117,18 +107,6 @@
public void removeOFSwitchListener(IOFSwitchListener listener);
/**
- * Adds a listener for HA role events
- * @param listener The module that wants to listen for events
- */
- public void addHAListener(IHAListener listener);
-
- /**
- * Removes a listener for HA role events
- * @param listener The module that no longer wants to listen for events
- */
- public void removeHAListener(IHAListener listener);
-
- /**
* Terminate the process
*/
public void terminate();
@@ -170,28 +148,6 @@
* Run the main I/O loop of the Controller.
*/
public void run();
-
- /**
- * Add an info provider of a particular type
- * @param type
- * @param provider
- */
- public void addInfoProvider(String type, IInfoProvider provider);
-
- /**
- * Remove an info provider of a particular type
- * @param type
- * @param provider
- */
- public void removeInfoProvider(String type, IInfoProvider provider);
-
- /**
- * Return information of a particular type (for rest services)
- * @param type
- * @return
- */
- public Map<String, Object> getControllerInfo(String type);
-
/**
* Return the controller start time in milliseconds
diff --git a/src/main/java/net/floodlightcontroller/core/IHAListener.java b/src/main/java/net/floodlightcontroller/core/IHAListener.java
deleted file mode 100644
index c76f46a..0000000
--- a/src/main/java/net/floodlightcontroller/core/IHAListener.java
+++ /dev/null
@@ -1,30 +0,0 @@
-package net.floodlightcontroller.core;
-
-import java.util.Map;
-
-import net.floodlightcontroller.core.IFloodlightProviderService.Role;
-
-public interface IHAListener {
- /**
- * Gets called when the controller changes role (i.e. Master -> Slave).
- * Note that oldRole CAN be null.
- * @param oldRole The controller's old role
- * @param newRole The controller's new role
- */
- public void roleChanged(Role oldRole, Role newRole);
-
- /**
- * Gets called when the IP addresses of the controller nodes in the
- * controller cluster change. All parameters map controller ID to
- * the controller's IP.
- *
- * @param curControllerNodeIPs The current mapping of controller IDs to IP
- * @param addedControllerNodeIPs These IPs were added since the last update
- * @param removedControllerNodeIPs These IPs were removed since the last update
- */
- public void controllerNodeIPsChanged(
- Map<String, String> curControllerNodeIPs,
- Map<String, String> addedControllerNodeIPs,
- Map<String, String> removedControllerNodeIPs
- );
-}
diff --git a/src/main/java/net/floodlightcontroller/core/IInfoProvider.java b/src/main/java/net/floodlightcontroller/core/IInfoProvider.java
deleted file mode 100644
index 8bfae0d..0000000
--- a/src/main/java/net/floodlightcontroller/core/IInfoProvider.java
+++ /dev/null
@@ -1,34 +0,0 @@
-/**
-* Copyright 2011, Big Switch Networks, Inc.
-*
-* Licensed under the Apache License, Version 2.0 (the "License"); you may
-* not use this file except in compliance with the License. You may obtain
-* a copy of the License at
-*
-* http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-* License for the specific language governing permissions and limitations
-* under the License.
-**/
-
-package net.floodlightcontroller.core;
-
-import java.util.Map;
-
-/**
- *
- *
- * @author Shudong Zhou
- */
-public interface IInfoProvider {
-
- /**
- * Called when rest API requests information of a particular type
- * @param type
- * @return
- */
- public Map<String, Object> getInfo(String type);
-}
diff --git a/src/main/java/net/floodlightcontroller/core/internal/Controller.java b/src/main/java/net/floodlightcontroller/core/internal/Controller.java
index 6b16964..39bdf3c 100644
--- a/src/main/java/net/floodlightcontroller/core/internal/Controller.java
+++ b/src/main/java/net/floodlightcontroller/core/internal/Controller.java
@@ -19,19 +19,14 @@
import java.io.FileInputStream;
import java.io.IOException;
-import java.net.InetAddress;
import java.net.InetSocketAddress;
-import java.net.SocketAddress;
import java.net.UnknownHostException;
import java.nio.channels.ClosedChannelException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
-import java.util.Date;
import java.util.HashMap;
import java.util.HashSet;
-import java.util.Iterator;
-import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
@@ -51,8 +46,6 @@
import net.floodlightcontroller.core.FloodlightContext;
import net.floodlightcontroller.core.IFloodlightProviderService;
-import net.floodlightcontroller.core.IHAListener;
-import net.floodlightcontroller.core.IInfoProvider;
import net.floodlightcontroller.core.IListener.Command;
import net.floodlightcontroller.core.IOFMessageListener;
import net.floodlightcontroller.core.IOFSwitch;
@@ -64,14 +57,8 @@
import net.floodlightcontroller.core.internal.OFChannelState.HandshakeState;
import net.floodlightcontroller.core.util.ListenerDispatcher;
import net.floodlightcontroller.core.web.CoreWebRoutable;
-import net.floodlightcontroller.counter.ICounterStoreService;
import net.floodlightcontroller.packet.Ethernet;
import net.floodlightcontroller.restserver.IRestApiService;
-import net.floodlightcontroller.storage.IResultSet;
-import net.floodlightcontroller.storage.IStorageSourceListener;
-import net.floodlightcontroller.storage.IStorageSourceService;
-import net.floodlightcontroller.storage.OperatorPredicate;
-import net.floodlightcontroller.storage.StorageException;
import net.floodlightcontroller.threadpool.IThreadPoolService;
import net.onrc.onos.ofcontroller.core.IOFSwitchPortListener;
import net.onrc.onos.ofcontroller.linkdiscovery.ILinkDiscoveryService;
@@ -82,7 +69,6 @@
import org.jboss.netty.bootstrap.ServerBootstrap;
import org.jboss.netty.buffer.ChannelBuffer;
import org.jboss.netty.buffer.ChannelBuffers;
-import org.jboss.netty.channel.Channel;
import org.jboss.netty.channel.ChannelHandlerContext;
import org.jboss.netty.channel.ChannelPipelineFactory;
import org.jboss.netty.channel.ChannelStateEvent;
@@ -128,8 +114,6 @@
import org.openflow.protocol.vendor.OFBasicVendorId;
import org.openflow.protocol.vendor.OFVendorId;
import org.openflow.util.HexString;
-import org.openflow.util.U16;
-import org.openflow.util.U32;
import org.openflow.vendor.nicira.OFNiciraVendorData;
import org.openflow.vendor.nicira.OFRoleReplyVendorData;
import org.openflow.vendor.nicira.OFRoleRequestVendorData;
@@ -149,8 +133,7 @@
* - Additional DEBUG logs
* - Try using hostname as controller ID, when ID was not explicitly given.
*/
-public class Controller implements IFloodlightProviderService,
- IStorageSourceListener {
+public class Controller implements IFloodlightProviderService {
protected final static Logger log = LoggerFactory.getLogger(Controller.class);
@@ -177,14 +160,10 @@
protected HashMap<String, String> controllerNodeIPsCache;
protected Set<IOFSwitchListener> switchListeners;
- protected Set<IHAListener> haListeners;
- protected Map<String, List<IInfoProvider>> providerMap;
protected BlockingQueue<IUpdate> updates;
// Module dependencies
protected IRestApiService restApi;
- protected ICounterStoreService counterStore = null;
- protected IStorageSourceService storageSource;
protected IThreadPoolService threadPool;
protected IControllerRegistryService registryService;
@@ -209,47 +188,6 @@
// Flag to always flush flow table on switch reconnect (HA or otherwise)
protected boolean alwaysClearFlowsOnSwAdd = false;
- // Storage table names
- protected static final String CONTROLLER_TABLE_NAME = "controller_controller";
- protected static final String CONTROLLER_ID = "id";
-
- protected static final String SWITCH_TABLE_NAME = "controller_switch";
- protected static final String SWITCH_DATAPATH_ID = "dpid";
- protected static final String SWITCH_SOCKET_ADDRESS = "socket_address";
- protected static final String SWITCH_IP = "ip";
- protected static final String SWITCH_CONTROLLER_ID = "controller_id";
- protected static final String SWITCH_ACTIVE = "active";
- protected static final String SWITCH_CONNECTED_SINCE = "connected_since";
- protected static final String SWITCH_CAPABILITIES = "capabilities";
- protected static final String SWITCH_BUFFERS = "buffers";
- protected static final String SWITCH_TABLES = "tables";
- protected static final String SWITCH_ACTIONS = "actions";
-
- protected static final String SWITCH_CONFIG_TABLE_NAME = "controller_switchconfig";
- protected static final String SWITCH_CONFIG_CORE_SWITCH = "core_switch";
-
- protected static final String PORT_TABLE_NAME = "controller_port";
- protected static final String PORT_ID = "id";
- protected static final String PORT_SWITCH = "switch_id";
- protected static final String PORT_NUMBER = "number";
- protected static final String PORT_HARDWARE_ADDRESS = "hardware_address";
- protected static final String PORT_NAME = "name";
- protected static final String PORT_CONFIG = "config";
- protected static final String PORT_STATE = "state";
- protected static final String PORT_CURRENT_FEATURES = "current_features";
- protected static final String PORT_ADVERTISED_FEATURES = "advertised_features";
- protected static final String PORT_SUPPORTED_FEATURES = "supported_features";
- protected static final String PORT_PEER_FEATURES = "peer_features";
-
- protected static final String CONTROLLER_INTERFACE_TABLE_NAME = "controller_controllerinterface";
- protected static final String CONTROLLER_INTERFACE_ID = "id";
- protected static final String CONTROLLER_INTERFACE_CONTROLLER_ID = "controller_id";
- protected static final String CONTROLLER_INTERFACE_TYPE = "type";
- protected static final String CONTROLLER_INTERFACE_NUMBER = "number";
- protected static final String CONTROLLER_INTERFACE_DISCOVERED_IP = "discovered_ip";
-
-
-
// Perf. related configuration
protected static final int SEND_BUFFER_SIZE = 4 * 1024 * 1024;
protected static final int BATCH_MAX_SIZE = 100;
@@ -315,84 +253,9 @@
}
}
- /**
- * Update message indicating controller's role has changed
- */
- protected class HARoleUpdate implements IUpdate {
- public Role oldRole;
- public Role newRole;
- public HARoleUpdate(Role newRole, Role oldRole) {
- this.oldRole = oldRole;
- this.newRole = newRole;
- }
- public void dispatch() {
- // Make sure that old and new roles are different.
- if (oldRole == newRole) {
- if (log.isTraceEnabled()) {
- log.trace("HA role update ignored as the old and " +
- "new roles are the same. newRole = {}" +
- "oldRole = {}", newRole, oldRole);
- }
- return;
- }
- if (log.isTraceEnabled()) {
- log.trace("Dispatching HA Role update newRole = {}, oldRole = {}",
- newRole, oldRole);
- }
- if (haListeners != null) {
- for (IHAListener listener : haListeners) {
- listener.roleChanged(oldRole, newRole);
- }
- }
- }
- }
-
- /**
- * Update message indicating
- * IPs of controllers in controller cluster have changed.
- */
- protected class HAControllerNodeIPUpdate implements IUpdate {
- public Map<String,String> curControllerNodeIPs;
- public Map<String,String> addedControllerNodeIPs;
- public Map<String,String> removedControllerNodeIPs;
- public HAControllerNodeIPUpdate(
- HashMap<String,String> curControllerNodeIPs,
- HashMap<String,String> addedControllerNodeIPs,
- HashMap<String,String> removedControllerNodeIPs) {
- this.curControllerNodeIPs = curControllerNodeIPs;
- this.addedControllerNodeIPs = addedControllerNodeIPs;
- this.removedControllerNodeIPs = removedControllerNodeIPs;
- }
- public void dispatch() {
- if (log.isTraceEnabled()) {
- log.trace("Dispatching HA Controller Node IP update "
- + "curIPs = {}, addedIPs = {}, removedIPs = {}",
- new Object[] { curControllerNodeIPs, addedControllerNodeIPs,
- removedControllerNodeIPs }
- );
- }
- if (haListeners != null) {
- for (IHAListener listener: haListeners) {
- listener.controllerNodeIPsChanged(curControllerNodeIPs,
- addedControllerNodeIPs, removedControllerNodeIPs);
- }
- }
- }
- }
-
// ***************
// Getters/Setters
- // ***************
-
- public void setStorageSourceService(IStorageSourceService storageSource) {
- this.storageSource = storageSource;
- }
-
- public void setCounterStore(ICounterStoreService counterStore) {
- this.counterStore = counterStore;
- }
-
-
+ // ***************
public void setRestApiService(IRestApiService restApi) {
this.restApi = restApi;
@@ -409,47 +272,6 @@
public void setLinkDiscoveryService(ILinkDiscoveryService linkDiscovery) {
this.linkDiscovery = linkDiscovery;
}
-
- @Override
- public Role getRole() {
- synchronized(roleChanger) {
- return role;
- }
- }
-
- @Override
- public void setRole(Role role) {
- if (role == null) throw new NullPointerException("Role can not be null.");
- if (role == Role.MASTER && this.role == Role.SLAVE) {
- // Reset db state to Inactive for all switches.
- updateAllInactiveSwitchInfo();
- }
-
- // Need to synchronize to ensure a reliable ordering on role request
- // messages send and to ensure the list of connected switches is stable
- // RoleChanger will handle the actual sending of the message and
- // timeout handling
- // @see RoleChanger
- synchronized(roleChanger) {
- if (role.equals(this.role)) {
- log.debug("Ignoring role change: role is already {}", role);
- return;
- }
-
- Role oldRole = this.role;
- this.role = role;
-
- log.debug("Submitting role change request to role {}", role);
- roleChanger.submitRequest(connectedSwitches, role);
-
- // Enqueue an update for our listeners.
- try {
- this.updates.put(new HARoleUpdate(role, oldRole));
- } catch (InterruptedException e) {
- log.error("Failure adding update to queue", e);
- }
- }
- }
public void publishUpdate(IUpdate update) {
try {
@@ -603,10 +425,6 @@
explanation="Could not parse a message from the switch",
recommendation=LogMessageDoc.CHECK_SWITCH),
@LogMessageDoc(level="ERROR",
- message="Terminating controller due to storage exception",
- explanation=ERROR_DATABASE,
- recommendation=LogMessageDoc.CHECK_CONTROLLER),
- @LogMessageDoc(level="ERROR",
message="Could not process message: queue full",
explanation="OpenFlow messages are arriving faster than " +
" the controller can process them.",
@@ -642,10 +460,6 @@
" due to message parse failure",
e.getCause());
ctx.getChannel().close();
- } else if (e.getCause() instanceof StorageException) {
- log.error("Terminating controller due to storage exception",
- e.getCause());
- terminate();
} else if (e.getCause() instanceof RejectedExecutionException) {
log.warn("Could not process message: queue full");
} else {
@@ -718,40 +532,8 @@
description);
sw.setSwitchProperties(description);
data = null;
-
- // At this time, also set other switch properties from storage
- boolean is_core_switch = false;
- IResultSet resultSet = null;
- try {
- String swid = sw.getStringId();
- resultSet =
- storageSource.getRow(SWITCH_CONFIG_TABLE_NAME, swid);
- for (Iterator<IResultSet> it =
- resultSet.iterator(); it.hasNext();) {
- // In case of multiple rows, use the status
- // in last row?
- Map<String, Object> row = it.next().getRow();
- if (row.containsKey(SWITCH_CONFIG_CORE_SWITCH)) {
- if (log.isDebugEnabled()) {
- log.debug("Reading SWITCH_IS_CORE_SWITCH " +
- "config for switch={}, is-core={}",
- sw, row.get(SWITCH_CONFIG_CORE_SWITCH));
- }
- String ics =
- (String)row.get(SWITCH_CONFIG_CORE_SWITCH);
- is_core_switch = ics.equals("true");
- }
- }
- }
- finally {
- if (resultSet != null)
- resultSet.close();
- }
- if (is_core_switch) {
- sw.setAttribute(IOFSwitch.SWITCH_IS_CORE_SWITCH,
- true);
- }
}
+
sw.removeAttribute(IOFSwitch.SWITCH_DESCRIPTION_FUTURE);
state.hasDescription = true;
checkSwitchReady();
@@ -964,7 +746,7 @@
// Some switches don't seem to update us with port
// status messages while in slave role.
- readSwitchPortStateFromStorage(sw);
+ //readSwitchPortStateFromStorage(sw);
// Only add the switch to the active switch list if
// we're not in the slave role. Note that if the role
@@ -1082,7 +864,7 @@
// return results to rest api caller
sw.deliverOFFeaturesReply(m);
// update database */
- updateActiveSwitchInfo(sw);
+ //updateActiveSwitchInfo(sw);
}
break;
case GET_CONFIG_REPLY:
@@ -1297,8 +1079,8 @@
log.error("Failure adding update to queue", e);
}
}
- if (updateStorage)
- updatePortInfo(sw, port);
+ //if (updateStorage)
+ //updatePortInfo(sw, port);
log.debug("Port #{} modified for {}", portNumber, sw);
} else if (m.getReason() == (byte)OFPortReason.OFPPR_ADD.ordinal()) {
// XXX Workaround to prevent race condition where a link is detected
@@ -1314,8 +1096,8 @@
} catch (InterruptedException e) {
log.error("Failure adding update to queue", e);
}
- if (updateStorage)
- updatePortInfo(sw, port);
+ //if (updateStorage)
+ //updatePortInfo(sw, port);
log.debug("Port #{} added for {}", portNumber, sw);
} else if (m.getReason() ==
(byte)OFPortReason.OFPPR_DELETE.ordinal()) {
@@ -1326,8 +1108,8 @@
} catch (InterruptedException e) {
log.error("Failure adding update to queue", e);
}
- if (updateStorage)
- removePortInfo(sw, portNumber);
+ //if (updateStorage)
+ //removePortInfo(sw, portNumber);
log.debug("Port #{} deleted for {}", portNumber, sw);
}
SwitchUpdate update = new SwitchUpdate(sw, SwitchUpdateType.PORTCHANGED);
@@ -1414,7 +1196,6 @@
eth = new Ethernet();
eth.deserialize(pi.getPacketData(), 0,
pi.getPacketData().length);
- counterStore.updatePacketInCounters(sw, m, eth);
}
// fall through to default case...
@@ -1577,7 +1358,7 @@
oldSw.cancelAllStatisticsReplies();
- updateInactiveSwitchInfo(oldSw);
+ //updateInactiveSwitchInfo(oldSw);
// we need to clean out old switch state definitively
// before adding the new switch
@@ -1605,7 +1386,7 @@
}
}
- updateActiveSwitchInfo(sw);
+ //updateActiveSwitchInfo(sw);
SwitchUpdate update = new SwitchUpdate(sw, SwitchUpdateType.ADDED);
try {
this.updates.put(update);
@@ -1644,7 +1425,7 @@
// written out by the new master. Maybe need to revisit how we handle all
// of the switch state that's written to storage.
- updateInactiveSwitchInfo(sw);
+ //updateInactiveSwitchInfo(sw);
SwitchUpdate update = new SwitchUpdate(sw, SwitchUpdateType.REMOVED);
try {
this.updates.put(update);
@@ -1828,213 +1609,6 @@
// Initialization
// **************
- protected void updateAllInactiveSwitchInfo() {
- if (role == Role.SLAVE) {
- return;
- }
- String controllerId = getControllerId();
- String[] switchColumns = { SWITCH_DATAPATH_ID,
- SWITCH_CONTROLLER_ID,
- SWITCH_ACTIVE };
- String[] portColumns = { PORT_ID, PORT_SWITCH };
- IResultSet switchResultSet = null;
- try {
- OperatorPredicate op =
- new OperatorPredicate(SWITCH_CONTROLLER_ID,
- OperatorPredicate.Operator.EQ,
- controllerId);
- switchResultSet =
- storageSource.executeQuery(SWITCH_TABLE_NAME,
- switchColumns,
- op, null);
- while (switchResultSet.next()) {
- IResultSet portResultSet = null;
- try {
- String datapathId =
- switchResultSet.getString(SWITCH_DATAPATH_ID);
- switchResultSet.setBoolean(SWITCH_ACTIVE, Boolean.FALSE);
- op = new OperatorPredicate(PORT_SWITCH,
- OperatorPredicate.Operator.EQ,
- datapathId);
- portResultSet =
- storageSource.executeQuery(PORT_TABLE_NAME,
- portColumns,
- op, null);
- while (portResultSet.next()) {
- portResultSet.deleteRow();
- }
- portResultSet.save();
- }
- finally {
- if (portResultSet != null)
- portResultSet.close();
- }
- }
- switchResultSet.save();
- }
- finally {
- if (switchResultSet != null)
- switchResultSet.close();
- }
- }
-
- protected void updateControllerInfo() {
- updateAllInactiveSwitchInfo();
-
- // Write out the controller info to the storage source
- Map<String, Object> controllerInfo = new HashMap<String, Object>();
- String id = getControllerId();
- controllerInfo.put(CONTROLLER_ID, id);
- storageSource.updateRow(CONTROLLER_TABLE_NAME, controllerInfo);
- }
-
- protected void updateActiveSwitchInfo(IOFSwitch sw) {
- if (role == Role.SLAVE) {
- return;
- }
- // Obtain the row info for the switch
- Map<String, Object> switchInfo = new HashMap<String, Object>();
- String datapathIdString = sw.getStringId();
- switchInfo.put(SWITCH_DATAPATH_ID, datapathIdString);
- String controllerId = getControllerId();
- switchInfo.put(SWITCH_CONTROLLER_ID, controllerId);
- Date connectedSince = sw.getConnectedSince();
- switchInfo.put(SWITCH_CONNECTED_SINCE, connectedSince);
- Channel channel = sw.getChannel();
- SocketAddress socketAddress = channel.getRemoteAddress();
- if (socketAddress != null) {
- String socketAddressString = socketAddress.toString();
- switchInfo.put(SWITCH_SOCKET_ADDRESS, socketAddressString);
- if (socketAddress instanceof InetSocketAddress) {
- InetSocketAddress inetSocketAddress =
- (InetSocketAddress)socketAddress;
- InetAddress inetAddress = inetSocketAddress.getAddress();
- String ip = inetAddress.getHostAddress();
- switchInfo.put(SWITCH_IP, ip);
- }
- }
-
- // Write out the switch features info
- long capabilities = U32.f(sw.getCapabilities());
- switchInfo.put(SWITCH_CAPABILITIES, capabilities);
- long buffers = U32.f(sw.getBuffers());
- switchInfo.put(SWITCH_BUFFERS, buffers);
- long tables = U32.f(sw.getTables());
- switchInfo.put(SWITCH_TABLES, tables);
- long actions = U32.f(sw.getActions());
- switchInfo.put(SWITCH_ACTIONS, actions);
- switchInfo.put(SWITCH_ACTIVE, Boolean.TRUE);
-
- // Update the switch
- storageSource.updateRowAsync(SWITCH_TABLE_NAME, switchInfo);
-
- // Update the ports
- for (OFPhysicalPort port: sw.getPorts()) {
- updatePortInfo(sw, port);
- }
- }
-
- protected void updateInactiveSwitchInfo(IOFSwitch sw) {
- if (role == Role.SLAVE) {
- return;
- }
- log.debug("Update DB with inactiveSW {}", sw);
- // Update the controller info in the storage source to be inactive
- Map<String, Object> switchInfo = new HashMap<String, Object>();
- String datapathIdString = sw.getStringId();
- switchInfo.put(SWITCH_DATAPATH_ID, datapathIdString);
- //switchInfo.put(SWITCH_CONNECTED_SINCE, null);
- switchInfo.put(SWITCH_ACTIVE, Boolean.FALSE);
- storageSource.updateRowAsync(SWITCH_TABLE_NAME, switchInfo);
- }
-
- protected void updatePortInfo(IOFSwitch sw, OFPhysicalPort port) {
- if (role == Role.SLAVE) {
- return;
- }
- String datapathIdString = sw.getStringId();
- Map<String, Object> portInfo = new HashMap<String, Object>();
- int portNumber = U16.f(port.getPortNumber());
- String id = datapathIdString + "|" + portNumber;
- portInfo.put(PORT_ID, id);
- portInfo.put(PORT_SWITCH, datapathIdString);
- portInfo.put(PORT_NUMBER, portNumber);
- byte[] hardwareAddress = port.getHardwareAddress();
- String hardwareAddressString = HexString.toHexString(hardwareAddress);
- portInfo.put(PORT_HARDWARE_ADDRESS, hardwareAddressString);
- String name = port.getName();
- portInfo.put(PORT_NAME, name);
- long config = U32.f(port.getConfig());
- portInfo.put(PORT_CONFIG, config);
- long state = U32.f(port.getState());
- portInfo.put(PORT_STATE, state);
- long currentFeatures = U32.f(port.getCurrentFeatures());
- portInfo.put(PORT_CURRENT_FEATURES, currentFeatures);
- long advertisedFeatures = U32.f(port.getAdvertisedFeatures());
- portInfo.put(PORT_ADVERTISED_FEATURES, advertisedFeatures);
- long supportedFeatures = U32.f(port.getSupportedFeatures());
- portInfo.put(PORT_SUPPORTED_FEATURES, supportedFeatures);
- long peerFeatures = U32.f(port.getPeerFeatures());
- portInfo.put(PORT_PEER_FEATURES, peerFeatures);
- storageSource.updateRowAsync(PORT_TABLE_NAME, portInfo);
- }
-
- /**
- * Read switch port data from storage and write it into a switch object
- * @param sw the switch to update
- */
- protected void readSwitchPortStateFromStorage(OFSwitchImpl sw) {
- OperatorPredicate op =
- new OperatorPredicate(PORT_SWITCH,
- OperatorPredicate.Operator.EQ,
- sw.getStringId());
- IResultSet portResultSet =
- storageSource.executeQuery(PORT_TABLE_NAME,
- null, op, null);
- //Map<Short, OFPhysicalPort> oldports =
- // new HashMap<Short, OFPhysicalPort>();
- //oldports.putAll(sw.getPorts());
-
- while (portResultSet.next()) {
- try {
- OFPhysicalPort p = new OFPhysicalPort();
- p.setPortNumber((short)portResultSet.getInt(PORT_NUMBER));
- p.setName(portResultSet.getString(PORT_NAME));
- p.setConfig((int)portResultSet.getLong(PORT_CONFIG));
- p.setState((int)portResultSet.getLong(PORT_STATE));
- String portMac = portResultSet.getString(PORT_HARDWARE_ADDRESS);
- p.setHardwareAddress(HexString.fromHexString(portMac));
- p.setCurrentFeatures((int)portResultSet.
- getLong(PORT_CURRENT_FEATURES));
- p.setAdvertisedFeatures((int)portResultSet.
- getLong(PORT_ADVERTISED_FEATURES));
- p.setSupportedFeatures((int)portResultSet.
- getLong(PORT_SUPPORTED_FEATURES));
- p.setPeerFeatures((int)portResultSet.
- getLong(PORT_PEER_FEATURES));
- //oldports.remove(Short.valueOf(p.getPortNumber()));
- sw.setPort(p);
- } catch (NullPointerException e) {
- // ignore
- }
- }
- SwitchUpdate update = new SwitchUpdate(sw, SwitchUpdateType.PORTCHANGED);
- try {
- this.updates.put(update);
- } catch (InterruptedException e) {
- log.error("Failure adding update to queue", e);
- }
- }
-
- protected void removePortInfo(IOFSwitch sw, short portNumber) {
- if (role == Role.SLAVE) {
- return;
- }
- String datapathIdString = sw.getStringId();
- String id = datapathIdString + "|" + portNumber;
- storageSource.deleteRowAsync(PORT_TABLE_NAME, id);
- }
-
/**
* Sets the initial role based on properties in the config params.
* It looks for two different properties.
@@ -2150,10 +1724,6 @@
update.dispatch();
} catch (InterruptedException e) {
return;
- } catch (StorageException e) {
- log.error("Storage exception in controller " +
- "updates loop; terminating process", e);
- return;
} catch (Exception e) {
log.error("Exception in controller updates loop", e);
}
@@ -2231,15 +1801,12 @@
ListenerDispatcher<OFType,
IOFMessageListener>>();
this.switchListeners = new CopyOnWriteArraySet<IOFSwitchListener>();
- this.haListeners = new CopyOnWriteArraySet<IHAListener>();
this.activeSwitches = new ConcurrentHashMap<Long, IOFSwitch>();
this.connectedSwitches = new HashSet<OFSwitchImpl>();
this.controllerNodeIPsCache = new HashMap<String, String>();
this.updates = new LinkedBlockingQueue<IUpdate>();
this.factory = new BasicFactory();
- this.providerMap = new HashMap<String, List<IInfoProvider>>();
setConfigParams(configParams);
- //this.role = getInitialRole(configParams);
//Set the controller's role to MASTER so it always tries to do role requests.
this.role = Role.MASTER;
this.roleChanger = new RoleChanger();
@@ -2258,144 +1825,13 @@
public void startupComponents() {
try {
registryService.registerController(controllerId);
- } catch (RegistryException e2) {
- log.warn("Registry service error: {}", e2.getMessage());
+ } catch (RegistryException e) {
+ log.warn("Registry service error: {}", e.getMessage());
}
-
- // Create the table names we use
- storageSource.createTable(CONTROLLER_TABLE_NAME, null);
- storageSource.createTable(SWITCH_TABLE_NAME, null);
- storageSource.createTable(PORT_TABLE_NAME, null);
- storageSource.createTable(CONTROLLER_INTERFACE_TABLE_NAME, null);
- storageSource.createTable(SWITCH_CONFIG_TABLE_NAME, null);
- storageSource.setTablePrimaryKeyName(CONTROLLER_TABLE_NAME,
- CONTROLLER_ID);
- storageSource.setTablePrimaryKeyName(SWITCH_TABLE_NAME,
- SWITCH_DATAPATH_ID);
- storageSource.setTablePrimaryKeyName(PORT_TABLE_NAME, PORT_ID);
- storageSource.setTablePrimaryKeyName(CONTROLLER_INTERFACE_TABLE_NAME,
- CONTROLLER_INTERFACE_ID);
- storageSource.addListener(CONTROLLER_INTERFACE_TABLE_NAME, this);
-
- while (true) {
- try {
- updateControllerInfo();
- break;
- }
- catch (StorageException e) {
- log.info("Waiting for storage source");
- try {
- Thread.sleep(1000);
- } catch (InterruptedException e1) {
- }
- }
- }
// Add our REST API
restApi.addRestletRoutable(new CoreWebRoutable());
}
-
- @Override
- public void addInfoProvider(String type, IInfoProvider provider) {
- if (!providerMap.containsKey(type)) {
- providerMap.put(type, new ArrayList<IInfoProvider>());
- }
- providerMap.get(type).add(provider);
- }
-
- @Override
- public void removeInfoProvider(String type, IInfoProvider provider) {
- if (!providerMap.containsKey(type)) {
- log.debug("Provider type {} doesn't exist.", type);
- return;
- }
-
- providerMap.get(type).remove(provider);
- }
-
- public Map<String, Object> getControllerInfo(String type) {
- if (!providerMap.containsKey(type)) return null;
-
- Map<String, Object> result = new LinkedHashMap<String, Object>();
- for (IInfoProvider provider : providerMap.get(type)) {
- result.putAll(provider.getInfo(type));
- }
-
- return result;
- }
-
- @Override
- public void addHAListener(IHAListener listener) {
- this.haListeners.add(listener);
- }
-
- @Override
- public void removeHAListener(IHAListener listener) {
- this.haListeners.remove(listener);
- }
-
-
- /**
- * Handle changes to the controller nodes IPs and dispatch update.
- */
- @SuppressWarnings("unchecked")
- protected void handleControllerNodeIPChanges() {
- HashMap<String,String> curControllerNodeIPs = new HashMap<String,String>();
- HashMap<String,String> addedControllerNodeIPs = new HashMap<String,String>();
- HashMap<String,String> removedControllerNodeIPs =new HashMap<String,String>();
- String[] colNames = { CONTROLLER_INTERFACE_CONTROLLER_ID,
- CONTROLLER_INTERFACE_TYPE,
- CONTROLLER_INTERFACE_NUMBER,
- CONTROLLER_INTERFACE_DISCOVERED_IP };
- synchronized(controllerNodeIPsCache) {
- // We currently assume that interface Ethernet0 is the relevant
- // controller interface. Might change.
- // We could (should?) implement this using
- // predicates, but creating the individual and compound predicate
- // seems more overhead then just checking every row. Particularly,
- // since the number of rows is small and changes infrequent
- IResultSet res = storageSource.executeQuery(CONTROLLER_INTERFACE_TABLE_NAME,
- colNames,null, null);
- while (res.next()) {
- if (res.getString(CONTROLLER_INTERFACE_TYPE).equals("Ethernet") &&
- res.getInt(CONTROLLER_INTERFACE_NUMBER) == 0) {
- String controllerID = res.getString(CONTROLLER_INTERFACE_CONTROLLER_ID);
- String discoveredIP = res.getString(CONTROLLER_INTERFACE_DISCOVERED_IP);
- String curIP = controllerNodeIPsCache.get(controllerID);
-
- curControllerNodeIPs.put(controllerID, discoveredIP);
- if (curIP == null) {
- // new controller node IP
- addedControllerNodeIPs.put(controllerID, discoveredIP);
- }
- else if (!curIP.equals(discoveredIP)) {
- // IP changed
- removedControllerNodeIPs.put(controllerID, curIP);
- addedControllerNodeIPs.put(controllerID, discoveredIP);
- }
- }
- }
- // Now figure out if rows have been deleted. We can't use the
- // rowKeys from rowsDeleted directly, since the tables primary
- // key is a compound that we can't disassemble
- Set<String> curEntries = curControllerNodeIPs.keySet();
- Set<String> removedEntries = controllerNodeIPsCache.keySet();
- removedEntries.removeAll(curEntries);
- for (String removedControllerID : removedEntries)
- removedControllerNodeIPs.put(removedControllerID, controllerNodeIPsCache.get(removedControllerID));
- controllerNodeIPsCache = (HashMap<String, String>) curControllerNodeIPs.clone();
- HAControllerNodeIPUpdate update = new HAControllerNodeIPUpdate(
- curControllerNodeIPs, addedControllerNodeIPs,
- removedControllerNodeIPs);
- if (!removedControllerNodeIPs.isEmpty() || !addedControllerNodeIPs.isEmpty()) {
- try {
- this.updates.put(update);
- } catch (InterruptedException e) {
- log.error("Failure adding update to queue", e);
- }
- }
- }
- }
@Override
public Map<String, String> getControllerNodeIPs() {
@@ -2410,21 +1846,6 @@
}
@Override
- public void rowsModified(String tableName, Set<Object> rowKeys) {
- if (tableName.equals(CONTROLLER_INTERFACE_TABLE_NAME)) {
- handleControllerNodeIPChanges();
- }
-
- }
-
- @Override
- public void rowsDeleted(String tableName, Set<Object> rowKeys) {
- if (tableName.equals(CONTROLLER_INTERFACE_TABLE_NAME)) {
- handleControllerNodeIPChanges();
- }
- }
-
- @Override
public long getSystemStartTime() {
return (this.systemStartTime);
}
diff --git a/src/main/java/net/floodlightcontroller/core/internal/OFSwitchImpl.java b/src/main/java/net/floodlightcontroller/core/internal/OFSwitchImpl.java
index 1a52418..752675a 100644
--- a/src/main/java/net/floodlightcontroller/core/internal/OFSwitchImpl.java
+++ b/src/main/java/net/floodlightcontroller/core/internal/OFSwitchImpl.java
@@ -36,8 +36,8 @@
import net.floodlightcontroller.core.FloodlightContext;
import net.floodlightcontroller.core.IFloodlightProviderService;
-import net.floodlightcontroller.core.IOFMessageListener;
import net.floodlightcontroller.core.IFloodlightProviderService.Role;
+import net.floodlightcontroller.core.IOFMessageListener;
import net.floodlightcontroller.core.IOFSwitch;
import net.floodlightcontroller.core.annotations.LogMessageDoc;
import net.floodlightcontroller.core.annotations.LogMessageDocs;
@@ -57,12 +57,12 @@
import org.openflow.protocol.OFMatch;
import org.openflow.protocol.OFMessage;
import org.openflow.protocol.OFPhysicalPort;
-import org.openflow.protocol.OFPort;
-import org.openflow.protocol.OFType;
-import org.openflow.protocol.OFVendor;
import org.openflow.protocol.OFPhysicalPort.OFPortConfig;
import org.openflow.protocol.OFPhysicalPort.OFPortState;
+import org.openflow.protocol.OFPort;
import org.openflow.protocol.OFStatisticsRequest;
+import org.openflow.protocol.OFType;
+import org.openflow.protocol.OFVendor;
import org.openflow.protocol.statistics.OFDescriptionStatistics;
import org.openflow.protocol.statistics.OFStatistics;
import org.openflow.util.HexString;
diff --git a/src/main/java/net/floodlightcontroller/core/web/ControllerRoleResource.java b/src/main/java/net/floodlightcontroller/core/web/ControllerRoleResource.java
deleted file mode 100644
index 2ed87cb..0000000
--- a/src/main/java/net/floodlightcontroller/core/web/ControllerRoleResource.java
+++ /dev/null
@@ -1,57 +0,0 @@
-package net.floodlightcontroller.core.web;
-
-import org.restlet.data.Status;
-import org.restlet.resource.ServerResource;
-
-import net.floodlightcontroller.core.IFloodlightProviderService;
-import net.floodlightcontroller.core.IFloodlightProviderService.Role;
-import net.floodlightcontroller.core.annotations.LogMessageDoc;
-
-import org.restlet.resource.Get;
-import org.restlet.resource.Post;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class ControllerRoleResource extends ServerResource {
-
- protected final static Logger log = LoggerFactory.getLogger(ControllerRoleResource.class);
-
- @Get("json")
- public RoleInfo getRole() {
- IFloodlightProviderService floodlightProvider =
- (IFloodlightProviderService)getContext().getAttributes().
- get(IFloodlightProviderService.class.getCanonicalName());
- return new RoleInfo(floodlightProvider.getRole());
- }
-
- @Post("json")
- @LogMessageDoc(level="WARN",
- message="Invalid role value specified in REST API to " +
- "set controller role",
- explanation="An HA role change request was malformed.",
- recommendation=LogMessageDoc.CHECK_CONTROLLER)
- public void setRole(RoleInfo roleInfo) {
- //Role role = Role.lookupRole(roleInfo.getRole());
- Role role = null;
- try {
- role = Role.valueOf(roleInfo.getRole().toUpperCase());
- }
- catch (IllegalArgumentException e) {
- // The role value in the REST call didn't match a valid
- // role name, so just leave the role as null and handle
- // the error below.
- }
- if (role == null) {
- log.warn ("Invalid role value specified in REST API to " +
- "set controller role");
- setStatus(Status.CLIENT_ERROR_BAD_REQUEST, "Invalid role value");
- return;
- }
-
- IFloodlightProviderService floodlightProvider =
- (IFloodlightProviderService)getContext().getAttributes().
- get(IFloodlightProviderService.class.getCanonicalName());
-
- floodlightProvider.setRole(role);
- }
-}
diff --git a/src/main/java/net/floodlightcontroller/core/web/ControllerSummaryResource.java b/src/main/java/net/floodlightcontroller/core/web/ControllerSummaryResource.java
deleted file mode 100644
index 20fbf85..0000000
--- a/src/main/java/net/floodlightcontroller/core/web/ControllerSummaryResource.java
+++ /dev/null
@@ -1,40 +0,0 @@
-/**
-* Copyright 2012, Big Switch Networks, Inc.
-* Originally created by Shudong Zhou, Big Switch Networks
-*
-* Licensed under the Apache License, Version 2.0 (the "License"); you may
-* not use this file except in compliance with the License. You may obtain
-* a copy of the License at
-*
-* http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-* License for the specific language governing permissions and limitations
-* under the License.
-**/
-
-package net.floodlightcontroller.core.web;
-
-import java.util.Map;
-
-import org.restlet.resource.Get;
-import org.restlet.resource.ServerResource;
-
-import net.floodlightcontroller.core.IFloodlightProviderService;
-
-/**
- * Get summary counters registered by all modules
- * @author shudongz
- */
-public class ControllerSummaryResource extends ServerResource {
- @Get("json")
- public Map<String, Object> retrieve() {
- IFloodlightProviderService floodlightProvider =
- (IFloodlightProviderService)getContext().getAttributes().
- get(IFloodlightProviderService.class.getCanonicalName());
- return floodlightProvider.getControllerInfo("summary");
- }
-
-}
diff --git a/src/main/java/net/floodlightcontroller/core/web/CoreWebRoutable.java b/src/main/java/net/floodlightcontroller/core/web/CoreWebRoutable.java
index 7604d7c..9b22617 100644
--- a/src/main/java/net/floodlightcontroller/core/web/CoreWebRoutable.java
+++ b/src/main/java/net/floodlightcontroller/core/web/CoreWebRoutable.java
@@ -47,9 +47,6 @@
router.attach("/switch/all/{statType}/json", AllSwitchStatisticsResource.class);
router.attach("/switch/{switchId}/{statType}/json", SwitchStatisticsResource.class);
router.attach("/controller/switches/json", ControllerSwitchesResource.class);
- router.attach("/counter/{counterTitle}/json", CounterResource.class);
- router.attach("/counter/{switchId}/{counterName}/json", SwitchCounterResource.class);
- router.attach("/counter/categories/{switchId}/{counterName}/{layer}/json", SwitchCounterCategoriesResource.class);
router.attach("/memory/json", ControllerMemoryResource.class);
// Get the last {count} events from the event histories
router.attach("/event-history/topology-switch/{count}/json",
@@ -58,9 +55,6 @@
EventHistoryTopologyLinkResource.class);
router.attach("/event-history/topology-cluster/{count}/json",
EventHistoryTopologyClusterResource.class);
- router.attach("/storage/tables/json", StorageSourceTablesResource.class);
- router.attach("/controller/summary/json", ControllerSummaryResource.class);
- router.attach("/role/json", ControllerRoleResource.class);
router.attach("/health/json", HealthCheckResource.class);
router.attach("/system/uptime/json", SystemUptimeResource.class);
// Following added by ONOS
diff --git a/src/main/java/net/floodlightcontroller/core/web/CounterResource.java b/src/main/java/net/floodlightcontroller/core/web/CounterResource.java
deleted file mode 100644
index fb680d7..0000000
--- a/src/main/java/net/floodlightcontroller/core/web/CounterResource.java
+++ /dev/null
@@ -1,70 +0,0 @@
-/**
-* Copyright 2011, Big Switch Networks, Inc.
-* Originally created by David Erickson, Stanford University
-*
-* Licensed under the Apache License, Version 2.0 (the "License"); you may
-* not use this file except in compliance with the License. You may obtain
-* a copy of the License at
-*
-* http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-* License for the specific language governing permissions and limitations
-* under the License.
-**/
-
-package net.floodlightcontroller.core.web;
-
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.Map;
-import java.util.Map.Entry;
-
-import net.floodlightcontroller.counter.CounterValue;
-import net.floodlightcontroller.counter.ICounter;
-
-import org.restlet.resource.Get;
-
-public class CounterResource extends CounterResourceBase {
- @Get("json")
- public Map<String, Object> retrieve() {
- String counterTitle =
- (String) getRequestAttributes().get("counterTitle");
- Map<String, Object> model = new HashMap<String,Object>();
- CounterValue v;
- if (counterTitle.equalsIgnoreCase("all")) {
- Map<String, ICounter> counters = this.counterStore.getAll();
- if (counters != null) {
- Iterator<Map.Entry<String, ICounter>> it =
- counters.entrySet().iterator();
- while (it.hasNext()) {
- Entry<String, ICounter> entry = it.next();
- String counterName = entry.getKey();
- v = entry.getValue().getCounterValue();
-
- if (CounterValue.CounterType.LONG == v.getType()) {
- model.put(counterName, v.getLong());
- } else if (v.getType() == CounterValue.CounterType.DOUBLE) {
- model.put(counterName, v.getDouble());
- }
- }
- }
- } else {
- ICounter counter = this.counterStore.getCounter(counterTitle);
- if (counter != null) {
- v = counter.getCounterValue();
- } else {
- v = new CounterValue(CounterValue.CounterType.LONG);
- }
-
- if (CounterValue.CounterType.LONG == v.getType()) {
- model.put(counterTitle, v.getLong());
- } else if (v.getType() == CounterValue.CounterType.DOUBLE) {
- model.put(counterTitle, v.getDouble());
- }
- }
- return model;
- }
-}
diff --git a/src/main/java/net/floodlightcontroller/core/web/CounterResourceBase.java b/src/main/java/net/floodlightcontroller/core/web/CounterResourceBase.java
deleted file mode 100644
index 70e90ed..0000000
--- a/src/main/java/net/floodlightcontroller/core/web/CounterResourceBase.java
+++ /dev/null
@@ -1,35 +0,0 @@
-/**
-* Copyright 2011, Big Switch Networks, Inc.
-* Originally created by David Erickson, Stanford University
-*
-* Licensed under the Apache License, Version 2.0 (the "License"); you may
-* not use this file except in compliance with the License. You may obtain
-* a copy of the License at
-*
-* http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-* License for the specific language governing permissions and limitations
-* under the License.
-**/
-
-package net.floodlightcontroller.core.web;
-
-import net.floodlightcontroller.counter.ICounterStoreService;
-
-import org.restlet.resource.ResourceException;
-import org.restlet.resource.ServerResource;
-
-public class CounterResourceBase extends ServerResource {
- protected ICounterStoreService counterStore;
-
- @Override
- protected void doInit() throws ResourceException {
- super.doInit();
- counterStore =
- (ICounterStoreService)getContext().getAttributes().
- get(ICounterStoreService.class.getCanonicalName());
- }
-}
diff --git a/src/main/java/net/floodlightcontroller/core/web/StorageSourceTablesResource.java b/src/main/java/net/floodlightcontroller/core/web/StorageSourceTablesResource.java
deleted file mode 100644
index 51f514f..0000000
--- a/src/main/java/net/floodlightcontroller/core/web/StorageSourceTablesResource.java
+++ /dev/null
@@ -1,18 +0,0 @@
-package net.floodlightcontroller.core.web;
-
-import java.util.Set;
-
-import net.floodlightcontroller.storage.IStorageSourceService;
-
-import org.restlet.resource.Get;
-import org.restlet.resource.ServerResource;
-
-public class StorageSourceTablesResource extends ServerResource {
- @Get("json")
- public Set<String> retrieve() {
- IStorageSourceService storageSource = (IStorageSourceService)getContext().
- getAttributes().get(IStorageSourceService.class.getCanonicalName());
- Set<String> allTableNames = storageSource.getAllTableNames();
- return allTableNames;
- }
-}
diff --git a/src/main/java/net/floodlightcontroller/core/web/SwitchCounterCategoriesResource.java b/src/main/java/net/floodlightcontroller/core/web/SwitchCounterCategoriesResource.java
deleted file mode 100644
index f14d706..0000000
--- a/src/main/java/net/floodlightcontroller/core/web/SwitchCounterCategoriesResource.java
+++ /dev/null
@@ -1,87 +0,0 @@
-/**
-* Copyright 2011, Big Switch Networks, Inc.
-* Originally created by David Erickson, Stanford University
-*
-* Licensed under the Apache License, Version 2.0 (the "License"); you may
-* not use this file except in compliance with the License. You may obtain
-* a copy of the License at
-*
-* http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-* License for the specific language governing permissions and limitations
-* under the License.
-**/
-
-package net.floodlightcontroller.core.web;
-
-import java.io.UnsupportedEncodingException;
-import java.net.URLDecoder;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-import org.openflow.util.HexString;
-import org.restlet.resource.Get;
-
-import net.floodlightcontroller.core.IFloodlightProviderService;
-import net.floodlightcontroller.counter.CounterStore.NetworkLayer;
-import net.floodlightcontroller.counter.ICounterStoreService;
-
-/**
- * Get the counter categories for a particular switch
- * @author readams
- */
-public class SwitchCounterCategoriesResource extends CounterResourceBase {
- @Get("json")
- public Map<String, Object> retrieve() {
- IFloodlightProviderService floodlightProvider =
- (IFloodlightProviderService)getContext().getAttributes().
- get(IFloodlightProviderService.class.getCanonicalName());
- HashMap<String,Object> model = new HashMap<String,Object>();
-
- String switchID = (String) getRequestAttributes().get("switchId");
- String counterName = (String) getRequestAttributes().get("counterName");
- String layer = (String) getRequestAttributes().get("layer");
-
- Long[] switchDpids;
- if (switchID.equalsIgnoreCase("all")) {
- switchDpids = floodlightProvider.getSwitches().keySet().toArray(new Long[0]);
- for (Long dpid : switchDpids) {
- switchID = HexString.toHexString(dpid);
-
- getOneSwitchCounterCategoriesJson(model, switchID, counterName, layer);
- }
- } else {
- getOneSwitchCounterCategoriesJson(model, switchID, counterName, layer);
- }
-
- return model;
- }
-
- protected void getOneSwitchCounterCategoriesJson(Map<String, Object> model,
- String switchID,
- String counterName,
- String layer) {
- String fullCounterName = "";
- NetworkLayer nl = NetworkLayer.L3;
-
- try {
- counterName = URLDecoder.decode(counterName, "UTF-8");
- layer = URLDecoder.decode(layer, "UTF-8");
- fullCounterName = switchID + ICounterStoreService.TitleDelimitor + counterName;
- } catch (UnsupportedEncodingException e) {
- //Just leave counterTitle undecoded if there is an issue - fail silently
- }
-
- if (layer.compareToIgnoreCase("4") == 0) {
- nl = NetworkLayer.L4;
- }
- List<String> categories = this.counterStore.getAllCategories(fullCounterName, nl);
- if (categories != null) {
- model.put(fullCounterName + "." + layer, categories);
- }
- }
-}
diff --git a/src/main/java/net/floodlightcontroller/core/web/SwitchCounterResource.java b/src/main/java/net/floodlightcontroller/core/web/SwitchCounterResource.java
deleted file mode 100644
index 188836d..0000000
--- a/src/main/java/net/floodlightcontroller/core/web/SwitchCounterResource.java
+++ /dev/null
@@ -1,83 +0,0 @@
-/**
-* Copyright 2011, Big Switch Networks, Inc.
-* Originally created by David Erickson, Stanford University
-*
-* Licensed under the Apache License, Version 2.0 (the "License"); you may
-* not use this file except in compliance with the License. You may obtain
-* a copy of the License at
-*
-* http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-* License for the specific language governing permissions and limitations
-* under the License.
-**/
-
-package net.floodlightcontroller.core.web;
-
-import java.io.UnsupportedEncodingException;
-import java.net.URLDecoder;
-import java.util.HashMap;
-import java.util.Map;
-
-import org.openflow.util.HexString;
-import org.restlet.resource.Get;
-
-import net.floodlightcontroller.core.IFloodlightProviderService;
-import net.floodlightcontroller.counter.ICounter;
-import net.floodlightcontroller.counter.ICounterStoreService;
-
-/**
- * Get counters for a particular switch
- * @author readams
- */
-public class SwitchCounterResource extends CounterResourceBase {
- @Get("json")
- public Map<String, Object> retrieve() {
- IFloodlightProviderService floodlightProvider =
- (IFloodlightProviderService)getContext().getAttributes().
- get(IFloodlightProviderService.class.getCanonicalName());
- HashMap<String,Object> model = new HashMap<String,Object>();
-
- String switchID = (String) getRequestAttributes().get("switchId");
- String counterName = (String) getRequestAttributes().get("counterName");
-
- Long[] switchDpids;
- if (switchID.equalsIgnoreCase("all")) {
- switchDpids = floodlightProvider.getSwitches().keySet().toArray(new Long[0]);
- getOneSwitchCounterJson(model, ICounterStoreService.CONTROLLER_NAME, counterName);
- for (Long dpid : switchDpids) {
- switchID = HexString.toHexString(dpid);
-
- getOneSwitchCounterJson(model, switchID, counterName);
- }
- } else {
- getOneSwitchCounterJson(model, switchID, counterName);
- }
- return model;
- }
-
- protected void getOneSwitchCounterJson(Map<String, Object> model,
- String switchID, String counterName) {
- String fullCounterName = "";
-
- try {
- counterName = URLDecoder.decode(counterName, "UTF-8");
- fullCounterName =
- switchID + ICounterStoreService.TitleDelimitor + counterName;
- } catch (UnsupportedEncodingException e) {
- //Just leave counterTitle undecoded if there is an issue - fail silently
- }
-
- ICounter counter = this.counterStore.getCounter(fullCounterName);
- Map<String, Long> sample = new HashMap<String, Long> ();
- if (counter != null) {
- sample.put(counter.getCounterDate().toString(),
- counter.getCounterValue().getLong());
- model.put(switchID, sample);
- }
- }
-
-}
diff --git a/src/main/java/net/floodlightcontroller/counter/ConcurrentCounter.java b/src/main/java/net/floodlightcontroller/counter/ConcurrentCounter.java
deleted file mode 100644
index cdec1e0..0000000
--- a/src/main/java/net/floodlightcontroller/counter/ConcurrentCounter.java
+++ /dev/null
@@ -1,205 +0,0 @@
-/**
-* Copyright 2011, Big Switch Networks, Inc.
-* Originally created by David Erickson, Stanford University
-*
-* Licensed under the Apache License, Version 2.0 (the "License"); you may
-* not use this file except in compliance with the License. You may obtain
-* a copy of the License at
-*
-* http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-* License for the specific language governing permissions and limitations
-* under the License.
-**/
-
-/**
- *
- */
-package net.floodlightcontroller.counter;
-
-import java.util.Collections;
-import java.util.Date;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Queue;
-import java.util.Set;
-import java.util.concurrent.ConcurrentLinkedQueue;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.Executors;
-import java.util.concurrent.TimeUnit;
-
-import net.floodlightcontroller.counter.CounterValue.CounterType;
-
-
-/**
- * This module needs to be updated with CounterValue.
- *
- * This is a crumby attempt at a highly concurrent implementation of the Counter interface.
- *
- * (Help! Help! Someone please re-write me! This will almost certainly break at high loads.)
- *
- * The gist is that this class, ConcurrentCounter, keeps an internal highly transient buffer that is occasionally flushed
- * in to a set of CountBuffers (circular buffers) which store a longer term historical view of the count values at different
- * moments in time.
- *
- * This Counter implementation may be a bit over-engineered... The goal here was to present an implementation that is very
- * predictable with respect to memory and CPU time and, at the same time, present a very fast increment() method. The reasoning
- * here is that this will be a go-to class when it comes to debugging, particularly in high-load situations where logging
- * may introduce so much variability to the system that it foils the results.
- *
- * @author kyle
- *
- */
-public class ConcurrentCounter implements ICounter {
-
- protected static final Map<DateSpan, Integer> MAX_HISTORY = new HashMap<DateSpan, Integer>();
- static {
- MAX_HISTORY.put(DateSpan.REALTIME, new Integer(1));
- MAX_HISTORY.put(DateSpan.SECONDS, new Integer(120));
- MAX_HISTORY.put(DateSpan.MINUTES, new Integer(60));
- MAX_HISTORY.put(DateSpan.HOURS, new Integer(48));
- MAX_HISTORY.put(DateSpan.DAYS, new Integer(60));
- MAX_HISTORY.put(DateSpan.WEEKS, new Integer(2));
- }
-
- protected static Set<ConcurrentCounter> liveCounters;
-
- static {
- liveCounters = Collections.newSetFromMap(new ConcurrentHashMap<ConcurrentCounter, Boolean>()); //nifty way to get concurrent hash set
- //Set a background thread to flush any liveCounters every 100 milliseconds
- Executors.newScheduledThreadPool(1).scheduleAtFixedRate(new Runnable() {
- public void run() {
- for(ConcurrentCounter c : liveCounters) {
- c.flush();
- }
- }}, 100, 100, TimeUnit.MILLISECONDS);
- }
-
- /**
- * Very simple data structure to store off a single count entry at a single point in time
- * @author kyle
- *
- */
- protected static final class CountAtom {
- protected Date date;
- protected Long delta;
-
- protected CountAtom(Date date, Long delta) {
- this.date = date;
- this.delta = delta;
- }
-
- public String toString() {
- return "[" + this.date + ": " + this.delta + "]";
- }
- }
-
-
- protected Queue<CountAtom> unprocessedCountBuffer;
- protected Map<DateSpan, CountBuffer> counts;
- protected Date startDate;
-
- /**
- * Factory method to create a new counter instance. (Design note -
- * use a factory pattern here as it may be necessary to hook in other
- * registrations around counter objects as they are created.)
- *
- * @param startDate
- * @return
- */
- public static ICounter createCounter(Date startDate) {
- ConcurrentCounter cc = new ConcurrentCounter(startDate);
- ConcurrentCounter.liveCounters.add(cc);
- return cc;
-
- }
-
- /**
- * Protected constructor - use createCounter factory method instead
- * @param startDate
- */
- protected ConcurrentCounter(Date startDate) {
- init(startDate);
- }
-
- protected void init(Date startDate) {
- this.startDate = startDate;
- this.unprocessedCountBuffer = new ConcurrentLinkedQueue<CountAtom>();
- this.counts = new HashMap<DateSpan, CountBuffer>();
-
- for(DateSpan ds : DateSpan.values()) {
- CountBuffer cb = new CountBuffer(startDate, ds, MAX_HISTORY.get(ds));
- counts.put(ds, cb);
- }
- }
- /**
- * This is the key method that has to be both fast and very thread-safe.
- */
- @Override
- public void increment() {
- this.increment(new Date(), (long)1);
- }
-
- @Override
- public void increment(Date d, long delta) {
- this.unprocessedCountBuffer.add(new CountAtom(d, delta));
- }
-
- @Override
- public void setCounter(Date d, CounterValue value) {
- // To be done later
- }
-
- /**
- * Reset the value.
- */
- @Override
- public void reset(Date startDate) {
- init(startDate);
- }
-
- /**
- * Flushes values out of the internal buffer and in to structures
- * that can be fetched with a call to snapshot()
- */
- public synchronized void flush() {
- for(CountAtom c = this.unprocessedCountBuffer.poll(); c != null; c = this.unprocessedCountBuffer.poll()) {
- for(DateSpan ds : DateSpan.values()) {
- CountBuffer cb = counts.get(ds);
- cb.increment(c.date, c.delta);
- }
- }
- }
-
- @Override
- public CounterValue getCounterValue() {
- // To be done later
- //CountSeries cs = counts.get(DateSpan.REALTIME).snapshot();
- //return cs.getSeries()[0];
- return new CounterValue(CounterType.LONG);
- }
-
- @Override
- public Date getCounterDate() {
- // To be done later
- //CountSeries cs = counts.get(DateSpan.REALTIME).snapshot();
- //return cs.getSeries()[0];
- return new Date();
- }
-
- @Override
- /**
- * This method returns a disconnected copy of the underlying CountSeries corresponding to dateSpan.
- */
- public CountSeries snapshot(DateSpan dateSpan) {
- flush();
- CountSeries cs = counts.get(dateSpan).snapshot();
- return cs;
- }
-
-
-
-}
diff --git a/src/main/java/net/floodlightcontroller/counter/CountBuffer.java b/src/main/java/net/floodlightcontroller/counter/CountBuffer.java
deleted file mode 100644
index fa45862..0000000
--- a/src/main/java/net/floodlightcontroller/counter/CountBuffer.java
+++ /dev/null
@@ -1,125 +0,0 @@
-/**
-* Copyright 2011, Big Switch Networks, Inc.
-* Originally created by David Erickson, Stanford University
-*
-* Licensed under the Apache License, Version 2.0 (the "License"); you may
-* not use this file except in compliance with the License. You may obtain
-* a copy of the License at
-*
-* http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-* License for the specific language governing permissions and limitations
-* under the License.
-**/
-
-package net.floodlightcontroller.counter;
-
-import java.util.Date;
-
-import net.floodlightcontroller.counter.ICounter.DateSpan;
-
-
-/**
- * Implements a circular buffer to store the last x time-based counter values. This is pretty crumby
- * implementation, basically wrapping everything with synchronized blocks, in order to ensure that threads
- * which will be updating the series don't result in a thread which is reading the series getting stuck with
- * a start date which does not correspond to the count values in getSeries.
- *
- * This could probably use a re-think...
- *
- * @author kyle
- *
- */
-public class CountBuffer {
- protected long[] counterValues;
- protected Date startDate;
- protected DateSpan dateSpan;
- protected int currentIndex;
- protected int seriesLength;
-
-
- public CountBuffer(Date startDate, DateSpan dateSpan, int seriesLength) {
- this.seriesLength = seriesLength;
- this.counterValues = new long[seriesLength];
- this.dateSpan = dateSpan;
-
- this.startDate = startDate;
- this.currentIndex = 0;
- }
-
- /**
- * Increment the count associated with Date d, forgetting some of the older count values if necessary to ensure
- * that the total span of time covered by this series corresponds to DateSpan * seriesLength (circular buffer).
- *
- * Note - fails silently if the Date falls prior to the start of the tracked count values.
- *
- * Note - this should be a reasonably fast method, though it will have to block if there is another thread reading the
- * series at the same time.
- *
- * @param d
- * @param delta
- */
- public synchronized void increment(Date d, long delta) {
-
- long dsMillis = CountSeries.dateSpanToMilliseconds(this.dateSpan);
- Date endDate = new Date(startDate.getTime() + seriesLength * dsMillis - 1);
-
- if(d.getTime() < startDate.getTime()) {
- return; //silently fail rather than insert a count at a time older than the history buffer we're keeping
- }
- else if (d.getTime() >= startDate.getTime() && d.getTime() <= endDate.getTime()) {
- int index = (int) (( d.getTime() - startDate.getTime() ) / dsMillis); // java rounds down on long/long
- int modIndex = (index + currentIndex) % seriesLength;
- long currentValue = counterValues[modIndex];
- counterValues[modIndex] = currentValue + delta;
- }
- else if (d.getTime() > endDate.getTime()) {
- //Initialize new buckets
- int newBuckets = (int)((d.getTime() - endDate.getTime()) / dsMillis) + 1; // java rounds down on long/long
- for(int i = 0; i < newBuckets; i++) {
- int modIndex = (i + currentIndex) % seriesLength;
- counterValues[modIndex] = 0;
- }
- //Update internal vars
- this.startDate = new Date(startDate.getTime() + dsMillis * newBuckets);
- this.currentIndex = (currentIndex + newBuckets) % this.seriesLength;
-
- //Call again (date should be in the range this time)
- this.increment(d, delta);
- }
- }
-
- /**
- * Relatively slow method, expected to be called primarily from UI rather than from in-packet-path.
- *
- * @return the count values associated with each time interval starting with startDate and demarc'ed by dateSpan
- */
- public long[] getSeries() { //synchronized here should lock on 'this', implying that it shares the lock with increment
- long[] ret = new long[this.seriesLength];
- for(int i = 0; i < this.seriesLength; i++) {
- int modIndex = (currentIndex + i) % this.seriesLength;
- ret[i] = this.counterValues[modIndex];
- }
- return ret;
- }
-
-
- /**
- * Returns an immutable count series that represents a snapshot of this
- * series at a specific moment in time.
- * @return
- */
- public synchronized CountSeries snapshot() {
- long[] cvs = new long[this.seriesLength];
- for(int i = 0; i < this.seriesLength; i++) {
- int modIndex = (this.currentIndex + i) % this.seriesLength;
- cvs[i] = this.counterValues[modIndex];
- }
-
- return new CountSeries(this.startDate, this.dateSpan, cvs);
- }
-
-}
diff --git a/src/main/java/net/floodlightcontroller/counter/CountSeries.java b/src/main/java/net/floodlightcontroller/counter/CountSeries.java
deleted file mode 100644
index e8a547a..0000000
--- a/src/main/java/net/floodlightcontroller/counter/CountSeries.java
+++ /dev/null
@@ -1,88 +0,0 @@
-/**
-* Copyright 2011, Big Switch Networks, Inc.
-* Originally created by David Erickson, Stanford University
-*
-* Licensed under the Apache License, Version 2.0 (the "License"); you may
-* not use this file except in compliance with the License. You may obtain
-* a copy of the License at
-*
-* http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-* License for the specific language governing permissions and limitations
-* under the License.
-**/
-
-package net.floodlightcontroller.counter;
-
-import java.util.Arrays;
-import java.util.Date;
-
-import net.floodlightcontroller.counter.ICounter.DateSpan;
-
-/**
- * Simple immutable class to store a series of historic counter values
- *
- * This could probably use a re-think...
- *
- * @author kyle
- *
- */
-public class CountSeries {
- protected long[] counterValues;
- protected Date startDate;
- protected DateSpan dateSpan;
-
- public CountSeries(Date startDate, DateSpan dateSpan, long[] counterValues) {
- this.counterValues = counterValues.clone();
- this.dateSpan = dateSpan;
- this.startDate = startDate;
- }
-
-
- public long[] getSeries() { //synchronized here should lock on 'this', implying that it shares the lock with increment
- return this.counterValues.clone();
- }
-
- /**
- * Returns the startDate of this series. The first long in getSeries represents the sum of deltas from increment calls with dates
- * that correspond to >= startDate and < startDate + DateSpan.
- * @return
- */
- public Date getStartDate() {//synchronized here should lock on 'this', implying that it shares the lock with increment
- return this.startDate;
- }
-
- public String toString() {
- String ret = "{start: " + this.startDate + ", span: " + this.dateSpan + ", series: " + Arrays.toString(getSeries()) + "}";
- return ret;
- }
-
- /**
- * Return a long that is the number of milliseconds in a ds (second/minute/hour/day/week). (Utility method.)
- *
- * @param ds
- * @return
- */
- public static final long dateSpanToMilliseconds(DateSpan ds) {
- long delta = 1;
- switch(ds) {
- case WEEKS:
- delta *= 7;
- case DAYS:
- delta *= 24;
- case HOURS:
- delta *= 60;
- case MINUTES:
- delta *= 60;
- case SECONDS:
- delta *= 1000;
- default:
- break;
- }
- return delta;
- }
-
-}
diff --git a/src/main/java/net/floodlightcontroller/counter/CounterStore.java b/src/main/java/net/floodlightcontroller/counter/CounterStore.java
deleted file mode 100644
index 15ce53b..0000000
--- a/src/main/java/net/floodlightcontroller/counter/CounterStore.java
+++ /dev/null
@@ -1,461 +0,0 @@
-/**
- * Copyright 2011, Big Switch Networks, Inc.
- * Originally created by David Erickson, Stanford University
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License. You may obtain
- * a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations
- * under the License.
- **/
-
-/**
- * Implements a very simple central store for system counters
- */
-package net.floodlightcontroller.counter;
-
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Date;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.Executors;
-import java.util.concurrent.TimeUnit;
-
-import javax.annotation.PostConstruct;
-
-import net.floodlightcontroller.core.IOFSwitch;
-import net.floodlightcontroller.core.module.FloodlightModuleContext;
-import net.floodlightcontroller.core.module.FloodlightModuleException;
-import net.floodlightcontroller.core.module.IFloodlightModule;
-import net.floodlightcontroller.core.module.IFloodlightService;
-import net.floodlightcontroller.counter.CounterValue.CounterType;
-import net.floodlightcontroller.packet.Ethernet;
-import net.floodlightcontroller.packet.IPv4;
-
-import org.openflow.protocol.OFMessage;
-import org.openflow.protocol.OFPacketIn;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-
-/**
- * @author kyle
- *
- */
-public class CounterStore implements IFloodlightModule, ICounterStoreService {
- protected final static Logger log = LoggerFactory.getLogger(CounterStore.class);
-
- public enum NetworkLayer {
- L2, L3, L4
- }
-
- protected class CounterEntry {
- protected ICounter counter;
- String title;
- }
-
- /**
- * A map of counterName --> Counter
- */
- protected ConcurrentHashMap<String, CounterEntry> nameToCEIndex =
- new ConcurrentHashMap<String, CounterEntry>();
-
- protected ICounter heartbeatCounter;
- protected ICounter randomCounter;
-
- /**
- * Counter Categories grouped by network layers
- * NetworkLayer -> CounterToCategories
- */
- protected static Map<NetworkLayer, Map<String, List<String>>> layeredCategories =
- new ConcurrentHashMap<NetworkLayer, Map<String, List<String>>> ();
-
- public void updatePacketInCounters(IOFSwitch sw, OFMessage m, Ethernet eth) {
- OFPacketIn packet = (OFPacketIn)m;
-
- // Make sure there is data
- if (packet.getPacketData().length <= 0) return;
-
- /* Extract the etherType and protocol field for IPv4 packet.
- */
- String etherType = String.format("%04x", eth.getEtherType());
-
- /*
- * Valid EtherType must be greater than or equal to 0x0600
- * It is V1 Ethernet Frame if EtherType < 0x0600
- */
- if (eth.getEtherType() < 0x0600) {
- etherType = "0599";
- }
-
- if (TypeAliases.l3TypeAliasMap != null &&
- TypeAliases.l3TypeAliasMap.containsKey(etherType)) {
- etherType = TypeAliases.l3TypeAliasMap.get(etherType);
- } else {
- etherType = "L3_" + etherType;
- }
- String switchIdHex = sw.getStringId();
-
- String packetName = m.getType().toClass().getName();
- packetName = packetName.substring(packetName.lastIndexOf('.')+1);
-
- // Construct controller counter for the packet_in
- String controllerCounterName =
- CounterStore.createCounterName(CONTROLLER_NAME,
- -1,
- packetName);
-
- String controllerL3CategoryCounterName =
- CounterStore.createCounterName(CONTROLLER_NAME,
- -1,
- packetName,
- etherType,
- NetworkLayer.L3);
-
- String l2Type = null;
- if (eth.isBroadcast()) {
- l2Type = BROADCAST;
- } else if (eth.isMulticast()) {
- l2Type = MULTICAST;
- } else {
- l2Type = UNICAST;
- }
-
- // Construct both port and switch L3 counter for the packet_in
- String controllerL2CategoryCounterName = CounterStore.createCounterName(CONTROLLER_NAME,
- -1,
- packetName,
- l2Type,
- NetworkLayer.L2);
- String switchL2CategoryCounterName = CounterStore.createCounterName(switchIdHex,
- -1,
- packetName,
- l2Type,
- NetworkLayer.L2);
- String portL2CategoryCounterName = CounterStore.createCounterName(switchIdHex,
- packet.getInPort(),
- packetName,
- l2Type,
- NetworkLayer.L2);
-
- // Construct both port and switch L3 counter for the packet_in
- String portCounterName =
- CounterStore.createCounterName(switchIdHex,
- packet.getInPort(),
- packetName);
- String switchCounterName =
- CounterStore.createCounterName(switchIdHex,
- -1,
- packetName);
-
- String portL3CategoryCounterName =
- CounterStore.createCounterName(switchIdHex,
- packet.getInPort(),
- packetName,
- etherType,
- NetworkLayer.L3);
- String switchL3CategoryCounterName =
- CounterStore.createCounterName(switchIdHex,
- -1,
- packetName,
- etherType,
- NetworkLayer.L3);
-
- // Controller counters
- ICounter controllerCounter = getCounter(controllerCounterName);
- if (controllerCounter == null) {
- controllerCounter = createCounter(controllerCounterName,
- CounterType.LONG);
- }
- controllerCounter.increment();
- ICounter portCounter = getCounter(portCounterName);
- if (portCounter == null) {
- portCounter = createCounter(portCounterName,
- CounterType.LONG);
- }
- portCounter.increment();
- ICounter switchCounter = getCounter(switchCounterName);
- if (switchCounter == null) {
- switchCounter = createCounter(switchCounterName,
- CounterType.LONG);
- }
- switchCounter.increment();
-
- // L2 counters
- ICounter controllerL2Counter = getCounter(controllerL2CategoryCounterName);
- if (controllerL2Counter == null) {
- controllerL2Counter = createCounter(controllerL2CategoryCounterName,
- CounterType.LONG);
- }
- controllerL2Counter.increment();
- ICounter switchL2Counter = getCounter(switchL2CategoryCounterName);
- if (switchL2Counter == null) {
- switchL2Counter = createCounter(switchL2CategoryCounterName,
- CounterType.LONG);
- }
- switchL2Counter.increment();
- ICounter portL2Counter = getCounter(portL2CategoryCounterName);
- if (portL2Counter == null) {
- portL2Counter = createCounter(portL2CategoryCounterName,
- CounterType.LONG);
- }
- portL2Counter.increment();
-
- // L3 counters
- ICounter controllerL3Counter = getCounter(controllerL3CategoryCounterName);
- if (controllerL3Counter == null) {
- controllerL3Counter = createCounter(controllerL3CategoryCounterName,
- CounterType.LONG);
- }
- controllerL3Counter.increment();
- ICounter portL3Counter = getCounter(portL3CategoryCounterName);
- if (portL3Counter == null) {
- portL3Counter = createCounter(portL3CategoryCounterName,
- CounterType.LONG);
- }
- portL3Counter.increment();
- ICounter switchL3Counter = getCounter(switchL3CategoryCounterName);
- if (switchL3Counter == null) {
- switchL3Counter = createCounter(switchL3CategoryCounterName,
- CounterType.LONG);
- }
- switchL3Counter.increment();
-
- // L4 counters
- if (etherType.compareTo(CounterStore.L3ET_IPV4) == 0) {
- IPv4 ipV4 = (IPv4)eth.getPayload();
- String l4Type = String.format("%02x", ipV4.getProtocol());
- if (TypeAliases.l4TypeAliasMap != null &&
- TypeAliases.l4TypeAliasMap.containsKey(l4Type)) {
- l4Type = TypeAliases.l4TypeAliasMap.get(l4Type);
- } else {
- l4Type = "L4_" + l4Type;
- }
- String controllerL4CategoryCounterName =
- CounterStore.createCounterName(CONTROLLER_NAME,
- -1,
- packetName,
- l4Type,
- NetworkLayer.L4);
- String portL4CategoryCounterName =
- CounterStore.createCounterName(switchIdHex,
- packet.getInPort(),
- packetName,
- l4Type,
- NetworkLayer.L4);
- String switchL4CategoryCounterName =
- CounterStore.createCounterName(switchIdHex,
- -1,
- packetName,
- l4Type,
- NetworkLayer.L4);
- ICounter controllerL4Counter = getCounter(controllerL4CategoryCounterName);
- if (controllerL4Counter == null) {
- controllerL4Counter = createCounter(controllerL4CategoryCounterName,
- CounterType.LONG);
- }
- controllerL4Counter.increment();
- ICounter portL4Counter = getCounter(portL4CategoryCounterName);
- if (portL4Counter == null) {
- portL4Counter = createCounter(portL4CategoryCounterName,
- CounterType.LONG);
- }
- portL4Counter.increment();
- ICounter switchL4Counter = getCounter(switchL4CategoryCounterName);
- if (switchL4Counter == null) {
- switchL4Counter = createCounter(switchL4CategoryCounterName,
- CounterType.LONG);
- }
- switchL4Counter.increment();
- }
- }
-
- /**
- * This method can only be used to update packetOut and flowmod counters
- *
- * @param sw
- * @param ofMsg
- */
- public void updatePktOutFMCounterStore(IOFSwitch sw, OFMessage ofMsg) {
- String packetName = ofMsg.getType().toClass().getName();
- packetName = packetName.substring(packetName.lastIndexOf('.')+1);
- // flowmod is per switch and controller. portid = -1
- String controllerFMCounterName = CounterStore.createCounterName(CONTROLLER_NAME, -1, packetName);
- ICounter counter = getCounter(controllerFMCounterName);
- if (counter == null) {
- counter = createCounter(controllerFMCounterName, CounterValue.CounterType.LONG);
- }
- counter.increment();
-
- String switchFMCounterName = CounterStore.createCounterName(sw.getStringId(), -1, packetName);
- counter = getCounter(switchFMCounterName);
- if (counter == null) {
- counter = createCounter(switchFMCounterName, CounterValue.CounterType.LONG);
- }
- counter.increment();
- }
-
-
- /**
- * Create a title based on switch ID, portID, vlanID, and counterName
- * If portID is -1, the title represents the given switch only
- * If portID is a non-negative number, the title represents the port on the given switch
- */
- public static String createCounterName(String switchID, int portID, String counterName) {
- if (portID < 0) {
- return switchID + TitleDelimitor + counterName;
- } else {
- return switchID + TitleDelimitor + portID + TitleDelimitor + counterName;
- }
- }
-
- /**
- * Create a title based on switch ID, portID, vlanID, counterName, and subCategory
- * If portID is -1, the title represents the given switch only
- * If portID is a non-negative number, the title represents the port on the given switch
- * For example: PacketIns can be further categorized based on L2 etherType or L3 protocol
- */
- public static String createCounterName(String switchID, int portID, String counterName,
- String subCategory, NetworkLayer layer) {
- String fullCounterName = "";
- String groupCounterName = "";
-
- if (portID < 0) {
- groupCounterName = switchID + TitleDelimitor + counterName;
- fullCounterName = groupCounterName + TitleDelimitor + subCategory;
- } else {
- groupCounterName = switchID + TitleDelimitor + portID + TitleDelimitor + counterName;
- fullCounterName = groupCounterName + TitleDelimitor + subCategory;
- }
-
- Map<String, List<String>> counterToCategories;
- if (layeredCategories.containsKey(layer)) {
- counterToCategories = layeredCategories.get(layer);
- } else {
- counterToCategories = new ConcurrentHashMap<String, List<String>> ();
- layeredCategories.put(layer, counterToCategories);
- }
-
- List<String> categories;
- if (counterToCategories.containsKey(groupCounterName)) {
- categories = counterToCategories.get(groupCounterName);
- } else {
- categories = new ArrayList<String>();
- counterToCategories.put(groupCounterName, categories);
- }
-
- if (!categories.contains(subCategory)) {
- categories.add(subCategory);
- }
- return fullCounterName;
- }
-
- @Override
- public List<String> getAllCategories(String counterName, NetworkLayer layer) {
- if (layeredCategories.containsKey(layer)) {
- Map<String, List<String>> counterToCategories = layeredCategories.get(layer);
- if (counterToCategories.containsKey(counterName)) {
- return counterToCategories.get(counterName);
- }
- }
- return null;
- }
-
- @Override
- public ICounter createCounter(String key, CounterValue.CounterType type) {
- CounterEntry ce;
- ICounter c;
-
- c = SimpleCounter.createCounter(new Date(), type);
- ce = new CounterEntry();
- ce.counter = c;
- ce.title = key;
- nameToCEIndex.putIfAbsent(key, ce);
-
- return nameToCEIndex.get(key).counter;
- }
-
- /**
- * Post construction init method to kick off the health check and random (test) counter threads
- */
- @PostConstruct
- public void startUp() {
- this.heartbeatCounter = this.createCounter("CounterStore heartbeat", CounterValue.CounterType.LONG);
- this.randomCounter = this.createCounter("CounterStore random", CounterValue.CounterType.LONG);
- //Set a background thread to flush any liveCounters every 100 milliseconds
- Executors.newScheduledThreadPool(1).scheduleAtFixedRate(new Runnable() {
- public void run() {
- heartbeatCounter.increment();
- randomCounter.increment(new Date(), (long) (Math.random() * 100)); //TODO - pull this in to random timing
- }}, 100, 100, TimeUnit.MILLISECONDS);
- }
-
- @Override
- public ICounter getCounter(String key) {
- CounterEntry counter = nameToCEIndex.get(key);
- if (counter != null) {
- return counter.counter;
- } else {
- return null;
- }
- }
-
- /* (non-Javadoc)
- * @see net.floodlightcontroller.counter.ICounterStoreService#getAll()
- */
- @Override
- public Map<String, ICounter> getAll() {
- Map<String, ICounter> ret = new ConcurrentHashMap<String, ICounter>();
- for(Map.Entry<String, CounterEntry> counterEntry : this.nameToCEIndex.entrySet()) {
- String key = counterEntry.getKey();
- ICounter counter = counterEntry.getValue().counter;
- ret.put(key, counter);
- }
- return ret;
- }
-
- @Override
- public Collection<Class<? extends IFloodlightService>> getModuleServices() {
- Collection<Class<? extends IFloodlightService>> services =
- new ArrayList<Class<? extends IFloodlightService>>(1);
- services.add(ICounterStoreService.class);
- return services;
- }
-
- @Override
- public Map<Class<? extends IFloodlightService>, IFloodlightService>
- getServiceImpls() {
- Map<Class<? extends IFloodlightService>,
- IFloodlightService> m =
- new HashMap<Class<? extends IFloodlightService>,
- IFloodlightService>();
- m.put(ICounterStoreService.class, this);
- return m;
- }
-
- @Override
- public Collection<Class<? extends IFloodlightService>> getModuleDependencies() {
- // no-op, no dependencies
- return null;
- }
-
- @Override
- public void init(FloodlightModuleContext context)
- throws FloodlightModuleException {
- // no-op for now
- }
-
- @Override
- public void startUp(FloodlightModuleContext context) {
- // no-op for now
- }
-}
diff --git a/src/main/java/net/floodlightcontroller/counter/CounterValue.java b/src/main/java/net/floodlightcontroller/counter/CounterValue.java
deleted file mode 100644
index 1852d5c..0000000
--- a/src/main/java/net/floodlightcontroller/counter/CounterValue.java
+++ /dev/null
@@ -1,102 +0,0 @@
-/**
-* Copyright 2011, Big Switch Networks, Inc.
-* Originally created by David Erickson, Stanford University
-*
-* Licensed under the Apache License, Version 2.0 (the "License"); you may
-* not use this file except in compliance with the License. You may obtain
-* a copy of the License at
-*
-* http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-* License for the specific language governing permissions and limitations
-* under the License.
-**/
-
-package net.floodlightcontroller.counter;
-
-/**
- * The class defines the counter value type and value
- *
- * @author Kanzhe
- *
- */
-public class CounterValue {
- public enum CounterType {
- LONG,
- DOUBLE
- }
-
- protected CounterType type;
- protected long longValue;
- protected double doubleValue;
-
- public CounterValue(CounterType type) {
- this.type = CounterType.LONG;
- this.longValue = 0;
- this.doubleValue = 0.0;
- }
-
- /**
- * This method is only applicable to type long.
- * Setter() should be used for type double
- */
- public void increment(long delta) {
- if (this.type == CounterType.LONG) {
- this.longValue += delta;
- } else {
- throw new IllegalArgumentException("Invalid counter type. This counter is not a long type.");
- }
- }
-
- public void setLongValue(long value) {
- if (this.type == CounterType.LONG) {
- this.longValue = value;
- } else {
- throw new IllegalArgumentException("Invalid counter type. This counter is not a long type.");
- }
- }
-
- public void setDoubleValue(double value) {
- if (this.type == CounterType.DOUBLE) {
- this.doubleValue = value;
- } else {
- throw new IllegalArgumentException("Invalid counter type. This counter is not a double type.");
- }
- }
-
- public long getLong() {
- if (this.type == CounterType.LONG) {
- return this.longValue;
- } else {
- throw new IllegalArgumentException("Invalid counter type. This counter is not a long type.");
- }
- }
-
- public double getDouble() {
- if (this.type == CounterType.DOUBLE) {
- return this.doubleValue;
- } else {
- throw new IllegalArgumentException("Invalid counter type. This counter is not a double type.");
- }
- }
-
-
- public CounterType getType() {
- return this.type;
- }
-
- public String toString() {
- String ret = "{type: ";
- if (this.type == CounterType.DOUBLE) {
- ret += "Double" + ", value: " + this.doubleValue + "}";
- } else {
- ret += "Long" + ", value: " + this.longValue + "}";
- }
- return ret;
- }
-
-
-}
diff --git a/src/main/java/net/floodlightcontroller/counter/ICounter.java b/src/main/java/net/floodlightcontroller/counter/ICounter.java
deleted file mode 100644
index 625bebd..0000000
--- a/src/main/java/net/floodlightcontroller/counter/ICounter.java
+++ /dev/null
@@ -1,80 +0,0 @@
-/**
-* Copyright 2011, Big Switch Networks, Inc.
-* Originally created by David Erickson, Stanford University
-*
-* Licensed under the Apache License, Version 2.0 (the "License"); you may
-* not use this file except in compliance with the License. You may obtain
-* a copy of the License at
-*
-* http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-* License for the specific language governing permissions and limitations
-* under the License.
-**/
-
-/**
- * Simple interface for a counter whose value can be retrieved in several different
- * time increments (last x seconds, minutes, hours, days)
- */
-package net.floodlightcontroller.counter;
-
-import java.util.Date;
-
-/**
- * @author kyle
- *
- */
-public interface ICounter {
-
- /**
- * Most commonly used method
- */
- public void increment();
-
- /**
- * Used primarily for testing - no performance guarantees
- */
- public void increment(Date d, long delta);
-
- /**
- * Counter value setter
- */
- public void setCounter(Date d, CounterValue value);
-
- /**
- * Return the most current value
- */
- public Date getCounterDate();
-
- /**
- * Return the most current value
- */
- public CounterValue getCounterValue();
-
- /**
- * Reset the value
- */
- public void reset(Date d);
-
- /**
- * Returns a CountSeries that is a snapshot of the counter's values for the given dateSpan. (Further changes
- * to this counter won't be reflected in the CountSeries that comes back.)
- *
- * @param dateSpan
- * @return
- */
- public CountSeries snapshot(DateSpan dateSpan);
-
-
- public static enum DateSpan {
- REALTIME,
- SECONDS,
- MINUTES,
- HOURS,
- DAYS,
- WEEKS
- }
-}
diff --git a/src/main/java/net/floodlightcontroller/counter/ICounterStoreService.java b/src/main/java/net/floodlightcontroller/counter/ICounterStoreService.java
deleted file mode 100644
index c89eee0..0000000
--- a/src/main/java/net/floodlightcontroller/counter/ICounterStoreService.java
+++ /dev/null
@@ -1,71 +0,0 @@
-package net.floodlightcontroller.counter;
-
-import java.util.List;
-import java.util.Map;
-
-import org.openflow.protocol.OFMessage;
-
-import net.floodlightcontroller.core.IOFSwitch;
-import net.floodlightcontroller.core.module.IFloodlightService;
-import net.floodlightcontroller.counter.CounterStore.NetworkLayer;
-import net.floodlightcontroller.packet.Ethernet;
-
-public interface ICounterStoreService extends IFloodlightService {
-
- public final static String CONTROLLER_NAME = "controller";
- public final static String TitleDelimitor = "__";
-
- /** Broadcast and multicast */
- public final static String BROADCAST = "broadcast";
- public final static String MULTICAST = "multicast";
- public final static String UNICAST = "unicast";
-
- /** L2 EtherType subCategories */
- public final static String L3ET_IPV4 = "L3_IPv4";
-
- /**
- * Update packetIn counters
- *
- * @param sw
- * @param m
- * @param eth
- */
- public void updatePacketInCounters(IOFSwitch sw, OFMessage m, Ethernet eth);
-
- /**
- * This method can only be used to update packetOut and flowmod counters
- *
- * @param sw
- * @param ofMsg
- */
- public void updatePktOutFMCounterStore(IOFSwitch sw, OFMessage ofMsg);
-
- /**
- * Retrieve a list of subCategories by counterName.
- * null if nothing.
- */
- public List<String> getAllCategories(String counterName,
- NetworkLayer layer);
-
- /**
- * Create a new ICounter and set the title. Note that the title must be
- * unique, otherwise this will throw an IllegalArgumentException.
- *
- * @param key
- * @param type
- * @return
- */
- public ICounter createCounter(String key, CounterValue.CounterType type);
-
- /**
- * Retrieves a counter with the given title, or null if none can be found.
- */
- public ICounter getCounter(String key);
-
- /**
- * Returns an immutable map of title:counter with all of the counters in the store.
- *
- * (Note - this method may be slow - primarily for debugging/UI)
- */
- public Map<String, ICounter> getAll();
-}
diff --git a/src/main/java/net/floodlightcontroller/counter/NullCounterStore.java b/src/main/java/net/floodlightcontroller/counter/NullCounterStore.java
deleted file mode 100644
index fed8c1e..0000000
--- a/src/main/java/net/floodlightcontroller/counter/NullCounterStore.java
+++ /dev/null
@@ -1,104 +0,0 @@
-package net.floodlightcontroller.counter;
-
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Date;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-import org.openflow.protocol.OFMessage;
-
-import net.floodlightcontroller.core.IOFSwitch;
-import net.floodlightcontroller.core.module.FloodlightModuleContext;
-import net.floodlightcontroller.core.module.FloodlightModuleException;
-import net.floodlightcontroller.core.module.IFloodlightModule;
-import net.floodlightcontroller.core.module.IFloodlightService;
-import net.floodlightcontroller.counter.CounterStore.NetworkLayer;
-import net.floodlightcontroller.counter.CounterValue.CounterType;
-import net.floodlightcontroller.packet.Ethernet;
-
-/**
- * An ICounsterStoreService implementation that does nothing.
- * This is used mainly for performance testing or if you don't
- * want to use the counterstore.
- * @author alexreimers
- *
- */
-public class NullCounterStore implements IFloodlightModule,
- ICounterStoreService {
-
- private ICounter emptyCounter;
- private List<String> emptyList;
- private Map<String, ICounter> emptyMap;
-
- @Override
- public void updatePacketInCounters(IOFSwitch sw, OFMessage m, Ethernet eth) {
- // no-op
- }
-
- @Override
- public void updatePktOutFMCounterStore(IOFSwitch sw, OFMessage ofMsg) {
- // no-op
- }
-
- @Override
- public List<String>
- getAllCategories(String counterName, NetworkLayer layer) {
- return emptyList;
- }
-
- @Override
- public ICounter createCounter(String key, CounterType type) {
- return emptyCounter;
- }
-
- @Override
- public ICounter getCounter(String key) {
- return emptyCounter;
- }
-
- @Override
- public Map<String, ICounter> getAll() {
- return emptyMap;
- }
-
- @Override
- public Collection<Class<? extends IFloodlightService>> getModuleServices() {
- Collection<Class<? extends IFloodlightService>> services =
- new ArrayList<Class<? extends IFloodlightService>>(1);
- services.add(ICounterStoreService.class);
- return services;
- }
-
- @Override
- public Map<Class<? extends IFloodlightService>, IFloodlightService>
- getServiceImpls() {
- Map<Class<? extends IFloodlightService>,
- IFloodlightService> m =
- new HashMap<Class<? extends IFloodlightService>,
- IFloodlightService>();
- m.put(ICounterStoreService.class, this);
- return m;
- }
-
- @Override
- public Collection<Class<? extends IFloodlightService>>
- getModuleDependencies() {
- // None, return null
- return null;
- }
-
- @Override
- public void init(FloodlightModuleContext context)
- throws FloodlightModuleException {
- emptyCounter = new SimpleCounter(new Date(), CounterType.LONG);
- emptyList = new ArrayList<String>();
- emptyMap = new HashMap<String, ICounter>();
- }
-
- @Override
- public void startUp(FloodlightModuleContext context) {
- // no-op
- }
-}
diff --git a/src/main/java/net/floodlightcontroller/counter/SimpleCounter.java b/src/main/java/net/floodlightcontroller/counter/SimpleCounter.java
deleted file mode 100644
index 01a0428..0000000
--- a/src/main/java/net/floodlightcontroller/counter/SimpleCounter.java
+++ /dev/null
@@ -1,137 +0,0 @@
-/**
-* Copyright 2011, Big Switch Networks, Inc.
-* Originally created by David Erickson, Stanford University
-*
-* Licensed under the Apache License, Version 2.0 (the "License"); you may
-* not use this file except in compliance with the License. You may obtain
-* a copy of the License at
-*
-* http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-* License for the specific language governing permissions and limitations
-* under the License.
-**/
-
-/**
- *
- */
-package net.floodlightcontroller.counter;
-
-import java.util.Date;
-
-
-
-/**
- * This is a simple counter implementation that doesn't support data series.
- * The idea is that floodlight only keeps the realtime value for each counter,
- * statd, a statistics collection daemon, samples counters at a user-defined interval
- * and pushes the values to a database, which keeps time-based data series.
- * @author Kanzhe
- *
- */
-public class SimpleCounter implements ICounter {
-
- protected CounterValue counter;
- protected Date samplingTime;
- protected Date startDate;
-
- /**
- * Factory method to create a new counter instance.
- *
- * @param startDate
- * @return
- */
- public static ICounter createCounter(Date startDate, CounterValue.CounterType type) {
- SimpleCounter cc = new SimpleCounter(startDate, type);
- return cc;
- }
-
- /**
- * Factory method to create a copy of a counter instance.
- *
- * @param startDate
- * @return
- */
- public static ICounter createCounter(ICounter copy) {
- if (copy == null ||
- copy.getCounterDate() == null ||
- copy.getCounterValue() == null) {
- return null;
- }
-
- SimpleCounter cc = new SimpleCounter(copy.getCounterDate(),
- copy.getCounterValue().getType());
- cc.setCounter(copy.getCounterDate(), copy.getCounterValue());
- return cc;
- }
-
- /**
- * Protected constructor - use createCounter factory method instead
- * @param startDate
- */
- protected SimpleCounter(Date startDate, CounterValue.CounterType type) {
- init(startDate, type);
- }
-
- protected void init(Date startDate, CounterValue.CounterType type) {
- this.startDate = startDate;
- this.samplingTime = new Date();
- this.counter = new CounterValue(type);
- }
-
- /**
- * This is the key method that has to be both fast and very thread-safe.
- */
- @Override
- synchronized public void increment() {
- this.increment(new Date(), (long)1);
- }
-
- @Override
- synchronized public void increment(Date d, long delta) {
- this.samplingTime = d;
- this.counter.increment(delta);
- }
-
- synchronized public void setCounter(Date d, CounterValue value) {
- this.samplingTime = d;
- this.counter = value;
- }
-
- /**
- * This is the method to retrieve the current value.
- */
- @Override
- synchronized public CounterValue getCounterValue() {
- return this.counter;
- }
-
- /**
- * This is the method to retrieve the last sampling time.
- */
- @Override
- synchronized public Date getCounterDate() {
- return this.samplingTime;
- }
-
- /**
- * Reset value.
- */
- @Override
- synchronized public void reset(Date startDate) {
- init(startDate, this.counter.getType());
- }
-
- @Override
- /**
- * This method only returns the real-time value.
- */
- synchronized public CountSeries snapshot(DateSpan dateSpan) {
- long[] values = new long[1];
- values[0] = this.counter.getLong();
- return new CountSeries(this.samplingTime, DateSpan.DAYS, values);
- }
-}
diff --git a/src/main/java/net/floodlightcontroller/counter/TypeAliases.java b/src/main/java/net/floodlightcontroller/counter/TypeAliases.java
deleted file mode 100644
index 0d7e2b5..0000000
--- a/src/main/java/net/floodlightcontroller/counter/TypeAliases.java
+++ /dev/null
@@ -1,190 +0,0 @@
-package net.floodlightcontroller.counter;
-
-import java.util.HashMap;
-import java.util.Map;
-
-/**
- * Class to contain some statically initialized data
- * @author readams
- *
- */
-public class TypeAliases {
- protected static final Map<String,String> l3TypeAliasMap =
- new HashMap<String, String>();
- static {
- l3TypeAliasMap.put("0599", "L3_V1Ether");
- l3TypeAliasMap.put("0800", "L3_IPv4");
- l3TypeAliasMap.put("0806", "L3_ARP");
- l3TypeAliasMap.put("8035", "L3_RARP");
- l3TypeAliasMap.put("809b", "L3_AppleTalk");
- l3TypeAliasMap.put("80f3", "L3_AARP");
- l3TypeAliasMap.put("8100", "L3_802_1Q");
- l3TypeAliasMap.put("8137", "L3_Novell_IPX");
- l3TypeAliasMap.put("8138", "L3_Novell");
- l3TypeAliasMap.put("86dd", "L3_IPv6");
- l3TypeAliasMap.put("8847", "L3_MPLS_uni");
- l3TypeAliasMap.put("8848", "L3_MPLS_multi");
- l3TypeAliasMap.put("8863", "L3_PPPoE_DS");
- l3TypeAliasMap.put("8864", "L3_PPPoE_SS");
- l3TypeAliasMap.put("886f", "L3_MSFT_NLB");
- l3TypeAliasMap.put("8870", "L3_Jumbo");
- l3TypeAliasMap.put("889a", "L3_HyperSCSI");
- l3TypeAliasMap.put("88a2", "L3_ATA_Ethernet");
- l3TypeAliasMap.put("88a4", "L3_EtherCAT");
- l3TypeAliasMap.put("88a8", "L3_802_1ad");
- l3TypeAliasMap.put("88ab", "L3_Ether_Powerlink");
- l3TypeAliasMap.put("88cc", "L3_LLDP");
- l3TypeAliasMap.put("88cd", "L3_SERCOS_III");
- l3TypeAliasMap.put("88e5", "L3_802_1ae");
- l3TypeAliasMap.put("88f7", "L3_IEEE_1588");
- l3TypeAliasMap.put("8902", "L3_802_1ag_CFM");
- l3TypeAliasMap.put("8906", "L3_FCoE");
- l3TypeAliasMap.put("9000", "L3_Loop");
- l3TypeAliasMap.put("9100", "L3_Q_in_Q");
- l3TypeAliasMap.put("cafe", "L3_LLT");
- }
-
- protected static final Map<String,String> l4TypeAliasMap =
- new HashMap<String, String>();
- static {
- l4TypeAliasMap.put("00", "L4_HOPOPT");
- l4TypeAliasMap.put("01", "L4_ICMP");
- l4TypeAliasMap.put("02", "L4_IGAP_IGMP_RGMP");
- l4TypeAliasMap.put("03", "L4_GGP");
- l4TypeAliasMap.put("04", "L4_IP");
- l4TypeAliasMap.put("05", "L4_ST");
- l4TypeAliasMap.put("06", "L4_TCP");
- l4TypeAliasMap.put("07", "L4_UCL");
- l4TypeAliasMap.put("08", "L4_EGP");
- l4TypeAliasMap.put("09", "L4_IGRP");
- l4TypeAliasMap.put("0a", "L4_BBN");
- l4TypeAliasMap.put("0b", "L4_NVP");
- l4TypeAliasMap.put("0c", "L4_PUP");
- l4TypeAliasMap.put("0d", "L4_ARGUS");
- l4TypeAliasMap.put("0e", "L4_EMCON");
- l4TypeAliasMap.put("0f", "L4_XNET");
- l4TypeAliasMap.put("10", "L4_Chaos");
- l4TypeAliasMap.put("11", "L4_UDP");
- l4TypeAliasMap.put("12", "L4_TMux");
- l4TypeAliasMap.put("13", "L4_DCN");
- l4TypeAliasMap.put("14", "L4_HMP");
- l4TypeAliasMap.put("15", "L4_Packet_Radio");
- l4TypeAliasMap.put("16", "L4_XEROX_NS_IDP");
- l4TypeAliasMap.put("17", "L4_Trunk_1");
- l4TypeAliasMap.put("18", "L4_Trunk_2");
- l4TypeAliasMap.put("19", "L4_Leaf_1");
- l4TypeAliasMap.put("1a", "L4_Leaf_2");
- l4TypeAliasMap.put("1b", "L4_RDP");
- l4TypeAliasMap.put("1c", "L4_IRTP");
- l4TypeAliasMap.put("1d", "L4_ISO_TP4");
- l4TypeAliasMap.put("1e", "L4_NETBLT");
- l4TypeAliasMap.put("1f", "L4_MFE");
- l4TypeAliasMap.put("20", "L4_MERIT");
- l4TypeAliasMap.put("21", "L4_DCCP");
- l4TypeAliasMap.put("22", "L4_Third_Party_Connect");
- l4TypeAliasMap.put("23", "L4_IDPR");
- l4TypeAliasMap.put("24", "L4_XTP");
- l4TypeAliasMap.put("25", "L4_Datagram_Delivery");
- l4TypeAliasMap.put("26", "L4_IDPR");
- l4TypeAliasMap.put("27", "L4_TP");
- l4TypeAliasMap.put("28", "L4_ILTP");
- l4TypeAliasMap.put("29", "L4_IPv6_over_IPv4");
- l4TypeAliasMap.put("2a", "L4_SDRP");
- l4TypeAliasMap.put("2b", "L4_IPv6_RH");
- l4TypeAliasMap.put("2c", "L4_IPv6_FH");
- l4TypeAliasMap.put("2d", "L4_IDRP");
- l4TypeAliasMap.put("2e", "L4_RSVP");
- l4TypeAliasMap.put("2f", "L4_GRE");
- l4TypeAliasMap.put("30", "L4_DSR");
- l4TypeAliasMap.put("31", "L4_BNA");
- l4TypeAliasMap.put("32", "L4_ESP");
- l4TypeAliasMap.put("33", "L4_AH");
- l4TypeAliasMap.put("34", "L4_I_NLSP");
- l4TypeAliasMap.put("35", "L4_SWIPE");
- l4TypeAliasMap.put("36", "L4_NARP");
- l4TypeAliasMap.put("37", "L4_Minimal_Encapsulation");
- l4TypeAliasMap.put("38", "L4_TLSP");
- l4TypeAliasMap.put("39", "L4_SKIP");
- l4TypeAliasMap.put("3a", "L4_ICMPv6");
- l4TypeAliasMap.put("3b", "L4_IPv6_No_Next_Header");
- l4TypeAliasMap.put("3c", "L4_IPv6_Destination_Options");
- l4TypeAliasMap.put("3d", "L4_Any_host_IP");
- l4TypeAliasMap.put("3e", "L4_CFTP");
- l4TypeAliasMap.put("3f", "L4_Any_local");
- l4TypeAliasMap.put("40", "L4_SATNET");
- l4TypeAliasMap.put("41", "L4_Kryptolan");
- l4TypeAliasMap.put("42", "L4_MIT_RVDP");
- l4TypeAliasMap.put("43", "L4_Internet_Pluribus");
- l4TypeAliasMap.put("44", "L4_Distributed_FS");
- l4TypeAliasMap.put("45", "L4_SATNET");
- l4TypeAliasMap.put("46", "L4_VISA");
- l4TypeAliasMap.put("47", "L4_IP_Core");
- l4TypeAliasMap.put("4a", "L4_Wang_Span");
- l4TypeAliasMap.put("4b", "L4_Packet_Video");
- l4TypeAliasMap.put("4c", "L4_Backroom_SATNET");
- l4TypeAliasMap.put("4d", "L4_SUN_ND");
- l4TypeAliasMap.put("4e", "L4_WIDEBAND_Monitoring");
- l4TypeAliasMap.put("4f", "L4_WIDEBAND_EXPAK");
- l4TypeAliasMap.put("50", "L4_ISO_IP");
- l4TypeAliasMap.put("51", "L4_VMTP");
- l4TypeAliasMap.put("52", "L4_SECURE_VMTP");
- l4TypeAliasMap.put("53", "L4_VINES");
- l4TypeAliasMap.put("54", "L4_TTP");
- l4TypeAliasMap.put("55", "L4_NSFNET_IGP");
- l4TypeAliasMap.put("56", "L4_Dissimilar_GP");
- l4TypeAliasMap.put("57", "L4_TCF");
- l4TypeAliasMap.put("58", "L4_EIGRP");
- l4TypeAliasMap.put("59", "L4_OSPF");
- l4TypeAliasMap.put("5a", "L4_Sprite_RPC");
- l4TypeAliasMap.put("5b", "L4_Locus_ARP");
- l4TypeAliasMap.put("5c", "L4_MTP");
- l4TypeAliasMap.put("5d", "L4_AX");
- l4TypeAliasMap.put("5e", "L4_IP_within_IP");
- l4TypeAliasMap.put("5f", "L4_Mobile_ICP");
- l4TypeAliasMap.put("61", "L4_EtherIP");
- l4TypeAliasMap.put("62", "L4_Encapsulation_Header");
- l4TypeAliasMap.put("64", "L4_GMTP");
- l4TypeAliasMap.put("65", "L4_IFMP");
- l4TypeAliasMap.put("66", "L4_PNNI");
- l4TypeAliasMap.put("67", "L4_PIM");
- l4TypeAliasMap.put("68", "L4_ARIS");
- l4TypeAliasMap.put("69", "L4_SCPS");
- l4TypeAliasMap.put("6a", "L4_QNX");
- l4TypeAliasMap.put("6b", "L4_Active_Networks");
- l4TypeAliasMap.put("6c", "L4_IPPCP");
- l4TypeAliasMap.put("6d", "L4_SNP");
- l4TypeAliasMap.put("6e", "L4_Compaq_Peer_Protocol");
- l4TypeAliasMap.put("6f", "L4_IPX_in_IP");
- l4TypeAliasMap.put("70", "L4_VRRP");
- l4TypeAliasMap.put("71", "L4_PGM");
- l4TypeAliasMap.put("72", "L4_0_hop");
- l4TypeAliasMap.put("73", "L4_L2TP");
- l4TypeAliasMap.put("74", "L4_DDX");
- l4TypeAliasMap.put("75", "L4_IATP");
- l4TypeAliasMap.put("76", "L4_ST");
- l4TypeAliasMap.put("77", "L4_SRP");
- l4TypeAliasMap.put("78", "L4_UTI");
- l4TypeAliasMap.put("79", "L4_SMP");
- l4TypeAliasMap.put("7a", "L4_SM");
- l4TypeAliasMap.put("7b", "L4_PTP");
- l4TypeAliasMap.put("7c", "L4_ISIS");
- l4TypeAliasMap.put("7d", "L4_FIRE");
- l4TypeAliasMap.put("7e", "L4_CRTP");
- l4TypeAliasMap.put("7f", "L4_CRUDP");
- l4TypeAliasMap.put("80", "L4_SSCOPMCE");
- l4TypeAliasMap.put("81", "L4_IPLT");
- l4TypeAliasMap.put("82", "L4_SPS");
- l4TypeAliasMap.put("83", "L4_PIPE");
- l4TypeAliasMap.put("84", "L4_SCTP");
- l4TypeAliasMap.put("85", "L4_Fibre_Channel");
- l4TypeAliasMap.put("86", "L4_RSVP_E2E_IGNORE");
- l4TypeAliasMap.put("87", "L4_Mobility_Header");
- l4TypeAliasMap.put("88", "L4_UDP_Lite");
- l4TypeAliasMap.put("89", "L4_MPLS");
- l4TypeAliasMap.put("8a", "L4_MANET");
- l4TypeAliasMap.put("8b", "L4_HIP");
- l4TypeAliasMap.put("8c", "L4_Shim6");
- l4TypeAliasMap.put("8d", "L4_WESP");
- l4TypeAliasMap.put("8e", "L4_ROHC");
- }
-}
diff --git a/src/main/java/net/floodlightcontroller/devicemanager/internal/DeviceManagerImpl.java b/src/main/java/net/floodlightcontroller/devicemanager/internal/DeviceManagerImpl.java
index bbeaa48..9956a29 100755
--- a/src/main/java/net/floodlightcontroller/devicemanager/internal/DeviceManagerImpl.java
+++ b/src/main/java/net/floodlightcontroller/devicemanager/internal/DeviceManagerImpl.java
@@ -29,7 +29,6 @@
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
-import java.util.ListIterator;
import java.util.Map;
import java.util.Queue;
import java.util.Set;
@@ -39,9 +38,6 @@
import net.floodlightcontroller.core.FloodlightContext;
import net.floodlightcontroller.core.IFloodlightProviderService;
-import net.floodlightcontroller.core.IFloodlightProviderService.Role;
-import net.floodlightcontroller.core.IHAListener;
-import net.floodlightcontroller.core.IInfoProvider;
import net.floodlightcontroller.core.IOFMessageListener;
import net.floodlightcontroller.core.IOFSwitch;
import net.floodlightcontroller.core.IUpdate;
@@ -63,7 +59,6 @@
import net.floodlightcontroller.packet.IPv4;
import net.floodlightcontroller.packet.UDP;
import net.floodlightcontroller.restserver.IRestApiService;
-import net.floodlightcontroller.storage.IStorageSourceService;
import net.floodlightcontroller.threadpool.IThreadPoolService;
import net.floodlightcontroller.topology.ITopologyListener;
import net.floodlightcontroller.topology.ITopologyService;
@@ -85,14 +80,12 @@
*/
public class DeviceManagerImpl implements
IDeviceService, IOFMessageListener, ITopologyListener,
-IFloodlightModule, IEntityClassListener,
-IInfoProvider, IHAListener {
+IFloodlightModule, IEntityClassListener {
protected final static Logger logger =
LoggerFactory.getLogger(DeviceManagerImpl.class);
protected IFloodlightProviderService floodlightProvider;
protected ITopologyService topology;
- protected IStorageSourceService storageSource;
protected IRestApiService restApi;
protected IThreadPoolService threadPool;
@@ -551,20 +544,6 @@
deviceListeners.add(listener);
}
- // *************
- // IInfoProvider
- // *************
-
- @Override
- public Map<String, Object> getInfo(String type) {
- if (!"summary".equals(type))
- return null;
-
- Map<String, Object> info = new HashMap<String, Object>();
- info.put("# hosts", deviceMap.size());
- return info;
- }
-
// ******************
// IOFMessageListener
// ******************
@@ -628,7 +607,6 @@
Collection<Class<? extends IFloodlightService>> l =
new ArrayList<Class<? extends IFloodlightService>>();
l.add(IFloodlightProviderService.class);
- l.add(IStorageSourceService.class);
l.add(ITopologyService.class);
l.add(IRestApiService.class);
l.add(IThreadPoolService.class);
@@ -648,8 +626,6 @@
this.floodlightProvider =
fmc.getServiceImpl(IFloodlightProviderService.class);
- this.storageSource =
- fmc.getServiceImpl(IStorageSourceService.class);
this.topology =
fmc.getServiceImpl(ITopologyService.class);
this.restApi = fmc.getServiceImpl(IRestApiService.class);
@@ -668,7 +644,6 @@
apComparator = new AttachmentPointComparator();
floodlightProvider.addOFMessageListener(OFType.PACKET_IN, this);
- floodlightProvider.addHAListener(this);
if (topology != null)
topology.addListener(this);
entityClassifier.addListener(this);
@@ -693,30 +668,6 @@
}
}
- // ***************
- // IHAListener
- // ***************
-
- @Override
- public void roleChanged(Role oldRole, Role newRole) {
- switch(newRole) {
- case SLAVE:
- logger.debug("Resetting device state because of role change");
- startUp(null);
- break;
- default:
- break;
- }
- }
-
- @Override
- public void controllerNodeIPsChanged(
- Map<String, String> curControllerNodeIPs,
- Map<String, String> addedControllerNodeIPs,
- Map<String, String> removedControllerNodeIPs) {
- // no-op
- }
-
// ****************
// Internal methods
// ****************
diff --git a/src/main/java/net/floodlightcontroller/forwarding/Forwarding.java b/src/main/java/net/floodlightcontroller/forwarding/Forwarding.java
index 28369eb..a245c02 100644
--- a/src/main/java/net/floodlightcontroller/forwarding/Forwarding.java
+++ b/src/main/java/net/floodlightcontroller/forwarding/Forwarding.java
@@ -27,9 +27,6 @@
import net.floodlightcontroller.core.FloodlightContext;
import net.floodlightcontroller.core.IFloodlightProviderService;
import net.floodlightcontroller.core.IOFSwitch;
-import net.floodlightcontroller.devicemanager.IDevice;
-import net.floodlightcontroller.devicemanager.IDeviceService;
-import net.floodlightcontroller.devicemanager.SwitchPort;
import net.floodlightcontroller.core.annotations.LogMessageCategory;
import net.floodlightcontroller.core.annotations.LogMessageDoc;
import net.floodlightcontroller.core.annotations.LogMessageDocs;
@@ -38,7 +35,9 @@
import net.floodlightcontroller.core.module.IFloodlightModule;
import net.floodlightcontroller.core.module.IFloodlightService;
import net.floodlightcontroller.core.util.AppCookie;
-import net.floodlightcontroller.counter.ICounterStoreService;
+import net.floodlightcontroller.devicemanager.IDevice;
+import net.floodlightcontroller.devicemanager.IDeviceService;
+import net.floodlightcontroller.devicemanager.SwitchPort;
import net.floodlightcontroller.packet.Ethernet;
import net.floodlightcontroller.routing.ForwardingBase;
import net.floodlightcontroller.routing.IRoutingDecision;
@@ -389,7 +388,6 @@
l.add(IDeviceService.class);
l.add(IRoutingService.class);
l.add(ITopologyService.class);
- l.add(ICounterStoreService.class);
return l;
}
@@ -416,7 +414,6 @@
this.deviceManager = context.getServiceImpl(IDeviceService.class);
this.routingEngine = context.getServiceImpl(IRoutingService.class);
this.topology = context.getServiceImpl(ITopologyService.class);
- this.counterStore = context.getServiceImpl(ICounterStoreService.class);
// read our config options
Map<String, String> configOptions = context.getConfigParams(this);
diff --git a/src/main/java/net/floodlightcontroller/routing/ForwardingBase.java b/src/main/java/net/floodlightcontroller/routing/ForwardingBase.java
index 348a7af..c71ff87 100644
--- a/src/main/java/net/floodlightcontroller/routing/ForwardingBase.java
+++ b/src/main/java/net/floodlightcontroller/routing/ForwardingBase.java
@@ -18,9 +18,9 @@
package net.floodlightcontroller.routing;
import java.io.IOException;
-import java.util.EnumSet;
import java.util.ArrayList;
import java.util.Comparator;
+import java.util.EnumSet;
import java.util.Iterator;
import java.util.List;
import java.util.Set;
@@ -33,16 +33,12 @@
import net.floodlightcontroller.core.annotations.LogMessageDoc;
import net.floodlightcontroller.core.annotations.LogMessageDocs;
import net.floodlightcontroller.core.util.AppCookie;
-import net.floodlightcontroller.counter.ICounterStoreService;
import net.floodlightcontroller.devicemanager.IDevice;
import net.floodlightcontroller.devicemanager.IDeviceListener;
import net.floodlightcontroller.devicemanager.IDeviceService;
import net.floodlightcontroller.devicemanager.SwitchPort;
import net.floodlightcontroller.packet.Ethernet;
import net.floodlightcontroller.packet.IPacket;
-import net.floodlightcontroller.routing.IRoutingService;
-import net.floodlightcontroller.routing.IRoutingDecision;
-import net.floodlightcontroller.routing.Route;
import net.floodlightcontroller.topology.ITopologyService;
import net.floodlightcontroller.topology.NodePortTuple;
import net.floodlightcontroller.util.OFMessageDamper;
@@ -81,7 +77,6 @@
protected IDeviceService deviceManager;
protected IRoutingService routingEngine;
protected ITopologyService topology;
- protected ICounterStoreService counterStore;
protected OFMessageDamper messageDamper;
@@ -265,7 +260,6 @@
((OFActionOutput)fm.getActions().get(0)).setPort(outPort);
try {
- counterStore.updatePktOutFMCounterStore(sw, fm);
if (log.isTraceEnabled()) {
log.trace("Pushing Route flowmod routeIndx={} " +
"sw={} inPort={} outPort={}",
@@ -383,7 +377,6 @@
po.setLength(poLength);
try {
- counterStore.updatePktOutFMCounterStore(sw, po);
messageDamper.write(sw, po, cntx, flush);
} catch (IOException e) {
log.error("Failure writing packet out", e);
@@ -466,7 +459,6 @@
po.setLength(poLength);
try {
- counterStore.updatePktOutFMCounterStore(sw, po);
messageDamper.write(sw, po, cntx);
} catch (IOException e) {
log.error("Failure writing packet out", e);
@@ -518,7 +510,6 @@
po.setLength(poLength);
try {
- counterStore.updatePktOutFMCounterStore(sw, po);
if (log.isTraceEnabled()) {
log.trace("write broadcast packet on switch-id={} " +
"interfaces={} packet-out={}",
diff --git a/src/main/java/net/floodlightcontroller/staticflowentry/IStaticFlowEntryPusherService.java b/src/main/java/net/floodlightcontroller/staticflowentry/IStaticFlowEntryPusherService.java
deleted file mode 100644
index 66e02dd..0000000
--- a/src/main/java/net/floodlightcontroller/staticflowentry/IStaticFlowEntryPusherService.java
+++ /dev/null
@@ -1,44 +0,0 @@
-package net.floodlightcontroller.staticflowentry;
-
-import java.util.Map;
-
-import org.openflow.protocol.OFFlowMod;
-
-import net.floodlightcontroller.core.module.IFloodlightService;
-
-public interface IStaticFlowEntryPusherService extends IFloodlightService {
- /**
- * Adds a static flow.
- * @param name Name of the flow mod. Must be unique.
- * @param fm The flow to push.
- * @param swDpid The switch DPID to push it to, in 00:00:00:00:00:00:00:01 notation.
- */
- public void addFlow(String name, OFFlowMod fm, String swDpid);
-
- /**
- * Deletes a static flow
- * @param name The name of the static flow to delete.
- */
- public void deleteFlow(String name);
-
- /**
- * Deletes all static flows for a practicular switch
- * @param dpid The DPID of the switch to delete flows for.
- */
- public void deleteFlowsForSwitch(long dpid);
-
- /**
- * Deletes all flows.
- */
- public void deleteAllFlows();
-
- /**
- * Gets all list of all flows
- */
- public Map<String, Map<String, OFFlowMod>> getFlows();
-
- /**
- * Gets a list of flows by switch
- */
- public Map<String, OFFlowMod> getFlows(String dpid);
-}
diff --git a/src/main/java/net/floodlightcontroller/staticflowentry/StaticFlowEntries.java b/src/main/java/net/floodlightcontroller/staticflowentry/StaticFlowEntries.java
deleted file mode 100644
index e733843..0000000
--- a/src/main/java/net/floodlightcontroller/staticflowentry/StaticFlowEntries.java
+++ /dev/null
@@ -1,831 +0,0 @@
-package net.floodlightcontroller.staticflowentry;
-
-import java.io.IOException;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-
-import net.floodlightcontroller.core.annotations.LogMessageCategory;
-import net.floodlightcontroller.core.annotations.LogMessageDoc;
-import net.floodlightcontroller.core.util.AppCookie;
-import net.floodlightcontroller.packet.IPv4;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-
-import org.codehaus.jackson.JsonParseException;
-import org.codehaus.jackson.JsonParser;
-import org.codehaus.jackson.JsonToken;
-import org.codehaus.jackson.map.MappingJsonFactory;
-import org.openflow.protocol.OFFlowMod;
-import org.openflow.protocol.OFMatch;
-import org.openflow.protocol.OFPacketOut;
-import org.openflow.protocol.OFPort;
-import org.openflow.protocol.action.OFAction;
-import org.openflow.protocol.action.OFActionDataLayerDestination;
-import org.openflow.protocol.action.OFActionDataLayerSource;
-import org.openflow.protocol.action.OFActionEnqueue;
-import org.openflow.protocol.action.OFActionNetworkLayerDestination;
-import org.openflow.protocol.action.OFActionNetworkLayerSource;
-import org.openflow.protocol.action.OFActionNetworkTypeOfService;
-import org.openflow.protocol.action.OFActionOutput;
-import org.openflow.protocol.action.OFActionStripVirtualLan;
-import org.openflow.protocol.action.OFActionTransportLayerDestination;
-import org.openflow.protocol.action.OFActionTransportLayerSource;
-import org.openflow.protocol.action.OFActionVirtualLanIdentifier;
-import org.openflow.protocol.action.OFActionVirtualLanPriorityCodePoint;
-import org.openflow.util.HexString;
-
-/**
- * Represents static flow entries to be maintained by the controller on the
- * switches.
- */
-@LogMessageCategory("Static Flow Pusher")
-public class StaticFlowEntries {
- protected final static Logger log = LoggerFactory.getLogger(StaticFlowEntries.class);
-
- private static class SubActionStruct {
- OFAction action;
- int len;
- }
-
- private static byte[] zeroMac = new byte[] {0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
-
- /**
- * This function generates a random hash for the bottom half of the cookie
- *
- * @param fm
- * @param userCookie
- * @param name
- * @return A cookie that encodes the application ID and a hash
- */
- public static long computeEntryCookie(OFFlowMod fm, int userCookie, String name) {
- // flow-specific hash is next 20 bits LOOK! who knows if this
- int prime = 211;
- int flowHash = 2311;
- for (int i=0; i < name.length(); i++)
- flowHash = flowHash * prime + (int)name.charAt(i);
-
- return AppCookie.makeCookie(StaticFlowEntryPusher.STATIC_FLOW_APP_ID, flowHash);
- }
-
- /**
- * Sets defaults for an OFFlowMod
- * @param fm The OFFlowMod to set defaults for
- * @param entryName The name of the entry. Used to compute the cookie.
- */
- public static void initDefaultFlowMod(OFFlowMod fm, String entryName) {
- fm.setIdleTimeout((short) 0); // infinite
- fm.setHardTimeout((short) 0); // infinite
- fm.setBufferId(OFPacketOut.BUFFER_ID_NONE);
- fm.setCommand((short) 0);
- fm.setFlags((short) 0);
- fm.setOutPort(OFPort.OFPP_NONE.getValue());
- fm.setCookie(computeEntryCookie(fm, 0, entryName));
- fm.setPriority(Short.MAX_VALUE);
- }
-
- /**
- * Gets the entry name of a flow mod
- * @param fmJson The OFFlowMod in a JSON representation
- * @return The name of the OFFlowMod, null if not found
- * @throws IOException If there was an error parsing the JSON
- */
- public static String getEntryNameFromJson(String fmJson) throws IOException{
- MappingJsonFactory f = new MappingJsonFactory();
- JsonParser jp;
-
- try {
- jp = f.createJsonParser(fmJson);
- } catch (JsonParseException e) {
- throw new IOException(e);
- }
-
- jp.nextToken();
- if (jp.getCurrentToken() != JsonToken.START_OBJECT) {
- throw new IOException("Expected START_OBJECT");
- }
-
- while (jp.nextToken() != JsonToken.END_OBJECT) {
- if (jp.getCurrentToken() != JsonToken.FIELD_NAME) {
- throw new IOException("Expected FIELD_NAME");
- }
-
- String n = jp.getCurrentName();
- jp.nextToken();
- if (jp.getText().equals(""))
- continue;
-
- if (n == "name")
- return jp.getText();
- }
-
- return null;
- }
-
- /**
- * Parses an OFFlowMod (and it's inner OFMatch) to the storage entry format.
- * @param fm The FlowMod to parse
- * @param sw The switch the FlowMod is going to be installed on
- * @param name The name of this static flow entry
- * @return A Map representation of the storage entry
- */
- public static Map<String, Object> flowModToStorageEntry(OFFlowMod fm, String sw, String name) {
- Map<String, Object> entry = new HashMap<String, Object>();
- OFMatch match = fm.getMatch();
- entry.put(StaticFlowEntryPusher.COLUMN_NAME, name);
- entry.put(StaticFlowEntryPusher.COLUMN_SWITCH, sw);
- entry.put(StaticFlowEntryPusher.COLUMN_ACTIVE, Boolean.toString(true));
- entry.put(StaticFlowEntryPusher.COLUMN_PRIORITY, Short.toString(fm.getPriority()));
- entry.put(StaticFlowEntryPusher.COLUMN_WILDCARD, Integer.toString(match.getWildcards()));
-
- if ((fm.getActions() != null) && (fm.getActions().size() > 0))
- entry.put(StaticFlowEntryPusher.COLUMN_ACTIONS, StaticFlowEntries.flowModActionsToString(fm.getActions()));
-
- if (match.getInputPort() != 0)
- entry.put(StaticFlowEntryPusher.COLUMN_IN_PORT, Short.toString(match.getInputPort()));
-
- if (!Arrays.equals(match.getDataLayerSource(), zeroMac))
- entry.put(StaticFlowEntryPusher.COLUMN_DL_SRC, HexString.toHexString(match.getDataLayerSource()));
-
- if (!Arrays.equals(match.getDataLayerDestination(), zeroMac))
- entry.put(StaticFlowEntryPusher.COLUMN_DL_DST, HexString.toHexString(match.getDataLayerDestination()));
-
- if (match.getDataLayerVirtualLan() != -1)
- entry.put(StaticFlowEntryPusher.COLUMN_DL_VLAN, Short.toString(match.getDataLayerVirtualLan()));
-
- if (match.getDataLayerVirtualLanPriorityCodePoint() != 0)
- entry.put(StaticFlowEntryPusher.COLUMN_DL_VLAN_PCP, Short.toString(match.getDataLayerVirtualLanPriorityCodePoint()));
-
- if (match.getDataLayerType() != 0)
- entry.put(StaticFlowEntryPusher.COLUMN_DL_TYPE, Short.toString(match.getDataLayerType()));
-
- if (match.getNetworkTypeOfService() != 0)
- entry.put(StaticFlowEntryPusher.COLUMN_NW_TOS, Short.toString(match.getNetworkTypeOfService()));
-
- if (match.getNetworkProtocol() != 0)
- entry.put(StaticFlowEntryPusher.COLUMN_NW_PROTO, Short.toString(match.getNetworkProtocol()));
-
- if (match.getNetworkSource() != 0)
- entry.put(StaticFlowEntryPusher.COLUMN_NW_SRC, IPv4.fromIPv4Address(match.getNetworkSource()));
-
- if (match.getNetworkDestination() != 0)
- entry.put(StaticFlowEntryPusher.COLUMN_NW_DST, IPv4.fromIPv4Address(match.getNetworkDestination()));
-
- if (match.getTransportSource() != 0)
- entry.put(StaticFlowEntryPusher.COLUMN_TP_SRC, Short.toString(match.getTransportSource()));
-
- if (match.getTransportDestination() != 0)
- entry.put(StaticFlowEntryPusher.COLUMN_TP_DST, Short.toString(match.getTransportDestination()));
-
- return entry;
- }
-
- /**
- * Returns a String representation of all the openflow actions.
- * @param fmActions A list of OFActions to encode into one string
- * @return A string of the actions encoded for our database
- */
- @LogMessageDoc(level="ERROR",
- message="Could not decode action {action}",
- explanation="A static flow entry contained an invalid action",
- recommendation=LogMessageDoc.REPORT_CONTROLLER_BUG)
- private static String flowModActionsToString(List<OFAction> fmActions) {
- StringBuilder sb = new StringBuilder();
- for (OFAction a : fmActions) {
- if (sb.length() > 0) {
- sb.append(',');
- }
- switch(a.getType()) {
- case OUTPUT:
- sb.append("output=" + Short.toString(((OFActionOutput)a).getPort()));
- break;
- case OPAQUE_ENQUEUE:
- int queue = ((OFActionEnqueue)a).getQueueId();
- short port = ((OFActionEnqueue)a).getPort();
- sb.append("enqueue=" + Short.toString(port) + ":0x" + String.format("%02x", queue));
- break;
- case STRIP_VLAN:
- sb.append("strip-vlan");
- break;
- case SET_VLAN_ID:
- sb.append("set-vlan-id=" +
- Short.toString(((OFActionVirtualLanIdentifier)a).getVirtualLanIdentifier()));
- break;
- case SET_VLAN_PCP:
- sb.append("set-vlan-priority=" +
- Byte.toString(((OFActionVirtualLanPriorityCodePoint)a).getVirtualLanPriorityCodePoint()));
- break;
- case SET_DL_SRC:
- sb.append("set-src-mac=" +
- HexString.toHexString(((OFActionDataLayerSource)a).getDataLayerAddress()));
- break;
- case SET_DL_DST:
- sb.append("set-dst-mac=" +
- HexString.toHexString(((OFActionDataLayerDestination)a).getDataLayerAddress()));
- break;
- case SET_NW_TOS:
- sb.append("set-tos-bits=" +
- Byte.toString(((OFActionNetworkTypeOfService)a).getNetworkTypeOfService()));
- break;
- case SET_NW_SRC:
- sb.append("set-src-ip=" +
- IPv4.fromIPv4Address(((OFActionNetworkLayerSource)a).getNetworkAddress()));
- break;
- case SET_NW_DST:
- sb.append("set-dst-ip=" +
- IPv4.fromIPv4Address(((OFActionNetworkLayerDestination)a).getNetworkAddress()));
- break;
- case SET_TP_SRC:
- sb.append("set-src-port=" +
- Short.toString(((OFActionTransportLayerSource)a).getTransportPort()));
- break;
- case SET_TP_DST:
- sb.append("set-dst-port=" +
- Short.toString(((OFActionTransportLayerDestination)a).getTransportPort()));
- break;
- default:
- log.error("Could not decode action: {}", a);
- break;
- }
-
- }
- return sb.toString();
- }
-
- /**
- * Turns a JSON formatted Static Flow Pusher string into a storage entry
- * Expects a string in JSON along the lines of:
- * {
- * "switch": "AA:BB:CC:DD:EE:FF:00:11",
- * "name": "flow-mod-1",
- * "cookie": "0",
- * "priority": "32768",
- * "ingress-port": "1",
- * "actions": "output=2",
- * }
- * @param fmJson The JSON formatted static flow pusher entry
- * @return The map of the storage entry
- * @throws IOException If there was an error parsing the JSON
- */
- public static Map<String, Object> jsonToStorageEntry(String fmJson) throws IOException {
- Map<String, Object> entry = new HashMap<String, Object>();
- MappingJsonFactory f = new MappingJsonFactory();
- JsonParser jp;
-
- try {
- jp = f.createJsonParser(fmJson);
- } catch (JsonParseException e) {
- throw new IOException(e);
- }
-
- jp.nextToken();
- if (jp.getCurrentToken() != JsonToken.START_OBJECT) {
- throw new IOException("Expected START_OBJECT");
- }
-
- while (jp.nextToken() != JsonToken.END_OBJECT) {
- if (jp.getCurrentToken() != JsonToken.FIELD_NAME) {
- throw new IOException("Expected FIELD_NAME");
- }
-
- String n = jp.getCurrentName();
- jp.nextToken();
- if (jp.getText().equals(""))
- continue;
-
- if (n == "name")
- entry.put(StaticFlowEntryPusher.COLUMN_NAME, jp.getText());
- else if (n == "switch")
- entry.put(StaticFlowEntryPusher.COLUMN_SWITCH, jp.getText());
- else if (n == "actions")
- entry.put(StaticFlowEntryPusher.COLUMN_ACTIONS, jp.getText());
- else if (n == "priority")
- entry.put(StaticFlowEntryPusher.COLUMN_PRIORITY, jp.getText());
- else if (n == "active")
- entry.put(StaticFlowEntryPusher.COLUMN_ACTIVE, jp.getText());
- else if (n == "wildcards")
- entry.put(StaticFlowEntryPusher.COLUMN_WILDCARD, jp.getText());
- else if (n == "ingress-port")
- entry.put(StaticFlowEntryPusher.COLUMN_IN_PORT, jp.getText());
- else if (n == "src-mac")
- entry.put(StaticFlowEntryPusher.COLUMN_DL_SRC, jp.getText());
- else if (n == "dst-mac")
- entry.put(StaticFlowEntryPusher.COLUMN_DL_DST, jp.getText());
- else if (n == "vlan-id")
- entry.put(StaticFlowEntryPusher.COLUMN_DL_VLAN, jp.getText());
- else if (n == "vlan-priority")
- entry.put(StaticFlowEntryPusher.COLUMN_DL_VLAN_PCP, jp.getText());
- else if (n == "ether-type")
- entry.put(StaticFlowEntryPusher.COLUMN_DL_TYPE, jp.getText());
- else if (n == "tos-bits")
- entry.put(StaticFlowEntryPusher.COLUMN_NW_TOS, jp.getText());
- else if (n == "protocol")
- entry.put(StaticFlowEntryPusher.COLUMN_NW_PROTO, jp.getText());
- else if (n == "src-ip")
- entry.put(StaticFlowEntryPusher.COLUMN_NW_SRC, jp.getText());
- else if (n == "dst-ip")
- entry.put(StaticFlowEntryPusher.COLUMN_NW_DST, jp.getText());
- else if (n == "src-port")
- entry.put(StaticFlowEntryPusher.COLUMN_TP_SRC, jp.getText());
- else if (n == "dst-port")
- entry.put(StaticFlowEntryPusher.COLUMN_TP_DST, jp.getText());
- }
-
- return entry;
- }
-
- /**
- * Parses OFFlowMod actions from strings.
- * @param flowMod The OFFlowMod to set the actions for
- * @param actionstr The string containing all the actions
- * @param log A logger to log for errors.
- */
- @LogMessageDoc(level="ERROR",
- message="Unexpected action '{action}', '{subaction}'",
- explanation="A static flow entry contained an invalid action",
- recommendation=LogMessageDoc.REPORT_CONTROLLER_BUG)
- public static void parseActionString(OFFlowMod flowMod, String actionstr, Logger log) {
- List<OFAction> actions = new LinkedList<OFAction>();
- int actionsLength = 0;
- if (actionstr != null) {
- actionstr = actionstr.toLowerCase();
- for (String subaction : actionstr.split(",")) {
- String action = subaction.split("[=:]")[0];
- SubActionStruct subaction_struct = null;
-
- if (action.equals("output")) {
- subaction_struct = StaticFlowEntries.decode_output(subaction, log);
- }
- else if (action.equals("enqueue")) {
- subaction_struct = decode_enqueue(subaction, log);
- }
- else if (action.equals("strip-vlan")) {
- subaction_struct = decode_strip_vlan(subaction, log);
- }
- else if (action.equals("set-vlan-id")) {
- subaction_struct = decode_set_vlan_id(subaction, log);
- }
- else if (action.equals("set-vlan-priority")) {
- subaction_struct = decode_set_vlan_priority(subaction, log);
- }
- else if (action.equals("set-src-mac")) {
- subaction_struct = decode_set_src_mac(subaction, log);
- }
- else if (action.equals("set-dst-mac")) {
- subaction_struct = decode_set_dst_mac(subaction, log);
- }
- else if (action.equals("set-tos-bits")) {
- subaction_struct = decode_set_tos_bits(subaction, log);
- }
- else if (action.equals("set-src-ip")) {
- subaction_struct = decode_set_src_ip(subaction, log);
- }
- else if (action.equals("set-dst-ip")) {
- subaction_struct = decode_set_dst_ip(subaction, log);
- }
- else if (action.equals("set-src-port")) {
- subaction_struct = decode_set_src_port(subaction, log);
- }
- else if (action.equals("set-dst-port")) {
- subaction_struct = decode_set_dst_port(subaction, log);
- }
- else {
- log.error("Unexpected action '{}', '{}'", action, subaction);
- }
-
- if (subaction_struct != null) {
- actions.add(subaction_struct.action);
- actionsLength += subaction_struct.len;
- }
- }
- }
- log.debug("action {}", actions);
-
- flowMod.setActions(actions);
- flowMod.setLengthU(OFFlowMod.MINIMUM_LENGTH + actionsLength);
- }
-
- @LogMessageDoc(level="ERROR",
- message="Invalid subaction: '{subaction}'",
- explanation="A static flow entry contained an invalid subaction",
- recommendation=LogMessageDoc.REPORT_CONTROLLER_BUG)
- private static SubActionStruct decode_output(String subaction, Logger log) {
- SubActionStruct sa = null;
- Matcher n;
-
- n = Pattern.compile("output=(?:((?:0x)?\\d+)|(all)|(controller)|(local)|(ingress-port)|(normal)|(flood))").matcher(subaction);
- if (n.matches()) {
- OFActionOutput action = new OFActionOutput();
- action.setMaxLength((short) Short.MAX_VALUE);
- short port = OFPort.OFPP_NONE.getValue();
- if (n.group(1) != null) {
- try {
- port = get_short(n.group(1));
- }
- catch (NumberFormatException e) {
- log.debug("Invalid port in: '{}' (error ignored)", subaction);
- return null;
- }
- }
- else if (n.group(2) != null)
- port = OFPort.OFPP_ALL.getValue();
- else if (n.group(3) != null)
- port = OFPort.OFPP_CONTROLLER.getValue();
- else if (n.group(4) != null)
- port = OFPort.OFPP_LOCAL.getValue();
- else if (n.group(5) != null)
- port = OFPort.OFPP_IN_PORT.getValue();
- else if (n.group(6) != null)
- port = OFPort.OFPP_NORMAL.getValue();
- else if (n.group(7) != null)
- port = OFPort.OFPP_FLOOD.getValue();
- action.setPort(port);
- log.debug("action {}", action);
-
- sa = new SubActionStruct();
- sa.action = action;
- sa.len = OFActionOutput.MINIMUM_LENGTH;
- }
- else {
- log.error("Invalid subaction: '{}'", subaction);
- return null;
- }
-
- return sa;
- }
-
- private static SubActionStruct decode_enqueue(String subaction, Logger log) {
- SubActionStruct sa = null;
- Matcher n;
-
- n = Pattern.compile("enqueue=(?:((?:0x)?\\d+)\\:((?:0x)?\\d+))").matcher(subaction);
- if (n.matches()) {
- short portnum = 0;
- if (n.group(1) != null) {
- try {
- portnum = get_short(n.group(1));
- }
- catch (NumberFormatException e) {
- log.debug("Invalid port-num in: '{}' (error ignored)", subaction);
- return null;
- }
- }
-
- int queueid = 0;
- if (n.group(2) != null) {
- try {
- queueid = get_int(n.group(2));
- }
- catch (NumberFormatException e) {
- log.debug("Invalid queue-id in: '{}' (error ignored)", subaction);
- return null;
- }
- }
-
- OFActionEnqueue action = new OFActionEnqueue();
- action.setPort(portnum);
- action.setQueueId(queueid);
- log.debug("action {}", action);
-
- sa = new SubActionStruct();
- sa.action = action;
- sa.len = OFActionEnqueue.MINIMUM_LENGTH;
- }
- else {
- log.debug("Invalid action: '{}'", subaction);
- return null;
- }
-
- return sa;
- }
-
- private static SubActionStruct decode_strip_vlan(String subaction, Logger log) {
- SubActionStruct sa = null;
- Matcher n = Pattern.compile("strip-vlan").matcher(subaction);
-
- if (n.matches()) {
- OFActionStripVirtualLan action = new OFActionStripVirtualLan();
- log.debug("action {}", action);
-
- sa = new SubActionStruct();
- sa.action = action;
- sa.len = OFActionStripVirtualLan.MINIMUM_LENGTH;
- }
- else {
- log.debug("Invalid action: '{}'", subaction);
- return null;
- }
-
- return sa;
- }
-
- private static SubActionStruct decode_set_vlan_id(String subaction, Logger log) {
- SubActionStruct sa = null;
- Matcher n = Pattern.compile("set-vlan-id=((?:0x)?\\d+)").matcher(subaction);
-
- if (n.matches()) {
- if (n.group(1) != null) {
- try {
- short vlanid = get_short(n.group(1));
- OFActionVirtualLanIdentifier action = new OFActionVirtualLanIdentifier();
- action.setVirtualLanIdentifier(vlanid);
- log.debug(" action {}", action);
-
- sa = new SubActionStruct();
- sa.action = action;
- sa.len = OFActionVirtualLanIdentifier.MINIMUM_LENGTH;
- }
- catch (NumberFormatException e) {
- log.debug("Invalid VLAN in: {} (error ignored)", subaction);
- return null;
- }
- }
- }
- else {
- log.debug("Invalid action: '{}'", subaction);
- return null;
- }
-
- return sa;
- }
-
- private static SubActionStruct decode_set_vlan_priority(String subaction, Logger log) {
- SubActionStruct sa = null;
- Matcher n = Pattern.compile("set-vlan-priority=((?:0x)?\\d+)").matcher(subaction);
-
- if (n.matches()) {
- if (n.group(1) != null) {
- try {
- byte prior = get_byte(n.group(1));
- OFActionVirtualLanPriorityCodePoint action = new OFActionVirtualLanPriorityCodePoint();
- action.setVirtualLanPriorityCodePoint(prior);
- log.debug(" action {}", action);
-
- sa = new SubActionStruct();
- sa.action = action;
- sa.len = OFActionVirtualLanPriorityCodePoint.MINIMUM_LENGTH;
- }
- catch (NumberFormatException e) {
- log.debug("Invalid VLAN priority in: {} (error ignored)", subaction);
- return null;
- }
- }
- }
- else {
- log.debug("Invalid action: '{}'", subaction);
- return null;
- }
-
- return sa;
- }
-
- private static SubActionStruct decode_set_src_mac(String subaction, Logger log) {
- SubActionStruct sa = null;
- Matcher n = Pattern.compile("set-src-mac=(?:(\\p{XDigit}+)\\:(\\p{XDigit}+)\\:(\\p{XDigit}+)\\:(\\p{XDigit}+)\\:(\\p{XDigit}+)\\:(\\p{XDigit}+))").matcher(subaction);
-
- if (n.matches()) {
- byte[] macaddr = get_mac_addr(n, subaction, log);
- if (macaddr != null) {
- OFActionDataLayerSource action = new OFActionDataLayerSource();
- action.setDataLayerAddress(macaddr);
- log.debug("action {}", action);
-
- sa = new SubActionStruct();
- sa.action = action;
- sa.len = OFActionDataLayerSource.MINIMUM_LENGTH;
- }
- }
- else {
- log.debug("Invalid action: '{}'", subaction);
- return null;
- }
-
- return sa;
- }
-
- private static SubActionStruct decode_set_dst_mac(String subaction, Logger log) {
- SubActionStruct sa = null;
- Matcher n = Pattern.compile("set-dst-mac=(?:(\\p{XDigit}+)\\:(\\p{XDigit}+)\\:(\\p{XDigit}+)\\:(\\p{XDigit}+)\\:(\\p{XDigit}+)\\:(\\p{XDigit}+))").matcher(subaction);
-
- if (n.matches()) {
- byte[] macaddr = get_mac_addr(n, subaction, log);
- if (macaddr != null) {
- OFActionDataLayerDestination action = new OFActionDataLayerDestination();
- action.setDataLayerAddress(macaddr);
- log.debug(" action {}", action);
-
- sa = new SubActionStruct();
- sa.action = action;
- sa.len = OFActionDataLayerDestination.MINIMUM_LENGTH;
- }
- }
- else {
- log.debug("Invalid action: '{}'", subaction);
- return null;
- }
-
- return sa;
- }
-
- private static SubActionStruct decode_set_tos_bits(String subaction, Logger log) {
- SubActionStruct sa = null;
- Matcher n = Pattern.compile("set-tos-bits=((?:0x)?\\d+)").matcher(subaction);
-
- if (n.matches()) {
- if (n.group(1) != null) {
- try {
- byte tosbits = get_byte(n.group(1));
- OFActionNetworkTypeOfService action = new OFActionNetworkTypeOfService();
- action.setNetworkTypeOfService(tosbits);
- log.debug(" action {}", action);
-
- sa = new SubActionStruct();
- sa.action = action;
- sa.len = OFActionNetworkTypeOfService.MINIMUM_LENGTH;
- }
- catch (NumberFormatException e) {
- log.debug("Invalid dst-port in: {} (error ignored)", subaction);
- return null;
- }
- }
- }
- else {
- log.debug("Invalid action: '{}'", subaction);
- return null;
- }
-
- return sa;
- }
-
- private static SubActionStruct decode_set_src_ip(String subaction, Logger log) {
- SubActionStruct sa = null;
- Matcher n = Pattern.compile("set-src-ip=(?:(\\d+)\\.(\\d+)\\.(\\d+)\\.(\\d+))").matcher(subaction);
-
- if (n.matches()) {
- int ipaddr = get_ip_addr(n, subaction, log);
- OFActionNetworkLayerSource action = new OFActionNetworkLayerSource();
- action.setNetworkAddress(ipaddr);
- log.debug(" action {}", action);
-
- sa = new SubActionStruct();
- sa.action = action;
- sa.len = OFActionNetworkLayerSource.MINIMUM_LENGTH;
- }
- else {
- log.debug("Invalid action: '{}'", subaction);
- return null;
- }
-
- return sa;
- }
-
- private static SubActionStruct decode_set_dst_ip(String subaction, Logger log) {
- SubActionStruct sa = null;
- Matcher n = Pattern.compile("set-dst-ip=(?:(\\d+)\\.(\\d+)\\.(\\d+)\\.(\\d+))").matcher(subaction);
-
- if (n.matches()) {
- int ipaddr = get_ip_addr(n, subaction, log);
- OFActionNetworkLayerDestination action = new OFActionNetworkLayerDestination();
- action.setNetworkAddress(ipaddr);
- log.debug("action {}", action);
-
- sa = new SubActionStruct();
- sa.action = action;
- sa.len = OFActionNetworkLayerDestination.MINIMUM_LENGTH;
- }
- else {
- log.debug("Invalid action: '{}'", subaction);
- return null;
- }
-
- return sa;
- }
-
- private static SubActionStruct decode_set_src_port(String subaction, Logger log) {
- SubActionStruct sa = null;
- Matcher n = Pattern.compile("set-src-port=((?:0x)?\\d+)").matcher(subaction);
-
- if (n.matches()) {
- if (n.group(1) != null) {
- try {
- short portnum = get_short(n.group(1));
- OFActionTransportLayerSource action = new OFActionTransportLayerSource();
- action.setTransportPort(portnum);
- log.debug("action {}", action);
-
- sa = new SubActionStruct();
- sa.action = action;
- sa.len = OFActionTransportLayerSource.MINIMUM_LENGTH;;
- }
- catch (NumberFormatException e) {
- log.debug("Invalid src-port in: {} (error ignored)", subaction);
- return null;
- }
- }
- }
- else {
- log.debug("Invalid action: '{}'", subaction);
- return null;
- }
-
- return sa;
- }
-
- private static SubActionStruct decode_set_dst_port(String subaction, Logger log) {
- SubActionStruct sa = null;
- Matcher n = Pattern.compile("set-dst-port=((?:0x)?\\d+)").matcher(subaction);
-
- if (n.matches()) {
- if (n.group(1) != null) {
- try {
- short portnum = get_short(n.group(1));
- OFActionTransportLayerDestination action = new OFActionTransportLayerDestination();
- action.setTransportPort(portnum);
- log.debug("action {}", action);
-
- sa = new SubActionStruct();
- sa.action = action;
- sa.len = OFActionTransportLayerDestination.MINIMUM_LENGTH;;
- }
- catch (NumberFormatException e) {
- log.debug("Invalid dst-port in: {} (error ignored)", subaction);
- return null;
- }
- }
- }
- else {
- log.debug("Invalid action: '{}'", subaction);
- return null;
- }
-
- return sa;
- }
-
- private static byte[] get_mac_addr(Matcher n, String subaction, Logger log) {
- byte[] macaddr = new byte[6];
-
- for (int i=0; i<6; i++) {
- if (n.group(i+1) != null) {
- try {
- macaddr[i] = get_byte("0x" + n.group(i+1));
- }
- catch (NumberFormatException e) {
- log.debug("Invalid src-mac in: '{}' (error ignored)", subaction);
- return null;
- }
- }
- else {
- log.debug("Invalid src-mac in: '{}' (null, error ignored)", subaction);
- return null;
- }
- }
-
- return macaddr;
- }
-
- private static int get_ip_addr(Matcher n, String subaction, Logger log) {
- int ipaddr = 0;
-
- for (int i=0; i<4; i++) {
- if (n.group(i+1) != null) {
- try {
- ipaddr = ipaddr<<8;
- ipaddr = ipaddr | get_int(n.group(i+1));
- }
- catch (NumberFormatException e) {
- log.debug("Invalid src-ip in: '{}' (error ignored)", subaction);
- return 0;
- }
- }
- else {
- log.debug("Invalid src-ip in: '{}' (null, error ignored)", subaction);
- return 0;
- }
- }
-
- return ipaddr;
- }
-
- // Parse int as decimal, hex (start with 0x or #) or octal (starts with 0)
- private static int get_int(String str) {
- return (int)Integer.decode(str);
- }
-
- // Parse short as decimal, hex (start with 0x or #) or octal (starts with 0)
- private static short get_short(String str) {
- return (short)(int)Integer.decode(str);
- }
-
- // Parse byte as decimal, hex (start with 0x or #) or octal (starts with 0)
- private static byte get_byte(String str) {
- return Integer.decode(str).byteValue();
- }
-
-}
-
diff --git a/src/main/java/net/floodlightcontroller/staticflowentry/StaticFlowEntryPusher.java b/src/main/java/net/floodlightcontroller/staticflowentry/StaticFlowEntryPusher.java
deleted file mode 100644
index d816d66..0000000
--- a/src/main/java/net/floodlightcontroller/staticflowentry/StaticFlowEntryPusher.java
+++ /dev/null
@@ -1,679 +0,0 @@
-package net.floodlightcontroller.staticflowentry;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.Comparator;
-import java.util.Iterator;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Set;
-import java.util.concurrent.ConcurrentHashMap;
-
-import net.floodlightcontroller.core.FloodlightContext;
-import net.floodlightcontroller.core.IFloodlightProviderService;
-import net.floodlightcontroller.core.IFloodlightProviderService.Role;
-import net.floodlightcontroller.core.IHAListener;
-import net.floodlightcontroller.core.IOFMessageListener;
-import net.floodlightcontroller.core.IOFSwitch;
-import net.floodlightcontroller.core.IOFSwitchListener;
-import net.floodlightcontroller.core.annotations.LogMessageCategory;
-import net.floodlightcontroller.core.annotations.LogMessageDoc;
-import net.floodlightcontroller.core.module.FloodlightModuleContext;
-import net.floodlightcontroller.core.module.FloodlightModuleException;
-import net.floodlightcontroller.core.module.IFloodlightModule;
-import net.floodlightcontroller.core.module.IFloodlightService;
-import net.floodlightcontroller.core.util.AppCookie;
-import net.floodlightcontroller.restserver.IRestApiService;
-import net.floodlightcontroller.staticflowentry.web.StaticFlowEntryWebRoutable;
-import net.floodlightcontroller.staticflowentry.IStaticFlowEntryPusherService;
-import net.floodlightcontroller.storage.IResultSet;
-import net.floodlightcontroller.storage.IStorageSourceService;
-import net.floodlightcontroller.storage.IStorageSourceListener;
-
-import net.floodlightcontroller.storage.StorageException;
-import org.openflow.protocol.OFFlowMod;
-import org.openflow.protocol.OFFlowRemoved;
-import org.openflow.protocol.OFMatch;
-import org.openflow.protocol.OFMessage;
-import org.openflow.protocol.OFType;
-import org.openflow.protocol.factory.BasicFactory;
-import org.openflow.util.HexString;
-import org.openflow.util.U16;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-@LogMessageCategory("Static Flow Pusher")
-/**
- * This module is responsible for maintaining a set of static flows on
- * switches. This is just a big 'ol dumb list of flows and something external
- * is responsible for ensuring they make sense for the network.
- */
-public class StaticFlowEntryPusher
- implements IOFSwitchListener, IFloodlightModule, IStaticFlowEntryPusherService,
- IStorageSourceListener, IOFMessageListener, IHAListener {
- protected final static Logger log = LoggerFactory.getLogger(StaticFlowEntryPusher.class);
- public static final String StaticFlowName = "staticflowentry";
-
- public static final int STATIC_FLOW_APP_ID = 10;
-
- public static final String TABLE_NAME = "controller_staticflowtableentry";
- public static final String COLUMN_NAME = "name";
- public static final String COLUMN_SWITCH = "switch_id";
- public static final String COLUMN_ACTIVE = "active";
- public static final String COLUMN_IDLE_TIMEOUT = "idle_timeout";
- public static final String COLUMN_HARD_TIMEOUT = "hard_timeout";
- public static final String COLUMN_PRIORITY = "priority";
- public static final String COLUMN_COOKIE = "cookie";
- public static final String COLUMN_WILDCARD = "wildcards";
- public static final String COLUMN_IN_PORT = "in_port";
- public static final String COLUMN_DL_SRC = "dl_src";
- public static final String COLUMN_DL_DST = "dl_dst";
- public static final String COLUMN_DL_VLAN = "dl_vlan";
- public static final String COLUMN_DL_VLAN_PCP = "dl_vlan_pcp";
- public static final String COLUMN_DL_TYPE = "dl_type";
- public static final String COLUMN_NW_TOS = "nw_tos";
- public static final String COLUMN_NW_PROTO = "nw_proto";
- public static final String COLUMN_NW_SRC = "nw_src"; // includes CIDR-style
- // netmask, e.g.
- // "128.8.128.0/24"
- public static final String COLUMN_NW_DST = "nw_dst";
- public static final String COLUMN_TP_DST = "tp_dst";
- public static final String COLUMN_TP_SRC = "tp_src";
- public static final String COLUMN_ACTIONS = "actions";
- public static String ColumnNames[] = { COLUMN_NAME, COLUMN_SWITCH,
- COLUMN_ACTIVE, COLUMN_IDLE_TIMEOUT, COLUMN_HARD_TIMEOUT,
- COLUMN_PRIORITY, COLUMN_COOKIE, COLUMN_WILDCARD, COLUMN_IN_PORT,
- COLUMN_DL_SRC, COLUMN_DL_DST, COLUMN_DL_VLAN, COLUMN_DL_VLAN_PCP,
- COLUMN_DL_TYPE, COLUMN_NW_TOS, COLUMN_NW_PROTO, COLUMN_NW_SRC,
- COLUMN_NW_DST, COLUMN_TP_DST, COLUMN_TP_SRC, COLUMN_ACTIONS };
-
-
- protected IFloodlightProviderService floodlightProvider;
- protected IStorageSourceService storageSource;
- protected IRestApiService restApi;
-
- // Map<DPID, Map<Name, FlowMod>> ; FlowMod can be null to indicate non-active
- protected Map<String, Map<String, OFFlowMod>> entriesFromStorage;
- // Entry Name -> DPID of Switch it's on
- protected Map<String, String> entry2dpid;
-
- private BasicFactory ofMessageFactory;
-
- // Class to sort FlowMod's by priority, from lowest to highest
- class FlowModSorter implements Comparator<String> {
- private String dpid;
- public FlowModSorter(String dpid) {
- this.dpid = dpid;
- }
- @Override
- public int compare(String o1, String o2) {
- OFFlowMod f1 = entriesFromStorage.get(dpid).get(o1);
- OFFlowMod f2 = entriesFromStorage.get(dpid).get(o2);
- if (f1 == null || f2 == null) // sort active=false flows by key
- return o1.compareTo(o2);
- return U16.f(f1.getPriority()) - U16.f(f2.getPriority());
- }
- };
-
- /**
- * used for debugging and unittests
- * @return the number of static flow entries as cached from storage
- */
- public int countEntries() {
- int size = 0;
- if (entriesFromStorage == null)
- return 0;
- for (String ofswitch : entriesFromStorage.keySet())
- size += entriesFromStorage.get(ofswitch).size();
- return size;
- }
-
- public IFloodlightProviderService getFloodlightProvider() {
- return floodlightProvider;
- }
-
- public void setFloodlightProvider(IFloodlightProviderService floodlightProvider) {
- this.floodlightProvider = floodlightProvider;
- }
-
- public void setStorageSource(IStorageSourceService storageSource) {
- this.storageSource = storageSource;
- }
-
- /**
- * Reads from our entriesFromStorage for the specified switch and
- * sends the FlowMods down to the controller in <b>sorted</b> order.
- *
- * Sorted is important to maintain correctness of the switch:
- * if a packet would match both a lower and a higher priority
- * rule, then we want it to match the higher priority or nothing,
- * but never just the lower priority one. Inserting from high to
- * low priority fixes this.
- *
- * TODO consider adding a "block all" flow mod and then removing it
- * while starting up.
- *
- * @param sw The switch to send entries to
- */
- protected void sendEntriesToSwitch(IOFSwitch sw) {
- String dpid = sw.getStringId();
-
- if ((entriesFromStorage != null) && (entriesFromStorage.containsKey(dpid))) {
- Map<String, OFFlowMod> entries = entriesFromStorage.get(dpid);
- List<String> sortedList = new ArrayList<String>(entries.keySet());
- // weird that Collections.sort() returns void
- Collections.sort( sortedList, new FlowModSorter(dpid));
- for (String entryName : sortedList) {
- OFFlowMod flowMod = entries.get(entryName);
- if (flowMod != null) {
- if (log.isDebugEnabled()) {
- log.debug("Pushing static entry {} for {}", dpid, entryName);
- }
- writeFlowModToSwitch(sw, flowMod);
- }
- }
- }
- }
-
- /**
- * Used only for bundle-local indexing
- *
- * @param map
- * @return
- */
-
- protected Map<String, String> computeEntry2DpidMap(
- Map<String, Map<String, OFFlowMod>> map) {
- Map<String, String> ret = new HashMap<String, String>();
- for(String dpid : map.keySet()) {
- for( String entry: map.get(dpid).keySet())
- ret.put(entry, dpid);
- }
- return ret;
- }
-
- /**
- * Read entries from storageSource, and store them in a hash
- *
- * @return
- */
- @LogMessageDoc(level="ERROR",
- message="failed to access storage: {reason}",
- explanation="Could not retrieve static flows from the system " +
- "database",
- recommendation=LogMessageDoc.CHECK_CONTROLLER)
- private Map<String, Map<String, OFFlowMod>> readEntriesFromStorage() {
- Map<String, Map<String, OFFlowMod>> entries = new ConcurrentHashMap<String, Map<String, OFFlowMod>>();
- try {
- Map<String, Object> row;
- // null1=no predicate, null2=no ordering
- IResultSet resultSet = storageSource.executeQuery(TABLE_NAME,
- ColumnNames, null, null);
- for (Iterator<IResultSet> it = resultSet.iterator(); it.hasNext();) {
- row = it.next().getRow();
- parseRow(row, entries);
- }
- } catch (StorageException e) {
- log.error("failed to access storage: {}", e.getMessage());
- // if the table doesn't exist, then wait to populate later via
- // setStorageSource()
- }
- return entries;
- }
-
- /**
- * Take a single row, turn it into a flowMod, and add it to the
- * entries{$dpid}.{$entryName}=FlowMod
- *
- * IF an entry is in active, mark it with FlowMod = null
- *
- * @param row
- * @param entries
- */
-
- void parseRow(Map<String, Object> row,
- Map<String, Map<String, OFFlowMod>> entries) {
- String switchName = null;
- String entryName = null;
-
- StringBuffer matchString = new StringBuffer();
- if (ofMessageFactory == null) // lazy init
- ofMessageFactory = new BasicFactory();
-
- OFFlowMod flowMod = (OFFlowMod) ofMessageFactory
- .getMessage(OFType.FLOW_MOD);
-
- if (!row.containsKey(COLUMN_SWITCH) || !row.containsKey(COLUMN_NAME)) {
- log.debug(
- "skipping entry with missing required 'switch' or 'name' entry: {}",
- row);
- return;
- }
- // most error checking done with ClassCastException
- try {
- // first, snag the required entries, for debugging info
- switchName = (String) row.get(COLUMN_SWITCH);
- entryName = (String) row.get(COLUMN_NAME);
- if (!entries.containsKey(switchName))
- entries.put(switchName, new HashMap<String, OFFlowMod>());
- StaticFlowEntries.initDefaultFlowMod(flowMod, entryName);
-
- for (String key : row.keySet()) {
- if (row.get(key) == null)
- continue;
- if ( key.equals(COLUMN_SWITCH) || key.equals(COLUMN_NAME)
- || key.equals("id"))
- continue; // already handled
- // explicitly ignore timeouts and wildcards
- if ( key.equals(COLUMN_HARD_TIMEOUT) || key.equals(COLUMN_IDLE_TIMEOUT) ||
- key.equals(COLUMN_WILDCARD))
- continue;
- if ( key.equals(COLUMN_ACTIVE)) {
- if (! Boolean.valueOf((String) row.get(COLUMN_ACTIVE))) {
- log.debug("skipping inactive entry {} for switch {}",
- entryName, switchName);
- entries.get(switchName).put(entryName, null); // mark this an inactive
- return;
- }
- } else if ( key.equals(COLUMN_ACTIONS)){
- StaticFlowEntries.parseActionString(flowMod, (String) row.get(COLUMN_ACTIONS), log);
- } else if ( key.equals(COLUMN_COOKIE)) {
- flowMod.setCookie(
- StaticFlowEntries.computeEntryCookie(flowMod,
- Integer.valueOf((String) row.get(COLUMN_COOKIE)),
- entryName)
- );
- } else if ( key.equals(COLUMN_PRIORITY)) {
- flowMod.setPriority(U16.t(Integer.valueOf((String) row.get(COLUMN_PRIORITY))));
- } else { // the rest of the keys are for OFMatch().fromString()
- if (matchString.length() > 0)
- matchString.append(",");
- matchString.append(key + "=" + row.get(key).toString());
- }
- }
- } catch (ClassCastException e) {
- if (entryName != null && switchName != null)
- log.debug(
- "skipping entry {} on switch {} with bad data : "
- + e.getMessage(), entryName, switchName);
- else
- log.debug("skipping entry with bad data: {} :: {} ",
- e.getMessage(), e.getStackTrace());
- }
-
- OFMatch ofMatch = new OFMatch();
- String match = matchString.toString();
- try {
- ofMatch.fromString(match);
- } catch (IllegalArgumentException e) {
- log.debug(
- "ignoring flow entry {} on switch {} with illegal OFMatch() key: "
- + match, entryName, switchName);
- return;
- }
- flowMod.setMatch(ofMatch);
-
- entries.get(switchName).put(entryName, flowMod);
- }
-
- @Override
- public void addedSwitch(IOFSwitch sw) {
- log.debug("addedSwitch {}; processing its static entries", sw);
- sendEntriesToSwitch(sw);
- }
-
- @Override
- public void removedSwitch(IOFSwitch sw) {
- log.debug("removedSwitch {}", sw);
- // do NOT delete from our internal state; we're tracking the rules,
- // not the switches
- }
-
- @Override
- public void switchPortChanged(Long switchId) {
- // no-op
- }
-
- /**
- * This handles both rowInsert() and rowUpdate()
- */
-
- @Override
- public void rowsModified(String tableName, Set<Object> rowKeys) {
- log.debug("Modifying Table {}", tableName);
-
- HashMap<String, Map<String, OFFlowMod>> entriesToAdd =
- new HashMap<String, Map<String, OFFlowMod>>();
- // build up list of what was added
- for(Object key: rowKeys) {
- IResultSet resultSet = storageSource.getRow(tableName, key);
- for (Iterator<IResultSet> it = resultSet.iterator(); it.hasNext();) {
- Map<String, Object> row = it.next().getRow();
- parseRow(row, entriesToAdd);
- }
- }
- // batch updates by switch and blast them out
- for (String dpid : entriesToAdd.keySet()) {
- if (!entriesFromStorage.containsKey(dpid))
- entriesFromStorage.put(dpid, new HashMap<String, OFFlowMod>());
- List<OFMessage> outQueue = new ArrayList<OFMessage>();
- for(String entry : entriesToAdd.get(dpid).keySet()) {
- OFFlowMod newFlowMod = entriesToAdd.get(dpid).get(entry);
- OFFlowMod oldFlowMod = entriesFromStorage.get(dpid).get(entry);
- if (oldFlowMod != null) { // remove any pre-existing rule
- oldFlowMod.setCommand(OFFlowMod.OFPFC_DELETE_STRICT);
- outQueue.add(oldFlowMod);
- }
- if (newFlowMod != null) {
- entriesFromStorage.get(dpid).put(entry, newFlowMod);
- outQueue.add(newFlowMod);
- entry2dpid.put(entry, dpid);
- } else {
- entriesFromStorage.get(dpid).remove(entry);
- entry2dpid.remove(entry);
- }
- }
-
- writeOFMessagesToSwitch(HexString.toLong(dpid), outQueue);
- }
- }
-
- @Override
- public void rowsDeleted(String tableName, Set<Object> rowKeys) {
- if (log.isDebugEnabled()) {
- log.debug("deleting from Table {}", tableName);
- }
-
- for(Object obj : rowKeys) {
- if (!(obj instanceof String)) {
- log.debug("tried to delete non-string key {}; ignoring", obj);
- continue;
- }
- deleteStaticFlowEntry((String) obj);
- }
- }
-
- @LogMessageDoc(level="ERROR",
- message="inconsistent internal state: no switch has rule {rule}",
- explanation="Inconsistent internat state discovered while " +
- "deleting a static flow rule",
- recommendation=LogMessageDoc.REPORT_CONTROLLER_BUG)
- private boolean deleteStaticFlowEntry(String entryName) {
- String dpid = entry2dpid.get(entryName);
- if (log.isDebugEnabled()) {
- log.debug("Deleting flow {} for switch {}", entryName, dpid);
- }
- if (dpid == null) {
- log.error("inconsistent internal state: no switch has rule {}",
- entryName);
- return false;
- }
-
- // send flow_mod delete
- OFFlowMod flowMod = entriesFromStorage.get(dpid).get(entryName);
- flowMod.setCommand(OFFlowMod.OFPFC_DELETE_STRICT);
-
- if (entriesFromStorage.containsKey(dpid) &&
- entriesFromStorage.get(dpid).containsKey(entryName)) {
- entriesFromStorage.get(dpid).remove(entryName);
- } else {
- log.debug("Tried to delete non-existent entry {} for switch {}",
- entryName, dpid);
- return false;
- }
-
- writeFlowModToSwitch(HexString.toLong(dpid), flowMod);
- return true;
- }
-
- /**
- * Writes a list of OFMessages to a switch
- * @param dpid The datapath ID of the switch to write to
- * @param messages The list of OFMessages to write.
- */
- @LogMessageDoc(level="ERROR",
- message="Tried to write to switch {switch} but got {error}",
- explanation="An I/O error occured while trying to write a " +
- "static flow to a switch",
- recommendation=LogMessageDoc.CHECK_SWITCH)
- private void writeOFMessagesToSwitch(long dpid, List<OFMessage> messages) {
- IOFSwitch ofswitch = floodlightProvider.getSwitches().get(dpid);
- if (ofswitch != null) { // is the switch connected
- try {
- if (log.isDebugEnabled()) {
- log.debug("Sending {} new entries to {}", messages.size(), dpid);
- }
- ofswitch.write(messages, null);
- ofswitch.flush();
- } catch (IOException e) {
- log.error("Tried to write to switch {} but got {}", dpid, e.getMessage());
- }
- }
- }
-
- /**
- * Writes an OFFlowMod to a switch. It checks to make sure the switch
- * exists before it sends
- * @param dpid The data to write the flow mod to
- * @param flowMod The OFFlowMod to write
- */
- private void writeFlowModToSwitch(long dpid, OFFlowMod flowMod) {
- Map<Long,IOFSwitch> switches = floodlightProvider.getSwitches();
- IOFSwitch ofSwitch = switches.get(dpid);
- if (ofSwitch == null) {
- if (log.isDebugEnabled()) {
- log.debug("Not deleting key {} :: switch {} not connected",
- dpid);
- }
- return;
- }
- writeFlowModToSwitch(ofSwitch, flowMod);
- }
-
- /**
- * Writes an OFFlowMod to a switch
- * @param sw The IOFSwitch to write to
- * @param flowMod The OFFlowMod to write
- */
- @LogMessageDoc(level="ERROR",
- message="Tried to write OFFlowMod to {switch} but got {error}",
- explanation="An I/O error occured while trying to write a " +
- "static flow to a switch",
- recommendation=LogMessageDoc.CHECK_SWITCH)
- private void writeFlowModToSwitch(IOFSwitch sw, OFFlowMod flowMod) {
- try {
- sw.write(flowMod, null);
- sw.flush();
- } catch (IOException e) {
- log.error("Tried to write OFFlowMod to {} but failed: {}",
- HexString.toHexString(sw.getId()), e.getMessage());
- }
- }
-
- @Override
- public String getName() {
- return StaticFlowName;
- }
-
- @Override
- @LogMessageDoc(level="ERROR",
- message="Got a FlowRemove message for a infinite " +
- "timeout flow: {flow} from switch {switch}",
- explanation="Flows with infinite timeouts should not expire. " +
- "The switch has expired the flow anyway.",
- recommendation=LogMessageDoc.REPORT_SWITCH_BUG)
- public Command receive(IOFSwitch sw, OFMessage msg, FloodlightContext cntx) {
- switch (msg.getType()) {
- case FLOW_REMOVED:
- break;
- default:
- return Command.CONTINUE;
- }
- OFFlowRemoved flowRemoved = (OFFlowRemoved) msg;
- long cookie = flowRemoved.getCookie();
- /**
- * This is just to sanity check our assumption that static flows
- * never expire.
- */
- if( AppCookie.extractApp(cookie) == STATIC_FLOW_APP_ID) {
- if (flowRemoved.getReason() !=
- OFFlowRemoved.OFFlowRemovedReason.OFPRR_DELETE)
- log.error("Got a FlowRemove message for a infinite " +
- "timeout flow: {} from switch {}", msg, sw);
- return Command.STOP; // only for us
- } else
- return Command.CONTINUE;
- }
-
- @Override
- public boolean isCallbackOrderingPrereq(OFType type, String name) {
- return false; // no dependency for non-packet in
- }
-
- @Override
- public boolean isCallbackOrderingPostreq(OFType type, String name) {
- return false; // no dependency for non-packet in
- }
-
- // IFloodlightModule
-
- @Override
- public Collection<Class<? extends IFloodlightService>> getModuleServices() {
- Collection<Class<? extends IFloodlightService>> l =
- new ArrayList<Class<? extends IFloodlightService>>();
- l.add(IStaticFlowEntryPusherService.class);
- return l;
- }
-
- @Override
- public Map<Class<? extends IFloodlightService>, IFloodlightService> getServiceImpls() {
- Map<Class<? extends IFloodlightService>,
- IFloodlightService> m =
- new HashMap<Class<? extends IFloodlightService>,
- IFloodlightService>();
- m.put(IStaticFlowEntryPusherService.class, this);
- return m;
- }
-
- @Override
- public Collection<Class<? extends IFloodlightService>> getModuleDependencies() {
- Collection<Class<? extends IFloodlightService>> l =
- new ArrayList<Class<? extends IFloodlightService>>();
- l.add(IFloodlightProviderService.class);
- l.add(IStorageSourceService.class);
- l.add(IRestApiService.class);
- return l;
- }
-
- @Override
- public void init(FloodlightModuleContext context)
- throws FloodlightModuleException {
- floodlightProvider =
- context.getServiceImpl(IFloodlightProviderService.class);
- storageSource =
- context.getServiceImpl(IStorageSourceService.class);
- restApi =
- context.getServiceImpl(IRestApiService.class);
- }
-
- @Override
- public void startUp(FloodlightModuleContext context) {
- floodlightProvider.addOFMessageListener(OFType.FLOW_REMOVED, this);
- floodlightProvider.addOFSwitchListener(this);
- floodlightProvider.addHAListener(this);
-
- // assumes no switches connected at startup()
- storageSource.createTable(TABLE_NAME, null);
- storageSource.setTablePrimaryKeyName(TABLE_NAME, COLUMN_NAME);
- storageSource.addListener(TABLE_NAME, this);
- entriesFromStorage = readEntriesFromStorage();
- entry2dpid = computeEntry2DpidMap(entriesFromStorage);
- restApi.addRestletRoutable(new StaticFlowEntryWebRoutable());
- }
-
- // IStaticFlowEntryPusherService methods
-
- @Override
- public void addFlow(String name, OFFlowMod fm, String swDpid) {
- Map<String, Object> fmMap = StaticFlowEntries.flowModToStorageEntry(fm, swDpid, name);
- entry2dpid.put(name, swDpid);
- Map<String, OFFlowMod> switchEntries = entriesFromStorage.get(swDpid);
- if (switchEntries == null) {
- switchEntries = new HashMap<String, OFFlowMod>();
- entriesFromStorage.put(swDpid, switchEntries);
- }
- switchEntries.put(name, fm);
- storageSource.insertRowAsync(TABLE_NAME, fmMap);
- }
-
- @Override
- public void deleteFlow(String name) {
- storageSource.deleteRowAsync(TABLE_NAME, name);
- // TODO - What if there is a delay in storage?
- }
-
- @Override
- public void deleteAllFlows() {
- for (String entry : entry2dpid.keySet()) {
- deleteFlow(entry);
- }
- }
-
- @Override
- public void deleteFlowsForSwitch(long dpid) {
- String sDpid = HexString.toHexString(dpid);
-
- for (Entry<String, String> e : entry2dpid.entrySet()) {
- if (e.getValue().equals(sDpid))
- deleteFlow(e.getKey());
- }
- }
-
- @Override
- public Map<String, Map<String, OFFlowMod>> getFlows() {
- return entriesFromStorage;
- }
-
- @Override
- public Map<String, OFFlowMod> getFlows(String dpid) {
- return entriesFromStorage.get(dpid);
- }
-
-
- // IHAListener
-
- @Override
- public void roleChanged(Role oldRole, Role newRole) {
- switch(newRole) {
- case MASTER:
- if (oldRole == Role.SLAVE) {
- log.debug("Re-reading static flows from storage due " +
- "to HA change from SLAVE->MASTER");
- entriesFromStorage = readEntriesFromStorage();
- entry2dpid = computeEntry2DpidMap(entriesFromStorage);
- }
- break;
- case SLAVE:
- log.debug("Clearing in-memory flows due to " +
- "HA change to SLAVE");
- entry2dpid.clear();
- entriesFromStorage.clear();
- break;
- default:
- break;
- }
- }
-
- @Override
- public void controllerNodeIPsChanged(
- Map<String, String> curControllerNodeIPs,
- Map<String, String> addedControllerNodeIPs,
- Map<String, String> removedControllerNodeIPs) {
- // ignore
- }
-
-}
diff --git a/src/main/java/net/floodlightcontroller/staticflowentry/web/ClearStaticFlowEntriesResource.java b/src/main/java/net/floodlightcontroller/staticflowentry/web/ClearStaticFlowEntriesResource.java
deleted file mode 100644
index f103e99..0000000
--- a/src/main/java/net/floodlightcontroller/staticflowentry/web/ClearStaticFlowEntriesResource.java
+++ /dev/null
@@ -1,38 +0,0 @@
-package net.floodlightcontroller.staticflowentry.web;
-
-import net.floodlightcontroller.core.web.ControllerSwitchesResource;
-import net.floodlightcontroller.staticflowentry.IStaticFlowEntryPusherService;
-
-import org.openflow.util.HexString;
-import org.restlet.data.Status;
-import org.restlet.resource.Get;
-import org.restlet.resource.ServerResource;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class ClearStaticFlowEntriesResource extends ServerResource {
- protected final static Logger log = LoggerFactory.getLogger(ClearStaticFlowEntriesResource.class);
-
- @Get
- public void ClearStaticFlowEntries() {
- IStaticFlowEntryPusherService sfpService =
- (IStaticFlowEntryPusherService)getContext().getAttributes().
- get(IStaticFlowEntryPusherService.class.getCanonicalName());
-
- String param = (String) getRequestAttributes().get("switch");
- if (log.isDebugEnabled())
- log.debug("Clearing all static flow entires for switch: " + param);
-
- if (param.toLowerCase().equals("all")) {
- sfpService.deleteAllFlows();
- } else {
- try {
- sfpService.deleteFlowsForSwitch(HexString.toLong(param));
- } catch (NumberFormatException e){
- setStatus(Status.CLIENT_ERROR_BAD_REQUEST,
- ControllerSwitchesResource.DPID_ERROR);
- return;
- }
- }
- }
-}
diff --git a/src/main/java/net/floodlightcontroller/staticflowentry/web/ListStaticFlowEntriesResource.java b/src/main/java/net/floodlightcontroller/staticflowentry/web/ListStaticFlowEntriesResource.java
deleted file mode 100644
index 2bb53ba..0000000
--- a/src/main/java/net/floodlightcontroller/staticflowentry/web/ListStaticFlowEntriesResource.java
+++ /dev/null
@@ -1,45 +0,0 @@
-package net.floodlightcontroller.staticflowentry.web;
-
-import java.util.HashMap;
-import java.util.Map;
-
-import net.floodlightcontroller.core.web.ControllerSwitchesResource;
-import net.floodlightcontroller.staticflowentry.IStaticFlowEntryPusherService;
-
-import org.openflow.protocol.OFFlowMod;
-import org.restlet.data.Status;
-import org.restlet.resource.Get;
-import org.restlet.resource.ServerResource;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class ListStaticFlowEntriesResource extends ServerResource {
- protected final static Logger log = LoggerFactory.getLogger(ListStaticFlowEntriesResource.class);
-
- @Get
- public Map<String, Map<String, OFFlowMod>> ListStaticFlowEntries() {
- IStaticFlowEntryPusherService sfpService =
- (IStaticFlowEntryPusherService)getContext().getAttributes().
- get(IStaticFlowEntryPusherService.class.getCanonicalName());
-
- String param = (String) getRequestAttributes().get("switch");
- if (log.isDebugEnabled())
- log.debug("Listing all static flow entires for switch: " + param);
-
- if (param.toLowerCase().equals("all")) {
- return sfpService.getFlows();
- } else {
- try {
- Map<String, Map<String, OFFlowMod>> retMap =
- new HashMap<String, Map<String, OFFlowMod>>();
- retMap.put(param, sfpService.getFlows(param));
- return retMap;
-
- } catch (NumberFormatException e){
- setStatus(Status.CLIENT_ERROR_BAD_REQUEST,
- ControllerSwitchesResource.DPID_ERROR);
- }
- }
- return null;
- }
-}
diff --git a/src/main/java/net/floodlightcontroller/staticflowentry/web/StaticFlowEntryPusherResource.java b/src/main/java/net/floodlightcontroller/staticflowentry/web/StaticFlowEntryPusherResource.java
deleted file mode 100644
index 2886a58..0000000
--- a/src/main/java/net/floodlightcontroller/staticflowentry/web/StaticFlowEntryPusherResource.java
+++ /dev/null
@@ -1,142 +0,0 @@
-/**
-* Copyright 2011, Big Switch Networks, Inc.
-* Originally created by David Erickson, Stanford University
-*
-* Licensed under the Apache License, Version 2.0 (the "License"); you may
-* not use this file except in compliance with the License. You may obtain
-* a copy of the License at
-*
-* http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-* License for the specific language governing permissions and limitations
-* under the License.
-**/
-
-package net.floodlightcontroller.staticflowentry.web;
-
-import java.io.IOException;
-import java.util.Map;
-
-import org.restlet.resource.Delete;
-import org.restlet.resource.Post;
-import org.restlet.resource.ServerResource;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-
-import net.floodlightcontroller.core.annotations.LogMessageCategory;
-import net.floodlightcontroller.core.annotations.LogMessageDoc;
-import net.floodlightcontroller.staticflowentry.StaticFlowEntries;
-import net.floodlightcontroller.staticflowentry.StaticFlowEntryPusher;
-import net.floodlightcontroller.storage.IStorageSourceService;
-
-/**
- * Pushes a static flow entry to the storage source
- * @author alexreimers
- *
- */
-@LogMessageCategory("Static Flow Pusher")
-public class StaticFlowEntryPusherResource extends ServerResource {
- protected final static Logger log = LoggerFactory.getLogger(StaticFlowEntryPusherResource.class);
-
- /**
- * Checks to see if the user matches IP information without
- * checking for the correct ether-type (2048).
- * @param rows The Map that is a string representation of
- * the static flow.
- * @reutrn True if they checked the ether-type, false otherwise
- */
- private boolean checkMatchIp(Map<String, Object> rows) {
- boolean matchEther = false;
- String val = (String) rows.get(StaticFlowEntryPusher.COLUMN_DL_TYPE);
- if (val != null) {
- int type = 0;
- // check both hex and decimal
- if (val.startsWith("0x")) {
- type = Integer.parseInt(val.substring(2), 16);
- } else {
- try {
- type = Integer.parseInt(val);
- } catch (NumberFormatException e) { /* fail silently */}
- }
- if (type == 2048) matchEther = true;
- }
-
- if ((rows.containsKey(StaticFlowEntryPusher.COLUMN_NW_DST) ||
- rows.containsKey(StaticFlowEntryPusher.COLUMN_NW_SRC) ||
- rows.containsKey(StaticFlowEntryPusher.COLUMN_NW_PROTO) ||
- rows.containsKey(StaticFlowEntryPusher.COLUMN_NW_TOS)) &&
- (matchEther == false))
- return false;
-
- return true;
- }
-
- /**
- * Takes a Static Flow Pusher string in JSON format and parses it into
- * our database schema then pushes it to the database.
- * @param fmJson The Static Flow Pusher entry in JSON format.
- * @return A string status message
- */
- @Post
- @LogMessageDoc(level="ERROR",
- message="Error parsing push flow mod request: {request}",
- explanation="An invalid request was sent to static flow pusher",
- recommendation="Fix the format of the static flow mod request")
- public String store(String fmJson) {
- IStorageSourceService storageSource =
- (IStorageSourceService)getContext().getAttributes().
- get(IStorageSourceService.class.getCanonicalName());
-
- Map<String, Object> rowValues;
- try {
- rowValues = StaticFlowEntries.jsonToStorageEntry(fmJson);
- String status = null;
- if (!checkMatchIp(rowValues)) {
- status = "Warning! Pushing a static flow entry that matches IP " +
- "fields without matching for IP payload (ether-type 2048) will cause " +
- "the switch to wildcard higher level fields.";
- log.error(status);
- } else {
- status = "Entry pushed";
- }
- storageSource.insertRowAsync(StaticFlowEntryPusher.TABLE_NAME, rowValues);
- return ("{\"status\" : \"" + status + "\"}");
- } catch (IOException e) {
- log.error("Error parsing push flow mod request: " + fmJson, e);
- e.printStackTrace();
- return "{\"status\" : \"Error! Could not parse flod mod, see log for details.\"}";
- }
- }
-
- @Delete
- @LogMessageDoc(level="ERROR",
- message="Error deleting flow mod request: {request}",
- explanation="An invalid delete request was sent to static flow pusher",
- recommendation="Fix the format of the static flow mod request")
- public String del(String fmJson) {
- IStorageSourceService storageSource =
- (IStorageSourceService)getContext().getAttributes().
- get(IStorageSourceService.class.getCanonicalName());
- String fmName = null;
- if (fmJson == null) {
- return "{\"status\" : \"Error! No data posted.\"}";
- }
- try {
- fmName = StaticFlowEntries.getEntryNameFromJson(fmJson);
- if (fmName == null) {
- return "{\"status\" : \"Error deleting entry, no name provided\"}";
- }
- } catch (IOException e) {
- log.error("Error deleting flow mod request: " + fmJson, e);
- e.printStackTrace();
- return "{\"status\" : \"Error deleting entry, see log for details\"}";
- }
-
- storageSource.deleteRowAsync(StaticFlowEntryPusher.TABLE_NAME, fmName);
- return "{\"status\" : \"Entry " + fmName + " deleted\"}";
- }
-}
diff --git a/src/main/java/net/floodlightcontroller/staticflowentry/web/StaticFlowEntryWebRoutable.java b/src/main/java/net/floodlightcontroller/staticflowentry/web/StaticFlowEntryWebRoutable.java
deleted file mode 100644
index b5a6fe1..0000000
--- a/src/main/java/net/floodlightcontroller/staticflowentry/web/StaticFlowEntryWebRoutable.java
+++ /dev/null
@@ -1,29 +0,0 @@
-package net.floodlightcontroller.staticflowentry.web;
-
-import net.floodlightcontroller.restserver.RestletRoutable;
-
-import org.restlet.Context;
-import org.restlet.Restlet;
-import org.restlet.routing.Router;
-
-public class StaticFlowEntryWebRoutable implements RestletRoutable {
- /**
- * Create the Restlet router and bind to the proper resources.
- */
- @Override
- public Restlet getRestlet(Context context) {
- Router router = new Router(context);
- router.attach("/json", StaticFlowEntryPusherResource.class);
- router.attach("/clear/{switch}/json", ClearStaticFlowEntriesResource.class);
- router.attach("/list/{switch}/json", ListStaticFlowEntriesResource.class);
- return router;
- }
-
- /**
- * Set the base path for the Topology
- */
- @Override
- public String basePath() {
- return "/wm/staticflowentrypusher";
- }
-}
diff --git a/src/main/java/net/floodlightcontroller/storage/AbstractStorageSource.java b/src/main/java/net/floodlightcontroller/storage/AbstractStorageSource.java
deleted file mode 100644
index 20d6599..0000000
--- a/src/main/java/net/floodlightcontroller/storage/AbstractStorageSource.java
+++ /dev/null
@@ -1,534 +0,0 @@
-/**
-* Copyright 2011, Big Switch Networks, Inc.
-* Originally created by David Erickson, Stanford University
-*
-* Licensed under the Apache License, Version 2.0 (the "License"); you may
-* not use this file except in compliance with the License. You may obtain
-* a copy of the License at
-*
-* http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-* License for the specific language governing permissions and limitations
-* under the License.
-**/
-
-package net.floodlightcontroller.storage;
-
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.Callable;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.CopyOnWriteArraySet;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.Future;
-
-import net.floodlightcontroller.core.annotations.LogMessageCategory;
-import net.floodlightcontroller.core.annotations.LogMessageDoc;
-import net.floodlightcontroller.core.module.FloodlightModuleContext;
-import net.floodlightcontroller.core.module.FloodlightModuleException;
-import net.floodlightcontroller.core.module.IFloodlightModule;
-import net.floodlightcontroller.core.module.IFloodlightService;
-import net.floodlightcontroller.counter.ICounter;
-import net.floodlightcontroller.counter.CounterStore;
-import net.floodlightcontroller.counter.ICounterStoreService;
-import net.floodlightcontroller.counter.CounterValue.CounterType;
-import net.floodlightcontroller.restserver.IRestApiService;
-import net.floodlightcontroller.storage.web.StorageWebRoutable;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-@LogMessageCategory("System Database")
-public abstract class AbstractStorageSource
- implements IStorageSourceService, IFloodlightModule {
- protected final static Logger logger = LoggerFactory.getLogger(AbstractStorageSource.class);
-
- // Shared instance of the executor to use to execute the storage tasks.
- // We make this a single threaded executor, because if we used a thread pool
- // then storage operations could be executed out of order which would cause
- // problems in some cases (e.g. delete and update of a row getting reordered).
- // If we wanted to make this more multi-threaded we could have multiple
- // worker threads/executors with affinity of operations on a given table
- // to a single worker thread. But for now, we'll keep it simple and just have
- // a single thread for all operations.
- protected static ExecutorService defaultExecutorService = Executors.newSingleThreadExecutor();
-
- protected final static String STORAGE_QUERY_COUNTER_NAME = "StorageQuery";
- protected final static String STORAGE_UPDATE_COUNTER_NAME = "StorageUpdate";
- protected final static String STORAGE_DELETE_COUNTER_NAME = "StorageDelete";
-
- protected Set<String> allTableNames = new CopyOnWriteArraySet<String>();
- protected ICounterStoreService counterStore;
- protected ExecutorService executorService = defaultExecutorService;
- protected IStorageExceptionHandler exceptionHandler;
-
- private Map<String, Set<IStorageSourceListener>> listeners =
- new ConcurrentHashMap<String, Set<IStorageSourceListener>>();
-
- // Our dependencies
- protected IRestApiService restApi = null;
-
- protected static final String DB_ERROR_EXPLANATION =
- "An unknown error occurred while executing asynchronous " +
- "database operation";
-
- @LogMessageDoc(level="ERROR",
- message="Failure in asynchronous call to executeQuery",
- explanation=DB_ERROR_EXPLANATION,
- recommendation=LogMessageDoc.GENERIC_ACTION)
- abstract class StorageCallable<V> implements Callable<V> {
- public V call() {
- try {
- return doStorageOperation();
- }
- catch (StorageException e) {
- logger.error("Failure in asynchronous call to executeQuery", e);
- if (exceptionHandler != null)
- exceptionHandler.handleException(e);
- throw e;
- }
- }
- abstract protected V doStorageOperation();
- }
-
- @LogMessageDoc(level="ERROR",
- message="Failure in asynchronous call to updateRows",
- explanation=DB_ERROR_EXPLANATION,
- recommendation=LogMessageDoc.GENERIC_ACTION)
- abstract class StorageRunnable implements Runnable {
- public void run() {
- try {
- doStorageOperation();
- }
- catch (StorageException e) {
- logger.error("Failure in asynchronous call to updateRows", e);
- if (exceptionHandler != null)
- exceptionHandler.handleException(e);
- throw e;
- }
- }
- abstract void doStorageOperation();
- }
-
- public AbstractStorageSource() {
- this.executorService = defaultExecutorService;
- }
-
- public void setExecutorService(ExecutorService executorService) {
- this.executorService = (executorService != null) ?
- executorService : defaultExecutorService;
- }
-
- @Override
- public void setExceptionHandler(IStorageExceptionHandler exceptionHandler) {
- this.exceptionHandler = exceptionHandler;
- }
-
- @Override
- public abstract void setTablePrimaryKeyName(String tableName, String primaryKeyName);
-
- @Override
- public void createTable(String tableName, Set<String> indexedColumns) {
- allTableNames.add(tableName);
- }
-
- @Override
- public Set<String> getAllTableNames() {
- return allTableNames;
- }
-
- public void setCounterStore(CounterStore counterStore) {
- this.counterStore = counterStore;
- }
-
- protected void updateCounters(String baseName, String tableName) {
- if (counterStore != null) {
- String counterName;
- if (tableName != null) {
- updateCounters(baseName, null);
- counterName = baseName + CounterStore.TitleDelimitor + tableName;
- } else {
- counterName = baseName;
- }
- ICounter counter = counterStore.getCounter(counterName);
- if (counter == null) {
- counter = counterStore.createCounter(counterName, CounterType.LONG);
- }
- counter.increment();
- }
- }
-
- @Override
- public abstract IQuery createQuery(String tableName, String[] columnNames,
- IPredicate predicate, RowOrdering ordering);
-
- @Override
- public IResultSet executeQuery(IQuery query) {
- updateCounters(STORAGE_QUERY_COUNTER_NAME, query.getTableName());
- return executeQueryImpl(query);
- }
-
- protected abstract IResultSet executeQueryImpl(IQuery query);
-
- @Override
- public IResultSet executeQuery(String tableName, String[] columnNames,
- IPredicate predicate, RowOrdering ordering) {
- IQuery query = createQuery(tableName, columnNames, predicate, ordering);
- IResultSet resultSet = executeQuery(query);
- return resultSet;
- }
-
- @Override
- public Object[] executeQuery(String tableName, String[] columnNames,
- IPredicate predicate, RowOrdering ordering, IRowMapper rowMapper) {
- List<Object> objectList = new ArrayList<Object>();
- IResultSet resultSet = executeQuery(tableName, columnNames, predicate, ordering);
- while (resultSet.next()) {
- Object object = rowMapper.mapRow(resultSet);
- objectList.add(object);
- }
- return objectList.toArray();
- }
-
- @Override
- public Future<IResultSet> executeQueryAsync(final IQuery query) {
- Future<IResultSet> future = executorService.submit(
- new StorageCallable<IResultSet>() {
- public IResultSet doStorageOperation() {
- return executeQuery(query);
- }
- });
- return future;
- }
-
- @Override
- public Future<IResultSet> executeQueryAsync(final String tableName,
- final String[] columnNames, final IPredicate predicate,
- final RowOrdering ordering) {
- Future<IResultSet> future = executorService.submit(
- new StorageCallable<IResultSet>() {
- public IResultSet doStorageOperation() {
- return executeQuery(tableName, columnNames,
- predicate, ordering);
- }
- });
- return future;
- }
-
- @Override
- public Future<Object[]> executeQueryAsync(final String tableName,
- final String[] columnNames, final IPredicate predicate,
- final RowOrdering ordering, final IRowMapper rowMapper) {
- Future<Object[]> future = executorService.submit(
- new StorageCallable<Object[]>() {
- public Object[] doStorageOperation() {
- return executeQuery(tableName, columnNames, predicate,
- ordering, rowMapper);
- }
- });
- return future;
- }
-
- @Override
- public Future<?> insertRowAsync(final String tableName,
- final Map<String,Object> values) {
- Future<?> future = executorService.submit(
- new StorageRunnable() {
- public void doStorageOperation() {
- insertRow(tableName, values);
- }
- }, null);
- return future;
- }
-
- @Override
- public Future<?> updateRowsAsync(final String tableName, final List<Map<String,Object>> rows) {
- Future<?> future = executorService.submit(
- new StorageRunnable() {
- public void doStorageOperation() {
- updateRows(tableName, rows);
- }
- }, null);
- return future;
- }
-
- @Override
- public Future<?> updateMatchingRowsAsync(final String tableName,
- final IPredicate predicate, final Map<String,Object> values) {
- Future<?> future = executorService.submit(
- new StorageRunnable() {
- public void doStorageOperation() {
- updateMatchingRows(tableName, predicate, values);
- }
- }, null);
- return future;
- }
-
- @Override
- public Future<?> updateRowAsync(final String tableName,
- final Object rowKey, final Map<String,Object> values) {
- Future<?> future = executorService.submit(
- new StorageRunnable() {
- public void doStorageOperation() {
- updateRow(tableName, rowKey, values);
- }
- }, null);
- return future;
- }
-
- @Override
- public Future<?> updateRowAsync(final String tableName,
- final Map<String,Object> values) {
- Future<?> future = executorService.submit(
- new StorageRunnable() {
- public void doStorageOperation() {
- updateRow(tableName, values);
- }
- }, null);
- return future;
- }
-
- @Override
- public Future<?> deleteRowAsync(final String tableName, final Object rowKey) {
- Future<?> future = executorService.submit(
- new StorageRunnable() {
- public void doStorageOperation() {
- deleteRow(tableName, rowKey);
- }
- }, null);
- return future;
- }
-
- @Override
- public Future<?> deleteRowsAsync(final String tableName, final Set<Object> rowKeys) {
- Future<?> future = executorService.submit(
- new StorageRunnable() {
- public void doStorageOperation() {
- deleteRows(tableName, rowKeys);
- }
- }, null);
- return future;
- }
-
- @Override
- public Future<?> deleteMatchingRowsAsync(final String tableName, final IPredicate predicate) {
- Future<?> future = executorService.submit(
- new StorageRunnable() {
- public void doStorageOperation() {
- deleteMatchingRows(tableName, predicate);
- }
- }, null);
- return future;
- }
-
- @Override
- public Future<?> getRowAsync(final String tableName, final Object rowKey) {
- Future<?> future = executorService.submit(
- new StorageRunnable() {
- public void doStorageOperation() {
- getRow(tableName, rowKey);
- }
- }, null);
- return future;
- }
-
- @Override
- public Future<?> saveAsync(final IResultSet resultSet) {
- Future<?> future = executorService.submit(
- new StorageRunnable() {
- public void doStorageOperation() {
- resultSet.save();
- }
- }, null);
- return future;
- }
-
- @Override
- public void insertRow(String tableName, Map<String, Object> values) {
- updateCounters(STORAGE_UPDATE_COUNTER_NAME, tableName);
- insertRowImpl(tableName, values);
- }
-
- protected abstract void insertRowImpl(String tableName, Map<String, Object> values);
-
-
- @Override
- public void updateRows(String tableName, List<Map<String,Object>> rows) {
- updateCounters(STORAGE_UPDATE_COUNTER_NAME, tableName);
- updateRowsImpl(tableName, rows);
- }
-
- protected abstract void updateRowsImpl(String tableName, List<Map<String,Object>> rows);
-
- @Override
- public void updateMatchingRows(String tableName, IPredicate predicate,
- Map<String, Object> values) {
- updateCounters(STORAGE_UPDATE_COUNTER_NAME, tableName);
- updateMatchingRowsImpl(tableName, predicate, values);
- }
-
- protected abstract void updateMatchingRowsImpl(String tableName, IPredicate predicate,
- Map<String, Object> values);
-
- @Override
- public void updateRow(String tableName, Object rowKey,
- Map<String, Object> values) {
- updateCounters(STORAGE_UPDATE_COUNTER_NAME, tableName);
- updateRowImpl(tableName, rowKey, values);
- }
-
- protected abstract void updateRowImpl(String tableName, Object rowKey,
- Map<String, Object> values);
-
- @Override
- public void updateRow(String tableName, Map<String, Object> values) {
- updateCounters(STORAGE_UPDATE_COUNTER_NAME, tableName);
- updateRowImpl(tableName, values);
- }
-
- protected abstract void updateRowImpl(String tableName, Map<String, Object> values);
-
- @Override
- public void deleteRow(String tableName, Object rowKey) {
- updateCounters(STORAGE_DELETE_COUNTER_NAME, tableName);
- deleteRowImpl(tableName, rowKey);
- }
-
- protected abstract void deleteRowImpl(String tableName, Object rowKey);
-
- @Override
- public void deleteRows(String tableName, Set<Object> rowKeys) {
- updateCounters(STORAGE_DELETE_COUNTER_NAME, tableName);
- deleteRowsImpl(tableName, rowKeys);
- }
-
- protected abstract void deleteRowsImpl(String tableName, Set<Object> rowKeys);
-
- @Override
- public void deleteMatchingRows(String tableName, IPredicate predicate) {
- IResultSet resultSet = null;
- try {
- resultSet = executeQuery(tableName, null, predicate, null);
- while (resultSet.next()) {
- resultSet.deleteRow();
- }
- resultSet.save();
- }
- finally {
- if (resultSet != null)
- resultSet.close();
- }
- }
-
- @Override
- public IResultSet getRow(String tableName, Object rowKey) {
- updateCounters(STORAGE_QUERY_COUNTER_NAME, tableName);
- return getRowImpl(tableName, rowKey);
- }
-
- protected abstract IResultSet getRowImpl(String tableName, Object rowKey);
-
- @Override
- public synchronized void addListener(String tableName, IStorageSourceListener listener) {
- Set<IStorageSourceListener> tableListeners = listeners.get(tableName);
- if (tableListeners == null) {
- tableListeners = new CopyOnWriteArraySet<IStorageSourceListener>();
- listeners.put(tableName, tableListeners);
- }
- tableListeners.add(listener);
- }
-
- @Override
- public synchronized void removeListener(String tableName, IStorageSourceListener listener) {
- Set<IStorageSourceListener> tableListeners = listeners.get(tableName);
- if (tableListeners != null) {
- tableListeners.remove(listener);
- }
- }
-
- @LogMessageDoc(level="ERROR",
- message="Exception caught handling storage notification",
- explanation="An unknown error occured while trying to notify" +
- " storage listeners",
- recommendation=LogMessageDoc.GENERIC_ACTION)
- protected synchronized void notifyListeners(StorageSourceNotification notification) {
- String tableName = notification.getTableName();
- Set<Object> keys = notification.getKeys();
- Set<IStorageSourceListener> tableListeners = listeners.get(tableName);
- if (tableListeners != null) {
- for (IStorageSourceListener listener : tableListeners) {
- try {
- switch (notification.getAction()) {
- case MODIFY:
- listener.rowsModified(tableName, keys);
- break;
- case DELETE:
- listener.rowsDeleted(tableName, keys);
- break;
- }
- }
- catch (Exception e) {
- logger.error("Exception caught handling storage notification", e);
- }
- }
- }
- }
-
- @Override
- public void notifyListeners(List<StorageSourceNotification> notifications) {
- for (StorageSourceNotification notification : notifications)
- notifyListeners(notification);
- }
-
- // IFloodlightModule
-
- @Override
- public Collection<Class<? extends IFloodlightService>> getModuleServices() {
- Collection<Class<? extends IFloodlightService>> l =
- new ArrayList<Class<? extends IFloodlightService>>();
- l.add(IStorageSourceService.class);
- return l;
- }
-
- @Override
- public Map<Class<? extends IFloodlightService>,
- IFloodlightService> getServiceImpls() {
- Map<Class<? extends IFloodlightService>,
- IFloodlightService> m =
- new HashMap<Class<? extends IFloodlightService>,
- IFloodlightService>();
- m.put(IStorageSourceService.class, this);
- return m;
- }
-
- @Override
- public Collection<Class<? extends IFloodlightService>> getModuleDependencies() {
- Collection<Class<? extends IFloodlightService>> l =
- new ArrayList<Class<? extends IFloodlightService>>();
- l.add(IRestApiService.class);
- l.add(ICounterStoreService.class);
- return l;
- }
-
- @Override
- public void init(FloodlightModuleContext context)
- throws FloodlightModuleException {
- restApi =
- context.getServiceImpl(IRestApiService.class);
- counterStore =
- context.getServiceImpl(ICounterStoreService.class);
- }
-
- @Override
- public void startUp(FloodlightModuleContext context) {
- restApi.addRestletRoutable(new StorageWebRoutable());
- }
-}
diff --git a/src/main/java/net/floodlightcontroller/storage/CompoundPredicate.java b/src/main/java/net/floodlightcontroller/storage/CompoundPredicate.java
deleted file mode 100644
index a23e560..0000000
--- a/src/main/java/net/floodlightcontroller/storage/CompoundPredicate.java
+++ /dev/null
@@ -1,52 +0,0 @@
-/**
-* Copyright 2011, Big Switch Networks, Inc.
-* Originally created by David Erickson, Stanford University
-*
-* Licensed under the Apache License, Version 2.0 (the "License"); you may
-* not use this file except in compliance with the License. You may obtain
-* a copy of the License at
-*
-* http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-* License for the specific language governing permissions and limitations
-* under the License.
-**/
-
-package net.floodlightcontroller.storage;
-
-/** Predicate class to handle AND and OR combinations of a number
- * of child predicates. The result of the logical combination of the
- * child predicates can also be negated to support a NOT operation.
- *
- * @author rob
- *
- */
-public class CompoundPredicate implements IPredicate {
-
- public enum Operator { AND, OR };
-
- private Operator operator;
- private boolean negated;
- private IPredicate[] predicateList;
-
- public CompoundPredicate(Operator operator, boolean negated, IPredicate... predicateList) {
- this.operator = operator;
- this.negated = negated;
- this.predicateList = predicateList;
- }
-
- public Operator getOperator() {
- return operator;
- }
-
- public boolean isNegated() {
- return negated;
- }
-
- public IPredicate[] getPredicateList() {
- return predicateList;
- }
-}
diff --git a/src/main/java/net/floodlightcontroller/storage/IPredicate.java b/src/main/java/net/floodlightcontroller/storage/IPredicate.java
deleted file mode 100644
index 291edff..0000000
--- a/src/main/java/net/floodlightcontroller/storage/IPredicate.java
+++ /dev/null
@@ -1,26 +0,0 @@
-/**
-* Copyright 2011, Big Switch Networks, Inc.
-* Originally created by David Erickson, Stanford University
-*
-* Licensed under the Apache License, Version 2.0 (the "License"); you may
-* not use this file except in compliance with the License. You may obtain
-* a copy of the License at
-*
-* http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-* License for the specific language governing permissions and limitations
-* under the License.
-**/
-
-package net.floodlightcontroller.storage;
-
-/** Common base interface for the OperatorPredicate and CompoundPredicate classes.
- * It's necessary so that you can use either type of predicate as child
- * predicates of a CompoundPredicate.
- * @author rob
- */
-public interface IPredicate {
-}
diff --git a/src/main/java/net/floodlightcontroller/storage/IQuery.java b/src/main/java/net/floodlightcontroller/storage/IQuery.java
deleted file mode 100644
index b75b8ae..0000000
--- a/src/main/java/net/floodlightcontroller/storage/IQuery.java
+++ /dev/null
@@ -1,39 +0,0 @@
-/**
-* Copyright 2011, Big Switch Networks, Inc.
-* Originally created by David Erickson, Stanford University
-*
-* Licensed under the Apache License, Version 2.0 (the "License"); you may
-* not use this file except in compliance with the License. You may obtain
-* a copy of the License at
-*
-* http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-* License for the specific language governing permissions and limitations
-* under the License.
-**/
-
-package net.floodlightcontroller.storage;
-
-/** Representation of a database query. For SQL queries this maps to
- * a prepared statement, so it will be more efficient than if you use the
- * methods in IStorageSource that bypass the IQuery. For many NoSQL
- * storage sources there won't be any performance improvement from keeping
- * around the query.
- *
- * The query interface also supports parameterized queries (i.e. which maps
- * to using ? values in a SQL query). The values of the parameters are set
- * using the setParameter method. In the storage source API the parameters
- * are named rather than positional. The format of the parameterized values
- * in the query predicates is the parameter name bracketed with question marks
- * (e.g. ?MinimumSalary? ).
- *
- * @author rob
- *
- */
-public interface IQuery {
- String getTableName();
- void setParameter(String name, Object value);
-}
diff --git a/src/main/java/net/floodlightcontroller/storage/IResultSet.java b/src/main/java/net/floodlightcontroller/storage/IResultSet.java
deleted file mode 100644
index fbd2a4a..0000000
--- a/src/main/java/net/floodlightcontroller/storage/IResultSet.java
+++ /dev/null
@@ -1,106 +0,0 @@
-/**
-* Copyright 2011, Big Switch Networks, Inc.
-* Originally created by David Erickson, Stanford University
-*
-* Licensed under the Apache License, Version 2.0 (the "License"); you may
-* not use this file except in compliance with the License. You may obtain
-* a copy of the License at
-*
-* http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-* License for the specific language governing permissions and limitations
-* under the License.
-**/
-
-package net.floodlightcontroller.storage;
-
-import java.util.Date;
-import java.util.Map;
-
-/** Interface to iterate over the results from a storage query.
- *
- * @author rob
- *
- */
-public interface IResultSet extends Iterable<IResultSet> {
-
- /** This should be called when the client is done using the result set.
- * This will release any underlying resources (e.g. a database connection),
- * which you don't want to wait for or rely on finalizers to release.
- */
- public void close();
-
- /** Advance to the next row in the result set.
- * @return Returns true if there are more rows to process
- * (i.e. if there's a valid current row) and false if there are no more
- * rows in the result set.
- */
- public boolean next();
-
- /** Save/commit any pending updates to the data in the result set.
- * This must be called after any calls to the set methods or deleting rows
- * for the changes to be applied/committed to the storage source. Note that
- * this doesn't need to be called after each set method or even after each
- * row. It is typically called at the end after updating all of the
- * rows in the result set.
- */
- public void save();
-
- /** Get the current row in the result set. This returns all of the
- * columns in the current row.
- * @return Map containing all of the columns in the current row, indexed
- * by the column name.
- */
- public Map<String,Object> getRow();
-
- /** Delete the current row in the result set.
- */
- public void deleteRow();
-
- public boolean containsColumn(String columnName);
-
- public String getString(String columnName);
- public short getShort(String columnName);
- public int getInt(String columnName);
- public long getLong(String columnName);
- public float getFloat(String columnName);
- public double getDouble(String columnName);
- public boolean getBoolean(String columnName);
- public byte getByte(String columnName);
- public byte[] getByteArray(String columnName);
- public Date getDate(String columnName);
-
- public Short getShortObject(String columnName);
- public Integer getIntegerObject(String columnName);
- public Long getLongObject(String columnName);
- public Float getFloatObject(String columnName);
- public Double getDoubleObject(String columnName);
- public Boolean getBooleanObject(String columnName);
- public Byte getByteObject(String columnName);
-
- public boolean isNull(String columnName);
-
- public void setString(String columnName, String value);
- public void setShort(String columnName, short value);
- public void setInt(String columnName, int value);
- public void setLong(String columnName, long value);
- public void setFloat(String columnName, float value);
- public void setDouble(String columnName, double value);
- public void setBoolean(String columnName, boolean value);
- public void setByte(String columnName, byte value);
- public void setByteArray(String columnName, byte[] byteArray);
- public void setDate(String columnName, Date date);
-
- public void setShortObject(String columnName, Short value);
- public void setIntegerObject(String columnName, Integer value);
- public void setLongObject(String columnName, Long value);
- public void setFloatObject(String columnName, Float value);
- public void setDoubleObject(String columnName, Double value);
- public void setBooleanObject(String columnName, Boolean value);
- public void setByteObject(String columnName, Byte value);
-
- public void setNull(String columnName);
-}
diff --git a/src/main/java/net/floodlightcontroller/storage/IRowMapper.java b/src/main/java/net/floodlightcontroller/storage/IRowMapper.java
deleted file mode 100644
index 6c4502b..0000000
--- a/src/main/java/net/floodlightcontroller/storage/IRowMapper.java
+++ /dev/null
@@ -1,35 +0,0 @@
-/**
-* Copyright 2011, Big Switch Networks, Inc.
-* Originally created by David Erickson, Stanford University
-*
-* Licensed under the Apache License, Version 2.0 (the "License"); you may
-* not use this file except in compliance with the License. You may obtain
-* a copy of the License at
-*
-* http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-* License for the specific language governing permissions and limitations
-* under the License.
-**/
-
-package net.floodlightcontroller.storage;
-
-/**
- * Interface for mapping the current row in a result set to an object.
- * This is based on the Spring JDBC support.
- *
- * @author rob
- */
-public interface IRowMapper {
-
- /** This method must be implemented by the client of the storage API
- * to map the current row in the result set to a Java object.
- *
- * @param resultSet The result set obtained from a storage source query
- * @return The object created from the data in the result set
- */
- Object mapRow(IResultSet resultSet);
-}
diff --git a/src/main/java/net/floodlightcontroller/storage/IStorageExceptionHandler.java b/src/main/java/net/floodlightcontroller/storage/IStorageExceptionHandler.java
deleted file mode 100644
index e3c8e94..0000000
--- a/src/main/java/net/floodlightcontroller/storage/IStorageExceptionHandler.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/**
-* Copyright 2011, Big Switch Networks, Inc.
-* Originally created by David Erickson, Stanford University
-*
-* Licensed under the Apache License, Version 2.0 (the "License"); you may
-* not use this file except in compliance with the License. You may obtain
-* a copy of the License at
-*
-* http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-* License for the specific language governing permissions and limitations
-* under the License.
-**/
-
-package net.floodlightcontroller.storage;
-
-public interface IStorageExceptionHandler {
- public void handleException(Exception exc);
-}
diff --git a/src/main/java/net/floodlightcontroller/storage/IStorageSourceListener.java b/src/main/java/net/floodlightcontroller/storage/IStorageSourceListener.java
deleted file mode 100644
index ea3764d..0000000
--- a/src/main/java/net/floodlightcontroller/storage/IStorageSourceListener.java
+++ /dev/null
@@ -1,39 +0,0 @@
-/**
-* Copyright 2011, Big Switch Networks, Inc.
-* Originally created by David Erickson, Stanford University
-*
-* Licensed under the Apache License, Version 2.0 (the "License"); you may
-* not use this file except in compliance with the License. You may obtain
-* a copy of the License at
-*
-* http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-* License for the specific language governing permissions and limitations
-* under the License.
-**/
-
-package net.floodlightcontroller.storage;
-
-import java.util.Set;
-
-public interface IStorageSourceListener {
-
- /**
- * Called when rows are inserted or updated in the table.
- *
- * @param tableName The table where the rows were inserted
- * @param rowKeys The keys of the rows that were inserted
- */
- public void rowsModified(String tableName, Set<Object> rowKeys);
-
- /**
- * Called when a new row is deleted from the table.
- *
- * @param tableName The table where the rows were deleted
- * @param rowKeys The keys of the rows that were deleted
- */
- public void rowsDeleted(String tableName, Set<Object> rowKeys);
-}
diff --git a/src/main/java/net/floodlightcontroller/storage/IStorageSourceService.java b/src/main/java/net/floodlightcontroller/storage/IStorageSourceService.java
deleted file mode 100644
index b8a1be8..0000000
--- a/src/main/java/net/floodlightcontroller/storage/IStorageSourceService.java
+++ /dev/null
@@ -1,331 +0,0 @@
-/**
-* Copyright 2011, Big Switch Networks, Inc.
-* Originally created by David Erickson, Stanford University
-*
-* Licensed under the Apache License, Version 2.0 (the "License"); you may
-* not use this file except in compliance with the License. You may obtain
-* a copy of the License at
-*
-* http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-* License for the specific language governing permissions and limitations
-* under the License.
-**/
-
-package net.floodlightcontroller.storage;
-
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.Future;
-
-import net.floodlightcontroller.core.module.IFloodlightService;
-
-public interface IStorageSourceService extends IFloodlightService {
-
- /** Set the column to be used as the primary key for a table. This should
- * be guaranteed to be unique for all of the rows in the table, although the
- * storage API does not necessarily enforce this requirement. If no primary
- * key name is specified for a table then the storage API assumes there is
- * a column named "id" that is used as the primary key. In this case when
- * a new row is inserted using the storage API and no id is specified
- * explictly in the row data, the storage API automatically generates a
- * unique ID (typically a UUID) for the id column. To work across all
- * possible implementations of the storage API it is safest, though, to
- * specify the primary key column explicitly.
- * FIXME: It's sort of a kludge to have to specify the primary key column
- * here. Ideally there would be some sort of metadata -- perhaps stored
- * directly in the table, at least in the NoSQL case -- that the
- * storage API could query to obtain the primary key info.
- * @param tableName The name of the table for which we're setting the key
- * @param primaryKeyName The name of column to be used as the primary key
- */
- public void setTablePrimaryKeyName(String tableName, String primaryKeyName);
-
- /** Create a new table if one does not already exist with the given name.
- *
- * @param tableName The name of the table to create.
- * @param indexedColumns Which columns should be indexed
- */
- void createTable(String tableName, Set<String> indexedColumns);
-
- /**
- * @return the set of all tables that have been created via createTable
- */
- Set<String> getAllTableNames();
-
- /** Create a query object representing the given query parameters. The query
- * object can be passed to executeQuery to actually perform the query and obtain
- * a result set.
- *
- * @param tableName The name of the table to query.
- * @param columnNames The list of columns to return in the result set.
- * @param predicate The predicate that specifies which rows to return in the result set.
- * @param ordering Specification of order that rows are returned from the result set
- * returned from executing the query. If the ordering is null, then rows are returned
- * in an implementation-specific order.
- * @return Query object to be passed to executeQuery.
- */
- IQuery createQuery(String tableName, String[] columnNames, IPredicate predicate, RowOrdering ordering);
-
- /** Execute a query created with createQuery.
- *
- * @param query The query to execute
- * @return The result set containing the rows/columns specified in the query.
- */
- IResultSet executeQuery(IQuery query);
-
- /** Execute a query created with the given query parameters.
- *
- * @param tableName The name of the table to query.
- * @param columnNames The list of columns to return in the result set.
- * @param predicate The predicate that specifies which rows to return in the result set.
- * @param ordering Specification of order that rows are returned from the result set
- * returned from executing the query. If the ordering is null, then rows are returned
- * in an implementation-specific order.
- * @return The result set containing the rows/columns specified in the query.
- */
- IResultSet executeQuery(String tableName, String[] columnNames, IPredicate predicate,
- RowOrdering ordering);
-
- /** Execute a query and call the row mapper to map the results to Java objects.
- *
- * @param tableName The name of the table to query.
- * @param columnNames The list of columns to return in the result set.
- * @param predicate The predicate that specifies which rows to return in the result set.
- * @param ordering Specification of order that rows are returned from the result set
- * returned from executing the query. If the ordering is null, then rows are returned
- * in an implementation-specific order.
- * @param rowMapper The client-supplied object that maps the data in a row in the result
- * set to a client object.
- * @return The result set containing the rows/columns specified in the query.
- */
- Object[] executeQuery(String tableName, String[] columnNames, IPredicate predicate,
- RowOrdering ordering, IRowMapper rowMapper);
-
- /** Insert a new row in the table with the given column data.
- * If the primary key is the default value of "id" and is not specified in the
- * then a unique id will be automatically assigned to the row.
- * @param tableName The name of the table to which to add the row
- * @param values The map of column names/values to add to the table.
- */
- void insertRow(String tableName, Map<String,Object> values);
-
- /** Update or insert a list of rows in the table.
- * The primary key must be included in the map of values for each row.
- * @param tableName The table to update or insert into
- * @param values The map of column names/values to update the rows
- */
- void updateRows(String tableName, List<Map<String,Object>> rows);
-
- /** Update the rows in the given table. Any rows matching the predicate
- * are updated with the column names/values specified in the values map.
- * (The values map should not contain the special column "id".)
- * @param tableName The table to update
- * @param predicate The predicate to use to select which rows to update
- * @param values The map of column names/values to update the rows.
- */
- void updateMatchingRows(String tableName, IPredicate predicate, Map<String,Object> values);
-
- /** Update or insert a row in the table with the given row key (primary
- * key) and column names/values. (If the values map contains the special
- * column "id", its value must match rowId.)
- * @param tableName The table to update or insert into
- * @param rowKey The ID (primary key) of the row to update
- * @param values The map of column names/values to update the rows
- */
- void updateRow(String tableName, Object rowKey, Map<String,Object> values);
-
- /** Update or insert a row in the table with the given column data.
- * The primary key must be included in the map of values.
- * @param tableName The table to update or insert into
- * @param values The map of column names/values to update the rows
- */
- void updateRow(String tableName, Map<String,Object> values);
-
- /** Delete the row with the given primary key.
- *
- * @param tableName The table from which to delete the row
- * @param rowKey The primary key of the row to delete.
- */
- void deleteRow(String tableName, Object rowKey);
-
- /** Delete the rows with the given keys.
- *
- * @param tableName The table from which to delete the rows
- * @param rowKeys The set of primary keys of the rows to delete.
- */
- void deleteRows(String tableName, Set<Object> rowKeys);
-
- /**
- * Delete the rows that match the predicate
- * @param tableName
- * @param predicate
- */
- void deleteMatchingRows(String tableName, IPredicate predicate);
-
- /** Query for a row with the given ID (primary key).
- *
- * @param tableName The name of the table to query
- * @param rowKey The primary key of the row
- * @return The result set containing the row with the given ID
- */
- IResultSet getRow(String tableName, Object rowKey);
-
- /**
- * Set exception handler to use for asynchronous operations.
- * @param exceptionHandler
- */
- void setExceptionHandler(IStorageExceptionHandler exceptionHandler);
-
- /**
- * Asynchronous variant of executeQuery.
- *
- * @param query
- * @return
- */
- public Future<IResultSet> executeQueryAsync(final IQuery query);
-
- /**
- * Asynchronous variant of executeQuery.
- *
- * @param tableName
- * @param columnNames
- * @param predicate
- * @param ordering
- * @return
- */
- public Future<IResultSet> executeQueryAsync(final String tableName,
- final String[] columnNames, final IPredicate predicate,
- final RowOrdering ordering);
-
- /**
- * Asynchronous variant of executeQuery
- *
- * @param tableName
- * @param columnNames
- * @param predicate
- * @param ordering
- * @param rowMapper
- * @return
- */
- public Future<Object[]> executeQueryAsync(final String tableName,
- final String[] columnNames, final IPredicate predicate,
- final RowOrdering ordering, final IRowMapper rowMapper);
-
- /**
- * Asynchronous variant of insertRow.
- *
- * @param tableName
- * @param values
- * @return
- */
- public Future<?> insertRowAsync(final String tableName, final Map<String,Object> values);
-
- /**
- * Asynchronous variant of updateRows
- * @param tableName
- * @param rows
- */
- public Future<?> updateRowsAsync(final String tableName, final List<Map<String,Object>> rows);
-
- /**
- * Asynchronous variant of updateMatchingRows
- *
- * @param tableName
- * @param predicate
- * @param values
- * @return
- */
- public Future<?> updateMatchingRowsAsync(final String tableName, final IPredicate predicate,
- final Map<String,Object> values);
-
- /**
- * Asynchronous variant of updateRow
- *
- * @param tableName
- * @param rowKey
- * @param values
- * @return
- */
- public Future<?> updateRowAsync(final String tableName, final Object rowKey,
- final Map<String,Object> values);
-
- /**
- * Asynchronous version of updateRow
- *
- * @param tableName
- * @param values
- * @return
- */
- public Future<?> updateRowAsync(final String tableName, final Map<String,Object> values);
-
- /**
- * Asynchronous version of deleteRow
- *
- * @param tableName
- * @param rowKey
- * @return
- */
- public Future<?> deleteRowAsync(final String tableName, final Object rowKey);
-
- /**
- * Asynchronous version of deleteRows
- *
- * @param tableName
- * @param rowKeys
- * @return
- */
- public Future<?> deleteRowsAsync(final String tableName, final Set<Object> rowKeys);
-
- /**
- * Asynchronous version of deleteRows
- *
- * @param tableName
- * @param predicate
- * @return
- */
- public Future<?> deleteMatchingRowsAsync(final String tableName, final IPredicate predicate);
-
- /**
- * Asynchronous version of getRow
- *
- * @param tableName
- * @param rowKey
- * @return
- */
- public Future<?> getRowAsync(final String tableName, final Object rowKey);
-
- /**
- * Asynchronous version of save
- *
- * @param resultSet
- * @return
- */
- public Future<?> saveAsync(final IResultSet resultSet);
-
- /** Add a listener to the specified table. The listener is called
- * when any modifications are made to the table. You can add the same
- * listener instance to multiple tables, since the table name is
- * included as a parameter in the listener methods.
- * @param tableName The name of the table to listen for modifications
- * @param listener The listener instance to call
- */
- public void addListener(String tableName, IStorageSourceListener listener);
-
- /** Remove a listener from the specified table. The listener should
- * have been previously added to the table with addListener.
- * @param tableName The name of the table with the listener
- * @param listener The previously installed listener instance
- */
- public void removeListener(String tableName, IStorageSourceListener listener);
-
- /** This is logically a private method and should not be called by
- * clients of this interface.
- * @param notifications the notifications to dispatch
- */
- public void notifyListeners(List<StorageSourceNotification> notifications);
-}
diff --git a/src/main/java/net/floodlightcontroller/storage/NullValueStorageException.java b/src/main/java/net/floodlightcontroller/storage/NullValueStorageException.java
deleted file mode 100644
index 0c148b8..0000000
--- a/src/main/java/net/floodlightcontroller/storage/NullValueStorageException.java
+++ /dev/null
@@ -1,44 +0,0 @@
-/**
-* Copyright 2011, Big Switch Networks, Inc.
-* Originally created by David Erickson, Stanford University
-*
-* Licensed under the Apache License, Version 2.0 (the "License"); you may
-* not use this file except in compliance with the License. You may obtain
-* a copy of the License at
-*
-* http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-* License for the specific language governing permissions and limitations
-* under the License.
-**/
-
-package net.floodlightcontroller.storage;
-
-public class NullValueStorageException extends StorageException {
-
- private static final long serialVersionUID = 897572085681189926L;
-
- private static String makeExceptionMessage(String columnName) {
- String message = "Null column value could not be converted to built-in type";
- if (columnName != null) {
- message += ": column name = ";
- message += columnName;
- }
- return message;
- }
-
- public NullValueStorageException() {
- super(makeExceptionMessage(null));
- }
-
- public NullValueStorageException(String columnName) {
- super(makeExceptionMessage(columnName));
- }
-
- public NullValueStorageException(String columnName, Throwable exc) {
- super(makeExceptionMessage(columnName), exc);
- }
-}
diff --git a/src/main/java/net/floodlightcontroller/storage/OperatorPredicate.java b/src/main/java/net/floodlightcontroller/storage/OperatorPredicate.java
deleted file mode 100644
index dc78260..0000000
--- a/src/main/java/net/floodlightcontroller/storage/OperatorPredicate.java
+++ /dev/null
@@ -1,51 +0,0 @@
-/**
-* Copyright 2011, Big Switch Networks, Inc.
-* Originally created by David Erickson, Stanford University
-*
-* Licensed under the Apache License, Version 2.0 (the "License"); you may
-* not use this file except in compliance with the License. You may obtain
-* a copy of the License at
-*
-* http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-* License for the specific language governing permissions and limitations
-* under the License.
-**/
-
-package net.floodlightcontroller.storage;
-
-/** Predicate class to specify rows by equality or comparison operations
- * of column values. The Storage API uses the special column name of "id"
- * to specify the primary key values for the row.
- *
- * @author rob
- */
-public class OperatorPredicate implements IPredicate {
-
- public enum Operator { EQ, LT, LTE, GT, GTE };
-
- private String columnName;
- private Operator operator;
- private Comparable<?> value;
-
- public OperatorPredicate(String columnName, Operator operator, Comparable<?> value) {
- this.columnName = columnName;
- this.operator = operator;
- this.value = value;
- }
-
- public String getColumnName() {
- return columnName;
- }
-
- public Operator getOperator() {
- return operator;
- }
-
- public Comparable<?> getValue() {
- return value;
- }
-}
diff --git a/src/main/java/net/floodlightcontroller/storage/ResultSetIterator.java b/src/main/java/net/floodlightcontroller/storage/ResultSetIterator.java
deleted file mode 100644
index 669833d..0000000
--- a/src/main/java/net/floodlightcontroller/storage/ResultSetIterator.java
+++ /dev/null
@@ -1,64 +0,0 @@
-/**
-* Copyright 2011, Big Switch Networks, Inc.
-* Originally created by David Erickson, Stanford University
-*
-* Licensed under the Apache License, Version 2.0 (the "License"); you may
-* not use this file except in compliance with the License. You may obtain
-* a copy of the License at
-*
-* http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-* License for the specific language governing permissions and limitations
-* under the License.
-**/
-
-package net.floodlightcontroller.storage;
-
-import java.util.Iterator;
-import java.util.NoSuchElementException;
-
-/** Iterator wrapper for an IResultSet, useful for iterating through query
- * results in an enhanced for (foreach) loop.
- *
- * Note that the iterator manipulates the state of the underlying IResultSet.
- */
-public class ResultSetIterator implements Iterator<IResultSet> {
- private IResultSet resultSet;
- private boolean hasAnother;
- private boolean peekedAtNext;
-
- public ResultSetIterator(IResultSet resultSet) {
- this.resultSet = resultSet;
- this.peekedAtNext = false;
- }
-
- @Override
- public IResultSet next() {
- if (!peekedAtNext) {
- hasAnother = resultSet.next();
- }
- peekedAtNext = false;
- if (!hasAnother)
- throw new NoSuchElementException();
- return resultSet;
- }
-
- @Override
- public boolean hasNext() {
- if (!peekedAtNext) {
- hasAnother = resultSet.next();
- peekedAtNext = true;
- }
- return hasAnother;
- }
-
- /** Row removal is not supported; use IResultSet.deleteRow instead.
- */
- @Override
- public void remove() {
- throw new UnsupportedOperationException();
- }
-}
diff --git a/src/main/java/net/floodlightcontroller/storage/RowOrdering.java b/src/main/java/net/floodlightcontroller/storage/RowOrdering.java
deleted file mode 100644
index f9e61ed..0000000
--- a/src/main/java/net/floodlightcontroller/storage/RowOrdering.java
+++ /dev/null
@@ -1,119 +0,0 @@
-/**
-* Copyright 2011, Big Switch Networks, Inc.
-* Originally created by David Erickson, Stanford University
-*
-* Licensed under the Apache License, Version 2.0 (the "License"); you may
-* not use this file except in compliance with the License. You may obtain
-* a copy of the License at
-*
-* http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-* License for the specific language governing permissions and limitations
-* under the License.
-**/
-
-package net.floodlightcontroller.storage;
-
-import java.util.ArrayList;
-import java.util.List;
-
-public class RowOrdering {
-
- public enum Direction { ASCENDING, DESCENDING };
-
- public class Item {
-
- private String column;
- private Direction direction;
-
- public Item(String column, Direction direction) {
- assert(column != null);
- assert(direction != null);
- this.column = column;
- this.direction = direction;
- }
-
- public String getColumn() {
- return column;
- }
-
- public Direction getDirection() {
- return direction;
- }
- }
-
- private List<Item> itemList = new ArrayList<Item>();
-
- public RowOrdering() {
- }
-
- public RowOrdering(String column) {
- add(column);
- }
-
- public RowOrdering(String column, Direction direction) {
- add(column, direction);
- }
-
- public RowOrdering(Item item) {
- add(item);
- }
-
- public RowOrdering(Item[] itemArray) {
- add(itemArray);
- }
-
- public RowOrdering(List<Item> itemList) {
- add(itemList);
- }
-
- public void add(String column) {
- itemList.add(new Item(column, Direction.ASCENDING));
- }
-
- public void add(String column, Direction direction) {
- itemList.add(new Item(column, direction));
- }
-
- public void add(Item item) {
- assert(item != null);
- itemList.add(item);
- }
-
- public void add(Item[] itemArray) {
- for (Item item: itemArray) {
- itemList.add(item);
- }
- }
-
- public void add(List<Item> itemList) {
- this.itemList.addAll(itemList);
- }
-
- public List<Item> getItemList() {
- return itemList;
- }
-
- public boolean equals(RowOrdering rowOrdering) {
- if (rowOrdering == null)
- return false;
-
- int len1 = itemList.size();
- int len2 = rowOrdering.getItemList().size();
- if (len1 != len2)
- return false;
-
- for (int i = 0; i < len1; i++) {
- Item item1 = itemList.get(i);
- Item item2 = rowOrdering.getItemList().get(i);
- if (!item1.getColumn().equals(item2.getColumn()) ||
- item1.getDirection() != item2.getDirection())
- return false;
- }
-
- return true;
- }
-}
diff --git a/src/main/java/net/floodlightcontroller/storage/StorageException.java b/src/main/java/net/floodlightcontroller/storage/StorageException.java
deleted file mode 100644
index f5dea23..0000000
--- a/src/main/java/net/floodlightcontroller/storage/StorageException.java
+++ /dev/null
@@ -1,44 +0,0 @@
-/**
-* Copyright 2011, Big Switch Networks, Inc.
-* Originally created by David Erickson, Stanford University
-*
-* Licensed under the Apache License, Version 2.0 (the "License"); you may
-* not use this file except in compliance with the License. You may obtain
-* a copy of the License at
-*
-* http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-* License for the specific language governing permissions and limitations
-* under the License.
-**/
-
-package net.floodlightcontroller.storage;
-
-public class StorageException extends RuntimeException {
-
- static final long serialVersionUID = 7839989010156155681L;
-
- static private String makeExceptionMessage(String s) {
- String message = "Storage Exception";
- if (s != null) {
- message += ": ";
- message += s;
- }
- return message;
- }
-
- public StorageException() {
- super(makeExceptionMessage(null));
- }
-
- public StorageException(String s) {
- super(makeExceptionMessage(s));
- }
-
- public StorageException(String s, Throwable exc) {
- super(makeExceptionMessage(s), exc);
- }
-}
diff --git a/src/main/java/net/floodlightcontroller/storage/StorageNotificationFormatException.java b/src/main/java/net/floodlightcontroller/storage/StorageNotificationFormatException.java
deleted file mode 100644
index f6ce565..0000000
--- a/src/main/java/net/floodlightcontroller/storage/StorageNotificationFormatException.java
+++ /dev/null
@@ -1,26 +0,0 @@
-/**
-* Copyright 2011, Big Switch Networks, Inc.
-* Originally created by David Erickson, Stanford University
-*
-* Licensed under the Apache License, Version 2.0 (the "License"); you may
-* not use this file except in compliance with the License. You may obtain
-* a copy of the License at
-*
-* http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-* License for the specific language governing permissions and limitations
-* under the License.
-**/
-
-package net.floodlightcontroller.storage;
-
-public class StorageNotificationFormatException extends StorageException {
- private static final long serialVersionUID = 504758477518283156L;
-
- public StorageNotificationFormatException() {
- super("Invalid storage notification format");
- }
-}
diff --git a/src/main/java/net/floodlightcontroller/storage/StorageSourceNotification.java b/src/main/java/net/floodlightcontroller/storage/StorageSourceNotification.java
deleted file mode 100644
index c9a5450..0000000
--- a/src/main/java/net/floodlightcontroller/storage/StorageSourceNotification.java
+++ /dev/null
@@ -1,108 +0,0 @@
-/**
-* Copyright 2011, Big Switch Networks, Inc.
-* Originally created by David Erickson, Stanford University
-*
-* Licensed under the Apache License, Version 2.0 (the "License"); you may
-* not use this file except in compliance with the License. You may obtain
-* a copy of the License at
-*
-* http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-* License for the specific language governing permissions and limitations
-* under the License.
-**/
-
-package net.floodlightcontroller.storage;
-
-import java.util.Set;
-
-public class StorageSourceNotification {
-
- public enum Action { MODIFY, DELETE };
-
- private String tableName;
- private Action action;
- private Set<Object> keys;
-
- public StorageSourceNotification() {
- }
-
- public StorageSourceNotification(String tableName, Action action, Set<Object> keys) {
- this.tableName = tableName;
- this.action = action;
- this.keys = keys;
- }
-
- public String getTableName() {
- return tableName;
- }
-
- public Action getAction() {
- return action;
- }
-
- public Set<Object> getKeys() {
- return keys;
- }
-
- public void setTableName(String tableName) {
- this.tableName = tableName;
- }
-
- public void setAction(Action action) {
- this.action = action;
- }
-
- public void setKeys(Set<Object> keys) {
- this.keys = keys;
- }
-
- /* (non-Javadoc)
- * @see java.lang.Object#hashCode()
- */
- @Override
- public int hashCode() {
- final int prime = 7867;
- int result = 1;
- result = prime * result + tableName.hashCode();
- result = prime * result + action.hashCode();
- result = prime * result + keys.hashCode();
- return result;
- }
-
- @Override
- public boolean equals(Object obj) {
- if (this == obj)
- return true;
- if (obj == null)
- return false;
- if (!(obj instanceof StorageSourceNotification))
- return false;
- StorageSourceNotification other = (StorageSourceNotification) obj;
- if (tableName == null) {
- if (other.tableName != null)
- return false;
- } else if (!tableName.equals(other.tableName))
- return false;
- if (action == null) {
- if (other.action != null)
- return false;
- } else if (action != other.action)
- return false;
- if (keys == null) {
- if (other.keys != null)
- return false;
- } else if (!keys.equals(other.keys))
- return false;
- return true;
- }
-
- @Override
- public String toString() {
- return ("StorageNotification[table=" + tableName + "; action=" +
- action.toString() + "; keys=" + keys.toString() + "]");
- }
-}
diff --git a/src/main/java/net/floodlightcontroller/storage/SynchronousExecutorService.java b/src/main/java/net/floodlightcontroller/storage/SynchronousExecutorService.java
deleted file mode 100644
index f1e7cd3..0000000
--- a/src/main/java/net/floodlightcontroller/storage/SynchronousExecutorService.java
+++ /dev/null
@@ -1,177 +0,0 @@
-/**
-* Copyright 2011, Big Switch Networks, Inc.
-* Originally created by David Erickson, Stanford University
-*
-* Licensed under the Apache License, Version 2.0 (the "License"); you may
-* not use this file except in compliance with the License. You may obtain
-* a copy of the License at
-*
-* http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-* License for the specific language governing permissions and limitations
-* under the License.
-**/
-
-package net.floodlightcontroller.storage;
-
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.List;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Callable;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.Future;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-
-public class SynchronousExecutorService implements ExecutorService {
-
- class SynchronousFuture<T> implements Future<T> {
-
- T result;
- Exception exc;
-
- public SynchronousFuture() {
- }
-
- public SynchronousFuture(T result) {
- this.result = result;
- }
-
- public SynchronousFuture(Exception exc) {
- this.exc = exc;
- }
-
- @Override
- public boolean cancel(boolean mayInterruptIfRunning) {
- return false;
- }
-
- @Override
- public boolean isCancelled() {
- return false;
- }
-
- @Override
- public boolean isDone() {
- return true;
- }
-
- @Override
- public T get() throws InterruptedException, ExecutionException {
- if (exc != null)
- throw new ExecutionException(exc);
- return result;
- }
-
- @Override
- public T get(long timeout, TimeUnit unit) throws InterruptedException,
- ExecutionException, TimeoutException {
- return get();
- }
- }
-
- @Override
- public void shutdown() {
- }
-
- @Override
- public List<Runnable> shutdownNow() {
- return null;
- }
-
- @Override
- public boolean isShutdown() {
- return false;
- }
-
- @Override
- public boolean isTerminated() {
- return false;
- }
-
- @Override
- public boolean awaitTermination(long timeout, TimeUnit unit)
- throws InterruptedException {
- return false;
- }
-
- @Override
- public <T> List<Future<T>> invokeAll(Collection<? extends Callable<T>> tasks)
- throws InterruptedException {
- List<Future<T>> l = new ArrayList<Future<T>>();
- for (Callable<T> task : tasks) {
- Future<T> future = submit(task);
- l.add(future);
- }
- return l;
- }
-
- @Override
- public <T> List<Future<T>> invokeAll(
- Collection<? extends Callable<T>> tasks, long timeout, TimeUnit units)
- throws InterruptedException {
- return invokeAll(tasks);
- }
-
- @Override
- public <T> T invokeAny(Collection<? extends Callable<T>> tasks)
- throws InterruptedException, ExecutionException {
- for (Callable<T> task : tasks) {
- try {
- task.call();
- } catch (Exception e) {
-
- }
- }
- throw new ExecutionException(new Exception("no task completed successfully"));
- }
-
- @Override
- public <T> T invokeAny(Collection<? extends Callable<T>> tasks, long timeout,
- TimeUnit units) throws InterruptedException, ExecutionException,
- TimeoutException {
- return invokeAny(tasks);
- }
-
- @Override
- public <T> Future<T> submit(Callable<T> callable) {
- try {
- T result = callable.call();
- return new SynchronousFuture<T>(result);
- }
- catch (Exception exc) {
- return new SynchronousFuture<T>(exc);
- }
- }
-
- @Override
- public Future<?> submit(Runnable runnable) {
- try {
- runnable.run();
- return new SynchronousFuture<Void>();
- }
- catch (Exception exc) {
- return new SynchronousFuture<Void>(exc);
- }
- }
-
- @Override
- public <T> Future<T> submit(Runnable runnable, T result) {
- try {
- runnable.run();
- return new SynchronousFuture<T>(result);
- }
- catch (Exception exc) {
- return new SynchronousFuture<T>(exc);
- }
- }
-
- @Override
- public void execute(Runnable command) {
- command.run();
- }
-}
diff --git a/src/main/java/net/floodlightcontroller/storage/TypeMismatchStorageException.java b/src/main/java/net/floodlightcontroller/storage/TypeMismatchStorageException.java
deleted file mode 100644
index 5643140..0000000
--- a/src/main/java/net/floodlightcontroller/storage/TypeMismatchStorageException.java
+++ /dev/null
@@ -1,42 +0,0 @@
-/**
-* Copyright 2011, Big Switch Networks, Inc.
-* Originally created by David Erickson, Stanford University
-*
-* Licensed under the Apache License, Version 2.0 (the "License"); you may
-* not use this file except in compliance with the License. You may obtain
-* a copy of the License at
-*
-* http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-* License for the specific language governing permissions and limitations
-* under the License.
-**/
-
-package net.floodlightcontroller.storage;
-
-public class TypeMismatchStorageException extends StorageException {
-
- private static final long serialVersionUID = -7923586656854871345L;
-
- private static String makeExceptionMessage(String requestedType, String actualType, String columnName) {
- if (requestedType == null)
- requestedType = "???";
- if (actualType == null)
- actualType = "???";
- if (columnName == null)
- columnName = "???";
- String message = "The requested type (" + requestedType + ") does not match the actual type (" + actualType + ") of the value for column \"" + columnName + "\".";
- return message;
- }
-
- public TypeMismatchStorageException() {
- super(makeExceptionMessage(null, null, null));
- }
-
- public TypeMismatchStorageException(String requestedType, String actualType, String columnName) {
- super(makeExceptionMessage(requestedType, actualType, columnName));
- }
-}
diff --git a/src/main/java/net/floodlightcontroller/storage/memory/MemoryStorageSource.java b/src/main/java/net/floodlightcontroller/storage/memory/MemoryStorageSource.java
deleted file mode 100644
index 3c8d663..0000000
--- a/src/main/java/net/floodlightcontroller/storage/memory/MemoryStorageSource.java
+++ /dev/null
@@ -1,193 +0,0 @@
-/**
-* Copyright 2011, Big Switch Networks, Inc.
-* Originally created by David Erickson, Stanford University
-*
-* Licensed under the Apache License, Version 2.0 (the "License"); you may
-* not use this file except in compliance with the License. You may obtain
-* a copy of the License at
-*
-* http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-* License for the specific language governing permissions and limitations
-* under the License.
-**/
-
-package net.floodlightcontroller.storage.memory;
-
-import net.floodlightcontroller.core.module.FloodlightModuleContext;
-import net.floodlightcontroller.storage.nosql.NoSqlStorageSource;
-import net.floodlightcontroller.storage.SynchronousExecutorService;
-import net.floodlightcontroller.storage.IStorageSourceService;
-import net.floodlightcontroller.core.module.IFloodlightService;
-
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import net.floodlightcontroller.storage.StorageException;
-
-public class MemoryStorageSource extends NoSqlStorageSource {
-
- private Map<String, MemoryTable> tableMap = new HashMap<String,MemoryTable>();
-
-
- synchronized private MemoryTable getTable(String tableName, boolean create) {
- MemoryTable table = tableMap.get(tableName);
- if (table == null) {
- if (!create)
- throw new StorageException("Table " + tableName + " does not exist");
- table = new MemoryTable(tableName);
- tableMap.put(tableName, table);
- }
- return table;
- }
-
- @Override
- protected Collection<Map<String,Object>> getAllRows(String tableName, String[] columnNameList) {
- MemoryTable table = getTable(tableName, false);
- return table.getAllRows();
- }
-
- @Override
- protected Map<String,Object> getRow(String tableName, String[] columnNameList, Object rowKey) {
- MemoryTable table = getTable(tableName, false);
- return table.getRow(rowKey);
- }
-
- @Override
- protected List<Map<String,Object>> executeEqualityQuery(String tableName,
- String[] columnNameList, String predicateColumnName, Comparable<?> value) {
- MemoryTable table = getTable(tableName, false);
- List<Map<String,Object>> result = new ArrayList<Map<String,Object>>();
- synchronized (table) {
- Collection<Map<String,Object>> allRows = table.getAllRows();
- for (Map<String,Object> row : allRows) {
- Object v = row.get(predicateColumnName);
- if (value.equals(v)) {
- result.add(row);
- }
- }
- }
- return result;
- }
-
- @SuppressWarnings({ "unchecked", "rawtypes" })
- @Override
- protected List<Map<String,Object>> executeRangeQuery(String tableName,
- String[] columnNameList, String predicateColumnName,
- Comparable<?> startValue, boolean startInclusive, Comparable<?> endValue, boolean endInclusive) {
- MemoryTable table = getTable(tableName, false);
- List<Map<String,Object>> result = new ArrayList<Map<String,Object>>();
- synchronized (table) {
- Collection<Map<String,Object>> allRows = table.getAllRows();
- for (Map<String,Object> row : allRows) {
- Comparable value = (Comparable) row.get(predicateColumnName);
- if (value != null) {
- int compareResult = value.compareTo(startValue);
- if ((compareResult > 0) || (startInclusive && (compareResult >= 0))) {
- compareResult = value.compareTo(endValue);
- if ((compareResult < 0) || (startInclusive && (compareResult <= 0))) {
- result.add(row);
- }
- }
- }
- }
- }
- return result;
- }
-
- @Override
- protected void insertRows(String tableName, List<Map<String,Object>> insertRowList) {
- MemoryTable table = getTable(tableName, false);
- String primaryKeyName = getTablePrimaryKeyName(tableName);
- synchronized (table) {
- for (Map<String,Object> row : insertRowList) {
- Object primaryKey = row.get(primaryKeyName);
- if (primaryKey == null) {
- if (primaryKeyName.equals(DEFAULT_PRIMARY_KEY_NAME)) {
- row = new HashMap<String,Object>(row);
- primaryKey = table.getNextId();
- row.put(primaryKeyName, primaryKey);
- }
- }
- table.insertRow(primaryKey, row);
- }
- }
- }
-
- @Override
- protected void updateRows(String tableName, Set<Object> rowKeys, Map<String,Object> updateRowList) {
- MemoryTable table = getTable(tableName, false);
- synchronized (table) {
- for (Object rowKey : rowKeys) {
- Map<String,Object> row = table.getRow(rowKey);
- if (row == null)
- row = table.newRow(rowKey);
- for (Map.Entry<String,Object> entry: updateRowList.entrySet()) {
- row.put(entry.getKey(), entry.getValue());
- }
- }
- }
- }
-
- @Override
- protected void updateRowsImpl(String tableName, List<Map<String,Object>> updateRowList) {
- MemoryTable table = getTable(tableName, false);
- String primaryKeyName = getTablePrimaryKeyName(tableName);
- synchronized (table) {
- for (Map<String,Object> updateRow : updateRowList) {
- Object rowKey = updateRow.get(primaryKeyName);
- if (rowKey == null)
- throw new StorageException("Primary key not found.");
- Map<String,Object> row = table.getRow(rowKey);
- if (row == null)
- row = table.newRow(rowKey);
- for (Map.Entry<String,Object> entry: updateRow.entrySet()) {
- row.put(entry.getKey(), entry.getValue());
- }
- }
- }
- }
-
- @Override
- protected void deleteRowsImpl(String tableName, Set<Object> rowKeys) {
- MemoryTable table = getTable(tableName, false);
- synchronized (table) {
- for (Object rowKey : rowKeys) {
- table.deleteRow(rowKey);
- }
- }
- }
-
- @Override
- public void createTable(String tableName, Set<String> indexedColumnNames) {
- super.createTable(tableName, indexedColumnNames);
- getTable(tableName, true);
- }
-
-
- // IFloodlightModule methods
-
- @Override
- public void startUp(FloodlightModuleContext context) {
- super.startUp(context);
- executorService = new SynchronousExecutorService();
- }
-
- @Override
- public Map<Class<? extends IFloodlightService>,
- IFloodlightService> getServiceImpls() {
- Map<Class<? extends IFloodlightService>,
- IFloodlightService> m =
- new HashMap<Class<? extends IFloodlightService>,
- IFloodlightService>();
- m.put(IStorageSourceService.class, this);
- return m;
- }
-}
diff --git a/src/main/java/net/floodlightcontroller/storage/memory/MemoryTable.java b/src/main/java/net/floodlightcontroller/storage/memory/MemoryTable.java
deleted file mode 100644
index f87ee45..0000000
--- a/src/main/java/net/floodlightcontroller/storage/memory/MemoryTable.java
+++ /dev/null
@@ -1,72 +0,0 @@
-/**
-* Copyright 2011, Big Switch Networks, Inc.
-* Originally created by David Erickson, Stanford University
-*
-* Licensed under the Apache License, Version 2.0 (the "License"); you may
-* not use this file except in compliance with the License. You may obtain
-* a copy of the License at
-*
-* http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-* License for the specific language governing permissions and limitations
-* under the License.
-**/
-
-package net.floodlightcontroller.storage.memory;
-
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.TreeMap;
-
-public class MemoryTable {
-
- private String tableName;
- private Map<Object,Map<String,Object>> rowMap;
- private int nextId;
-
- MemoryTable(String tableName) {
- this.tableName = tableName;
- rowMap = new TreeMap<Object,Map<String,Object>>();
- nextId = 0;
- }
-
- String getTableName() {
- return tableName;
- }
-
- Collection<Map<String,Object>> getAllRows() {
- return rowMap.values();
- }
-
- Map<String,Object> getRow(Object key) {
- Map<String,Object> row = rowMap.get(key);
- return row;
- }
-
- // rkv: Do we still need this? Probably needs to be tweaked a bit
- // to work with the support for specifying which column to use as the
- // primary key
- Map<String,Object> newRow(Object key) {
- Map<String,Object> row = new HashMap<String, Object>();
- row.put("id", key);
- rowMap.put(key, row);
- return row;
- }
-
- void insertRow(Object key, Map<String,Object> rowValues) {
- assert(key != null);
- rowMap.put(key, rowValues);
- }
-
- void deleteRow(Object rowKey) {
- rowMap.remove(rowKey);
- }
-
- Integer getNextId() {
- return new Integer(++nextId);
- }
-}
diff --git a/src/main/java/net/floodlightcontroller/storage/nosql/NoSqlQuery.java b/src/main/java/net/floodlightcontroller/storage/nosql/NoSqlQuery.java
deleted file mode 100644
index 05f8fc7..0000000
--- a/src/main/java/net/floodlightcontroller/storage/nosql/NoSqlQuery.java
+++ /dev/null
@@ -1,77 +0,0 @@
-/**
-* Copyright 2011, Big Switch Networks, Inc.
-* Originally created by David Erickson, Stanford University
-*
-* Licensed under the Apache License, Version 2.0 (the "License"); you may
-* not use this file except in compliance with the License. You may obtain
-* a copy of the License at
-*
-* http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-* License for the specific language governing permissions and limitations
-* under the License.
-**/
-
-package net.floodlightcontroller.storage.nosql;
-
-import java.util.HashMap;
-import java.util.Map;
-
-import net.floodlightcontroller.storage.IPredicate;
-import net.floodlightcontroller.storage.IQuery;
-import net.floodlightcontroller.storage.RowOrdering;
-
-public class NoSqlQuery implements IQuery {
-
- private String tableName;
- private String[] columnNameList;
- private IPredicate predicate;
- private RowOrdering rowOrdering;
- private Map<String,Comparable<?>> parameterMap;
-
- NoSqlQuery(String className, String[] columnNameList, IPredicate predicate, RowOrdering rowOrdering) {
- this.tableName = className;
- this.columnNameList = columnNameList;
- this.predicate = predicate;
- this.rowOrdering = rowOrdering;
- }
-
- @Override
- public void setParameter(String name, Object value) {
- if (parameterMap == null)
- parameterMap = new HashMap<String,Comparable<?>>();
- parameterMap.put(name, (Comparable<?>)value);
- }
-
- @Override
- public String getTableName() {
- return tableName;
- }
-
- String[] getColumnNameList() {
- return columnNameList;
- }
-
- IPredicate getPredicate() {
- return predicate;
- }
-
- RowOrdering getRowOrdering() {
- return rowOrdering;
- }
-
- Comparable<?> getParameter(String name) {
- Comparable<?> value = null;
- if (parameterMap != null) {
- value = parameterMap.get(name);
- }
- return value;
- }
-
- Map<String,Comparable<?>> getParameterMap() {
- return parameterMap;
- }
-}
diff --git a/src/main/java/net/floodlightcontroller/storage/nosql/NoSqlResultSet.java b/src/main/java/net/floodlightcontroller/storage/nosql/NoSqlResultSet.java
deleted file mode 100644
index b3a8c20..0000000
--- a/src/main/java/net/floodlightcontroller/storage/nosql/NoSqlResultSet.java
+++ /dev/null
@@ -1,487 +0,0 @@
-/**
-* Copyright 2011, Big Switch Networks, Inc.
-* Originally created by David Erickson, Stanford University
-*
-* Licensed under the Apache License, Version 2.0 (the "License"); you may
-* not use this file except in compliance with the License. You may obtain
-* a copy of the License at
-*
-* http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-* License for the specific language governing permissions and limitations
-* under the License.
-**/
-
-package net.floodlightcontroller.storage.nosql;
-
-import java.text.ParseException;
-import java.text.SimpleDateFormat;
-import java.util.Date;
-import java.util.Iterator;
-import java.util.List;
-import java.util.ArrayList;
-import java.util.Map;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Set;
-import java.util.TimeZone;
-
-import net.floodlightcontroller.storage.IResultSet;
-import net.floodlightcontroller.storage.NullValueStorageException;
-import net.floodlightcontroller.storage.ResultSetIterator;
-import net.floodlightcontroller.storage.StorageException;
-import net.floodlightcontroller.storage.TypeMismatchStorageException;
-
-public class NoSqlResultSet implements IResultSet {
-
- NoSqlStorageSource storageSource;
- String tableName;
- String primaryKeyName;
- List<Map<String,Object>> rowList;
- int currentIndex;
- Map<String,Object> currentRowUpdate;
- List<Map<String,Object>> rowUpdateList;
- Set<Object> rowDeleteSet;
- Iterator<IResultSet> resultSetIterator;
-
- NoSqlResultSet(NoSqlStorageSource storageSource, String tableName, List<Map<String,Object>> rowList) {
- this.storageSource = storageSource;
- this.primaryKeyName = storageSource.getTablePrimaryKeyName(tableName);
- this.tableName = tableName;
- if (rowList == null)
- rowList = new ArrayList<Map<String,Object>>();
- this.rowList = rowList;
- currentIndex = -1;
- }
-
- void addRow(Map<String,Object> row) {
- rowList.add(row);
- }
-
- @Override
- public Map<String,Object> getRow() {
- if ((currentIndex < 0) || (currentIndex >= rowList.size())) {
- throw new StorageException("No current row in result set.");
- }
-
- return rowList.get(currentIndex);
- }
-
- @Override
- public boolean containsColumn(String columnName) {
- return getObject(columnName) != null;
- }
-
- @Override
- public void close() {
- }
-
- private void endCurrentRowUpdate() {
- if (currentRowUpdate != null) {
- if (rowUpdateList == null)
- rowUpdateList = new ArrayList<Map<String,Object>>();
- rowUpdateList.add(currentRowUpdate);
- currentRowUpdate = null;
- }
- }
-
- @Override
- public boolean next() {
- endCurrentRowUpdate();
- currentIndex++;
- return currentIndex < rowList.size();
- }
-
- @Override
- public void save() {
- endCurrentRowUpdate();
-
- if (rowUpdateList != null) {
- storageSource.updateRows(tableName, rowUpdateList);
- rowUpdateList = null;
- }
-
- if (rowDeleteSet != null) {
- storageSource.deleteRows(tableName, rowDeleteSet);
- rowDeleteSet = null;
- }
- }
-
- Object getObject(String columnName) {
- Map<String,Object> row = rowList.get(currentIndex);
- Object value = row.get(columnName);
- return value;
- }
-
- @Override
- public boolean getBoolean(String columnName) {
- Boolean b = getBooleanObject(columnName);
- if (b == null)
- throw new NullValueStorageException(columnName);
- return b.booleanValue();
- }
-
- @Override
- public byte getByte(String columnName) {
- Byte b = getByteObject(columnName);
- if (b == null)
- throw new NullValueStorageException(columnName);
- return b.byteValue();
- }
-
- @Override
- public byte[] getByteArray(String columnName) {
- byte[] b = null;
- Object obj = getObject(columnName);
- if (obj != null) {
- if (!(obj instanceof byte[]))
- throw new StorageException("Invalid byte array value");
- b = (byte[])obj;
- }
- return b;
- }
-
- @Override
- public double getDouble(String columnName) {
- Double d = getDoubleObject(columnName);
- if (d == null)
- throw new NullValueStorageException(columnName);
- return d.doubleValue();
- }
-
- @Override
- public float getFloat(String columnName) {
- Float f = getFloatObject(columnName);
- if (f == null)
- throw new NullValueStorageException(columnName);
- return f.floatValue();
- }
-
- @Override
- public int getInt(String columnName) {
- Integer i = getIntegerObject(columnName);
- if (i == null)
- throw new NullValueStorageException(columnName);
- return i.intValue();
- }
-
- @Override
- public long getLong(String columnName) {
- Long l = getLongObject(columnName);
- if (l == null)
- throw new NullValueStorageException(columnName);
- return l.longValue();
- }
-
- @Override
- public short getShort(String columnName) {
- Short s = getShortObject(columnName);
- if (s == null)
- throw new NullValueStorageException(columnName);
- return s.shortValue();
- }
-
- @Override
- public String getString(String columnName) {
- Object obj = getObject(columnName);
- if (obj == null)
- return null;
- return obj.toString();
- }
-
- @Override
- public Date getDate(String column) {
- Date d;
- Object obj = getObject(column);
- if (obj == null) {
- d = null;
- } else if (obj instanceof Date) {
- d = (Date) obj;
- } else {
- SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS");
- dateFormat.setTimeZone(TimeZone.getTimeZone("GMT"));
- try {
- d = dateFormat.parse(obj.toString());
- }
- catch (ParseException exc) {
- throw new TypeMismatchStorageException(Date.class.getName(), obj.getClass().getName(), column);
- }
- }
- return d;
- }
-
-
- @Override
- public Short getShortObject(String columnName)
- {
- Short s;
- Object obj = getObject(columnName);
- if (obj instanceof Short) {
- s = (Short)obj;
- } else if (obj != null) {
- try {
- s = Short.parseShort(obj.toString());
- }
- catch (NumberFormatException exc) {
- throw new TypeMismatchStorageException(Short.class.getName(), obj.getClass().getName(), columnName);
- }
- } else {
- s = null;
- }
- return s;
- }
-
- @Override
- public Integer getIntegerObject(String columnName)
- {
- Integer i;
- Object obj = getObject(columnName);
- if (obj instanceof Integer) {
- i = (Integer)obj;
- } else if (obj != null) {
- try {
- i = Integer.parseInt(obj.toString());
- }
- catch (NumberFormatException exc) {
- throw new TypeMismatchStorageException(Integer.class.getName(), obj.getClass().getName(), columnName);
- }
- } else {
- i = null;
- }
- return i;
- }
-
- @Override
- public Long getLongObject(String columnName)
- {
- Long l;
- Object obj = getObject(columnName);
- if (obj instanceof Long) {
- l = (Long)obj;
- } else if (obj != null) {
- try {
- l = Long.parseLong(obj.toString());
- }
- catch (NumberFormatException exc) {
- throw new TypeMismatchStorageException(Long.class.getName(), obj.getClass().getName(), columnName);
- }
- } else {
- l = null;
- }
- return l;
- }
-
- @Override
- public Float getFloatObject(String columnName)
- {
- Float f;
- Object obj = getObject(columnName);
- if (obj instanceof Float) {
- f = (Float)obj;
- } else if (obj != null) {
- try {
- f = Float.parseFloat(obj.toString());
- }
- catch (NumberFormatException exc) {
- throw new TypeMismatchStorageException(Float.class.getName(), obj.getClass().getName(), columnName);
- }
- } else {
- f = null;
- }
- return f;
- }
-
- @Override
- public Double getDoubleObject(String columnName)
- {
- Double d;
- Object obj = getObject(columnName);
- if (obj instanceof Double) {
- d = (Double)obj;
- } else if (obj != null) {
- try {
- d = Double.parseDouble(obj.toString());
- }
- catch (NumberFormatException exc) {
- throw new TypeMismatchStorageException(Double.class.getName(), obj.getClass().getName(), columnName);
- }
- } else {
- d = null;
- }
- return d;
- }
-
- @Override
- public Boolean getBooleanObject(String columnName)
- {
- Boolean b;
- Object obj = getObject(columnName);
- if (obj instanceof Boolean) {
- b = (Boolean)obj;
- } else if (obj != null) {
- try {
- b = Boolean.parseBoolean(obj.toString());
- }
- catch (NumberFormatException exc) {
- throw new TypeMismatchStorageException(Boolean.class.getName(), obj.getClass().getName(), columnName);
- }
- } else {
- b = null;
- }
- return b;
- }
-
- @Override
- public Byte getByteObject(String columnName)
- {
- Byte b;
- Object obj = getObject(columnName);
- if (obj instanceof Byte) {
- b = (Byte)obj;
- } else if (obj != null) {
- try {
- b = Byte.parseByte(obj.toString());
- }
- catch (NumberFormatException exc) {
- throw new TypeMismatchStorageException(Byte.class.getName(), obj.getClass().getName(), columnName);
- }
- } else {
- b = null;
- }
- return b;
- }
-
-
- @Override
- public boolean isNull(String columnName)
- {
- Object obj = getObject(columnName);
- return (obj == null);
- }
-
- private void addRowUpdate(String column, Object value) {
- if (currentRowUpdate == null) {
- currentRowUpdate = new HashMap<String,Object>();
- Object key = rowList.get(currentIndex).get(primaryKeyName);
- currentRowUpdate.put(primaryKeyName, key);
- }
- currentRowUpdate.put(column, value);
- }
-
- @Override
- public void setBoolean(String columnName, boolean value) {
- addRowUpdate(columnName, value);
- }
-
- @Override
- public void setByte(String columnName, byte value) {
- addRowUpdate(columnName, value);
- }
-
- @Override
- public void setByteArray(String columnName, byte[] byteArray) {
- addRowUpdate(columnName, byteArray);
- }
-
- @Override
- public void setDouble(String columnName, double value) {
- addRowUpdate(columnName, value);
- }
-
- @Override
- public void setFloat(String columnName, float value) {
- addRowUpdate(columnName, value);
- }
-
- @Override
- public void setInt(String columnName, int value) {
- addRowUpdate(columnName, value);
- }
-
- @Override
- public void setLong(String columnName, long value) {
- addRowUpdate(columnName, value);
- }
-
- @Override
- public void setShort(String columnName, short value) {
- addRowUpdate(columnName, value);
- }
-
- @Override
- public void setString(String columnName, String value) {
- addRowUpdate(columnName, value);
- }
-
- @Override
- public void setShortObject(String columnName, Short value)
- {
- addRowUpdate(columnName, value);
- }
-
- @Override
- public void setIntegerObject(String columnName, Integer value)
- {
- addRowUpdate(columnName, value);
- }
-
- @Override
- public void setLongObject(String columnName, Long value)
- {
- addRowUpdate(columnName, value);
- }
-
- @Override
- public void setFloatObject(String columnName, Float value)
- {
- addRowUpdate(columnName, value);
- }
-
- @Override
- public void setDoubleObject(String columnName, Double value)
- {
- addRowUpdate(columnName, value);
- }
-
- @Override
- public void setBooleanObject(String columnName, Boolean value)
- {
- addRowUpdate(columnName, value);
- }
-
- @Override
- public void setByteObject(String columnName, Byte value)
- {
- addRowUpdate(columnName, value);
- }
-
- @Override
- public void setDate(String column, Date value) {
- addRowUpdate(column, value);
- }
-
-
- public void setNull(String columnName)
- {
- addRowUpdate(columnName, null);
- }
-
-
- @Override
- public void deleteRow() {
- Object key = (String) rowList.get(currentIndex).get(primaryKeyName);
- if (rowDeleteSet == null)
- rowDeleteSet = new HashSet<Object>();
- rowDeleteSet.add(key);
- }
-
- @Override
- public Iterator<IResultSet> iterator() {
- if (resultSetIterator == null)
- resultSetIterator = new ResultSetIterator(this);
- return resultSetIterator;
- }
-}
diff --git a/src/main/java/net/floodlightcontroller/storage/nosql/NoSqlStorageSource.java b/src/main/java/net/floodlightcontroller/storage/nosql/NoSqlStorageSource.java
deleted file mode 100644
index 6624932..0000000
--- a/src/main/java/net/floodlightcontroller/storage/nosql/NoSqlStorageSource.java
+++ /dev/null
@@ -1,823 +0,0 @@
-/**
-* Copyright 2011, Big Switch Networks, Inc.
-* Originally created by David Erickson, Stanford University
-*
-* Licensed under the Apache License, Version 2.0 (the "License"); you may
-* not use this file except in compliance with the License. You may obtain
-* a copy of the License at
-*
-* http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-* License for the specific language governing permissions and limitations
-* under the License.
-**/
-
-package net.floodlightcontroller.storage.nosql;
-
-import java.lang.Class;
-
-import java.text.ParseException;
-import java.text.SimpleDateFormat;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.Comparator;
-import java.util.Date;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.TimeZone;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import net.floodlightcontroller.storage.AbstractStorageSource;
-import net.floodlightcontroller.storage.CompoundPredicate;
-import net.floodlightcontroller.storage.IPredicate;
-import net.floodlightcontroller.storage.IQuery;
-import net.floodlightcontroller.storage.IResultSet;
-import net.floodlightcontroller.storage.OperatorPredicate;
-import net.floodlightcontroller.storage.RowOrdering;
-import net.floodlightcontroller.storage.StorageException;
-import net.floodlightcontroller.storage.StorageSourceNotification;
-import net.floodlightcontroller.storage.TypeMismatchStorageException;
-
-public abstract class NoSqlStorageSource extends AbstractStorageSource {
- protected final static Logger log = LoggerFactory.getLogger(NoSqlStorageSource.class);
-
- public enum ColumnIndexMode { NOT_INDEXED, RANGE_INDEXED, EQUALITY_INDEXED };
-
- protected static final String DEFAULT_PRIMARY_KEY_NAME = "id";
-
- private Map<String,String> tablePrimaryKeyMap = new HashMap<String,String>();
- private Map<String, Map<String,ColumnIndexMode>> tableIndexedColumnMap =
- new HashMap<String,Map<String,ColumnIndexMode>>();
-
- abstract class NoSqlPredicate {
-
- public boolean incorporateComparison(String columnName,
- OperatorPredicate.Operator operator, Comparable<?> value,
- CompoundPredicate.Operator parentOperator) {
- return false;
- }
-
- public boolean canExecuteEfficiently() {
- return false;
- }
-
- public List<Map<String,Object>> execute(String[] columnNames) {
- assert(false);
- return null;
- }
-
- abstract public boolean matchesRow(Map<String,Object> row);
- }
-
- @SuppressWarnings({ "unchecked", "rawtypes" })
- class NoSqlRangePredicate extends NoSqlPredicate {
- NoSqlStorageSource storageSource;
- String tableName;
- String columnName;
- Comparable<?> startValue;
- boolean startInclusive;
- Comparable<?> endValue;
- boolean endInclusive;
-
- NoSqlRangePredicate(NoSqlStorageSource storageSource, String tableName,
- String columnName, Comparable<?> startValue, boolean startInclusive,
- Comparable<?> endValue, boolean endInclusive) {
- this.storageSource = storageSource;
- this.tableName = tableName;
- this.columnName = columnName;
- this.startValue = startValue;
- this.startInclusive = startInclusive;
- this.endValue = endValue;
- this.endInclusive = endInclusive;
- }
-
- public boolean incorporateComparison(String columnName,
- OperatorPredicate.Operator operator, Comparable<?> value,
- CompoundPredicate.Operator parentOperator) {
-
- assert(operator != null);
- assert(parentOperator != null);
-
- // Must be the same column to incorporate
- if (!this.columnName.equals(columnName))
- return false;
-
- // The only time we allow a null value is if it's an EQ operator.
- // In that case we can only incorporate if this predicate is also
- // a null equality predicate.
- if (value == null) {
- return ((operator == OperatorPredicate.Operator.EQ) &&
- (startValue == null) && (endValue == null) &&
- startInclusive && endInclusive);
- }
-
- // Don't incorporate parameterized values
- if (value instanceof String) {
- String s = (String)value;
- if (s.startsWith("?") && s.endsWith("?")) {
- return false;
- }
- }
-
- if (parentOperator == CompoundPredicate.Operator.AND) {
- switch (operator) {
- case EQ:
- if (matchesValue(value)) {
- startValue = endValue = value;
- startInclusive = endInclusive = true;
- return true;
- }
- break;
- case LT:
- if ((endValue == null) || (((Comparable)value).compareTo(endValue) <= 0)) {
- endValue = value;
- endInclusive = false;
- return true;
- }
- break;
- case LTE:
- if ((endValue == null) || (((Comparable)value).compareTo(endValue) < 0)) {
- endValue = value;
- endInclusive = true;
- return true;
- }
- break;
- case GT:
- if ((startValue == null) || (((Comparable)value).compareTo(startValue) >= 0)) {
- startValue = value;
- startInclusive = false;
- return true;
- }
- break;
- case GTE:
- if ((startValue == null) || (((Comparable)value).compareTo(startValue) > 0)) {
- startValue = value;
- startInclusive = true;
- return true;
- }
- break;
- }
- } else {
- switch (operator) {
- case EQ:
- if (matchesValue(value))
- return true;
- break;
- case LT:
- if ((endValue == null) || (((Comparable)value).compareTo(endValue) > 0)) {
- endValue = value;
- endInclusive = false;
- return true;
- }
- break;
- case LTE:
- if ((endValue == null) || (((Comparable)value).compareTo(endValue) >= 0)) {
- endValue = value;
- endInclusive = true;
- return true;
- }
- break;
- case GT:
- if ((startValue == null) || (((Comparable)value).compareTo(startValue) < 0)) {
- startValue = value;
- startInclusive = false;
- return true;
- }
- break;
- case GTE:
- if ((startValue == null) || (((Comparable)value).compareTo(startValue) <= 0)) {
- startValue = value;
- startInclusive = true;
- return true;
- }
- break;
- }
- }
-
- return false;
- }
-
- private boolean isEqualityRange() {
- return (startValue == endValue) && startInclusive && endInclusive;
- }
-
- public boolean canExecuteEfficiently() {
- ColumnIndexMode indexMode = storageSource.getColumnIndexMode(tableName, columnName);
- switch (indexMode) {
- case NOT_INDEXED:
- return false;
- case RANGE_INDEXED:
- return true;
- case EQUALITY_INDEXED:
- return isEqualityRange();
- }
- return true;
- }
-
- public List<Map<String,Object>> execute(String columnNameList[]) {
- List<Map<String,Object>> rowList;
- if (isEqualityRange())
- rowList = storageSource.executeEqualityQuery(tableName, columnNameList, columnName, startValue);
- else
- rowList = storageSource.executeRangeQuery(tableName, columnNameList, columnName,
- startValue, startInclusive, endValue, endInclusive);
-
- return rowList;
- }
-
- Comparable<?> coerceValue(Comparable<?> value, Class targetClass) {
-
- if (value == null)
- return null;
-
- if (value.getClass() == targetClass)
- return value;
-
- // FIXME: For now we convert by first converting the source value to a
- // string and then converting to the target type. This logic probably needs
- // another pass to make it more robust/optimized.
-
- String s = value.toString();
- Comparable<?> obj = null;
-
- try {
- if (targetClass == Integer.class) {
- obj = new Integer(s);
- } else if (targetClass == Long.class) {
- obj = new Long(s);
- } else if (targetClass == Short.class) {
- obj = new Short(s);
- } else if (targetClass == Boolean.class) {
- obj = new Boolean(s);
- } else if (targetClass == Float.class) {
- obj = new Float(s);
- } else if (targetClass == Double.class) {
- obj = new Double(s);
- } else if (targetClass == Byte.class) {
- obj = new Byte(s);
- } else if (targetClass == String.class) {
- obj = s;
- } else if (targetClass == Date.class) {
- SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS");
- dateFormat.setTimeZone(TimeZone.getTimeZone("GMT"));
- try {
- obj = dateFormat.parse(s);
- }
- catch (ParseException exc) {
- throw new TypeMismatchStorageException(Date.class.getName(), value.getClass().getName(), "???");
- }
- }
- }
- catch (Exception exc) {
- // Ignore the exception here. In this case obj will not be set, so we'll
- // throw the StorageException below when we check for a null obj.
- }
-
- if (obj == null)
- throw new StorageException("Column value could not be coerced to the correct type");
-
- return obj;
- }
-
- boolean matchesValue(Comparable<?> value) {
- boolean isNullEqPredicate = (startValue == null) && (endValue == null) && startInclusive && endInclusive;
- if (value == null)
- return isNullEqPredicate;
-
- if (isNullEqPredicate)
- return false;
-
- int result;
- Comparable<?> coercedValue;
- if (startValue != null) {
- coercedValue = coerceValue(value, startValue.getClass());
- result = ((Comparable)coercedValue).compareTo(startValue);
- if ((result < 0) || (!startInclusive && (result == 0)))
- return false;
- }
- if (endValue != null) {
- coercedValue = coerceValue(value, endValue.getClass());
- result = ((Comparable)coercedValue).compareTo(endValue);
- if ((result > 0) || (!endInclusive && (result == 0)))
- return false;
- }
- return true;
- }
-
- public boolean matchesRow(Map<String,Object> row) {
- Comparable value = (Comparable)row.get(columnName);
- return matchesValue(value);
- }
- }
-
- class NoSqlOperatorPredicate extends NoSqlPredicate {
-
- NoSqlStorageSource storageSource;
- String columnName;
- OperatorPredicate.Operator operator;
- Object value;
-
- NoSqlOperatorPredicate(NoSqlStorageSource storageSource, String columnName,
- OperatorPredicate.Operator operator, Object value) {
- this.storageSource = storageSource;
- this.columnName = columnName;
- this.operator = operator;
- this.value = value;
- }
-
- public boolean incorporateComparison(String columnName,
- OperatorPredicate.Operator operator, Comparable<?> value,
- CompoundPredicate.Operator parentOperator) {
- return false;
- }
-
- public boolean canExecuteEfficiently() {
- return false;
- }
-
- public List<Map<String,Object>> execute(String columnNames[]) {
- throw new StorageException("Unimplemented predicate.");
- }
-
- public boolean matchesRow(Map<String,Object> row) {
- return false;
- }
- }
-
- class NoSqlCompoundPredicate extends NoSqlPredicate {
-
- NoSqlStorageSource storageSource;
- CompoundPredicate.Operator operator;
- boolean negated;
- List<NoSqlPredicate> predicateList;
-
- NoSqlCompoundPredicate(NoSqlStorageSource storageSource, CompoundPredicate.Operator operator,
- boolean negated, List<NoSqlPredicate> predicateList) {
- this.storageSource = storageSource;
- this.operator = operator;
- this.negated = negated;
- this.predicateList = predicateList;
- }
-
- public boolean incorporateComparison(String columnName,
- OperatorPredicate.Operator operator, Comparable<?> value,
- CompoundPredicate.Operator parentOperator) {
- // It may be possible to incorporate other operator predicate into this one,
- // but it would need to take into account the negated attribute and I'd need
- // to think about it some more to make sure it was correct, so for now we just
- // disallow incorporation
- //if (parentOperator == this.operator) {
- // for (NoSqlPredicate predicate: predicateList) {
- // if (predicate.incorporateComparison(columnName, operator, value, parentOperator))
- // return true;
- // }
- //}
- return false;
- }
-
- public boolean canExecuteEfficiently() {
- if (operator == CompoundPredicate.Operator.AND) {
- for (NoSqlPredicate predicate: predicateList) {
- if (predicate.canExecuteEfficiently()) {
- return true;
- }
- }
- return false;
- } else {
- for (NoSqlPredicate predicate: predicateList) {
- if (!predicate.canExecuteEfficiently()) {
- return false;
- }
- }
- return true;
- }
- }
-
- @SuppressWarnings({ "unchecked", "rawtypes" })
- class RowComparator implements Comparator<Map<String,Object>> {
- private String primaryKeyName;
-
- public RowComparator(String primaryKeyName) {
- this.primaryKeyName = primaryKeyName;
- }
-
- public int compare(Map<String,Object> row1, Map<String,Object> row2) {
- Comparable key1 = (Comparable)row1.get(primaryKeyName);
- Comparable key2 = (Comparable)row2.get(primaryKeyName);
- return key1.compareTo(key2);
- }
-
- public boolean equals(Object obj) {
- if (!(obj instanceof RowComparator))
- return false;
- RowComparator rc = (RowComparator)obj;
- if (rc.primaryKeyName == null)
- return this.primaryKeyName == null;
- return rc.primaryKeyName.equals(this.primaryKeyName);
- }
- }
-
- @SuppressWarnings({ "unchecked", "rawtypes" })
- private List<Map<String,Object>> combineRowLists(String primaryKeyName,
- List<Map<String,Object>> list1, List<Map<String,Object>> list2,
- CompoundPredicate.Operator operator) {
- ArrayList<Map<String,Object>> combinedRowList = new ArrayList<Map<String,Object>>();
- RowComparator rc = new RowComparator(primaryKeyName);
- Collections.sort(list1, rc);
- Collections.sort(list2,rc);
-
- Iterator<Map<String,Object>> iterator1 = list1.iterator();
- Iterator<Map<String,Object>> iterator2 = list2.iterator();
- boolean update1 = true;
- boolean update2 = true;
- Map<String,Object> row1 = null;
- Map<String,Object> row2 = null;
- Comparable<?> key1 = null;
- Comparable<?> key2 = null;
-
- while (true) {
- if (update1) {
- if (iterator1.hasNext()) {
- row1 = iterator1.next();
- key1 = (Comparable<?>)row1.get(primaryKeyName);
- } else {
- row1 = null;
- }
- }
- if (update2) {
- if (iterator2.hasNext()) {
- row2 = iterator1.next();
- key2 = (Comparable<?>)row2.get(primaryKeyName);
- } else {
- row2 = null;
- }
- }
- if (operator == CompoundPredicate.Operator.AND) {
- if ((row1 == null) || (row2 == null))
- break;
- if (key1.equals(key2))
- combinedRowList.add(row1);
- } else {
- if (row1 == null) {
- if (row2 == null)
- break;
- combinedRowList.add(row2);
- } else if ((row2 == null) || (((Comparable)key1).compareTo(key2) <= 0)) {
- combinedRowList.add(row2);
- } else {
- combinedRowList.add(row1);
- }
- }
-
- update1 = (key2 == null) || (((Comparable)key1).compareTo(key2) <= 0);
- update2 = (key1 == null) || (((Comparable)key2).compareTo(key1) <= 0);
- }
-
- return combinedRowList;
- }
-
- public List<Map<String,Object>> execute(String columnNames[]) {
- List<Map<String,Object>> combinedRowList = null;
- for (NoSqlPredicate predicate: predicateList) {
- List<Map<String,Object>> rowList = predicate.execute(columnNames);
- if (combinedRowList != null) {
- combinedRowList = combineRowLists("id", combinedRowList, rowList, operator);
- } else {
- combinedRowList = rowList;
- }
- }
- return combinedRowList;
- }
-
- public boolean matchesRow(Map<String,Object> row) {
- if (operator == CompoundPredicate.Operator.AND) {
- for (NoSqlPredicate predicate : predicateList) {
- if (!predicate.matchesRow(row)) {
- return false;
- }
- }
- return true;
- } else {
- for (NoSqlPredicate predicate : predicateList) {
- if (predicate.matchesRow(row)) {
- return true;
- }
- }
- return false;
-
- }
- }
- }
-
- public NoSqlStorageSource() {
- super();
- }
-
- @Override
- public void createTable(String tableName, Set<String> indexedColumns) {
- super.createTable(tableName, indexedColumns);
- if (indexedColumns == null) return;
- for (String columnName : indexedColumns) {
- setColumnIndexMode(tableName, columnName,
- ColumnIndexMode.EQUALITY_INDEXED);
- }
- }
-
- public void setTablePrimaryKeyName(String tableName, String primaryKeyName) {
- if ((tableName == null) || (primaryKeyName == null))
- throw new NullPointerException();
- tablePrimaryKeyMap.put(tableName, primaryKeyName);
- }
-
- protected String getTablePrimaryKeyName(String tableName) {
- String primaryKeyName = tablePrimaryKeyMap.get(tableName);
- if (primaryKeyName == null)
- primaryKeyName = DEFAULT_PRIMARY_KEY_NAME;
- return primaryKeyName;
- }
-
- protected ColumnIndexMode getColumnIndexMode(String tableName, String columnName) {
- ColumnIndexMode columnIndexMode = null;
- Map<String, ColumnIndexMode> indexedColumnMap = tableIndexedColumnMap.get(tableName);
- if (indexedColumnMap != null)
- columnIndexMode = indexedColumnMap.get(columnName);
- if (columnIndexMode == null)
- return ColumnIndexMode.NOT_INDEXED;
- return columnIndexMode;
- }
-
- public void setColumnIndexMode(String tableName, String columnName, ColumnIndexMode indexMode) {
- Map<String, ColumnIndexMode> indexedColumnMap = tableIndexedColumnMap.get(tableName);
- if (indexedColumnMap == null) {
- indexedColumnMap = new HashMap<String,ColumnIndexMode>();
- tableIndexedColumnMap.put(tableName, indexedColumnMap);
- }
- indexedColumnMap.put(columnName, indexMode);
- }
-
- Comparable<?> getOperatorPredicateValue(OperatorPredicate predicate, Map<String,Comparable<?>> parameterMap) {
- Comparable<?> value = predicate.getValue();
- if (value instanceof String) {
- String stringValue = (String) value;
- if ((stringValue.charAt(0) == '?') && (stringValue.charAt(stringValue.length()-1) == '?')) {
- String parameterName = stringValue.substring(1,stringValue.length()-1);
- value = parameterMap.get(parameterName);
- }
- }
- return value;
- }
-
- NoSqlPredicate convertPredicate(IPredicate predicate, String tableName, Map<String,Comparable<?>> parameterMap) {
- if (predicate == null)
- return null;
- NoSqlPredicate convertedPredicate = null;
- if (predicate instanceof CompoundPredicate) {
- CompoundPredicate compoundPredicate = (CompoundPredicate)predicate;
- ArrayList<NoSqlPredicate> noSqlPredicateList = new ArrayList<NoSqlPredicate>();
- for (IPredicate childPredicate: compoundPredicate.getPredicateList()) {
- boolean incorporated = false;
- if (childPredicate instanceof OperatorPredicate) {
- OperatorPredicate childOperatorPredicate = (OperatorPredicate)childPredicate;
- for (NoSqlPredicate childNoSqlPredicate: noSqlPredicateList) {
- incorporated = childNoSqlPredicate.incorporateComparison(
- childOperatorPredicate.getColumnName(), childOperatorPredicate.getOperator(),
- getOperatorPredicateValue(childOperatorPredicate, parameterMap),
- compoundPredicate.getOperator());
- if (incorporated)
- break;
- }
- }
- if (!incorporated) {
- NoSqlPredicate noSqlPredicate = convertPredicate(childPredicate, tableName, parameterMap);
- noSqlPredicateList.add(noSqlPredicate);
- }
- }
- convertedPredicate = new NoSqlCompoundPredicate(this, compoundPredicate.getOperator(),
- compoundPredicate.isNegated(), noSqlPredicateList);
- } else if (predicate instanceof OperatorPredicate) {
- OperatorPredicate operatorPredicate = (OperatorPredicate) predicate;
- Comparable<?> value = getOperatorPredicateValue(operatorPredicate, parameterMap);
- switch (operatorPredicate.getOperator()) {
- case EQ:
- convertedPredicate = new NoSqlRangePredicate(this, tableName,
- operatorPredicate.getColumnName(), value, true, value, true);
- break;
- case LT:
- convertedPredicate = new NoSqlRangePredicate(this, tableName,
- operatorPredicate.getColumnName(), null, false, value, false);
- break;
- case LTE:
- convertedPredicate = new NoSqlRangePredicate(this, tableName,
- operatorPredicate.getColumnName(), null, false, value, true);
- break;
- case GT:
- convertedPredicate = new NoSqlRangePredicate(this, tableName,
- operatorPredicate.getColumnName(), value, false, null, false);
- break;
- case GTE:
- convertedPredicate = new NoSqlRangePredicate(this, tableName,
- operatorPredicate.getColumnName(), value, true, null, false);
- break;
- default:
- convertedPredicate = new NoSqlOperatorPredicate(this, operatorPredicate.getColumnName(),
- operatorPredicate.getOperator(), value);
- }
- } else {
- throw new StorageException("Unknown predicate type");
- }
-
- return convertedPredicate;
- }
-
- @SuppressWarnings({ "unchecked", "rawtypes" })
- class RowComparator implements Comparator<Map<String,Object>> {
- private RowOrdering rowOrdering;
-
- public RowComparator(RowOrdering rowOrdering) {
- this.rowOrdering = rowOrdering;
- }
-
- public int compare(Map<String,Object> row1, Map<String,Object> row2) {
- if (rowOrdering == null)
- return 0;
-
- for (RowOrdering.Item item: rowOrdering.getItemList()) {
- Comparable key1 = (Comparable)row1.get(item.getColumn());
- Comparable key2 = (Comparable)row2.get(item.getColumn());
- int result = key1.compareTo(key2);
- if (result != 0) {
- if (item.getDirection() == RowOrdering.Direction.DESCENDING)
- result = -result;
- return result;
- }
- }
-
- return 0;
- }
-
- public boolean equals(Object obj) {
- if (!(obj instanceof RowComparator))
- return false;
- RowComparator rc = (RowComparator)obj;
- if (rc.rowOrdering == null)
- return this.rowOrdering == null;
- return rc.rowOrdering.equals(this.rowOrdering);
- }
- }
-
- private NoSqlResultSet executeParameterizedQuery(String tableName, String[] columnNameList,
- IPredicate predicate, RowOrdering rowOrdering, Map<String,Comparable<?>> parameterMap) {
- NoSqlPredicate noSqlPredicate = convertPredicate(predicate, tableName, parameterMap);
- List<Map<String,Object>> rowList;
- if ((noSqlPredicate != null) && noSqlPredicate.canExecuteEfficiently()) {
- rowList = noSqlPredicate.execute(columnNameList);
- } else {
- rowList = new ArrayList<Map<String,Object>>();
- Collection<Map<String,Object>> allRowList = getAllRows(tableName, columnNameList);
- for (Map<String,Object> row: allRowList) {
- if ((noSqlPredicate == null) || noSqlPredicate.matchesRow(row)) {
- rowList.add(row);
- }
- }
- }
- if (rowOrdering != null)
- Collections.sort(rowList, new RowComparator(rowOrdering));
-
- return new NoSqlResultSet(this, tableName, rowList);
- }
-
- @Override
- public IQuery createQuery(String tableName, String[] columnNameList,
- IPredicate predicate, RowOrdering rowOrdering) {
- return new NoSqlQuery(tableName, columnNameList, predicate, rowOrdering);
- }
-
- @Override
- public IResultSet executeQueryImpl(IQuery query) {
- NoSqlQuery noSqlQuery = (NoSqlQuery) query;
- return executeParameterizedQuery(noSqlQuery.getTableName(),
- noSqlQuery.getColumnNameList(), noSqlQuery.getPredicate(),
- noSqlQuery.getRowOrdering(), noSqlQuery.getParameterMap());
- }
-
- protected void sendNotification(String tableName, StorageSourceNotification.Action action,
- List<Map<String,Object>> rows) {
- Set<Object> rowKeys = new HashSet<Object>();
- String primaryKeyName = getTablePrimaryKeyName(tableName);
- for (Map<String,Object> row : rows) {
- Object rowKey = row.get(primaryKeyName);
- rowKeys.add(rowKey);
- }
- StorageSourceNotification notification =
- new StorageSourceNotification(tableName, action, rowKeys);
- notifyListeners(notification);
- }
-
- protected void sendNotification(String tableName,
- StorageSourceNotification.Action action, Set<Object> rowKeys) {
- StorageSourceNotification notification =
- new StorageSourceNotification(tableName, action, rowKeys);
- notifyListeners(notification);
- }
-
- protected void insertRowsAndNotify(String tableName, List<Map<String,Object>> insertRowList) {
- insertRows(tableName, insertRowList);
- sendNotification(tableName, StorageSourceNotification.Action.MODIFY, insertRowList);
- }
-
- @Override
- public void insertRowImpl(String tableName, Map<String, Object> values) {
- ArrayList<Map<String,Object>> rowList = new ArrayList<Map<String,Object>>();
- rowList.add(values);
- insertRowsAndNotify(tableName, rowList);
- }
-
- protected void updateRowsAndNotify(String tableName, Set<Object> rowKeys, Map<String,Object> updateRowList) {
- updateRows(tableName, rowKeys, updateRowList);
- sendNotification(tableName, StorageSourceNotification.Action.MODIFY, rowKeys);
- }
-
- protected void updateRowsAndNotify(String tableName, List<Map<String,Object>> updateRowList) {
- updateRows(tableName, updateRowList);
- sendNotification(tableName, StorageSourceNotification.Action.MODIFY, updateRowList);
- }
-
- @Override
- public void updateMatchingRowsImpl(String tableName, IPredicate predicate, Map<String,Object> values) {
- String primaryKeyName = getTablePrimaryKeyName(tableName);
- String[] columnNameList = {primaryKeyName};
- IResultSet resultSet = executeQuery(tableName, columnNameList, predicate, null);
- Set<Object> rowKeys = new HashSet<Object>();
- while (resultSet.next()) {
- String rowKey = resultSet.getString(primaryKeyName);
- rowKeys.add(rowKey);
- }
- updateRowsAndNotify(tableName, rowKeys, values);
- }
-
- @Override
- public void updateRowImpl(String tableName, Object rowKey, Map<String,Object> values) {
- Map<String,Object> valuesWithKey = new HashMap<String,Object>(values);
- String primaryKeyName = getTablePrimaryKeyName(tableName);
- valuesWithKey.put(primaryKeyName, rowKey);
- List<Map<String,Object>> rowList = new ArrayList<Map<String,Object>>();
- rowList.add(valuesWithKey);
- updateRowsAndNotify(tableName, rowList);
- }
-
- @Override
- public void updateRowImpl(String tableName, Map<String,Object> values) {
- List<Map<String,Object>> rowKeys = new ArrayList<Map<String,Object>>();
- rowKeys.add(values);
- updateRowsAndNotify(tableName, rowKeys);
- }
-
- protected void deleteRowsAndNotify(String tableName, Set<Object> rowKeyList) {
- deleteRows(tableName, rowKeyList);
- sendNotification(tableName, StorageSourceNotification.Action.DELETE, rowKeyList);
- }
-
- @Override
- public void deleteRowImpl(String tableName, Object key) {
- HashSet<Object> keys = new HashSet<Object>();
- keys.add(key);
- deleteRowsAndNotify(tableName, keys);
- }
-
- @Override
- public IResultSet getRowImpl(String tableName, Object rowKey) {
- List<Map<String,Object>> rowList = new ArrayList<Map<String,Object>>();
- Map<String,Object> row = getRow(tableName, null, rowKey);
- if (row != null)
- rowList.add(row);
- NoSqlResultSet resultSet = new NoSqlResultSet(this, tableName, rowList);
- return resultSet;
- }
-
- // Below are the methods that must be implemented by the subclasses
-
- protected abstract Collection<Map<String,Object>> getAllRows(String tableName, String[] columnNameList);
-
- protected abstract Map<String,Object> getRow(String tableName, String[] columnNameList, Object rowKey);
-
- protected abstract List<Map<String,Object>> executeEqualityQuery(String tableName,
- String[] columnNameList, String predicateColumnName, Comparable<?> value);
-
- protected abstract List<Map<String,Object>> executeRangeQuery(String tableName,
- String[] columnNameList, String predicateColumnName,
- Comparable<?> startValue, boolean startInclusive, Comparable<?> endValue, boolean endInclusive);
-
- protected abstract void insertRows(String tableName, List<Map<String,Object>> insertRowList);
-
- protected abstract void updateRows(String tableName, Set<Object> rowKeys, Map<String,Object> updateColumnMap);
-}
diff --git a/src/main/java/net/floodlightcontroller/storage/web/StorageNotifyResource.java b/src/main/java/net/floodlightcontroller/storage/web/StorageNotifyResource.java
deleted file mode 100644
index 081c7f9..0000000
--- a/src/main/java/net/floodlightcontroller/storage/web/StorageNotifyResource.java
+++ /dev/null
@@ -1,55 +0,0 @@
-/**
-* Copyright 2011, Big Switch Networks, Inc.
-* Originally created by David Erickson, Stanford University
-*
-* Licensed under the Apache License, Version 2.0 (the "License"); you may
-* not use this file except in compliance with the License. You may obtain
-* a copy of the License at
-*
-* http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-* License for the specific language governing permissions and limitations
-* under the License.
-**/
-
-package net.floodlightcontroller.storage.web;
-
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-import net.floodlightcontroller.storage.IStorageSourceService;
-import net.floodlightcontroller.storage.StorageSourceNotification;
-
-import org.codehaus.jackson.map.ObjectMapper;
-import org.codehaus.jackson.type.TypeReference;
-import org.restlet.resource.Post;
-import org.restlet.resource.ServerResource;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class StorageNotifyResource extends ServerResource {
- protected final static Logger log = LoggerFactory.getLogger(StorageNotifyResource.class);
-
- @Post("json")
- public Map<String,Object> notify(String entity) throws Exception {
- List<StorageSourceNotification> notifications = null;
- ObjectMapper mapper = new ObjectMapper();
- notifications =
- mapper.readValue(entity,
- new TypeReference<List<StorageSourceNotification>>(){});
-
- IStorageSourceService storageSource =
- (IStorageSourceService)getContext().getAttributes().
- get(IStorageSourceService.class.getCanonicalName());
- storageSource.notifyListeners(notifications);
-
- HashMap<String, Object> model = new HashMap<String,Object>();
- model.put("output", "OK");
- return model;
- }
-
-}
diff --git a/src/main/java/net/floodlightcontroller/storage/web/StorageWebRoutable.java b/src/main/java/net/floodlightcontroller/storage/web/StorageWebRoutable.java
deleted file mode 100644
index 681847d..0000000
--- a/src/main/java/net/floodlightcontroller/storage/web/StorageWebRoutable.java
+++ /dev/null
@@ -1,45 +0,0 @@
-/**
-* Copyright 2011, Big Switch Networks, Inc.
-* Originally created by David Erickson, Stanford University
-*
-* Licensed under the Apache License, Version 2.0 (the "License"); you may
-* not use this file except in compliance with the License. You may obtain
-* a copy of the License at
-*
-* http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-* License for the specific language governing permissions and limitations
-* under the License.
-**/
-
-package net.floodlightcontroller.storage.web;
-
-import org.restlet.Context;
-import org.restlet.Restlet;
-import org.restlet.routing.Router;
-
-import net.floodlightcontroller.restserver.RestletRoutable;
-
-/**
- * Creates a router to handle the storage web URIs
- * @author readams
- *
- */
-public class StorageWebRoutable implements RestletRoutable {
-
- @Override
- public String basePath() {
- return "/wm/storage";
- }
-
- @Override
- public Restlet getRestlet(Context context) {
- Router router = new Router(context);
- router.attach("/notify/json", StorageNotifyResource.class);
- return router;
- }
-
-}
diff --git a/src/main/java/net/floodlightcontroller/topology/TopologyManager.java b/src/main/java/net/floodlightcontroller/topology/TopologyManager.java
index 3e624e7..103cc4d 100644
--- a/src/main/java/net/floodlightcontroller/topology/TopologyManager.java
+++ b/src/main/java/net/floodlightcontroller/topology/TopologyManager.java
@@ -16,10 +16,8 @@
import net.floodlightcontroller.core.FloodlightContext;
import net.floodlightcontroller.core.IFloodlightProviderService;
-import net.floodlightcontroller.core.IFloodlightProviderService.Role;
import net.floodlightcontroller.core.IOFMessageListener;
import net.floodlightcontroller.core.IOFSwitch;
-import net.floodlightcontroller.core.IHAListener;
import net.floodlightcontroller.core.annotations.LogMessageCategory;
import net.floodlightcontroller.core.annotations.LogMessageDoc;
import net.floodlightcontroller.core.module.FloodlightModuleContext;
@@ -27,7 +25,6 @@
import net.floodlightcontroller.core.module.IFloodlightModule;
import net.floodlightcontroller.core.module.IFloodlightService;
import net.floodlightcontroller.core.util.SingletonTask;
-import net.floodlightcontroller.counter.ICounterStoreService;
import net.floodlightcontroller.packet.BSN;
import net.floodlightcontroller.packet.Ethernet;
import net.floodlightcontroller.packet.LLDP;
@@ -44,9 +41,9 @@
import org.openflow.protocol.OFPacketIn;
import org.openflow.protocol.OFPacketOut;
import org.openflow.protocol.OFPort;
+import org.openflow.protocol.OFType;
import org.openflow.protocol.action.OFAction;
import org.openflow.protocol.action.OFActionOutput;
-import org.openflow.protocol.OFType;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -59,7 +56,7 @@
public class TopologyManager implements
IFloodlightModule, ITopologyService,
IRoutingService, ILinkDiscoveryListener,
- IOFMessageListener, IHAListener {
+ IOFMessageListener {
protected final static Logger log = LoggerFactory.getLogger(TopologyManager.class);
@@ -564,38 +561,6 @@
return Command.CONTINUE;
}
- // ***************
- // IHAListener
- // ***************
-
- @Override
- public void roleChanged(Role oldRole, Role newRole) {
- switch(newRole) {
- case MASTER:
- if (oldRole == Role.SLAVE) {
- log.debug("Re-computing topology due " +
- "to HA change from SLAVE->MASTER");
- newInstanceTask.reschedule(1, TimeUnit.MILLISECONDS);
- }
- break;
- case SLAVE:
- log.debug("Clearing topology due to " +
- "HA change to SLAVE");
- clearCurrentTopology();
- break;
- default:
- break;
- }
- }
-
- @Override
- public void controllerNodeIPsChanged(
- Map<String, String> curControllerNodeIPs,
- Map<String, String> addedControllerNodeIPs,
- Map<String, String> removedControllerNodeIPs) {
- // no-op
- }
-
// *****************
// IFloodlightModule
// *****************
@@ -630,7 +595,6 @@
l.add(ILinkDiscoveryService.class);
l.add(IThreadPoolService.class);
l.add(IFloodlightProviderService.class);
- l.add(ICounterStoreService.class);
l.add(IRestApiService.class);
return l;
}
@@ -661,7 +625,6 @@
newInstanceTask = new SingletonTask(ses, new UpdateTopologyWorker());
linkDiscovery.addListener(this);
floodlightProvider.addOFMessageListener(OFType.PACKET_IN, this);
- floodlightProvider.addHAListener(this);
addRestletRoutable();
}
diff --git a/src/main/java/net/floodlightcontroller/util/MACAddress.java b/src/main/java/net/floodlightcontroller/util/MACAddress.java
index b77d4cc..b143bda 100644
--- a/src/main/java/net/floodlightcontroller/util/MACAddress.java
+++ b/src/main/java/net/floodlightcontroller/util/MACAddress.java
@@ -1,5 +1,6 @@
package net.floodlightcontroller.util;
+import java.io.Serializable;
import java.util.Arrays;
import net.onrc.onos.ofcontroller.util.serializers.MACAddressDeserializer;
@@ -15,7 +16,8 @@
*/
@JsonDeserialize(using=MACAddressDeserializer.class)
@JsonSerialize(using=MACAddressSerializer.class)
-public class MACAddress {
+public class MACAddress implements Serializable{
+ private static final long serialVersionUID = 10000L;
public static final int MAC_ADDRESS_LENGTH = 6;
private byte[] address = new byte[MAC_ADDRESS_LENGTH];
diff --git a/src/main/java/net/onrc/onos/datagrid/IDatagridService.java b/src/main/java/net/onrc/onos/datagrid/IDatagridService.java
index 034fe25..0f03d77 100644
--- a/src/main/java/net/onrc/onos/datagrid/IDatagridService.java
+++ b/src/main/java/net/onrc/onos/datagrid/IDatagridService.java
@@ -170,5 +170,5 @@
* Send an ARP request to other ONOS instances
* @param arpRequest The request packet to send
*/
- public void sendArpRequest(ArpMessage arpMessage);
+ public void sendArpRequest(ArpMessage arpMessage);
}
diff --git a/src/main/java/net/onrc/onos/graph/GraphDBOperation.java b/src/main/java/net/onrc/onos/graph/GraphDBOperation.java
index 316cde6..03b4c96 100644
--- a/src/main/java/net/onrc/onos/graph/GraphDBOperation.java
+++ b/src/main/java/net/onrc/onos/graph/GraphDBOperation.java
@@ -176,9 +176,14 @@
*/
public IPortObject searchPort(String dpid, Short number) {
FramedGraph<TitanGraph> fg = conn.getFramedGraph();
+ if ( fg == null ) return null;
String id = dpid + number.toString();
- return (fg != null && fg.getVertices("port_id",id).iterator().hasNext()) ?
- fg.getVertices("port_id",id,IPortObject.class).iterator().next() : null;
+ Iterator<IPortObject> ports = fg.getVertices("port_id",id,IPortObject.class).iterator();
+ if ( ports.hasNext() ) {
+ return ports.next();
+ } else {
+ return null;
+ }
}
/**
@@ -206,10 +211,14 @@
* @param macAddr MAC address to search and get
*/
public IDeviceObject searchDevice(String macAddr) {
- // TODO Auto-generated method stub
- FramedGraph<TitanGraph> fg = conn.getFramedGraph();
- return (fg != null && fg.getVertices("dl_addr",macAddr).iterator().hasNext()) ?
- fg.getVertices("dl_addr",macAddr, IDeviceObject.class).iterator().next() : null;
+ FramedGraph<TitanGraph> fg = conn.getFramedGraph();
+ if ( fg == null ) return null;
+ Iterator<IDeviceObject> devices = fg.getVertices("dl_addr",macAddr, IDeviceObject.class).iterator();
+ if ( devices.hasNext() ) {
+ return devices.next();
+ } else {
+ return null;
+ }
}
/**
@@ -288,10 +297,13 @@
*/
public IFlowPath searchFlowPath(FlowId flowId) {
FramedGraph<TitanGraph> fg = conn.getFramedGraph();
-
- return fg.getVertices("flow_id", flowId.toString()).iterator().hasNext() ?
- fg.getVertices("flow_id", flowId.toString(),
- IFlowPath.class).iterator().next() : null;
+ if ( fg == null ) return null;
+ Iterator<IFlowPath> flowpaths = fg.getVertices("flow_id", flowId.toString(), IFlowPath.class).iterator();
+ if ( flowpaths.hasNext() ) {
+ return flowpaths.next();
+ } else {
+ return null;
+ }
}
/**
@@ -348,10 +360,13 @@
*/
public IFlowEntry searchFlowEntry(FlowEntryId flowEntryId) {
FramedGraph<TitanGraph> fg = conn.getFramedGraph();
-
- return fg.getVertices("flow_entry_id", flowEntryId.toString()).iterator().hasNext() ?
- fg.getVertices("flow_entry_id", flowEntryId.toString(),
- IFlowEntry.class).iterator().next() : null;
+ if ( fg == null ) return null;
+ Iterator<IFlowEntry> flowentries = fg.getVertices("flow_entry_id", flowEntryId.toString(), IFlowEntry.class).iterator();
+ if ( flowentries.hasNext() ) {
+ return flowentries.next();
+ } else {
+ return null;
+ }
}
/**
diff --git a/src/main/java/net/onrc/onos/ofcontroller/core/IDeviceStorage.java b/src/main/java/net/onrc/onos/ofcontroller/core/IDeviceStorage.java
index be495b9..13b9182 100644
--- a/src/main/java/net/onrc/onos/ofcontroller/core/IDeviceStorage.java
+++ b/src/main/java/net/onrc/onos/ofcontroller/core/IDeviceStorage.java
@@ -8,6 +8,7 @@
public IDeviceObject addDevice(IDevice device);
public IDeviceObject updateDevice(IDevice device);
public void removeDevice(IDevice device);
+ public void removeDevice(IDeviceObject deviceObject);
public IDeviceObject getDeviceByMac(String mac);
public IDeviceObject getDeviceByIP(int ipv4Address);
public void changeDeviceAttachments(IDevice device);
diff --git a/src/main/java/net/onrc/onos/ofcontroller/core/INetMapTopologyObjects.java b/src/main/java/net/onrc/onos/ofcontroller/core/INetMapTopologyObjects.java
index 256a98e..29c4377 100644
--- a/src/main/java/net/onrc/onos/ofcontroller/core/INetMapTopologyObjects.java
+++ b/src/main/java/net/onrc/onos/ofcontroller/core/INetMapTopologyObjects.java
@@ -1,7 +1,5 @@
package net.onrc.onos.ofcontroller.core;
-import net.onrc.onos.ofcontroller.flowmanager.web.DatapathSummarySerializer;
-
import org.codehaus.jackson.annotate.JsonIgnore;
import org.codehaus.jackson.annotate.JsonProperty;
import org.codehaus.jackson.map.annotate.JsonSerialize;
@@ -13,6 +11,9 @@
import com.tinkerpop.frames.annotations.gremlin.GremlinParam;
import com.tinkerpop.frames.VertexFrame;
+import net.onrc.onos.ofcontroller.flowmanager.web.DatapathSummarySerializer;
+import net.floodlightcontroller.core.web.serializers.IPv4Serializer;
+
/*
* This is the interfaces to make the objects for Cassandra DB.
* They are interfaces, but it is also implementation,
@@ -205,6 +206,7 @@
@JsonProperty("ipv4")
@Property("ipv4_address")
+ @JsonSerialize(using=IPv4Serializer.class)
public int getIpv4Address();
@Property("ipv4_address")
diff --git a/src/main/java/net/onrc/onos/ofcontroller/flowmanager/FlowEventHandler.java b/src/main/java/net/onrc/onos/ofcontroller/flowmanager/FlowEventHandler.java
index f49b10a..3538eb4 100644
--- a/src/main/java/net/onrc/onos/ofcontroller/flowmanager/FlowEventHandler.java
+++ b/src/main/java/net/onrc/onos/ofcontroller/flowmanager/FlowEventHandler.java
@@ -75,11 +75,14 @@
// Transient state for processing the Flow Paths:
// - The Flow Paths that should be recomputed
// - The Flow Paths with modified Flow Entries
+ // - The Flow Paths that we should check if installed in all switches
//
private Map<Long, FlowPath> shouldRecomputeFlowPaths =
new HashMap<Long, FlowPath>();
private Map<Long, FlowPath> modifiedFlowPaths =
new HashMap<Long, FlowPath>();
+ private Map<Long, FlowPath> checkIfInstalledFlowPaths =
+ new HashMap<Long, FlowPath>();
/**
* Constructor for a given Flow Manager and Datagrid Service.
@@ -239,6 +242,12 @@
for (FlowPath flowPath : modifiedFlowPaths.values())
flowPath.dataPath().removeDeletedFlowEntries();
+ //
+ // Check if Flow Paths have been installed into all switches,
+ // and generate the appropriate events.
+ //
+ checkInstalledFlowPaths(checkIfInstalledFlowPaths.values());
+
// Cleanup
topologyEvents.clear();
flowPathEvents.clear();
@@ -246,6 +255,44 @@
//
shouldRecomputeFlowPaths.clear();
modifiedFlowPaths.clear();
+ checkIfInstalledFlowPaths.clear();
+ }
+
+ /**
+ * Check if Flow Paths have been installed into all switches,
+ * and generate the appropriate events.
+ *
+ * @param flowPaths the flowPaths to process.
+ */
+ private void checkInstalledFlowPaths(Collection<FlowPath> flowPaths) {
+ List<FlowPath> installedFlowPaths = new LinkedList<FlowPath>();
+
+ Kryo kryo = kryoFactory.newKryo();
+
+ for (FlowPath flowPath : flowPaths) {
+ boolean isInstalled = true;
+
+ //
+ // Check whether all Flow Entries have been installed
+ //
+ for (FlowEntry flowEntry : flowPath.flowEntries()) {
+ if (flowEntry.flowEntrySwitchState() !=
+ FlowEntrySwitchState.FE_SWITCH_UPDATED) {
+ isInstalled = false;
+ break;
+ }
+ }
+
+ if (isInstalled) {
+ // Create a copy and add it to the list
+ FlowPath copyFlowPath = kryo.copy(flowPath);
+ installedFlowPaths.add(copyFlowPath);
+ }
+ }
+ kryoFactory.deleteKryo(kryo);
+
+ // Generate an event for the installed Flow Path.
+ flowManager.notificationFlowPathsInstalled(installedFlowPaths);
}
/**
@@ -529,10 +576,12 @@
}
//
- // Update the local Flow Entry.
+ // Update the local Flow Entry, and keep state to check
+ // if the Flow Path has been installed.
//
localFlowEntry.setFlowEntryUserState(flowEntry.flowEntryUserState());
localFlowEntry.setFlowEntrySwitchState(flowEntry.flowEntrySwitchState());
+ checkIfInstalledFlowPaths.put(flowPath.flowId().value(), flowPath);
return localFlowEntry;
}
diff --git a/src/main/java/net/onrc/onos/ofcontroller/flowmanager/FlowManager.java b/src/main/java/net/onrc/onos/ofcontroller/flowmanager/FlowManager.java
index bd498cc..84b175f 100644
--- a/src/main/java/net/onrc/onos/ofcontroller/flowmanager/FlowManager.java
+++ b/src/main/java/net/onrc/onos/ofcontroller/flowmanager/FlowManager.java
@@ -26,7 +26,18 @@
import net.onrc.onos.ofcontroller.flowprogrammer.IFlowPusherService;
import net.onrc.onos.ofcontroller.forwarding.IForwardingService;
import net.onrc.onos.ofcontroller.topology.Topology;
-import net.onrc.onos.ofcontroller.util.*;
+import net.onrc.onos.ofcontroller.util.Dpid;
+import net.onrc.onos.ofcontroller.util.FlowEntry;
+import net.onrc.onos.ofcontroller.util.FlowEntrySwitchState;
+import net.onrc.onos.ofcontroller.util.FlowEntryUserState;
+import net.onrc.onos.ofcontroller.util.FlowEntryId;
+import net.onrc.onos.ofcontroller.util.FlowId;
+import net.onrc.onos.ofcontroller.util.FlowPath;
+import net.onrc.onos.ofcontroller.util.FlowPathUserState;
+import net.onrc.onos.ofcontroller.util.Pair;
+import net.onrc.onos.ofcontroller.util.serializers.KryoFactory;
+
+import com.esotericsoftware.kryo2.Kryo;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -45,7 +56,10 @@
protected FlowEventHandler flowEventHandler;
protected IFlowPusherService pusher;
-
+ protected IForwardingService forwardingService;
+
+ private KryoFactory kryoFactory = new KryoFactory();
+
// Flow Entry ID generation state
private static Random randomGenerator = new Random();
private static int nextFlowEntryIdPrefix = 0;
@@ -148,6 +162,7 @@
datagridService = context.getServiceImpl(IDatagridService.class);
restApi = context.getServiceImpl(IRestApiService.class);
pusher = context.getServiceImpl(IFlowPusherService.class);
+ forwardingService = context.getServiceImpl(IForwardingService.class);
this.init("");
}
@@ -412,6 +427,16 @@
}
/**
+ * Generate a notification that a collection of Flow Paths has been
+ * installed in the network.
+ *
+ * @param flowPaths the collection of installed Flow Paths.
+ */
+ void notificationFlowPathsInstalled(Collection<FlowPath> flowPaths) {
+ forwardingService.flowsInstalled(flowPaths);
+ }
+
+ /**
* Push modified Flow-related state as appropriate.
*
* @param modifiedFlowPaths the collection of modified Flow Paths.
@@ -575,11 +600,24 @@
*/
private void pushModifiedFlowPathsToDatabase(
Collection<FlowPath> modifiedFlowPaths) {
+ List<FlowPath> copiedFlowPaths = new LinkedList<FlowPath>();
+
+ //
+ // Create a copy of the Flow Paths to push, because the pushing
+ // itself will happen on a separate thread.
+ //
+ Kryo kryo = kryoFactory.newKryo();
+ for (FlowPath flowPath : modifiedFlowPaths) {
+ FlowPath copyFlowPath = kryo.copy(flowPath);
+ copiedFlowPaths.add(copyFlowPath);
+ }
+ kryoFactory.deleteKryo(kryo);
+
//
// We only add the Flow Paths to the Database Queue.
// The FlowDatabaseWriter thread is responsible for the actual writing.
//
- flowPathsToDatabaseQueue.addAll(modifiedFlowPaths);
+ flowPathsToDatabaseQueue.addAll(copiedFlowPaths);
}
/**
diff --git a/src/main/java/net/onrc/onos/ofcontroller/flowprogrammer/FlowSynchronizer.java b/src/main/java/net/onrc/onos/ofcontroller/flowprogrammer/FlowSynchronizer.java
index 64f6cac..6ef44be 100644
--- a/src/main/java/net/onrc/onos/ofcontroller/flowprogrammer/FlowSynchronizer.java
+++ b/src/main/java/net/onrc/onos/ofcontroller/flowprogrammer/FlowSynchronizer.java
@@ -123,6 +123,7 @@
graphEntryTime /= div;
extractTime /= div;
pushTime /= div;
+ totalTime /= div;
log.debug("Sync time (ms):" +
graphIDTime + "," +
switchTime + "," +
diff --git a/src/main/java/net/onrc/onos/ofcontroller/forwarding/Forwarding.java b/src/main/java/net/onrc/onos/ofcontroller/forwarding/Forwarding.java
index 1f50a98..430d05d 100644
--- a/src/main/java/net/onrc/onos/ofcontroller/forwarding/Forwarding.java
+++ b/src/main/java/net/onrc/onos/ofcontroller/forwarding/Forwarding.java
@@ -1,6 +1,6 @@
package net.onrc.onos.ofcontroller.forwarding;
-import java.io.IOException;
+import java.net.InetAddress;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
@@ -17,6 +17,7 @@
import net.floodlightcontroller.core.module.IFloodlightModule;
import net.floodlightcontroller.core.module.IFloodlightService;
import net.floodlightcontroller.packet.Ethernet;
+import net.floodlightcontroller.packet.IPv4;
import net.floodlightcontroller.util.MACAddress;
import net.onrc.onos.datagrid.IDatagridService;
import net.onrc.onos.ofcontroller.core.IDeviceStorage;
@@ -25,6 +26,8 @@
import net.onrc.onos.ofcontroller.core.INetMapTopologyObjects.ISwitchObject;
import net.onrc.onos.ofcontroller.core.internal.DeviceStorageImpl;
import net.onrc.onos.ofcontroller.flowmanager.IFlowService;
+import net.onrc.onos.ofcontroller.flowprogrammer.IFlowPusherService;
+import net.onrc.onos.ofcontroller.proxyarp.ArpMessage;
import net.onrc.onos.ofcontroller.topology.TopologyManager;
import net.onrc.onos.ofcontroller.util.CallerId;
import net.onrc.onos.ofcontroller.util.DataPath;
@@ -50,16 +53,19 @@
import com.google.common.collect.HashMultimap;
import com.google.common.collect.Multimap;
-import com.google.common.collect.Multimaps;
+import com.google.common.net.InetAddresses;
public class Forwarding implements IOFMessageListener, IFloodlightModule,
IForwardingService {
private final static Logger log = LoggerFactory.getLogger(Forwarding.class);
+ private final int IDLE_TIMEOUT = 5; // seconds
+ private final int HARD_TIMEOUT = 0; // seconds
+
private IFloodlightProviderService floodlightProvider;
private IFlowService flowService;
- @SuppressWarnings("unused")
- private IDatagridService datagridService;
+ private IFlowPusherService flowPusher;
+ private IDatagridService datagrid;
private IDeviceStorage deviceStorage;
private TopologyManager topologyService;
@@ -67,6 +73,8 @@
private Map<Path, Long> pendingFlows;
private Multimap<Long, PacketToPush> waitingPackets;
+ private final Object lock = new Object();
+
public class PacketToPush {
public final OFPacketOut packet;
public final long dpid;
@@ -130,22 +138,24 @@
new ArrayList<Class<? extends IFloodlightService>>();
dependencies.add(IFloodlightProviderService.class);
dependencies.add(IFlowService.class);
- dependencies.add(IDatagridService.class);
+ dependencies.add(IFlowPusherService.class);
return dependencies;
}
@Override
public void init(FloodlightModuleContext context) {
- this.floodlightProvider =
+ floodlightProvider =
context.getServiceImpl(IFloodlightProviderService.class);
- this.flowService = context.getServiceImpl(IFlowService.class);
- this.datagridService = context.getServiceImpl(IDatagridService.class);
+ flowService = context.getServiceImpl(IFlowService.class);
+ flowPusher = context.getServiceImpl(IFlowPusherService.class);
+ datagrid = context.getServiceImpl(IDatagridService.class);
floodlightProvider.addOFMessageListener(OFType.PACKET_IN, this);
pendingFlows = new ConcurrentHashMap<Path, Long>();
- waitingPackets = Multimaps.synchronizedSetMultimap(
- HashMultimap.<Long, PacketToPush>create());
+ //waitingPackets = Multimaps.synchronizedSetMultimap(
+ //HashMultimap.<Long, PacketToPush>create());
+ waitingPackets = HashMultimap.create();
deviceStorage = new DeviceStorageImpl();
deviceStorage.init("");
@@ -187,17 +197,45 @@
Ethernet eth = IFloodlightProviderService.bcStore.
get(cntx, IFloodlightProviderService.CONTEXT_PI_PAYLOAD);
- // We only want to handle unicast IPv4
- if (eth.isBroadcast() || eth.isMulticast() ||
- eth.getEtherType() != Ethernet.TYPE_IPv4) {
+ if (eth.getEtherType() != Ethernet.TYPE_IPv4) {
return Command.CONTINUE;
}
- handlePacketIn(sw, pi, eth);
+ if (eth.isBroadcast() || eth.isMulticast()) {
+ handleBroadcast(sw, pi, eth);
+ //return Command.CONTINUE;
+ }
+ else {
+ // Unicast
+ handlePacketIn(sw, pi, eth);
+ }
return Command.STOP;
}
+ private void handleBroadcast(IOFSwitch sw, OFPacketIn pi, Ethernet eth) {
+ if (log.isTraceEnabled()) {
+ log.trace("Sending broadcast packet to other ONOS instances");
+ }
+
+ IPv4 ipv4Packet = (IPv4) eth.getPayload();
+
+ // TODO We'll put the destination address here, because the current
+ // architecture needs an address. Addresses are only used for replies
+ // however, which don't apply to non-ARP packets. The ArpMessage class
+ // has become a bit too overloaded and should be refactored to
+ // handle all use cases nicely.
+ InetAddress targetAddress =
+ InetAddresses.fromInteger(ipv4Packet.getDestinationAddress());
+
+ // Piggy-back on the ARP mechanism to broadcast this packet out the
+ // edge. Luckily the ARP module doesn't check that the packet is
+ // actually ARP before broadcasting, so we can trick it into sending
+ // our non-ARP packets.
+ // TODO This should be refactored later to account for the new use case.
+ datagrid.sendArpRequest(ArpMessage.newRequest(targetAddress, eth.serialize()));
+ }
+
private void handlePacketIn(IOFSwitch sw, OFPacketIn pi, Ethernet eth) {
String destinationMac =
HexString.toHexString(eth.getDestinationMACAddress());
@@ -231,84 +269,87 @@
MACAddress dstMacAddress = MACAddress.valueOf(eth.getDestinationMACAddress());
- DataPath datapath = new DataPath();
- datapath.setSrcPort(srcSwitchPort);
- datapath.setDstPort(dstSwitchPort);
-
-
+ FlowPath flowPath, reverseFlowPath;
Path pathspec = new Path(srcSwitchPort, dstSwitchPort);
// TODO check concurrency
- Long existingFlowId = pendingFlows.get(pathspec);
-
- if (existingFlowId != null) {
- log.debug("Found existing flow {}",
- HexString.toHexString(existingFlowId));
+ synchronized (lock) {
+ Long existingFlowId = pendingFlows.get(pathspec);
- // TODO do stuff.
- OFPacketOut po = constructPacketOut(datapath, pi, sw);
- waitingPackets.put(existingFlowId, new PacketToPush(po, sw.getId()));
- return;
+ if (existingFlowId != null) {
+ log.debug("Found existing flow {}",
+ HexString.toHexString(existingFlowId));
+
+ OFPacketOut po = constructPacketOut(pi, sw);
+ waitingPackets.put(existingFlowId, new PacketToPush(po, sw.getId()));
+ return;
+ }
+
+ log.debug("Adding new flow between {} at {} and {} at {}",
+ new Object[]{srcMacAddress, srcSwitchPort, dstMacAddress, dstSwitchPort});
+
+
+ CallerId callerId = new CallerId("Forwarding");
+
+ DataPath datapath = new DataPath();
+ datapath.setSrcPort(srcSwitchPort);
+ datapath.setDstPort(dstSwitchPort);
+
+ flowPath = new FlowPath();
+ flowPath.setInstallerId(callerId);
+
+ flowPath.setFlowPathType(FlowPathType.FP_TYPE_SHORTEST_PATH);
+ flowPath.setFlowPathUserState(FlowPathUserState.FP_USER_ADD);
+ flowPath.setFlowEntryMatch(new FlowEntryMatch());
+ flowPath.setIdleTimeout(IDLE_TIMEOUT);
+ flowPath.setHardTimeout(HARD_TIMEOUT);
+ flowPath.flowEntryMatch().enableSrcMac(srcMacAddress);
+ flowPath.flowEntryMatch().enableDstMac(dstMacAddress);
+ flowPath.flowEntryMatch().enableEthernetFrameType(Ethernet.TYPE_IPv4);
+ flowPath.setDataPath(datapath);
+
+
+ DataPath reverseDataPath = new DataPath();
+ // Reverse the ports for the reverse path
+ reverseDataPath.setSrcPort(dstSwitchPort);
+ reverseDataPath.setDstPort(srcSwitchPort);
+
+ // TODO implement copy constructor for FlowPath
+ reverseFlowPath = new FlowPath();
+ reverseFlowPath.setInstallerId(callerId);
+ reverseFlowPath.setFlowPathType(FlowPathType.FP_TYPE_SHORTEST_PATH);
+ reverseFlowPath.setFlowPathUserState(FlowPathUserState.FP_USER_ADD);
+ reverseFlowPath.setIdleTimeout(IDLE_TIMEOUT);
+ reverseFlowPath.setHardTimeout(HARD_TIMEOUT);
+ reverseFlowPath.setFlowEntryMatch(new FlowEntryMatch());
+ // Reverse the MAC addresses for the reverse path
+ reverseFlowPath.flowEntryMatch().enableSrcMac(dstMacAddress);
+ reverseFlowPath.flowEntryMatch().enableDstMac(srcMacAddress);
+ reverseFlowPath.flowEntryMatch().enableEthernetFrameType(Ethernet.TYPE_IPv4);
+ reverseFlowPath.setDataPath(reverseDataPath);
+ reverseFlowPath.dataPath().srcPort().dpid().toString();
+
+ // TODO what happens if no path exists? cleanup
+
+ FlowId flowId = new FlowId(flowService.getNextFlowEntryId());
+ FlowId reverseFlowId = new FlowId(flowService.getNextFlowEntryId());
+
+ flowPath.setFlowId(flowId);
+ reverseFlowPath.setFlowId(reverseFlowId);
+
+ OFPacketOut po = constructPacketOut(pi, sw);
+ Path reversePathSpec = new Path(dstSwitchPort, srcSwitchPort);
+
+ // Add to waiting lists
+ pendingFlows.put(pathspec, flowId.value());
+ pendingFlows.put(reversePathSpec, reverseFlowId.value());
+ waitingPackets.put(flowId.value(), new PacketToPush(po, sw.getId()));
+
}
-
- log.debug("Adding new flow between {} at {} and {} at {}",
- new Object[]{srcMacAddress, srcSwitchPort, dstMacAddress, dstSwitchPort});
-
-
- CallerId callerId = new CallerId("Forwarding");
-
- //FlowId flowId = new FlowId(flowService.getNextFlowEntryId());
- FlowPath flowPath = new FlowPath();
- //flowPath.setFlowId(flowId);
- flowPath.setInstallerId(callerId);
-
- flowPath.setFlowPathType(FlowPathType.FP_TYPE_SHORTEST_PATH);
- flowPath.setFlowPathUserState(FlowPathUserState.FP_USER_ADD);
- flowPath.setFlowEntryMatch(new FlowEntryMatch());
- flowPath.flowEntryMatch().enableSrcMac(srcMacAddress);
- flowPath.flowEntryMatch().enableDstMac(dstMacAddress);
- // For now just forward IPv4 packets. This prevents accidentally
- // forwarding other stuff like ARP.
- flowPath.flowEntryMatch().enableEthernetFrameType(Ethernet.TYPE_IPv4);
- flowPath.setDataPath(datapath);
-
- DataPath reverseDataPath = new DataPath();
- // Reverse the ports for the reverse path
- reverseDataPath.setSrcPort(dstSwitchPort);
- reverseDataPath.setDstPort(srcSwitchPort);
-
- // TODO implement copy constructor for FlowPath
- FlowPath reverseFlowPath = new FlowPath();
- //reverseFlowPath.setFlowId(reverseFlowId);
- reverseFlowPath.setInstallerId(callerId);
- reverseFlowPath.setFlowPathType(FlowPathType.FP_TYPE_SHORTEST_PATH);
- reverseFlowPath.setFlowPathUserState(FlowPathUserState.FP_USER_ADD);
- reverseFlowPath.setFlowEntryMatch(new FlowEntryMatch());
- // Reverse the MAC addresses for the reverse path
- reverseFlowPath.flowEntryMatch().enableSrcMac(dstMacAddress);
- reverseFlowPath.flowEntryMatch().enableDstMac(srcMacAddress);
- reverseFlowPath.flowEntryMatch().enableEthernetFrameType(Ethernet.TYPE_IPv4);
- reverseFlowPath.setDataPath(reverseDataPath);
- reverseFlowPath.dataPath().srcPort().dpid().toString();
-
- // TODO what happens if no path exists?
- FlowId flowId = new FlowId(flowService.getNextFlowEntryId());
- FlowId reverseFlowId = new FlowId(flowService.getNextFlowEntryId());
-
- flowPath.setFlowId(flowId);
- reverseFlowPath.setFlowId(reverseFlowId);
-
- OFPacketOut po = constructPacketOut(datapath, pi, sw);
- Path reversePathSpec = new Path(dstSwitchPort, srcSwitchPort);
-
- // Add to waiting lists
- pendingFlows.put(pathspec, flowId.value());
- pendingFlows.put(reversePathSpec, reverseFlowId.value());
- waitingPackets.put(flowId.value(), new PacketToPush(po, sw.getId()));
-
flowService.addFlow(reverseFlowPath);
flowService.addFlow(flowPath);
+
}
/*
@@ -339,11 +380,7 @@
}
*/
- private OFPacketOut constructPacketOut(DataPath datapath, OFPacketIn pi,
- IOFSwitch sw) {
- //List<OFAction> actions = new ArrayList<OFAction>(1);
- //actions.add(new OFActionOutput(port));
-
+ private OFPacketOut constructPacketOut(OFPacketIn pi, IOFSwitch sw) {
OFPacketOut po = new OFPacketOut();
po.setInPort(OFPort.OFPP_NONE)
.setInPort(pi.getInPort())
@@ -361,18 +398,29 @@
return po;
}
-
+
@Override
- public void flowInstalled(FlowPath installedFlowPath) {
+ public void flowsInstalled(Collection<FlowPath> installedFlowPaths) {
+ for (FlowPath flowPath : installedFlowPaths) {
+ flowInstalled(flowPath);
+ }
+ }
+
+ private void flowInstalled(FlowPath installedFlowPath) {
// TODO check concurrency
// will need to sync and access both collections at once.
long flowId = installedFlowPath.flowId().value();
- Collection<PacketToPush> packets = waitingPackets.removeAll(flowId);
- //remove pending flows entry
- Path pathToRemove = new Path(installedFlowPath.dataPath().srcPort(),
- installedFlowPath.dataPath().dstPort());
- pendingFlows.remove(pathToRemove);
+ Collection<PacketToPush> packets;
+ synchronized (lock) {
+ packets = waitingPackets.removeAll(flowId);
+
+ //remove pending flows entry
+ Path pathToRemove = new Path(installedFlowPath.dataPath().srcPort(),
+ installedFlowPath.dataPath().dstPort());
+ pendingFlows.remove(pathToRemove);
+
+ }
for (PacketToPush packet : packets) {
IOFSwitch sw = floodlightProvider.getSwitches().get(packet.dpid);
@@ -385,13 +433,7 @@
(po.getActionsLength() + OFActionOutput.MINIMUM_LENGTH));
po.setLengthU(po.getLengthU() + OFActionOutput.MINIMUM_LENGTH);
- try {
- sw.write(packet.packet, null);
- sw.flush();
- } catch (IOException e) {
- log.error("Error writing packet out to switch {}:",
- sw.getId(), e);
- }
+ flowPusher.add(sw, po);
}
}
}
diff --git a/src/main/java/net/onrc/onos/ofcontroller/forwarding/IForwardingService.java b/src/main/java/net/onrc/onos/ofcontroller/forwarding/IForwardingService.java
index 07f6733..e5bd714 100644
--- a/src/main/java/net/onrc/onos/ofcontroller/forwarding/IForwardingService.java
+++ b/src/main/java/net/onrc/onos/ofcontroller/forwarding/IForwardingService.java
@@ -1,5 +1,7 @@
package net.onrc.onos.ofcontroller.forwarding;
+import java.util.Collection;
+
import net.floodlightcontroller.core.module.IFloodlightService;
import net.onrc.onos.ofcontroller.util.FlowPath;
@@ -13,9 +15,11 @@
*/
public interface IForwardingService extends IFloodlightService {
/**
- * Notify the Forwarding module that a flow has been installed
- * in the network.
- * @param flowPath The FlowPath object describing the installed flow
+ * Notify the Forwarding module that a collection of flows has been
+ * installed in the network.
+ *
+ * @param installedFlowPaths the collection of FlowPaths that have
+ * been installed in the network.
*/
- public void flowInstalled(FlowPath flowPath);
+ public void flowsInstalled(Collection<FlowPath> installedFlowPaths);
}
diff --git a/src/main/java/net/onrc/onos/ofcontroller/linkdiscovery/internal/LinkDiscoveryManager.java b/src/main/java/net/onrc/onos/ofcontroller/linkdiscovery/internal/LinkDiscoveryManager.java
index c03b266..8077201 100644
--- a/src/main/java/net/onrc/onos/ofcontroller/linkdiscovery/internal/LinkDiscoveryManager.java
+++ b/src/main/java/net/onrc/onos/ofcontroller/linkdiscovery/internal/LinkDiscoveryManager.java
@@ -18,7 +18,6 @@
package net.onrc.onos.ofcontroller.linkdiscovery.internal;
import java.io.IOException;
-
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.NetworkInterface;
@@ -41,9 +40,6 @@
import net.floodlightcontroller.core.FloodlightContext;
import net.floodlightcontroller.core.IFloodlightProviderService;
-import net.floodlightcontroller.core.IFloodlightProviderService.Role;
-import net.floodlightcontroller.core.IHAListener;
-import net.floodlightcontroller.core.IInfoProvider;
import net.floodlightcontroller.core.IOFMessageListener;
import net.floodlightcontroller.core.IOFSwitch;
import net.floodlightcontroller.core.IOFSwitchListener;
@@ -63,24 +59,17 @@
import net.floodlightcontroller.packet.LLDPTLV;
import net.floodlightcontroller.restserver.IRestApiService;
import net.floodlightcontroller.routing.Link;
-import net.floodlightcontroller.storage.IResultSet;
-import net.floodlightcontroller.storage.IStorageSourceListener;
-import net.floodlightcontroller.storage.IStorageSourceService;
-import net.floodlightcontroller.storage.OperatorPredicate;
-import net.floodlightcontroller.storage.StorageException;
import net.floodlightcontroller.threadpool.IThreadPoolService;
import net.floodlightcontroller.topology.NodePortTuple;
import net.floodlightcontroller.util.EventHistory;
import net.floodlightcontroller.util.EventHistory.EvAction;
import net.onrc.onos.ofcontroller.core.IOnosRemoteSwitch;
import net.onrc.onos.ofcontroller.linkdiscovery.ILinkDiscovery;
+import net.onrc.onos.ofcontroller.linkdiscovery.ILinkDiscovery.LDUpdate;
+import net.onrc.onos.ofcontroller.linkdiscovery.ILinkDiscovery.UpdateOperation;
import net.onrc.onos.ofcontroller.linkdiscovery.ILinkDiscoveryListener;
import net.onrc.onos.ofcontroller.linkdiscovery.ILinkDiscoveryService;
import net.onrc.onos.ofcontroller.linkdiscovery.LinkInfo;
-import net.onrc.onos.ofcontroller.linkdiscovery.ILinkDiscovery.LDUpdate;
-import net.onrc.onos.ofcontroller.linkdiscovery.ILinkDiscovery.LinkType;
-import net.onrc.onos.ofcontroller.linkdiscovery.ILinkDiscovery.SwitchType;
-import net.onrc.onos.ofcontroller.linkdiscovery.ILinkDiscovery.UpdateOperation;
import net.onrc.onos.ofcontroller.linkdiscovery.web.LinkDiscoveryWebRoutable;
import net.onrc.onos.registry.controller.IControllerRegistryService;
@@ -124,27 +113,11 @@
@LogMessageCategory("Network Topology")
public class LinkDiscoveryManager
implements IOFMessageListener, IOFSwitchListener,
-IStorageSourceListener, ILinkDiscoveryService,
-IFloodlightModule, IInfoProvider, IHAListener {
+ILinkDiscoveryService, IFloodlightModule {
protected IFloodlightProviderService controller;
protected final static Logger log = LoggerFactory.getLogger(LinkDiscoveryManager.class);
- // Names of table/fields for links in the storage API
- private static final String LINK_TABLE_NAME = "controller_link";
- private static final String LINK_ID = "id";
- private static final String LINK_SRC_SWITCH = "src_switch_id";
- private static final String LINK_SRC_PORT = "src_port";
- private static final String LINK_SRC_PORT_STATE = "src_port_state";
- private static final String LINK_DST_SWITCH = "dst_switch_id";
- private static final String LINK_DST_PORT = "dst_port";
- private static final String LINK_DST_PORT_STATE = "dst_port_state";
- private static final String LINK_VALID_TIME = "valid_time";
- private static final String LINK_TYPE = "link_type";
- private static final String SWITCH_CONFIG_TABLE_NAME = "controller_switchconfig";
- private static final String SWITCH_CONFIG_CORE_SWITCH = "core_switch";
-
protected IFloodlightProviderService floodlightProvider;
- protected IStorageSourceService storageSource;
protected IThreadPoolService threadPool;
protected IRestApiService restApi;
// Registry Service for ONOS
@@ -246,26 +219,24 @@
recommendation=LogMessageDoc.GENERIC_ACTION)
@Override
public void dispatch() {
- if (linkDiscoveryAware != null) {
- if (log.isTraceEnabled()) {
- log.trace("Dispatching link discovery update {} {} {} {} {} for {}",
- new Object[]{this.getOperation(),
- HexString.toHexString(this.getSrc()), this.getSrcPort(),
- HexString.toHexString(this.getDst()), this.getDstPort(),
- linkDiscoveryAware});
- }
- try {
- for (ILinkDiscoveryListener lda : linkDiscoveryAware) { // order maintained
- lda.linkDiscoveryUpdate(this);
- }
- }
- catch (Exception e) {
- log.error("Error in link discovery updates loop", e);
- }
- }
-
+ if (linkDiscoveryAware != null) {
+ if (log.isTraceEnabled()) {
+ log.trace("Dispatching link discovery update {} {} {} {} {} for {}",
+ new Object[]{this.getOperation(),
+ HexString.toHexString(this.getSrc()), this.getSrcPort(),
+ HexString.toHexString(this.getDst()), this.getDstPort(),
+ linkDiscoveryAware});
+ }
+ try {
+ for (ILinkDiscoveryListener lda : linkDiscoveryAware) { // order maintained
+ lda.linkDiscoveryUpdate(this);
+ }
+ }
+ catch (Exception e) {
+ log.error("Error in link discovery updates loop", e);
+ }
+ }
}
-
}
/**
@@ -1063,8 +1034,6 @@
// Add to portNOFLinks if the unicast valid time is null
if (newInfo.getUnicastValidTime() == null)
addLinkToBroadcastDomain(lt);
-
- writeLinkToStorage(lt, newInfo);
// ONOS: Distinguish added event separately from updated event
updateOperation = UpdateOperation.LINK_ADDED;
@@ -1118,11 +1087,6 @@
oldInfo.getDstPortState().intValue())
linkChanged = true;
- // Write changes to storage. This will always write the updated
- // valid time, plus the port states if they've changed (i.e. if
- // they weren't set to null in the previous block of code.
- writeLinkToStorage(lt, newInfo);
-
if (linkChanged) {
updateOperation = getUpdateOperation(newInfo.getSrcPortState(),
newInfo.getDstPortState());
@@ -1209,9 +1173,6 @@
ILinkDiscovery.LinkType.INVALID_LINK,
EvAction.LINK_DELETED, reason);
- // remove link from storage.
- removeLinkFromStorage(lt);
-
// TODO Whenever link is removed, it has to checked if
// the switchports must be added to quarantine.
@@ -1304,7 +1265,7 @@
getLinkType(lt, linkInfo),
operation));
controller.publishUpdate(update);
- writeLinkToStorage(lt, linkInfo);
+
linkInfoChanged = true;
}
}
@@ -1578,123 +1539,6 @@
}
}
- // STORAGE METHODS
- /**
- * Deletes all links from storage
- */
- void clearAllLinks() {
- storageSource.deleteRowsAsync(LINK_TABLE_NAME, null);
- }
-
- /**
- * Gets the storage key for a LinkTuple
- * @param lt The LinkTuple to get
- * @return The storage key as a String
- */
- private String getLinkId(Link lt) {
- return HexString.toHexString(lt.getSrc()) +
- "-" + lt.getSrcPort() + "-" +
- HexString.toHexString(lt.getDst())+
- "-" + lt.getDstPort();
- }
-
- /**
- * Writes a LinkTuple and corresponding LinkInfo to storage
- * @param lt The LinkTuple to write
- * @param linkInfo The LinkInfo to write
- */
- protected void writeLinkToStorage(Link lt, LinkInfo linkInfo) {
- LinkType type = getLinkType(lt, linkInfo);
-
- // Write only direct links. Do not write links to external
- // L2 network.
- // if (type != LinkType.DIRECT_LINK && type != LinkType.TUNNEL) {
- // return;
- // }
-
- Map<String, Object> rowValues = new HashMap<String, Object>();
- String id = getLinkId(lt);
- rowValues.put(LINK_ID, id);
- rowValues.put(LINK_VALID_TIME, linkInfo.getUnicastValidTime());
- String srcDpid = HexString.toHexString(lt.getSrc());
- rowValues.put(LINK_SRC_SWITCH, srcDpid);
- rowValues.put(LINK_SRC_PORT, lt.getSrcPort());
-
- if (type == LinkType.DIRECT_LINK)
- rowValues.put(LINK_TYPE, "internal");
- else if (type == LinkType.MULTIHOP_LINK)
- rowValues.put(LINK_TYPE, "external");
- else if (type == LinkType.TUNNEL)
- rowValues.put(LINK_TYPE, "tunnel");
- else rowValues.put(LINK_TYPE, "invalid");
-
- if (linkInfo.linkStpBlocked()) {
- if (log.isTraceEnabled()) {
- log.trace("writeLink, link {}, info {}, srcPortState Blocked",
- lt, linkInfo);
- }
- rowValues.put(LINK_SRC_PORT_STATE,
- OFPhysicalPort.OFPortState.OFPPS_STP_BLOCK.getValue());
- } else {
- if (log.isTraceEnabled()) {
- log.trace("writeLink, link {}, info {}, srcPortState {}",
- new Object[]{ lt, linkInfo, linkInfo.getSrcPortState() });
- }
- rowValues.put(LINK_SRC_PORT_STATE, linkInfo.getSrcPortState());
- }
- String dstDpid = HexString.toHexString(lt.getDst());
- rowValues.put(LINK_DST_SWITCH, dstDpid);
- rowValues.put(LINK_DST_PORT, lt.getDstPort());
- if (linkInfo.linkStpBlocked()) {
- if (log.isTraceEnabled()) {
- log.trace("writeLink, link {}, info {}, dstPortState Blocked",
- lt, linkInfo);
- }
- rowValues.put(LINK_DST_PORT_STATE,
- OFPhysicalPort.OFPortState.OFPPS_STP_BLOCK.getValue());
- } else {
- if (log.isTraceEnabled()) {
- log.trace("writeLink, link {}, info {}, dstPortState {}",
- new Object[]{ lt, linkInfo, linkInfo.getDstPortState() });
- }
- rowValues.put(LINK_DST_PORT_STATE, linkInfo.getDstPortState());
- }
- storageSource.updateRowAsync(LINK_TABLE_NAME, rowValues);
- }
-
- public Long readLinkValidTime(Link lt) {
- // FIXME: We're not currently using this right now, but if we start
- // to use this again, we probably shouldn't use it in its current
- // form, because it's doing synchronous storage calls. Depending
- // on the context this may still be OK, but if it's being called
- // on the packet in processing thread it should be reworked to
- // use asynchronous storage calls.
- Long validTime = null;
- IResultSet resultSet = null;
- try {
- String[] columns = { LINK_VALID_TIME };
- String id = getLinkId(lt);
- resultSet = storageSource.executeQuery(LINK_TABLE_NAME, columns,
- new OperatorPredicate(LINK_ID, OperatorPredicate.Operator.EQ, id), null);
- if (resultSet.next())
- validTime = resultSet.getLong(LINK_VALID_TIME);
- }
- finally {
- if (resultSet != null)
- resultSet.close();
- }
- return validTime;
- }
-
- /**
- * Removes a link from storage using an asynchronous call.
- * @param lt The LinkTuple to delete.
- */
- protected void removeLinkFromStorage(Link lt) {
- String id = getLinkId(lt);
- storageSource.deleteRowAsync(LINK_TABLE_NAME, id);
- }
-
@Override
public void addListener(ILinkDiscoveryListener listener) {
linkDiscoveryAware.add(listener);
@@ -1718,22 +1562,6 @@
this.linkDiscoveryAware.remove(linkDiscoveryAwareComponent);
}
- /**
- * Sets the IStorageSource to use for ITology
- * @param storageSource the storage source to use
- */
- public void setStorageSource(IStorageSourceService storageSource) {
- this.storageSource = storageSource;
- }
-
- /**
- * Gets the storage source for this ITopology
- * @return The IStorageSource ITopology is writing to
- */
- public IStorageSourceService getStorageSource() {
- return storageSource;
- }
-
@Override
public boolean isCallbackOrderingPrereq(OFType type, String name) {
return false;
@@ -1744,72 +1572,6 @@
return false;
}
- @Override
- public void rowsModified(String tableName, Set<Object> rowKeys) {
- Map<Long, IOFSwitch> switches = floodlightProvider.getSwitches();
- ArrayList<IOFSwitch> updated_switches = new ArrayList<IOFSwitch>();
- for(Object key: rowKeys) {
- Long swId = new Long(HexString.toLong((String)key));
- if (switches.containsKey(swId)) {
- IOFSwitch sw = switches.get(swId);
- boolean curr_status = sw.hasAttribute(IOFSwitch.SWITCH_IS_CORE_SWITCH);
- boolean new_status = false;
- IResultSet resultSet = null;
-
- try {
- resultSet = storageSource.getRow(tableName, key);
- for (Iterator<IResultSet> it = resultSet.iterator(); it.hasNext();) {
- // In case of multiple rows, use the status in last row?
- Map<String, Object> row = it.next().getRow();
- if (row.containsKey(SWITCH_CONFIG_CORE_SWITCH)) {
- new_status = ((String)row.get(SWITCH_CONFIG_CORE_SWITCH)).equals("true");
- }
- }
- }
- finally {
- if (resultSet != null)
- resultSet.close();
- }
-
- if (curr_status != new_status) {
- updated_switches.add(sw);
- }
- } else {
- if (log.isTraceEnabled()) {
- log.trace("Update for switch which has no entry in switch " +
- "list (dpid={}), a delete action.", (String)key);
- }
- }
- }
-
- for (IOFSwitch sw : updated_switches) {
- // Set SWITCH_IS_CORE_SWITCH to it's inverse value
- if (sw.hasAttribute(IOFSwitch.SWITCH_IS_CORE_SWITCH)) {
- sw.removeAttribute(IOFSwitch.SWITCH_IS_CORE_SWITCH);
- if (log.isTraceEnabled()) {
- log.trace("SWITCH_IS_CORE_SWITCH set to False for {}", sw);
- }
- LinkUpdate update = new LinkUpdate(new LDUpdate(sw.getId(), SwitchType.BASIC_SWITCH,
- UpdateOperation.SWITCH_UPDATED));
- controller.publishUpdate(update);
- }
- else {
- sw.setAttribute(IOFSwitch.SWITCH_IS_CORE_SWITCH, new Boolean(true));
- if (log.isTraceEnabled()) {
- log.trace("SWITCH_IS_CORE_SWITCH set to True for {}", sw);
- }
- LinkUpdate update = new LinkUpdate(new LDUpdate(sw.getId(), SwitchType.CORE_SWITCH,
- UpdateOperation.SWITCH_UPDATED));
- controller.publishUpdate(update);
- }
- }
- }
-
- @Override
- public void rowsDeleted(String tableName, Set<Object> rowKeys) {
- // Ignore delete events, the switch delete will do the right thing on it's own
- }
-
// IFloodlightModule classes
@Override
@@ -1838,7 +1600,6 @@
Collection<Class<? extends IFloodlightService>> l =
new ArrayList<Class<? extends IFloodlightService>>();
l.add(IFloodlightProviderService.class);
- l.add(IStorageSourceService.class);
l.add(IThreadPoolService.class);
l.add(IRestApiService.class);
// Added by ONOS
@@ -1850,7 +1611,6 @@
public void init(FloodlightModuleContext context)
throws FloodlightModuleException {
floodlightProvider = context.getServiceImpl(IFloodlightProviderService.class);
- storageSource = context.getServiceImpl(IStorageSourceService.class);
threadPool = context.getServiceImpl(IThreadPoolService.class);
restApi = context.getServiceImpl(IRestApiService.class);
// Added by ONOS
@@ -1906,23 +1666,6 @@
recommendation=LogMessageDoc.CHECK_SWITCH)
})
public void startUp(FloodlightModuleContext context) {
- // Create our storage tables
- if (storageSource == null) {
- log.error("No storage source found.");
- return;
- }
-
- storageSource.createTable(LINK_TABLE_NAME, null);
- storageSource.setTablePrimaryKeyName(LINK_TABLE_NAME, LINK_ID);
- storageSource.deleteMatchingRows(LINK_TABLE_NAME, null);
- // Register for storage updates for the switch table
- try {
- storageSource.addListener(SWITCH_CONFIG_TABLE_NAME, this);
- } catch (StorageException ex) {
- log.error("Error in installing listener for " +
- "switch table {}", SWITCH_CONFIG_TABLE_NAME);
- }
-
ScheduledExecutorService ses = threadPool.getScheduledExecutor();
controller =
context.getServiceImpl(IFloodlightProviderService.class);
@@ -1933,36 +1676,22 @@
public void run() {
try {
discoverLinks();
- } catch (StorageException e) {
- log.error("Storage exception in LLDP send timer; " +
- "terminating process", e);
- floodlightProvider.terminate();
} catch (Exception e) {
log.error("Exception in LLDP send timer.", e);
} finally {
if (!shuttingDown) {
- // null role implies HA mode is not enabled.
- Role role = floodlightProvider.getRole();
- if (role == null || role == Role.MASTER) {
- log.trace("Rescheduling discovery task as role = {}", role);
- discoveryTask.reschedule(DISCOVERY_TASK_INTERVAL,
- TimeUnit.SECONDS);
- } else {
- log.trace("Stopped LLDP rescheduling due to role = {}.", role);
- }
+ // Always reschedule link discovery if we're not
+ // shutting down (no chance of SLAVE role now)
+ log.trace("Rescheduling discovery task");
+ discoveryTask.reschedule(DISCOVERY_TASK_INTERVAL,
+ TimeUnit.SECONDS);
}
}
}
});
- // null role implies HA mode is not enabled.
- Role role = floodlightProvider.getRole();
- if (role == null || role == Role.MASTER) {
- log.trace("Setup: Rescheduling discovery task. role = {}", role);
- discoveryTask.reschedule(DISCOVERY_TASK_INTERVAL, TimeUnit.SECONDS);
- } else {
- log.trace("Setup: Not scheduling LLDP as role = {}.", role);
- }
+ // Always reschedule link discovery as we are never in SLAVE role now
+ discoveryTask.reschedule(DISCOVERY_TASK_INTERVAL, TimeUnit.SECONDS);
// Setup the BDDP task. It is invoked whenever switch port tuples
// are added to the quarantine list.
@@ -1975,8 +1704,6 @@
floodlightProvider.addOFMessageListener(OFType.PORT_STATUS, this);
// Register for switch updates
floodlightProvider.addOFSwitchListener(this);
- floodlightProvider.addHAListener(this);
- floodlightProvider.addInfoProvider("summary", this);
if (restApi != null)
restApi.addRestletRoutable(new LinkDiscoveryWebRoutable());
setControllerTLV();
@@ -2061,59 +1788,6 @@
evTopoCluster = evHistTopologyCluster.put(evTopoCluster, action);
}
- @Override
- public Map<String, Object> getInfo(String type) {
- if (!"summary".equals(type)) return null;
-
- Map<String, Object> info = new HashMap<String, Object>();
-
- int num_links = 0;
- for (Set<Link> links : switchLinks.values())
- num_links += links.size();
- info.put("# inter-switch links", num_links / 2);
-
- return info;
- }
-
- // IHARoleListener
- @Override
- public void roleChanged(Role oldRole, Role newRole) {
- switch(newRole) {
- case MASTER:
- if (oldRole == Role.SLAVE) {
- if (log.isTraceEnabled()) {
- log.trace("Sending LLDPs " +
- "to HA change from SLAVE->MASTER");
- }
- clearAllLinks();
- log.debug("Role Change to Master: Rescheduling discovery task.");
- discoveryTask.reschedule(1, TimeUnit.MICROSECONDS);
- }
- break;
- case SLAVE:
- if (log.isTraceEnabled()) {
- log.trace("Clearing links due to " +
- "HA change to SLAVE");
- }
- switchLinks.clear();
- links.clear();
- portLinks.clear();
- portBroadcastDomainLinks.clear();
- discoverOnAllPorts();
- break;
- default:
- break;
- }
- }
-
- @Override
- public void controllerNodeIPsChanged(
- Map<String, String> curControllerNodeIPs,
- Map<String, String> addedControllerNodeIPs,
- Map<String, String> removedControllerNodeIPs) {
- // ignore
- }
-
public boolean isAutoPortFastFeature() {
return autoPortFastFeature;
}
diff --git a/src/main/java/net/onrc/onos/ofcontroller/proxyarp/ArpMessage.java b/src/main/java/net/onrc/onos/ofcontroller/proxyarp/ArpMessage.java
index 5f22ca2..ee8f23d 100644
--- a/src/main/java/net/onrc/onos/ofcontroller/proxyarp/ArpMessage.java
+++ b/src/main/java/net/onrc/onos/ofcontroller/proxyarp/ArpMessage.java
@@ -2,6 +2,7 @@
import java.io.Serializable;
import java.net.InetAddress;
+import net.floodlightcontroller.util.MACAddress;
public class ArpMessage implements Serializable {
@@ -14,6 +15,14 @@
private final InetAddress forAddress;
private final byte[] packetData;
+ //ARP reply message needs MAC info
+ private final MACAddress mac;
+	//To send the ARP request only to the device's attachment point, we need the attachment switch and port.
+ private final long outSwitch;
+ private final short outPort;
+
+
+
public enum Type {
REQUEST,
REPLY
@@ -24,14 +33,41 @@
this.type = type;
this.forAddress = address;
this.packetData = eth;
+ this.mac = null;
+ this.outSwitch = -1;
+ this.outPort = -1;
}
private ArpMessage(Type type, InetAddress address) {
this.type = type;
this.forAddress = address;
this.packetData = null;
+ this.mac = null;
+ this.outSwitch = -1;
+ this.outPort = -1;
+
+ }
+ // the ARP reply message with MAC
+ private ArpMessage(Type type, InetAddress address, MACAddress mac) {
+ this.type = type;
+ this.forAddress = address;
+ this.packetData = null;
+ this.mac = mac;
+ this.outSwitch = -1;
+ this.outPort = -1;
}
+ // construct ARP request message with attachment switch and port
+ private ArpMessage(Type type, InetAddress address, byte[] arpRequest,
+ long outSwitch, short outPort) {
+ this.type = type;
+ this.forAddress = address;
+ this.packetData = arpRequest;
+ this.mac = null;
+ this.outSwitch = outSwitch;
+ this.outPort = outPort;
+ }
+
public static ArpMessage newRequest(InetAddress forAddress, byte[] arpRequest) {
return new ArpMessage(Type.REQUEST, forAddress, arpRequest);
}
@@ -39,6 +75,16 @@
public static ArpMessage newReply(InetAddress forAddress) {
return new ArpMessage(Type.REPLY, forAddress);
}
+ //ARP reply message with MAC
+ public static ArpMessage newReply(InetAddress forAddress, MACAddress mac) {
+ return new ArpMessage(Type.REPLY, forAddress, mac);
+
+ }
+	//ARP request message with attachment switch and port
+ public static ArpMessage newRequest(InetAddress forAddress, byte[] arpRequest, long outSwitch, short outPort ) {
+ return new ArpMessage(Type.REQUEST, forAddress, arpRequest, outSwitch, outPort);
+
+ }
public Type getType() {
return type;
@@ -51,4 +97,16 @@
public byte[] getPacket() {
return packetData;
}
+ public MACAddress getMAC() {
+ return mac;
+ }
+
+ public long getOutSwitch() {
+ return outSwitch;
+ }
+
+ public short getOutPort() {
+ return outPort;
+ }
+
}
diff --git a/src/main/java/net/onrc/onos/ofcontroller/proxyarp/ProxyArpManager.java b/src/main/java/net/onrc/onos/ofcontroller/proxyarp/ProxyArpManager.java
index 415d697..eadbbdd 100644
--- a/src/main/java/net/onrc/onos/ofcontroller/proxyarp/ProxyArpManager.java
+++ b/src/main/java/net/onrc/onos/ofcontroller/proxyarp/ProxyArpManager.java
@@ -32,6 +32,7 @@
import net.onrc.onos.ofcontroller.core.IDeviceStorage;
import net.onrc.onos.ofcontroller.core.INetMapTopologyObjects.IDeviceObject;
import net.onrc.onos.ofcontroller.core.INetMapTopologyObjects.IPortObject;
+import net.onrc.onos.ofcontroller.core.INetMapTopologyObjects.ISwitchObject;
import net.onrc.onos.ofcontroller.core.INetMapTopologyService.ITopoSwitchService;
import net.onrc.onos.ofcontroller.core.config.IConfigInfoService;
import net.onrc.onos.ofcontroller.core.internal.DeviceStorageImpl;
@@ -60,9 +61,9 @@
IArpEventHandler, IFloodlightModule {
private final static Logger log = LoggerFactory.getLogger(ProxyArpManager.class);
- private final long ARP_TIMER_PERIOD = 60000; //ms (== 1 min)
-
- private static final int ARP_REQUEST_TIMEOUT = 2000; //ms
+ private final long ARP_TIMER_PERIOD = 100; //ms
+
+ private static final int ARP_REQUEST_TIMEOUT = 500; //ms
private IFloodlightProviderService floodlightProvider;
private ITopologyService topology;
@@ -207,8 +208,8 @@
//Have to synchronize externally on the Multimap while using an iterator,
//even though it's a synchronizedMultimap
synchronized (arpRequests) {
- log.debug("Current have {} outstanding requests",
- arpRequests.size());
+ //log.debug("Current have {} outstanding requests",
+ //arpRequests.size());
Iterator<Map.Entry<InetAddress, ArpRequest>> it
= arpRequests.entries().iterator();
@@ -221,6 +222,15 @@
log.debug("Cleaning expired ARP request for {}",
entry.getKey().getHostAddress());
+	//If the ARP request has expired, delete the corresponding device from the database
+ IDeviceObject targetDevice =
+ deviceStorage.getDeviceByIP(InetAddresses.coerceToInteger(entry.getKey()));
+
+ if(targetDevice!=null)
+ {deviceStorage.removeDevice(targetDevice);
+ log.debug("RemoveDevice: {} due to no have not recieve the ARP reply", targetDevice.toString());
+ }
+
it.remove();
if (request.shouldRetry()) {
@@ -277,6 +287,7 @@
if (eth.getEtherType() == Ethernet.TYPE_ARP){
ARP arp = (ARP) eth.getPayload();
if (arp.getOpCode() == ARP.OP_REQUEST) {
+ log.debug("receive ARP request");
//TODO check what the DeviceManager does about propagating
//or swallowing ARPs. We want to go after DeviceManager in the
//chain but we really need it to CONTINUE ARP packets so we can
@@ -284,12 +295,16 @@
handleArpRequest(sw, pi, arp, eth);
}
else if (arp.getOpCode() == ARP.OP_REPLY) {
- //handleArpReply(sw, pi, arp);
+ log.debug("receive ARP reply");
+ handleArpReply(sw, pi, arp);
+ sendToOtherNodesReply(eth, pi);
}
+
+ // Stop ARP packets here
+ return Command.STOP;
}
- //TODO should we propagate ARP or swallow it?
- //Always propagate for now so DeviceManager can learn the host location
+ // Propagate everything else
return Command.CONTINUE;
}
@@ -301,7 +316,7 @@
InetAddress target;
try {
- target = InetAddress.getByAddress(arp.getTargetProtocolAddress());
+ target = InetAddress.getByAddress(arp.getTargetProtocolAddress());
} catch (UnknownHostException e) {
log.debug("Invalid address in ARP request", e);
return;
@@ -313,69 +328,74 @@
if (configService.isInterfaceAddress(target)) {
log.trace("ARP request for our interface. Sending reply {} => {}",
target.getHostAddress(), configService.getRouterMacAddress());
-
+
sendArpReply(arp, sw.getId(), pi.getInPort(),
configService.getRouterMacAddress());
}
-
+
return;
}
//MACAddress macAddress = arpCache.lookup(target);
-
+
IDeviceObject targetDevice =
deviceStorage.getDeviceByIP(InetAddresses.coerceToInteger(target));
-
log.debug("targetDevice: {}", targetDevice);
-
+
+ arpRequests.put(target, new ArpRequest(
+ new HostArpRequester(arp, sw.getId(), pi.getInPort()), false));
+
if (targetDevice != null) {
- // We have the device in our database, so send a reply
+	// Even if the device in our database is not null, we do not reply to the request directly; instead we check whether the device record is still valid
MACAddress macAddress = MACAddress.valueOf(targetDevice.getMACAddress());
-
+
if (log.isTraceEnabled()) {
- log.trace("Sending reply: {} => {} to host at {}/{}", new Object [] {
+ log.trace("The target Device Record in DB is: {} => {} from ARP request host at {}/{}", new Object [] {
inetAddressToString(arp.getTargetProtocolAddress()),
macAddress.toString(),
HexString.toHexString(sw.getId()), pi.getInPort()});
}
-
- sendArpReply(arp, sw.getId(), pi.getInPort(), macAddress);
- }
- else {
+
+ // sendArpReply(arp, sw.getId(), pi.getInPort(), macAddress);
+
+ log.trace("Checking the device info from DB is still valid or not");
+ Iterable<IPortObject> outPorts=targetDevice.getAttachedPorts();
+
+ if(!outPorts.iterator().hasNext()){
+ log.debug("outPort : null");
+ sendToOtherNodes(eth, pi);
+ }else{
+
+ for (IPortObject portObject : outPorts) {
+ long outSwitch=0;
+ short outPort=0;
+
+
+ if (!portObject.getLinkedPorts().iterator().hasNext()) {
+ outPort=portObject.getNumber();
+ log.debug("outPort:{} ", outPort);
+ }
+
+ Iterable<ISwitchObject> outSwitches= targetDevice.getSwitch();
+
+ for (ISwitchObject outswitch : outSwitches) {
+
+ outSwitch= HexString.toLong(outswitch.getDPID());
+ log.debug("outSwitch.DPID:{}; outPort: {}", outswitch.getDPID(), outPort );
+ sendToOtherNodes( eth, pi, outSwitch, outPort);
+ }
+ }
+ }
+
+ }else {
+ log.debug("The Device info in DB is {} for IP {}", targetDevice, inetAddressToString(arp.getTargetProtocolAddress()));
+
// We don't know the device so broadcast the request out
- // the edge of the network
-
- //Record where the request came from so we know where to send the reply
- arpRequests.put(target, new ArpRequest(
- new HostArpRequester(arp, sw.getId(), pi.getInPort()), false));
-
sendToOtherNodes(eth, pi);
}
-
- /*if (macAddress == null){
- //MAC address is not in our ARP cache.
-
- //Record where the request came from so we know where to send the reply
- //arpRequests.put(target, new ArpRequest(
- //new HostArpRequester(arp, sw.getId(), pi.getInPort()), false));
-
- //Flood the request out edge ports
- //sendArpRequestToSwitches(target, pi.getPacketData(), sw.getId(), pi.getInPort());
- }
- else {
- //We know the address, so send a reply
- if (log.isTraceEnabled()) {
- log.trace("Sending reply: {} => {} to host at {}/{}", new Object [] {
- inetAddressToString(arp.getTargetProtocolAddress()),
- macAddress.toString(),
- HexString.toHexString(sw.getId()), pi.getInPort()});
- }
-
- sendArpReply(arp, sw.getId(), pi.getInPort(), macAddress);
- }*/
+
}
- @SuppressWarnings("unused")
private void handleArpReply(IOFSwitch sw, OFPacketIn pi, ARP arp){
if (log.isTraceEnabled()) {
log.trace("ARP reply recieved: {} => {}, on {}/{}", new Object[] {
@@ -394,7 +414,7 @@
MACAddress senderMacAddress = MACAddress.valueOf(arp.getSenderHardwareAddress());
- arpCache.update(senderIpAddress, senderMacAddress);
+ //arpCache.update(senderIpAddress, senderMacAddress);
//See if anyone's waiting for this ARP reply
Set<ArpRequest> requests = arpRequests.get(senderIpAddress);
@@ -514,6 +534,51 @@
datagrid.sendArpRequest(ArpMessage.newRequest(targetAddress, eth.serialize()));
}
+ //hazelcast to other ONOS instances to send the ARP packet out on outPort of outSwitch
+ private void sendToOtherNodes(Ethernet eth, OFPacketIn pi, long outSwitch, short outPort) {
+ ARP arp = (ARP) eth.getPayload();
+
+ if (log.isTraceEnabled()) {
+ log.trace("Sending ARP request for {} to other ONOS instances with outSwitch {} ",
+ inetAddressToString(arp.getTargetProtocolAddress()), String.valueOf(outSwitch));
+
+ }
+
+ InetAddress targetAddress;
+ try {
+ targetAddress = InetAddress.getByAddress(arp.getTargetProtocolAddress());
+ } catch (UnknownHostException e) {
+ log.error("Unknown host", e);
+ return;
+ }
+
+ datagrid.sendArpRequest(ArpMessage.newRequest(targetAddress, eth.serialize(), outSwitch, outPort));
+ //datagrid.sendArpRequest(ArpMessage.newRequest(targetAddress, eth.serialize()));
+
+
+ }
+ private void sendToOtherNodesReply(Ethernet eth, OFPacketIn pi) {
+ ARP arp = (ARP) eth.getPayload();
+
+ if (log.isTraceEnabled()) {
+ log.trace("Sending ARP reply for {} to other ONOS instances",
+ inetAddressToString(arp.getSenderProtocolAddress()));
+ }
+
+ InetAddress targetAddress;
+ MACAddress mac = new MACAddress(arp.getSenderHardwareAddress());
+
+ try {
+ targetAddress = InetAddress.getByAddress(arp.getSenderProtocolAddress());
+ } catch (UnknownHostException e) {
+ log.error("Unknown host", e);
+ return;
+ }
+
+ datagrid.sendArpRequest(ArpMessage.newReply(targetAddress,mac));
+ //datagrid.sendArpReply(ArpMessage.newRequest(targetAddress, eth.serialize()));
+
+ }
private void broadcastArpRequestOutEdge(byte[] arpRequest, long inSwitch, short inPort) {
for (IOFSwitch sw : floodlightProvider.getSwitches().values()){
@@ -604,7 +669,7 @@
}
}
- log.debug("Broadcast ARP request for to: {}", switchPorts);
+ log.debug("Broadcast ARP request to: {}", switchPorts);
}
private void sendArpRequestOutPort(byte[] arpRequest, long dpid, short port) {
@@ -744,23 +809,26 @@
@Override
public void arpRequestNotification(ArpMessage arpMessage) {
- //log.debug("Received ARP notification from other instances");
-
+ log.debug("Received ARP notification from other instances");
+
switch (arpMessage.getType()){
case REQUEST:
- log.debug("Received ARP request notification for {}",
- arpMessage.getAddress());
- broadcastArpRequestOutMyEdge(arpMessage.getPacket());
+ if(arpMessage.getOutSwitch() == -1 || arpMessage.getOutPort() == -1){
+ broadcastArpRequestOutMyEdge(arpMessage.getPacket());
+ }else{
+ sendArpRequestOutPort(arpMessage.getPacket(),arpMessage.getOutSwitch(),arpMessage.getOutPort());
+ log.debug("OutSwitch in ARP request message is: {}; OutPort in ARP request message is: {}",arpMessage.getOutSwitch(),arpMessage.getOutPort());
+ }
break;
case REPLY:
log.debug("Received ARP reply notification for {}",
arpMessage.getAddress());
- sendArpReplyToWaitingRequesters(arpMessage.getAddress());
+ sendArpReplyToWaitingRequesters(arpMessage.getAddress(),arpMessage.getMAC());
break;
}
}
- private void sendArpReplyToWaitingRequesters(InetAddress address) {
+ private void sendArpReplyToWaitingRequesters(InetAddress address, MACAddress mac) {
log.debug("Sending ARP reply for {} to requesters",
address.getHostAddress());
@@ -778,13 +846,13 @@
}
}
- IDeviceObject deviceObject = deviceStorage.getDeviceByIP(
+ /*IDeviceObject deviceObject = deviceStorage.getDeviceByIP(
InetAddresses.coerceToInteger(address));
MACAddress mac = MACAddress.valueOf(deviceObject.getMACAddress());
log.debug("Found {} at {} in network map",
- address.getHostAddress(), mac);
+ address.getHostAddress(), mac);*/
//Don't hold an ARP lock while dispatching requests
for (ArpRequest request : requestsToSend) {
diff --git a/src/main/java/net/onrc/onos/ofcontroller/util/FlowPathFlags.java b/src/main/java/net/onrc/onos/ofcontroller/util/FlowPathFlags.java
index 97113ca..595eb5f 100644
--- a/src/main/java/net/onrc/onos/ofcontroller/util/FlowPathFlags.java
+++ b/src/main/java/net/onrc/onos/ofcontroller/util/FlowPathFlags.java
@@ -119,7 +119,9 @@
flagsStr += ",";
flagsStr += "KEEP_ONLY_FIRST_HOP_ENTRY";
}
- ret += flagsStr + "]";
+ if (flagsStr != null)
+ ret += flagsStr;
+ ret += "]";
return ret;
}
diff --git a/src/main/resources/META-INF/services/net.floodlightcontroller.core.module.IFloodlightModule b/src/main/resources/META-INF/services/net.floodlightcontroller.core.module.IFloodlightModule
index 4a60d2a..a842665 100644
--- a/src/main/resources/META-INF/services/net.floodlightcontroller.core.module.IFloodlightModule
+++ b/src/main/resources/META-INF/services/net.floodlightcontroller.core.module.IFloodlightModule
@@ -1,13 +1,10 @@
net.floodlightcontroller.core.FloodlightProvider
-net.floodlightcontroller.storage.memory.MemoryStorageSource
net.onrc.onos.ofcontroller.floodlightlistener.NetworkGraphPublisher
net.floodlightcontroller.devicemanager.internal.DeviceManagerImpl
net.onrc.onos.ofcontroller.linkdiscovery.internal.LinkDiscoveryManager
net.floodlightcontroller.topology.TopologyManager
net.floodlightcontroller.forwarding.Forwarding
net.floodlightcontroller.restserver.RestApiServer
-net.floodlightcontroller.counter.CounterStore
-net.floodlightcontroller.counter.NullCounterStore
net.floodlightcontroller.threadpool.ThreadPool
net.floodlightcontroller.ui.web.StaticWebRoutable
net.floodlightcontroller.devicemanager.internal.DefaultEntityClassifier
diff --git a/src/test/java/net/floodlightcontroller/core/internal/ControllerTest.java b/src/test/java/net/floodlightcontroller/core/internal/ControllerTest.java
index fcdbcf0..cbb4b17 100644
--- a/src/test/java/net/floodlightcontroller/core/internal/ControllerTest.java
+++ b/src/test/java/net/floodlightcontroller/core/internal/ControllerTest.java
@@ -32,10 +32,7 @@
import java.util.ArrayList;
import java.util.Collection;
-import java.util.Date;
-import java.util.HashMap;
import java.util.List;
-import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
@@ -44,7 +41,6 @@
import net.floodlightcontroller.core.FloodlightProvider;
import net.floodlightcontroller.core.IFloodlightProviderService;
import net.floodlightcontroller.core.IFloodlightProviderService.Role;
-import net.floodlightcontroller.core.IHAListener;
import net.floodlightcontroller.core.IListener.Command;
import net.floodlightcontroller.core.IOFMessageListener;
import net.floodlightcontroller.core.IOFSwitch;
@@ -55,16 +51,12 @@
import net.floodlightcontroller.core.internal.OFChannelState.HandshakeState;
import net.floodlightcontroller.core.module.FloodlightModuleContext;
import net.floodlightcontroller.core.test.MockThreadPoolService;
-import net.floodlightcontroller.counter.CounterStore;
-import net.floodlightcontroller.counter.ICounterStoreService;
import net.floodlightcontroller.packet.ARP;
import net.floodlightcontroller.packet.Ethernet;
import net.floodlightcontroller.packet.IPacket;
import net.floodlightcontroller.packet.IPv4;
import net.floodlightcontroller.restserver.IRestApiService;
import net.floodlightcontroller.restserver.RestApiServer;
-import net.floodlightcontroller.storage.IStorageSourceService;
-import net.floodlightcontroller.storage.memory.MemoryStorageSource;
import net.floodlightcontroller.test.FloodlightTestCase;
import net.floodlightcontroller.threadpool.IThreadPoolService;
import net.onrc.onos.ofcontroller.core.IOFSwitchPortListener;
@@ -121,15 +113,8 @@
controller = (Controller)cm.getServiceImpls().get(IFloodlightProviderService.class);
fmc.addService(IFloodlightProviderService.class, controller);
- MemoryStorageSource memstorage = new MemoryStorageSource();
- fmc.addService(IStorageSourceService.class, memstorage);
-
RestApiServer restApi = new RestApiServer();
fmc.addService(IRestApiService.class, restApi);
-
- CounterStore cs = new CounterStore();
- fmc.addService(ICounterStoreService.class, cs);
-
tp = new MockThreadPoolService();
fmc.addService(IThreadPoolService.class, tp);
@@ -145,13 +130,11 @@
restApi.init(fmc);
- memstorage.init(fmc);
cm.init(fmc);
tp.init(fmc);
sr.init(fmc);
linkDiscovery.init(fmc);
restApi.startUp(fmc);
- memstorage.startUp(fmc);
cm.startUp(fmc);
tp.startUp(fmc);
sr.startUp(fmc);
@@ -183,10 +166,12 @@
expect(sw.getId()).andReturn(dpid).anyTimes();
expect(sw.getStringId()).andReturn(dpidString).anyTimes();
- expect(sw.getConnectedSince()).andReturn(new Date());
- Channel channel = createMock(Channel.class);
- expect(sw.getChannel()).andReturn(channel);
- expect(channel.getRemoteAddress()).andReturn(null);
+
+        //Now that we don't write to storage, these methods aren't called
+ //expect(sw.getConnectedSince()).andReturn(new Date());
+ //Channel channel = createMock(Channel.class);
+ //expect(sw.getChannel()).andReturn(channel);
+ //expect(channel.getRemoteAddress()).andReturn(null);
expect(sw.getCapabilities()).andReturn(0).anyTimes();
expect(sw.getBuffers()).andReturn(0).anyTimes();
@@ -403,21 +388,22 @@
IOFSwitch newsw = createMock(IOFSwitch.class);
expect(newsw.getId()).andReturn(0L).anyTimes();
expect(newsw.getStringId()).andReturn("00:00:00:00:00:00:00").anyTimes();
- expect(newsw.getConnectedSince()).andReturn(new Date());
- Channel channel2 = createMock(Channel.class);
- expect(newsw.getChannel()).andReturn(channel2);
- expect(channel2.getRemoteAddress()).andReturn(null);
- expect(newsw.getPorts()).andReturn(new ArrayList<OFPhysicalPort>()).times(2);
+ //Now we don't write to storage, these methods aren't called
+ //expect(newsw.getConnectedSince()).andReturn(new Date());
+ //Channel channel2 = createMock(Channel.class);
+ //expect(newsw.getChannel()).andReturn(channel2);
+ //expect(channel2.getRemoteAddress()).andReturn(null);
+ expect(newsw.getPorts()).andReturn(new ArrayList<OFPhysicalPort>());
expect(newsw.getCapabilities()).andReturn(0).anyTimes();
expect(newsw.getBuffers()).andReturn(0).anyTimes();
expect(newsw.getTables()).andReturn((byte)0).anyTimes();
expect(newsw.getActions()).andReturn(0).anyTimes();
controller.activeSwitches.put(0L, oldsw);
- replay(newsw, channel, channel2);
+ replay(newsw, channel);//, channel2);
controller.addSwitch(newsw);
- verify(newsw, channel, channel2);
+ verify(newsw, channel);//, channel2);
}
@Test
@@ -486,18 +472,6 @@
switchListener.nPortChanged == 1);
}
}
-
-
- private Map<String,Object> getFakeControllerIPRow(String id, String controllerId,
- String type, int number, String discoveredIP ) {
- HashMap<String, Object> row = new HashMap<String,Object>();
- row.put(Controller.CONTROLLER_INTERFACE_ID, id);
- row.put(Controller.CONTROLLER_INTERFACE_CONTROLLER_ID, controllerId);
- row.put(Controller.CONTROLLER_INTERFACE_TYPE, type);
- row.put(Controller.CONTROLLER_INTERFACE_NUMBER, number);
- row.put(Controller.CONTROLLER_INTERFACE_DISCOVERED_IP, discoveredIP);
- return row;
- }
/**
* Test notifications for controller node IP changes. This requires
@@ -511,6 +485,7 @@
*
* @throws Exception
*/
+ /*
@Test
public void testControllerNodeIPChanges() throws Exception {
class DummyHAListener implements IHAListener {
@@ -621,7 +596,9 @@
listener.do_assert(4, expectedCurMap, expectedAddedMap, expectedRemovedMap);
}
}
+ */
+ /*
@Test
public void testGetControllerNodeIPs() {
HashMap<String,String> expectedCurMap = new HashMap<String, String>();
@@ -637,40 +614,7 @@
assertEquals("expectedControllerNodeIPs is not as expected",
expectedCurMap, controller.getControllerNodeIPs());
}
-
- @Test
- public void testSetRoleNull() {
- try {
- controller.setRole(null);
- fail("Should have thrown an Exception");
- }
- catch (NullPointerException e) {
- //exptected
- }
- }
-
- @Test
- public void testSetRole() {
- controller.connectedSwitches.add(new OFSwitchImpl());
- RoleChanger roleChanger = createMock(RoleChanger.class);
- roleChanger.submitRequest(controller.connectedSwitches, Role.SLAVE);
- controller.roleChanger = roleChanger;
-
- assertEquals("Check that update queue is empty", 0,
- controller.updates.size());
-
- replay(roleChanger);
- controller.setRole(Role.SLAVE);
- verify(roleChanger);
-
- IUpdate upd = controller.updates.poll();
- assertNotNull("Check that update queue has an update", upd);
- assertTrue("Check that update is HARoleUpdate",
- upd instanceof Controller.HARoleUpdate);
- Controller.HARoleUpdate roleUpd = (Controller.HARoleUpdate)upd;
- assertSame(Role.MASTER, roleUpd.oldRole);
- assertSame(Role.SLAVE, roleUpd.newRole);
- }
+ */
@Test
public void testCheckSwitchReady() {
diff --git a/src/test/java/net/floodlightcontroller/core/module/FloodlightTestModuleLoader.java b/src/test/java/net/floodlightcontroller/core/module/FloodlightTestModuleLoader.java
index 716c7da..89a3591 100644
--- a/src/test/java/net/floodlightcontroller/core/module/FloodlightTestModuleLoader.java
+++ b/src/test/java/net/floodlightcontroller/core/module/FloodlightTestModuleLoader.java
@@ -6,10 +6,8 @@
import net.floodlightcontroller.core.test.MockFloodlightProvider;
import net.floodlightcontroller.core.test.MockThreadPoolService;
-import net.floodlightcontroller.counter.NullCounterStore;
import net.floodlightcontroller.devicemanager.internal.DefaultEntityClassifier;
import net.floodlightcontroller.devicemanager.test.MockDeviceManager;
-import net.floodlightcontroller.storage.memory.MemoryStorageSource;
import net.floodlightcontroller.topology.TopologyManager;
import org.slf4j.Logger;
@@ -19,16 +17,12 @@
protected final static Logger log = LoggerFactory.getLogger(FloodlightTestModuleLoader.class);
// List of default modules to use unless specified otherwise
- public static final Class<? extends IFloodlightModule> DEFAULT_STORAGE_SOURCE =
- MemoryStorageSource.class;
public static final Class<? extends IFloodlightModule> DEFAULT_FLOODLIGHT_PRPOVIDER =
MockFloodlightProvider.class;
public static final Class<? extends IFloodlightModule> DEFAULT_TOPOLOGY_PROVIDER =
TopologyManager.class;
public static final Class<? extends IFloodlightModule> DEFAULT_DEVICE_SERVICE =
MockDeviceManager.class;
- public static final Class<? extends IFloodlightModule> DEFAULT_COUNTER_STORE =
- NullCounterStore.class;
public static final Class<? extends IFloodlightModule> DEFAULT_THREADPOOL =
MockThreadPoolService.class;
public static final Class<? extends IFloodlightModule> DEFAULT_ENTITY_CLASSIFIER =
@@ -41,9 +35,7 @@
DEFAULT_MODULE_LIST = new ArrayList<Class<? extends IFloodlightModule>>();
DEFAULT_MODULE_LIST.add(DEFAULT_DEVICE_SERVICE);
DEFAULT_MODULE_LIST.add(DEFAULT_FLOODLIGHT_PRPOVIDER);
- DEFAULT_MODULE_LIST.add(DEFAULT_STORAGE_SOURCE);
DEFAULT_MODULE_LIST.add(DEFAULT_TOPOLOGY_PROVIDER);
- DEFAULT_MODULE_LIST.add(DEFAULT_COUNTER_STORE);
DEFAULT_MODULE_LIST.add(DEFAULT_THREADPOOL);
DEFAULT_MODULE_LIST.add(DEFAULT_ENTITY_CLASSIFIER);
diff --git a/src/test/java/net/floodlightcontroller/core/test/MockFloodlightProvider.java b/src/test/java/net/floodlightcontroller/core/test/MockFloodlightProvider.java
index 2a158e5..f3abae8 100644
--- a/src/test/java/net/floodlightcontroller/core/test/MockFloodlightProvider.java
+++ b/src/test/java/net/floodlightcontroller/core/test/MockFloodlightProvider.java
@@ -31,13 +31,11 @@
import net.floodlightcontroller.core.FloodlightContext;
import net.floodlightcontroller.core.IFloodlightProviderService;
-import net.floodlightcontroller.core.IHAListener;
-import net.floodlightcontroller.core.IInfoProvider;
+import net.floodlightcontroller.core.IListener.Command;
import net.floodlightcontroller.core.IOFMessageListener;
import net.floodlightcontroller.core.IOFSwitch;
import net.floodlightcontroller.core.IOFSwitchFilter;
import net.floodlightcontroller.core.IOFSwitchListener;
-import net.floodlightcontroller.core.IListener.Command;
import net.floodlightcontroller.core.IUpdate;
import net.floodlightcontroller.core.module.FloodlightModuleContext;
import net.floodlightcontroller.core.module.FloodlightModuleException;
@@ -61,7 +59,6 @@
protected final static Logger log = LoggerFactory.getLogger(MockFloodlightProvider.class);
protected ConcurrentMap<OFType, ListenerDispatcher<OFType,IOFMessageListener>> listeners;
protected List<IOFSwitchListener> switchListeners;
- protected List<IHAListener> haListeners;
protected Map<Long, IOFSwitch> switches;
protected BasicFactory factory;
@@ -73,7 +70,6 @@
IOFMessageListener>>();
switches = new ConcurrentHashMap<Long, IOFSwitch>();
switchListeners = new CopyOnWriteArrayList<IOFSwitchListener>();
- haListeners = new CopyOnWriteArrayList<IHAListener>();
factory = new BasicFactory();
}
@@ -256,55 +252,6 @@
}
@Override
- public void addInfoProvider(String type, IInfoProvider provider) {
- // TODO Auto-generated method stub
-
- }
-
- @Override
- public void removeInfoProvider(String type, IInfoProvider provider) {
- // TODO Auto-generated method stub
-
- }
-
- @Override
- public Map<String, Object> getControllerInfo(String type) {
- // TODO Auto-generated method stub
- return null;
- }
-
- @Override
- public void addHAListener(IHAListener listener) {
- haListeners.add(listener);
- }
-
- @Override
- public void removeHAListener(IHAListener listener) {
- haListeners.remove(listener);
- }
-
- @Override
- public Role getRole() {
- return null;
- }
-
- @Override
- public void setRole(Role role) {
-
- }
-
- /**
- * Dispatches a new role change notification
- * @param oldRole
- * @param newRole
- */
- public void dispatchRoleChanged(Role oldRole, Role newRole) {
- for (IHAListener rl : haListeners) {
- rl.roleChanged(oldRole, newRole);
- }
- }
-
- @Override
public String getControllerId() {
return "localhost";
}
diff --git a/src/test/java/net/floodlightcontroller/devicemanager/internal/DeviceManagerImplTest.java b/src/test/java/net/floodlightcontroller/devicemanager/internal/DeviceManagerImplTest.java
index eb84b42..b37efe3 100644
--- a/src/test/java/net/floodlightcontroller/devicemanager/internal/DeviceManagerImplTest.java
+++ b/src/test/java/net/floodlightcontroller/devicemanager/internal/DeviceManagerImplTest.java
@@ -18,7 +18,17 @@
package net.floodlightcontroller.devicemanager.internal;
-import static org.easymock.EasyMock.*;
+import static org.easymock.EasyMock.anyLong;
+import static org.easymock.EasyMock.anyShort;
+import static org.easymock.EasyMock.createMock;
+import static org.easymock.EasyMock.createStrictMock;
+import static org.easymock.EasyMock.expect;
+import static org.easymock.EasyMock.expectLastCall;
+import static org.easymock.EasyMock.isA;
+import static org.easymock.EasyMock.replay;
+import static org.easymock.EasyMock.reset;
+import static org.easymock.EasyMock.verify;
+import static org.junit.Assert.assertArrayEquals;
import java.util.ArrayList;
import java.util.Arrays;
@@ -31,18 +41,17 @@
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
-import static org.easymock.EasyMock.expectLastCall;
import net.floodlightcontroller.core.IFloodlightProviderService;
import net.floodlightcontroller.core.IOFSwitch;
import net.floodlightcontroller.core.module.FloodlightModuleContext;
import net.floodlightcontroller.core.test.MockFloodlightProvider;
import net.floodlightcontroller.core.test.MockThreadPoolService;
-import net.floodlightcontroller.devicemanager.IDeviceListener;
import net.floodlightcontroller.devicemanager.IDevice;
+import net.floodlightcontroller.devicemanager.IDeviceListener;
+import net.floodlightcontroller.devicemanager.IDeviceService;
import net.floodlightcontroller.devicemanager.IEntityClass;
import net.floodlightcontroller.devicemanager.IEntityClassifierService;
import net.floodlightcontroller.devicemanager.SwitchPort;
-import net.floodlightcontroller.devicemanager.IDeviceService;
import net.floodlightcontroller.devicemanager.SwitchPort.ErrorStatus;
import net.floodlightcontroller.devicemanager.internal.DeviceManagerImpl.ClassState;
import net.floodlightcontroller.devicemanager.test.MockEntityClassifier;
@@ -54,21 +63,18 @@
import net.floodlightcontroller.packet.IPv4;
import net.floodlightcontroller.restserver.IRestApiService;
import net.floodlightcontroller.restserver.RestApiServer;
-import net.floodlightcontroller.storage.IStorageSourceService;
-import net.floodlightcontroller.storage.memory.MemoryStorageSource;
import net.floodlightcontroller.test.FloodlightTestCase;
import net.floodlightcontroller.threadpool.IThreadPoolService;
import net.floodlightcontroller.topology.ITopologyService;
-import static org.junit.Assert.*;
import org.easymock.EasyMock;
import org.junit.Before;
import org.junit.Ignore;
import org.junit.Test;
import org.openflow.protocol.OFPacketIn;
+import org.openflow.protocol.OFPacketIn.OFPacketInReason;
import org.openflow.protocol.OFPhysicalPort;
import org.openflow.protocol.OFType;
-import org.openflow.protocol.OFPacketIn.OFPacketInReason;
import org.openflow.util.HexString;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -87,8 +93,6 @@
private byte[] testARPReplyPacket_3_Serialized;
MockFloodlightProvider mockFloodlightProvider;
DeviceManagerImpl deviceManager;
- MemoryStorageSource storageSource;
-
private IOFSwitch makeSwitchMock(long id) {
IOFSwitch mockSwitch = createMock(IOFSwitch.class);
@@ -116,8 +120,8 @@
DefaultEntityClassifier entityClassifier = new DefaultEntityClassifier();
fmc.addService(IDeviceService.class, deviceManager);
- storageSource = new MemoryStorageSource();
- fmc.addService(IStorageSourceService.class, storageSource);
+ //storageSource = new MemoryStorageSource();
+ //fmc.addService(IStorageSourceService.class, storageSource);
fmc.addService(IFloodlightProviderService.class, mockFloodlightProvider);
fmc.addService(IRestApiService.class, restApi);
@@ -125,11 +129,11 @@
fmc.addService(ITopologyService.class, topology);
tp.init(fmc);
restApi.init(fmc);
- storageSource.init(fmc);
+ //storageSource.init(fmc);
deviceManager.init(fmc);
entityClassifier.init(fmc);
- storageSource.startUp(fmc);
+ //storageSource.startUp(fmc);
deviceManager.startUp(fmc);
tp.startUp(fmc);
diff --git a/src/test/java/net/floodlightcontroller/forwarding/ForwardingTest.java b/src/test/java/net/floodlightcontroller/forwarding/ForwardingTest.java
index 3e262af..f29c319 100644
--- a/src/test/java/net/floodlightcontroller/forwarding/ForwardingTest.java
+++ b/src/test/java/net/floodlightcontroller/forwarding/ForwardingTest.java
@@ -17,7 +17,16 @@
package net.floodlightcontroller.forwarding;
-import static org.easymock.EasyMock.*;
+import static org.easymock.EasyMock.anyLong;
+import static org.easymock.EasyMock.anyObject;
+import static org.easymock.EasyMock.anyShort;
+import static org.easymock.EasyMock.capture;
+import static org.easymock.EasyMock.createMock;
+import static org.easymock.EasyMock.expect;
+import static org.easymock.EasyMock.expectLastCall;
+import static org.easymock.EasyMock.replay;
+import static org.easymock.EasyMock.reset;
+import static org.easymock.EasyMock.verify;
import java.util.ArrayList;
import java.util.Date;
@@ -31,13 +40,11 @@
import net.floodlightcontroller.core.module.FloodlightModuleContext;
import net.floodlightcontroller.core.test.MockFloodlightProvider;
import net.floodlightcontroller.core.test.MockThreadPoolService;
-import net.floodlightcontroller.devicemanager.internal.DefaultEntityClassifier;
-import net.floodlightcontroller.devicemanager.test.MockDeviceManager;
-import net.floodlightcontroller.counter.CounterStore;
-import net.floodlightcontroller.counter.ICounterStoreService;
import net.floodlightcontroller.devicemanager.IDevice;
import net.floodlightcontroller.devicemanager.IDeviceService;
import net.floodlightcontroller.devicemanager.IEntityClassifierService;
+import net.floodlightcontroller.devicemanager.internal.DefaultEntityClassifier;
+import net.floodlightcontroller.devicemanager.test.MockDeviceManager;
import net.floodlightcontroller.packet.Data;
import net.floodlightcontroller.packet.Ethernet;
import net.floodlightcontroller.packet.IPacket;
@@ -50,7 +57,6 @@
import net.floodlightcontroller.topology.ITopologyListener;
import net.floodlightcontroller.topology.ITopologyService;
import net.floodlightcontroller.topology.NodePortTuple;
-import net.floodlightcontroller.forwarding.Forwarding;
import org.easymock.Capture;
import org.easymock.CaptureType;
@@ -61,10 +67,10 @@
import org.openflow.protocol.OFMatch;
import org.openflow.protocol.OFMessage;
import org.openflow.protocol.OFPacketIn;
+import org.openflow.protocol.OFPacketIn.OFPacketInReason;
import org.openflow.protocol.OFPacketOut;
import org.openflow.protocol.OFPort;
import org.openflow.protocol.OFType;
-import org.openflow.protocol.OFPacketIn.OFPacketInReason;
import org.openflow.protocol.action.OFAction;
import org.openflow.protocol.action.OFActionOutput;
import org.openflow.util.HexString;
@@ -129,7 +135,6 @@
fmc.addService(IThreadPoolService.class, threadPool);
fmc.addService(ITopologyService.class, topology);
fmc.addService(IRoutingService.class, routingEngine);
- fmc.addService(ICounterStoreService.class, new CounterStore());
fmc.addService(IDeviceService.class, deviceManager);
fmc.addService(IEntityClassifierService.class, entityClassifier);
diff --git a/src/test/java/net/floodlightcontroller/staticflowentry/StaticFlowTests.java b/src/test/java/net/floodlightcontroller/staticflowentry/StaticFlowTests.java
index 186fd69..4f53342 100644
--- a/src/test/java/net/floodlightcontroller/staticflowentry/StaticFlowTests.java
+++ b/src/test/java/net/floodlightcontroller/staticflowentry/StaticFlowTests.java
@@ -1,49 +1,16 @@
package net.floodlightcontroller.staticflowentry;
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import org.easymock.Capture;
-import org.easymock.CaptureType;
-import org.junit.Test;
-import org.openflow.protocol.OFFlowMod;
-import org.openflow.protocol.OFMatch;
-import org.openflow.protocol.OFMessage;
-import org.openflow.protocol.OFPort;
-import org.openflow.protocol.action.OFAction;
-import org.openflow.protocol.action.OFActionOutput;
-import org.openflow.util.HexString;
-
-
-import net.floodlightcontroller.core.FloodlightContext;
-import net.floodlightcontroller.core.IFloodlightProviderService.Role;
-import net.floodlightcontroller.core.IOFSwitch;
-import net.floodlightcontroller.core.module.FloodlightModuleContext;
-import net.floodlightcontroller.core.module.FloodlightModuleException;
-import net.floodlightcontroller.core.test.MockFloodlightProvider;
-import net.floodlightcontroller.test.FloodlightTestCase;
-import net.floodlightcontroller.restserver.RestApiServer;
-import net.floodlightcontroller.staticflowentry.StaticFlowEntryPusher;
-import net.floodlightcontroller.storage.IStorageSourceService;
-import net.floodlightcontroller.storage.memory.MemoryStorageSource;
-import static net.floodlightcontroller.staticflowentry.StaticFlowEntryPusher.*;
-import static org.easymock.EasyMock.*;
-
+/*
public class StaticFlowTests extends FloodlightTestCase {
static String TestSwitch1DPID = "00:00:00:00:00:00:00:01";
static int TotalTestRules = 3;
- /***
- * Create TestRuleXXX and the corresponding FlowModXXX
- * for X = 1..3
- */
+ //
+ // Create TestRuleXXX and the corresponding FlowModXXX
+ // for X = 1..3
+ //
static Map<String,Object> TestRule1;
static OFFlowMod FlowMod1;
static {
@@ -332,3 +299,4 @@
assert(staticFlowEntryPusher.entriesFromStorage.containsValue(FlowMod3));
}
}
+*/
diff --git a/src/test/java/net/floodlightcontroller/storage/memory/tests/MemoryStorageTest.java b/src/test/java/net/floodlightcontroller/storage/memory/tests/MemoryStorageTest.java
deleted file mode 100644
index c250066..0000000
--- a/src/test/java/net/floodlightcontroller/storage/memory/tests/MemoryStorageTest.java
+++ /dev/null
@@ -1,41 +0,0 @@
-/**
-* Copyright 2011, Big Switch Networks, Inc.
-* Originally created by David Erickson, Stanford University
-*
-* Licensed under the Apache License, Version 2.0 (the "License"); you may
-* not use this file except in compliance with the License. You may obtain
-* a copy of the License at
-*
-* http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-* License for the specific language governing permissions and limitations
-* under the License.
-**/
-
-package net.floodlightcontroller.storage.memory.tests;
-
-import net.floodlightcontroller.core.module.FloodlightModuleContext;
-import net.floodlightcontroller.restserver.IRestApiService;
-import net.floodlightcontroller.restserver.RestApiServer;
-import net.floodlightcontroller.storage.memory.MemoryStorageSource;
-import net.floodlightcontroller.storage.tests.StorageTest;
-import org.junit.Before;
-
-public class MemoryStorageTest extends StorageTest {
-
- @Before
- public void setUp() throws Exception {
- storageSource = new MemoryStorageSource();
- restApi = new RestApiServer();
- FloodlightModuleContext fmc = new FloodlightModuleContext();
- fmc.addService(IRestApiService.class, restApi);
- restApi.init(fmc);
- storageSource.init(fmc);
- restApi.startUp(fmc);
- storageSource.startUp(fmc);
- super.setUp();
- }
-}
diff --git a/src/test/java/net/floodlightcontroller/storage/tests/StorageTest.java b/src/test/java/net/floodlightcontroller/storage/tests/StorageTest.java
deleted file mode 100644
index 29cc15b..0000000
--- a/src/test/java/net/floodlightcontroller/storage/tests/StorageTest.java
+++ /dev/null
@@ -1,743 +0,0 @@
-/**
-* Copyright 2011, Big Switch Networks, Inc.
-* Originally created by David Erickson, Stanford University
-*
-* Licensed under the Apache License, Version 2.0 (the "License"); you may
-* not use this file except in compliance with the License. You may obtain
-* a copy of the License at
-*
-* http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-* License for the specific language governing permissions and limitations
-* under the License.
-**/
-
-package net.floodlightcontroller.storage.tests;
-
-import static org.easymock.EasyMock.*;
-
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.Future;
-import java.util.concurrent.TimeoutException;
-import java.util.concurrent.TimeUnit;
-
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-
-import net.floodlightcontroller.restserver.RestApiServer;
-import net.floodlightcontroller.storage.CompoundPredicate;
-import net.floodlightcontroller.storage.IStorageExceptionHandler;
-import net.floodlightcontroller.storage.IPredicate;
-import net.floodlightcontroller.storage.IQuery;
-import net.floodlightcontroller.storage.IResultSet;
-import net.floodlightcontroller.storage.IRowMapper;
-import net.floodlightcontroller.storage.IStorageSourceListener;
-import net.floodlightcontroller.storage.NullValueStorageException;
-import net.floodlightcontroller.storage.OperatorPredicate;
-import net.floodlightcontroller.storage.RowOrdering;
-import net.floodlightcontroller.storage.nosql.NoSqlStorageSource;
-import net.floodlightcontroller.test.FloodlightTestCase;
-
-import org.junit.Test;
-
-public abstract class StorageTest extends FloodlightTestCase {
-
- protected NoSqlStorageSource storageSource;
- protected RestApiServer restApi;
-
- protected String PERSON_TABLE_NAME = "Person";
-
- protected String PERSON_SSN = "SSN";
- protected String PERSON_FIRST_NAME = "FirstName";
- protected String PERSON_LAST_NAME = "LastName";
- protected String PERSON_AGE = "Age";
- protected String PERSON_REGISTERED = "Registered";
-
- protected String[] PERSON_COLUMN_LIST = {PERSON_SSN, PERSON_FIRST_NAME, PERSON_LAST_NAME, PERSON_AGE, PERSON_REGISTERED};
-
- class Person {
- private String ssn;
- private String firstName;
- private String lastName;
- int age;
- boolean registered;
-
- public Person(String ssn, String firstName, String lastName, int age, boolean registered) {
- this.ssn = ssn;
- this.firstName = firstName;
- this.lastName = lastName;
- this.age = age;
- this.registered = registered;
- }
-
- public String getSSN() {
- return ssn;
- }
-
- public String getFirstName() {
- return firstName;
- }
-
- public String getLastName() {
- return lastName;
-
- }
-
- public int getAge() {
- return age;
- }
-
- public boolean isRegistered() {
- return registered;
- }
- }
-
- class PersonRowMapper implements IRowMapper {
- public Object mapRow(IResultSet resultSet) {
- String ssn = resultSet.getString(PERSON_SSN);
- String firstName = resultSet.getString(PERSON_FIRST_NAME);
- String lastName = resultSet.getString(PERSON_LAST_NAME);
- int age = resultSet.getInt(PERSON_AGE);
- boolean registered = resultSet.getBoolean(PERSON_REGISTERED);
- return new Person(ssn, firstName, lastName, age, registered);
- }
- }
-
- Object[][] PERSON_INIT_DATA = {
- {"111-11-1111", "John", "Smith", 40, true},
- {"222-22-2222", "Jim", "White", 24, false},
- {"333-33-3333", "Lisa", "Jones", 27, true},
- {"444-44-4444", "Susan", "Jones", 14, false},
- {"555-55-5555", "Jose", "Garcia", 31, true},
- {"666-66-6666", "Abigail", "Johnson", 35, false},
- {"777-77-7777", "Bjorn", "Borg", 55, true},
- {"888-88-8888", "John", "McEnroe", 53, false}
- };
-
- Map<String,Object> createPersonRowValues(Object[] personData) {
- Map<String,Object> rowValues = new HashMap<String,Object>();
- for (int i = 0; i < PERSON_COLUMN_LIST.length; i++) {
- rowValues.put(PERSON_COLUMN_LIST[i], personData[i]);
- }
- return rowValues;
- }
-
- public void insertPerson(Object[] personData) {
- Map<String,Object> rowValues = createPersonRowValues(personData);
- storageSource.insertRow(PERSON_TABLE_NAME, rowValues);
- }
-
- public void initPersons() {
- for (Object[] row: PERSON_INIT_DATA) {
- insertPerson(row);
- }
- }
-
- public void setUp() throws Exception {
- super.setUp();
- Set<String> indexedColumnNames = new HashSet<String>();
- indexedColumnNames.add(PERSON_LAST_NAME);
- storageSource.setExceptionHandler(null);
- storageSource.createTable(PERSON_TABLE_NAME, indexedColumnNames);
- storageSource.setTablePrimaryKeyName(PERSON_TABLE_NAME, PERSON_SSN);
- initPersons();
- }
-
- public void checkExpectedResults(IResultSet resultSet, String[] columnNameList, Object[][] expectedRowList) {
- boolean nextResult;
- for (Object[] expectedRow: expectedRowList) {
- nextResult = resultSet.next();
- assertEquals(nextResult,true);
- assertEquals(expectedRow.length, columnNameList.length);
- for (int i = 0; i < expectedRow.length; i++) {
- Object expectedObject = expectedRow[i];
- String columnName = columnNameList[i];
- if (expectedObject instanceof Boolean)
- assertEquals(((Boolean)expectedObject).booleanValue(), resultSet.getBoolean(columnName));
- else if (expectedObject instanceof Byte)
- assertEquals(((Byte)expectedObject).byteValue(), resultSet.getByte(columnName));
- else if (expectedObject instanceof Short)
- assertEquals(((Short)expectedObject).shortValue(), resultSet.getShort(columnName));
- else if (expectedObject instanceof Integer)
- assertEquals(((Integer)expectedObject).intValue(), resultSet.getInt(columnName));
- else if (expectedObject instanceof Long)
- assertEquals(((Long)expectedObject).longValue(), resultSet.getLong(columnName));
- else if (expectedObject instanceof Float)
- assertEquals(((Float)expectedObject).floatValue(), resultSet.getFloat(columnName), 0.00001);
- else if (expectedObject instanceof Double)
- assertEquals(((Double)expectedObject).doubleValue(), resultSet.getDouble(columnName), 0.00001);
- else if (expectedObject instanceof byte[])
- assertEquals((byte[])expectedObject, resultSet.getByteArray(columnName));
- else if (expectedObject instanceof String)
- assertEquals((String)expectedObject, resultSet.getString(columnName));
- else
- assertTrue("Unexpected column value type", false);
- }
- }
- nextResult = resultSet.next();
- assertEquals(nextResult,false);
- resultSet.close();
- }
-
- @Test
- public void testInsertRows() {
- IResultSet resultSet = storageSource.executeQuery(PERSON_TABLE_NAME, null, null, new RowOrdering(PERSON_SSN));
- checkExpectedResults(resultSet, PERSON_COLUMN_LIST, PERSON_INIT_DATA);
- }
-
- @Test
- public void testOperatorQuery() {
- Object[][] expectedResults = {
- {"John", "Smith", 40},
- {"Jim", "White", 24},
- };
- String[] columnList = {PERSON_FIRST_NAME,PERSON_LAST_NAME,PERSON_AGE};
- IResultSet resultSet = storageSource.executeQuery(PERSON_TABLE_NAME, columnList,
- new OperatorPredicate(PERSON_LAST_NAME, OperatorPredicate.Operator.GTE, "Sm"),
- new RowOrdering(PERSON_SSN));
- checkExpectedResults(resultSet, columnList, expectedResults);
- }
-
- @Test
- public void testAndQuery() {
- String[] columnList = {PERSON_FIRST_NAME,PERSON_LAST_NAME};
- Object[][] expectedResults = {
- {"Lisa", "Jones"},
- {"Susan", "Jones"},
- {"Jose", "Garcia"},
- {"Abigail", "Johnson"},
- {"John", "McEnroe"}
- };
- IResultSet resultSet = storageSource.executeQuery(PERSON_TABLE_NAME, columnList,
- new CompoundPredicate(CompoundPredicate.Operator.AND, false,
- new OperatorPredicate(PERSON_LAST_NAME, OperatorPredicate.Operator.GTE, "G"),
- new OperatorPredicate(PERSON_LAST_NAME, OperatorPredicate.Operator.LT, "N")
- ),
- new RowOrdering(PERSON_SSN));
- checkExpectedResults(resultSet, columnList, expectedResults);
- }
-
- @Test
- public void testOrQuery() {
- String[] columnList = {PERSON_FIRST_NAME,PERSON_LAST_NAME, PERSON_AGE};
- Object[][] expectedResults = {
- {"John", "Smith", 40},
- {"Lisa", "Jones", 27},
- {"Abigail", "Johnson", 35},
- {"Bjorn", "Borg", 55},
- {"John", "McEnroe", 53}
- };
- IResultSet resultSet = storageSource.executeQuery(PERSON_TABLE_NAME, columnList,
- new CompoundPredicate(CompoundPredicate.Operator.OR, false,
- new OperatorPredicate(PERSON_AGE, OperatorPredicate.Operator.GTE, 35),
- new OperatorPredicate(PERSON_FIRST_NAME, OperatorPredicate.Operator.EQ, "Lisa")
- ),
- new RowOrdering(PERSON_SSN));
- checkExpectedResults(resultSet, columnList, expectedResults);
-}
-
- @Test
- public void testCreateQuery() {
- String[] columnList = {PERSON_FIRST_NAME,PERSON_LAST_NAME};
- Object[][] expectedResults = {
- {"Lisa", "Jones"},
- {"Susan", "Jones"}
- };
- IPredicate predicate = new OperatorPredicate(PERSON_LAST_NAME, OperatorPredicate.Operator.EQ, "Jones");
- IQuery query = storageSource.createQuery(PERSON_TABLE_NAME, columnList, predicate, new RowOrdering(PERSON_SSN));
- IResultSet resultSet = storageSource.executeQuery(query);
- checkExpectedResults(resultSet, columnList, expectedResults);
- }
-
- @Test
- public void testQueryParameters() {
- String[] columnList = {PERSON_FIRST_NAME,PERSON_LAST_NAME, PERSON_AGE};
- Object[][] expectedResults = {
- {"John", "Smith", 40},
- {"Bjorn", "Borg", 55},
- {"John", "McEnroe", 53}
- };
- IPredicate predicate = new OperatorPredicate(PERSON_AGE, OperatorPredicate.Operator.GTE, "?MinimumAge?");
- IQuery query = storageSource.createQuery(PERSON_TABLE_NAME, columnList, predicate, new RowOrdering(PERSON_SSN));
- query.setParameter("MinimumAge", 40);
- IResultSet resultSet = storageSource.executeQuery(query);
- checkExpectedResults(resultSet, columnList, expectedResults);
- }
-
- private void checkPerson(Person person, Object[] expectedValues) {
- assertEquals(person.getSSN(), expectedValues[0]);
- assertEquals(person.getFirstName(), expectedValues[1]);
- assertEquals(person.getLastName(), expectedValues[2]);
- assertEquals(person.getAge(), expectedValues[3]);
- assertEquals(person.isRegistered(), expectedValues[4]);
- }
-
- @Test
- public void testRowMapper() {
- Object[][] expectedResults = {
- PERSON_INIT_DATA[2],
- PERSON_INIT_DATA[3]
- };
- IPredicate predicate = new OperatorPredicate(PERSON_LAST_NAME, OperatorPredicate.Operator.EQ, "Jones");
- IRowMapper rowMapper = new PersonRowMapper();
- Object[] personList = storageSource.executeQuery(PERSON_TABLE_NAME, null, predicate, new RowOrdering(PERSON_SSN), rowMapper);
- assertEquals(personList.length, 2);
- for (int i = 0; i < personList.length; i++)
- checkPerson((Person)personList[i], expectedResults[i]);
- }
-
- @Test
- public void testDeleteRowsDirect() {
-
- storageSource.deleteRow(PERSON_TABLE_NAME, "111-11-1111");
- storageSource.deleteRow(PERSON_TABLE_NAME, "222-22-2222");
- storageSource.deleteRow(PERSON_TABLE_NAME, "333-33-3333");
- storageSource.deleteRow(PERSON_TABLE_NAME, "444-44-4444");
-
- Object[][] expectedResults = {
- {"555-55-5555", "Jose", "Garcia", 31, true},
- {"666-66-6666", "Abigail", "Johnson", 35, false},
- {"777-77-7777", "Bjorn", "Borg", 55, true},
- {"888-88-8888", "John", "McEnroe", 53, false}
- };
- IResultSet resultSet = storageSource.executeQuery(PERSON_TABLE_NAME, PERSON_COLUMN_LIST, null, new RowOrdering(PERSON_SSN));
- checkExpectedResults(resultSet, PERSON_COLUMN_LIST, expectedResults);
- }
-
- @Test
- public void testDeleteRowsFromResultSet() {
- Object[][] expectedResults = {
- {"555-55-5555", "Jose", "Garcia", 31, true},
- {"666-66-6666", "Abigail", "Johnson", 35, false},
- {"777-77-7777", "Bjorn", "Borg", 55, true},
- {"888-88-8888", "John", "McEnroe", 53, false}
- };
-
- // Query once to delete the rows
- IResultSet resultSet = storageSource.executeQuery(PERSON_TABLE_NAME, PERSON_COLUMN_LIST, null, new RowOrdering(PERSON_SSN));
- for (int i = 0; i < 4; i++) {
- resultSet.next();
- resultSet.deleteRow();
- }
- resultSet.save();
- resultSet.close();
-
- // Now query again to verify that the rows were deleted
- resultSet = storageSource.executeQuery(PERSON_TABLE_NAME, PERSON_COLUMN_LIST, null, new RowOrdering(PERSON_SSN));
- checkExpectedResults(resultSet, PERSON_COLUMN_LIST, expectedResults);
- }
-
- @Test
- public void testDeleteMatchingRows() {
- Object[][] expectedResults = {
- {"111-11-1111", "John", "Smith", 40, true},
- {"777-77-7777", "Bjorn", "Borg", 55, true},
- {"888-88-8888", "John", "McEnroe", 53, false}
- };
- storageSource.deleteMatchingRows(PERSON_TABLE_NAME, new OperatorPredicate(PERSON_AGE, OperatorPredicate.Operator.LT, 40));
-
- // Now query again to verify that the rows were deleted
- IResultSet resultSet = storageSource.executeQuery(PERSON_TABLE_NAME, PERSON_COLUMN_LIST, null, new RowOrdering(PERSON_SSN));
- checkExpectedResults(resultSet, PERSON_COLUMN_LIST, expectedResults);
-
- storageSource.deleteMatchingRows(PERSON_TABLE_NAME, null);
-
- // Now query again to verify that all rows were deleted
- resultSet = storageSource.executeQuery(PERSON_TABLE_NAME, PERSON_COLUMN_LIST, null, new RowOrdering(PERSON_SSN));
- checkExpectedResults(resultSet, PERSON_COLUMN_LIST, new Object[0][]);
- }
-
- @Test
- public void testUpdateRowsDirect() {
-
- Object[][] expectedResults = {
- {"777-77-7777", "Tennis", "Borg", 60, true},
- {"888-88-8888", "Tennis", "McEnroe", 60, false}
- };
- Map<String,Object> updateValues = new HashMap<String,Object>();
- updateValues.put(PERSON_FIRST_NAME, "Tennis");
- updateValues.put(PERSON_AGE, 60);
-
- IPredicate predicate = new OperatorPredicate(PERSON_AGE, OperatorPredicate.Operator.GT, 50);
- IResultSet resultSet = storageSource.executeQuery(PERSON_TABLE_NAME, null, predicate, new RowOrdering(PERSON_SSN));
- while (resultSet.next()) {
- String key = resultSet.getString(PERSON_SSN);
- storageSource.updateRow(PERSON_TABLE_NAME, key, updateValues);
- }
- resultSet.close();
-
- resultSet = storageSource.executeQuery(PERSON_TABLE_NAME, PERSON_COLUMN_LIST, predicate, new RowOrdering(PERSON_SSN));
- checkExpectedResults(resultSet, PERSON_COLUMN_LIST, expectedResults);
- }
-
- @Test
- public void testUpdateRowsFromResultSet() {
-
- Object[][] expectedResults = {
- {"777-77-7777", "Tennis", "Borg", 60, true},
- {"888-88-8888", "Tennis", "McEnroe", 60, false}
- };
-
- IPredicate predicate = new OperatorPredicate(PERSON_AGE, OperatorPredicate.Operator.GT, 50);
- IResultSet resultSet = storageSource.executeQuery(PERSON_TABLE_NAME, null, predicate, null);
- while (resultSet.next()) {
- resultSet.setString(PERSON_FIRST_NAME, "Tennis");
- resultSet.setInt(PERSON_AGE, 60);
- }
- resultSet.save();
- resultSet.close();
-
- resultSet = storageSource.executeQuery(PERSON_TABLE_NAME, PERSON_COLUMN_LIST, predicate, new RowOrdering(PERSON_SSN));
- checkExpectedResults(resultSet, PERSON_COLUMN_LIST, expectedResults);
- }
-
- @Test
- public void testNullValues() {
-
- IPredicate predicate = new OperatorPredicate(PERSON_LAST_NAME, OperatorPredicate.Operator.EQ, "Jones");
- IResultSet resultSet = storageSource.executeQuery(PERSON_TABLE_NAME, null, predicate, new RowOrdering(PERSON_SSN));
- while (resultSet.next()) {
- resultSet.setNull(PERSON_FIRST_NAME);
- resultSet.setIntegerObject(PERSON_AGE, null);
- }
- resultSet.save();
- resultSet.close();
-
- resultSet = storageSource.executeQuery(PERSON_TABLE_NAME, null, predicate, new RowOrdering(PERSON_SSN));
- int count = 0;
- while (resultSet.next()) {
- boolean checkNull = resultSet.isNull(PERSON_FIRST_NAME);
- assertTrue(checkNull);
- String s = resultSet.getString(PERSON_FIRST_NAME);
- assertEquals(s, null);
- checkNull = resultSet.isNull(PERSON_AGE);
- assertTrue(checkNull);
- Integer intObj = resultSet.getIntegerObject(PERSON_AGE);
- assertEquals(intObj, null);
- Short shortObj = resultSet.getShortObject(PERSON_AGE);
- assertEquals(shortObj, null);
- boolean excThrown = false;
- try {
- resultSet.getInt(PERSON_AGE);
- }
- catch (NullValueStorageException exc) {
- excThrown = true;
- }
- assertTrue(excThrown);
- count++;
- }
- resultSet.close();
- assertEquals(count, 2);
-
- predicate = new OperatorPredicate(PERSON_FIRST_NAME, OperatorPredicate.Operator.EQ, null);
- resultSet = storageSource.executeQuery(PERSON_TABLE_NAME, null, predicate, new RowOrdering(PERSON_SSN));
- count = 0;
- while (resultSet.next()) {
- boolean checkNull = resultSet.isNull(PERSON_FIRST_NAME);
- assertTrue(checkNull);
- count++;
- checkNull = resultSet.isNull(PERSON_AGE);
- assertTrue(checkNull);
- }
- resultSet.close();
- assertEquals(count, 2);
- }
-
- @Test
- public void testInsertNotification() {
- // Set up the listener and record the expected notification
- IStorageSourceListener mockListener = createNiceMock(IStorageSourceListener.class);
- Set<Object> expectedKeys = new HashSet<Object>();
- expectedKeys.add("999-99-9999");
- mockListener.rowsModified(PERSON_TABLE_NAME, expectedKeys);
-
- replay(mockListener);
-
- // Now try it for real
- storageSource.addListener(PERSON_TABLE_NAME, mockListener);
-
- // Create a new person, which should trigger the listener
- Object[] newPerson = {"999-99-9999", "Serena", "Williams", 22, true};
- insertPerson(newPerson);
-
- verify(mockListener);
- }
-
- @Test
- public void testUpdateNotification() {
- // Set up the listener and record the expected notification
- IStorageSourceListener mockListener = createNiceMock(IStorageSourceListener.class);
- Set<Object> expectedKeys = new HashSet<Object>();
- expectedKeys.add("111-11-1111");
- mockListener.rowsModified(PERSON_TABLE_NAME, expectedKeys);
-
- replay(mockListener);
-
- // Now try it for real
- storageSource.addListener(PERSON_TABLE_NAME, mockListener);
-
- // Create a new person, which should trigger the listener
- Map<String,Object> updateValues = new HashMap<String,Object>();
- updateValues.put(PERSON_FIRST_NAME, "Tennis");
- storageSource.updateRow(PERSON_TABLE_NAME, "111-11-1111", updateValues);
-
- verify(mockListener);
- }
-
- @Test
- public void testDeleteNotification() {
- IStorageSourceListener mockListener = createNiceMock(IStorageSourceListener.class);
- Set<Object> expectedKeys = new HashSet<Object>();
- expectedKeys.add("111-11-1111");
- mockListener.rowsDeleted(PERSON_TABLE_NAME, expectedKeys);
-
- replay(mockListener);
-
- // Now try it for real
- storageSource.addListener(PERSON_TABLE_NAME, mockListener);
-
- // Create a new person, which should trigger the listener
- storageSource.deleteRow(PERSON_TABLE_NAME, "111-11-1111");
-
- verify(mockListener);
- }
-
- public void waitForFuture(Future<?> future) {
- try
- {
- future.get(10, TimeUnit.SECONDS);
- }
- catch (InterruptedException exc)
- {
- fail("Async storage operation interrupted");
- }
- catch (ExecutionException exc) {
- fail("Async storage operation failed");
- }
- catch (TimeoutException exc) {
- fail("Async storage operation timed out");
- }
- }
-
- @Test
- public void testAsyncQuery1() {
- Object[][] expectedResults = {
- {"John", "Smith", 40},
- {"Jim", "White", 24},
- };
- String[] columnList = {PERSON_FIRST_NAME,PERSON_LAST_NAME,PERSON_AGE};
- IPredicate predicate = new OperatorPredicate(PERSON_LAST_NAME, OperatorPredicate.Operator.GTE, "Sm");
- IQuery query = storageSource.createQuery(PERSON_TABLE_NAME, columnList, predicate, new RowOrdering(PERSON_SSN));
- Future<IResultSet> future = storageSource.executeQueryAsync(query);
- waitForFuture(future);
- try {
- IResultSet resultSet = future.get();
- checkExpectedResults(resultSet, columnList, expectedResults);
- }
- catch (Exception e) {
- fail("Exception thrown in async storage operation: " + e.toString());
- }
- }
-
- @Test
- public void testAsyncQuery2() {
- Object[][] expectedResults = {
- {"John", "Smith", 40},
- {"Jim", "White", 24},
- };
- String[] columnList = {PERSON_FIRST_NAME,PERSON_LAST_NAME,PERSON_AGE};
- IPredicate predicate = new OperatorPredicate(PERSON_LAST_NAME, OperatorPredicate.Operator.GTE, "Sm");
- Future<IResultSet> future = storageSource.executeQueryAsync(PERSON_TABLE_NAME,
- columnList, predicate, new RowOrdering(PERSON_SSN));
- waitForFuture(future);
- try {
- IResultSet resultSet = future.get();
- checkExpectedResults(resultSet, columnList, expectedResults);
- }
- catch (Exception e) {
- fail("Exception thrown in async storage operation: " + e.toString());
- }
- }
-
- @Test
- public void testAsyncQuery3() {
- Object[][] expectedResults = {
- PERSON_INIT_DATA[2],
- PERSON_INIT_DATA[3]
- };
- IPredicate predicate = new OperatorPredicate(PERSON_LAST_NAME, OperatorPredicate.Operator.EQ, "Jones");
- IRowMapper rowMapper = new PersonRowMapper();
- Future<Object[]> future = storageSource.executeQueryAsync(PERSON_TABLE_NAME,
- null, predicate, new RowOrdering(PERSON_SSN), rowMapper);
- waitForFuture(future);
- try {
- Object[] personList = future.get();
- assertEquals(personList.length, 2);
- for (int i = 0; i < personList.length; i++)
- checkPerson((Person)personList[i], expectedResults[i]);
- }
- catch (Exception e) {
- fail("Exception thrown in async storage operation: " + e.toString());
- }
- }
-
- @Test
- public void testAsyncException() {
- class TestExceptionHandler implements IStorageExceptionHandler {
- public int exceptionCount = 0;
- @Override
- public void handleException(Exception exception) {
- exceptionCount++;
- }
- }
- TestExceptionHandler exceptionHandler = new TestExceptionHandler();
- storageSource.setExceptionHandler(exceptionHandler);
-
- // Use an invalid table name, which should cause the storage API call to throw
- // an exception, which should then be converted to an ExecutionException.
- Future<IResultSet> future = storageSource.executeQueryAsync("InvalidTableName",
- null, null, null);
- try {
- future.get(10, TimeUnit.SECONDS);
- fail("Expected ExecutionException was not thrown");
- }
- catch (ExecutionException e) {
- assertTrue(true);
- }
- catch (Exception e) {
- fail("Exception thrown in async storage operation: " + e.toString());
- }
- assertEquals(exceptionHandler.exceptionCount, 1);
- }
-
- @Test
- public void testAsyncInsertRow() {
- Object[][] newPersonInfo = {{"999-99-9999", "Ellen", "Wilson", 40, true}};
- Map<String,Object> rowValues = createPersonRowValues(newPersonInfo[0]);
- Future<?> future = storageSource.insertRowAsync(PERSON_TABLE_NAME, rowValues);
- waitForFuture(future);
- try {
- IResultSet resultSet = storageSource.executeQuery(PERSON_TABLE_NAME, null, null, new RowOrdering(PERSON_SSN));
- Object[][] expectedPersons = Arrays.copyOf(PERSON_INIT_DATA, PERSON_INIT_DATA.length + newPersonInfo.length);
- System.arraycopy(newPersonInfo, 0, expectedPersons, PERSON_INIT_DATA.length, newPersonInfo.length);
- checkExpectedResults(resultSet, PERSON_COLUMN_LIST, expectedPersons);
- }
- catch (Exception e) {
- fail("Exception thrown in async storage operation: " + e.toString());
- }
- }
-
- @Test
- public void testAsyncUpdateRow() {
- Map<String,Object> updateValues = new HashMap<String,Object>();
- updateValues.put(PERSON_SSN, "777-77-7777");
- updateValues.put(PERSON_FIRST_NAME, "Tennis");
- updateValues.put(PERSON_AGE, 60);
-
- Future<?> future = storageSource.updateRowAsync(PERSON_TABLE_NAME, updateValues);
- waitForFuture(future);
-
- try {
- IResultSet resultSet = storageSource.getRow(PERSON_TABLE_NAME, "777-77-7777");
- Object[][] expectedPersons = {{"777-77-7777", "Tennis", "Borg", 60, true}};
- checkExpectedResults(resultSet, PERSON_COLUMN_LIST, expectedPersons);
- }
- catch (Exception e) {
- fail("Exception thrown in async storage operation: " + e.toString());
- }
- }
-
- @Test
- public void testAsyncUpdateRow2() {
- Map<String,Object> updateValues = new HashMap<String,Object>();
- updateValues.put(PERSON_FIRST_NAME, "Tennis");
- updateValues.put(PERSON_AGE, 60);
-
- Future<?> future = storageSource.updateRowAsync(PERSON_TABLE_NAME, "777-77-7777", updateValues);
- waitForFuture(future);
-
- try {
- IResultSet resultSet = storageSource.getRow(PERSON_TABLE_NAME, "777-77-7777");
- Object[][] expectedPersons = {{"777-77-7777", "Tennis", "Borg", 60, true}};
- checkExpectedResults(resultSet, PERSON_COLUMN_LIST, expectedPersons);
- }
- catch (Exception e) {
- fail("Exception thrown in async storage operation: " + e.toString());
- }
- }
-
- @Test
- public void testAsyncUpdateMatchingRows() {
- Map<String,Object> updateValues = new HashMap<String,Object>();
- updateValues.put(PERSON_FIRST_NAME, "Tennis");
- updateValues.put(PERSON_AGE, 60);
-
- IPredicate predicate = new OperatorPredicate(PERSON_SSN, OperatorPredicate.Operator.EQ, "777-77-7777");
- Future<?> future = storageSource.updateMatchingRowsAsync(PERSON_TABLE_NAME, predicate, updateValues);
- waitForFuture(future);
- try {
- IResultSet resultSet = storageSource.getRow(PERSON_TABLE_NAME, "777-77-7777");
- Object[][] expectedPersons = {{"777-77-7777", "Tennis", "Borg", 60, true}};
- checkExpectedResults(resultSet, PERSON_COLUMN_LIST, expectedPersons);
- }
- catch (Exception e) {
- fail("Exception thrown in async storage operation: " + e.toString());
- }
- }
-
- @Test
- public void testAsyncDeleteRow() {
- Future<?> future = storageSource.deleteRowAsync(PERSON_TABLE_NAME, "111-11-1111");
- waitForFuture(future);
- try {
- IResultSet resultSet = storageSource.executeQuery(PERSON_TABLE_NAME, null, null, new RowOrdering(PERSON_SSN));
- Object[][] expectedPersons = Arrays.copyOfRange(PERSON_INIT_DATA, 1, PERSON_INIT_DATA.length);
- checkExpectedResults(resultSet, PERSON_COLUMN_LIST, expectedPersons);
- }
- catch (Exception e) {
- fail("Exception thrown in async storage operation: " + e.toString());
- }
- }
-
- @Test
- public void testAsyncDeleteMatchingRows() {
- Future<?> future = storageSource.deleteMatchingRowsAsync(PERSON_TABLE_NAME, null);
- waitForFuture(future);
- try {
- IResultSet resultSet = storageSource.executeQuery(PERSON_TABLE_NAME, null, null, new RowOrdering(PERSON_SSN));
- checkExpectedResults(resultSet, PERSON_COLUMN_LIST, new Object[0][]);
- }
- catch (Exception e) {
- fail("Exception thrown in async storage operation: " + e.toString());
- }
- }
-
- @Test
- public void testAsyncSave() {
- // Get a result set and make some changes to it
- IResultSet resultSet = storageSource.executeQuery(PERSON_TABLE_NAME, null, null, new RowOrdering(PERSON_SSN));
- resultSet.next();
- resultSet.deleteRow();
- resultSet.next();
- resultSet.setString(PERSON_FIRST_NAME, "John");
-
- Future<?> future = storageSource.saveAsync(resultSet);
- waitForFuture(future);
- try {
- resultSet = storageSource.executeQuery(PERSON_TABLE_NAME, null, null, new RowOrdering(PERSON_SSN));
- Object[][] expectedPersons = Arrays.copyOfRange(PERSON_INIT_DATA, 1, PERSON_INIT_DATA.length);
- expectedPersons[0][1] = "John";
- checkExpectedResults(resultSet, PERSON_COLUMN_LIST, expectedPersons);
- }
- catch (Exception e) {
- fail("Exception thrown in async storage operation: " + e.toString());
- }
-
- }
-}
diff --git a/src/test/java/net/floodlightcontroller/topology/TopologyManagerTest.java b/src/test/java/net/floodlightcontroller/topology/TopologyManagerTest.java
index 06b48a2..280c336 100644
--- a/src/test/java/net/floodlightcontroller/topology/TopologyManagerTest.java
+++ b/src/test/java/net/floodlightcontroller/topology/TopologyManagerTest.java
@@ -1,12 +1,10 @@
package net.floodlightcontroller.topology;
import net.floodlightcontroller.core.IFloodlightProviderService;
-import net.floodlightcontroller.core.IFloodlightProviderService.Role;
import net.floodlightcontroller.core.module.FloodlightModuleContext;
import net.floodlightcontroller.core.test.MockThreadPoolService;
import net.floodlightcontroller.test.FloodlightTestCase;
import net.floodlightcontroller.threadpool.IThreadPoolService;
-import net.floodlightcontroller.topology.TopologyManager;
import net.onrc.onos.ofcontroller.linkdiscovery.ILinkDiscovery;
import org.junit.Before;
@@ -129,14 +127,4 @@
assertTrue(tm.getPortBroadcastDomainLinks().size()==0);
assertTrue(tm.getTunnelPorts().size()==0);
}
-
- @Test
- public void testHARoleChange() throws Exception {
- testBasic2();
- getMockFloodlightProvider().dispatchRoleChanged(null, Role.SLAVE);
- assert(tm.switchPorts.isEmpty());
- assert(tm.switchPortLinks.isEmpty());
- assert(tm.portBroadcastDomainLinks.isEmpty());
- assert(tm.tunnelLinks.isEmpty());
- }
}
diff --git a/src/test/java/net/onrc/onos/ofcontroller/linkdiscovery/internal/LinkDiscoveryManagerTest.java b/src/test/java/net/onrc/onos/ofcontroller/linkdiscovery/internal/LinkDiscoveryManagerTest.java
index c3647f2..ecb87cf 100644
--- a/src/test/java/net/onrc/onos/ofcontroller/linkdiscovery/internal/LinkDiscoveryManagerTest.java
+++ b/src/test/java/net/onrc/onos/ofcontroller/linkdiscovery/internal/LinkDiscoveryManagerTest.java
@@ -17,21 +17,17 @@
package net.onrc.onos.ofcontroller.linkdiscovery.internal;
-import static org.easymock.EasyMock.*;
+import static org.easymock.EasyMock.createNiceMock;
+import static org.easymock.EasyMock.expect;
+import static org.easymock.EasyMock.replay;
+import static org.easymock.EasyMock.verify;
-import java.util.Collections;
import java.util.ArrayList;
+import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
-import org.junit.Before;
-import org.junit.Test;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
import net.floodlightcontroller.core.IFloodlightProviderService;
-import net.floodlightcontroller.core.IFloodlightProviderService.Role;
import net.floodlightcontroller.core.IOFSwitch;
import net.floodlightcontroller.core.module.FloodlightModuleContext;
import net.floodlightcontroller.core.test.MockThreadPoolService;
@@ -39,8 +35,6 @@
import net.floodlightcontroller.restserver.RestApiServer;
import net.floodlightcontroller.routing.IRoutingService;
import net.floodlightcontroller.routing.Link;
-import net.floodlightcontroller.storage.IStorageSourceService;
-import net.floodlightcontroller.storage.memory.MemoryStorageSource;
import net.floodlightcontroller.test.FloodlightTestCase;
import net.floodlightcontroller.threadpool.IThreadPoolService;
import net.floodlightcontroller.topology.ITopologyService;
@@ -49,7 +43,11 @@
import net.onrc.onos.ofcontroller.linkdiscovery.ILinkDiscoveryListener;
import net.onrc.onos.ofcontroller.linkdiscovery.ILinkDiscoveryService;
import net.onrc.onos.ofcontroller.linkdiscovery.LinkInfo;
-import net.onrc.onos.ofcontroller.linkdiscovery.internal.LinkDiscoveryManager;
+
+import org.junit.Before;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
*
@@ -74,12 +72,6 @@
isSendLLDPsCalled = false;
isClearLinksCalled = false;
}
-
- @Override
- protected void clearAllLinks() {
- isClearLinksCalled = true;
- super.clearAllLinks();
- }
}
public LinkDiscoveryManager getTopology() {
@@ -106,7 +98,6 @@
cntx.addService(IRoutingService.class, routingEngine);
cntx.addService(ILinkDiscoveryService.class, ldm);
cntx.addService(ITopologyService.class, ldm);
- cntx.addService(IStorageSourceService.class, new MemoryStorageSource());
cntx.addService(IFloodlightProviderService.class, getMockFloodlightProvider());
restApi.init(cntx);
tp.init(cntx);
@@ -396,37 +387,4 @@
assertTrue(topology.portBroadcastDomainLinks.get(srcNpt).contains(lt));
assertTrue(topology.portBroadcastDomainLinks.get(dstNpt).contains(lt));
}
-
- @Test
- public void testHARoleChange() throws Exception {
- LinkDiscoveryManager topology = getTopology();
- IOFSwitch sw1 = createMockSwitch(1L);
- IOFSwitch sw2 = createMockSwitch(2L);
- replay(sw1, sw2);
- Link lt = new Link(1L, 2, 2L, 1);
- NodePortTuple srcNpt = new NodePortTuple(1L, 2);
- NodePortTuple dstNpt = new NodePortTuple(2L, 1);
- LinkInfo info = new LinkInfo(System.currentTimeMillis(),
- System.currentTimeMillis(), null,
- 0, 0);
- topology.addOrUpdateLink(lt, info);
-
- // check invariants hold
- assertNotNull(topology.switchLinks.get(lt.getSrc()));
- assertTrue(topology.switchLinks.get(lt.getSrc()).contains(lt));
- assertNotNull(topology.portLinks.get(srcNpt));
- assertTrue(topology.portLinks.get(srcNpt).contains(lt));
- assertNotNull(topology.portLinks.get(dstNpt));
- assertTrue(topology.portLinks.get(dstNpt).contains(lt));
- assertTrue(topology.links.containsKey(lt));
-
- // check that it clears from memory
- getMockFloodlightProvider().dispatchRoleChanged(null, Role.SLAVE);
- assertTrue(topology.switchLinks.isEmpty());
- getMockFloodlightProvider().dispatchRoleChanged(Role.SLAVE, Role.MASTER);
- // check that lldps were sent
- assertTrue(ldm.isSendLLDPsCalled);
- assertTrue(ldm.isClearLinksCalled);
- ldm.reset();
- }
}