Changed copyMininetFile of onosdriver to use the scp function in clidriver
Change-Id: I52a11e7f97e727777e3ea891b453d4741fc32d1e
diff --git a/TestON/tests/SAMPscaleTopo/Dependency/multiovs.py b/TestON/tests/SAMPscaleTopo/Dependency/multiovs.py
new file mode 100755
index 0000000..0545013
--- /dev/null
+++ b/TestON/tests/SAMPscaleTopo/Dependency/multiovs.py
@@ -0,0 +1,274 @@
+#!/usr/bin/python
+
+"""
+Multiple ovsdb OVS!!
+
+We scale up by creating multiple ovsdb instances,
+each of which is shared by several OVS switches
+
+The shell may also be shared among switch instances,
+which causes switch.cmd() and switch.popen() to be
+delegated to the ovsdb instance.
+
+"""
+
+from mininet.net import Mininet
+from mininet.node import Node, OVSSwitch
+from mininet.node import OVSBridge
+from mininet.link import Link, OVSIntf
+from mininet.topo import LinearTopo, SingleSwitchTopo
+from mininet.topolib import TreeTopo
+from mininet.log import setLogLevel, info
+from mininet.cli import CLI
+from mininet.clean import Cleanup, sh
+
+from itertools import groupby
+from operator import attrgetter
+
+class OVSDB( Node ):
+ "Namespace for an OVSDB instance"
+
+ privateDirs = [ '/etc/openvswitch',
+ '/var/run/openvswitch',
+ '/var/log/openvswitch' ]
+
+ # Control network
+ ipBase = '172.123.123.0/24'
+ cnet = None
+ nat = None
+
+ @classmethod
+ def startControlNet( cls ):
+ "Start control net if necessary and return it"
+ cnet = cls.cnet
+ if not cnet:
+ info( '### Starting control network\n' )
+ cnet = Mininet( ipBase=cls.ipBase )
+ cswitch = cnet.addSwitch( 'ovsbr0', cls=OVSBridge )
+ # Add NAT - note this can conflict with data network NAT
+ info( '### Adding NAT for control and data networks'
+ ' (use --nat flush=0 for data network)\n' )
+ cls.cnet = cnet
+ cls.nat = cnet.addNAT( 'ovsdbnat0')
+ cnet.start()
+ info( '### Control network started\n' )
+ return cnet
+
+ def stopControlNet( self ):
+ info( '\n### Stopping control network\n' )
+ cls = self.__class__
+ cls.cnet.stop()
+ info( '### Control network stopped\n' )
+
+ def addSwitch( self, switch ):
+ "Add a switch to our namespace"
+ # Attach first switch to cswitch!
+ self.switches.append( switch )
+
+ def delSwitch( self, switch ):
+ "Delete a switch from our namespace, and terminate if none left"
+ self.switches.remove( switch )
+ if not self.switches:
+ self.stopOVS()
+
+ ovsdbCount = 0
+
+ def startOVS( self ):
+ "Start new OVS instance"
+ self.cmd( 'ovsdb-tool create /etc/openvswitch/conf.db' )
+ self.cmd( 'ovsdb-server /etc/openvswitch/conf.db'
+ ' -vfile:emer -vfile:err -vfile:info'
+ ' --remote=punix:/var/run/openvswitch/db.sock '
+ ' --log-file=/var/log/openvswitch/ovsdb-server.log'
+ ' --pidfile=/var/run/openvswitch/ovsdb-server-mn.pid'
+ ' --no-chdir'
+ ' --detach' )
+
+ self.cmd( 'ovs-vswitchd unix:/var/run/openvswitch/db.sock'
+ ' -vfile:emer -vfile:err -vfile:info'
+ ' --mlockall --log-file=/var/log/openvswitch/ovs-vswitchd.log'
+ ' --pidfile=/var/run/openvswitch/ovs-vswitchd-mn.pid'
+ ' --no-chdir'
+ ' --detach' )
+
+ def stopOVS( self ):
+ self.cmd( 'kill',
+ '`cat /var/run/openvswitch/ovs-vswitchd-mn.pid`',
+ '`cat /var/run/openvswitch/ovsdb-server-mn.pid`' )
+ self.cmd( 'wait' )
+ self.__class__.ovsdbCount -= 1
+ if self.__class__.ovsdbCount <= 0:
+ self.stopControlNet()
+
+ @classmethod
+ def cleanUpOVS( cls ):
+ "Clean up leftover ovsdb-server/ovs-vswitchd processes"
+ info( '*** Shutting down extra ovsdb-server/ovs-vswitchd processes\n' )
+ sh( 'pkill -f mn.pid' )
+
+ def self( self, *args, **kwargs ):
+ "A fake constructor that sets params and returns self"
+ self.params = kwargs
+ return self
+
+ def __init__( self, **kwargs ):
+ cls = self.__class__
+ cls.ovsdbCount += 1
+ cnet = self.startControlNet()
+ # Create a new ovsdb namespace
+ self.switches = []
+ name = 'ovsdb%d' % cls.ovsdbCount
+ kwargs.update( inNamespace=True )
+ kwargs.setdefault( 'privateDirs', self.privateDirs )
+ super( OVSDB, self ).__init__( name, **kwargs )
+ ovsdb = cnet.addHost( name, cls=self.self, **kwargs )
+ link = cnet.addLink( ovsdb, cnet.switches[ 0 ] )
+ cnet.switches[ 0 ].attach( link.intf2 )
+ ovsdb.configDefault()
+ ovsdb.setDefaultRoute( 'via %s' % self.nat.intfs[ 0 ].IP() )
+ ovsdb.startOVS()
+
+
+# Install cleanup callback
+Cleanup.addCleanupCallback( OVSDB.cleanUpOVS )
+
+
+class OVSSwitchNS( OVSSwitch ):
+ "OVS Switch in shared OVSNS namespace"
+
+ isSetup = False
+
+ @classmethod
+ def batchStartup( cls, switches ):
+ result = []
+ for ovsdb, switchGroup in groupby( switches, attrgetter( 'ovsdb') ):
+ switchGroup = list( switchGroup )
+ info( '(%s)' % ovsdb )
+ result += OVSSwitch.batchStartup( switchGroup, run=ovsdb.cmd )
+ return result
+
+ @classmethod
+ def batchShutdown( cls, switches ):
+ result = []
+ for ovsdb, switchGroup in groupby( switches, attrgetter( 'ovsdb') ):
+ switchGroup = list( switchGroup )
+ info( '(%s)' % ovsdb )
+ for switch in switches:
+ if switch.pid == ovsdb.pid:
+ switch.pid = None
+ switch.shell = None
+ result += OVSSwitch.batchShutdown( switchGroup, run=ovsdb.cmd )
+ for switch in switchGroup:
+ switch.ovsdbFree()
+ return result
+
+ # OVSDB allocation
+ groupSize = 64
+ switchCount = 0
+ lastOvsdb = None
+
+ @classmethod
+ def ovsdbAlloc( cls, switch ):
+ "Allocate (possibly new) OVSDB instance for switch"
+ if cls.switchCount % switch.groupSize == 0:
+ cls.lastOvsdb = OVSDB()
+ cls.switchCount += 1
+ cls.lastOvsdb.addSwitch( switch )
+ return cls.lastOvsdb
+
+ def ovsdbFree( self ):
+ "Deallocate OVSDB instance for switch"
+ self.ovsdb.delSwitch( self )
+
+ def startShell( self, *args, **kwargs ):
+ "Start shell in shared OVSDB namespace"
+ ovsdb = self.ovsdbAlloc( self )
+ kwargs.update( mnopts='-da %d ' % ovsdb.pid )
+ self.ns = [ 'net' ]
+ self.ovsdb = ovsdb
+ self._waiting = False
+ if self.privateShell:
+ super( OVSSwitchNS, self ).startShell( *args, **kwargs )
+ else:
+ # Delegate methods and initialize local vars
+ attrs = ( 'cmd', 'cmdPrint', 'sendCmd', 'waitOutput',
+ 'monitor', 'write', 'read',
+ 'pid', 'shell', 'stdout',)
+ for attr in attrs:
+ setattr( self, attr, getattr( ovsdb, attr ) )
+ self.defaultIntf().updateIP()
+
+ @property
+ def waiting( self ):
+ "Optionally delegated to ovsdb"
+ return self._waiting if self.privateShell else self.ovsdb.waiting
+
+ @waiting.setter
+ def waiting( self, value ):
+ "Optionally delegated to ovsdb (read only!)"
+ if self.privateShell:
+ _waiting = value
+
+ def start( self, controllers ):
+ "Update controller IP addresses if necessary"
+ for controller in controllers:
+ if controller.IP() == '127.0.0.1' and not controller.intfs:
+ controller.intfs[ 0 ] = self.ovsdb.nat.intfs[ 0 ]
+ super( OVSSwitchNS, self ).start( controllers )
+
+ def stop( self, *args, **kwargs ):
+ "Stop and free OVSDB namespace if necessary"
+ self.ovsdbFree()
+
+ def terminate( self, *args, **kwargs ):
+ if self.privateShell:
+ super( OVSSwitchNS, self ).terminate( *args, **kwargs )
+ else:
+ self.pid = None
+ self.shell= None
+
+ def defaultIntf( self ):
+ return self.ovsdb.defaultIntf()
+
+ def __init__( self, *args, **kwargs ):
+ """n: number of OVS instances per OVSDB
+ shell: run private shell/bash process? (False)
+ If shell is shared/not private, cmd() and popen() are
+ delegated to the OVSDB instance, which is different than
+ regular OVSSwitch semantics!!"""
+ self.groupSize = kwargs.pop( 'n', self.groupSize )
+ self.privateShell = kwargs.pop( 'shell', False )
+ super( OVSSwitchNS, self ).__init__( *args, **kwargs )
+
+class OVSLinkNS( Link ):
+ "OVSLink that supports OVSSwitchNS"
+
+ def __init__( self, node1, node2, **kwargs ):
+ "See Link.__init__() for options"
+ self.isPatchLink = False
+ if ( isinstance( node1, OVSSwitch ) and
+ isinstance( node2, OVSSwitch ) and
+ getattr( node1, 'ovsdb', None ) ==
+ getattr( node2, 'ovsdb', None ) ):
+ self.isPatchLink = True
+ kwargs.update( cls1=OVSIntf, cls2=OVSIntf )
+ Link.__init__( self, node1, node2, **kwargs )
+
+switches = { 'ovsns': OVSSwitchNS, 'ovsm': OVSSwitchNS }
+
+links = { 'ovs': OVSLinkNS }
+
+def test():
+ "Test OVSNS switch"
+ setLogLevel( 'info' )
+ topo = TreeTopo( depth=4, fanout=2 )
+ net = Mininet( topo=topo, switch=OVSSwitchNS )
+ # Add connectivity to controller which is on LAN or in root NS
+ # net.addNAT().configDefault()
+ net.start()
+ CLI( net )
+ net.stop()
+
+
+if __name__ == '__main__':
+ test()
diff --git a/TestON/tests/SAMPscaleTopo/Dependency/scaleTopoFunction.py b/TestON/tests/SAMPscaleTopo/Dependency/scaleTopoFunction.py
index b634f83..95eb195 100644
--- a/TestON/tests/SAMPscaleTopo/Dependency/scaleTopoFunction.py
+++ b/TestON/tests/SAMPscaleTopo/Dependency/scaleTopoFunction.py
@@ -9,7 +9,7 @@
def __init__( self ):
self.default = ''
-def testTopology( main, topoFile='', args='', mnCmd='', clean=True ):
+def testTopology( main, topoFile='', args='', mnCmd='', timeout=300, clean=True ):
"""
Description:
This function combines different wrapper functions in this module
@@ -44,25 +44,17 @@
reinstallOnosResult = reinstallOnos( main )
# Starts topology
- startResult = startNewTopology( main, topoFile, args, mnCmd )
+ startResult = startNewTopology( main, topoFile, args, mnCmd, timeout=timeout )
# Gets list of switches in mininet
- assignSwitch( main )
+ #assignSwitch( main )
- # This function activates fwd app then does pingall as well as store
- # hosts data in a variable main.hostsData
- getHostsResult = getHostsData( main )
-
- # Compare Topology
- compareTopoResult = compareTopo( main )
-
- testTopoResult = startResult and topoObjectResult and \
- compareTopoResult and getHostsResult
+ testTopoResult = startResult and topoObjectResult
return testTopoResult
-def startNewTopology( main, topoFile='', args='', mnCmd='' ):
+def startNewTopology( main, topoFile='', args='', mnCmd='', timeout=900 ):
"""
Description:
This wrapper function starts new topology
@@ -97,7 +89,8 @@
result = main.Mininet1.startNet( topoFile=topoFile,
args=args,
- mnCmd=mnCmd )
+ mnCmd=mnCmd,
+ timeout=timeout)
return result
@@ -214,6 +207,41 @@
return switchList
+def connectivity( main, timeout=900, shortCircuit=True, acceptableFailed=20 ):
+ """
+ Use fwd app and pingall to discover all the hosts
+ """
+ activateResult = main.TRUE
+ appCheck = main.TRUE
+ getDataResult = main.TRUE
+ main.log.info( main.topoName + ": Activating reactive forwarding app " )
+ activateResult = main.CLIs[ 0 ].activateApp( "org.onosproject.fwd" )
+
+ if main.hostsData:
+ main.hostsData = {}
+ for i in range( main.numCtrls ):
+ appCheck = appCheck and main.CLIs[ i ].appToIDCheck()
+ if appCheck != main.TRUE:
+ main.log.warn( main.CLIs[ i ].apps() )
+ main.log.warn( main.CLIs[ i ].appIDs() )
+
+ time.sleep( main.fwdSleep )
+
+ # Discover hosts using pingall
+ pingResult = main.Mininet1.pingall( timeout=timeout,
+ shortCircuit=shortCircuit,
+ acceptableFailed=acceptableFailed )
+
+ main.log.info( main.topoName + ": Deactivate reactive forwarding app " )
+ activateResult = main.CLIs[ 0 ].deactivateApp( "org.onosproject.fwd" )
+ for i in range( main.numCtrls ):
+ appCheck = appCheck and main.CLIs[ i ].appToIDCheck()
+ if appCheck != main.TRUE:
+ main.log.warn( main.CLIs[ i ].apps() )
+ main.log.warn( main.CLIs[ i ].appIDs() )
+
+ return pingResult
+
def getHostsData( main ):
"""
Use fwd app and pingall to discover all the hosts
diff --git a/TestON/tests/SAMPscaleTopo/Dependency/spine.py b/TestON/tests/SAMPscaleTopo/Dependency/spine.py
new file mode 100644
index 0000000..4aa67b3
--- /dev/null
+++ b/TestON/tests/SAMPscaleTopo/Dependency/spine.py
@@ -0,0 +1,277 @@
+#!/usr/bin/python
+
+"""
+Custom topology for Mininet
+Author: kelvin@onlab.us
+"""
+from mininet.topo import Topo
+from mininet.net import Mininet
+from mininet.node import Host, RemoteController
+from mininet.node import Node
+from mininet.node import CPULimitedHost
+from mininet.link import TCLink
+from mininet.cli import CLI
+from mininet.log import setLogLevel
+from mininet.util import dumpNodeConnections
+from mininet.node import ( UserSwitch, OVSSwitch, IVSSwitch )
+import sys
+coreSwitches = {}
+spineSwitches = {}
+leafSwitches = {}
+endSwitches = {}
+allSwitches = {}
+# Counters for nodes
+totalSwitches = 0
+totalEndSwitches = 0
+totalHosts = 0
+endSwitchCount = 0 # total count of end switches in each row in the GUI
+class spineTopo( Topo ):
+
+ def __init__( self, **opts ):
+ "Create a topology."
+ Topo.__init__( self, **opts )
+
+ def build( self, s, l, c, e, h, **opts ):
+ """
+ s = number of spine switches
+ l = number of leaf switches
+ c = number of core
+ e = number of end switch
+ h = number of end host
+ """
+ global totalSwitches
+ global coreSwitches
+ global spineSwitches
+ global leafSwitches
+ global endSwitches
+ global totalEndSwitches
+ global totalHosts
+ global allSwitches
+ global endSwitchCount
+ endSwitchCount = e
+
+ print "Creating topology with", s,"spine", l,"leaf", c,"core",\
+ e,"end switches and",h,"host for each end switches"
+
+ self.addCore( c )
+ self.addSpine( s )
+ self.addLeaf( l )
+ self.linkLayer( coreSwitches, spineSwitches )
+ self.linkLayer( spineSwitches, leafSwitches )
+ self.linkEndSwitch( e, leafSwitches )
+ self.linkHosts( h )
+
+ allSwitches = coreSwitches
+ allSwitches.update( spineSwitches )
+ allSwitches.update( leafSwitches )
+ allSwitches.update( endSwitches )
+ deviceData = self.createSwitchDict()
+ self.genCfgJson( deviceData )
+
+
+ def addCore( self, numSwitch ):
+ global totalSwitches
+ global coreSwitches
+ for i in range( numSwitch ):
+ coreSwitches[ 'core' + str( i + 1 ) ] = self.addSwitch(
+ 's' + str( totalSwitches + 1 ) )
+ totalSwitches += 1
+
+ def addSpine( self, numSwitch ):
+ global totalSwitches
+ global spineSwitches
+ for i in range( numSwitch ):
+ spineSwitches[ 'spine' + str( i + 1 ) ] = self.addSwitch(
+ 's' + str( totalSwitches + 1 ) )
+ totalSwitches += 1
+
+ def addLeaf( self, numSwitch ):
+ global totalSwitches
+ global leafSwitches
+ for i in range( numSwitch ):
+ leafSwitches[ 'leaf' + str( i + 1 ) ] = self.addSwitch(
+ 's' + str( totalSwitches + 1 ) )
+ totalSwitches += 1
+
+ def addEnd( self ):
+ global totalSwitches
+ global totalEndSwitches
+ global endSwitches
+
+ endSwitches[ 'end' + str( totalEndSwitches + 1 ) ] = self.addSwitch(
+ 's' + str( totalSwitches + 1 ) )
+ totalSwitches += 1
+ totalEndSwitches += 1
+
+ return endSwitches[ 'end' + str( totalEndSwitches ) ]
+
+ def addEndHosts( self ):
+ global totalHosts
+
+ totalHosts += 1
+ host = self.addHost( 'h' + str( totalHosts ) )
+
+ return host
+
+
+ def linkHosts( self, numHosts ):
+ global endSwitches
+ switches = sorted( endSwitches.values() )
+
+ for sw in switches:
+ for i in xrange( numHosts ):
+ self.addLink( sw, self.addEndHosts() )
+
+
+ def linkLayer( self, topLayer, botLayer ):
+ """
+ Description:
+        The top layer is the upper layer in the spine topology, e.g. the top layer
+ can be the spine and the bottom layer is the leaf, another is the
+ core layer is the top layer and the spine is the bottom layer and
+ so on.
+ Required:
+ topLayer - Upper layer in the spine topology to be linked in the
+ layer below
+        botLayer - Layer that is below the upper layer to be linked at
+ """
+
+ topSwitches = sorted( topLayer.keys() )
+ botSwitches = sorted( botLayer.keys() )
+
+ for topSw in topSwitches:
+ for botSw in botSwitches:
+ self.addLink( topLayer.get( topSw ), botLayer.get( botSw ) )
+
+
+ def linkEndSwitch( self, numSwitch, leafLayer ):
+ global totalSwitches
+ global totalEndSwitches
+
+ leaf = sorted( leafLayer.values() )
+
+ for i in xrange( len( leafSwitches ) ):
+ if len( leafSwitches ) == 1:
+ for j in xrange( numSwitch ):
+ self.addLink( leaf[ 0 ], self.addEnd() )
+ break
+ if len( leafSwitches ) == 2:
+ for j in xrange( numSwitch ):
+ endSw = self.addEnd()
+ self.addLink( leaf[ i ], endSw )
+ self.addLink( leaf[ i + 1 ], endSw )
+ break
+ if i == ( len( leafSwitches ) - 1 ) and len( leafSwitches )%2:
+ for j in xrange( numSwitch ):
+ self.addLink( leaf[ i ], self.addEnd() )
+ break
+ if i == 0:
+ for j in xrange( numSwitch ):
+ endSw = self.addEnd()
+ self.addLink( leaf[ i ], endSw )
+ self.addLink( leaf[ i + 1 ], endSw )
+ continue
+ if i == 1:
+ continue
+ if i%2 == 0:
+ for j in xrange( numSwitch ):
+ endSw = self.addEnd()
+ self.addLink( leaf[ i ], endSw )
+ self.addLink( leaf[ i + 1 ], endSw )
+
+ def genCfgJson( self, deviceData ):
+ import json
+ configJson = {}
+ configJson[ "devices" ] = deviceData
+ with open( 'spine.json', 'w+' ) as outfile:
+ json.dump( configJson, outfile )
+ #cfgFile = open( "spine.json" , 'w+' )
+ #cfgFile.write( configJson )
+ #cfgFile.close()
+
+
+
+ def createSwitchDict( self ):
+ global allSwitches
+ global endSwitchCount
+
+ latitude = 0
+ longitude = 0
+ coreLong = -70
+ spineLong = -80
+ leafLong = -90
+ endLat = 30
+ rowCount = 0 # count of end switches or rows
+ colOffSet = 0 # off set for end switches; longitude
+
+ #for i in xrange( len( allSwitches ) ):
+ deviceList = []
+ deviceDict = {}
+ for sw in allSwitches:
+ tempSw = allSwitches.get( sw )
+ uri = str( "{0:0>16}".format( str( hex( int( tempSw[ 1: ] ) )\
+ ).split( "x" )[ 1 ] ) )
+ mac = str( "{0:0>12}".format( str( hex( int( tempSw[ 1: ] ) )\
+ ).split( "x" )[ 1 ] ) )
+
+ if "core" in sw:
+ latitude = 45
+ longitude = coreLong
+ coreLong += 2.5
+ elif "spine" in sw:
+ latitude = 40
+ longitude = spineLong
+ spineLong += 1.5
+ elif "leaf" in sw:
+ latitude = 35
+ longitude = leafLong
+ leafLong += 1.5
+ elif "end" in sw:
+ # Reset position and move to the right once every
+ # number of end switches
+ if rowCount == endSwitchCount:
+ colOffSet += 2.5
+ rowCount = 0
+ endLat = 30
+ longitude = -80 + colOffSet
+ latitude = endLat
+ endLat -= 1
+ rowCount += 1
+
+ tempItem = { "alias": allSwitches.get( sw ) ,
+ "uri": "of:" + uri,
+ "mac": mac,
+ "annotations": { "name": sw,
+ "latitude": latitude,
+ "longitude": longitude },
+ "type": "SWITCH" }
+ deviceList.append( tempItem )
+
+ return deviceList
+ #def createHostsJson( hostDict ):
+
+topos = { 'spine': ( lambda s=2, l=3, c=1, e=5, h=1: spineTopo( s=s,
+ l=l,
+ c=c,
+ e=e,
+ h=h) ) }
+
+# HERE THE CODE DEFINITION OF THE TOPOLOGY ENDS
+
+def setupNetwork():
+ "Create network"
+ topo = spineTopo()
+ #if controller_ip == '':
+ #controller_ip = '10.0.2.2';
+ # controller_ip = '127.0.0.1';
+ network = Mininet( topo=topo,
+ autoSetMacs=True,
+ controller=None )
+ network.start()
+ CLI( network )
+ network.stop()
+
+if __name__ == '__main__':
+ setLogLevel( 'info' )
+ #setLogLevel('debug')
+ setupNetwork()
diff --git a/TestON/tests/SAMPscaleTopo/Dependency/topo.py b/TestON/tests/SAMPscaleTopo/Dependency/topo.py
new file mode 100644
index 0000000..b44e3fc
--- /dev/null
+++ b/TestON/tests/SAMPscaleTopo/Dependency/topo.py
@@ -0,0 +1,100 @@
+"""
+ These functions can be used for topology comparisons
+"""
+
+import time
+import os
+import json
+
+def getAllDevices( main ):
+ """
+ Return a list containing the devices output from each ONOS node
+ """
+ devices = []
+ threads = []
+ for i in range( main.numCtrls ):
+ t = main.Thread( target=main.CLIs[i].devices,
+ name="devices-" + str( i ),
+ args=[ ] )
+ threads.append( t )
+ t.start()
+
+ for t in threads:
+ t.join()
+ devices.append( t.result )
+ return devices
+
+def getAllHosts( main ):
+ """
+ Return a list containing the hosts output from each ONOS node
+ """
+ hosts = []
+ ipResult = main.TRUE
+ threads = []
+ for i in range( main.numCtrls ):
+ t = main.Thread( target=main.CLIs[i].hosts,
+ name="hosts-" + str( i ),
+ args=[ ] )
+ threads.append( t )
+ t.start()
+
+ for t in threads:
+ t.join()
+ hosts.append( t.result )
+ return hosts
+
+def getAllPorts( main ):
+ """
+ Return a list containing the ports output from each ONOS node
+ """
+ ports = []
+ threads = []
+ for i in range( main.numCtrls ):
+ t = main.Thread( target=main.CLIs[i].ports,
+ name="ports-" + str( i ),
+ args=[ ] )
+ threads.append( t )
+ t.start()
+
+ for t in threads:
+ t.join()
+ ports.append( t.result )
+ return ports
+
+def getAllLinks( main ):
+ """
+ Return a list containing the links output from each ONOS node
+ """
+ links = []
+ threads = []
+ for i in range( main.numCtrls ):
+ t = main.Thread( target=main.CLIs[i].links,
+ name="links-" + str( i ),
+ args=[ ] )
+ threads.append( t )
+ t.start()
+
+ for t in threads:
+ t.join()
+ links.append( t.result )
+ return links
+
+def getAllClusters( main ):
+ """
+ Return a list containing the clusters output from each ONOS node
+ """
+ clusters = []
+ threads = []
+ for i in range( main.numCtrls ):
+ t = main.Thread( target=main.CLIs[i].clusters,
+ name="clusters-" + str( i ),
+ args=[ ] )
+ threads.append( t )
+ t.start()
+
+ for t in threads:
+ t.join()
+ clusters.append( t.result )
+ return clusters
+
+