Stability fixes for SR tests and small fixes for running ONOS in Docker
- Move checkOptions to the component driver (see the sketch after this message)
- Use karafTimeout for SR tests
- Fix some Docker options for the Mininet container
- Make sure the Mininet Docker container can resolve its own hostname
- Fix the config file IP format for the DHCP relay app
Change-Id: I85e8c52384e0fb478462fa9bbaf0b31a599b632b
(cherry picked from commit 521ecde3f8d28288303ea11129faa0cfd86b9bcd)
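For reference, a minimal sketch (outside the patch itself) of the checkOptions helper that this change moves onto the component base driver, and of how the CLI driver is expected to use it to default karaf_timeout. The class bodies are heavily trimmed, the applyOptions name is hypothetical, and the surrounding TestON plumbing is omitted:

    # Simplified sketch; the real classes live in TestON/drivers/component.py
    # and TestON/drivers/common/cli/onosclidriver.py.
    class Component( object ):
        def checkOptions( self, var, defaultVar ):
            # Return the .topo option value, or the default when it is missing/empty
            if var is None or var == "":
                return defaultVar
            return var

    class OnosCliDriver( Component ):
        def applyOptions( self, options ):  # hypothetical helper name for the sketch
            self.karafTimeout = options.get( "karaf_timeout" )
            # Default the Karaf SSH idle timeout to 2 hours (value is in milliseconds)
            self.karafTimeout = self.checkOptions( self.karafTimeout, 7200000 )
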
diff --git a/TestON/drivers/common/cli/networkdriver.py b/TestON/drivers/common/cli/networkdriver.py
index 968973d..c151aab 100755
--- a/TestON/drivers/common/cli/networkdriver.py
+++ b/TestON/drivers/common/cli/networkdriver.py
@@ -53,11 +53,6 @@
super( NetworkDriver, self ).__init__()
self.graph = Graph()
- def checkOptions( self, var, defaultVar ):
- if var is None or var == "":
- return defaultVar
- return var
-
def connect( self, **connectargs ):
"""
Creates ssh handle for the SDN network "bench".
@@ -635,7 +630,7 @@
output = dstHost.checkFilter()
main.log.debug( output )
if output:
- #TODO: parse output?
+ # TODO: parse output?
packets = dstHost.readPackets()
for packet in packets.splitlines():
main.log.debug( packet )
diff --git a/TestON/drivers/common/cli/onosclidriver.py b/TestON/drivers/common/cli/onosclidriver.py
index 7a0641e..72ed62c 100755
--- a/TestON/drivers/common/cli/onosclidriver.py
+++ b/TestON/drivers/common/cli/onosclidriver.py
@@ -58,15 +58,12 @@
self.handle = None
self.karafUser = None
self.karafPass = None
+ self.karafTimeout = None
+
self.dockerPrompt = None
self.graph = Graph()
super( OnosCliDriver, self ).__init__()
- def checkOptions( self, var, defaultVar ):
- if var is None or var == "":
- return defaultVar
- return var
-
def connect( self, **connectargs ):
"""
Creates ssh handle for ONOS cli.
@@ -85,10 +82,13 @@
self.karafPass = self.options[ key ]
elif key == "docker_prompt":
self.dockerPrompt = self.options[ key ]
+ elif key == "karaf_timeout":
+ self.karafTimeout = self.options[ key ]
self.home = self.checkOptions( self.home, "~/onos" )
self.karafUser = self.checkOptions( self.karafUser, self.user_name )
self.karafPass = self.checkOptions( self.karafPass, self.pwd )
self.dockerPrompt = self.checkOptions( self.dockerPrompt, "~/onos#" )
+ self.karafTimeout = self.checkOptions( self.karafTimeout, 7200000 )
for key in self.options:
if key == 'onosIp':
@@ -301,6 +301,7 @@
startCliCommand = "onos " + str( ONOSIp )
self.handle.sendline( startCliCommand )
tries = 0
+ setTimeout = False
while tries < 5:
i = self.handle.expect( [
self.karafPrompt,
@@ -308,14 +309,17 @@
pexpect.TIMEOUT ], onosStartTimeout )
if i == 0:
- main.log.info( str( ONOSIp ) + " CLI Started successfully" )
+ if setTimeout:
+ main.log.info( str( ONOSIp ) + " CLI Started successfully" )
+ return main.TRUE
if karafTimeout:
self.handle.sendline(
"config:property-set -p org.apache.karaf.shell\
sshIdleTimeout " +
- karafTimeout )
- self.handle.expect( self.karafPrompt )
- return main.TRUE
+ str( karafTimeout ) )
+ self.handle.expect( "closed by remote host" )
+ self.handle.sendline( startCliCommand )
+ setTimeout = True
elif i == 1:
main.log.info( str( ONOSIp ) + " CLI asking for password" )
main.log.debug( "Sending %s" % self.karafPass )
@@ -6739,8 +6743,6 @@
if not ready:
self.handle.expect( self.dockerPrompt )
time.sleep( 1 )
- #main.log.debug( "%s: It took %s tries for onos log folder to %sbe created" %
- # ( self.name, retries, "" if ready else "NOT " ) )
cmdList = []
cmdList.append( "apt-get update" )
diff --git a/TestON/drivers/common/cli/onosclusterdriver.py b/TestON/drivers/common/cli/onosclusterdriver.py
index 520250b..6ca91b7 100755
--- a/TestON/drivers/common/cli/onosclusterdriver.py
+++ b/TestON/drivers/common/cli/onosclusterdriver.py
@@ -96,11 +96,6 @@
self.nodes = []
super( OnosClusterDriver, self ).__init__()
- def checkOptions( self, var, defaultVar ):
- if var is None or var == "":
- return defaultVar
- return var
-
def connect( self, **connectargs ):
"""
Creates ssh handle for ONOS "bench".
@@ -126,10 +121,16 @@
self.useDocker = "True" == self.options[ key ]
elif key == "docker_prompt":
self.dockerPrompt = self.options[ key ]
+ elif key == "web_user":
+ self.webUser = self.options[ key ]
+ elif key == "web_pass":
+ self.webPass = self.options[ key ]
self.home = self.checkOptions( self.home, "~/onos" )
self.karafUser = self.checkOptions( self.karafUser, self.user_name )
self.karafPass = self.checkOptions( self.karafPass, self.pwd )
+ self.webUser = self.checkOptions( self.webUser, "onos" )
+ self.webPass = self.checkOptions( self.webPass, "rocks" )
prefix = self.checkOptions( prefix, "ONOS" )
self.useDocker = self.checkOptions( self.useDocker, False )
self.dockerPrompt = self.checkOptions( self.dockerPrompt, "~/onos#" )
@@ -250,7 +251,6 @@
main.componentDictionary[ name ][ 'host' ] = host
main.componentDictionary[name]['type'] = "OnosCliDriver"
main.componentDictionary[name]['connect_order'] = str( int( main.componentDictionary[name]['connect_order'] ) + 1 )
- main.log.debug( main.componentDictionary[name] )
def createCliComponent( self, name, host ):
"""
diff --git a/TestON/drivers/common/cli/onosdriver.py b/TestON/drivers/common/cli/onosdriver.py
index 6b13e3a..18540f0 100755
--- a/TestON/drivers/common/cli/onosdriver.py
+++ b/TestON/drivers/common/cli/onosdriver.py
@@ -39,6 +39,11 @@
"""
self.name = None
self.home = None
+ self.maxNodes = None
+ self.karafUser = None
+ self.karafPass = None
+ self.webUser = None
+ self.webPass = None
self.handle = None
self.nicAddr = None
super( OnosDriver, self ).__init__()
@@ -55,26 +60,26 @@
for key in connectargs:
vars( self )[ key ] = connectargs[ key ]
- self.home = "~/onos"
- for key in self.options:
- if key == "home":
- self.home = self.options[ 'home' ]
- break
- if self.home is None or self.home == "":
- self.home = "~/onos"
-
self.name = self.options[ 'name' ]
# The 'nodes' tag is optional and it is not required in .topo file
for key in self.options:
- if key == "nodes":
+ if key == "home":
+ self.home = self.options[ 'home' ]
+ elif key == "nodes":
# Maximum number of ONOS nodes to run, if there is any
self.maxNodes = int( self.options[ 'nodes' ] )
- break
- self.maxNodes = None
+ elif key == "web_user":
+ self.webUser = self.options[ key ]
+ elif key == "web_pass":
+ self.webPass = self.options[ key ]
- if self.maxNodes is None or self.maxNodes == "":
- self.maxNodes = 100
+ self.home = self.checkOptions( self.home, "~/onos" )
+ self.maxNodes = self.checkOptions( self.maxNodes, 100 )
+ self.karafUser = self.checkOptions( self.karafUser, self.user_name )
+ self.karafPass = self.checkOptions( self.karafPass, self.pwd )
+ self.webUser = self.checkOptions( self.webUser, "onos" )
+ self.webPass = self.checkOptions( self.webPass, "rocks" )
# Grabs all OC environment variables based on max number of nodes
self.onosIps = {} # Dictionary of all possible ONOS ip
@@ -826,6 +831,8 @@
# on here.
appString = "export ONOS_APPS=" + appString
onosGroup = "export ONOS_GROUP=" + onosUser
+ onosWebUser = "export ONOS_WEB_USER=" + self.webUser
+ onosWebPass = "export ONOS_WEB_PASS=" + self.webPass
onosUser = "export ONOS_USER=" + onosUser
if useSSH:
onosUseSSH = "export ONOS_USE_SSH=true"
@@ -871,6 +878,8 @@
cellFile.write( appString + "\n" )
cellFile.write( onosGroup + "\n" )
cellFile.write( onosUser + "\n" )
+ cellFile.write( onosWebUser + "\n" )
+ cellFile.write( onosWebPass + "\n" )
if useSSH:
cellFile.write( onosUseSSH + "\n" )
cellFile.close()
@@ -2718,10 +2727,15 @@
for ip in onosIPs:
cmd += " " + str( ip )
self.handle.sendline( cmd )
- self.handle.expect( self.prompt, timeout=timeout )
- handle = self.handle.before
- main.log.debug( handle )
+ i = 0
+ while i == 0:
+ i = self.handle.expect( [ "Password", self.prompt ], timeout=timeout )
+ handle = self.handle.before
+ main.log.debug( "%s: %s" % ( self.name, handle ) )
+ if i == 0:
+ self.handle.sendline( self.pwd )
assert handle is not None, "Error in sendline"
+ assert "The requested URL returned error" not in handle, handle
assert "Command not found:" not in handle, handle
assert "Exception:" not in handle, handle
# Rename and move diags file to dstDir from /tmp
@@ -2730,7 +2744,7 @@
self.handle.sendline( "mv /tmp/onos-diags.tar.gz " + str( dstDir ) + "onos-diags" + str( suffix ) + ".tar.gz" )
self.handle.expect( self.prompt )
handle = self.handle.before
- main.log.debug( handle )
+ main.log.debug( "%s: %s" % ( self.name, handle ) )
assert handle is not None, "Error in sendline"
assert "No such file or directory" not in handle, handle
return main.TRUE
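The onosdriver changes above also export the ONOS web credentials into the generated cell file. A minimal sketch of just that part, using the defaults set in connect() ("onos"/"rocks") and a hypothetical cell-file path:

    # Sketch: only the two new lines added to the generated cell file are shown.
    web_user = "onos"    # default; overridden by the web_user .topo option
    web_pass = "rocks"   # default; overridden by the web_pass .topo option
    with open( "/tmp/productionCell", "w" ) as cell_file:  # hypothetical path
        cell_file.write( "export ONOS_WEB_USER=" + web_user + "\n" )
        cell_file.write( "export ONOS_WEB_PASS=" + web_pass + "\n" )
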
diff --git a/TestON/drivers/component.py b/TestON/drivers/component.py
index c3d3297..3585244 100644
--- a/TestON/drivers/component.py
+++ b/TestON/drivers/component.py
@@ -59,6 +59,11 @@
else:
raise error
+ def checkOptions( self, var, defaultVar ):
+ if var is None or var == "":
+ return defaultVar
+ return var
+
def connect( self ):
vars( main )[ self.name + 'log' ] = logging.getLogger( self.name )
diff --git a/TestON/tests/USECASE/SegmentRouting/SRBridging/SRBridging.params.stratum b/TestON/tests/USECASE/SegmentRouting/SRBridging/SRBridging.params.stratum
index 9b1a5c9..fdbf848 100644
--- a/TestON/tests/USECASE/SegmentRouting/SRBridging/SRBridging.params.stratum
+++ b/TestON/tests/USECASE/SegmentRouting/SRBridging/SRBridging.params.stratum
@@ -26,7 +26,7 @@
</DEPENDENCY>
<MN_DOCKER>
- <args>--privileged --net host --rm -v topo:/topo -v ~/mininet/custom:/home/root/mininet/custom -v /var/run/openvswitch/:/var/run/openvswitch/ -v /tmp/mn-stratum:/tmp -v /tmp/mn_conf/:/home/root --hostname mn-stratum -v /etc/network/interfaces:/etc/network/interfaces -it -d</args>
+ <args>--privileged --net host --rm -v topo:/topo -v ~/mininet/custom:/home/root/mininet/custom -v /var/run/openvswitch/:/var/run/openvswitch/ -v /tmp/mn-stratum:/tmp -v /tmp/mn_conf/:/home/root/config --hostname mn-stratum -v /etc/network/interfaces:/etc/network/interfaces -it -d</args>
<name>trellis_mininet</name>
<home>/home/root/</home>
</MN_DOCKER>
@@ -45,7 +45,7 @@
<ENV>
<cellName>productionCell</cellName>
- <cellApps>drivers,fpm,netcfghostprovider,drivers.bmv2,pipelines.fabric,segmentrouting,t3</cellApps>
+ <cellApps>drivers,fpm,lldpprovider,hostprovider,netcfghostprovider,drivers.bmv2,pipelines.fabric,segmentrouting,t3</cellApps>
</ENV>
<ONOS_Configuration>
diff --git a/TestON/tests/USECASE/SegmentRouting/SRBridging/dependencies/SRBridgingTest.py b/TestON/tests/USECASE/SegmentRouting/SRBridging/dependencies/SRBridgingTest.py
index 984ba6b..a617716 100644
--- a/TestON/tests/USECASE/SegmentRouting/SRBridging/dependencies/SRBridgingTest.py
+++ b/TestON/tests/USECASE/SegmentRouting/SRBridging/dependencies/SRBridgingTest.py
@@ -41,60 +41,64 @@
main.switchType = "ovs"
def runTest( self, main, test_idx, topology, onosNodes, description, vlan = [] ):
- skipPackage = False
- init = False
- if not hasattr( main, 'apps' ):
- init = True
- run.initTest( main )
- # Skip onos packaging if the clusrer size stays the same
- if not init and onosNodes == main.Cluster.numCtrls:
- skipPackage = True
+ try:
+ skipPackage = False
+ init = False
+ if not hasattr( main, 'apps' ):
+ init = True
+ run.initTest( main )
+            # Skip ONOS packaging if the cluster size stays the same
+ if not init and onosNodes == main.Cluster.numCtrls:
+ skipPackage = True
- main.case( '%s, with %s, %s switches and %d ONOS instance%s' %
- ( description, self.topo[ topology ][ 3 ], main.switchType, onosNodes, 's' if onosNodes > 1 else '' ) )
+ main.case( '%s, with %s, %s switches and %d ONOS instance%s' %
+ ( description, self.topo[ topology ][ 3 ], main.switchType, onosNodes, 's' if onosNodes > 1 else '' ) )
- main.cfgName = 'CASE%01d%01d' % ( test_idx / 10, ( ( test_idx - 1 ) % 10 ) % 4 + 1 )
- main.Cluster.setRunningNode( onosNodes )
- run.installOnos( main, skipPackage=skipPackage, cliSleep=5 )
- if main.useBmv2:
- switchPrefix = main.params[ 'DEPENDENCY' ].get( 'switchPrefix', "bmv2" )
- # Translate configuration file from OVS-OFDPA to BMv2 driver
- translator.bmv2ToOfdpa( main ) # Try to cleanup if switching between switch types
- translator.ofdpaToBmv2( main, switchPrefix=switchPrefix )
- else:
- translator.bmv2ToOfdpa( main )
- suf = main.params.get( 'jsonFileSuffix', None)
- if suf:
- run.loadJson( main, suffix=suf )
- else:
- run.loadJson( main )
- run.loadChart( main )
- if hasattr( main, 'Mininet1' ):
- run.mnDockerSetup( main ) # optionally create and setup docker image
-
- # Run the test with Mininet
- mininet_args = ' --spine=%d --leaf=%d' % ( self.topo[ topology ][ 0 ], self.topo[ topology ][ 1 ] )
- if self.topo[ topology ][ 2 ]:
- mininet_args += ' --dual-homed'
- if len( vlan ) > 0 :
- mininet_args += ' --vlan=%s' % ( ','.join( ['%d' % vlanId for vlanId in vlan ] ) )
+ main.cfgName = 'CASE%01d%01d' % ( test_idx / 10, ( ( test_idx - 1 ) % 10 ) % 4 + 1 )
+ main.Cluster.setRunningNode( onosNodes )
+ run.installOnos( main, skipPackage=skipPackage, cliSleep=5 )
if main.useBmv2:
- mininet_args += ' --switch %s' % main.switchType
- main.log.info( "Using %s switch" % main.switchType )
+ switchPrefix = main.params[ 'DEPENDENCY' ].get( 'switchPrefix', "bmv2" )
+ # Translate configuration file from OVS-OFDPA to BMv2 driver
+ translator.bmv2ToOfdpa( main ) # Try to cleanup if switching between switch types
+ translator.ofdpaToBmv2( main, switchPrefix=switchPrefix )
+ else:
+ translator.bmv2ToOfdpa( main )
+ suf = main.params.get( 'jsonFileSuffix', None)
+ if suf:
+ run.loadJson( main, suffix=suf )
+ else:
+ run.loadJson( main )
+ run.loadChart( main )
+ if hasattr( main, 'Mininet1' ):
+ run.mnDockerSetup( main ) # optionally create and setup docker image
- run.startMininet( main, 'trellis_fabric.py', args=mininet_args )
+ # Run the test with Mininet
+ mininet_args = ' --spine=%d --leaf=%d' % ( self.topo[ topology ][ 0 ], self.topo[ topology ][ 1 ] )
+ if self.topo[ topology ][ 2 ]:
+ mininet_args += ' --dual-homed'
+ if len( vlan ) > 0 :
+ mininet_args += ' --vlan=%s' % ( ','.join( ['%d' % vlanId for vlanId in vlan ] ) )
+ if main.useBmv2:
+ mininet_args += ' --switch %s' % main.switchType
+ main.log.info( "Using %s switch" % main.switchType )
- else:
- # Run the test with physical devices
- run.connectToPhysicalNetwork( main )
+ run.startMininet( main, 'trellis_fabric.py', args=mininet_args )
- run.checkFlows( main, minFlowCount=self.topo[ topology ][ 5 if main.useBmv2 else 4 ] * self.topo[ topology ][ 1 ], sleep=5 )
- if main.useBmv2:
- leaf_dpid = [ "device:bmv2:leaf%d" % ( ls + 1 ) for ls in range( self.topo[ topology ][ 1 ]) ]
- else:
- leaf_dpid = [ "of:%016d" % ( ls + 1 ) for ls in range( self.topo[ topology ][ 1 ] ) ]
- for dpid in leaf_dpid:
- run.checkFlowsByDpid( main, dpid, self.topo[ topology ][ 5 if main.useBmv2 else 4 ], sleep=5 )
- run.pingAll( main )
+ else:
+ # Run the test with physical devices
+ run.connectToPhysicalNetwork( main )
+
+ run.checkFlows( main, minFlowCount=self.topo[ topology ][ 5 if main.useBmv2 else 4 ] * self.topo[ topology ][ 1 ], sleep=5 )
+ if main.useBmv2:
+ leaf_dpid = [ "device:bmv2:leaf%d" % ( ls + 1 ) for ls in range( self.topo[ topology ][ 1 ]) ]
+ else:
+ leaf_dpid = [ "of:%016d" % ( ls + 1 ) for ls in range( self.topo[ topology ][ 1 ] ) ]
+ for dpid in leaf_dpid:
+ run.checkFlowsByDpid( main, dpid, self.topo[ topology ][ 5 if main.useBmv2 else 4 ], sleep=5 )
+ run.pingAll( main )
+ except Exception as e:
+ main.log.exception( "Error in runTest" )
+ main.skipCase( result="FAIL", msg=e )
run.cleanup( main )
diff --git a/TestON/tests/USECASE/SegmentRouting/SRClusterRestart/dependencies/SRClusterRestartFuncs.py b/TestON/tests/USECASE/SegmentRouting/SRClusterRestart/dependencies/SRClusterRestartFuncs.py
index f36719f..7ddd2aa 100644
--- a/TestON/tests/USECASE/SegmentRouting/SRClusterRestart/dependencies/SRClusterRestartFuncs.py
+++ b/TestON/tests/USECASE/SegmentRouting/SRClusterRestart/dependencies/SRClusterRestartFuncs.py
@@ -31,37 +31,42 @@
self.topo[ '4x4' ] = ( 4, 4, '--leaf=4 --spine=4', '4x4 Leaf-spine' )
def runTest( self, main, caseNum, numNodes, Topo, minFlow, testing, killList=[ 0, 1, 2 ] ):
- description = "Cluster Restart test with " + self.topo[ Topo ][ 3 ]
- caseTitle = 'CASE{}_'.format( caseNum ) + testing
- main.case( description )
- if not hasattr( main, 'apps' ):
- run.initTest( main )
- main.cfgName = Topo
- main.Cluster.setRunningNode( numNodes )
- run.installOnos( main )
- run.loadJson( main )
- run.loadChart( main )
- if hasattr( main, 'Mininet1' ):
- run.startMininet( main, 'cord_fabric.py', args=self.topo[ Topo ][ 2 ] )
- else:
- # Run the test with physical devices
- # TODO: connect TestON to the physical network
- pass
- # pre-configured routing and bridging test
- run.checkFlows( main, minFlowCount=minFlow )
- run.pingAll( main )
- switch = '{}'.format( self.topo[ Topo ][ 0 ] + self.topo[ Topo ][ 1 ] )
- link = '{}'.format( ( self.topo[ Topo ][ 0 ] + self.topo[ Topo ][ 1 ] ) * self.topo[ Topo ][ 0 ] )
- run.killOnos( main, killList, switch, link, '0' )
- run.pingAll( main, caseTitle, dumpflows=False )
- run.recoverOnos( main, killList, switch, link, '{}'.format( numNodes ) )
- run.checkFlows( main, minFlowCount=minFlow, tag=caseTitle )
- run.pingAll( main, caseTitle )
- # TODO Dynamic config of hosts in subnet
- # TODO Dynamic config of host not in subnet
- # TODO Dynamic config of vlan xconnect
- # TODO Vrouter integration
- # TODO Mcast integration
+ try:
+ description = "Cluster Restart test with " + self.topo[ Topo ][ 3 ]
+ caseTitle = 'CASE{}_'.format( caseNum ) + testing
+ main.case( description )
+ if not hasattr( main, 'apps' ):
+ run.initTest( main )
+ main.cfgName = Topo
+ main.Cluster.setRunningNode( numNodes )
+ run.installOnos( main )
+ run.loadJson( main )
+ run.loadChart( main )
+ if hasattr( main, 'Mininet1' ):
+ run.startMininet( main, 'cord_fabric.py', args=self.topo[ Topo ][ 2 ] )
+ else:
+ # Run the test with physical devices
+ # TODO: connect TestON to the physical network
+ pass
+ # pre-configured routing and bridging test
+ run.checkFlows( main, minFlowCount=minFlow )
+ run.pingAll( main )
+ switch = '{}'.format( self.topo[ Topo ][ 0 ] + self.topo[ Topo ][ 1 ] )
+ link = '{}'.format( ( self.topo[ Topo ][ 0 ] + self.topo[ Topo ][ 1 ] ) * self.topo[ Topo ][ 0 ] )
+ run.killOnos( main, killList, switch, link, '0' )
+ run.pingAll( main, caseTitle, dumpflows=False )
+ run.recoverOnos( main, killList, switch, link, '{}'.format( numNodes ) )
+ run.checkFlows( main, minFlowCount=minFlow, tag=caseTitle )
+ run.pingAll( main, caseTitle )
+ # TODO Dynamic config of hosts in subnet
+ # TODO Dynamic config of host not in subnet
+ # TODO Dynamic config of vlan xconnect
+ # TODO Vrouter integration
+ # TODO Mcast integration
+
+ except Exception as e:
+ main.log.exception( "Error in runTest" )
+ main.skipCase( result="FAIL", msg=e )
if hasattr( main, 'Mininet1' ):
run.cleanup( main )
else:
diff --git a/TestON/tests/USECASE/SegmentRouting/SRDhcprelay/SRDhcprelay.params.stratum b/TestON/tests/USECASE/SegmentRouting/SRDhcprelay/SRDhcprelay.params.stratum
index e7e1564..15abde5 100644
--- a/TestON/tests/USECASE/SegmentRouting/SRDhcprelay/SRDhcprelay.params.stratum
+++ b/TestON/tests/USECASE/SegmentRouting/SRDhcprelay/SRDhcprelay.params.stratum
@@ -25,7 +25,7 @@
</DEPENDENCY>
<MN_DOCKER>
- <args>--privileged --net host --rm -v topo:/topo -v ~/mininet/custom:/home/root/mininet/custom -v /var/run/openvswitch/:/var/run/openvswitch/ -v /tmp/mn-stratum:/tmp -v /tmp/mn_conf/:/home/root --hostname mn-stratum -v /etc/network/interfaces:/etc/network/interfaces -v /lib/modules:/lib/modules -it -d</args>
+ <args>--privileged --net host --rm -v topo:/topo -v ~/mininet/custom/:/home/root/mininet/custom/ -v /var/run/openvswitch/:/var/run/openvswitch/ -v /tmp/mn-stratum/:/tmp/ -v /tmp/mn_conf/:/home/root/config/ -v /etc/network/interfaces:/etc/network/interfaces -v /lib/modules/:/lib/modules/ -it -d --hostname mn-stratum </args>
<name>trellis_mininet</name>
<home>/home/root/</home>
</MN_DOCKER>
@@ -44,7 +44,7 @@
<ENV>
<cellName>productionCell</cellName>
- <cellApps>drivers,fpm,dhcprelay,netcfghostprovider,routeradvertisement,drivers.bmv2,pipelines.fabric</cellApps>
+ <cellApps>drivers,lldpprovider,hostprovider,fpm,dhcprelay,netcfghostprovider,routeradvertisement,drivers.bmv2,pipelines.fabric,segmentrouting</cellApps>
</ENV>
<GIT>
diff --git a/TestON/tests/USECASE/SegmentRouting/SRDhcprelay/dependencies/SRDhcprelayTest.py b/TestON/tests/USECASE/SegmentRouting/SRDhcprelay/dependencies/SRDhcprelayTest.py
index a2ceb25..15e33a3 100644
--- a/TestON/tests/USECASE/SegmentRouting/SRDhcprelay/dependencies/SRDhcprelayTest.py
+++ b/TestON/tests/USECASE/SegmentRouting/SRDhcprelay/dependencies/SRDhcprelayTest.py
@@ -29,59 +29,63 @@
@staticmethod
def runTest( main, testIndex, onosNodes, description, dhcpRelay=False, remoteServer=False, multipleServer=False, ipv6=False, vlan=[], dualHomed=False ):
- skipPackage = False
- init = False
- if not hasattr( main, 'apps' ):
- init = True
- run.initTest( main )
- # Skip onos packaging if the clusrer size stays the same
- if not init and onosNodes == main.Cluster.numCtrls:
- skipPackage = True
+ try:
+ skipPackage = False
+ init = False
+ if not hasattr( main, 'apps' ):
+ init = True
+ run.initTest( main )
+            # Skip ONOS packaging if the cluster size stays the same
+ if not init and onosNodes == main.Cluster.numCtrls:
+ skipPackage = True
- main.case( '%s, with %d ONOS instance%s' %
- ( description, onosNodes, 's' if onosNodes > 1 else '' ) )
+ main.case( '%s, with %d ONOS instance%s' %
+ ( description, onosNodes, 's' if onosNodes > 1 else '' ) )
- main.cfgName = 'CASE%02d' % testIndex
- main.resultFileName = 'CASE%02d' % testIndex
- main.Cluster.setRunningNode( onosNodes )
- run.installOnos( main, skipPackage=skipPackage, cliSleep=5 )
- if main.useBmv2:
- # Translate configuration file from OVS-OFDPA to BMv2 driver
- translator.bmv2ToOfdpa( main ) # Try to cleanup if switching between switch types
- switchPrefix = main.params[ 'DEPENDENCY' ].get( 'switchPrefix', "bmv2" )
- translator.ofdpaToBmv2( main, switchPrefix=switchPrefix )
- else:
- translator.bmv2ToOfdpa( main )
- run.loadJson( main )
- run.loadHost( main )
- if hasattr( main, 'Mininet1' ):
- run.mnDockerSetup( main )
- # Run the test with Mininet
- if dualHomed:
- mininet_args = ' --spine=2 --leaf=4 --dual-homed'
- else:
- mininet_args = ' --spine=2 --leaf=2'
- mininet_args += ' --dhcp-client'
- if dhcpRelay:
- mininet_args += ' --dhcp-relay'
- if multipleServer:
- mininet_args += ' --multiple-dhcp-server'
- if remoteServer:
- mininet_args += ' --remote-dhcp-server'
- if ipv6:
- mininet_args += ' --ipv6'
- if len( vlan ) > 0 :
- mininet_args += ' --vlan=%s' % ( ','.join( ['%d' % vlanId for vlanId in vlan ] ) )
+ main.cfgName = 'CASE%02d' % testIndex
+ main.resultFileName = 'CASE%02d' % testIndex
+ main.Cluster.setRunningNode( onosNodes )
+ run.installOnos( main, skipPackage=skipPackage, cliSleep=5 )
if main.useBmv2:
- mininet_args += ' --switch %s' % main.switchType
- main.log.info( "Using %s switch" % main.switchType )
+ # Translate configuration file from OVS-OFDPA to BMv2 driver
+ translator.bmv2ToOfdpa( main ) # Try to cleanup if switching between switch types
+ switchPrefix = main.params[ 'DEPENDENCY' ].get( 'switchPrefix', "bmv2" )
+ translator.ofdpaToBmv2( main, switchPrefix=switchPrefix )
+ else:
+ translator.bmv2ToOfdpa( main )
+ run.loadJson( main )
+ run.loadHost( main )
+ if hasattr( main, 'Mininet1' ):
+ run.mnDockerSetup( main )
+ # Run the test with Mininet
+ if dualHomed:
+ mininet_args = ' --spine=2 --leaf=4 --dual-homed'
+ else:
+ mininet_args = ' --spine=2 --leaf=2'
+ mininet_args += ' --dhcp-client'
+ if dhcpRelay:
+ mininet_args += ' --dhcp-relay'
+ if multipleServer:
+ mininet_args += ' --multiple-dhcp-server'
+ if remoteServer:
+ mininet_args += ' --remote-dhcp-server'
+ if ipv6:
+ mininet_args += ' --ipv6'
+ if len( vlan ) > 0 :
+ mininet_args += ' --vlan=%s' % ( ','.join( ['%d' % vlanId for vlanId in vlan ] ) )
+ if main.useBmv2:
+ mininet_args += ' --switch %s' % main.switchType
+ main.log.info( "Using %s switch" % main.switchType )
- run.startMininet( main, 'trellis_fabric.py', args=mininet_args )
- else:
- # Run the test with physical devices
- # TODO: connect TestON to the physical network
- pass
- run.verifyOnosHostIp( main )
- run.verifyNetworkHostIp( main )
+ run.startMininet( main, 'trellis_fabric.py', args=mininet_args )
+ else:
+ # Run the test with physical devices
+ # TODO: connect TestON to the physical network
+ pass
+ run.verifyOnosHostIp( main, skipOnFail=False )
+ run.verifyNetworkHostIp( main )
+ except Exception as e:
+ main.log.exception( "Error in runTest" )
+ main.skipCase( result="FAIL", msg=e )
run.cleanup( main )
diff --git a/TestON/tests/USECASE/SegmentRouting/SRDhcprelay/dependencies/json/CASE12.json b/TestON/tests/USECASE/SegmentRouting/SRDhcprelay/dependencies/json/CASE12.json
index 028848c..3517e1d 100644
--- a/TestON/tests/USECASE/SegmentRouting/SRDhcprelay/dependencies/json/CASE12.json
+++ b/TestON/tests/USECASE/SegmentRouting/SRDhcprelay/dependencies/json/CASE12.json
@@ -113,7 +113,7 @@
{
"dhcpServerConnectPoint": "of:0000000000000002/7",
"serverIps": ["10.0.99.3", "2000::99fd"],
- "gatewayIps": ["10.0.1.1", "2000::101/120"]
+ "gatewayIps": ["10.0.1.1", "2000::101"]
}
]
}
diff --git a/TestON/tests/USECASE/SegmentRouting/SRDhcprelay/dependencies/json/CASE14.json b/TestON/tests/USECASE/SegmentRouting/SRDhcprelay/dependencies/json/CASE14.json
index 028848c..3517e1d 100644
--- a/TestON/tests/USECASE/SegmentRouting/SRDhcprelay/dependencies/json/CASE14.json
+++ b/TestON/tests/USECASE/SegmentRouting/SRDhcprelay/dependencies/json/CASE14.json
@@ -113,7 +113,7 @@
{
"dhcpServerConnectPoint": "of:0000000000000002/7",
"serverIps": ["10.0.99.3", "2000::99fd"],
- "gatewayIps": ["10.0.1.1", "2000::101/120"]
+ "gatewayIps": ["10.0.1.1", "2000::101"]
}
]
}
diff --git a/TestON/tests/USECASE/SegmentRouting/SRDynamic/SRDynamic.params b/TestON/tests/USECASE/SegmentRouting/SRDynamic/SRDynamic.params
index fdd3a93..a66b468 100755
--- a/TestON/tests/USECASE/SegmentRouting/SRDynamic/SRDynamic.params
+++ b/TestON/tests/USECASE/SegmentRouting/SRDynamic/SRDynamic.params
@@ -16,11 +16,13 @@
<useCommonConf>True</useCommonConf>
<useCommonTopo>True</useCommonTopo>
<topology>cord_fabric.py</topology>
+ <trellisOar>/home/sdn/segmentrouting-oar-3.0.0-SNAPSHOT.oar</trellisOar>
+ <t3Oar>/home/sdn/t3-app-3.0.0-SNAPSHOT.oar</t3Oar>
</DEPENDENCY>
<ENV>
<cellName>productionCell</cellName>
- <cellApps>drivers,hostprovider,segmentrouting,openflow-base,netcfghostprovider,netcfglinksprovider</cellApps>
+ <cellApps>drivers,hostprovider,openflow-base,netcfghostprovider,netcfglinksprovider</cellApps>
</ENV>
<GIT>
diff --git a/TestON/tests/USECASE/SegmentRouting/SRDynamic/dependencies/SRDynamicFuncs.py b/TestON/tests/USECASE/SegmentRouting/SRDynamic/dependencies/SRDynamicFuncs.py
index e4390de..3071651 100644
--- a/TestON/tests/USECASE/SegmentRouting/SRDynamic/dependencies/SRDynamicFuncs.py
+++ b/TestON/tests/USECASE/SegmentRouting/SRDynamic/dependencies/SRDynamicFuncs.py
@@ -34,42 +34,46 @@
self.topo[ '4x4' ] = ( 4, 4, '--leaf=4 --spine=4', '4x4 Leaf-spine' )
def runTest( self, main, caseNum, numNodes, Topo, minBeforeFlow, minAfterFlow, killOnosAndDeleteCfg ):
- if not hasattr( main, 'apps' ):
- run.initTest( main )
+ try:
+ if not hasattr( main, 'apps' ):
+ run.initTest( main )
- description = "Bridging and Routing sanity test with " + \
- self.topo[ Topo ][ 3 ] + \
- "and {} nodes.".format( numNodes ) + \
- ( "\nAlso, killing the first Onos and removing the host cfg." if killOnosAndDeleteCfg else "" )
- main.case( description )
+ description = "Bridging and Routing sanity test with " + \
+ self.topo[ Topo ][ 3 ] + \
+ "and {} nodes.".format( numNodes ) + \
+ ( "\nAlso, killing the first Onos and removing the host cfg." if killOnosAndDeleteCfg else "" )
+ main.case( description )
- main.cfgName = Topo
- main.Cluster.setRunningNode( numNodes )
- run.installOnos( main )
- run.loadJson( main )
- run.loadChart( main )
- run.startMininet( main, 'cord_fabric.py',
- args=self.topo[ Topo ][ 2 ] )
- # pre-configured routing and bridging test
- run.checkFlows( main, minFlowCount=minBeforeFlow )
- run.pingAll( main, dumpflows=False )
- run.addHostCfg( main )
- run.checkFlows( main, minFlowCount=minAfterFlow, dumpflows=False )
- run.pingAll( main )
- if killOnosAndDeleteCfg:
- switch = self.topo[ Topo ][ 0 ] + self.topo[ Topo ][ 1 ]
- link = ( self.topo[ Topo ][ 0 ] + self.topo[ Topo ][ 1 ] ) * self.topo[ Topo ][ 0 ]
- self.killAndDelete( main, caseNum, numNodes, minBeforeFlow, switch, link )
- # TODO Dynamic config of hosts in subnet
- # TODO Dynamic config of host not in subnet
- # TODO Dynamic config of vlan xconnect
- # TODO Vrouter integration
- # TODO Mcast integration
- if hasattr( main, 'Mininet1' ):
- run.cleanup( main )
- else:
- # TODO: disconnect TestON from the physical network
- pass
+ main.cfgName = Topo
+ main.Cluster.setRunningNode( numNodes )
+ run.installOnos( main )
+ run.loadJson( main )
+ run.loadChart( main )
+ run.startMininet( main, 'cord_fabric.py',
+ args=self.topo[ Topo ][ 2 ] )
+ # pre-configured routing and bridging test
+ run.checkFlows( main, minFlowCount=minBeforeFlow )
+ run.pingAll( main, dumpflows=False )
+ run.addHostCfg( main )
+ run.checkFlows( main, minFlowCount=minAfterFlow, dumpflows=False )
+ run.pingAll( main )
+ if killOnosAndDeleteCfg:
+ switch = self.topo[ Topo ][ 0 ] + self.topo[ Topo ][ 1 ]
+ link = ( self.topo[ Topo ][ 0 ] + self.topo[ Topo ][ 1 ] ) * self.topo[ Topo ][ 0 ]
+ self.killAndDelete( main, caseNum, numNodes, minBeforeFlow, switch, link )
+ # TODO Dynamic config of hosts in subnet
+ # TODO Dynamic config of host not in subnet
+ # TODO Dynamic config of vlan xconnect
+ # TODO Vrouter integration
+ # TODO Mcast integration
+ if hasattr( main, 'Mininet1' ):
+ run.cleanup( main )
+ else:
+ # TODO: disconnect TestON from the physical network
+ pass
+ except Exception as e:
+ main.log.exception( "Error in runTest" )
+ main.skipCase( result="FAIL", msg=e )
def killAndDelete( self, main, caseNum, numNodes, minBeforeFlow, switch, link ):
run.killOnos( main, [ 0 ], '{}'.format( switch ), '{}'.format( link ), '{}'.format( numNodes - 1 ) )
diff --git a/TestON/tests/USECASE/SegmentRouting/SRDynamicConf/dependencies/SRDynamicConfTest.py b/TestON/tests/USECASE/SegmentRouting/SRDynamicConf/dependencies/SRDynamicConfTest.py
index 78f3e6a..a959e71 100644
--- a/TestON/tests/USECASE/SegmentRouting/SRDynamicConf/dependencies/SRDynamicConfTest.py
+++ b/TestON/tests/USECASE/SegmentRouting/SRDynamicConf/dependencies/SRDynamicConfTest.py
@@ -38,342 +38,346 @@
Shared among same test scenario with different topology.
Only used when ping chart is updated.
'''
- topo = dict()
- # (number of spine switch, number of leaf switch, dual-homed, description, port number of h1)
- topo[ '0x1' ] = ( 0, 1, False, 'single ToR', 1 )
- topo[ '0x2' ] = ( 0, 2, True, 'dual-homed ToR', 2 )
- topo[ '2x2' ] = ( 2, 2, False, '2x2 leaf-spine topology', 3 )
- topo[ '2x4' ] = ( 2, 4, True, '2x4 dual-homed leaf-spine topology', 6 )
- fanout = 4
- switchNames = {}
- switchNames[ '2x2' ] = [ "leaf1", "leaf2", "spine101", "spine102" ]
-
- TAG = 'CASE%d' % testIndex
- skipPackage = False
- init = False
- dualHomed = topo[ topology ][ 2 ]
- portNum = topo[ topology ][ 4 ]
- defaultIntf = 'bond0' if dualHomed else 'eth0'
-
- from tests.USECASE.SegmentRouting.dependencies.Testcaselib import Testcaselib as run
- if not hasattr( main, 'apps' ):
- init = True
- run.initTest( main )
- # Skip onos packaging if the clusrer size stays the same
- if not init and onosNodes == main.Cluster.numCtrls:
- skipPackage = True
-
- main.case( '%s, with %s and %d ONOS instance%s' %
- ( description, topo[ topology ][ 3 ], onosNodes, 's' if onosNodes > 1 else '' ) )
- main.cfgName = topology
- main.Cluster.setRunningNode( onosNodes )
- run.installOnos( main, skipPackage=skipPackage, cliSleep=5 )
-
- # Provide common configuration
- # TODO: Generate json and chart dynamically, according to topologies and scenarios
- if main.useBmv2:
- # Translate configuration file from OVS-OFDPA to BMv2 driver
- translator.bmv2ToOfdpa( main ) # Try to cleanup if switching between switch types
- switchPrefix = main.params[ 'DEPENDENCY' ].get( 'switchPrefix', "bmv2" )
- translator.ofdpaToBmv2( main, switchPrefix=switchPrefix )
- else:
- translator.bmv2ToOfdpa( main )
- run.loadJson( main )
- run.loadChart( main )
-
- # Provide topology-specific interface configuration
- import json
try:
- intfCfg = "%s%s%s.json" % ( main.configPath, main.forJson, TAG )
+ topo = dict()
+ # (number of spine switch, number of leaf switch, dual-homed, description, port number of h1)
+ topo[ '0x1' ] = ( 0, 1, False, 'single ToR', 1 )
+ topo[ '0x2' ] = ( 0, 2, True, 'dual-homed ToR', 2 )
+ topo[ '2x2' ] = ( 2, 2, False, '2x2 leaf-spine topology', 3 )
+ topo[ '2x4' ] = ( 2, 4, True, '2x4 dual-homed leaf-spine topology', 6 )
+ fanout = 4
+ switchNames = {}
+ switchNames[ '2x2' ] = [ "leaf1", "leaf2", "spine101", "spine102" ]
+
+ TAG = 'CASE%d' % testIndex
+ skipPackage = False
+ init = False
+ dualHomed = topo[ topology ][ 2 ]
+ portNum = topo[ topology ][ 4 ]
+ defaultIntf = 'bond0' if dualHomed else 'eth0'
+
+ from tests.USECASE.SegmentRouting.dependencies.Testcaselib import Testcaselib as run
+ if not hasattr( main, 'apps' ):
+ init = True
+ run.initTest( main )
+            # Skip ONOS packaging if the cluster size stays the same
+ if not init and onosNodes == main.Cluster.numCtrls:
+ skipPackage = True
+
+ main.case( '%s, with %s and %d ONOS instance%s' %
+ ( description, topo[ topology ][ 3 ], onosNodes, 's' if onosNodes > 1 else '' ) )
+ main.cfgName = topology
+ main.Cluster.setRunningNode( onosNodes )
+ run.installOnos( main, skipPackage=skipPackage, cliSleep=5 )
+
+ # Provide common configuration
+ # TODO: Generate json and chart dynamically, according to topologies and scenarios
if main.useBmv2:
# Translate configuration file from OVS-OFDPA to BMv2 driver
- translator.bmv2ToOfdpa( main ) # Try to cleanup if switching between switch types
+ translator.bmv2ToOfdpa( main ) # Try to cleanup if switching between switch types
switchPrefix = main.params[ 'DEPENDENCY' ].get( 'switchPrefix', "bmv2" )
- translator.ofdpaToBmv2( main, switchPrefix=switchPrefix, cfgFile=intfCfg )
+ translator.ofdpaToBmv2( main, switchPrefix=switchPrefix )
else:
- translator.bmv2ToOfdpa( main, intfCfg )
- with open( intfCfg ) as cfg:
- main.Cluster.active( 0 ).REST.setNetCfg( json.load( cfg ) )
- except IOError:
- # Load default interface configuration
- defaultIntfCfg = "%s%s%s_ports.json" % ( main.configPath, main.forJson, topology )
+ translator.bmv2ToOfdpa( main )
+ run.loadJson( main )
+ run.loadChart( main )
+
+ # Provide topology-specific interface configuration
+ import json
+ try:
+ intfCfg = "%s%s%s.json" % ( main.configPath, main.forJson, TAG )
+ if main.useBmv2:
+ # Translate configuration file from OVS-OFDPA to BMv2 driver
+ translator.bmv2ToOfdpa( main ) # Try to cleanup if switching between switch types
+ switchPrefix = main.params[ 'DEPENDENCY' ].get( 'switchPrefix', "bmv2" )
+ translator.ofdpaToBmv2( main, switchPrefix=switchPrefix, cfgFile=intfCfg )
+ else:
+ translator.bmv2ToOfdpa( main, intfCfg )
+ with open( intfCfg ) as cfg:
+ main.Cluster.active( 0 ).REST.setNetCfg( json.load( cfg ) )
+ except IOError:
+ # Load default interface configuration
+ defaultIntfCfg = "%s%s%s_ports.json" % ( main.configPath, main.forJson, topology )
+ if main.useBmv2:
+ # Translate configuration file from OVS-OFDPA to BMv2 driver
+ translator.bmv2ToOfdpa( main ) # Try to cleanup if switching between switch types
+ switchPrefix = main.params[ 'DEPENDENCY' ].get( 'switchPrefix', "bmv2" )
+ translator.ofdpaToBmv2( main, switchPrefix=switchPrefix, cfgFile=defaultIntfCfg )
+ else:
+ translator.bmv2ToOfdpa( main, defaultIntfCfg )
+ with open( defaultIntfCfg ) as cfg:
+ main.Cluster.active( 0 ).REST.setNetCfg( json.load( cfg ) )
+
+ try:
+ with open( "%s%sCASE%d.chart" % (main.configPath, main.forChart, testIndex / 10 * 10) ) as chart:
+ main.pingChart = json.load( chart )
+ except IOError:
+ # Load default chart
+ with open( "%s%sdefault.chart" % (main.configPath, main.forChart) ) as chart:
+ main.pingChart = json.load( chart )
+
+ # Set up topology
+ if hasattr( main, 'Mininet1' ):
+ # Run the test with mininet topology
+ mininet_args = ' --spine=%d --leaf=%d --fanout=%d' \
+ % ( topo[ topology ][ 0 ], topo[ topology ][ 1 ], fanout )
+ if len( vlan ) > 0 :
+ mininet_args += ' --vlan=%s' % ( ','.join( [ '%d' % vlanId for vlanId in vlan ] ) )
+ if topo[ topology ][ 0 ] > 0:
+ mininet_args += ',0,0,0,0'
+ if dualHomed:
+ mininet_args += ' --dual-homed'
+ if main.useBmv2:
+ mininet_args += ' --switch bmv2'
+ main.log.info( "Using BMv2 switch" )
+
+ run.startMininet( main, 'trellis_fabric.py', args=mininet_args )
+ else:
+ # Run the test with physical devices
+ run.connectToPhysicalNetwork( main, switchNames[ topology ] )
+
+ # minFlowCountPerLeaf = 13 + [# of ports] * 5 + [# of hosts] * 2 + [# of vlan ids]
+ minFlowCountPerLeaf = 13 + ( fanout + topo[ topology ][ 0 ]) * 5 + fanout * 2 + len( set( vlan ) )
+ run.checkFlows( main, minFlowCount=minFlowCountPerLeaf * topo[ topology ][ 1 ], sleep=5, dumpflows=False )
+ # Check connectivity before changing interface configuration
+ run.pingAll( main, '%s_Before' % TAG, retryAttempts=2 )
+
if main.useBmv2:
- # Translate configuration file from OVS-OFDPA to BMv2 driver
- translator.bmv2ToOfdpa( main ) # Try to cleanup if switching between switch types
- switchPrefix = main.params[ 'DEPENDENCY' ].get( 'switchPrefix', "bmv2" )
- translator.ofdpaToBmv2( main, switchPrefix=switchPrefix, cfgFile=defaultIntfCfg )
+ leaf_dpid = [ "device:bmv2:leaf%d" % ( ls + 1 ) for ls in range( topo[ topology ][ 1 ] ) ]
else:
- translator.bmv2ToOfdpa( main, defaultIntfCfg )
- with open( defaultIntfCfg ) as cfg:
- main.Cluster.active( 0 ).REST.setNetCfg( json.load( cfg ) )
+ leaf_dpid = [ "of:%016d" % ( ls + 1 ) for ls in range( topo[ topology ][ 1 ] ) ]
+ for dpid in leaf_dpid:
+ run.checkFlowsByDpid( main, dpid, minFlowCountPerLeaf, sleep=5 )
- try:
- with open( "%s%sCASE%d.chart" % (main.configPath, main.forChart, testIndex / 10 * 10) ) as chart:
- main.pingChart = json.load( chart )
- except IOError:
- # Load default chart
- with open( "%s%sdefault.chart" % (main.configPath, main.forChart) ) as chart:
- main.pingChart = json.load( chart )
+ # Testcase-specific interface configuration change
+ if testIndex / 10 == 1:
+ # CASE11-14
+ if hasattr( main, 'Mininet1' ):
+ # Assign vlan tag 10 to host h1
+ main.Mininet1.assignVLAN( 'h1', 'h1-%s' % defaultIntf, '10' )
+ # Update port configuration of port 1
+ SRDynamicConfTest.updateIntfCfg( main, portNum, dualHomed,
+ [ '10.0.2.254/24', ], tagged=[ 10, ] )
+ else:
+ # TODO: update physical device configuration, same for all test cases
+ pass
+ elif testIndex / 10 == 2:
+ # CASE21-24
+ if hasattr( main, 'Mininet1' ):
+ # Update port configuration of port 1
+ SRDynamicConfTest.updateIntfCfg( main, portNum, dualHomed,
+ [ '10.0.2.254/24', ], untagged=20 )
+ elif testIndex / 10 == 3:
+ # CASE31-34
+ if hasattr( main, 'Mininet1' ):
+ # Update port configuration of port 1
+ SRDynamicConfTest.updateIntfCfg( main, portNum, dualHomed,
+ [ '10.0.2.254/24', ], untagged=110 )
+ # Update port configuration of port 2
+ SRDynamicConfTest.updateIntfCfg( main, portNum + 1, dualHomed,
+ [ '10.0.2.254/24', ], untagged=110 )
+ elif testIndex / 10 == 4:
+ # CASE41-44
+ if hasattr( main, 'Mininet1' ):
+ # Assign vlan tag 20 to host h1
+ main.Mininet1.assignVLAN( 'h1', 'h1-%s' % defaultIntf, '20')
+ # Update port configuration of port 1
+ SRDynamicConfTest.updateIntfCfg( main, portNum, dualHomed,
+ [ '10.0.2.254/24', ], tagged=[ 20, ] )
+ elif testIndex / 10 == 5:
+ # CASE51-54
+ if hasattr( main, 'Mininet1' ):
+ # Update port configuration of port 1
+ SRDynamicConfTest.updateIntfCfg( main, portNum, dualHomed,
+ [ '10.0.2.254/24', ], tagged=[ 20, ], native=10 )
+ elif testIndex / 10 == 6:
+ # CASE61-64
+ if hasattr( main, 'Mininet1' ):
+ # Update port configuration of port 1
+ SRDynamicConfTest.updateIntfCfg( main, portNum, dualHomed,
+ [ '10.0.2.254/24', ], tagged=[ 120, ], native=110 )
+ # Update port configuration of port 2
+ SRDynamicConfTest.updateIntfCfg( main, portNum + 1, dualHomed,
+ [ '10.0.2.254/24', ], tagged=[ 120, ], native=110 )
+ elif testIndex / 10 == 7:
+ # CASE71-74
+ if hasattr( main, 'Mininet1' ):
+ # Update host configuration of h1
+ main.Mininet1.removeVLAN( 'h1', 'h1-%s.10' % defaultIntf )
+ # Update port configuration of port 1
+ SRDynamicConfTest.updateIntfCfg( main, portNum, dualHomed,
+ [ '10.0.2.254/24', ], untagged=10 )
+ elif testIndex / 10 == 8:
+ # CASE81-84
+ if hasattr( main, 'Mininet1' ):
+ # Update port configuration of port 1
+ SRDynamicConfTest.updateIntfCfg( main, portNum, dualHomed,
+ [ '10.0.2.254/24', ], tagged=[ 20, ], native=10 )
+ elif testIndex / 10 == 9:
+ # CASE91-94
+ if hasattr( main, 'Mininet1' ):
+ # Update host configuration
+ main.Mininet1.removeVLAN( 'h1', 'h1-%s.10' % defaultIntf )
+ main.Mininet1.removeVLAN( 'h2', 'h2-%s.10' % defaultIntf )
- # Set up topology
- if hasattr( main, 'Mininet1' ):
- # Run the test with mininet topology
- mininet_args = ' --spine=%d --leaf=%d --fanout=%d' \
- % ( topo[ topology ][ 0 ], topo[ topology ][ 1 ], fanout )
- if len( vlan ) > 0 :
- mininet_args += ' --vlan=%s' % ( ','.join( [ '%d' % vlanId for vlanId in vlan ] ) )
- if topo[ topology ][ 0 ] > 0:
- mininet_args += ',0,0,0,0'
- if dualHomed:
- mininet_args += ' --dual-homed'
- if main.useBmv2:
- mininet_args += ' --switch bmv2'
- main.log.info( "Using BMv2 switch" )
+ # Update port configuration
+ SRDynamicConfTest.updateIntfCfg( main, portNum, dualHomed,
+ [ '10.0.2.254/24', ], tagged=[ 120, ], native=110 )
+ SRDynamicConfTest.updateIntfCfg( main, portNum + 1, dualHomed,
+ [ '10.0.2.254/24', ], tagged=[ 120, ], native=110 )
+ elif testIndex / 10 == 10:
+ # CASE101-104
+ if hasattr( main, 'Mininet1' ):
+ # Update port configuration
+ SRDynamicConfTest.updateIntfCfg( main, portNum, dualHomed,
+ [ '10.0.2.254/24', ], untagged=20 )
+ elif testIndex / 10 == 11:
+ # CASE111-114
+ if hasattr( main, 'Mininet1' ):
+ # Update port configuration
+ SRDynamicConfTest.updateIntfCfg( main, portNum, dualHomed,
+ [ '10.0.2.254/24', ], tagged=[ 20, ] )
+ elif testIndex / 10 == 12:
+ # CASE121-124
+ if hasattr( main, 'Mininet1' ):
+ # Update port configuration
+ SRDynamicConfTest.updateIntfCfg( main, portNum, dualHomed,
+ [ '10.0.2.254/24', ], tagged=[ 20, ], native=110 )
+ SRDynamicConfTest.updateIntfCfg( main, portNum + 1, dualHomed,
+ [ '10.0.2.254/24', ], tagged=[ 20, ], native=110 )
+ elif testIndex / 10 == 13:
+ # CASE131-134
+ if hasattr( main, 'Mininet1' ):
+ # Update port configuration
+ SRDynamicConfTest.updateIntfCfg( main, portNum, dualHomed,
+ [ '10.0.2.254/24', ], tagged=[ 120, ], native=10 )
+ elif testIndex / 10 == 14:
+ # CASE141-144
+ if hasattr( main, 'Mininet1' ):
+ # Update port configuration
+ SRDynamicConfTest.updateIntfCfg( main, portNum, dualHomed,
+ [ '10.0.2.254/24', ], tagged=[ 20, ] )
+ elif testIndex / 10 == 15:
+ # CASE151-154
+ if hasattr( main, 'Mininet1' ):
+ # Update port configuration
+ SRDynamicConfTest.updateIntfCfg( main, portNum, dualHomed,
+ [ '10.0.2.254/24', ], tagged=[ 120, ] )
+ elif testIndex / 10 == 16:
+ # CASE161-164
+ if hasattr( main, 'Mininet1' ):
+ # Update port configuration
+ SRDynamicConfTest.updateIntfCfg( main, portNum, dualHomed,
+ [ '10.0.2.254/24', ], tagged=[ 20, ], native=10 )
+ elif testIndex / 10 == 17:
+ # CASE171-174
+ if hasattr( main, 'Mininet1' ):
+ # Update port configuration
+ SRDynamicConfTest.updateIntfCfg( main, portNum, dualHomed,
+ [ '10.0.2.254/24', ], tagged=[ 120, ] )
+ elif testIndex / 10 == 18:
+ # CASE181-184
+ if hasattr( main, 'Mininet1' ):
+ # Update port configuration
+ SRDynamicConfTest.updateIntfCfg( main, portNum, dualHomed,
+ [ '10.0.2.254/24', ], tagged=[ 20, ], native=10 )
+ elif testIndex / 10 == 19:
+ # CASE191-194
+ if hasattr( main, 'Mininet1' ):
+ # Update port configuration
+ SRDynamicConfTest.updateIntfCfg( main, portNum, dualHomed,
+ [ '10.0.2.254/24', ], untagged=20 )
+ elif testIndex / 10 == 20:
+ # CASE201-204
+ if hasattr( main, 'Mininet1' ):
+ # Update port configuration
+ SRDynamicConfTest.updateIntfCfg( main, portNum, dualHomed,
+ [ '10.0.2.254/24', ], tagged=[ 20 ] )
+ elif testIndex / 10 == 21:
+ # CASE211-214
+ if hasattr( main, 'Mininet1' ):
+ # Update port configuration
+ SRDynamicConfTest.updateIntfCfg( main, portNum, dualHomed,
+ [ '10.0.2.254/24', ], tagged=[ 20 ], native=110 )
+ elif testIndex / 10 == 22:
+ # CASE221-224
+ if hasattr( main, 'Mininet1' ):
+ # Update port configuration
+ SRDynamicConfTest.updateIntfCfg( main, portNum, dualHomed,
+ [ '10.0.2.254/24', ], tagged=[ 120 ], native=10 )
+ elif testIndex / 10 == 23:
+ # CASE231-234
+ if hasattr( main, "Mininet1" ):
+ SRDynamicConfTest.updateIntfCfg( main, portNum, dualHomed,
+ [ '10.0.2.254/24', ], tagged=[ 10, ] )
+ for dpid in leaf_dpid:
+ run.checkFlowsByDpid( main, dpid, minFlowCountPerLeaf, sleep=5 )
+ main.pingChart[ 'leaf1' ][ 'expect' ] = False
+ run.pingAll( main, '%s_1' % TAG, retryAttempts=2 )
- run.startMininet( main, 'trellis_fabric.py', args=mininet_args )
- else:
- # Run the test with physical devices
- run.connectToPhysicalNetwork( main, switchNames[ topology ] )
+ SRDynamicConfTest.updateIntfCfg( main, portNum, dualHomed,
+ [ '10.0.2.254/24', ], untagged=50 )
+ for dpid in leaf_dpid:
+ run.checkFlowsByDpid( main, dpid, minFlowCountPerLeaf, sleep=5 )
+ run.pingAll( main, '%s_2' % TAG, retryAttempts=2 )
- # minFlowCountPerLeaf = 13 + [# of ports] * 5 + [# of hosts] * 2 + [# of vlan ids]
- minFlowCountPerLeaf = 13 + ( fanout + topo[ topology ][ 0 ]) * 5 + fanout * 2 + len( set( vlan ) )
- run.checkFlows( main, minFlowCount=minFlowCountPerLeaf * topo[ topology ][ 1 ], sleep=5, dumpflows=False )
- # Check connectivity before changing interface configuration
- run.pingAll( main, '%s_Before' % TAG, retryAttempts=2 )
+ SRDynamicConfTest.updateIntfCfg( main, portNum, dualHomed,
+ [ '10.0.2.254/24', ], tagged=[ 20, ] )
+ for dpid in leaf_dpid:
+ run.checkFlowsByDpid( main, dpid, minFlowCountPerLeaf, sleep=5 )
+ run.pingAll( main, '%s_3' % TAG, retryAttempts=2 )
- if main.useBmv2:
- leaf_dpid = [ "device:bmv2:leaf%d" % ( ls + 1 ) for ls in range( topo[ topology ][ 1 ] ) ]
- else:
- leaf_dpid = [ "of:%016d" % ( ls + 1 ) for ls in range( topo[ topology ][ 1 ] ) ]
- for dpid in leaf_dpid:
- run.checkFlowsByDpid( main, dpid, minFlowCountPerLeaf, sleep=5 )
+ SRDynamicConfTest.updateIntfCfg( main, portNum, dualHomed,
+ [ '10.0.2.254/24', ], tagged=[ 40, ], native=10 )
+ for dpid in leaf_dpid:
+ run.checkFlowsByDpid( main, dpid, minFlowCountPerLeaf, sleep=5 )
+ main.pingChart[ 'leaf1' ][ 'expect' ] = True
+ run.pingAll( main, '%s_4' % TAG, retryAttempts=2 )
- # Testcase-specific interface configuration change
- if testIndex / 10 == 1:
- # CASE11-14
- if hasattr( main, 'Mininet1' ):
- # Assign vlan tag 10 to host h1
- main.Mininet1.assignVLAN( 'h1', 'h1-%s' % defaultIntf, '10' )
- # Update port configuration of port 1
- SRDynamicConfTest.updateIntfCfg( main, portNum, dualHomed,
- [ '10.0.2.254/24', ], tagged=[ 10, ] )
- else:
- # TODO: update physical device configuration, same for all test cases
- pass
- elif testIndex / 10 == 2:
- # CASE21-24
- if hasattr( main, 'Mininet1' ):
- # Update port configuration of port 1
- SRDynamicConfTest.updateIntfCfg( main, portNum, dualHomed,
- [ '10.0.2.254/24', ], untagged=20 )
- elif testIndex / 10 == 3:
- # CASE31-34
- if hasattr( main, 'Mininet1' ):
- # Update port configuration of port 1
- SRDynamicConfTest.updateIntfCfg( main, portNum, dualHomed,
- [ '10.0.2.254/24', ], untagged=110 )
- # Update port configuration of port 2
- SRDynamicConfTest.updateIntfCfg( main, portNum + 1, dualHomed,
- [ '10.0.2.254/24', ], untagged=110 )
- elif testIndex / 10 == 4:
- # CASE41-44
- if hasattr( main, 'Mininet1' ):
- # Assign vlan tag 20 to host h1
- main.Mininet1.assignVLAN( 'h1', 'h1-%s' % defaultIntf, '20')
- # Update port configuration of port 1
- SRDynamicConfTest.updateIntfCfg( main, portNum, dualHomed,
- [ '10.0.2.254/24', ], tagged=[ 20, ] )
- elif testIndex / 10 == 5:
- # CASE51-54
- if hasattr( main, 'Mininet1' ):
- # Update port configuration of port 1
- SRDynamicConfTest.updateIntfCfg( main, portNum, dualHomed,
- [ '10.0.2.254/24', ], tagged=[ 20, ], native=10 )
- elif testIndex / 10 == 6:
- # CASE61-64
- if hasattr( main, 'Mininet1' ):
- # Update port configuration of port 1
- SRDynamicConfTest.updateIntfCfg( main, portNum, dualHomed,
- [ '10.0.2.254/24', ], tagged=[ 120, ], native=110 )
- # Update port configuration of port 2
- SRDynamicConfTest.updateIntfCfg( main, portNum + 1, dualHomed,
- [ '10.0.2.254/24', ], tagged=[ 120, ], native=110 )
- elif testIndex / 10 == 7:
- # CASE71-74
- if hasattr( main, 'Mininet1' ):
- # Update host configuration of h1
- main.Mininet1.removeVLAN( 'h1', 'h1-%s.10' % defaultIntf )
- # Update port configuration of port 1
- SRDynamicConfTest.updateIntfCfg( main, portNum, dualHomed,
- [ '10.0.2.254/24', ], untagged=10 )
- elif testIndex / 10 == 8:
- # CASE81-84
- if hasattr( main, 'Mininet1' ):
- # Update port configuration of port 1
- SRDynamicConfTest.updateIntfCfg( main, portNum, dualHomed,
- [ '10.0.2.254/24', ], tagged=[ 20, ], native=10 )
- elif testIndex / 10 == 9:
- # CASE91-94
- if hasattr( main, 'Mininet1' ):
- # Update host configuration
- main.Mininet1.removeVLAN( 'h1', 'h1-%s.10' % defaultIntf )
- main.Mininet1.removeVLAN( 'h2', 'h2-%s.10' % defaultIntf )
+ SRDynamicConfTest.updateIntfCfg( main, portNum, dualHomed,
+ [ '10.0.2.254/24', ], tagged=[ 20, ] )
+ for dpid in leaf_dpid:
+ run.checkFlowsByDpid( main, dpid, minFlowCountPerLeaf, sleep=5 )
+ main.pingChart[ 'leaf1' ][ 'expect' ] = False
+ run.pingAll( main, '%s_5' % TAG, retryAttempts=2 )
- # Update port configuration
- SRDynamicConfTest.updateIntfCfg( main, portNum, dualHomed,
- [ '10.0.2.254/24', ], tagged=[ 120, ], native=110 )
- SRDynamicConfTest.updateIntfCfg( main, portNum + 1, dualHomed,
- [ '10.0.2.254/24', ], tagged=[ 120, ], native=110 )
- elif testIndex / 10 == 10:
- # CASE101-104
- if hasattr( main, 'Mininet1' ):
- # Update port configuration
- SRDynamicConfTest.updateIntfCfg( main, portNum, dualHomed,
- [ '10.0.2.254/24', ], untagged=20 )
- elif testIndex / 10 == 11:
- # CASE111-114
- if hasattr( main, 'Mininet1' ):
- # Update port configuration
- SRDynamicConfTest.updateIntfCfg( main, portNum, dualHomed,
- [ '10.0.2.254/24', ], tagged=[ 20, ] )
- elif testIndex / 10 == 12:
- # CASE121-124
- if hasattr( main, 'Mininet1' ):
- # Update port configuration
- SRDynamicConfTest.updateIntfCfg( main, portNum, dualHomed,
- [ '10.0.2.254/24', ], tagged=[ 20, ], native=110 )
- SRDynamicConfTest.updateIntfCfg( main, portNum + 1, dualHomed,
- [ '10.0.2.254/24', ], tagged=[ 20, ], native=110 )
- elif testIndex / 10 == 13:
- # CASE131-134
- if hasattr( main, 'Mininet1' ):
- # Update port configuration
- SRDynamicConfTest.updateIntfCfg( main, portNum, dualHomed,
- [ '10.0.2.254/24', ], tagged=[ 120, ], native=10 )
- elif testIndex / 10 == 14:
- # CASE141-144
- if hasattr( main, 'Mininet1' ):
- # Update port configuration
- SRDynamicConfTest.updateIntfCfg( main, portNum, dualHomed,
- [ '10.0.2.254/24', ], tagged=[ 20, ] )
- elif testIndex / 10 == 15:
- # CASE151-154
- if hasattr( main, 'Mininet1' ):
- # Update port configuration
- SRDynamicConfTest.updateIntfCfg( main, portNum, dualHomed,
- [ '10.0.2.254/24', ], tagged=[ 120, ] )
- elif testIndex / 10 == 16:
- # CASE161-164
- if hasattr( main, 'Mininet1' ):
- # Update port configuration
- SRDynamicConfTest.updateIntfCfg( main, portNum, dualHomed,
- [ '10.0.2.254/24', ], tagged=[ 20, ], native=10 )
- elif testIndex / 10 == 17:
- # CASE171-174
- if hasattr( main, 'Mininet1' ):
- # Update port configuration
- SRDynamicConfTest.updateIntfCfg( main, portNum, dualHomed,
- [ '10.0.2.254/24', ], tagged=[ 120, ] )
- elif testIndex / 10 == 18:
- # CASE181-184
- if hasattr( main, 'Mininet1' ):
- # Update port configuration
- SRDynamicConfTest.updateIntfCfg( main, portNum, dualHomed,
- [ '10.0.2.254/24', ], tagged=[ 20, ], native=10 )
- elif testIndex / 10 == 19:
- # CASE191-194
- if hasattr( main, 'Mininet1' ):
- # Update port configuration
- SRDynamicConfTest.updateIntfCfg( main, portNum, dualHomed,
- [ '10.0.2.254/24', ], untagged=20 )
- elif testIndex / 10 == 20:
- # CASE201-204
- if hasattr( main, 'Mininet1' ):
- # Update port configuration
- SRDynamicConfTest.updateIntfCfg( main, portNum, dualHomed,
- [ '10.0.2.254/24', ], tagged=[ 20 ] )
- elif testIndex / 10 == 21:
- # CASE211-214
- if hasattr( main, 'Mininet1' ):
- # Update port configuration
- SRDynamicConfTest.updateIntfCfg( main, portNum, dualHomed,
- [ '10.0.2.254/24', ], tagged=[ 20 ], native=110 )
- elif testIndex / 10 == 22:
- # CASE221-224
- if hasattr( main, 'Mininet1' ):
- # Update port configuration
- SRDynamicConfTest.updateIntfCfg( main, portNum, dualHomed,
- [ '10.0.2.254/24', ], tagged=[ 120 ], native=10 )
- elif testIndex / 10 == 23:
- # CASE231-234
- if hasattr( main, "Mininet1" ):
- SRDynamicConfTest.updateIntfCfg( main, portNum, dualHomed,
- [ '10.0.2.254/24', ], tagged=[ 10, ] )
- for dpid in leaf_dpid:
- run.checkFlowsByDpid( main, dpid, minFlowCountPerLeaf, sleep=5 )
- main.pingChart[ 'leaf1' ][ 'expect' ] = False
- run.pingAll( main, '%s_1' % TAG, retryAttempts=2 )
+ SRDynamicConfTest.updateIntfCfg( main, portNum, dualHomed,
+ [ '10.0.2.254/24', ], untagged= 20 )
+ for dpid in leaf_dpid:
+ run.checkFlowsByDpid( main, dpid, minFlowCountPerLeaf, sleep=5 )
+ run.pingAll( main, '%s_6' % TAG, retryAttempts=2 )
- SRDynamicConfTest.updateIntfCfg( main, portNum, dualHomed,
- [ '10.0.2.254/24', ], untagged=50 )
- for dpid in leaf_dpid:
- run.checkFlowsByDpid( main, dpid, minFlowCountPerLeaf, sleep=5 )
- run.pingAll( main, '%s_2' % TAG, retryAttempts=2 )
+ SRDynamicConfTest.updateIntfCfg( main, portNum, dualHomed,
+ [ '10.0.2.254/24', ], untagged= 10 )
+ for dpid in leaf_dpid:
+ run.checkFlowsByDpid( main, dpid, minFlowCountPerLeaf, sleep=5 )
+ main.pingChart[ 'leaf1' ][ 'expect' ] = True
+ elif testIndex / 10 == 24:
+ # CASE243-244
+ # Only for 2x2 and 2x4 topology, to test reachability from other leaf
+ if hasattr( main, "Mininet1" ):
+ # Update host IP and default GW
+ main.Mininet1.changeIP( 'h1', 'h1-%s' % defaultIntf, '10.0.6.1', '255.255.255.0' )
+ main.Mininet1.changeDefaultGateway( 'h1', '10.0.6.254' )
+ # Update port configuration
+ SRDynamicConfTest.updateIntfCfg( main, portNum, dualHomed,
+ [ '10.0.6.254/24', ], untagged=60 )
- SRDynamicConfTest.updateIntfCfg( main, portNum, dualHomed,
- [ '10.0.2.254/24', ], tagged=[ 20, ] )
- for dpid in leaf_dpid:
- run.checkFlowsByDpid( main, dpid, minFlowCountPerLeaf, sleep=5 )
- run.pingAll( main, '%s_3' % TAG, retryAttempts=2 )
+ # Update ping chart in case it is changed
+ try:
+ with open( "%s%sCASE%d_after.chart" % (main.configPath, main.forChart, testIndex / 10 * 10 ) ) as chart:
+ main.pingChart = json.load(chart)
+ except IOError:
+ main.log.debug( "Ping chart is not changed" )
- SRDynamicConfTest.updateIntfCfg( main, portNum, dualHomed,
- [ '10.0.2.254/24', ], tagged=[ 40, ], native=10 )
- for dpid in leaf_dpid:
- run.checkFlowsByDpid( main, dpid, minFlowCountPerLeaf, sleep=5 )
- main.pingChart[ 'leaf1' ][ 'expect' ] = True
- run.pingAll( main, '%s_4' % TAG, retryAttempts=2 )
+ # Check connectivity after changing interface configuration
+ run.checkFlows( main, minFlowCount=minFlowCountPerLeaf * topo[ topology ][ 1 ], sleep=5, dumpflows=False )
+ run.pingAll( main, '%s_After' % TAG, retryAttempts=2 )
- SRDynamicConfTest.updateIntfCfg( main, portNum, dualHomed,
- [ '10.0.2.254/24', ], tagged=[ 20, ] )
- for dpid in leaf_dpid:
- run.checkFlowsByDpid( main, dpid, minFlowCountPerLeaf, sleep=5 )
- main.pingChart[ 'leaf1' ][ 'expect' ] = False
- run.pingAll( main, '%s_5' % TAG, retryAttempts=2 )
-
- SRDynamicConfTest.updateIntfCfg( main, portNum, dualHomed,
- [ '10.0.2.254/24', ], untagged= 20 )
- for dpid in leaf_dpid:
- run.checkFlowsByDpid( main, dpid, minFlowCountPerLeaf, sleep=5 )
- run.pingAll( main, '%s_6' % TAG, retryAttempts=2 )
-
- SRDynamicConfTest.updateIntfCfg( main, portNum, dualHomed,
- [ '10.0.2.254/24', ], untagged= 10 )
- for dpid in leaf_dpid:
- run.checkFlowsByDpid( main, dpid, minFlowCountPerLeaf, sleep=5 )
- main.pingChart[ 'leaf1' ][ 'expect' ] = True
- elif testIndex / 10 == 24:
- # CASE243-244
- # Only for 2x2 and 2x4 topology, to test reachability from other leaf
- if hasattr( main, "Mininet1" ):
- # Update host IP and default GW
- main.Mininet1.changeIP( 'h1', 'h1-%s' % defaultIntf, '10.0.6.1', '255.255.255.0' )
- main.Mininet1.changeDefaultGateway( 'h1', '10.0.6.254' )
- # Update port configuration
- SRDynamicConfTest.updateIntfCfg( main, portNum, dualHomed,
- [ '10.0.6.254/24', ], untagged=60 )
-
- # Update ping chart in case it is changed
- try:
- with open( "%s%sCASE%d_after.chart" % (main.configPath, main.forChart, testIndex / 10 * 10 ) ) as chart:
- main.pingChart = json.load(chart)
- except IOError:
- main.log.debug( "Ping chart is not changed" )
-
- # Check connectivity after changing interface configuration
- run.checkFlows( main, minFlowCount=minFlowCountPerLeaf * topo[ topology ][ 1 ], sleep=5, dumpflows=False )
- run.pingAll( main, '%s_After' % TAG, retryAttempts=2 )
-
- run.cleanup( main )
+ run.cleanup( main )
+ except Exception as e:
+ main.log.exception( "Error in runTest" )
+ main.skipCase( result="FAIL", msg=e )
@staticmethod
def updateIntfCfg( main, portNum, dualHomed, ips=[], untagged=0, tagged=[], native=0 ):
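For context on the reconfiguration sequence above: each updateIntfCfg call rewrites the interface entry for the host-facing port, and roughly corresponds to one entry under "ports" in the ONOS interface netcfg. A hedged sketch of that mapping, with a hypothetical device ID, port, and helper name:

# Hedged sketch of the netcfg fragment one updateIntfCfg call amounts to; the
# device ID and port are made up, the keys follow the ONOS interface schema
# ( ips / vlan-untagged / vlan-tagged / vlan-native ).
def buildIntfCfg( device, port, ips, untagged=0, tagged=[], native=0 ):
    intf = { "ips": ips }
    if untagged:
        intf[ "vlan-untagged" ] = untagged
    if tagged:
        intf[ "vlan-tagged" ] = tagged
    if native:
        intf[ "vlan-native" ] = native
    return { "ports": { "%s/%s" % ( device, port ): { "interfaces": [ intf ] } } }

# e.g. buildIntfCfg( "of:0000000000000001", 1, [ "10.0.2.254/24" ], untagged=20 )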
diff --git a/TestON/tests/USECASE/SegmentRouting/SRHighAvailability/SRHighAvailability.params b/TestON/tests/USECASE/SegmentRouting/SRHighAvailability/SRHighAvailability.params
index b87d333..637ba3f 100644
--- a/TestON/tests/USECASE/SegmentRouting/SRHighAvailability/SRHighAvailability.params
+++ b/TestON/tests/USECASE/SegmentRouting/SRHighAvailability/SRHighAvailability.params
@@ -16,11 +16,13 @@
<useCommonConf>True</useCommonConf>
<useCommonTopo>True</useCommonTopo>
<topology>cord_fabric.py</topology>
+ <trellisOar>/home/sdn/segmentrouting-oar-3.0.0-SNAPSHOT.oar</trellisOar>
+ <t3Oar>/home/sdn/t3-app-3.0.0-SNAPSHOT.oar</t3Oar>
</DEPENDENCY>
<ENV>
<cellName>productionCell</cellName>
- <cellApps>drivers,hostprovider,segmentrouting,openflow-base,netcfghostprovider,netcfglinksprovider</cellApps>
+ <cellApps>drivers,hostprovider,openflow-base,netcfghostprovider,netcfglinksprovider</cellApps>
</ENV>
<GIT>
diff --git a/TestON/tests/USECASE/SegmentRouting/SRHighAvailability/dependencies/SRHAFuncs.py b/TestON/tests/USECASE/SegmentRouting/SRHighAvailability/dependencies/SRHAFuncs.py
index 5b1d137..9169573 100644
--- a/TestON/tests/USECASE/SegmentRouting/SRHighAvailability/dependencies/SRHAFuncs.py
+++ b/TestON/tests/USECASE/SegmentRouting/SRHighAvailability/dependencies/SRHAFuncs.py
@@ -34,48 +34,52 @@
self.topo[ '4x4' ] = ( 4, 4, '--leaf=4 --spine=4', '4x4 Leaf-spine' )
def runTest( self, main, caseNum, numNodes, Topo, minFlow, isRandom, isKillingSwitch ):
- if not hasattr( main, 'apps' ):
- run.initTest( main )
+ try:
+ if not hasattr( main, 'apps' ):
+ run.initTest( main )
- description = "High Availability tests - " + \
- self.generateDescription( isRandom, isKillingSwitch ) + \
- self.topo[ Topo ][ 3 ]
- main.case( description )
- run.config( main, Topo )
- run.installOnos( main )
- run.loadJson( main )
- run.loadChart( main )
- run.startMininet( main, 'cord_fabric.py', args=self.topo[ Topo ][ 2 ] )
- # pre-configured routing and bridging test
- run.checkFlows( main, minFlowCount=minFlow )
- run.pingAll( main )
- switch = self.topo[ Topo ][ 0 ] + self.topo[ Topo ][ 1 ]
- link = ( self.topo[ Topo ][ 0 ] + self.topo[ Topo ][ 1 ] ) * self.topo[ Topo ][ 0 ]
- self.generateRandom( isRandom )
- for i in range( 0, main.failures ):
- toKill = self.getNextNum( isRandom, main.Cluster.numCtrls, i )
- run.killOnos( main, [ toKill ], '{}'.format( switch ),
- '{}'.format( link ), '{}'.format( numNodes - 1 ) )
- run.pingAll( main, 'CASE{}_ONOS_Failure{}'.format( caseNum, i + 1 ) )
- if isKillingSwitch:
- self.killAndRecoverSwitch( main, caseNum, numNodes,
- Topo, minFlow, isRandom,
- i, switch, link )
- run.recoverOnos( main, [ toKill ], '{}'.format( switch ),
- '{}'.format( link ), '{}'.format( numNodes ) )
- run.checkFlows( main, minFlowCount=minFlow,
- tag='CASE{}_ONOS{}_Recovery'.format( caseNum, i + 1 ) )
- run.pingAll( main, 'CASE{}_ONOS_Recovery{}'.format( caseNum, i + 1 ) )
- # TODO Dynamic config of hosts in subnet
- # TODO Dynamic config of host not in subnet
- # TODO Dynamic config of vlan xconnect
- # TODO Vrouter integration
- # TODO Mcast integration
- if hasattr( main, 'Mininet1' ):
- run.cleanup( main )
- else:
- # TODO: disconnect TestON from the physical network
- pass
+ description = "High Availability tests - " + \
+ self.generateDescription( isRandom, isKillingSwitch ) + \
+ self.topo[ Topo ][ 3 ]
+ main.case( description )
+ run.config( main, Topo )
+ run.installOnos( main )
+ run.loadJson( main )
+ run.loadChart( main )
+ run.startMininet( main, 'cord_fabric.py', args=self.topo[ Topo ][ 2 ] )
+ # pre-configured routing and bridging test
+ run.checkFlows( main, minFlowCount=minFlow )
+ run.pingAll( main )
+ switch = self.topo[ Topo ][ 0 ] + self.topo[ Topo ][ 1 ]
+ link = ( self.topo[ Topo ][ 0 ] + self.topo[ Topo ][ 1 ] ) * self.topo[ Topo ][ 0 ]
+ self.generateRandom( isRandom )
+ for i in range( 0, main.failures ):
+ toKill = self.getNextNum( isRandom, main.Cluster.numCtrls, i )
+ run.killOnos( main, [ toKill ], '{}'.format( switch ),
+ '{}'.format( link ), '{}'.format( numNodes - 1 ) )
+ run.pingAll( main, 'CASE{}_ONOS_Failure{}'.format( caseNum, i + 1 ) )
+ if isKillingSwitch:
+ self.killAndRecoverSwitch( main, caseNum, numNodes,
+ Topo, minFlow, isRandom,
+ i, switch, link )
+ run.recoverOnos( main, [ toKill ], '{}'.format( switch ),
+ '{}'.format( link ), '{}'.format( numNodes ) )
+ run.checkFlows( main, minFlowCount=minFlow,
+ tag='CASE{}_ONOS{}_Recovery'.format( caseNum, i + 1 ) )
+ run.pingAll( main, 'CASE{}_ONOS_Recovery{}'.format( caseNum, i + 1 ) )
+ # TODO Dynamic config of hosts in subnet
+ # TODO Dynamic config of host not in subnet
+ # TODO Dynamic config of vlan xconnect
+ # TODO Vrouter integration
+ # TODO Mcast integration
+ if hasattr( main, 'Mininet1' ):
+ run.cleanup( main )
+ else:
+ # TODO: disconnect TestON from the physical network
+ pass
+ except Exception as e:
+ main.log.exception( "Error in runTest" )
+ main.skipCase( result="FAIL", msg=e )
def generateDescription( self, isRandom, isKillingSwitch ):
return "ONOS " + ( "random " if isRandom else "" ) + "failures" +\
diff --git a/TestON/tests/USECASE/SegmentRouting/SRLinkFailure/SRLinkFailure.params b/TestON/tests/USECASE/SegmentRouting/SRLinkFailure/SRLinkFailure.params
index de88e07..3bd8457 100755
--- a/TestON/tests/USECASE/SegmentRouting/SRLinkFailure/SRLinkFailure.params
+++ b/TestON/tests/USECASE/SegmentRouting/SRLinkFailure/SRLinkFailure.params
@@ -16,11 +16,13 @@
<useCommonConf>True</useCommonConf>
<useCommonTopo>True</useCommonTopo>
<topology>cord_fabric.py</topology>
+ <trellisOar>/home/sdn/segmentrouting-oar-3.0.0-SNAPSHOT.oar</trellisOar>
+ <t3Oar>/home/sdn/t3-app-3.0.0-SNAPSHOT.oar</t3Oar>
</DEPENDENCY>
<ENV>
<cellName>productionCell</cellName>
- <cellApps>drivers,hostprovider,segmentrouting,openflow-base,netcfghostprovider,netcfglinksprovider</cellApps>
+ <cellApps>drivers,hostprovider,openflow-base,netcfghostprovider,netcfglinksprovider</cellApps>
</ENV>
<GIT>
diff --git a/TestON/tests/USECASE/SegmentRouting/SRLinkFailure/dependencies/SRLinkFailFuncs.py b/TestON/tests/USECASE/SegmentRouting/SRLinkFailure/dependencies/SRLinkFailFuncs.py
index 1ed1c55..c83d1e7 100644
--- a/TestON/tests/USECASE/SegmentRouting/SRLinkFailure/dependencies/SRLinkFailFuncs.py
+++ b/TestON/tests/USECASE/SegmentRouting/SRLinkFailure/dependencies/SRLinkFailFuncs.py
@@ -37,36 +37,40 @@
self.portTwo = '3'
def runTest( self, main, caseNum, numNodes, Topo, minFlow ):
- if not hasattr( main, 'apps' ):
- run.initTest( main )
+ try:
+ if not hasattr( main, 'apps' ):
+ run.initTest( main )
- description = "Bridging and Routing Link Failure test with " + self.topo[ Topo ][ 3 ] + " and {} Onos".format( numNodes )
- main.case( description )
+ description = "Bridging and Routing Link Failure test with " + self.topo[ Topo ][ 3 ] + " and {} Onos".format( numNodes )
+ main.case( description )
- main.cfgName = Topo
- main.Cluster.setRunningNode( numNodes )
- run.installOnos( main )
- run.loadJson( main )
- run.loadChart( main )
- run.startMininet( main, 'cord_fabric.py', args=self.topo[ Topo ][ 2 ] )
- # pre-configured routing and bridging test
- run.checkFlows( main, minFlowCount=minFlow )
- run.pingAll( main )
- switch = self.topo[ Topo ][ 0 ] + self.topo[ Topo ][ 1 ]
- link = ( self.topo[ Topo ][ 0 ] + self.topo[ Topo ][ 1 ] ) * self.topo[ Topo ][ 0 ]
- # link failure
- run.killLink( main, self.switchOne, self.switchTwo, switches='{}'.format( switch ), links='{}'.format( link - 2 ) )
- run.pingAll( main, "CASE{}_Failure".format( caseNum ) )
- run.restoreLink( main, self.switchOne, self.switchTwo, '{}'.format( switch ), '{}'.format( link ),
- True, self.dpidOne, self.dpidTwo, self.portOne, self.portTwo )
- run.pingAll( main, "CASE{}_Recovery".format( caseNum ) )
- # TODO Dynamic config of hosts in subnet
- # TODO Dynamic config of host not in subnet
- # TODO Dynamic config of vlan xconnect
- # TODO Vrouter integration
- # TODO Mcast integration
- if hasattr( main, 'Mininet1' ):
- run.cleanup( main )
- else:
- # TODO: disconnect TestON from the physical network
- pass
+ main.cfgName = Topo
+ main.Cluster.setRunningNode( numNodes )
+ run.installOnos( main )
+ run.loadJson( main )
+ run.loadChart( main )
+ run.startMininet( main, 'cord_fabric.py', args=self.topo[ Topo ][ 2 ] )
+ # pre-configured routing and bridging test
+ run.checkFlows( main, minFlowCount=minFlow )
+ run.pingAll( main )
+ switch = self.topo[ Topo ][ 0 ] + self.topo[ Topo ][ 1 ]
+ link = ( self.topo[ Topo ][ 0 ] + self.topo[ Topo ][ 1 ] ) * self.topo[ Topo ][ 0 ]
+ # link failure
+ run.killLink( main, self.switchOne, self.switchTwo, switches='{}'.format( switch ), links='{}'.format( link - 2 ) )
+ run.pingAll( main, "CASE{}_Failure".format( caseNum ) )
+ run.restoreLink( main, self.switchOne, self.switchTwo, '{}'.format( switch ), '{}'.format( link ),
+ True, self.dpidOne, self.dpidTwo, self.portOne, self.portTwo )
+ run.pingAll( main, "CASE{}_Recovery".format( caseNum ) )
+ # TODO Dynamic config of hosts in subnet
+ # TODO Dynamic config of host not in subnet
+ # TODO Dynamic config of vlan xconnect
+ # TODO Vrouter integration
+ # TODO Mcast integration
+ if hasattr( main, 'Mininet1' ):
+ run.cleanup( main )
+ else:
+ # TODO: disconnect TestON from the physical network
+ pass
+ except Exception as e:
+ main.log.exception( "Error in runTest" )
+ main.skipCase( result="FAIL", msg=e )
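The same try/except wrapper is applied to every SR test helper touched by this change; a minimal standalone sketch of the idiom (the body callable and helper name are hypothetical):

# Minimal sketch of the error-handling idiom added throughout these helpers: any
# unexpected exception is logged and the current case is skipped with a FAIL
# result instead of aborting the rest of the test run.
def runCaseSafely( main, body ):
    try:
        body( main )
    except Exception as e:
        main.log.exception( "Error in runTest" )
        main.skipCase( result="FAIL", msg=e )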
diff --git a/TestON/tests/USECASE/SegmentRouting/SRMulticast/dependencies/SRMulticastTest.py b/TestON/tests/USECASE/SegmentRouting/SRMulticast/dependencies/SRMulticastTest.py
index 1802221..c6d52fd 100644
--- a/TestON/tests/USECASE/SegmentRouting/SRMulticast/dependencies/SRMulticastTest.py
+++ b/TestON/tests/USECASE/SegmentRouting/SRMulticast/dependencies/SRMulticastTest.py
@@ -25,51 +25,55 @@
from tests.USECASE.SegmentRouting.dependencies.Testcaselib import Testcaselib as lib
import tests.USECASE.SegmentRouting.dependencies.cfgtranslator as translator
- skipPackage = False
- init = False
- if not hasattr( main, "apps" ):
- init = True
- lib.initTest( main )
- # Skip onos packaging if the cluster size stays the same
- if not init and onosNodes == main.Cluster.numCtrls:
- skipPackage = True
+ try:
+ skipPackage = False
+ init = False
+ if not hasattr( main, "apps" ):
+ init = True
+ lib.initTest( main )
+ # Skip onos packaging if the cluster size stays the same
+ if not init and onosNodes == main.Cluster.numCtrls:
+ skipPackage = True
- main.resultFileName = "CASE%03d" % test_idx
- main.Cluster.setRunningNode( onosNodes )
- lib.installOnos( main, skipPackage=skipPackage, cliSleep=5 )
- # Load configuration files
- main.step( "Load configurations" )
- main.cfgName = "TEST_CONFIG_ipv4=1_ipv6=1" if hasattr( main, "Mininet1" ) else main.params[ "DEPENDENCY" ][ "confName" ]
- if main.useBmv2:
- # Translate configuration file from OVS-OFDPA to BMv2 driver
- translator.bmv2ToOfdpa( main ) # Try to cleanup if switching between switch types
- switchPrefix = main.params[ 'DEPENDENCY' ].get( 'switchPrefix', "bmv2" )
- translator.ofdpaToBmv2( main, switchPrefix=switchPrefix )
- else:
- translator.bmv2ToOfdpa( main )
- lib.loadJson( main )
- time.sleep( float( main.params[ "timers" ][ "loadNetcfgSleep" ] ) )
- main.cfgName = "common" if hasattr( main, "Mininet1" ) else main.params[ "DEPENDENCY" ][ "confName" ]
- lib.loadMulticastConfig( main )
- lib.loadHost( main )
-
- if hasattr( main, "Mininet1" ):
- # Run the test with Mininet
- mininet_args = " --dhcp=1 --routers=1 --ipv6=1 --ipv4=1"
+ main.resultFileName = "CASE%03d" % test_idx
+ main.Cluster.setRunningNode( onosNodes )
+ lib.installOnos( main, skipPackage=skipPackage, cliSleep=5 )
+ # Load configuration files
+ main.step( "Load configurations" )
+ main.cfgName = "TEST_CONFIG_ipv4=1_ipv6=1" if hasattr( main, "Mininet1" ) else main.params[ "DEPENDENCY" ][ "confName" ]
if main.useBmv2:
- mininet_args += ' --switch bmv2'
- main.log.info( "Using BMv2 switch" )
- lib.startMininet( main, main.params[ "DEPENDENCY" ][ "topology" ], args=mininet_args )
- time.sleep( float( main.params[ "timers" ][ "startMininetSleep" ] ) )
- else:
- # Run the test with physical devices
- lib.connectToPhysicalNetwork( main )
+ # Translate configuration file from OVS-OFDPA to BMv2 driver
+ translator.bmv2ToOfdpa( main ) # Try to cleanup if switching between switch types
+ switchPrefix = main.params[ 'DEPENDENCY' ].get( 'switchPrefix', "bmv2" )
+ translator.ofdpaToBmv2( main, switchPrefix=switchPrefix )
+ else:
+ translator.bmv2ToOfdpa( main )
+ lib.loadJson( main )
+ time.sleep( float( main.params[ "timers" ][ "loadNetcfgSleep" ] ) )
+ main.cfgName = "common" if hasattr( main, "Mininet1" ) else main.params[ "DEPENDENCY" ][ "confName" ]
+ lib.loadMulticastConfig( main )
+ lib.loadHost( main )
- # Create scapy components
- lib.startScapyHosts( main )
- # Verify host IP assignment
- lib.verifyOnosHostIp( main )
- lib.verifyNetworkHostIp( main )
+ if hasattr( main, "Mininet1" ):
+ # Run the test with Mininet
+ mininet_args = " --dhcp=1 --routers=1 --ipv6=1 --ipv4=1"
+ if main.useBmv2:
+ mininet_args += ' --switch bmv2'
+ main.log.info( "Using BMv2 switch" )
+ lib.startMininet( main, main.params[ "DEPENDENCY" ][ "topology" ], args=mininet_args )
+ time.sleep( float( main.params[ "timers" ][ "startMininetSleep" ] ) )
+ else:
+ # Run the test with physical devices
+ lib.connectToPhysicalNetwork( main )
+
+ # Create scapy components
+ lib.startScapyHosts( main )
+ # Verify host IP assignment
+ lib.verifyOnosHostIp( main )
+ lib.verifyNetworkHostIp( main )
+ except Exception as e:
+ main.log.exception( "Error in setupTest" )
+ main.skipCase( result="FAIL", msg=e )
def verifyMcastRoutes( main ):
"""
diff --git a/TestON/tests/USECASE/SegmentRouting/SROnosFailure/SROnosFailure.params b/TestON/tests/USECASE/SegmentRouting/SROnosFailure/SROnosFailure.params
index 89f758c..99993f4 100755
--- a/TestON/tests/USECASE/SegmentRouting/SROnosFailure/SROnosFailure.params
+++ b/TestON/tests/USECASE/SegmentRouting/SROnosFailure/SROnosFailure.params
@@ -16,11 +16,13 @@
<useCommonConf>True</useCommonConf>
<useCommonTopo>True</useCommonTopo>
<topology>cord_fabric.py</topology>
+ <trellisOar>/home/sdn/segmentrouting-oar-3.0.0-SNAPSHOT.oar</trellisOar>
+ <t3Oar>/home/sdn/t3-app-3.0.0-SNAPSHOT.oar</t3Oar>
</DEPENDENCY>
<ENV>
<cellName>productionCell</cellName>
- <cellApps>drivers,hostprovider,segmentrouting,openflow-base,netcfghostprovider,netcfglinksprovider</cellApps>
+ <cellApps>drivers,hostprovider,openflow-base,netcfghostprovider,netcfglinksprovider</cellApps>
</ENV>
<GIT>
diff --git a/TestON/tests/USECASE/SegmentRouting/SROnosFailure/dependencies/SROnosFailureFuncs.py b/TestON/tests/USECASE/SegmentRouting/SROnosFailure/dependencies/SROnosFailureFuncs.py
index f130ff2..7233000 100644
--- a/TestON/tests/USECASE/SegmentRouting/SROnosFailure/dependencies/SROnosFailureFuncs.py
+++ b/TestON/tests/USECASE/SegmentRouting/SROnosFailure/dependencies/SROnosFailureFuncs.py
@@ -31,38 +31,42 @@
self.topo[ '4x4' ] = ( 4, 4, '--leaf=4 --spine=4', '4x4 Leaf-spine' )
def runTest( self, main, caseNum, numNodes, Topo, minFlow, killList=[ 0 ] ):
- description = "ONOS Failure test with " + self.topo[ Topo ][ 3 ]
- main.case( description )
- if not hasattr( main, 'apps' ):
- run.initTest( main )
- main.cfgName = Topo
- main.Cluster.setRunningNode( numNodes )
- run.installOnos( main )
- run.loadJson( main )
- run.loadChart( main )
- if hasattr( main, 'Mininet1' ):
- run.startMininet( main, 'cord_fabric.py', args=self.topo[ Topo ][ 2 ] )
- else:
- # Run the test with physical devices
- # TODO: connect TestON to the physical network
- pass
- # pre-configured routing and bridging test
- run.checkFlows( main, minFlowCount=minFlow )
- run.pingAll( main, 'CASE{}'.format( caseNum ) )
- switch = '{}'.format( self.topo[ Topo ][ 0 ] + self.topo[ Topo ][ 1 ] )
- link = '{}'.format( ( self.topo[ Topo ][ 0 ] + self.topo[ Topo ][ 1 ] ) * self.topo[ Topo ][ 0 ] )
- run.killOnos( main, killList, switch, link, '{}'.format( numNodes - 1 ) )
- run.pingAll( main, 'CASE{}_Failure'.format( caseNum ) )
- run.recoverOnos( main, killList, switch, link, '{}'.format( numNodes ) )
- run.checkFlows( main, minFlowCount=minFlow, tag='CASE{}_Recovery'.format( caseNum ) )
- run.pingAll( main, 'CASE{}_Recovery'.format( caseNum ) )
- # TODO Dynamic config of hosts in subnet
- # TODO Dynamic config of host not in subnet
- # TODO Dynamic config of vlan xconnect
- # TODO Vrouter integration
- # TODO Mcast integration
- if hasattr( main, 'Mininet1' ):
- run.cleanup( main )
- else:
- # TODO: disconnect TestON from the physical network
- pass
+ try:
+ description = "ONOS Failure test with " + self.topo[ Topo ][ 3 ]
+ main.case( description )
+ if not hasattr( main, 'apps' ):
+ run.initTest( main )
+ main.cfgName = Topo
+ main.Cluster.setRunningNode( numNodes )
+ run.installOnos( main )
+ run.loadJson( main )
+ run.loadChart( main )
+ if hasattr( main, 'Mininet1' ):
+ run.startMininet( main, 'cord_fabric.py', args=self.topo[ Topo ][ 2 ] )
+ else:
+ # Run the test with physical devices
+ # TODO: connect TestON to the physical network
+ pass
+ # pre-configured routing and bridging test
+ run.checkFlows( main, minFlowCount=minFlow )
+ run.pingAll( main, 'CASE{}'.format( caseNum ) )
+ switch = '{}'.format( self.topo[ Topo ][ 0 ] + self.topo[ Topo ][ 1 ] )
+ link = '{}'.format( ( self.topo[ Topo ][ 0 ] + self.topo[ Topo ][ 1 ] ) * self.topo[ Topo ][ 0 ] )
+ run.killOnos( main, killList, switch, link, '{}'.format( numNodes - 1 ) )
+ run.pingAll( main, 'CASE{}_Failure'.format( caseNum ) )
+ run.recoverOnos( main, killList, switch, link, '{}'.format( numNodes ) )
+ run.checkFlows( main, minFlowCount=minFlow, tag='CASE{}_Recovery'.format( caseNum ) )
+ run.pingAll( main, 'CASE{}_Recovery'.format( caseNum ) )
+ # TODO Dynamic config of hosts in subnet
+ # TODO Dynamic config of host not in subnet
+ # TODO Dynamic config of vlan xconnect
+ # TODO Vrouter integration
+ # TODO Mcast integration
+ if hasattr( main, 'Mininet1' ):
+ run.cleanup( main )
+ else:
+ # TODO: disconnect TestON from the physical network
+ pass
+ except Exception as e:
+ main.log.exception( "Error in runTest" )
+ main.skipCase( result="FAIL", msg=e )
diff --git a/TestON/tests/USECASE/SegmentRouting/SRRouting/SRRouting.params.stratum b/TestON/tests/USECASE/SegmentRouting/SRRouting/SRRouting.params.stratum
index 835baff..29d1415 100644
--- a/TestON/tests/USECASE/SegmentRouting/SRRouting/SRRouting.params.stratum
+++ b/TestON/tests/USECASE/SegmentRouting/SRRouting/SRRouting.params.stratum
@@ -25,7 +25,7 @@
</DEPENDENCY>
<MN_DOCKER>
- <args>--privileged --net host --rm -v topo:/topo -v ~/mininet/custom:/home/root/mininet/custom -v /var/run/openvswitch/:/var/run/openvswitch/ -v /tmp/mn-stratum:/tmp -v /tmp/mn_conf/:/home/root --hostname mn-stratum -v /etc/network/interfaces:/etc/network/interfaces -v /lib/modules:/lib/modules -it -d</args>
+ <args>--privileged --net host --rm -v topo:/topo -v ~/mininet/custom:/home/root/mininet/custom -v /var/run/openvswitch/:/var/run/openvswitch/ -v /tmp/mn-stratum:/tmp -v /tmp/mn_conf/:/home/root/config --hostname mn-stratum -v /etc/network/interfaces:/etc/network/interfaces -v /lib/modules:/lib/modules -it -d</args>
<name>trellis_mininet</name>
<home>/home/root/</home>
</MN_DOCKER>
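The args change above remaps the host-side /tmp/mn_conf directory to /home/root/config inside the container, so the mount no longer shadows the container's home directory. A hedged sketch of the host-side effect (the file name is hypothetical):

# Hedged sketch: a config file written to /tmp/mn_conf on the test host shows up
# at /home/root/config/<name> inside the mn-stratum container, leaving /home/root
# itself intact.
import json
import os

hostConfDir = "/tmp/mn_conf"
if not os.path.isdir( hostConfDir ):
    os.makedirs( hostConfDir )  # the test also pre-creates and chmods this directory
with open( os.path.join( hostConfDir, "example_netcfg.json" ), "w" ) as f:
    json.dump( { "ports": {} }, f )
# inside the container: /home/root/config/example_netcfg.json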
diff --git a/TestON/tests/USECASE/SegmentRouting/SRRouting/dependencies/SRRoutingTest.py b/TestON/tests/USECASE/SegmentRouting/SRRouting/dependencies/SRRoutingTest.py
index 2868b57..24864e5 100644
--- a/TestON/tests/USECASE/SegmentRouting/SRRouting/dependencies/SRRoutingTest.py
+++ b/TestON/tests/USECASE/SegmentRouting/SRRouting/dependencies/SRRoutingTest.py
@@ -28,82 +28,87 @@
import tests.USECASE.SegmentRouting.dependencies.cfgtranslator as translator
import time
- skipPackage = False
- init = False
- if not hasattr( main, 'apps' ):
- init = True
- lib.initTest( main )
- if onosNodes < 0:
- onosNodes = main.Cluster.numCtrls
- # Skip onos packaging if the cluster size stays the same
- if not init and onosNodes == main.Cluster.numCtrls:
- skipPackage = True
+ try:
+ skipPackage = False
+ init = False
+ if not hasattr( main, 'apps' ):
+ init = True
+ lib.initTest( main )
+ if onosNodes < 0:
+ onosNodes = main.Cluster.numCtrls
+ # Skip onos packaging if the cluster size stays the same
+ if not init and onosNodes == main.Cluster.numCtrls:
+ skipPackage = True
- main.internalIpv4Hosts = main.params[ 'TOPO' ][ 'internalIpv4Hosts' ].split( ',' )
- main.internalIpv6Hosts = main.params[ 'TOPO' ][ 'internalIpv6Hosts' ].split( ',' )
- main.externalIpv4Hosts = main.params[ 'TOPO' ][ 'externalIpv4Hosts' ].split( ',' ) if main.params[ 'TOPO' ].get('externalIpv4Hosts') else []
- main.externalIpv6Hosts = main.params[ 'TOPO' ][ 'externalIpv6Hosts' ].split( ',' ) if main.params[ 'TOPO' ].get('externalIpv6Hosts') else []
- main.staticIpv4Hosts = main.params[ 'TOPO' ][ 'staticIpv4Hosts' ].split( ',' ) if main.params[ 'TOPO' ].get('staticIpv4Hosts') else []
- main.staticIpv6Hosts = main.params[ 'TOPO' ][ 'staticIpv6Hosts' ].split( ',' ) if main.params[ 'TOPO' ].get('staticIpv6Hosts') else []
- main.disconnectedIpv4Hosts = []
- main.disconnectedIpv6Hosts = []
- main.disconnectedExternalIpv4Hosts = []
- main.disconnectedExternalIpv6Hosts = []
- main.disconnectedStaticIpv4Hosts = []
- main.disconnectedStaticIpv6Hosts = []
- main.resultFileName = 'CASE%03d' % test_idx
- main.Cluster.setRunningNode( onosNodes )
+ main.internalIpv4Hosts = main.params[ 'TOPO' ][ 'internalIpv4Hosts' ].split( ',' )
+ main.internalIpv6Hosts = main.params[ 'TOPO' ][ 'internalIpv6Hosts' ].split( ',' )
+ main.externalIpv4Hosts = main.params[ 'TOPO' ][ 'externalIpv4Hosts' ].split( ',' ) if main.params[ 'TOPO' ].get('externalIpv4Hosts') else []
+ main.externalIpv6Hosts = main.params[ 'TOPO' ][ 'externalIpv6Hosts' ].split( ',' ) if main.params[ 'TOPO' ].get('externalIpv6Hosts') else []
+ main.staticIpv4Hosts = main.params[ 'TOPO' ][ 'staticIpv4Hosts' ].split( ',' ) if main.params[ 'TOPO' ].get('staticIpv4Hosts') else []
+ main.staticIpv6Hosts = main.params[ 'TOPO' ][ 'staticIpv6Hosts' ].split( ',' ) if main.params[ 'TOPO' ].get('staticIpv6Hosts') else []
+ main.disconnectedIpv4Hosts = []
+ main.disconnectedIpv6Hosts = []
+ main.disconnectedExternalIpv4Hosts = []
+ main.disconnectedExternalIpv6Hosts = []
+ main.disconnectedStaticIpv4Hosts = []
+ main.disconnectedStaticIpv6Hosts = []
+ main.resultFileName = 'CASE%03d' % test_idx
+ main.Cluster.setRunningNode( onosNodes )
- lib.installOnos( main, skipPackage=skipPackage, cliSleep=5 )
+ lib.installOnos( main, skipPackage=skipPackage, cliSleep=5 )
- # Load configuration files
- if hasattr( main, "Mininet1" ):
- main.cfgName = 'TEST_CONFIG_ipv4={}_ipv6={}'.format( 1 if ipv4 else 0,
- 1 if ipv6 else 0)
- else:
- main.cfgName = main.params[ "DEPENDENCY" ][ "confName" ]
- if main.useBmv2:
- # Translate configuration file from OVS-OFDPA to BMv2 driver
- translator.bmv2ToOfdpa( main ) # Try to cleanup if switching between switch types
- switchPrefix = main.params[ 'DEPENDENCY' ].get( 'switchPrefix', "bmv2" )
- translator.ofdpaToBmv2( main, switchPrefix=switchPrefix )
- else:
- translator.bmv2ToOfdpa( main )
- lib.loadJson( main )
- main.log.debug( "sleeping %i seconds" % float( main.params[ 'timers' ][ 'loadNetcfgSleep' ] ) )
- time.sleep( float( main.params[ 'timers' ][ 'loadNetcfgSleep' ] ) )
- lib.loadHost( main )
-
- # if static route flag add routes
- # these routes are topology specific
- if static:
- if ipv4:
- lib.addStaticOnosRoute( main, "10.0.88.0/24", "10.0.1.1")
- lib.addStaticOnosRoute( main, "10.0.88.0/24", "10.0.5.1")
- if ipv6:
- lib.addStaticOnosRoute( main, "2000::8700/120", "2000::101")
- lib.addStaticOnosRoute( main, "2000::8700/120", "2000::501")
- if countFlowsGroups:
- lib.loadCount( main )
-
- if hasattr( main, 'Mininet1' ):
- lib.mnDockerSetup( main )
- # Run the test with Mininet
- mininet_args = ' --dhcp=1 --routers=1 --ipv6={} --ipv4={}'.format( 1 if ipv6 else 0,
- 1 if ipv4 else 0 )
+ # Load configuration files
+ if hasattr( main, "Mininet1" ):
+ main.cfgName = 'TEST_CONFIG_ipv4={}_ipv6={}'.format( 1 if ipv4 else 0,
+ 1 if ipv6 else 0)
+ else:
+ main.cfgName = main.params[ "DEPENDENCY" ][ "confName" ]
if main.useBmv2:
- mininet_args += ' --switch %s' % main.switchType
- main.log.info( "Using %s switch" % main.switchType )
- lib.startMininet( main, main.params[ 'DEPENDENCY' ][ 'topology' ], args=mininet_args )
- main.log.debug( "Waiting %i seconds for ONOS to discover dataplane" % float( main.params[ "timers" ][ "startMininetSleep" ] ))
- time.sleep( float( main.params[ "timers" ][ "startMininetSleep" ] ) )
- else:
- # Run the test with physical devices
- lib.connectToPhysicalNetwork( main )
+ # Translate configuration file from OVS-OFDPA to BMv2 driver
+ translator.bmv2ToOfdpa( main ) # Try to cleanup if switching between switch types
+ switchPrefix = main.params[ 'DEPENDENCY' ].get( 'switchPrefix', "bmv2" )
+ translator.ofdpaToBmv2( main, switchPrefix=switchPrefix )
+ else:
+ translator.bmv2ToOfdpa( main )
+ lib.loadJson( main )
+ main.log.debug( "sleeping %i seconds" % float( main.params[ 'timers' ][ 'loadNetcfgSleep' ] ) )
+ time.sleep( float( main.params[ 'timers' ][ 'loadNetcfgSleep' ] ) )
+ lib.loadHost( main )
- # wait some time for onos to install the rules!
- main.log.info( "Waiting %i seconds for ONOS to program the dataplane" % float( main.params[ "timers" ][ "dhcpSleep" ] ))
- time.sleep( float( main.params[ 'timers' ][ 'dhcpSleep' ] ) )
+ # if static route flag add routes
+ # these routes are topology specific
+ if static:
+ if ipv4:
+ lib.addStaticOnosRoute( main, "10.0.88.0/24", "10.0.1.1")
+ lib.addStaticOnosRoute( main, "10.0.88.0/24", "10.0.5.1")
+ if ipv6:
+ lib.addStaticOnosRoute( main, "2000::8700/120", "2000::101")
+ lib.addStaticOnosRoute( main, "2000::8700/120", "2000::501")
+ if countFlowsGroups:
+ lib.loadCount( main )
+
+ if hasattr( main, 'Mininet1' ):
+ lib.mnDockerSetup( main )
+ # Run the test with Mininet
+ mininet_args = ' --dhcp=1 --routers=1 --ipv6={} --ipv4={}'.format( 1 if ipv6 else 0,
+ 1 if ipv4 else 0 )
+ if main.useBmv2:
+ mininet_args += ' --switch %s' % main.switchType
+ main.log.info( "Using %s switch" % main.switchType )
+ lib.startMininet( main, main.params[ 'DEPENDENCY' ][ 'topology' ], args=mininet_args )
+ main.log.debug( "Waiting %i seconds for ONOS to discover dataplane" % float( main.params[ "timers" ][ "startMininetSleep" ] ))
+ time.sleep( float( main.params[ "timers" ][ "startMininetSleep" ] ) )
+ else:
+ # Run the test with physical devices
+ lib.connectToPhysicalNetwork( main )
+
+ lib.saveOnosDiagnostics( main )
+ # wait some time for onos to install the rules!
+ main.log.info( "Waiting %i seconds for ONOS to program the dataplane" % float( main.params[ "timers" ][ "dhcpSleep" ] ))
+ time.sleep( float( main.params[ 'timers' ][ 'dhcpSleep' ] ) )
+ except Exception as e:
+ main.log.exception( "Error in setupTest" )
+ main.skipCase( result="FAIL", msg=e )
def verifyPingInternal( main, ipv4=True, ipv6=True, disconnected=True ):
"""
diff --git a/TestON/tests/USECASE/SegmentRouting/SRSanity/SRSanity.params b/TestON/tests/USECASE/SegmentRouting/SRSanity/SRSanity.params
index a0b85f8..be96c56 100755
--- a/TestON/tests/USECASE/SegmentRouting/SRSanity/SRSanity.params
+++ b/TestON/tests/USECASE/SegmentRouting/SRSanity/SRSanity.params
@@ -16,11 +16,13 @@
<useCommonConf>True</useCommonConf>
<useCommonTopo>True</useCommonTopo>
<topology>cord_fabric.py</topology>
+ <trellisOar>/home/sdn/segmentrouting-oar-3.0.0-SNAPSHOT.oar</trellisOar>
+ <t3Oar>/home/sdn/t3-app-3.0.0-SNAPSHOT.oar</t3Oar>
</DEPENDENCY>
<ENV>
<cellName>productionCell</cellName>
- <cellApps>drivers,hostprovider,segmentrouting,openflow-base,netcfghostprovider,netcfglinksprovider</cellApps>
+ <cellApps>drivers,hostprovider,openflow-base,netcfghostprovider,netcfglinksprovider</cellApps>
</ENV>
<GIT>
diff --git a/TestON/tests/USECASE/SegmentRouting/SRSanity/dependencies/SRSanityFuncs.py b/TestON/tests/USECASE/SegmentRouting/SRSanity/dependencies/SRSanityFuncs.py
index 4d91bdd..d182e37 100644
--- a/TestON/tests/USECASE/SegmentRouting/SRSanity/dependencies/SRSanityFuncs.py
+++ b/TestON/tests/USECASE/SegmentRouting/SRSanity/dependencies/SRSanityFuncs.py
@@ -31,28 +31,32 @@
self.topo[ '4x4' ] = ( 4, 4, '--leaf=4 --spine=4', '4x4 Leaf-spine' )
def runTest( self, main, caseNum, numNodes, Topo, minFlow ):
- if not hasattr( main, 'apps' ):
- run.initTest( main )
+ try:
+ if not hasattr( main, 'apps' ):
+ run.initTest( main )
- description = "Bridging and Routing sanity test with " + self.topo[ Topo ][ 3 ] + " and {} Onos".format( numNodes )
- main.case( description )
+ description = "Bridging and Routing sanity test with " + self.topo[ Topo ][ 3 ] + " and {} Onos".format( numNodes )
+ main.case( description )
- main.cfgName = Topo
- main.Cluster.setRunningNode( numNodes )
- run.installOnos( main )
- run.loadJson( main )
- run.loadChart( main )
- run.startMininet( main, 'cord_fabric.py', args=self.topo[ Topo ][ 2 ] )
- # pre-configured routing and bridging test
- run.checkFlows( main, minFlowCount=minFlow )
- run.pingAll( main )
- # TODO Dynamic config of hosts in subnet
- # TODO Dynamic config of host not in subnet
- # TODO Dynamic config of vlan xconnect
- # TODO Vrouter integration
- # TODO Mcast integration
- if hasattr( main, 'Mininet1' ):
- run.cleanup( main )
- else:
- # TODO: disconnect TestON from the physical network
- pass
+ main.cfgName = Topo
+ main.Cluster.setRunningNode( numNodes )
+ run.installOnos( main )
+ run.loadJson( main )
+ run.loadChart( main )
+ run.startMininet( main, 'cord_fabric.py', args=self.topo[ Topo ][ 2 ] )
+ # pre-configured routing and bridging test
+ run.checkFlows( main, minFlowCount=minFlow )
+ run.pingAll( main )
+ # TODO Dynamic config of hosts in subnet
+ # TODO Dynamic config of host not in subnet
+ # TODO Dynamic config of vlan xconnect
+ # TODO Vrouter integration
+ # TODO Mcast integration
+ if hasattr( main, 'Mininet1' ):
+ run.cleanup( main )
+ else:
+ # TODO: disconnect TestON from the physical network
+ pass
+ except Exception as e:
+ main.log.exception( "Error in runTest" )
+ main.skipCase( result="FAIL", msg=e )
diff --git a/TestON/tests/USECASE/SegmentRouting/SRSwitchFailure/SRSwitchFailure.params b/TestON/tests/USECASE/SegmentRouting/SRSwitchFailure/SRSwitchFailure.params
index de88e07..3bd8457 100755
--- a/TestON/tests/USECASE/SegmentRouting/SRSwitchFailure/SRSwitchFailure.params
+++ b/TestON/tests/USECASE/SegmentRouting/SRSwitchFailure/SRSwitchFailure.params
@@ -16,11 +16,13 @@
<useCommonConf>True</useCommonConf>
<useCommonTopo>True</useCommonTopo>
<topology>cord_fabric.py</topology>
+ <trellisOar>/home/sdn/segmentrouting-oar-3.0.0-SNAPSHOT.oar</trellisOar>
+ <t3Oar>/home/sdn/t3-app-3.0.0-SNAPSHOT.oar</t3Oar>
</DEPENDENCY>
<ENV>
<cellName>productionCell</cellName>
- <cellApps>drivers,hostprovider,segmentrouting,openflow-base,netcfghostprovider,netcfglinksprovider</cellApps>
+ <cellApps>drivers,hostprovider,openflow-base,netcfghostprovider,netcfglinksprovider</cellApps>
</ENV>
<GIT>
diff --git a/TestON/tests/USECASE/SegmentRouting/SRSwitchFailure/dependencies/SRSwitchFailureFuncs.py b/TestON/tests/USECASE/SegmentRouting/SRSwitchFailure/dependencies/SRSwitchFailureFuncs.py
index 0ff6338..4cd8a19 100644
--- a/TestON/tests/USECASE/SegmentRouting/SRSwitchFailure/dependencies/SRSwitchFailureFuncs.py
+++ b/TestON/tests/USECASE/SegmentRouting/SRSwitchFailure/dependencies/SRSwitchFailureFuncs.py
@@ -31,37 +31,41 @@
self.topo[ '4x4' ] = ( 4, 4, '--leaf=4 --spine=4', '4x4 Leaf-spine' )
def runTest( self, main, caseNum, numNodes, Topo, minFlow ):
- if not hasattr( main, 'apps' ):
- run.initTest( main )
+ try:
+ if not hasattr( main, 'apps' ):
+ run.initTest( main )
- description = "Switch Failure test with " + self.topo[ Topo ][ 3 ] + " and {} Onos".format( numNodes )
- main.case( description )
+ description = "Switch Failure test with " + self.topo[ Topo ][ 3 ] + " and {} Onos".format( numNodes )
+ main.case( description )
- main.cfgName = Topo
- main.Cluster.setRunningNode( numNodes )
- run.installOnos( main )
- run.loadJson( main )
- run.loadChart( main )
- run.startMininet( main, 'cord_fabric.py', args=self.topo[ Topo ][ 2 ] )
- # pre-configured routing and bridging test
- run.checkFlows( main, minFlowCount=minFlow )
- run.pingAll( main )
- # switch failure\
- switch = main.params[ 'kill' ][ 'switch' ]
- switchNum = self.topo[ Topo ][ 0 ] + self.topo[ Topo ][ 1 ]
- linkNum = ( self.topo[ Topo ][ 0 ] + self.topo[ Topo ][ 1 ] ) * self.topo[ Topo ][ 0 ]
- run.killSwitch( main, switch, switches='{}'.format( switchNum - 1 ), links='{}'.format( linkNum - switchNum ) )
- run.pingAll( main, "CASE{}_Failure".format( caseNum ) )
- run.recoverSwitch( main, switch, switches='{}'.format( switchNum ), links='{}'.format( linkNum ) )
- run.checkFlows( main, minFlowCount=minFlow, tag="CASE{}_Recovery".format( caseNum ) )
- run.pingAll( main, "CASE{}_Recovery".format( caseNum ) )
- # TODO Dynamic config of hosts in subnet
- # TODO Dynamic config of host not in subnet
- # TODO Dynamic config of vlan xconnect
- # TODO Vrouter integration
- # TODO Mcast integration
- if hasattr( main, 'Mininet1' ):
- run.cleanup( main )
- else:
- # TODO: disconnect TestON from the physical network
- pass
+ main.cfgName = Topo
+ main.Cluster.setRunningNode( numNodes )
+ run.installOnos( main )
+ run.loadJson( main )
+ run.loadChart( main )
+ run.startMininet( main, 'cord_fabric.py', args=self.topo[ Topo ][ 2 ] )
+ # pre-configured routing and bridging test
+ run.checkFlows( main, minFlowCount=minFlow )
+ run.pingAll( main )
+ # switch failure
+ switch = main.params[ 'kill' ][ 'switch' ]
+ switchNum = self.topo[ Topo ][ 0 ] + self.topo[ Topo ][ 1 ]
+ linkNum = ( self.topo[ Topo ][ 0 ] + self.topo[ Topo ][ 1 ] ) * self.topo[ Topo ][ 0 ]
+ run.killSwitch( main, switch, switches='{}'.format( switchNum - 1 ), links='{}'.format( linkNum - switchNum ) )
+ run.pingAll( main, "CASE{}_Failure".format( caseNum ) )
+ run.recoverSwitch( main, switch, switches='{}'.format( switchNum ), links='{}'.format( linkNum ) )
+ run.checkFlows( main, minFlowCount=minFlow, tag="CASE{}_Recovery".format( caseNum ) )
+ run.pingAll( main, "CASE{}_Recovery".format( caseNum ) )
+ # TODO Dynamic config of hosts in subnet
+ # TODO Dynamic config of host not in subnet
+ # TODO Dynamic config of vlan xconnect
+ # TODO Vrouter integration
+ # TODO Mcast integration
+ if hasattr( main, 'Mininet1' ):
+ run.cleanup( main )
+ else:
+ # TODO: disconnect TestON from the physical network
+ pass
+ except Exception as e:
+ main.log.exception( "Error in runTest" )
+ main.skipCase( result="FAIL", msg=e )
diff --git a/TestON/tests/USECASE/SegmentRouting/dependencies/Dockerfile b/TestON/tests/USECASE/SegmentRouting/dependencies/Dockerfile
index bed2f54..be8d72d 100644
--- a/TestON/tests/USECASE/SegmentRouting/dependencies/Dockerfile
+++ b/TestON/tests/USECASE/SegmentRouting/dependencies/Dockerfile
@@ -29,4 +29,22 @@
&& alias sudo='' \
&& apt-get update \
&& ./install.sh -3fvn
+
+# Install scapy dependencies
+RUN apt-get update && \
+ apt-get -y install \
+ gcc tcpdump libpcap-dev \
+ python3 python3-pip
+# Install pip packages for scapy
+RUN pip3 install pexpect \
+ netaddr \
+ pyYaml \
+ ipaddr
+RUN git clone https://github.com/secdev/scapy.git \
+ && cd scapy \
+ && python setup.py install \
+ && pip install --pre scapy[basic]
+# Fix for tcpdump/docker bug
+RUN mv /usr/sbin/tcpdump /usr/bin/tcpdump \
+ && ln -s /usr/bin/tcpdump /usr/sbin/tcpdump
ENTRYPOINT bash
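A quick, hypothetical way to confirm the additions above took effect inside the rebuilt image (not part of this change):

# Hypothetical sanity check for the Mininet image: scapy must import and tcpdump
# must resolve on the PATH after the /usr/sbin -> /usr/bin move and symlink.
import distutils.spawn

def checkScapyImage():
    from scapy.all import conf  # fails if the scapy install steps broke
    assert distutils.spawn.find_executable( "tcpdump" ), "tcpdump not on PATH"
    return conf.version

if __name__ == "__main__":
    print( "scapy %s, tcpdump OK" % checkScapyImage() )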
diff --git a/TestON/tests/USECASE/SegmentRouting/dependencies/Testcaselib.py b/TestON/tests/USECASE/SegmentRouting/dependencies/Testcaselib.py
index 8fa0b96..4ce8934 100644
--- a/TestON/tests/USECASE/SegmentRouting/dependencies/Testcaselib.py
+++ b/TestON/tests/USECASE/SegmentRouting/dependencies/Testcaselib.py
@@ -255,6 +255,16 @@
main.ONOSbench.home + main.bmv2Path + main.bmv2,
main.Mininet1.home + "custom",
direction="to" )
+
+ if 'MN_DOCKER' in main.params and main.params['MN_DOCKER']['args']:
+ # move the config files into home
+ main.Mininet1.handle.sendline( "cp config/* . " )
+ main.Mininet1.handle.expect( main.Mininet1.Prompt() )
+ main.log.debug( main.Mininet1.handle.before + main.Mininet1.handle.after )
+ main.Mininet1.handle.sendline( "ls -al " )
+ main.Mininet1.handle.expect( main.Mininet1.Prompt() )
+ main.log.debug( main.Mininet1.handle.before + main.Mininet1.handle.after )
+
stepResult = copyResult
utilities.assert_equals( expect=main.TRUE,
actual=stepResult,
@@ -1595,46 +1605,58 @@
"""
Optionally start and setup docker image for mininet
"""
- if 'MN_DOCKER' in main.params and main.params['MN_DOCKER']['args']:
+ try:
+ if 'MN_DOCKER' in main.params and main.params['MN_DOCKER']['args']:
- main.log.info( "Creating Mininet Docker" )
- handle = main.Mininet1.handle
- # build docker image
- dockerFilePath = "%s/../dependencies/" % main.testDir
- dockerName = "trellis_mininet"
- # TODO: assert on these docker calls
- main.Mininet1.dockerBuild( dockerFilePath, dockerName )
+ main.log.info( "Creating Mininet Docker" )
+ handle = main.Mininet1.handle
+ # build docker image
+ dockerFilePath = "%s/../dependencies/" % main.testDir
+ dockerName = "trellis_mininet"
+ # Stop any leftover container
+ main.Mininet1.dockerStop( dockerName )
+ # TODO: assert on these docker calls
+ main.Mininet1.dockerBuild( dockerFilePath, dockerName )
- confDir = "/tmp/mn_conf/"
- # Try to ensure the destination exists
- main.log.info( "Create folder for network config files" )
- handle.sendline( "mkdir -p %s" % confDir )
- handle.expect( main.Mininet1.Prompt() )
- main.log.debug( handle.before + handle.after )
- # Make sure permissions are correct
- handle.sendline( "sudo chown %s:%s %s" % ( main.Mininet1.user_name, main.Mininet1.user_name, confDir ) )
- handle.expect( main.Mininet1.Prompt() )
- handle.sendline( "sudo chmod -R a+rwx %s" % ( confDir ) )
- handle.expect( main.Mininet1.Prompt() )
- main.log.debug( handle.before + handle.after )
- # Stop any leftover container
- main.Mininet1.dockerStop( dockerName )
- # Start docker container
- runResponse = main.Mininet1.dockerRun( main.params[ 'MN_DOCKER' ][ 'name' ],
- dockerName,
- main.params[ 'MN_DOCKER' ][ 'args' ] )
- if runResponse == main.FALSE:
- main.log.error( "Docker container already running, aborting test" )
- main.cleanup()
- main.exit()
+ confDir = "/tmp/mn_conf/"
+ # Try to ensure the destination exists
+ main.log.info( "Create folder for network config files" )
+ handle.sendline( "rm -rf %s" % confDir )
+ handle.expect( main.Mininet1.Prompt() )
+ main.log.debug( handle.before + handle.after )
+ handle.sendline( "mkdir -p %s" % confDir )
+ handle.expect( main.Mininet1.Prompt() )
+ main.log.debug( handle.before + handle.after )
+ # Make sure permissions are correct
+ handle.sendline( "sudo chown %s:%s %s" % ( main.Mininet1.user_name, main.Mininet1.user_name, confDir ) )
+ handle.expect( main.Mininet1.Prompt() )
+ handle.sendline( "sudo chmod -R a+rwx %s" % ( confDir ) )
+ handle.expect( main.Mininet1.Prompt() )
+ main.log.debug( handle.before + handle.after )
+ # Start docker container
+ runResponse = main.Mininet1.dockerRun( main.params[ 'MN_DOCKER' ][ 'name' ],
+ dockerName,
+ main.params[ 'MN_DOCKER' ][ 'args' ] )
+ if runResponse == main.FALSE:
+ main.log.error( "Docker container already running, aborting test" )
+ main.cleanup()
+ main.exit()
- main.Mininet1.dockerAttach( dockerName, dockerPrompt='~#' )
- main.Mininet1.sudoRequired = False
+ main.Mininet1.dockerAttach( dockerName, dockerPrompt='~#' )
+ main.Mininet1.sudoRequired = False
- # Fow when we create component handles
- main.Mininet1.mExecDir = "/tmp"
- main.Mininet1.hostHome = main.params[ "MN_DOCKER" ][ "home" ]
- main.Mininet1.hostPrompt = "/home/root#"
+ # For when we create component handles
+ main.Mininet1.mExecDir = "/tmp"
+ main.Mininet1.hostHome = main.params[ "MN_DOCKER" ][ "home" ]
+ main.Mininet1.hostPrompt = "/home/root#"
+
+ # Docker isn't adding the container's hostname to /etc/hosts, so add it
+ # manually so the container can resolve its own hostname
+ main.Mininet1.handle.sendline( "echo \"127.0.0.1 $(cat /etc/hostname)\" >> /etc/hosts" )
+ main.Mininet1.handle.expect( "etc/hosts" )
+ main.Mininet1.handle.expect( main.Mininet1.Prompt() )
+ except Exception as e:
+ main.log.exception( "Error seting up mininet" )
+ man.skipCase( result="FAIL", msg=e )
@staticmethod
def mnDockerTeardown( main ):
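The /etc/hosts append in mnDockerSetup is what lets the container resolve its own hostname: it runs with --net host plus a custom --hostname, so the host's /etc/hosts typically has no entry for that name. An equivalent standalone sketch, with the pexpect handle interaction replaced by plain file I/O (the helper name is hypothetical):

# Hedged sketch of the hostname fix performed inside the container: map the
# container's own hostname to loopback so tools that resolve it don't fail.
import socket

def addSelfToEtcHosts():
    hostname = socket.gethostname()
    with open( "/etc/hosts" ) as f:
        if hostname in f.read():
            return
    with open( "/etc/hosts", "a" ) as f:
        f.write( "127.0.0.1 %s\n" % hostname )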
diff --git a/TestON/tests/dependencies/Cluster.py b/TestON/tests/dependencies/Cluster.py
index ae08de9..e8aa795 100644
--- a/TestON/tests/dependencies/Cluster.py
+++ b/TestON/tests/dependencies/Cluster.py
@@ -704,6 +704,7 @@
result = main.TRUE
cliResults = self.command( "startOnosCli",
args=[ "ipAddress" ],
+ kwargs={ "karafTimeout": "karafTimeout" },
specificDriver=2,
getFrom=getFrom,
funcFromCtrl=True )