Update tests for Aether pods
- Update test for the QA-Pod
- Add SRStaging test for connecting to the Staging pod
- Add functions for a Kubernetes-deployed cluster
- Connect to ONOS nodes through Kubernetes
- Add option to connect to components through jump hosts
- Fix installing ONOS in custom locations
- Invoke python2 instead of python
- If using an ssh agent, also use it for pexpect ssh sessions,
  e.g. Jenkins-initiated tests
Change-Id: I1fc345c8eab60a5b00c17e6ed677a63489a74a19
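
For context, here is a minimal sketch (not part of the change itself) of the ssh invocation shape that the jump-host and ssh-agent bullets refer to, as implemented in clidriver.py below; the user name and host names are hypothetical.

    import os
    import pexpect

    def spawn_ssh( user, host, jump_user=None, jump_host=None ):
        # -A forwards the local ssh-agent; -J routes the session through a jump host
        options = "-t -X -A -o ServerAliveInterval=120 -o TCPKeepAlive=yes"
        if jump_host:
            options += " -J %s@%s" % ( jump_user, jump_host )
        env = { "TERM": "vt100" }
        # Reuse the caller's ssh-agent, e.g. one provided by a Jenkins job
        agent = os.getenv( 'SSH_AUTH_SOCK' )
        if agent:
            env[ 'SSH_AUTH_SOCK' ] = agent
        return pexpect.spawn( "ssh %s %s@%s /bin/bash -l" % ( options, user, host ),
                              env=env, maxread=1000000, timeout=60 )

    # Hypothetical usage:
    # handle = spawn_ssh( "sdn", "10.192.1.10", jump_user="jenkins", jump_host="jump.example.org" )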
diff --git a/TestON/bin/cli.py b/TestON/bin/cli.py
index 74e7603..b03fce7 100755
--- a/TestON/bin/cli.py
+++ b/TestON/bin/cli.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python2
'''
Created on 20-Dec-2012
Copyright 2012 Open Networking Foundation
diff --git a/TestON/core/teston.py b/TestON/core/teston.py
index 6775856..647fa81 100644
--- a/TestON/core/teston.py
+++ b/TestON/core/teston.py
@@ -175,6 +175,9 @@
driver_options[ 'name' ] = component
driverName = self.componentDictionary[ component ][ 'type' ]
driver_options[ 'type' ] = driverName
+ driver_home = self.componentDictionary[ component ].get( 'home' )
+ if driver_home:
+ driver_options[ 'home' ] = driver_home
classPath = self.getDriverPath( driverName.lower() )
driverModule = importlib.import_module( classPath )
diff --git a/TestON/drivers/common/cli/emulator/scapyclidriver.py b/TestON/drivers/common/cli/emulator/scapyclidriver.py
index d340d5c..79a1956 100644
--- a/TestON/drivers/common/cli/emulator/scapyclidriver.py
+++ b/TestON/drivers/common/cli/emulator/scapyclidriver.py
@@ -40,12 +40,14 @@
super( ScapyCliDriver, self ).__init__()
self.handle = self
self.name = None
- self.home = None
+ self.home = "~/"
self.wrapped = sys.modules[ __name__ ]
self.flag = 0
# TODO: Refactor driver to use these everywhere
self.hostPrompt = "\$"
self.scapyPrompt = ">>>"
+ self.sudoRequired = True
+ self.ifaceName = None
def connect( self, **connectargs ):
"""
@@ -54,9 +56,17 @@
try:
for key in connectargs:
vars( self )[ key ] = connectargs[ key ]
- self.home = self.options[ 'home' ] if 'home' in self.options.keys() else "~/"
- self.name = self.options[ 'name' ]
- self.ifaceName = self.options[ 'ifaceName' ] if 'ifaceName' in self.options.keys() else self.name + "-eth0"
+ for key in self.options:
+ if key == "home":
+ self.home = self.options[ key ]
+ elif key == "name":
+ self.name = self.options[ key ]
+ elif key == "sudo_required":
+ self.sudoRequired = False if self.options[ key ] == "false" else True
+ elif key == "ifaceName":
+ self.ifaceName = self.options[ key ]
+ if self.ifaceName is None:
+ self.ifaceName = self.name + "-eth0"
# Parse route config
self.routes = []
@@ -151,8 +161,21 @@
try:
main.log.debug( self.name + ": Starting scapy" )
- self.handle.sendline( "sudo scapy" )
- self.handle.expect( self.scapyPrompt )
+ if self.sudoRequired:
+ self.handle.sendline( "sudo scapy" )
+ else:
+ self.handle.sendline( "scapy" )
+ i = self.handle.expect( [ "not found", "password for", self.scapyPrompt ] )
+ if i == 1:
+ main.log.debug( "Sudo asking for password" )
+ self.handle.sendline( self.pwd )
+ i = self.handle.expect( [ "not found", self.scapyPrompt ] )
+ if i == 0:
+ output = self.handle.before + self.handle.after
+ self.handle.expect( self.prompt )
+ output += self.handle.before + self.handle.after
+ main.log.debug( self.name + ": Scapy not installed, aborting test. \n" + output )
+ main.cleanAndExit()
self.handle.sendline( "conf.color_theme = NoTheme()" )
self.handle.expect( self.scapyPrompt )
response = self.cleanOutput( self.handle.before )
@@ -1023,6 +1046,9 @@
if gateway is None:
main.log.error( self.name + ": Gateway is None, cannot set route" )
return main.FALSE
+ if network is None or "None" in network:
+ main.log.error( self.name + ": Network is None, cannot set route" )
+ return main.FALSE
try:
cmdStr = 'conf.route.add( net="%s", gw="%s"' % ( network, gateway )
if interface:
diff --git a/TestON/drivers/common/cli/hostdriver.py b/TestON/drivers/common/cli/hostdriver.py
index e14a38d..6175801 100644
--- a/TestON/drivers/common/cli/hostdriver.py
+++ b/TestON/drivers/common/cli/hostdriver.py
@@ -228,14 +228,12 @@
self.handle.sendline( command )
i = self.handle.expect( [ self.prompt, pexpect.TIMEOUT ],
timeout=wait + 5 )
+ response = self.handle.before
if i == 1:
main.log.error(
self.name +
": timeout when waiting for response" )
main.log.error( self.name + ": response: " + str( self.handle.before ) )
- self.handle.sendline( "" )
- self.handle.expect( self.prompt )
- response = self.handle.before
if re.search( ',\s0\%\spacket\sloss', response ):
main.log.info( self.name + ": no packets lost, host is reachable" )
return main.TRUE
diff --git a/TestON/drivers/common/cli/networkdriver.py b/TestON/drivers/common/cli/networkdriver.py
index 0f7b699..78f6464 100755
--- a/TestON/drivers/common/cli/networkdriver.py
+++ b/TestON/drivers/common/cli/networkdriver.py
@@ -509,7 +509,7 @@
cannot reach each other"""
hostComponentList = []
for hostName in hostList:
- hostComponent = self.hosts[ hostName ]
+ hostComponent = self.hosts[ str( hostName ) ]
if hostComponent:
hostComponentList.append( hostComponent )
try:
@@ -611,7 +611,7 @@
pingResponse += str( str( srcHost.shortName ) + " -> " )
for dstHost in dstComponentList:
failedPings = 0
- dstIP = dstHost.ip
+ dstIP = dstHost.ip_address
assert dstIP, "Not able to get IP address of host {}".format( dstHost )
for iface in srcHost.interfaces:
# FIXME This only works if one iface name is configured
diff --git a/TestON/drivers/common/cli/onosclidriver.py b/TestON/drivers/common/cli/onosclidriver.py
index b0bf7cb..80dfe7c 100755
--- a/TestON/drivers/common/cli/onosclidriver.py
+++ b/TestON/drivers/common/cli/onosclidriver.py
@@ -1,5 +1,5 @@
#!/usr/bin/env python
-
+# -*- coding: utf-8 -*-
"""
OCT 13 2014
Copyright 2014 Open Networking Foundation (ONF)
@@ -58,7 +58,9 @@
self.handle = None
self.karafUser = None
self.karafPass = None
+ self.karafPort = None
self.karafTimeout = None
+ self.address = None
self.dockerPrompt = None
self.graph = Graph()
@@ -71,7 +73,6 @@
try:
for key in connectargs:
vars( self )[ key ] = connectargs[ key ]
- self.karafPrompt = self.user_name + "@root >"
self.home = "~/onos"
for key in self.options:
if key == "home":
@@ -84,12 +85,17 @@
self.dockerPrompt = self.options[ key ]
elif key == "karaf_timeout":
self.karafTimeout = self.options[ key ]
+ elif key == "karaf_port":
+ self.karafPort = self.options[ key ]
self.home = self.checkOptions( self.home, "~/onos" )
self.karafUser = self.checkOptions( self.karafUser, self.user_name )
self.karafPass = self.checkOptions( self.karafPass, self.pwd )
+ self.karafPort = self.checkOptions( self.karafPort, 8101 )
self.dockerPrompt = self.checkOptions( self.dockerPrompt, "~/onos#" )
self.karafTimeout = self.checkOptions( self.karafTimeout, 7200000 )
+ self.karafPrompt = self.karafUser + "@root >"
+
for key in self.options:
if key == 'onosIp':
self.onosIp = self.options[ 'onosIp' ]
@@ -122,6 +128,7 @@
self.handle.sendline( "cd " + self.home )
self.handle.expect( self.prompt )
if self.handle:
+ self.address = self.ip_address
return self.handle
else:
main.log.info( "NO ONOS HANDLE" )
@@ -282,6 +289,7 @@
and passed to startOnosCli from PARAMS file as str.
"""
self.onosIp = ONOSIp
+ self.address = self.onosIp
try:
# Check if we are already in the cli
self.handle.sendline( "" )
@@ -293,7 +301,7 @@
# Not in CLI so login
if self.inDocker:
# The Docker does not have all the wrapper scripts
- startCliCommand = "ssh -p 8101 -o StrictHostKeyChecking=no %s@localhost" % self.karafUser
+ startCliCommand = "ssh -p %s -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null %s@localhost" % ( self.karafPort, self.karafUser )
elif waitForStart:
# Wait for onos start ( onos-wait-for-start ) and enter onos cli
startCliCommand = "onos-wait-for-start " + str( ONOSIp )
@@ -342,6 +350,7 @@
main.log.error( self.name + ": " + self.handle.before )
main.cleanAndExit()
except Exception:
+ main.log.debug( self.handle.before + str( self.handle.after ) )
main.log.exception( self.name + ": Uncaught exception!" )
main.cleanAndExit()
@@ -4931,6 +4940,28 @@
main.log.exception( self.name + ": Uncaught exception!" )
main.cleanAndExit()
+ def getAddress( self ):
+ """
+ Get the ONOS node address from the cli. This is useful when connecting through
+ a container manager such as Kubernetes. This function also sets self.address
+ to the value reported by ONOS.
+
+ Returns:
+ The node address as a string or
+ None on Error
+ """
+ try:
+ output = self.summary()
+ address = json.loads( output ).get( 'node' )
+ self.address = address
+ return address
+ except TypeError:
+ main.log.exception( self.name + ": Object not as expected" )
+ return None
+ except Exception:
+ main.log.exception( self.name + ": Uncaught exception!" )
+ main.cleanAndExit()
+
def transactionalMapGet( self, keyName ):
"""
CLI command to get the value of a key in a consistent map using
@@ -5317,6 +5348,39 @@
main.log.exception( self.name + ": Uncaught exception!" )
main.cleanAndExit()
+ def logList( self, saveValues=True ):
+ """
+ Gets the current karaf log levels and optionally saves them on the component.
+ Returns a dict mapping logger names to their levels;
+ exits the test on error.
+ """
+ try:
+ self.handle.sendline( "log:list" )
+ self.handle.expect( self.karafPrompt )
+
+ response = self.handle.before
+ logLevels = {}
+ for line in response.splitlines():
+ parsed = line.split('│')
+ logger = parsed[0].strip()
+ if len( parsed ) != 2 or 'Level' in parsed[1] or logger.startswith( '─' ):
+ continue
+ level = parsed[1].strip()
+ logLevels[ logger ] = level
+ if saveValues:
+ self.logLevels = logLevels
+ return logLevels
+ except pexpect.TIMEOUT:
+ main.log.exception( self.name + ": TIMEOUT exception found" )
+ main.cleanAndExit()
+ except pexpect.EOF:
+ main.log.error( self.name + ": EOF exception found" )
+ main.log.error( self.name + ": " + self.handle.before )
+ main.cleanAndExit()
+ except Exception:
+ main.log.exception( self.name + ": Uncaught exception!" )
+ main.cleanAndExit()
+
def getGraphDict( self, timeout=60, includeHost=False ):
"""
Return a dictionary which describes the latest network topology data as a
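
As an aside, a minimal sketch of the log:list table parsing used by logList() above, fed with a hand-written sample of karaf's box-drawing output (presumably why the utf-8 coding declaration was added at the top of this file); the logger names are only illustrative.

    # -*- coding: utf-8 -*-
    def parse_log_levels( response ):
        levels = {}
        for line in response.splitlines():
            parsed = line.split( '│' )  # karaf separates columns with U+2502
            logger = parsed[ 0 ].strip()
            if len( parsed ) != 2 or 'Level' in parsed[ 1 ] or logger.startswith( '─' ):
                continue  # skip the header and separator rows
            levels[ logger ] = parsed[ 1 ].strip()
        return levels

    sample = ( "Logger │ Level\n"
               "───────┼──────\n"
               "ROOT │ INFO\n"
               "org.onosproject.segmentrouting │ DEBUG\n" )
    # parse_log_levels( sample ) -> { 'ROOT': 'INFO', 'org.onosproject.segmentrouting': 'DEBUG' }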
diff --git a/TestON/drivers/common/cli/onosclusterdriver.py b/TestON/drivers/common/cli/onosclusterdriver.py
index 11c44ae..d10b17f 100755
--- a/TestON/drivers/common/cli/onosclusterdriver.py
+++ b/TestON/drivers/common/cli/onosclusterdriver.py
@@ -68,7 +68,7 @@
raise AttributeError( "Could not find the attribute %s in %r or it's component handles" % ( name, self ) )
def __init__( self, name, ipAddress, CLI=None, REST=None, Bench=None, pos=None,
- userName=None, server=None, dockerPrompt=None ):
+ userName=None, server=None, k8s=None, dockerPrompt=None ):
# TODO: validate these arguments
self.name = str( name )
self.ipAddress = ipAddress
@@ -80,6 +80,7 @@
self.ip_address = ipAddress
self.user_name = userName
self.server = server
+ self.k8s = k8s
self.dockerPrompt = dockerPrompt
class OnosClusterDriver( CLI ):
@@ -94,6 +95,7 @@
self.useDocker = False
self.dockerPrompt = None
self.maxNodes = None
+ self.kubeConfig = None
self.nodes = []
super( OnosClusterDriver, self ).__init__()
@@ -129,6 +131,8 @@
elif key == "nodes":
# Maximum number of ONOS nodes to run, if there is any
self.maxNodes = self.options[ key ]
+ elif key == "kubeConfig":
+ self.kubeConfig = self.options[ key ]
self.home = self.checkOptions( self.home, "~/onos" )
self.karafUser = self.checkOptions( self.karafUser, self.user_name )
@@ -139,43 +143,45 @@
self.useDocker = self.checkOptions( self.useDocker, False )
self.dockerPrompt = self.checkOptions( self.dockerPrompt, "~/onos#" )
self.maxNodes = int( self.checkOptions( self.maxNodes, 100 ) )
+ self.kubeConfig = self.checkOptions( self.kubeConfig, None )
self.name = self.options[ 'name' ]
- # Grabs all OC environment variables based on max number of nodes
- # TODO: Also support giving an ip range as a compononet option
- self.onosIps = {} # Dictionary of all possible ONOS ip
- try:
- if self.maxNodes:
- for i in range( self.maxNodes ):
- envString = "OC" + str( i + 1 )
- # If there is no more OC# then break the loop
- if os.getenv( envString ):
- self.onosIps[ envString ] = os.getenv( envString )
+ if not self.kubeConfig:
+ # Grabs all OC environment variables based on max number of nodes
+ # TODO: Also support giving an ip range as a compononet option
+ self.onosIps = {} # Dictionary of all possible ONOS ip
+
+ try:
+ if self.maxNodes:
+ for i in range( self.maxNodes ):
+ envString = "OC" + str( i + 1 )
+ # If there is no more OC# then break the loop
+ if os.getenv( envString ):
+ self.onosIps[ envString ] = os.getenv( envString )
+ else:
+ self.maxNodes = len( self.onosIps )
+ main.log.info( self.name +
+ ": Created cluster data with " +
+ str( self.maxNodes ) +
+ " maximum number" +
+ " of nodes" )
+ break
+
+ if not self.onosIps:
+ main.log.info( "Could not read any environment variable"
+ + " please load a cell file with all" +
+ " onos IP" )
+ self.maxNodes = None
else:
- self.maxNodes = len( self.onosIps )
- main.log.info( self.name +
- ": Created cluster data with " +
- str( self.maxNodes ) +
- " maximum number" +
- " of nodes" )
- break
-
- if not self.onosIps:
- main.log.info( "Could not read any environment variable"
- + " please load a cell file with all" +
- " onos IP" )
- self.maxNodes = None
- else:
- main.log.info( self.name + ": Found " +
- str( self.onosIps.values() ) +
- " ONOS IPs" )
- except KeyError:
- main.log.info( "Invalid environment variable" )
- except Exception as inst:
- main.log.error( "Uncaught exception: " + str( inst ) )
-
+ main.log.info( self.name + ": Found " +
+ str( self.onosIps.values() ) +
+ " ONOS IPs" )
+ except KeyError:
+ main.log.info( "Invalid environment variable" )
+ except Exception as inst:
+ main.log.error( "Uncaught exception: " + str( inst ) )
try:
if os.getenv( str( self.ip_address ) ) is not None:
self.ip_address = os.getenv( str( self.ip_address ) )
@@ -200,7 +206,41 @@
if self.handle:
self.handle.sendline( "cd " + self.home )
self.handle.expect( "\$" )
+ if self.kubeConfig:
+ # Try to get # of onos nodes using given kubernetes configuration
+ names = self.kubectlGetPodNames( self.kubeConfig,
+ main.params[ 'kubernetes' ][ 'namespace' ],
+ main.params[ 'kubernetes' ][ 'appName' ] )
+ self.podNames = names
+ self.onosIps = {} # Dictionary of all possible ONOS ip
+ for i in range( 1, len( names ) + 1 ):
+ self.onosIps[ 'OC%i' % i ] = self.ip_address
+ self.maxNodes = len( names )
self.createComponents( prefix=prefix )
+ if self.kubeConfig:
+ # Create Port Forwarding sessions for each controller
+ for node in self.nodes:
+ kubectl = node.k8s
+ index = self.nodes.index( node )
+ # Store each pod name in the k8s component
+ kubectl.podName = self.podNames[ index ]
+ # Setup port-forwarding and save the local port
+ guiPort = 8181
+ cliPort = 8101
+ portsList = ""
+ for port in [ guiPort, cliPort ]:
+ localPort = port + index + 1
+ portsList += "%s:%s " % ( localPort, port )
+ if port == cliPort:
+ node.CLI.karafPort = localPort
+ main.log.info( "Setting up port forward for pod %s: [ %s ]" % ( self.podNames[ index ], portsList ) )
+ pf = kubectl.kubectlPortForward( self.podNames[ index ],
+ portsList,
+ kubectl.kubeConfig,
+ main.params[ 'kubernetes' ][ 'namespace' ] )
+ if not pf:
+ main.log.error( "Failed to create port forwarding" )
+ return main.FALSE
return self.handle
else:
main.log.info( "Failed to create ONOS handle" )
@@ -243,6 +283,8 @@
clihost = main.componentDictionary[ name ][ 'COMPONENTS' ].get( "diff_clihost", "" )
if clihost == "True":
main.componentDictionary[ name ][ 'host' ] = host
+ home = main.componentDictionary[name]['COMPONENTS'].get( "onos_home", None )
+ main.componentDictionary[name]['home'] = self.checkOptions( home, None )
main.componentDictionary[name]['type'] = "OnosCliDriver"
main.componentDictionary[name]['connect_order'] = str( int( main.componentDictionary[name]['connect_order'] ) + 1 )
@@ -279,7 +321,6 @@
Parse the cluster options to create an ONOS cli component with the given name
"""
main.componentDictionary[name] = main.componentDictionary[self.name].copy()
- main.log.debug( main.componentDictionary[name] )
user = main.componentDictionary[name]['COMPONENTS'].get( "web_user", "onos" )
main.componentDictionary[name]['user'] = self.checkOptions( user, "onos" )
password = main.componentDictionary[name]['COMPONENTS'].get( "web_pass", "rocks" )
@@ -289,7 +330,6 @@
main.componentDictionary[name]['port'] = self.checkOptions( port, "8181" )
main.componentDictionary[name]['type'] = "OnosRestDriver"
main.componentDictionary[name]['connect_order'] = str( int( main.componentDictionary[name]['connect_order'] ) + 1 )
- main.log.debug( main.componentDictionary[name] )
def createRestComponent( self, name, ipAddress ):
"""
@@ -328,7 +368,6 @@
home = main.componentDictionary[name]['COMPONENTS'].get( "onos_home", None )
main.componentDictionary[name]['home'] = self.checkOptions( home, None )
main.componentDictionary[name]['connect_order'] = str( int( main.componentDictionary[name]['connect_order'] ) + 1 )
- main.log.debug( main.componentDictionary[name] )
def createBenchComponent( self, name ):
"""
@@ -372,10 +411,11 @@
home = main.componentDictionary[name]['COMPONENTS'].get( "onos_home", None )
main.componentDictionary[name]['home'] = self.checkOptions( home, None )
# TODO: for now we use karaf user name and password also for logging to the onos nodes
- main.componentDictionary[name]['user'] = self.karafUser
- main.componentDictionary[name]['password'] = self.karafPass
+ # FIXME: We shouldn't use karaf* for this, what we want is another set of variables to
+ # login to a shell on the server ONOS is running on
+ #main.componentDictionary[name]['user'] = self.karafUser
+ #main.componentDictionary[name]['password'] = self.karafPass
main.componentDictionary[name]['connect_order'] = str( int( main.componentDictionary[name]['connect_order'] ) + 1 )
- main.log.debug( main.componentDictionary[name] )
def createServerComponent( self, name, ipAddress ):
"""
@@ -416,11 +456,14 @@
restPrefix = prefix + "rest"
benchPrefix = prefix + "bench"
serverPrefix = prefix + "server"
+ k8sPrefix = prefix + "k8s"
for i in xrange( 1, self.maxNodes + 1 ):
cliName = cliPrefix + str( i )
restName = restPrefix + str( i )
benchName = benchPrefix + str( i )
serverName = serverPrefix + str( i )
+ if self.kubeConfig:
+ k8sName = k8sPrefix + str( i )
# Unfortunately this means we need to have a cell set beofre running TestON,
# Even if it is just the entire possible cluster size
@@ -430,6 +473,10 @@
rest = self.createRestComponent( restName, ip )
bench = self.createBenchComponent( benchName )
server = self.createServerComponent( serverName, ip ) if createServer else None
+ k8s = self.createServerComponent( k8sName, ip ) if self.kubeConfig else None
+ if self.kubeConfig:
+ k8s.kubeConfig = self.kubeConfig
+ k8s.podName = None
self.nodes.append( Controller( prefix + str( i ), ip, cli, rest, bench, i - 1,
- self.user_name, server=server,
+ self.user_name, server=server, k8s=k8s,
dockerPrompt=self.dockerPrompt ) )
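
For reference, a small sketch of the local-port numbering the cluster driver sets up above: node i forwards local ports 8181+i+1 and 8101+i+1 to the pod's GUI and CLI ports. The pod names are illustrative.

    GUI_PORT = 8181
    CLI_PORT = 8101

    def port_forward_args( pod_names ):
        # Map each pod to the "local:remote" pairs passed to kubectl port-forward
        mappings = {}
        for index, pod in enumerate( pod_names ):
            ports = ""
            for port in [ GUI_PORT, CLI_PORT ]:
                ports += "%s:%s " % ( port + index + 1, port )
            mappings[ pod ] = ports.strip()
        return mappings

    # port_forward_args( [ "onos-tost-onos-classic-0", "onos-tost-onos-classic-1" ] ) ->
    # { 'onos-tost-onos-classic-0': '8182:8181 8102:8101',
    #   'onos-tost-onos-classic-1': '8183:8181 8103:8101' }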
diff --git a/TestON/drivers/common/cli/onosdriver.py b/TestON/drivers/common/cli/onosdriver.py
index e9cca52..78b8477 100755
--- a/TestON/drivers/common/cli/onosdriver.py
+++ b/TestON/drivers/common/cli/onosdriver.py
@@ -73,6 +73,10 @@
self.webUser = self.options[ key ]
elif key == "web_pass":
self.webPass = self.options[ key ]
+ elif key == "karaf_username":
+ self.karafUser = self.options[ key ]
+ elif key == "karaf_password":
+ self.karafPass = self.options[ key ]
self.home = self.checkOptions( self.home, "~/onos" )
self.maxNodes = self.checkOptions( self.maxNodes, 100 )
@@ -158,6 +162,7 @@
response = main.TRUE
try:
if self.handle:
+ self.preDisconnect()
self.handle.sendline( "" )
self.handle.expect( self.prompt )
self.handle.sendline( "exit" )
@@ -392,7 +397,7 @@
elif i == 4:
# Prompt returned
break
- main.log.debug( output )
+ main.log.debug( self.name + ": " + output )
return ret
except pexpect.TIMEOUT:
main.log.exception( self.name + ": TIMEOUT exception found" )
@@ -446,7 +451,7 @@
elif i == 5:
# Prompt returned
break
- main.log.debug( output )
+ main.log.debug( self.name + ": " + output )
return ret
except pexpect.TIMEOUT:
main.log.exception( self.name + ": TIMEOUT exception found" )
@@ -812,7 +817,6 @@
~/<self.home>/tools/test/cells/
"""
try:
-
# Variable initialization
cellDirectory = self.home + "/tools/test/cells/"
# We want to create the cell file in the dependencies directory
@@ -1008,7 +1012,7 @@
main.log.exception( self.name + ": Uncaught exception!" )
main.cleanAndExit()
- def onosCli( self, ONOSIp, cmdstr, timeout=60 ):
+ def onosCli( self, ONOSIp, cmdstr, cliPort=8101, waitForStart=False, timeout=60 ):
"""
Uses 'onos' command to send various ONOS CLI arguments.
Required:
@@ -1038,17 +1042,17 @@
self.handle.sendline( "" )
self.handle.expect( self.prompt )
- self.handle.sendline( "onos-wait-for-start " + ONOSIp )
- i = self.handle.expect( [ self.prompt, "Password: " ] )
- if i == 1:
- self.handle.sendline( self.pwd )
- self.handle.expect( self.prompt )
-
- self.handle.sendline( "ssh -q -p 8101 -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null %s %s " % ( ONOSIp, cmdstr ) )
+ if waitForStart:
+ self.handle.sendline( "onos-wait-for-start " + ONOSIp )
+ i = self.handle.expect( [ self.prompt, "Password: " ] )
+ if i == 1:
+ self.handle.sendline( self.pwd )
+ self.handle.expect( self.prompt )
+ self.handle.sendline( "ssh -q -p %s -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null %s@%s %s " % ( cliPort, self.karafUser, ONOSIp, cmdstr ) )
i = self.handle.expect( [ self.prompt, "Password: ", pexpect.TIMEOUT ], timeout=timeout )
if i == 1:
- self.handle.sendline( self.pwd )
- i = self.handle.expect( [ self.prompt, pexpect.TIMEOUT ], timeout=timeout )
+ self.handle.sendline( self.karafPass )
+ i = self.handle.expect( [ self.prompt, "Password:", pexpect.TIMEOUT ], timeout=timeout )
if i == 0:
handleBefore = self.handle.before
main.log.info( self.name + ": Command sent successfully" )
@@ -1058,12 +1062,18 @@
returnString = handleBefore
return returnString
elif i == 1:
+ main.log.error( self.name + ": Incorrect password for ONOS cli" )
+ self.handle.send( "\x03" ) # Control-C
+ self.handle.expect( self.prompt )
+ return main.FALSE
+ elif i == 2:
main.log.error( self.name + ": Timeout when sending " + cmdstr )
- main.log.debug( self.handle.before )
+ main.log.debug( self.name + ": " + self.handle.before )
self.handle.send( "\x03" ) # Control-C
self.handle.expect( self.prompt )
return main.FALSE
except pexpect.TIMEOUT:
+ main.log.debug( self.handle.before + str( self.handle.after ) )
main.log.exception( self.name + ": Timeout when sending " + cmdstr )
return main.FALSE
except pexpect.EOF:
@@ -1107,7 +1117,7 @@
elif i == 2:
# timeout
main.log.error( self.name + ": Failed to secure ssh on " + node )
- main.log.debug( self.handle.before )
+ main.log.debug( self.name + ": " + self.handle.before )
except pexpect.EOF:
main.log.error( self.name + ": EOF exception found" )
main.log.error( self.name + ": " + self.handle.before )
@@ -1755,7 +1765,7 @@
main.log.exception( self.name + ": Uncaught exception!" )
main.cleanAndExit()
- def dumpONOSCmd( self, ONOSIp, CMD, destDir, filename, options="", timeout=60 ):
+ def dumpONOSCmd( self, ONOSIp, CMD, destDir, filename, options="", cliPort=8101, timeout=60 ):
"""
Dump Cmd to a desired directory.
For debugging purposes, you may want to use
@@ -1780,7 +1790,7 @@
if destDir[ -1: ] != "/":
destDir += "/"
cmd = CMD + " " + options + " > " + str( destDir ) + str( filename ) + localtime
- return self.onosCli( ONOSIp, cmd, timeout=timeout )
+ return self.onosCli( ONOSIp, cmd, cliPort=cliPort, timeout=timeout )
def cpLogsToDir( self, logToCopy,
destDir, copyFileName="" ):
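
A minimal sketch of the ssh command that onosCli() now composes, assuming the karaf user from the topo file and a port-forwarded CLI port; the address, port, and command are illustrative.

    def karaf_ssh_cmd( onos_ip, cmdstr, cli_port=8101, karaf_user="karaf" ):
        # Run a single ONOS CLI command over ssh, skipping host key checks since
        # forwarded ports and pod addresses are reused across runs
        return ( "ssh -q -p %s -o StrictHostKeyChecking=no "
                 "-o UserKnownHostsFile=/dev/null %s@%s %s" % ( cli_port, karaf_user, onos_ip, cmdstr ) )

    # karaf_ssh_cmd( "127.0.0.1", "summary", cli_port=8102 ) ->
    # "ssh -q -p 8102 -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null karaf@127.0.0.1 summary"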
diff --git a/TestON/drivers/common/clidriver.py b/TestON/drivers/common/clidriver.py
index 3083f44..aa1e9f9 100644
--- a/TestON/drivers/common/clidriver.py
+++ b/TestON/drivers/common/clidriver.py
@@ -22,6 +22,7 @@
"""
import pexpect
import re
+import os
from drivers.component import Component
@@ -47,6 +48,7 @@
It will take user_name ,ip_address and password as arguments<br>
and will return the handle.
"""
+ self.shell = "/bin/bash -l"
for key in connectargs:
vars( self )[ key ] = connectargs[ key ]
self.checkPrompt()
@@ -55,27 +57,24 @@
ssh_newkey = 'Are you sure you want to continue connecting'
refused = "ssh: connect to host " + \
self.ip_address + " port 22: Connection refused"
+ ssh_options = "-t -X -A -o ServerAliveInterval=120 -o TCPKeepAlive=yes"
+ ssh_destination = self.user_name + "@" + self.ip_address
+ envVars = { "TERM": "vt100" }
+ # TODO: Add option to specify which shell/command to use
+ jump_host = main.componentDictionary[ self.name ].get( 'jump_host' )
if self.port:
- self.handle = pexpect.spawn(
- 'ssh -X -p ' +
- self.port +
- ' ' +
- self.user_name +
- '@' +
- self.ip_address +
- ' -o ServerAliveInterval=120 -o TCPKeepAlive=yes',
- env={ "TERM": "vt100" },
- maxread=1000000 )
- else:
- self.handle = pexpect.spawn(
- 'ssh -X ' +
- self.user_name +
- '@' +
- self.ip_address +
- ' -o ServerAliveInterval=120 -o TCPKeepAlive=yes',
- env={ "TERM": "vt100" },
- maxread=1000000,
- timeout=60 )
+ ssh_options += " -p " + self.port
+ if jump_host:
+ jump_host = main.componentDictionary.get( jump_host )
+ ssh_options += " -J %s@%s" % ( jump_host.get( 'user' ), jump_host.get( 'host' ) )
+ ssh_auth = os.getenv('SSH_AUTH_SOCK')
+ if ssh_auth:
+ envVars[ 'SSH_AUTH_SOCK' ] = ssh_auth
+ self.handle = pexpect.spawn(
+ "ssh %s %s %s" % ( ssh_options, ssh_destination, self.shell ),
+ env=envVars,
+ maxread=1000000,
+ timeout=60 )
# set tty window size
self.handle.setwinsize( 24, 250 )
@@ -119,6 +118,7 @@
return main.FALSE
elif i == 2:
main.log.error( self.name + ": Connection timeout" )
+ main.log.debug( self.handle.before )
return main.FALSE
elif i == 3: # timeout
main.log.error(
@@ -126,15 +126,18 @@
self.user_name +
"@" +
self.ip_address )
+ main.log.debug( self.handle.before )
return main.FALSE
elif i == 4:
main.log.error(
"ssh: connect to host " +
self.ip_address +
" port 22: Connection refused" )
+ main.log.debug( self.handle.before )
return main.FALSE
elif i == 6: # Incorrect Password
main.log.error( self.name + ": Incorrect Password" )
+ main.log.debug( self.handle.before )
return main.FALSE
elif i == 7: # Prompt
main.log.info( self.name + ": Password not required logged in" )
@@ -146,6 +149,7 @@
return self.handle
def disconnect( self ):
+ result = self.preDisconnect()
result = super( CLI, self ).disconnect( self )
result = main.TRUE
@@ -373,6 +377,10 @@
while "to" means copy "to" the remote machine from
local machine
"""
+ jump_host = main.componentDictionary[ remoteHost.name ].get( 'jump_host' )
+ if jump_host:
+ jump_host = main.componentDictionary.get( jump_host )
+ options += " -J %s@%s " % ( jump_host.get( 'user' ), jump_host.get( 'host' ) )
return self.secureCopy( remoteHost.user_name,
remoteHost.ip_address,
filePath,
@@ -728,7 +736,7 @@
main.log.exception( self.name + ": Uncaught exception!" )
return main.FALSE
- def dockerRun( self, image, containerName, options="", imageArgs="" ):
+ def dockerRun( self, image, containerName, options="", imageArgs="", background=False ):
"""
Run a docker image
Required Arguments:
@@ -745,6 +753,8 @@
options if options else "",
image,
imageArgs )
+ if background:
+ cmdStr += " &"
main.log.info( self.name + ": sending: " + cmdStr )
self.handle.sendline( cmdStr)
i = self.handle.expect( [ self.prompt,
@@ -927,3 +937,251 @@
except Exception:
main.log.exception( self.name + ": Uncaught exception!" )
return main.FALSE
+
+# TODO: How is this different from exitFromCmd used elsewhere?
+ def exitFromProcess( self ):
+ """
+ Send ctrl-c, which should close and exit the program
+ """
+ try:
+ cmdStr = "\x03"
+ main.log.info( self.name + ": sending: " + repr( cmdStr ) )
+ self.handle.send( cmdStr)
+ i = self.handle.expect( [ self.prompt, pexpect.TIMEOUT ] )
+ if i == 0:
+ return main.TRUE
+ else:
+ main.log.error( self.name + ": Error exiting process" )
+ main.log.debug( self.name + ": " + self.handle.before + str( self.handle.after ) )
+ return main.FALSE
+ except pexpect.EOF:
+ main.log.error( self.name + ": EOF exception found" )
+ main.log.error( self.name + ": " + self.handle.before )
+ return main.FALSE
+ except Exception:
+ main.log.exception( self.name + ": Uncaught exception!" )
+ return main.FALSE
+
+ def preDisconnect( self ):
+ """
+ A stub for a function that will be called before disconnect.
+ This can be set if, for instance, the shell is running a program
+ and needs to exit the program before disconnecting from the component
+ """
+ print "preDisconnect"
+ return main.TRUE
+
+ def kubectlGetPodNames( self, kubeconfig=None, namespace=None, app=None, name=None ):
+ """
+ Use kubectl to get the names of pods
+ Optional Arguments:
+ - kubeconfig: The path to a kubeconfig file
+ - namespace: The namespace to search in
+ - app: Get pods belonging to a specific app
+ - name: Get pods with a specific name label
+ Returns a list containing the names of the pods or
+ main.FALSE on Error
+ """
+
+ try:
+ cmdStr = "kubectl %s %s get pods %s %s --output=jsonpath='{.items..metadata.name}{\"\\n\"}'" % (
+ "--kubeconfig %s" % kubeconfig if kubeconfig else "",
+ "-n %s" % namespace if namespace else "",
+ "-l app=%s" % app if app else "",
+ "-l name=%s" % name if name else "" )
+ main.log.info( self.name + ": sending: " + repr( cmdStr ) )
+ self.handle.sendline( cmdStr )
+ i = self.handle.expect( [ "not found", "error", "The connection to the server", self.prompt ] )
+ if i == 3:
+ output = self.handle.before + self.handle.after
+ names = output.split( '\r\n' )[1].split()
+ return names
+ else:
+ main.log.error( self.name + ": Error executing command" )
+ main.log.debug( self.name + ": " + self.handle.before + str( self.handle.after ) )
+ return main.FALSE
+ except pexpect.EOF:
+ main.log.error( self.name + ": EOF exception found" )
+ main.log.error( self.name + ": " + self.handle.before )
+ return main.FALSE
+ except pexpect.TIMEOUT:
+ main.log.exception( self.name + ": TIMEOUT exception found" )
+ main.log.error( self.name + ": " + self.handle.before )
+ return main.FALSE
+ except Exception:
+ main.log.exception( self.name + ": Uncaught exception!" )
+ return main.FALSE
+
+ def kubectlDescribe( self, describeString, dstPath, kubeconfig=None, namespace=None ):
+ """
+ Use kubectl to describe the given resources and save the output to a file
+ Required Arguments:
+ - describeString: The string passed to the cli. Example: "pods"
+ - dstPath: The location to save the output to
+ Optional Arguments:
+ - kubeconfig: The path to a kubeconfig file
+ - namespace: The namespace to search in
+ Returns main.TRUE or
+ main.FALSE on Error
+ """
+
+ try:
+ cmdStr = "kubectl %s %s describe %s > %s " % (
+ "--kubeconfig %s" % kubeconfig if kubeconfig else "",
+ "-n %s" % namespace if namespace else "",
+ describeString,
+ dstPath )
+ main.log.info( self.name + ": sending: " + repr( cmdStr ) )
+ self.handle.sendline( cmdStr )
+ i = self.handle.expect( [ "not found", "error", "The connection to the server", self.prompt ] )
+ if i == 3:
+ main.log.debug( self.name + ": " + self.handle.before )
+ return main.TRUE
+ else:
+ main.log.error( self.name + ": Error executing command" )
+ main.log.debug( self.name + ": " + self.handle.before + str( self.handle.after ) )
+ return main.FALSE
+ except pexpect.EOF:
+ main.log.error( self.name + ": EOF exception found" )
+ main.log.error( self.name + ": " + self.handle.before )
+ return main.FALSE
+ except pexpect.TIMEOUT:
+ main.log.exception( self.name + ": TIMEOUT exception found" )
+ main.log.error( self.name + ": " + self.handle.before )
+ return main.FALSE
+ except Exception:
+ main.log.exception( self.name + ": Uncaught exception!" )
+ return main.FALSE
+
+ def kubectlPodNodes( self, dstPath=None, kubeconfig=None, namespace=None ):
+ """
+ Use kubectl to list pods and the nodes they are scheduled on
+ Optional Arguments:
+ - dstPath: The location to save the output to
+ - kubeconfig: The path to a kubeconfig file
+ - namespace: The namespace to search in
+ Returns main.TRUE if dstPath is given, else the output of the command or
+ main.FALSE on Error
+ """
+
+ try:
+ cmdStr = "kubectl %s %s get pods -o=custom-columns=NAME:.metadata.name,NODE:.spec.nodeName %s " % (
+ "--kubeconfig %s" % kubeconfig if kubeconfig else "",
+ "-n %s" % namespace if namespace else "",
+ " > %s" % dstPath if dstPath else "" )
+ main.log.info( self.name + ": sending: " + repr( cmdStr ) )
+ self.handle.sendline( cmdStr )
+ i = self.handle.expect( [ "not found", "error", "The connection to the server", self.prompt ] )
+ if i == 3:
+ output = self.handle.before
+ main.log.debug( self.name + ": " + output )
+ return main.TRUE if dstPath else output
+ else:
+ main.log.error( self.name + ": Error executing command" )
+ main.log.debug( self.name + ": " + self.handle.before + str( self.handle.after ) )
+ return main.FALSE
+ except pexpect.EOF:
+ main.log.error( self.name + ": EOF exception found" )
+ main.log.error( self.name + ": " + self.handle.before )
+ return main.FALSE
+ except pexpect.TIMEOUT:
+ main.log.exception( self.name + ": TIMEOUT exception found" )
+ main.log.error( self.name + ": " + self.handle.before )
+ return main.FALSE
+ except Exception:
+ main.log.exception( self.name + ": Uncaught exception!" )
+ return main.FALSE
+
+ def kubectlLogs( self, podName, dstPath, kubeconfig=None, namespace=None, timeout=240 ):
+ """
+ Use kubectl to get the logs from a pod
+ Required Arguments:
+ - podName: The name of the pod to get the logs of
+ - dstPath: The location to save the logs to
+ Optional Arguments:
+ - kubeconfig: The path to a kubeconfig file
+ - namespace: The namespace to search in
+ - timeout: Timeout for command to return. The longer the logs, the longer it will take to fetch them.
+ Returns main.TRUE or
+ main.FALSE on Error
+ """
+
+ try:
+ cmdStr = "kubectl %s %s logs %s > %s " % (
+ "--kubeconfig %s" % kubeconfig if kubeconfig else "",
+ "-n %s" % namespace if namespace else "",
+ podName,
+ dstPath )
+ main.log.info( self.name + ": sending: " + repr( cmdStr ) )
+ self.handle.sendline( cmdStr )
+ i = self.handle.expect( [ "not found", "error", "The connection to the server", self.prompt ], timeout=timeout )
+ if i == 3:
+ main.log.debug( self.name + ": " + self.handle.before )
+ return main.TRUE
+ else:
+ main.log.error( self.name + ": Error executing command" )
+ main.log.debug( self.name + ": " + self.handle.before + str( self.handle.after ) )
+ return main.FALSE
+ except pexpect.EOF:
+ main.log.error( self.name + ": EOF exception found" )
+ main.log.error( self.name + ": " + self.handle.before )
+ return main.FALSE
+ except pexpect.TIMEOUT:
+ main.log.exception( self.name + ": TIMEOUT exception found" )
+ main.log.error( self.name + ": " + self.handle.before )
+ return main.FALSE
+ except Exception:
+ main.log.exception( self.name + ": Uncaught exception!" )
+ return main.FALSE
+
+ def kubectlPortForward( self, podName, portsList, kubeconfig=None, namespace=None ):
+ """
+ Use kubectl to setup port forwarding from the local machine to the kubernetes pod
+
+ Note: This command does not return until the port forwarding session is ended.
+
+ Required Arguments:
+ - podName: The name of the pod as a string
+ - portsList: The list of ports to forward, as a string. see kubectl help for details
+ Optional Arguments:
+ - kubeconfig: The path to a kubeconfig file
+ - namespace: The namespace to search in
+ Returns main.TRUE once forwarding is established or
+ main.FALSE on Error
+ """
+ try:
+ cmdStr = "kubectl %s %s port-forward pod/%s %s" % (
+ "--kubeconfig %s" % kubeconfig if kubeconfig else "",
+ "-n %s" % namespace if namespace else "",
+ podName,
+ portsList )
+ main.log.info( self.name + ": sending: " + repr( cmdStr ) )
+ self.handle.sendline( cmdStr )
+ i = self.handle.expect( [ "not found", "error", "closed/timedout",
+ self.prompt, "The connection to the server", "Forwarding from" ] )
+ # NOTE: This won't clear the buffer entirely, and each time the port forward
+ # is used, another line will be added to the buffer. We need to make
+ # sure we clear the buffer before using this component again.
+
+ if i == 5:
+ # Setup preDisconnect function
+ self.preDisconnect = self.exitFromProcess
+ return main.TRUE
+ else:
+ main.log.error( self.name + ": Error executing command" )
+ main.log.debug( self.name + ": " + self.handle.before + str( self.handle.after ) )
+ return main.FALSE
+ except pexpect.EOF:
+ main.log.error( self.name + ": EOF exception found" )
+ main.log.error( self.name + ": " + self.handle.before )
+ return main.FALSE
+ except pexpect.TIMEOUT:
+ main.log.exception( self.name + ": TIMEOUT exception found" )
+ main.log.error( self.name + ": " + self.handle.before )
+ return main.FALSE
+ except Exception:
+ main.log.exception( self.name + ": Uncaught exception!" )
+ return main.FALSE
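
To illustrate the kubectl helpers above, a hedged sketch of how their command strings are composed from the optional kubeconfig and namespace; the kubeconfig path and namespace below match the SRBridging/SRStaging configs, while the pod name and ports are assumptions.

    def kubectl_cmd( action, kubeconfig=None, namespace=None ):
        # Optional --kubeconfig and -n flags are only added when set, as in the driver
        return "kubectl %s %s %s" % (
            "--kubeconfig %s" % kubeconfig if kubeconfig else "",
            "-n %s" % namespace if namespace else "",
            action )

    # Hypothetical usage mirroring kubectlGetPodNames() and kubectlPortForward():
    # kubectl_cmd( "get pods -l app=onos-tost-onos-classic "
    #              "--output=jsonpath='{.items..metadata.name}{\"\\n\"}'",
    #              kubeconfig="~/.kube/qa-ace-menlo", namespace="tost" )
    # kubectl_cmd( "port-forward pod/onos-tost-onos-classic-0 8102:8101 8182:8181",
    #              kubeconfig="~/.kube/qa-ace-menlo", namespace="tost" )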
diff --git a/TestON/tests/USECASE/SegmentRouting/SRBridging/SRBridging.params b/TestON/tests/USECASE/SegmentRouting/SRBridging/SRBridging.params
index 2deaa95..7021d41 100644
--- a/TestON/tests/USECASE/SegmentRouting/SRBridging/SRBridging.params
+++ b/TestON/tests/USECASE/SegmentRouting/SRBridging/SRBridging.params
@@ -46,8 +46,6 @@
</ONOS_Logging>
<GIT>
- <pull>False</pull>
- <branch>master</branch>
</GIT>
<CTRL>
diff --git a/TestON/tests/USECASE/SegmentRouting/SRBridging/SRBridging.params.tofino b/TestON/tests/USECASE/SegmentRouting/SRBridging/SRBridging.params.tofino
index 88d669b..00666bc 100644
--- a/TestON/tests/USECASE/SegmentRouting/SRBridging/SRBridging.params.tofino
+++ b/TestON/tests/USECASE/SegmentRouting/SRBridging/SRBridging.params.tofino
@@ -2,7 +2,7 @@
<testcases>1</testcases>
<GRAPH>
- <nodeCluster>Fabric</nodeCluster>
+ <nodeCluster>QA-Pod</nodeCluster>
<builds>20</builds>
<jobName>SRBridging-tofino</jobName>
<branch>master</branch>
@@ -18,7 +18,7 @@
<useCommonTopo>True</useCommonTopo>
<useBmv2>True</useBmv2>
<bmv2SwitchType>stratum</bmv2SwitchType>
- <switchPrefix>tofino</switchPrefix>
+ <switchPrefix></switchPrefix>
<stratumRoot>~/stratum</stratumRoot>
<topology>trellis_fabric.py</topology>
<lib>routinglib.py,trellislib.py,stratum.py</lib>
@@ -28,27 +28,32 @@
<persistent_setup>True</persistent_setup>
- <MN_DOCKER>
- <args>--privileged --net host --rm -v topo:/topo -v ~/mininet/custom:/home/root/mininet/custom -v /var/run/openvswitch/:/var/run/openvswitch/ -v /tmp/mn-stratum:/tmp -v /tmp/mn_conf/:/home/root/config --hostname mn-stratum -v /etc/network/interfaces:/etc/network/interfaces -it -d</args>
- <name>trellis_mininet</name>
- <home>/home/root/</home>
- </MN_DOCKER>
- <CLUSTER>
- # Params for onos docker
- <dockerSkipBuild>True</dockerSkipBuild>
- <dockerBuildCmd>make ONOS_VERSION=onos-2.2 DOCKER_TAG=TestON-onos-2.2 onos-build trellis-control-build trellis-t3-build tost-build</dockerBuildCmd> # If using another cmd like make
- <dockerBuildTimeout>1200</dockerBuildTimeout>
- <dockerFilePath>~/tost-onos</dockerFilePath>
- <dockerImageTag>registry.aetherproject.org/tost/tost:master</dockerImageTag>
- <dockerOptions>-d --rm --network host -v ~/.ssh/authorized_keys:/root/.ssh/authorized_keys -v /tmp/cluster.json:/root/onos/config/cluster.json </dockerOptions> # We start the container detached, so the docker component can connect to cli instead of logs
- <atomixImageTag>atomix/atomix:3.1.5</atomixImageTag>
- <atomixOptions>-d --rm --network host -v ~/.ssh/authorized_keys:/root/.ssh/authorized_keys -v /tmp/atomix.json:/opt/atomix/conf/atomix.json </atomixOptions>
- </CLUSTER>
+ <kubernetes>
+ <appName>onos-tost-onos-classic</appName>
+ <namespace>tost</namespace>
+ </kubernetes>
+
+ <PERF>
+ <traffic_host>Host3</traffic_host>
+ <traffic_container>mlabbe/iperf</traffic_container>
+ <traffic_container_arguments>--net=host -v /proc/net/arp:/host/arp --rm</traffic_container_arguments>
+ <traffic_cmd_arguments> -u -b 20M -t 20</traffic_cmd_arguments>
+
+ <pcap_host>Host4</pcap_host>
+ <pcap_container>toendeavour/tshark</pcap_container>
+ <pcap_container_arguments>--cap-add=NET_RAW --cap-add=NET_ADMIN --net=host --rm -v ~/TestON/tshark/:/tshark</pcap_container_arguments>
+ <pcap_cmd_arguments>-t e -F pcap</pcap_cmd_arguments>
+ <pcap_cmd_arguments2>-t e -F pcap</pcap_cmd_arguments2>
+
+ </PERF>
+
<ENV>
<cellName>productionCell</cellName>
- <cellApps>drivers,fpm,lldpprovider,hostprovider,netcfghostprovider,drivers.bmv2,org.opencord.fabric-tofino,pipelines.fabric,org.stratumproject.fabric-tna,drivers.barefoot,segmentrouting,t3</cellApps>
+ <cellApps>drivers,fpm,lldpprovider,hostprovider,netcfghostprovider,drivers.bmv2,pipelines.fabric,org.stratumproject.fabric-tna,drivers.barefoot,segmentrouting,t3</cellApps>
</ENV>
<EXTERNAL_APPS>
@@ -79,7 +84,6 @@
<org.onosproject.drivers.gnoi>DEBUG</org.onosproject.drivers.gnoi>
<org.onosproject.drivers.gmni>DEBUG</org.onosproject.drivers.gmni>
<org.onosproject.drivers.barefoot>DEBUG</org.onosproject.drivers.barefoot>
- <org.opencord.fabric.tofino>DEBUG</org.opencord.fabric.tofino>
<org.onosproject.bmv2>DEBUG</org.onosproject.bmv2>
</ONOS_Logging>
diff --git a/TestON/tests/USECASE/SegmentRouting/SRBridging/SRBridging.topo.0x1.physical b/TestON/tests/USECASE/SegmentRouting/SRBridging/SRBridging.topo.0x1.physical
index d7a2bad..e67e354 100644
--- a/TestON/tests/USECASE/SegmentRouting/SRBridging/SRBridging.topo.0x1.physical
+++ b/TestON/tests/USECASE/SegmentRouting/SRBridging/SRBridging.topo.0x1.physical
@@ -8,12 +8,13 @@
<connect_order>1</connect_order>
<home></home> # defines where onos home is on the build machine. Defaults to "~/onos/" if empty.
<COMPONENTS>
+ <kubeConfig>~/.kube/qa-ace-menlo</kubeConfig> # If set, will attempt to use this file for setting up port-forwarding
<useDocker>True</useDocker> # Whether to use docker for ONOS nodes
- <docker_prompt>~/onos#</docker_prompt>
+ <docker_prompt>\$</docker_prompt>
<cluster_name></cluster_name> # Used as a prefix for cluster components. Defaults to 'ONOS'
<diff_clihost>True</diff_clihost> # if it has different host other than localhost for CLI. True or empty. OC# will be used if True.
- <karaf_username></karaf_username>
- <karaf_password></karaf_password>
+ <karaf_username>karaf</karaf_username>
+ <karaf_password>karaf</karaf_password>
<web_user>sdn</web_user>
<web_pass>rocks</web_pass>
<rest_port></rest_port>
@@ -30,13 +31,14 @@
<type>StratumOSSwitchDriver</type>
<connect_order>2</connect_order>
<COMPONENTS>
+ <prompt>#</prompt>
<shortName>leaf1</shortName>
<port1>1</port1>
<link1>Host1</link1>
<port2>2</port2>
<link2>Host2</link2>
- <onosConfigPath>~/TestON/tests/USECASE/SegmentRouting/SRBridging/dependencies/json/</onosConfigPath>
- <onosConfigFile>tofino-onos-netcfg.json</onosConfigFile>
+ <onosConfigPath></onosConfigPath>
+ <onosConfigFile></onosConfigFile>
</COMPONENTS>
</SwitchLeaf1>
@@ -49,14 +51,14 @@
<COMPONENTS>
<mac>3c:fd:fe:a8:ea:30</mac>
<inband>false</inband>
- <ip>10.0.2.1</ip>
+ <ip>192.168.102.2</ip>
<shortName>h1</shortName>
<port1>0</port1>
<link1>SwitchLeaf1</link1>
<interfaceName>ens6f0</interfaceName>
<routes>
<route1>
- <network>10.0.2.0</network>
+ <network>192.168.102.1</network>
<netmask>24</netmask>
<gw></gw>
<interface></interface>
@@ -74,14 +76,14 @@
<COMPONENTS>
<mac>3c:fd:fe:a8:ea:31</mac>
<inband>false</inband>
- <ip>10.0.2.2</ip>
+ <ip>192.168.102.11</ip>
<shortName>h2</shortName>
<port1>0</port1>
<link1>SwitchLeaf1</link1>
<interfaceName>ens6f1</interfaceName>
<routes>
<route1>
- <network>10.0.2.0</network>
+ <network>192.168.102.1</network>
<netmask>24</netmask>
<gw></gw>
<interface></interface>
diff --git a/TestON/tests/USECASE/SegmentRouting/SRBridging/dependencies/SRBridgingTest.py b/TestON/tests/USECASE/SegmentRouting/SRBridging/dependencies/SRBridgingTest.py
index f88c85a..bfba8df 100644
--- a/TestON/tests/USECASE/SegmentRouting/SRBridging/dependencies/SRBridgingTest.py
+++ b/TestON/tests/USECASE/SegmentRouting/SRBridging/dependencies/SRBridgingTest.py
@@ -65,18 +65,21 @@
main.Cluster.setRunningNode( onosNodes )
run.installOnos( main, skipPackage=skipPackage, cliSleep=5 )
if main.useBmv2:
- switchPrefix = main.params[ 'DEPENDENCY' ].get( 'switchPrefix', "bmv2" )
+ switchPrefix = main.params[ 'DEPENDENCY' ].get( 'switchPrefix', '' )
+ if switchPrefix is None:
+ switchPrefix = ''
# Translate configuration file from OVS-OFDPA to BMv2 driver
translator.bmv2ToOfdpa( main ) # Try to cleanup if switching between switch types
translator.ofdpaToBmv2( main, switchPrefix=switchPrefix )
else:
translator.bmv2ToOfdpa( main )
- suf = main.params.get( 'jsonFileSuffix', None)
- if suf:
- run.loadJson( main, suffix=suf )
- else:
- run.loadJson( main )
- run.loadChart( main )
+ if not main.persistentSetup:
+ suf = main.params.get( 'jsonFileSuffix', None)
+ if suf:
+ run.loadJson( main, suffix=suf )
+ else:
+ run.loadJson( main )
+ run.loadChart( main ) # stores hosts to ping and expected results
if hasattr( main, 'Mininet1' ):
run.mnDockerSetup( main ) # optionally create and setup docker image
@@ -94,15 +97,14 @@
else:
# Run the test with physical devices
- run.connectToPhysicalNetwork( main )
+ run.connectToPhysicalNetwork( main, hostDiscovery=False ) # We don't want to do host discovery in the pod
run.checkFlows( main, minFlowCount=self.topo[ topology ][ 5 if main.useBmv2 else 4 ] * self.topo[ topology ][ 1 ], sleep=5 )
if main.useBmv2:
- switchPrefix = main.params[ 'DEPENDENCY' ].get( 'switchPrefix' )
- if switchPrefix == "tofino":
- leaf_dpid = [ "device:tofino:leaf%d" % ( ls + 1 ) for ls in range( self.topo[ topology ][ 1 ]) ]
- else:
- leaf_dpid = [ "device:bmv2:leaf%d" % ( ls + 1 ) for ls in range( self.topo[ topology ][ 1 ]) ]
+ switchPrefix = main.params[ 'DEPENDENCY' ].get( 'switchPrefix', '' )
+ if switchPrefix:
+ switchPrefix += ':'
+ leaf_dpid = [ "device:%sleaf%d" % ( switchPrefix, ls + 1 ) for ls in range( self.topo[ topology ][ 1 ]) ]
else:
leaf_dpid = [ "of:%016d" % ( ls + 1 ) for ls in range( self.topo[ topology ][ 1 ] ) ]
for dpid in leaf_dpid:
diff --git a/TestON/tests/USECASE/SegmentRouting/SRClusterRestart/dependencies/SRClusterRestartFuncs.py b/TestON/tests/USECASE/SegmentRouting/SRClusterRestart/dependencies/SRClusterRestartFuncs.py
index cead0b4..af86ca8 100644
--- a/TestON/tests/USECASE/SegmentRouting/SRClusterRestart/dependencies/SRClusterRestartFuncs.py
+++ b/TestON/tests/USECASE/SegmentRouting/SRClusterRestart/dependencies/SRClusterRestartFuncs.py
@@ -40,7 +40,8 @@
main.cfgName = Topo
main.Cluster.setRunningNode( numNodes )
run.installOnos( main )
- run.loadJson( main )
+ if not main.persistentSetup:
+ run.loadJson( main )
run.loadChart( main )
if hasattr( main, 'Mininet1' ):
run.startMininet( main, 'cord_fabric.py', args=self.topo[ Topo ][ 2 ] )
diff --git a/TestON/tests/USECASE/SegmentRouting/SRDhcprelay/dependencies/SRDhcprelayTest.py b/TestON/tests/USECASE/SegmentRouting/SRDhcprelay/dependencies/SRDhcprelayTest.py
index d5f89a6..325af80 100644
--- a/TestON/tests/USECASE/SegmentRouting/SRDhcprelay/dependencies/SRDhcprelayTest.py
+++ b/TestON/tests/USECASE/SegmentRouting/SRDhcprelay/dependencies/SRDhcprelayTest.py
@@ -49,11 +49,14 @@
if main.useBmv2:
# Translate configuration file from OVS-OFDPA to BMv2 driver
translator.bmv2ToOfdpa( main ) # Try to cleanup if switching between switch types
- switchPrefix = main.params[ 'DEPENDENCY' ].get( 'switchPrefix', "bmv2" )
+ switchPrefix = main.params[ 'DEPENDENCY' ].get( 'switchPrefix', '' )
+ if switchPrefix is None:
+ switchPrefix = ''
translator.ofdpaToBmv2( main, switchPrefix=switchPrefix )
else:
translator.bmv2ToOfdpa( main )
- run.loadJson( main )
+ if not main.persistentSetup:
+ run.loadJson( main )
run.loadHost( main )
if hasattr( main, 'Mininet1' ):
run.mnDockerSetup( main )
diff --git a/TestON/tests/USECASE/SegmentRouting/SRDynamic/dependencies/SRDynamicFuncs.py b/TestON/tests/USECASE/SegmentRouting/SRDynamic/dependencies/SRDynamicFuncs.py
index 0ca4c91..1e2d0a3 100644
--- a/TestON/tests/USECASE/SegmentRouting/SRDynamic/dependencies/SRDynamicFuncs.py
+++ b/TestON/tests/USECASE/SegmentRouting/SRDynamic/dependencies/SRDynamicFuncs.py
@@ -47,7 +47,8 @@
main.cfgName = Topo
main.Cluster.setRunningNode( numNodes )
run.installOnos( main )
- run.loadJson( main )
+ if not main.persistentSetup:
+ run.loadJson( main )
run.loadChart( main )
run.startMininet( main, 'cord_fabric.py',
args=self.topo[ Topo ][ 2 ] )
diff --git a/TestON/tests/USECASE/SegmentRouting/SRDynamicConf/dependencies/SRDynamicConfTest.py b/TestON/tests/USECASE/SegmentRouting/SRDynamicConf/dependencies/SRDynamicConfTest.py
index 5c8d277..fb44e2e 100644
--- a/TestON/tests/USECASE/SegmentRouting/SRDynamicConf/dependencies/SRDynamicConfTest.py
+++ b/TestON/tests/USECASE/SegmentRouting/SRDynamicConf/dependencies/SRDynamicConfTest.py
@@ -75,11 +75,14 @@
if main.useBmv2:
# Translate configuration file from OVS-OFDPA to BMv2 driver
translator.bmv2ToOfdpa( main ) # Try to cleanup if switching between switch types
- switchPrefix = main.params[ 'DEPENDENCY' ].get( 'switchPrefix', "bmv2" )
+ switchPrefix = main.params[ 'DEPENDENCY' ].get( 'switchPrefix', '' )
+ if switchPrefix is None:
+ switchPrefix = ''
translator.ofdpaToBmv2( main, switchPrefix=switchPrefix )
else:
translator.bmv2ToOfdpa( main )
- run.loadJson( main )
+ if not main.persistentSetup:
+ run.loadJson( main )
run.loadChart( main )
# Provide topology-specific interface configuration
diff --git a/TestON/tests/USECASE/SegmentRouting/SRHighAvailability/dependencies/SRHAFuncs.py b/TestON/tests/USECASE/SegmentRouting/SRHighAvailability/dependencies/SRHAFuncs.py
index 561ebfc..5f71d28 100644
--- a/TestON/tests/USECASE/SegmentRouting/SRHighAvailability/dependencies/SRHAFuncs.py
+++ b/TestON/tests/USECASE/SegmentRouting/SRHighAvailability/dependencies/SRHAFuncs.py
@@ -44,11 +44,13 @@
main.case( description )
run.config( main, Topo )
run.installOnos( main )
- run.loadJson( main )
+ if not main.persistentSetup:
+ run.loadJson( main )
run.loadChart( main )
run.startMininet( main, 'cord_fabric.py', args=self.topo[ Topo ][ 2 ] )
- # xconnects need to be loaded after topology
- run.loadXconnects( main )
+ if not main.persistentSetup:
+ # xconnects need to be loaded after topology
+ run.loadXconnects( main )
# pre-configured routing and bridging test
run.checkFlows( main, minFlowCount=minFlow )
run.pingAll( main )
diff --git a/TestON/tests/USECASE/SegmentRouting/SRLinkFailure/dependencies/SRLinkFailFuncs.py b/TestON/tests/USECASE/SegmentRouting/SRLinkFailure/dependencies/SRLinkFailFuncs.py
index 4664c9e..ea55f04 100644
--- a/TestON/tests/USECASE/SegmentRouting/SRLinkFailure/dependencies/SRLinkFailFuncs.py
+++ b/TestON/tests/USECASE/SegmentRouting/SRLinkFailure/dependencies/SRLinkFailFuncs.py
@@ -61,10 +61,11 @@
else:
translator.bmv2ToOfdpa( main )
translator.bmv2ToOfdpa( main, cfgFile=xconnectFile )
- if suf:
- run.loadJson( main, suffix=suf )
- else:
- run.loadJson( main )
+ if not main.persistentSetup:
+ if suf:
+ run.loadJson( main, suffix=suf )
+ else:
+ run.loadJson( main )
run.loadChart( main )
if hasattr( main, 'Mininet1' ):
run.mnDockerSetup( main ) # optionally create and setup docker image
diff --git a/TestON/tests/USECASE/SegmentRouting/SRMulticast/dependencies/SRMulticastTest.py b/TestON/tests/USECASE/SegmentRouting/SRMulticast/dependencies/SRMulticastTest.py
index af9250c..4dfcd53 100644
--- a/TestON/tests/USECASE/SegmentRouting/SRMulticast/dependencies/SRMulticastTest.py
+++ b/TestON/tests/USECASE/SegmentRouting/SRMulticast/dependencies/SRMulticastTest.py
@@ -44,11 +44,14 @@
if main.useBmv2:
# Translate configuration file from OVS-OFDPA to BMv2 driver
translator.bmv2ToOfdpa( main ) # Try to cleanup if switching between switch types
- switchPrefix = main.params[ 'DEPENDENCY' ].get( 'switchPrefix', "bmv2" )
+ switchPrefix = main.params[ 'DEPENDENCY' ].get( 'switchPrefix', '' )
+ if switchPrefix is None:
+ switchPrefix = ''
translator.ofdpaToBmv2( main, switchPrefix=switchPrefix )
else:
translator.bmv2ToOfdpa( main )
- lib.loadJson( main )
+ if not main.persistentSetup:
+ lib.loadJson( main )
time.sleep( float( main.params[ "timers" ][ "loadNetcfgSleep" ] ) )
main.cfgName = "common" if hasattr( main, "Mininet1" ) else main.params[ "DEPENDENCY" ][ "confName" ]
lib.loadMulticastConfig( main )
diff --git a/TestON/tests/USECASE/SegmentRouting/SROnosFailure/dependencies/SROnosFailureFuncs.py b/TestON/tests/USECASE/SegmentRouting/SROnosFailure/dependencies/SROnosFailureFuncs.py
index 96bf84d..f374c47 100644
--- a/TestON/tests/USECASE/SegmentRouting/SROnosFailure/dependencies/SROnosFailureFuncs.py
+++ b/TestON/tests/USECASE/SegmentRouting/SROnosFailure/dependencies/SROnosFailureFuncs.py
@@ -55,10 +55,11 @@
else:
translator.bmv2ToOfdpa( main )
translator.bmv2ToOfdpa( main, cfgFile=xconnectFile )
- if suf:
- run.loadJson( main, suffix=suf )
- else:
- run.loadJson( main )
+ if not main.persistentSetup:
+ if suf:
+ run.loadJson( main, suffix=suf )
+ else:
+ run.loadJson( main )
run.loadChart( main )
if hasattr( main, 'Mininet1' ):
run.mnDockerSetup( main ) # optionally create and setup docker image
@@ -74,8 +75,9 @@
# Run the test with physical devices
# TODO: connect TestON to the physical network
pass
- # xconnects need to be loaded after topology
- run.loadXconnects( main )
+ if not main.persistentSetup:
+ # xconnects need to be loaded after topology
+ run.loadXconnects( main )
switches = self.topo[ Topo ][ 0 ] + self.topo[ Topo ][ 1 ]
links = ( self.topo[ Topo ][ 0 ] * self.topo[ Topo ][ 1 ] ) * 2
# pre-configured routing and bridging test
diff --git a/TestON/tests/USECASE/SegmentRouting/SRRouting/dependencies/SRRoutingTest.py b/TestON/tests/USECASE/SegmentRouting/SRRouting/dependencies/SRRoutingTest.py
index 9c1b8c1..1b544dc 100644
--- a/TestON/tests/USECASE/SegmentRouting/SRRouting/dependencies/SRRoutingTest.py
+++ b/TestON/tests/USECASE/SegmentRouting/SRRouting/dependencies/SRRoutingTest.py
@@ -66,11 +66,14 @@
if main.useBmv2:
# Translate configuration file from OVS-OFDPA to BMv2 driver
translator.bmv2ToOfdpa( main ) # Try to cleanup if switching between switch types
- switchPrefix = main.params[ 'DEPENDENCY' ].get( 'switchPrefix', "bmv2" )
+ switchPrefix = main.params[ 'DEPENDENCY' ].get( 'switchPrefix', '' )
+ if switchPrefix is None:
+ switchPrefix = ''
translator.ofdpaToBmv2( main, switchPrefix=switchPrefix )
else:
translator.bmv2ToOfdpa( main )
- lib.loadJson( main )
+ if not main.persistentSetup:
+ lib.loadJson( main )
main.log.debug( "sleeping %i seconds" % float( main.params[ 'timers' ][ 'loadNetcfgSleep' ] ) )
time.sleep( float( main.params[ 'timers' ][ 'loadNetcfgSleep' ] ) )
lib.loadHost( main )
diff --git a/TestON/tests/USECASE/SegmentRouting/SRSanity/dependencies/SRSanityFuncs.py b/TestON/tests/USECASE/SegmentRouting/SRSanity/dependencies/SRSanityFuncs.py
index 9115ff0..f079be4 100644
--- a/TestON/tests/USECASE/SegmentRouting/SRSanity/dependencies/SRSanityFuncs.py
+++ b/TestON/tests/USECASE/SegmentRouting/SRSanity/dependencies/SRSanityFuncs.py
@@ -41,11 +41,13 @@
main.cfgName = Topo
main.Cluster.setRunningNode( numNodes )
run.installOnos( main )
- run.loadJson( main )
+ if not main.persistentSetup:
+ run.loadJson( main )
run.loadChart( main )
run.startMininet( main, 'cord_fabric.py', args=self.topo[ Topo ][ 2 ] )
- # xconnects need to be loaded after topology
- run.loadXconnects( main )
+ if not main.persistentSetup:
+ # xconnects need to be loaded after topology
+ run.loadXconnects( main )
# pre-configured routing and bridging test
run.checkFlows( main, minFlowCount=minFlow )
run.pingAll( main )
diff --git a/TestON/tests/USECASE/SegmentRouting/SRStaging/SRStaging.params b/TestON/tests/USECASE/SegmentRouting/SRStaging/SRStaging.params
new file mode 100644
index 0000000..cb37498
--- /dev/null
+++ b/TestON/tests/USECASE/SegmentRouting/SRStaging/SRStaging.params
@@ -0,0 +1,80 @@
+<PARAMS>
+ <testcases>7</testcases>
+
+ <GRAPH>
+ <nodeCluster>staging</nodeCluster>
+ <builds>20</builds>
+ <jobName>SRStaging</jobName>
+ <branch>master</branch>
+ </GRAPH>
+
+ <SCALE>
+ <size>3</size>
+ <max>3</max>
+ </SCALE>
+
+ <DEPENDENCY>
+ <useCommonConf>False</useCommonConf>
+ <useCommonTopo>True</useCommonTopo>
+ <useBmv2>True</useBmv2>
+ <bmv2SwitchType>stratum</bmv2SwitchType>
+ <switchPrefix></switchPrefix>
+ <stratumRoot>~/stratum</stratumRoot>
+ <topology>trellis_fabric.py</topology>
+ <lib>routinglib.py,trellislib.py,stratum.py</lib>
+ </DEPENDENCY>
+
+ <jsonFileSuffix>.hw</jsonFileSuffix>
+
+ <persistent_setup>True</persistent_setup>
+
+ <kubernetes>
+ <appName>onos-tost-onos-classic</appName>
+ <namespace>tost</namespace>
+ </kubernetes>
+
+ <PERF>
+ <traffic_host>Host1 Host2 Host3</traffic_host>
+ <traffic_cmd_arguments> -u -b 20M -t 20</traffic_cmd_arguments>
+
+ <pcap_host>ng40vm</pcap_host>
+ <pcap_cmd_arguments>-t e -F pcap -s 100 </pcap_cmd_arguments>
+
+ </PERF>
+ <ONOS_Logging>
+ <org.onosproject.p4runtime.ctl.client>DEBUG</org.onosproject.p4runtime.ctl.client>
+ <org.onosproject.segmentrouting>DEBUG</org.onosproject.segmentrouting>
+ <org.onosproject.gnmi.ctl>TRACE</org.onosproject.gnmi.ctl>
+ </ONOS_Logging>
+
+
+ <ENV>
+ <cellName>productionCell</cellName>
+ <cellApps>drivers,fpm,lldpprovider,hostprovider,netcfghostprovider,drivers.bmv2,org.opencord.fabric-tofino,pipelines.fabric,org.stratumproject.fabric-tna,drivers.barefoot,segmentrouting,t3</cellApps>
+ </ENV>
+
+ <EXTERNAL_APPS>
+ </EXTERNAL_APPS>
+
+ <CTRL>
+ <port>6653</port>
+ </CTRL>
+
+ <timers>
+ <LinkDiscovery>12</LinkDiscovery>
+ <SwitchDiscovery>12</SwitchDiscovery>
+ </timers>
+
+ <SLEEP>
+ <startup>10</startup>
+ </SLEEP>
+
+ <TOPO>
+ <switchNum>4</switchNum>
+ <linkNum>16</linkNum>
+ </TOPO>
+
+ <ALARM>
+ <minPassPercent>100</minPassPercent>
+ </ALARM>
+</PARAMS>
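The values in SRStaging.params surface in the test as nested entries of main.params. As a rough, hypothetical sketch of how the new parameters are consumed (readStagingParams is only an illustration, not a TestON API; the real parsing happens in TestON core and in the test cases below):

    def readStagingParams( main ):
        # <persistent_setup>True</persistent_setup> arrives as the string "True"
        persistent = main.params.get( 'persistent_setup' ) == 'True'
        # kubernetes section used when collecting pod logs and setting up port-forwarding
        namespace = main.params[ 'kubernetes' ][ 'namespace' ]          # "tost"
        appName = main.params[ 'kubernetes' ][ 'appName' ]              # "onos-tost-onos-classic"
        # PERF hosts are space-separated component names, resolved via getattr( main, name )
        trafficHosts = main.params[ 'PERF' ][ 'traffic_host' ].split()  # ['Host1', 'Host2', 'Host3']
        return persistent, namespace, appName, trafficHosts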
diff --git a/TestON/tests/USECASE/SegmentRouting/SRStaging/SRStaging.py b/TestON/tests/USECASE/SegmentRouting/SRStaging/SRStaging.py
new file mode 100644
index 0000000..124fd50
--- /dev/null
+++ b/TestON/tests/USECASE/SegmentRouting/SRStaging/SRStaging.py
@@ -0,0 +1,91 @@
+class SRStaging:
+ def __init__( self ):
+ self.default = ''
+
+ def CASE1( self, main ):
+ main.case( "Testing connections" )
+ main.persistentSetup = True
+ def CASE7( self, main ):
+ """
+ Tests connectivity between two untagged hosts
+ (Ports are configured as vlan-untagged)
+
+ Sets up 3 ONOS instances
+ Starts a 2x2 leaf-spine topology
+ Pingall
+ """
+ try:
+ from tests.USECASE.SegmentRouting.SRStaging.dependencies.SRStagingTest import SRStagingTest
+ except ImportError:
+ main.log.error( "SRStagingTest not found. Exiting the test" )
+ main.cleanAndExit()
+ try:
+ main.funcs
+ except ( NameError, AttributeError ):
+ main.funcs = SRStagingTest()
+ # Load kubeconfig
+ # Setup ssh tunnel
+ # connect to ONOS CLI
+
+
+ main.funcs.setupTest( main,
+ test_idx=7,
+ topology='2x2staging',
+ onosNodes=3,
+ description="Developing tests on the staging pod" )
+ srcComponentNames = main.params[ 'PERF' ][ 'traffic_host' ].split()
+ srcComponentList = []
+ for name in srcComponentNames:
+ srcComponentList.append( getattr( main, name ) )
+ dstComponent = getattr( main, main.params[ 'PERF' ][ 'pcap_host' ] )
+
+ main.downtimeResults = {}
+
+
+ # TODO: MOVE TO CONFIG FILE
+ device = "device:leaf2"
+ port1 = "268"
+ port2 = "284"
+ port3 = "260"
+ port4 = "276"
+
+ descPrefix = "Upstream_Leaf_Spine_Portstate"
+ # TODO: Move most of this logic into linkDown/linkUp
+ ## First Link Down
+ shortDesc = descPrefix + "-Failure1"
+ longDesc = "%s Failure: Bring down %s/%s" % ( descPrefix, device, port1 )
+ main.funcs.linkDown( device, port1, srcComponentList, dstComponent, shortDesc, longDesc )
+ ## Second Link Down
+ shortDesc = descPrefix + "-Failure2"
+ longDesc = "%s Failure: Bring down %s/%s" % ( descPrefix, device, port2 )
+ main.funcs.linkDown( device, port2, srcComponentList, dstComponent, shortDesc, longDesc )
+ ## First Link Up
+ # TODO Check these are set correctly
+ shortDesc = descPrefix + "-Recovery1"
+ longDesc = "%s Recovery: Bring up %s/%s" % ( descPrefix, device, port1 )
+ main.funcs.linkUp( device, port1, srcComponentList, dstComponent, shortDesc, longDesc )
+ ## Second Link Up
+ shortDesc = descPrefix + "-Recovery2"
+ longDesc = "%s Recovery: Bring up %s/%s" % ( descPrefix, device, port2 )
+ main.funcs.linkUp( device, port2, srcComponentList, dstComponent, shortDesc, longDesc )
+ ## Third Link Down
+ shortDesc = descPrefix + "-Failure3"
+ longDesc = "%s Failure: Bring down %s/%s" % ( descPrefix, device, port3 )
+ main.funcs.linkDown( device, port3, srcComponentList, dstComponent, shortDesc, longDesc )
+ ## Fourth Link Down
+ shortDesc = descPrefix + "-Failure4"
+ longDesc = "%s Failure: Bring down %s/%s" % ( descPrefix, device, port4 )
+ main.funcs.linkDown( device, port4, srcComponentList, dstComponent, shortDesc, longDesc )
+ ## Third Link Up
+ shortDesc = descPrefix + "-Recovery3"
+ longDesc = "%s Recovery: Bring upn %s/%s" % ( descPrefix, device, port3 )
+ main.funcs.linkUp( device, port3, srcComponentList, dstComponent, shortDesc, longDesc )
+ ## Fourth Link Up
+ shortDesc = descPrefix + "-Recovery4"
+ longDesc = "%s Recovery: Bring up %s/%s" % ( descPrefix, device, port4 )
+ main.funcs.linkUp( device, port4, srcComponentList, dstComponent, shortDesc, longDesc )
+
+ main.log.warn( main.downtimeResults )
+ import json
+ main.log.warn( json.dumps( main.downtimeResults, indent=4, sort_keys=True ) )
+ main.funcs.cleanup( main )
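For reference, main.downtimeResults is populated by SRStagingTest.stopCapturing() (added below) as a nested dict keyed by the short event description, with one measured downtime in milliseconds per sender pcap and per sender/receiver pair. A hedged sketch of the shape CASE7 ends up logging (the numbers are made up):

    import json

    # Assumed shape of main.downtimeResults after CASE7; values are illustrative only.
    downtimeResults = {
        "Upstream_Leaf_Spine_Portstate-Failure1": {
            "Host1": 23.4,          # longest gap (ms) seen in Host1's sender pcap
            "Host1-ng40vm": 25.1,   # longest gap (ms) seen at the receiver for Host1's flow
        },
        "Upstream_Leaf_Spine_Portstate-Recovery1": {
            "Host1": 0.8,
            "Host1-ng40vm": 1.2,
        },
    }
    print( json.dumps( downtimeResults, indent=4, sort_keys=True ) )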
diff --git a/TestON/tests/USECASE/SegmentRouting/SRStaging/SRStaging.topo b/TestON/tests/USECASE/SegmentRouting/SRStaging/SRStaging.topo
new file mode 100644
index 0000000..8c25811
--- /dev/null
+++ b/TestON/tests/USECASE/SegmentRouting/SRStaging/SRStaging.topo
@@ -0,0 +1,218 @@
+<TOPOLOGY>
+ <COMPONENT>
+ <ONOScell>
+ <host>localhost</host> # ONOS "bench" machine
+ <user>jenkins</user>
+ <password></password>
+ <type>OnosClusterDriver</type>
+ <connect_order>50</connect_order>
+ <jump_host></jump_host>
+ <home>~/Projects/onos/</home> # defines where onos home is on the build machine. Defaults to "~/onos/" if empty.
+ <COMPONENTS>
+ <kubeConfig>~/.kube/stg-ace-menlo</kubeConfig> # If set, will attempt to use this file for setting up port-forwarding
+ <useDocker>True</useDocker> # Whether to use docker for ONOS nodes
+ <docker_prompt>\$</docker_prompt>
+ <cluster_name></cluster_name> # Used as a prefix for cluster components. Defaults to 'ONOS'
+ <diff_clihost>True</diff_clihost> # if it has different host other than localhost for CLI. True or empty. OC# will be used if True.
+ <karaf_username>karaf</karaf_username>
+ <karaf_password>karaf</karaf_password>
+ <web_user>sdn</web_user>
+ <web_pass>rocks</web_pass>
+ <rest_port></rest_port>
+ <prompt></prompt> # TODO: we technically need a few of these, one per component
+ <onos_home>~/Projects/onos/</onos_home> # defines where onos home is on the target cell machine. Defaults to entry in "home" if empty.
+ <nodes> 3 </nodes> # number of nodes in the cluster
+ </COMPONENTS>
+ </ONOScell>
+
+ <SwitchLeaf1>
+ <host>10.32.4.132</host>
+ <user>root</user>
+ <password>onl</password>
+ <type>StratumOSSwitchDriver</type>
+ <connect_order>12</connect_order>
+ <jump_host>NetworkBench</jump_host>
+ <COMPONENTS>
+ <shortName>leaf1</shortName>
+ <port1></port1>
+ <link1></link1>
+ <port2></port2>
+ <link2></link2>
+ <onosConfigPath></onosConfigPath>
+ <onosConfigFile></onosConfigFile>
+ </COMPONENTS>
+ </SwitchLeaf1>
+
+ <SwitchLeaf2>
+ <host>10.32.4.136</host>
+ <user>root</user>
+ <password>onl</password>
+ <type>StratumOSSwitchDriver</type>
+ <connect_order>13</connect_order>
+ <jump_host>NetworkBench</jump_host>
+ <COMPONENTS>
+ <shortName>leaf2</shortName>
+ <port1></port1>
+ <link1></link1>
+ <port2></port2>
+ <link2></link2>
+ <onosConfigPath></onosConfigPath>
+ <onosConfigFile></onosConfigFile>
+ </COMPONENTS>
+ </SwitchLeaf2>
+
+ <SwitchSpine1>
+ <host>10.32.4.130</host>
+ <user>root</user>
+ <password>onl</password>
+ <type>StratumOSSwitchDriver</type>
+ <connect_order>14</connect_order>
+ <jump_host>NetworkBench</jump_host>
+ <COMPONENTS>
+ <shortName>spine1</shortName>
+ <port1></port1>
+ <link1></link1>
+ <port2></port2>
+ <link2></link2>
+ <onosConfigPath></onosConfigPath>
+ <onosConfigFile></onosConfigFile>
+ </COMPONENTS>
+ </SwitchSpine1>
+
+ <SwitchSpine2>
+ <host>10.32.4.134</host>
+ <user>root</user>
+ <password>onl</password>
+ <type>StratumOSSwitchDriver</type>
+ <connect_order>15</connect_order>
+ <jump_host>NetworkBench</jump_host>
+ <COMPONENTS>
+ <shortName>spine2</shortName>
+ <port1></port1>
+ <link1></link1>
+ <port2></port2>
+ <link2></link2>
+ <onosConfigPath></onosConfigPath>
+ <onosConfigFile></onosConfigFile>
+ </COMPONENTS>
+ </SwitchSpine2>
+
+ <Host1>
+ <host>10.32.4.138</host>
+ <user>jenkins</user>
+ <password></password>
+ <type>HostDriver</type>
+ <connect_order>6</connect_order>
+ <jump_host>NetworkBench</jump_host>
+ <COMPONENTS>
+ <mac></mac>
+ <inband>false</inband>
+ <ip>10.32.5.161</ip>
+ <shortName>h1</shortName>
+ <port1></port1>
+ <link1></link1>
+ <interfaceName>enp175s0f0</interfaceName>
+ <routes>
+ <route1>
+ <network></network>
+ <netmask></netmask>
+ <gw></gw>
+ <interface></interface>
+ </route1>
+ </routes>
+ <sudo_required>false</sudo_required>
+ </COMPONENTS>
+ </Host1>
+
+ <Host2>
+ <host>10.32.4.139</host>
+ <user>jenkins</user>
+ <password></password>
+ <type>HostDriver</type>
+ <connect_order>7</connect_order>
+ <jump_host>NetworkBench</jump_host>
+ <COMPONENTS>
+ <mac></mac>
+ <inband>false</inband>
+ <ip>10.32.5.162</ip>
+ <shortName>h2</shortName>
+ <port1></port1>
+ <link1></link1>
+ <interfaceName>enp175s0f0</interfaceName>
+ <routes>
+ <route1>
+ <network></network>
+ <netmask></netmask>
+ <gw></gw>
+ <interface></interface>
+ </route1>
+ </routes>
+ <sudo_required>false</sudo_required>
+ </COMPONENTS>
+ </Host2>
+
+ <Host3>
+ <host>10.32.4.140</host>
+ <user>jenkins</user>
+ <password></password>
+ <type>HostDriver</type>
+ <connect_order>8</connect_order>
+ <jump_host>NetworkBench</jump_host>
+ <COMPONENTS>
+ <mac></mac>
+ <inband>false</inband>
+ <ip>10.32.5.163</ip>
+ <shortName>h3</shortName>
+ <port1></port1>
+ <link1></link1>
+ <interfaceName>enp175s0f0</interfaceName>
+ <routes>
+ <route1>
+ <network></network>
+ <netmask></netmask>
+ <gw></gw>
+ <interface></interface>
+ </route1>
+ </routes>
+ <sudo_required>false</sudo_required>
+ </COMPONENTS>
+ </Host3>
+
+ <ng40vm>
+ <host>10.32.5.6</host>
+ <user>ng40</user>
+ <password>ng40</password>
+ <type>HostDriver</type>
+ <connect_order>8</connect_order>
+ <jump_host>NetworkBench</jump_host>
+ <COMPONENTS>
+ <mac></mac>
+ <inband>false</inband>
+ <ip>10.32.5.6</ip>
+ <shortName>ng40</shortName>
+ <port1></port1>
+ <link1></link1>
+ <interfaceName>ens8</interfaceName>
+ <routes>
+ <route1>
+ <network></network>
+ <netmask></netmask>
+ <gw></gw>
+ <interface></interface>
+ </route1>
+ </routes>
+ </COMPONENTS>
+ </ng40vm>
+
+ <NetworkBench>
+ <host>66.201.42.222</host>
+ <user>jenkins</user>
+ <password></password>
+ <type>NetworkDriver</type>
+ <connect_order>1</connect_order>
+ <COMPONENTS>
+ </COMPONENTS>
+ </NetworkBench>
+
+ </COMPONENT>
+</TOPOLOGY>
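Every switch and host component above names NetworkBench as its jump_host, so the drivers are expected to reach those addresses through 66.201.42.222 rather than directly. A minimal sketch of an equivalent manual connection, assuming OpenSSH's ProxyJump option and plain pexpect (not the actual driver code):

    import pexpect

    jump = "jenkins@66.201.42.222"   # NetworkBench, the jump host
    target = "root@10.32.4.132"      # SwitchLeaf1
    session = pexpect.spawn( "ssh -o ProxyJump=%s %s" % ( jump, target ), timeout=30 )
    session.expect( [ "assword:", r"\$", "#" ] )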
diff --git a/TestON/tests/USECASE/SegmentRouting/SRStaging/__init__.py b/TestON/tests/USECASE/SegmentRouting/SRStaging/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/TestON/tests/USECASE/SegmentRouting/SRStaging/__init__.py
diff --git a/TestON/tests/USECASE/SegmentRouting/SRStaging/dependencies/SRStagingTest.py b/TestON/tests/USECASE/SegmentRouting/SRStaging/dependencies/SRStagingTest.py
new file mode 100644
index 0000000..5dd43da
--- /dev/null
+++ b/TestON/tests/USECASE/SegmentRouting/SRStaging/dependencies/SRStagingTest.py
@@ -0,0 +1,393 @@
+"""
+Copyright 2017 Open Networking Foundation ( ONF )
+
+Please refer questions to either the onos test mailing list at <onos-test@onosproject.org>,
+the System Testing Plans and Results wiki page at <https://wiki.onosproject.org/x/voMg>,
+or the System Testing Guide page at <https://wiki.onosproject.org/x/WYQg>
+
+ TestON is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 2 of the License, or
+ ( at your option ) any later version.
+
+ TestON is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with TestON. If not, see <http://www.gnu.org/licenses/>.
+"""
+
+from tests.USECASE.SegmentRouting.dependencies.Testcaselib import Testcaselib as run
+import time
+import re
+import json
+import pexpect
+
+class SRStagingTest ():
+
+ def __init__( self ):
+ self.default = ''
+ self.topo = dict()
+ # TODO: Check minFlowCount of leaf for BMv2 switch
+ # (number of spine switches, number of leaf switches, dual-homed, description, minFlowCount - leaf (OvS), minFlowCount - leaf (BMv2))
+ self.topo[ '0x1' ] = ( 0, 1, False, 'single ToR', 28, 20 )
+ self.topo[ '0x2' ] = ( 0, 2, True, 'dual-homed ToR', 37, 37 )
+ self.topo[ '2x2' ] = ( 2, 2, False, '2x2 leaf-spine topology', 37, 32 )
+ self.topo[ '2x2staging' ] = ( 2, 2, True, '2x2 leaf-spine topology', 37, 32 )
+ # TODO: Implement 2x3 topology
+ # topo[ '2x3' ] = ( 2, 3, True, '2x3 leaf-spine topology with dual ToR and single ToR', 28 )
+ self.topo[ '2x4' ] = ( 2, 4, True, '2x4 dual-homed leaf-spine topology', 53, 53 )
+ self.switchNames = {}
+ self.switchNames[ '0x1' ] = [ "leaf1" ]
+ self.switchNames[ '2x2' ] = [ "leaf1", "leaf2", "spine101", "spine102" ]
+ main.switchType = "ovs"
+
+ def setupTest( self, main, test_idx, topology, onosNodes, description, vlan = [] ):
+ try:
+ skipPackage = False
+ init = False
+ if not hasattr( main, 'apps' ):
+ init = True
+ run.initTest( main )
+ # Skip onos packaging if the cluster size stays the same
+ if not init and onosNodes == main.Cluster.numCtrls:
+ skipPackage = True
+
+ main.case( '%s, with %s, %s switches and %d ONOS instance%s' %
+ ( description, self.topo[ topology ][ 3 ],
+ main.switchType,
+ onosNodes,
+ 's' if onosNodes > 1 else '' ) )
+
+ main.cfgName = 'CASE%01d%01d' % ( test_idx / 10, ( ( test_idx - 1 ) % 10 ) % 4 + 1 )
+ main.Cluster.setRunningNode( onosNodes )
+ # Set ONOS Log levels
+ # TODO: Check levels before and reset them after
+ run.installOnos( main, skipPackage=skipPackage, cliSleep=5 )
+
+ if hasattr( main, 'Mininet1' ):
+ run.mnDockerSetup( main ) # optionally create and setup docker image
+
+ # Run the test with Mininet
+ mininet_args = ' --spine=%d --leaf=%d' % ( self.topo[ topology ][ 0 ], self.topo[ topology ][ 1 ] )
+ if self.topo[ topology ][ 2 ]:
+ mininet_args += ' --dual-homed'
+ if len( vlan ) > 0 :
+ mininet_args += ' --vlan=%s' % ( ','.join( ['%d' % vlanId for vlanId in vlan ] ) )
+ if main.useBmv2:
+ mininet_args += ' --switch %s' % main.switchType
+ main.log.info( "Using %s switch" % main.switchType )
+
+ run.startMininet( main, 'trellis_fabric.py', args=mininet_args )
+
+ else:
+ # Run the test with physical devices
+ run.connectToPhysicalNetwork( main, hostDiscovery=False ) # We don't want to do host discovery in the pod
+ except Exception as e:
+ main.log.exception( "Error in setupTest" )
+ main.skipCase( result="FAIL", msg=e )
+
+ def startCapturing( self, main, srcList, dst, shortDesc=None, longDesc=None ):
+ """
+ Starts logging, traffic generation, traffic filters, etc before a failure is induced
+ srcList: list of source components that send the traffic
+ dst: the dst component that receives the traffic
+ """
+ try:
+ # ping right before to make sure arp is cached and sudo is authenticated
+ for src in srcList:
+ src.handle.sendline( "sudo /bin/ping -c 1 %s" % dst.ip_address )
+ try:
+ i = src.handle.expect( [ "password", src.prompt ] )
+ if i == 0:
+ src.handle.sendline( src.pwd )
+ src.handle.expect( src.prompt )
+ except Exception:
+ main.log.error( "Unexpected response from ping" )
+ src.handle.send( '\x03' ) # ctrl-c
+ src.handle.expect( src.prompt )
+ main.log.warn( "%s: %s" % ( src.name, src.handle.before ) )
+ # TODO: Create new components for iperf and tshark?
+ # Also generate more streams with different udp ports or some other
+ # method of guaranteeing we kill a link with traffic
+ # Start traffic
+ # TODO: ASSERTS
+ main.pingStart = time.time()
+ dstIp = dst.interfaces[0]['ips'][0]
+ for src in srcList:
+ srcIp = src.interfaces[0]['ips'][0]
+ iperfArgs = "%s --bind %s -c %s" % ( main.params[ 'PERF' ][ 'traffic_cmd_arguments' ],
+ srcIp,
+ dstIp )
+ main.log.info( "Starting iperf" )
+ src.handle.sendline( "/usr/bin/iperf %s &> /dev/null &" % iperfArgs )
+ src.handle.expect( src.prompt )
+ # Check path of traffic, to use in failures
+ # TODO: Do we need to add udp port to filter?
+ # TODO: Dynamically find the interface to filter on
+ # Start packet capture
+ pcapFileReceiver = "%s/tshark/%s-%s-tsharkReceiver" % ( "~/TestON",
+ shortDesc if shortDesc else "tshark",
+ dst.name )
+ tsharkArgsReceiver = "%s -i %s -f 'udp && host %s' -w %s" % ( main.params[ 'PERF' ][ 'pcap_cmd_arguments' ],
+ dst.interfaces[0]['name'],
+ dstIp,
+ pcapFileReceiver )
+ commands = [ 'mkdir -p ~/TestON/tshark',
+ 'rm %s' % pcapFileReceiver,
+ 'touch %s' % pcapFileReceiver,
+ 'chmod o=rw %s' % pcapFileReceiver ]
+ for command in commands:
+ dst.handle.sendline( command )
+ dst.handle.expect( dst.prompt )
+ main.log.debug( "%s: %s" % (dst.name, dst.handle.before ) )
+ main.log.info( "Starting tshark on %s " % dst.name )
+ dst.handle.sendline( "sudo /usr/bin/tshark %s &> /dev/null &" % tsharkArgsReceiver )
+ dst.handle.expect( dst.prompt )
+
+ for src in srcList:
+ srcIp = src.interfaces[0]['ips'][0]
+ pcapFileSender = "%s/tshark/%s-%s-tsharkSender" % ( "~/TestON",
+ shortDesc if shortDesc else "tshark",
+ src.name )
+ tsharkArgsSender = "%s -i %s -f 'udp && host %s' -w %s" % ( main.params[ 'PERF' ][ 'pcap_cmd_arguments' ],
+ src.interfaces[0]['name'],
+ srcIp,
+ pcapFileSender )
+ # Prepare file with correct permissions
+ commands = [ 'mkdir -p ~/TestON/tshark',
+ 'rm %s' % pcapFileSender,
+ 'touch %s' % pcapFileSender,
+ 'chmod o=rw %s' % pcapFileSender ]
+ for command in commands:
+ src.handle.sendline( command )
+ src.handle.expect( src.prompt )
+ main.log.debug( "%s: %s" % (src.name, src.handle.before ) )
+
+ main.log.info( "Starting tshark on %s " % src.name )
+ for src in srcList:
+ src.handle.sendline( "sudo /usr/bin/tshark %s &> /dev/null &" % tsharkArgsSender )
+ src.handle.expect( src.prompt )
+ # Timestamp used for EVENT START
+ main.eventStart = time.time()
+ # LOG Event start in ONOS logs
+ for ctrl in main.Cluster.active():
+ ctrl.CLI.log( "'%s START'" % longDesc, level="INFO" )
+ except Exception as e:
+ main.log.exception( "Error in startCapturing" )
+ main.skipCase( result="FAIL", msg=e )
+
+ def stopCapturing( self, main, srcList, dst, shortDesc=None, longDesc=None ):
+ try:
+ pcapFileReceiver = "%s/tshark/%s-%s-tsharkReceiver" % ( "~/TestON",
+ shortDesc if shortDesc else "tshark",
+ dst.name )
+ # Timestamp used for EVENT STOP
+ main.eventStop = time.time()
+ # LOG Event stop in ONOS logs
+ for ctrl in main.Cluster.active():
+ ctrl.CLI.log( "'%s STOP'" % longDesc, level="INFO" )
+ # Stop packet capture
+ dst.handle.sendline( 'fg' ) # Bring process to front
+ dst.handle.send( '\x03' ) # send ctrl-c
+ dst.handle.expect( dst.prompt )
+ for src in srcList:
+ src.handle.sendline( 'fg' ) # Bring process to front
+ src.handle.send( '\x03' ) # send ctrl-c
+ src.handle.expect( src.prompt )
+ # Stop traffic
+ for src in srcList:
+ src.handle.sendline( 'fg' ) # Bring process to front
+ src.handle.send( '\x03' ) # send ctrl-c
+ src.handle.expect( src.prompt )
+ main.pingStop = time.time()
+ main.log.warn( "It took %s seconds since we started ping for us to stop pcap" % ( main.pingStop - main.pingStart ) )
+
+ main.downtimeResults[ shortDesc ] = {}
+ for src in srcList:
+ pcapFileSender = "%s/tshark/%s-%s-tsharkSender" % ( "~/TestON",
+ shortDesc if shortDesc else "tshark",
+ src.name )
+ main.downtimeResults[ shortDesc ].update( { src.name: self.analyzePcap( src, pcapFileSender, "'udp && ip.src == %s'" % src.interfaces[0]['ips'][0], debug=False) } )
+ main.downtimeResults[ shortDesc ].update( { "%s-%s" % ( src.name, dst.name ): self.analyzePcap( dst, pcapFileReceiver, "'udp && ip.src == %s'" % src.interfaces[0]['ips'][0], debug=False) } )
+ # Grab pcap
+ senderSCP = main.ONOSbench.scp( src, pcapFileSender, main.logdir, direction="from" )
+ # Grab logs
+ # Grab pcap
+ receiverSCP = main.ONOSbench.scp( dst, pcapFileReceiver, main.logdir, direction="from" )
+ # Grab Write logs on switches
+ # TODO: kubectl cp write-reqs.txt
+
+ except Exception as e:
+ main.log.exception( "Error in stopCapturing" )
+
+ def linkDown( self, device, port, srcComponentList, dstComponent, shortDesc, longDesc ):
+ """"
+ High level function that handles an event including monitoring
+ Arguments:
+ device - String of the device uri in ONOS
+ port - String of the port uri in ONOS
+ srcComponentList - List containing src components, used for sending traffic
+ dstComponent - Component used for receiving traffic
+ shortDesc - String, Short description, used in reporting and file prefixes
+ longDesc - String, Longer description, used in logging
+ """
+ import time
+ try:
+ main.step( "Start Capturing" )
+ main.funcs.startCapturing( main,
+ srcComponentList,
+ dstComponent,
+ shortDesc=shortDesc,
+ longDesc=longDesc )
+ main.step( "Port down" )
+ ctrl = main.Cluster.active( 0 ).CLI
+ portDown = ctrl.portstate( dpid=device, port=port, state="disable" )
+ portsJson = json.loads( ctrl.ports() )
+ for d in portsJson:
+ if d['device']['id'] == device:
+ for p in d['ports']:
+ if "(%s)" % port in p['port']:
+ adminState = p['isEnabled']
+ main.log.debug( adminState )
+ #TODO ASSERTS
+ main.log.info( "Sleeping 10 seconds" )
+ time.sleep(10)
+ main.step( "Stop Capturing" )
+ main.funcs.stopCapturing( main,
+ srcComponentList,
+ dstComponent,
+ shortDesc=shortDesc,
+ longDesc=longDesc )
+ except Exception as e:
+ main.log.exception( "Error in linkDown" )
+
+ def linkUp( self, device, port, srcComponentList, dstComponent, shortDesc, longDesc ):
+ """"
+ High level function that handles an event including monitoring
+ Arguments:
+ device - String of the device uri in ONOS
+ port - String of the port uri in ONOS
+ srcComponentList - List containing src components, used for sending traffic
+ dstComponent - Component used for receiving traffic
+ shortDesc - String, Short description, used in reporting and file prefixes
+ longDesc - String, Longer description, used in logging
+ """
+ import time
+ try:
+ main.step( "Start Capturing" )
+ main.funcs.startCapturing( main,
+ srcComponentList,
+ dstComponent,
+ shortDesc=shortDesc,
+ longDesc=longDesc )
+ main.step( "Port Up" )
+ ctrl = main.Cluster.active( 0 ).CLI
+ portUp = ctrl.portstate( dpid=device, port=port, state="enable" )
+ portsJson = json.loads( ctrl.ports() )
+ for d in portsJson:
+ if d['device']['id'] == device:
+ for p in d['ports']:
+ if "(%s)" % port in p['port']:
+ adminState = p['isEnabled']
+ main.log.debug( adminState )
+ #TODO ASSERTS
+ main.log.info( "Sleeping 10 seconds" )
+ time.sleep(10)
+ main.step( "Stop Capturing" )
+ main.funcs.stopCapturing( main,
+ srcComponentList,
+ dstComponent,
+ shortDesc=shortDesc,
+ longDesc=longDesc )
+ except Exception as e:
+ main.log.exception( "Error in linkUp" )
+
+ def switchDown( self ):
+ try:
+ pass
+ except Exception as e:
+ main.log.exception( "Error in switchDown" )
+
+ def switchUp( self ):
+ try:
+ pass
+ except Exception as e:
+ main.log.exception( "Error in switchUp" )
+
+ def onosDown( self ):
+ try:
+ pass
+ except Exception as e:
+ main.log.exception( "Error in onosDown" )
+
+ def analyzePcap( self, component, filePath, packetFilter, debug=False ):
+ try:
+ try:
+ output = ""
+ component.handle.sendline( "" )
+ while True:
+ component.handle.expect( component.prompt, timeout=1 )
+ output += component.handle.before + str( component.handle.after )
+ except pexpect.TIMEOUT:
+ main.log.debug( "%s: %s" % ( component.name, output ) )
+ except Exception as e:
+ main.log.exception( "Error in onosDown" )
+ lineRE = r'^\s*\d+\s+([0-9.]+)'
+ tsharkOptions = "-t dd -r %s -Y %s -T fields -e frame.number -e frame.time_delta -e ip.src -e ip.dst -e udp" % ( filePath, packetFilter )
+ component.handle.sendline( "sudo /usr/bin/tshark %s" % tsharkOptions )
+ i = component.handle.expect( [ "appears to be damaged or corrupt.", "Malformed Packet", component.prompt, pexpect.TIMEOUT ], timeout=60 )
+ if i != 2:
+ main.log.error( "Error Reading pcap file" )
+ component.handle.send( '\x03' ) # CTRL-C to end process
+ component.handle.expect( component.prompt )
+ main.log.debug( component.handle.before )
+ return 0
+ output = component.handle.before
+ deltas = []
+ for line in output.splitlines():
+ # Search for a packet in each line
+ # If match, save the delta time of the packet
+ m = re.search( lineRE, line )
+ if m:
+ if debug:
+ main.log.debug( repr( line ) )
+ main.log.info( m.groups() )
+ deltas.append( float( m.group(1) ) * 1000 )
+ else:
+ main.log.warn( repr( line ) )
+ if not deltas:
+ main.log.error( "No Packets found" )
+ return 0
+ # Print largest timestamp gap
+ deltas.sort()
+ if debug:
+ main.log.debug( deltas[ -10: ] ) # largest 10
+ main.log.info( "%s: Detected downtime (longest gap between packets): %s ms" % ( component.name, deltas[ -1 ] ) )
+ return deltas[ -1 ]
+ except Exception as e:
+ main.log.exception( "Error in analyzePcap" )
+
+ def dbWrite( self, main, filename ):
+ try:
+ dbFileName = "%s/%s" % ( main.logdir, filename )
+ dbfile = open( dbFileName, "w+" )
+ header = []
+ row = []
+ for eventName, results in main.downtimeResults.iteritems():
+ for measurementName, value in results.iteritems():
+ header.append( "'%s-%s'" % ( eventName, measurementName ) )
+ row.append( "'%s'" % value )
+ dbfile.write( ",".join( header ) + "\n" + ",".join( row ) + "\n" )
+ dbfile.close()
+ except IOError:
+ main.log.warn( "Error opening " + dbFileName + " to write results." )
+
+ def cleanup( self, main ):
+ # TODO: Do things like restore log levels here
+ run.cleanup( main )
+ self.dbWrite( main, "SRStaging-dbfile.csv")
+
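The downtime figure analyzePcap() reports is simply the largest frame.time_delta that tshark prints for the filtered stream. A self-contained sketch of that parsing, assuming the same "-T fields -e frame.number -e frame.time_delta ..." output format:

    import re

    def largestGapMs( tsharkOutput ):
        # Each line starts with the frame number followed by the inter-packet delta in seconds
        lineRE = r'^\s*\d+\s+([0-9.]+)'
        deltas = []
        for line in tsharkOutput.splitlines():
            m = re.search( lineRE, line )
            if m:
                deltas.append( float( m.group( 1 ) ) * 1000 )
        return max( deltas ) if deltas else 0

    print( largestGapMs( "1\t0.000000\t10.32.5.161\t10.32.5.6\n2\t0.012345\t10.32.5.161\t10.32.5.6\n" ) )  # 12.345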
diff --git a/TestON/tests/USECASE/SegmentRouting/SRStaging/dependencies/__init__.py b/TestON/tests/USECASE/SegmentRouting/SRStaging/dependencies/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/TestON/tests/USECASE/SegmentRouting/SRStaging/dependencies/__init__.py
diff --git a/TestON/tests/USECASE/SegmentRouting/SRSwitchFailure/dependencies/SRSwitchFailureFuncs.py b/TestON/tests/USECASE/SegmentRouting/SRSwitchFailure/dependencies/SRSwitchFailureFuncs.py
index b25ace4..82a4616 100644
--- a/TestON/tests/USECASE/SegmentRouting/SRSwitchFailure/dependencies/SRSwitchFailureFuncs.py
+++ b/TestON/tests/USECASE/SegmentRouting/SRSwitchFailure/dependencies/SRSwitchFailureFuncs.py
@@ -55,10 +55,11 @@
else:
translator.bmv2ToOfdpa( main )
translator.bmv2ToOfdpa( main, cfgFile=xconnectFile )
- if suf:
- run.loadJson( main, suffix=suf )
- else:
- run.loadJson( main )
+ if not main.persistentSetup:
+ if suf:
+ run.loadJson( main, suffix=suf )
+ else:
+ run.loadJson( main )
run.loadChart( main )
if hasattr( main, 'Mininet1' ):
run.mnDockerSetup( main ) # optionally create and setup docker image
diff --git a/TestON/tests/USECASE/SegmentRouting/dependencies/Testcaselib.py b/TestON/tests/USECASE/SegmentRouting/dependencies/Testcaselib.py
index 699370c..5ed1784 100644
--- a/TestON/tests/USECASE/SegmentRouting/dependencies/Testcaselib.py
+++ b/TestON/tests/USECASE/SegmentRouting/dependencies/Testcaselib.py
@@ -365,7 +365,7 @@
network.discoverHosts()
@staticmethod
- def connectToPhysicalNetwork( main ):
+ def connectToPhysicalNetwork( main, hostDiscovery=True ):
main.step( "Connecting to physical netowrk" )
main.physicalNet = True
topoResult = main.NetworkBench.connectToNet()
@@ -414,7 +414,8 @@
actual=stepResult,
onpass="Successfully connected inband hosts",
onfail="Failed to connect inband hosts" )
- Testcaselib.discoverHosts( main )
+ if hostDiscovery:
+ Testcaselib.discoverHosts( main )
@staticmethod
def saveOnosDiagnostics( main ):
@@ -540,11 +541,13 @@
main.ONOSbench.dumpONOSCmd( main.Cluster.active( 0 ).ipAddress,
"flows",
main.logdir,
- tag + "_FlowsBefore" )
+ tag + "_FlowsBefore",
+ cliPort=main.Cluster.active(0).CLI.karafPort )
main.ONOSbench.dumpONOSCmd( main.Cluster.active( 0 ).ipAddress,
"groups",
main.logdir,
- tag + "_GroupsBefore" )
+ tag + "_GroupsBefore",
+ cliPort=main.Cluster.active(0).CLI.karafPort )
@staticmethod
def checkDevices( main, switches, tag="", sleep=10 ):
@@ -630,7 +633,7 @@
@staticmethod
def pingAll( main, tag="", dumpflows=True, acceptableFailed=0, basedOnIp=False,
- sleep=10, retryAttempts=1, skipOnFail=False ):
+ sleep=10, retryAttempts=1, skipOnFail=False, useScapy=True ):
'''
Verify connectivity between hosts according to the ping chart
acceptableFailed: max number of acceptable failed pings.
@@ -697,7 +700,7 @@
onpass="IPv6 connectivity successfully tested",
onfail="IPv6 connectivity failed" )
elif main.physicalNet:
- pa = main.Network.pingallHosts( hosts, ipv6=True, useScapy=True )
+ pa = main.Network.pingallHosts( hosts, ipv6=True, useScapy=useScapy )
utilities.assert_equals( expect=expect, actual=pa,
onpass="IP connectivity successfully tested",
onfail="IP connectivity failed" )
@@ -716,11 +719,13 @@
main.ONOSbench.dumpONOSCmd( main.Cluster.active( 0 ).ipAddress,
"flows",
main.logdir,
- tag + "_FlowsOn" )
+ tag + "_FlowsOn",
+ cliPort=main.Cluster.active(0).CLI.karafPort )
main.ONOSbench.dumpONOSCmd( main.Cluster.active( 0 ).ipAddress,
"groups",
main.logdir,
- tag + "_GroupsOn" )
+ tag + "_GroupsOn",
+ cliPort=main.Cluster.active(0).CLI.karafPort )
@staticmethod
def killLink( main, end1, end2, switches, links, sleep=None ):
@@ -1053,6 +1058,8 @@
if not main.persistentSetup:
for ctrl in main.Cluster.active():
main.ONOSbench.onosStop( ctrl.ipAddress )
+ else:
+ Testcaselib.resetOnosLogLevels( main )
Testcaselib.mnDockerTeardown( main )
@staticmethod
@@ -1758,6 +1765,10 @@
Read and Set onos log levels from the params file
"""
main.step( 'Set logging levels' )
+ # Get original values in case we want to reset them
+ ctrl = main.Cluster.active(0)
+ ctrl.CLI.logList()
+
logging = True
try:
logs = main.params.get( 'ONOS_Logging', False )
@@ -1770,3 +1781,32 @@
utilities.assert_equals( expect=True, actual=logging,
onpass="Set log levels",
onfail="Failed to set log levels" )
+
+ @staticmethod
+ def resetOnosLogLevels( main ):
+ """
+ Read and reset onos log levels to a previously read set of values
+ """
+ main.step( 'Reset logging levels' )
+ # Get the current values and compare them to the previously saved originals
+ ctrl = main.Cluster.active(0)
+ currentLevels = ctrl.CLI.logList( saveValues=False )
+ origLevels = ctrl.CLI.logLevels
+ toBeSet = {}
+ for logger, level in currentLevels.iteritems():
+ if logger not in origLevels:
+ toBeSet[ logger ] = origLevels[ 'ROOT' ]
+ else:
+ oldLevel = origLevels[ logger ]
+ if level != oldLevel:
+ toBeSet[ logger ] = oldLevel
+ logging = True
+ try:
+ for logger, level in toBeSet.iteritems():
+ for ctrl in main.Cluster.active():
+ ctrl.CLI.logSet( level, logger )
+ except AttributeError:
+ logging = False
+ utilities.assert_equals( expect=True, actual=logging,
+ onpass="Reset log levels",
+ onfail="Failed to reset log levels" )
diff --git a/TestON/tests/USECASE/SegmentRouting/dependencies/cfgtranslator.py b/TestON/tests/USECASE/SegmentRouting/dependencies/cfgtranslator.py
index b978b73..6b538a9 100644
--- a/TestON/tests/USECASE/SegmentRouting/dependencies/cfgtranslator.py
+++ b/TestON/tests/USECASE/SegmentRouting/dependencies/cfgtranslator.py
@@ -100,6 +100,10 @@
# Translate configuration JSON file from OFDPA-OVS driver to BMv2 driver.
def ofdpaToBmv2( main, switchPrefix="bmv2", cfgFile="", roleMap={r'0*[1-9]([0-9]){2}': 'spine', r'0{15}[1-9]': "leaf"} ):
didRE = r"of:0*(?P<swNum>[1-9][0-9]*)(/(?P<portNum>[0-9]+))?"
+ if switchPrefix is None:
+ switchPrefix = ''
+ else:
+ switchPrefix += ':'
if not cfgFile:
cfgFile = "%s%s.json" % ( main.configPath + main.forJson,
main.cfgName )
@@ -117,7 +121,7 @@
if roleMatch:
role = roleValue
break
- new_port = 'device:' + switchPrefix + ':' + role + searchObj.group( 'swNum' ) + '/' + searchObj.group( 'portNum' )
+ new_port = 'device:' + switchPrefix + role + searchObj.group( 'swNum' ) + '/' + searchObj.group( 'portNum' )
netcfg[ 'ports' ][ new_port ] = netcfg[ 'ports' ].pop( port )
if 'hosts' in netcfg.keys():
@@ -134,7 +138,7 @@
if roleMatch:
role = roleValue
break
- new_locations.append( 'device:' + switchPrefix + ':' + role + searchObj.group( 'swNum' ) + '/' + searchObj.group( 'portNum' ) )
+ new_locations.append( 'device:' + switchPrefix + role + searchObj.group( 'swNum' ) + '/' + searchObj.group( 'portNum' ) )
else:
new_locations.append( location )
netcfg[ 'hosts' ][ host ][ 'basic' ][ 'locations' ] = new_locations
@@ -149,7 +153,7 @@
if roleMatch:
role = roleValue
break
- new_location = 'device:' + switchPrefix + ':' + role + searchObj.group( 'swNum' ) + '/' + searchObj.group( 'portNum' )
+ new_location = 'device:' + switchPrefix + role + searchObj.group( 'swNum' ) + '/' + searchObj.group( 'portNum' )
netcfg[ 'hosts' ][ host ][ 'basic' ][ 'locations' ] = new_location
if 'devices' in netcfg.keys():
@@ -160,9 +164,9 @@
#TODO This or roleMap? maybe use this to populate role Map?
isLeaf = netcfg[ 'devices' ][ device ][ SR_APP ][ 'isEdgeRouter' ]
if isLeaf is True:
- new_device = 'device:' + switchPrefix + ':leaf' + searchObj.group( 'swNum' )
+ new_device = 'device:' + switchPrefix + 'leaf' + searchObj.group( 'swNum' )
else:
- new_device = 'device:' + switchPrefix + ':spine' + searchObj.group( 'swNum' )
+ new_device = 'device:' + switchPrefix + 'spine' + searchObj.group( 'swNum' )
netcfg[ 'devices' ][ new_device ] = netcfg[ 'devices' ].pop( device )
if 'pairDeviceId' in netcfg[ 'devices' ][ new_device ][ SR_APP ].keys():
searchObj = re.search( didRE,
@@ -175,7 +179,7 @@
if roleMatch:
role = roleValue
break
- netcfg[ 'devices' ][ new_device ][ SR_APP ][ 'pairDeviceId' ] = 'device:' + switchPrefix + ':' + role + \
+ netcfg[ 'devices' ][ new_device ][ SR_APP ][ 'pairDeviceId' ] = 'device:' + switchPrefix + role + \
searchObj.group( 'swNum' )
if 'basic' in netcfg[ 'devices' ][ new_device ].keys():
if 'driver' in netcfg[ 'devices' ][ new_device ][ 'basic' ].keys():
@@ -196,7 +200,7 @@
role = roleValue
break
netcfg[ 'apps' ][ DHCP_APP_ID ][ 'default' ][ i ][ 'dhcpServerConnectPoint' ] = \
- 'device:' + switchPrefix + ':' + role + searchObj.group( 'swNum' ) + '/' + searchObj.group( 'portNum' )
+ 'device:' + switchPrefix + role + searchObj.group( 'swNum' ) + '/' + searchObj.group( 'portNum' )
if 'xconnects' in netcfg.keys():
new_xconnects = []
@@ -210,7 +214,7 @@
if roleMatch:
role = roleValue
break
- new_device = 'device:' + switchPrefix + ':' + role + searchObj.group( 'swNum' )
+ new_device = 'device:' + switchPrefix + role + searchObj.group( 'swNum' )
xconnect[ 'deviceId' ] = new_device
new_xconnects.append( xconnect )
netcfg[ 'xconnects' ] = new_xconnects
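The prefix handling above means a None switchPrefix now yields plain IDs like device:leaf1, while a non-empty prefix still yields device:bmv2:leaf1. Condensed into a standalone helper for illustration:

    def translatedDeviceId( switchPrefix, role, swNum ):
        # Same prefix logic as ofdpaToBmv2() above
        if switchPrefix is None:
            switchPrefix = ''
        else:
            switchPrefix += ':'
        return 'device:' + switchPrefix + role + swNum

    print( translatedDeviceId( None, 'leaf', '1' ) )    # device:leaf1       (hardware, e.g. the staging pod)
    print( translatedDeviceId( 'bmv2', 'leaf', '1' ) )  # device:bmv2:leaf1  (simulated BMv2 switches)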
diff --git a/TestON/tests/dependencies/Cluster.py b/TestON/tests/dependencies/Cluster.py
index aa63714..2e79481 100644
--- a/TestON/tests/dependencies/Cluster.py
+++ b/TestON/tests/dependencies/Cluster.py
@@ -109,7 +109,7 @@
nodeList = self.runningNodes
for ctrl in nodeList:
- ips.append( ctrl.ipAddress )
+ ips.append( ctrl.ipAddress if ctrl.ipAddress != 'localhost' else ctrl.address )
return ips
@@ -310,18 +310,21 @@
Returns:
Returns main.TRUE if it successfully set and verify cell.
"""
+ result = main.TRUE
setCellResult = self.command( "setCell",
args=[ cellName ],
specificDriver=1,
getFrom="all" )
- benchCellResult = main.ONOSbench.setCell( cellName )
- verifyResult = self.command( "verifyCell",
- specificDriver=1,
- getFrom="all" )
- result = main.TRUE
for i in range( len( setCellResult ) ):
- result = result and setCellResult[ i ] and verifyResult[ i ]
+ result = result and setCellResult[ i ]
+ benchCellResult = main.ONOSbench.setCell( cellName )
result = result and benchCellResult
+ if not self.useDocker:
+ verifyResult = self.command( "verifyCell",
+ specificDriver=1,
+ getFrom="all" )
+ for i in range( len( verifyResult ) ):
+ result = result and verifyResult[ i ]
return result
def checkService( self ):
@@ -738,6 +741,7 @@
Returns True if it successfully checked
"""
results = True
+ self.command( "getAddress", specificDriver=2 )
nodesOutput = self.command( "nodes", specificDriver=2 )
ips = sorted( self.getIps( activeOnly=True ) )
for i in nodesOutput:
diff --git a/TestON/tests/dependencies/ONOSSetup.py b/TestON/tests/dependencies/ONOSSetup.py
index 03275a1..042eff5 100644
--- a/TestON/tests/dependencies/ONOSSetup.py
+++ b/TestON/tests/dependencies/ONOSSetup.py
@@ -695,6 +695,7 @@
elif main.persistentSetup:
for ctrl in cluster.getRunningNodes():
ctrl.inDocker = True
+ ctrl.CLI.inDocker = True
onosCliResult = main.TRUE
if startOnosCli:
diff --git a/TestON/tests/dependencies/topology.py b/TestON/tests/dependencies/topology.py
index 7cdd9ab..26c3309 100644
--- a/TestON/tests/dependencies/topology.py
+++ b/TestON/tests/dependencies/topology.py
@@ -305,7 +305,8 @@
main.log.debug( "t3 command: {}".format( cmd ) )
main.ONOSbench.dumpONOSCmd( main.Cluster.active( 0 ).ipAddress, cmd, main.logdir,
"t3-CASE{}-{}-{}-route{}-".format( main.CurrentTestCaseNumber, srcIp, dstIp, i ),
- timeout=10 )
+ timeout=10,
+ cliPort=main.Cluster.active(0).CLI.karafPort )
return main.FALSE if unexpectedPings else main.TRUE
def sendScapyPackets( self, sender, receiver, pktFilter, pkt, sIface=None, dIface=None, expect=True, acceptableFailed=0, collectT3=True, t3Command="" ):
@@ -337,7 +338,8 @@
main.log.debug( "Collecting t3 with source {} and destination {}".format( sender.name, receiver.name ) )
main.log.debug( "t3 command: {}".format( t3Command ) )
main.ONOSbench.dumpONOSCmd( main.Cluster.active( 0 ).ipAddress, t3Command, main.logdir,
- "t3-CASE{}-{}-{}-".format( main.CurrentTestCaseNumber, sender.name, receiver.name ) )
+ "t3-CASE{}-{}-{}-".format( main.CurrentTestCaseNumber, sender.name, receiver.name ),
+ cliPort=main.Cluster.active(0).CLI.karafPort )
return scapyResult
def sendScapyPacketsHelper( self, sender, receiver, pktFilter, pkt, sIface=None, dIface=None, expect=True ):
@@ -418,7 +420,8 @@
main.log.debug( "t3 command: {}".format( cmd ) )
main.ONOSbench.dumpONOSCmd( main.Cluster.active( 0 ).ipAddress, cmd, main.logdir,
"t3-CASE{}-{}-{}-route{}-".format( main.CurrentTestCaseNumber, srcIp, dstIp, i ),
- timeout=10 )
+ timeout=10,
+ cliPort=main.Cluster.active(0).CLI.karafPort )
return trafficResult
def pingAndCaptureHelper( self, srcHost, dstIp, dstHost, dstIntf, ipv6=False, expect=True ):
diff --git a/TestON/tests/dependencies/utils.py b/TestON/tests/dependencies/utils.py
index 3cf849a..de61ae2 100644
--- a/TestON/tests/dependencies/utils.py
+++ b/TestON/tests/dependencies/utils.py
@@ -73,8 +73,12 @@
stepResult = main.TRUE
scpResult = main.TRUE
copyResult = main.TRUE
+ isKube = False
for ctrl in main.Cluster.runningNodes:
- if ctrl.inDocker:
+ if ctrl.k8s:
+ isKube = True
+ continue
+ elif ctrl.inDocker:
scpResult = scpResult and ctrl.server.dockerCp( ctrl.name,
"/opt/onos/log/karaf.log",
"/tmp/karaf.log",
@@ -97,6 +101,28 @@
stepResult = main.TRUE and stepResult
else:
stepResult = main.FALSE and stepResult
+ if isKube:
+ # TODO: Look into using Stern, kail, or just use `kubectl logs <pod>`
+ # We also need to save the pod name to switch name mapping
+ main.ONOSbench.kubectlPodNodes( dstPath=main.logdir + "/podMapping.txt",
+ kubeconfig=ctrl.k8s.kubeConfig,
+ namespace=main.params[ 'kubernetes' ][ 'namespace' ] )
+ # TODO Get stratum write logs
+ # Save image for pods, based on "describe pods"
+ main.ONOSbench.kubectlDescribe( "pods",
+ main.logdir + "/describePods.txt",
+ kubeconfig=ctrl.k8s.kubeConfig,
+ namespace=main.params[ 'kubernetes' ][ 'namespace' ] )
+ # Get the pod logs
+ pods = main.ONOSbench.kubectlGetPodNames( kubeconfig=ctrl.k8s.kubeConfig,
+ namespace=main.params[ 'kubernetes' ][ 'namespace' ] )
+
+ for pod in pods:
+ path = "%s/%s.log" % ( main.logdir, pod )
+ stratumPods = main.ONOSbench.kubectlLogs( pod,
+ path,
+ kubeconfig=ctrl.k8s.kubeConfig,
+ namespace=main.params[ 'kubernetes' ][ 'namespace' ] )
utilities.assert_equals( expect=main.TRUE,
actual=stepResult,
onpass="Successfully copied remote ONOS logs",