Merge "added better exception catching to FUNCoptical"
diff --git a/TestON/tests/FUNCnetconf/Dependency/netconf.py b/TestON/tests/FUNCnetconf/Dependency/netconf.py
new file mode 100644
index 0000000..5ebbea5
--- /dev/null
+++ b/TestON/tests/FUNCnetconf/Dependency/netconf.py
@@ -0,0 +1,141 @@
+"""
+ Wrapper functions for FUNCnetconf
+ These functions include Onosclidriver and Mininetclidriver driver functions
+ Author: Jeremy Songster, jeremy@onlab.us
+"""
+import time
+import json
+
+def __init__( self ):
+ self.default = ''
+
+def startApp( main ):
+ """
+ This function starts the netconf app in all onos nodes and ensures that
+ the OF-Config server is running on the node to be configured
+ """
+
+ startResult = main.FALSE
+ startResult = main.CLIs[ 0 ].activateApp( appName="org.onosproject.netconf" )
+ return startResult
+
+def startOFC( main ):
+ """
+ This function uses pexpect pxssh class to activate the ofc-server daemon on OC2
+ """
+
+ startResult = main.FALSE
+ try:
+ main.ONOSbench.handle.sendline( "" )
+ main.ONOSbench.handle.expect( "\$" )
+ main.ONOSbench.handle.sendline( "ifconfig eth0 | grep 'inet addr:' | cut -d: -f2 | awk '{ print $1 }'" )
+ main.ONOSbench.handle.expect( "\$1 }'" )
+ main.ONOSbench.handle.expect( "\$" )
+ main.configDeviceIp = main.ONOSbench.handle.before
+ main.configDeviceIp = main.configDeviceIp.split()
+ main.configDeviceIp = main.configDeviceIp[ 0 ]
+ main.log.info( "Device to be configured: " + str( main.configDeviceIp ) )
+ main.ONOSbench.handle.sendline( "sudo ofc-server" )
+ main.ONOSbench.handle.expect( "\$" )
+ startResult = main.TRUE
+ return startResult
+ except pexpect.ExceptionPexpect as e:
+ main.log.exception( self.name + ": Pexpect exception found: " )
+ main.log.error( self.name + ": " + self.handle.before )
+ main.cleanup()
+ main.exit()
+
+def createConfig( main ):
+ """
+ This function writes a configuration file that can later be sent to the
+ REST API to configure a device.
+ The controller device is assumed to be OC1
+ The device to be configured is assumed to be OC2
+ """
+ createCfgResult = main.FALSE
+ # TODO, add ability to set Manufacturer, Hardware and Software versions
+ main.cfgJson = '{ "devices":{ "netconf:'+ main.configDeviceIp + ":" +\
+ main.configDevicePort + '":' + '{ "basic":{ "driver":"'+\
+ main.configDriver + '" } } }, "apps": { "' +\
+ main.configApps + '":{ "devices":[ { "name":' +\
+ main.configName + ', "password":' + main.configPass +\
+ ', "ip":"' + main.configDeviceIp + '", "port":' +\
+ main.configPort + '} ] } } }'
+ try:
+ file = open( "/home/admin/OnosSystemTest/TestON/tests/FUNCnetconf/Dependency/netconfConfig.json", 'w' )
+ main.cfgJson = json.loads( main.cfgJson )
+ main.cfgJson = json.dumps( main.cfgJson, sort_keys=True,
+ indent=4, separators=(',', ': '))
+ print main.cfgJson
+ file.write( main.cfgJson )
+ if file:
+ createCfgResult = main.TRUE
+ file.close()
+ return createCfgResult
+ else:
+ main.log.error( "There was an error opening the file")
+ return createCfgResult
+ except:
+ main.log.exception( "There was an error opening the file")
+ return createCfgResult
+
+def sendConfig( main ):
+ """
+ This function prepares the command needed to upload the configuration
+ file to the REST API
+ """
+ ip = main.ONOSip[0]
+ port = 8181
+ url = "/network/configuration"
+ method = "POST"
+ data = main.cfgJson
+ configResult = main.FALSE
+ sendResult = main.CLIs[ 0 ].send( ip=ip, port=port, url=url, method=method, data=data )
+ main.log.info( "Device configuration request response code: " + str( sendResult[ 0 ] ) )
+ if ( 200 <= sendResult[ 0 ] <= 299):
+ configResult = main.TRUE
+ else:
+ configResult = main.FALSE
+
+ return configResult
+
+def devices( main ):
+ """
+    This function gets the list of devices from the REST API, the ONOS CLI, and
+    the device-controllers command and checks to see that each recognizes the
+ device is configured according to the configuration uploaded above.
+ """
+ availResult = main.FALSE
+ typeResult = main.FALSE
+ addressResult = main.FALSE
+ driverResult = main.FALSE
+ try:
+ apiResult = main.CLIs[ 0 ].devices()
+ cliResult = main.CLIs2[ 0 ].devices()
+
+ apiDict = json.loads( apiResult )
+ cliDict = json.loads( cliResult )
+ apiAnnotations = apiDict[ 0 ].get( "annotations" )
+ cliAnnotations = cliDict[ 0 ].get( "annotations" )
+
+ main.log.info( "API device availability result: " + str( apiDict[ 0 ].get( "available" ) ) )
+ main.log.info( "CLI device availability result: " + str( cliDict[ 0 ].get( "available" ) ) )
+ if apiDict[ 0 ].get( "available" ) == True and cliDict[ 0 ].get( "available" ) == True:
+ availResult = main.TRUE
+ main.log.info( "API device type result: " + apiDict[ 0 ].get( "type" ) )
+ main.log.info( "CLI device type result: " + cliDict[ 0 ].get( "type" ) )
+ if apiDict[ 0 ].get( "type" ) == "SWITCH" and cliDict[ 0 ].get( "type" ) == "SWITCH":
+ typeResult = main.TRUE
+ main.log.info( "API device ipaddress: " + apiAnnotations.get( "ipaddress" ) )
+ main.log.info( "CLI device ipaddress: " + apiAnnotations.get( "ipaddress" ) )
+ if str( apiAnnotations.get( "ipaddress" ) ) == main.configDeviceIp and str( cliAnnotations.get( "ipaddress" ) ) == main.configDeviceIp:
+ addressResult = main.TRUE
+ main.log.info( "API device driver: " + apiAnnotations.get( "driver" ) )
+ main.log.info( "CLI device driver: " + cliAnnotations.get( "driver" ) )
+ if apiAnnotations.get( "driver" ) == main.configDriver and cliAnnotations.get( "driver" ) == main.configDriver:
+ driverResult = main.TRUE
+
+ return availResult and typeResult and addressResult and driverResult
+ except TypeError:
+ main.log.error( "Device was not configured correctly" )
+ return main.FALSE
\ No newline at end of file
diff --git a/TestON/tests/FUNCnetconf/Dependency/netconfConfig.json b/TestON/tests/FUNCnetconf/Dependency/netconfConfig.json
new file mode 100644
index 0000000..fc5a231
--- /dev/null
+++ b/TestON/tests/FUNCnetconf/Dependency/netconfConfig.json
@@ -0,0 +1,21 @@
+{
+ "apps": {
+ "org.onosproject.netconf": {
+ "devices": [
+ {
+ "ip": "10.128.50.10",
+ "name": "sdn",
+ "password": "rocks",
+ "port": 830
+ }
+ ]
+ }
+ },
+ "devices": {
+ "netconf:10.128.50.10:830": {
+ "basic": {
+ "driver": "ovs-netconf"
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/TestON/tests/FUNCnetconf/Dependency/startUp.py b/TestON/tests/FUNCnetconf/Dependency/startUp.py
new file mode 100644
index 0000000..bf2a2b6
--- /dev/null
+++ b/TestON/tests/FUNCnetconf/Dependency/startUp.py
@@ -0,0 +1,38 @@
+"""
+    This wrapper function is used for starting up an ONOS instance
+"""
+
+import time
+import os
+import json
+
+def onosBuild( main, gitBranch ):
+ """
+ This includes pulling ONOS and building it using maven install
+ """
+
+ buildResult = main.FALSE
+
+ # Git checkout a branch of ONOS
+ checkOutResult = main.ONOSbench.gitCheckout( gitBranch )
+ # Does the git pull on the branch that was checked out
+ if not checkOutResult:
+ main.log.warn( "Failed to checked out " + gitBranch +
+ " branch")
+ else:
+ main.log.info( "Successfully checked out " + gitBranch +
+ " branch")
+ gitPullResult = main.ONOSbench.gitPull()
+ if gitPullResult == main.ERROR:
+ main.log.error( "Error pulling git branch" )
+ else:
+ main.log.info( "Successfully pulled " + gitBranch + " branch" )
+
+ # Maven clean install
+ buildResult = main.ONOSbench.cleanInstall()
+
+ return buildResult
+
+
+
+
diff --git a/TestON/tests/FUNCnetconf/Dependency/topo.py b/TestON/tests/FUNCnetconf/Dependency/topo.py
new file mode 100644
index 0000000..b44e3fc
--- /dev/null
+++ b/TestON/tests/FUNCnetconf/Dependency/topo.py
@@ -0,0 +1,100 @@
+"""
+ These functions can be used for topology comparisons
+"""
+
+import time
+import os
+import json
+
+def getAllDevices( main ):
+ """
+ Return a list containing the devices output from each ONOS node
+ """
+ devices = []
+ threads = []
+ for i in range( main.numCtrls ):
+ t = main.Thread( target=main.CLIs[i].devices,
+ name="devices-" + str( i ),
+ args=[ ] )
+ threads.append( t )
+ t.start()
+
+ for t in threads:
+ t.join()
+ devices.append( t.result )
+ return devices
+
+def getAllHosts( main ):
+ """
+ Return a list containing the hosts output from each ONOS node
+ """
+ hosts = []
+ ipResult = main.TRUE
+ threads = []
+ for i in range( main.numCtrls ):
+ t = main.Thread( target=main.CLIs[i].hosts,
+ name="hosts-" + str( i ),
+ args=[ ] )
+ threads.append( t )
+ t.start()
+
+ for t in threads:
+ t.join()
+ hosts.append( t.result )
+ return hosts
+
+def getAllPorts( main ):
+ """
+ Return a list containing the ports output from each ONOS node
+ """
+ ports = []
+ threads = []
+ for i in range( main.numCtrls ):
+ t = main.Thread( target=main.CLIs[i].ports,
+ name="ports-" + str( i ),
+ args=[ ] )
+ threads.append( t )
+ t.start()
+
+ for t in threads:
+ t.join()
+ ports.append( t.result )
+ return ports
+
+def getAllLinks( main ):
+ """
+ Return a list containing the links output from each ONOS node
+ """
+ links = []
+ threads = []
+ for i in range( main.numCtrls ):
+ t = main.Thread( target=main.CLIs[i].links,
+ name="links-" + str( i ),
+ args=[ ] )
+ threads.append( t )
+ t.start()
+
+ for t in threads:
+ t.join()
+ links.append( t.result )
+ return links
+
+def getAllClusters( main ):
+ """
+ Return a list containing the clusters output from each ONOS node
+ """
+ clusters = []
+ threads = []
+ for i in range( main.numCtrls ):
+ t = main.Thread( target=main.CLIs[i].clusters,
+ name="clusters-" + str( i ),
+ args=[ ] )
+ threads.append( t )
+ t.start()
+
+ for t in threads:
+ t.join()
+ clusters.append( t.result )
+ return clusters
+
+
diff --git a/TestON/tests/FUNCnetconf/FUNCnetconf.params b/TestON/tests/FUNCnetconf/FUNCnetconf.params
new file mode 100644
index 0000000..c0fcc5a
--- /dev/null
+++ b/TestON/tests/FUNCnetconf/FUNCnetconf.params
@@ -0,0 +1,53 @@
+<PARAMS>
+ # CASE - Description
+ # 1 - Variable initialization and optional pull and build ONOS package
+ # 2 - Install ONOS
+ # 100 - Ensure netconf app is running
+ # 200 - Create or modify a configuration file
+ # 300 - Push a configuration file to bring up a device
+ # 400 - Bring down a device (not yet possible)
+
+ <testcases>1,[2,100,200,300]*2</testcases>
+
+ <SCALE>
+ <size>1,3</size>
+ </SCALE>
+
+ <DEPENDENCY>
+ <path> /tests/FUNCnetconf/Dependency/</path>
+ <wrapper1>startUp</wrapper1>
+ <wrapper2>netconf</wrapper2>
+ <wrapper3>topo</wrapper3>
+ <topology></topology>
+ </DEPENDENCY>
+
+ <ENV>
+ <cellApps>drivers,openflow,proxyarp,mobility,netconf</cellApps>
+ </ENV>
+
+ <GIT>
+ <pull>False</pull>
+ <branch>master</branch>
+ </GIT>
+
+ <SLEEP>
+ <startup>20</startup>
+ <upSwitch>10</upSwitch>
+ <topoAttempts>3</topoAttempts>
+ </SLEEP>
+
+ <MININET>
+ <switch>0</switch>
+ <links></links>
+ </MININET>
+
+ <CONFIGURE>
+ <cfgDevicePort>830</cfgDevicePort>
+ <cfgDriver>ovs-netconf</cfgDriver>
+ <cfgApps>org.onosproject.netconf</cfgApps>
+ <cfgName>"sdn"</cfgName>
+ <cfgPass>"rocks"</cfgPass>
+ <cfgAppPort>830</cfgAppPort>
+ </CONFIGURE>
+
+</PARAMS>
diff --git a/TestON/tests/FUNCnetconf/FUNCnetconf.py b/TestON/tests/FUNCnetconf/FUNCnetconf.py
new file mode 100644
index 0000000..ddd1cd8
--- /dev/null
+++ b/TestON/tests/FUNCnetconf/FUNCnetconf.py
@@ -0,0 +1,361 @@
+# Testing the NETCONF protocol within ONOS
+
+class FUNCnetconf:
+
+ def __init__( self ):
+ self.default = ''
+
+ def CASE1( self, main ):
+ import time
+ import imp
+ import re
+
+ """
+ - Construct tests variables
+ - GIT ( optional )
+ - Checkout ONOS master branch
+ - Pull latest ONOS code
+ - Building ONOS ( optional )
+ - Install ONOS package
+ - Build ONOS package
+ """
+
+ main.case( "Constructing test variables and building ONOS package" )
+ main.step( "Constructing test variables" )
+ main.caseExplanation = "This test case is mainly for loading " +\
+ "from params file, and pull and build the " +\
+ " latest ONOS package"
+ stepResult = main.FALSE
+
+ # Test variables
+ try:
+ main.testOnDirectory = re.sub( "(/tests)$", "", main.testDir )
+ main.apps = main.params[ 'ENV' ][ 'cellApps' ]
+ gitBranch = main.params[ 'GIT' ][ 'branch' ]
+ main.dependencyPath = main.testOnDirectory + \
+ main.params[ 'DEPENDENCY' ][ 'path' ]
+ # main.topology = main.params[ 'DEPENDENCY' ][ 'topology' ]
+ main.scale = ( main.params[ 'SCALE' ][ 'size' ] ).split( "," )
+ if main.ONOSbench.maxNodes:
+ main.maxNodes = int( main.ONOSbench.maxNodes )
+ else:
+ main.maxNodes = 0
+ wrapperFile1 = main.params[ 'DEPENDENCY' ][ 'wrapper1' ]
+ wrapperFile2 = main.params[ 'DEPENDENCY' ][ 'wrapper2' ]
+ wrapperFile3 = main.params[ 'DEPENDENCY' ][ 'wrapper3' ]
+ main.startUpSleep = int( main.params[ 'SLEEP' ][ 'startup' ] )
+ main.switchSleep = int( main.params[ 'SLEEP' ][ 'upSwitch' ] )
+ main.checkTopoAttempts = int( main.params[ 'SLEEP' ][ 'topoAttempts' ] )
+ main.numSwitch = int( main.params[ 'MININET' ][ 'switch' ] )
+
+ # Config file parameters
+ main.configDevicePort = main.params[ 'CONFIGURE' ][ 'cfgDevicePort' ]
+ main.configDriver = main.params[ 'CONFIGURE' ][ 'cfgDriver' ]
+ main.configApps = main.params[ 'CONFIGURE' ][ 'cfgApps' ]
+ main.configName = main.params[ 'CONFIGURE' ][ 'cfgName' ]
+ main.configPass = main.params[ 'CONFIGURE' ][ 'cfgPass' ]
+ main.configPort = main.params[ 'CONFIGURE' ][ 'cfgAppPort' ]
+
+ gitPull = main.params[ 'GIT' ][ 'pull' ]
+ main.cellData = {} # for creating cell file
+ main.hostsData = {}
+ main.CLIs = []
+ main.CLIs2 = []
+ main.ONOSip = []
+ main.assertReturnString = '' # Assembled assert return string
+
+ main.ONOSip = main.ONOSbench.getOnosIps()
+ print main.ONOSip
+
+ # Assigning ONOS cli handles to a list
+ for i in range( 1, main.maxNodes + 1 ):
+ main.CLIs.append( getattr( main, 'ONOSrest' + str( i ) ) )
+ main.CLIs2.append( getattr( main, 'ONOScli' + str( i ) ) )
+
+ # -- INIT SECTION, ONLY RUNS ONCE -- #
+ main.startUp = imp.load_source( wrapperFile1,
+ main.dependencyPath +
+ wrapperFile1 +
+ ".py" )
+
+ main.netconfFunction = imp.load_source( wrapperFile2,
+ main.dependencyPath +
+ wrapperFile2 +
+ ".py" )
+
+ main.topo = imp.load_source( wrapperFile3,
+ main.dependencyPath +
+ wrapperFile3 +
+ ".py" )
+
+ # Uncomment out the following if a mininet topology is added
+ # copyResult1 = main.ONOSbench.scp( main.Mininet1,
+ # main.dependencyPath +
+ # main.topology,
+ # main.Mininet1.home + "custom/",
+ # direction="to" )
+
+ if main.CLIs and main.CLIs2:
+ stepResult = main.TRUE
+ else:
+ main.log.error( "Did not properly created list of ONOS CLI handle" )
+ stepResult = main.FALSE
+ except Exception as e:
+ main.log.exception(e)
+ main.cleanup()
+ main.exit()
+
+ utilities.assert_equals( expect=main.TRUE,
+ actual=stepResult,
+ onpass="Successfully construct " +
+ "test variables ",
+ onfail="Failed to construct test variables" )
+
+ if gitPull == 'True':
+ main.step( "Building ONOS in " + gitBranch + " branch" )
+ onosBuildResult = main.startUp.onosBuild( main, gitBranch )
+ stepResult = onosBuildResult
+ utilities.assert_equals( expect=main.TRUE,
+ actual=stepResult,
+ onpass="Successfully compiled " +
+ "latest ONOS",
+ onfail="Failed to compile " +
+ "latest ONOS" )
+ else:
+ main.log.warn( "Did not pull new code so skipping mvn " +
+ "clean install" )
+ main.ONOSbench.getVersion( report=True )
+
+ def CASE2( self, main ):
+ """
+ - Set up cell
+ - Create cell file
+ - Set cell file
+ - Verify cell file
+ - Kill ONOS process
+ - Uninstall ONOS cluster
+ - Verify ONOS start up
+ - Install ONOS cluster
+ - Connect to cli
+ """
+
+ # main.scale[ 0 ] determines the current number of ONOS controller
+ main.numCtrls = int( main.scale[ 0 ] )
+
+ main.case( "Starting up " + str( main.numCtrls ) +
+ " node(s) ONOS cluster" )
+ main.caseExplanation = "Set up ONOS with " + str( main.numCtrls ) +\
+ " node(s) ONOS cluster"
+
+
+
+ #kill off all onos processes
+ main.log.info( "Safety check, killing all ONOS processes" +
+ " before initiating environment setup" )
+
+
+
+ time.sleep( main.startUpSleep )
+ main.step( "Uninstalling ONOS package" )
+ onosUninstallResult = main.TRUE
+ for ip in main.ONOSip:
+ onosUninstallResult = onosUninstallResult and \
+ main.ONOSbench.onosUninstall( nodeIp=ip )
+ stepResult = onosUninstallResult
+ utilities.assert_equals( expect=main.TRUE,
+ actual=stepResult,
+ onpass="Successfully uninstalled ONOS package",
+ onfail="Failed to uninstall ONOS package" )
+
+ for i in range( main.maxNodes ):
+ main.ONOSbench.onosDie( main.ONOSip[ i ] )
+
+ main.log.info( "NODE COUNT = " + str( main.numCtrls ) )
+
+ tempOnosIp = []
+ for i in range( main.numCtrls ):
+ tempOnosIp.append( main.ONOSip[i] )
+
+ main.ONOSbench.createCellFile( main.ONOSbench.ip_address,
+ "temp", main.Mininet1.ip_address,
+ main.apps, tempOnosIp )
+
+ main.step( "Apply cell to environment" )
+ cellResult = main.ONOSbench.setCell( "temp" )
+ verifyResult = main.ONOSbench.verifyCell()
+ stepResult = cellResult and verifyResult
+ utilities.assert_equals( expect=main.TRUE,
+ actual=stepResult,
+ onpass="Successfully applied cell to " + \
+ "environment",
+ onfail="Failed to apply cell to environment " )
+
+ main.step( "Creating ONOS package" )
+ packageResult = main.ONOSbench.onosPackage()
+ stepResult = packageResult
+ utilities.assert_equals( expect=main.TRUE,
+ actual=stepResult,
+ onpass="Successfully created ONOS package",
+ onfail="Failed to create ONOS package" )
+
+ time.sleep( main.startUpSleep )
+ main.step( "Installing ONOS package" )
+ onosInstallResult = main.TRUE
+ for i in range( main.numCtrls ):
+ onosInstallResult = onosInstallResult and \
+ main.ONOSbench.onosInstall( node=main.ONOSip[ i ], options="" )
+ stepResult = onosInstallResult
+
+ utilities.assert_equals( expect=main.TRUE,
+ actual=stepResult,
+ onpass="Successfully installed ONOS package",
+ onfail="Failed to install ONOS package" )
+
+ time.sleep( main.startUpSleep )
+ main.step( "Starting ONOS service" )
+ stopResult = main.TRUE
+ startResult = main.TRUE
+ onosIsUp = main.TRUE
+
+ for i in range( main.numCtrls ):
+ onosIsUp = onosIsUp and main.ONOSbench.isup( main.ONOSip[ i ] )
+ if onosIsUp == main.TRUE:
+ main.log.report( "ONOS instance is up and ready" )
+ else:
+ main.log.report( "ONOS instance may not be up, stop and " +
+ "start ONOS again " )
+ for i in range( main.numCtrls ):
+ stopResult = stopResult and \
+ main.ONOSbench.onosStop( main.ONOSip[ i ] )
+ for i in range( main.numCtrls ):
+ startResult = startResult and \
+ main.ONOSbench.onosStart( main.ONOSip[ i ] )
+ stepResult = onosIsUp and stopResult and startResult
+ utilities.assert_equals( expect=main.TRUE,
+ actual=stepResult,
+ onpass="ONOS service is ready",
+ onfail="ONOS service did not start properly" )
+
+ # Start an ONOS cli to provide functionality that is not currently
+ # supported by the Rest API remove this when Leader Checking is supported
+ # by the REST API
+
+ main.step( "Start ONOS cli" )
+ cliResult = main.TRUE
+ for i in range( main.numCtrls ):
+ cliResult = cliResult and \
+ main.CLIs2[ i ].startOnosCli( main.ONOSip[ i ] )
+ stepResult = cliResult
+ utilities.assert_equals( expect=main.TRUE,
+ actual=stepResult,
+ onpass="Successfully start ONOS cli",
+ onfail="Failed to start ONOS cli" )
+
+ # Remove the first element in main.scale list
+ main.scale.remove( main.scale[ 0 ] )
+
+ def CASE100( self, main ):
+ """
+ Start NETCONF app and OFC-Server or make sure that they are already running
+ """
+ assert main, "There is no main"
+ assert main.CLIs, "There is no main.CLIs"
+ assert main.numCtrls, "Placed the total number of switch topology in \
+ main.numCtrls"
+
+ testResult = main.FALSE
+ main.testName = "Start up NETCONF app in all nodes"
+ main.case( main.testName + " Test - " + str( main.numCtrls ) +
+ " NODE(S)" )
+ main.step( "Starting NETCONF app" )
+ main.assertReturnString = "Assertion result for starting NETCONF app"
+ testResult = main.netconfFunction.startApp( main )
+
+ utilities.assert_equals( expect=main.TRUE,
+ actual=testResult,
+ onpass=main.assertReturnString,
+ onfail=main.assertReturnString )
+
+ main.step( "Starting OFC-Server" )
+ main.assertReturnString = "Assertion result for starting OFC-Server"
+ testResult = main.netconfFunction.startOFC( main )
+
+ utilities.assert_equals( expect=main.TRUE,
+ actual=testResult,
+ onpass=main.assertReturnString,
+ onfail=main.assertReturnString )
+ time.sleep( main.startUpSleep )
+
+ def CASE200( self, main ):
+ """
+ Create or modify a Configuration file
+ -The file is built from information loaded from the .params file
+ """
+ assert main, "There is no main"
+ assert main.CLIs, "There is no main.CLIs"
+ assert main.numCtrls, "Placed the total number of switch topology in \
+ main.numCtrls"
+
+ main.testName = "Assemble the configuration"
+ main.case( main.testName + " Test - " + str( main.numCtrls ) +
+ " NODES(S)" )
+ main.step( "Assembling configuration file" )
+ main.assertReturnString = "Assertion result for assembling configuration file"
+ testResult = main.FALSE
+ testResult = main.netconfFunction.createConfig( main )
+
+ utilities.assert_equals( expect=main.TRUE,
+ actual=testResult,
+ onpass=main.assertReturnString,
+ onfail=main.assertReturnString )
+ time.sleep( main.startUpSleep )
+
+ def CASE300( self, main ):
+ """
+ Push a configuration and bring up a switch
+ """
+ assert main, "There is no main"
+ assert main.CLIs, "There is no main.CLIs"
+ assert main.numCtrls, "Placed the total number of switch topology in \
+ main.numCtrls"
+
+ main.testName = "Uploading the configuration"
+ main.case( main.testName + " Test - " + str( main.numCtrls ) +
+ " NODES(S)" )
+ main.step( "Sending the configuration file")
+ main.assertReturnString = "Assertion result for sending the configuration file"
+ testResult = main.FALSE
+
+ testResult = main.netconfFunction.sendConfig( main )
+
+ utilities.assert_equals( expect=main.TRUE,
+ actual=testResult,
+ onpass=main.assertReturnString,
+ onfail=main.assertReturnString )
+
+ time.sleep( main.switchSleep )
+
+ main.step( "Confirming the device was configured" )
+ main.assertReturnString = "Assertion result for confirming a configuration."
+ testResult = main.FALSE
+
+ testResult = main.netconfFunction.devices( main )
+
+ utilities.assert_equals( expect=main.TRUE,
+ actual=testResult,
+ onpass=main.assertReturnString,
+ onfail=main.assertReturnString )
+
+ def CASE400( self, main ):
+ """
+ Bring down a switch
+ This test case is not yet possible, but the functionality needed to
+ perform it is planned to be added
+ There is a message that is sent "Device () has closed session"
+ when the device disconnects from onos for some reason.
+        Because the triggers for this message are not practical
+ to activate this will likely not be used to implement the test
+ case at this time
+ Possible ways to do this may include bringing down mininet then checking
+        ONOS to see if it recognized that the device was disconnected
+ """
diff --git a/TestON/tests/FUNCnetconf/FUNCnetconf.topo b/TestON/tests/FUNCnetconf/FUNCnetconf.topo
new file mode 100644
index 0000000..a5a836f
--- /dev/null
+++ b/TestON/tests/FUNCnetconf/FUNCnetconf.topo
@@ -0,0 +1,87 @@
+<TOPOLOGY>
+ <COMPONENT>
+
+ <ONOSbench>
+ <host>localhost</host>
+ <user>admin</user>
+ <password></password>
+ <type>OnosDriver</type>
+ <connect_order>1</connect_order>
+ <COMPONENTS>
+ <nodes>3</nodes>
+ </COMPONENTS>
+ </ONOSbench>
+
+ <ONOSrest1>
+ <host>OC1</host>
+ <port>8181</port>
+ <user>onos</user>
+ <password>rocks</password>
+ <type>OnosRestDriver</type>
+ <connect_order>2</connect_order>
+ <COMPONENTS>
+ </COMPONENTS>
+ </ONOSrest1>
+
+ <ONOSrest2>
+ <host>OC2</host>
+ <port>8181</port>
+ <user>onos</user>
+ <password>rocks</password>
+ <type>OnosRestDriver</type>
+ <connect_order>3</connect_order>
+ <COMPONENTS>
+ </COMPONENTS>
+ </ONOSrest2>
+
+ <ONOSrest3>
+ <host>OC3</host>
+ <port>8181</port>
+ <user>onos</user>
+ <password>rocks</password>
+ <type>OnosRestDriver</type>
+ <connect_order>4</connect_order>
+ <COMPONENTS>
+ </COMPONENTS>
+ </ONOSrest3>
+
+ <ONOScli1>
+ <host>localhost</host>
+ <user>admin</user>
+ <password></password>
+ <type>OnosCliDriver</type>
+ <connect_order>5</connect_order>
+ <COMPONENTS> </COMPONENTS>
+ </ONOScli1>
+
+ <ONOScli2>
+ <host>localhost</host>
+ <user>admin</user>
+ <password></password>
+ <type>OnosCliDriver</type>
+ <connect_order>6</connect_order>
+ <COMPONENTS> </COMPONENTS>
+ </ONOScli2>
+
+ <ONOScli3>
+ <host>localhost</host>
+ <user>admin</user>
+ <password></password>
+ <type>OnosCliDriver</type>
+ <connect_order>7</connect_order>
+ <COMPONENTS> </COMPONENTS>
+ </ONOScli3>
+
+ <Mininet1>
+ <host>OCN</host>
+ <user>admin</user>
+ <password></password>
+ <type>MininetCliDriver</type>
+ <connect_order>8</connect_order>
+ <COMPONENTS>
+ <home>~/mininet/</home>
+ </COMPONENTS>
+ </Mininet1>
+
+ </COMPONENT>
+</TOPOLOGY>
\ No newline at end of file
diff --git a/TestON/tests/FUNCnetconf/README b/TestON/tests/FUNCnetconf/README
new file mode 100644
index 0000000..dbf51c0
--- /dev/null
+++ b/TestON/tests/FUNCnetconf/README
@@ -0,0 +1,29 @@
+Summary:
+ This test suite consists of generic NETCONF protocol testing within ONOS
+ using OF-Config to translate NETCONF requests for OVSDB.
+ The following is an overview of how the NETCONF protocol is tested
+ Steps:
+ - Start NETCONF App in all currently running nodes
+ - Start OF-Config server on device to be configured
+ - Create configuration file
+ - Upload the configuration file to the device to be configured
+ - Verify that the device was configured successfully
+
+Required:
+ Make sure that OF-Config, https://github.com/openvswitch/of-config, is
+ installed on the device that is to be configured, the test assumes this
+ device is the machine running TestON.
+ Ensure that <cfgName> and <cfgPass> in the params file are the username
+ and password required to ssh into the desired machine and account that
+ of-config is to be run on.
+ The netconfConfig.json file contains the configuration that was
+ generated by the test. The test also relies on the existence of this
+    file and will misbehave if it is removed entirely. The contents of the
+    file are overwritten every time the test suite runs through Test Case 200
+
+TODO:
+Extend configuration to allow for specification of
+ - Vendor name
+ - Hardware version
+ - Software version
+ - Serial Number
\ No newline at end of file
diff --git a/TestON/tests/FUNCnetconf/__init__.py b/TestON/tests/FUNCnetconf/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/TestON/tests/FUNCnetconf/__init__.py
diff --git a/TestON/tests/HAclusterRestart/HAclusterRestart.params b/TestON/tests/HAclusterRestart/HAclusterRestart.params
index e28c0f7..0923f8a 100644
--- a/TestON/tests/HAclusterRestart/HAclusterRestart.params
+++ b/TestON/tests/HAclusterRestart/HAclusterRestart.params
@@ -18,9 +18,7 @@
#CASE16: Install Distributed Primitives app
#CASE17: Check for basic functionality with distributed primitives
<testcases>1,2,8,3,8,4,5,14,16,17,[6],8,3,7,4,15,17,9,8,4,10,8,4,11,8,4,12,8,4,13</testcases>
- <imports>
- <path> /home/admin/OnosSystemTest/TestON/tests/HAclusterRestart/dependencies/ </path>
- </imports>
+
<ENV>
<cellName>HA</cellName>
<appString>drivers,openflow,proxyarp,mobility</appString>
diff --git a/TestON/tests/HAclusterRestart/HAclusterRestart.py b/TestON/tests/HAclusterRestart/HAclusterRestart.py
index f903f95..7b1acd4 100644
--- a/TestON/tests/HAclusterRestart/HAclusterRestart.py
+++ b/TestON/tests/HAclusterRestart/HAclusterRestart.py
@@ -49,13 +49,13 @@
"""
import imp
import time
+ import json
main.log.info( "ONOS HA test: Restart all ONOS nodes - " +
"initialization" )
main.case( "Setting up test environment" )
main.caseExplanation = "Setup the test environment including " +\
"installing ONOS, starting Mininet and ONOS" +\
"cli sessions."
- # TODO: save all the timers and output them for plotting
# load some variables from the params file
PULLCODE = False
@@ -93,10 +93,8 @@
ONOS7Port = main.params[ 'CTRL' ][ 'port7' ]
try:
- fileName = "Counters"
- path = main.params[ 'imports' ][ 'path' ]
- main.Counters = imp.load_source( fileName,
- path + fileName + ".py" )
+ from tests.HAsanity.dependencies.Counters import Counters
+ main.Counters = Counters()
except Exception as e:
main.log.exception( e )
main.cleanup()
@@ -241,6 +239,9 @@
onpass="ONOS cli startup successful",
onfail="ONOS cli startup failed" )
+ # Create a list of active nodes for use when some nodes are stopped
+ main.activeNodes = [ i for i in range( 0, len( main.CLIs ) ) ]
+
if main.params[ 'tcpdump' ].lower() == "true":
main.step( "Start Packet Capture MN" )
main.Mininet2.startTcpdump(
@@ -250,10 +251,9 @@
port=main.params[ 'MNtcpdump' ][ 'port' ] )
main.step( "App Ids check" )
- time.sleep(60)
appCheck = main.TRUE
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].appToIDCheck,
name="appToIDCheck-" + str( i ),
args=[] )
@@ -264,12 +264,55 @@
t.join()
appCheck = appCheck and t.result
if appCheck != main.TRUE:
- main.log.warn( main.CLIs[0].apps() )
- main.log.warn( main.CLIs[0].appIDs() )
+ node = main.activeNodes[0]
+ main.log.warn( main.CLIs[node].apps() )
+ main.log.warn( main.CLIs[node].appIDs() )
utilities.assert_equals( expect=main.TRUE, actual=appCheck,
onpass="App Ids seem to be correct",
onfail="Something is wrong with app Ids" )
+ main.step( "Checking ONOS nodes" )
+ nodesOutput = []
+ nodeResults = main.TRUE
+ threads = []
+ for i in main.activeNodes:
+ t = main.Thread( target=main.CLIs[i].nodes,
+ name="nodes-" + str( i ),
+ args=[ ] )
+ threads.append( t )
+ t.start()
+
+ for t in threads:
+ t.join()
+ nodesOutput.append( t.result )
+ ips = [ main.nodes[node].ip_address for node in main.activeNodes ]
+ ips.sort()
+ for i in nodesOutput:
+ try:
+ current = json.loads( i )
+ activeIps = []
+ currentResult = main.FALSE
+ for node in current:
+ if node['state'] == 'READY':
+ activeIps.append( node['ip'] )
+ activeIps.sort()
+ if ips == activeIps:
+ currentResult = main.TRUE
+ except ( ValueError, TypeError ):
+ main.log.error( "Error parsing nodes output" )
+ main.log.warn( repr( i ) )
+ currentResult = main.FALSE
+ nodeResults = nodeResults and currentResult
+ utilities.assert_equals( expect=main.TRUE, actual=nodeResults,
+ onpass="Nodes check successful",
+ onfail="Nodes check NOT successful" )
+
+ if not nodeResults:
+ for cli in main.CLIs:
+ main.log.debug( "{} components not ACTIVE: \n{}".format(
+ cli.name,
+ cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
+
if cliResults == main.FALSE:
main.log.error( "Failed to start ONOS, stopping test" )
main.cleanup()
@@ -357,6 +400,7 @@
ipList = [ ]
deviceList = []
+ onosCli = main.CLIs[ main.activeNodes[0] ]
try:
# Assign mastership to specific controllers. This assignment was
# determined for a 7 node cluser, but will work with any sized
@@ -366,45 +410,45 @@
if i == 1:
c = 0
ip = main.nodes[ c ].ip_address # ONOS1
- deviceId = main.ONOScli1.getDevice( "1000" ).get( 'id' )
+ deviceId = onosCli.getDevice( "1000" ).get( 'id' )
elif i == 2:
c = 1 % main.numCtrls
ip = main.nodes[ c ].ip_address # ONOS2
- deviceId = main.ONOScli1.getDevice( "2000" ).get( 'id' )
+ deviceId = onosCli.getDevice( "2000" ).get( 'id' )
elif i == 3:
c = 1 % main.numCtrls
ip = main.nodes[ c ].ip_address # ONOS2
- deviceId = main.ONOScli1.getDevice( "3000" ).get( 'id' )
+ deviceId = onosCli.getDevice( "3000" ).get( 'id' )
elif i == 4:
c = 3 % main.numCtrls
ip = main.nodes[ c ].ip_address # ONOS4
- deviceId = main.ONOScli1.getDevice( "3004" ).get( 'id' )
+ deviceId = onosCli.getDevice( "3004" ).get( 'id' )
elif i == 5:
c = 2 % main.numCtrls
ip = main.nodes[ c ].ip_address # ONOS3
- deviceId = main.ONOScli1.getDevice( "5000" ).get( 'id' )
+ deviceId = onosCli.getDevice( "5000" ).get( 'id' )
elif i == 6:
c = 2 % main.numCtrls
ip = main.nodes[ c ].ip_address # ONOS3
- deviceId = main.ONOScli1.getDevice( "6000" ).get( 'id' )
+ deviceId = onosCli.getDevice( "6000" ).get( 'id' )
elif i == 7:
c = 5 % main.numCtrls
ip = main.nodes[ c ].ip_address # ONOS6
- deviceId = main.ONOScli1.getDevice( "6007" ).get( 'id' )
+ deviceId = onosCli.getDevice( "6007" ).get( 'id' )
elif i >= 8 and i <= 17:
c = 4 % main.numCtrls
ip = main.nodes[ c ].ip_address # ONOS5
dpid = '3' + str( i ).zfill( 3 )
- deviceId = main.ONOScli1.getDevice( dpid ).get( 'id' )
+ deviceId = onosCli.getDevice( dpid ).get( 'id' )
elif i >= 18 and i <= 27:
c = 6 % main.numCtrls
ip = main.nodes[ c ].ip_address # ONOS7
dpid = '6' + str( i ).zfill( 3 )
- deviceId = main.ONOScli1.getDevice( dpid ).get( 'id' )
+ deviceId = onosCli.getDevice( dpid ).get( 'id' )
elif i == 28:
c = 0
ip = main.nodes[ c ].ip_address # ONOS1
- deviceId = main.ONOScli1.getDevice( "2800" ).get( 'id' )
+ deviceId = onosCli.getDevice( "2800" ).get( 'id' )
else:
main.log.error( "You didn't write an else statement for " +
"switch s" + str( i ) )
@@ -412,13 +456,12 @@
# Assign switch
assert deviceId, "No device id for s" + str( i ) + " in ONOS"
# TODO: make this controller dynamic
- roleCall = roleCall and main.ONOScli1.deviceRole( deviceId,
- ip )
+ roleCall = roleCall and onosCli.deviceRole( deviceId, ip )
ipList.append( ip )
deviceList.append( deviceId )
except ( AttributeError, AssertionError ):
main.log.exception( "Something is wrong with ONOS device view" )
- main.log.info( main.ONOScli1.devices() )
+ main.log.info( onosCli.devices() )
utilities.assert_equals(
expect=main.TRUE,
actual=roleCall,
@@ -434,7 +477,7 @@
ip = ipList[i]
deviceId = deviceList[i]
# Check assignment
- master = main.ONOScli1.getRole( deviceId ).get( 'master' )
+ master = onosCli.getRole( deviceId ).get( 'master' )
if ip in master:
roleCheck = roleCheck and main.TRUE
else:
@@ -482,7 +525,8 @@
# install onos-app-fwd
main.step( "Install reactive forwarding app" )
- installResults = main.CLIs[0].activateApp( "org.onosproject.fwd" )
+ onosCli = main.CLIs[ main.activeNodes[0] ]
+ installResults = onosCli.activateApp( "org.onosproject.fwd" )
utilities.assert_equals( expect=main.TRUE, actual=installResults,
onpass="Install fwd successful",
onfail="Install fwd failed" )
@@ -490,7 +534,7 @@
main.step( "Check app ids" )
appCheck = main.TRUE
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].appToIDCheck,
name="appToIDCheck-" + str( i ),
args=[] )
@@ -501,8 +545,8 @@
t.join()
appCheck = appCheck and t.result
if appCheck != main.TRUE:
- main.log.warn( main.CLIs[0].apps() )
- main.log.warn( main.CLIs[0].appIDs() )
+ main.log.warn( onosCli.apps() )
+ main.log.warn( onosCli.appIDs() )
utilities.assert_equals( expect=main.TRUE, actual=appCheck,
onpass="App Ids seem to be correct",
onfail="Something is wrong with app Ids" )
@@ -531,7 +575,8 @@
time.sleep( 11 )
# uninstall onos-app-fwd
main.step( "Uninstall reactive forwarding app" )
- uninstallResult = main.CLIs[0].deactivateApp( "org.onosproject.fwd" )
+ node = main.activeNodes[0]
+ uninstallResult = main.CLIs[node].deactivateApp( "org.onosproject.fwd" )
utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
onpass="Uninstall fwd successful",
onfail="Uninstall fwd failed" )
@@ -539,7 +584,7 @@
main.step( "Check app ids" )
threads = []
appCheck2 = main.TRUE
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].appToIDCheck,
name="appToIDCheck-" + str( i ),
args=[] )
@@ -550,8 +595,9 @@
t.join()
appCheck2 = appCheck2 and t.result
if appCheck2 != main.TRUE:
- main.log.warn( main.CLIs[0].apps() )
- main.log.warn( main.CLIs[0].appIDs() )
+ node = main.activeNodes[0]
+ main.log.warn( main.CLIs[node].apps() )
+ main.log.warn( main.CLIs[node].appIDs() )
utilities.assert_equals( expect=main.TRUE, actual=appCheck2,
onpass="App Ids seem to be correct",
onfail="Something is wrong with app Ids" )
@@ -570,16 +616,17 @@
host2 = "00:00:00:00:00:" + \
str( hex( i + 10 )[ 2: ] ).zfill( 2 ).upper()
# NOTE: getHost can return None
- host1Dict = main.ONOScli1.getHost( host1 )
- host2Dict = main.ONOScli1.getHost( host2 )
+ host1Dict = onosCli.getHost( host1 )
+ host2Dict = onosCli.getHost( host2 )
host1Id = None
host2Id = None
if host1Dict and host2Dict:
host1Id = host1Dict.get( 'id', None )
host2Id = host2Dict.get( 'id', None )
if host1Id and host2Id:
- nodeNum = ( i % main.numCtrls )
- tmpId = main.CLIs[ nodeNum ].addHostIntent( host1Id, host2Id )
+ nodeNum = ( i % len( main.activeNodes ) )
+ node = main.activeNodes[nodeNum]
+ tmpId = main.CLIs[node].addHostIntent( host1Id, host2Id )
if tmpId:
main.log.info( "Added intent with id: " + tmpId )
intentIds.append( tmpId )
@@ -589,7 +636,8 @@
else:
main.log.error( "Error, getHost() failed for h" + str( i ) +
" and/or h" + str( i + 10 ) )
- hosts = main.CLIs[ 0 ].hosts()
+ node = main.activeNodes[0]
+ hosts = main.CLIs[node].hosts()
main.log.warn( "Hosts output: " )
try:
main.log.warn( json.dumps( json.loads( hosts ),
@@ -604,7 +652,7 @@
onfail="Error looking up host ids" )
intentStart = time.time()
- onosIds = main.ONOScli1.getAllIntentsId()
+ onosIds = onosCli.getAllIntentsId()
main.log.info( "Submitted intents: " + str( intentIds ) )
main.log.info( "Intents in ONOS: " + str( onosIds ) )
for intent in intentIds:
@@ -617,7 +665,7 @@
else:
intentStop = None
# Print the intent states
- intents = main.ONOScli1.intents()
+ intents = onosCli.intents()
intentStates = []
installedCheck = True
main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
@@ -643,7 +691,7 @@
count += 1
main.log.info( "%-6s%-15s%-15s" %
( str( count ), str( i ), str( s ) ) )
- leaders = main.ONOScli1.leaders()
+ leaders = onosCli.leaders()
try:
missing = False
if leaders:
@@ -670,12 +718,12 @@
main.log.error( repr( leaders ) )
# Check all nodes
if missing:
- for node in main.CLIs:
- response = node.leaders( jsonFormat=False)
- main.log.warn( str( node.name ) + " leaders output: \n" +
+ for i in main.activeNodes:
+ response = main.CLIs[i].leaders( jsonFormat=False)
+ main.log.warn( str( main.CLIs[i].name ) + " leaders output: \n" +
str( response ) )
- partitions = main.ONOScli1.partitions()
+ partitions = onosCli.partitions()
try:
if partitions :
parsedPartitions = json.loads( partitions )
@@ -690,7 +738,7 @@
except ( ValueError, TypeError ):
main.log.exception( "Error parsing partitions" )
main.log.error( repr( partitions ) )
- pendingMap = main.ONOScli1.pendingMap()
+ pendingMap = onosCli.pendingMap()
try:
if pendingMap :
parsedPending = json.loads( pendingMap )
@@ -711,21 +759,21 @@
main.log.error( "Error in pushing host intents to ONOS" )
main.step( "Intent Anti-Entropy dispersion" )
- for i in range(100):
+ for j in range(100):
correct = True
main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
- for cli in main.CLIs:
+ for i in main.activeNodes:
onosIds = []
- ids = cli.getAllIntentsId()
+ ids = main.CLIs[i].getAllIntentsId()
onosIds.append( ids )
- main.log.debug( "Intents in " + cli.name + ": " +
+ main.log.debug( "Intents in " + main.CLIs[i].name + ": " +
str( sorted( onosIds ) ) )
if sorted( ids ) != sorted( intentIds ):
main.log.warn( "Set of intent IDs doesn't match" )
correct = False
break
else:
- intents = json.loads( cli.intents() )
+ intents = json.loads( main.CLIs[i].intents() )
for intent in intents:
if intent[ 'state' ] != "INSTALLED":
main.log.warn( "Intent " + intent[ 'id' ] +
@@ -754,7 +802,7 @@
else:
count += 1
gossipPeriod = int( main.params['timers']['gossip'] )
- maxGossipTime = gossipPeriod * len( main.nodes )
+ maxGossipTime = gossipPeriod * len( main.activeNodes )
utilities.assert_greater_equals(
expect=maxGossipTime, actual=gossipTime,
onpass="ECM anti-entropy for intents worked within " +
@@ -770,11 +818,11 @@
installedCheck = True
main.log.info( "Sleeping 60 seconds to see if intents are found" )
time.sleep( 60 )
- onosIds = main.ONOScli1.getAllIntentsId()
+ onosIds = onosCli.getAllIntentsId()
main.log.info( "Submitted intents: " + str( intentIds ) )
main.log.info( "Intents in ONOS: " + str( onosIds ) )
# Print the intent states
- intents = main.ONOScli1.intents()
+ intents = onosCli.intents()
intentStates = []
main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
count = 0
@@ -798,7 +846,7 @@
count += 1
main.log.info( "%-6s%-15s%-15s" %
( str( count ), str( i ), str( s ) ) )
- leaders = main.ONOScli1.leaders()
+ leaders = onosCli.leaders()
try:
missing = False
if leaders:
@@ -828,12 +876,13 @@
main.log.error( repr( leaders ) )
# Check all nodes
if missing:
- for node in main.CLIs:
+ for i in main.activeNodes:
+ node = main.CLIs[i]
response = node.leaders( jsonFormat=False)
main.log.warn( str( node.name ) + " leaders output: \n" +
str( response ) )
- partitions = main.ONOScli1.partitions()
+ partitions = onosCli.partitions()
try:
if partitions :
parsedPartitions = json.loads( partitions )
@@ -848,7 +897,7 @@
except ( ValueError, TypeError ):
main.log.exception( "Error parsing partitions" )
main.log.error( repr( partitions ) )
- pendingMap = main.ONOScli1.pendingMap()
+ pendingMap = onosCli.pendingMap()
try:
if pendingMap :
parsedPending = json.loads( pendingMap )
@@ -879,6 +928,7 @@
"functionality and check the state of " +\
"the intent"
main.step( "Ping across added host intents" )
+ onosCli = main.CLIs[ main.activeNodes[0] ]
PingResult = main.TRUE
for i in range( 8, 18 ):
ping = main.Mininet1.pingHost( src="h" + str( i ),
@@ -896,7 +946,7 @@
# TODO: pretty print
main.log.warn( "ONOS1 intents: " )
try:
- tmpIntents = main.ONOScli1.intents()
+ tmpIntents = onosCli.intents()
main.log.warn( json.dumps( json.loads( tmpIntents ),
sort_keys=True,
indent=4,
@@ -915,7 +965,7 @@
while not installedCheck and loopCount < 40:
installedCheck = True
# Print the intent states
- intents = main.ONOScli1.intents()
+ intents = onosCli.intents()
intentStates = []
main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
count = 0
@@ -944,7 +994,7 @@
"INSTALLED state" )
main.step( "Check leadership of topics" )
- leaders = main.ONOScli1.leaders()
+ leaders = onosCli.leaders()
topicCheck = main.TRUE
try:
if leaders:
@@ -979,7 +1029,8 @@
# TODO: Check for a leader of these topics
# Check all nodes
if topicCheck:
- for node in main.CLIs:
+ for i in main.activeNodes:
+ node = main.CLIs[i]
response = node.leaders( jsonFormat=False)
main.log.warn( str( node.name ) + " leaders output: \n" +
str( response ) )
@@ -988,7 +1039,7 @@
onpass="intent Partitions is in leaders",
onfail="Some topics were lost " )
# Print partitions
- partitions = main.ONOScli1.partitions()
+ partitions = onosCli.partitions()
try:
if partitions :
parsedPartitions = json.loads( partitions )
@@ -1004,7 +1055,7 @@
main.log.exception( "Error parsing partitions" )
main.log.error( repr( partitions ) )
# Print Pending Map
- pendingMap = main.ONOScli1.pendingMap()
+ pendingMap = onosCli.pendingMap()
try:
if pendingMap :
parsedPending = json.loads( pendingMap )
@@ -1024,7 +1075,7 @@
"intents change" )
time.sleep( 60 )
# Print the intent states
- intents = main.ONOScli1.intents()
+ intents = onosCli.intents()
intentStates = []
main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
count = 0
@@ -1043,7 +1094,7 @@
count += 1
main.log.info( "%-6s%-15s%-15s" %
( str( count ), str( i ), str( s ) ) )
- leaders = main.ONOScli1.leaders()
+ leaders = onosCli.leaders()
try:
missing = False
if leaders:
@@ -1072,12 +1123,13 @@
main.log.exception( "Error parsing leaders" )
main.log.error( repr( leaders ) )
if missing:
- for node in main.CLIs:
+ for i in main.activeNodes:
+ node = main.CLIs[i]
response = node.leaders( jsonFormat=False)
main.log.warn( str( node.name ) + " leaders output: \n" +
str( response ) )
- partitions = main.ONOScli1.partitions()
+ partitions = onosCli.partitions()
try:
if partitions :
parsedPartitions = json.loads( partitions )
@@ -1092,7 +1144,7 @@
except ( ValueError, TypeError ):
main.log.exception( "Error parsing partitions" )
main.log.error( repr( partitions ) )
- pendingMap = main.ONOScli1.pendingMap()
+ pendingMap = onosCli.pendingMap()
try:
if pendingMap :
parsedPending = json.loads( pendingMap )
@@ -1107,7 +1159,8 @@
main.log.exception( "Error parsing pending map" )
main.log.error( repr( pendingMap ) )
# Print flowrules
- main.log.debug( main.CLIs[0].flows( jsonFormat=False ) )
+ node = main.activeNodes[0]
+ main.log.debug( main.CLIs[node].flows( jsonFormat=False ) )
main.step( "Wait a minute then ping again" )
# the wait is above
PingResult = main.TRUE
@@ -1127,7 +1180,7 @@
# TODO: pretty print
main.log.warn( "ONOS1 intents: " )
try:
- tmpIntents = main.ONOScli1.intents()
+ tmpIntents = onosCli.intents()
main.log.warn( json.dumps( json.loads( tmpIntents ),
sort_keys=True,
indent=4,
@@ -1164,7 +1217,7 @@
# Assert that each device has a master
rolesNotNull = main.TRUE
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].rolesNotNull,
name="rolesNotNull-" + str( i ),
args=[] )
@@ -1186,7 +1239,7 @@
consistentMastership = True
rolesResults = True
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].roles,
name="roles-" + str( i ),
args=[] )
@@ -1197,13 +1250,12 @@
t.join()
ONOSMastership.append( t.result )
- for i in range( main.numCtrls ):
+ for i in range( len( ONOSMastership ) ):
+ node = str( main.activeNodes[i] + 1 )
if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
- main.log.error( "Error in getting ONOS" + str( i + 1 ) +
- " roles" )
- main.log.warn(
- "ONOS" + str( i + 1 ) + " mastership response: " +
- repr( ONOSMastership[i] ) )
+ main.log.error( "Error in getting ONOS" + node + " roles" )
+ main.log.warn( "ONOS" + node + " mastership response: " +
+ repr( ONOSMastership[i] ) )
rolesResults = False
utilities.assert_equals(
expect=True,
@@ -1224,10 +1276,11 @@
onfail="ONOS nodes have different views of switch roles" )
if rolesResults and not consistentMastership:
- for i in range( main.numCtrls ):
+ for i in range( len( main.activeNodes ) ):
+ node = str( main.activeNodes[i] + 1 )
try:
main.log.warn(
- "ONOS" + str( i + 1 ) + " roles: ",
+ "ONOS" + node + " roles: ",
json.dumps(
json.loads( ONOSMastership[ i ] ),
sort_keys=True,
@@ -1247,7 +1300,7 @@
consistentIntents = True
intentsResults = True
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].intents,
name="intents-" + str( i ),
args=[],
@@ -1259,8 +1312,8 @@
t.join()
ONOSIntents.append( t.result )
- for i in range( main.numCtrls ):
- node = str( i + 1 )
+ for i in range( len( ONOSIntents ) ):
+ node = str( main.activeNodes[i] + 1 )
if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
main.log.error( "Error in getting ONOS" + node + " intents" )
main.log.warn( "ONOS" + node + " intents response: " +
@@ -1293,44 +1346,51 @@
# ... ... ...
# ... ... ...
title = " Id"
- for n in range( main.numCtrls ):
+ for n in main.activeNodes:
title += " " * 10 + "ONOS" + str( n + 1 )
main.log.warn( title )
# get all intent keys in the cluster
keys = []
- for nodeStr in ONOSIntents:
- node = json.loads( nodeStr )
- for intent in node:
- keys.append( intent.get( 'id' ) )
- keys = set( keys )
- for key in keys:
- row = "%-13s" % key
+ try:
+ # Get the set of all intent keys
for nodeStr in ONOSIntents:
node = json.loads( nodeStr )
for intent in node:
- if intent.get( 'id', "Error" ) == key:
- row += "%-15s" % intent.get( 'state' )
- main.log.warn( row )
- # End table view
+ keys.append( intent.get( 'id' ) )
+ keys = set( keys )
+ # For each intent key, print the state on each node
+ for key in keys:
+ row = "%-13s" % key
+ for nodeStr in ONOSIntents:
+ node = json.loads( nodeStr )
+ for intent in node:
+ if intent.get( 'id', "Error" ) == key:
+ row += "%-15s" % intent.get( 'state' )
+ main.log.warn( row )
+ # End of intent state table
+ except ValueError as e:
+ main.log.exception( e )
+ main.log.debug( "nodeStr was: " + repr( nodeStr ) )
if intentsResults and not consistentIntents:
# print the json objects
- n = len(ONOSIntents)
- main.log.debug( "ONOS" + str( n ) + " intents: " )
+ n = str( main.activeNodes[-1] + 1 )
+ main.log.debug( "ONOS" + n + " intents: " )
main.log.debug( json.dumps( json.loads( ONOSIntents[ -1 ] ),
sort_keys=True,
indent=4,
separators=( ',', ': ' ) ) )
- for i in range( main.numCtrls ):
+ for i in range( len( ONOSIntents ) ):
+ node = str( main.activeNodes[i] + 1 )
if ONOSIntents[ i ] != ONOSIntents[ -1 ]:
- main.log.debug( "ONOS" + str( i + 1 ) + " intents: " )
+ main.log.debug( "ONOS" + node + " intents: " )
main.log.debug( json.dumps( json.loads( ONOSIntents[i] ),
sort_keys=True,
indent=4,
separators=( ',', ': ' ) ) )
else:
- main.log.debug( main.nodes[ i ].name + " intents match ONOS" +
- str( n ) + " intents" )
+ main.log.debug( "ONOS" + node + " intents match ONOS" +
+ n + " intents" )
elif intentsResults and consistentIntents:
intentCheck = main.TRUE
intentState = ONOSIntents[ 0 ]
@@ -1344,7 +1404,7 @@
consistentFlows = True
flowsResults = True
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].flows,
name="flows-" + str( i ),
args=[],
@@ -1359,8 +1419,8 @@
result = t.result
ONOSFlows.append( result )
- for i in range( main.numCtrls ):
- num = str( i + 1 )
+ for i in range( len( ONOSFlows ) ):
+ num = str( main.activeNodes[i] + 1 )
if not ONOSFlows[ i ] or "Error" in ONOSFlows[ i ]:
main.log.error( "Error in getting ONOS" + num + " flows" )
main.log.warn( "ONOS" + num + " flows response: " +
@@ -1396,16 +1456,16 @@
onfail="ONOS nodes have different flow counts" )
if flowsResults and not consistentFlows:
- for i in range( main.numCtrls ):
+ for i in range( len( ONOSFlows ) ):
+ node = str( main.activeNodes[i] + 1 )
try:
main.log.warn(
- "ONOS" + str( i + 1 ) + " flows: " +
+ "ONOS" + node + " flows: " +
json.dumps( json.loads( ONOSFlows[i] ), sort_keys=True,
indent=4, separators=( ',', ': ' ) ) )
except ( ValueError, TypeError ):
- main.log.warn(
- "ONOS" + str( i + 1 ) + " flows: " +
- repr( ONOSFlows[ i ] ) )
+ main.log.warn( "ONOS" + node + " flows: " +
+ repr( ONOSFlows[ i ] ) )
elif flowsResults and consistentFlows:
flowCheck = main.TRUE
flowState = ONOSFlows[ 0 ]
@@ -1414,7 +1474,7 @@
global flows
flows = []
for i in range( 1, 29 ):
- flows.append( main.Mininet1.getFlowTable( "s" + str( i ), version="1.3" ) )
+ flows.append( main.Mininet1.getFlowTable( "s" + str( i ), version="1.3", debug=False ) )
if flowCheck == main.FALSE:
for table in flows:
main.log.warn( table )
@@ -1465,7 +1525,7 @@
main.step( "Collecting topology information from ONOS" )
devices = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].devices,
name="devices-" + str( i ),
args=[ ] )
@@ -1477,7 +1537,7 @@
devices.append( t.result )
hosts = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].hosts,
name="hosts-" + str( i ),
args=[ ] )
@@ -1497,7 +1557,7 @@
ports = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].ports,
name="ports-" + str( i ),
args=[ ] )
@@ -1509,7 +1569,7 @@
ports.append( t.result )
links = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].links,
name="links-" + str( i ),
args=[ ] )
@@ -1521,7 +1581,7 @@
links.append( t.result )
clusters = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].clusters,
name="clusters-" + str( i ),
args=[ ] )
@@ -1537,7 +1597,7 @@
main.step( "Host view is consistent across ONOS nodes" )
consistentHostsResult = main.TRUE
for controller in range( len( hosts ) ):
- controllerStr = str( controller + 1 )
+ controllerStr = str( main.activeNodes[controller] + 1 )
if hosts[ controller ] and "Error" not in hosts[ controller ]:
if hosts[ controller ] == hosts[ 0 ]:
continue
@@ -1564,7 +1624,7 @@
main.step( "Each host has an IP address" )
ipResult = main.TRUE
for controller in range( 0, len( hosts ) ):
- controllerStr = str( controller + 1 )
+ controllerStr = str( main.activeNodes[controller] + 1 )
if hosts[ controller ]:
for host in hosts[ controller ]:
if not host.get( 'ipAddresses', [ ] ):
@@ -1581,7 +1641,7 @@
main.step( "Cluster view is consistent across ONOS nodes" )
consistentClustersResult = main.TRUE
for controller in range( len( clusters ) ):
- controllerStr = str( controller + 1 )
+ controllerStr = str( main.activeNodes[controller] + 1 )
if "Error" not in clusters[ controller ]:
if clusters[ controller ] == clusters[ 0 ]:
continue
@@ -1626,8 +1686,8 @@
mnSwitches = main.Mininet1.getSwitches()
mnLinks = main.Mininet1.getLinks()
mnHosts = main.Mininet1.getHosts()
- for controller in range( main.numCtrls ):
- controllerStr = str( controller + 1 )
+ for controller in main.activeNodes:
+ controllerStr = str( main.activeNodes[controller] + 1 )
if devices[ controller ] and ports[ controller ] and\
"Error" not in devices[ controller ] and\
"Error" not in ports[ controller ]:
@@ -1792,11 +1852,10 @@
labels.append( "Restart" )
data.append( str( main.restartTime ) )
- # FIXME: revisit test plan for election with madan
# Rerun for election on restarted nodes
runResults = main.TRUE
for cli in main.CLIs:
- run = main.CLIs[0].electionTestRun()
+ run = cli.electionTestRun()
if run != main.TRUE:
main.log.error( "Error running for election on " + cli.name )
runResults = runResults and run
@@ -1806,9 +1865,10 @@
# TODO: Make this configurable
time.sleep( 60 )
- main.log.debug( main.CLIs[0].nodes( jsonFormat=False ) )
- main.log.debug( main.CLIs[0].leaders( jsonFormat=False ) )
- main.log.debug( main.CLIs[0].partitions( jsonFormat=False ) )
+ node = main.activeNodes[0]
+ main.log.debug( main.CLIs[node].nodes( jsonFormat=False ) )
+ main.log.debug( main.CLIs[node].leaders( jsonFormat=False ) )
+ main.log.debug( main.CLIs[node].partitions( jsonFormat=False ) )
def CASE7( self, main ):
"""
@@ -1826,7 +1886,7 @@
# Assert that each device has a master
rolesNotNull = main.TRUE
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].rolesNotNull,
name="rolesNotNull-" + str( i ),
args=[ ] )
@@ -1848,7 +1908,7 @@
consistentMastership = True
rolesResults = True
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].roles,
name="roles-" + str( i ),
args=[] )
@@ -1859,13 +1919,12 @@
t.join()
ONOSMastership.append( t.result )
- for i in range( main.numCtrls ):
+ for i in range( len( ONOSMastership ) ):
+ node = str( main.activeNodes[i] + 1 )
if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
- main.log.error( "Error in getting ONOS" + str( i + 1 ) +
- " roles" )
- main.log.warn(
- "ONOS" + str( i + 1 ) + " mastership response: " +
- repr( ONOSMastership[i] ) )
+ main.log.error( "Error in getting ONOS" + node + " roles" )
+ main.log.warn( "ONOS" + node + " mastership response: " +
+ repr( ONOSMastership[i] ) )
rolesResults = False
utilities.assert_equals(
expect=True,
@@ -1886,8 +1945,9 @@
onfail="ONOS nodes have different views of switch roles" )
if rolesResults and not consistentMastership:
- for i in range( main.numCtrls ):
- main.log.warn( "ONOS" + str( i + 1 ) + " roles: ",
+ for i in range( len( ONOSMastership ) ):
+ node = str( main.activeNodes[i] + 1 )
+ main.log.warn( "ONOS" + node + " roles: ",
json.dumps( json.loads( ONOSMastership[ i ] ),
sort_keys=True,
indent=4,
@@ -1903,7 +1963,7 @@
consistentIntents = True
intentsResults = True
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].intents,
name="intents-" + str( i ),
args=[],
@@ -1915,11 +1975,11 @@
t.join()
ONOSIntents.append( t.result )
- for i in range( main.numCtrls ):
+ for i in range( len( ONOSIntents) ):
+ node = str( main.activeNodes[i] + 1 )
if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
- main.log.error( "Error in getting ONOS" + str( i + 1 ) +
- " intents" )
- main.log.warn( "ONOS" + str( i + 1 ) + " intents response: " +
+ main.log.error( "Error in getting ONOS" + node + " intents" )
+ main.log.warn( "ONOS" + node + " intents response: " +
repr( ONOSIntents[ i ] ) )
intentsResults = False
utilities.assert_equals(
@@ -1942,7 +2002,7 @@
# ... ... ...
# ... ... ...
title = " ID"
- for n in range( main.numCtrls ):
+ for n in main.activeNodes:
title += " " * 10 + "ONOS" + str( n + 1 )
main.log.warn( title )
# get all intent keys in the cluster
@@ -1982,8 +2042,9 @@
main.log.info( dict( out ) )
if intentsResults and not consistentIntents:
- for i in range( main.numCtrls ):
- main.log.warn( "ONOS" + str( i + 1 ) + " intents: " )
+ for i in range( len( main.activeNodes ) ):
+ node = str( main.activeNodes[i] + 1 )
+ main.log.warn( "ONOS" + node + " intents: " )
main.log.warn( json.dumps(
json.loads( ONOSIntents[ i ] ),
sort_keys=True,
@@ -1999,45 +2060,50 @@
# NOTE: this requires case 5 to pass for intentState to be set.
# maybe we should stop the test if that fails?
sameIntents = main.FALSE
- if intentState and intentState == ONOSIntents[ 0 ]:
- sameIntents = main.TRUE
- main.log.info( "Intents are consistent with before failure" )
- # TODO: possibly the states have changed? we may need to figure out
- # what the acceptable states are
- elif len( intentState ) == len( ONOSIntents[ 0 ] ):
- sameIntents = main.TRUE
- try:
- before = json.loads( intentState )
- after = json.loads( ONOSIntents[ 0 ] )
- for intent in before:
- if intent not in after:
- sameIntents = main.FALSE
- main.log.debug( "Intent is not currently in ONOS " +
- "(at least in the same form):" )
- main.log.debug( json.dumps( intent ) )
- except ( ValueError, TypeError ):
- main.log.exception( "Exception printing intents" )
- main.log.debug( repr( ONOSIntents[0] ) )
- main.log.debug( repr( intentState ) )
- if sameIntents == main.FALSE:
- try:
- main.log.debug( "ONOS intents before: " )
- main.log.debug( json.dumps( json.loads( intentState ),
- sort_keys=True, indent=4,
- separators=( ',', ': ' ) ) )
- main.log.debug( "Current ONOS intents: " )
- main.log.debug( json.dumps( json.loads( ONOSIntents[ 0 ] ),
- sort_keys=True, indent=4,
- separators=( ',', ': ' ) ) )
- except ( ValueError, TypeError ):
- main.log.exception( "Exception printing intents" )
- main.log.debug( repr( ONOSIntents[0] ) )
- main.log.debug( repr( intentState ) )
- utilities.assert_equals(
- expect=main.TRUE,
- actual=sameIntents,
- onpass="Intents are consistent with before failure",
- onfail="The Intents changed during failure" )
+ try:
+ intentState
+ except NameError:
+ main.log.warn( "No previous intent state was saved" )
+ else:
+ if intentState and intentState == ONOSIntents[ 0 ]:
+ sameIntents = main.TRUE
+ main.log.info( "Intents are consistent with before failure" )
+ # TODO: possibly the states have changed? we may need to figure out
+ # what the acceptable states are
+ elif len( intentState ) == len( ONOSIntents[ 0 ] ):
+ sameIntents = main.TRUE
+ try:
+ before = json.loads( intentState )
+ after = json.loads( ONOSIntents[ 0 ] )
+ for intent in before:
+ if intent not in after:
+ sameIntents = main.FALSE
+ main.log.debug( "Intent is not currently in ONOS " +
+ "(at least in the same form):" )
+ main.log.debug( json.dumps( intent ) )
+ except ( ValueError, TypeError ):
+ main.log.exception( "Exception printing intents" )
+ main.log.debug( repr( ONOSIntents[0] ) )
+ main.log.debug( repr( intentState ) )
+ if sameIntents == main.FALSE:
+ try:
+ main.log.debug( "ONOS intents before: " )
+ main.log.debug( json.dumps( json.loads( intentState ),
+ sort_keys=True, indent=4,
+ separators=( ',', ': ' ) ) )
+ main.log.debug( "Current ONOS intents: " )
+ main.log.debug( json.dumps( json.loads( ONOSIntents[ 0 ] ),
+ sort_keys=True, indent=4,
+ separators=( ',', ': ' ) ) )
+ except ( ValueError, TypeError ):
+ main.log.exception( "Exception printing intents" )
+ main.log.debug( repr( ONOSIntents[0] ) )
+ main.log.debug( repr( intentState ) )
+ utilities.assert_equals(
+ expect=main.TRUE,
+ actual=sameIntents,
+ onpass="Intents are consistent with before failure",
+ onfail="The Intents changed during failure" )
intentCheck = intentCheck and sameIntents
"""
main.step( "Get the OF Table entries and compare to before " +
@@ -2092,7 +2158,9 @@
# Test of LeadershipElection
leaderList = []
leaderResult = main.TRUE
- for cli in main.CLIs:
+
+ for i in main.activeNodes:
+ cli = main.CLIs[i]
leaderN = cli.electionTestLeader()
leaderList.append( leaderN )
if leaderN == main.FALSE:
@@ -2147,7 +2215,7 @@
cliStart = time.time()
devices = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=utilities.retry,
name="devices-" + str( i ),
args=[ main.CLIs[i].devices, [ None ] ],
@@ -2162,7 +2230,7 @@
hosts = []
ipResult = main.TRUE
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=utilities.retry,
name="hosts-" + str( i ),
args=[ main.CLIs[i].hosts, [ None ] ],
@@ -2180,7 +2248,7 @@
main.log.error( repr( t.result ) )
hosts.append( None )
for controller in range( 0, len( hosts ) ):
- controllerStr = str( controller + 1 )
+ controllerStr = str( main.activeNodes[controller] + 1 )
if hosts[ controller ]:
for host in hosts[ controller ]:
if host is None or host.get( 'ipAddresses', [] ) == []:
@@ -2190,7 +2258,7 @@
ipResult = main.FALSE
ports = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=utilities.retry,
name="ports-" + str( i ),
args=[ main.CLIs[i].ports, [ None ] ],
@@ -2204,7 +2272,7 @@
ports.append( t.result )
links = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=utilities.retry,
name="links-" + str( i ),
args=[ main.CLIs[i].links, [ None ] ],
@@ -2218,7 +2286,7 @@
links.append( t.result )
clusters = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=utilities.retry,
name="clusters-" + str( i ),
args=[ main.CLIs[i].clusters, [ None ] ],
@@ -2248,8 +2316,8 @@
mnSwitches = main.Mininet1.getSwitches()
mnLinks = main.Mininet1.getLinks()
mnHosts = main.Mininet1.getHosts()
- for controller in range( main.numCtrls ):
- controllerStr = str( controller + 1 )
+ for controller in range( len( main.activeNodes ) ):
+ controllerStr = str( main.activeNodes[controller] + 1 )
if devices[ controller ] and ports[ controller ] and\
"Error" not in devices[ controller ] and\
"Error" not in ports[ controller ]:
@@ -2299,7 +2367,7 @@
" hosts don't match Mininet" )
# CHECKING HOST ATTACHMENT POINTS
hostAttachment = True
- noHosts = False
+ zeroHosts = False
# FIXME: topo-HA/obelisk specific mappings:
# key is mac and value is dpid
mappings = {}
@@ -2332,7 +2400,7 @@
if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
if hosts[ controller ] == []:
main.log.warn( "There are no hosts discovered" )
- noHosts = True
+ zeroHosts = True
else:
for host in hosts[ controller ]:
mac = None
@@ -2376,7 +2444,7 @@
main.log.error( "No hosts json output or \"Error\"" +
" in output. hosts = " +
repr( hosts[ controller ] ) )
- if noHosts is False:
+ if zeroHosts is False:
# TODO: Find a way to know if there should be hosts in a
# given point of the test
hostAttachment = True
@@ -2402,7 +2470,7 @@
main.step( "Hosts view is consistent across all ONOS nodes" )
consistentHostsResult = main.TRUE
for controller in range( len( hosts ) ):
- controllerStr = str( controller + 1 )
+ controllerStr = str( main.activeNodes[controller] + 1 )
if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
if hosts[ controller ] == hosts[ 0 ]:
continue
@@ -2444,7 +2512,7 @@
main.step( "Clusters view is consistent across all ONOS nodes" )
consistentClustersResult = main.TRUE
for controller in range( len( clusters ) ):
- controllerStr = str( controller + 1 )
+ controllerStr = str( main.activeNodes[controller] + 1 )
if "Error" not in clusters[ controller ]:
if clusters[ controller ] == clusters[ 0 ]:
continue
@@ -2473,6 +2541,7 @@
except ( ValueError, TypeError ):
main.log.exception( "Error parsing clusters[0]: " +
repr( clusters[0] ) )
+ numClusters = "ERROR"
clusterResults = main.FALSE
if numClusters == 1:
clusterResults = main.TRUE
@@ -2510,12 +2579,19 @@
onpass="Link are correct",
onfail="Links are incorrect" )
+ main.step( "Hosts are correct" )
+ utilities.assert_equals(
+ expect=main.TRUE,
+ actual=hostsResults,
+ onpass="Hosts are correct",
+ onfail="Hosts are incorrect" )
+
# FIXME: move this to an ONOS state case
main.step( "Checking ONOS nodes" )
nodesOutput = []
nodeResults = main.TRUE
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].nodes,
name="nodes-" + str( i ),
args=[ ] )
@@ -2525,7 +2601,7 @@
for t in threads:
t.join()
nodesOutput.append( t.result )
- ips = [ node.ip_address for node in main.nodes ]
+ ips = [ main.nodes[node].ip_address for node in main.activeNodes ]
ips.sort()
for i in nodesOutput:
try:
@@ -2546,6 +2622,11 @@
utilities.assert_equals( expect=main.TRUE, actual=nodeResults,
onpass="Nodes check successful",
onfail="Nodes check NOT successful" )
+ if not nodeResults:
+ for cli in main.CLIs:
+ main.log.debug( "{} components not ACTIVE: \n{}".format(
+ cli.name,
+ cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
def CASE9( self, main ):
"""
@@ -2618,6 +2699,7 @@
switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
description = "Killing a switch to ensure it is discovered correctly"
+ onosCli = main.CLIs[ main.activeNodes[0] ]
main.case( description )
switch = main.params[ 'kill' ][ 'switch' ]
switchDPID = main.params[ 'kill' ][ 'dpid' ]
@@ -2629,7 +2711,7 @@
main.log.info( "Waiting " + str( switchSleep ) +
" seconds for switch down to be discovered" )
time.sleep( switchSleep )
- device = main.ONOScli1.getDevice( dpid=switchDPID )
+ device = onosCli.getDevice( dpid=switchDPID )
# Peek at the deleted switch
main.log.warn( str( device ) )
result = main.FALSE
@@ -2662,6 +2744,7 @@
switch = main.params[ 'kill' ][ 'switch' ]
switchDPID = main.params[ 'kill' ][ 'dpid' ]
links = main.params[ 'kill' ][ 'links' ].split()
+ onosCli = main.CLIs[ main.activeNodes[0] ]
description = "Adding a switch to ensure it is discovered correctly"
main.case( description )
@@ -2669,14 +2752,12 @@
main.Mininet1.addSwitch( switch, dpid=switchDPID )
for peer in links:
main.Mininet1.addLink( switch, peer )
- ipList = []
- for i in range( main.numCtrls ):
- ipList.append( main.nodes[ i ].ip_address )
+ ipList = [ node.ip_address for node in main.nodes ]
main.Mininet1.assignSwController( sw=switch, ip=ipList )
main.log.info( "Waiting " + str( switchSleep ) +
" seconds for switch up to be discovered" )
time.sleep( switchSleep )
- device = main.ONOScli1.getDevice( dpid=switchDPID )
+ device = onosCli.getDevice( dpid=switchDPID )
# Peek at the deleted switch
main.log.warn( str( device ) )
result = main.FALSE
@@ -2769,7 +2850,8 @@
main.case("Start Leadership Election app")
main.step( "Install leadership election app" )
- appResult = main.ONOScli1.activateApp( "org.onosproject.election" )
+ onosCli = main.CLIs[ main.activeNodes[0] ]
+ appResult = onosCli.activateApp( "org.onosproject.election" )
utilities.assert_equals(
expect=main.TRUE,
actual=appResult,
@@ -2779,9 +2861,10 @@
main.step( "Run for election on each node" )
leaderResult = main.TRUE
leaders = []
- for cli in main.CLIs:
- cli.electionTestRun()
- for cli in main.CLIs:
+ for i in main.activeNodes:
+ main.CLIs[i].electionTestRun()
+ for i in main.activeNodes:
+ cli = main.CLIs[i]
leader = cli.electionTestLeader()
if leader is None or leader == main.FALSE:
main.log.error( cli.name + ": Leader for the election app " +
@@ -2831,16 +2914,10 @@
description = "Check that Leadership Election is still functional"
main.case( description )
- # NOTE: Need to re-run since being a canidate is not persistant
- # TODO: add check for "Command not found:" in the driver, this
- # means the election test app isn't loaded
+ # NOTE: Need to re-run after restarts since being a candidate is not persistent
- oldLeaders = [] # leaders by node before withdrawl from candidates
- newLeaders = [] # leaders by node after withdrawl from candidates
- oldAllCandidates = [] # list of lists of each nodes' candidates before
- newAllCandidates = [] # list of lists of each nodes' candidates after
- oldCandidates = [] # list of candidates from node 0 before withdrawl
- newCandidates = [] # list of candidates from node 0 after withdrawl
+ oldLeaders = [] # list of each node's leaderboard before withdrawal
+ newLeaders = [] # list of each node's leaderboard after withdrawal
oldLeader = '' # the old leader from oldLeaders, None if not same
newLeader = '' # the new leaders fron newLoeaders, None if not same
oldLeaderCLI = None # the CLI of the old leader used for re-electing
@@ -2851,10 +2928,9 @@
main.step( "Run for election on each node" )
electionResult = main.TRUE
- for cli in main.CLIs: # run test election on each node
- if cli.electionTestRun() == main.FALSE:
+ for i in main.activeNodes: # run test election on each node
+ if main.CLIs[i].electionTestRun() == main.FALSE:
electionResult = main.FALSE
-
utilities.assert_equals(
expect=main.TRUE,
actual=electionResult,
@@ -2867,42 +2943,36 @@
main.skipCase()
main.step( "Check that each node shows the same leader and candidates" )
- sameResult = main.TRUE
- failMessage = "Nodes have different leaders"
- for cli in main.CLIs:
- node = cli.specificLeaderCandidate( 'org.onosproject.election' )
- oldAllCandidates.append( node )
- if node:
- oldLeaders.append( node[ 0 ] )
- else:
- oldLeaders.append( None )
- oldCandidates = oldAllCandidates[ 0 ]
- if oldCandidates is None:
- oldCandidates = [ None ]
-
- # Check that each node has the same leader. Defines oldLeader
- if len( set( oldLeaders ) ) != 1:
- sameResult = main.FALSE
- main.log.error( "More than one leader present:" + str( oldLeaders ) )
- oldLeader = None
+ failMessage = "Nodes have different leaderboards"
+ def consistentLeaderboards( nodes ):
+ TOPIC = 'org.onosproject.election'
+ # FIXME: use threads
+ #FIXME: should we retry outside the function?
+ for n in range( 5 ): # Retry in case election is still happening
+ leaderList = []
+ # Get all leaderboards
+ for cli in nodes:
+ leaderList.append( cli.specificLeaderCandidate( TOPIC ) )
+ # Compare leaderboards
+ result = all( i == leaderList[0] for i in leaderList ) and\
+ leaderList is not None
+ main.log.debug( leaderList )
+ main.log.warn( result )
+ if result:
+ return ( result, leaderList )
+ time.sleep(5) #TODO: parameterize
+ main.log.error( "Inconsistent leaderboards:" + str( leaderList ) )
+ activeCLIs = [ main.CLIs[i] for i in main.activeNodes ]
+ sameResult, oldLeaders = consistentLeaderboards( activeCLIs )
+ if sameResult:
+ oldLeader = oldLeaders[ 0 ][ 0 ]
+ main.log.warn( oldLeader )
else:
- oldLeader = oldLeaders[ 0 ]
-
- # Check that each node's candidate list is the same
- candidateDiscrepancy = False # Boolean of candidate mismatches
- for candidates in oldAllCandidates:
- if candidates is None:
- main.log.warn( "Error getting candidates" )
- candidates = [ None ]
- if set( candidates ) != set( oldCandidates ):
- sameResult = main.FALSE
- candidateDiscrepancy = True
- if candidateDiscrepancy:
- failMessage += " and candidates"
+ oldLeader = None
utilities.assert_equals(
- expect=main.TRUE,
+ expect=True,
actual=sameResult,
- onpass="Leadership is consistent for the election topic",
+ onpass="Leaderboards are consistent for the election topic",
onfail=failMessage )
main.step( "Find current leader and withdraw" )
@@ -2912,7 +2982,7 @@
main.log.error( "Leadership isn't consistent." )
withdrawResult = main.FALSE
# Get the CLI of the oldLeader
- for i in range( len( main.CLIs ) ):
+ for i in main.activeNodes:
if oldLeader == main.nodes[ i ].ip_address:
oldLeaderCLI = main.CLIs[ i ]
break
@@ -2927,55 +2997,30 @@
onfail="Node was not withdrawn from election" )
main.step( "Check that a new node was elected leader" )
- # FIXME: use threads
- newLeaderResult = main.TRUE
failMessage = "Nodes have different leaders"
-
# Get new leaders and candidates
- for cli in main.CLIs:
- node = cli.specificLeaderCandidate( 'org.onosproject.election' )
- # elections might no have finished yet
- if node[ 0 ] == 'none' and not expectNoLeader:
- main.log.info( "Node has no leader, waiting 5 seconds to be " +
- "sure elections are complete." )
- time.sleep(5)
- node = cli.specificLeaderCandidate( 'org.onosproject.election' )
- # election still isn't done or there is a problem
- if node[ 0 ] == 'none':
- main.log.error( "No leader was elected on at least 1 node" )
- newLeaderResult = main.FALSE
- newAllCandidates.append( node )
- newLeaders.append( node[ 0 ] )
- newCandidates = newAllCandidates[ 0 ]
-
- # Check that each node has the same leader. Defines newLeader
- if len( set( newLeaders ) ) != 1:
- newLeaderResult = main.FALSE
- main.log.error( "Nodes have different leaders: " +
- str( newLeaders ) )
- newLeader = None
+ newLeaderResult, newLeaders = consistentLeaderboards( activeCLIs )
+ if newLeaders[ 0 ][ 0 ] == 'none':
+ main.log.error( "No leader was elected on at least 1 node" )
+ if not expectNoLeader:
+ newLeaderResult = False
+ if newLeaderResult:
+ newLeader = newLeaders[ 0 ][ 0 ]
else:
- newLeader = newLeaders[ 0 ]
-
- # Check that each node's candidate list is the same
- for candidates in newAllCandidates:
- if set( candidates ) != set( newCandidates ):
- newLeaderResult = main.FALSE
- main.log.error( "Discrepancy in candidate lists detected" )
+ newLeader = None
# Check that the new leader is not the older leader, which was withdrawn
if newLeader == oldLeader:
- newLeaderResult = main.FALSE
+ newLeaderResult = False
main.log.error( "All nodes still see old leader: " + str( oldLeader ) +
" as the current leader" )
-
utilities.assert_equals(
- expect=main.TRUE,
+ expect=True,
actual=newLeaderResult,
onpass="Leadership election passed",
onfail="Something went wrong with Leadership election" )
- main.step( "Check that that new leader was the candidate of old leader")
+ main.step( "Check that that new leader was the candidate of old leader" )
# candidates[ 2 ] should become the top candidate after withdrawl
correctCandidateResult = main.TRUE
if expectNoLeader:
@@ -2985,12 +3030,17 @@
else:
main.log.info( "Expected no leader, got: " + str( newLeader ) )
correctCandidateResult = main.FALSE
- elif len( oldCandidates ) >= 3 and newLeader != oldCandidates[ 2 ]:
- correctCandidateResult = main.FALSE
- main.log.error( "Candidate {} was elected. {} should have had priority.".format(
- newLeader, oldCandidates[ 2 ] ) )
+ elif len( oldLeaders[0] ) >= 3:
+ if newLeader == oldLeaders[ 0 ][ 2 ]:
+ # correct leader was elected
+ correctCandidateResult = main.TRUE
+ else:
+ correctCandidateResult = main.FALSE
+ main.log.error( "Candidate {} was elected. {} should have had priority.".format(
+ newLeader, oldLeaders[ 0 ][ 2 ] ) )
else:
main.log.warn( "Could not determine who should be the correct leader" )
+ main.log.debug( oldLeaders[ 0 ] )
correctCandidateResult = main.FALSE
utilities.assert_equals(
expect=main.TRUE,
@@ -3010,54 +3060,23 @@
actual=runResult,
onpass="App re-ran for election",
onfail="App failed to run for election" )
+
main.step(
"Check that oldLeader is a candidate, and leader if only 1 node" )
# verify leader didn't just change
- positionResult = main.TRUE
- # Get new leaders and candidates, wait if oldLeader is not a candidate yet
-
- # Reset and reuse the new candidate and leaders lists
- newAllCandidates = []
- newCandidates = []
- newLeaders = []
- for cli in main.CLIs:
- node = cli.specificLeaderCandidate( 'org.onosproject.election' )
- if oldLeader not in node: # election might no have finished yet
- main.log.info( "Old Leader not elected, waiting 5 seconds to " +
- "be sure elections are complete" )
- time.sleep(5)
- node = cli.specificLeaderCandidate( 'org.onosproject.election' )
- if oldLeader not in node: # election still isn't done, errors
- main.log.error(
- "Old leader was not elected on at least one node" )
- positionResult = main.FALSE
- newAllCandidates.append( node )
- newLeaders.append( node[ 0 ] )
- newCandidates = newAllCandidates[ 0 ]
-
- # Check that each node has the same leader. Defines newLeader
- if len( set( newLeaders ) ) != 1:
- positionResult = main.FALSE
- main.log.error( "Nodes have different leaders: " +
- str( newLeaders ) )
- newLeader = None
- else:
- newLeader = newLeaders[ 0 ]
-
- # Check that each node's candidate list is the same
- for candidates in newAllCandidates:
- if set( candidates ) != set( newCandidates ):
- newLeaderResult = main.FALSE
- main.log.error( "Discrepancy in candidate lists detected" )
+ # Get new leaders and candidates
+ reRunLeaders = []
+ time.sleep( 5 ) # TODO: Parameterize
+ positionResult, reRunLeaders = consistentLeaderboards( activeCLIs )
# Check that the re-elected node is last on the candidate List
- if oldLeader != newCandidates[ -1 ]:
- main.log.error( "Old Leader (" + str( oldLeader ) + ") not in the proper position " +
- str( newCandidates ) )
+ if oldLeader != reRunLeaders[ 0 ][ -1 ]:
+ main.log.error( "Old Leader ({}) not in the proper position: {} ".format( str( oldLeader),
+ str( reRunLeaders[ 0 ] ) ) )
positionResult = main.FALSE
utilities.assert_equals(
- expect=main.TRUE,
+ expect=True,
actual=positionResult,
onpass="Old leader successfully re-ran for election",
onfail="Something went wrong with Leadership election after " +
@@ -3076,15 +3095,11 @@
# Variables for the distributed primitives tests
global pCounterName
- global iCounterName
global pCounterValue
- global iCounterValue
global onosSet
global onosSetName
pCounterName = "TestON-Partitions"
- iCounterName = "TestON-inMemory"
pCounterValue = 0
- iCounterValue = 0
onosSet = set([])
onosSetName = "TestON-set"
@@ -3092,7 +3107,8 @@
main.case( description )
main.step( "Install Primitives app" )
appName = "org.onosproject.distributedprimitives"
- appResults = main.CLIs[0].activateApp( appName )
+ node = main.activeNodes[0]
+ appResults = main.CLIs[node].activateApp( appName )
utilities.assert_equals( expect=main.TRUE,
actual=appResults,
onpass="Primitives app activated",
@@ -3110,7 +3126,6 @@
assert main.CLIs, "main.CLIs not defined"
assert main.nodes, "main.nodes not defined"
assert pCounterName, "pCounterName not defined"
- assert iCounterName, "iCounterName not defined"
assert onosSetName, "onosSetName not defined"
# NOTE: assert fails if value is 0/None/Empty/False
try:
@@ -3119,11 +3134,6 @@
main.log.error( "pCounterValue not defined, setting to 0" )
pCounterValue = 0
try:
- iCounterValue
- except NameError:
- main.log.error( "iCounterValue not defined, setting to 0" )
- iCounterValue = 0
- try:
onosSet
except NameError:
main.log.error( "onosSet not defined, setting to empty Set" )
@@ -3144,7 +3154,7 @@
pCounters = []
threads = []
addedPValues = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
name="counterAddAndGet-" + str( i ),
args=[ pCounterName ] )
@@ -3174,7 +3184,7 @@
pCounters = []
threads = []
addedPValues = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
name="counterGetAndAdd-" + str( i ),
args=[ pCounterName ] )
@@ -3211,7 +3221,7 @@
pCounters = []
threads = []
addedPValues = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
name="counterIncrement-" + str( i ),
args=[ pCounterName ],
@@ -3242,7 +3252,7 @@
pCounters = []
threads = []
addedPValues = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
name="counterIncrement-" + str( i ),
args=[ pCounterName ],
@@ -3273,7 +3283,7 @@
pCounters = []
threads = []
addedPValues = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
name="counterIncrement-" + str( i ),
args=[ pCounterName ],
@@ -3307,199 +3317,12 @@
onpass="Added counters are correct",
onfail="Added counters are incorrect" )
- # In-Memory counters
- main.step( "Increment and get an in-memory counter on each node" )
- iCounters = []
- addedIValues = []
- threads = []
- for i in range( main.numCtrls ):
- t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
- name="icounterIncrement-" + str( i ),
- args=[ iCounterName ],
- kwargs={ "inMemory": True } )
- iCounterValue += 1
- addedIValues.append( iCounterValue )
- threads.append( t )
- t.start()
-
- for t in threads:
- t.join()
- iCounters.append( t.result )
- # Check that counter incremented numController times
- iCounterResults = True
- for i in addedIValues:
- tmpResult = i in iCounters
- iCounterResults = iCounterResults and tmpResult
- if not tmpResult:
- main.log.error( str( i ) + " is not in the in-memory "
- "counter incremented results" )
- utilities.assert_equals( expect=True,
- actual=iCounterResults,
- onpass="In-memory counter incremented",
- onfail="Error incrementing in-memory" +
- " counter" )
-
- main.step( "Get then Increment a in-memory counter on each node" )
- iCounters = []
- threads = []
- addedIValues = []
- for i in range( main.numCtrls ):
- t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
- name="counterGetAndAdd-" + str( i ),
- args=[ iCounterName ],
- kwargs={ "inMemory": True } )
- addedIValues.append( iCounterValue )
- iCounterValue += 1
- threads.append( t )
- t.start()
-
- for t in threads:
- t.join()
- iCounters.append( t.result )
- # Check that counter incremented numController times
- iCounterResults = True
- for i in addedIValues:
- tmpResult = i in iCounters
- iCounterResults = iCounterResults and tmpResult
- if not tmpResult:
- main.log.error( str( i ) + " is not in in-memory "
- "counter incremented results" )
- utilities.assert_equals( expect=True,
- actual=iCounterResults,
- onpass="In-memory counter incremented",
- onfail="Error incrementing in-memory" +
- " counter" )
-
- main.step( "Counters we added have the correct values" )
- incrementCheck = main.Counters.counterCheck( iCounterName, iCounterValue )
- utilities.assert_equals( expect=main.TRUE,
- actual=incrementCheck,
- onpass="Added counters are correct",
- onfail="Added counters are incorrect" )
-
- main.step( "Add -8 to then get a in-memory counter on each node" )
- iCounters = []
- threads = []
- addedIValues = []
- for i in range( main.numCtrls ):
- t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
- name="counterIncrement-" + str( i ),
- args=[ iCounterName ],
- kwargs={ "delta": -8, "inMemory": True } )
- iCounterValue += -8
- addedIValues.append( iCounterValue )
- threads.append( t )
- t.start()
-
- for t in threads:
- t.join()
- iCounters.append( t.result )
- # Check that counter incremented numController times
- iCounterResults = True
- for i in addedIValues:
- tmpResult = i in iCounters
- iCounterResults = iCounterResults and tmpResult
- if not tmpResult:
- main.log.error( str( i ) + " is not in in-memory "
- "counter incremented results" )
- utilities.assert_equals( expect=True,
- actual=pCounterResults,
- onpass="In-memory counter incremented",
- onfail="Error incrementing in-memory" +
- " counter" )
-
- main.step( "Add 5 to then get a in-memory counter on each node" )
- iCounters = []
- threads = []
- addedIValues = []
- for i in range( main.numCtrls ):
- t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
- name="counterIncrement-" + str( i ),
- args=[ iCounterName ],
- kwargs={ "delta": 5, "inMemory": True } )
- iCounterValue += 5
- addedIValues.append( iCounterValue )
- threads.append( t )
- t.start()
-
- for t in threads:
- t.join()
- iCounters.append( t.result )
- # Check that counter incremented numController times
- iCounterResults = True
- for i in addedIValues:
- tmpResult = i in iCounters
- iCounterResults = iCounterResults and tmpResult
- if not tmpResult:
- main.log.error( str( i ) + " is not in in-memory "
- "counter incremented results" )
- utilities.assert_equals( expect=True,
- actual=pCounterResults,
- onpass="In-memory counter incremented",
- onfail="Error incrementing in-memory" +
- " counter" )
-
- main.step( "Get then add 5 to a in-memory counter on each node" )
- iCounters = []
- threads = []
- addedIValues = []
- for i in range( main.numCtrls ):
- t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
- name="counterIncrement-" + str( i ),
- args=[ iCounterName ],
- kwargs={ "delta": 5, "inMemory": True } )
- addedIValues.append( iCounterValue )
- iCounterValue += 5
- threads.append( t )
- t.start()
-
- for t in threads:
- t.join()
- iCounters.append( t.result )
- # Check that counter incremented numController times
- iCounterResults = True
- for i in addedIValues:
- tmpResult = i in iCounters
- iCounterResults = iCounterResults and tmpResult
- if not tmpResult:
- main.log.error( str( i ) + " is not in in-memory "
- "counter incremented results" )
- utilities.assert_equals( expect=True,
- actual=iCounterResults,
- onpass="In-memory counter incremented",
- onfail="Error incrementing in-memory" +
- " counter" )
-
- main.step( "Counters we added have the correct values" )
- incrementCheck = main.Counters.counterCheck( iCounterName, iCounterValue )
- utilities.assert_equals( expect=main.TRUE,
- actual=incrementCheck,
- onpass="Added counters are correct",
- onfail="Added counters are incorrect" )
-
- main.step( "Check counters are consistant across nodes" )
- onosCounters, consistentCounterResults = main.Counters.consistentCheck()
- utilities.assert_equals( expect=main.TRUE,
- actual=consistentCounterResults,
- onpass="ONOS counters are consistent " +
- "across nodes",
- onfail="ONOS Counters are inconsistent " +
- "across nodes" )
-
- main.step( "Counters we added have the correct values" )
- incrementCheck = main.Counters.counterCheck( iCounterName, iCounterValue )
- incrementCheck = incrementCheck and \
- main.Counters.counterCheck( iCounterName, iCounterValue )
- utilities.assert_equals( expect=main.TRUE,
- actual=incrementCheck,
- onpass="Added counters are correct",
- onfail="Added counters are incorrect" )
# DISTRIBUTED SETS
main.step( "Distributed Set get" )
size = len( onosSet )
getResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestGet,
name="setTestGet-" + str( i ),
args=[ onosSetName ] )
@@ -3510,8 +3333,8 @@
getResponses.append( t.result )
getResults = main.TRUE
- for i in range( main.numCtrls ):
- node = str( i + 1 )
+ for i in range( len( main.activeNodes ) ):
+ node = str( main.activeNodes[i] + 1 )
if isinstance( getResponses[ i ], list):
current = set( getResponses[ i ] )
if len( current ) == len( getResponses[ i ] ):
@@ -3541,7 +3364,7 @@
main.step( "Distributed Set size" )
sizeResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestSize,
name="setTestSize-" + str( i ),
args=[ onosSetName ] )
@@ -3552,8 +3375,8 @@
sizeResponses.append( t.result )
sizeResults = main.TRUE
- for i in range( main.numCtrls ):
- node = str( i + 1 )
+ for i in range( len( main.activeNodes ) ):
+ node = str( main.activeNodes[i] + 1 )
if size != sizeResponses[ i ]:
sizeResults = main.FALSE
main.log.error( "ONOS" + node +
@@ -3569,7 +3392,7 @@
onosSet.add( addValue )
addResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestAdd,
name="setTestAdd-" + str( i ),
args=[ onosSetName, addValue ] )
@@ -3583,7 +3406,7 @@
# main.FALSE = action resulted in no change in set
# main.ERROR - Some error in executing the function
addResults = main.TRUE
- for i in range( main.numCtrls ):
+ for i in range( len( main.activeNodes ) ):
if addResponses[ i ] == main.TRUE:
# All is well
pass
@@ -3603,7 +3426,7 @@
size = len( onosSet )
getResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestGet,
name="setTestGet-" + str( i ),
args=[ onosSetName ] )
@@ -3613,8 +3436,8 @@
t.join()
getResponses.append( t.result )
getResults = main.TRUE
- for i in range( main.numCtrls ):
- node = str( i + 1 )
+ for i in range( len( main.activeNodes ) ):
+ node = str( main.activeNodes[i] + 1 )
if isinstance( getResponses[ i ], list):
current = set( getResponses[ i ] )
if len( current ) == len( getResponses[ i ] ):
@@ -3636,7 +3459,7 @@
getResults = main.FALSE
sizeResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestSize,
name="setTestSize-" + str( i ),
args=[ onosSetName ] )
@@ -3646,8 +3469,8 @@
t.join()
sizeResponses.append( t.result )
sizeResults = main.TRUE
- for i in range( main.numCtrls ):
- node = str( i + 1 )
+ for i in range( len( main.activeNodes ) ):
+ node = str( main.activeNodes[i] + 1 )
if size != sizeResponses[ i ]:
sizeResults = main.FALSE
main.log.error( "ONOS" + node +
@@ -3664,7 +3487,7 @@
onosSet.update( addAllValue.split() )
addResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestAdd,
name="setTestAddAll-" + str( i ),
args=[ onosSetName, addAllValue ] )
@@ -3678,7 +3501,7 @@
# main.FALSE = action resulted in no change in set
# main.ERROR - Some error in executing the function
addAllResults = main.TRUE
- for i in range( main.numCtrls ):
+ for i in range( len( main.activeNodes ) ):
if addResponses[ i ] == main.TRUE:
# All is well
pass
@@ -3698,7 +3521,7 @@
size = len( onosSet )
getResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestGet,
name="setTestGet-" + str( i ),
args=[ onosSetName ] )
@@ -3708,8 +3531,8 @@
t.join()
getResponses.append( t.result )
getResults = main.TRUE
- for i in range( main.numCtrls ):
- node = str( i + 1 )
+ for i in range( len( main.activeNodes ) ):
+ node = str( main.activeNodes[i] + 1 )
if isinstance( getResponses[ i ], list):
current = set( getResponses[ i ] )
if len( current ) == len( getResponses[ i ] ):
@@ -3733,7 +3556,7 @@
getResults = main.FALSE
sizeResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestSize,
name="setTestSize-" + str( i ),
args=[ onosSetName ] )
@@ -3743,8 +3566,8 @@
t.join()
sizeResponses.append( t.result )
sizeResults = main.TRUE
- for i in range( main.numCtrls ):
- node = str( i + 1 )
+ for i in range( len( main.activeNodes ) ):
+ node = str( main.activeNodes[i] + 1 )
if size != sizeResponses[ i ]:
sizeResults = main.FALSE
main.log.error( "ONOS" + node +
@@ -3760,7 +3583,7 @@
main.step( "Distributed Set contains()" )
containsResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestGet,
name="setContains-" + str( i ),
args=[ onosSetName ],
@@ -3773,7 +3596,7 @@
containsResponses.append( t.result )
containsResults = main.TRUE
- for i in range( main.numCtrls ):
+ for i in range( len( main.activeNodes ) ):
if containsResponses[ i ] == main.ERROR:
containsResults = main.FALSE
else:
@@ -3787,7 +3610,7 @@
main.step( "Distributed Set containsAll()" )
containsAllResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestGet,
name="setContainsAll-" + str( i ),
args=[ onosSetName ],
@@ -3800,7 +3623,7 @@
containsAllResponses.append( t.result )
containsAllResults = main.TRUE
- for i in range( main.numCtrls ):
+ for i in range( len( main.activeNodes ) ):
if containsResponses[ i ] == main.ERROR:
containsResults = main.FALSE
else:
@@ -3815,7 +3638,7 @@
onosSet.remove( addValue )
removeResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestRemove,
name="setTestRemove-" + str( i ),
args=[ onosSetName, addValue ] )
@@ -3829,7 +3652,7 @@
# main.FALSE = action resulted in no change in set
# main.ERROR - Some error in executing the function
removeResults = main.TRUE
- for i in range( main.numCtrls ):
+ for i in range( len( main.activeNodes ) ):
if removeResponses[ i ] == main.TRUE:
# All is well
pass
@@ -3849,7 +3672,7 @@
size = len( onosSet )
getResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestGet,
name="setTestGet-" + str( i ),
args=[ onosSetName ] )
@@ -3859,8 +3682,8 @@
t.join()
getResponses.append( t.result )
getResults = main.TRUE
- for i in range( main.numCtrls ):
- node = str( i + 1 )
+ for i in range( len( main.activeNodes ) ):
+ node = str( main.activeNodes[i] + 1 )
if isinstance( getResponses[ i ], list):
current = set( getResponses[ i ] )
if len( current ) == len( getResponses[ i ] ):
@@ -3884,7 +3707,7 @@
getResults = main.FALSE
sizeResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestSize,
name="setTestSize-" + str( i ),
args=[ onosSetName ] )
@@ -3894,8 +3717,8 @@
t.join()
sizeResponses.append( t.result )
sizeResults = main.TRUE
- for i in range( main.numCtrls ):
- node = str( i + 1 )
+ for i in range( len( main.activeNodes ) ):
+ node = str( main.activeNodes[i] + 1 )
if size != sizeResponses[ i ]:
sizeResults = main.FALSE
main.log.error( "ONOS" + node +
@@ -3913,7 +3736,7 @@
removeAllResponses = []
threads = []
try:
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestRemove,
name="setTestRemoveAll-" + str( i ),
args=[ onosSetName, addAllValue ] )
@@ -3929,7 +3752,7 @@
# main.FALSE = action resulted in no change in set
# main.ERROR - Some error in executing the function
removeAllResults = main.TRUE
- for i in range( main.numCtrls ):
+ for i in range( len( main.activeNodes ) ):
if removeAllResponses[ i ] == main.TRUE:
# All is well
pass
@@ -3949,7 +3772,7 @@
size = len( onosSet )
getResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestGet,
name="setTestGet-" + str( i ),
args=[ onosSetName ] )
@@ -3959,8 +3782,8 @@
t.join()
getResponses.append( t.result )
getResults = main.TRUE
- for i in range( main.numCtrls ):
- node = str( i + 1 )
+ for i in range( len( main.activeNodes ) ):
+ node = str( main.activeNodes[i] + 1 )
if isinstance( getResponses[ i ], list):
current = set( getResponses[ i ] )
if len( current ) == len( getResponses[ i ] ):
@@ -3984,7 +3807,7 @@
getResults = main.FALSE
sizeResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestSize,
name="setTestSize-" + str( i ),
args=[ onosSetName ] )
@@ -3994,8 +3817,8 @@
t.join()
sizeResponses.append( t.result )
sizeResults = main.TRUE
- for i in range( main.numCtrls ):
- node = str( i + 1 )
+ for i in range( len( main.activeNodes ) ):
+ node = str( main.activeNodes[i] + 1 )
if size != sizeResponses[ i ]:
sizeResults = main.FALSE
main.log.error( "ONOS" + node +
@@ -4012,7 +3835,7 @@
onosSet.update( addAllValue.split() )
addResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestAdd,
name="setTestAddAll-" + str( i ),
args=[ onosSetName, addAllValue ] )
@@ -4026,7 +3849,7 @@
# main.FALSE = action resulted in no change in set
# main.ERROR - Some error in executing the function
addAllResults = main.TRUE
- for i in range( main.numCtrls ):
+ for i in range( len( main.activeNodes ) ):
if addResponses[ i ] == main.TRUE:
# All is well
pass
@@ -4046,7 +3869,7 @@
size = len( onosSet )
getResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestGet,
name="setTestGet-" + str( i ),
args=[ onosSetName ] )
@@ -4056,8 +3879,8 @@
t.join()
getResponses.append( t.result )
getResults = main.TRUE
- for i in range( main.numCtrls ):
- node = str( i + 1 )
+ for i in range( len( main.activeNodes ) ):
+ node = str( main.activeNodes[i] + 1 )
if isinstance( getResponses[ i ], list):
current = set( getResponses[ i ] )
if len( current ) == len( getResponses[ i ] ):
@@ -4081,7 +3904,7 @@
getResults = main.FALSE
sizeResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestSize,
name="setTestSize-" + str( i ),
args=[ onosSetName ] )
@@ -4091,8 +3914,8 @@
t.join()
sizeResponses.append( t.result )
sizeResults = main.TRUE
- for i in range( main.numCtrls ):
- node = str( i + 1 )
+ for i in range( len( main.activeNodes ) ):
+ node = str( main.activeNodes[i] + 1 )
if size != sizeResponses[ i ]:
sizeResults = main.FALSE
main.log.error( "ONOS" + node +
@@ -4109,7 +3932,7 @@
onosSet.clear()
clearResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestRemove,
name="setTestClear-" + str( i ),
args=[ onosSetName, " "], # Values doesn't matter
@@ -4124,7 +3947,7 @@
# main.FALSE = action resulted in no change in set
# main.ERROR - Some error in executing the function
clearResults = main.TRUE
- for i in range( main.numCtrls ):
+ for i in range( len( main.activeNodes ) ):
if clearResponses[ i ] == main.TRUE:
# All is well
pass
@@ -4144,7 +3967,7 @@
size = len( onosSet )
getResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestGet,
name="setTestGet-" + str( i ),
args=[ onosSetName ] )
@@ -4154,8 +3977,8 @@
t.join()
getResponses.append( t.result )
getResults = main.TRUE
- for i in range( main.numCtrls ):
- node = str( i + 1 )
+ for i in range( len( main.activeNodes ) ):
+ node = str( main.activeNodes[i] + 1 )
if isinstance( getResponses[ i ], list):
current = set( getResponses[ i ] )
if len( current ) == len( getResponses[ i ] ):
@@ -4179,7 +4002,7 @@
getResults = main.FALSE
sizeResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestSize,
name="setTestSize-" + str( i ),
args=[ onosSetName ] )
@@ -4189,8 +4012,8 @@
t.join()
sizeResponses.append( t.result )
sizeResults = main.TRUE
- for i in range( main.numCtrls ):
- node = str( i + 1 )
+ for i in range( len( main.activeNodes ) ):
+ node = str( main.activeNodes[i] + 1 )
if size != sizeResponses[ i ]:
sizeResults = main.FALSE
main.log.error( "ONOS" + node +
@@ -4207,7 +4030,7 @@
onosSet.update( addAllValue.split() )
addResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestAdd,
name="setTestAddAll-" + str( i ),
args=[ onosSetName, addAllValue ] )
@@ -4221,7 +4044,7 @@
# main.FALSE = action resulted in no change in set
# main.ERROR - Some error in executing the function
addAllResults = main.TRUE
- for i in range( main.numCtrls ):
+ for i in range( len( main.activeNodes ) ):
if addResponses[ i ] == main.TRUE:
# All is well
pass
@@ -4241,7 +4064,7 @@
size = len( onosSet )
getResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestGet,
name="setTestGet-" + str( i ),
args=[ onosSetName ] )
@@ -4251,8 +4074,8 @@
t.join()
getResponses.append( t.result )
getResults = main.TRUE
- for i in range( main.numCtrls ):
- node = str( i + 1 )
+ for i in range( len( main.activeNodes ) ):
+ node = str( main.activeNodes[i] + 1 )
if isinstance( getResponses[ i ], list):
current = set( getResponses[ i ] )
if len( current ) == len( getResponses[ i ] ):
@@ -4276,7 +4099,7 @@
getResults = main.FALSE
sizeResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestSize,
name="setTestSize-" + str( i ),
args=[ onosSetName ] )
@@ -4286,8 +4109,8 @@
t.join()
sizeResponses.append( t.result )
sizeResults = main.TRUE
- for i in range( main.numCtrls ):
- node = str( i + 1 )
+ for i in range( len( main.activeNodes ) ):
+ node = str( main.activeNodes[i] + 1 )
if size != sizeResponses[ i ]:
sizeResults = main.FALSE
main.log.error( "ONOS" + node +
@@ -4304,7 +4127,7 @@
onosSet.intersection_update( retainValue.split() )
retainResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestRemove,
name="setTestRetain-" + str( i ),
args=[ onosSetName, retainValue ],
@@ -4319,7 +4142,7 @@
# main.FALSE = action resulted in no change in set
# main.ERROR - Some error in executing the function
retainResults = main.TRUE
- for i in range( main.numCtrls ):
+ for i in range( len( main.activeNodes ) ):
if retainResponses[ i ] == main.TRUE:
# All is well
pass
@@ -4339,7 +4162,7 @@
size = len( onosSet )
getResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestGet,
name="setTestGet-" + str( i ),
args=[ onosSetName ] )
@@ -4349,8 +4172,8 @@
t.join()
getResponses.append( t.result )
getResults = main.TRUE
- for i in range( main.numCtrls ):
- node = str( i + 1 )
+ for i in range( len( main.activeNodes ) ):
+ node = str( main.activeNodes[i] + 1 )
if isinstance( getResponses[ i ], list):
current = set( getResponses[ i ] )
if len( current ) == len( getResponses[ i ] ):
@@ -4374,7 +4197,7 @@
getResults = main.FALSE
sizeResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestSize,
name="setTestSize-" + str( i ),
args=[ onosSetName ] )
@@ -4384,8 +4207,8 @@
t.join()
sizeResponses.append( t.result )
sizeResults = main.TRUE
- for i in range( main.numCtrls ):
- node = str( i + 1 )
+ for i in range( len( main.activeNodes ) ):
+ node = str( main.activeNodes[i] + 1 )
if size != sizeResponses[ i ]:
sizeResults = main.FALSE
main.log.error( "ONOS" + node + " expected a size of " +
@@ -4402,7 +4225,8 @@
tMapValue = "Testing"
numKeys = 100
putResult = True
- putResponses = main.CLIs[ 0 ].transactionalMapPut( numKeys, tMapValue )
+ node = main.activeNodes[0]
+ putResponses = main.CLIs[node].transactionalMapPut( numKeys, tMapValue )
if putResponses and len( putResponses ) == 100:
for i in putResponses:
if putResponses[ i ][ 'value' ] != tMapValue:
@@ -4422,7 +4246,7 @@
getResponses = []
threads = []
valueCheck = True
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].transactionalMapGet,
name="TMap-get-" + str( i ),
args=[ "Key" + str( n ) ] )
@@ -4442,49 +4266,3 @@
actual=getCheck,
onpass="Partitioned Transactional Map get values were correct",
onfail="Partitioned Transactional Map values incorrect" )
-
- main.step( "In-memory Transactional maps put" )
- tMapValue = "Testing"
- numKeys = 100
- putResult = True
- putResponses = main.CLIs[ 0 ].transactionalMapPut( numKeys, tMapValue, inMemory=True )
- if len( putResponses ) == 100:
- for i in putResponses:
- if putResponses[ i ][ 'value' ] != tMapValue:
- putResult = False
- else:
- putResult = False
- if not putResult:
- main.log.debug( "Put response values: " + str( putResponses ) )
- utilities.assert_equals( expect=True,
- actual=putResult,
- onpass="In-Memory Transactional Map put successful",
- onfail="In-Memory Transactional Map put values are incorrect" )
-
- main.step( "In-Memory Transactional maps get" )
- getCheck = True
- for n in range( 1, numKeys + 1 ):
- getResponses = []
- threads = []
- valueCheck = True
- for i in range( main.numCtrls ):
- t = main.Thread( target=main.CLIs[i].transactionalMapGet,
- name="TMap-get-" + str( i ),
- args=[ "Key" + str( n ) ],
- kwargs={ "inMemory": True } )
- threads.append( t )
- t.start()
- for t in threads:
- t.join()
- getResponses.append( t.result )
- for node in getResponses:
- if node != tMapValue:
- valueCheck = False
- if not valueCheck:
- main.log.warn( "Values for key 'Key" + str( n ) + "' do not match:" )
- main.log.warn( getResponses )
- getCheck = getCheck and valueCheck
- utilities.assert_equals( expect=True,
- actual=getCheck,
- onpass="In-Memory Transactional Map get values were correct",
- onfail="In-Memory Transactional Map values incorrect" )
diff --git a/TestON/tests/HAclusterRestart/dependencies/Counters.py b/TestON/tests/HAclusterRestart/dependencies/Counters.py
index 2dc95e1..192b919 100644
--- a/TestON/tests/HAclusterRestart/dependencies/Counters.py
+++ b/TestON/tests/HAclusterRestart/dependencies/Counters.py
@@ -1,102 +1,105 @@
-def __init__( self ):
- self.default = ''
+import json
-def consistentCheck():
- """
- Checks that TestON counters are consistent across all nodes.
+class Counters():
- Returns the tuple (onosCounters, consistent)
- - onosCounters is the parsed json output of the counters command on all nodes
- - consistent is main.TRUE if all "TestON" counters are consitent across all
- nodes or main.FALSE
- """
- import json
- try:
- correctResults = main.TRUE
- # Get onos counters results
- onosCountersRaw = []
- threads = []
- for i in range( main.numCtrls ):
- t = main.Thread( target=main.CLIs[i].counters,
- name="counters-" + str( i ) )
- threads.append( t )
- t.start()
- for t in threads:
- t.join()
- onosCountersRaw.append( t.result )
- onosCounters = []
- for i in range( main.numCtrls ):
- try:
- onosCounters.append( json.loads( onosCountersRaw[i] ) )
- except ( ValueError, TypeError ):
- main.log.error( "Could not parse counters response from ONOS" +
- str( i + 1 ) )
- main.log.warn( repr( onosCountersRaw[ i ] ) )
- onosCounters.append( [] )
- return main.FALSE
+ def __init__( self ):
+ self.default = ''
- testCounters = {}
- # make a list of all the "TestON-*" counters in ONOS
- # lookes like a dict whose keys are the name of the ONOS node and values
- # are a list of the counters. I.E.
- # { "ONOS1": [ {"name":"TestON-inMemory","value":56},
- # {"name":"TestON-Partitions","value":56} ]
- # }
- # NOTE: There is an assumtion that all nodes are active
- # based on the above for loops
- for controller in enumerate( onosCounters ):
- for dbType in controller[1]:
- for dbName, items in dbType.iteritems():
- for item in items:
- if 'TestON' in item['name']:
- node = 'ONOS' + str( controller[0] + 1 )
- try:
- testCounters[node].append( item )
- except KeyError:
- testCounters[node] = [ item ]
- # compare the counters on each node
- tmp = [ v == testCounters['ONOS1'] for k, v in testCounters.iteritems() ]
- if all( tmp ):
- consistent = main.TRUE
- else:
- consistent = main.FALSE
- main.log.error( "ONOS nodes have different values for counters:\n" +
- testCounters )
- return ( onosCounters, consistent )
- except Exception:
- main.log.exception( "" )
- main.cleanup()
- main.exit()
+ def consistentCheck( self ):
+ """
+ Checks that TestON counters are consistent across all nodes.
-def counterCheck( counterName, counterValue ):
- """
- Checks that TestON counters are consistent across all nodes and that
- specified counter is in ONOS with the given value
- """
- import json
- correctResults = main.TRUE
- # Get onos counters results and consistentCheck
- onosCounters, consistent = main.Counters.consistentCheck()
- # Check for correct values
- for i in range( main.numCtrls ):
- current = onosCounters[i]
- onosValue = None
+ Returns the tuple (onosCounters, consistent)
+ - onosCounters is the parsed json output of the counters command on all nodes
+ - consistent is main.TRUE if all "TestON" counters are consistent across all
+ nodes or main.FALSE
+ """
try:
- for database in current:
- database = database.values()[0]
- for counter in database:
- if counter.get( 'name' ) == counterName:
- onosValue = counter.get( 'value' )
- break
- except AttributeError, e:
- main.log.error( "ONOS" + str( i + 1 ) + " counters result " +
- "is not as expected" )
- correctResults = main.FALSE
- if onosValue == counterValue:
- main.log.info( counterName + " counter value is correct" )
- else:
- main.log.error( counterName + " counter value is incorrect," +
- " expected value: " + str( counterValue )
- + " current value: " + str( onosValue ) )
- correctResults = main.FALSE
- return consistent and correctResults
+ correctResults = main.TRUE
+ # Get onos counters results
+ onosCountersRaw = []
+ threads = []
+ for i in main.activeNodes:
+ t = main.Thread( target=utilities.retry,
+ name="counters-" + str( i ),
+ args=[ main.CLIs[i].counters, [ None ] ],
+ kwargs= { 'sleep': 5, 'attempts': 5,
+ 'randomTime': True } )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ onosCountersRaw.append( t.result )
+ onosCounters = []
+ for i in range( len( main.activeNodes ) ):
+ try:
+ onosCounters.append( json.loads( onosCountersRaw[i] ) )
+ except ( ValueError, TypeError ):
+ main.log.error( "Could not parse counters response from ONOS" +
+ str( main.activeNodes[i] + 1 ) )
+ main.log.warn( repr( onosCountersRaw[ i ] ) )
+ onosCounters.append( [] )
+
+ testCounters = {}
+ # make a list of all the "TestON-*" counters in ONOS
+ # looks like a dict whose keys are the name of the ONOS node and values
+ # are a list of the counters. I.E.
+ # { "ONOS1": [ { "name":"TestON-Partitions","value":56} ]
+ # }
+ # NOTE: There is an assumption that all nodes are active
+ # based on the above for loops
+ for controller in enumerate( onosCounters ):
+ for key, value in controller[1].iteritems():
+ if 'TestON' in key:
+ node = 'ONOS' + str( controller[0] + 1 )
+ try:
+ testCounters[node].append( { key: value } )
+ except KeyError:
+ testCounters[node] = [ { key: value } ]
+ # compare the counters on each node
+ firstV = testCounters.values()[0]
+ tmp = [ v == firstV for k, v in testCounters.iteritems() ]
+ if all( tmp ):
+ consistent = main.TRUE
+ else:
+ consistent = main.FALSE
+ main.log.error( "ONOS nodes have different values for counters:\n" +
+ testCounters )
+ return ( onosCounters, consistent )
+ except Exception:
+ main.log.exception( "" )
+ main.cleanup()
+ main.exit()
+
+ def counterCheck( self, counterName, counterValue ):
+ """
+ Checks that TestON counters are consistent across all nodes and that
+ specified counter is in ONOS with the given value
+ """
+ try:
+ correctResults = main.TRUE
+ # Get onos counters results and consistentCheck
+ onosCounters, consistent = self.consistentCheck()
+ # Check for correct values
+ for i in range( len( main.activeNodes ) ):
+ current = onosCounters[i]
+ onosValue = None
+ try:
+ onosValue = current.get( counterName )
+ except AttributeError, e:
+ node = str( main.activeNodes[i] + 1 )
+ main.log.error( "ONOS" + node + " counters result " +
+ "is not as expected" )
+ correctResults = main.FALSE
+ if onosValue == counterValue:
+ main.log.info( counterName + " counter value is correct" )
+ else:
+ main.log.error( counterName + " counter value is incorrect," +
+ " expected value: " + str( counterValue )
+ + " current value: " + str( onosValue ) )
+ correctResults = main.FALSE
+ return consistent and correctResults
+ except Exception:
+ main.log.exception( "" )
+ main.cleanup()
+ main.exit()
diff --git a/TestON/tests/HAfullNetPartition/HAfullNetPartition.params b/TestON/tests/HAfullNetPartition/HAfullNetPartition.params
index 23e7ae8..e263df3 100644
--- a/TestON/tests/HAfullNetPartition/HAfullNetPartition.params
+++ b/TestON/tests/HAfullNetPartition/HAfullNetPartition.params
@@ -20,9 +20,7 @@
#CASE16: Install Distributed Primitives app
#CASE17: Check for basic functionality with distributed primitives
<testcases>1,[2,8,3,4,5,14,16,17]*1,[61,8,7,4,15,17,62],8,7,4,15,17,9,8,4,10,8,4,11,8,4,12,8,4,13</testcases>
- <imports>
- <path> /home/admin/OnosSystemTest/TestON/tests/HAfullNetPartition/dependencies/ </path>
- </imports>
+
<ENV>
<cellName>HA</cellName>
<appString>drivers,openflow,proxyarp,mobility</appString>
diff --git a/TestON/tests/HAfullNetPartition/HAfullNetPartition.py b/TestON/tests/HAfullNetPartition/HAfullNetPartition.py
index 5a6de1a..97e04cf 100644
--- a/TestON/tests/HAfullNetPartition/HAfullNetPartition.py
+++ b/TestON/tests/HAfullNetPartition/HAfullNetPartition.py
@@ -51,6 +51,7 @@
import imp
import pexpect
import time
+ import json
main.log.info( "ONOS HA test: Partition ONOS nodes into two sub-clusters - " +
"initialization" )
main.case( "Setting up test environment" )
@@ -77,6 +78,11 @@
global ONOS5Port
global ONOS6Port
global ONOS7Port
+ # These are for csv plotting in jenkins
+ global labels
+ global data
+ labels = []
+ data = []
# FIXME: just get controller port from params?
# TODO: do we really need all these?
@@ -89,11 +95,8 @@
ONOS7Port = main.params[ 'CTRL' ][ 'port7' ]
try:
- fileName = "Counters"
- # TODO: Maybe make a library folder somewhere?
- path = main.params[ 'imports' ][ 'path' ]
- main.Counters = imp.load_source( fileName,
- path + fileName + ".py" )
+ from tests.HAsanity.dependencies.Counters import Counters
+ main.Counters = Counters()
except Exception as e:
main.log.exception( e )
main.cleanup()
@@ -273,7 +276,6 @@
port=main.params[ 'MNtcpdump' ][ 'port' ] )
main.step( "App Ids check" )
- time.sleep(60)
appCheck = main.TRUE
threads = []
for i in main.activeNodes:
@@ -294,6 +296,48 @@
onpass="App Ids seem to be correct",
onfail="Something is wrong with app Ids" )
+ main.step( "Checking ONOS nodes" )
+ nodesOutput = []
+ nodeResults = main.TRUE
+ threads = []
+ for i in main.activeNodes:
+ t = main.Thread( target=main.CLIs[i].nodes,
+ name="nodes-" + str( i ),
+ args=[ ] )
+ threads.append( t )
+ t.start()
+
+ for t in threads:
+ t.join()
+ nodesOutput.append( t.result )
+ ips = [ main.nodes[node].ip_address for node in main.activeNodes ]
+ ips.sort()
+ for i in nodesOutput:
+ try:
+ current = json.loads( i )
+ activeIps = []
+ currentResult = main.FALSE
+ for node in current:
+ if node['state'] == 'READY':
+ activeIps.append( node['ip'] )
+ activeIps.sort()
+ if ips == activeIps:
+ currentResult = main.TRUE
+ except ( ValueError, TypeError ):
+ main.log.error( "Error parsing nodes output" )
+ main.log.warn( repr( i ) )
+ currentResult = main.FALSE
+ nodeResults = nodeResults and currentResult
+ utilities.assert_equals( expect=main.TRUE, actual=nodeResults,
+ onpass="Nodes check successful",
+ onfail="Nodes check NOT successful" )
+
+ if not nodeResults:
+ for cli in main.CLIs:
+ main.log.debug( "{} components not ACTIVE: \n{}".format(
+ cli.name,
+ cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
+
if cliResults == main.FALSE:
main.log.error( "Failed to start ONOS, stopping test" )
main.cleanup()
@@ -1309,20 +1353,26 @@
main.log.warn( title )
# get all intent keys in the cluster
keys = []
- for nodeStr in ONOSIntents:
- node = json.loads( nodeStr )
- for intent in node:
- keys.append( intent.get( 'id' ) )
- keys = set( keys )
- for key in keys:
- row = "%-13s" % key
+ try:
+ # Get the set of all intent keys
for nodeStr in ONOSIntents:
node = json.loads( nodeStr )
for intent in node:
- if intent.get( 'id', "Error" ) == key:
- row += "%-15s" % intent.get( 'state' )
- main.log.warn( row )
- # End table view
+ keys.append( intent.get( 'id' ) )
+ keys = set( keys )
+ # For each intent key, print the state on each node
+ for key in keys:
+ row = "%-13s" % key
+ for nodeStr in ONOSIntents:
+ node = json.loads( nodeStr )
+ for intent in node:
+ if intent.get( 'id', "Error" ) == key:
+ row += "%-15s" % intent.get( 'state' )
+ main.log.warn( row )
+ # End of intent state table
+ except ValueError as e:
+ main.log.exception( e )
+ main.log.debug( "nodeStr was: " + repr( nodeStr ) )
if intentsResults and not consistentIntents:
# print the json objects
@@ -2391,8 +2441,9 @@
hostsResults = hostsResults and currentHostsResult
hostAttachmentResults = hostAttachmentResults and\
hostAttachment
- topoResult = devicesResults and linksResults and\
- hostsResults and hostAttachmentResults
+ topoResult = ( devicesResults and linksResults
+ and hostsResults and ipResult and
+ hostAttachmentResults )
utilities.assert_equals( expect=True,
actual=topoResult,
onpass="ONOS topology matches Mininet",
@@ -2456,7 +2507,6 @@
controllerStr +
" is inconsistent with ONOS1" )
consistentClustersResult = main.FALSE
-
else:
main.log.error( "Error in getting dataplane clusters " +
"from ONOS" + controllerStr )
@@ -2477,6 +2527,7 @@
except ( ValueError, TypeError ):
main.log.exception( "Error parsing clusters[0]: " +
repr( clusters[0] ) )
+ numClusters = "ERROR"
clusterResults = main.FALSE
if numClusters == 1:
clusterResults = main.TRUE
@@ -2514,6 +2565,13 @@
onpass="Link are correct",
onfail="Links are incorrect" )
+ main.step( "Hosts are correct" )
+ utilities.assert_equals(
+ expect=main.TRUE,
+ actual=hostsResults,
+ onpass="Hosts are correct",
+ onfail="Hosts are incorrect" )
+
# FIXME: move this to an ONOS state case
main.step( "Checking ONOS nodes" )
nodesOutput = []
@@ -2550,6 +2608,11 @@
utilities.assert_equals( expect=main.TRUE, actual=nodeResults,
onpass="Nodes check successful",
onfail="Nodes check NOT successful" )
+ if not nodeResults:
+ for cli in main.CLIs:
+ main.log.debug( "{} components not ACTIVE: \n{}".format(
+ cli.name,
+ cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
def CASE9( self, main ):
"""
@@ -2839,16 +2902,10 @@
description = "Check that Leadership Election is still functional"
main.case( description )
- # NOTE: Need to re-run since being a canidate is not persistant
- # TODO: add check for "Command not found:" in the driver, this
- # means the election test app isn't loaded
+ # NOTE: Need to re-run after restarts since being a candidate is not persistent
- oldLeaders = [] # leaders by node before withdrawl from candidates
- newLeaders = [] # leaders by node after withdrawl from candidates
- oldAllCandidates = [] # list of lists of each nodes' candidates before
- newAllCandidates = [] # list of lists of each nodes' candidates after
- oldCandidates = [] # list of candidates from node 0 before withdrawl
- newCandidates = [] # list of candidates from node 0 after withdrawl
+ oldLeaders = [] # list of lists of each nodes' candidates before
+ newLeaders = [] # list of lists of each nodes' candidates after
oldLeader = '' # the old leader from oldLeaders, None if not same
newLeader = '' # the new leaders fron newLoeaders, None if not same
oldLeaderCLI = None # the CLI of the old leader used for re-electing
@@ -2874,44 +2931,36 @@
main.skipCase()
main.step( "Check that each node shows the same leader and candidates" )
- sameResult = main.TRUE
- failMessage = "Nodes have different leaders"
- for i in main.activeNodes:
- cli = main.CLIs[i]
- node = cli.specificLeaderCandidate( 'org.onosproject.election' )
- oldAllCandidates.append( node )
- if node:
- oldLeaders.append( node[ 0 ] )
- else:
- oldLeaders.append( None )
- oldCandidates = oldAllCandidates[ 0 ]
- if oldCandidates is None:
- oldCandidates = [ None ]
-
- # Check that each node has the same leader. Defines oldLeader
- if len( set( oldLeaders ) ) != 1:
- sameResult = main.FALSE
- main.log.error( "More than one leader present:" + str( oldLeaders ) )
- # FIXME: for split brain, we will likely have 2. WHat should we do here?
- oldLeader = None
+ failMessage = "Nodes have different leaderboards"
+ def consistentLeaderboards( nodes ):
+ TOPIC = 'org.onosproject.election'
+ # FIXME: use threads
+ #FIXME: should we retry outside the function?
+ for n in range( 5 ): # Retry in case election is still happening
+ leaderList = []
+ # Get all leaderboards
+ for cli in nodes:
+ leaderList.append( cli.specificLeaderCandidate( TOPIC ) )
+ # Compare leaderboards
+ result = all( i == leaderList[0] for i in leaderList ) and\
+ leaderList is not None
+ main.log.debug( leaderList )
+ main.log.warn( result )
+ if result:
+ return ( result, leaderList )
+ time.sleep(5) #TODO: parameterize
+ main.log.error( "Inconsistent leaderboards:" + str( leaderList ) )
+ activeCLIs = [ main.CLIs[i] for i in main.activeNodes ]
+ sameResult, oldLeaders = consistentLeaderboards( activeCLIs )
+ if sameResult:
+ oldLeader = oldLeaders[ 0 ][ 0 ]
+ main.log.warn( oldLeader )
else:
- oldLeader = oldLeaders[ 0 ]
-
- # Check that each node's candidate list is the same
- candidateDiscrepancy = False # Boolean of candidate mismatches
- for candidates in oldAllCandidates:
- if candidates is None:
- main.log.warn( "Error getting candidates" )
- candidates = [ None ]
- if set( candidates ) != set( oldCandidates ):
- sameResult = main.FALSE
- candidateDiscrepancy = True
- if candidateDiscrepancy:
- failMessage += " and candidates"
+ oldLeader = None
utilities.assert_equals(
- expect=main.TRUE,
+ expect=True,
actual=sameResult,
- onpass="Leadership is consistent for the election topic",
+ onpass="Leaderboards are consistent for the election topic",
onfail=failMessage )
main.step( "Find current leader and withdraw" )
@@ -2936,56 +2985,30 @@
onfail="Node was not withdrawn from election" )
main.step( "Check that a new node was elected leader" )
- # FIXME: use threads
- newLeaderResult = main.TRUE
failMessage = "Nodes have different leaders"
-
# Get new leaders and candidates
- for i in main.activeNodes:
- cli = main.CLIs[i]
- node = cli.specificLeaderCandidate( 'org.onosproject.election' )
- # elections might no have finished yet
- if node[ 0 ] == 'none' and not expectNoLeader:
- main.log.info( "Node has no leader, waiting 5 seconds to be " +
- "sure elections are complete." )
- time.sleep(5)
- node = cli.specificLeaderCandidate( 'org.onosproject.election' )
- # election still isn't done or there is a problem
- if node[ 0 ] == 'none':
- main.log.error( "No leader was elected on at least 1 node" )
- newLeaderResult = main.FALSE
- newAllCandidates.append( node )
- newLeaders.append( node[ 0 ] )
- newCandidates = newAllCandidates[ 0 ]
-
- # Check that each node has the same leader. Defines newLeader
- if len( set( newLeaders ) ) != 1:
- newLeaderResult = main.FALSE
- main.log.error( "Nodes have different leaders: " +
- str( newLeaders ) )
- newLeader = None
+ newLeaderResult, newLeaders = consistentLeaderboards( activeCLIs )
+ if newLeaders[ 0 ][ 0 ] == 'none':
+ main.log.error( "No leader was elected on at least 1 node" )
+ if not expectNoLeader:
+ newLeaderResult = False
+ if newLeaderResult:
+ newLeader = newLeaders[ 0 ][ 0 ]
else:
- newLeader = newLeaders[ 0 ]
-
- # Check that each node's candidate list is the same
- for candidates in newAllCandidates:
- if set( candidates ) != set( newCandidates ):
- newLeaderResult = main.FALSE
- main.log.error( "Discrepancy in candidate lists detected" )
+ newLeader = None
# Check that the new leader is not the older leader, which was withdrawn
if newLeader == oldLeader:
- newLeaderResult = main.FALSE
+ newLeaderResult = False
main.log.error( "All nodes still see old leader: " + str( oldLeader ) +
" as the current leader" )
-
utilities.assert_equals(
- expect=main.TRUE,
+ expect=True,
actual=newLeaderResult,
onpass="Leadership election passed",
onfail="Something went wrong with Leadership election" )
- main.step( "Check that that new leader was the candidate of old leader")
+ main.step( "Check that that new leader was the candidate of old leader" )
# candidates[ 2 ] should become the top candidate after withdrawl
correctCandidateResult = main.TRUE
if expectNoLeader:
@@ -2995,12 +3018,17 @@
else:
main.log.info( "Expected no leader, got: " + str( newLeader ) )
correctCandidateResult = main.FALSE
- elif len( oldCandidates ) >= 3 and newLeader != oldCandidates[ 2 ]:
- correctCandidateResult = main.FALSE
- main.log.error( "Candidate {} was elected. {} should have had priority.".format(
- newLeader, oldCandidates[ 2 ] ) )
+ elif len( oldLeaders[0] ) >= 3:
+ if newLeader == oldLeaders[ 0 ][ 2 ]:
+ # correct leader was elected
+ correctCandidateResult = main.TRUE
+ else:
+ correctCandidateResult = main.FALSE
+ main.log.error( "Candidate {} was elected. {} should have had priority.".format(
+ newLeader, oldLeaders[ 0 ][ 2 ] ) )
else:
main.log.warn( "Could not determine who should be the correct leader" )
+ main.log.debug( oldLeaders[ 0 ] )
correctCandidateResult = main.FALSE
utilities.assert_equals(
expect=main.TRUE,
@@ -3020,55 +3048,23 @@
actual=runResult,
onpass="App re-ran for election",
onfail="App failed to run for election" )
+
main.step(
"Check that oldLeader is a candidate, and leader if only 1 node" )
# verify leader didn't just change
- positionResult = main.TRUE
- # Get new leaders and candidates, wait if oldLeader is not a candidate yet
-
- # Reset and reuse the new candidate and leaders lists
- newAllCandidates = []
- newCandidates = []
- newLeaders = []
- for i in main.activeNodes:
- cli = main.CLIs[i]
- node = cli.specificLeaderCandidate( 'org.onosproject.election' )
- if oldLeader not in node: # election might no have finished yet
- main.log.info( "Old Leader not elected, waiting 5 seconds to " +
- "be sure elections are complete" )
- time.sleep(5)
- node = cli.specificLeaderCandidate( 'org.onosproject.election' )
- if oldLeader not in node: # election still isn't done, errors
- main.log.error(
- "Old leader was not elected on at least one node" )
- positionResult = main.FALSE
- newAllCandidates.append( node )
- newLeaders.append( node[ 0 ] )
- newCandidates = newAllCandidates[ 0 ]
-
- # Check that each node has the same leader. Defines newLeader
- if len( set( newLeaders ) ) != 1:
- positionResult = main.FALSE
- main.log.error( "Nodes have different leaders: " +
- str( newLeaders ) )
- newLeader = None
- else:
- newLeader = newLeaders[ 0 ]
-
- # Check that each node's candidate list is the same
- for candidates in newAllCandidates:
- if set( candidates ) != set( newCandidates ):
- newLeaderResult = main.FALSE
- main.log.error( "Discrepancy in candidate lists detected" )
+ # Get new leaders and candidates
+ reRunLeaders = []
+ time.sleep( 5 ) # TODO: parameterize
+ positionResult, reRunLeaders = consistentLeaderboards( activeCLIs )
# Check that the re-elected node is last on the candidate List
- if oldLeader != newCandidates[ -1 ]:
- main.log.error( "Old Leader (" + str( oldLeader ) + ") not in the proper position " +
- str( newCandidates ) )
+ if oldLeader != reRunLeaders[ 0 ][ -1 ]:
+ main.log.error( "Old Leader ({}) not in the proper position: {} ".format( str( oldLeader),
+ str( reRunLeaders[ 0 ] ) ) )
positionResult = main.FALSE
utilities.assert_equals(
- expect=main.TRUE,
+ expect=True,
actual=positionResult,
onpass="Old leader successfully re-ran for election",
onfail="Something went wrong with Leadership election after " +
@@ -3087,15 +3083,11 @@
# Variables for the distributed primitives tests
global pCounterName
- global iCounterName
global pCounterValue
- global iCounterValue
global onosSet
global onosSetName
pCounterName = "TestON-Partitions"
- iCounterName = "TestON-inMemory"
pCounterValue = 0
- iCounterValue = 0
onosSet = set([])
onosSetName = "TestON-set"
@@ -3122,7 +3114,6 @@
assert main.CLIs, "main.CLIs not defined"
assert main.nodes, "main.nodes not defined"
assert pCounterName, "pCounterName not defined"
- assert iCounterName, "iCounterName not defined"
assert onosSetName, "onosSetName not defined"
# NOTE: assert fails if value is 0/None/Empty/False
try:
@@ -3131,11 +3122,6 @@
main.log.error( "pCounterValue not defined, setting to 0" )
pCounterValue = 0
try:
- iCounterValue
- except NameError:
- main.log.error( "iCounterValue not defined, setting to 0" )
- iCounterValue = 0
- try:
onosSet
except NameError:
main.log.error( "onosSet not defined, setting to empty Set" )
@@ -3319,193 +3305,6 @@
onpass="Added counters are correct",
onfail="Added counters are incorrect" )
- # In-Memory counters
- main.step( "Increment and get an in-memory counter on each node" )
- iCounters = []
- addedIValues = []
- threads = []
- for i in main.activeNodes:
- t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
- name="icounterIncrement-" + str( i ),
- args=[ iCounterName ],
- kwargs={ "inMemory": True } )
- iCounterValue += 1
- addedIValues.append( iCounterValue )
- threads.append( t )
- t.start()
-
- for t in threads:
- t.join()
- iCounters.append( t.result )
- # Check that counter incremented numController times
- iCounterResults = True
- for i in addedIValues:
- tmpResult = i in iCounters
- iCounterResults = iCounterResults and tmpResult
- if not tmpResult:
- main.log.error( str( i ) + " is not in the in-memory "
- "counter incremented results" )
- utilities.assert_equals( expect=True,
- actual=iCounterResults,
- onpass="In-memory counter incremented",
- onfail="Error incrementing in-memory" +
- " counter" )
-
- main.step( "Get then Increment a in-memory counter on each node" )
- iCounters = []
- threads = []
- addedIValues = []
- for i in main.activeNodes:
- t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
- name="counterGetAndAdd-" + str( i ),
- args=[ iCounterName ],
- kwargs={ "inMemory": True } )
- addedIValues.append( iCounterValue )
- iCounterValue += 1
- threads.append( t )
- t.start()
-
- for t in threads:
- t.join()
- iCounters.append( t.result )
- # Check that counter incremented numController times
- iCounterResults = True
- for i in addedIValues:
- tmpResult = i in iCounters
- iCounterResults = iCounterResults and tmpResult
- if not tmpResult:
- main.log.error( str( i ) + " is not in in-memory "
- "counter incremented results" )
- utilities.assert_equals( expect=True,
- actual=iCounterResults,
- onpass="In-memory counter incremented",
- onfail="Error incrementing in-memory" +
- " counter" )
-
- main.step( "Counters we added have the correct values" )
- incrementCheck = main.Counters.counterCheck( iCounterName, iCounterValue )
- utilities.assert_equals( expect=main.TRUE,
- actual=incrementCheck,
- onpass="Added counters are correct",
- onfail="Added counters are incorrect" )
-
- main.step( "Add -8 to then get a in-memory counter on each node" )
- iCounters = []
- threads = []
- addedIValues = []
- for i in main.activeNodes:
- t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
- name="counterIncrement-" + str( i ),
- args=[ iCounterName ],
- kwargs={ "delta": -8, "inMemory": True } )
- iCounterValue += -8
- addedIValues.append( iCounterValue )
- threads.append( t )
- t.start()
-
- for t in threads:
- t.join()
- iCounters.append( t.result )
- # Check that counter incremented numController times
- iCounterResults = True
- for i in addedIValues:
- tmpResult = i in iCounters
- iCounterResults = iCounterResults and tmpResult
- if not tmpResult:
- main.log.error( str( i ) + " is not in in-memory "
- "counter incremented results" )
- utilities.assert_equals( expect=True,
- actual=pCounterResults,
- onpass="In-memory counter incremented",
- onfail="Error incrementing in-memory" +
- " counter" )
-
- main.step( "Add 5 to then get a in-memory counter on each node" )
- iCounters = []
- threads = []
- addedIValues = []
- for i in main.activeNodes:
- t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
- name="counterIncrement-" + str( i ),
- args=[ iCounterName ],
- kwargs={ "delta": 5, "inMemory": True } )
- iCounterValue += 5
- addedIValues.append( iCounterValue )
- threads.append( t )
- t.start()
-
- for t in threads:
- t.join()
- iCounters.append( t.result )
- # Check that counter incremented numController times
- iCounterResults = True
- for i in addedIValues:
- tmpResult = i in iCounters
- iCounterResults = iCounterResults and tmpResult
- if not tmpResult:
- main.log.error( str( i ) + " is not in in-memory "
- "counter incremented results" )
- utilities.assert_equals( expect=True,
- actual=pCounterResults,
- onpass="In-memory counter incremented",
- onfail="Error incrementing in-memory" +
- " counter" )
-
- main.step( "Get then add 5 to a in-memory counter on each node" )
- iCounters = []
- threads = []
- addedIValues = []
- for i in main.activeNodes:
- t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
- name="counterIncrement-" + str( i ),
- args=[ iCounterName ],
- kwargs={ "delta": 5, "inMemory": True } )
- addedIValues.append( iCounterValue )
- iCounterValue += 5
- threads.append( t )
- t.start()
-
- for t in threads:
- t.join()
- iCounters.append( t.result )
- # Check that counter incremented numController times
- iCounterResults = True
- for i in addedIValues:
- tmpResult = i in iCounters
- iCounterResults = iCounterResults and tmpResult
- if not tmpResult:
- main.log.error( str( i ) + " is not in in-memory "
- "counter incremented results" )
- utilities.assert_equals( expect=True,
- actual=iCounterResults,
- onpass="In-memory counter incremented",
- onfail="Error incrementing in-memory" +
- " counter" )
-
- main.step( "Counters we added have the correct values" )
- incrementCheck = main.Counters.counterCheck( iCounterName, iCounterValue )
- utilities.assert_equals( expect=main.TRUE,
- actual=incrementCheck,
- onpass="Added counters are correct",
- onfail="Added counters are incorrect" )
-
- main.step( "Check counters are consistant across nodes" )
- onosCounters, consistentCounterResults = main.Counters.consistentCheck()
- utilities.assert_equals( expect=main.TRUE,
- actual=consistentCounterResults,
- onpass="ONOS counters are consistent " +
- "across nodes",
- onfail="ONOS Counters are inconsistent " +
- "across nodes" )
-
- main.step( "Counters we added have the correct values" )
- incrementCheck = main.Counters.counterCheck( iCounterName, iCounterValue )
- incrementCheck = incrementCheck and \
- main.Counters.counterCheck( iCounterName, iCounterValue )
- utilities.assert_equals( expect=main.TRUE,
- actual=incrementCheck,
- onpass="Added counters are correct",
- onfail="Added counters are incorrect" )
# DISTRIBUTED SETS
main.step( "Distributed Set get" )
size = len( onosSet )
@@ -4455,50 +4254,3 @@
actual=getCheck,
onpass="Partitioned Transactional Map get values were correct",
onfail="Partitioned Transactional Map values incorrect" )
-
- main.step( "In-memory Transactional maps put" )
- tMapValue = "Testing"
- numKeys = 100
- putResult = True
- node = main.activeNodes[0]
- putResponses = main.CLIs[node].transactionalMapPut( numKeys, tMapValue, inMemory=True )
- if len( putResponses ) == 100:
- for i in putResponses:
- if putResponses[ i ][ 'value' ] != tMapValue:
- putResult = False
- else:
- putResult = False
- if not putResult:
- main.log.debug( "Put response values: " + str( putResponses ) )
- utilities.assert_equals( expect=True,
- actual=putResult,
- onpass="In-Memory Transactional Map put successful",
- onfail="In-Memory Transactional Map put values are incorrect" )
-
- main.step( "In-Memory Transactional maps get" )
- getCheck = True
- for n in range( 1, numKeys + 1 ):
- getResponses = []
- threads = []
- valueCheck = True
- for i in main.activeNodes:
- t = main.Thread( target=main.CLIs[i].transactionalMapGet,
- name="TMap-get-" + str( i ),
- args=[ "Key" + str( n ) ],
- kwargs={ "inMemory": True } )
- threads.append( t )
- t.start()
- for t in threads:
- t.join()
- getResponses.append( t.result )
- for node in getResponses:
- if node != tMapValue:
- valueCheck = False
- if not valueCheck:
- main.log.warn( "Values for key 'Key" + str( n ) + "' do not match:" )
- main.log.warn( getResponses )
- getCheck = getCheck and valueCheck
- utilities.assert_equals( expect=True,
- actual=getCheck,
- onpass="In-Memory Transactional Map get values were correct",
- onfail="In-Memory Transactional Map values incorrect" )
diff --git a/TestON/tests/HAfullNetPartition/dependencies/Counters.py b/TestON/tests/HAfullNetPartition/dependencies/Counters.py
index 265ba13..192b919 100644
--- a/TestON/tests/HAfullNetPartition/dependencies/Counters.py
+++ b/TestON/tests/HAfullNetPartition/dependencies/Counters.py
@@ -1,112 +1,105 @@
-def __init__( self ):
- self.default = ''
+import json
-def consistentCheck():
- """
- Checks that TestON counters are consistent across all nodes.
+class Counters():
- Returns the tuple (onosCounters, consistent)
- - onosCounters is the parsed json output of the counters command on all nodes
- - consistent is main.TRUE if all "TestON" counters are consitent across all
- nodes or main.FALSE
- """
- import json
- try:
- correctResults = main.TRUE
- # Get onos counters results
- onosCountersRaw = []
- threads = []
- for i in main.activeNodes:
- t = main.Thread( target=utilities.retry,
- name="counters-" + str( i ),
- args=[ main.CLIs[i].counters, [ None ] ],
- kwargs= { 'sleep': 5, 'attempts': 5,
- 'randomTime': True } )
- threads.append( t )
- t.start()
- for t in threads:
- t.join()
- onosCountersRaw.append( t.result )
- onosCounters = []
- for i in range( len( main.activeNodes ) ):
- try:
- onosCounters.append( json.loads( onosCountersRaw[i] ) )
- except ( ValueError, TypeError ):
- main.log.error( "Could not parse counters response from ONOS" +
- str( main.activeNodes[i] + 1 ) )
- main.log.warn( repr( onosCountersRaw[ i ] ) )
- onosCounters.append( [] )
- #return ( onosCounters, main.FALSE )
+ def __init__( self ):
+ self.default = ''
- testCounters = {}
- # make a list of all the "TestON-*" counters in ONOS
- # lookes like a dict whose keys are the name of the ONOS node and values
- # are a list of the counters. I.E.
- # { "ONOS1": [ {"name":"TestON-inMemory","value":56},
- # {"name":"TestON-Partitions","value":56} ]
- # }
- # NOTE: There is an assumtion that all nodes are active
- # based on the above for loops
- for controller in enumerate( onosCounters ):
- for dbType in controller[1]:
- for dbName, items in dbType.iteritems():
- for item in items:
- if 'TestON' in item['name']:
- node = 'ONOS' + str( main.activeNodes[ controller[0] ] + 1 )
- try:
- testCounters[node].append( item )
- except KeyError:
- testCounters[node] = [ item ]
- # compare the counters on each node
- firstV = testCounters.values()[0]
- tmp = [ v == firstV for k, v in testCounters.iteritems() ]
- if all( tmp ):
- consistent = main.TRUE
- else:
- consistent = main.FALSE
- main.log.error( "ONOS nodes have different values for counters:\n" +
- testCounters )
- return ( onosCounters, consistent )
- except Exception:
- main.log.exception( "" )
- main.cleanup()
- main.exit()
+ def consistentCheck( self ):
+ """
+ Checks that TestON counters are consistent across all nodes.
-def counterCheck( counterName, counterValue ):
- """
- Checks that TestON counters are consistent across all nodes and that
- specified counter is in ONOS with the given value
- """
- import json
- try:
- correctResults = main.TRUE
- # Get onos counters results and consistentCheck
- onosCounters, consistent = main.Counters.consistentCheck()
- # Check for correct values
- for i in range( len( main.activeNodes ) ):
- current = onosCounters[i]
- onosValue = None
- try:
- for database in current:
- database = database.values()[0]
- for counter in database:
- if counter.get( 'name' ) == counterName:
- onosValue = counter.get( 'value' )
- break
- except AttributeError, e:
- node = str( main.activeNodes[i] + 1 )
- main.log.error( "ONOS" + node + " counters result " +
- "is not as expected" )
- correctResults = main.FALSE
- if onosValue == counterValue:
- main.log.info( counterName + " counter value is correct" )
+ Returns the tuple (onosCounters, consistent)
+ - onosCounters is the parsed json output of the counters command on all nodes
+ - consistent is main.TRUE if all "TestON" counters are consistent across all
+ nodes or main.FALSE
+ """
+ try:
+ correctResults = main.TRUE
+ # Get onos counters results
+ onosCountersRaw = []
+ threads = []
+ for i in main.activeNodes:
+ t = main.Thread( target=utilities.retry,
+ name="counters-" + str( i ),
+ args=[ main.CLIs[i].counters, [ None ] ],
+ kwargs= { 'sleep': 5, 'attempts': 5,
+ 'randomTime': True } )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ onosCountersRaw.append( t.result )
+ onosCounters = []
+ for i in range( len( main.activeNodes ) ):
+ try:
+ onosCounters.append( json.loads( onosCountersRaw[i] ) )
+ except ( ValueError, TypeError ):
+ main.log.error( "Could not parse counters response from ONOS" +
+ str( main.activeNodes[i] + 1 ) )
+ main.log.warn( repr( onosCountersRaw[ i ] ) )
+ onosCounters.append( [] )
+
+ testCounters = {}
+ # make a list of all the "TestON-*" counters in ONOS
+ # looks like a dict whose keys are the name of the ONOS node and values
+ # are a list of the counters. I.E.
+ # { "ONOS1": [ { "name":"TestON-Partitions","value":56} ]
+ # }
+ # NOTE: There is an assumption that all nodes are active
+ # based on the above for loops
+ for controller in enumerate( onosCounters ):
+ for key, value in controller[1].iteritems():
+ if 'TestON' in key:
+ node = 'ONOS' + str( controller[0] + 1 )
+ try:
+ testCounters[node].append( { key: value } )
+ except KeyError:
+ testCounters[node] = [ { key: value } ]
+ # compare the counters on each node
+ firstV = testCounters.values()[0]
+ tmp = [ v == firstV for k, v in testCounters.iteritems() ]
+ if all( tmp ):
+ consistent = main.TRUE
else:
- main.log.error( counterName + " counter value is incorrect," +
- " expected value: " + str( counterValue )
- + " current value: " + str( onosValue ) )
- correctResults = main.FALSE
- return consistent and correctResults
- except Exception:
- main.log.exception( "" )
- main.cleanup()
- main.exit()
+ consistent = main.FALSE
+ main.log.error( "ONOS nodes have different values for counters:\n" +
+ testCounters )
+ return ( onosCounters, consistent )
+ except Exception:
+ main.log.exception( "" )
+ main.cleanup()
+ main.exit()
+
+ def counterCheck( self, counterName, counterValue ):
+ """
+ Checks that TestON counters are consistent across all nodes and that
+ specified counter is in ONOS with the given value
+ """
+ try:
+ correctResults = main.TRUE
+ # Get onos counters results and consistentCheck
+ onosCounters, consistent = self.consistentCheck()
+ # Check for correct values
+ for i in range( len( main.activeNodes ) ):
+ current = onosCounters[i]
+ onosValue = None
+ try:
+ onosValue = current.get( counterName )
+ except AttributeError, e:
+ node = str( main.activeNodes[i] + 1 )
+ main.log.error( "ONOS" + node + " counters result " +
+ "is not as expected" )
+ correctResults = main.FALSE
+ if onosValue == counterValue:
+ main.log.info( counterName + " counter value is correct" )
+ else:
+ main.log.error( counterName + " counter value is incorrect," +
+ " expected value: " + str( counterValue )
+ + " current value: " + str( onosValue ) )
+ correctResults = main.FALSE
+ return consistent and correctResults
+ except Exception:
+ main.log.exception( "" )
+ main.cleanup()
+ main.exit()
diff --git a/TestON/tests/HAkillNodes/HAkillNodes.params b/TestON/tests/HAkillNodes/HAkillNodes.params
index 7d9c090..8b68d54 100644
--- a/TestON/tests/HAkillNodes/HAkillNodes.params
+++ b/TestON/tests/HAkillNodes/HAkillNodes.params
@@ -20,9 +20,7 @@
#CASE16: Install Distributed Primitives app
#CASE17: Check for basic functionality with distributed primitives
<testcases>1,2,8,3,4,5,14,16,17,[61,8,7,4,15,17,62],8,7,4,15,17,9,8,4,10,8,4,11,8,4,12,8,4,13</testcases>
- <imports>
- <path> /home/admin/OnosSystemTest/TestON/tests/HAkillNodes/dependencies/ </path>
- </imports>
+
<ENV>
<cellName>HA</cellName>
<appString>drivers,openflow,proxyarp,mobility</appString>
diff --git a/TestON/tests/HAkillNodes/HAkillNodes.py b/TestON/tests/HAkillNodes/HAkillNodes.py
index 7654097..d38ac36 100644
--- a/TestON/tests/HAkillNodes/HAkillNodes.py
+++ b/TestON/tests/HAkillNodes/HAkillNodes.py
@@ -51,6 +51,7 @@
import imp
import pexpect
import time
+ import json
main.log.info( "ONOS HA test: Restart a minority of ONOS nodes - " +
"initialization" )
main.case( "Setting up test environment" )
@@ -77,6 +78,11 @@
global ONOS5Port
global ONOS6Port
global ONOS7Port
+ # These are for csv plotting in jenkins
+ global labels
+ global data
+ labels = []
+ data = []
# FIXME: just get controller port from params?
# TODO: do we really need all these?
@@ -89,11 +95,8 @@
ONOS7Port = main.params[ 'CTRL' ][ 'port7' ]
try:
- fileName = "Counters"
- # TODO: Maybe make a library folder somewhere?
- path = main.params[ 'imports' ][ 'path' ]
- main.Counters = imp.load_source( fileName,
- path + fileName + ".py" )
+ from tests.HAsanity.dependencies.Counters import Counters
+ main.Counters = Counters()
except Exception as e:
main.log.exception( e )
main.cleanup()
@@ -280,7 +283,6 @@
port=main.params[ 'MNtcpdump' ][ 'port' ] )
main.step( "App Ids check" )
- time.sleep(60)
appCheck = main.TRUE
threads = []
for i in main.activeNodes:
@@ -305,6 +307,48 @@
handle.sendline( "git checkout -- tools/package/init/onos.conf" )
handle.expect( "\$" )
+ main.step( "Checking ONOS nodes" )
+ nodesOutput = []
+ nodeResults = main.TRUE
+ threads = []
+ for i in main.activeNodes:
+ t = main.Thread( target=main.CLIs[i].nodes,
+ name="nodes-" + str( i ),
+ args=[ ] )
+ threads.append( t )
+ t.start()
+
+ for t in threads:
+ t.join()
+ nodesOutput.append( t.result )
+ ips = [ main.nodes[node].ip_address for node in main.activeNodes ]
+ ips.sort()
+ for i in nodesOutput:
+ try:
+ current = json.loads( i )
+ activeIps = []
+ currentResult = main.FALSE
+ for node in current:
+ if node['state'] == 'READY':
+ activeIps.append( node['ip'] )
+ activeIps.sort()
+ if ips == activeIps:
+ currentResult = main.TRUE
+ except ( ValueError, TypeError ):
+ main.log.error( "Error parsing nodes output" )
+ main.log.warn( repr( i ) )
+ currentResult = main.FALSE
+ nodeResults = nodeResults and currentResult
+ utilities.assert_equals( expect=main.TRUE, actual=nodeResults,
+ onpass="Nodes check successful",
+ onfail="Nodes check NOT successful" )
+
+ if not nodeResults:
+ for cli in main.CLIs:
+ main.log.debug( "{} components not ACTIVE: \n{}".format(
+ cli.name,
+ cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
+
if cliResults == main.FALSE:
main.log.error( "Failed to start ONOS, stopping test" )
main.cleanup()
@@ -496,6 +540,16 @@
assert utilities.assert_equals, "utilities.assert_equals not defined"
assert main.CLIs, "main.CLIs not defined"
assert main.nodes, "main.nodes not defined"
+ try:
+ labels
+ except NameError:
+ main.log.error( "labels not defined, setting to []" )
+ labels = []
+ try:
+ data
+ except NameError:
+ main.log.error( "data not defined, setting to []" )
+ data = []
main.case( "Adding host Intents" )
main.caseExplanation = "Discover hosts by using pingall then " +\
"assign predetermined host-to-host intents." +\
@@ -1320,20 +1374,26 @@
main.log.warn( title )
# get all intent keys in the cluster
keys = []
- for nodeStr in ONOSIntents:
- node = json.loads( nodeStr )
- for intent in node:
- keys.append( intent.get( 'id' ) )
- keys = set( keys )
- for key in keys:
- row = "%-13s" % key
+ try:
+ # Get the set of all intent keys
for nodeStr in ONOSIntents:
node = json.loads( nodeStr )
for intent in node:
- if intent.get( 'id', "Error" ) == key:
- row += "%-15s" % intent.get( 'state' )
- main.log.warn( row )
- # End table view
+ keys.append( intent.get( 'id' ) )
+ keys = set( keys )
+ # For each intent key, print the state on each node
+ for key in keys:
+ row = "%-13s" % key
+ for nodeStr in ONOSIntents:
+ node = json.loads( nodeStr )
+ for intent in node:
+ if intent.get( 'id', "Error" ) == key:
+ row += "%-15s" % intent.get( 'state' )
+ main.log.warn( row )
+ # End of intent state table
+ except ValueError as e:
+ main.log.exception( e )
+ main.log.debug( "nodeStr was: " + repr( nodeStr ) )
if intentsResults and not consistentIntents:
# print the json objects
@@ -1815,6 +1875,15 @@
main.log.debug( main.CLIs[node].leaders( jsonFormat=False ) )
main.log.debug( main.CLIs[node].partitions( jsonFormat=False ) )
+ main.step( "Rerun for election on the node(s) that were killed" )
+ runResults = main.TRUE
+ for i in main.kill:
+ runResults = runResults and\
+ main.CLIs[i].electionTestRun()
+ utilities.assert_equals( expect=main.TRUE, actual=runResults,
+ onpass="ONOS nodes reran for election topic",
+ onfail="Errror rerunning for election" )
+
def CASE7( self, main ):
"""
Check state after ONOS failure
@@ -1854,6 +1923,7 @@
main.step( "Read device roles from ONOS" )
ONOSMastership = []
+ mastershipCheck = main.FALSE
consistentMastership = True
rolesResults = True
threads = []
@@ -1901,6 +1971,8 @@
sort_keys=True,
indent=4,
separators=( ',', ': ' ) ) )
+ elif rolesResults and consistentMastership:
+ mastershipCheck = main.TRUE
# NOTE: we expect mastership to change on controller failure
@@ -2006,45 +2078,50 @@
# NOTE: this requires case 5 to pass for intentState to be set.
# maybe we should stop the test if that fails?
sameIntents = main.FALSE
- if intentState and intentState == ONOSIntents[ 0 ]:
- sameIntents = main.TRUE
- main.log.info( "Intents are consistent with before failure" )
- # TODO: possibly the states have changed? we may need to figure out
- # what the acceptable states are
- elif len( intentState ) == len( ONOSIntents[ 0 ] ):
- sameIntents = main.TRUE
- try:
- before = json.loads( intentState )
- after = json.loads( ONOSIntents[ 0 ] )
- for intent in before:
- if intent not in after:
- sameIntents = main.FALSE
- main.log.debug( "Intent is not currently in ONOS " +
- "(at least in the same form):" )
- main.log.debug( json.dumps( intent ) )
- except ( ValueError, TypeError ):
- main.log.exception( "Exception printing intents" )
- main.log.debug( repr( ONOSIntents[0] ) )
- main.log.debug( repr( intentState ) )
- if sameIntents == main.FALSE:
- try:
- main.log.debug( "ONOS intents before: " )
- main.log.debug( json.dumps( json.loads( intentState ),
- sort_keys=True, indent=4,
- separators=( ',', ': ' ) ) )
- main.log.debug( "Current ONOS intents: " )
- main.log.debug( json.dumps( json.loads( ONOSIntents[ 0 ] ),
- sort_keys=True, indent=4,
- separators=( ',', ': ' ) ) )
- except ( ValueError, TypeError ):
- main.log.exception( "Exception printing intents" )
- main.log.debug( repr( ONOSIntents[0] ) )
- main.log.debug( repr( intentState ) )
- utilities.assert_equals(
- expect=main.TRUE,
- actual=sameIntents,
- onpass="Intents are consistent with before failure",
- onfail="The Intents changed during failure" )
+ try:
+ intentState
+ except NameError:
+ main.log.warn( "No previous intent state was saved" )
+ else:
+ if intentState and intentState == ONOSIntents[ 0 ]:
+ sameIntents = main.TRUE
+ main.log.info( "Intents are consistent with before failure" )
+ # TODO: possibly the states have changed? we may need to figure out
+ # what the acceptable states are
+ elif len( intentState ) == len( ONOSIntents[ 0 ] ):
+ sameIntents = main.TRUE
+ try:
+ before = json.loads( intentState )
+ after = json.loads( ONOSIntents[ 0 ] )
+ for intent in before:
+ if intent not in after:
+ sameIntents = main.FALSE
+ main.log.debug( "Intent is not currently in ONOS " +
+ "(at least in the same form):" )
+ main.log.debug( json.dumps( intent ) )
+ except ( ValueError, TypeError ):
+ main.log.exception( "Exception printing intents" )
+ main.log.debug( repr( ONOSIntents[0] ) )
+ main.log.debug( repr( intentState ) )
+ if sameIntents == main.FALSE:
+ try:
+ main.log.debug( "ONOS intents before: " )
+ main.log.debug( json.dumps( json.loads( intentState ),
+ sort_keys=True, indent=4,
+ separators=( ',', ': ' ) ) )
+ main.log.debug( "Current ONOS intents: " )
+ main.log.debug( json.dumps( json.loads( ONOSIntents[ 0 ] ),
+ sort_keys=True, indent=4,
+ separators=( ',', ': ' ) ) )
+ except ( ValueError, TypeError ):
+ main.log.exception( "Exception printing intents" )
+ main.log.debug( repr( ONOSIntents[0] ) )
+ main.log.debug( repr( intentState ) )
+ utilities.assert_equals(
+ expect=main.TRUE,
+ actual=sameIntents,
+ onpass="Intents are consistent with before failure",
+ onfail="The Intents changed during failure" )
intentCheck = intentCheck and sameIntents
main.step( "Get the OF Table entries and compare to before " +
@@ -2056,7 +2133,6 @@
FlowTables = FlowTables and main.Mininet1.flowTableComp( flows[i], tmpFlows )
if FlowTables == main.FALSE:
main.log.warn( "Differences in flow table for switch: s{}".format( i + 1 ) )
-
utilities.assert_equals(
expect=main.TRUE,
actual=FlowTables,
@@ -2401,8 +2477,9 @@
hostsResults = hostsResults and currentHostsResult
hostAttachmentResults = hostAttachmentResults and\
hostAttachment
- topoResult = devicesResults and linksResults and\
- hostsResults and hostAttachmentResults
+ topoResult = ( devicesResults and linksResults
+ and hostsResults and ipResult and
+ hostAttachmentResults )
utilities.assert_equals( expect=True,
actual=topoResult,
onpass="ONOS topology matches Mininet",
@@ -2466,7 +2543,6 @@
controllerStr +
" is inconsistent with ONOS1" )
consistentClustersResult = main.FALSE
-
else:
main.log.error( "Error in getting dataplane clusters " +
"from ONOS" + controllerStr )
@@ -2487,6 +2563,7 @@
except ( ValueError, TypeError ):
main.log.exception( "Error parsing clusters[0]: " +
repr( clusters[0] ) )
+ numClusters = "ERROR"
clusterResults = main.FALSE
if numClusters == 1:
clusterResults = main.TRUE
@@ -2524,6 +2601,13 @@
onpass="Link are correct",
onfail="Links are incorrect" )
+ main.step( "Hosts are correct" )
+ utilities.assert_equals(
+ expect=main.TRUE,
+ actual=hostsResults,
+ onpass="Hosts are correct",
+ onfail="Hosts are incorrect" )
+
# FIXME: move this to an ONOS state case
main.step( "Checking ONOS nodes" )
nodesOutput = []
@@ -2560,6 +2644,11 @@
utilities.assert_equals( expect=main.TRUE, actual=nodeResults,
onpass="Nodes check successful",
onfail="Nodes check NOT successful" )
+ if not nodeResults:
+ for cli in main.CLIs:
+ main.log.debug( "{} components not ACTIVE: \n{}".format(
+ cli.name,
+ cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
def CASE9( self, main ):
"""
@@ -2849,16 +2938,10 @@
description = "Check that Leadership Election is still functional"
main.case( description )
- # NOTE: Need to re-run since being a canidate is not persistant
- # TODO: add check for "Command not found:" in the driver, this
- # means the election test app isn't loaded
+ # NOTE: Need to re-run after restarts since being a candidate is not persistent
- oldLeaders = [] # leaders by node before withdrawl from candidates
- newLeaders = [] # leaders by node after withdrawl from candidates
- oldAllCandidates = [] # list of lists of each nodes' candidates before
- newAllCandidates = [] # list of lists of each nodes' candidates after
- oldCandidates = [] # list of candidates from node 0 before withdrawl
- newCandidates = [] # list of candidates from node 0 after withdrawl
+ oldLeaders = [] # list of lists of each nodes' candidates before
+ newLeaders = [] # list of lists of each nodes' candidates after
oldLeader = '' # the old leader from oldLeaders, None if not same
newLeader = '' # the new leaders fron newLoeaders, None if not same
oldLeaderCLI = None # the CLI of the old leader used for re-electing
@@ -2884,43 +2967,36 @@
main.skipCase()
main.step( "Check that each node shows the same leader and candidates" )
- sameResult = main.TRUE
- failMessage = "Nodes have different leaders"
- for i in main.activeNodes:
- cli = main.CLIs[i]
- node = cli.specificLeaderCandidate( 'org.onosproject.election' )
- oldAllCandidates.append( node )
- if node:
- oldLeaders.append( node[ 0 ] )
- else:
- oldLeaders.append( None )
- oldCandidates = oldAllCandidates[ 0 ]
- if oldCandidates is None:
- oldCandidates = [ None ]
-
- # Check that each node has the same leader. Defines oldLeader
- if len( set( oldLeaders ) ) != 1:
- sameResult = main.FALSE
- main.log.error( "More than one leader present:" + str( oldLeaders ) )
- oldLeader = None
+ failMessage = "Nodes have different leaderboards"
+ def consistentLeaderboards( nodes ):
+ TOPIC = 'org.onosproject.election'
+ # FIXME: use threads
+ #FIXME: should we retry outside the function?
+ for n in range( 5 ): # Retry in case election is still happening
+ leaderList = []
+ # Get all leaderboards
+ for cli in nodes:
+ leaderList.append( cli.specificLeaderCandidate( TOPIC ) )
+ # Compare leaderboards
+ result = all( i == leaderList[0] for i in leaderList ) and\
+ leaderList is not None
+ main.log.debug( leaderList )
+ main.log.warn( result )
+ if result:
+ return ( result, leaderList )
+ time.sleep(5) #TODO: parameterize
+ main.log.error( "Inconsistent leaderboards:" + str( leaderList ) )
+ activeCLIs = [ main.CLIs[i] for i in main.activeNodes ]
+ sameResult, oldLeaders = consistentLeaderboards( activeCLIs )
+ if sameResult:
+ oldLeader = oldLeaders[ 0 ][ 0 ]
+ main.log.warn( oldLeader )
else:
- oldLeader = oldLeaders[ 0 ]
-
- # Check that each node's candidate list is the same
- candidateDiscrepancy = False # Boolean of candidate mismatches
- for candidates in oldAllCandidates:
- if candidates is None:
- main.log.warn( "Error getting candidates" )
- candidates = [ None ]
- if set( candidates ) != set( oldCandidates ):
- sameResult = main.FALSE
- candidateDiscrepancy = True
- if candidateDiscrepancy:
- failMessage += " and candidates"
+ oldLeader = None
utilities.assert_equals(
- expect=main.TRUE,
+ expect=True,
actual=sameResult,
- onpass="Leadership is consistent for the election topic",
+ onpass="Leaderboards are consistent for the election topic",
onfail=failMessage )
main.step( "Find current leader and withdraw" )
@@ -2945,56 +3021,30 @@
onfail="Node was not withdrawn from election" )
main.step( "Check that a new node was elected leader" )
- # FIXME: use threads
- newLeaderResult = main.TRUE
failMessage = "Nodes have different leaders"
-
# Get new leaders and candidates
- for i in main.activeNodes:
- cli = main.CLIs[i]
- node = cli.specificLeaderCandidate( 'org.onosproject.election' )
- # elections might no have finished yet
- if node[ 0 ] == 'none' and not expectNoLeader:
- main.log.info( "Node has no leader, waiting 5 seconds to be " +
- "sure elections are complete." )
- time.sleep(5)
- node = cli.specificLeaderCandidate( 'org.onosproject.election' )
- # election still isn't done or there is a problem
- if node[ 0 ] == 'none':
- main.log.error( "No leader was elected on at least 1 node" )
- newLeaderResult = main.FALSE
- newAllCandidates.append( node )
- newLeaders.append( node[ 0 ] )
- newCandidates = newAllCandidates[ 0 ]
-
- # Check that each node has the same leader. Defines newLeader
- if len( set( newLeaders ) ) != 1:
- newLeaderResult = main.FALSE
- main.log.error( "Nodes have different leaders: " +
- str( newLeaders ) )
- newLeader = None
+ newLeaderResult, newLeaders = consistentLeaderboards( activeCLIs )
+ if newLeaders[ 0 ][ 0 ] == 'none':
+ main.log.error( "No leader was elected on at least 1 node" )
+ if not expectNoLeader:
+ newLeaderResult = False
+ if newLeaderResult:
+ newLeader = newLeaders[ 0 ][ 0 ]
else:
- newLeader = newLeaders[ 0 ]
-
- # Check that each node's candidate list is the same
- for candidates in newAllCandidates:
- if set( candidates ) != set( newCandidates ):
- newLeaderResult = main.FALSE
- main.log.error( "Discrepancy in candidate lists detected" )
+ newLeader = None
# Check that the new leader is not the older leader, which was withdrawn
if newLeader == oldLeader:
- newLeaderResult = main.FALSE
+ newLeaderResult = False
main.log.error( "All nodes still see old leader: " + str( oldLeader ) +
" as the current leader" )
-
utilities.assert_equals(
- expect=main.TRUE,
+ expect=True,
actual=newLeaderResult,
onpass="Leadership election passed",
onfail="Something went wrong with Leadership election" )
- main.step( "Check that that new leader was the candidate of old leader")
+ main.step( "Check that that new leader was the candidate of old leader" )
# candidates[ 2 ] should become the top candidate after withdrawl
correctCandidateResult = main.TRUE
if expectNoLeader:
@@ -3004,12 +3054,17 @@
else:
main.log.info( "Expected no leader, got: " + str( newLeader ) )
correctCandidateResult = main.FALSE
- elif len( oldCandidates ) >= 3 and newLeader != oldCandidates[ 2 ]:
- correctCandidateResult = main.FALSE
- main.log.error( "Candidate {} was elected. {} should have had priority.".format(
- newLeader, oldCandidates[ 2 ] ) )
+ elif len( oldLeaders[0] ) >= 3:
+ if newLeader == oldLeaders[ 0 ][ 2 ]:
+ # correct leader was elected
+ correctCandidateResult = main.TRUE
+ else:
+ correctCandidateResult = main.FALSE
+ main.log.error( "Candidate {} was elected. {} should have had priority.".format(
+ newLeader, oldLeaders[ 0 ][ 2 ] ) )
else:
main.log.warn( "Could not determine who should be the correct leader" )
+ main.log.debug( oldLeaders[ 0 ] )
correctCandidateResult = main.FALSE
utilities.assert_equals(
expect=main.TRUE,
@@ -3029,55 +3084,23 @@
actual=runResult,
onpass="App re-ran for election",
onfail="App failed to run for election" )
+
main.step(
"Check that oldLeader is a candidate, and leader if only 1 node" )
# verify leader didn't just change
- positionResult = main.TRUE
- # Get new leaders and candidates, wait if oldLeader is not a candidate yet
-
- # Reset and reuse the new candidate and leaders lists
- newAllCandidates = []
- newCandidates = []
- newLeaders = []
- for i in main.activeNodes:
- cli = main.CLIs[i]
- node = cli.specificLeaderCandidate( 'org.onosproject.election' )
- if oldLeader not in node: # election might no have finished yet
- main.log.info( "Old Leader not elected, waiting 5 seconds to " +
- "be sure elections are complete" )
- time.sleep(5)
- node = cli.specificLeaderCandidate( 'org.onosproject.election' )
- if oldLeader not in node: # election still isn't done, errors
- main.log.error(
- "Old leader was not elected on at least one node" )
- positionResult = main.FALSE
- newAllCandidates.append( node )
- newLeaders.append( node[ 0 ] )
- newCandidates = newAllCandidates[ 0 ]
-
- # Check that each node has the same leader. Defines newLeader
- if len( set( newLeaders ) ) != 1:
- positionResult = main.FALSE
- main.log.error( "Nodes have different leaders: " +
- str( newLeaders ) )
- newLeader = None
- else:
- newLeader = newLeaders[ 0 ]
-
- # Check that each node's candidate list is the same
- for candidates in newAllCandidates:
- if set( candidates ) != set( newCandidates ):
- newLeaderResult = main.FALSE
- main.log.error( "Discrepancy in candidate lists detected" )
+ # Get new leaders and candidates
+ reRunLeaders = []
+ time.sleep( 5 ) # Parameterize
+ positionResult, reRunLeaders = consistentLeaderboards( activeCLIs )
# Check that the re-elected node is last on the candidate List
- if oldLeader != newCandidates[ -1 ]:
- main.log.error( "Old Leader (" + str( oldLeader ) + ") not in the proper position " +
- str( newCandidates ) )
+ if oldLeader != reRunLeaders[ 0 ][ -1 ]:
+ main.log.error( "Old Leader ({}) not in the proper position: {} ".format( str( oldLeader),
+ str( reRunLeaders[ 0 ] ) ) )
positionResult = main.FALSE
utilities.assert_equals(
- expect=main.TRUE,
+ expect=True,
actual=positionResult,
onpass="Old leader successfully re-ran for election",
onfail="Something went wrong with Leadership election after " +
@@ -3096,15 +3119,11 @@
# Variables for the distributed primitives tests
global pCounterName
- global iCounterName
global pCounterValue
- global iCounterValue
global onosSet
global onosSetName
pCounterName = "TestON-Partitions"
- iCounterName = "TestON-inMemory"
pCounterValue = 0
- iCounterValue = 0
onosSet = set([])
onosSetName = "TestON-set"
@@ -3131,7 +3150,6 @@
assert main.CLIs, "main.CLIs not defined"
assert main.nodes, "main.nodes not defined"
assert pCounterName, "pCounterName not defined"
- assert iCounterName, "iCounterName not defined"
assert onosSetName, "onosSetName not defined"
# NOTE: assert fails if value is 0/None/Empty/False
try:
@@ -3140,11 +3158,6 @@
main.log.error( "pCounterValue not defined, setting to 0" )
pCounterValue = 0
try:
- iCounterValue
- except NameError:
- main.log.error( "iCounterValue not defined, setting to 0" )
- iCounterValue = 0
- try:
onosSet
except NameError:
main.log.error( "onosSet not defined, setting to empty Set" )
@@ -3328,193 +3341,6 @@
onpass="Added counters are correct",
onfail="Added counters are incorrect" )
- # In-Memory counters
- main.step( "Increment and get an in-memory counter on each node" )
- iCounters = []
- addedIValues = []
- threads = []
- for i in main.activeNodes:
- t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
- name="icounterIncrement-" + str( i ),
- args=[ iCounterName ],
- kwargs={ "inMemory": True } )
- iCounterValue += 1
- addedIValues.append( iCounterValue )
- threads.append( t )
- t.start()
-
- for t in threads:
- t.join()
- iCounters.append( t.result )
- # Check that counter incremented numController times
- iCounterResults = True
- for i in addedIValues:
- tmpResult = i in iCounters
- iCounterResults = iCounterResults and tmpResult
- if not tmpResult:
- main.log.error( str( i ) + " is not in the in-memory "
- "counter incremented results" )
- utilities.assert_equals( expect=True,
- actual=iCounterResults,
- onpass="In-memory counter incremented",
- onfail="Error incrementing in-memory" +
- " counter" )
-
- main.step( "Get then Increment a in-memory counter on each node" )
- iCounters = []
- threads = []
- addedIValues = []
- for i in main.activeNodes:
- t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
- name="counterGetAndAdd-" + str( i ),
- args=[ iCounterName ],
- kwargs={ "inMemory": True } )
- addedIValues.append( iCounterValue )
- iCounterValue += 1
- threads.append( t )
- t.start()
-
- for t in threads:
- t.join()
- iCounters.append( t.result )
- # Check that counter incremented numController times
- iCounterResults = True
- for i in addedIValues:
- tmpResult = i in iCounters
- iCounterResults = iCounterResults and tmpResult
- if not tmpResult:
- main.log.error( str( i ) + " is not in in-memory "
- "counter incremented results" )
- utilities.assert_equals( expect=True,
- actual=iCounterResults,
- onpass="In-memory counter incremented",
- onfail="Error incrementing in-memory" +
- " counter" )
-
- main.step( "Counters we added have the correct values" )
- incrementCheck = main.Counters.counterCheck( iCounterName, iCounterValue )
- utilities.assert_equals( expect=main.TRUE,
- actual=incrementCheck,
- onpass="Added counters are correct",
- onfail="Added counters are incorrect" )
-
- main.step( "Add -8 to then get a in-memory counter on each node" )
- iCounters = []
- threads = []
- addedIValues = []
- for i in main.activeNodes:
- t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
- name="counterIncrement-" + str( i ),
- args=[ iCounterName ],
- kwargs={ "delta": -8, "inMemory": True } )
- iCounterValue += -8
- addedIValues.append( iCounterValue )
- threads.append( t )
- t.start()
-
- for t in threads:
- t.join()
- iCounters.append( t.result )
- # Check that counter incremented numController times
- iCounterResults = True
- for i in addedIValues:
- tmpResult = i in iCounters
- iCounterResults = iCounterResults and tmpResult
- if not tmpResult:
- main.log.error( str( i ) + " is not in in-memory "
- "counter incremented results" )
- utilities.assert_equals( expect=True,
- actual=pCounterResults,
- onpass="In-memory counter incremented",
- onfail="Error incrementing in-memory" +
- " counter" )
-
- main.step( "Add 5 to then get a in-memory counter on each node" )
- iCounters = []
- threads = []
- addedIValues = []
- for i in main.activeNodes:
- t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
- name="counterIncrement-" + str( i ),
- args=[ iCounterName ],
- kwargs={ "delta": 5, "inMemory": True } )
- iCounterValue += 5
- addedIValues.append( iCounterValue )
- threads.append( t )
- t.start()
-
- for t in threads:
- t.join()
- iCounters.append( t.result )
- # Check that counter incremented numController times
- iCounterResults = True
- for i in addedIValues:
- tmpResult = i in iCounters
- iCounterResults = iCounterResults and tmpResult
- if not tmpResult:
- main.log.error( str( i ) + " is not in in-memory "
- "counter incremented results" )
- utilities.assert_equals( expect=True,
- actual=pCounterResults,
- onpass="In-memory counter incremented",
- onfail="Error incrementing in-memory" +
- " counter" )
-
- main.step( "Get then add 5 to a in-memory counter on each node" )
- iCounters = []
- threads = []
- addedIValues = []
- for i in main.activeNodes:
- t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
- name="counterIncrement-" + str( i ),
- args=[ iCounterName ],
- kwargs={ "delta": 5, "inMemory": True } )
- addedIValues.append( iCounterValue )
- iCounterValue += 5
- threads.append( t )
- t.start()
-
- for t in threads:
- t.join()
- iCounters.append( t.result )
- # Check that counter incremented numController times
- iCounterResults = True
- for i in addedIValues:
- tmpResult = i in iCounters
- iCounterResults = iCounterResults and tmpResult
- if not tmpResult:
- main.log.error( str( i ) + " is not in in-memory "
- "counter incremented results" )
- utilities.assert_equals( expect=True,
- actual=iCounterResults,
- onpass="In-memory counter incremented",
- onfail="Error incrementing in-memory" +
- " counter" )
-
- main.step( "Counters we added have the correct values" )
- incrementCheck = main.Counters.counterCheck( iCounterName, iCounterValue )
- utilities.assert_equals( expect=main.TRUE,
- actual=incrementCheck,
- onpass="Added counters are correct",
- onfail="Added counters are incorrect" )
-
- main.step( "Check counters are consistant across nodes" )
- onosCounters, consistentCounterResults = main.Counters.consistentCheck()
- utilities.assert_equals( expect=main.TRUE,
- actual=consistentCounterResults,
- onpass="ONOS counters are consistent " +
- "across nodes",
- onfail="ONOS Counters are inconsistent " +
- "across nodes" )
-
- main.step( "Counters we added have the correct values" )
- incrementCheck = main.Counters.counterCheck( iCounterName, iCounterValue )
- incrementCheck = incrementCheck and \
- main.Counters.counterCheck( iCounterName, iCounterValue )
- utilities.assert_equals( expect=main.TRUE,
- actual=incrementCheck,
- onpass="Added counters are correct",
- onfail="Added counters are incorrect" )
# DISTRIBUTED SETS
main.step( "Distributed Set get" )
size = len( onosSet )
@@ -4464,50 +4290,3 @@
actual=getCheck,
onpass="Partitioned Transactional Map get values were correct",
onfail="Partitioned Transactional Map values incorrect" )
-
- main.step( "In-memory Transactional maps put" )
- tMapValue = "Testing"
- numKeys = 100
- putResult = True
- node = main.activeNodes[0]
- putResponses = main.CLIs[node].transactionalMapPut( numKeys, tMapValue, inMemory=True )
- if len( putResponses ) == 100:
- for i in putResponses:
- if putResponses[ i ][ 'value' ] != tMapValue:
- putResult = False
- else:
- putResult = False
- if not putResult:
- main.log.debug( "Put response values: " + str( putResponses ) )
- utilities.assert_equals( expect=True,
- actual=putResult,
- onpass="In-Memory Transactional Map put successful",
- onfail="In-Memory Transactional Map put values are incorrect" )
-
- main.step( "In-Memory Transactional maps get" )
- getCheck = True
- for n in range( 1, numKeys + 1 ):
- getResponses = []
- threads = []
- valueCheck = True
- for i in main.activeNodes:
- t = main.Thread( target=main.CLIs[i].transactionalMapGet,
- name="TMap-get-" + str( i ),
- args=[ "Key" + str( n ) ],
- kwargs={ "inMemory": True } )
- threads.append( t )
- t.start()
- for t in threads:
- t.join()
- getResponses.append( t.result )
- for node in getResponses:
- if node != tMapValue:
- valueCheck = False
- if not valueCheck:
- main.log.warn( "Values for key 'Key" + str( n ) + "' do not match:" )
- main.log.warn( getResponses )
- getCheck = getCheck and valueCheck
- utilities.assert_equals( expect=True,
- actual=getCheck,
- onpass="In-Memory Transactional Map get values were correct",
- onfail="In-Memory Transactional Map values incorrect" )
diff --git a/TestON/tests/HAkillNodes/dependencies/Counters.py b/TestON/tests/HAkillNodes/dependencies/Counters.py
index f3833eb..192b919 100644
--- a/TestON/tests/HAkillNodes/dependencies/Counters.py
+++ b/TestON/tests/HAkillNodes/dependencies/Counters.py
@@ -1,104 +1,105 @@
-def __init__( self ):
- self.default = ''
+import json
-def consistentCheck():
- """
- Checks that TestON counters are consistent across all nodes.
+class Counters():
- Returns the tuple (onosCounters, consistent)
- - onosCounters is the parsed json output of the counters command on all nodes
- - consistent is main.TRUE if all "TestON" counters are consitent across all
- nodes or main.FALSE
- """
- import json
- try:
- correctResults = main.TRUE
- # Get onos counters results
- onosCountersRaw = []
- threads = []
- for i in main.activeNodes:
- t = main.Thread( target=main.CLIs[i].counters,
- name="counters-" + str( i ) )
- threads.append( t )
- t.start()
- for t in threads:
- t.join()
- onosCountersRaw.append( t.result )
- onosCounters = []
- for i in range( len( main.activeNodes ) ):
- try:
- onosCounters.append( json.loads( onosCountersRaw[i] ) )
- except ( ValueError, TypeError ):
- main.log.error( "Could not parse counters response from ONOS" +
- str( main.activeNodes[i] + 1 ) )
- main.log.warn( repr( onosCountersRaw[ i ] ) )
- onosCounters.append( [] )
- return main.FALSE
+ def __init__( self ):
+ self.default = ''
- testCounters = {}
- # make a list of all the "TestON-*" counters in ONOS
- # lookes like a dict whose keys are the name of the ONOS node and values
- # are a list of the counters. I.E.
- # { "ONOS1": [ {"name":"TestON-inMemory","value":56},
- # {"name":"TestON-Partitions","value":56} ]
- # }
- # NOTE: There is an assumtion that all nodes are active
- # based on the above for loops
- for controller in enumerate( onosCounters ):
- for dbType in controller[1]:
- for dbName, items in dbType.iteritems():
- for item in items:
- if 'TestON' in item['name']:
- node = 'ONOS' + str( main.activeNodes[ controller[0] ] + 1 )
- try:
- testCounters[node].append( item )
- except KeyError:
- testCounters[node] = [ item ]
- # compare the counters on each node
- firstV = testCounters.values()[0]
- tmp = [ v == firstV for k, v in testCounters.iteritems() ]
- if all( tmp ):
- consistent = main.TRUE
- else:
- consistent = main.FALSE
- main.log.error( "ONOS nodes have different values for counters:\n" +
- testCounters )
- return ( onosCounters, consistent )
- except Exception:
- main.log.exception( "" )
- main.cleanup()
- main.exit()
+ def consistentCheck( self ):
+ """
+ Checks that TestON counters are consistent across all nodes.
-def counterCheck( counterName, counterValue ):
- """
- Checks that TestON counters are consistent across all nodes and that
- specified counter is in ONOS with the given value
- """
- import json
- correctResults = main.TRUE
- # Get onos counters results and consistentCheck
- onosCounters, consistent = main.Counters.consistentCheck()
- # Check for correct values
- for i in range( len( main.activeNodes ) ):
- current = onosCounters[i]
- onosValue = None
+ Returns the tuple (onosCounters, consistent)
+ - onosCounters is the parsed json output of the counters command on all nodes
+ - consistent is main.TRUE if all "TestON" counters are consistent across all
+ nodes or main.FALSE
+ """
try:
- for database in current:
- database = database.values()[0]
- for counter in database:
- if counter.get( 'name' ) == counterName:
- onosValue = counter.get( 'value' )
- break
- except AttributeError, e:
- node = str( main.activeNodes[i] + 1 )
- main.log.error( "ONOS" + node + " counters result " +
- "is not as expected" )
- correctResults = main.FALSE
- if onosValue == counterValue:
- main.log.info( counterName + " counter value is correct" )
- else:
- main.log.error( counterName + " counter value is incorrect," +
- " expected value: " + str( counterValue )
- + " current value: " + str( onosValue ) )
- correctResults = main.FALSE
- return consistent and correctResults
+ correctResults = main.TRUE
+ # Get onos counters results
+ onosCountersRaw = []
+ threads = []
+ for i in main.activeNodes:
+ t = main.Thread( target=utilities.retry,
+ name="counters-" + str( i ),
+ args=[ main.CLIs[i].counters, [ None ] ],
+ kwargs= { 'sleep': 5, 'attempts': 5,
+ 'randomTime': True } )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ onosCountersRaw.append( t.result )
+ onosCounters = []
+ for i in range( len( main.activeNodes ) ):
+ try:
+ onosCounters.append( json.loads( onosCountersRaw[i] ) )
+ except ( ValueError, TypeError ):
+ main.log.error( "Could not parse counters response from ONOS" +
+ str( main.activeNodes[i] + 1 ) )
+ main.log.warn( repr( onosCountersRaw[ i ] ) )
+ onosCounters.append( [] )
+
+ testCounters = {}
+ # make a list of all the "TestON-*" counters in ONOS
+ # looks like a dict whose keys are the name of the ONOS node and values
+ # are a list of the counters. I.E.
+ # { "ONOS1": [ { "name":"TestON-Partitions","value":56} ]
+ # }
+ # NOTE: There is an assumption that all nodes are active
+ # based on the above for loops
+ for controller in enumerate( onosCounters ):
+ for key, value in controller[1].iteritems():
+ if 'TestON' in key:
+ node = 'ONOS' + str( controller[0] + 1 )
+ try:
+ testCounters[node].append( { key: value } )
+ except KeyError:
+ testCounters[node] = [ { key: value } ]
+ # compare the counters on each node
+ firstV = testCounters.values()[0]
+ tmp = [ v == firstV for k, v in testCounters.iteritems() ]
+ if all( tmp ):
+ consistent = main.TRUE
+ else:
+ consistent = main.FALSE
+ main.log.error( "ONOS nodes have different values for counters:\n" +
+ testCounters )
+ return ( onosCounters, consistent )
+ except Exception:
+ main.log.exception( "" )
+ main.cleanup()
+ main.exit()
+
+ def counterCheck( self, counterName, counterValue ):
+ """
+ Checks that TestON counters are consistent across all nodes and that
+ specified counter is in ONOS with the given value
+ """
+ try:
+ correctResults = main.TRUE
+ # Get onos counters results and consistentCheck
+ onosCounters, consistent = self.consistentCheck()
+ # Check for correct values
+ for i in range( len( main.activeNodes ) ):
+ current = onosCounters[i]
+ onosValue = None
+ try:
+ onosValue = current.get( counterName )
+ except AttributeError, e:
+ node = str( main.activeNodes[i] + 1 )
+ main.log.error( "ONOS" + node + " counters result " +
+ "is not as expected" )
+ correctResults = main.FALSE
+ if onosValue == counterValue:
+ main.log.info( counterName + " counter value is correct" )
+ else:
+ main.log.error( counterName + " counter value is incorrect," +
+ " expected value: " + str( counterValue )
+ + " current value: " + str( onosValue ) )
+ correctResults = main.FALSE
+ return consistent and correctResults
+ except Exception:
+ main.log.exception( "" )
+ main.cleanup()
+ main.exit()
diff --git a/TestON/tests/HAsanity/HAsanity.params b/TestON/tests/HAsanity/HAsanity.params
index b8455c0..e1315f2 100644
--- a/TestON/tests/HAsanity/HAsanity.params
+++ b/TestON/tests/HAsanity/HAsanity.params
@@ -19,9 +19,7 @@
#CASE17: Check for basic functionality with distributed primitives
#1,2,8,21,8,3,4,5,14,16,17,[6],8,7,4,15,17,9,8,4,10,8,4,11,8,4,12,8,4,13
<testcases>1,2,8,21,8,3,4,5,14,16,17,[6],8,7,4,15,17,9,8,4,10,8,4,11,8,4,12,8,4,13</testcases>
- <imports>
- <path> /home/admin/OnosSystemTest/TestON/tests/HAsanity/dependencies/ </path>
- </imports>
+
<ENV>
<cellName>HA</cellName>
<appString>drivers,openflow,proxyarp,mobility</appString>
diff --git a/TestON/tests/HAsanity/HAsanity.py b/TestON/tests/HAsanity/HAsanity.py
index 624b720..312da64 100644
--- a/TestON/tests/HAsanity/HAsanity.py
+++ b/TestON/tests/HAsanity/HAsanity.py
@@ -50,12 +50,12 @@
"""
import imp
import time
+ import json
main.log.info( "ONOS HA Sanity test - initialization" )
main.case( "Setting up test environment" )
main.caseExplanation = "Setup the test environment including " +\
"installing ONOS, starting Mininet and ONOS" +\
"cli sessions."
- # TODO: save all the timers and output them for plotting
# load some variables from the params file
PULLCODE = False
@@ -77,6 +77,11 @@
global ONOS5Port
global ONOS6Port
global ONOS7Port
+ # These are for csv plotting in jenkins
+ global labels
+ global data
+ labels = []
+ data = []
# FIXME: just get controller port from params?
# TODO: do we really need all these?
@@ -89,10 +94,8 @@
ONOS7Port = main.params[ 'CTRL' ][ 'port7' ]
try:
- fileName = "Counters"
- path = main.params[ 'imports' ][ 'path' ]
- main.Counters = imp.load_source( fileName,
- path + fileName + ".py" )
+ from tests.HAsanity.dependencies.Counters import Counters
+ main.Counters = Counters()
except Exception as e:
main.log.exception( e )
main.cleanup()
@@ -237,6 +240,9 @@
onpass="ONOS cli startup successful",
onfail="ONOS cli startup failed" )
+ # Create a list of active nodes for use when some nodes are stopped
+ main.activeNodes = [ i for i in range( 0, len( main.CLIs ) ) ]
+
if main.params[ 'tcpdump' ].lower() == "true":
main.step( "Start Packet Capture MN" )
main.Mininet2.startTcpdump(
@@ -246,10 +252,9 @@
port=main.params[ 'MNtcpdump' ][ 'port' ] )
main.step( "App Ids check" )
- time.sleep(60)
appCheck = main.TRUE
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].appToIDCheck,
name="appToIDCheck-" + str( i ),
args=[] )
@@ -260,12 +265,55 @@
t.join()
appCheck = appCheck and t.result
if appCheck != main.TRUE:
- main.log.warn( main.CLIs[0].apps() )
- main.log.warn( main.CLIs[0].appIDs() )
+ node = main.activeNodes[0]
+ main.log.warn( main.CLIs[node].apps() )
+ main.log.warn( main.CLIs[node].appIDs() )
utilities.assert_equals( expect=main.TRUE, actual=appCheck,
onpass="App Ids seem to be correct",
onfail="Something is wrong with app Ids" )
+ main.step( "Checking ONOS nodes" )
+ nodesOutput = []
+ nodeResults = main.TRUE
+ threads = []
+ for i in main.activeNodes:
+ t = main.Thread( target=main.CLIs[i].nodes,
+ name="nodes-" + str( i ),
+ args=[ ] )
+ threads.append( t )
+ t.start()
+
+ for t in threads:
+ t.join()
+ nodesOutput.append( t.result )
+ ips = [ main.nodes[node].ip_address for node in main.activeNodes ]
+ ips.sort()
+ for i in nodesOutput:
+ try:
+ current = json.loads( i )
+ activeIps = []
+ currentResult = main.FALSE
+ for node in current:
+ if node['state'] == 'READY':
+ activeIps.append( node['ip'] )
+ activeIps.sort()
+ if ips == activeIps:
+ currentResult = main.TRUE
+ except ( ValueError, TypeError ):
+ main.log.error( "Error parsing nodes output" )
+ main.log.warn( repr( i ) )
+ currentResult = main.FALSE
+ nodeResults = nodeResults and currentResult
+ utilities.assert_equals( expect=main.TRUE, actual=nodeResults,
+ onpass="Nodes check successful",
+ onfail="Nodes check NOT successful" )
+
+ if not nodeResults:
+ for cli in main.CLIs:
+ main.log.debug( "{} components not ACTIVE: \n{}".format(
+ cli.name,
+ cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
+
if cliResults == main.FALSE:
main.log.error( "Failed to start ONOS, stopping test" )
main.cleanup()
@@ -353,6 +401,7 @@
ipList = [ ]
deviceList = []
+ onosCli = main.CLIs[ main.activeNodes[0] ]
try:
# Assign mastership to specific controllers. This assignment was
# determined for a 7 node cluser, but will work with any sized
@@ -362,45 +411,45 @@
if i == 1:
c = 0
ip = main.nodes[ c ].ip_address # ONOS1
- deviceId = main.ONOScli1.getDevice( "1000" ).get( 'id' )
+ deviceId = onosCli.getDevice( "1000" ).get( 'id' )
elif i == 2:
c = 1 % main.numCtrls
ip = main.nodes[ c ].ip_address # ONOS2
- deviceId = main.ONOScli1.getDevice( "2000" ).get( 'id' )
+ deviceId = onosCli.getDevice( "2000" ).get( 'id' )
elif i == 3:
c = 1 % main.numCtrls
ip = main.nodes[ c ].ip_address # ONOS2
- deviceId = main.ONOScli1.getDevice( "3000" ).get( 'id' )
+ deviceId = onosCli.getDevice( "3000" ).get( 'id' )
elif i == 4:
c = 3 % main.numCtrls
ip = main.nodes[ c ].ip_address # ONOS4
- deviceId = main.ONOScli1.getDevice( "3004" ).get( 'id' )
+ deviceId = onosCli.getDevice( "3004" ).get( 'id' )
elif i == 5:
c = 2 % main.numCtrls
ip = main.nodes[ c ].ip_address # ONOS3
- deviceId = main.ONOScli1.getDevice( "5000" ).get( 'id' )
+ deviceId = onosCli.getDevice( "5000" ).get( 'id' )
elif i == 6:
c = 2 % main.numCtrls
ip = main.nodes[ c ].ip_address # ONOS3
- deviceId = main.ONOScli1.getDevice( "6000" ).get( 'id' )
+ deviceId = onosCli.getDevice( "6000" ).get( 'id' )
elif i == 7:
c = 5 % main.numCtrls
ip = main.nodes[ c ].ip_address # ONOS6
- deviceId = main.ONOScli1.getDevice( "6007" ).get( 'id' )
+ deviceId = onosCli.getDevice( "6007" ).get( 'id' )
elif i >= 8 and i <= 17:
c = 4 % main.numCtrls
ip = main.nodes[ c ].ip_address # ONOS5
dpid = '3' + str( i ).zfill( 3 )
- deviceId = main.ONOScli1.getDevice( dpid ).get( 'id' )
+ deviceId = onosCli.getDevice( dpid ).get( 'id' )
elif i >= 18 and i <= 27:
c = 6 % main.numCtrls
ip = main.nodes[ c ].ip_address # ONOS7
dpid = '6' + str( i ).zfill( 3 )
- deviceId = main.ONOScli1.getDevice( dpid ).get( 'id' )
+ deviceId = onosCli.getDevice( dpid ).get( 'id' )
elif i == 28:
c = 0
ip = main.nodes[ c ].ip_address # ONOS1
- deviceId = main.ONOScli1.getDevice( "2800" ).get( 'id' )
+ deviceId = onosCli.getDevice( "2800" ).get( 'id' )
else:
main.log.error( "You didn't write an else statement for " +
"switch s" + str( i ) )
@@ -408,13 +457,12 @@
# Assign switch
assert deviceId, "No device id for s" + str( i ) + " in ONOS"
# TODO: make this controller dynamic
- roleCall = roleCall and main.ONOScli1.deviceRole( deviceId,
- ip )
+ roleCall = roleCall and onosCli.deviceRole( deviceId, ip )
ipList.append( ip )
deviceList.append( deviceId )
except ( AttributeError, AssertionError ):
main.log.exception( "Something is wrong with ONOS device view" )
- main.log.info( main.ONOScli1.devices() )
+ main.log.info( onosCli.devices() )
utilities.assert_equals(
expect=main.TRUE,
actual=roleCall,
@@ -430,7 +478,7 @@
ip = ipList[i]
deviceId = deviceList[i]
# Check assignment
- master = main.ONOScli1.getRole( deviceId ).get( 'master' )
+ master = onosCli.getRole( deviceId ).get( 'master' )
if ip in master:
roleCheck = roleCheck and main.TRUE
else:
@@ -457,6 +505,16 @@
assert utilities.assert_equals, "utilities.assert_equals not defined"
assert main.CLIs, "main.CLIs not defined"
assert main.nodes, "main.nodes not defined"
+ try:
+ labels
+ except NameError:
+ main.log.error( "labels not defined, setting to []" )
+ labels = []
+ try:
+ data
+ except NameError:
+ main.log.error( "data not defined, setting to []" )
+ data = []
main.case( "Adding host Intents" )
main.caseExplanation = "Discover hosts by using pingall then " +\
"assign predetermined host-to-host intents." +\
@@ -466,7 +524,8 @@
# install onos-app-fwd
main.step( "Install reactive forwarding app" )
- installResults = main.CLIs[0].activateApp( "org.onosproject.fwd" )
+ onosCli = main.CLIs[ main.activeNodes[0] ]
+ installResults = onosCli.activateApp( "org.onosproject.fwd" )
utilities.assert_equals( expect=main.TRUE, actual=installResults,
onpass="Install fwd successful",
onfail="Install fwd failed" )
@@ -474,7 +533,7 @@
main.step( "Check app ids" )
appCheck = main.TRUE
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].appToIDCheck,
name="appToIDCheck-" + str( i ),
args=[] )
@@ -485,8 +544,8 @@
t.join()
appCheck = appCheck and t.result
if appCheck != main.TRUE:
- main.log.warn( main.CLIs[0].apps() )
- main.log.warn( main.CLIs[0].appIDs() )
+ main.log.warn( onosCli.apps() )
+ main.log.warn( onosCli.appIDs() )
utilities.assert_equals( expect=main.TRUE, actual=appCheck,
onpass="App Ids seem to be correct",
onfail="Something is wrong with app Ids" )
@@ -515,7 +574,8 @@
time.sleep( 11 )
# uninstall onos-app-fwd
main.step( "Uninstall reactive forwarding app" )
- uninstallResult = main.CLIs[0].deactivateApp( "org.onosproject.fwd" )
+ node = main.activeNodes[0]
+ uninstallResult = main.CLIs[node].deactivateApp( "org.onosproject.fwd" )
utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
onpass="Uninstall fwd successful",
onfail="Uninstall fwd failed" )
@@ -523,7 +583,7 @@
main.step( "Check app ids" )
threads = []
appCheck2 = main.TRUE
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].appToIDCheck,
name="appToIDCheck-" + str( i ),
args=[] )
@@ -534,8 +594,9 @@
t.join()
appCheck2 = appCheck2 and t.result
if appCheck2 != main.TRUE:
- main.log.warn( main.CLIs[0].apps() )
- main.log.warn( main.CLIs[0].appIDs() )
+ node = main.activeNodes[0]
+ main.log.warn( main.CLIs[node].apps() )
+ main.log.warn( main.CLIs[node].appIDs() )
utilities.assert_equals( expect=main.TRUE, actual=appCheck2,
onpass="App Ids seem to be correct",
onfail="Something is wrong with app Ids" )
@@ -554,16 +615,17 @@
host2 = "00:00:00:00:00:" + \
str( hex( i + 10 )[ 2: ] ).zfill( 2 ).upper()
# NOTE: getHost can return None
- host1Dict = main.ONOScli1.getHost( host1 )
- host2Dict = main.ONOScli1.getHost( host2 )
+ host1Dict = onosCli.getHost( host1 )
+ host2Dict = onosCli.getHost( host2 )
host1Id = None
host2Id = None
if host1Dict and host2Dict:
host1Id = host1Dict.get( 'id', None )
host2Id = host2Dict.get( 'id', None )
if host1Id and host2Id:
- nodeNum = ( i % main.numCtrls )
- tmpId = main.CLIs[ nodeNum ].addHostIntent( host1Id, host2Id )
+ nodeNum = ( i % len( main.activeNodes ) )
+ node = main.activeNodes[nodeNum]
+ tmpId = main.CLIs[node].addHostIntent( host1Id, host2Id )
if tmpId:
main.log.info( "Added intent with id: " + tmpId )
intentIds.append( tmpId )
@@ -573,7 +635,8 @@
else:
main.log.error( "Error, getHost() failed for h" + str( i ) +
" and/or h" + str( i + 10 ) )
- hosts = main.CLIs[ 0 ].hosts()
+ node = main.activeNodes[0]
+ hosts = main.CLIs[node].hosts()
main.log.warn( "Hosts output: " )
try:
main.log.warn( json.dumps( json.loads( hosts ),
@@ -588,7 +651,7 @@
onfail="Error looking up host ids" )
intentStart = time.time()
- onosIds = main.ONOScli1.getAllIntentsId()
+ onosIds = onosCli.getAllIntentsId()
main.log.info( "Submitted intents: " + str( intentIds ) )
main.log.info( "Intents in ONOS: " + str( onosIds ) )
for intent in intentIds:
@@ -601,7 +664,7 @@
else:
intentStop = None
# Print the intent states
- intents = main.ONOScli1.intents()
+ intents = onosCli.intents()
intentStates = []
installedCheck = True
main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
@@ -627,7 +690,7 @@
count += 1
main.log.info( "%-6s%-15s%-15s" %
( str( count ), str( i ), str( s ) ) )
- leaders = main.ONOScli1.leaders()
+ leaders = onosCli.leaders()
try:
missing = False
if leaders:
@@ -654,12 +717,12 @@
main.log.error( repr( leaders ) )
# Check all nodes
if missing:
- for node in main.CLIs:
- response = node.leaders( jsonFormat=False)
- main.log.warn( str( node.name ) + " leaders output: \n" +
+ for i in main.activeNodes:
+ response = main.CLIs[i].leaders( jsonFormat=False)
+ main.log.warn( str( main.CLIs[i].name ) + " leaders output: \n" +
str( response ) )
- partitions = main.ONOScli1.partitions()
+ partitions = onosCli.partitions()
try:
if partitions :
parsedPartitions = json.loads( partitions )
@@ -674,7 +737,7 @@
except ( ValueError, TypeError ):
main.log.exception( "Error parsing partitions" )
main.log.error( repr( partitions ) )
- pendingMap = main.ONOScli1.pendingMap()
+ pendingMap = onosCli.pendingMap()
try:
if pendingMap :
parsedPending = json.loads( pendingMap )
@@ -695,21 +758,21 @@
main.log.error( "Error in pushing host intents to ONOS" )
main.step( "Intent Anti-Entropy dispersion" )
- for i in range(100):
+ for j in range(100):
correct = True
main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
- for cli in main.CLIs:
+ for i in main.activeNodes:
onosIds = []
- ids = cli.getAllIntentsId()
+ ids = main.CLIs[i].getAllIntentsId()
onosIds.append( ids )
- main.log.debug( "Intents in " + cli.name + ": " +
+ main.log.debug( "Intents in " + main.CLIs[i].name + ": " +
str( sorted( onosIds ) ) )
if sorted( ids ) != sorted( intentIds ):
main.log.warn( "Set of intent IDs doesn't match" )
correct = False
break
else:
- intents = json.loads( cli.intents() )
+ intents = json.loads( main.CLIs[i].intents() )
for intent in intents:
if intent[ 'state' ] != "INSTALLED":
main.log.warn( "Intent " + intent[ 'id' ] +
@@ -727,7 +790,7 @@
main.log.info( "It took about " + str( gossipTime ) +
" seconds for all intents to appear in each node" )
gossipPeriod = int( main.params['timers']['gossip'] )
- maxGossipTime = gossipPeriod * len( main.nodes )
+ maxGossipTime = gossipPeriod * len( main.activeNodes )
utilities.assert_greater_equals(
expect=maxGossipTime, actual=gossipTime,
onpass="ECM anti-entropy for intents worked within " +
@@ -743,11 +806,11 @@
installedCheck = True
main.log.info( "Sleeping 60 seconds to see if intents are found" )
time.sleep( 60 )
- onosIds = main.ONOScli1.getAllIntentsId()
+ onosIds = onosCli.getAllIntentsId()
main.log.info( "Submitted intents: " + str( intentIds ) )
main.log.info( "Intents in ONOS: " + str( onosIds ) )
# Print the intent states
- intents = main.ONOScli1.intents()
+ intents = onosCli.intents()
intentStates = []
main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
count = 0
@@ -771,7 +834,7 @@
count += 1
main.log.info( "%-6s%-15s%-15s" %
( str( count ), str( i ), str( s ) ) )
- leaders = main.ONOScli1.leaders()
+ leaders = onosCli.leaders()
try:
missing = False
if leaders:
@@ -801,12 +864,13 @@
main.log.error( repr( leaders ) )
# Check all nodes
if missing:
- for node in main.CLIs:
+ for i in main.activeNodes:
+ node = main.CLIs[i]
response = node.leaders( jsonFormat=False)
main.log.warn( str( node.name ) + " leaders output: \n" +
str( response ) )
- partitions = main.ONOScli1.partitions()
+ partitions = onosCli.partitions()
try:
if partitions :
parsedPartitions = json.loads( partitions )
@@ -821,7 +885,7 @@
except ( ValueError, TypeError ):
main.log.exception( "Error parsing partitions" )
main.log.error( repr( partitions ) )
- pendingMap = main.ONOScli1.pendingMap()
+ pendingMap = onosCli.pendingMap()
try:
if pendingMap :
parsedPending = json.loads( pendingMap )
@@ -847,11 +911,12 @@
assert utilities.assert_equals, "utilities.assert_equals not defined"
assert main.CLIs, "main.CLIs not defined"
assert main.nodes, "main.nodes not defined"
- main.case( "Verify connectivity by sendind traffic across Intents" )
+ main.case( "Verify connectivity by sending traffic across Intents" )
main.caseExplanation = "Ping across added host intents to check " +\
"functionality and check the state of " +\
"the intent"
main.step( "Ping across added host intents" )
+ onosCli = main.CLIs[ main.activeNodes[0] ]
PingResult = main.TRUE
for i in range( 8, 18 ):
ping = main.Mininet1.pingHost( src="h" + str( i ),
@@ -869,7 +934,7 @@
# TODO: pretty print
main.log.warn( "ONOS1 intents: " )
try:
- tmpIntents = main.ONOScli1.intents()
+ tmpIntents = onosCli.intents()
main.log.warn( json.dumps( json.loads( tmpIntents ),
sort_keys=True,
indent=4,
@@ -888,7 +953,7 @@
while not installedCheck and loopCount < 40:
installedCheck = True
# Print the intent states
- intents = main.ONOScli1.intents()
+ intents = onosCli.intents()
intentStates = []
main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
count = 0
@@ -917,7 +982,7 @@
"INSTALLED state" )
main.step( "Check leadership of topics" )
- leaders = main.ONOScli1.leaders()
+ leaders = onosCli.leaders()
topicCheck = main.TRUE
try:
if leaders:
@@ -952,7 +1017,8 @@
# TODO: Check for a leader of these topics
# Check all nodes
if topicCheck:
- for node in main.CLIs:
+ for i in main.activeNodes:
+ node = main.CLIs[i]
response = node.leaders( jsonFormat=False)
main.log.warn( str( node.name ) + " leaders output: \n" +
str( response ) )
@@ -961,7 +1027,7 @@
onpass="intent Partitions is in leaders",
onfail="Some topics were lost " )
# Print partitions
- partitions = main.ONOScli1.partitions()
+ partitions = onosCli.partitions()
try:
if partitions :
parsedPartitions = json.loads( partitions )
@@ -977,7 +1043,7 @@
main.log.exception( "Error parsing partitions" )
main.log.error( repr( partitions ) )
# Print Pending Map
- pendingMap = main.ONOScli1.pendingMap()
+ pendingMap = onosCli.pendingMap()
try:
if pendingMap :
parsedPending = json.loads( pendingMap )
@@ -997,7 +1063,7 @@
"intents change" )
time.sleep( 60 )
# Print the intent states
- intents = main.ONOScli1.intents()
+ intents = onosCli.intents()
intentStates = []
main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
count = 0
@@ -1016,7 +1082,7 @@
count += 1
main.log.info( "%-6s%-15s%-15s" %
( str( count ), str( i ), str( s ) ) )
- leaders = main.ONOScli1.leaders()
+ leaders = onosCli.leaders()
try:
missing = False
if leaders:
@@ -1045,12 +1111,13 @@
main.log.exception( "Error parsing leaders" )
main.log.error( repr( leaders ) )
if missing:
- for node in main.CLIs:
+ for i in main.activeNodes:
+ node = main.CLIs[i]
response = node.leaders( jsonFormat=False)
main.log.warn( str( node.name ) + " leaders output: \n" +
str( response ) )
- partitions = main.ONOScli1.partitions()
+ partitions = onosCli.partitions()
try:
if partitions :
parsedPartitions = json.loads( partitions )
@@ -1065,7 +1132,7 @@
except ( ValueError, TypeError ):
main.log.exception( "Error parsing partitions" )
main.log.error( repr( partitions ) )
- pendingMap = main.ONOScli1.pendingMap()
+ pendingMap = onosCli.pendingMap()
try:
if pendingMap :
parsedPending = json.loads( pendingMap )
@@ -1080,7 +1147,8 @@
main.log.exception( "Error parsing pending map" )
main.log.error( repr( pendingMap ) )
# Print flowrules
- main.log.debug( main.CLIs[0].flows( jsonFormat=False ) )
+ node = main.activeNodes[0]
+ main.log.debug( main.CLIs[node].flows( jsonFormat=False ) )
main.step( "Wait a minute then ping again" )
# the wait is above
PingResult = main.TRUE
@@ -1100,7 +1168,7 @@
# TODO: pretty print
main.log.warn( "ONOS1 intents: " )
try:
- tmpIntents = main.ONOScli1.intents()
+ tmpIntents = onosCli.intents()
main.log.warn( json.dumps( json.loads( tmpIntents ),
sort_keys=True,
indent=4,
@@ -1137,7 +1205,7 @@
# Assert that each device has a master
rolesNotNull = main.TRUE
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].rolesNotNull,
name="rolesNotNull-" + str( i ),
args=[] )
@@ -1159,7 +1227,7 @@
consistentMastership = True
rolesResults = True
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].roles,
name="roles-" + str( i ),
args=[] )
@@ -1170,13 +1238,12 @@
t.join()
ONOSMastership.append( t.result )
- for i in range( main.numCtrls ):
+ for i in range( len( ONOSMastership ) ):
+ node = str( main.activeNodes[i] + 1 )
if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
- main.log.error( "Error in getting ONOS" + str( i + 1 ) +
- " roles" )
- main.log.warn(
- "ONOS" + str( i + 1 ) + " mastership response: " +
- repr( ONOSMastership[i] ) )
+ main.log.error( "Error in getting ONOS" + node + " roles" )
+ main.log.warn( "ONOS" + node + " mastership response: " +
+ repr( ONOSMastership[i] ) )
rolesResults = False
utilities.assert_equals(
expect=True,
@@ -1197,10 +1264,11 @@
onfail="ONOS nodes have different views of switch roles" )
if rolesResults and not consistentMastership:
- for i in range( main.numCtrls ):
+ for i in range( len( main.activeNodes ) ):
+ node = str( main.activeNodes[i] + 1 )
try:
main.log.warn(
- "ONOS" + str( i + 1 ) + " roles: ",
+ "ONOS" + node + " roles: ",
json.dumps(
json.loads( ONOSMastership[ i ] ),
sort_keys=True,
@@ -1220,7 +1288,7 @@
consistentIntents = True
intentsResults = True
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].intents,
name="intents-" + str( i ),
args=[],
@@ -1232,11 +1300,11 @@
t.join()
ONOSIntents.append( t.result )
- for i in range( main.numCtrls ):
+ for i in range( len( ONOSIntents ) ):
+ node = str( main.activeNodes[i] + 1 )
if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
- main.log.error( "Error in getting ONOS" + str( i + 1 ) +
- " intents" )
- main.log.warn( "ONOS" + str( i + 1 ) + " intents response: " +
+ main.log.error( "Error in getting ONOS" + node + " intents" )
+ main.log.warn( "ONOS" + node + " intents response: " +
repr( ONOSIntents[ i ] ) )
intentsResults = False
utilities.assert_equals(
@@ -1266,7 +1334,7 @@
# ... ... ...
# ... ... ...
title = " Id"
- for n in range( main.numCtrls ):
+ for n in main.activeNodes:
title += " " * 10 + "ONOS" + str( n + 1 )
main.log.warn( title )
# get all intent keys in the cluster
@@ -1294,22 +1362,23 @@
if intentsResults and not consistentIntents:
# print the json objects
- n = len(ONOSIntents)
- main.log.debug( "ONOS" + str( n ) + " intents: " )
+ n = str( main.activeNodes[-1] + 1 )
+ main.log.debug( "ONOS" + n + " intents: " )
main.log.debug( json.dumps( json.loads( ONOSIntents[ -1 ] ),
sort_keys=True,
indent=4,
separators=( ',', ': ' ) ) )
- for i in range( main.numCtrls ):
+ for i in range( len( ONOSIntents ) ):
+ node = str( main.activeNodes[i] + 1 )
if ONOSIntents[ i ] != ONOSIntents[ -1 ]:
- main.log.debug( "ONOS" + str( i + 1 ) + " intents: " )
+ main.log.debug( "ONOS" + node + " intents: " )
main.log.debug( json.dumps( json.loads( ONOSIntents[i] ),
sort_keys=True,
indent=4,
separators=( ',', ': ' ) ) )
else:
- main.log.debug( main.nodes[ i ].name + " intents match ONOS" +
- str( n ) + " intents" )
+ main.log.debug( "ONOS" + node + " intents match ONOS" +
+ n + " intents" )
elif intentsResults and consistentIntents:
intentCheck = main.TRUE
intentState = ONOSIntents[ 0 ]
@@ -1323,7 +1392,7 @@
consistentFlows = True
flowsResults = True
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].flows,
name="flows-" + str( i ),
args=[],
@@ -1338,8 +1407,8 @@
result = t.result
ONOSFlows.append( result )
- for i in range( main.numCtrls ):
- num = str( i + 1 )
+ for i in range( len( ONOSFlows ) ):
+ num = str( main.activeNodes[i] + 1 )
if not ONOSFlows[ i ] or "Error" in ONOSFlows[ i ]:
main.log.error( "Error in getting ONOS" + num + " flows" )
main.log.warn( "ONOS" + num + " flows response: " +
@@ -1375,16 +1444,16 @@
onfail="ONOS nodes have different flow counts" )
if flowsResults and not consistentFlows:
- for i in range( main.numCtrls ):
+ for i in range( len( ONOSFlows ) ):
+ node = str( main.activeNodes[i] + 1 )
try:
main.log.warn(
- "ONOS" + str( i + 1 ) + " flows: " +
+ "ONOS" + node + " flows: " +
json.dumps( json.loads( ONOSFlows[i] ), sort_keys=True,
indent=4, separators=( ',', ': ' ) ) )
except ( ValueError, TypeError ):
- main.log.warn(
- "ONOS" + str( i + 1 ) + " flows: " +
- repr( ONOSFlows[ i ] ) )
+ main.log.warn( "ONOS" + node + " flows: " +
+ repr( ONOSFlows[ i ] ) )
elif flowsResults and consistentFlows:
flowCheck = main.TRUE
flowState = ONOSFlows[ 0 ]
@@ -1444,7 +1513,7 @@
main.step( "Collecting topology information from ONOS" )
devices = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].devices,
name="devices-" + str( i ),
args=[ ] )
@@ -1456,7 +1525,7 @@
devices.append( t.result )
hosts = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].hosts,
name="hosts-" + str( i ),
args=[ ] )
@@ -1476,7 +1545,7 @@
ports = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].ports,
name="ports-" + str( i ),
args=[ ] )
@@ -1488,7 +1557,7 @@
ports.append( t.result )
links = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].links,
name="links-" + str( i ),
args=[ ] )
@@ -1500,7 +1569,7 @@
links.append( t.result )
clusters = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].clusters,
name="clusters-" + str( i ),
args=[ ] )
@@ -1516,7 +1585,7 @@
main.step( "Host view is consistent across ONOS nodes" )
consistentHostsResult = main.TRUE
for controller in range( len( hosts ) ):
- controllerStr = str( controller + 1 )
+ controllerStr = str( main.activeNodes[controller] + 1 )
if hosts[ controller ] and "Error" not in hosts[ controller ]:
if hosts[ controller ] == hosts[ 0 ]:
continue
@@ -1543,7 +1612,7 @@
main.step( "Each host has an IP address" )
ipResult = main.TRUE
for controller in range( 0, len( hosts ) ):
- controllerStr = str( controller + 1 )
+ controllerStr = str( main.activeNodes[controller] + 1 )
if hosts[ controller ]:
for host in hosts[ controller ]:
if not host.get( 'ipAddresses', [ ] ):
@@ -1560,7 +1629,7 @@
main.step( "Cluster view is consistent across ONOS nodes" )
consistentClustersResult = main.TRUE
for controller in range( len( clusters ) ):
- controllerStr = str( controller + 1 )
+ controllerStr = str( main.activeNodes[controller] + 1 )
if "Error" not in clusters[ controller ]:
if clusters[ controller ] == clusters[ 0 ]:
continue
@@ -1605,16 +1674,15 @@
mnSwitches = main.Mininet1.getSwitches()
mnLinks = main.Mininet1.getLinks()
mnHosts = main.Mininet1.getHosts()
- for controller in range( main.numCtrls ):
- controllerStr = str( controller + 1 )
+ for controller in main.activeNodes:
+ controllerStr = str( main.activeNodes[controller] + 1 )
if devices[ controller ] and ports[ controller ] and\
"Error" not in devices[ controller ] and\
"Error" not in ports[ controller ]:
-
- currentDevicesResult = main.Mininet1.compareSwitches(
- mnSwitches,
- json.loads( devices[ controller ] ),
- json.loads( ports[ controller ] ) )
+ currentDevicesResult = main.Mininet1.compareSwitches(
+ mnSwitches,
+ json.loads( devices[ controller ] ),
+ json.loads( ports[ controller ] ) )
else:
currentDevicesResult = main.FALSE
utilities.assert_equals( expect=main.TRUE,
@@ -1708,7 +1776,7 @@
# Assert that each device has a master
rolesNotNull = main.TRUE
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].rolesNotNull,
name="rolesNotNull-" + str( i ),
args=[ ] )
@@ -1730,7 +1798,7 @@
consistentMastership = True
rolesResults = True
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].roles,
name="roles-" + str( i ),
args=[] )
@@ -1741,13 +1809,12 @@
t.join()
ONOSMastership.append( t.result )
- for i in range( main.numCtrls ):
+ for i in range( len( ONOSMastership ) ):
+ node = str( main.activeNodes[i] + 1 )
if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
- main.log.error( "Error in getting ONOS" + str( i + 1 ) +
- " roles" )
- main.log.warn(
- "ONOS" + str( i + 1 ) + " mastership response: " +
- repr( ONOSMastership[i] ) )
+ main.log.error( "Error in getting ONOS" + node + " roles" )
+ main.log.warn( "ONOS" + node + " mastership response: " +
+ repr( ONOSMastership[i] ) )
rolesResults = False
utilities.assert_equals(
expect=True,
@@ -1768,16 +1835,13 @@
onfail="ONOS nodes have different views of switch roles" )
if rolesResults and not consistentMastership:
- for i in range( main.numCtrls ):
- main.log.warn(
- "ONOS" + str( i + 1 ) + " roles: ",
- json.dumps(
- json.loads( ONOSMastership[ i ] ),
- sort_keys=True,
- indent=4,
- separators=( ',', ': ' ) ) )
- elif rolesResults and not consistentMastership:
- mastershipCheck = main.TRUE
+ for i in range( len( ONOSMastership ) ):
+ node = str( main.activeNodes[i] + 1 )
+ main.log.warn( "ONOS" + node + " roles: ",
+ json.dumps( json.loads( ONOSMastership[ i ] ),
+ sort_keys=True,
+ indent=4,
+ separators=( ',', ': ' ) ) )
description2 = "Compare switch roles from before failure"
main.step( description2 )
@@ -1817,7 +1881,7 @@
consistentIntents = True
intentsResults = True
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].intents,
name="intents-" + str( i ),
args=[],
@@ -1829,11 +1893,11 @@
t.join()
ONOSIntents.append( t.result )
- for i in range( main.numCtrls ):
+ for i in range( len( ONOSIntents) ):
+ node = str( main.activeNodes[i] + 1 )
if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
- main.log.error( "Error in getting ONOS" + str( i + 1 ) +
- " intents" )
- main.log.warn( "ONOS" + str( i + 1 ) + " intents response: " +
+ main.log.error( "Error in getting ONOS" + node + " intents" )
+ main.log.warn( "ONOS" + node + " intents response: " +
repr( ONOSIntents[ i ] ) )
intentsResults = False
utilities.assert_equals(
@@ -1856,7 +1920,7 @@
# ... ... ...
# ... ... ...
title = " ID"
- for n in range( main.numCtrls ):
+ for n in main.activeNodes:
title += " " * 10 + "ONOS" + str( n + 1 )
main.log.warn( title )
# get all intent keys in the cluster
@@ -1896,8 +1960,9 @@
main.log.info( dict( out ) )
if intentsResults and not consistentIntents:
- for i in range( main.numCtrls ):
- main.log.warn( "ONOS" + str( i + 1 ) + " intents: " )
+ for i in range( len( main.activeNodes ) ):
+ node = str( main.activeNodes[i] + 1 )
+ main.log.warn( "ONOS" + node + " intents: " )
main.log.warn( json.dumps(
json.loads( ONOSIntents[ i ] ),
sort_keys=True,
@@ -1912,45 +1977,50 @@
# NOTE: this requires case 5 to pass for intentState to be set.
# maybe we should stop the test if that fails?
sameIntents = main.FALSE
- if intentState and intentState == ONOSIntents[ 0 ]:
- sameIntents = main.TRUE
- main.log.info( "Intents are consistent with before failure" )
- # TODO: possibly the states have changed? we may need to figure out
- # what the acceptable states are
- elif len( intentState ) == len( ONOSIntents[ 0 ] ):
- sameIntents = main.TRUE
- try:
- before = json.loads( intentState )
- after = json.loads( ONOSIntents[ 0 ] )
- for intent in before:
- if intent not in after:
- sameIntents = main.FALSE
- main.log.debug( "Intent is not currently in ONOS " +
- "(at least in the same form):" )
- main.log.debug( json.dumps( intent ) )
- except ( ValueError, TypeError ):
- main.log.exception( "Exception printing intents" )
- main.log.debug( repr( ONOSIntents[0] ) )
- main.log.debug( repr( intentState ) )
- if sameIntents == main.FALSE:
- try:
- main.log.debug( "ONOS intents before: " )
- main.log.debug( json.dumps( json.loads( intentState ),
- sort_keys=True, indent=4,
- separators=( ',', ': ' ) ) )
- main.log.debug( "Current ONOS intents: " )
- main.log.debug( json.dumps( json.loads( ONOSIntents[ 0 ] ),
- sort_keys=True, indent=4,
- separators=( ',', ': ' ) ) )
- except ( ValueError, TypeError ):
- main.log.exception( "Exception printing intents" )
- main.log.debug( repr( ONOSIntents[0] ) )
- main.log.debug( repr( intentState ) )
- utilities.assert_equals(
- expect=main.TRUE,
- actual=sameIntents,
- onpass="Intents are consistent with before failure",
- onfail="The Intents changed during failure" )
+ try:
+ intentState
+ except NameError:
+ main.log.warn( "No previous intent state was saved" )
+ else:
+ if intentState and intentState == ONOSIntents[ 0 ]:
+ sameIntents = main.TRUE
+ main.log.info( "Intents are consistent with before failure" )
+ # TODO: possibly the states have changed? we may need to figure out
+ # what the acceptable states are
+ elif len( intentState ) == len( ONOSIntents[ 0 ] ):
+ sameIntents = main.TRUE
+ try:
+ before = json.loads( intentState )
+ after = json.loads( ONOSIntents[ 0 ] )
+ for intent in before:
+ if intent not in after:
+ sameIntents = main.FALSE
+ main.log.debug( "Intent is not currently in ONOS " +
+ "(at least in the same form):" )
+ main.log.debug( json.dumps( intent ) )
+ except ( ValueError, TypeError ):
+ main.log.exception( "Exception printing intents" )
+ main.log.debug( repr( ONOSIntents[0] ) )
+ main.log.debug( repr( intentState ) )
+ if sameIntents == main.FALSE:
+ try:
+ main.log.debug( "ONOS intents before: " )
+ main.log.debug( json.dumps( json.loads( intentState ),
+ sort_keys=True, indent=4,
+ separators=( ',', ': ' ) ) )
+ main.log.debug( "Current ONOS intents: " )
+ main.log.debug( json.dumps( json.loads( ONOSIntents[ 0 ] ),
+ sort_keys=True, indent=4,
+ separators=( ',', ': ' ) ) )
+ except ( ValueError, TypeError ):
+ main.log.exception( "Exception printing intents" )
+ main.log.debug( repr( ONOSIntents[0] ) )
+ main.log.debug( repr( intentState ) )
+ utilities.assert_equals(
+ expect=main.TRUE,
+ actual=sameIntents,
+ onpass="Intents are consistent with before failure",
+ onfail="The Intents changed during failure" )
intentCheck = intentCheck and sameIntents
main.step( "Get the OF Table entries and compare to before " +
@@ -1962,7 +2032,6 @@
FlowTables = FlowTables and main.Mininet1.flowTableComp( flows[i], tmpFlows )
if FlowTables == main.FALSE:
main.log.warn( "Differences in flow table for switch: s{}".format( i + 1 ) )
-
utilities.assert_equals(
expect=main.TRUE,
actual=FlowTables,
@@ -2000,12 +2069,17 @@
main.step( "Leadership Election is still functional" )
# Test of LeadershipElection
+ leaderList = []
+
# NOTE: this only works for the sanity test. In case of failures,
# leader will likely change
- leader = main.nodes[ 0 ].ip_address
+ leader = main.nodes[ main.activeNodes[ 0 ] ].ip_address
leaderResult = main.TRUE
- for cli in main.CLIs:
+
+ for i in main.activeNodes:
+ cli = main.CLIs[i]
leaderN = cli.electionTestLeader()
+ leaderList.append( leaderN )
# verify leader is ONOS1
if leaderN == leader:
# all is well
@@ -2018,11 +2092,16 @@
"electionTestLeader function, check the" +
" error logs" )
leaderResult = main.FALSE
- elif leader != leaderN:
+ elif leaderN is None:
+ main.log.error( cli.name +
+ " shows no leader for the election-app was" +
+ " elected after the old one died" )
leaderResult = main.FALSE
- main.log.error( cli.name + " sees " + str( leaderN ) +
- " as the leader of the election app. " +
- "Leader should be " + str( leader ) )
+ if len( set( leaderList ) ) != 1:
+ leaderResult = main.FALSE
+ main.log.error(
+ "Inconsistent view of leader for the election test app" )
+ # TODO: print the list
utilities.assert_equals(
expect=main.TRUE,
actual=leaderResult,
@@ -2045,6 +2124,7 @@
main.caseExplanation = "Compare topology objects between Mininet" +\
" and ONOS"
topoResult = main.FALSE
+ topoFailMsg = "ONOS topology don't match Mininet"
elapsed = 0
count = 0
main.step( "Comparing ONOS topology to MN topology" )
@@ -2059,10 +2139,12 @@
cliStart = time.time()
devices = []
threads = []
- for i in range( main.numCtrls ):
- t = main.Thread( target=main.CLIs[i].devices,
+ for i in main.activeNodes:
+ t = main.Thread( target=utilities.retry,
name="devices-" + str( i ),
- args=[ ] )
+ args=[ main.CLIs[i].devices, [ None ] ],
+ kwargs= { 'sleep': 5, 'attempts': 5,
+ 'randomTime': True } )
threads.append( t )
t.start()
@@ -2072,7 +2154,7 @@
hosts = []
ipResult = main.TRUE
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=utilities.retry,
name="hosts-" + str( i ),
args=[ main.CLIs[i].hosts, [ None ] ],
@@ -2090,7 +2172,7 @@
main.log.error( repr( t.result ) )
hosts.append( None )
for controller in range( 0, len( hosts ) ):
- controllerStr = str( controller + 1 )
+ controllerStr = str( main.activeNodes[controller] + 1 )
if hosts[ controller ]:
for host in hosts[ controller ]:
if host is None or host.get( 'ipAddresses', [] ) == []:
@@ -2100,10 +2182,12 @@
ipResult = main.FALSE
ports = []
threads = []
- for i in range( main.numCtrls ):
- t = main.Thread( target=main.CLIs[i].ports,
+ for i in main.activeNodes:
+ t = main.Thread( target=utilities.retry,
name="ports-" + str( i ),
- args=[ ] )
+ args=[ main.CLIs[i].ports, [ None ] ],
+ kwargs= { 'sleep': 5, 'attempts': 5,
+ 'randomTime': True } )
threads.append( t )
t.start()
@@ -2112,10 +2196,12 @@
ports.append( t.result )
links = []
threads = []
- for i in range( main.numCtrls ):
- t = main.Thread( target=main.CLIs[i].links,
+ for i in main.activeNodes:
+ t = main.Thread( target=utilities.retry,
name="links-" + str( i ),
- args=[ ] )
+ args=[ main.CLIs[i].links, [ None ] ],
+ kwargs= { 'sleep': 5, 'attempts': 5,
+ 'randomTime': True } )
threads.append( t )
t.start()
@@ -2124,10 +2210,12 @@
links.append( t.result )
clusters = []
threads = []
- for i in range( main.numCtrls ):
- t = main.Thread( target=main.CLIs[i].clusters,
+ for i in main.activeNodes:
+ t = main.Thread( target=utilities.retry,
name="clusters-" + str( i ),
- args=[ ] )
+ args=[ main.CLIs[i].clusters, [ None ] ],
+ kwargs= { 'sleep': 5, 'attempts': 5,
+ 'randomTime': True } )
threads.append( t )
t.start()
@@ -2140,11 +2228,20 @@
print "Elapsed time: " + str( elapsed )
print "CLI time: " + str( cliTime )
+ if all( e is None for e in devices ) and\
+ all( e is None for e in hosts ) and\
+ all( e is None for e in ports ) and\
+ all( e is None for e in links ) and\
+ all( e is None for e in clusters ):
+ topoFailMsg = "Could not get topology from ONOS"
+ main.log.error( topoFailMsg )
+ continue # Try again, No use trying to compare
+
mnSwitches = main.Mininet1.getSwitches()
mnLinks = main.Mininet1.getLinks()
mnHosts = main.Mininet1.getHosts()
- for controller in range( main.numCtrls ):
- controllerStr = str( controller + 1 )
+ for controller in range( len( main.activeNodes ) ):
+ controllerStr = str( main.activeNodes[controller] + 1 )
if devices[ controller ] and ports[ controller ] and\
"Error" not in devices[ controller ] and\
"Error" not in ports[ controller ]:
@@ -2286,7 +2383,7 @@
utilities.assert_equals( expect=True,
actual=topoResult,
onpass="ONOS topology matches Mininet",
- onfail="ONOS topology don't match Mininet" )
+ onfail=topoFailMsg )
# End of While loop to pull ONOS state
# Compare json objects for hosts and dataplane clusters
@@ -2295,7 +2392,7 @@
main.step( "Hosts view is consistent across all ONOS nodes" )
consistentHostsResult = main.TRUE
for controller in range( len( hosts ) ):
- controllerStr = str( controller + 1 )
+ controllerStr = str( main.activeNodes[controller] + 1 )
if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
if hosts[ controller ] == hosts[ 0 ]:
continue
@@ -2337,7 +2434,7 @@
main.step( "Clusters view is consistent across all ONOS nodes" )
consistentClustersResult = main.TRUE
for controller in range( len( clusters ) ):
- controllerStr = str( controller + 1 )
+ controllerStr = str( main.activeNodes[controller] + 1 )
if "Error" not in clusters[ controller ]:
if clusters[ controller ] == clusters[ 0 ]:
continue
@@ -2346,7 +2443,6 @@
controllerStr +
" is inconsistent with ONOS1" )
consistentClustersResult = main.FALSE
-
else:
main.log.error( "Error in getting dataplane clusters " +
"from ONOS" + controllerStr )
@@ -2367,6 +2463,7 @@
except ( ValueError, TypeError ):
main.log.exception( "Error parsing clusters[0]: " +
repr( clusters[0] ) )
+ numClusters = "ERROR"
clusterResults = main.FALSE
if numClusters == 1:
clusterResults = main.TRUE
@@ -2416,7 +2513,7 @@
nodesOutput = []
nodeResults = main.TRUE
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].nodes,
name="nodes-" + str( i ),
args=[ ] )
@@ -2426,7 +2523,7 @@
for t in threads:
t.join()
nodesOutput.append( t.result )
- ips = [ node.ip_address for node in main.nodes ]
+ ips = [ main.nodes[node].ip_address for node in main.activeNodes ]
ips.sort()
for i in nodesOutput:
try:
@@ -2447,6 +2544,11 @@
utilities.assert_equals( expect=main.TRUE, actual=nodeResults,
onpass="Nodes check successful",
onfail="Nodes check NOT successful" )
+ if not nodeResults:
+ for cli in main.CLIs:
+ main.log.debug( "{} components not ACTIVE: \n{}".format(
+ cli.name,
+ cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
def CASE9( self, main ):
"""
@@ -2519,6 +2621,7 @@
switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
description = "Killing a switch to ensure it is discovered correctly"
+ onosCli = main.CLIs[ main.activeNodes[0] ]
main.case( description )
switch = main.params[ 'kill' ][ 'switch' ]
switchDPID = main.params[ 'kill' ][ 'dpid' ]
@@ -2530,7 +2633,7 @@
main.log.info( "Waiting " + str( switchSleep ) +
" seconds for switch down to be discovered" )
time.sleep( switchSleep )
- device = main.ONOScli1.getDevice( dpid=switchDPID )
+ device = onosCli.getDevice( dpid=switchDPID )
# Peek at the deleted switch
main.log.warn( str( device ) )
result = main.FALSE
@@ -2563,6 +2666,7 @@
switch = main.params[ 'kill' ][ 'switch' ]
switchDPID = main.params[ 'kill' ][ 'dpid' ]
links = main.params[ 'kill' ][ 'links' ].split()
+ onosCli = main.CLIs[ main.activeNodes[0] ]
description = "Adding a switch to ensure it is discovered correctly"
main.case( description )
@@ -2570,14 +2674,12 @@
main.Mininet1.addSwitch( switch, dpid=switchDPID )
for peer in links:
main.Mininet1.addLink( switch, peer )
- ipList = []
- for i in range( main.numCtrls ):
- ipList.append( main.nodes[ i ].ip_address )
+ ipList = [ node.ip_address for node in main.nodes ]
main.Mininet1.assignSwController( sw=switch, ip=ipList )
main.log.info( "Waiting " + str( switchSleep ) +
" seconds for switch up to be discovered" )
time.sleep( switchSleep )
- device = main.ONOScli1.getDevice( dpid=switchDPID )
+ device = onosCli.getDevice( dpid=switchDPID )
# Peek at the deleted switch
main.log.warn( str( device ) )
result = main.FALSE
@@ -2623,7 +2725,7 @@
# NOTE: must end in /
for f in logFiles:
for node in main.nodes:
- dstName = main.logdir + "/" + node.name + "-" + f
+ dstName = main.logdir + "/" + node.name + "-" + f
main.ONOSbench.secureCopy( node.user_name, node.ip_address,
logFolder + f, dstName )
# std*.log's
@@ -2633,7 +2735,7 @@
# NOTE: must end in /
for f in logFiles:
for node in main.nodes:
- dstName = main.logdir + "/" + node.name + "-" + f
+ dstName = main.logdir + "/" + node.name + "-" + f
main.ONOSbench.secureCopy( node.user_name, node.ip_address,
logFolder + f, dstName )
else:
@@ -2672,7 +2774,8 @@
main.case("Start Leadership Election app")
main.step( "Install leadership election app" )
- appResult = main.ONOScli1.activateApp( "org.onosproject.election" )
+ onosCli = main.CLIs[ main.activeNodes[0] ]
+ appResult = onosCli.activateApp( "org.onosproject.election" )
utilities.assert_equals(
expect=main.TRUE,
actual=appResult,
@@ -2682,9 +2785,10 @@
main.step( "Run for election on each node" )
leaderResult = main.TRUE
leaders = []
- for cli in main.CLIs:
- cli.electionTestRun()
- for cli in main.CLIs:
+ for i in main.activeNodes:
+ main.CLIs[i].electionTestRun()
+ for i in main.activeNodes:
+ cli = main.CLIs[i]
leader = cli.electionTestLeader()
if leader is None or leader == main.FALSE:
main.log.error( cli.name + ": Leader for the election app " +
@@ -2732,18 +2836,12 @@
assert main.CLIs, "main.CLIs not defined"
assert main.nodes, "main.nodes not defined"
- description = "Check that Leadership Election App is still functional"
+ description = "Check that Leadership Election is still functional"
main.case( description )
- # NOTE: Need to re-run since being a canidate is not persistant
- # TODO: add check for "Command not found:" in the driver, this
- # means the election test app isn't loaded
+ # NOTE: Need to re-run after restarts since being a canidate is not persistant
- oldLeaders = [] # leaders by node before withdrawl from candidates
- newLeaders = [] # leaders by node after withdrawl from candidates
- oldAllCandidates = [] # list of lists of each nodes' candidates before
- newAllCandidates = [] # list of lists of each nodes' candidates after
- oldCandidates = [] # list of candidates from node 0 before withdrawl
- newCandidates = [] # list of candidates from node 0 after withdrawl
+ oldLeaders = [] # list of lists of each nodes' candidates before
+ newLeaders = [] # list of lists of each nodes' candidates after
oldLeader = '' # the old leader from oldLeaders, None if not same
newLeader = '' # the new leaders fron newLoeaders, None if not same
oldLeaderCLI = None # the CLI of the old leader used for re-electing
@@ -2754,8 +2852,8 @@
main.step( "Run for election on each node" )
electionResult = main.TRUE
- for cli in main.CLIs: # run test election on each node
- if cli.electionTestRun() == main.FALSE:
+ for i in main.activeNodes: # run test election on each node
+ if main.CLIs[i].electionTestRun() == main.FALSE:
electionResult = main.FALSE
utilities.assert_equals(
expect=main.TRUE,
@@ -2765,40 +2863,40 @@
if electionResult == main.FALSE:
main.log.error(
- "Skipping Test Case because Election Test isn't loaded" )
+ "Skipping Test Case because Election Test App isn't loaded" )
main.skipCase()
main.step( "Check that each node shows the same leader and candidates" )
- sameResult = main.TRUE
- failMessage = "Nodes have different leaders"
- for cli in main.CLIs:
- node = cli.specificLeaderCandidate( 'org.onosproject.election' )
- oldAllCandidates.append( node )
- oldLeaders.append( node[ 0 ] )
- oldCandidates = oldAllCandidates[ 0 ]
-
- # Check that each node has the same leader. Defines oldLeader
- if len( set( oldLeaders ) ) != 1:
- sameResult = main.FALSE
- main.log.error( "More than one leader present:" + str( oldLeaders ) )
- oldLeader = None
+ failMessage = "Nodes have different leaderboards"
+ def consistentLeaderboards( nodes ):
+ TOPIC = 'org.onosproject.election'
+ # FIXME: use threads
+ #FIXME: should we retry outside the function?
+ for n in range( 5 ): # Retry in case election is still happening
+ leaderList = []
+ # Get all leaderboards
+ for cli in nodes:
+ leaderList.append( cli.specificLeaderCandidate( TOPIC ) )
+ # Compare leaderboards
+ result = all( i == leaderList[0] for i in leaderList ) and\
+ leaderList is not None
+ main.log.debug( leaderList )
+ main.log.warn( result )
+ if result:
+ return ( result, leaderList )
+ time.sleep(5) #TODO: parameterize
+ main.log.error( "Inconsistent leaderboards:" + str( leaderList ) )
+ activeCLIs = [ main.CLIs[i] for i in main.activeNodes ]
+ sameResult, oldLeaders = consistentLeaderboards( activeCLIs )
+ if sameResult:
+ oldLeader = oldLeaders[ 0 ][ 0 ]
+ main.log.warn( oldLeader )
else:
- oldLeader = oldLeaders[ 0 ]
-
- # Check that each node's candidate list is the same
- candidateDiscrepancy = False # Boolean of candidate mismatches
- for candidates in oldAllCandidates:
- if set( candidates ) != set( oldCandidates ):
- sameResult = main.FALSE
- candidateDiscrepancy = True
-
- if candidateDiscrepancy:
- failMessage += " and candidates"
-
+ oldLeader = None
utilities.assert_equals(
- expect=main.TRUE,
+ expect=True,
actual=sameResult,
- onpass="Leadership is consistent for the election topic",
+ onpass="Leaderboards are consistent for the election topic",
onfail=failMessage )
main.step( "Find current leader and withdraw" )
@@ -2808,7 +2906,7 @@
main.log.error( "Leadership isn't consistent." )
withdrawResult = main.FALSE
# Get the CLI of the oldLeader
- for i in range( len( main.CLIs ) ):
+ for i in main.activeNodes:
if oldLeader == main.nodes[ i ].ip_address:
oldLeaderCLI = main.CLIs[ i ]
break
@@ -2823,57 +2921,31 @@
onfail="Node was not withdrawn from election" )
main.step( "Check that a new node was elected leader" )
-
- # FIXME: use threads
- newLeaderResult = main.TRUE
failMessage = "Nodes have different leaders"
-
# Get new leaders and candidates
- for cli in main.CLIs:
- node = cli.specificLeaderCandidate( 'org.onosproject.election' )
- # elections might no have finished yet
- if node[ 0 ] == 'none' and not expectNoLeader:
- main.log.info( "Node has no leader, waiting 5 seconds to be " +
- "sure elections are complete." )
- time.sleep(5)
- node = cli.specificLeaderCandidate( 'org.onosproject.election' )
- # election still isn't done or there is a problem
- if node[ 0 ] == 'none':
- main.log.error( "No leader was elected on at least 1 node" )
- newLeaderResult = main.FALSE
- newAllCandidates.append( node )
- newLeaders.append( node[ 0 ] )
- newCandidates = newAllCandidates[ 0 ]
-
- # Check that each node has the same leader. Defines newLeader
- if len( set( newLeaders ) ) != 1:
- newLeaderResult = main.FALSE
- main.log.error( "Nodes have different leaders: " +
- str( newLeaders ) )
- newLeader = None
+ newLeaderResult, newLeaders = consistentLeaderboards( activeCLIs )
+ if newLeaders[ 0 ][ 0 ] == 'none':
+ main.log.error( "No leader was elected on at least 1 node" )
+ if not expectNoLeader:
+ newLeaderResult = False
+ if newLeaderResult:
+ newLeader = newLeaders[ 0 ][ 0 ]
else:
- newLeader = newLeaders[ 0 ]
-
- # Check that each node's candidate list is the same
- for candidates in newAllCandidates:
- if set( candidates ) != set( newCandidates ):
- newLeaderResult = main.FALSE
- main.log.error( "Discrepancy in candidate lists detected" )
+ newLeader = None
# Check that the new leader is not the older leader, which was withdrawn
if newLeader == oldLeader:
- newLeaderResult = main.FALSE
+ newLeaderResult = False
main.log.error( "All nodes still see old leader: " + str( oldLeader ) +
" as the current leader" )
-
utilities.assert_equals(
- expect=main.TRUE,
+ expect=True,
actual=newLeaderResult,
onpass="Leadership election passed",
onfail="Something went wrong with Leadership election" )
- main.step( "Check that that new leader was the candidate of old leader")
- # candidates[ 2 ] should be come the top candidate after withdrawl
+ main.step( "Check that that new leader was the candidate of old leader" )
+ # candidates[ 2 ] should become the top candidate after withdrawal
correctCandidateResult = main.TRUE
if expectNoLeader:
if newLeader == 'none':
@@ -2882,11 +2954,18 @@
else:
main.log.info( "Expected no leader, got: " + str( newLeader ) )
correctCandidateResult = main.FALSE
- elif newLeader != oldCandidates[ 2 ]:
+ elif len( oldLeaders[0] ) >= 3:
+ if newLeader == oldLeaders[ 0 ][ 2 ]:
+ # correct leader was elected
+ correctCandidateResult = main.TRUE
+ else:
+ correctCandidateResult = main.FALSE
+ main.log.error( "Candidate {} was elected. {} should have had priority.".format(
+ newLeader, oldLeaders[ 0 ][ 2 ] ) )
+ else:
+ main.log.warn( "Could not determine who should be the correct leader" )
+ main.log.debug( oldLeaders[ 0 ] )
correctCandidateResult = main.FALSE
- main.log.error( "Candidate {} was elected. {} should have had priority.".format(
- newLeader, oldCandidates[ 2 ] ) )
-
utilities.assert_equals(
expect=main.TRUE,
actual=correctCandidateResult,
@@ -2905,54 +2984,23 @@
actual=runResult,
onpass="App re-ran for election",
onfail="App failed to run for election" )
+
main.step(
"Check that oldLeader is a candidate, and leader if only 1 node" )
# verify leader didn't just change
- positionResult = main.TRUE
- # Get new leaders and candidates, wait if oldLeader is not a candidate yet
-
- # Reset and reuse the new candidate and leaders lists
- newAllCandidates = []
- newCandidates = []
- newLeaders = []
- for cli in main.CLIs:
- node = cli.specificLeaderCandidate( 'org.onosproject.election' )
- if oldLeader not in node: # election might no have finished yet
- main.log.info( "Old Leader not elected, waiting 5 seconds to " +
- "be sure elections are complete" )
- time.sleep(5)
- node = cli.specificLeaderCandidate( 'org.onosproject.election' )
- if oldLeader not in node: # election still isn't done, errors
- main.log.error(
- "Old leader was not elected on at least one node" )
- positionResult = main.FALSE
- newAllCandidates.append( node )
- newLeaders.append( node[ 0 ] )
- newCandidates = newAllCandidates[ 0 ]
-
- # Check that each node has the same leader. Defines newLeader
- if len( set( newLeaders ) ) != 1:
- positionResult = main.FALSE
- main.log.error( "Nodes have different leaders: " +
- str( newLeaders ) )
- newLeader = None
- else:
- newLeader = newLeaders[ 0 ]
-
- # Check that each node's candidate list is the same
- for candidates in newAllCandidates:
- if set( candidates ) != set( newCandidates ):
- newLeaderResult = main.FALSE
- main.log.error( "Discrepancy in candidate lists detected" )
+ # Get new leaders and candidates
+ reRunLeaders = []
+ time.sleep( 5 ) # Parameterize
+ positionResult, reRunLeaders = consistentLeaderboards( activeCLIs )
# Check that the re-elected node is last on the candidate List
- if oldLeader != newCandidates[ -1 ]:
- main.log.error( "Old Leader (" + str( oldLeader ) + ") not in the proper position " +
- str( newCandidates ) )
+ if oldLeader != reRunLeaders[ 0 ][ -1 ]:
+ main.log.error( "Old Leader ({}) not in the proper position: {} ".format( str( oldLeader),
+ str( reRunLeaders[ 0 ] ) ) )
positionResult = main.FALSE
utilities.assert_equals(
- expect=main.TRUE,
+ expect=True,
actual=positionResult,
onpass="Old leader successfully re-ran for election",
onfail="Something went wrong with Leadership election after " +
@@ -2971,15 +3019,11 @@
# Variables for the distributed primitives tests
global pCounterName
- global iCounterName
global pCounterValue
- global iCounterValue
global onosSet
global onosSetName
pCounterName = "TestON-Partitions"
- iCounterName = "TestON-inMemory"
pCounterValue = 0
- iCounterValue = 0
onosSet = set([])
onosSetName = "TestON-set"
@@ -2987,7 +3031,8 @@
main.case( description )
main.step( "Install Primitives app" )
appName = "org.onosproject.distributedprimitives"
- appResults = main.CLIs[0].activateApp( appName )
+ node = main.activeNodes[0]
+ appResults = main.CLIs[node].activateApp( appName )
utilities.assert_equals( expect=main.TRUE,
actual=appResults,
onpass="Primitives app activated",
@@ -3005,7 +3050,6 @@
assert main.CLIs, "main.CLIs not defined"
assert main.nodes, "main.nodes not defined"
assert pCounterName, "pCounterName not defined"
- assert iCounterName, "iCounterName not defined"
assert onosSetName, "onosSetName not defined"
# NOTE: assert fails if value is 0/None/Empty/False
try:
@@ -3014,11 +3058,6 @@
main.log.error( "pCounterValue not defined, setting to 0" )
pCounterValue = 0
try:
- iCounterValue
- except NameError:
- main.log.error( "iCounterValue not defined, setting to 0" )
- iCounterValue = 0
- try:
onosSet
except NameError:
main.log.error( "onosSet not defined, setting to empty Set" )
@@ -3039,7 +3078,7 @@
pCounters = []
threads = []
addedPValues = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
name="counterAddAndGet-" + str( i ),
args=[ pCounterName ] )
@@ -3069,7 +3108,7 @@
pCounters = []
threads = []
addedPValues = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
name="counterGetAndAdd-" + str( i ),
args=[ pCounterName ] )
@@ -3106,7 +3145,7 @@
pCounters = []
threads = []
addedPValues = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
name="counterIncrement-" + str( i ),
args=[ pCounterName ],
@@ -3137,7 +3176,7 @@
pCounters = []
threads = []
addedPValues = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
name="counterIncrement-" + str( i ),
args=[ pCounterName ],
@@ -3168,7 +3207,7 @@
pCounters = []
threads = []
addedPValues = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
name="counterIncrement-" + str( i ),
args=[ pCounterName ],
@@ -3202,199 +3241,12 @@
onpass="Added counters are correct",
onfail="Added counters are incorrect" )
- # In-Memory counters
- main.step( "Increment and get an in-memory counter on each node" )
- iCounters = []
- addedIValues = []
- threads = []
- for i in range( main.numCtrls ):
- t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
- name="icounterIncrement-" + str( i ),
- args=[ iCounterName ],
- kwargs={ "inMemory": True } )
- iCounterValue += 1
- addedIValues.append( iCounterValue )
- threads.append( t )
- t.start()
-
- for t in threads:
- t.join()
- iCounters.append( t.result )
- # Check that counter incremented numController times
- iCounterResults = True
- for i in addedIValues:
- tmpResult = i in iCounters
- iCounterResults = iCounterResults and tmpResult
- if not tmpResult:
- main.log.error( str( i ) + " is not in the in-memory "
- "counter incremented results" )
- utilities.assert_equals( expect=True,
- actual=iCounterResults,
- onpass="In-memory counter incremented",
- onfail="Error incrementing in-memory" +
- " counter" )
-
- main.step( "Get then Increment a in-memory counter on each node" )
- iCounters = []
- threads = []
- addedIValues = []
- for i in range( main.numCtrls ):
- t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
- name="counterGetAndAdd-" + str( i ),
- args=[ iCounterName ],
- kwargs={ "inMemory": True } )
- addedIValues.append( iCounterValue )
- iCounterValue += 1
- threads.append( t )
- t.start()
-
- for t in threads:
- t.join()
- iCounters.append( t.result )
- # Check that counter incremented numController times
- iCounterResults = True
- for i in addedIValues:
- tmpResult = i in iCounters
- iCounterResults = iCounterResults and tmpResult
- if not tmpResult:
- main.log.error( str( i ) + " is not in in-memory "
- "counter incremented results" )
- utilities.assert_equals( expect=True,
- actual=iCounterResults,
- onpass="In-memory counter incremented",
- onfail="Error incrementing in-memory" +
- " counter" )
-
- main.step( "Counters we added have the correct values" )
- incrementCheck = main.Counters.counterCheck( iCounterName, iCounterValue )
- utilities.assert_equals( expect=main.TRUE,
- actual=incrementCheck,
- onpass="Added counters are correct",
- onfail="Added counters are incorrect" )
-
- main.step( "Add -8 to then get a in-memory counter on each node" )
- iCounters = []
- threads = []
- addedIValues = []
- for i in range( main.numCtrls ):
- t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
- name="counterIncrement-" + str( i ),
- args=[ iCounterName ],
- kwargs={ "delta": -8, "inMemory": True } )
- iCounterValue += -8
- addedIValues.append( iCounterValue )
- threads.append( t )
- t.start()
-
- for t in threads:
- t.join()
- iCounters.append( t.result )
- # Check that counter incremented numController times
- iCounterResults = True
- for i in addedIValues:
- tmpResult = i in iCounters
- iCounterResults = iCounterResults and tmpResult
- if not tmpResult:
- main.log.error( str( i ) + " is not in in-memory "
- "counter incremented results" )
- utilities.assert_equals( expect=True,
- actual=pCounterResults,
- onpass="In-memory counter incremented",
- onfail="Error incrementing in-memory" +
- " counter" )
-
- main.step( "Add 5 to then get a in-memory counter on each node" )
- iCounters = []
- threads = []
- addedIValues = []
- for i in range( main.numCtrls ):
- t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
- name="counterIncrement-" + str( i ),
- args=[ iCounterName ],
- kwargs={ "delta": 5, "inMemory": True } )
- iCounterValue += 5
- addedIValues.append( iCounterValue )
- threads.append( t )
- t.start()
-
- for t in threads:
- t.join()
- iCounters.append( t.result )
- # Check that counter incremented numController times
- iCounterResults = True
- for i in addedIValues:
- tmpResult = i in iCounters
- iCounterResults = iCounterResults and tmpResult
- if not tmpResult:
- main.log.error( str( i ) + " is not in in-memory "
- "counter incremented results" )
- utilities.assert_equals( expect=True,
- actual=pCounterResults,
- onpass="In-memory counter incremented",
- onfail="Error incrementing in-memory" +
- " counter" )
-
- main.step( "Get then add 5 to a in-memory counter on each node" )
- iCounters = []
- threads = []
- addedIValues = []
- for i in range( main.numCtrls ):
- t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
- name="counterIncrement-" + str( i ),
- args=[ iCounterName ],
- kwargs={ "delta": 5, "inMemory": True } )
- addedIValues.append( iCounterValue )
- iCounterValue += 5
- threads.append( t )
- t.start()
-
- for t in threads:
- t.join()
- iCounters.append( t.result )
- # Check that counter incremented numController times
- iCounterResults = True
- for i in addedIValues:
- tmpResult = i in iCounters
- iCounterResults = iCounterResults and tmpResult
- if not tmpResult:
- main.log.error( str( i ) + " is not in in-memory "
- "counter incremented results" )
- utilities.assert_equals( expect=True,
- actual=iCounterResults,
- onpass="In-memory counter incremented",
- onfail="Error incrementing in-memory" +
- " counter" )
-
- main.step( "Counters we added have the correct values" )
- incrementCheck = main.Counters.counterCheck( iCounterName, iCounterValue )
- utilities.assert_equals( expect=main.TRUE,
- actual=incrementCheck,
- onpass="Added counters are correct",
- onfail="Added counters are incorrect" )
-
- main.step( "Check counters are consistant across nodes" )
- onosCounters, consistentCounterResults = main.Counters.consistentCheck()
- utilities.assert_equals( expect=main.TRUE,
- actual=consistentCounterResults,
- onpass="ONOS counters are consistent " +
- "across nodes",
- onfail="ONOS Counters are inconsistent " +
- "across nodes" )
-
- main.step( "Counters we added have the correct values" )
- incrementCheck = main.Counters.counterCheck( iCounterName, iCounterValue )
- incrementCheck = incrementCheck and \
- main.Counters.counterCheck( iCounterName, iCounterValue )
- utilities.assert_equals( expect=main.TRUE,
- actual=incrementCheck,
- onpass="Added counters are correct",
- onfail="Added counters are incorrect" )
# DISTRIBUTED SETS
main.step( "Distributed Set get" )
size = len( onosSet )
getResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestGet,
name="setTestGet-" + str( i ),
args=[ onosSetName ] )
@@ -3405,13 +3257,14 @@
getResponses.append( t.result )
getResults = main.TRUE
- for i in range( main.numCtrls ):
+ for i in range( len( main.activeNodes ) ):
+ node = str( main.activeNodes[i] + 1 )
if isinstance( getResponses[ i ], list):
current = set( getResponses[ i ] )
if len( current ) == len( getResponses[ i ] ):
# no repeats
if onosSet != current:
- main.log.error( "ONOS" + str( i + 1 ) +
+ main.log.error( "ONOS" + node +
" has incorrect view" +
" of set " + onosSetName + ":\n" +
str( getResponses[ i ] ) )
@@ -3420,7 +3273,7 @@
getResults = main.FALSE
else:
# error, set is not a set
- main.log.error( "ONOS" + str( i + 1 ) +
+ main.log.error( "ONOS" + node +
" has repeat elements in" +
" set " + onosSetName + ":\n" +
str( getResponses[ i ] ) )
@@ -3435,7 +3288,7 @@
main.step( "Distributed Set size" )
sizeResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestSize,
name="setTestSize-" + str( i ),
args=[ onosSetName ] )
@@ -3446,10 +3299,11 @@
sizeResponses.append( t.result )
sizeResults = main.TRUE
- for i in range( main.numCtrls ):
+ for i in range( len( main.activeNodes ) ):
+ node = str( main.activeNodes[i] + 1 )
if size != sizeResponses[ i ]:
sizeResults = main.FALSE
- main.log.error( "ONOS" + str( i + 1 ) +
+ main.log.error( "ONOS" + node +
" expected a size of " + str( size ) +
" for set " + onosSetName +
" but got " + str( sizeResponses[ i ] ) )
@@ -3462,7 +3316,7 @@
onosSet.add( addValue )
addResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestAdd,
name="setTestAdd-" + str( i ),
args=[ onosSetName, addValue ] )
@@ -3476,7 +3330,7 @@
# main.FALSE = action resulted in no change in set
# main.ERROR - Some error in executing the function
addResults = main.TRUE
- for i in range( main.numCtrls ):
+ for i in range( len( main.activeNodes ) ):
if addResponses[ i ] == main.TRUE:
# All is well
pass
@@ -3496,7 +3350,7 @@
size = len( onosSet )
getResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestGet,
name="setTestGet-" + str( i ),
args=[ onosSetName ] )
@@ -3506,14 +3360,14 @@
t.join()
getResponses.append( t.result )
getResults = main.TRUE
- for i in range( main.numCtrls ):
+ for i in range( len( main.activeNodes ) ):
+ node = str( main.activeNodes[i] + 1 )
if isinstance( getResponses[ i ], list):
current = set( getResponses[ i ] )
if len( current ) == len( getResponses[ i ] ):
# no repeats
if onosSet != current:
- main.log.error( "ONOS" + str( i + 1 ) +
- " has incorrect view" +
+ main.log.error( "ONOS" + node + " has incorrect view" +
" of set " + onosSetName + ":\n" +
str( getResponses[ i ] ) )
main.log.debug( "Expected: " + str( onosSet ) )
@@ -3521,8 +3375,7 @@
getResults = main.FALSE
else:
# error, set is not a set
- main.log.error( "ONOS" + str( i + 1 ) +
- " has repeat elements in" +
+ main.log.error( "ONOS" + node + " has repeat elements in" +
" set " + onosSetName + ":\n" +
str( getResponses[ i ] ) )
getResults = main.FALSE
@@ -3530,7 +3383,7 @@
getResults = main.FALSE
sizeResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestSize,
name="setTestSize-" + str( i ),
args=[ onosSetName ] )
@@ -3540,10 +3393,11 @@
t.join()
sizeResponses.append( t.result )
sizeResults = main.TRUE
- for i in range( main.numCtrls ):
+ for i in range( len( main.activeNodes ) ):
+ node = str( main.activeNodes[i] + 1 )
if size != sizeResponses[ i ]:
sizeResults = main.FALSE
- main.log.error( "ONOS" + str( i + 1 ) +
+ main.log.error( "ONOS" + node +
" expected a size of " + str( size ) +
" for set " + onosSetName +
" but got " + str( sizeResponses[ i ] ) )
@@ -3557,7 +3411,7 @@
onosSet.update( addAllValue.split() )
addResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestAdd,
name="setTestAddAll-" + str( i ),
args=[ onosSetName, addAllValue ] )
@@ -3571,7 +3425,7 @@
# main.FALSE = action resulted in no change in set
# main.ERROR - Some error in executing the function
addAllResults = main.TRUE
- for i in range( main.numCtrls ):
+ for i in range( len( main.activeNodes ) ):
if addResponses[ i ] == main.TRUE:
# All is well
pass
@@ -3591,7 +3445,7 @@
size = len( onosSet )
getResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestGet,
name="setTestGet-" + str( i ),
args=[ onosSetName ] )
@@ -3601,13 +3455,14 @@
t.join()
getResponses.append( t.result )
getResults = main.TRUE
- for i in range( main.numCtrls ):
+ for i in range( len( main.activeNodes ) ):
+ node = str( main.activeNodes[i] + 1 )
if isinstance( getResponses[ i ], list):
current = set( getResponses[ i ] )
if len( current ) == len( getResponses[ i ] ):
# no repeats
if onosSet != current:
- main.log.error( "ONOS" + str( i + 1 ) +
+ main.log.error( "ONOS" + node +
" has incorrect view" +
" of set " + onosSetName + ":\n" +
str( getResponses[ i ] ) )
@@ -3616,7 +3471,7 @@
getResults = main.FALSE
else:
# error, set is not a set
- main.log.error( "ONOS" + str( i + 1 ) +
+ main.log.error( "ONOS" + node +
" has repeat elements in" +
" set " + onosSetName + ":\n" +
str( getResponses[ i ] ) )
@@ -3625,7 +3480,7 @@
getResults = main.FALSE
sizeResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestSize,
name="setTestSize-" + str( i ),
args=[ onosSetName ] )
@@ -3635,10 +3490,11 @@
t.join()
sizeResponses.append( t.result )
sizeResults = main.TRUE
- for i in range( main.numCtrls ):
+ for i in range( len( main.activeNodes ) ):
+ node = str( main.activeNodes[i] + 1 )
if size != sizeResponses[ i ]:
sizeResults = main.FALSE
- main.log.error( "ONOS" + str( i + 1 ) +
+ main.log.error( "ONOS" + node +
" expected a size of " + str( size ) +
" for set " + onosSetName +
" but got " + str( sizeResponses[ i ] ) )
@@ -3651,7 +3507,7 @@
main.step( "Distributed Set contains()" )
containsResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestGet,
name="setContains-" + str( i ),
args=[ onosSetName ],
@@ -3664,7 +3520,7 @@
containsResponses.append( t.result )
containsResults = main.TRUE
- for i in range( main.numCtrls ):
+ for i in range( len( main.activeNodes ) ):
if containsResponses[ i ] == main.ERROR:
containsResults = main.FALSE
else:
@@ -3678,7 +3534,7 @@
main.step( "Distributed Set containsAll()" )
containsAllResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestGet,
name="setContainsAll-" + str( i ),
args=[ onosSetName ],
@@ -3691,7 +3547,7 @@
containsAllResponses.append( t.result )
containsAllResults = main.TRUE
- for i in range( main.numCtrls ):
+ for i in range( len( main.activeNodes ) ):
if containsResponses[ i ] == main.ERROR:
containsResults = main.FALSE
else:
@@ -3706,7 +3562,7 @@
onosSet.remove( addValue )
removeResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestRemove,
name="setTestRemove-" + str( i ),
args=[ onosSetName, addValue ] )
@@ -3720,7 +3576,7 @@
# main.FALSE = action resulted in no change in set
# main.ERROR - Some error in executing the function
removeResults = main.TRUE
- for i in range( main.numCtrls ):
+ for i in range( len( main.activeNodes ) ):
if removeResponses[ i ] == main.TRUE:
# All is well
pass
@@ -3740,7 +3596,7 @@
size = len( onosSet )
getResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestGet,
name="setTestGet-" + str( i ),
args=[ onosSetName ] )
@@ -3750,13 +3606,14 @@
t.join()
getResponses.append( t.result )
getResults = main.TRUE
- for i in range( main.numCtrls ):
+ for i in range( len( main.activeNodes ) ):
+ node = str( main.activeNodes[i] + 1 )
if isinstance( getResponses[ i ], list):
current = set( getResponses[ i ] )
if len( current ) == len( getResponses[ i ] ):
# no repeats
if onosSet != current:
- main.log.error( "ONOS" + str( i + 1 ) +
+ main.log.error( "ONOS" + node +
" has incorrect view" +
" of set " + onosSetName + ":\n" +
str( getResponses[ i ] ) )
@@ -3765,7 +3622,7 @@
getResults = main.FALSE
else:
# error, set is not a set
- main.log.error( "ONOS" + str( i + 1 ) +
+ main.log.error( "ONOS" + node +
" has repeat elements in" +
" set " + onosSetName + ":\n" +
str( getResponses[ i ] ) )
@@ -3774,7 +3631,7 @@
getResults = main.FALSE
sizeResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestSize,
name="setTestSize-" + str( i ),
args=[ onosSetName ] )
@@ -3784,10 +3641,11 @@
t.join()
sizeResponses.append( t.result )
sizeResults = main.TRUE
- for i in range( main.numCtrls ):
+ for i in range( len( main.activeNodes ) ):
+ node = str( main.activeNodes[i] + 1 )
if size != sizeResponses[ i ]:
sizeResults = main.FALSE
- main.log.error( "ONOS" + str( i + 1 ) +
+ main.log.error( "ONOS" + node +
" expected a size of " + str( size ) +
" for set " + onosSetName +
" but got " + str( sizeResponses[ i ] ) )
@@ -3802,7 +3660,7 @@
removeAllResponses = []
threads = []
try:
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestRemove,
name="setTestRemoveAll-" + str( i ),
args=[ onosSetName, addAllValue ] )
@@ -3818,7 +3676,7 @@
# main.FALSE = action resulted in no change in set
# main.ERROR - Some error in executing the function
removeAllResults = main.TRUE
- for i in range( main.numCtrls ):
+ for i in range( len( main.activeNodes ) ):
if removeAllResponses[ i ] == main.TRUE:
# All is well
pass
@@ -3838,7 +3696,7 @@
size = len( onosSet )
getResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestGet,
name="setTestGet-" + str( i ),
args=[ onosSetName ] )
@@ -3848,13 +3706,14 @@
t.join()
getResponses.append( t.result )
getResults = main.TRUE
- for i in range( main.numCtrls ):
+ for i in range( len( main.activeNodes ) ):
+ node = str( main.activeNodes[i] + 1 )
if isinstance( getResponses[ i ], list):
current = set( getResponses[ i ] )
if len( current ) == len( getResponses[ i ] ):
# no repeats
if onosSet != current:
- main.log.error( "ONOS" + str( i + 1 ) +
+ main.log.error( "ONOS" + node +
" has incorrect view" +
" of set " + onosSetName + ":\n" +
str( getResponses[ i ] ) )
@@ -3863,7 +3722,7 @@
getResults = main.FALSE
else:
# error, set is not a set
- main.log.error( "ONOS" + str( i + 1 ) +
+ main.log.error( "ONOS" + node +
" has repeat elements in" +
" set " + onosSetName + ":\n" +
str( getResponses[ i ] ) )
@@ -3872,7 +3731,7 @@
getResults = main.FALSE
sizeResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestSize,
name="setTestSize-" + str( i ),
args=[ onosSetName ] )
@@ -3882,10 +3741,11 @@
t.join()
sizeResponses.append( t.result )
sizeResults = main.TRUE
- for i in range( main.numCtrls ):
+ for i in range( len( main.activeNodes ) ):
+ node = str( main.activeNodes[i] + 1 )
if size != sizeResponses[ i ]:
sizeResults = main.FALSE
- main.log.error( "ONOS" + str( i + 1 ) +
+ main.log.error( "ONOS" + node +
" expected a size of " + str( size ) +
" for set " + onosSetName +
" but got " + str( sizeResponses[ i ] ) )
@@ -3899,7 +3759,7 @@
onosSet.update( addAllValue.split() )
addResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestAdd,
name="setTestAddAll-" + str( i ),
args=[ onosSetName, addAllValue ] )
@@ -3913,7 +3773,7 @@
# main.FALSE = action resulted in no change in set
# main.ERROR - Some error in executing the function
addAllResults = main.TRUE
- for i in range( main.numCtrls ):
+ for i in range( len( main.activeNodes ) ):
if addResponses[ i ] == main.TRUE:
# All is well
pass
@@ -3933,7 +3793,7 @@
size = len( onosSet )
getResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestGet,
name="setTestGet-" + str( i ),
args=[ onosSetName ] )
@@ -3943,13 +3803,14 @@
t.join()
getResponses.append( t.result )
getResults = main.TRUE
- for i in range( main.numCtrls ):
+ for i in range( len( main.activeNodes ) ):
+ node = str( main.activeNodes[i] + 1 )
if isinstance( getResponses[ i ], list):
current = set( getResponses[ i ] )
if len( current ) == len( getResponses[ i ] ):
# no repeats
if onosSet != current:
- main.log.error( "ONOS" + str( i + 1 ) +
+ main.log.error( "ONOS" + node +
" has incorrect view" +
" of set " + onosSetName + ":\n" +
str( getResponses[ i ] ) )
@@ -3958,7 +3819,7 @@
getResults = main.FALSE
else:
# error, set is not a set
- main.log.error( "ONOS" + str( i + 1 ) +
+ main.log.error( "ONOS" + node +
" has repeat elements in" +
" set " + onosSetName + ":\n" +
str( getResponses[ i ] ) )
@@ -3967,7 +3828,7 @@
getResults = main.FALSE
sizeResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestSize,
name="setTestSize-" + str( i ),
args=[ onosSetName ] )
@@ -3977,10 +3838,11 @@
t.join()
sizeResponses.append( t.result )
sizeResults = main.TRUE
- for i in range( main.numCtrls ):
+ for i in range( len( main.activeNodes ) ):
+ node = str( main.activeNodes[i] + 1 )
if size != sizeResponses[ i ]:
sizeResults = main.FALSE
- main.log.error( "ONOS" + str( i + 1 ) +
+ main.log.error( "ONOS" + node +
" expected a size of " + str( size ) +
" for set " + onosSetName +
" but got " + str( sizeResponses[ i ] ) )
@@ -3994,7 +3856,7 @@
onosSet.clear()
clearResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestRemove,
name="setTestClear-" + str( i ),
args=[ onosSetName, " "], # Values doesn't matter
@@ -4009,7 +3871,7 @@
# main.FALSE = action resulted in no change in set
# main.ERROR - Some error in executing the function
clearResults = main.TRUE
- for i in range( main.numCtrls ):
+ for i in range( len( main.activeNodes ) ):
if clearResponses[ i ] == main.TRUE:
# All is well
pass
@@ -4029,7 +3891,7 @@
size = len( onosSet )
getResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestGet,
name="setTestGet-" + str( i ),
args=[ onosSetName ] )
@@ -4039,13 +3901,14 @@
t.join()
getResponses.append( t.result )
getResults = main.TRUE
- for i in range( main.numCtrls ):
+ for i in range( len( main.activeNodes ) ):
+ node = str( main.activeNodes[i] + 1 )
if isinstance( getResponses[ i ], list):
current = set( getResponses[ i ] )
if len( current ) == len( getResponses[ i ] ):
# no repeats
if onosSet != current:
- main.log.error( "ONOS" + str( i + 1 ) +
+ main.log.error( "ONOS" + node +
" has incorrect view" +
" of set " + onosSetName + ":\n" +
str( getResponses[ i ] ) )
@@ -4054,7 +3917,7 @@
getResults = main.FALSE
else:
# error, set is not a set
- main.log.error( "ONOS" + str( i + 1 ) +
+ main.log.error( "ONOS" + node +
" has repeat elements in" +
" set " + onosSetName + ":\n" +
str( getResponses[ i ] ) )
@@ -4063,7 +3926,7 @@
getResults = main.FALSE
sizeResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestSize,
name="setTestSize-" + str( i ),
args=[ onosSetName ] )
@@ -4073,10 +3936,11 @@
t.join()
sizeResponses.append( t.result )
sizeResults = main.TRUE
- for i in range( main.numCtrls ):
+ for i in range( len( main.activeNodes ) ):
+ node = str( main.activeNodes[i] + 1 )
if size != sizeResponses[ i ]:
sizeResults = main.FALSE
- main.log.error( "ONOS" + str( i + 1 ) +
+ main.log.error( "ONOS" + node +
" expected a size of " + str( size ) +
" for set " + onosSetName +
" but got " + str( sizeResponses[ i ] ) )
@@ -4090,7 +3954,7 @@
onosSet.update( addAllValue.split() )
addResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestAdd,
name="setTestAddAll-" + str( i ),
args=[ onosSetName, addAllValue ] )
@@ -4104,7 +3968,7 @@
# main.FALSE = action resulted in no change in set
# main.ERROR - Some error in executing the function
addAllResults = main.TRUE
- for i in range( main.numCtrls ):
+ for i in range( len( main.activeNodes ) ):
if addResponses[ i ] == main.TRUE:
# All is well
pass
@@ -4124,7 +3988,7 @@
size = len( onosSet )
getResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestGet,
name="setTestGet-" + str( i ),
args=[ onosSetName ] )
@@ -4134,13 +3998,14 @@
t.join()
getResponses.append( t.result )
getResults = main.TRUE
- for i in range( main.numCtrls ):
+ for i in range( len( main.activeNodes ) ):
+ node = str( main.activeNodes[i] + 1 )
if isinstance( getResponses[ i ], list):
current = set( getResponses[ i ] )
if len( current ) == len( getResponses[ i ] ):
# no repeats
if onosSet != current:
- main.log.error( "ONOS" + str( i + 1 ) +
+ main.log.error( "ONOS" + node +
" has incorrect view" +
" of set " + onosSetName + ":\n" +
str( getResponses[ i ] ) )
@@ -4149,7 +4014,7 @@
getResults = main.FALSE
else:
# error, set is not a set
- main.log.error( "ONOS" + str( i + 1 ) +
+ main.log.error( "ONOS" + node +
" has repeat elements in" +
" set " + onosSetName + ":\n" +
str( getResponses[ i ] ) )
@@ -4158,7 +4023,7 @@
getResults = main.FALSE
sizeResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestSize,
name="setTestSize-" + str( i ),
args=[ onosSetName ] )
@@ -4168,10 +4033,11 @@
t.join()
sizeResponses.append( t.result )
sizeResults = main.TRUE
- for i in range( main.numCtrls ):
+ for i in range( len( main.activeNodes ) ):
+ node = str( main.activeNodes[i] + 1 )
if size != sizeResponses[ i ]:
sizeResults = main.FALSE
- main.log.error( "ONOS" + str( i + 1 ) +
+ main.log.error( "ONOS" + node +
" expected a size of " + str( size ) +
" for set " + onosSetName +
" but got " + str( sizeResponses[ i ] ) )
@@ -4185,7 +4051,7 @@
onosSet.intersection_update( retainValue.split() )
retainResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestRemove,
name="setTestRetain-" + str( i ),
args=[ onosSetName, retainValue ],
@@ -4200,7 +4066,7 @@
# main.FALSE = action resulted in no change in set
# main.ERROR - Some error in executing the function
retainResults = main.TRUE
- for i in range( main.numCtrls ):
+ for i in range( len( main.activeNodes ) ):
if retainResponses[ i ] == main.TRUE:
# All is well
pass
@@ -4220,7 +4086,7 @@
size = len( onosSet )
getResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestGet,
name="setTestGet-" + str( i ),
args=[ onosSetName ] )
@@ -4230,13 +4096,14 @@
t.join()
getResponses.append( t.result )
getResults = main.TRUE
- for i in range( main.numCtrls ):
+ for i in range( len( main.activeNodes ) ):
+ node = str( main.activeNodes[i] + 1 )
if isinstance( getResponses[ i ], list):
current = set( getResponses[ i ] )
if len( current ) == len( getResponses[ i ] ):
# no repeats
if onosSet != current:
- main.log.error( "ONOS" + str( i + 1 ) +
+ main.log.error( "ONOS" + node +
" has incorrect view" +
" of set " + onosSetName + ":\n" +
str( getResponses[ i ] ) )
@@ -4245,7 +4112,7 @@
getResults = main.FALSE
else:
# error, set is not a set
- main.log.error( "ONOS" + str( i + 1 ) +
+ main.log.error( "ONOS" + node +
" has repeat elements in" +
" set " + onosSetName + ":\n" +
str( getResponses[ i ] ) )
@@ -4254,7 +4121,7 @@
getResults = main.FALSE
sizeResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestSize,
name="setTestSize-" + str( i ),
args=[ onosSetName ] )
@@ -4264,11 +4131,11 @@
t.join()
sizeResponses.append( t.result )
sizeResults = main.TRUE
- for i in range( main.numCtrls ):
+ for i in range( len( main.activeNodes ) ):
+ node = str( main.activeNodes[i] + 1 )
if size != sizeResponses[ i ]:
sizeResults = main.FALSE
- main.log.error( "ONOS" + str( i + 1 ) +
- " expected a size of " +
+ main.log.error( "ONOS" + node + " expected a size of " +
str( size ) + " for set " + onosSetName +
" but got " + str( sizeResponses[ i ] ) )
retainResults = retainResults and getResults and sizeResults
@@ -4282,8 +4149,9 @@
tMapValue = "Testing"
numKeys = 100
putResult = True
- putResponses = main.CLIs[ 0 ].transactionalMapPut( numKeys, tMapValue )
- if len( putResponses ) == 100:
+ node = main.activeNodes[0]
+ putResponses = main.CLIs[node].transactionalMapPut( numKeys, tMapValue )
+ if putResponses and len( putResponses ) == 100:
for i in putResponses:
if putResponses[ i ][ 'value' ] != tMapValue:
putResult = False
@@ -4302,10 +4170,10 @@
getResponses = []
threads = []
valueCheck = True
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].transactionalMapGet,
name="TMap-get-" + str( i ),
- args=[ "Key" + str ( n ) ] )
+ args=[ "Key" + str( n ) ] )
threads.append( t )
t.start()
for t in threads:
@@ -4322,49 +4190,3 @@
actual=getCheck,
onpass="Partitioned Transactional Map get values were correct",
onfail="Partitioned Transactional Map values incorrect" )
-
- main.step( "In-memory Transactional maps put" )
- tMapValue = "Testing"
- numKeys = 100
- putResult = True
- putResponses = main.CLIs[ 0 ].transactionalMapPut( numKeys, tMapValue, inMemory=True )
- if len( putResponses ) == 100:
- for i in putResponses:
- if putResponses[ i ][ 'value' ] != tMapValue:
- putResult = False
- else:
- putResult = False
- if not putResult:
- main.log.debug( "Put response values: " + str( putResponses ) )
- utilities.assert_equals( expect=True,
- actual=putResult,
- onpass="In-Memory Transactional Map put successful",
- onfail="In-Memory Transactional Map put values are incorrect" )
-
- main.step( "In-Memory Transactional maps get" )
- getCheck = True
- for n in range( 1, numKeys + 1 ):
- getResponses = []
- threads = []
- valueCheck = True
- for i in range( main.numCtrls ):
- t = main.Thread( target=main.CLIs[i].transactionalMapGet,
- name="TMap-get-" + str( i ),
- args=[ "Key" + str ( n ) ],
- kwargs={ "inMemory": True } )
- threads.append( t )
- t.start()
- for t in threads:
- t.join()
- getResponses.append( t.result )
- for node in getResponses:
- if node != tMapValue:
- valueCheck = False
- if not valueCheck:
- main.log.warn( "Values for key 'Key" + str( n ) + "' do not match:" )
- main.log.warn( getResponses )
- getCheck = getCheck and valueCheck
- utilities.assert_equals( expect=True,
- actual=getCheck,
- onpass="In-Memory Transactional Map get values were correct",
- onfail="In-Memory Transactional Map values incorrect" )
diff --git a/TestON/tests/HAsanity/dependencies/Counters.py b/TestON/tests/HAsanity/dependencies/Counters.py
index 2dc95e1..192b919 100644
--- a/TestON/tests/HAsanity/dependencies/Counters.py
+++ b/TestON/tests/HAsanity/dependencies/Counters.py
@@ -1,102 +1,105 @@
-def __init__( self ):
- self.default = ''
+import json
-def consistentCheck():
- """
- Checks that TestON counters are consistent across all nodes.
+class Counters():
- Returns the tuple (onosCounters, consistent)
- - onosCounters is the parsed json output of the counters command on all nodes
- - consistent is main.TRUE if all "TestON" counters are consitent across all
- nodes or main.FALSE
- """
- import json
- try:
- correctResults = main.TRUE
- # Get onos counters results
- onosCountersRaw = []
- threads = []
- for i in range( main.numCtrls ):
- t = main.Thread( target=main.CLIs[i].counters,
- name="counters-" + str( i ) )
- threads.append( t )
- t.start()
- for t in threads:
- t.join()
- onosCountersRaw.append( t.result )
- onosCounters = []
- for i in range( main.numCtrls ):
- try:
- onosCounters.append( json.loads( onosCountersRaw[i] ) )
- except ( ValueError, TypeError ):
- main.log.error( "Could not parse counters response from ONOS" +
- str( i + 1 ) )
- main.log.warn( repr( onosCountersRaw[ i ] ) )
- onosCounters.append( [] )
- return main.FALSE
+ def __init__( self ):
+ self.default = ''
- testCounters = {}
- # make a list of all the "TestON-*" counters in ONOS
- # lookes like a dict whose keys are the name of the ONOS node and values
- # are a list of the counters. I.E.
- # { "ONOS1": [ {"name":"TestON-inMemory","value":56},
- # {"name":"TestON-Partitions","value":56} ]
- # }
- # NOTE: There is an assumtion that all nodes are active
- # based on the above for loops
- for controller in enumerate( onosCounters ):
- for dbType in controller[1]:
- for dbName, items in dbType.iteritems():
- for item in items:
- if 'TestON' in item['name']:
- node = 'ONOS' + str( controller[0] + 1 )
- try:
- testCounters[node].append( item )
- except KeyError:
- testCounters[node] = [ item ]
- # compare the counters on each node
- tmp = [ v == testCounters['ONOS1'] for k, v in testCounters.iteritems() ]
- if all( tmp ):
- consistent = main.TRUE
- else:
- consistent = main.FALSE
- main.log.error( "ONOS nodes have different values for counters:\n" +
- testCounters )
- return ( onosCounters, consistent )
- except Exception:
- main.log.exception( "" )
- main.cleanup()
- main.exit()
+ def consistentCheck( self ):
+ """
+ Checks that TestON counters are consistent across all nodes.
-def counterCheck( counterName, counterValue ):
- """
- Checks that TestON counters are consistent across all nodes and that
- specified counter is in ONOS with the given value
- """
- import json
- correctResults = main.TRUE
- # Get onos counters results and consistentCheck
- onosCounters, consistent = main.Counters.consistentCheck()
- # Check for correct values
- for i in range( main.numCtrls ):
- current = onosCounters[i]
- onosValue = None
+ Returns the tuple (onosCounters, consistent)
+ - onosCounters is the parsed json output of the counters command on all nodes
+ - consistent is main.TRUE if all "TestON" counters are consistent across all
+ nodes or main.FALSE
+ """
try:
- for database in current:
- database = database.values()[0]
- for counter in database:
- if counter.get( 'name' ) == counterName:
- onosValue = counter.get( 'value' )
- break
- except AttributeError, e:
- main.log.error( "ONOS" + str( i + 1 ) + " counters result " +
- "is not as expected" )
- correctResults = main.FALSE
- if onosValue == counterValue:
- main.log.info( counterName + " counter value is correct" )
- else:
- main.log.error( counterName + " counter value is incorrect," +
- " expected value: " + str( counterValue )
- + " current value: " + str( onosValue ) )
- correctResults = main.FALSE
- return consistent and correctResults
+ correctResults = main.TRUE
+ # Get onos counters results
+ onosCountersRaw = []
+ threads = []
+ for i in main.activeNodes:
+ t = main.Thread( target=utilities.retry,
+ name="counters-" + str( i ),
+ args=[ main.CLIs[i].counters, [ None ] ],
+ kwargs= { 'sleep': 5, 'attempts': 5,
+ 'randomTime': True } )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ onosCountersRaw.append( t.result )
+ onosCounters = []
+ for i in range( len( main.activeNodes ) ):
+ try:
+ onosCounters.append( json.loads( onosCountersRaw[i] ) )
+ except ( ValueError, TypeError ):
+ main.log.error( "Could not parse counters response from ONOS" +
+ str( main.activeNodes[i] + 1 ) )
+ main.log.warn( repr( onosCountersRaw[ i ] ) )
+ onosCounters.append( [] )
+
+ testCounters = {}
+ # make a list of all the "TestON-*" counters in ONOS
+ # lookes like a dict whose keys are the name of the ONOS node and values
+ # are a list of the counters. I.E.
+ # { "ONOS1": [ { "name":"TestON-Partitions","value":56} ]
+ # }
+ # NOTE: There is an assumption that all nodes are active
+ # based on the above for loops
+ for controller in enumerate( onosCounters ):
+ for key, value in controller[1].iteritems():
+ if 'TestON' in key:
+ node = 'ONOS' + str( controller[0] + 1 )
+ try:
+ testCounters[node].append( { key: value } )
+ except KeyError:
+ testCounters[node] = [ { key: value } ]
+ # compare the counters on each node
+ firstV = testCounters.values()[0]
+ tmp = [ v == firstV for k, v in testCounters.iteritems() ]
+ if all( tmp ):
+ consistent = main.TRUE
+ else:
+ consistent = main.FALSE
+ main.log.error( "ONOS nodes have different values for counters:\n" +
+ testCounters )
+ return ( onosCounters, consistent )
+ except Exception:
+ main.log.exception( "" )
+ main.cleanup()
+ main.exit()
+
+ def counterCheck( self, counterName, counterValue ):
+ """
+ Checks that TestON counters are consistent across all nodes and that
+ specified counter is in ONOS with the given value
+ """
+ try:
+ correctResults = main.TRUE
+ # Get onos counters results and consistentCheck
+ onosCounters, consistent = self.consistentCheck()
+ # Check for correct values
+ for i in range( len( main.activeNodes ) ):
+ current = onosCounters[i]
+ onosValue = None
+ try:
+ onosValue = current.get( counterName )
+ except AttributeError, e:
+ node = str( main.activeNodes[i] + 1 )
+ main.log.error( "ONOS" + node + " counters result " +
+ "is not as expected" )
+ correctResults = main.FALSE
+ if onosValue == counterValue:
+ main.log.info( counterName + " counter value is correct" )
+ else:
+ main.log.error( counterName + " counter value is incorrect," +
+ " expected value: " + str( counterValue )
+ + " current value: " + str( onosValue ) )
+ correctResults = main.FALSE
+ return consistent and correctResults
+ except Exception:
+ main.log.exception( "" )
+ main.cleanup()
+ main.exit()
diff --git a/TestON/tests/HAsingleInstanceRestart/HAsingleInstanceRestart.params b/TestON/tests/HAsingleInstanceRestart/HAsingleInstanceRestart.params
index 990bef4..d7014e4 100644
--- a/TestON/tests/HAsingleInstanceRestart/HAsingleInstanceRestart.params
+++ b/TestON/tests/HAsingleInstanceRestart/HAsingleInstanceRestart.params
@@ -15,9 +15,7 @@
#CASE14: start election app on all onos nodes
#CASE15: Check that Leadership Election is still functional
<testcases>1,2,8,3,4,5,14,15,16,17,[6],8,3,7,4,15,17,9,8,4,10,8,4,11,8,4,12,8,4,13</testcases>
- <imports>
- <path> /home/admin/OnosSystemTest/TestON/tests/HAsingleInstanceRestart/dependencies/ </path>
- </imports>
+
<ENV>
<cellName>HA</cellName>
<appString>drivers,openflow,proxyarp,mobility</appString>
diff --git a/TestON/tests/HAsingleInstanceRestart/HAsingleInstanceRestart.py b/TestON/tests/HAsingleInstanceRestart/HAsingleInstanceRestart.py
index 2b98bbe..44b402f 100644
--- a/TestON/tests/HAsingleInstanceRestart/HAsingleInstanceRestart.py
+++ b/TestON/tests/HAsingleInstanceRestart/HAsingleInstanceRestart.py
@@ -49,13 +49,13 @@
"""
import imp
import time
+ import json
main.log.info( "ONOS Single node cluster restart " +
"HA test - initialization" )
main.case( "Setting up test environment" )
main.caseExplanation = "Setup the test environment including " +\
"installing ONOS, starting Mininet and ONOS" +\
"cli sessions."
- # TODO: save all the timers and output them for plotting
# load some variables from the params file
PULLCODE = False
@@ -70,10 +70,8 @@
main.numCtrls = int( main.ONOSbench.maxNodes )
try:
- fileName = "Counters"
- path = main.params[ 'imports' ][ 'path' ]
- main.Counters = imp.load_source( fileName,
- path + fileName + ".py" )
+ from tests.HAsanity.dependencies.Counters import Counters
+ main.Counters = Counters()
except Exception as e:
main.log.exception( e )
main.cleanup()
@@ -191,27 +189,49 @@
onfail="ONOS package failed" )
main.step( "Installing ONOS package" )
- onosInstallResult = main.ONOSbench.onosInstall(
- options="-f", node=main.nodes[0].ip_address )
+ onosInstallResult = main.TRUE
+ for node in main.nodes:
+ tmpResult = main.ONOSbench.onosInstall( options="-f",
+ node=node.ip_address )
+ onosInstallResult = onosInstallResult and tmpResult
utilities.assert_equals( expect=main.TRUE, actual=onosInstallResult,
onpass="ONOS install successful",
onfail="ONOS install failed" )
main.step( "Checking if ONOS is up yet" )
for i in range( 2 ):
- onos1Isup = main.ONOSbench.isup( main.nodes[0].ip_address )
- if onos1Isup:
+ onosIsupResult = main.TRUE
+ for node in main.nodes:
+ started = main.ONOSbench.isup( node.ip_address )
+ if not started:
+ main.log.error( node.name + " hasn't started" )
+ onosIsupResult = onosIsupResult and started
+ if onosIsupResult == main.TRUE:
break
- utilities.assert_equals( expect=main.TRUE, actual=onos1Isup,
+ utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
onpass="ONOS startup successful",
onfail="ONOS startup failed" )
main.log.step( "Starting ONOS CLI sessions" )
- cliResults = main.ONOScli1.startOnosCli( main.nodes[0].ip_address )
+ cliResults = main.TRUE
+ threads = []
+ for i in range( main.numCtrls ):
+ t = main.Thread( target=main.CLIs[i].startOnosCli,
+ name="startOnosCli-" + str( i ),
+ args=[main.nodes[i].ip_address] )
+ threads.append( t )
+ t.start()
+
+ for t in threads:
+ t.join()
+ cliResults = cliResults and t.result
utilities.assert_equals( expect=main.TRUE, actual=cliResults,
onpass="ONOS cli startup successful",
onfail="ONOS cli startup failed" )
+ # Create a list of active nodes for use when some nodes are stopped
+ main.activeNodes = [ i for i in range( 0, len( main.CLIs ) ) ]
+
if main.params[ 'tcpdump' ].lower() == "true":
main.step( "Start Packet Capture MN" )
main.Mininet2.startTcpdump(
@@ -221,15 +241,68 @@
port=main.params[ 'MNtcpdump' ][ 'port' ] )
main.step( "App Ids check" )
- time.sleep(60)
- appCheck = main.ONOScli1.appToIDCheck()
+ appCheck = main.TRUE
+ threads = []
+ for i in main.activeNodes:
+ t = main.Thread( target=main.CLIs[i].appToIDCheck,
+ name="appToIDCheck-" + str( i ),
+ args=[] )
+ threads.append( t )
+ t.start()
+
+ for t in threads:
+ t.join()
+ appCheck = appCheck and t.result
if appCheck != main.TRUE:
- main.log.warn( main.CLIs[0].apps() )
- main.log.warn( main.CLIs[0].appIDs() )
+ node = main.activeNodes[0]
+ main.log.warn( main.CLIs[node].apps() )
+ main.log.warn( main.CLIs[node].appIDs() )
utilities.assert_equals( expect=main.TRUE, actual=appCheck,
onpass="App Ids seem to be correct",
onfail="Something is wrong with app Ids" )
+ main.step( "Checking ONOS nodes" )
+ nodesOutput = []
+ nodeResults = main.TRUE
+ threads = []
+ for i in main.activeNodes:
+ t = main.Thread( target=main.CLIs[i].nodes,
+ name="nodes-" + str( i ),
+ args=[ ] )
+ threads.append( t )
+ t.start()
+
+ for t in threads:
+ t.join()
+ nodesOutput.append( t.result )
+ ips = [ main.nodes[node].ip_address for node in main.activeNodes ]
+ ips.sort()
+ for i in nodesOutput:
+ try:
+ current = json.loads( i )
+ activeIps = []
+ currentResult = main.FALSE
+ for node in current:
+ if node['state'] == 'READY':
+ activeIps.append( node['ip'] )
+ activeIps.sort()
+ if ips == activeIps:
+ currentResult = main.TRUE
+ except ( ValueError, TypeError ):
+ main.log.error( "Error parsing nodes output" )
+ main.log.warn( repr( i ) )
+ currentResult = main.FALSE
+ nodeResults = nodeResults and currentResult
+ utilities.assert_equals( expect=main.TRUE, actual=nodeResults,
+ onpass="Nodes check successful",
+ onfail="Nodes check NOT successful" )
+
+ if not nodeResults:
+ for cli in main.CLIs:
+ main.log.debug( "{} components not ACTIVE: \n{}".format(
+ cli.name,
+ cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
+
if cliResults == main.FALSE:
main.log.error( "Failed to start ONOS, stopping test" )
main.cleanup()
@@ -265,12 +338,14 @@
main.log.info( str( response ) )
except Exception:
main.log.info( repr( response ) )
- if re.search( "tcp:" + main.nodes[0].ip_address, response ):
- mastershipCheck = mastershipCheck and main.TRUE
- else:
- mastershipCheck = main.FALSE
- if mastershipCheck == main.TRUE:
- main.log.info( "Switch mastership assigned correctly" )
+ for node in main.nodes:
+ if re.search( "tcp:" + node.ip_address, response ):
+ mastershipCheck = mastershipCheck and main.TRUE
+ else:
+ main.log.error( "Error, node " + node.ip_address + " is " +
+ "not in the list of controllers s" +
+ str( i ) + " is connecting to." )
+ mastershipCheck = main.FALSE
utilities.assert_equals(
expect=main.TRUE,
actual=mastershipCheck,
@@ -293,55 +368,73 @@
" mastership to specific ONOS nodes using" +\
" 'device-role'"
main.step( "Assign mastership of switches to specific controllers" )
+ # Manually assign mastership to the controller we want
roleCall = main.TRUE
- roleCheck = main.TRUE
+
+ ipList = [ ]
+ deviceList = []
+ onosCli = main.CLIs[ main.activeNodes[0] ]
try:
+ # Assign mastership to specific controllers. This assignment was
+ # determined for a 7 node cluster, but will work with any sized
+ # cluster
for i in range( 1, 29 ): # switches 1 through 28
- ip = main.nodes[ 0 ].ip_address # ONOS1
# set up correct variables:
if i == 1:
- deviceId = main.ONOScli1.getDevice( "1000" ).get( 'id' )
+ c = 0
+ ip = main.nodes[ c ].ip_address # ONOS1
+ deviceId = onosCli.getDevice( "1000" ).get( 'id' )
elif i == 2:
- deviceId = main.ONOScli1.getDevice( "2000" ).get( 'id' )
+ c = 1 % main.numCtrls
+ ip = main.nodes[ c ].ip_address # ONOS2
+ deviceId = onosCli.getDevice( "2000" ).get( 'id' )
elif i == 3:
- deviceId = main.ONOScli1.getDevice( "3000" ).get( 'id' )
+ c = 1 % main.numCtrls
+ ip = main.nodes[ c ].ip_address # ONOS2
+ deviceId = onosCli.getDevice( "3000" ).get( 'id' )
elif i == 4:
- deviceId = main.ONOScli1.getDevice( "3004" ).get( 'id' )
+ c = 3 % main.numCtrls
+ ip = main.nodes[ c ].ip_address # ONOS4
+ deviceId = onosCli.getDevice( "3004" ).get( 'id' )
elif i == 5:
- deviceId = main.ONOScli1.getDevice( "5000" ).get( 'id' )
+ c = 2 % main.numCtrls
+ ip = main.nodes[ c ].ip_address # ONOS3
+ deviceId = onosCli.getDevice( "5000" ).get( 'id' )
elif i == 6:
- deviceId = main.ONOScli1.getDevice( "6000" ).get( 'id' )
+ c = 2 % main.numCtrls
+ ip = main.nodes[ c ].ip_address # ONOS3
+ deviceId = onosCli.getDevice( "6000" ).get( 'id' )
elif i == 7:
- deviceId = main.ONOScli1.getDevice( "6007" ).get( 'id' )
+ c = 5 % main.numCtrls
+ ip = main.nodes[ c ].ip_address # ONOS6
+ deviceId = onosCli.getDevice( "6007" ).get( 'id' )
elif i >= 8 and i <= 17:
+ c = 4 % main.numCtrls
+ ip = main.nodes[ c ].ip_address # ONOS5
dpid = '3' + str( i ).zfill( 3 )
- deviceId = main.ONOScli1.getDevice( dpid ).get( 'id' )
+ deviceId = onosCli.getDevice( dpid ).get( 'id' )
elif i >= 18 and i <= 27:
+ c = 6 % main.numCtrls
+ ip = main.nodes[ c ].ip_address # ONOS7
dpid = '6' + str( i ).zfill( 3 )
- deviceId = main.ONOScli1.getDevice( dpid ).get( 'id' )
+ deviceId = onosCli.getDevice( dpid ).get( 'id' )
elif i == 28:
- deviceId = main.ONOScli1.getDevice( "2800" ).get( 'id' )
+ c = 0
+ ip = main.nodes[ c ].ip_address # ONOS1
+ deviceId = onosCli.getDevice( "2800" ).get( 'id' )
else:
main.log.error( "You didn't write an else statement for " +
"switch s" + str( i ) )
+ roleCall = main.FALSE
# Assign switch
assert deviceId, "No device id for s" + str( i ) + " in ONOS"
# TODO: make this controller dynamic
- roleCall = roleCall and main.ONOScli1.deviceRole( deviceId,
- ip )
- # Check assignment
- master = main.ONOScli1.getRole( deviceId ).get( 'master' )
- if ip in master:
- roleCheck = roleCheck and main.TRUE
- else:
- roleCheck = roleCheck and main.FALSE
- main.log.error( "Error, controller " + ip + " is not" +
- " master " + "of device " +
- str( deviceId ) + ". Master is " +
- repr( master ) + "." )
+ roleCall = roleCall and onosCli.deviceRole( deviceId, ip )
+ ipList.append( ip )
+ deviceList.append( deviceId )
except ( AttributeError, AssertionError ):
main.log.exception( "Something is wrong with ONOS device view" )
- main.log.info( main.ONOScli1.devices() )
+ main.log.info( onosCli.devices() )
utilities.assert_equals(
expect=main.TRUE,
actual=roleCall,
@@ -349,6 +442,23 @@
onfail="Something wrong with deviceRole calls" )
main.step( "Check mastership was correctly assigned" )
+ roleCheck = main.TRUE
+ # NOTE: This is due to the fact that device mastership change is not
+ # atomic and is actually a multi step process
+ time.sleep( 5 )
+ for i in range( len( ipList ) ):
+ ip = ipList[i]
+ deviceId = deviceList[i]
+ # Check assignment
+ master = onosCli.getRole( deviceId ).get( 'master' )
+ if ip in master:
+ roleCheck = roleCheck and main.TRUE
+ else:
+ roleCheck = roleCheck and main.FALSE
+ main.log.error( "Error, controller " + ip + " is not" +
+ " master " + "of device " +
+ str( deviceId ) + ". Master is " +
+ repr( master ) + "." )
utilities.assert_equals(
expect=main.TRUE,
actual=roleCheck,
@@ -376,16 +486,28 @@
# install onos-app-fwd
main.step( "Install reactive forwarding app" )
- installResults = main.CLIs[0].activateApp( "org.onosproject.fwd" )
+ onosCli = main.CLIs[ main.activeNodes[0] ]
+ installResults = onosCli.activateApp( "org.onosproject.fwd" )
utilities.assert_equals( expect=main.TRUE, actual=installResults,
onpass="Install fwd successful",
onfail="Install fwd failed" )
main.step( "Check app ids" )
- appCheck = main.ONOScli1.appToIDCheck()
+ appCheck = main.TRUE
+ threads = []
+ for i in main.activeNodes:
+ t = main.Thread( target=main.CLIs[i].appToIDCheck,
+ name="appToIDCheck-" + str( i ),
+ args=[] )
+ threads.append( t )
+ t.start()
+
+ for t in threads:
+ t.join()
+ appCheck = appCheck and t.result
if appCheck != main.TRUE:
- main.log.warn( main.CLIs[0].apps() )
- main.log.warn( main.CLIs[0].appIDs() )
+ main.log.warn( onosCli.apps() )
+ main.log.warn( onosCli.appIDs() )
utilities.assert_equals( expect=main.TRUE, actual=appCheck,
onpass="App Ids seem to be correct",
onfail="Something is wrong with app Ids" )
@@ -414,24 +536,37 @@
time.sleep( 11 )
# uninstall onos-app-fwd
main.step( "Uninstall reactive forwarding app" )
- uninstallResult = main.CLIs[0].deactivateApp( "org.onosproject.fwd" )
+ node = main.activeNodes[0]
+ uninstallResult = main.CLIs[node].deactivateApp( "org.onosproject.fwd" )
utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
onpass="Uninstall fwd successful",
onfail="Uninstall fwd failed" )
main.step( "Check app ids" )
- appCheck2 = main.ONOScli1.appToIDCheck()
+ threads = []
+ appCheck2 = main.TRUE
+ for i in main.activeNodes:
+ t = main.Thread( target=main.CLIs[i].appToIDCheck,
+ name="appToIDCheck-" + str( i ),
+ args=[] )
+ threads.append( t )
+ t.start()
+
+ for t in threads:
+ t.join()
+ appCheck2 = appCheck2 and t.result
if appCheck2 != main.TRUE:
- main.log.warn( main.CLIs[0].apps() )
- main.log.warn( main.CLIs[0].appIDs() )
+ node = main.activeNodes[0]
+ main.log.warn( main.CLIs[node].apps() )
+ main.log.warn( main.CLIs[node].appIDs() )
utilities.assert_equals( expect=main.TRUE, actual=appCheck2,
onpass="App Ids seem to be correct",
onfail="Something is wrong with app Ids" )
main.step( "Add host intents via cli" )
intentIds = []
- # TODO: move the host numbers to params
- # Maybe look at all the paths we ping?
+ # TODO: move the host numbers to params
+ # Maybe look at all the paths we ping?
intentAddResult = True
hostResult = main.TRUE
for i in range( 8, 18 ):
@@ -442,15 +577,17 @@
host2 = "00:00:00:00:00:" + \
str( hex( i + 10 )[ 2: ] ).zfill( 2 ).upper()
# NOTE: getHost can return None
- host1Dict = main.ONOScli1.getHost( host1 )
- host2Dict = main.ONOScli1.getHost( host2 )
+ host1Dict = onosCli.getHost( host1 )
+ host2Dict = onosCli.getHost( host2 )
host1Id = None
host2Id = None
if host1Dict and host2Dict:
host1Id = host1Dict.get( 'id', None )
host2Id = host2Dict.get( 'id', None )
if host1Id and host2Id:
- tmpId = main.ONOScli1.addHostIntent( host1Id, host2Id )
+ nodeNum = ( i % len( main.activeNodes ) )
+ node = main.activeNodes[nodeNum]
+ tmpId = main.CLIs[node].addHostIntent( host1Id, host2Id )
if tmpId:
main.log.info( "Added intent with id: " + tmpId )
intentIds.append( tmpId )
@@ -460,7 +597,8 @@
else:
main.log.error( "Error, getHost() failed for h" + str( i ) +
" and/or h" + str( i + 10 ) )
- hosts = main.ONOScli1.hosts()
+ node = main.activeNodes[0]
+ hosts = main.CLIs[node].hosts()
main.log.warn( "Hosts output: " )
try:
main.log.warn( json.dumps( json.loads( hosts ),
@@ -475,7 +613,7 @@
onfail="Error looking up host ids" )
intentStart = time.time()
- onosIds = main.ONOScli1.getAllIntentsId()
+ onosIds = onosCli.getAllIntentsId()
main.log.info( "Submitted intents: " + str( intentIds ) )
main.log.info( "Intents in ONOS: " + str( onosIds ) )
for intent in intentIds:
@@ -488,7 +626,7 @@
else:
intentStop = None
# Print the intent states
- intents = main.ONOScli1.intents()
+ intents = onosCli.intents()
intentStates = []
installedCheck = True
main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
@@ -514,7 +652,7 @@
count += 1
main.log.info( "%-6s%-15s%-15s" %
( str( count ), str( i ), str( s ) ) )
- leaders = main.ONOScli1.leaders()
+ leaders = onosCli.leaders()
try:
missing = False
if leaders:
@@ -541,11 +679,12 @@
main.log.error( repr( leaders ) )
# Check all nodes
if missing:
- response = main.ONOScli1.leaders( jsonFormat=False)
- main.log.warn( "ONOS1 leaders output: \n" +
- str( response ) )
+ for i in main.activeNodes:
+ response = main.CLIs[i].leaders( jsonFormat=False)
+ main.log.warn( str( main.CLIs[i].name ) + " leaders output: \n" +
+ str( response ) )
- partitions = main.ONOScli1.partitions()
+ partitions = onosCli.partitions()
try:
if partitions :
parsedPartitions = json.loads( partitions )
@@ -560,7 +699,7 @@
except ( ValueError, TypeError ):
main.log.exception( "Error parsing partitions" )
main.log.error( repr( partitions ) )
- pendingMap = main.ONOScli1.pendingMap()
+ pendingMap = onosCli.pendingMap()
try:
if pendingMap :
parsedPending = json.loads( pendingMap )
@@ -581,21 +720,21 @@
main.log.error( "Error in pushing host intents to ONOS" )
main.step( "Intent Anti-Entropy dispersion" )
- for i in range(100):
+ for j in range(100):
correct = True
main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
- for cli in main.CLIs:
+ for i in main.activeNodes:
onosIds = []
- ids = cli.getAllIntentsId()
+ ids = main.CLIs[i].getAllIntentsId()
onosIds.append( ids )
- main.log.debug( "Intents in " + cli.name + ": " +
+ main.log.debug( "Intents in " + main.CLIs[i].name + ": " +
str( sorted( onosIds ) ) )
if sorted( ids ) != sorted( intentIds ):
main.log.warn( "Set of intent IDs doesn't match" )
correct = False
break
else:
- intents = json.loads( cli.intents() )
+ intents = json.loads( main.CLIs[i].intents() )
for intent in intents:
if intent[ 'state' ] != "INSTALLED":
main.log.warn( "Intent " + intent[ 'id' ] +
@@ -613,7 +752,7 @@
main.log.info( "It took about " + str( gossipTime ) +
" seconds for all intents to appear in each node" )
gossipPeriod = int( main.params['timers']['gossip'] )
- maxGossipTime = gossipPeriod * len( main.nodes )
+ maxGossipTime = gossipPeriod * len( main.activeNodes )
utilities.assert_greater_equals(
expect=maxGossipTime, actual=gossipTime,
onpass="ECM anti-entropy for intents worked within " +
@@ -629,11 +768,11 @@
installedCheck = True
main.log.info( "Sleeping 60 seconds to see if intents are found" )
time.sleep( 60 )
- onosIds = main.ONOScli1.getAllIntentsId()
+ onosIds = onosCli.getAllIntentsId()
main.log.info( "Submitted intents: " + str( intentIds ) )
main.log.info( "Intents in ONOS: " + str( onosIds ) )
# Print the intent states
- intents = main.ONOScli1.intents()
+ intents = onosCli.intents()
intentStates = []
main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
count = 0
@@ -657,7 +796,7 @@
count += 1
main.log.info( "%-6s%-15s%-15s" %
( str( count ), str( i ), str( s ) ) )
- leaders = main.ONOScli1.leaders()
+ leaders = onosCli.leaders()
try:
missing = False
if leaders:
@@ -687,10 +826,13 @@
main.log.error( repr( leaders ) )
# Check all nodes
if missing:
- response = main.ONOScli1.leaders( jsonFormat=False)
- main.log.warn( "ONOS1 leaders output: \n" +
- str( response ) )
- partitions = main.ONOScli1.partitions()
+ for i in main.activeNodes:
+ node = main.CLIs[i]
+ response = node.leaders( jsonFormat=False)
+ main.log.warn( str( node.name ) + " leaders output: \n" +
+ str( response ) )
+
+ partitions = onosCli.partitions()
try:
if partitions :
parsedPartitions = json.loads( partitions )
@@ -705,7 +847,7 @@
except ( ValueError, TypeError ):
main.log.exception( "Error parsing partitions" )
main.log.error( repr( partitions ) )
- pendingMap = main.ONOScli1.pendingMap()
+ pendingMap = onosCli.pendingMap()
try:
if pendingMap :
parsedPending = json.loads( pendingMap )
@@ -729,11 +871,12 @@
assert main.numCtrls, "main.numCtrls not defined"
assert main, "main not defined"
assert utilities.assert_equals, "utilities.assert_equals not defined"
- main.case( "Verify connectivity by sendind traffic across Intents" )
+ main.case( "Verify connectivity by sending traffic across Intents" )
main.caseExplanation = "Ping across added host intents to check " +\
"functionality and check the state of " +\
"the intent"
main.step( "Ping across added host intents" )
+ onosCli = main.CLIs[ main.activeNodes[0] ]
PingResult = main.TRUE
for i in range( 8, 18 ):
ping = main.Mininet1.pingHost( src="h" + str( i ),
@@ -751,7 +894,7 @@
# TODO: pretty print
main.log.warn( "ONOS1 intents: " )
try:
- tmpIntents = main.ONOScli1.intents()
+ tmpIntents = onosCli.intents()
main.log.warn( json.dumps( json.loads( tmpIntents ),
sort_keys=True,
indent=4,
@@ -793,7 +936,7 @@
"INSTALLED state" )
main.step( "Check leadership of topics" )
- leaders = main.ONOScli1.leaders()
+ leaders = onosCli.leaders()
topicCheck = main.TRUE
try:
if leaders:
@@ -826,11 +969,19 @@
main.log.exception( "Error parsing leaders" )
main.log.error( repr( leaders ) )
# TODO: Check for a leader of these topics
+ # Check all nodes
+ if topicCheck:
+ for i in main.activeNodes:
+ node = main.CLIs[i]
+ response = node.leaders( jsonFormat=False)
+ main.log.warn( str( node.name ) + " leaders output: \n" +
+ str( response ) )
+
utilities.assert_equals( expect=main.TRUE, actual=topicCheck,
onpass="intent Partitions is in leaders",
onfail="Some topics were lost " )
# Print partitions
- partitions = main.ONOScli1.partitions()
+ partitions = onosCli.partitions()
try:
if partitions :
parsedPartitions = json.loads( partitions )
@@ -846,7 +997,7 @@
main.log.exception( "Error parsing partitions" )
main.log.error( repr( partitions ) )
# Print Pending Map
- pendingMap = main.ONOScli1.pendingMap()
+ pendingMap = onosCli.pendingMap()
try:
if pendingMap :
parsedPending = json.loads( pendingMap )
@@ -866,7 +1017,7 @@
"intents change" )
time.sleep( 60 )
# Print the intent states
- intents = main.ONOScli1.intents()
+ intents = onosCli.intents()
intentStates = []
main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
count = 0
@@ -885,7 +1036,7 @@
count += 1
main.log.info( "%-6s%-15s%-15s" %
( str( count ), str( i ), str( s ) ) )
- leaders = main.ONOScli1.leaders()
+ leaders = onosCli.leaders()
try:
missing = False
if leaders:
@@ -914,10 +1065,13 @@
main.log.exception( "Error parsing leaders" )
main.log.error( repr( leaders ) )
if missing:
- response = main.ONOScli1.leaders( jsonFormat=False)
- main.log.warn( "ONOS1 leaders output: \n" +
- str( response ) )
- partitions = main.ONOScli1.partitions()
+ for i in main.activeNodes:
+ node = main.CLIs[i]
+ response = node.leaders( jsonFormat=False)
+ main.log.warn( str( node.name ) + " leaders output: \n" +
+ str( response ) )
+
+ partitions = onosCli.partitions()
try:
if partitions :
parsedPartitions = json.loads( partitions )
@@ -932,7 +1086,7 @@
except ( ValueError, TypeError ):
main.log.exception( "Error parsing partitions" )
main.log.error( repr( partitions ) )
- pendingMap = main.ONOScli1.pendingMap()
+ pendingMap = onosCli.pendingMap()
try:
if pendingMap :
parsedPending = json.loads( pendingMap )
@@ -947,7 +1101,8 @@
main.log.exception( "Error parsing pending map" )
main.log.error( repr( pendingMap ) )
# Print flowrules
- main.log.debug( main.CLIs[0].flows( jsonFormat=False ) )
+ node = main.activeNodes[0]
+ main.log.debug( main.CLIs[node].flows( jsonFormat=False ) )
main.step( "Wait a minute then ping again" )
# the wait is above
PingResult = main.TRUE
@@ -967,7 +1122,7 @@
# TODO: pretty print
main.log.warn( "ONOS1 intents: " )
try:
- tmpIntents = main.ONOScli1.intents()
+ tmpIntents = onosCli.intents()
main.log.warn( json.dumps( json.loads( tmpIntents ),
sort_keys=True,
indent=4,
@@ -999,7 +1154,18 @@
mastershipState = '[]'
# Assert that each device has a master
- rolesNotNull = main.ONOScli1.rolesNotNull()
+ rolesNotNull = main.TRUE
+ threads = []
+ for i in main.activeNodes:
+ t = main.Thread( target=main.CLIs[i].rolesNotNull,
+ name="rolesNotNull-" + str( i ),
+ args=[] )
+ threads.append( t )
+ t.start()
+
+ for t in threads:
+ t.join()
+ rolesNotNull = rolesNotNull and t.result
utilities.assert_equals(
expect=main.TRUE,
actual=rolesNotNull,
@@ -1068,13 +1234,13 @@
main.step( "Each host has an IP address" )
ipResult = main.TRUE
for controller in range( 0, len( hosts ) ):
- controllerStr = str( controller + 1 )
- for host in hosts[ controller ]:
- if host is None or host.get( 'ipAddresses', [] ) == []:
- main.log.error(
- "DEBUG:Error with host ips on controller" +
- controllerStr + ": " + str( host ) )
- ipResult = main.FALSE
+ controllerStr = str( main.activeNodes[controller] + 1 )
+ if hosts[ controller ]:
+ for host in hosts[ controller ]:
+ if not host.get( 'ipAddresses', [ ] ):
+ main.log.error( "Error with host ips on controller" +
+ controllerStr + ": " + str( host ) )
+ ipResult = main.FALSE
utilities.assert_equals(
expect=main.TRUE,
actual=ipResult,
@@ -1105,8 +1271,8 @@
mnSwitches = main.Mininet1.getSwitches()
mnLinks = main.Mininet1.getLinks()
mnHosts = main.Mininet1.getHosts()
- for controller in range( main.numCtrls ):
- controllerStr = str( controller + 1 )
+ for controller in main.activeNodes:
+ controllerStr = str( main.activeNodes[controller] + 1 )
if devices[ controller ] and ports[ controller ] and\
"Error" not in devices[ controller ] and\
"Error" not in ports[ controller ]:
@@ -1135,7 +1301,7 @@
onfail="ONOS" + controllerStr +
" links view is incorrect" )
- if hosts[ controller ] or "Error" not in hosts[ controller ]:
+ if hosts[ controller ] and "Error" not in hosts[ controller ]:
currentHostsResult = main.Mininet1.compareHosts(
mnHosts,
hosts[ controller ] )
@@ -1246,7 +1412,18 @@
main.step( "Check that each switch has a master" )
# Assert that each device has a master
- rolesNotNull = main.ONOScli1.rolesNotNull()
+ rolesNotNull = main.TRUE
+ threads = []
+ for i in main.activeNodes:
+ t = main.Thread( target=main.CLIs[i].rolesNotNull,
+ name="rolesNotNull-" + str( i ),
+ args=[ ] )
+ threads.append( t )
+ t.start()
+
+ for t in threads:
+ t.join()
+ rolesNotNull = rolesNotNull and t.result
utilities.assert_equals(
expect=main.TRUE,
actual=rolesNotNull,
@@ -1328,45 +1505,50 @@
# NOTE: this requires case 5 to pass for intentState to be set.
# maybe we should stop the test if that fails?
sameIntents = main.FALSE
- if intentState and intentState == ONOSIntents[ 0 ]:
- sameIntents = main.TRUE
- main.log.info( "Intents are consistent with before failure" )
- # TODO: possibly the states have changed? we may need to figure out
- # what the acceptable states are
- elif len( intentState ) == len( ONOSIntents[ 0 ] ):
- sameIntents = main.TRUE
- try:
- before = json.loads( intentState )
- after = json.loads( ONOSIntents[ 0 ] )
- for intent in before:
- if intent not in after:
- sameIntents = main.FALSE
- main.log.debug( "Intent is not currently in ONOS " +
- "(at least in the same form):" )
- main.log.debug( json.dumps( intent ) )
- except ( ValueError, TypeError ):
- main.log.exception( "Exception printing intents" )
- main.log.debug( repr( ONOSIntents[0] ) )
- main.log.debug( repr( intentState ) )
- if sameIntents == main.FALSE:
- try:
- main.log.debug( "ONOS intents before: " )
- main.log.debug( json.dumps( json.loads( intentState ),
- sort_keys=True, indent=4,
- separators=( ',', ': ' ) ) )
- main.log.debug( "Current ONOS intents: " )
- main.log.debug( json.dumps( json.loads( ONOSIntents[ 0 ] ),
- sort_keys=True, indent=4,
- separators=( ',', ': ' ) ) )
- except ( ValueError, TypeError ):
- main.log.exception( "Exception printing intents" )
- main.log.debug( repr( ONOSIntents[0] ) )
- main.log.debug( repr( intentState ) )
- utilities.assert_equals(
- expect=main.TRUE,
- actual=sameIntents,
- onpass="Intents are consistent with before failure",
- onfail="The Intents changed during failure" )
+ try:
+ intentState
+ except NameError:
+ main.log.warn( "No previous intent state was saved" )
+ else:
+ if intentState and intentState == ONOSIntents[ 0 ]:
+ sameIntents = main.TRUE
+ main.log.info( "Intents are consistent with before failure" )
+ # TODO: possibly the states have changed? we may need to figure out
+ # what the acceptable states are
+ elif len( intentState ) == len( ONOSIntents[ 0 ] ):
+ sameIntents = main.TRUE
+ try:
+ before = json.loads( intentState )
+ after = json.loads( ONOSIntents[ 0 ] )
+ for intent in before:
+ if intent not in after:
+ sameIntents = main.FALSE
+ main.log.debug( "Intent is not currently in ONOS " +
+ "(at least in the same form):" )
+ main.log.debug( json.dumps( intent ) )
+ except ( ValueError, TypeError ):
+ main.log.exception( "Exception printing intents" )
+ main.log.debug( repr( ONOSIntents[0] ) )
+ main.log.debug( repr( intentState ) )
+ if sameIntents == main.FALSE:
+ try:
+ main.log.debug( "ONOS intents before: " )
+ main.log.debug( json.dumps( json.loads( intentState ),
+ sort_keys=True, indent=4,
+ separators=( ',', ': ' ) ) )
+ main.log.debug( "Current ONOS intents: " )
+ main.log.debug( json.dumps( json.loads( ONOSIntents[ 0 ] ),
+ sort_keys=True, indent=4,
+ separators=( ',', ': ' ) ) )
+ except ( ValueError, TypeError ):
+ main.log.exception( "Exception printing intents" )
+ main.log.debug( repr( ONOSIntents[0] ) )
+ main.log.debug( repr( intentState ) )
+ utilities.assert_equals(
+ expect=main.TRUE,
+ actual=sameIntents,
+ onpass="Intents are consistent with before failure",
+ onfail="The Intents changed during failure" )
intentCheck = intentCheck and sameIntents
"""
main.step( "Get the OF Table entries and compare to before " +
@@ -1378,7 +1560,6 @@
FlowTables = FlowTables and main.Mininet1.flowTableComp( flows[i], tmpFlows )
if FlowTables == main.FALSE:
main.log.warn( "Differences in flow table for switch: s{}".format( i + 1 ) )
-
utilities.assert_equals(
expect=main.TRUE,
actual=FlowTables,
@@ -1388,7 +1569,7 @@
main.step( "Leadership Election is still functional" )
# Test of LeadershipElection
- leader = main.nodes[0].ip_address
+ leader = main.nodes[ main.activeNodes[ 0 ] ].ip_address
leaderResult = main.TRUE
for controller in range( 1, main.numCtrls + 1 ):
# loop through ONOScli handlers
@@ -1607,10 +1788,16 @@
hostAttachmentResults = hostAttachmentResults and\
hostAttachment
- # "consistent" results don't make sense for single instance
+ # "consistent" results don't make sense for single instance
# there should always only be one cluster
- numClusters = len( json.loads( clusters[ 0 ] ) )
clusterResults = main.FALSE
+ try:
+ numClusters = len( json.loads( clusters[ 0 ] ) )
+ except ( ValueError, TypeError ):
+ main.log.exception( "Error parsing clusters[0]: " +
+ repr( clusters[0] ) )
+ numClusters = "ERROR"
+ clusterResults = main.FALSE
if numClusters == 1:
clusterResults = main.TRUE
utilities.assert_equals(
@@ -1700,6 +1887,7 @@
switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
description = "Killing a switch to ensure it is discovered correctly"
+ onosCli = main.CLIs[ main.activeNodes[0] ]
main.case( description )
switch = main.params[ 'kill' ][ 'switch' ]
switchDPID = main.params[ 'kill' ][ 'dpid' ]
@@ -1711,7 +1899,7 @@
main.log.info( "Waiting " + str( switchSleep ) +
" seconds for switch down to be discovered" )
time.sleep( switchSleep )
- device = main.ONOScli1.getDevice( dpid=switchDPID )
+ device = onosCli.getDevice( dpid=switchDPID )
# Peek at the deleted switch
main.log.warn( str( device ) )
result = main.FALSE
@@ -1735,6 +1923,7 @@
switch = main.params[ 'kill' ][ 'switch' ]
switchDPID = main.params[ 'kill' ][ 'dpid' ]
links = main.params[ 'kill' ][ 'links' ].split()
+ onosCli = main.CLIs[ main.activeNodes[0] ]
description = "Adding a switch to ensure it is discovered correctly"
main.case( description )
@@ -1742,14 +1931,12 @@
main.Mininet1.addSwitch( switch, dpid=switchDPID )
for peer in links:
main.Mininet1.addLink( switch, peer )
- ipList = []
- for i in range( main.numCtrls ):
- ipList.append( main.nodes[ i ].ip_address )
+ ipList = [ node.ip_address for node in main.nodes ]
main.Mininet1.assignSwController( sw=switch, ip=ipList )
main.log.info( "Waiting " + str( switchSleep ) +
" seconds for switch up to be discovered" )
time.sleep( switchSleep )
- device = main.ONOScli1.getDevice( dpid=switchDPID )
+ device = onosCli.getDevice( dpid=switchDPID )
# Peek at the deleted switch
main.log.warn( str( device ) )
result = main.FALSE
@@ -1792,7 +1979,7 @@
# NOTE: must end in /
for f in logFiles:
for node in main.nodes:
- dstName = main.logdir + "/" + node.name + "-" + f
+ dstName = main.logdir + "/" + node.name + "-" + f
main.ONOSbench.secureCopy( node.user_name, node.ip_address,
logFolder + f, dstName )
# std*.log's
@@ -1802,7 +1989,7 @@
# NOTE: must end in /
for f in logFiles:
for node in main.nodes:
- dstName = main.logdir + "/" + node.name + "-" + f
+ dstName = main.logdir + "/" + node.name + "-" + f
main.ONOSbench.secureCopy( node.user_name, node.ip_address,
logFolder + f, dstName )
else:
@@ -1839,7 +2026,8 @@
main.case("Start Leadership Election app")
main.step( "Install leadership election app" )
- appResult = main.ONOScli1.activateApp( "org.onosproject.election" )
+ onosCli = main.CLIs[ main.activeNodes[0] ]
+ appResult = onosCli.activateApp( "org.onosproject.election" )
utilities.assert_equals(
expect=main.TRUE,
actual=appResult,
@@ -1847,31 +2035,19 @@
onfail="Something went wrong with installing Leadership election" )
main.step( "Run for election on each node" )
- leaderResult = main.ONOScli1.electionTestRun()
- # check for leader
- leader = main.ONOScli1.electionTestLeader()
- # verify leader is ONOS1
- if leader == main.nodes[0].ip_address:
- # all is well
- pass
- elif leader is None:
- # No leader elected
- main.log.error( "No leader was elected" )
- leaderResult = main.FALSE
- elif leader == main.FALSE:
- # error in response
- # TODO: add check for "Command not found:" in the driver, this
- # means the app isn't loaded
- main.log.error( "Something is wrong with electionTestLeader" +
- " function, check the error logs" )
- leaderResult = main.FALSE
- else:
- # error in response
- main.log.error(
- "Unexpected response from electionTestLeader function:'" +
- str( leader ) +
- "'" )
- leaderResult = main.FALSE
+ leaderResult = main.TRUE
+ leaders = []
+ for i in main.activeNodes:
+ main.CLIs[i].electionTestRun()
+ for i in main.activeNodes:
+ cli = main.CLIs[i]
+ leader = cli.electionTestLeader()
+ if leader is None or leader == main.FALSE:
+ main.log.error( cli.name + ": Leader for the election app " +
+ "should be an ONOS node, instead got '" +
+ str( leader ) + "'" )
+ leaderResult = main.FALSE
+ leaders.append( leader )
utilities.assert_equals(
expect=main.TRUE,
actual=leaderResult,
@@ -1902,16 +2078,10 @@
description = "Check that Leadership Election is still functional"
main.case( description )
- # NOTE: Need to re-run since being a canidate is not persistant
- # TODO: add check for "Command not found:" in the driver, this
- # means the election test app isn't loaded
+ # NOTE: Need to re-run after restarts since being a candidate is not persistent
- oldLeaders = [] # leaders by node before withdrawl from candidates
- newLeaders = [] # leaders by node after withdrawl from candidates
- oldAllCandidates = [] # list of lists of each nodes' candidates before
- newAllCandidates = [] # list of lists of each nodes' candidates after
- oldCandidates = [] # list of candidates from node 0 before withdrawl
- newCandidates = [] # list of candidates from node 0 after withdrawl
+ oldLeaders = [] # list of each node's leaderboard before
+ newLeaders = [] # list of each node's leaderboard after
oldLeader = '' # the old leader from oldLeaders, None if not same
+ newLeader = '' # the new leader from newLeaders, None if not same
oldLeaderCLI = None # the CLI of the old leader used for re-electing
@@ -1922,10 +2092,9 @@
main.step( "Run for election on each node" )
electionResult = main.TRUE
- for cli in main.CLIs: # run test election on each node
- if cli.electionTestRun() == main.FALSE:
+ for i in main.activeNodes: # run test election on each node
+ if main.CLIs[i].electionTestRun() == main.FALSE:
electionResult = main.FALSE
-
utilities.assert_equals(
expect=main.TRUE,
actual=electionResult,
@@ -1938,36 +2107,36 @@
main.skipCase()
main.step( "Check that each node shows the same leader and candidates" )
- sameResult = main.TRUE
- failMessage = "Nodes have different leaders"
- for cli in main.CLIs:
- node = cli.specificLeaderCandidate( 'org.onosproject.election' )
- oldAllCandidates.append( node )
- oldLeaders.append( node[ 0 ] )
- oldCandidates = oldAllCandidates[ 0 ]
-
- # Check that each node has the same leader. Defines oldLeader
- if len( set( oldLeaders ) ) != 1:
- sameResult = main.FALSE
- main.log.error( "More than one leader present:" + str( oldLeaders ) )
- oldLeader = None
+ failMessage = "Nodes have different leaderboards"
+ def consistentLeaderboards( nodes ):
+ TOPIC = 'org.onosproject.election'
+ # FIXME: use threads
+ #FIXME: should we retry outside the function?
+ for n in range( 5 ): # Retry in case election is still happening
+ leaderList = []
+ # Get all leaderboards
+ for cli in nodes:
+ leaderList.append( cli.specificLeaderCandidate( TOPIC ) )
+ # Compare leaderboards
+ result = all( i == leaderList[0] for i in leaderList ) and\
+ leaderList is not None
+ main.log.debug( leaderList )
+ main.log.warn( result )
+ if result:
+ return ( result, leaderList )
+ time.sleep(5) #TODO: parameterize
+ main.log.error( "Inconsistent leaderboards:" + str( leaderList ) )
+ activeCLIs = [ main.CLIs[i] for i in main.activeNodes ]
+ sameResult, oldLeaders = consistentLeaderboards( activeCLIs )
+ if sameResult:
+ oldLeader = oldLeaders[ 0 ][ 0 ]
+ main.log.warn( oldLeader )
else:
- oldLeader = oldLeaders[ 0 ]
-
- # Check that each node's candidate list is the same
- candidateDiscrepancy = False # Boolean of candidate mismatches
- for candidates in oldAllCandidates:
- if set( candidates ) != set( oldCandidates ):
- sameResult = main.FALSE
- candidateDiscrepancy = True
-
- if candidateDiscrepancy:
- failMessage += " and candidates"
-
+ oldLeader = None
utilities.assert_equals(
- expect=main.TRUE,
+ expect=True,
actual=sameResult,
- onpass="Leadership is consistent for the election topic",
+ onpass="Leaderboards are consistent for the election topic",
onfail=failMessage )
main.step( "Find current leader and withdraw" )
@@ -1977,7 +2146,7 @@
main.log.error( "Leadership isn't consistent." )
withdrawResult = main.FALSE
# Get the CLI of the oldLeader
- for i in range( len( main.CLIs ) ):
+ for i in main.activeNodes:
if oldLeader == main.nodes[ i ].ip_address:
oldLeaderCLI = main.CLIs[ i ]
break
@@ -1992,57 +2161,31 @@
onfail="Node was not withdrawn from election" )
main.step( "Check that a new node was elected leader" )
-
- # FIXME: use threads
- newLeaderResult = main.TRUE
failMessage = "Nodes have different leaders"
-
# Get new leaders and candidates
- for cli in main.CLIs:
- node = cli.specificLeaderCandidate( 'org.onosproject.election' )
- # elections might no have finished yet
- if node[ 0 ] == 'none' and not expectNoLeader:
- main.log.info( "Node has no leader, waiting 5 seconds to be " +
- "sure elections are complete." )
- time.sleep(5)
- node = cli.specificLeaderCandidate( 'org.onosproject.election' )
- # election still isn't done or there is a problem
- if node[ 0 ] == 'none':
- main.log.error( "No leader was elected on at least 1 node" )
- newLeaderResult = main.FALSE
- newAllCandidates.append( node )
- newLeaders.append( node[ 0 ] )
- newCandidates = newAllCandidates[ 0 ]
-
- # Check that each node has the same leader. Defines newLeader
- if len( set( newLeaders ) ) != 1:
- newLeaderResult = main.FALSE
- main.log.error( "Nodes have different leaders: " +
- str( newLeaders ) )
- newLeader = None
+ newLeaderResult, newLeaders = consistentLeaderboards( activeCLIs )
+ if newLeaders[ 0 ][ 0 ] == 'none':
+ main.log.error( "No leader was elected on at least 1 node" )
+ if not expectNoLeader:
+ newLeaderResult = False
+ if newLeaderResult:
+ newLeader = newLeaders[ 0 ][ 0 ]
else:
- newLeader = newLeaders[ 0 ]
-
- # Check that each node's candidate list is the same
- for candidates in newAllCandidates:
- if set( candidates ) != set( newCandidates ):
- newLeaderResult = main.FALSE
- main.log.error( "Discrepancy in candidate lists detected" )
+ newLeader = None
# Check that the new leader is not the older leader, which was withdrawn
if newLeader == oldLeader:
- newLeaderResult = main.FALSE
- main.log.error( "All nodes still see old leader: " + oldLeader +
+ newLeaderResult = False
+ main.log.error( "All nodes still see old leader: " + str( oldLeader ) +
" as the current leader" )
-
utilities.assert_equals(
- expect=main.TRUE,
+ expect=True,
actual=newLeaderResult,
onpass="Leadership election passed",
onfail="Something went wrong with Leadership election" )
- main.step( "Check that that new leader was the candidate of old leader")
- # candidates[ 2 ] should be come the top candidate after withdrawl
+ main.step( "Check that that new leader was the candidate of old leader" )
+ # candidates[ 2 ] should become the top candidate after withdrawal
correctCandidateResult = main.TRUE
if expectNoLeader:
if newLeader == 'none':
@@ -2051,11 +2194,18 @@
else:
main.log.info( "Expected no leader, got: " + str( newLeader ) )
correctCandidateResult = main.FALSE
- elif newLeader != oldCandidates[ 2 ]:
+ elif len( oldLeaders[0] ) >= 3:
+ if newLeader == oldLeaders[ 0 ][ 2 ]:
+ # correct leader was elected
+ correctCandidateResult = main.TRUE
+ else:
+ correctCandidateResult = main.FALSE
+ main.log.error( "Candidate {} was elected. {} should have had priority.".format(
+ newLeader, oldLeaders[ 0 ][ 2 ] ) )
+ else:
+ main.log.warn( "Could not determine who should be the correct leader" )
+ main.log.debug( oldLeaders[ 0 ] )
correctCandidateResult = main.FALSE
- main.log.error( "Candidate " + newLeader + " was elected. " +
- oldCandidates[ 2 ] + " should have had priority." )
-
utilities.assert_equals(
expect=main.TRUE,
actual=correctCandidateResult,
@@ -2074,54 +2224,23 @@
actual=runResult,
onpass="App re-ran for election",
onfail="App failed to run for election" )
+
main.step(
"Check that oldLeader is a candidate, and leader if only 1 node" )
# verify leader didn't just change
- positionResult = main.TRUE
- # Get new leaders and candidates, wait if oldLeader is not a candidate yet
-
- # Reset and reuse the new candidate and leaders lists
- newAllCandidates = []
- newCandidates = []
- newLeaders = []
- for cli in main.CLIs:
- node = cli.specificLeaderCandidate( 'org.onosproject.election' )
- if oldLeader not in node: # election might no have finished yet
- main.log.info( "Old Leader not elected, waiting 5 seconds to " +
- "be sure elections are complete" )
- time.sleep(5)
- node = cli.specificLeaderCandidate( 'org.onosproject.election' )
- if oldLeader not in node: # election still isn't done, errors
- main.log.error(
- "Old leader was not elected on at least one node" )
- positionResult = main.FALSE
- newAllCandidates.append( node )
- newLeaders.append( node[ 0 ] )
- newCandidates = newAllCandidates[ 0 ]
-
- # Check that each node has the same leader. Defines newLeader
- if len( set( newLeaders ) ) != 1:
- positionResult = main.FALSE
- main.log.error( "Nodes have different leaders: " +
- str( newLeaders ) )
- newLeader = None
- else:
- newLeader = newLeaders[ 0 ]
-
- # Check that each node's candidate list is the same
- for candidates in newAllCandidates:
- if set( candidates ) != set( newCandidates ):
- newLeaderResult = main.FALSE
- main.log.error( "Discrepancy in candidate lists detected" )
+ # Get new leaders and candidates
+ reRunLeaders = []
+ time.sleep( 5 ) # Parameterize
+ positionResult, reRunLeaders = consistentLeaderboards( activeCLIs )
# Check that the re-elected node is last on the candidate List
- if oldLeader != newCandidates[ -1 ]:
- main.log.error( "Old Leader (" + oldLeader + ") not in the proper position " +
- str( newCandidates ) )
+ if oldLeader != reRunLeaders[ 0 ][ -1 ]:
+ main.log.error( "Old Leader ({}) not in the proper position: {} ".format( str( oldLeader),
+ str( reRunLeaders[ 0 ] ) ) )
positionResult = main.FALSE
utilities.assert_equals(
- expect=main.TRUE,
+ expect=True,
actual=positionResult,
onpass="Old leader successfully re-ran for election",
onfail="Something went wrong with Leadership election after " +
@@ -2131,6 +2250,7 @@
"""
Install Distributed Primitives app
"""
+ import time
assert main.numCtrls, "main.numCtrls not defined"
assert main, "main not defined"
assert utilities.assert_equals, "utilities.assert_equals not defined"
@@ -2139,15 +2259,11 @@
# Variables for the distributed primitives tests
global pCounterName
- global iCounterName
global pCounterValue
- global iCounterValue
global onosSet
global onosSetName
pCounterName = "TestON-Partitions"
- iCounterName = "TestON-inMemory"
pCounterValue = 0
- iCounterValue = 0
onosSet = set([])
onosSetName = "TestON-set"
@@ -2155,11 +2271,13 @@
main.case( description )
main.step( "Install Primitives app" )
appName = "org.onosproject.distributedprimitives"
- appResults = main.CLIs[0].activateApp( appName )
+ node = main.activeNodes[0]
+ appResults = main.CLIs[node].activateApp( appName )
utilities.assert_equals( expect=main.TRUE,
actual=appResults,
onpass="Primitives app activated",
onfail="Primitives app not activated" )
+ time.sleep( 5 ) # To allow all nodes to activate
def CASE17( self, main ):
"""
@@ -2172,7 +2290,6 @@
assert main.CLIs, "main.CLIs not defined"
assert main.nodes, "main.nodes not defined"
assert pCounterName, "pCounterName not defined"
- assert iCounterName, "iCounterName not defined"
assert onosSetName, "onosSetName not defined"
# NOTE: assert fails if value is 0/None/Empty/False
try:
@@ -2181,11 +2298,6 @@
main.log.error( "pCounterValue not defined, setting to 0" )
pCounterValue = 0
try:
- iCounterValue
- except NameError:
- main.log.error( "iCounterValue not defined, setting to 0" )
- iCounterValue = 0
- try:
onosSet
except NameError:
main.log.error( "onosSet not defined, setting to empty Set" )
@@ -2206,7 +2318,7 @@
pCounters = []
threads = []
addedPValues = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
name="counterAddAndGet-" + str( i ),
args=[ pCounterName ] )
@@ -2236,7 +2348,7 @@
pCounters = []
threads = []
addedPValues = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
name="counterGetAndAdd-" + str( i ),
args=[ pCounterName ] )
@@ -2273,7 +2385,7 @@
pCounters = []
threads = []
addedPValues = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
name="counterIncrement-" + str( i ),
args=[ pCounterName ],
@@ -2304,7 +2416,7 @@
pCounters = []
threads = []
addedPValues = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
name="counterIncrement-" + str( i ),
args=[ pCounterName ],
@@ -2335,7 +2447,7 @@
pCounters = []
threads = []
addedPValues = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
name="counterIncrement-" + str( i ),
args=[ pCounterName ],
@@ -2369,199 +2481,12 @@
onpass="Added counters are correct",
onfail="Added counters are incorrect" )
- # In-Memory counters
- main.step( "Increment and get an in-memory counter on each node" )
- iCounters = []
- addedIValues = []
- threads = []
- for i in range( main.numCtrls ):
- t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
- name="icounterIncrement-" + str( i ),
- args=[ iCounterName ],
- kwargs={ "inMemory": True } )
- iCounterValue += 1
- addedIValues.append( iCounterValue )
- threads.append( t )
- t.start()
-
- for t in threads:
- t.join()
- iCounters.append( t.result )
- # Check that counter incremented numController times
- iCounterResults = True
- for i in addedIValues:
- tmpResult = i in iCounters
- iCounterResults = iCounterResults and tmpResult
- if not tmpResult:
- main.log.error( str( i ) + " is not in the in-memory "
- "counter incremented results" )
- utilities.assert_equals( expect=True,
- actual=iCounterResults,
- onpass="In-memory counter incremented",
- onfail="Error incrementing in-memory" +
- " counter" )
-
- main.step( "Get then Increment a in-memory counter on each node" )
- iCounters = []
- threads = []
- addedIValues = []
- for i in range( main.numCtrls ):
- t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
- name="counterGetAndAdd-" + str( i ),
- args=[ iCounterName ],
- kwargs={ "inMemory": True } )
- addedIValues.append( iCounterValue )
- iCounterValue += 1
- threads.append( t )
- t.start()
-
- for t in threads:
- t.join()
- iCounters.append( t.result )
- # Check that counter incremented numController times
- iCounterResults = True
- for i in addedIValues:
- tmpResult = i in iCounters
- iCounterResults = iCounterResults and tmpResult
- if not tmpResult:
- main.log.error( str( i ) + " is not in in-memory "
- "counter incremented results" )
- utilities.assert_equals( expect=True,
- actual=iCounterResults,
- onpass="In-memory counter incremented",
- onfail="Error incrementing in-memory" +
- " counter" )
-
- main.step( "Counters we added have the correct values" )
- incrementCheck = main.Counters.counterCheck( iCounterName, iCounterValue )
- utilities.assert_equals( expect=main.TRUE,
- actual=incrementCheck,
- onpass="Added counters are correct",
- onfail="Added counters are incorrect" )
-
- main.step( "Add -8 to then get a in-memory counter on each node" )
- iCounters = []
- threads = []
- addedIValues = []
- for i in range( main.numCtrls ):
- t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
- name="counterIncrement-" + str( i ),
- args=[ iCounterName ],
- kwargs={ "delta": -8, "inMemory": True } )
- iCounterValue += -8
- addedIValues.append( iCounterValue )
- threads.append( t )
- t.start()
-
- for t in threads:
- t.join()
- iCounters.append( t.result )
- # Check that counter incremented numController times
- iCounterResults = True
- for i in addedIValues:
- tmpResult = i in iCounters
- iCounterResults = iCounterResults and tmpResult
- if not tmpResult:
- main.log.error( str( i ) + " is not in in-memory "
- "counter incremented results" )
- utilities.assert_equals( expect=True,
- actual=pCounterResults,
- onpass="In-memory counter incremented",
- onfail="Error incrementing in-memory" +
- " counter" )
-
- main.step( "Add 5 to then get a in-memory counter on each node" )
- iCounters = []
- threads = []
- addedIValues = []
- for i in range( main.numCtrls ):
- t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
- name="counterIncrement-" + str( i ),
- args=[ iCounterName ],
- kwargs={ "delta": 5, "inMemory": True } )
- iCounterValue += 5
- addedIValues.append( iCounterValue )
- threads.append( t )
- t.start()
-
- for t in threads:
- t.join()
- iCounters.append( t.result )
- # Check that counter incremented numController times
- iCounterResults = True
- for i in addedIValues:
- tmpResult = i in iCounters
- iCounterResults = iCounterResults and tmpResult
- if not tmpResult:
- main.log.error( str( i ) + " is not in in-memory "
- "counter incremented results" )
- utilities.assert_equals( expect=True,
- actual=pCounterResults,
- onpass="In-memory counter incremented",
- onfail="Error incrementing in-memory" +
- " counter" )
-
- main.step( "Get then add 5 to a in-memory counter on each node" )
- iCounters = []
- threads = []
- addedIValues = []
- for i in range( main.numCtrls ):
- t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
- name="counterIncrement-" + str( i ),
- args=[ iCounterName ],
- kwargs={ "delta": 5, "inMemory": True } )
- addedIValues.append( iCounterValue )
- iCounterValue += 5
- threads.append( t )
- t.start()
-
- for t in threads:
- t.join()
- iCounters.append( t.result )
- # Check that counter incremented numController times
- iCounterResults = True
- for i in addedIValues:
- tmpResult = i in iCounters
- iCounterResults = iCounterResults and tmpResult
- if not tmpResult:
- main.log.error( str( i ) + " is not in in-memory "
- "counter incremented results" )
- utilities.assert_equals( expect=True,
- actual=iCounterResults,
- onpass="In-memory counter incremented",
- onfail="Error incrementing in-memory" +
- " counter" )
-
- main.step( "Counters we added have the correct values" )
- incrementCheck = main.Counters.counterCheck( iCounterName, iCounterValue )
- utilities.assert_equals( expect=main.TRUE,
- actual=incrementCheck,
- onpass="Added counters are correct",
- onfail="Added counters are incorrect" )
-
- main.step( "Check counters are consistant across nodes" )
- onosCounters, consistentCounterResults = main.Counters.consistentCheck()
- utilities.assert_equals( expect=main.TRUE,
- actual=consistentCounterResults,
- onpass="ONOS counters are consistent " +
- "across nodes",
- onfail="ONOS Counters are inconsistent " +
- "across nodes" )
-
- main.step( "Counters we added have the correct values" )
- incrementCheck = main.Counters.counterCheck( iCounterName, iCounterValue )
- incrementCheck = incrementCheck and \
- main.Counters.counterCheck( iCounterName, iCounterValue )
- utilities.assert_equals( expect=main.TRUE,
- actual=incrementCheck,
- onpass="Added counters are correct",
- onfail="Added counters are incorrect" )
# DISTRIBUTED SETS
main.step( "Distributed Set get" )
size = len( onosSet )
getResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestGet,
name="setTestGet-" + str( i ),
args=[ onosSetName ] )
@@ -2572,13 +2497,14 @@
getResponses.append( t.result )
getResults = main.TRUE
- for i in range( main.numCtrls ):
+ for i in range( len( main.activeNodes ) ):
+ node = str( main.activeNodes[i] + 1 )
if isinstance( getResponses[ i ], list):
current = set( getResponses[ i ] )
if len( current ) == len( getResponses[ i ] ):
# no repeats
if onosSet != current:
- main.log.error( "ONOS" + str( i + 1 ) +
+ main.log.error( "ONOS" + node +
" has incorrect view" +
" of set " + onosSetName + ":\n" +
str( getResponses[ i ] ) )
@@ -2587,7 +2513,7 @@
getResults = main.FALSE
else:
# error, set is not a set
- main.log.error( "ONOS" + str( i + 1 ) +
+ main.log.error( "ONOS" + node +
" has repeat elements in" +
" set " + onosSetName + ":\n" +
str( getResponses[ i ] ) )
@@ -2602,7 +2528,7 @@
main.step( "Distributed Set size" )
sizeResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestSize,
name="setTestSize-" + str( i ),
args=[ onosSetName ] )
@@ -2613,10 +2539,11 @@
sizeResponses.append( t.result )
sizeResults = main.TRUE
- for i in range( main.numCtrls ):
+ for i in range( len( main.activeNodes ) ):
+ node = str( main.activeNodes[i] + 1 )
if size != sizeResponses[ i ]:
sizeResults = main.FALSE
- main.log.error( "ONOS" + str( i + 1 ) +
+ main.log.error( "ONOS" + node +
" expected a size of " + str( size ) +
" for set " + onosSetName +
" but got " + str( sizeResponses[ i ] ) )
@@ -2629,7 +2556,7 @@
onosSet.add( addValue )
addResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestAdd,
name="setTestAdd-" + str( i ),
args=[ onosSetName, addValue ] )
@@ -2643,7 +2570,7 @@
# main.FALSE = action resulted in no change in set
# main.ERROR - Some error in executing the function
addResults = main.TRUE
- for i in range( main.numCtrls ):
+ for i in range( len( main.activeNodes ) ):
if addResponses[ i ] == main.TRUE:
# All is well
pass
@@ -2663,7 +2590,7 @@
size = len( onosSet )
getResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestGet,
name="setTestGet-" + str( i ),
args=[ onosSetName ] )
@@ -2673,14 +2600,14 @@
t.join()
getResponses.append( t.result )
getResults = main.TRUE
- for i in range( main.numCtrls ):
+ for i in range( len( main.activeNodes ) ):
+ node = str( main.activeNodes[i] + 1 )
if isinstance( getResponses[ i ], list):
current = set( getResponses[ i ] )
if len( current ) == len( getResponses[ i ] ):
# no repeats
if onosSet != current:
- main.log.error( "ONOS" + str( i + 1 ) +
- " has incorrect view" +
+ main.log.error( "ONOS" + node + " has incorrect view" +
" of set " + onosSetName + ":\n" +
str( getResponses[ i ] ) )
main.log.debug( "Expected: " + str( onosSet ) )
@@ -2688,8 +2615,7 @@
getResults = main.FALSE
else:
# error, set is not a set
- main.log.error( "ONOS" + str( i + 1 ) +
- " has repeat elements in" +
+ main.log.error( "ONOS" + node + " has repeat elements in" +
" set " + onosSetName + ":\n" +
str( getResponses[ i ] ) )
getResults = main.FALSE
@@ -2697,7 +2623,7 @@
getResults = main.FALSE
sizeResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestSize,
name="setTestSize-" + str( i ),
args=[ onosSetName ] )
@@ -2707,10 +2633,11 @@
t.join()
sizeResponses.append( t.result )
sizeResults = main.TRUE
- for i in range( main.numCtrls ):
+ for i in range( len( main.activeNodes ) ):
+ node = str( main.activeNodes[i] + 1 )
if size != sizeResponses[ i ]:
sizeResults = main.FALSE
- main.log.error( "ONOS" + str( i + 1 ) +
+ main.log.error( "ONOS" + node +
" expected a size of " + str( size ) +
" for set " + onosSetName +
" but got " + str( sizeResponses[ i ] ) )
@@ -2724,7 +2651,7 @@
onosSet.update( addAllValue.split() )
addResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestAdd,
name="setTestAddAll-" + str( i ),
args=[ onosSetName, addAllValue ] )
@@ -2738,7 +2665,7 @@
# main.FALSE = action resulted in no change in set
# main.ERROR - Some error in executing the function
addAllResults = main.TRUE
- for i in range( main.numCtrls ):
+ for i in range( len( main.activeNodes ) ):
if addResponses[ i ] == main.TRUE:
# All is well
pass
@@ -2758,7 +2685,7 @@
size = len( onosSet )
getResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestGet,
name="setTestGet-" + str( i ),
args=[ onosSetName ] )
@@ -2768,13 +2695,14 @@
t.join()
getResponses.append( t.result )
getResults = main.TRUE
- for i in range( main.numCtrls ):
+ for i in range( len( main.activeNodes ) ):
+ node = str( main.activeNodes[i] + 1 )
if isinstance( getResponses[ i ], list):
current = set( getResponses[ i ] )
if len( current ) == len( getResponses[ i ] ):
# no repeats
if onosSet != current:
- main.log.error( "ONOS" + str( i + 1 ) +
+ main.log.error( "ONOS" + node +
" has incorrect view" +
" of set " + onosSetName + ":\n" +
str( getResponses[ i ] ) )
@@ -2783,7 +2711,7 @@
getResults = main.FALSE
else:
# error, set is not a set
- main.log.error( "ONOS" + str( i + 1 ) +
+ main.log.error( "ONOS" + node +
" has repeat elements in" +
" set " + onosSetName + ":\n" +
str( getResponses[ i ] ) )
@@ -2792,7 +2720,7 @@
getResults = main.FALSE
sizeResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestSize,
name="setTestSize-" + str( i ),
args=[ onosSetName ] )
@@ -2802,10 +2730,11 @@
t.join()
sizeResponses.append( t.result )
sizeResults = main.TRUE
- for i in range( main.numCtrls ):
+ for i in range( len( main.activeNodes ) ):
+ node = str( main.activeNodes[i] + 1 )
if size != sizeResponses[ i ]:
sizeResults = main.FALSE
- main.log.error( "ONOS" + str( i + 1 ) +
+ main.log.error( "ONOS" + node +
" expected a size of " + str( size ) +
" for set " + onosSetName +
" but got " + str( sizeResponses[ i ] ) )
@@ -2818,7 +2747,7 @@
main.step( "Distributed Set contains()" )
containsResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestGet,
name="setContains-" + str( i ),
args=[ onosSetName ],
@@ -2831,7 +2760,7 @@
containsResponses.append( t.result )
containsResults = main.TRUE
- for i in range( main.numCtrls ):
+ for i in range( len( main.activeNodes ) ):
if containsResponses[ i ] == main.ERROR:
containsResults = main.FALSE
else:
@@ -2845,7 +2774,7 @@
main.step( "Distributed Set containsAll()" )
containsAllResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestGet,
name="setContainsAll-" + str( i ),
args=[ onosSetName ],
@@ -2858,7 +2787,7 @@
containsAllResponses.append( t.result )
containsAllResults = main.TRUE
- for i in range( main.numCtrls ):
+ for i in range( len( main.activeNodes ) ):
if containsResponses[ i ] == main.ERROR:
containsResults = main.FALSE
else:
@@ -2873,7 +2802,7 @@
onosSet.remove( addValue )
removeResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestRemove,
name="setTestRemove-" + str( i ),
args=[ onosSetName, addValue ] )
@@ -2887,7 +2816,7 @@
# main.FALSE = action resulted in no change in set
# main.ERROR - Some error in executing the function
removeResults = main.TRUE
- for i in range( main.numCtrls ):
+ for i in range( len( main.activeNodes ) ):
if removeResponses[ i ] == main.TRUE:
# All is well
pass
@@ -2907,7 +2836,7 @@
size = len( onosSet )
getResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestGet,
name="setTestGet-" + str( i ),
args=[ onosSetName ] )
@@ -2917,13 +2846,14 @@
t.join()
getResponses.append( t.result )
getResults = main.TRUE
- for i in range( main.numCtrls ):
+ for i in range( len( main.activeNodes ) ):
+ node = str( main.activeNodes[i] + 1 )
if isinstance( getResponses[ i ], list):
current = set( getResponses[ i ] )
if len( current ) == len( getResponses[ i ] ):
# no repeats
if onosSet != current:
- main.log.error( "ONOS" + str( i + 1 ) +
+ main.log.error( "ONOS" + node +
" has incorrect view" +
" of set " + onosSetName + ":\n" +
str( getResponses[ i ] ) )
@@ -2932,7 +2862,7 @@
getResults = main.FALSE
else:
# error, set is not a set
- main.log.error( "ONOS" + str( i + 1 ) +
+ main.log.error( "ONOS" + node +
" has repeat elements in" +
" set " + onosSetName + ":\n" +
str( getResponses[ i ] ) )
@@ -2941,7 +2871,7 @@
getResults = main.FALSE
sizeResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestSize,
name="setTestSize-" + str( i ),
args=[ onosSetName ] )
@@ -2951,10 +2881,11 @@
t.join()
sizeResponses.append( t.result )
sizeResults = main.TRUE
- for i in range( main.numCtrls ):
+ for i in range( len( main.activeNodes ) ):
+ node = str( main.activeNodes[i] + 1 )
if size != sizeResponses[ i ]:
sizeResults = main.FALSE
- main.log.error( "ONOS" + str( i + 1 ) +
+ main.log.error( "ONOS" + node +
" expected a size of " + str( size ) +
" for set " + onosSetName +
" but got " + str( sizeResponses[ i ] ) )
@@ -2969,7 +2900,7 @@
removeAllResponses = []
threads = []
try:
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestRemove,
name="setTestRemoveAll-" + str( i ),
args=[ onosSetName, addAllValue ] )
@@ -2985,7 +2916,7 @@
# main.FALSE = action resulted in no change in set
# main.ERROR - Some error in executing the function
removeAllResults = main.TRUE
- for i in range( main.numCtrls ):
+ for i in range( len( main.activeNodes ) ):
if removeAllResponses[ i ] == main.TRUE:
# All is well
pass
@@ -3005,7 +2936,7 @@
size = len( onosSet )
getResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestGet,
name="setTestGet-" + str( i ),
args=[ onosSetName ] )
@@ -3015,13 +2946,14 @@
t.join()
getResponses.append( t.result )
getResults = main.TRUE
- for i in range( main.numCtrls ):
+ for i in range( len( main.activeNodes ) ):
+ node = str( main.activeNodes[i] + 1 )
if isinstance( getResponses[ i ], list):
current = set( getResponses[ i ] )
if len( current ) == len( getResponses[ i ] ):
# no repeats
if onosSet != current:
- main.log.error( "ONOS" + str( i + 1 ) +
+ main.log.error( "ONOS" + node +
" has incorrect view" +
" of set " + onosSetName + ":\n" +
str( getResponses[ i ] ) )
@@ -3030,7 +2962,7 @@
getResults = main.FALSE
else:
# error, set is not a set
- main.log.error( "ONOS" + str( i + 1 ) +
+ main.log.error( "ONOS" + node +
" has repeat elements in" +
" set " + onosSetName + ":\n" +
str( getResponses[ i ] ) )
@@ -3039,7 +2971,7 @@
getResults = main.FALSE
sizeResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestSize,
name="setTestSize-" + str( i ),
args=[ onosSetName ] )
@@ -3049,10 +2981,11 @@
t.join()
sizeResponses.append( t.result )
sizeResults = main.TRUE
- for i in range( main.numCtrls ):
+ for i in range( len( main.activeNodes ) ):
+ node = str( main.activeNodes[i] + 1 )
if size != sizeResponses[ i ]:
sizeResults = main.FALSE
- main.log.error( "ONOS" + str( i + 1 ) +
+ main.log.error( "ONOS" + node +
" expected a size of " + str( size ) +
" for set " + onosSetName +
" but got " + str( sizeResponses[ i ] ) )
@@ -3066,7 +2999,7 @@
onosSet.update( addAllValue.split() )
addResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestAdd,
name="setTestAddAll-" + str( i ),
args=[ onosSetName, addAllValue ] )
@@ -3080,7 +3013,7 @@
# main.FALSE = action resulted in no change in set
# main.ERROR - Some error in executing the function
addAllResults = main.TRUE
- for i in range( main.numCtrls ):
+ for i in range( len( main.activeNodes ) ):
if addResponses[ i ] == main.TRUE:
# All is well
pass
@@ -3100,7 +3033,7 @@
size = len( onosSet )
getResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestGet,
name="setTestGet-" + str( i ),
args=[ onosSetName ] )
@@ -3110,13 +3043,14 @@
t.join()
getResponses.append( t.result )
getResults = main.TRUE
- for i in range( main.numCtrls ):
+ for i in range( len( main.activeNodes ) ):
+ node = str( main.activeNodes[i] + 1 )
if isinstance( getResponses[ i ], list):
current = set( getResponses[ i ] )
if len( current ) == len( getResponses[ i ] ):
# no repeats
if onosSet != current:
- main.log.error( "ONOS" + str( i + 1 ) +
+ main.log.error( "ONOS" + node +
" has incorrect view" +
" of set " + onosSetName + ":\n" +
str( getResponses[ i ] ) )
@@ -3125,7 +3059,7 @@
getResults = main.FALSE
else:
# error, set is not a set
- main.log.error( "ONOS" + str( i + 1 ) +
+ main.log.error( "ONOS" + node +
" has repeat elements in" +
" set " + onosSetName + ":\n" +
str( getResponses[ i ] ) )
@@ -3134,7 +3068,7 @@
getResults = main.FALSE
sizeResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestSize,
name="setTestSize-" + str( i ),
args=[ onosSetName ] )
@@ -3144,10 +3078,11 @@
t.join()
sizeResponses.append( t.result )
sizeResults = main.TRUE
- for i in range( main.numCtrls ):
+ for i in range( len( main.activeNodes ) ):
+ node = str( main.activeNodes[i] + 1 )
if size != sizeResponses[ i ]:
sizeResults = main.FALSE
- main.log.error( "ONOS" + str( i + 1 ) +
+ main.log.error( "ONOS" + node +
" expected a size of " + str( size ) +
" for set " + onosSetName +
" but got " + str( sizeResponses[ i ] ) )
@@ -3161,7 +3096,7 @@
onosSet.clear()
clearResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestRemove,
name="setTestClear-" + str( i ),
args=[ onosSetName, " "], # Values doesn't matter
@@ -3176,7 +3111,7 @@
# main.FALSE = action resulted in no change in set
# main.ERROR - Some error in executing the function
clearResults = main.TRUE
- for i in range( main.numCtrls ):
+ for i in range( len( main.activeNodes ) ):
if clearResponses[ i ] == main.TRUE:
# All is well
pass
@@ -3196,7 +3131,7 @@
size = len( onosSet )
getResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestGet,
name="setTestGet-" + str( i ),
args=[ onosSetName ] )
@@ -3206,13 +3141,14 @@
t.join()
getResponses.append( t.result )
getResults = main.TRUE
- for i in range( main.numCtrls ):
+ for i in range( len( main.activeNodes ) ):
+ node = str( main.activeNodes[i] + 1 )
if isinstance( getResponses[ i ], list):
current = set( getResponses[ i ] )
if len( current ) == len( getResponses[ i ] ):
# no repeats
if onosSet != current:
- main.log.error( "ONOS" + str( i + 1 ) +
+ main.log.error( "ONOS" + node +
" has incorrect view" +
" of set " + onosSetName + ":\n" +
str( getResponses[ i ] ) )
@@ -3221,7 +3157,7 @@
getResults = main.FALSE
else:
# error, set is not a set
- main.log.error( "ONOS" + str( i + 1 ) +
+ main.log.error( "ONOS" + node +
" has repeat elements in" +
" set " + onosSetName + ":\n" +
str( getResponses[ i ] ) )
@@ -3230,7 +3166,7 @@
getResults = main.FALSE
sizeResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestSize,
name="setTestSize-" + str( i ),
args=[ onosSetName ] )
@@ -3240,10 +3176,11 @@
t.join()
sizeResponses.append( t.result )
sizeResults = main.TRUE
- for i in range( main.numCtrls ):
+ for i in range( len( main.activeNodes ) ):
+ node = str( main.activeNodes[i] + 1 )
if size != sizeResponses[ i ]:
sizeResults = main.FALSE
- main.log.error( "ONOS" + str( i + 1 ) +
+ main.log.error( "ONOS" + node +
" expected a size of " + str( size ) +
" for set " + onosSetName +
" but got " + str( sizeResponses[ i ] ) )
@@ -3257,7 +3194,7 @@
onosSet.update( addAllValue.split() )
addResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestAdd,
name="setTestAddAll-" + str( i ),
args=[ onosSetName, addAllValue ] )
@@ -3271,7 +3208,7 @@
# main.FALSE = action resulted in no change in set
# main.ERROR - Some error in executing the function
addAllResults = main.TRUE
- for i in range( main.numCtrls ):
+ for i in range( len( main.activeNodes ) ):
if addResponses[ i ] == main.TRUE:
# All is well
pass
@@ -3291,7 +3228,7 @@
size = len( onosSet )
getResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestGet,
name="setTestGet-" + str( i ),
args=[ onosSetName ] )
@@ -3301,13 +3238,14 @@
t.join()
getResponses.append( t.result )
getResults = main.TRUE
- for i in range( main.numCtrls ):
+ for i in range( len( main.activeNodes ) ):
+ node = str( main.activeNodes[i] + 1 )
if isinstance( getResponses[ i ], list):
current = set( getResponses[ i ] )
if len( current ) == len( getResponses[ i ] ):
# no repeats
if onosSet != current:
- main.log.error( "ONOS" + str( i + 1 ) +
+ main.log.error( "ONOS" + node +
" has incorrect view" +
" of set " + onosSetName + ":\n" +
str( getResponses[ i ] ) )
@@ -3316,7 +3254,7 @@
getResults = main.FALSE
else:
# error, set is not a set
- main.log.error( "ONOS" + str( i + 1 ) +
+ main.log.error( "ONOS" + node +
" has repeat elements in" +
" set " + onosSetName + ":\n" +
str( getResponses[ i ] ) )
@@ -3325,7 +3263,7 @@
getResults = main.FALSE
sizeResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestSize,
name="setTestSize-" + str( i ),
args=[ onosSetName ] )
@@ -3335,10 +3273,11 @@
t.join()
sizeResponses.append( t.result )
sizeResults = main.TRUE
- for i in range( main.numCtrls ):
+ for i in range( len( main.activeNodes ) ):
+ node = str( main.activeNodes[i] + 1 )
if size != sizeResponses[ i ]:
sizeResults = main.FALSE
- main.log.error( "ONOS" + str( i + 1 ) +
+ main.log.error( "ONOS" + node +
" expected a size of " + str( size ) +
" for set " + onosSetName +
" but got " + str( sizeResponses[ i ] ) )
@@ -3352,7 +3291,7 @@
onosSet.intersection_update( retainValue.split() )
retainResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestRemove,
name="setTestRetain-" + str( i ),
args=[ onosSetName, retainValue ],
@@ -3367,7 +3306,7 @@
# main.FALSE = action resulted in no change in set
# main.ERROR - Some error in executing the function
retainResults = main.TRUE
- for i in range( main.numCtrls ):
+ for i in range( len( main.activeNodes ) ):
if retainResponses[ i ] == main.TRUE:
# All is well
pass
@@ -3387,7 +3326,7 @@
size = len( onosSet )
getResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestGet,
name="setTestGet-" + str( i ),
args=[ onosSetName ] )
@@ -3397,13 +3336,14 @@
t.join()
getResponses.append( t.result )
getResults = main.TRUE
- for i in range( main.numCtrls ):
+ for i in range( len( main.activeNodes ) ):
+ node = str( main.activeNodes[i] + 1 )
if isinstance( getResponses[ i ], list):
current = set( getResponses[ i ] )
if len( current ) == len( getResponses[ i ] ):
# no repeats
if onosSet != current:
- main.log.error( "ONOS" + str( i + 1 ) +
+ main.log.error( "ONOS" + node +
" has incorrect view" +
" of set " + onosSetName + ":\n" +
str( getResponses[ i ] ) )
@@ -3412,7 +3352,7 @@
getResults = main.FALSE
else:
# error, set is not a set
- main.log.error( "ONOS" + str( i + 1 ) +
+ main.log.error( "ONOS" + node +
" has repeat elements in" +
" set " + onosSetName + ":\n" +
str( getResponses[ i ] ) )
@@ -3421,7 +3361,7 @@
getResults = main.FALSE
sizeResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestSize,
name="setTestSize-" + str( i ),
args=[ onosSetName ] )
@@ -3431,11 +3371,11 @@
t.join()
sizeResponses.append( t.result )
sizeResults = main.TRUE
- for i in range( main.numCtrls ):
+ for i in range( len( main.activeNodes ) ):
+ node = str( main.activeNodes[i] + 1 )
if size != sizeResponses[ i ]:
sizeResults = main.FALSE
- main.log.error( "ONOS" + str( i + 1 ) +
- " expected a size of " +
+ main.log.error( "ONOS" + node + " expected a size of " +
str( size ) + " for set " + onosSetName +
" but got " + str( sizeResponses[ i ] ) )
retainResults = retainResults and getResults and sizeResults
@@ -3449,8 +3389,9 @@
tMapValue = "Testing"
numKeys = 100
putResult = True
- putResponses = main.CLIs[ 0 ].transactionalMapPut( numKeys, tMapValue )
- if len( putResponses ) == 100:
+ node = main.activeNodes[0]
+ putResponses = main.CLIs[node].transactionalMapPut( numKeys, tMapValue )
+ if putResponses and len( putResponses ) == 100:
for i in putResponses:
if putResponses[ i ][ 'value' ] != tMapValue:
putResult = False
@@ -3469,10 +3410,10 @@
getResponses = []
threads = []
valueCheck = True
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].transactionalMapGet,
name="TMap-get-" + str( i ),
- args=[ "Key" + str ( n ) ] )
+ args=[ "Key" + str( n ) ] )
threads.append( t )
t.start()
for t in threads:
@@ -3489,49 +3430,3 @@
actual=getCheck,
onpass="Partitioned Transactional Map get values were correct",
onfail="Partitioned Transactional Map values incorrect" )
-
- main.step( "In-memory Transactional maps put" )
- tMapValue = "Testing"
- numKeys = 100
- putResult = True
- putResponses = main.CLIs[ 0 ].transactionalMapPut( numKeys, tMapValue, inMemory=True )
- if len( putResponses ) == 100:
- for i in putResponses:
- if putResponses[ i ][ 'value' ] != tMapValue:
- putResult = False
- else:
- putResult = False
- if not putResult:
- main.log.debug( "Put response values: " + str( putResponses ) )
- utilities.assert_equals( expect=True,
- actual=putResult,
- onpass="In-Memory Transactional Map put successful",
- onfail="In-Memory Transactional Map put values are incorrect" )
-
- main.step( "In-Memory Transactional maps get" )
- getCheck = True
- for n in range( 1, numKeys + 1 ):
- getResponses = []
- threads = []
- valueCheck = True
- for i in range( main.numCtrls ):
- t = main.Thread( target=main.CLIs[i].transactionalMapGet,
- name="TMap-get-" + str( i ),
- args=[ "Key" + str ( n ) ],
- kwargs={ "inMemory": True } )
- threads.append( t )
- t.start()
- for t in threads:
- t.join()
- getResponses.append( t.result )
- for node in getResponses:
- if node != tMapValue:
- valueCheck = False
- if not valueCheck:
- main.log.warn( "Values for key 'Key" + str( n ) + "' do not match:" )
- main.log.warn( getResponses )
- getCheck = getCheck and valueCheck
- utilities.assert_equals( expect=True,
- actual=getCheck,
- onpass="In-Memory Transactional Map get values were correct",
- onfail="In-Memory Transactional Map values incorrect" )
diff --git a/TestON/tests/HAsingleInstanceRestart/dependencies/Counters.py b/TestON/tests/HAsingleInstanceRestart/dependencies/Counters.py
index 2dc95e1..192b919 100644
--- a/TestON/tests/HAsingleInstanceRestart/dependencies/Counters.py
+++ b/TestON/tests/HAsingleInstanceRestart/dependencies/Counters.py
@@ -1,102 +1,105 @@
-def __init__( self ):
- self.default = ''
+import json
-def consistentCheck():
- """
- Checks that TestON counters are consistent across all nodes.
+class Counters():
- Returns the tuple (onosCounters, consistent)
- - onosCounters is the parsed json output of the counters command on all nodes
- - consistent is main.TRUE if all "TestON" counters are consitent across all
- nodes or main.FALSE
- """
- import json
- try:
- correctResults = main.TRUE
- # Get onos counters results
- onosCountersRaw = []
- threads = []
- for i in range( main.numCtrls ):
- t = main.Thread( target=main.CLIs[i].counters,
- name="counters-" + str( i ) )
- threads.append( t )
- t.start()
- for t in threads:
- t.join()
- onosCountersRaw.append( t.result )
- onosCounters = []
- for i in range( main.numCtrls ):
- try:
- onosCounters.append( json.loads( onosCountersRaw[i] ) )
- except ( ValueError, TypeError ):
- main.log.error( "Could not parse counters response from ONOS" +
- str( i + 1 ) )
- main.log.warn( repr( onosCountersRaw[ i ] ) )
- onosCounters.append( [] )
- return main.FALSE
+ def __init__( self ):
+ self.default = ''
- testCounters = {}
- # make a list of all the "TestON-*" counters in ONOS
- # lookes like a dict whose keys are the name of the ONOS node and values
- # are a list of the counters. I.E.
- # { "ONOS1": [ {"name":"TestON-inMemory","value":56},
- # {"name":"TestON-Partitions","value":56} ]
- # }
- # NOTE: There is an assumtion that all nodes are active
- # based on the above for loops
- for controller in enumerate( onosCounters ):
- for dbType in controller[1]:
- for dbName, items in dbType.iteritems():
- for item in items:
- if 'TestON' in item['name']:
- node = 'ONOS' + str( controller[0] + 1 )
- try:
- testCounters[node].append( item )
- except KeyError:
- testCounters[node] = [ item ]
- # compare the counters on each node
- tmp = [ v == testCounters['ONOS1'] for k, v in testCounters.iteritems() ]
- if all( tmp ):
- consistent = main.TRUE
- else:
- consistent = main.FALSE
- main.log.error( "ONOS nodes have different values for counters:\n" +
- testCounters )
- return ( onosCounters, consistent )
- except Exception:
- main.log.exception( "" )
- main.cleanup()
- main.exit()
+ def consistentCheck( self ):
+ """
+ Checks that TestON counters are consistent across all nodes.
-def counterCheck( counterName, counterValue ):
- """
- Checks that TestON counters are consistent across all nodes and that
- specified counter is in ONOS with the given value
- """
- import json
- correctResults = main.TRUE
- # Get onos counters results and consistentCheck
- onosCounters, consistent = main.Counters.consistentCheck()
- # Check for correct values
- for i in range( main.numCtrls ):
- current = onosCounters[i]
- onosValue = None
+ Returns the tuple (onosCounters, consistent)
+ - onosCounters is the parsed json output of the counters command on all nodes
+ - consistent is main.TRUE if all "TestON" counters are consitent across all
+ nodes or main.FALSE
+ """
try:
- for database in current:
- database = database.values()[0]
- for counter in database:
- if counter.get( 'name' ) == counterName:
- onosValue = counter.get( 'value' )
- break
- except AttributeError, e:
- main.log.error( "ONOS" + str( i + 1 ) + " counters result " +
- "is not as expected" )
- correctResults = main.FALSE
- if onosValue == counterValue:
- main.log.info( counterName + " counter value is correct" )
- else:
- main.log.error( counterName + " counter value is incorrect," +
- " expected value: " + str( counterValue )
- + " current value: " + str( onosValue ) )
- correctResults = main.FALSE
- return consistent and correctResults
+ correctResults = main.TRUE
+ # Get onos counters results
+ onosCountersRaw = []
+ threads = []
+ for i in main.activeNodes:
+ t = main.Thread( target=utilities.retry,
+ name="counters-" + str( i ),
+ args=[ main.CLIs[i].counters, [ None ] ],
+ kwargs= { 'sleep': 5, 'attempts': 5,
+ 'randomTime': True } )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ onosCountersRaw.append( t.result )
+ onosCounters = []
+ for i in range( len( main.activeNodes ) ):
+ try:
+ onosCounters.append( json.loads( onosCountersRaw[i] ) )
+ except ( ValueError, TypeError ):
+ main.log.error( "Could not parse counters response from ONOS" +
+ str( main.activeNodes[i] + 1 ) )
+ main.log.warn( repr( onosCountersRaw[ i ] ) )
+ onosCounters.append( [] )
+
+ testCounters = {}
+ # make a list of all the "TestON-*" counters in ONOS
+ # lookes like a dict whose keys are the name of the ONOS node and values
+ # are a list of the counters. I.E.
+ # { "ONOS1": [ { "name":"TestON-Partitions","value":56} ]
+ # }
+ # NOTE: There is an assumtion that all nodes are active
+ # based on the above for loops
+ for controller in enumerate( onosCounters ):
+ for key, value in controller[1].iteritems():
+ if 'TestON' in key:
+ node = 'ONOS' + str( controller[0] + 1 )
+ try:
+ testCounters[node].append( { key: value } )
+ except KeyError:
+ testCounters[node] = [ { key: value } ]
+ # compare the counters on each node
+ firstV = testCounters.values()[0]
+ tmp = [ v == firstV for k, v in testCounters.iteritems() ]
+ if all( tmp ):
+ consistent = main.TRUE
+ else:
+ consistent = main.FALSE
+ main.log.error( "ONOS nodes have different values for counters:\n" +
+ testCounters )
+ return ( onosCounters, consistent )
+ except Exception:
+ main.log.exception( "" )
+ main.cleanup()
+ main.exit()
+
+ def counterCheck( self, counterName, counterValue ):
+ """
+ Checks that TestON counters are consistent across all nodes and that
+ specified counter is in ONOS with the given value
+ """
+ try:
+ correctResults = main.TRUE
+ # Get onos counters results and consistentCheck
+ onosCounters, consistent = self.consistentCheck()
+ # Check for correct values
+ for i in range( len( main.activeNodes ) ):
+ current = onosCounters[i]
+ onosValue = None
+ try:
+ onosValue = current.get( counterName )
+ except AttributeError, e:
+ node = str( main.activeNodes[i] + 1 )
+ main.log.error( "ONOS" + node + " counters result " +
+ "is not as expected" )
+ correctResults = main.FALSE
+ if onosValue == counterValue:
+ main.log.info( counterName + " counter value is correct" )
+ else:
+ main.log.error( counterName + " counter value is incorrect," +
+ " expected value: " + str( counterValue )
+ + " current value: " + str( onosValue ) )
+ correctResults = main.FALSE
+ return consistent and correctResults
+ except Exception:
+ main.log.exception( "" )
+ main.cleanup()
+ main.exit()
diff --git a/TestON/tests/HAstopNodes/HAstopNodes.params b/TestON/tests/HAstopNodes/HAstopNodes.params
index fd10739..8b68d54 100644
--- a/TestON/tests/HAstopNodes/HAstopNodes.params
+++ b/TestON/tests/HAstopNodes/HAstopNodes.params
@@ -20,9 +20,7 @@
#CASE16: Install Distributed Primitives app
#CASE17: Check for basic functionality with distributed primitives
<testcases>1,2,8,3,4,5,14,16,17,[61,8,7,4,15,17,62],8,7,4,15,17,9,8,4,10,8,4,11,8,4,12,8,4,13</testcases>
- <imports>
- <path> /home/admin/OnosSystemTest/TestON/tests/HAstopNodes/dependencies/ </path>
- </imports>
+
<ENV>
<cellName>HA</cellName>
<appString>drivers,openflow,proxyarp,mobility</appString>
diff --git a/TestON/tests/HAstopNodes/HAstopNodes.py b/TestON/tests/HAstopNodes/HAstopNodes.py
index 21ec7f3..45b4802 100644
--- a/TestON/tests/HAstopNodes/HAstopNodes.py
+++ b/TestON/tests/HAstopNodes/HAstopNodes.py
@@ -51,6 +51,7 @@
import imp
import pexpect
import time
+ import json
main.log.info( "ONOS HA test: Stop a minority of ONOS nodes - " +
"initialization" )
main.case( "Setting up test environment" )
@@ -77,6 +78,11 @@
global ONOS5Port
global ONOS6Port
global ONOS7Port
+ # These are for csv plotting in jenkins
+ global labels
+ global data
+ labels = []
+ data = []
# FIXME: just get controller port from params?
# TODO: do we really need all these?
@@ -89,11 +95,8 @@
ONOS7Port = main.params[ 'CTRL' ][ 'port7' ]
try:
- fileName = "Counters"
- # TODO: Maybe make a library folder somewhere?
- path = main.params[ 'imports' ][ 'path' ]
- main.Counters = imp.load_source( fileName,
- path + fileName + ".py" )
+ from tests.HAsanity.dependencies.Counters import Counters
+ main.Counters = Counters()
except Exception as e:
main.log.exception( e )
main.cleanup()
@@ -273,7 +276,6 @@
port=main.params[ 'MNtcpdump' ][ 'port' ] )
main.step( "App Ids check" )
- time.sleep(60)
appCheck = main.TRUE
threads = []
for i in main.activeNodes:
@@ -294,6 +296,48 @@
onpass="App Ids seem to be correct",
onfail="Something is wrong with app Ids" )
+ main.step( "Checking ONOS nodes" )
+ nodesOutput = []
+ nodeResults = main.TRUE
+ threads = []
+ for i in main.activeNodes:
+ t = main.Thread( target=main.CLIs[i].nodes,
+ name="nodes-" + str( i ),
+ args=[ ] )
+ threads.append( t )
+ t.start()
+
+ for t in threads:
+ t.join()
+ nodesOutput.append( t.result )
+ ips = [ main.nodes[node].ip_address for node in main.activeNodes ]
+ ips.sort()
+ for i in nodesOutput:
+ try:
+ current = json.loads( i )
+ activeIps = []
+ currentResult = main.FALSE
+ for node in current:
+ if node['state'] == 'READY':
+ activeIps.append( node['ip'] )
+ activeIps.sort()
+ if ips == activeIps:
+ currentResult = main.TRUE
+ except ( ValueError, TypeError ):
+ main.log.error( "Error parsing nodes output" )
+ main.log.warn( repr( i ) )
+ currentResult = main.FALSE
+ nodeResults = nodeResults and currentResult
+ utilities.assert_equals( expect=main.TRUE, actual=nodeResults,
+ onpass="Nodes check successful",
+ onfail="Nodes check NOT successful" )
+
+ if not nodeResults:
+ for cli in main.CLIs:
+ main.log.debug( "{} components not ACTIVE: \n{}".format(
+ cli.name,
+ cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
+
if cliResults == main.FALSE:
main.log.error( "Failed to start ONOS, stopping test" )
main.cleanup()
@@ -1309,20 +1353,26 @@
main.log.warn( title )
# get all intent keys in the cluster
keys = []
- for nodeStr in ONOSIntents:
- node = json.loads( nodeStr )
- for intent in node:
- keys.append( intent.get( 'id' ) )
- keys = set( keys )
- for key in keys:
- row = "%-13s" % key
+ try:
+ # Get the set of all intent keys
for nodeStr in ONOSIntents:
node = json.loads( nodeStr )
for intent in node:
- if intent.get( 'id', "Error" ) == key:
- row += "%-15s" % intent.get( 'state' )
- main.log.warn( row )
- # End table view
+ keys.append( intent.get( 'id' ) )
+ keys = set( keys )
+ # For each intent key, print the state on each node
+ for key in keys:
+ row = "%-13s" % key
+ for nodeStr in ONOSIntents:
+ node = json.loads( nodeStr )
+ for intent in node:
+ if intent.get( 'id', "Error" ) == key:
+ row += "%-15s" % intent.get( 'state' )
+ main.log.warn( row )
+ # End of intent state table
+ except ValueError as e:
+ main.log.exception( e )
+ main.log.debug( "nodeStr was: " + repr( nodeStr ) )
if intentsResults and not consistentIntents:
# print the json objects
@@ -1804,6 +1854,15 @@
main.log.debug( main.CLIs[node].leaders( jsonFormat=False ) )
main.log.debug( main.CLIs[node].partitions( jsonFormat=False ) )
+ main.step( "Rerun for election on the node(s) that were killed" )
+ runResults = main.TRUE
+ for i in main.kill:
+ runResults = runResults and\
+ main.CLIs[i].electionTestRun()
+ utilities.assert_equals( expect=main.TRUE, actual=runResults,
+ onpass="ONOS nodes reran for election topic",
+ onfail="Errror rerunning for election" )
+
def CASE7( self, main ):
"""
Check state after ONOS failure
@@ -1843,6 +1902,7 @@
main.step( "Read device roles from ONOS" )
ONOSMastership = []
+ mastershipCheck = main.FALSE
consistentMastership = True
rolesResults = True
threads = []
@@ -1995,45 +2055,50 @@
# NOTE: this requires case 5 to pass for intentState to be set.
# maybe we should stop the test if that fails?
sameIntents = main.FALSE
- if intentState and intentState == ONOSIntents[ 0 ]:
- sameIntents = main.TRUE
- main.log.info( "Intents are consistent with before failure" )
- # TODO: possibly the states have changed? we may need to figure out
- # what the acceptable states are
- elif len( intentState ) == len( ONOSIntents[ 0 ] ):
- sameIntents = main.TRUE
- try:
- before = json.loads( intentState )
- after = json.loads( ONOSIntents[ 0 ] )
- for intent in before:
- if intent not in after:
- sameIntents = main.FALSE
- main.log.debug( "Intent is not currently in ONOS " +
- "(at least in the same form):" )
- main.log.debug( json.dumps( intent ) )
- except ( ValueError, TypeError ):
- main.log.exception( "Exception printing intents" )
- main.log.debug( repr( ONOSIntents[0] ) )
- main.log.debug( repr( intentState ) )
- if sameIntents == main.FALSE:
- try:
- main.log.debug( "ONOS intents before: " )
- main.log.debug( json.dumps( json.loads( intentState ),
- sort_keys=True, indent=4,
- separators=( ',', ': ' ) ) )
- main.log.debug( "Current ONOS intents: " )
- main.log.debug( json.dumps( json.loads( ONOSIntents[ 0 ] ),
- sort_keys=True, indent=4,
- separators=( ',', ': ' ) ) )
- except ( ValueError, TypeError ):
- main.log.exception( "Exception printing intents" )
- main.log.debug( repr( ONOSIntents[0] ) )
- main.log.debug( repr( intentState ) )
- utilities.assert_equals(
- expect=main.TRUE,
- actual=sameIntents,
- onpass="Intents are consistent with before failure",
- onfail="The Intents changed during failure" )
+ try:
+ intentState
+ except NameError:
+ main.log.warn( "No previous intent state was saved" )
+ else:
+ if intentState and intentState == ONOSIntents[ 0 ]:
+ sameIntents = main.TRUE
+ main.log.info( "Intents are consistent with before failure" )
+ # TODO: possibly the states have changed? we may need to figure out
+ # what the acceptable states are
+ elif len( intentState ) == len( ONOSIntents[ 0 ] ):
+ sameIntents = main.TRUE
+ try:
+ before = json.loads( intentState )
+ after = json.loads( ONOSIntents[ 0 ] )
+ for intent in before:
+ if intent not in after:
+ sameIntents = main.FALSE
+ main.log.debug( "Intent is not currently in ONOS " +
+ "(at least in the same form):" )
+ main.log.debug( json.dumps( intent ) )
+ except ( ValueError, TypeError ):
+ main.log.exception( "Exception printing intents" )
+ main.log.debug( repr( ONOSIntents[0] ) )
+ main.log.debug( repr( intentState ) )
+ if sameIntents == main.FALSE:
+ try:
+ main.log.debug( "ONOS intents before: " )
+ main.log.debug( json.dumps( json.loads( intentState ),
+ sort_keys=True, indent=4,
+ separators=( ',', ': ' ) ) )
+ main.log.debug( "Current ONOS intents: " )
+ main.log.debug( json.dumps( json.loads( ONOSIntents[ 0 ] ),
+ sort_keys=True, indent=4,
+ separators=( ',', ': ' ) ) )
+ except ( ValueError, TypeError ):
+ main.log.exception( "Exception printing intents" )
+ main.log.debug( repr( ONOSIntents[0] ) )
+ main.log.debug( repr( intentState ) )
+ utilities.assert_equals(
+ expect=main.TRUE,
+ actual=sameIntents,
+ onpass="Intents are consistent with before failure",
+ onfail="The Intents changed during failure" )
intentCheck = intentCheck and sameIntents
main.step( "Get the OF Table entries and compare to before " +
@@ -2045,7 +2110,6 @@
FlowTables = FlowTables and main.Mininet1.flowTableComp( flows[i], tmpFlows )
if FlowTables == main.FALSE:
main.log.warn( "Differences in flow table for switch: s{}".format( i + 1 ) )
-
utilities.assert_equals(
expect=main.TRUE,
actual=FlowTables,
@@ -2390,8 +2454,9 @@
hostsResults = hostsResults and currentHostsResult
hostAttachmentResults = hostAttachmentResults and\
hostAttachment
- topoResult = devicesResults and linksResults and\
- hostsResults and hostAttachmentResults
+ topoResult = ( devicesResults and linksResults
+ and hostsResults and ipResult and
+ hostAttachmentResults )
utilities.assert_equals( expect=True,
actual=topoResult,
onpass="ONOS topology matches Mininet",
@@ -2455,7 +2520,6 @@
controllerStr +
" is inconsistent with ONOS1" )
consistentClustersResult = main.FALSE
-
else:
main.log.error( "Error in getting dataplane clusters " +
"from ONOS" + controllerStr )
@@ -2476,6 +2540,7 @@
except ( ValueError, TypeError ):
main.log.exception( "Error parsing clusters[0]: " +
repr( clusters[0] ) )
+ numClusters = "ERROR"
clusterResults = main.FALSE
if numClusters == 1:
clusterResults = main.TRUE
@@ -2513,6 +2578,13 @@
onpass="Link are correct",
onfail="Links are incorrect" )
+ main.step( "Hosts are correct" )
+ utilities.assert_equals(
+ expect=main.TRUE,
+ actual=hostsResults,
+ onpass="Hosts are correct",
+ onfail="Hosts are incorrect" )
+
# FIXME: move this to an ONOS state case
main.step( "Checking ONOS nodes" )
nodesOutput = []
@@ -2549,6 +2621,11 @@
utilities.assert_equals( expect=main.TRUE, actual=nodeResults,
onpass="Nodes check successful",
onfail="Nodes check NOT successful" )
+ if not nodeResults:
+ for cli in main.CLIs:
+ main.log.debug( "{} components not ACTIVE: \n{}".format(
+ cli.name,
+ cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
def CASE9( self, main ):
"""
@@ -2838,16 +2915,10 @@
description = "Check that Leadership Election is still functional"
main.case( description )
- # NOTE: Need to re-run since being a canidate is not persistant
- # TODO: add check for "Command not found:" in the driver, this
- # means the election test app isn't loaded
+ # NOTE: Need to re-run after restarts since being a canidate is not persistant
- oldLeaders = [] # leaders by node before withdrawl from candidates
- newLeaders = [] # leaders by node after withdrawl from candidates
- oldAllCandidates = [] # list of lists of each nodes' candidates before
- newAllCandidates = [] # list of lists of each nodes' candidates after
- oldCandidates = [] # list of candidates from node 0 before withdrawl
- newCandidates = [] # list of candidates from node 0 after withdrawl
+ oldLeaders = [] # list of lists of each nodes' candidates before
+ newLeaders = [] # list of lists of each nodes' candidates after
oldLeader = '' # the old leader from oldLeaders, None if not same
newLeader = '' # the new leaders fron newLoeaders, None if not same
oldLeaderCLI = None # the CLI of the old leader used for re-electing
@@ -2873,43 +2944,36 @@
main.skipCase()
main.step( "Check that each node shows the same leader and candidates" )
- sameResult = main.TRUE
- failMessage = "Nodes have different leaders"
- for i in main.activeNodes:
- cli = main.CLIs[i]
- node = cli.specificLeaderCandidate( 'org.onosproject.election' )
- oldAllCandidates.append( node )
- if node:
- oldLeaders.append( node[ 0 ] )
- else:
- oldLeaders.append( None )
- oldCandidates = oldAllCandidates[ 0 ]
- if oldCandidates is None:
- oldCandidates = [ None ]
-
- # Check that each node has the same leader. Defines oldLeader
- if len( set( oldLeaders ) ) != 1:
- sameResult = main.FALSE
- main.log.error( "More than one leader present:" + str( oldLeaders ) )
- oldLeader = None
+ failMessage = "Nodes have different leaderboards"
+ def consistentLeaderboards( nodes ):
+ TOPIC = 'org.onosproject.election'
+ # FIXME: use threads
+ #FIXME: should we retry outside the function?
+ for n in range( 5 ): # Retry in case election is still happening
+ leaderList = []
+ # Get all leaderboards
+ for cli in nodes:
+ leaderList.append( cli.specificLeaderCandidate( TOPIC ) )
+ # Compare leaderboards
+ result = all( i == leaderList[0] for i in leaderList ) and\
+ leaderList is not None
+ main.log.debug( leaderList )
+ main.log.warn( result )
+ if result:
+ return ( result, leaderList )
+ time.sleep(5) #TODO: paramerterize
+ main.log.error( "Inconsistent leaderboards:" + str( leaderList ) )
+ activeCLIs = [ main.CLIs[i] for i in main.activeNodes ]
+ sameResult, oldLeaders = consistentLeaderboards( activeCLIs )
+ if sameResult:
+ oldLeader = oldLeaders[ 0 ][ 0 ]
+ main.log.warn( oldLeader )
else:
- oldLeader = oldLeaders[ 0 ]
-
- # Check that each node's candidate list is the same
- candidateDiscrepancy = False # Boolean of candidate mismatches
- for candidates in oldAllCandidates:
- if candidates is None:
- main.log.warn( "Error getting candidates" )
- candidates = [ None ]
- if set( candidates ) != set( oldCandidates ):
- sameResult = main.FALSE
- candidateDiscrepancy = True
- if candidateDiscrepancy:
- failMessage += " and candidates"
+ oldLeader = None
utilities.assert_equals(
- expect=main.TRUE,
+ expect=True,
actual=sameResult,
- onpass="Leadership is consistent for the election topic",
+ onpass="Leaderboards are consistent for the election topic",
onfail=failMessage )
main.step( "Find current leader and withdraw" )
@@ -2934,56 +2998,30 @@
onfail="Node was not withdrawn from election" )
main.step( "Check that a new node was elected leader" )
- # FIXME: use threads
- newLeaderResult = main.TRUE
failMessage = "Nodes have different leaders"
-
# Get new leaders and candidates
- for i in main.activeNodes:
- cli = main.CLIs[i]
- node = cli.specificLeaderCandidate( 'org.onosproject.election' )
- # elections might no have finished yet
- if node[ 0 ] == 'none' and not expectNoLeader:
- main.log.info( "Node has no leader, waiting 5 seconds to be " +
- "sure elections are complete." )
- time.sleep(5)
- node = cli.specificLeaderCandidate( 'org.onosproject.election' )
- # election still isn't done or there is a problem
- if node[ 0 ] == 'none':
- main.log.error( "No leader was elected on at least 1 node" )
- newLeaderResult = main.FALSE
- newAllCandidates.append( node )
- newLeaders.append( node[ 0 ] )
- newCandidates = newAllCandidates[ 0 ]
-
- # Check that each node has the same leader. Defines newLeader
- if len( set( newLeaders ) ) != 1:
- newLeaderResult = main.FALSE
- main.log.error( "Nodes have different leaders: " +
- str( newLeaders ) )
- newLeader = None
+ newLeaderResult, newLeaders = consistentLeaderboards( activeCLIs )
+ if newLeaders[ 0 ][ 0 ] == 'none':
+ main.log.error( "No leader was elected on at least 1 node" )
+ if not expectNoLeader:
+ newLeaderResult = False
+ if newLeaderResult:
+ newLeader = newLeaders[ 0 ][ 0 ]
else:
- newLeader = newLeaders[ 0 ]
-
- # Check that each node's candidate list is the same
- for candidates in newAllCandidates:
- if set( candidates ) != set( newCandidates ):
- newLeaderResult = main.FALSE
- main.log.error( "Discrepancy in candidate lists detected" )
+ newLeader = None
# Check that the new leader is not the older leader, which was withdrawn
if newLeader == oldLeader:
- newLeaderResult = main.FALSE
+ newLeaderResult = False
main.log.error( "All nodes still see old leader: " + str( oldLeader ) +
" as the current leader" )
-
utilities.assert_equals(
- expect=main.TRUE,
+ expect=True,
actual=newLeaderResult,
onpass="Leadership election passed",
onfail="Something went wrong with Leadership election" )
- main.step( "Check that that new leader was the candidate of old leader")
+ main.step( "Check that that new leader was the candidate of old leader" )
# candidates[ 2 ] should become the top candidate after withdrawl
correctCandidateResult = main.TRUE
if expectNoLeader:
@@ -2993,12 +3031,17 @@
else:
main.log.info( "Expected no leader, got: " + str( newLeader ) )
correctCandidateResult = main.FALSE
- elif len( oldCandidates ) >= 3 and newLeader != oldCandidates[ 2 ]:
- correctCandidateResult = main.FALSE
- main.log.error( "Candidate {} was elected. {} should have had priority.".format(
- newLeader, oldCandidates[ 2 ] ) )
+ elif len( oldLeaders[0] ) >= 3:
+ if newLeader == oldLeaders[ 0 ][ 2 ]:
+ # correct leader was elected
+ correctCandidateResult = main.TRUE
+ else:
+ correctCandidateResult = main.FALSE
+ main.log.error( "Candidate {} was elected. {} should have had priority.".format(
+ newLeader, oldLeaders[ 0 ][ 2 ] ) )
else:
main.log.warn( "Could not determine who should be the correct leader" )
+ main.log.debug( oldLeaders[ 0 ] )
correctCandidateResult = main.FALSE
utilities.assert_equals(
expect=main.TRUE,
@@ -3018,55 +3061,23 @@
actual=runResult,
onpass="App re-ran for election",
onfail="App failed to run for election" )
+
main.step(
"Check that oldLeader is a candidate, and leader if only 1 node" )
# verify leader didn't just change
- positionResult = main.TRUE
- # Get new leaders and candidates, wait if oldLeader is not a candidate yet
-
- # Reset and reuse the new candidate and leaders lists
- newAllCandidates = []
- newCandidates = []
- newLeaders = []
- for i in main.activeNodes:
- cli = main.CLIs[i]
- node = cli.specificLeaderCandidate( 'org.onosproject.election' )
- if oldLeader not in node: # election might no have finished yet
- main.log.info( "Old Leader not elected, waiting 5 seconds to " +
- "be sure elections are complete" )
- time.sleep(5)
- node = cli.specificLeaderCandidate( 'org.onosproject.election' )
- if oldLeader not in node: # election still isn't done, errors
- main.log.error(
- "Old leader was not elected on at least one node" )
- positionResult = main.FALSE
- newAllCandidates.append( node )
- newLeaders.append( node[ 0 ] )
- newCandidates = newAllCandidates[ 0 ]
-
- # Check that each node has the same leader. Defines newLeader
- if len( set( newLeaders ) ) != 1:
- positionResult = main.FALSE
- main.log.error( "Nodes have different leaders: " +
- str( newLeaders ) )
- newLeader = None
- else:
- newLeader = newLeaders[ 0 ]
-
- # Check that each node's candidate list is the same
- for candidates in newAllCandidates:
- if set( candidates ) != set( newCandidates ):
- newLeaderResult = main.FALSE
- main.log.error( "Discrepancy in candidate lists detected" )
+ # Get new leaders and candidates
+ reRunLeaders = []
+ time.sleep( 5 ) # Paremterize
+ positionResult, reRunLeaders = consistentLeaderboards( activeCLIs )
# Check that the re-elected node is last on the candidate List
- if oldLeader != newCandidates[ -1 ]:
- main.log.error( "Old Leader (" + str( oldLeader ) + ") not in the proper position " +
- str( newCandidates ) )
+ if oldLeader != reRunLeaders[ 0 ][ -1 ]:
+ main.log.error( "Old Leader ({}) not in the proper position: {} ".format( str( oldLeader),
+ str( reRunLeaders[ 0 ] ) ) )
positionResult = main.FALSE
utilities.assert_equals(
- expect=main.TRUE,
+ expect=True,
actual=positionResult,
onpass="Old leader successfully re-ran for election",
onfail="Something went wrong with Leadership election after " +
@@ -3085,15 +3096,11 @@
# Variables for the distributed primitives tests
global pCounterName
- global iCounterName
global pCounterValue
- global iCounterValue
global onosSet
global onosSetName
pCounterName = "TestON-Partitions"
- iCounterName = "TestON-inMemory"
pCounterValue = 0
- iCounterValue = 0
onosSet = set([])
onosSetName = "TestON-set"
@@ -3120,7 +3127,6 @@
assert main.CLIs, "main.CLIs not defined"
assert main.nodes, "main.nodes not defined"
assert pCounterName, "pCounterName not defined"
- assert iCounterName, "iCounterName not defined"
assert onosSetName, "onosSetName not defined"
# NOTE: assert fails if value is 0/None/Empty/False
try:
@@ -3129,11 +3135,6 @@
main.log.error( "pCounterValue not defined, setting to 0" )
pCounterValue = 0
try:
- iCounterValue
- except NameError:
- main.log.error( "iCounterValue not defined, setting to 0" )
- iCounterValue = 0
- try:
onosSet
except NameError:
main.log.error( "onosSet not defined, setting to empty Set" )
@@ -3317,193 +3318,6 @@
onpass="Added counters are correct",
onfail="Added counters are incorrect" )
- # In-Memory counters
- main.step( "Increment and get an in-memory counter on each node" )
- iCounters = []
- addedIValues = []
- threads = []
- for i in main.activeNodes:
- t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
- name="icounterIncrement-" + str( i ),
- args=[ iCounterName ],
- kwargs={ "inMemory": True } )
- iCounterValue += 1
- addedIValues.append( iCounterValue )
- threads.append( t )
- t.start()
-
- for t in threads:
- t.join()
- iCounters.append( t.result )
- # Check that counter incremented numController times
- iCounterResults = True
- for i in addedIValues:
- tmpResult = i in iCounters
- iCounterResults = iCounterResults and tmpResult
- if not tmpResult:
- main.log.error( str( i ) + " is not in the in-memory "
- "counter incremented results" )
- utilities.assert_equals( expect=True,
- actual=iCounterResults,
- onpass="In-memory counter incremented",
- onfail="Error incrementing in-memory" +
- " counter" )
-
- main.step( "Get then Increment a in-memory counter on each node" )
- iCounters = []
- threads = []
- addedIValues = []
- for i in main.activeNodes:
- t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
- name="counterGetAndAdd-" + str( i ),
- args=[ iCounterName ],
- kwargs={ "inMemory": True } )
- addedIValues.append( iCounterValue )
- iCounterValue += 1
- threads.append( t )
- t.start()
-
- for t in threads:
- t.join()
- iCounters.append( t.result )
- # Check that counter incremented numController times
- iCounterResults = True
- for i in addedIValues:
- tmpResult = i in iCounters
- iCounterResults = iCounterResults and tmpResult
- if not tmpResult:
- main.log.error( str( i ) + " is not in in-memory "
- "counter incremented results" )
- utilities.assert_equals( expect=True,
- actual=iCounterResults,
- onpass="In-memory counter incremented",
- onfail="Error incrementing in-memory" +
- " counter" )
-
- main.step( "Counters we added have the correct values" )
- incrementCheck = main.Counters.counterCheck( iCounterName, iCounterValue )
- utilities.assert_equals( expect=main.TRUE,
- actual=incrementCheck,
- onpass="Added counters are correct",
- onfail="Added counters are incorrect" )
-
- main.step( "Add -8 to then get a in-memory counter on each node" )
- iCounters = []
- threads = []
- addedIValues = []
- for i in main.activeNodes:
- t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
- name="counterIncrement-" + str( i ),
- args=[ iCounterName ],
- kwargs={ "delta": -8, "inMemory": True } )
- iCounterValue += -8
- addedIValues.append( iCounterValue )
- threads.append( t )
- t.start()
-
- for t in threads:
- t.join()
- iCounters.append( t.result )
- # Check that counter incremented numController times
- iCounterResults = True
- for i in addedIValues:
- tmpResult = i in iCounters
- iCounterResults = iCounterResults and tmpResult
- if not tmpResult:
- main.log.error( str( i ) + " is not in in-memory "
- "counter incremented results" )
- utilities.assert_equals( expect=True,
- actual=pCounterResults,
- onpass="In-memory counter incremented",
- onfail="Error incrementing in-memory" +
- " counter" )
-
- main.step( "Add 5 to then get a in-memory counter on each node" )
- iCounters = []
- threads = []
- addedIValues = []
- for i in main.activeNodes:
- t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
- name="counterIncrement-" + str( i ),
- args=[ iCounterName ],
- kwargs={ "delta": 5, "inMemory": True } )
- iCounterValue += 5
- addedIValues.append( iCounterValue )
- threads.append( t )
- t.start()
-
- for t in threads:
- t.join()
- iCounters.append( t.result )
- # Check that counter incremented numController times
- iCounterResults = True
- for i in addedIValues:
- tmpResult = i in iCounters
- iCounterResults = iCounterResults and tmpResult
- if not tmpResult:
- main.log.error( str( i ) + " is not in in-memory "
- "counter incremented results" )
- utilities.assert_equals( expect=True,
- actual=pCounterResults,
- onpass="In-memory counter incremented",
- onfail="Error incrementing in-memory" +
- " counter" )
-
- main.step( "Get then add 5 to a in-memory counter on each node" )
- iCounters = []
- threads = []
- addedIValues = []
- for i in main.activeNodes:
- t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
- name="counterIncrement-" + str( i ),
- args=[ iCounterName ],
- kwargs={ "delta": 5, "inMemory": True } )
- addedIValues.append( iCounterValue )
- iCounterValue += 5
- threads.append( t )
- t.start()
-
- for t in threads:
- t.join()
- iCounters.append( t.result )
- # Check that counter incremented numController times
- iCounterResults = True
- for i in addedIValues:
- tmpResult = i in iCounters
- iCounterResults = iCounterResults and tmpResult
- if not tmpResult:
- main.log.error( str( i ) + " is not in in-memory "
- "counter incremented results" )
- utilities.assert_equals( expect=True,
- actual=iCounterResults,
- onpass="In-memory counter incremented",
- onfail="Error incrementing in-memory" +
- " counter" )
-
- main.step( "Counters we added have the correct values" )
- incrementCheck = main.Counters.counterCheck( iCounterName, iCounterValue )
- utilities.assert_equals( expect=main.TRUE,
- actual=incrementCheck,
- onpass="Added counters are correct",
- onfail="Added counters are incorrect" )
-
- main.step( "Check counters are consistant across nodes" )
- onosCounters, consistentCounterResults = main.Counters.consistentCheck()
- utilities.assert_equals( expect=main.TRUE,
- actual=consistentCounterResults,
- onpass="ONOS counters are consistent " +
- "across nodes",
- onfail="ONOS Counters are inconsistent " +
- "across nodes" )
-
- main.step( "Counters we added have the correct values" )
- incrementCheck = main.Counters.counterCheck( iCounterName, iCounterValue )
- incrementCheck = incrementCheck and \
- main.Counters.counterCheck( iCounterName, iCounterValue )
- utilities.assert_equals( expect=main.TRUE,
- actual=incrementCheck,
- onpass="Added counters are correct",
- onfail="Added counters are incorrect" )
# DISTRIBUTED SETS
main.step( "Distributed Set get" )
size = len( onosSet )
@@ -4453,50 +4267,3 @@
actual=getCheck,
onpass="Partitioned Transactional Map get values were correct",
onfail="Partitioned Transactional Map values incorrect" )
-
- main.step( "In-memory Transactional maps put" )
- tMapValue = "Testing"
- numKeys = 100
- putResult = True
- node = main.activeNodes[0]
- putResponses = main.CLIs[node].transactionalMapPut( numKeys, tMapValue, inMemory=True )
- if len( putResponses ) == 100:
- for i in putResponses:
- if putResponses[ i ][ 'value' ] != tMapValue:
- putResult = False
- else:
- putResult = False
- if not putResult:
- main.log.debug( "Put response values: " + str( putResponses ) )
- utilities.assert_equals( expect=True,
- actual=putResult,
- onpass="In-Memory Transactional Map put successful",
- onfail="In-Memory Transactional Map put values are incorrect" )
-
- main.step( "In-Memory Transactional maps get" )
- getCheck = True
- for n in range( 1, numKeys + 1 ):
- getResponses = []
- threads = []
- valueCheck = True
- for i in main.activeNodes:
- t = main.Thread( target=main.CLIs[i].transactionalMapGet,
- name="TMap-get-" + str( i ),
- args=[ "Key" + str( n ) ],
- kwargs={ "inMemory": True } )
- threads.append( t )
- t.start()
- for t in threads:
- t.join()
- getResponses.append( t.result )
- for node in getResponses:
- if node != tMapValue:
- valueCheck = False
- if not valueCheck:
- main.log.warn( "Values for key 'Key" + str( n ) + "' do not match:" )
- main.log.warn( getResponses )
- getCheck = getCheck and valueCheck
- utilities.assert_equals( expect=True,
- actual=getCheck,
- onpass="In-Memory Transactional Map get values were correct",
- onfail="In-Memory Transactional Map values incorrect" )
diff --git a/TestON/tests/HAstopNodes/dependencies/Counters.py b/TestON/tests/HAstopNodes/dependencies/Counters.py
index f3833eb..192b919 100644
--- a/TestON/tests/HAstopNodes/dependencies/Counters.py
+++ b/TestON/tests/HAstopNodes/dependencies/Counters.py
@@ -1,104 +1,105 @@
-def __init__( self ):
- self.default = ''
+import json
-def consistentCheck():
- """
- Checks that TestON counters are consistent across all nodes.
+class Counters():
- Returns the tuple (onosCounters, consistent)
- - onosCounters is the parsed json output of the counters command on all nodes
- - consistent is main.TRUE if all "TestON" counters are consitent across all
- nodes or main.FALSE
- """
- import json
- try:
- correctResults = main.TRUE
- # Get onos counters results
- onosCountersRaw = []
- threads = []
- for i in main.activeNodes:
- t = main.Thread( target=main.CLIs[i].counters,
- name="counters-" + str( i ) )
- threads.append( t )
- t.start()
- for t in threads:
- t.join()
- onosCountersRaw.append( t.result )
- onosCounters = []
- for i in range( len( main.activeNodes ) ):
- try:
- onosCounters.append( json.loads( onosCountersRaw[i] ) )
- except ( ValueError, TypeError ):
- main.log.error( "Could not parse counters response from ONOS" +
- str( main.activeNodes[i] + 1 ) )
- main.log.warn( repr( onosCountersRaw[ i ] ) )
- onosCounters.append( [] )
- return main.FALSE
+ def __init__( self ):
+ self.default = ''
- testCounters = {}
- # make a list of all the "TestON-*" counters in ONOS
- # lookes like a dict whose keys are the name of the ONOS node and values
- # are a list of the counters. I.E.
- # { "ONOS1": [ {"name":"TestON-inMemory","value":56},
- # {"name":"TestON-Partitions","value":56} ]
- # }
- # NOTE: There is an assumtion that all nodes are active
- # based on the above for loops
- for controller in enumerate( onosCounters ):
- for dbType in controller[1]:
- for dbName, items in dbType.iteritems():
- for item in items:
- if 'TestON' in item['name']:
- node = 'ONOS' + str( main.activeNodes[ controller[0] ] + 1 )
- try:
- testCounters[node].append( item )
- except KeyError:
- testCounters[node] = [ item ]
- # compare the counters on each node
- firstV = testCounters.values()[0]
- tmp = [ v == firstV for k, v in testCounters.iteritems() ]
- if all( tmp ):
- consistent = main.TRUE
- else:
- consistent = main.FALSE
- main.log.error( "ONOS nodes have different values for counters:\n" +
- testCounters )
- return ( onosCounters, consistent )
- except Exception:
- main.log.exception( "" )
- main.cleanup()
- main.exit()
+ def consistentCheck( self ):
+ """
+ Checks that TestON counters are consistent across all nodes.
-def counterCheck( counterName, counterValue ):
- """
- Checks that TestON counters are consistent across all nodes and that
- specified counter is in ONOS with the given value
- """
- import json
- correctResults = main.TRUE
- # Get onos counters results and consistentCheck
- onosCounters, consistent = main.Counters.consistentCheck()
- # Check for correct values
- for i in range( len( main.activeNodes ) ):
- current = onosCounters[i]
- onosValue = None
+ Returns the tuple (onosCounters, consistent)
+ - onosCounters is the parsed json output of the counters command on all nodes
+ - consistent is main.TRUE if all "TestON" counters are consistent across all
+ nodes or main.FALSE
+ """
try:
- for database in current:
- database = database.values()[0]
- for counter in database:
- if counter.get( 'name' ) == counterName:
- onosValue = counter.get( 'value' )
- break
- except AttributeError, e:
- node = str( main.activeNodes[i] + 1 )
- main.log.error( "ONOS" + node + " counters result " +
- "is not as expected" )
- correctResults = main.FALSE
- if onosValue == counterValue:
- main.log.info( counterName + " counter value is correct" )
- else:
- main.log.error( counterName + " counter value is incorrect," +
- " expected value: " + str( counterValue )
- + " current value: " + str( onosValue ) )
- correctResults = main.FALSE
- return consistent and correctResults
+ correctResults = main.TRUE
+ # Get onos counters results
+ onosCountersRaw = []
+ threads = []
+ for i in main.activeNodes:
+ t = main.Thread( target=utilities.retry,
+ name="counters-" + str( i ),
+ args=[ main.CLIs[i].counters, [ None ] ],
+ kwargs= { 'sleep': 5, 'attempts': 5,
+ 'randomTime': True } )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ onosCountersRaw.append( t.result )
+ onosCounters = []
+ for i in range( len( main.activeNodes ) ):
+ try:
+ onosCounters.append( json.loads( onosCountersRaw[i] ) )
+ except ( ValueError, TypeError ):
+ main.log.error( "Could not parse counters response from ONOS" +
+ str( main.activeNodes[i] + 1 ) )
+ main.log.warn( repr( onosCountersRaw[ i ] ) )
+ onosCounters.append( [] )
+
+ testCounters = {}
+ # make a list of all the "TestON-*" counters in ONOS
+ # looks like a dict whose keys are the name of the ONOS node and values
+ # are a list of the counters. I.E.
+ # { "ONOS1": [ { "name":"TestON-Partitions","value":56} ]
+ # }
+ # NOTE: There is an assumption that all nodes are active
+ # based on the above for loops
+ for controller in enumerate( onosCounters ):
+ for key, value in controller[1].iteritems():
+ if 'TestON' in key:
+ node = 'ONOS' + str( controller[0] + 1 )
+ try:
+ testCounters[node].append( { key: value } )
+ except KeyError:
+ testCounters[node] = [ { key: value } ]
+ # compare the counters on each node
+ firstV = testCounters.values()[0]
+ tmp = [ v == firstV for k, v in testCounters.iteritems() ]
+ if all( tmp ):
+ consistent = main.TRUE
+ else:
+ consistent = main.FALSE
+ main.log.error( "ONOS nodes have different values for counters:\n" +
+ testCounters )
+ return ( onosCounters, consistent )
+ except Exception:
+ main.log.exception( "" )
+ main.cleanup()
+ main.exit()
+
+ def counterCheck( self, counterName, counterValue ):
+ """
+ Checks that TestON counters are consistent across all nodes and that
+ specified counter is in ONOS with the given value
+ """
+ try:
+ correctResults = main.TRUE
+ # Get onos counters results and consistentCheck
+ onosCounters, consistent = self.consistentCheck()
+ # Check for correct values
+ for i in range( len( main.activeNodes ) ):
+ current = onosCounters[i]
+ onosValue = None
+ try:
+ onosValue = current.get( counterName )
+ except AttributeError, e:
+ node = str( main.activeNodes[i] + 1 )
+ main.log.error( "ONOS" + node + " counters result " +
+ "is not as expected" )
+ correctResults = main.FALSE
+ if onosValue == counterValue:
+ main.log.info( counterName + " counter value is correct" )
+ else:
+ main.log.error( counterName + " counter value is incorrect," +
+ " expected value: " + str( counterValue )
+ + " current value: " + str( onosValue ) )
+ correctResults = main.FALSE
+ return consistent and correctResults
+ except Exception:
+ main.log.exception( "" )
+ main.cleanup()
+ main.exit()