Add tests for distributed sets and counters
diff --git a/TestON/tests/HATestClusterRestart/HATestClusterRestart.params b/TestON/tests/HATestClusterRestart/HATestClusterRestart.params
index 123fd0f..500c3ad 100644
--- a/TestON/tests/HATestClusterRestart/HATestClusterRestart.params
+++ b/TestON/tests/HATestClusterRestart/HATestClusterRestart.params
@@ -1,5 +1,5 @@
<PARAMS>
- <testcases>1,2,8,3,4,5,14,[6],8,3,7,4,15,9,8,4,10,8,4,11,8,4,12,8,4,13</testcases>
+ <testcases>1,2,8,3,4,5,14,16,17,[6],8,3,7,4,15,17,9,8,4,10,8,4,11,8,4,12,8,4,13</testcases>
<ENV>
<cellName>HA</cellName>
</ENV>
diff --git a/TestON/tests/HATestClusterRestart/HATestClusterRestart.py b/TestON/tests/HATestClusterRestart/HATestClusterRestart.py
index 1205b11..2499625 100644
--- a/TestON/tests/HATestClusterRestart/HATestClusterRestart.py
+++ b/TestON/tests/HATestClusterRestart/HATestClusterRestart.py
@@ -18,6 +18,8 @@
CASE13: Clean up
CASE14: start election app on all onos nodes
CASE15: Check that Leadership Election is still functional
+CASE16: Install Distributed Primitives app
+CASE17: Check for basic functionality with distributed primitives
"""
@@ -97,29 +99,63 @@
for node in nodes:
main.ONOSbench.onosUninstall( node.ip_address )
+ # Make sure ONOS is DEAD
+ main.log.report( "Killing any ONOS processes" )
+ killResults = main.TRUE
+ for node in nodes:
+ killed = main.ONOSbench.onosKill( node.ip_address )
+ killResults = killResults and killed
+
cleanInstallResult = main.TRUE
gitPullResult = main.TRUE
main.step( "Starting Mininet" )
- main.Mininet1.startNet( )
+ mnResult = main.Mininet1.startNet( )
+ utilities.assert_equals( expect=main.TRUE, actual=mnResult,
+ onpass="Mininet Started",
+ onfail="Error starting Mininet" )
main.step( "Compiling the latest version of ONOS" )
if PULLCODE:
main.step( "Git checkout and pull " + gitBranch )
main.ONOSbench.gitCheckout( gitBranch )
gitPullResult = main.ONOSbench.gitPull()
- if gitPullResult == main.ERROR:
- main.log.error( "Error pulling git branch" )
+ # values of 1 or 3 are good
+ utilities.assert_lesser( expect=0, actual=gitPullResult,
+ onpass="Git pull successful",
+ onfail="Git pull failed" )
- main.step( "Using mvn clean & install" )
+ main.step( "Using mvn clean and install" )
cleanInstallResult = main.ONOSbench.cleanInstall()
+ utilities.assert_equals( expect=main.TRUE,
+ actual=cleanInstallResult,
+ onpass="MCI successful",
+ onfail="MCI failed" )
else:
main.log.warn( "Did not pull new code so skipping mvn " +
"clean install" )
main.ONOSbench.getVersion( report=True )
+ # GRAPHS
+ # NOTE: important params here:
+ # job = name of Jenkins job
+ # Plot Name = Plot-HA, only can be used if multiple plots
+ # index = The number of the graph under plot name
+ job = "HAClusterRestart"
+ graphs = '<ac:structured-macro ac:name="html">\n'
+ graphs += '<ac:plain-text-body><![CDATA[\n'
+ graphs += '<iframe src="https://onos-jenkins.onlab.us/job/' + job +\
+ '/plot/getPlot?index=0&width=500&height=300"' +\
+ ' noborder="0" width="500" height="300" scrolling="yes" ' +\
+ 'seamless="seamless"></iframe>\n'
+ graphs += ']]></ac:plain-text-body>\n'
+ graphs += '</ac:structured-macro>\n'
+ main.log.wiki( graphs )
main.step( "Creating ONOS package" )
packageResult = main.ONOSbench.onosPackage()
+ utilities.assert_equals( expect=main.TRUE, actual=packageResult,
+ onpass="ONOS package successful",
+ onfail="ONOS package failed" )
main.step( "Installing ONOS package" )
onosInstallResult = main.TRUE
@@ -127,6 +163,9 @@
tmpResult = main.ONOSbench.onosInstall( options="-f",
node=node.ip_address )
onosInstallResult = onosInstallResult and tmpResult
+ utilities.assert_equals( expect=main.TRUE, actual=onosInstallResult,
+ onpass="ONOS install successful",
+ onfail="ONOS install failed" )
main.step( "Checking if ONOS is up yet" )
for i in range( 2 ):
@@ -140,6 +179,9 @@
onosIsupResult = onosIsupResult and started
if onosIsupResult == main.TRUE:
break
+ utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
+ onpass="ONOS startup successful",
+ onfail="ONOS startup failed" )
main.log.step( "Starting ONOS CLI sessions" )
cliResults = main.TRUE
@@ -154,6 +196,9 @@
for t in threads:
t.join()
cliResults = cliResults and t.result
+ utilities.assert_equals( expect=main.TRUE, actual=cliResults,
+ onpass="ONOS cli startup successful",
+ onfail="ONOS cli startup failed" )
main.step( "Start Packet Capture MN" )
main.Mininet2.startTcpdump(
@@ -162,6 +207,7 @@
intf=main.params[ 'MNtcpdump' ][ 'intf' ],
port=main.params[ 'MNtcpdump' ][ 'port' ] )
+ main.step( "App Ids check" )
appCheck = main.TRUE
threads = []
for i in range( numControllers ):
@@ -174,21 +220,16 @@
for t in threads:
t.join()
appCheck = appCheck and t.result
- utilities.assert_equals( expect=main.TRUE, actual=appCheck,
- onpass="App Ids seem to be correct",
- onfail="Something is wrong with app Ids" )
if appCheck != main.TRUE:
main.log.warn( CLIs[0].apps() )
main.log.warn( CLIs[0].appIDs() )
+ utilities.assert_equals( expect=main.TRUE, actual=appCheck,
+ onpass="App Ids seem to be correct",
+ onfail="Something is wrong with app Ids" )
case1Result = ( cleanInstallResult and packageResult and
cellResult and verifyResult and onosInstallResult
- and onosIsupResult and cliResults and appCheck)
-
- utilities.assert_equals( expect=main.TRUE, actual=case1Result,
- onpass="Test startup successful",
- onfail="Test startup NOT successful" )
-
+ and onosIsupResult and cliResults )
if case1Result == main.FALSE:
main.cleanup()
main.exit()
@@ -198,6 +239,7 @@
Assign mastership to controllers
"""
import re
+ import time
assert numControllers, "numControllers not defined"
assert main, "main not defined"
assert utilities.assert_equals, "utilities.assert_equals not defined"
@@ -251,9 +293,13 @@
actual=mastershipCheck,
onpass="Switch mastership assigned correctly",
onfail="Switches not assigned correctly to controllers" )
+
+ main.step( "Assign mastership of switches to specific controllers" )
# Manually assign mastership to the controller we want
roleCall = main.TRUE
- roleCheck = main.TRUE
+
+ ipList = [ ]
+ deviceList = []
try:
for i in range( 1, 29 ): # switches 1 through 28
# set up correct variables:
@@ -297,16 +343,8 @@
# TODO: make this controller dynamic
roleCall = roleCall and main.ONOScli1.deviceRole( deviceId,
ip )
- # Check assignment
- master = main.ONOScli1.getRole( deviceId ).get( 'master' )
- if ip in master:
- roleCheck = roleCheck and main.TRUE
- else:
- roleCheck = roleCheck and main.FALSE
- main.log.error( "Error, controller " + ip + " is not" +
- " master " + "of device " +
- str( deviceId ) + ". Master is " +
- repr( master ) + "." )
+ ipList.append( ip )
+ deviceList.append( deviceId )
except ( AttributeError, AssertionError ):
main.log.exception( "Something is wrong with ONOS device view" )
main.log.info( main.ONOScli1.devices() )
@@ -316,6 +354,24 @@
onpass="Re-assigned switch mastership to designated controller",
onfail="Something wrong with deviceRole calls" )
+ main.step( "Check mastership was correctly assigned" )
+ roleCheck = main.TRUE
+ # NOTE: This is due to the fact that device mastership change is not
+ # atomic and is actually a multi step process
+ time.sleep( 5 )
+ for i in range( len( ipList ) ):
+ ip = ipList[i]
+ deviceId = deviceList[i]
+ # Check assignment
+ master = main.ONOScli1.getRole( deviceId ).get( 'master' )
+ if ip in master:
+ roleCheck = roleCheck and main.TRUE
+ else:
+ roleCheck = roleCheck and main.FALSE
+ main.log.error( "Error, controller " + ip + " is not" +
+ " master " + "of device " +
+ str( deviceId ) + ". Master is " +
+ repr( master ) + "." )
utilities.assert_equals(
expect=main.TRUE,
actual=roleCheck,
@@ -344,14 +400,16 @@
main.log.report( "Adding host intents" )
main.case( "Adding host Intents" )
- main.step( "Discovering Hosts( Via pingall for now )" )
+ main.step( "Discovering Hosts( Via pingall for now )" )
# FIXME: Once we have a host discovery mechanism, use that instead
# install onos-app-fwd
- main.log.info( "Install reactive forwarding app" )
- appResults = CLIs[0].activateApp( "org.onosproject.fwd" )
+ main.step( "Install reactive forwarding app" )
+ installResults = CLIs[0].activateApp( "org.onosproject.fwd" )
+ utilities.assert_equals( expect=main.TRUE, actual=installResults,
+ onpass="Install fwd successful",
+ onfail="Install fwd failed" )
- # FIXME: add this to asserts
appCheck = main.TRUE
threads = []
for i in range( numControllers ):
@@ -364,12 +422,12 @@
for t in threads:
t.join()
appCheck = appCheck and t.result
- utilities.assert_equals( expect=main.TRUE, actual=appCheck,
- onpass="App Ids seem to be correct",
- onfail="Something is wrong with app Ids" )
if appCheck != main.TRUE:
main.log.warn( CLIs[0].apps() )
main.log.warn( CLIs[0].appIDs() )
+ utilities.assert_equals( expect=main.TRUE, actual=appCheck,
+ onpass="App Ids seem to be correct",
+ onfail="Something is wrong with app Ids" )
# REACTIVE FWD test
pingResult = main.FALSE
@@ -380,13 +438,20 @@
expect=main.TRUE,
actual=pingResult,
onpass="Reactive Pingall test passed",
- onfail="Reactive Pingall failed, one or more ping pairs failed" )
+ onfail="Reactive Pingall failed, " +
+ "one or more ping pairs failed" )
time2 = time.time()
- main.log.info( "Time for pingall: %2f seconds" % ( time2 - time1 ) )
-
+ main.log.info( "Time for pingall: %2f seconds" %
+ ( time2 - time1 ) )
+ # timeout for fwd flows
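+ # NOTE: presumably the fwd app's installed flows idle out after
+ # roughly 10 seconds by default, so wait slightly longer for
+ # them to expire (assumption)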
+ time.sleep( 11 )
# uninstall onos-app-fwd
- main.log.info( "Uninstall reactive forwarding app" )
- appResults = appResults and CLIs[0].deactivateApp( "org.onosproject.fwd" )
+ main.step( "Uninstall reactive forwarding app" )
+ uninstallResult = CLIs[0].deactivateApp( "org.onosproject.fwd" )
+ utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
+ onpass="Uninstall fwd successful",
+ onfail="Uninstall fwd failed" )
+ main.step( "Check app ids check" )
threads = []
for i in range( numControllers ):
t = main.Thread( target=CLIs[i].appToIDCheck,
@@ -398,15 +463,12 @@
for t in threads:
t.join()
appCheck = appCheck and t.result
- utilities.assert_equals( expect=main.TRUE, actual=appCheck,
- onpass="App Ids seem to be correct",
- onfail="Something is wrong with app Ids" )
if appCheck != main.TRUE:
main.log.warn( CLIs[0].apps() )
main.log.warn( CLIs[0].appIDs() )
-
- # timeout for fwd flows
- time.sleep( 11 )
+ utilities.assert_equals( expect=main.TRUE, actual=appCheck,
+ onpass="App Ids seem to be correct",
+ onfail="Something is wrong with app Ids" )
main.step( "Add host intents" )
intentIds = []
@@ -553,28 +615,36 @@
actual=intentAddResult,
onpass="Pushed host intents to ONOS",
onfail="Error in pushing host intents to ONOS" )
+ main.step( "Intent Anti-Entropy dispersion" )
for i in range(100):
- onosIds = main.ONOScli1.getAllIntentsId()
+ correct = True
main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
- main.log.info( "Intents in ONOS: " + str( sorted( onosIds ) ) )
- if sorted(onosIds) == sorted(intentIds):
+ for cli in CLIs:
+ onosIds = []
+ ids = cli.getAllIntentsId()
+ onosIds.append( ids )
+ main.log.debug( "Intents in " + cli.name + ": " +
+ str( sorted( ids ) ) )
+ if sorted( ids ) != sorted( intentIds ):
+ correct = False
+ if correct:
break
else:
time.sleep(1)
- # FIXME: DEBUG
if not intentStop:
intentStop = time.time()
+ global gossipTime
gossipTime = intentStop - intentStart
main.log.info( "It took about " + str( gossipTime ) +
- " seconds for all intents to appear on ONOS1" )
+ " seconds for all intents to appear in each node" )
# FIXME: make this time configurable/calculate based off of number of
# nodes and gossip rounds
utilities.assert_greater_equals(
- expect=30, actual=gossipTime,
+ expect=40, actual=gossipTime,
onpass="ECM anti-entropy for intents worked within " +
"expected time",
onfail="Intent ECM anti-entropy took too long" )
- if gossipTime <= 30:
+ if gossipTime <= 40:
intentAddResult = True
if not intentAddResult or "key" in pendingMap:
@@ -870,6 +940,7 @@
except ( ValueError, TypeError ):
main.log.exception( "Error parsing pending map" )
main.log.error( repr( pendingMap ) )
+ main.log.debug( CLIs[0].flows( jsonFormat=False ) )
def CASE5( self, main ):
"""
@@ -1019,23 +1090,52 @@
onpass="Intents are consistent across all ONOS nodes",
onfail="ONOS nodes have different views of intents" )
+ if intentsResults:
+ # Try to make it easy to figure out what is happening
+ #
+ # Intent ONOS1 ONOS2 ...
+ # 0x01 INSTALLED INSTALLING
+ # ... ... ...
+ # ... ... ...
+ title = " Id"
+ for n in range( numControllers ):
+ title += " " * 10 + "ONOS" + str( n + 1 )
+ main.log.warn( title )
+ # get all intent keys in the cluster
+ keys = []
+ for nodeStr in ONOSIntents:
+ node = json.loads( nodeStr )
+ for intent in node:
+ keys.append( intent.get( 'id' ) )
+ keys = set( keys )
+ for key in keys:
+ row = "%-13s" % key
+ for nodeStr in ONOSIntents:
+ node = json.loads( nodeStr )
+ for intent in node:
+ if intent.get( 'id', "Error" ) == key:
+ row += "%-15s" % intent.get( 'state' )
+ main.log.warn( row )
+ # End table view
+
if intentsResults and not consistentIntents:
+ # print the json objects
n = len(ONOSIntents)
- main.log.warn( "ONOS" + str( n ) + " intents: " )
- main.log.warn( json.dumps( json.loads( ONOSIntents[ -1 ] ),
- sort_keys=True,
- indent=4,
- separators=( ',', ': ' ) ) )
+ main.log.debug( "ONOS" + str( n ) + " intents: " )
+ main.log.debug( json.dumps( json.loads( ONOSIntents[ -1 ] ),
+ sort_keys=True,
+ indent=4,
+ separators=( ',', ': ' ) ) )
for i in range( numControllers ):
if ONOSIntents[ i ] != ONOSIntents[ -1 ]:
- main.log.warn( "ONOS" + str( i + 1 ) + " intents: " )
- main.log.warn( json.dumps( json.loads( ONOSIntents[i] ),
- sort_keys=True,
- indent=4,
- separators=( ',', ': ' ) ) )
+ main.log.debug( "ONOS" + str( i + 1 ) + " intents: " )
+ main.log.debug( json.dumps( json.loads( ONOSIntents[i] ),
+ sort_keys=True,
+ indent=4,
+ separators=( ',', ': ' ) ) )
else:
- main.log.warn( nodes[ i ].name + " intents match ONOS" +
- str( n ) + " intents" )
+ main.log.debug( nodes[ i ].name + " intents match ONOS" +
+ str( n ) + " intents" )
elif intentsResults and consistentIntents:
intentCheck = main.TRUE
intentState = ONOSIntents[ 0 ]
@@ -1246,6 +1346,7 @@
# Compare json objects for hosts and dataplane clusters
# hosts
+ main.step( "Host view is consistent across ONOS nodes" )
consistentHostsResult = main.TRUE
for controller in range( len( hosts ) ):
controllerStr = str( controller + 1 )
@@ -1272,6 +1373,7 @@
onpass="Hosts view is consistent across all ONOS nodes",
onfail="ONOS nodes have different views of hosts" )
+ main.step( "Each host has an IP address" )
ipResult = main.TRUE
for controller in range( 0, len( hosts ) ):
controllerStr = str( controller + 1 )
@@ -1287,6 +1389,7 @@
onfail="The ip of at least one host is missing" )
# Strongly connected clusters of devices
+ main.step( "Cluster view is consistent across ONOS nodes" )
consistentClustersResult = main.TRUE
for controller in range( len( clusters ) ):
controllerStr = str( controller + 1 )
@@ -1311,6 +1414,7 @@
onpass="Clusters view is consistent across all ONOS nodes",
onfail="ONOS nodes have different views of clusters" )
# there should always only be one cluster
+ main.step( "Cluster view correct across ONOS nodes" )
try:
numClusters = len( json.loads( clusters[ 0 ] ) )
except ( ValueError, TypeError ):
@@ -1381,14 +1485,6 @@
onpass="Topology Check Test successful",
onfail="Topology Check Test NOT successful" )
- finalAssert = main.TRUE
- finalAssert = ( finalAssert and topoResult and flowCheck
- and intentCheck and consistentMastership
- and mastershipCheck and rolesNotNull )
- utilities.assert_equals( expect=main.TRUE, actual=finalAssert,
- onpass="State check successful",
- onfail="State check NOT successful" )
-
def CASE6( self, main ):
"""
The Failure case.
@@ -1398,13 +1494,23 @@
assert utilities.assert_equals, "utilities.assert_equals not defined"
assert CLIs, "CLIs not defined"
assert nodes, "nodes not defined"
- main.log.report( "Restart entire ONOS cluster" )
+
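+ # NOTE: the in-memory counter is not expected to survive a full
+ # cluster restart (assumption), so reset the value the test expects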
+ # Reset non-persistent variables
+ iCounterValue = 0
+
main.case( "Restart entire ONOS cluster" )
main.step( "Killing ONOS nodes" )
killResults = main.TRUE
for node in nodes:
killed = main.ONOSbench.onosKill( node.ip_address )
killResults = killResults and killed
+ utilities.assert_equals( expect=main.TRUE, actual=killResults,
+ onpass="ONOS nodes killed",
+ onfail="ONOS kill unsuccessful" )
main.step( "Checking if ONOS is up yet" )
for i in range( 2 ):
@@ -1416,6 +1522,9 @@
onosIsupResult = onosIsupResult and started
if onosIsupResult == main.TRUE:
break
+ utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
+ onpass="ONOS restarted",
+ onfail="ONOS restart NOT successful" )
main.log.step( "Starting ONOS CLI sessions" )
cliResults = main.TRUE
@@ -1430,11 +1539,9 @@
for t in threads:
t.join()
cliResults = cliResults and t.result
-
- caseResults = main.TRUE and onosIsupResult and cliResults
- utilities.assert_equals( expect=main.TRUE, actual=caseResults,
- onpass="ONOS restart successful",
- onfail="ONOS restart NOT successful" )
+ utilities.assert_equals( expect=main.TRUE, actual=cliResults,
+ onpass="ONOS cli started",
+ onfail="ONOS clis did not restart" )
def CASE7( self, main ):
"""
@@ -1468,6 +1575,7 @@
onpass="Each device has a master",
onfail="Some devices don't have a master assigned" )
+ main.step( "Read device roles from ONOS" )
ONOSMastership = []
mastershipCheck = main.FALSE
consistentMastership = True
@@ -1594,6 +1702,34 @@
"nodes" )
else:
consistentIntents = False
+
+ # Try to make it easy to figure out what is happening
+ #
+ # Intent ONOS1 ONOS2 ...
+ # 0x01 INSTALLED INSTALLING
+ # ... ... ...
+ # ... ... ...
+ title = " ID"
+ for n in range( numControllers ):
+ title += " " * 10 + "ONOS" + str( n + 1 )
+ main.log.warn( title )
+ # get all intent keys in the cluster
+ keys = []
+ for nodeStr in ONOSIntents:
+ node = json.loads( nodeStr )
+ for intent in node:
+ keys.append( intent.get( 'id' ) )
+ keys = set( keys )
+ for key in keys:
+ row = "%-13s" % key
+ for nodeStr in ONOSIntents:
+ node = json.loads( nodeStr )
+ for intent in node:
+ if intent.get( 'id' ) == key:
+ row += "%-15s" % intent.get( 'state' )
+ main.log.warn( row )
+ # End table view
+
utilities.assert_equals(
expect=True,
actual=consistentIntents,
@@ -1705,6 +1841,7 @@
# we expect loss in dataplane connectivity
LossInPings = main.FALSE
+ main.step( "Leadership Election is still functional" )
# Test of LeadershipElection
leaderList = []
leaderResult = main.TRUE
@@ -1737,15 +1874,6 @@
onpass="Leadership election passed",
onfail="Something went wrong with Leadership election" )
- result = ( mastershipCheck and intentCheck and FlowTables and
- ( not LossInPings ) and rolesNotNull and leaderResult )
- result = int( result )
- if result == main.TRUE:
- main.log.report( "Constant State Tests Passed" )
- utilities.assert_equals( expect=main.TRUE, actual=result,
- onpass="Constant State Tests Passed",
- onfail="Constant state tests failed" )
-
def CASE8( self, main ):
"""
Compare topo
@@ -1773,7 +1901,6 @@
ctrls.append( temp )
MNTopo = TestONTopology( main.Mininet1, ctrls )
- main.step( "Comparing ONOS topology to MN" )
devicesResults = main.TRUE
portsResults = main.TRUE
linksResults = main.TRUE
@@ -1930,6 +2057,7 @@
# Compare json objects for hosts and dataplane clusters
# hosts
+ main.step( "Hosts view is consistent across all ONOS nodes" )
consistentHostsResult = main.TRUE
for controller in range( len( hosts ) ):
controllerStr = str( controller + 1 )
@@ -1956,6 +2084,7 @@
onfail="ONOS nodes have different views of hosts" )
# Strongly connected clusters of devices
+ main.step( "Clusters view is consistent across all ONOS nodes" )
consistentClustersResult = main.TRUE
for controller in range( len( clusters ) ):
controllerStr = str( controller + 1 )
@@ -1981,6 +2110,8 @@
onpass="Clusters view is consistent across all ONOS nodes",
onfail="ONOS nodes have different views of clusters" )
# there should always only be one cluster
+ main.step( "Topology view is correct and consistent across all " +
+ "ONOS nodes" )
try:
numClusters = len( json.loads( clusters[ 0 ] ) )
except ( ValueError, TypeError ):
@@ -2017,6 +2148,7 @@
# FIXME: move this to an ONOS state case
main.step( "Checking ONOS nodes" )
nodesOutput = []
+ nodeResults = main.TRUE
threads = []
for i in range( numControllers ):
t = main.Thread( target=CLIs[i].nodes,
@@ -2033,9 +2165,10 @@
try:
current = json.loads( i )
for node in current:
+ currentResult = main.FALSE
if node['ip'] in ips: # node in nodes() output is in cell
if node['state'] == 'ACTIVE':
- pass # as it should be
+ currentResult = main.TRUE
else:
main.log.error( "Error in ONOS node availability" )
main.log.error(
@@ -2044,9 +2177,13 @@
indent=4,
separators=( ',', ': ' ) ) )
break
+ nodeResults = nodeResults and currentResult
except ( ValueError, TypeError ):
main.log.error( "Error parsing nodes output" )
main.log.warn( repr( i ) )
+ utilities.assert_equals( expect=main.TRUE, actual=nodeResults,
+ onpass="Nodes check successful",
+ onfail="Nodes check NOT successful" )
def CASE9( self, main ):
"""
@@ -2269,7 +2406,10 @@
time.sleep( 10 )
main.step( "Stopping Mininet" )
- main.Mininet1.stopNet()
+ mnResult = main.Mininet1.stopNet()
+ utilities.assert_equals( expect=main.TRUE, actual=mnResult,
+ onpass="Mininet stopped",
+ onfail="MN cleanup NOT successful" )
main.step( "Checking ONOS Logs for errors" )
for node in nodes:
@@ -2280,16 +2420,10 @@
main.step( "Packing and rotating pcap archives" )
os.system( "~/TestON/dependencies/rotate.sh " + str( testname ) )
- # TODO: actually check something here
- utilities.assert_equals( expect=main.TRUE, actual=main.TRUE,
- onpass="Test cleanup successful",
- onfail="Test cleanup NOT successful" )
-
def CASE14( self, main ):
"""
start election app on all onos nodes
"""
- import time
assert numControllers, "numControllers not defined"
assert main, "main not defined"
assert utilities.assert_equals, "utilities.assert_equals not defined"
@@ -2297,10 +2431,13 @@
assert nodes, "nodes not defined"
leaderResult = main.TRUE
- main.log.info( "Install leadership election app" )
+ main.case("Start Leadership Election app")
+ main.step( "Install leadership election app" )
main.ONOScli1.activateApp( "org.onosproject.election" )
leaders = []
for cli in CLIs:
+ cli.electionTestRun()
+ for cli in CLIs:
leader = cli.electionTestLeader()
if leader is None or leader == main.FALSE:
main.log.report( cli.name + ": Leader for the election app " +
@@ -2326,6 +2463,7 @@
"""
Check that Leadership Election is still functional
"""
+ import time
assert numControllers, "numControllers not defined"
assert main, "main not defined"
assert utilities.assert_equals, "utilities.assert_equals not defined"
@@ -2380,6 +2518,7 @@
leaderResult = main.FALSE
elif leaderN is None:
# node may not have recieved the event yet
+ time.sleep(7)
leaderN = cli.electionTestLeader()
leaderList.pop()
leaderList.append( leaderN )
@@ -2418,18 +2557,1090 @@
actual=runResult,
onpass="App re-ran for election",
onfail="App failed to run for election" )
- if consistentLeader == main.TRUE:
- afterRun = main.ONOScli1.electionTestLeader()
- # verify leader didn't just change
- if afterRun == leaderList[ 0 ]:
- leaderResult = main.TRUE
- else:
- leaderResult = main.FALSE
- # TODO: assert on run and withdraw results?
+
+ afterRun = main.ONOScli1.electionTestLeader()
+ # verify leader didn't just change
+ if afterRun == leaderList[ 0 ]:
+ afterResult = main.TRUE
+ else:
+ afterResult = main.FALSE
utilities.assert_equals(
expect=main.TRUE,
- actual=leaderResult,
- onpass="Leadership election passed",
+ actual=afterResult,
+ onpass="Old leader successfully re-ran for election",
onfail="Something went wrong with Leadership election after " +
"the old leader re-ran for election" )
+
+ case15Result = withdrawResult and leaderResult and runResult and\
+ afterResult
+ utilities.assert_equals(
+ expect=main.TRUE,
+ actual=case15Result,
+ onpass="Leadership election is still functional",
+ onfail="Leadership Election is no longer functional" )
+
+ def CASE16( self, main ):
+ """
+ Install Distributed Primitives app
+ """
+ assert numControllers, "numControllers not defined"
+ assert main, "main not defined"
+ assert utilities.assert_equals, "utilities.assert_equals not defined"
+ assert CLIs, "CLIs not defined"
+ assert nodes, "nodes not defined"
+
+ # Variables for the distributed primitives tests
+ global pCounterName
+ global iCounterName
+ global pCounterValue
+ global iCounterValue
+ global onosSet
+ global onosSetName
+ pCounterName = "TestON-Partitions"
+ iCounterName = "TestON-inMemory"
+ pCounterValue = 0
+ iCounterValue = 0
+ onosSet = set([])
+ onosSetName = "TestON-set"
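+ # NOTE: these globals track the state the test expects the distributed
+ # primitives to be in, so CASE17 can be re-run (e.g. after the
+ # cluster restart in CASE6)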
+
+ description = "Install Primitives app"
+ main.case( description )
+ main.step( "Install Primitives app" )
+ appName = "org.onosproject.distributedprimitives"
+ appResults = CLIs[0].activateApp( appName )
+ utilities.assert_equals( expect=main.TRUE,
+ actual=appResults,
+ onpass="Primitives app activated",
+ onfail="Primitives app not activated" )
+
+ def CASE17( self, main ):
+ """
+ Check for basic functionality with distributed primitives
+ """
+ # Make sure variables are defined/set
+ assert numControllers, "numControllers not defined"
+ assert main, "main not defined"
+ assert utilities.assert_equals, "utilities.assert_equals not defined"
+ assert CLIs, "CLIs not defined"
+ assert nodes, "nodes not defined"
+ assert pCounterName, "pCounterName not defined"
+ assert iCounterName, "iCounterName not defined"
+ assert onosSetName, "onosSetName not defined"
+ # NOTE: assert fails if value is 0/None/Empty/False
+ try:
+ pCounterValue
+ except NameError:
+ main.log.error( "pCounterValue not defined, setting to 0" )
+ pCounterValue = 0
+ try:
+ iCounterValue
+ except NameError:
+ main.log.error( "iCounterValue not defined, setting to 0" )
+ iCounterValue = 0
+ try:
+ onosSet
+ except NameError:
+ main.log.error( "onosSet not defined, setting to empty Set" )
+ onosSet = set([])
+ # Variables for the distributed primitives tests. These are local only
+ addValue = "a"
+ addAllValue = "a b c d e f"
+ retainValue = "c d e f"
+
+ description = "Check for basic functionality with distributed " +\
+ "primitives"
+ main.case( description )
+ main.caseExplaination = "Test the methods of the distributed " +\
+ "primitives (counters and sets) through the cli"
+ # DISTRIBUTED ATOMIC COUNTERS
+ main.step( "Increment and get a default counter on each node" )
+ pCounters = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].counterTestIncrement,
+ name="counterIncrement-" + str( i ),
+ args=[ pCounterName ] )
+ pCounterValue += 1
+ threads.append( t )
+ t.start()
+
+ for t in threads:
+ t.join()
+ pCounters.append( t.result )
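+ # NOTE: counterTestIncrement is assumed to return the counter value
+ # after the increment, which is what the check below relies on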
+ # Check that counter incremented numController times
+ pCounterResults = True
+ for i in range( numControllers ):
+ pCounterResults = pCounterResults and ( i + 1 ) in pCounters
+ utilities.assert_equals( expect=True,
+ actual=pCounterResults,
+ onpass="Default counter incremented",
+ onfail="Error incrementing default" +
+ " counter" )
+
+ main.step( "Increment and get an in memory counter on each node" )
+ iCounters = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].counterTestIncrement,
+ name="icounterIncrement-" + str( i ),
+ args=[ iCounterName ],
+ kwargs={ "inMemory": True } )
+ iCounterValue += 1
+ threads.append( t )
+ t.start()
+
+ for t in threads:
+ t.join()
+ iCounters.append( t.result )
+ # Check that counter incremented numController times
+ iCounterResults = True
+ for i in range( numControllers ):
+ iCounterResults = iCounterResults and ( i + 1 ) in iCounters
+ utilities.assert_equals( expect=True,
+ actual=iCounterResults,
+ onpass="In memory counter incremented",
+ onfail="Error incrementing in memory" +
+ " counter" )
+
+ main.step( "Check counters are consistant across nodes" )
+ onosCounters = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].counters,
+ name="counters-" + str( i ) )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ onosCounters.append( t.result )
+ tmp = [ i == onosCounters[ 0 ] for i in onosCounters ]
+ if all( tmp ):
+ main.log.info( "Counters are consistent across all nodes" )
+ consistentCounterResults = main.TRUE
+ else:
+ main.log.error( "Counters are not consistent across all nodes" )
+ consistentCounterResults = main.FALSE
+ utilities.assert_equals( expect=main.TRUE,
+ actual=consistentCounterResults,
+ onpass="ONOS counters are consistent " +
+ "across nodes",
+ onfail="ONOS Counters are inconsistent " +
+ "across nodes" )
+
+ main.step( "Counters we added have the correct values" )
+ correctResults = main.TRUE
+ for i in range( numControllers ):
+ current = onosCounters[i]
+ try:
+ pValue = current.get( pCounterName )
+ iValue = current.get( iCounterName )
+ if pValue == pCounterValue:
+ main.log.info( "Partitioned counter value is correct" )
+ else:
+ main.log.error( "Partitioned counter value is incorrect," +
+ " expected value: " + str( pCounterValue )
+ + " current value: " + str( pValue ) )
+ correctResults = main.FALSE
+ if iValue == iCounterValue:
+ main.log.info( "In memory counter value is correct" )
+ else:
+ main.log.error( "In memory counter value is incorrect, " +
+ "expected value: " + str( iCounterValue ) +
+ " current value: " + str( iValue ) )
+ correctResults = main.FALSE
+ except AttributeError, e:
+ main.log.error( "ONOS" + str( i + 1 ) + " counters result " +
+ "is not as expected" )
+ correctResults = main.FALSE
+ utilities.assert_equals( expect=main.TRUE,
+ actual=correctResults,
+ onpass="Added counters are correct",
+ onfail="Added counters are incorrect" )
+ # DISTRIBUTED SETS
+ main.step( "Distributed Set get" )
+ size = len( onosSet )
+ getResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestGet,
+ name="setTestGet-" + str( i ),
+ args=[ onosSetName ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ getResponses.append( t.result )
+
+ getResults = main.TRUE
+ for i in range( numControllers ):
+ if isinstance( getResponses[ i ], list):
+ current = set( getResponses[ i ] )
+ if len( current ) == len( getResponses[ i ] ):
+ # no repeats
+ if onosSet != current:
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " has incorrect view" +
+ " of set " + onosSetName + ":\n" +
+ str( getResponses[ i ] ) )
+ main.log.debug( "Expected: " + str( onosSet ) )
+ main.log.debug( "Actual: " + str( current ) )
+ getResults = main.FALSE
+ else:
+ # error, set is not a set
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " has repeat elements in" +
+ " set " + onosSetName + ":\n" +
+ str( getResponses[ i ] ) )
+ getResults = main.FALSE
+ elif getResponses[ i ] == main.ERROR:
+ getResults = main.FALSE
+ utilities.assert_equals( expect=main.TRUE,
+ actual=getResults,
+ onpass="Set elements are correct",
+ onfail="Set elements are incorrect" )
+
+ main.step( "Distributed Set size" )
+ sizeResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestSize,
+ name="setTestSize-" + str( i ),
+ args=[ onosSetName ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ sizeResponses.append( t.result )
+
+ sizeResults = main.TRUE
+ for i in range( numControllers ):
+ if size != sizeResponses[ i ]:
+ sizeResults = main.FALSE
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " expected a size of " + str( size ) +
+ " for set " + onosSetName +
+ " but got " + str( sizeResponses[ i ] ) )
+ utilities.assert_equals( expect=main.TRUE,
+ actual=sizeResults,
+ onpass="Set sizes are correct",
+ onfail="Set sizes are incorrect" )
+
+ main.step( "Distributed Set add()" )
+ onosSet.add( addValue )
+ addResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestAdd,
+ name="setTestAdd-" + str( i ),
+ args=[ onosSetName, addValue ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ addResponses.append( t.result )
+
+ # main.TRUE = successfully changed the set
+ # main.FALSE = action resulted in no change in set
+ # main.ERROR - Some error in executing the function
+ addResults = main.TRUE
+ for i in range( numControllers ):
+ if addResponses[ i ] == main.TRUE:
+ # All is well
+ pass
+ elif addResponses[ i ] == main.FALSE:
+ # Already in set, probably fine
+ pass
+ elif addResponses[ i ] == main.ERROR:
+ # Error in execution
+ addResults = main.FALSE
+ else:
+ # unexpected result
+ addResults = main.FALSE
+ if addResults != main.TRUE:
+ main.log.error( "Error executing set add" )
+
+ # Check if set is still correct
+ size = len( onosSet )
+ getResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestGet,
+ name="setTestGet-" + str( i ),
+ args=[ onosSetName ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ getResponses.append( t.result )
+ getResults = main.TRUE
+ for i in range( numControllers ):
+ if isinstance( getResponses[ i ], list):
+ current = set( getResponses[ i ] )
+ if len( current ) == len( getResponses[ i ] ):
+ # no repeats
+ if onosSet != current:
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " has incorrect view" +
+ " of set " + onosSetName + ":\n" +
+ str( getResponses[ i ] ) )
+ main.log.debug( "Expected: " + str( onosSet ) )
+ main.log.debug( "Actual: " + str( current ) )
+ getResults = main.FALSE
+ else:
+ # error, set is not a set
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " has repeat elements in" +
+ " set " + onosSetName + ":\n" +
+ str( getResponses[ i ] ) )
+ getResults = main.FALSE
+ elif getResponses[ i ] == main.ERROR:
+ getResults = main.FALSE
+ sizeResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestSize,
+ name="setTestSize-" + str( i ),
+ args=[ onosSetName ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ sizeResponses.append( t.result )
+ sizeResults = main.TRUE
+ for i in range( numControllers ):
+ if size != sizeResponses[ i ]:
+ sizeResults = main.FALSE
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " expected a size of " + str( size ) +
+ " for set " + onosSetName +
+ " but got " + str( sizeResponses[ i ] ) )
+ addResults = addResults and getResults and sizeResults
+ utilities.assert_equals( expect=main.TRUE,
+ actual=addResults,
+ onpass="Set add correct",
+ onfail="Set add was incorrect" )
+
+ main.step( "Distributed Set addAll()" )
+ onosSet.update( addAllValue.split() )
+ addResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestAdd,
+ name="setTestAddAll-" + str( i ),
+ args=[ onosSetName, addAllValue ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ addResponses.append( t.result )
+
+ # main.TRUE = successfully changed the set
+ # main.FALSE = action resulted in no change in set
+ # main.ERROR - Some error in executing the function
+ addAllResults = main.TRUE
+ for i in range( numControllers ):
+ if addResponses[ i ] == main.TRUE:
+ # All is well
+ pass
+ elif addResponses[ i ] == main.FALSE:
+ # Already in set, probably fine
+ pass
+ elif addResponses[ i ] == main.ERROR:
+ # Error in execution
+ addAllResults = main.FALSE
+ else:
+ # unexpected result
+ addAllResults = main.FALSE
+ if addAllResults != main.TRUE:
+ main.log.error( "Error executing set addAll" )
+
+ # Check if set is still correct
+ size = len( onosSet )
+ getResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestGet,
+ name="setTestGet-" + str( i ),
+ args=[ onosSetName ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ getResponses.append( t.result )
+ getResults = main.TRUE
+ for i in range( numControllers ):
+ if isinstance( getResponses[ i ], list):
+ current = set( getResponses[ i ] )
+ if len( current ) == len( getResponses[ i ] ):
+ # no repeats
+ if onosSet != current:
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " has incorrect view" +
+ " of set " + onosSetName + ":\n" +
+ str( getResponses[ i ] ) )
+ main.log.debug( "Expected: " + str( onosSet ) )
+ main.log.debug( "Actual: " + str( current ) )
+ getResults = main.FALSE
+ else:
+ # error, set is not a set
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " has repeat elements in" +
+ " set " + onosSetName + ":\n" +
+ str( getResponses[ i ] ) )
+ getResults = main.FALSE
+ elif getResponses[ i ] == main.ERROR:
+ getResults = main.FALSE
+ sizeResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestSize,
+ name="setTestSize-" + str( i ),
+ args=[ onosSetName ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ sizeResponses.append( t.result )
+ sizeResults = main.TRUE
+ for i in range( numControllers ):
+ if size != sizeResponses[ i ]:
+ sizeResults = main.FALSE
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " expected a size of " + str( size ) +
+ " for set " + onosSetName +
+ " but got " + str( sizeResponses[ i ] ) )
+ addAllResults = addAllResults and getResults and sizeResults
+ utilities.assert_equals( expect=main.TRUE,
+ actual=addAllResults,
+ onpass="Set addAll correct",
+ onfail="Set addAll was incorrect" )
+
+ main.step( "Distributed Set contains()" )
+ containsResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestGet,
+ name="setContains-" + str( i ),
+ args=[ onosSetName ],
+ kwargs={ "values": addValue } )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ # NOTE: t.result is a tuple; index 1 is the boolean contains result
+ containsResponses.append( t.result )
+
+ containsResults = main.TRUE
+ for i in range( numControllers ):
+ if containsResponses[ i ] == main.ERROR:
+ containsResults = main.FALSE
+ else:
+ containsResults = containsResults and\
+ containsResponses[ i ][ 1 ]
+ utilities.assert_equals( expect=main.TRUE,
+ actual=containsResults,
+ onpass="Set contains is functional",
+ onfail="Set contains failed" )
+
+ main.step( "Distributed Set containsAll()" )
+ containsAllResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestGet,
+ name="setContainsAll-" + str( i ),
+ args=[ onosSetName ],
+ kwargs={ "values": addAllValue } )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ # NOTE: t.result is a tuple; index 1 is the boolean containsAll result
+ containsAllResponses.append( t.result )
+
+ containsAllResults = main.TRUE
+ for i in range( numControllers ):
+ if containsAllResponses[ i ] == main.ERROR:
+ containsAllResults = main.FALSE
+ else:
+ containsAllResults = containsAllResults and\
+ containsAllResponses[ i ][ 1 ]
+ utilities.assert_equals( expect=main.TRUE,
+ actual=containsAllResults,
+ onpass="Set containsAll is functional",
+ onfail="Set containsAll failed" )
+
+ main.step( "Distributed Set remove()" )
+ onosSet.remove( addValue )
+ removeResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestRemove,
+ name="setTestRemove-" + str( i ),
+ args=[ onosSetName, addValue ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ removeResponses.append( t.result )
+
+ # main.TRUE = successfully changed the set
+ # main.FALSE = action resulted in no change in set
+ # main.ERROR - Some error in executing the function
+ removeResults = main.TRUE
+ for i in range( numControllers ):
+ if removeResponses[ i ] == main.TRUE:
+ # All is well
+ pass
+ elif removeResponses[ i ] == main.FALSE:
+ # not in set, probably fine
+ pass
+ elif removeResponses[ i ] == main.ERROR:
+ # Error in execution
+ removeResults = main.FALSE
+ else:
+ # unexpected result
+ removeResults = main.FALSE
+ if removeResults != main.TRUE:
+ main.log.error( "Error executing set remove" )
+
+ # Check if set is still correct
+ size = len( onosSet )
+ getResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestGet,
+ name="setTestGet-" + str( i ),
+ args=[ onosSetName ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ getResponses.append( t.result )
+ getResults = main.TRUE
+ for i in range( numControllers ):
+ if isinstance( getResponses[ i ], list):
+ current = set( getResponses[ i ] )
+ if len( current ) == len( getResponses[ i ] ):
+ # no repeats
+ if onosSet != current:
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " has incorrect view" +
+ " of set " + onosSetName + ":\n" +
+ str( getResponses[ i ] ) )
+ main.log.debug( "Expected: " + str( onosSet ) )
+ main.log.debug( "Actual: " + str( current ) )
+ getResults = main.FALSE
+ else:
+ # error, set is not a set
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " has repeat elements in" +
+ " set " + onosSetName + ":\n" +
+ str( getResponses[ i ] ) )
+ getResults = main.FALSE
+ elif getResponses[ i ] == main.ERROR:
+ getResults = main.FALSE
+ sizeResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestSize,
+ name="setTestSize-" + str( i ),
+ args=[ onosSetName ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ sizeResponses.append( t.result )
+ sizeResults = main.TRUE
+ for i in range( numControllers ):
+ if size != sizeResponses[ i ]:
+ sizeResults = main.FALSE
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " expected a size of " + str( size ) +
+ " for set " + onosSetName +
+ " but got " + str( sizeResponses[ i ] ) )
+ removeResults = removeResults and getResults and sizeResults
+ utilities.assert_equals( expect=main.TRUE,
+ actual=removeResults,
+ onpass="Set remove correct",
+ onfail="Set remove was incorrect" )
+
+ main.step( "Distributed Set removeAll()" )
+ onosSet.difference_update( addAllValue.split() )
+ removeAllResponses = []
+ threads = []
+ try:
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestRemove,
+ name="setTestRemoveAll-" + str( i ),
+ args=[ onosSetName, addAllValue ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ removeAllResponses.append( t.result )
+ except Exception, e:
+ main.log.exception(e)
+
+ # main.TRUE = successfully changed the set
+ # main.FALSE = action resulted in no change in set
+ # main.ERROR - Some error in executing the function
+ removeAllResults = main.TRUE
+ for i in range( numControllers ):
+ if removeAllResponses[ i ] == main.TRUE:
+ # All is well
+ pass
+ elif removeAllResponses[ i ] == main.FALSE:
+ # not in set, probably fine
+ pass
+ elif removeAllResponses[ i ] == main.ERROR:
+ # Error in execution
+ removeAllResults = main.FALSE
+ else:
+ # unexpected result
+ removeAllResults = main.FALSE
+ if removeAllResults != main.TRUE:
+ main.log.error( "Error executing set removeAll" )
+
+ # Check if set is still correct
+ size = len( onosSet )
+ getResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestGet,
+ name="setTestGet-" + str( i ),
+ args=[ onosSetName ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ getResponses.append( t.result )
+ getResults = main.TRUE
+ for i in range( numControllers ):
+ if isinstance( getResponses[ i ], list):
+ current = set( getResponses[ i ] )
+ if len( current ) == len( getResponses[ i ] ):
+ # no repeats
+ if onosSet != current:
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " has incorrect view" +
+ " of set " + onosSetName + ":\n" +
+ str( getResponses[ i ] ) )
+ main.log.debug( "Expected: " + str( onosSet ) )
+ main.log.debug( "Actual: " + str( current ) )
+ getResults = main.FALSE
+ else:
+ # error, set is not a set
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " has repeat elements in" +
+ " set " + onosSetName + ":\n" +
+ str( getResponses[ i ] ) )
+ getResults = main.FALSE
+ elif getResponses[ i ] == main.ERROR:
+ getResults = main.FALSE
+ sizeResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestSize,
+ name="setTestSize-" + str( i ),
+ args=[ onosSetName ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ sizeResponses.append( t.result )
+ sizeResults = main.TRUE
+ for i in range( numControllers ):
+ if size != sizeResponses[ i ]:
+ sizeResults = main.FALSE
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " expected a size of " + str( size ) +
+ " for set " + onosSetName +
+ " but got " + str( sizeResponses[ i ] ) )
+ removeAllResults = removeAllResults and getResults and sizeResults
+ utilities.assert_equals( expect=main.TRUE,
+ actual=removeAllResults,
+ onpass="Set removeAll correct",
+ onfail="Set removeAll was incorrect" )
+
+ main.step( "Distributed Set addAll()" )
+ onosSet.update( addAllValue.split() )
+ addResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestAdd,
+ name="setTestAddAll-" + str( i ),
+ args=[ onosSetName, addAllValue ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ addResponses.append( t.result )
+
+ # main.TRUE = successfully changed the set
+ # main.FALSE = action resulted in no change in set
+ # main.ERROR - Some error in executing the function
+ addAllResults = main.TRUE
+ for i in range( numControllers ):
+ if addResponses[ i ] == main.TRUE:
+ # All is well
+ pass
+ elif addResponses[ i ] == main.FALSE:
+ # Already in set, probably fine
+ pass
+ elif addResponses[ i ] == main.ERROR:
+ # Error in execution
+ addAllResults = main.FALSE
+ else:
+ # unexpected result
+ addAllResults = main.FALSE
+ if addAllResults != main.TRUE:
+ main.log.error( "Error executing set addAll" )
+
+ # Check if set is still correct
+ size = len( onosSet )
+ getResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestGet,
+ name="setTestGet-" + str( i ),
+ args=[ onosSetName ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ getResponses.append( t.result )
+ getResults = main.TRUE
+ for i in range( numControllers ):
+ if isinstance( getResponses[ i ], list):
+ current = set( getResponses[ i ] )
+ if len( current ) == len( getResponses[ i ] ):
+ # no repeats
+ if onosSet != current:
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " has incorrect view" +
+ " of set " + onosSetName + ":\n" +
+ str( getResponses[ i ] ) )
+ main.log.debug( "Expected: " + str( onosSet ) )
+ main.log.debug( "Actual: " + str( current ) )
+ getResults = main.FALSE
+ else:
+ # error, set is not a set
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " has repeat elements in" +
+ " set " + onosSetName + ":\n" +
+ str( getResponses[ i ] ) )
+ getResults = main.FALSE
+ elif getResponses[ i ] == main.ERROR:
+ getResults = main.FALSE
+ sizeResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestSize,
+ name="setTestSize-" + str( i ),
+ args=[ onosSetName ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ sizeResponses.append( t.result )
+ sizeResults = main.TRUE
+ for i in range( numControllers ):
+ if size != sizeResponses[ i ]:
+ sizeResults = main.FALSE
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " expected a size of " + str( size ) +
+ " for set " + onosSetName +
+ " but got " + str( sizeResponses[ i ] ) )
+ addAllResults = addAllResults and getResults and sizeResults
+ utilities.assert_equals( expect=main.TRUE,
+ actual=addAllResults,
+ onpass="Set addAll correct",
+ onfail="Set addAll was incorrect" )
+
+ main.step( "Distributed Set clear()" )
+ onosSet.clear()
+ clearResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestRemove,
+ name="setTestClear-" + str( i ),
+ args=[ onosSetName, " " ], # value passed doesn't matter when clearing
+ kwargs={ "clear": True } )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ clearResponses.append( t.result )
+
+ # main.TRUE = successfully changed the set
+ # main.FALSE = action resulted in no change in set
+ # main.ERROR - Some error in executing the function
+ clearResults = main.TRUE
+ for i in range( numControllers ):
+ if clearResponses[ i ] == main.TRUE:
+ # All is well
+ pass
+ elif clearResponses[ i ] == main.FALSE:
+ # Nothing set, probably fine
+ pass
+ elif clearResponses[ i ] == main.ERROR:
+ # Error in execution
+ clearResults = main.FALSE
+ else:
+ # unexpected result
+ clearResults = main.FALSE
+ if clearResults != main.TRUE:
+ main.log.error( "Error executing set clear" )
+
+ # Check if set is still correct
+ size = len( onosSet )
+ getResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestGet,
+ name="setTestGet-" + str( i ),
+ args=[ onosSetName ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ getResponses.append( t.result )
+ getResults = main.TRUE
+ for i in range( numControllers ):
+ if isinstance( getResponses[ i ], list):
+ current = set( getResponses[ i ] )
+ if len( current ) == len( getResponses[ i ] ):
+ # no repeats
+ if onosSet != current:
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " has incorrect view" +
+ " of set " + onosSetName + ":\n" +
+ str( getResponses[ i ] ) )
+ main.log.debug( "Expected: " + str( onosSet ) )
+ main.log.debug( "Actual: " + str( current ) )
+ getResults = main.FALSE
+ else:
+ # error, set is not a set
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " has repeat elements in" +
+ " set " + onosSetName + ":\n" +
+ str( getResponses[ i ] ) )
+ getResults = main.FALSE
+ elif getResponses[ i ] == main.ERROR:
+ getResults = main.FALSE
+ sizeResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestSize,
+ name="setTestSize-" + str( i ),
+ args=[ onosSetName ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ sizeResponses.append( t.result )
+ sizeResults = main.TRUE
+ for i in range( numControllers ):
+ if size != sizeResponses[ i ]:
+ sizeResults = main.FALSE
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " expected a size of " + str( size ) +
+ " for set " + onosSetName +
+ " but got " + str( sizeResponses[ i ] ) )
+ clearResults = clearResults and getResults and sizeResults
+ utilities.assert_equals( expect=main.TRUE,
+ actual=clearResults,
+ onpass="Set clear correct",
+ onfail="Set clear was incorrect" )
+
+ main.step( "Distributed Set addAll()" )
+ onosSet.update( addAllValue.split() )
+ addResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestAdd,
+ name="setTestAddAll-" + str( i ),
+ args=[ onosSetName, addAllValue ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ addResponses.append( t.result )
+
+ # main.TRUE = successfully changed the set
+ # main.FALSE = action resulted in no change in set
+ # main.ERROR - Some error in executing the function
+ addAllResults = main.TRUE
+ for i in range( numControllers ):
+ if addResponses[ i ] == main.TRUE:
+ # All is well
+ pass
+ elif addResponses[ i ] == main.FALSE:
+ # Already in set, probably fine
+ pass
+ elif addResponses[ i ] == main.ERROR:
+ # Error in execution
+ addAllResults = main.FALSE
+ else:
+ # unexpected result
+ addAllResults = main.FALSE
+ if addAllResults != main.TRUE:
+ main.log.error( "Error executing set addAll" )
+
+ # Check if set is still correct
+ size = len( onosSet )
+ getResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestGet,
+ name="setTestGet-" + str( i ),
+ args=[ onosSetName ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ getResponses.append( t.result )
+ getResults = main.TRUE
+ for i in range( numControllers ):
+ if isinstance( getResponses[ i ], list):
+ current = set( getResponses[ i ] )
+ if len( current ) == len( getResponses[ i ] ):
+ # no repeats
+ if onosSet != current:
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " has incorrect view" +
+ " of set " + onosSetName + ":\n" +
+ str( getResponses[ i ] ) )
+ main.log.debug( "Expected: " + str( onosSet ) )
+ main.log.debug( "Actual: " + str( current ) )
+ getResults = main.FALSE
+ else:
+ # error, set is not a set
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " has repeat elements in" +
+ " set " + onosSetName + ":\n" +
+ str( getResponses[ i ] ) )
+ getResults = main.FALSE
+ elif getResponses[ i ] == main.ERROR:
+ getResults = main.FALSE
+ sizeResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestSize,
+ name="setTestSize-" + str( i ),
+ args=[ onosSetName ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ sizeResponses.append( t.result )
+ sizeResults = main.TRUE
+ for i in range( numControllers ):
+ if size != sizeResponses[ i ]:
+ sizeResults = main.FALSE
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " expected a size of " + str( size ) +
+ " for set " + onosSetName +
+ " but got " + str( sizeResponses[ i ] ) )
+ addAllResults = addAllResults and getResults and sizeResults
+ utilities.assert_equals( expect=main.TRUE,
+ actual=addAllResults,
+ onpass="Set addAll correct",
+ onfail="Set addAll was incorrect" )
+
+ main.step( "Distributed Set retain()" )
+ onosSet.intersection_update( retainValue.split() )
+ retainResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestRemove,
+ name="setTestRetain-" + str( i ),
+ args=[ onosSetName, retainValue ],
+ kwargs={ "retain": True } )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ retainResponses.append( t.result )
+
+ # main.TRUE = successfully changed the set
+ # main.FALSE = action resulted in no change in set
+ # main.ERROR - Some error in executing the function
+ retainResults = main.TRUE
+ for i in range( numControllers ):
+ if retainResponses[ i ] == main.TRUE:
+ # All is well
+ pass
+ elif retainResponses[ i ] == main.FALSE:
+ # Already in set, probably fine
+ pass
+ elif retainResponses[ i ] == main.ERROR:
+ # Error in execution
+ retainResults = main.FALSE
+ else:
+ # unexpected result
+ retainResults = main.FALSE
+ if retainResults != main.TRUE:
+ main.log.error( "Error executing set retain" )
+
+ # Check if set is still correct
+ size = len( onosSet )
+ getResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestGet,
+ name="setTestGet-" + str( i ),
+ args=[ onosSetName ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ getResponses.append( t.result )
+ getResults = main.TRUE
+ for i in range( numControllers ):
+ if isinstance( getResponses[ i ], list):
+ current = set( getResponses[ i ] )
+ if len( current ) == len( getResponses[ i ] ):
+ # no repeats
+ if onosSet != current:
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " has incorrect view" +
+ " of set " + onosSetName + ":\n" +
+ str( getResponses[ i ] ) )
+ main.log.debug( "Expected: " + str( onosSet ) )
+ main.log.debug( "Actual: " + str( current ) )
+ getResults = main.FALSE
+ else:
+ # error, set is not a set
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " has repeat elements in" +
+ " set " + onosSetName + ":\n" +
+ str( getResponses[ i ] ) )
+ getResults = main.FALSE
+ elif getResponses[ i ] == main.ERROR:
+ getResults = main.FALSE
+ sizeResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestSize,
+ name="setTestSize-" + str( i ),
+ args=[ onosSetName ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ sizeResponses.append( t.result )
+ sizeResults = main.TRUE
+ for i in range( numControllers ):
+ if size != sizeResponses[ i ]:
+ sizeResults = main.FALSE
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " expected a size of " +
+ str( size ) + " for set " + onosSetName +
+ " but got " + str( sizeResponses[ i ] ) )
+ retainResults = retainResults and getResults and sizeResults
+ utilities.assert_equals( expect=main.TRUE,
+ actual=retainResults,
+ onpass="Set retain correct",
+ onfail="Set retain was incorrect" )
+
diff --git a/TestON/tests/HATestMinorityRestart/HATestMinorityRestart.params b/TestON/tests/HATestMinorityRestart/HATestMinorityRestart.params
index 595b78c..ca1f915 100644
--- a/TestON/tests/HATestMinorityRestart/HATestMinorityRestart.params
+++ b/TestON/tests/HATestMinorityRestart/HATestMinorityRestart.params
@@ -1,5 +1,5 @@
<PARAMS>
- <testcases>1,2,8,3,4,5,14,[6],8,7,4,15,9,8,4,10,8,4,11,8,4,12,8,4,13</testcases>
+ <testcases>1,2,8,3,4,5,14,16,17,[6],8,7,4,15,17,9,8,4,10,8,4,11,8,4,12,8,4,13</testcases>
<ENV>
<cellName>HA</cellName>
</ENV>
diff --git a/TestON/tests/HATestMinorityRestart/HATestMinorityRestart.py b/TestON/tests/HATestMinorityRestart/HATestMinorityRestart.py
index 43dd995..0dd40ed 100644
--- a/TestON/tests/HATestMinorityRestart/HATestMinorityRestart.py
+++ b/TestON/tests/HATestMinorityRestart/HATestMinorityRestart.py
@@ -18,6 +18,8 @@
CASE13: Clean up
CASE14: start election app on all onos nodes
CASE15: Check that Leadership Election is still functional
+CASE16: Install Distributed Primitives app
+CASE17: Check for basic functionality with distributed primitives
"""
@@ -44,8 +46,8 @@
start cli sessions
start tcpdump
"""
- main.log.report(
- "ONOS HA test: Restart minority of ONOS nodes - initialization" )
+ main.log.report( "ONOS HA test: Restart minority of ONOS nodes - " +
+ "initialization" )
main.case( "Setting up test environment" )
# TODO: save all the timers and output them for plotting
@@ -97,29 +99,63 @@
for node in nodes:
main.ONOSbench.onosUninstall( node.ip_address )
+ # Make sure ONOS is DEAD
+ main.log.report( "Killing any ONOS processes" )
+ killResults = main.TRUE
+ for node in nodes:
+ killed = main.ONOSbench.onosKill( node.ip_address )
+ killResults = killResults and killed
+
cleanInstallResult = main.TRUE
gitPullResult = main.TRUE
main.step( "Starting Mininet" )
- main.Mininet1.startNet( )
+ mnResult = main.Mininet1.startNet( )
+ utilities.assert_equals( expect=main.TRUE, actual=mnResult,
+ onpass="Mininet Started",
+ onfail="Error starting Mininet" )
main.step( "Compiling the latest version of ONOS" )
if PULLCODE:
main.step( "Git checkout and pull " + gitBranch )
main.ONOSbench.gitCheckout( gitBranch )
gitPullResult = main.ONOSbench.gitPull()
- if gitPullResult == main.ERROR:
- main.log.error( "Error pulling git branch" )
+ # values of 1 or 3 are good
+ utilities.assert_lesser( expect=0, actual=gitPullResult,
+ onpass="Git pull successful",
+ onfail="Git pull failed" )
- main.step( "Using mvn clean & install" )
+ main.step( "Using mvn clean and install" )
cleanInstallResult = main.ONOSbench.cleanInstall()
+ utilities.assert_equals( expect=main.TRUE,
+ actual=cleanInstallResult,
+ onpass="MCI successful",
+ onfail="MCI failed" )
else:
main.log.warn( "Did not pull new code so skipping mvn " +
"clean install" )
main.ONOSbench.getVersion( report=True )
+ # GRAPHS
+ # NOTE: important params here:
+ # job = name of Jenkins job
+ # Plot Name = Plot-HA, only can be used if multiple plots
+ # index = The number of the graph under plot name
+ job = "HAMinorityRestart"
+ graphs = '<ac:structured-macro ac:name="html">\n'
+ graphs += '<ac:plain-text-body><![CDATA[\n'
+ graphs += '<iframe src="https://onos-jenkins.onlab.us/job/' + job +\
+ '/plot/getPlot?index=0&width=500&height=300"' +\
+ 'noborder="0" width="500" height="300" scrolling="yes" ' +\
+ 'seamless="seamless"></iframe>\n'
+ graphs += ']]></ac:plain-text-body>\n'
+ graphs += '</ac:structured-macro>\n'
+ main.log.wiki(graphs)
main.step( "Creating ONOS package" )
packageResult = main.ONOSbench.onosPackage()
+ utilities.assert_equals( expect=main.TRUE, actual=packageResult,
+ onpass="ONOS package successful",
+ onfail="ONOS package failed" )
main.step( "Installing ONOS package" )
onosInstallResult = main.TRUE
@@ -127,6 +163,9 @@
tmpResult = main.ONOSbench.onosInstall( options="-f",
node=node.ip_address )
onosInstallResult = onosInstallResult and tmpResult
+ utilities.assert_equals( expect=main.TRUE, actual=onosInstallResult,
+ onpass="ONOS install successful",
+ onfail="ONOS install failed" )
main.step( "Checking if ONOS is up yet" )
for i in range( 2 ):
@@ -140,6 +179,9 @@
onosIsupResult = onosIsupResult and started
if onosIsupResult == main.TRUE:
break
+ utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
+ onpass="ONOS startup successful",
+ onfail="ONOS startup failed" )
main.log.step( "Starting ONOS CLI sessions" )
cliResults = main.TRUE
@@ -154,6 +196,9 @@
for t in threads:
t.join()
cliResults = cliResults and t.result
+ utilities.assert_equals( expect=main.TRUE, actual=cliResults,
+ onpass="ONOS cli startup successful",
+ onfail="ONOS cli startup failed" )
main.step( "Start Packet Capture MN" )
main.Mininet2.startTcpdump(
@@ -162,6 +207,7 @@
intf=main.params[ 'MNtcpdump' ][ 'intf' ],
port=main.params[ 'MNtcpdump' ][ 'port' ] )
+ main.step( "App Ids check" )
appCheck = main.TRUE
threads = []
for i in range( numControllers ):
@@ -174,21 +220,16 @@
for t in threads:
t.join()
appCheck = appCheck and t.result
- utilities.assert_equals( expect=main.TRUE, actual=appCheck,
- onpass="App Ids seem to be correct",
- onfail="Something is wrong with app Ids" )
if appCheck != main.TRUE:
main.log.warn( CLIs[0].apps() )
main.log.warn( CLIs[0].appIDs() )
+ utilities.assert_equals( expect=main.TRUE, actual=appCheck,
+ onpass="App Ids seem to be correct",
+ onfail="Something is wrong with app Ids" )
case1Result = ( cleanInstallResult and packageResult and
cellResult and verifyResult and onosInstallResult
- and onosIsupResult and cliResults and appCheck)
-
- utilities.assert_equals( expect=main.TRUE, actual=case1Result,
- onpass="Test startup successful",
- onfail="Test startup NOT successful" )
-
+ and onosIsupResult and cliResults )
if case1Result == main.FALSE:
main.cleanup()
main.exit()
@@ -198,6 +239,7 @@
Assign mastership to controllers
"""
import re
+ import time
assert numControllers, "numControllers not defined"
assert main, "main not defined"
assert utilities.assert_equals, "utilities.assert_equals not defined"
@@ -251,9 +293,13 @@
actual=mastershipCheck,
onpass="Switch mastership assigned correctly",
onfail="Switches not assigned correctly to controllers" )
+
+ main.step( "Assign mastership of switches to specific controllers" )
# Manually assign mastership to the controller we want
roleCall = main.TRUE
- roleCheck = main.TRUE
+
+ ipList = [ ]
+ deviceList = []
try:
for i in range( 1, 29 ): # switches 1 through 28
# set up correct variables:
@@ -297,16 +343,8 @@
# TODO: make this controller dynamic
roleCall = roleCall and main.ONOScli1.deviceRole( deviceId,
ip )
- # Check assignment
- master = main.ONOScli1.getRole( deviceId ).get( 'master' )
- if ip in master:
- roleCheck = roleCheck and main.TRUE
- else:
- roleCheck = roleCheck and main.FALSE
- main.log.error( "Error, controller " + ip + " is not" +
- " master " + "of device " +
- str( deviceId ) + ". Master is " +
- repr( master ) + "." )
+ ipList.append( ip )
+ deviceList.append( deviceId )
except ( AttributeError, AssertionError ):
main.log.exception( "Something is wrong with ONOS device view" )
main.log.info( main.ONOScli1.devices() )
@@ -316,6 +354,24 @@
onpass="Re-assigned switch mastership to designated controller",
onfail="Something wrong with deviceRole calls" )
+ main.step( "Check mastership was correctly assigned" )
+ roleCheck = main.TRUE
+ # NOTE: Device mastership change is not atomic; it is
+ # actually a multi-step process
+ time.sleep( 5 )
+ for i in range( len( ipList ) ):
+ ip = ipList[i]
+ deviceId = deviceList[i]
+ # Check assignment
+ master = main.ONOScli1.getRole( deviceId ).get( 'master' )
+ if ip in master:
+ roleCheck = roleCheck and main.TRUE
+ else:
+ roleCheck = roleCheck and main.FALSE
+ main.log.error( "Error, controller " + ip + " is not" +
+ " master " + "of device " +
+ str( deviceId ) + ". Master is " +
+ repr( master ) + "." )
utilities.assert_equals(
expect=main.TRUE,
actual=roleCheck,
@@ -342,14 +398,16 @@
main.log.report( "Adding host intents" )
main.case( "Adding host Intents" )
- main.step( "Discovering Hosts( Via pingall for now )" )
+ main.step( "Discovering Hosts( Via pingall for now )" )
# FIXME: Once we have a host discovery mechanism, use that instead
# install onos-app-fwd
- main.log.info( "Install reactive forwarding app" )
- appResults = CLIs[0].activateApp( "org.onosproject.fwd" )
+ main.step( "Install reactive forwarding app" )
+ installResults = CLIs[0].activateApp( "org.onosproject.fwd" )
+ utilities.assert_equals( expect=main.TRUE, actual=installResults,
+ onpass="Install fwd successful",
+ onfail="Install fwd failed" )
- # FIXME: add this to asserts
appCheck = main.TRUE
threads = []
for i in range( numControllers ):
@@ -362,12 +420,12 @@
for t in threads:
t.join()
appCheck = appCheck and t.result
- utilities.assert_equals( expect=main.TRUE, actual=appCheck,
- onpass="App Ids seem to be correct",
- onfail="Something is wrong with app Ids" )
if appCheck != main.TRUE:
main.log.warn( CLIs[0].apps() )
main.log.warn( CLIs[0].appIDs() )
+ utilities.assert_equals( expect=main.TRUE, actual=appCheck,
+ onpass="App Ids seem to be correct",
+ onfail="Something is wrong with app Ids" )
# REACTIVE FWD test
pingResult = main.FALSE
@@ -378,13 +436,20 @@
expect=main.TRUE,
actual=pingResult,
onpass="Reactive Pingall test passed",
- onfail="Reactive Pingall failed, one or more ping pairs failed" )
+ onfail="Reactive Pingall failed, " +
+ "one or more ping pairs failed" )
time2 = time.time()
- main.log.info( "Time for pingall: %2f seconds" % ( time2 - time1 ) )
-
+ main.log.info( "Time for pingall: %2f seconds" %
+ ( time2 - time1 ) )
+ # timeout for fwd flows
+ time.sleep( 11 )
# uninstall onos-app-fwd
- main.log.info( "Uninstall reactive forwarding app" )
- appResults = appResults and CLIs[0].deactivateApp( "org.onosproject.fwd" )
+ main.step( "Uninstall reactive forwarding app" )
+ uninstallResult = CLIs[0].deactivateApp( "org.onosproject.fwd" )
+ utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
+ onpass="Uninstall fwd successful",
+ onfail="Uninstall fwd failed" )
+ main.step( "Check app ids check" )
threads = []
for i in range( numControllers ):
t = main.Thread( target=CLIs[i].appToIDCheck,
@@ -396,15 +461,12 @@
for t in threads:
t.join()
appCheck = appCheck and t.result
- utilities.assert_equals( expect=main.TRUE, actual=appCheck,
- onpass="App Ids seem to be correct",
- onfail="Something is wrong with app Ids" )
if appCheck != main.TRUE:
main.log.warn( CLIs[0].apps() )
main.log.warn( CLIs[0].appIDs() )
-
- # timeout for fwd flows
- time.sleep( 11 )
+ utilities.assert_equals( expect=main.TRUE, actual=appCheck,
+ onpass="App Ids seem to be correct",
+ onfail="Something is wrong with app Ids" )
main.step( "Add host intents" )
intentIds = []
@@ -551,28 +613,36 @@
actual=intentAddResult,
onpass="Pushed host intents to ONOS",
onfail="Error in pushing host intents to ONOS" )
+ main.step( "Intent Anti-Entropy dispersion" )
for i in range(100):
- onosIds = main.ONOScli1.getAllIntentsId()
+ correct = True
main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
- main.log.info( "Intents in ONOS: " + str( sorted( onosIds ) ) )
- if sorted(onosIds) == sorted(intentIds):
+ for cli in CLIs:
+ onosIds = cli.getAllIntentsId()
+ main.log.debug( "Intents in " + cli.name + ": " +
+ str( sorted( onosIds ) ) )
+ if sorted( onosIds ) != sorted( intentIds ):
+ correct = False
+ if correct:
break
else:
time.sleep(1)
- # FIXME: DEBUG
if not intentStop:
intentStop = time.time()
+ global gossipTime
gossipTime = intentStop - intentStart
main.log.info( "It took about " + str( gossipTime ) +
- " seconds for all intents to appear on ONOS1" )
+ " seconds for all intents to appear in each node" )
# FIXME: make this time configurable/calculate based off of number of
# nodes and gossip rounds
utilities.assert_greater_equals(
- expect=30, actual=gossipTime,
+ expect=40, actual=gossipTime,
onpass="ECM anti-entropy for intents worked within " +
"expected time",
onfail="Intent ECM anti-entropy took too long" )
- if gossipTime <= 30:
+ if gossipTime <= 40:
intentAddResult = True
if not intentAddResult or "key" in pendingMap:
@@ -868,6 +938,7 @@
except ( ValueError, TypeError ):
main.log.exception( "Error parsing pending map" )
main.log.error( repr( pendingMap ) )
+ main.log.debug( CLIs[0].flows( jsonFormat=False ) )
def CASE5( self, main ):
"""
@@ -1017,23 +1088,52 @@
onpass="Intents are consistent across all ONOS nodes",
onfail="ONOS nodes have different views of intents" )
+ if intentsResults:
+ # Try to make it easy to figure out what is happening
+ #
+ # Intent ONOS1 ONOS2 ...
+ # 0x01 INSTALLED INSTALLING
+ # ... ... ...
+ # ... ... ...
+ title = " Id"
+ for n in range( numControllers ):
+ title += " " * 10 + "ONOS" + str( n + 1 )
+ main.log.warn( title )
+ # get all intent keys in the cluster
+ keys = []
+ for nodeStr in ONOSIntents:
+ node = json.loads( nodeStr )
+ for intent in node:
+ keys.append( intent.get( 'id' ) )
+ keys = set( keys )
+ for key in keys:
+ row = "%-13s" % key
+ for nodeStr in ONOSIntents:
+ node = json.loads( nodeStr )
+ for intent in node:
+ if intent.get( 'id', "Error" ) == key:
+ row += "%-15s" % intent.get( 'state' )
+ main.log.warn( row )
+ # End table view
+
if intentsResults and not consistentIntents:
+ # print the json objects
n = len(ONOSIntents)
- main.log.warn( "ONOS" + str( n ) + " intents: " )
- main.log.warn( json.dumps( json.loads( ONOSIntents[ -1 ] ),
- sort_keys=True,
- indent=4,
- separators=( ',', ': ' ) ) )
+ main.log.debug( "ONOS" + str( n ) + " intents: " )
+ main.log.debug( json.dumps( json.loads( ONOSIntents[ -1 ] ),
+ sort_keys=True,
+ indent=4,
+ separators=( ',', ': ' ) ) )
for i in range( numControllers ):
if ONOSIntents[ i ] != ONOSIntents[ -1 ]:
- main.log.warn( "ONOS" + str( i + 1 ) + " intents: " )
- main.log.warn( json.dumps( json.loads( ONOSIntents[i] ),
- sort_keys=True,
- indent=4,
- separators=( ',', ': ' ) ) )
+ main.log.debug( "ONOS" + str( i + 1 ) + " intents: " )
+ main.log.debug( json.dumps( json.loads( ONOSIntents[i] ),
+ sort_keys=True,
+ indent=4,
+ separators=( ',', ': ' ) ) )
else:
- main.log.warn( nodes[ i ].name + " intents match ONOS" +
- str( n ) + " intents" )
+ main.log.debug( nodes[ i ].name + " intents match ONOS" +
+ str( n ) + " intents" )
elif intentsResults and consistentIntents:
intentCheck = main.TRUE
intentState = ONOSIntents[ 0 ]
@@ -1244,6 +1344,7 @@
# Compare json objects for hosts and dataplane clusters
# hosts
+ main.step( "Host view is consistent across ONOS nodes" )
consistentHostsResult = main.TRUE
for controller in range( len( hosts ) ):
controllerStr = str( controller + 1 )
@@ -1270,6 +1371,7 @@
onpass="Hosts view is consistent across all ONOS nodes",
onfail="ONOS nodes have different views of hosts" )
+ main.step( "Each host has an IP address" )
ipResult = main.TRUE
for controller in range( 0, len( hosts ) ):
controllerStr = str( controller + 1 )
@@ -1285,6 +1387,7 @@
onfail="The ip of at least one host is missing" )
# Strongly connected clusters of devices
+ main.step( "Cluster view is consistent across ONOS nodes" )
consistentClustersResult = main.TRUE
for controller in range( len( clusters ) ):
controllerStr = str( controller + 1 )
@@ -1309,6 +1412,7 @@
onpass="Clusters view is consistent across all ONOS nodes",
onfail="ONOS nodes have different views of clusters" )
# there should always only be one cluster
+ main.step( "Cluster view correct across ONOS nodes" )
try:
numClusters = len( json.loads( clusters[ 0 ] ) )
except ( ValueError, TypeError ):
@@ -1379,14 +1483,6 @@
onpass="Topology Check Test successful",
onfail="Topology Check Test NOT successful" )
- finalAssert = main.TRUE
- finalAssert = ( finalAssert and topoResult and flowCheck
- and intentCheck and consistentMastership
- and mastershipCheck and rolesNotNull )
- utilities.assert_equals( expect=main.TRUE, actual=finalAssert,
- onpass="State check successful",
- onfail="State check NOT successful" )
-
def CASE6( self, main ):
"""
The Failure case.
@@ -1397,15 +1493,20 @@
assert utilities.assert_equals, "utilities.assert_equals not defined"
assert CLIs, "CLIs not defined"
assert nodes, "nodes not defined"
- main.log.report( "Killing 3 ONOS nodes" )
main.case( "Restart minority of ONOS nodes" )
- # TODO: Randomize these nodes
+ main.step( "Killing 3 ONOS nodes" )
+ # TODO: Randomize these nodes or base this on partitions
# TODO: use threads in this case
- main.ONOSbench.onosKill( nodes[0].ip_address )
+ killResults = main.ONOSbench.onosKill( nodes[0].ip_address )
time.sleep( 10 )
- main.ONOSbench.onosKill( nodes[1].ip_address )
+ killResults = killResults and\
+ main.ONOSbench.onosKill( nodes[1].ip_address )
time.sleep( 10 )
- main.ONOSbench.onosKill( nodes[2].ip_address )
+ killResults = killResults and\
+ main.ONOSbench.onosKill( nodes[2].ip_address )
+ utilities.assert_equals( expect=main.TRUE, actual=killResults,
+ onpass="ONOS Killed successfully",
+ onfail="ONOS kill NOT successful" )
main.step( "Checking if ONOS is up yet" )
count = 0
@@ -1417,19 +1518,22 @@
onosIsupResult = onos1Isup and onos2Isup and onos3Isup
count = count + 1
# TODO: if it becomes an issue, we can retry this step a few times
+ utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
+ onpass="ONOS restarted successfully",
+ onfail="ONOS restart NOT successful" )
+ main.step( "Restarting ONOS CLIs" )
cliResult1 = main.ONOScli1.startOnosCli( nodes[0].ip_address )
cliResult2 = main.ONOScli2.startOnosCli( nodes[1].ip_address )
cliResult3 = main.ONOScli3.startOnosCli( nodes[2].ip_address )
cliResults = cliResult1 and cliResult2 and cliResult3
+ utilities.assert_equals( expect=main.TRUE, actual=cliResults,
+ onpass="ONOS cli restarted",
+ onfail="ONOS cli did not restart" )
# Grab the time of restart so we chan check how long the gossip
# protocol has had time to work
main.restartTime = time.time()
- caseResults = main.TRUE and onosIsupResult and cliResults
- utilities.assert_equals( expect=main.TRUE, actual=caseResults,
- onpass="ONOS restart successful",
- onfail="ONOS restart NOT successful" )
def CASE7( self, main ):
"""
@@ -1463,6 +1567,7 @@
onpass="Each device has a master",
onfail="Some devices don't have a master assigned" )
+ main.step( "Read device roles from ONOS" )
ONOSMastership = []
mastershipCheck = main.FALSE
consistentMastership = True
@@ -1589,6 +1694,34 @@
"nodes" )
else:
consistentIntents = False
+
+ # Try to make it easy to figure out what is happening
+ #
+ # Intent ONOS1 ONOS2 ...
+ # 0x01 INSTALLED INSTALLING
+ # ... ... ...
+ # ... ... ...
+ title = " ID"
+ for n in range( numControllers ):
+ title += " " * 10 + "ONOS" + str( n + 1 )
+ main.log.warn( title )
+ # get all intent keys in the cluster
+ keys = []
+ for nodeStr in ONOSIntents:
+ node = json.loads( nodeStr )
+ for intent in node:
+ keys.append( intent.get( 'id' ) )
+ keys = set( keys )
+ for key in keys:
+ row = "%-13s" % key
+ for nodeStr in ONOSIntents:
+ node = json.loads( nodeStr )
+ for intent in node:
+ if intent.get( 'id' ) == key:
+ row += "%-15s" % intent.get( 'state' )
+ main.log.warn( row )
+ # End table view
+
utilities.assert_equals(
expect=True,
actual=consistentIntents,
@@ -1696,12 +1829,13 @@
onpass="No Loss of connectivity",
onfail="Loss of dataplane connectivity detected" )
+ main.step( "Leadership Election is still functional" )
# Test of LeadershipElection
leaderList = []
# FIXME: make sure this matches nodes that were restarted
restarted = [ nodes[0].ip_address, nodes[1].ip_address,
nodes[2].ip_address ]
-
+
leaderResult = main.TRUE
for cli in CLIs:
leaderN = cli.electionTestLeader()
@@ -1737,15 +1871,6 @@
onpass="Leadership election passed",
onfail="Something went wrong with Leadership election" )
- result = ( mastershipCheck and intentCheck and FlowTables and
- ( not LossInPings ) and rolesNotNull and leaderResult )
- result = int( result )
- if result == main.TRUE:
- main.log.report( "Constant State Tests Passed" )
- utilities.assert_equals( expect=main.TRUE, actual=result,
- onpass="Constant State Tests Passed",
- onfail="Constant state tests failed" )
-
def CASE8( self, main ):
"""
Compare topo
@@ -1773,7 +1898,6 @@
ctrls.append( temp )
MNTopo = TestONTopology( main.Mininet1, ctrls )
- main.step( "Comparing ONOS topology to MN" )
devicesResults = main.TRUE
portsResults = main.TRUE
linksResults = main.TRUE
@@ -1930,6 +2054,7 @@
# Compare json objects for hosts and dataplane clusters
# hosts
+ main.step( "Hosts view is consistent across all ONOS nodes" )
consistentHostsResult = main.TRUE
for controller in range( len( hosts ) ):
controllerStr = str( controller + 1 )
@@ -1956,6 +2081,7 @@
onfail="ONOS nodes have different views of hosts" )
# Strongly connected clusters of devices
+ main.step( "Clusters view is consistent across all ONOS nodes" )
consistentClustersResult = main.TRUE
for controller in range( len( clusters ) ):
controllerStr = str( controller + 1 )
@@ -1981,6 +2107,8 @@
onpass="Clusters view is consistent across all ONOS nodes",
onfail="ONOS nodes have different views of clusters" )
# there should always only be one cluster
+ main.step( "Topology view is correct and consistent across all " +
+ "ONOS nodes" )
try:
numClusters = len( json.loads( clusters[ 0 ] ) )
except ( ValueError, TypeError ):
@@ -2017,6 +2145,7 @@
# FIXME: move this to an ONOS state case
main.step( "Checking ONOS nodes" )
nodesOutput = []
+ nodeResults = main.TRUE
threads = []
for i in range( numControllers ):
t = main.Thread( target=CLIs[i].nodes,
@@ -2033,9 +2162,10 @@
try:
current = json.loads( i )
for node in current:
+ currentResult = main.FALSE
if node['ip'] in ips: # node in nodes() output is in cell
if node['state'] == 'ACTIVE':
- pass # as it should be
+ currentResult = main.TRUE
else:
main.log.error( "Error in ONOS node availability" )
main.log.error(
@@ -2044,9 +2174,13 @@
indent=4,
separators=( ',', ': ' ) ) )
break
+ nodeResults = nodeResults and currentResult
except ( ValueError, TypeError ):
main.log.error( "Error parsing nodes output" )
main.log.warn( repr( i ) )
+ utilities.assert_equals( expect=main.TRUE, actual=nodeResults,
+ onpass="Nodes check successful",
+ onfail="Nodes check NOT successful" )
def CASE9( self, main ):
"""
@@ -2269,7 +2403,10 @@
time.sleep( 10 )
main.step( "Stopping Mininet" )
- main.Mininet1.stopNet()
+ mnResult = main.Mininet1.stopNet()
+ utilities.assert_equals( expect=main.TRUE, actual=mnResult,
+ onpass="Mininet stopped",
+ onfail="MN cleanup NOT successful" )
main.step( "Checking ONOS Logs for errors" )
for node in nodes:
@@ -2280,16 +2417,10 @@
main.step( "Packing and rotating pcap archives" )
os.system( "~/TestON/dependencies/rotate.sh " + str( testname ) )
- # TODO: actually check something here
- utilities.assert_equals( expect=main.TRUE, actual=main.TRUE,
- onpass="Test cleanup successful",
- onfail="Test cleanup NOT successful" )
-
def CASE14( self, main ):
"""
start election app on all onos nodes
"""
- import time
assert numControllers, "numControllers not defined"
assert main, "main not defined"
assert utilities.assert_equals, "utilities.assert_equals not defined"
@@ -2297,10 +2428,13 @@
assert nodes, "nodes not defined"
leaderResult = main.TRUE
- main.log.info( "Install leadership election app" )
+ main.case("Start Leadership Election app")
+ main.step( "Install leadership election app" )
main.ONOScli1.activateApp( "org.onosproject.election" )
leaders = []
for cli in CLIs:
+ cli.electionTestRun()
+ for cli in CLIs:
leader = cli.electionTestLeader()
if leader is None or leader == main.FALSE:
main.log.report( cli.name + ": Leader for the election app " +
@@ -2326,6 +2460,7 @@
"""
Check that Leadership Election is still functional
"""
+ import time
assert numControllers, "numControllers not defined"
assert main, "main not defined"
assert utilities.assert_equals, "utilities.assert_equals not defined"
@@ -2380,6 +2515,7 @@
leaderResult = main.FALSE
elif leaderN is None:
# node may not have recieved the event yet
+ time.sleep(7)
leaderN = cli.electionTestLeader()
leaderList.pop()
leaderList.append( leaderN )
@@ -2418,18 +2554,1090 @@
actual=runResult,
onpass="App re-ran for election",
onfail="App failed to run for election" )
- if consistentLeader == main.TRUE:
- afterRun = main.ONOScli1.electionTestLeader()
- # verify leader didn't just change
- if afterRun == leaderList[ 0 ]:
- leaderResult = main.TRUE
- else:
- leaderResult = main.FALSE
- # TODO: assert on run and withdraw results?
+
+ afterRun = main.ONOScli1.electionTestLeader()
+ # verify leader didn't just change
+ if afterRun == leaderList[ 0 ]:
+ afterResult = main.TRUE
+ else:
+ afterResult = main.FALSE
utilities.assert_equals(
expect=main.TRUE,
- actual=leaderResult,
- onpass="Leadership election passed",
+ actual=afterResult,
+ onpass="Old leader successfully re-ran for election",
onfail="Something went wrong with Leadership election after " +
"the old leader re-ran for election" )
+
+ case15Result = withdrawResult and leaderResult and runResult and\
+ afterResult
+ utilities.assert_equals(
+ expect=main.TRUE,
+ actual=case15Result,
+ onpass="Leadership election is still functional",
+ onfail="Leadership Election is no longer functional" )
+
+ def CASE16( self, main ):
+ """
+ Install Distributed Primitives app
+ """
+ assert numControllers, "numControllers not defined"
+ assert main, "main not defined"
+ assert utilities.assert_equals, "utilities.assert_equals not defined"
+ assert CLIs, "CLIs not defined"
+ assert nodes, "nodes not defined"
+
+ # Variables for the distributed primitives tests
+ global pCounterName
+ global iCounterName
+ global pCounterValue
+ global iCounterValue
+ global onosSet
+ global onosSetName
+ pCounterName = "TestON-Partitions"
+ iCounterName = "TestON-inMemory"
+ pCounterValue = 0
+ iCounterValue = 0
+ onosSet = set([])
+ onosSetName = "TestON-set"
+
+ description = "Install Primitives app"
+ main.case( description )
+ main.step( "Install Primitives app" )
+ appName = "org.onosproject.distributedprimitives"
+ appResults = CLIs[0].activateApp( appName )
+ utilities.assert_equals( expect=main.TRUE,
+ actual=appResults,
+ onpass="Primitives app activated",
+ onfail="Primitives app not activated" )
+
+ def CASE17( self, main ):
+ """
+ Check for basic functionality with distributed primitives
+ """
+ # Make sure variables are defined/set
+ assert numControllers, "numControllers not defined"
+ assert main, "main not defined"
+ assert utilities.assert_equals, "utilities.assert_equals not defined"
+ assert CLIs, "CLIs not defined"
+ assert nodes, "nodes not defined"
+ assert pCounterName, "pCounterName not defined"
+ assert iCounterName, "iCounterName not defined"
+ assert onosSetName, "onosSetName not defined"
+ # NOTE: assert fails if value is 0/None/Empty/False
+ try:
+ pCounterValue
+ except NameError:
+ main.log.error( "pCounterValue not defined, setting to 0" )
+ pCounterValue = 0
+ try:
+ iCounterValue
+ except NameError:
+ main.log.error( "iCounterValue not defined, setting to 0" )
+ iCounterValue = 0
+ try:
+ onosSet
+ except NameError:
+ main.log.error( "onosSet not defined, setting to empty Set" )
+ onosSet = set([])
+ # Variables for the distributed primitives tests. These are local only
+ addValue = "a"
+ addAllValue = "a b c d e f"
+ retainValue = "c d e f"
+
+ description = "Check for basic functionality with distributed " +\
+ "primitives"
+ main.case( description )
+ main.caseExplaination = "Test the methods of the distributed " +\
+ "primitives (counters and sets) through the CLI"
+ # DISTRIBUTED ATOMIC COUNTERS
+ main.step( "Increment and get a default counter on each node" )
+ pCounters = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].counterTestIncrement,
+ name="counterIncrement-" + str( i ),
+ args=[ pCounterName ] )
+ pCounterValue += 1
+ threads.append( t )
+ t.start()
+
+ for t in threads:
+ t.join()
+ pCounters.append( t.result )
+ # Check that counter incremented numController times
+ pCounterResults = True
+ for i in range( numControllers ):
+ pCounterResults = pCounterResults and ( i + 1 ) in pCounters
+ utilities.assert_equals( expect=True,
+ actual=pCounterResults,
+ onpass="Default counter incremented",
+ onfail="Error incrementing default" +
+ " counter" )
+
+ main.step( "Increment and get an in memory counter on each node" )
+ iCounters = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].counterTestIncrement,
+ name="icounterIncrement-" + str( i ),
+ args=[ iCounterName ],
+ kwargs={ "inMemory": True } )
+ iCounterValue += 1
+ threads.append( t )
+ t.start()
+
+ for t in threads:
+ t.join()
+ iCounters.append( t.result )
+ # Check that counter incremented numController times
+ iCounterResults = True
+ for i in range( numControllers ):
+ iCounterResults = iCounterResults and ( i + 1 ) in iCounters
+ utilities.assert_equals( expect=True,
+ actual=iCounterResults,
+ onpass="In memory counter incremented",
+ onfail="Error incrementing in memory" +
+ " counter" )
+
+ main.step( "Check counters are consistant across nodes" )
+ onosCounters = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].counters,
+ name="counters-" + str( i ) )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ onosCounters.append( t.result )
+ tmp = [ i == onosCounters[ 0 ] for i in onosCounters ]
+ if all( tmp ):
+ main.log.info( "Counters are consistent across all nodes" )
+ consistentCounterResults = main.TRUE
+ else:
+ main.log.error( "Counters are not consistent across all nodes" )
+ consistentCounterResults = main.FALSE
+ utilities.assert_equals( expect=main.TRUE,
+ actual=consistentCounterResults,
+ onpass="ONOS counters are consistent " +
+ "across nodes",
+ onfail="ONOS Counters are inconsistent " +
+ "across nodes" )
+
+ main.step( "Counters we added have the correct values" )
+ correctResults = main.TRUE
+ for i in range( numControllers ):
+ current = onosCounters[i]
+ try:
+ pValue = current.get( pCounterName )
+ iValue = current.get( iCounterName )
+ if pValue == pCounterValue:
+ main.log.info( "Partitioned counter value is correct" )
+ else:
+ main.log.error( "Partitioned counter value is incorrect," +
+ " expected value: " + str( pCounterValue )
+ + " current value: " + str( pValue ) )
+ correctResults = main.FALSE
+ if iValue == iCounterValue:
+ main.log.info( "In memory counter value is correct" )
+ else:
+ main.log.error( "In memory counter value is incorrect, " +
+ "expected value: " + str( iCounterValue ) +
+ " current value: " + str( iValue ) )
+ correctResults = main.FALSE
+ except AttributeError, e:
+ main.log.error( "ONOS" + str( i + 1 ) + " counters result " +
+ "is not as expected" )
+ correctResults = main.FALSE
+ utilities.assert_equals( expect=main.TRUE,
+ actual=correctResults,
+ onpass="Added counters are correct",
+ onfail="Added counters are incorrect" )
+ # DISTRIBUTED SETS
+ main.step( "Distributed Set get" )
+ size = len( onosSet )
+ getResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestGet,
+ name="setTestGet-" + str( i ),
+ args=[ onosSetName ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ getResponses.append( t.result )
+
+ getResults = main.TRUE
+ for i in range( numControllers ):
+ if isinstance( getResponses[ i ], list):
+ current = set( getResponses[ i ] )
+ if len( current ) == len( getResponses[ i ] ):
+ # no repeats
+ if onosSet != current:
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " has incorrect view" +
+ " of set " + onosSetName + ":\n" +
+ str( getResponses[ i ] ) )
+ main.log.debug( "Expected: " + str( onosSet ) )
+ main.log.debug( "Actual: " + str( current ) )
+ getResults = main.FALSE
+ else:
+ # error, set is not a set
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " has repeat elements in" +
+ " set " + onosSetName + ":\n" +
+ str( getResponses[ i ] ) )
+ getResults = main.FALSE
+ elif getResponses[ i ] == main.ERROR:
+ getResults = main.FALSE
+ utilities.assert_equals( expect=main.TRUE,
+ actual=getResults,
+ onpass="Set elements are correct",
+ onfail="Set elements are incorrect" )
+
+ main.step( "Distributed Set size" )
+ sizeResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestSize,
+ name="setTestSize-" + str( i ),
+ args=[ onosSetName ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ sizeResponses.append( t.result )
+
+ sizeResults = main.TRUE
+ for i in range( numControllers ):
+ if size != sizeResponses[ i ]:
+ sizeResults = main.FALSE
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " expected a size of " + str( size ) +
+ " for set " + onosSetName +
+ " but got " + str( sizeResponses[ i ] ) )
+ utilities.assert_equals( expect=main.TRUE,
+ actual=sizeResults,
+ onpass="Set sizes are correct",
+ onfail="Set sizes are incorrect" )
+
+ main.step( "Distributed Set add()" )
+ onosSet.add( addValue )
+ addResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestAdd,
+ name="setTestAdd-" + str( i ),
+ args=[ onosSetName, addValue ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ addResponses.append( t.result )
+
+ # main.TRUE = successfully changed the set
+ # main.FALSE = action resulted in no change in set
+ # main.ERROR - Some error in executing the function
+ addResults = main.TRUE
+ for i in range( numControllers ):
+ if addResponses[ i ] == main.TRUE:
+ # All is well
+ pass
+ elif addResponses[ i ] == main.FALSE:
+ # Already in set, probably fine
+ pass
+ elif addResponses[ i ] == main.ERROR:
+ # Error in execution
+ addResults = main.FALSE
+ else:
+ # unexpected result
+ addResults = main.FALSE
+ if addResults != main.TRUE:
+ main.log.error( "Error executing set add" )
+
+ # Check if set is still correct
+ size = len( onosSet )
+ getResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestGet,
+ name="setTestGet-" + str( i ),
+ args=[ onosSetName ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ getResponses.append( t.result )
+ getResults = main.TRUE
+ for i in range( numControllers ):
+ if isinstance( getResponses[ i ], list):
+ current = set( getResponses[ i ] )
+ if len( current ) == len( getResponses[ i ] ):
+ # no repeats
+ if onosSet != current:
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " has incorrect view" +
+ " of set " + onosSetName + ":\n" +
+ str( getResponses[ i ] ) )
+ main.log.debug( "Expected: " + str( onosSet ) )
+ main.log.debug( "Actual: " + str( current ) )
+ getResults = main.FALSE
+ else:
+ # error, set is not a set
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " has repeat elements in" +
+ " set " + onosSetName + ":\n" +
+ str( getResponses[ i ] ) )
+ getResults = main.FALSE
+ elif getResponses[ i ] == main.ERROR:
+ getResults = main.FALSE
+ sizeResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestSize,
+ name="setTestSize-" + str( i ),
+ args=[ onosSetName ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ sizeResponses.append( t.result )
+ sizeResults = main.TRUE
+ for i in range( numControllers ):
+ if size != sizeResponses[ i ]:
+ sizeResults = main.FALSE
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " expected a size of " + str( size ) +
+ " for set " + onosSetName +
+ " but got " + str( sizeResponses[ i ] ) )
+ addResults = addResults and getResults and sizeResults
+ utilities.assert_equals( expect=main.TRUE,
+ actual=addResults,
+ onpass="Set add correct",
+ onfail="Set add was incorrect" )
+
+ main.step( "Distributed Set addAll()" )
+ onosSet.update( addAllValue.split() )
+ addResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestAdd,
+ name="setTestAddAll-" + str( i ),
+ args=[ onosSetName, addAllValue ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ addResponses.append( t.result )
+
+ # main.TRUE = successfully changed the set
+ # main.FALSE = action resulted in no change in set
+ # main.ERROR - Some error in executing the function
+ addAllResults = main.TRUE
+ for i in range( numControllers ):
+ if addResponses[ i ] == main.TRUE:
+ # All is well
+ pass
+ elif addResponses[ i ] == main.FALSE:
+ # Already in set, probably fine
+ pass
+ elif addResponses[ i ] == main.ERROR:
+ # Error in execution
+ addAllResults = main.FALSE
+ else:
+ # unexpected result
+ addAllResults = main.FALSE
+ if addAllResults != main.TRUE:
+ main.log.error( "Error executing set addAll" )
+
+ # Check if set is still correct
+ size = len( onosSet )
+ getResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestGet,
+ name="setTestGet-" + str( i ),
+ args=[ onosSetName ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ getResponses.append( t.result )
+ getResults = main.TRUE
+ for i in range( numControllers ):
+ if isinstance( getResponses[ i ], list):
+ current = set( getResponses[ i ] )
+ if len( current ) == len( getResponses[ i ] ):
+ # no repeats
+ if onosSet != current:
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " has incorrect view" +
+ " of set " + onosSetName + ":\n" +
+ str( getResponses[ i ] ) )
+ main.log.debug( "Expected: " + str( onosSet ) )
+ main.log.debug( "Actual: " + str( current ) )
+ getResults = main.FALSE
+ else:
+ # error, set is not a set
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " has repeat elements in" +
+ " set " + onosSetName + ":\n" +
+ str( getResponses[ i ] ) )
+ getResults = main.FALSE
+ elif getResponses[ i ] == main.ERROR:
+ getResults = main.FALSE
+ sizeResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestSize,
+ name="setTestSize-" + str( i ),
+ args=[ onosSetName ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ sizeResponses.append( t.result )
+ sizeResults = main.TRUE
+ for i in range( numControllers ):
+ if size != sizeResponses[ i ]:
+ sizeResults = main.FALSE
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " expected a size of " + str( size ) +
+ " for set " + onosSetName +
+ " but got " + str( sizeResponses[ i ] ) )
+ addAllResults = addAllResults and getResults and sizeResults
+ utilities.assert_equals( expect=main.TRUE,
+ actual=addAllResults,
+ onpass="Set addAll correct",
+ onfail="Set addAll was incorrect" )
+
+ main.step( "Distributed Set contains()" )
+ containsResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestGet,
+ name="setContains-" + str( i ),
+ args=[ onosSetName ],
+ kwargs={ "values": addValue } )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ # NOTE: t.result is a tuple; index 1 holds the contains result
+ containsResponses.append( t.result )
+
+ containsResults = main.TRUE
+ for i in range( numControllers ):
+ if containsResponses[ i ] == main.ERROR:
+ containsResults = main.FALSE
+ else:
+ containsResults = containsResults and\
+ containsResponses[ i ][ 1 ]
+ utilities.assert_equals( expect=main.TRUE,
+ actual=containsResults,
+ onpass="Set contains is functional",
+ onfail="Set contains failed" )
+
+ main.step( "Distributed Set containsAll()" )
+ containsAllResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestGet,
+ name="setContainsAll-" + str( i ),
+ args=[ onosSetName ],
+ kwargs={ "values": addAllValue } )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ # NOTE: t.result is a tuple; index 1 holds the containsAll result
+ containsAllResponses.append( t.result )
+
+ containsAllResults = main.TRUE
+ for i in range( numControllers ):
+ if containsAllResponses[ i ] == main.ERROR:
+ containsAllResults = main.FALSE
+ else:
+ containsAllResults = containsAllResults and\
+ containsAllResponses[ i ][ 1 ]
+ utilities.assert_equals( expect=main.TRUE,
+ actual=containsAllResults,
+ onpass="Set containsAll is functional",
+ onfail="Set containsAll failed" )
+
+ main.step( "Distributed Set remove()" )
+ onosSet.remove( addValue )
+ removeResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestRemove,
+ name="setTestRemove-" + str( i ),
+ args=[ onosSetName, addValue ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ removeResponses.append( t.result )
+
+ # main.TRUE = successfully changed the set
+ # main.FALSE = action resulted in no change in set
+ # main.ERROR - Some error in executing the function
+ removeResults = main.TRUE
+ for i in range( numControllers ):
+ if removeResponses[ i ] == main.TRUE:
+ # All is well
+ pass
+ elif removeResponses[ i ] == main.FALSE:
+ # not in set, probably fine
+ pass
+ elif removeResponses[ i ] == main.ERROR:
+ # Error in execution
+ removeResults = main.FALSE
+ else:
+ # unexpected result
+ removeResults = main.FALSE
+ if removeResults != main.TRUE:
+ main.log.error( "Error executing set remove" )
+
+ # Check if set is still correct
+ size = len( onosSet )
+ getResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestGet,
+ name="setTestGet-" + str( i ),
+ args=[ onosSetName ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ getResponses.append( t.result )
+ getResults = main.TRUE
+ for i in range( numControllers ):
+ if isinstance( getResponses[ i ], list):
+ current = set( getResponses[ i ] )
+ if len( current ) == len( getResponses[ i ] ):
+ # no repeats
+ if onosSet != current:
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " has incorrect view" +
+ " of set " + onosSetName + ":\n" +
+ str( getResponses[ i ] ) )
+ main.log.debug( "Expected: " + str( onosSet ) )
+ main.log.debug( "Actual: " + str( current ) )
+ getResults = main.FALSE
+ else:
+ # error, set is not a set
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " has repeat elements in" +
+ " set " + onosSetName + ":\n" +
+ str( getResponses[ i ] ) )
+ getResults = main.FALSE
+ elif getResponses[ i ] == main.ERROR:
+ getResults = main.FALSE
+ sizeResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestSize,
+ name="setTestSize-" + str( i ),
+ args=[ onosSetName ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ sizeResponses.append( t.result )
+ sizeResults = main.TRUE
+ for i in range( numControllers ):
+ if size != sizeResponses[ i ]:
+ sizeResults = main.FALSE
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " expected a size of " + str( size ) +
+ " for set " + onosSetName +
+ " but got " + str( sizeResponses[ i ] ) )
+ removeResults = removeResults and getResults and sizeResults
+ utilities.assert_equals( expect=main.TRUE,
+ actual=removeResults,
+ onpass="Set remove correct",
+ onfail="Set remove was incorrect" )
+
+ main.step( "Distributed Set removeAll()" )
+ onosSet.difference_update( addAllValue.split() )
+ removeAllResponses = []
+ threads = []
+ try:
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestRemove,
+ name="setTestRemoveAll-" + str( i ),
+ args=[ onosSetName, addAllValue ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ removeAllResponses.append( t.result )
+ except Exception, e:
+ main.log.exception(e)
+
+ # main.TRUE = successfully changed the set
+ # main.FALSE = action resulted in no change in set
+ # main.ERROR - Some error in executing the function
+ removeAllResults = main.TRUE
+ for i in range( numControllers ):
+ if removeAllResponses[ i ] == main.TRUE:
+ # All is well
+ pass
+ elif removeAllResponses[ i ] == main.FALSE:
+ # not in set, probably fine
+ pass
+ elif removeAllResponses[ i ] == main.ERROR:
+ # Error in execution
+ removeAllResults = main.FALSE
+ else:
+ # unexpected result
+ removeAllResults = main.FALSE
+ if removeAllResults != main.TRUE:
+ main.log.error( "Error executing set removeAll" )
+
+ # Check if set is still correct
+ size = len( onosSet )
+ getResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestGet,
+ name="setTestGet-" + str( i ),
+ args=[ onosSetName ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ getResponses.append( t.result )
+ getResults = main.TRUE
+ for i in range( numControllers ):
+ if isinstance( getResponses[ i ], list):
+ current = set( getResponses[ i ] )
+ if len( current ) == len( getResponses[ i ] ):
+ # no repeats
+ if onosSet != current:
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " has incorrect view" +
+ " of set " + onosSetName + ":\n" +
+ str( getResponses[ i ] ) )
+ main.log.debug( "Expected: " + str( onosSet ) )
+ main.log.debug( "Actual: " + str( current ) )
+ getResults = main.FALSE
+ else:
+ # error, set is not a set
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " has repeat elements in" +
+ " set " + onosSetName + ":\n" +
+ str( getResponses[ i ] ) )
+ getResults = main.FALSE
+ elif getResponses[ i ] == main.ERROR:
+ getResults = main.FALSE
+ sizeResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestSize,
+ name="setTestSize-" + str( i ),
+ args=[ onosSetName ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ sizeResponses.append( t.result )
+ sizeResults = main.TRUE
+ for i in range( numControllers ):
+ if size != sizeResponses[ i ]:
+ sizeResults = main.FALSE
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " expected a size of " + str( size ) +
+ " for set " + onosSetName +
+ " but got " + str( sizeResponses[ i ] ) )
+ removeAllResults = removeAllResults and getResults and sizeResults
+ utilities.assert_equals( expect=main.TRUE,
+ actual=removeAllResults,
+ onpass="Set removeAll correct",
+ onfail="Set removeAll was incorrect" )
+
+ main.step( "Distributed Set addAll()" )
+ onosSet.update( addAllValue.split() )
+ addResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestAdd,
+ name="setTestAddAll-" + str( i ),
+ args=[ onosSetName, addAllValue ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ addResponses.append( t.result )
+
+ # main.TRUE = successfully changed the set
+ # main.FALSE = action resulted in no change in set
+ # main.ERROR - Some error in executing the function
+ addAllResults = main.TRUE
+ for i in range( numControllers ):
+ if addResponses[ i ] == main.TRUE:
+ # All is well
+ pass
+ elif addResponses[ i ] == main.FALSE:
+ # Already in set, probably fine
+ pass
+ elif addResponses[ i ] == main.ERROR:
+ # Error in execution
+ addAllResults = main.FALSE
+ else:
+ # unexpected result
+ addAllResults = main.FALSE
+ if addAllResults != main.TRUE:
+ main.log.error( "Error executing set addAll" )
+
+ # Check if set is still correct
+ size = len( onosSet )
+ getResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestGet,
+ name="setTestGet-" + str( i ),
+ args=[ onosSetName ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ getResponses.append( t.result )
+ getResults = main.TRUE
+ for i in range( numControllers ):
+ if isinstance( getResponses[ i ], list):
+ current = set( getResponses[ i ] )
+ if len( current ) == len( getResponses[ i ] ):
+ # no repeats
+ if onosSet != current:
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " has incorrect view" +
+ " of set " + onosSetName + ":\n" +
+ str( getResponses[ i ] ) )
+ main.log.debug( "Expected: " + str( onosSet ) )
+ main.log.debug( "Actual: " + str( current ) )
+ getResults = main.FALSE
+ else:
+ # error, set is not a set
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " has repeat elements in" +
+ " set " + onosSetName + ":\n" +
+ str( getResponses[ i ] ) )
+ getResults = main.FALSE
+ elif getResponses[ i ] == main.ERROR:
+ getResults = main.FALSE
+ sizeResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestSize,
+ name="setTestSize-" + str( i ),
+ args=[ onosSetName ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ sizeResponses.append( t.result )
+ sizeResults = main.TRUE
+ for i in range( numControllers ):
+ if size != sizeResponses[ i ]:
+ sizeResults = main.FALSE
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " expected a size of " + str( size ) +
+ " for set " + onosSetName +
+ " but got " + str( sizeResponses[ i ] ) )
+ addAllResults = addAllResults and getResults and sizeResults
+ utilities.assert_equals( expect=main.TRUE,
+ actual=addAllResults,
+ onpass="Set addAll correct",
+ onfail="Set addAll was incorrect" )
+
+ main.step( "Distributed Set clear()" )
+ onosSet.clear()
+ clearResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestRemove,
+ name="setTestClear-" + str( i ),
+ args=[ onosSetName, " " ], # Value doesn't matter
+ kwargs={ "clear": True } )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ clearResponses.append( t.result )
+
+ # main.TRUE = successfully changed the set
+ # main.FALSE = action resulted in no change in set
+ # main.ERROR - Some error in executing the function
+ clearResults = main.TRUE
+ for i in range( numControllers ):
+ if clearResponses[ i ] == main.TRUE:
+ # All is well
+ pass
+ elif clearResponses[ i ] == main.FALSE:
+ # Nothing set, probably fine
+ pass
+ elif clearResponses[ i ] == main.ERROR:
+ # Error in execution
+ clearResults = main.FALSE
+ else:
+ # unexpected result
+ clearResults = main.FALSE
+ if clearResults != main.TRUE:
+ main.log.error( "Error executing set clear" )
+
+ # Check if set is still correct
+ size = len( onosSet )
+ getResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestGet,
+ name="setTestGet-" + str( i ),
+ args=[ onosSetName ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ getResponses.append( t.result )
+ getResults = main.TRUE
+ for i in range( numControllers ):
+ if isinstance( getResponses[ i ], list):
+ current = set( getResponses[ i ] )
+ if len( current ) == len( getResponses[ i ] ):
+ # no repeats
+ if onosSet != current:
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " has incorrect view" +
+ " of set " + onosSetName + ":\n" +
+ str( getResponses[ i ] ) )
+ main.log.debug( "Expected: " + str( onosSet ) )
+ main.log.debug( "Actual: " + str( current ) )
+ getResults = main.FALSE
+ else:
+ # error, set is not a set
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " has repeat elements in" +
+ " set " + onosSetName + ":\n" +
+ str( getResponses[ i ] ) )
+ getResults = main.FALSE
+ elif getResponses[ i ] == main.ERROR:
+ getResults = main.FALSE
+ sizeResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestSize,
+ name="setTestSize-" + str( i ),
+ args=[ onosSetName ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ sizeResponses.append( t.result )
+ sizeResults = main.TRUE
+ for i in range( numControllers ):
+ if size != sizeResponses[ i ]:
+ sizeResults = main.FALSE
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " expected a size of " + str( size ) +
+ " for set " + onosSetName +
+ " but got " + str( sizeResponses[ i ] ) )
+ clearResults = clearResults and getResults and sizeResults
+ utilities.assert_equals( expect=main.TRUE,
+ actual=clearResults,
+ onpass="Set clear correct",
+ onfail="Set clear was incorrect" )
+
+ main.step( "Distributed Set addAll()" )
+ onosSet.update( addAllValue.split() )
+ addResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestAdd,
+ name="setTestAddAll-" + str( i ),
+ args=[ onosSetName, addAllValue ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ addResponses.append( t.result )
+
+ # main.TRUE = successfully changed the set
+ # main.FALSE = action resulted in no change in set
+ # main.ERROR - Some error in executing the function
+ addAllResults = main.TRUE
+ for i in range( numControllers ):
+ if addResponses[ i ] == main.TRUE:
+ # All is well
+ pass
+ elif addResponses[ i ] == main.FALSE:
+ # Already in set, probably fine
+ pass
+ elif addResponses[ i ] == main.ERROR:
+ # Error in execution
+ addAllResults = main.FALSE
+ else:
+ # unexpected result
+ addAllResults = main.FALSE
+ if addAllResults != main.TRUE:
+ main.log.error( "Error executing set addAll" )
+
+ # Check if set is still correct
+ size = len( onosSet )
+ getResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestGet,
+ name="setTestGet-" + str( i ),
+ args=[ onosSetName ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ getResponses.append( t.result )
+ getResults = main.TRUE
+ for i in range( numControllers ):
+ if isinstance( getResponses[ i ], list):
+ current = set( getResponses[ i ] )
+ if len( current ) == len( getResponses[ i ] ):
+ # no repeats
+ if onosSet != current:
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " has incorrect view" +
+ " of set " + onosSetName + ":\n" +
+ str( getResponses[ i ] ) )
+ main.log.debug( "Expected: " + str( onosSet ) )
+ main.log.debug( "Actual: " + str( current ) )
+ getResults = main.FALSE
+ else:
+ # error, set is not a set
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " has repeat elements in" +
+ " set " + onosSetName + ":\n" +
+ str( getResponses[ i ] ) )
+ getResults = main.FALSE
+ elif getResponses[ i ] == main.ERROR:
+ getResults = main.FALSE
+ sizeResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestSize,
+ name="setTestSize-" + str( i ),
+ args=[ onosSetName ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ sizeResponses.append( t.result )
+ sizeResults = main.TRUE
+ for i in range( numControllers ):
+ if size != sizeResponses[ i ]:
+ sizeResults = main.FALSE
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " expected a size of " + str( size ) +
+ " for set " + onosSetName +
+ " but got " + str( sizeResponses[ i ] ) )
+ addAllResults = addAllResults and getResults and sizeResults
+ utilities.assert_equals( expect=main.TRUE,
+ actual=addAllResults,
+ onpass="Set addAll correct",
+ onfail="Set addAll was incorrect" )
+
+ main.step( "Distributed Set retain()" )
+ onosSet.intersection_update( retainValue.split() )
+ retainResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestRemove,
+ name="setTestRetain-" + str( i ),
+ args=[ onosSetName, retainValue ],
+ kwargs={ "retain": True } )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ retainResponses.append( t.result )
+
+ # main.TRUE = successfully changed the set
+ # main.FALSE = action resulted in no change in set
+ # main.ERROR - Some error in executing the function
+ retainResults = main.TRUE
+ for i in range( numControllers ):
+ if retainResponses[ i ] == main.TRUE:
+ # All is well
+ pass
+ elif retainResponses[ i ] == main.FALSE:
+ # No change to set, probably fine
+ pass
+ elif retainResponses[ i ] == main.ERROR:
+ # Error in execution
+ retainResults = main.FALSE
+ else:
+ # unexpected result
+ retainResults = main.FALSE
+ if retainResults != main.TRUE:
+ main.log.error( "Error executing set retain" )
+
+ # Check if set is still correct
+ size = len( onosSet )
+ getResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestGet,
+ name="setTestGet-" + str( i ),
+ args=[ onosSetName ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ getResponses.append( t.result )
+ getResults = main.TRUE
+ for i in range( numControllers ):
+ if isinstance( getResponses[ i ], list):
+ current = set( getResponses[ i ] )
+ if len( current ) == len( getResponses[ i ] ):
+ # no repeats
+ if onosSet != current:
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " has incorrect view" +
+ " of set " + onosSetName + ":\n" +
+ str( getResponses[ i ] ) )
+ main.log.debug( "Expected: " + str( onosSet ) )
+ main.log.debug( "Actual: " + str( current ) )
+ getResults = main.FALSE
+ else:
+ # error, set is not a set
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " has repeat elements in" +
+ " set " + onosSetName + ":\n" +
+ str( getResponses[ i ] ) )
+ getResults = main.FALSE
+ elif getResponses[ i ] == main.ERROR:
+ getResults = main.FALSE
+ sizeResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestSize,
+ name="setTestSize-" + str( i ),
+ args=[ onosSetName ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ sizeResponses.append( t.result )
+ sizeResults = main.TRUE
+ for i in range( numControllers ):
+ if size != sizeResponses[ i ]:
+ sizeResults = main.FALSE
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " expected a size of " +
+ str( size ) + " for set " + onosSetName +
+ " but got " + str( sizeResponses[ i ] ) )
+ retainResults = retainResults and getResults and sizeResults
+ utilities.assert_equals( expect=main.TRUE,
+ actual=retainResults,
+ onpass="Set retain correct",
+ onfail="Set retain was incorrect" )
+
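The counter steps in CASE17 lean on one property of the distributed atomic counter: when each of the n controllers issues a single increment, the returned values should be exactly 1 through n, each seen once, and the locally tracked expected value ends at n. A short sketch of that check is shown below, assuming the pCounters/iCounters lists built in the test; the helper name is illustrative only and not part of this patch.

def checkCounterIncrements( returnedValues, expectedCount ):
    # Illustrative only, not part of this patch: each of the
    # 'expectedCount' concurrent increments should have returned a
    # distinct value in 1..expectedCount.
    results = True
    for value in range( 1, expectedCount + 1 ):
        results = results and ( value in returnedValues )
    return results

# Example use:
# pCounterResults = checkCounterIncrements( pCounters, numControllers )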
diff --git a/TestON/tests/HATestSanity/HATestSanity.params b/TestON/tests/HATestSanity/HATestSanity.params
index ee4374d..dace37a 100644
--- a/TestON/tests/HATestSanity/HATestSanity.params
+++ b/TestON/tests/HATestSanity/HATestSanity.params
@@ -1,5 +1,25 @@
<PARAMS>
- <testcases>1,2,8,3,4,5,14,[6],8,7,4,15,9,8,4,10,8,4,11,8,4,12,8,4,13</testcases>
+ #List of test cases:
+ #CASE1: Compile ONOS and push it to the test machines
+ #CASE2: Assign mastership to controllers
+ #CASE3: Assign intents
+ #CASE4: Ping across added host intents
+ #CASE5: Reading state of ONOS
+ #CASE6: The Failure case. Since this is the Sanity test, we do nothing.
+ #CASE7: Check state after control plane failure
+ #CASE8: Compare topo
+ #CASE9: Link s3-s28 down
+ #CASE10: Link s3-s28 up
+ #CASE11: Switch down
+ #CASE12: Switch up
+ #CASE13: Clean up
+ #CASE14: start election app on all onos nodes
+ #CASE15: Check that Leadership Election is still functional
+ #CASE16: Install Distributed Primitives app
+ #CASE17: Check for basic functionality with distributed primitives
+ #1,2,8,3,4,5,14,[6],8,7,4,15,9,8,4,10,8,4,11,8,4,12,8,4,13
+ #1,2,8,3,4,5,14,16,17,[6],8,7,4,15,17,9,8,4,10,8,4,11,8,4,12,8,4,13
+ <testcases>1,2,8,3,4,5,14,16,17,[6],8,7,4,15,17,9,8,4,10,8,4,11,8,4,12,8,4,13</testcases>
<ENV>
<cellName>HA</cellName>
</ENV>
diff --git a/TestON/tests/HATestSanity/HATestSanity.py b/TestON/tests/HATestSanity/HATestSanity.py
index 20f6cc8..8bc21b0 100644
--- a/TestON/tests/HATestSanity/HATestSanity.py
+++ b/TestON/tests/HATestSanity/HATestSanity.py
@@ -19,6 +19,8 @@
CASE13: Clean up
CASE14: start election app on all onos nodes
CASE15: Check that Leadership Election is still functional
+CASE16: Install Distributed Primitives app
+CASE17: Check for basic functionality with distributed primitives
"""
@@ -97,29 +99,65 @@
for node in nodes:
main.ONOSbench.onosUninstall( node.ip_address )
+ # Make sure ONOS is DEAD
+ main.log.report( "Killing any ONOS processes" )
+ killResults = main.TRUE
+ for node in nodes:
+ killed = main.ONOSbench.onosKill( node.ip_address )
+ killResults = killResults and killed
+
cleanInstallResult = main.TRUE
gitPullResult = main.TRUE
main.step( "Starting Mininet" )
- main.Mininet1.startNet( )
+ mnResult = main.Mininet1.startNet( )
+ utilities.assert_equals( expect=main.TRUE, actual=mnResult,
+ onpass="Mininet Started",
+ onfail="Error starting Mininet" )
main.step( "Compiling the latest version of ONOS" )
if PULLCODE:
main.step( "Git checkout and pull " + gitBranch )
main.ONOSbench.gitCheckout( gitBranch )
gitPullResult = main.ONOSbench.gitPull()
- if gitPullResult == main.ERROR:
- main.log.error( "Error pulling git branch" )
+ # values of 1 or 3 are good
+ utilities.assert_lesser( expect=0, actual=gitPullResult,
+ onpass="Git pull successful",
+ onfail="Git pull failed" )
- main.step( "Using mvn clean & install" )
+ main.step( "Using mvn clean and install" )
cleanInstallResult = main.ONOSbench.cleanInstall()
+ utilities.assert_equals( expect=main.TRUE,
+ actual=cleanInstallResult,
+ onpass="MCI successful",
+ onfail="MCI failed" )
else:
main.log.warn( "Did not pull new code so skipping mvn " +
"clean install" )
main.ONOSbench.getVersion( report=True )
+ # GRAPHS
+ # NOTE: important params here:
+ # job = name of Jenkins job
+ # Plot Name = Plot-HA, only can be used if multiple plots
+ # index = The number of the graph under plot name
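+ # For reference, the iframe built below resolves to:
+ # https://onos-jenkins.onlab.us/job/HASanity/plot/Plot-HA/getPlot?index=0&width=500&height=300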
+ job = "HASanity"
+ plotName = "Plot-HA"
+ graphs = '<ac:structured-macro ac:name="html">\n'
+ graphs += '<ac:plain-text-body><![CDATA[\n'
+ graphs += '<iframe src="https://onos-jenkins.onlab.us/job/' + job +\
+ '/plot/' + plotName + '/getPlot?index=0' +\
+ '&width=500&height=300"' +\
+ 'noborder="0" width="500" height="300" scrolling="yes" ' +\
+ 'seamless="seamless"></iframe>\n'
+ graphs += ']]></ac:plain-text-body>\n'
+ graphs += '</ac:structured-macro>\n'
+ main.log.wiki(graphs)
main.step( "Creating ONOS package" )
packageResult = main.ONOSbench.onosPackage()
+ utilities.assert_equals( expect=main.TRUE, actual=packageResult,
+ onpass="ONOS package successful",
+ onfail="ONOS package failed" )
main.step( "Installing ONOS package" )
onosInstallResult = main.TRUE
@@ -127,6 +165,9 @@
tmpResult = main.ONOSbench.onosInstall( options="-f",
node=node.ip_address )
onosInstallResult = onosInstallResult and tmpResult
+ utilities.assert_equals( expect=main.TRUE, actual=onosInstallResult,
+ onpass="ONOS install successful",
+ onfail="ONOS install failed" )
main.step( "Checking if ONOS is up yet" )
for i in range( 2 ):
@@ -140,6 +181,9 @@
onosIsupResult = onosIsupResult and started
if onosIsupResult == main.TRUE:
break
+ utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
+ onpass="ONOS startup successful",
+ onfail="ONOS startup failed" )
main.log.step( "Starting ONOS CLI sessions" )
cliResults = main.TRUE
@@ -154,6 +198,9 @@
for t in threads:
t.join()
cliResults = cliResults and t.result
+ utilities.assert_equals( expect=main.TRUE, actual=cliResults,
+ onpass="ONOS cli startup successful",
+ onfail="ONOS cli startup failed" )
main.step( "Start Packet Capture MN" )
main.Mininet2.startTcpdump(
@@ -162,6 +209,7 @@
intf=main.params[ 'MNtcpdump' ][ 'intf' ],
port=main.params[ 'MNtcpdump' ][ 'port' ] )
+ main.step( "App Ids check" )
appCheck = main.TRUE
threads = []
for i in range( numControllers ):
@@ -174,21 +222,16 @@
for t in threads:
t.join()
appCheck = appCheck and t.result
- utilities.assert_equals( expect=main.TRUE, actual=appCheck,
- onpass="App Ids seem to be correct",
- onfail="Something is wrong with app Ids" )
if appCheck != main.TRUE:
main.log.warn( CLIs[0].apps() )
main.log.warn( CLIs[0].appIDs() )
+ utilities.assert_equals( expect=main.TRUE, actual=appCheck,
+ onpass="App Ids seem to be correct",
+ onfail="Something is wrong with app Ids" )
case1Result = ( cleanInstallResult and packageResult and
cellResult and verifyResult and onosInstallResult
- and onosIsupResult and cliResults and appCheck)
-
- utilities.assert_equals( expect=main.TRUE, actual=case1Result,
- onpass="Test startup successful",
- onfail="Test startup NOT successful" )
-
+ and onosIsupResult and cliResults )
if case1Result == main.FALSE:
main.cleanup()
main.exit()
@@ -198,6 +241,7 @@
Assign mastership to controllers
"""
import re
+ import time
assert numControllers, "numControllers not defined"
assert main, "main not defined"
assert utilities.assert_equals, "utilities.assert_equals not defined"
@@ -251,9 +295,13 @@
actual=mastershipCheck,
onpass="Switch mastership assigned correctly",
onfail="Switches not assigned correctly to controllers" )
+
+ main.step( "Assign mastership of switches to specific controllers" )
# Manually assign mastership to the controller we want
roleCall = main.TRUE
- roleCheck = main.TRUE
+
+ ipList = [ ]
+ deviceList = []
try:
for i in range( 1, 29 ): # switches 1 through 28
# set up correct variables:
@@ -297,16 +345,8 @@
# TODO: make this controller dynamic
roleCall = roleCall and main.ONOScli1.deviceRole( deviceId,
ip )
- # Check assignment
- master = main.ONOScli1.getRole( deviceId ).get( 'master' )
- if ip in master:
- roleCheck = roleCheck and main.TRUE
- else:
- roleCheck = roleCheck and main.FALSE
- main.log.error( "Error, controller " + ip + " is not" +
- " master " + "of device " +
- str( deviceId ) + ". Master is " +
- repr( master ) + "." )
+ ipList.append( ip )
+ deviceList.append( deviceId )
except ( AttributeError, AssertionError ):
main.log.exception( "Something is wrong with ONOS device view" )
main.log.info( main.ONOScli1.devices() )
@@ -316,6 +356,24 @@
onpass="Re-assigned switch mastership to designated controller",
onfail="Something wrong with deviceRole calls" )
+ main.step( "Check mastership was correctly assigned" )
+ roleCheck = main.TRUE
+ # NOTE: This is due to the fact that device mastership change is not
+ # atomic and is actually a multi-step process
+ time.sleep( 5 )
+ for i in range( len( ipList ) ):
+ ip = ipList[i]
+ deviceId = deviceList[i]
+ # Check assignment
+ master = main.ONOScli1.getRole( deviceId ).get( 'master' )
+ if ip in master:
+ roleCheck = roleCheck and main.TRUE
+ else:
+ roleCheck = roleCheck and main.FALSE
+ main.log.error( "Error, controller " + ip + " is not" +
+ " master " + "of device " +
+ str( deviceId ) + ". Master is " +
+ repr( master ) + "." )
utilities.assert_equals(
expect=main.TRUE,
actual=roleCheck,
@@ -342,14 +400,16 @@
main.log.report( "Adding host intents" )
main.case( "Adding host Intents" )
- main.step( "Discovering Hosts( Via pingall for now )" )
+ main.step( "Discovering Hosts( Via pingall for now )" )
# FIXME: Once we have a host discovery mechanism, use that instead
# install onos-app-fwd
- main.log.info( "Install reactive forwarding app" )
- appResults = CLIs[0].activateApp( "org.onosproject.fwd" )
+ main.step( "Install reactive forwarding app" )
+ installResults = CLIs[0].activateApp( "org.onosproject.fwd" )
+ utilities.assert_equals( expect=main.TRUE, actual=installResults,
+ onpass="Install fwd successful",
+ onfail="Install fwd failed" )
- # FIXME: add this to asserts
appCheck = main.TRUE
threads = []
for i in range( numControllers ):
@@ -362,12 +422,12 @@
for t in threads:
t.join()
appCheck = appCheck and t.result
- utilities.assert_equals( expect=main.TRUE, actual=appCheck,
- onpass="App Ids seem to be correct",
- onfail="Something is wrong with app Ids" )
if appCheck != main.TRUE:
main.log.warn( CLIs[0].apps() )
main.log.warn( CLIs[0].appIDs() )
+ utilities.assert_equals( expect=main.TRUE, actual=appCheck,
+ onpass="App Ids seem to be correct",
+ onfail="Something is wrong with app Ids" )
# REACTIVE FWD test
pingResult = main.FALSE
@@ -378,13 +438,20 @@
expect=main.TRUE,
actual=pingResult,
onpass="Reactive Pingall test passed",
- onfail="Reactive Pingall failed, one or more ping pairs failed" )
+ onfail="Reactive Pingall failed, " +
+ "one or more ping pairs failed" )
time2 = time.time()
- main.log.info( "Time for pingall: %2f seconds" % ( time2 - time1 ) )
-
+ main.log.info( "Time for pingall: %2f seconds" %
+ ( time2 - time1 ) )
+ # timeout for fwd flows
+ time.sleep( 11 )
# uninstall onos-app-fwd
- main.log.info( "Uninstall reactive forwarding app" )
- appResults = appResults and CLIs[0].deactivateApp( "org.onosproject.fwd" )
+ main.step( "Uninstall reactive forwarding app" )
+ uninstallResult = CLIs[0].deactivateApp( "org.onosproject.fwd" )
+ utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
+ onpass="Uninstall fwd successful",
+ onfail="Uninstall fwd failed" )
+ main.step( "Check app ids" )
threads = []
for i in range( numControllers ):
t = main.Thread( target=CLIs[i].appToIDCheck,
@@ -396,15 +463,12 @@
for t in threads:
t.join()
appCheck = appCheck and t.result
- utilities.assert_equals( expect=main.TRUE, actual=appCheck,
- onpass="App Ids seem to be correct",
- onfail="Something is wrong with app Ids" )
if appCheck != main.TRUE:
main.log.warn( CLIs[0].apps() )
main.log.warn( CLIs[0].appIDs() )
-
- # timeout for fwd flows
- time.sleep( 11 )
+ utilities.assert_equals( expect=main.TRUE, actual=appCheck,
+ onpass="App Ids seem to be correct",
+ onfail="Something is wrong with app Ids" )
main.step( "Add host intents" )
intentIds = []
@@ -551,28 +615,36 @@
actual=intentAddResult,
onpass="Pushed host intents to ONOS",
onfail="Error in pushing host intents to ONOS" )
+ main.step( "Intent Anti-Entropy dispersion" )
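+ # Poll each node for up to ~100 seconds (1 second per retry) until every
+ # node reports the full list of submitted intent IDs; this measures how
+ # long the eventually consistent map's anti-entropy takes to converge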
for i in range(100):
- onosIds = main.ONOScli1.getAllIntentsId()
+ correct = True
main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
- main.log.info( "Intents in ONOS: " + str( sorted( onosIds ) ) )
- if sorted(onosIds) == sorted(intentIds):
+ for cli in CLIs:
+ onosIds = []
+ ids = cli.getAllIntentsId()
+ onosIds.append( ids )
+ main.log.debug( "Intents in " + cli.name + ": " +
+ str( sorted( ids ) ) )
+ if sorted( ids ) != sorted( intentIds ):
+ correct = False
+ if correct:
break
else:
time.sleep(1)
- # FIXME: DEBUG
if not intentStop:
intentStop = time.time()
+ global gossipTime
gossipTime = intentStop - intentStart
main.log.info( "It took about " + str( gossipTime ) +
- " seconds for all intents to appear on ONOS1" )
+ " seconds for all intents to appear in each node" )
# FIXME: make this time configurable/calculate based off of number of
# nodes and gossip rounds
utilities.assert_greater_equals(
- expect=30, actual=gossipTime,
+ expect=40, actual=gossipTime,
onpass="ECM anti-entropy for intents worked within " +
"expected time",
onfail="Intent ECM anti-entropy took too long" )
- if gossipTime <= 30:
+ if gossipTime <= 40:
intentAddResult = True
if not intentAddResult or "key" in pendingMap:
@@ -868,6 +940,7 @@
except ( ValueError, TypeError ):
main.log.exception( "Error parsing pending map" )
main.log.error( repr( pendingMap ) )
+ main.log.debug( CLIs[0].flows( jsonFormat=False ) )
def CASE5( self, main ):
"""
@@ -1017,23 +1090,52 @@
onpass="Intents are consistent across all ONOS nodes",
onfail="ONOS nodes have different views of intents" )
+ if intentsResults:
+ # Try to make it easy to figure out what is happening
+ #
+ # Intent ONOS1 ONOS2 ...
+ # 0x01 INSTALLED INSTALLING
+ # ... ... ...
+ # ... ... ...
+ title = " Id"
+ for n in range( numControllers ):
+ title += " " * 10 + "ONOS" + str( n + 1 )
+ main.log.warn( title )
+ # get all intent keys in the cluster
+ keys = []
+ for nodeStr in ONOSIntents:
+ node = json.loads( nodeStr )
+ for intent in node:
+ keys.append( intent.get( 'id' ) )
+ keys = set( keys )
+ for key in keys:
+ row = "%-13s" % key
+ for nodeStr in ONOSIntents:
+ node = json.loads( nodeStr )
+ for intent in node:
+ if intent.get( 'id', "Error" ) == key:
+ row += "%-15s" % intent.get( 'state' )
+ main.log.warn( row )
+ # End table view
+
if intentsResults and not consistentIntents:
+ # print the json objects
n = len(ONOSIntents)
- main.log.warn( "ONOS" + str( n ) + " intents: " )
- main.log.warn( json.dumps( json.loads( ONOSIntents[ -1 ] ),
- sort_keys=True,
- indent=4,
- separators=( ',', ': ' ) ) )
+ main.log.debug( "ONOS" + str( n ) + " intents: " )
+ main.log.debug( json.dumps( json.loads( ONOSIntents[ -1 ] ),
+ sort_keys=True,
+ indent=4,
+ separators=( ',', ': ' ) ) )
for i in range( numControllers ):
if ONOSIntents[ i ] != ONOSIntents[ -1 ]:
- main.log.warn( "ONOS" + str( i + 1 ) + " intents: " )
- main.log.warn( json.dumps( json.loads( ONOSIntents[i] ),
- sort_keys=True,
- indent=4,
- separators=( ',', ': ' ) ) )
+ main.log.debug( "ONOS" + str( i + 1 ) + " intents: " )
+ main.log.debug( json.dumps( json.loads( ONOSIntents[i] ),
+ sort_keys=True,
+ indent=4,
+ separators=( ',', ': ' ) ) )
else:
- main.log.warn( nodes[ i ].name + " intents match ONOS" +
- str( n ) + " intents" )
+ main.log.debug( nodes[ i ].name + " intents match ONOS" +
+ str( n ) + " intents" )
elif intentsResults and consistentIntents:
intentCheck = main.TRUE
intentState = ONOSIntents[ 0 ]
@@ -1244,6 +1346,7 @@
# Compare json objects for hosts and dataplane clusters
# hosts
+ main.step( "Host view is consistent across ONOS nodes" )
consistentHostsResult = main.TRUE
for controller in range( len( hosts ) ):
controllerStr = str( controller + 1 )
@@ -1270,6 +1373,7 @@
onpass="Hosts view is consistent across all ONOS nodes",
onfail="ONOS nodes have different views of hosts" )
+ main.step( "Each host has an IP address" )
ipResult = main.TRUE
for controller in range( 0, len( hosts ) ):
controllerStr = str( controller + 1 )
@@ -1285,6 +1389,7 @@
onfail="The ip of at least one host is missing" )
# Strongly connected clusters of devices
+ main.step( "Cluster view is consistent across ONOS nodes" )
consistentClustersResult = main.TRUE
for controller in range( len( clusters ) ):
controllerStr = str( controller + 1 )
@@ -1309,6 +1414,7 @@
onpass="Clusters view is consistent across all ONOS nodes",
onfail="ONOS nodes have different views of clusters" )
# there should always only be one cluster
+ main.step( "Cluster view correct across ONOS nodes" )
try:
numClusters = len( json.loads( clusters[ 0 ] ) )
except ( ValueError, TypeError ):
@@ -1379,14 +1485,6 @@
onpass="Topology Check Test successful",
onfail="Topology Check Test NOT successful" )
- finalAssert = main.TRUE
- finalAssert = ( finalAssert and topoResult and flowCheck
- and intentCheck and consistentMastership
- and mastershipCheck and rolesNotNull )
- utilities.assert_equals( expect=main.TRUE, actual=finalAssert,
- onpass="State check successful",
- onfail="State check NOT successful" )
-
def CASE6( self, main ):
"""
The Failure case. Since this is the Sanity test, we do nothing.
@@ -1397,7 +1495,7 @@
assert utilities.assert_equals, "utilities.assert_equals not defined"
assert CLIs, "CLIs not defined"
assert nodes, "nodes not defined"
- main.log.report( "Wait 60 seconds instead of inducing a failure" )
+ main.case( "Wait 60 seconds instead of inducing a failure" )
time.sleep( 60 )
utilities.assert_equals(
expect=main.TRUE,
@@ -1437,6 +1535,7 @@
onpass="Each device has a master",
onfail="Some devices don't have a master assigned" )
+ main.step( "Read device roles from ONOS" )
ONOSMastership = []
mastershipCheck = main.FALSE
consistentMastership = True
@@ -1562,6 +1661,34 @@
"nodes" )
else:
consistentIntents = False
+
+ # Try to make it easy to figure out what is happening
+ #
+ # Intent ONOS1 ONOS2 ...
+ # 0x01 INSTALLED INSTALLING
+ # ... ... ...
+ # ... ... ...
+ title = " ID"
+ for n in range( numControllers ):
+ title += " " * 10 + "ONOS" + str( n + 1 )
+ main.log.warn( title )
+ # get all intent keys in the cluster
+ keys = []
+ for nodeStr in ONOSIntents:
+ node = json.loads( nodeStr )
+ for intent in node:
+ keys.append( intent.get( 'id' ) )
+ keys = set( keys )
+ for key in keys:
+ row = "%-13s" % key
+ for nodeStr in ONOSIntents:
+ node = json.loads( nodeStr )
+ for intent in node:
+ if intent.get( 'id' ) == key:
+ row += "%-15s" % intent.get( 'state' )
+ main.log.warn( row )
+ # End table view
+
utilities.assert_equals(
expect=True,
actual=consistentIntents,
@@ -1669,6 +1796,7 @@
onpass="No Loss of connectivity",
onfail="Loss of dataplane connectivity detected" )
+ main.step( "Leadership Election is still functional" )
# Test of LeadershipElection
# NOTE: this only works for the sanity test. In case of failures,
# leader will likely change
@@ -1703,15 +1831,6 @@
onpass="Leadership election passed",
onfail="Something went wrong with Leadership election" )
- result = ( mastershipCheck and intentCheck and FlowTables and
- ( not LossInPings ) and rolesNotNull and leaderResult )
- result = int( result )
- if result == main.TRUE:
- main.log.report( "Constant State Tests Passed" )
- utilities.assert_equals( expect=main.TRUE, actual=result,
- onpass="Constant State Tests Passed",
- onfail="Constant state tests failed" )
-
def CASE8( self, main ):
"""
Compare topo
@@ -1739,7 +1858,6 @@
ctrls.append( temp )
MNTopo = TestONTopology( main.Mininet1, ctrls )
- main.step( "Comparing ONOS topology to MN" )
devicesResults = main.TRUE
portsResults = main.TRUE
linksResults = main.TRUE
@@ -1896,6 +2014,7 @@
# Compare json objects for hosts and dataplane clusters
# hosts
+ main.step( "Hosts view is consistent across all ONOS nodes" )
consistentHostsResult = main.TRUE
for controller in range( len( hosts ) ):
controllerStr = str( controller + 1 )
@@ -1922,6 +2041,7 @@
onfail="ONOS nodes have different views of hosts" )
# Strongly connected clusters of devices
+ main.step( "Clusters view is consistent across all ONOS nodes" )
consistentClustersResult = main.TRUE
for controller in range( len( clusters ) ):
controllerStr = str( controller + 1 )
@@ -1947,6 +2067,8 @@
onpass="Clusters view is consistent across all ONOS nodes",
onfail="ONOS nodes have different views of clusters" )
# there should always only be one cluster
+ main.step( "Topology view is correct and consistent across all " +
+ "ONOS nodes" )
try:
numClusters = len( json.loads( clusters[ 0 ] ) )
except ( ValueError, TypeError ):
@@ -1983,6 +2105,7 @@
# FIXME: move this to an ONOS state case
main.step( "Checking ONOS nodes" )
nodesOutput = []
+ nodeResults = main.TRUE
threads = []
for i in range( numControllers ):
t = main.Thread( target=CLIs[i].nodes,
@@ -1999,9 +2122,10 @@
try:
current = json.loads( i )
for node in current:
+ currentResult = main.FALSE
if node['ip'] in ips: # node in nodes() output is in cell
if node['state'] == 'ACTIVE':
- pass # as it should be
+ currentResult = main.TRUE
else:
main.log.error( "Error in ONOS node availability" )
main.log.error(
@@ -2010,9 +2134,13 @@
indent=4,
separators=( ',', ': ' ) ) )
break
+ nodeResults = nodeResults and currentResult
except ( ValueError, TypeError ):
main.log.error( "Error parsing nodes output" )
main.log.warn( repr( i ) )
+ utilities.assert_equals( expect=main.TRUE, actual=nodeResults,
+ onpass="Nodes check successful",
+ onfail="Nodes check NOT successful" )
def CASE9( self, main ):
"""
@@ -2235,7 +2363,10 @@
time.sleep( 10 )
main.step( "Stopping Mininet" )
- main.Mininet1.stopNet()
+ mnResult = main.Mininet1.stopNet()
+ utilities.assert_equals( expect=main.TRUE, actual=mnResult,
+ onpass="Mininet stopped",
+ onfail="MN cleanup NOT successful" )
main.step( "Checking ONOS Logs for errors" )
for node in nodes:
@@ -2246,16 +2377,19 @@
main.step( "Packing and rotating pcap archives" )
os.system( "~/TestON/dependencies/rotate.sh " + str( testname ) )
- # TODO: actually check something here
- utilities.assert_equals( expect=main.TRUE, actual=main.TRUE,
- onpass="Test cleanup successful",
- onfail="Test cleanup NOT successful" )
+ try:
+ gossipIntentLog = open( main.logdir + "/Timers.csv", 'w')
+ # Overwrite the file with a header row and the measured gossip time
+ gossipIntentLog.write( "Gossip Intents\n" )
+ gossipIntentLog.write( str( gossipTime ) )
+ gossipIntentLog.close()
+ except NameError, e:
+ main.log.exception(e)
def CASE14( self, main ):
"""
start election app on all onos nodes
"""
- import time
assert numControllers, "numControllers not defined"
assert main, "main not defined"
assert utilities.assert_equals, "utilities.assert_equals not defined"
@@ -2263,10 +2397,13 @@
assert nodes, "nodes not defined"
leaderResult = main.TRUE
- main.log.info( "Install leadership election app" )
+ main.case( "Start Leadership Election app" )
+ main.step( "Install leadership election app" )
main.ONOScli1.activateApp( "org.onosproject.election" )
leaders = []
for cli in CLIs:
+ cli.electionTestRun()
+ for cli in CLIs:
leader = cli.electionTestLeader()
if leader is None or leader == main.FALSE:
main.log.report( cli.name + ": Leader for the election app " +
@@ -2292,6 +2429,7 @@
"""
Check that Leadership Election is still functional
"""
+ import time
assert numControllers, "numControllers not defined"
assert main, "main not defined"
assert utilities.assert_equals, "utilities.assert_equals not defined"
@@ -2346,6 +2484,7 @@
leaderResult = main.FALSE
elif leaderN is None:
# node may not have recieved the event yet
+ time.sleep(7)
leaderN = cli.electionTestLeader()
leaderList.pop()
leaderList.append( leaderN )
@@ -2384,18 +2523,1090 @@
actual=runResult,
onpass="App re-ran for election",
onfail="App failed to run for election" )
- if consistentLeader == main.TRUE:
- afterRun = main.ONOScli1.electionTestLeader()
- # verify leader didn't just change
- if afterRun == leaderList[ 0 ]:
- leaderResult = main.TRUE
- else:
- leaderResult = main.FALSE
- # TODO: assert on run and withdraw results?
+
+ afterRun = main.ONOScli1.electionTestLeader()
+ # verify leader didn't just change
+ if afterRun == leaderList[ 0 ]:
+ afterResult = main.TRUE
+ else:
+ afterResult = main.FALSE
utilities.assert_equals(
expect=main.TRUE,
- actual=leaderResult,
- onpass="Leadership election passed",
+ actual=afterResult,
+ onpass="Old leader successfully re-ran for election",
onfail="Something went wrong with Leadership election after " +
"the old leader re-ran for election" )
+
+ case15Result = withdrawResult and leaderResult and runResult and\
+ afterResult
+ utilities.assert_equals(
+ expect=main.TRUE,
+ actual=case15Result,
+ onpass="Leadership election is still functional",
+ onfail="Leadership Election is no longer functional" )
+
+ def CASE16( self, main ):
+ """
+ Install Distributed Primitives app
+ """
+ assert numControllers, "numControllers not defined"
+ assert main, "main not defined"
+ assert utilities.assert_equals, "utilities.assert_equals not defined"
+ assert CLIs, "CLIs not defined"
+ assert nodes, "nodes not defined"
+
+ # Variables for the distributed primitives tests
+ global pCounterName
+ global iCounterName
+ global pCounterValue
+ global iCounterValue
+ global onosSet
+ global onosSetName
+ pCounterName = "TestON-Partitions"
+ iCounterName = "TestON-inMemory"
+ pCounterValue = 0
+ iCounterValue = 0
+ onosSet = set([])
+ onosSetName = "TestON-set"
+
+ description = "Install Primitives app"
+ main.case( description )
+ main.step( "Install Primitives app" )
+ appName = "org.onosproject.distributedprimitives"
+ appResults = CLIs[0].activateApp( appName )
+ utilities.assert_equals( expect=main.TRUE,
+ actual=appResults,
+ onpass="Primitives app activated",
+ onfail="Primitives app not activated" )
+
+ def CASE17( self, main ):
+ """
+ Check for basic functionality with distributed primitives
+ """
+ # Make sure variables are defined/set
+ assert numControllers, "numControllers not defined"
+ assert main, "main not defined"
+ assert utilities.assert_equals, "utilities.assert_equals not defined"
+ assert CLIs, "CLIs not defined"
+ assert nodes, "nodes not defined"
+ assert pCounterName, "pCounterName not defined"
+ assert iCounterName, "iCounterName not defined"
+ assert onosSetName, "onosSetName not defined"
+ # NOTE: assert fails if value is 0/None/Empty/False
+ try:
+ pCounterValue
+ except NameError:
+ main.log.error( "pCounterValue not defined, setting to 0" )
+ pCounterValue = 0
+ try:
+ iCounterValue
+ except NameError:
+ main.log.error( "iCounterValue not defined, setting to 0" )
+ iCounterValue = 0
+ try:
+ onosSet
+ except NameError:
+ main.log.error( "onosSet not defined, setting to empty Set" )
+ onosSet = set([])
+ # Variables for the distributed primitives tests. These are local only
+ addValue = "a"
+ addAllValue = "a b c d e f"
+ retainValue = "c d e f"
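+ # For illustration: after add( "a" ) and addAll( "a b c d e f" ) the set
+ # should contain { a, b, c, d, e, f }, and retain( "c d e f" ) should then
+ # shrink it to { c, d, e, f }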
+
+ description = "Check for basic functionality with distributed " +\
+ "primitives"
+ main.case( description )
+ main.caseExplaination = "Test the methods of the distributed " +\
+ "primitives (counters and sets) through the cli"
+ # DISTRIBUTED ATOMIC COUNTERS
+ main.step( "Increment and get a default counter on each node" )
+ pCounters = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].counterTestIncrement,
+ name="counterIncrement-" + str( i ),
+ args=[ pCounterName ] )
+ pCounterValue += 1
+ threads.append( t )
+ t.start()
+
+ for t in threads:
+ t.join()
+ pCounters.append( t.result )
+ # Check that counter incremented numController times
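+ # The counter starts at 0 and each increment should return the new value,
+ # so after N concurrent increments the values 1..N should have been
+ # returned, one per node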
+ pCounterResults = True
+ for i in range( numControllers ):
+ pCounterResults = pCounterResults and ( i + 1 ) in pCounters
+ utilities.assert_equals( expect=True,
+ actual=pCounterResults,
+ onpass="Default counter incremented",
+ onfail="Error incrementing default" +
+ " counter" )
+
+ main.step( "Increment and get an in memory counter on each node" )
+ iCounters = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].counterTestIncrement,
+ name="icounterIncrement-" + str( i ),
+ args=[ iCounterName ],
+ kwargs={ "inMemory": True } )
+ iCounterValue += 1
+ threads.append( t )
+ t.start()
+
+ for t in threads:
+ t.join()
+ iCounters.append( t.result )
+ # Check that counter incremented numController times
+ iCounterResults = True
+ for i in range( numControllers ):
+ iCounterResults = iCounterResults and ( i + 1 ) in iCounters
+ utilities.assert_equals( expect=True,
+ actual=iCounterResults,
+ onpass="In memory counter incremented",
+ onfail="Error incrementing in memory" +
+ " counter" )
+
+ main.step( "Check counters are consistent across nodes" )
+ onosCounters = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].counters,
+ name="counters-" + str( i ) )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ onosCounters.append( t.result )
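+ # NOTE: this compares the raw "counters" CLI output from each node, so it
+ # assumes every node formats and orders its counters the same way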
+ tmp = [ i == onosCounters[ 0 ] for i in onosCounters ]
+ if all( tmp ):
+ main.log.info( "Counters are consistent across all nodes" )
+ consistentCounterResults = main.TRUE
+ else:
+ main.log.error( "Counters are not consistent across all nodes" )
+ consistentCounterResults = main.FALSE
+ utilities.assert_equals( expect=main.TRUE,
+ actual=consistentCounterResults,
+ onpass="ONOS counters are consistent " +
+ "across nodes",
+ onfail="ONOS Counters are inconsistent " +
+ "across nodes" )
+
+ main.step( "Counters we added have the correct values" )
+ correctResults = main.TRUE
+ for i in range( numControllers ):
+ current = onosCounters[i]
+ try:
+ pValue = current.get( pCounterName )
+ iValue = current.get( iCounterName )
+ if pValue == pCounterValue:
+ main.log.info( "Partitioned counter value is correct" )
+ else:
+ main.log.error( "Partitioned counter value is incorrect," +
+ " expected value: " + str( pCounterValue )
+ + " current value: " + str( pValue ) )
+ correctResults = main.FALSE
+ if iValue == iCounterValue:
+ main.log.info( "In memory counter value is correct" )
+ else:
+ main.log.error( "In memory counter value is incorrect, " +
+ "expected value: " + str( iCounterValue ) +
+ " current value: " + str( iValue ) )
+ correctResults = main.FALSE
+ except AttributeError, e:
+ main.log.error( "ONOS" + str( i + 1 ) + " counters result " +
+ "is not as expected" )
+ correctResults = main.FALSE
+ utilities.assert_equals( expect=main.TRUE,
+ actual=correctResults,
+ onpass="Added counters are correct",
+ onfail="Added counters are incorrect" )
+ # DISTRIBUTED SETS
+ main.step( "Distributed Set get" )
+ size = len( onosSet )
+ getResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestGet,
+ name="setTestGet-" + str( i ),
+ args=[ onosSetName ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ getResponses.append( t.result )
+
+ getResults = main.TRUE
+ for i in range( numControllers ):
+ if isinstance( getResponses[ i ], list):
+ current = set( getResponses[ i ] )
+ if len( current ) == len( getResponses[ i ] ):
+ # no repeats
+ if onosSet != current:
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " has incorrect view" +
+ " of set " + onosSetName + ":\n" +
+ str( getResponses[ i ] ) )
+ main.log.debug( "Expected: " + str( onosSet ) )
+ main.log.debug( "Actual: " + str( current ) )
+ getResults = main.FALSE
+ else:
+ # error, set is not a set
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " has repeat elements in" +
+ " set " + onosSetName + ":\n" +
+ str( getResponses[ i ] ) )
+ getResults = main.FALSE
+ elif getResponses[ i ] == main.ERROR:
+ getResults = main.FALSE
+ utilities.assert_equals( expect=main.TRUE,
+ actual=getResults,
+ onpass="Set elements are correct",
+ onfail="Set elements are incorrect" )
+
+ main.step( "Distributed Set size" )
+ sizeResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestSize,
+ name="setTestSize-" + str( i ),
+ args=[ onosSetName ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ sizeResponses.append( t.result )
+
+ sizeResults = main.TRUE
+ for i in range( numControllers ):
+ if size != sizeResponses[ i ]:
+ sizeResults = main.FALSE
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " expected a size of " + str( size ) +
+ " for set " + onosSetName +
+ " but got " + str( sizeResponses[ i ] ) )
+ utilities.assert_equals( expect=main.TRUE,
+ actual=sizeResults,
+ onpass="Set sizes are correct",
+ onfail="Set sizes are incorrect" )
+
+ main.step( "Distributed Set add()" )
+ onosSet.add( addValue )
+ addResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestAdd,
+ name="setTestAdd-" + str( i ),
+ args=[ onosSetName, addValue ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ addResponses.append( t.result )
+
+ # main.TRUE = successfully changed the set
+ # main.FALSE = action resulted in no change in set
+ # main.ERROR - Some error in executing the function
+ addResults = main.TRUE
+ for i in range( numControllers ):
+ if addResponses[ i ] == main.TRUE:
+ # All is well
+ pass
+ elif addResponses[ i ] == main.FALSE:
+ # Already in set, probably fine
+ pass
+ elif addResponses[ i ] == main.ERROR:
+ # Error in execution
+ addResults = main.FALSE
+ else:
+ # unexpected result
+ addResults = main.FALSE
+ if addResults != main.TRUE:
+ main.log.error( "Error executing set add" )
+
+ # Check if set is still correct
+ size = len( onosSet )
+ getResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestGet,
+ name="setTestGet-" + str( i ),
+ args=[ onosSetName ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ getResponses.append( t.result )
+ getResults = main.TRUE
+ for i in range( numControllers ):
+ if isinstance( getResponses[ i ], list):
+ current = set( getResponses[ i ] )
+ if len( current ) == len( getResponses[ i ] ):
+ # no repeats
+ if onosSet != current:
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " has incorrect view" +
+ " of set " + onosSetName + ":\n" +
+ str( getResponses[ i ] ) )
+ main.log.debug( "Expected: " + str( onosSet ) )
+ main.log.debug( "Actual: " + str( current ) )
+ getResults = main.FALSE
+ else:
+ # error, set is not a set
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " has repeat elements in" +
+ " set " + onosSetName + ":\n" +
+ str( getResponses[ i ] ) )
+ getResults = main.FALSE
+ elif getResponses[ i ] == main.ERROR:
+ getResults = main.FALSE
+ sizeResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestSize,
+ name="setTestSize-" + str( i ),
+ args=[ onosSetName ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ sizeResponses.append( t.result )
+ sizeResults = main.TRUE
+ for i in range( numControllers ):
+ if size != sizeResponses[ i ]:
+ sizeResults = main.FALSE
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " expected a size of " + str( size ) +
+ " for set " + onosSetName +
+ " but got " + str( sizeResponses[ i ] ) )
+ addResults = addResults and getResults and sizeResults
+ utilities.assert_equals( expect=main.TRUE,
+ actual=addResults,
+ onpass="Set add correct",
+ onfail="Set add was incorrect" )
+
+ main.step( "Distributed Set addAll()" )
+ onosSet.update( addAllValue.split() )
+ addResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestAdd,
+ name="setTestAddAll-" + str( i ),
+ args=[ onosSetName, addAllValue ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ addResponses.append( t.result )
+
+ # main.TRUE = successfully changed the set
+ # main.FALSE = action resulted in no change in set
+ # main.ERROR - Some error in executing the function
+ addAllResults = main.TRUE
+ for i in range( numControllers ):
+ if addResponses[ i ] == main.TRUE:
+ # All is well
+ pass
+ elif addResponses[ i ] == main.FALSE:
+ # Already in set, probably fine
+ pass
+ elif addResponses[ i ] == main.ERROR:
+ # Error in execution
+ addAllResults = main.FALSE
+ else:
+ # unexpected result
+ addAllResults = main.FALSE
+ if addAllResults != main.TRUE:
+ main.log.error( "Error executing set addAll" )
+
+ # Check if set is still correct
+ size = len( onosSet )
+ getResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestGet,
+ name="setTestGet-" + str( i ),
+ args=[ onosSetName ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ getResponses.append( t.result )
+ getResults = main.TRUE
+ for i in range( numControllers ):
+ if isinstance( getResponses[ i ], list):
+ current = set( getResponses[ i ] )
+ if len( current ) == len( getResponses[ i ] ):
+ # no repeats
+ if onosSet != current:
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " has incorrect view" +
+ " of set " + onosSetName + ":\n" +
+ str( getResponses[ i ] ) )
+ main.log.debug( "Expected: " + str( onosSet ) )
+ main.log.debug( "Actual: " + str( current ) )
+ getResults = main.FALSE
+ else:
+ # error, set is not a set
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " has repeat elements in" +
+ " set " + onosSetName + ":\n" +
+ str( getResponses[ i ] ) )
+ getResults = main.FALSE
+ elif getResponses[ i ] == main.ERROR:
+ getResults = main.FALSE
+ sizeResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestSize,
+ name="setTestSize-" + str( i ),
+ args=[ onosSetName ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ sizeResponses.append( t.result )
+ sizeResults = main.TRUE
+ for i in range( numControllers ):
+ if size != sizeResponses[ i ]:
+ sizeResults = main.FALSE
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " expected a size of " + str( size ) +
+ " for set " + onosSetName +
+ " but got " + str( sizeResponses[ i ] ) )
+ addAllResults = addAllResults and getResults and sizeResults
+ utilities.assert_equals( expect=main.TRUE,
+ actual=addAllResults,
+ onpass="Set addAll correct",
+ onfail="Set addAll was incorrect" )
+
+ main.step( "Distributed Set contains()" )
+ containsResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestGet,
+ name="setContains-" + str( i ),
+ args=[ onosSetName ],
+ kwargs={ "values": addValue } )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ # NOTE: the result is a tuple; index 1 holds the contains boolean
+ containsResponses.append( t.result )
+
+ containsResults = main.TRUE
+ for i in range( numControllers ):
+ if containsResponses[ i ] == main.ERROR:
+ containsResults = main.FALSE
+ else:
+ containsResults = containsResults and\
+ containsResponses[ i ][ 1 ]
+ utilities.assert_equals( expect=main.TRUE,
+ actual=containsResults,
+ onpass="Set contains is functional",
+ onfail="Set contains failed" )
+
+ main.step( "Distributed Set containsAll()" )
+ containsAllResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestGet,
+ name="setContainsAll-" + str( i ),
+ args=[ onosSetName ],
+ kwargs={ "values": addAllValue } )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ # NOTE: the result is a tuple; index 1 holds the containsAll boolean
+ containsAllResponses.append( t.result )
+
+ containsAllResults = main.TRUE
+ for i in range( numControllers ):
+ if containsAllResponses[ i ] == main.ERROR:
+ containsAllResults = main.FALSE
+ else:
+ containsAllResults = containsAllResults and\
+ containsAllResponses[ i ][ 1 ]
+ utilities.assert_equals( expect=main.TRUE,
+ actual=containsAllResults,
+ onpass="Set containsAll is functional",
+ onfail="Set containsAll failed" )
+
+ main.step( "Distributed Set remove()" )
+ onosSet.remove( addValue )
+ removeResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestRemove,
+ name="setTestRemove-" + str( i ),
+ args=[ onosSetName, addValue ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ removeResponses.append( t.result )
+
+ # main.TRUE = successfully changed the set
+ # main.FALSE = action resulted in no change in set
+ # main.ERROR - Some error in executing the function
+ removeResults = main.TRUE
+ for i in range( numControllers ):
+ if removeResponses[ i ] == main.TRUE:
+ # All is well
+ pass
+ elif removeResponses[ i ] == main.FALSE:
+ # not in set, probably fine
+ pass
+ elif removeResponses[ i ] == main.ERROR:
+ # Error in execution
+ removeResults = main.FALSE
+ else:
+ # unexpected result
+ removeResults = main.FALSE
+ if removeResults != main.TRUE:
+ main.log.error( "Error executing set remove" )
+
+ # Check if set is still correct
+ size = len( onosSet )
+ getResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestGet,
+ name="setTestGet-" + str( i ),
+ args=[ onosSetName ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ getResponses.append( t.result )
+ getResults = main.TRUE
+ for i in range( numControllers ):
+ if isinstance( getResponses[ i ], list):
+ current = set( getResponses[ i ] )
+ if len( current ) == len( getResponses[ i ] ):
+ # no repeats
+ if onosSet != current:
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " has incorrect view" +
+ " of set " + onosSetName + ":\n" +
+ str( getResponses[ i ] ) )
+ main.log.debug( "Expected: " + str( onosSet ) )
+ main.log.debug( "Actual: " + str( current ) )
+ getResults = main.FALSE
+ else:
+ # error, set is not a set
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " has repeat elements in" +
+ " set " + onosSetName + ":\n" +
+ str( getResponses[ i ] ) )
+ getResults = main.FALSE
+ elif getResponses[ i ] == main.ERROR:
+ getResults = main.FALSE
+ sizeResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestSize,
+ name="setTestSize-" + str( i ),
+ args=[ onosSetName ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ sizeResponses.append( t.result )
+ sizeResults = main.TRUE
+ for i in range( numControllers ):
+ if size != sizeResponses[ i ]:
+ sizeResults = main.FALSE
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " expected a size of " + str( size ) +
+ " for set " + onosSetName +
+ " but got " + str( sizeResponses[ i ] ) )
+ removeResults = removeResults and getResults and sizeResults
+ utilities.assert_equals( expect=main.TRUE,
+ actual=removeResults,
+ onpass="Set remove correct",
+ onfail="Set remove was incorrect" )
+
+ main.step( "Distributed Set removeAll()" )
+ onosSet.difference_update( addAllValue.split() )
+ removeAllResponses = []
+ threads = []
+ try:
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestRemove,
+ name="setTestRemoveAll-" + str( i ),
+ args=[ onosSetName, addAllValue ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ removeAllResponses.append( t.result )
+ except Exception, e:
+ main.log.exception(e)
+
+ # main.TRUE = successfully changed the set
+ # main.FALSE = action resulted in no change in set
+ # main.ERROR - Some error in executing the function
+ removeAllResults = main.TRUE
+ for i in range( numControllers ):
+ if removeAllResponses[ i ] == main.TRUE:
+ # All is well
+ pass
+ elif removeAllResponses[ i ] == main.FALSE:
+ # not in set, probably fine
+ pass
+ elif removeAllResponses[ i ] == main.ERROR:
+ # Error in execution
+ removeAllResults = main.FALSE
+ else:
+ # unexpected result
+ removeAllResults = main.FALSE
+ if removeAllResults != main.TRUE:
+ main.log.error( "Error executing set removeAll" )
+
+ # Check if set is still correct
+ size = len( onosSet )
+ getResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestGet,
+ name="setTestGet-" + str( i ),
+ args=[ onosSetName ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ getResponses.append( t.result )
+ getResults = main.TRUE
+ for i in range( numControllers ):
+ if isinstance( getResponses[ i ], list):
+ current = set( getResponses[ i ] )
+ if len( current ) == len( getResponses[ i ] ):
+ # no repeats
+ if onosSet != current:
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " has incorrect view" +
+ " of set " + onosSetName + ":\n" +
+ str( getResponses[ i ] ) )
+ main.log.debug( "Expected: " + str( onosSet ) )
+ main.log.debug( "Actual: " + str( current ) )
+ getResults = main.FALSE
+ else:
+ # error, set is not a set
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " has repeat elements in" +
+ " set " + onosSetName + ":\n" +
+ str( getResponses[ i ] ) )
+ getResults = main.FALSE
+ elif getResponses[ i ] == main.ERROR:
+ getResults = main.FALSE
+ sizeResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestSize,
+ name="setTestSize-" + str( i ),
+ args=[ onosSetName ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ sizeResponses.append( t.result )
+ sizeResults = main.TRUE
+ for i in range( numControllers ):
+ if size != sizeResponses[ i ]:
+ sizeResults = main.FALSE
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " expected a size of " + str( size ) +
+ " for set " + onosSetName +
+ " but got " + str( sizeResponses[ i ] ) )
+ removeAllResults = removeAllResults and getResults and sizeResults
+ utilities.assert_equals( expect=main.TRUE,
+ actual=removeAllResults,
+ onpass="Set removeAll correct",
+ onfail="Set removeAll was incorrect" )
+
+ main.step( "Distributed Set addAll()" )
+ onosSet.update( addAllValue.split() )
+ addResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestAdd,
+ name="setTestAddAll-" + str( i ),
+ args=[ onosSetName, addAllValue ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ addResponses.append( t.result )
+
+ # main.TRUE = successfully changed the set
+ # main.FALSE = action resulted in no change in set
+ # main.ERROR - Some error in executing the function
+ addAllResults = main.TRUE
+ for i in range( numControllers ):
+ if addResponses[ i ] == main.TRUE:
+ # All is well
+ pass
+ elif addResponses[ i ] == main.FALSE:
+ # Already in set, probably fine
+ pass
+ elif addResponses[ i ] == main.ERROR:
+ # Error in execution
+ addAllResults = main.FALSE
+ else:
+ # unexpected result
+ addAllResults = main.FALSE
+ if addAllResults != main.TRUE:
+ main.log.error( "Error executing set addAll" )
+
+ # Check if set is still correct
+ size = len( onosSet )
+ getResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestGet,
+ name="setTestGet-" + str( i ),
+ args=[ onosSetName ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ getResponses.append( t.result )
+ getResults = main.TRUE
+ for i in range( numControllers ):
+ if isinstance( getResponses[ i ], list):
+ current = set( getResponses[ i ] )
+ if len( current ) == len( getResponses[ i ] ):
+ # no repeats
+ if onosSet != current:
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " has incorrect view" +
+ " of set " + onosSetName + ":\n" +
+ str( getResponses[ i ] ) )
+ main.log.debug( "Expected: " + str( onosSet ) )
+ main.log.debug( "Actual: " + str( current ) )
+ getResults = main.FALSE
+ else:
+ # error, set is not a set
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " has repeat elements in" +
+ " set " + onosSetName + ":\n" +
+ str( getResponses[ i ] ) )
+ getResults = main.FALSE
+ elif getResponses[ i ] == main.ERROR:
+ getResults = main.FALSE
+ sizeResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestSize,
+ name="setTestSize-" + str( i ),
+ args=[ onosSetName ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ sizeResponses.append( t.result )
+ sizeResults = main.TRUE
+ for i in range( numControllers ):
+ if size != sizeResponses[ i ]:
+ sizeResults = main.FALSE
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " expected a size of " + str( size ) +
+ " for set " + onosSetName +
+ " but got " + str( sizeResponses[ i ] ) )
+ addAllResults = addAllResults and getResults and sizeResults
+ utilities.assert_equals( expect=main.TRUE,
+ actual=addAllResults,
+ onpass="Set addAll correct",
+ onfail="Set addAll was incorrect" )
+
+ main.step( "Distributed Set clear()" )
+ onosSet.clear()
+ clearResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestRemove,
+ name="setTestClear-" + str( i ),
+ args=[ onosSetName, " " ], # value doesn't matter when clearing
+ kwargs={ "clear": True } )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ clearResponses.append( t.result )
+
+ # main.TRUE = successfully changed the set
+ # main.FALSE = action resulted in no change in set
+ # main.ERROR - Some error in executing the function
+ clearResults = main.TRUE
+ for i in range( numControllers ):
+ if clearResponses[ i ] == main.TRUE:
+ # All is well
+ pass
+ elif clearResponses[ i ] == main.FALSE:
+ # Nothing set, probably fine
+ pass
+ elif clearResponses[ i ] == main.ERROR:
+ # Error in execution
+ clearResults = main.FALSE
+ else:
+ # unexpected result
+ clearResults = main.FALSE
+ if clearResults != main.TRUE:
+ main.log.error( "Error executing set clear" )
+
+ # Check if set is still correct
+ size = len( onosSet )
+ getResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestGet,
+ name="setTestGet-" + str( i ),
+ args=[ onosSetName ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ getResponses.append( t.result )
+ getResults = main.TRUE
+ for i in range( numControllers ):
+ if isinstance( getResponses[ i ], list):
+ current = set( getResponses[ i ] )
+ if len( current ) == len( getResponses[ i ] ):
+ # no repeats
+ if onosSet != current:
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " has incorrect view" +
+ " of set " + onosSetName + ":\n" +
+ str( getResponses[ i ] ) )
+ main.log.debug( "Expected: " + str( onosSet ) )
+ main.log.debug( "Actual: " + str( current ) )
+ getResults = main.FALSE
+ else:
+ # error, set is not a set
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " has repeat elements in" +
+ " set " + onosSetName + ":\n" +
+ str( getResponses[ i ] ) )
+ getResults = main.FALSE
+ elif getResponses[ i ] == main.ERROR:
+ getResults = main.FALSE
+ sizeResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestSize,
+ name="setTestSize-" + str( i ),
+ args=[ onosSetName ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ sizeResponses.append( t.result )
+ sizeResults = main.TRUE
+ for i in range( numControllers ):
+ if size != sizeResponses[ i ]:
+ sizeResults = main.FALSE
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " expected a size of " + str( size ) +
+ " for set " + onosSetName +
+ " but got " + str( sizeResponses[ i ] ) )
+ clearResults = clearResults and getResults and sizeResults
+ utilities.assert_equals( expect=main.TRUE,
+ actual=clearResults,
+ onpass="Set clear correct",
+ onfail="Set clear was incorrect" )
+
+ main.step( "Distributed Set addAll()" )
+ onosSet.update( addAllValue.split() )
+ addResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestAdd,
+ name="setTestAddAll-" + str( i ),
+ args=[ onosSetName, addAllValue ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ addResponses.append( t.result )
+
+ # main.TRUE = successfully changed the set
+ # main.FALSE = action resulted in no change in set
+ # main.ERROR - Some error in executing the function
+ addAllResults = main.TRUE
+ for i in range( numControllers ):
+ if addResponses[ i ] == main.TRUE:
+ # All is well
+ pass
+ elif addResponses[ i ] == main.FALSE:
+ # Already in set, probably fine
+ pass
+ elif addResponses[ i ] == main.ERROR:
+ # Error in execution
+ addAllResults = main.FALSE
+ else:
+ # unexpected result
+ addAllResults = main.FALSE
+ if addAllResults != main.TRUE:
+ main.log.error( "Error executing set addAll" )
+
+ # Check if set is still correct
+ size = len( onosSet )
+ getResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestGet,
+ name="setTestGet-" + str( i ),
+ args=[ onosSetName ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ getResponses.append( t.result )
+ getResults = main.TRUE
+ for i in range( numControllers ):
+ if isinstance( getResponses[ i ], list):
+ current = set( getResponses[ i ] )
+ if len( current ) == len( getResponses[ i ] ):
+ # no repeats
+ if onosSet != current:
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " has incorrect view" +
+ " of set " + onosSetName + ":\n" +
+ str( getResponses[ i ] ) )
+ main.log.debug( "Expected: " + str( onosSet ) )
+ main.log.debug( "Actual: " + str( current ) )
+ getResults = main.FALSE
+ else:
+ # error, set is not a set
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " has repeat elements in" +
+ " set " + onosSetName + ":\n" +
+ str( getResponses[ i ] ) )
+ getResults = main.FALSE
+ elif getResponses[ i ] == main.ERROR:
+ getResults = main.FALSE
+ sizeResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestSize,
+ name="setTestSize-" + str( i ),
+ args=[ onosSetName ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ sizeResponses.append( t.result )
+ sizeResults = main.TRUE
+ for i in range( numControllers ):
+ if size != sizeResponses[ i ]:
+ sizeResults = main.FALSE
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " expected a size of " + str( size ) +
+ " for set " + onosSetName +
+ " but got " + str( sizeResponses[ i ] ) )
+ addAllResults = addAllResults and getResults and sizeResults
+ utilities.assert_equals( expect=main.TRUE,
+ actual=addAllResults,
+ onpass="Set addAll correct",
+ onfail="Set addAll was incorrect" )
+
+ main.step( "Distributed Set retain()" )
+ onosSet.intersection_update( retainValue.split() )
+ retainResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestRemove,
+ name="setTestRetain-" + str( i ),
+ args=[ onosSetName, retainValue ],
+ kwargs={ "retain": True } )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ retainResponses.append( t.result )
+
+ # main.TRUE = successfully changed the set
+ # main.FALSE = action resulted in no change in set
+ # main.ERROR - Some error in executing the function
+ retainResults = main.TRUE
+ for i in range( numControllers ):
+ if retainResponses[ i ] == main.TRUE:
+ # All is well
+ pass
+ elif retainResponses[ i ] == main.FALSE:
+ # No change to the set, probably fine
+ pass
+ elif retainResponses[ i ] == main.ERROR:
+ # Error in execution
+ retainResults = main.FALSE
+ else:
+ # unexpected result
+ retainResults = main.FALSE
+ if retainResults != main.TRUE:
+ main.log.error( "Error executing set retain" )
+
+ # Check if set is still correct
+ size = len( onosSet )
+ getResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestGet,
+ name="setTestGet-" + str( i ),
+ args=[ onosSetName ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ getResponses.append( t.result )
+ getResults = main.TRUE
+ for i in range( numControllers ):
+ if isinstance( getResponses[ i ], list):
+ current = set( getResponses[ i ] )
+ if len( current ) == len( getResponses[ i ] ):
+ # no repeats
+ if onosSet != current:
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " has incorrect view" +
+ " of set " + onosSetName + ":\n" +
+ str( getResponses[ i ] ) )
+ main.log.debug( "Expected: " + str( onosSet ) )
+ main.log.debug( "Actual: " + str( current ) )
+ getResults = main.FALSE
+ else:
+ # error, set is not a set
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " has repeat elements in" +
+ " set " + onosSetName + ":\n" +
+ str( getResponses[ i ] ) )
+ getResults = main.FALSE
+ elif getResponses[ i ] == main.ERROR:
+ getResults = main.FALSE
+ sizeResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestSize,
+ name="setTestSize-" + str( i ),
+ args=[ onosSetName ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ sizeResponses.append( t.result )
+ sizeResults = main.TRUE
+ for i in range( numControllers ):
+ if size != sizeResponses[ i ]:
+ sizeResults = main.FALSE
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " expected a size of " +
+ str( size ) + " for set " + onosSetName +
+ " but got " + str( sizeResponses[ i ] ) )
+ retainResults = retainResults and getResults and sizeResults
+ utilities.assert_equals( expect=main.TRUE,
+ actual=retainResults,
+ onpass="Set retain correct",
+ onfail="Set retain was incorrect" )
+
diff --git a/TestON/tests/HATestSingleInstanceRestart/HATestSingleInstanceRestart.params b/TestON/tests/HATestSingleInstanceRestart/HATestSingleInstanceRestart.params
index d3eb8dd..d027c36 100644
--- a/TestON/tests/HATestSingleInstanceRestart/HATestSingleInstanceRestart.params
+++ b/TestON/tests/HATestSingleInstanceRestart/HATestSingleInstanceRestart.params
@@ -16,7 +16,7 @@
#CASE15: Check that Leadership Election is still functional
#1,2,8,3,4,5,14,[6],8,3,7,4,15,9,8,4,10,8,4,11,8,4,12,8,4,13
#extra hosts test 1,2,8,11,8,12,8
- <testcases>1,2,8,3,4,5,14,[6],8,3,7,4,15,9,8,4,10,8,4,11,8,4,12,8,4,13</testcases>
+ <testcases>1,2,8,3,4,5,14,16,17,[6],8,3,7,4,15,17,9,8,4,10,8,4,11,8,4,12,8,4,13</testcases>
<ENV>
<cellName>HA</cellName>
</ENV>
diff --git a/TestON/tests/HATestSingleInstanceRestart/HATestSingleInstanceRestart.py b/TestON/tests/HATestSingleInstanceRestart/HATestSingleInstanceRestart.py
index d2f0bc9..0f68a02 100644
--- a/TestON/tests/HATestSingleInstanceRestart/HATestSingleInstanceRestart.py
+++ b/TestON/tests/HATestSingleInstanceRestart/HATestSingleInstanceRestart.py
@@ -18,6 +18,8 @@
CASE13: Clean up
CASE14: start election app on all onos nodes
CASE15: Check that Leadership Election is still functional
+CASE16: Install Distributed Primitives app
+CASE17: Check for basic functionality with distributed primitives
"""
@@ -97,35 +99,72 @@
for node in nodes:
main.ONOSbench.onosUninstall( node.ip_address )
+ # Make sure ONOS is DEAD
+ main.log.report( "Killing any ONOS processes" )
+ killResults = main.TRUE
+ for node in nodes:
+ killed = main.ONOSbench.onosKill( node.ip_address )
+ killResults = killResults and killed
+
cleanInstallResult = main.TRUE
gitPullResult = main.TRUE
main.step( "Starting Mininet" )
- main.Mininet1.startNet( )
+ mnResult = main.Mininet1.startNet( )
+ utilities.assert_equals( expect=main.TRUE, actual=mnResult,
+ onpass="Mininet Started",
+ onfail="Error starting Mininet" )
main.step( "Compiling the latest version of ONOS" )
if PULLCODE:
main.step( "Git checkout and pull " + gitBranch )
main.ONOSbench.gitCheckout( gitBranch )
gitPullResult = main.ONOSbench.gitPull()
- if gitPullResult == main.ERROR:
- main.log.error( "Error pulling git branch" )
+ # values of 1 or 3 are good
+ utilities.assert_lesser( expect=0, actual=gitPullResult,
+ onpass="Git pull successful",
+ onfail="Git pull failed" )
- main.step( "Using mvn clean & install" )
+ main.step( "Using mvn clean and install" )
cleanInstallResult = main.ONOSbench.cleanInstall()
+ utilities.assert_equals( expect=main.TRUE,
+ actual=cleanInstallResult,
+ onpass="MCI successful",
+ onfail="MCI failed" )
else:
main.log.warn( "Did not pull new code so skipping mvn " +
"clean install" )
main.ONOSbench.getVersion( report=True )
+ # GRAPHS
+ # NOTE: important params here:
+ # job = name of Jenkins job
+ # Plot Name = Plot-HA, only can be used if multiple plots
+ # index = The number of the graph under plot name
+ job = "HASingleInstanceRestart"
+ graphs = '<ac:structured-macro ac:name="html">\n'
+ graphs += '<ac:plain-text-body><![CDATA[\n'
+ graphs += '<iframe src="https://onos-jenkins.onlab.us/job/' + job +\
+ '/plot/getPlot?index=0&width=500&height=300"' +\
+ 'noborder="0" width="500" height="300" scrolling="yes" ' +\
+ 'seamless="seamless"></iframe>\n'
+ graphs += ']]></ac:plain-text-body>\n'
+ graphs += '</ac:structured-macro>\n'
+ main.log.wiki( graphs )
cellResult = main.ONOSbench.setCell( "SingleHA" )
verifyResult = main.ONOSbench.verifyCell()
main.step( "Creating ONOS package" )
packageResult = main.ONOSbench.onosPackage()
+ utilities.assert_equals( expect=main.TRUE, actual=packageResult,
+ onpass="ONOS package successful",
+ onfail="ONOS package failed" )
main.step( "Installing ONOS package" )
- onos1InstallResult = main.ONOSbench.onosInstall( options="-f",
+ onosInstallResult = main.ONOSbench.onosInstall( options="-f",
node=ONOS1Ip )
+ utilities.assert_equals( expect=main.TRUE, actual=onosInstallResult,
+ onpass="ONOS install successful",
+ onfail="ONOS install failed" )
main.step( "Checking if ONOS is up yet" )
for i in range( 2 ):
@@ -134,8 +173,15 @@
break
if not onos1Isup:
main.log.report( "ONOS1 didn't start!" )
+ utilities.assert_equals( expect=main.TRUE, actual=onos1Isup,
+ onpass="ONOS startup successful",
+ onfail="ONOS startup failed" )
- cliResult = main.ONOScli1.startOnosCli( ONOS1Ip )
+ main.log.step( "Starting ONOS CLI sessions" )
+ cliResults = main.ONOScli1.startOnosCli( ONOS1Ip )
+ utilities.assert_equals( expect=main.TRUE, actual=cliResults,
+ onpass="ONOS cli startup successful",
+ onfail="ONOS cli startup failed" )
main.step( "Start Packet Capture MN" )
main.Mininet2.startTcpdump(
@@ -144,13 +190,29 @@
intf=main.params[ 'MNtcpdump' ][ 'intf' ],
port=main.params[ 'MNtcpdump' ][ 'port' ] )
- case1Result = ( cleanInstallResult and packageResult and
- cellResult and verifyResult and onos1InstallResult
- and onos1Isup and cliResult )
+ main.step( "App Ids check" )
+ appCheck = main.TRUE
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].appToIDCheck,
+ name="appToIDCheck-" + str( i ),
+ args=[] )
+ threads.append( t )
+ t.start()
- utilities.assert_equals( expect=main.TRUE, actual=case1Result,
- onpass="Test startup successful",
- onfail="Test startup NOT successful" )
+ for t in threads:
+ t.join()
+ appCheck = appCheck and t.result
+ if appCheck != main.TRUE:
+ main.log.warn( CLIs[0].apps() )
+ main.log.warn( CLIs[0].appIDs() )
+ utilities.assert_equals( expect=main.TRUE, actual=appCheck,
+ onpass="App Ids seem to be correct",
+ onfail="Something is wrong with app Ids" )
+
+ case1Result = ( cleanInstallResult and packageResult and
+ cellResult and verifyResult and onosInstallResult
+ and onos1Isup and cliResults )
if case1Result == main.FALSE:
main.cleanup()
@@ -200,6 +262,7 @@
onpass="Switch mastership assigned correctly",
onfail="Switches not assigned correctly to controllers" )
+ main.step( "Assign mastership of switches to specific controllers" )
roleCall = main.TRUE
roleCheck = main.TRUE
try:
@@ -237,7 +300,7 @@
roleCall = roleCall and main.ONOScli1.deviceRole( deviceId,
ip )
# Check assignment
- master = main.ONOScli1.getRole( deviceId ).get( 'master' )
+ master = main.ONOScli1.getRole( deviceId ).get( 'master' )
if ip in master:
roleCheck = roleCheck and main.TRUE
else:
@@ -255,6 +318,7 @@
onpass="Re-assigned switch mastership to designated controller",
onfail="Something wrong with deviceRole calls" )
+ main.step( "Check mastership was correctly assigned" )
utilities.assert_equals(
expect=main.TRUE,
actual=roleCheck,
@@ -281,18 +345,23 @@
main.log.report( "Adding host intents" )
main.case( "Adding host Intents" )
- main.step( "Discovering Hosts( Via pingall for now )" )
+ main.step( "Discovering Hosts( Via pingall for now )" )
# FIXME: Once we have a host discovery mechanism, use that instead
# install onos-app-fwd
- main.log.info( "Install reactive forwarding app" )
- appResults = main.ONOScli1.activateApp( "org.onosproject.fwd" )
+ main.step( "Install reactive forwarding app" )
+ installResults = CLIs[0].activateApp( "org.onosproject.fwd" )
+ utilities.assert_equals( expect=main.TRUE, actual=installResults,
+ onpass="Install fwd successful",
+ onfail="Install fwd failed" )
- # FIXME: add this to asserts
appCheck = main.ONOScli1.appToIDCheck()
if appCheck != main.TRUE:
main.log.warn( CLIs[0].apps() )
main.log.warn( CLIs[0].appIDs() )
+ utilities.assert_equals( expect=main.TRUE, actual=appCheck,
+ onpass="App Ids seem to be correct",
+ onfail="Something is wrong with app Ids" )
# REACTIVE FWD test
pingResult = main.FALSE
@@ -303,20 +372,26 @@
expect=main.TRUE,
actual=pingResult,
onpass="Reactive Pingall test passed",
- onfail="Reactive Pingall failed, one or more ping pairs failed" )
+ onfail="Reactive Pingall failed, " +
+ "one or more ping pairs failed" )
time2 = time.time()
- main.log.info( "Time for pingall: %2f seconds" % ( time2 - time1 ) )
-
+ main.log.info( "Time for pingall: %2f seconds" %
+ ( time2 - time1 ) )
+ # timeout for fwd flows
+ time.sleep( 11 )
# uninstall onos-app-fwd
- main.log.info( "Uninstall reactive forwarding app" )
- appResults = appResults and main.ONOScli1.deactivateApp( "org.onosproject.fwd" )
+ main.step( "Uninstall reactive forwarding app" )
+ uninstallResult = CLIs[0].deactivateApp( "org.onosproject.fwd" )
+ utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
+ onpass="Uninstall fwd successful",
+ onfail="Uninstall fwd failed" )
appCheck2 = main.ONOScli1.appToIDCheck()
if appCheck2 != main.TRUE:
main.log.warn( CLIs[0].apps() )
main.log.warn( CLIs[0].appIDs() )
-
- # timeout for fwd flows
- time.sleep( 11 )
+ utilities.assert_equals( expect=main.TRUE, actual=appCheck2,
+ onpass="App Ids seem to be correct",
+ onfail="Something is wrong with app Ids" )
main.step( "Add host intents" )
intentIds = []
@@ -462,6 +537,37 @@
actual=intentAddResult,
onpass="Pushed host intents to ONOS",
onfail="Error in pushing host intents to ONOS" )
+ main.step( "Intent Anti-Entropy dispersion" )
+ for i in range(100):
+ correct = True
+ main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
+ for cli in CLIs:
+ ids = cli.getAllIntentsId()
+ main.log.debug( "Intents in " + cli.name + ": " +
+ str( sorted( ids ) ) )
+ if sorted( ids ) != sorted( intentIds ):
+ correct = False
+ if correct:
+ break
+ else:
+ time.sleep(1)
+ if not intentStop:
+ intentStop = time.time()
+ global gossipTime
+ gossipTime = intentStop - intentStart
+ main.log.info( "It took about " + str( gossipTime ) +
+ " seconds for all intents to appear in each node" )
+ # FIXME: make this time configurable/calculate based off of number of
+ # nodes and gossip rounds
+ utilities.assert_greater_equals(
+ expect=40, actual=gossipTime,
+ onpass="ECM anti-entropy for intents worked within " +
+ "expected time",
+ onfail="Intent ECM anti-entropy took too long" )
+ if gossipTime <= 40:
+ intentAddResult = True
if not intentAddResult or "key" in pendingMap:
import time
@@ -754,6 +860,7 @@
except ( ValueError, TypeError ):
main.log.exception( "Error parsing pending map" )
main.log.error( repr( pendingMap ) )
+ main.log.debug( main.ONOScli1.flows( jsonFormat=False ) )
def CASE5( self, main ):
"""
@@ -784,6 +891,7 @@
onpass="Each device has a master",
onfail="Some devices don't have a master assigned" )
+ main.step( "Get the Mastership of each switch" )
ONOS1Mastership = main.ONOScli1.roles()
# TODO: Make this a meaningful check
if "Error" in ONOS1Mastership or not ONOS1Mastership:
@@ -832,17 +940,9 @@
main.step( "Create TestONTopology object" )
ctrls = []
- count = 1
- temp = ()
- temp = temp + ( getattr( main, ( 'ONOS' + str( count ) ) ), )
- temp = temp + ( "ONOS" + str( count ), )
- temp = temp + ( main.params[ 'CTRL' ][ 'ip' + str( count ) ], )
- temp = temp + \
- ( eval( main.params[ 'CTRL' ][ 'port' + str( count ) ] ), )
+ temp = ( nodes[0], nodes[0].name, nodes[0].ip_address, 6633 )
ctrls.append( temp )
- MNTopo = TestONTopology(
- main.Mininet1,
- ctrls ) # can also add Intent API info for intent operations
+ MNTopo = TestONTopology( main.Mininet1, ctrls )
main.step( "Collecting topology information from ONOS" )
devices = []
@@ -855,6 +955,8 @@
links.append( main.ONOScli1.links() )
clusters = []
clusters.append( main.ONOScli1.clusters() )
+
+ main.step( "Each host has an IP address" )
ipResult = main.TRUE
for controller in range( 0, len( hosts ) ):
controllerStr = str( controller + 1 )
@@ -864,9 +966,19 @@
"DEBUG:Error with host ips on controller" +
controllerStr + ": " + str( host ) )
ipResult = main.FALSE
+ utilities.assert_equals(
+ expect=main.TRUE,
+ actual=ipResult,
+ onpass="The ips of the hosts aren't empty",
+ onfail="The ip of at least one host is missing" )
# there should always only be one cluster
- numClusters = len( json.loads( clusters[ 0 ] ) )
+ main.step( "There is only one dataplane cluster" )
+ try:
+ numClusters = len( json.loads( clusters[ 0 ] ) )
+ except ( ValueError, TypeError ):
+ main.log.exception( "Error parsing clusters[0]: " +
+ repr( clusters[ 0 ] ) )
clusterResults = main.FALSE
if numClusters == 1:
clusterResults = main.TRUE
@@ -945,13 +1057,6 @@
onpass="Topology Check Test successful",
onfail="Topology Check Test NOT successful" )
- finalAssert = main.TRUE
- finalAssert = finalAssert and topoResult and flowCheck \
- and intentCheck and consistentMastership and rolesNotNull
- utilities.assert_equals( expect=main.TRUE, actual=finalAssert,
- onpass="State check successful",
- onfail="State check NOT successful" )
-
def CASE6( self, main ):
"""
The Failure case.
@@ -961,10 +1066,21 @@
assert main, "main not defined"
assert utilities.assert_equals, "utilities.assert_equals not defined"
+ # Reset non-persistent variables
+ try:
+ iCounterValue = 0
+ except NameError:
+ main.log.error( "iCounterValue not defined, setting to 0" )
+ iCounterValue = 0
+
main.log.report( "Restart ONOS node" )
main.log.case( "Restart ONOS node" )
- main.ONOSbench.onosKill( ONOS1Ip )
+ main.step( "Killing ONOS processes" )
+ killResult = main.ONOSbench.onosKill( ONOS1Ip )
start = time.time()
+ utilities.assert_equals( expect=main.TRUE, actual=killResult,
+ onpass="ONOS Killed",
+ onfail="Error killing ONOS" )
main.step( "Checking if ONOS is up yet" )
count = 0
@@ -975,13 +1091,16 @@
break
else:
count = count + 1
+ utilities.assert_equals( expect=main.TRUE, actual=onos1Isup,
+ onpass="ONOS is back up",
+ onfail="ONOS failed to start" )
- cliResult = main.ONOScli1.startOnosCli( ONOS1Ip )
+ main.log.step( "Starting ONOS CLI sessions" )
+ cliResults = main.ONOScli1.startOnosCli( ONOS1Ip )
+ utilities.assert_equals( expect=main.TRUE, actual=cliResults,
+ onpass="ONOS cli startup successful",
+ onfail="ONOS cli startup failed" )
- caseResults = main.TRUE and onos1Isup and cliResult
- utilities.assert_equals( expect=main.TRUE, actual=caseResults,
- onpass="ONOS restart successful",
- onfail="ONOS restart NOT successful" )
if elapsed:
main.log.info( "ESTIMATE: ONOS took %s seconds to restart" %
str( elapsed ) )
@@ -997,6 +1116,7 @@
assert utilities.assert_equals, "utilities.assert_equals not defined"
main.case( "Running ONOS Constant State Tests" )
+ main.step( "Check that each switch has a master" )
# Assert that each device has a master
rolesNotNull = main.ONOScli1.rolesNotNull()
utilities.assert_equals(
@@ -1521,7 +1641,10 @@
time.sleep( 10 )
main.step( "Stopping Mininet" )
- main.Mininet1.stopNet()
+ mnResult = main.Mininet1.stopNet()
+ utilities.assert_equals( expect=main.TRUE, actual=mnResult,
+ onpass="Mininet stopped",
+ onfail="MN cleanup NOT successful" )
main.step( "Checking ONOS Logs for errors" )
print colors[ 'purple' ] + "Checking logs for errors on ONOS1:" + \
@@ -1531,11 +1654,6 @@
main.step( "Packing and rotating pcap archives" )
os.system( "~/TestON/dependencies/rotate.sh " + str( testname ) )
- # TODO: actually check something here
- utilities.assert_equals( expect=main.TRUE, actual=main.TRUE,
- onpass="Test cleanup successful",
- onfail="Test cleanup NOT successful" )
-
def CASE14( self, main ):
"""
start election app on all onos nodes
@@ -1545,7 +1663,8 @@
assert utilities.assert_equals, "utilities.assert_equals not defined"
leaderResult = main.TRUE
- main.log.info( "Install leadership election app" )
+ main.case("Start Leadership Election app")
+ main.step( "Install leadership election app" )
main.ONOScli1.activateApp( "org.onosproject.election" )
# check for leader
leader = main.ONOScli1.electionTestLeader()
@@ -1665,3 +1784,1068 @@
onpass="Leadership election passed",
onfail="ONOS1's election app was not leader after it re-ran " +
"for election" )
+
+ def CASE16( self, main ):
+ """
+ Install Distributed Primitives app
+ """
+ assert numControllers, "numControllers not defined"
+ assert main, "main not defined"
+ assert utilities.assert_equals, "utilities.assert_equals not defined"
+ assert CLIs, "CLIs not defined"
+ assert nodes, "nodes not defined"
+
+ # Variables for the distributed primitives tests
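+ # These are declared global so the expected state of each primitive
+ # is shared with later test cases that verify it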
+ global pCounterName
+ global iCounterName
+ global pCounterValue
+ global iCounterValue
+ global onosSet
+ global onosSetName
+ pCounterName = "TestON-Partitions"
+ iCounterName = "TestON-inMemory"
+ pCounterValue = 0
+ iCounterValue = 0
+ onosSet = set([])
+ onosSetName = "TestON-set"
+
+ description = "Install Primitives app"
+ main.case( description )
+ main.step( "Install Primitives app" )
+ appName = "org.onosproject.distributedprimitives"
+ appResults = CLIs[0].activateApp( appName )
+ utilities.assert_equals( expect=main.TRUE,
+ actual=appResults,
+ onpass="Primitives app activated",
+ onfail="Primitives app not activated" )
+
+ def CASE17( self, main ):
+ """
+ Check for basic functionality with distributed primitives
+ """
+ # Make sure variables are defined/set
+ assert numControllers, "numControllers not defined"
+ assert main, "main not defined"
+ assert utilities.assert_equals, "utilities.assert_equals not defined"
+ assert CLIs, "CLIs not defined"
+ assert nodes, "nodes not defined"
+ assert pCounterName, "pCounterName not defined"
+ assert iCounterName, "iCounterName not defined"
+ assert onosSetName, "onosSetName not defined"
+ # NOTE: assert fails if the value is 0/None/Empty/False, so use
+ # try/except NameError checks for the counter values instead
+ try:
+ pCounterValue
+ except NameError:
+ main.log.error( "pCounterValue not defined, setting to 0" )
+ pCounterValue = 0
+ try:
+ iCounterValue
+ except NameError:
+ main.log.error( "iCounterValue not defined, setting to 0" )
+ iCounterValue = 0
+ try:
+ onosSet
+ except NameError:
+ main.log.error( "onosSet not defined, setting to empty Set" )
+ onosSet = set([])
+ # Variables for the distributed primitives tests. These are local only
+ addValue = "a"
+ addAllValue = "a b c d e f"
+ retainValue = "c d e f"
+
+ description = "Check for basic functionality with distributed " +\
+ "primitives"
+ main.case( description )
+ main.caseExplaination = "Test the methods of the distributed primitives (counters and sets) throught the cli"
+ # DISTRIBUTED ATOMIC COUNTERS
+ main.step( "Increment and get a default counter on each node" )
+ pCounters = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].counterTestIncrement,
+ name="counterIncrement-" + str( i ),
+ args=[ pCounterName ] )
+ pCounterValue += 1
+ threads.append( t )
+ t.start()
+
+ for t in threads:
+ t.join()
+ pCounters.append( t.result )
+ # Check that counter incremented numController times
+ pCounterResults = True
+ for i in range( numControllers ):
+ pCounterResults = pCounterResults and ( i + 1 ) in pCounters
+ utilities.assert_equals( expect=True,
+ actual=pCounterResults,
+ onpass="Default counter incremented",
+ onfail="Error incrementing default" +
+ " counter" )
+
+ main.step( "Increment and get an in memory counter on each node" )
+ iCounters = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].counterTestIncrement,
+ name="icounterIncrement-" + str( i ),
+ args=[ iCounterName ],
+ kwargs={ "inMemory": True } )
+ iCounterValue += 1
+ threads.append( t )
+ t.start()
+
+ for t in threads:
+ t.join()
+ iCounters.append( t.result )
+ # Check that counter incremented numController times
+ iCounterResults = True
+ for i in range( numControllers ):
+ iCounterResults = iCounterResults and ( i + 1 ) in iCounters
+ utilities.assert_equals( expect=True,
+ actual=iCounterResults,
+ onpass="In memory counter incremented",
+ onfail="Error incrementing in memory" +
+ " counter" )
+
+ main.step( "Check counters are consistant across nodes" )
+ onosCounters = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].counters,
+ name="counters-" + str( i ) )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ onosCounters.append( t.result )
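+ # Compare each node's counters output against the first node's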
+ tmp = [ i == onosCounters[ 0 ] for i in onosCounters ]
+ if all( tmp ):
+ main.log.info( "Counters are consistent across all nodes" )
+ consistentCounterResults = main.TRUE
+ else:
+ main.log.error( "Counters are not consistent across all nodes" )
+ consistentCounterResults = main.FALSE
+ utilities.assert_equals( expect=main.TRUE,
+ actual=consistentCounterResults,
+ onpass="ONOS counters are consistent " +
+ "across nodes",
+ onfail="ONOS Counters are inconsistent " +
+ "across nodes" )
+
+ main.step( "Counters we added have the correct values" )
+ correctResults = main.TRUE
+ for i in range( numControllers ):
+ current = onosCounters[i]
+ try:
+ pValue = current.get( pCounterName )
+ iValue = current.get( iCounterName )
+ if pValue == pCounterValue:
+ main.log.info( "Partitioned counter value is correct" )
+ else:
+ main.log.error( "Partitioned counter value is incorrect," +
+ " expected value: " + str( pCounterValue )
+ + " current value: " + str( pValue ) )
+ correctResults = main.FALSE
+ if iValue == iCounterValue:
+ main.log.info( "In memory counter value is correct" )
+ else:
+ main.log.error( "In memory counter value is incorrect, " +
+ "expected value: " + str( iCounterValue ) +
+ " current value: " + str( iValue ) )
+ correctResults = main.FALSE
+ except AttributeError, e:
+ main.log.error( "ONOS" + str( i + 1 ) + " counters result " +
+ "is not as expected" )
+ correctResults = main.FALSE
+ utilities.assert_equals( expect=main.TRUE,
+ actual=correctResults,
+ onpass="Added counters are correct",
+ onfail="Added counters are incorrect" )
+ # DISTRIBUTED SETS
+ main.step( "Distributed Set get" )
+ size = len( onosSet )
+ getResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestGet,
+ name="setTestGet-" + str( i ),
+ args=[ onosSetName ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ getResponses.append( t.result )
+
+ getResults = main.TRUE
+ for i in range( numControllers ):
+ if isinstance( getResponses[ i ], list):
+ current = set( getResponses[ i ] )
+ if len( current ) == len( getResponses[ i ] ):
+ # no repeats
+ if onosSet != current:
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " has incorrect view" +
+ " of set " + onosSetName + ":\n" +
+ str( getResponses[ i ] ) )
+ main.log.debug( "Expected: " + str( onosSet ) )
+ main.log.debug( "Actual: " + str( current ) )
+ getResults = main.FALSE
+ else:
+ # error, set is not a set
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " has repeat elements in" +
+ " set " + onosSetName + ":\n" +
+ str( getResponses[ i ] ) )
+ getResults = main.FALSE
+ elif getResponses[ i ] == main.ERROR:
+ getResults = main.FALSE
+ utilities.assert_equals( expect=main.TRUE,
+ actual=getResults,
+ onpass="Set elements are correct",
+ onfail="Set elements are incorrect" )
+
+ main.step( "Distributed Set size" )
+ sizeResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestSize,
+ name="setTestSize-" + str( i ),
+ args=[ onosSetName ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ sizeResponses.append( t.result )
+
+ sizeResults = main.TRUE
+ for i in range( numControllers ):
+ if size != sizeResponses[ i ]:
+ sizeResults = main.FALSE
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " expected a size of " + str( size ) +
+ " for set " + onosSetName +
+ " but got " + str( sizeResponses[ i ] ) )
+ utilities.assert_equals( expect=main.TRUE,
+ actual=sizeResults,
+ onpass="Set sizes are correct",
+ onfail="Set sizes are incorrect" )
+
+ main.step( "Distributed Set add()" )
+ onosSet.add( addValue )
+ addResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestAdd,
+ name="setTestAdd-" + str( i ),
+ args=[ onosSetName, addValue ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ addResponses.append( t.result )
+
+ # main.TRUE = successfully changed the set
+ # main.FALSE = action resulted in no change in set
+ # main.ERROR - Some error in executing the function
+ addResults = main.TRUE
+ for i in range( numControllers ):
+ if addResponses[ i ] == main.TRUE:
+ # All is well
+ pass
+ elif addResponses[ i ] == main.FALSE:
+ # Already in set, probably fine
+ pass
+ elif addResponses[ i ] == main.ERROR:
+ # Error in execution
+ addResults = main.FALSE
+ else:
+ # unexpected result
+ addResults = main.FALSE
+ if addResults != main.TRUE:
+ main.log.error( "Error executing set add" )
+
+ # Check if set is still correct
+ size = len( onosSet )
+ getResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestGet,
+ name="setTestGet-" + str( i ),
+ args=[ onosSetName ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ getResponses.append( t.result )
+ getResults = main.TRUE
+ for i in range( numControllers ):
+ if isinstance( getResponses[ i ], list):
+ current = set( getResponses[ i ] )
+ if len( current ) == len( getResponses[ i ] ):
+ # no repeats
+ if onosSet != current:
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " has incorrect view" +
+ " of set " + onosSetName + ":\n" +
+ str( getResponses[ i ] ) )
+ main.log.debug( "Expected: " + str( onosSet ) )
+ main.log.debug( "Actual: " + str( current ) )
+ getResults = main.FALSE
+ else:
+ # error, set is not a set
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " has repeat elements in" +
+ " set " + onosSetName + ":\n" +
+ str( getResponses[ i ] ) )
+ getResults = main.FALSE
+ elif getResponses[ i ] == main.ERROR:
+ getResults = main.FALSE
+ sizeResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestSize,
+ name="setTestSize-" + str( i ),
+ args=[ onosSetName ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ sizeResponses.append( t.result )
+ sizeResults = main.TRUE
+ for i in range( numControllers ):
+ if size != sizeResponses[ i ]:
+ sizeResults = main.FALSE
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " expected a size of " + str( size ) +
+ " for set " + onosSetName +
+ " but got " + str( sizeResponses[ i ] ) )
+ addResults = addResults and getResults and sizeResults
+ utilities.assert_equals( expect=main.TRUE,
+ actual=addResults,
+ onpass="Set add correct",
+ onfail="Set add was incorrect" )
+
+ main.step( "Distributed Set addAll()" )
+ onosSet.update( addAllValue.split() )
+ addResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestAdd,
+ name="setTestAddAll-" + str( i ),
+ args=[ onosSetName, addAllValue ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ addResponses.append( t.result )
+
+ # main.TRUE = successfully changed the set
+ # main.FALSE = action resulted in no change in set
+ # main.ERROR - Some error in executing the function
+ addAllResults = main.TRUE
+ for i in range( numControllers ):
+ if addResponses[ i ] == main.TRUE:
+ # All is well
+ pass
+ elif addResponses[ i ] == main.FALSE:
+ # Already in set, probably fine
+ pass
+ elif addResponses[ i ] == main.ERROR:
+ # Error in execution
+ addAllResults = main.FALSE
+ else:
+ # unexpected result
+ addAllResults = main.FALSE
+ if addAllResults != main.TRUE:
+ main.log.error( "Error executing set addAll" )
+
+ # Check if set is still correct
+ size = len( onosSet )
+ getResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestGet,
+ name="setTestGet-" + str( i ),
+ args=[ onosSetName ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ getResponses.append( t.result )
+ getResults = main.TRUE
+ for i in range( numControllers ):
+ if isinstance( getResponses[ i ], list):
+ current = set( getResponses[ i ] )
+ if len( current ) == len( getResponses[ i ] ):
+ # no repeats
+ if onosSet != current:
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " has incorrect view" +
+ " of set " + onosSetName + ":\n" +
+ str( getResponses[ i ] ) )
+ main.log.debug( "Expected: " + str( onosSet ) )
+ main.log.debug( "Actual: " + str( current ) )
+ getResults = main.FALSE
+ else:
+ # error, set is not a set
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " has repeat elements in" +
+ " set " + onosSetName + ":\n" +
+ str( getResponses[ i ] ) )
+ getResults = main.FALSE
+ elif getResponses[ i ] == main.ERROR:
+ getResults = main.FALSE
+ sizeResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestSize,
+ name="setTestSize-" + str( i ),
+ args=[ onosSetName ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ sizeResponses.append( t.result )
+ sizeResults = main.TRUE
+ for i in range( numControllers ):
+ if size != sizeResponses[ i ]:
+ sizeResults = main.FALSE
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " expected a size of " + str( size ) +
+ " for set " + onosSetName +
+ " but got " + str( sizeResponses[ i ] ) )
+ addAllResults = addAllResults and getResults and sizeResults
+ utilities.assert_equals( expect=main.TRUE,
+ actual=addAllResults,
+ onpass="Set addAll correct",
+ onfail="Set addAll was incorrect" )
+
+ main.step( "Distributed Set contains()" )
+ containsResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestGet,
+ name="setContains-" + str( i ),
+ args=[ onosSetName ],
+ kwargs={ "values": addValue } )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ # NOTE: t.result is a tuple; the second element is the contains result
+ containsResponses.append( t.result )
+
+ containsResults = main.TRUE
+ for i in range( numControllers ):
+ if containsResponses[ i ] == main.ERROR:
+ containsResults = main.FALSE
+ else:
+ containsResults = containsResults and\
+ containsResponses[ i ][ 1 ]
+ utilities.assert_equals( expect=main.TRUE,
+ actual=containsResults,
+ onpass="Set contains is functional",
+ onfail="Set contains failed" )
+
+ main.step( "Distributed Set containsAll()" )
+ containsAllResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestGet,
+ name="setContainsAll-" + str( i ),
+ args=[ onosSetName ],
+ kwargs={ "values": addAllValue } )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ # NOTE: t.result is a tuple; the second element is the containsAll result
+ containsAllResponses.append( t.result )
+
+ containsAllResults = main.TRUE
+ for i in range( numControllers ):
+ if containsAllResponses[ i ] == main.ERROR:
+ containsAllResults = main.FALSE
+ else:
+ containsAllResults = containsAllResults and\
+ containsAllResponses[ i ][ 1 ]
+ utilities.assert_equals( expect=main.TRUE,
+ actual=containsAllResults,
+ onpass="Set containsAll is functional",
+ onfail="Set containsAll failed" )
+
+ main.step( "Distributed Set remove()" )
+ onosSet.remove( addValue )
+ removeResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestRemove,
+ name="setTestRemove-" + str( i ),
+ args=[ onosSetName, addValue ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ removeResponses.append( t.result )
+
+ # main.TRUE = successfully changed the set
+ # main.FALSE = action resulted in no change in set
+ # main.ERROR - Some error in executing the function
+ removeResults = main.TRUE
+ for i in range( numControllers ):
+ if removeResponses[ i ] == main.TRUE:
+ # All is well
+ pass
+ elif removeResponses[ i ] == main.FALSE:
+ # not in set, probably fine
+ pass
+ elif removeResponses[ i ] == main.ERROR:
+ # Error in execution
+ removeResults = main.FALSE
+ else:
+ # unexpected result
+ removeResults = main.FALSE
+ if removeResults != main.TRUE:
+ main.log.error( "Error executing set remove" )
+
+ # Check if set is still correct
+ size = len( onosSet )
+ getResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestGet,
+ name="setTestGet-" + str( i ),
+ args=[ onosSetName ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ getResponses.append( t.result )
+ getResults = main.TRUE
+ for i in range( numControllers ):
+ if isinstance( getResponses[ i ], list):
+ current = set( getResponses[ i ] )
+ if len( current ) == len( getResponses[ i ] ):
+ # no repeats
+ if onosSet != current:
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " has incorrect view" +
+ " of set " + onosSetName + ":\n" +
+ str( getResponses[ i ] ) )
+ main.log.debug( "Expected: " + str( onosSet ) )
+ main.log.debug( "Actual: " + str( current ) )
+ getResults = main.FALSE
+ else:
+ # error, set is not a set
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " has repeat elements in" +
+ " set " + onosSetName + ":\n" +
+ str( getResponses[ i ] ) )
+ getResults = main.FALSE
+ elif getResponses[ i ] == main.ERROR:
+ getResults = main.FALSE
+ sizeResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestSize,
+ name="setTestSize-" + str( i ),
+ args=[ onosSetName ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ sizeResponses.append( t.result )
+ sizeResults = main.TRUE
+ for i in range( numControllers ):
+ if size != sizeResponses[ i ]:
+ sizeResults = main.FALSE
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " expected a size of " + str( size ) +
+ " for set " + onosSetName +
+ " but got " + str( sizeResponses[ i ] ) )
+ removeResults = removeResults and getResults and sizeResults
+ utilities.assert_equals( expect=main.TRUE,
+ actual=removeResults,
+ onpass="Set remove correct",
+ onfail="Set remove was incorrect" )
+
+ main.step( "Distributed Set removeAll()" )
+ onosSet.difference_update( addAllValue.split() )
+ removeAllResponses = []
+ threads = []
+ try:
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestRemove,
+ name="setTestRemoveAll-" + str( i ),
+ args=[ onosSetName, addAllValue ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ removeAllResponses.append( t.result )
+ except Exception, e:
+ main.log.exception(e)
+
+ # main.TRUE = successfully changed the set
+ # main.FALSE = action resulted in no change in set
+ # main.ERROR - Some error in executing the function
+ removeAllResults = main.TRUE
+ for i in range( numControllers ):
+ if removeAllResponses[ i ] == main.TRUE:
+ # All is well
+ pass
+ elif removeAllResponses[ i ] == main.FALSE:
+ # not in set, probably fine
+ pass
+ elif removeAllResponses[ i ] == main.ERROR:
+ # Error in execution
+ removeAllResults = main.FALSE
+ else:
+ # unexpected result
+ removeAllResults = main.FALSE
+ if removeAllResults != main.TRUE:
+ main.log.error( "Error executing set removeAll" )
+
+ # Check if set is still correct
+ size = len( onosSet )
+ getResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestGet,
+ name="setTestGet-" + str( i ),
+ args=[ onosSetName ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ getResponses.append( t.result )
+ getResults = main.TRUE
+ for i in range( numControllers ):
+ if isinstance( getResponses[ i ], list):
+ current = set( getResponses[ i ] )
+ if len( current ) == len( getResponses[ i ] ):
+ # no repeats
+ if onosSet != current:
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " has incorrect view" +
+ " of set " + onosSetName + ":\n" +
+ str( getResponses[ i ] ) )
+ main.log.debug( "Expected: " + str( onosSet ) )
+ main.log.debug( "Actual: " + str( current ) )
+ getResults = main.FALSE
+ else:
+ # error, set is not a set
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " has repeat elements in" +
+ " set " + onosSetName + ":\n" +
+ str( getResponses[ i ] ) )
+ getResults = main.FALSE
+ elif getResponses[ i ] == main.ERROR:
+ getResults = main.FALSE
+ sizeResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestSize,
+ name="setTestSize-" + str( i ),
+ args=[ onosSetName ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ sizeResponses.append( t.result )
+ sizeResults = main.TRUE
+ for i in range( numControllers ):
+ if size != sizeResponses[ i ]:
+ sizeResults = main.FALSE
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " expected a size of " + str( size ) +
+ " for set " + onosSetName +
+ " but got " + str( sizeResponses[ i ] ) )
+ removeAllResults = removeAllResults and getResults and sizeResults
+ utilities.assert_equals( expect=main.TRUE,
+ actual=removeAllResults,
+ onpass="Set removeAll correct",
+ onfail="Set removeAll was incorrect" )
+
+ main.step( "Distributed Set addAll()" )
+ onosSet.update( addAllValue.split() )
+ addResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestAdd,
+ name="setTestAddAll-" + str( i ),
+ args=[ onosSetName, addAllValue ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ addResponses.append( t.result )
+
+ # main.TRUE = successfully changed the set
+ # main.FALSE = action resulted in no change in set
+ # main.ERROR - Some error in executing the function
+ addAllResults = main.TRUE
+ for i in range( numControllers ):
+ if addResponses[ i ] == main.TRUE:
+ # All is well
+ pass
+ elif addResponses[ i ] == main.FALSE:
+ # Already in set, probably fine
+ pass
+ elif addResponses[ i ] == main.ERROR:
+ # Error in execution
+ addAllResults = main.FALSE
+ else:
+ # unexpected result
+ addAllResults = main.FALSE
+ if addAllResults != main.TRUE:
+ main.log.error( "Error executing set addAll" )
+
+ # Check if set is still correct
+ size = len( onosSet )
+ getResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestGet,
+ name="setTestGet-" + str( i ),
+ args=[ onosSetName ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ getResponses.append( t.result )
+ getResults = main.TRUE
+ for i in range( numControllers ):
+ if isinstance( getResponses[ i ], list):
+ current = set( getResponses[ i ] )
+ if len( current ) == len( getResponses[ i ] ):
+ # no repeats
+ if onosSet != current:
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " has incorrect view" +
+ " of set " + onosSetName + ":\n" +
+ str( getResponses[ i ] ) )
+ main.log.debug( "Expected: " + str( onosSet ) )
+ main.log.debug( "Actual: " + str( current ) )
+ getResults = main.FALSE
+ else:
+ # error, set is not a set
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " has repeat elements in" +
+ " set " + onosSetName + ":\n" +
+ str( getResponses[ i ] ) )
+ getResults = main.FALSE
+ elif getResponses[ i ] == main.ERROR:
+ getResults = main.FALSE
+ sizeResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestSize,
+ name="setTestSize-" + str( i ),
+ args=[ onosSetName ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ sizeResponses.append( t.result )
+ sizeResults = main.TRUE
+ for i in range( numControllers ):
+ if size != sizeResponses[ i ]:
+ sizeResults = main.FALSE
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " expected a size of " + str( size ) +
+ " for set " + onosSetName +
+ " but got " + str( sizeResponses[ i ] ) )
+ addAllResults = addAllResults and getResults and sizeResults
+ utilities.assert_equals( expect=main.TRUE,
+ actual=addAllResults,
+ onpass="Set addAll correct",
+ onfail="Set addAll was incorrect" )
+
+ main.step( "Distributed Set clear()" )
+ onosSet.clear()
+ clearResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestRemove,
+ name="setTestClear-" + str( i ),
+ args=[ onosSetName, " "], # Values doesn't matter
+ kwargs={ "clear": True } )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ clearResponses.append( t.result )
+
+ # main.TRUE = successfully changed the set
+ # main.FALSE = action resulted in no change in set
+ # main.ERROR - Some error in executing the function
+ clearResults = main.TRUE
+ for i in range( numControllers ):
+ if clearResponses[ i ] == main.TRUE:
+ # All is well
+ pass
+ elif clearResponses[ i ] == main.FALSE:
+ # Nothing set, probably fine
+ pass
+ elif clearResponses[ i ] == main.ERROR:
+ # Error in execution
+ clearResults = main.FALSE
+ else:
+ # unexpected result
+ clearResults = main.FALSE
+ if clearResults != main.TRUE:
+ main.log.error( "Error executing set clear" )
+
+ # Check if set is still correct
+ size = len( onosSet )
+ getResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestGet,
+ name="setTestGet-" + str( i ),
+ args=[ onosSetName ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ getResponses.append( t.result )
+ getResults = main.TRUE
+ for i in range( numControllers ):
+ if isinstance( getResponses[ i ], list):
+ current = set( getResponses[ i ] )
+ if len( current ) == len( getResponses[ i ] ):
+ # no repeats
+ if onosSet != current:
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " has incorrect view" +
+ " of set " + onosSetName + ":\n" +
+ str( getResponses[ i ] ) )
+ main.log.debug( "Expected: " + str( onosSet ) )
+ main.log.debug( "Actual: " + str( current ) )
+ getResults = main.FALSE
+ else:
+ # error, set is not a set
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " has repeat elements in" +
+ " set " + onosSetName + ":\n" +
+ str( getResponses[ i ] ) )
+ getResults = main.FALSE
+ elif getResponses[ i ] == main.ERROR:
+ getResults = main.FALSE
+ sizeResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestSize,
+ name="setTestSize-" + str( i ),
+ args=[ onosSetName ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ sizeResponses.append( t.result )
+ sizeResults = main.TRUE
+ for i in range( numControllers ):
+ if size != sizeResponses[ i ]:
+ sizeResults = main.FALSE
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " expected a size of " + str( size ) +
+ " for set " + onosSetName +
+ " but got " + str( sizeResponses[ i ] ) )
+ clearResults = clearResults and getResults and sizeResults
+ utilities.assert_equals( expect=main.TRUE,
+ actual=clearResults,
+ onpass="Set clear correct",
+ onfail="Set clear was incorrect" )
+
+ main.step( "Distributed Set addAll()" )
+ onosSet.update( addAllValue.split() )
+ addResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestAdd,
+ name="setTestAddAll-" + str( i ),
+ args=[ onosSetName, addAllValue ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ addResponses.append( t.result )
+
+ # main.TRUE = successfully changed the set
+ # main.FALSE = action resulted in no change in set
+ # main.ERROR - Some error in executing the function
+ addAllResults = main.TRUE
+ for i in range( numControllers ):
+ if addResponses[ i ] == main.TRUE:
+ # All is well
+ pass
+ elif addResponses[ i ] == main.FALSE:
+ # Already in set, probably fine
+ pass
+ elif addResponses[ i ] == main.ERROR:
+ # Error in execution
+ addAllResults = main.FALSE
+ else:
+ # unexpected result
+ addAllResults = main.FALSE
+ if addAllResults != main.TRUE:
+ main.log.error( "Error executing set addAll" )
+
+ # Check if set is still correct
+ size = len( onosSet )
+ getResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestGet,
+ name="setTestGet-" + str( i ),
+ args=[ onosSetName ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ getResponses.append( t.result )
+ getResults = main.TRUE
+ for i in range( numControllers ):
+ if isinstance( getResponses[ i ], list):
+ current = set( getResponses[ i ] )
+ if len( current ) == len( getResponses[ i ] ):
+ # no repeats
+ if onosSet != current:
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " has incorrect view" +
+ " of set " + onosSetName + ":\n" +
+ str( getResponses[ i ] ) )
+ main.log.debug( "Expected: " + str( onosSet ) )
+ main.log.debug( "Actual: " + str( current ) )
+ getResults = main.FALSE
+ else:
+ # error, set is not a set
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " has repeat elements in" +
+ " set " + onosSetName + ":\n" +
+ str( getResponses[ i ] ) )
+ getResults = main.FALSE
+ elif getResponses[ i ] == main.ERROR:
+ getResults = main.FALSE
+ sizeResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestSize,
+ name="setTestSize-" + str( i ),
+ args=[ onosSetName ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ sizeResponses.append( t.result )
+ sizeResults = main.TRUE
+ for i in range( numControllers ):
+ if size != sizeResponses[ i ]:
+ sizeResults = main.FALSE
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " expected a size of " + str( size ) +
+ " for set " + onosSetName +
+ " but got " + str( sizeResponses[ i ] ) )
+ addAllResults = addAllResults and getResults and sizeResults
+ utilities.assert_equals( expect=main.TRUE,
+ actual=addAllResults,
+ onpass="Set addAll correct",
+ onfail="Set addAll was incorrect" )
+
+ main.step( "Distributed Set retain()" )
+ onosSet.intersection_update( retainValue.split() )
+ retainResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestRemove,
+ name="setTestRetain-" + str( i ),
+ args=[ onosSetName, retainValue ],
+ kwargs={ "retain": True } )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ retainResponses.append( t.result )
+
+ # main.TRUE = successfully changed the set
+ # main.FALSE = action resulted in no change in set
+ # main.ERROR - Some error in executing the function
+ retainResults = main.TRUE
+ for i in range( numControllers ):
+ if retainResponses[ i ] == main.TRUE:
+ # All is well
+ pass
+ elif retainResponses[ i ] == main.FALSE:
+ # Already in set, probably fine
+ pass
+ elif retainResponses[ i ] == main.ERROR:
+ # Error in execution
+ retainResults = main.FALSE
+ else:
+ # unexpected result
+ retainResults = main.FALSE
+ if retainResults != main.TRUE:
+ main.log.error( "Error executing set retain" )
+
+ # Check if set is still correct
+ size = len( onosSet )
+ getResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestGet,
+ name="setTestGet-" + str( i ),
+ args=[ onosSetName ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ getResponses.append( t.result )
+ getResults = main.TRUE
+ for i in range( numControllers ):
+ if isinstance( getResponses[ i ], list):
+ current = set( getResponses[ i ] )
+ if len( current ) == len( getResponses[ i ] ):
+ # no repeats
+ if onosSet != current:
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " has incorrect view" +
+ " of set " + onosSetName + ":\n" +
+ str( getResponses[ i ] ) )
+ main.log.debug( "Expected: " + str( onosSet ) )
+ main.log.debug( "Actual: " + str( current ) )
+ getResults = main.FALSE
+ else:
+ # error, set is not a set
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " has repeat elements in" +
+ " set " + onosSetName + ":\n" +
+ str( getResponses[ i ] ) )
+ getResults = main.FALSE
+ elif getResponses[ i ] == main.ERROR:
+ getResults = main.FALSE
+ sizeResponses = []
+ threads = []
+ for i in range( numControllers ):
+ t = main.Thread( target=CLIs[i].setTestSize,
+ name="setTestSize-" + str( i ),
+ args=[ onosSetName ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ sizeResponses.append( t.result )
+ sizeResults = main.TRUE
+ for i in range( numControllers ):
+ if size != sizeResponses[ i ]:
+ sizeResults = main.FALSE
+ main.log.error( "ONOS" + str( i + 1 ) +
+ " expected a size of " +
+ str( size ) + " for set " + onosSetName +
+ " but got " + str( sizeResponses[ i ] ) )
+ retainResults = retainResults and getResults and sizeResults
+ utilities.assert_equals( expect=main.TRUE,
+ actual=retainResults,
+ onpass="Set retain correct",
+ onfail="Set retain was incorrect" )
+