Refactor/clean HA tests
Clean up leadership tests
Remove in-memory tests (they no longer exist in ONOS)
Change-Id: Ib08fb33773232140f51fd144e9b0eaa6912ae0f8
diff --git a/TestON/tests/HAclusterRestart/HAclusterRestart.py b/TestON/tests/HAclusterRestart/HAclusterRestart.py
index f903f95..7b1acd4 100644
--- a/TestON/tests/HAclusterRestart/HAclusterRestart.py
+++ b/TestON/tests/HAclusterRestart/HAclusterRestart.py
@@ -49,13 +49,13 @@
"""
import imp
import time
+ import json
main.log.info( "ONOS HA test: Restart all ONOS nodes - " +
"initialization" )
main.case( "Setting up test environment" )
main.caseExplanation = "Setup the test environment including " +\
"installing ONOS, starting Mininet and ONOS" +\
"cli sessions."
- # TODO: save all the timers and output them for plotting
# load some variables from the params file
PULLCODE = False
@@ -93,10 +93,8 @@
ONOS7Port = main.params[ 'CTRL' ][ 'port7' ]
try:
- fileName = "Counters"
- path = main.params[ 'imports' ][ 'path' ]
- main.Counters = imp.load_source( fileName,
- path + fileName + ".py" )
+ from tests.HAsanity.dependencies.Counters import Counters
+ main.Counters = Counters()
except Exception as e:
main.log.exception( e )
main.cleanup()
@@ -241,6 +239,9 @@
onpass="ONOS cli startup successful",
onfail="ONOS cli startup failed" )
+ # Create a list of active nodes for use when some nodes are stopped
+ main.activeNodes = [ i for i in range( 0, len( main.CLIs ) ) ]
+
if main.params[ 'tcpdump' ].lower() == "true":
main.step( "Start Packet Capture MN" )
main.Mininet2.startTcpdump(
@@ -250,10 +251,9 @@
port=main.params[ 'MNtcpdump' ][ 'port' ] )
main.step( "App Ids check" )
- time.sleep(60)
appCheck = main.TRUE
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].appToIDCheck,
name="appToIDCheck-" + str( i ),
args=[] )
@@ -264,12 +264,55 @@
t.join()
appCheck = appCheck and t.result
if appCheck != main.TRUE:
- main.log.warn( main.CLIs[0].apps() )
- main.log.warn( main.CLIs[0].appIDs() )
+ node = main.activeNodes[0]
+ main.log.warn( main.CLIs[node].apps() )
+ main.log.warn( main.CLIs[node].appIDs() )
utilities.assert_equals( expect=main.TRUE, actual=appCheck,
onpass="App Ids seem to be correct",
onfail="Something is wrong with app Ids" )
+ main.step( "Checking ONOS nodes" )
+ nodesOutput = []
+ nodeResults = main.TRUE
+ threads = []
+ for i in main.activeNodes:
+ t = main.Thread( target=main.CLIs[i].nodes,
+ name="nodes-" + str( i ),
+ args=[ ] )
+ threads.append( t )
+ t.start()
+
+ for t in threads:
+ t.join()
+ nodesOutput.append( t.result )
+ ips = [ main.nodes[node].ip_address for node in main.activeNodes ]
+ ips.sort()
+ for i in nodesOutput:
+ try:
+ current = json.loads( i )
+ activeIps = []
+ currentResult = main.FALSE
+ for node in current:
+ if node['state'] == 'READY':
+ activeIps.append( node['ip'] )
+ activeIps.sort()
+ if ips == activeIps:
+ currentResult = main.TRUE
+ except ( ValueError, TypeError ):
+ main.log.error( "Error parsing nodes output" )
+ main.log.warn( repr( i ) )
+ currentResult = main.FALSE
+ nodeResults = nodeResults and currentResult
+ utilities.assert_equals( expect=main.TRUE, actual=nodeResults,
+ onpass="Nodes check successful",
+ onfail="Nodes check NOT successful" )
+
+ if not nodeResults:
+ for cli in main.CLIs:
+ main.log.debug( "{} components not ACTIVE: \n{}".format(
+ cli.name,
+ cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
+
if cliResults == main.FALSE:
main.log.error( "Failed to start ONOS, stopping test" )
main.cleanup()
@@ -357,6 +400,7 @@
ipList = [ ]
deviceList = []
+ onosCli = main.CLIs[ main.activeNodes[0] ]
try:
# Assign mastership to specific controllers. This assignment was
# determined for a 7 node cluser, but will work with any sized
@@ -366,45 +410,45 @@
if i == 1:
c = 0
ip = main.nodes[ c ].ip_address # ONOS1
- deviceId = main.ONOScli1.getDevice( "1000" ).get( 'id' )
+ deviceId = onosCli.getDevice( "1000" ).get( 'id' )
elif i == 2:
c = 1 % main.numCtrls
ip = main.nodes[ c ].ip_address # ONOS2
- deviceId = main.ONOScli1.getDevice( "2000" ).get( 'id' )
+ deviceId = onosCli.getDevice( "2000" ).get( 'id' )
elif i == 3:
c = 1 % main.numCtrls
ip = main.nodes[ c ].ip_address # ONOS2
- deviceId = main.ONOScli1.getDevice( "3000" ).get( 'id' )
+ deviceId = onosCli.getDevice( "3000" ).get( 'id' )
elif i == 4:
c = 3 % main.numCtrls
ip = main.nodes[ c ].ip_address # ONOS4
- deviceId = main.ONOScli1.getDevice( "3004" ).get( 'id' )
+ deviceId = onosCli.getDevice( "3004" ).get( 'id' )
elif i == 5:
c = 2 % main.numCtrls
ip = main.nodes[ c ].ip_address # ONOS3
- deviceId = main.ONOScli1.getDevice( "5000" ).get( 'id' )
+ deviceId = onosCli.getDevice( "5000" ).get( 'id' )
elif i == 6:
c = 2 % main.numCtrls
ip = main.nodes[ c ].ip_address # ONOS3
- deviceId = main.ONOScli1.getDevice( "6000" ).get( 'id' )
+ deviceId = onosCli.getDevice( "6000" ).get( 'id' )
elif i == 7:
c = 5 % main.numCtrls
ip = main.nodes[ c ].ip_address # ONOS6
- deviceId = main.ONOScli1.getDevice( "6007" ).get( 'id' )
+ deviceId = onosCli.getDevice( "6007" ).get( 'id' )
elif i >= 8 and i <= 17:
c = 4 % main.numCtrls
ip = main.nodes[ c ].ip_address # ONOS5
dpid = '3' + str( i ).zfill( 3 )
- deviceId = main.ONOScli1.getDevice( dpid ).get( 'id' )
+ deviceId = onosCli.getDevice( dpid ).get( 'id' )
elif i >= 18 and i <= 27:
c = 6 % main.numCtrls
ip = main.nodes[ c ].ip_address # ONOS7
dpid = '6' + str( i ).zfill( 3 )
- deviceId = main.ONOScli1.getDevice( dpid ).get( 'id' )
+ deviceId = onosCli.getDevice( dpid ).get( 'id' )
elif i == 28:
c = 0
ip = main.nodes[ c ].ip_address # ONOS1
- deviceId = main.ONOScli1.getDevice( "2800" ).get( 'id' )
+ deviceId = onosCli.getDevice( "2800" ).get( 'id' )
else:
main.log.error( "You didn't write an else statement for " +
"switch s" + str( i ) )
@@ -412,13 +456,12 @@
# Assign switch
assert deviceId, "No device id for s" + str( i ) + " in ONOS"
# TODO: make this controller dynamic
- roleCall = roleCall and main.ONOScli1.deviceRole( deviceId,
- ip )
+ roleCall = roleCall and onosCli.deviceRole( deviceId, ip )
ipList.append( ip )
deviceList.append( deviceId )
except ( AttributeError, AssertionError ):
main.log.exception( "Something is wrong with ONOS device view" )
- main.log.info( main.ONOScli1.devices() )
+ main.log.info( onosCli.devices() )
utilities.assert_equals(
expect=main.TRUE,
actual=roleCall,
@@ -434,7 +477,7 @@
ip = ipList[i]
deviceId = deviceList[i]
# Check assignment
- master = main.ONOScli1.getRole( deviceId ).get( 'master' )
+ master = onosCli.getRole( deviceId ).get( 'master' )
if ip in master:
roleCheck = roleCheck and main.TRUE
else:
@@ -482,7 +525,8 @@
# install onos-app-fwd
main.step( "Install reactive forwarding app" )
- installResults = main.CLIs[0].activateApp( "org.onosproject.fwd" )
+ onosCli = main.CLIs[ main.activeNodes[0] ]
+ installResults = onosCli.activateApp( "org.onosproject.fwd" )
utilities.assert_equals( expect=main.TRUE, actual=installResults,
onpass="Install fwd successful",
onfail="Install fwd failed" )
@@ -490,7 +534,7 @@
main.step( "Check app ids" )
appCheck = main.TRUE
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].appToIDCheck,
name="appToIDCheck-" + str( i ),
args=[] )
@@ -501,8 +545,8 @@
t.join()
appCheck = appCheck and t.result
if appCheck != main.TRUE:
- main.log.warn( main.CLIs[0].apps() )
- main.log.warn( main.CLIs[0].appIDs() )
+ main.log.warn( onosCli.apps() )
+ main.log.warn( onosCli.appIDs() )
utilities.assert_equals( expect=main.TRUE, actual=appCheck,
onpass="App Ids seem to be correct",
onfail="Something is wrong with app Ids" )
@@ -531,7 +575,8 @@
time.sleep( 11 )
# uninstall onos-app-fwd
main.step( "Uninstall reactive forwarding app" )
- uninstallResult = main.CLIs[0].deactivateApp( "org.onosproject.fwd" )
+ node = main.activeNodes[0]
+ uninstallResult = main.CLIs[node].deactivateApp( "org.onosproject.fwd" )
utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
onpass="Uninstall fwd successful",
onfail="Uninstall fwd failed" )
@@ -539,7 +584,7 @@
main.step( "Check app ids" )
threads = []
appCheck2 = main.TRUE
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].appToIDCheck,
name="appToIDCheck-" + str( i ),
args=[] )
@@ -550,8 +595,9 @@
t.join()
appCheck2 = appCheck2 and t.result
if appCheck2 != main.TRUE:
- main.log.warn( main.CLIs[0].apps() )
- main.log.warn( main.CLIs[0].appIDs() )
+ node = main.activeNodes[0]
+ main.log.warn( main.CLIs[node].apps() )
+ main.log.warn( main.CLIs[node].appIDs() )
utilities.assert_equals( expect=main.TRUE, actual=appCheck2,
onpass="App Ids seem to be correct",
onfail="Something is wrong with app Ids" )
@@ -570,16 +616,17 @@
host2 = "00:00:00:00:00:" + \
str( hex( i + 10 )[ 2: ] ).zfill( 2 ).upper()
# NOTE: getHost can return None
- host1Dict = main.ONOScli1.getHost( host1 )
- host2Dict = main.ONOScli1.getHost( host2 )
+ host1Dict = onosCli.getHost( host1 )
+ host2Dict = onosCli.getHost( host2 )
host1Id = None
host2Id = None
if host1Dict and host2Dict:
host1Id = host1Dict.get( 'id', None )
host2Id = host2Dict.get( 'id', None )
if host1Id and host2Id:
- nodeNum = ( i % main.numCtrls )
- tmpId = main.CLIs[ nodeNum ].addHostIntent( host1Id, host2Id )
+ nodeNum = ( i % len( main.activeNodes ) )
+ node = main.activeNodes[nodeNum]
+ tmpId = main.CLIs[node].addHostIntent( host1Id, host2Id )
if tmpId:
main.log.info( "Added intent with id: " + tmpId )
intentIds.append( tmpId )
@@ -589,7 +636,8 @@
else:
main.log.error( "Error, getHost() failed for h" + str( i ) +
" and/or h" + str( i + 10 ) )
- hosts = main.CLIs[ 0 ].hosts()
+ node = main.activeNodes[0]
+ hosts = main.CLIs[node].hosts()
main.log.warn( "Hosts output: " )
try:
main.log.warn( json.dumps( json.loads( hosts ),
@@ -604,7 +652,7 @@
onfail="Error looking up host ids" )
intentStart = time.time()
- onosIds = main.ONOScli1.getAllIntentsId()
+ onosIds = onosCli.getAllIntentsId()
main.log.info( "Submitted intents: " + str( intentIds ) )
main.log.info( "Intents in ONOS: " + str( onosIds ) )
for intent in intentIds:
@@ -617,7 +665,7 @@
else:
intentStop = None
# Print the intent states
- intents = main.ONOScli1.intents()
+ intents = onosCli.intents()
intentStates = []
installedCheck = True
main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
@@ -643,7 +691,7 @@
count += 1
main.log.info( "%-6s%-15s%-15s" %
( str( count ), str( i ), str( s ) ) )
- leaders = main.ONOScli1.leaders()
+ leaders = onosCli.leaders()
try:
missing = False
if leaders:
@@ -670,12 +718,12 @@
main.log.error( repr( leaders ) )
# Check all nodes
if missing:
- for node in main.CLIs:
- response = node.leaders( jsonFormat=False)
- main.log.warn( str( node.name ) + " leaders output: \n" +
+ for i in main.activeNodes:
+ response = main.CLIs[i].leaders( jsonFormat=False)
+ main.log.warn( str( main.CLIs[i].name ) + " leaders output: \n" +
str( response ) )
- partitions = main.ONOScli1.partitions()
+ partitions = onosCli.partitions()
try:
if partitions :
parsedPartitions = json.loads( partitions )
@@ -690,7 +738,7 @@
except ( ValueError, TypeError ):
main.log.exception( "Error parsing partitions" )
main.log.error( repr( partitions ) )
- pendingMap = main.ONOScli1.pendingMap()
+ pendingMap = onosCli.pendingMap()
try:
if pendingMap :
parsedPending = json.loads( pendingMap )
@@ -711,21 +759,21 @@
main.log.error( "Error in pushing host intents to ONOS" )
main.step( "Intent Anti-Entropy dispersion" )
- for i in range(100):
+ for j in range(100):
correct = True
main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
- for cli in main.CLIs:
+ for i in main.activeNodes:
onosIds = []
- ids = cli.getAllIntentsId()
+ ids = main.CLIs[i].getAllIntentsId()
onosIds.append( ids )
- main.log.debug( "Intents in " + cli.name + ": " +
+ main.log.debug( "Intents in " + main.CLIs[i].name + ": " +
str( sorted( onosIds ) ) )
if sorted( ids ) != sorted( intentIds ):
main.log.warn( "Set of intent IDs doesn't match" )
correct = False
break
else:
- intents = json.loads( cli.intents() )
+ intents = json.loads( main.CLIs[i].intents() )
for intent in intents:
if intent[ 'state' ] != "INSTALLED":
main.log.warn( "Intent " + intent[ 'id' ] +
@@ -754,7 +802,7 @@
else:
count += 1
gossipPeriod = int( main.params['timers']['gossip'] )
- maxGossipTime = gossipPeriod * len( main.nodes )
+ maxGossipTime = gossipPeriod * len( main.activeNodes )
utilities.assert_greater_equals(
expect=maxGossipTime, actual=gossipTime,
onpass="ECM anti-entropy for intents worked within " +
@@ -770,11 +818,11 @@
installedCheck = True
main.log.info( "Sleeping 60 seconds to see if intents are found" )
time.sleep( 60 )
- onosIds = main.ONOScli1.getAllIntentsId()
+ onosIds = onosCli.getAllIntentsId()
main.log.info( "Submitted intents: " + str( intentIds ) )
main.log.info( "Intents in ONOS: " + str( onosIds ) )
# Print the intent states
- intents = main.ONOScli1.intents()
+ intents = onosCli.intents()
intentStates = []
main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
count = 0
@@ -798,7 +846,7 @@
count += 1
main.log.info( "%-6s%-15s%-15s" %
( str( count ), str( i ), str( s ) ) )
- leaders = main.ONOScli1.leaders()
+ leaders = onosCli.leaders()
try:
missing = False
if leaders:
@@ -828,12 +876,13 @@
main.log.error( repr( leaders ) )
# Check all nodes
if missing:
- for node in main.CLIs:
+ for i in main.activeNodes:
+ node = main.CLIs[i]
response = node.leaders( jsonFormat=False)
main.log.warn( str( node.name ) + " leaders output: \n" +
str( response ) )
- partitions = main.ONOScli1.partitions()
+ partitions = onosCli.partitions()
try:
if partitions :
parsedPartitions = json.loads( partitions )
@@ -848,7 +897,7 @@
except ( ValueError, TypeError ):
main.log.exception( "Error parsing partitions" )
main.log.error( repr( partitions ) )
- pendingMap = main.ONOScli1.pendingMap()
+ pendingMap = onosCli.pendingMap()
try:
if pendingMap :
parsedPending = json.loads( pendingMap )
@@ -879,6 +928,7 @@
"functionality and check the state of " +\
"the intent"
main.step( "Ping across added host intents" )
+ onosCli = main.CLIs[ main.activeNodes[0] ]
PingResult = main.TRUE
for i in range( 8, 18 ):
ping = main.Mininet1.pingHost( src="h" + str( i ),
@@ -896,7 +946,7 @@
# TODO: pretty print
main.log.warn( "ONOS1 intents: " )
try:
- tmpIntents = main.ONOScli1.intents()
+ tmpIntents = onosCli.intents()
main.log.warn( json.dumps( json.loads( tmpIntents ),
sort_keys=True,
indent=4,
@@ -915,7 +965,7 @@
while not installedCheck and loopCount < 40:
installedCheck = True
# Print the intent states
- intents = main.ONOScli1.intents()
+ intents = onosCli.intents()
intentStates = []
main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
count = 0
@@ -944,7 +994,7 @@
"INSTALLED state" )
main.step( "Check leadership of topics" )
- leaders = main.ONOScli1.leaders()
+ leaders = onosCli.leaders()
topicCheck = main.TRUE
try:
if leaders:
@@ -979,7 +1029,8 @@
# TODO: Check for a leader of these topics
# Check all nodes
if topicCheck:
- for node in main.CLIs:
+ for i in main.activeNodes:
+ node = main.CLIs[i]
response = node.leaders( jsonFormat=False)
main.log.warn( str( node.name ) + " leaders output: \n" +
str( response ) )
@@ -988,7 +1039,7 @@
onpass="intent Partitions is in leaders",
onfail="Some topics were lost " )
# Print partitions
- partitions = main.ONOScli1.partitions()
+ partitions = onosCli.partitions()
try:
if partitions :
parsedPartitions = json.loads( partitions )
@@ -1004,7 +1055,7 @@
main.log.exception( "Error parsing partitions" )
main.log.error( repr( partitions ) )
# Print Pending Map
- pendingMap = main.ONOScli1.pendingMap()
+ pendingMap = onosCli.pendingMap()
try:
if pendingMap :
parsedPending = json.loads( pendingMap )
@@ -1024,7 +1075,7 @@
"intents change" )
time.sleep( 60 )
# Print the intent states
- intents = main.ONOScli1.intents()
+ intents = onosCli.intents()
intentStates = []
main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
count = 0
@@ -1043,7 +1094,7 @@
count += 1
main.log.info( "%-6s%-15s%-15s" %
( str( count ), str( i ), str( s ) ) )
- leaders = main.ONOScli1.leaders()
+ leaders = onosCli.leaders()
try:
missing = False
if leaders:
@@ -1072,12 +1123,13 @@
main.log.exception( "Error parsing leaders" )
main.log.error( repr( leaders ) )
if missing:
- for node in main.CLIs:
+ for i in main.activeNodes:
+ node = main.CLIs[i]
response = node.leaders( jsonFormat=False)
main.log.warn( str( node.name ) + " leaders output: \n" +
str( response ) )
- partitions = main.ONOScli1.partitions()
+ partitions = onosCli.partitions()
try:
if partitions :
parsedPartitions = json.loads( partitions )
@@ -1092,7 +1144,7 @@
except ( ValueError, TypeError ):
main.log.exception( "Error parsing partitions" )
main.log.error( repr( partitions ) )
- pendingMap = main.ONOScli1.pendingMap()
+ pendingMap = onosCli.pendingMap()
try:
if pendingMap :
parsedPending = json.loads( pendingMap )
@@ -1107,7 +1159,8 @@
main.log.exception( "Error parsing pending map" )
main.log.error( repr( pendingMap ) )
# Print flowrules
- main.log.debug( main.CLIs[0].flows( jsonFormat=False ) )
+ node = main.activeNodes[0]
+ main.log.debug( main.CLIs[node].flows( jsonFormat=False ) )
main.step( "Wait a minute then ping again" )
# the wait is above
PingResult = main.TRUE
@@ -1127,7 +1180,7 @@
# TODO: pretty print
main.log.warn( "ONOS1 intents: " )
try:
- tmpIntents = main.ONOScli1.intents()
+ tmpIntents = onosCli.intents()
main.log.warn( json.dumps( json.loads( tmpIntents ),
sort_keys=True,
indent=4,
@@ -1164,7 +1217,7 @@
# Assert that each device has a master
rolesNotNull = main.TRUE
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].rolesNotNull,
name="rolesNotNull-" + str( i ),
args=[] )
@@ -1186,7 +1239,7 @@
consistentMastership = True
rolesResults = True
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].roles,
name="roles-" + str( i ),
args=[] )
@@ -1197,13 +1250,12 @@
t.join()
ONOSMastership.append( t.result )
- for i in range( main.numCtrls ):
+ for i in range( len( ONOSMastership ) ):
+ node = str( main.activeNodes[i] + 1 )
if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
- main.log.error( "Error in getting ONOS" + str( i + 1 ) +
- " roles" )
- main.log.warn(
- "ONOS" + str( i + 1 ) + " mastership response: " +
- repr( ONOSMastership[i] ) )
+ main.log.error( "Error in getting ONOS" + node + " roles" )
+ main.log.warn( "ONOS" + node + " mastership response: " +
+ repr( ONOSMastership[i] ) )
rolesResults = False
utilities.assert_equals(
expect=True,
@@ -1224,10 +1276,11 @@
onfail="ONOS nodes have different views of switch roles" )
if rolesResults and not consistentMastership:
- for i in range( main.numCtrls ):
+ for i in range( len( main.activeNodes ) ):
+ node = str( main.activeNodes[i] + 1 )
try:
main.log.warn(
- "ONOS" + str( i + 1 ) + " roles: ",
+ "ONOS" + node + " roles: ",
json.dumps(
json.loads( ONOSMastership[ i ] ),
sort_keys=True,
@@ -1247,7 +1300,7 @@
consistentIntents = True
intentsResults = True
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].intents,
name="intents-" + str( i ),
args=[],
@@ -1259,8 +1312,8 @@
t.join()
ONOSIntents.append( t.result )
- for i in range( main.numCtrls ):
- node = str( i + 1 )
+ for i in range( len( ONOSIntents ) ):
+ node = str( main.activeNodes[i] + 1 )
if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
main.log.error( "Error in getting ONOS" + node + " intents" )
main.log.warn( "ONOS" + node + " intents response: " +
@@ -1293,44 +1346,51 @@
# ... ... ...
# ... ... ...
title = " Id"
- for n in range( main.numCtrls ):
+ for n in main.activeNodes:
title += " " * 10 + "ONOS" + str( n + 1 )
main.log.warn( title )
# get all intent keys in the cluster
keys = []
- for nodeStr in ONOSIntents:
- node = json.loads( nodeStr )
- for intent in node:
- keys.append( intent.get( 'id' ) )
- keys = set( keys )
- for key in keys:
- row = "%-13s" % key
+ try:
+ # Get the set of all intent keys
for nodeStr in ONOSIntents:
node = json.loads( nodeStr )
for intent in node:
- if intent.get( 'id', "Error" ) == key:
- row += "%-15s" % intent.get( 'state' )
- main.log.warn( row )
- # End table view
+ keys.append( intent.get( 'id' ) )
+ keys = set( keys )
+ # For each intent key, print the state on each node
+ for key in keys:
+ row = "%-13s" % key
+ for nodeStr in ONOSIntents:
+ node = json.loads( nodeStr )
+ for intent in node:
+ if intent.get( 'id', "Error" ) == key:
+ row += "%-15s" % intent.get( 'state' )
+ main.log.warn( row )
+ # End of intent state table
+ except ValueError as e:
+ main.log.exception( e )
+ main.log.debug( "nodeStr was: " + repr( nodeStr ) )
if intentsResults and not consistentIntents:
# print the json objects
- n = len(ONOSIntents)
- main.log.debug( "ONOS" + str( n ) + " intents: " )
+ n = str( main.activeNodes[-1] + 1 )
+ main.log.debug( "ONOS" + n + " intents: " )
main.log.debug( json.dumps( json.loads( ONOSIntents[ -1 ] ),
sort_keys=True,
indent=4,
separators=( ',', ': ' ) ) )
- for i in range( main.numCtrls ):
+ for i in range( len( ONOSIntents ) ):
+ node = str( main.activeNodes[i] + 1 )
if ONOSIntents[ i ] != ONOSIntents[ -1 ]:
- main.log.debug( "ONOS" + str( i + 1 ) + " intents: " )
+ main.log.debug( "ONOS" + node + " intents: " )
main.log.debug( json.dumps( json.loads( ONOSIntents[i] ),
sort_keys=True,
indent=4,
separators=( ',', ': ' ) ) )
else:
- main.log.debug( main.nodes[ i ].name + " intents match ONOS" +
- str( n ) + " intents" )
+ main.log.debug( "ONOS" + node + " intents match ONOS" +
+ n + " intents" )
elif intentsResults and consistentIntents:
intentCheck = main.TRUE
intentState = ONOSIntents[ 0 ]
@@ -1344,7 +1404,7 @@
consistentFlows = True
flowsResults = True
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].flows,
name="flows-" + str( i ),
args=[],
@@ -1359,8 +1419,8 @@
result = t.result
ONOSFlows.append( result )
- for i in range( main.numCtrls ):
- num = str( i + 1 )
+ for i in range( len( ONOSFlows ) ):
+ num = str( main.activeNodes[i] + 1 )
if not ONOSFlows[ i ] or "Error" in ONOSFlows[ i ]:
main.log.error( "Error in getting ONOS" + num + " flows" )
main.log.warn( "ONOS" + num + " flows response: " +
@@ -1396,16 +1456,16 @@
onfail="ONOS nodes have different flow counts" )
if flowsResults and not consistentFlows:
- for i in range( main.numCtrls ):
+ for i in range( len( ONOSFlows ) ):
+ node = str( main.activeNodes[i] + 1 )
try:
main.log.warn(
- "ONOS" + str( i + 1 ) + " flows: " +
+ "ONOS" + node + " flows: " +
json.dumps( json.loads( ONOSFlows[i] ), sort_keys=True,
indent=4, separators=( ',', ': ' ) ) )
except ( ValueError, TypeError ):
- main.log.warn(
- "ONOS" + str( i + 1 ) + " flows: " +
- repr( ONOSFlows[ i ] ) )
+ main.log.warn( "ONOS" + node + " flows: " +
+ repr( ONOSFlows[ i ] ) )
elif flowsResults and consistentFlows:
flowCheck = main.TRUE
flowState = ONOSFlows[ 0 ]
@@ -1414,7 +1474,7 @@
global flows
flows = []
for i in range( 1, 29 ):
- flows.append( main.Mininet1.getFlowTable( "s" + str( i ), version="1.3" ) )
+ flows.append( main.Mininet1.getFlowTable( "s" + str( i ), version="1.3", debug=False ) )
if flowCheck == main.FALSE:
for table in flows:
main.log.warn( table )
@@ -1465,7 +1525,7 @@
main.step( "Collecting topology information from ONOS" )
devices = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].devices,
name="devices-" + str( i ),
args=[ ] )
@@ -1477,7 +1537,7 @@
devices.append( t.result )
hosts = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].hosts,
name="hosts-" + str( i ),
args=[ ] )
@@ -1497,7 +1557,7 @@
ports = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].ports,
name="ports-" + str( i ),
args=[ ] )
@@ -1509,7 +1569,7 @@
ports.append( t.result )
links = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].links,
name="links-" + str( i ),
args=[ ] )
@@ -1521,7 +1581,7 @@
links.append( t.result )
clusters = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].clusters,
name="clusters-" + str( i ),
args=[ ] )
@@ -1537,7 +1597,7 @@
main.step( "Host view is consistent across ONOS nodes" )
consistentHostsResult = main.TRUE
for controller in range( len( hosts ) ):
- controllerStr = str( controller + 1 )
+ controllerStr = str( main.activeNodes[controller] + 1 )
if hosts[ controller ] and "Error" not in hosts[ controller ]:
if hosts[ controller ] == hosts[ 0 ]:
continue
@@ -1564,7 +1624,7 @@
main.step( "Each host has an IP address" )
ipResult = main.TRUE
for controller in range( 0, len( hosts ) ):
- controllerStr = str( controller + 1 )
+ controllerStr = str( main.activeNodes[controller] + 1 )
if hosts[ controller ]:
for host in hosts[ controller ]:
if not host.get( 'ipAddresses', [ ] ):
@@ -1581,7 +1641,7 @@
main.step( "Cluster view is consistent across ONOS nodes" )
consistentClustersResult = main.TRUE
for controller in range( len( clusters ) ):
- controllerStr = str( controller + 1 )
+ controllerStr = str( main.activeNodes[controller] + 1 )
if "Error" not in clusters[ controller ]:
if clusters[ controller ] == clusters[ 0 ]:
continue
@@ -1626,8 +1686,8 @@
mnSwitches = main.Mininet1.getSwitches()
mnLinks = main.Mininet1.getLinks()
mnHosts = main.Mininet1.getHosts()
- for controller in range( main.numCtrls ):
- controllerStr = str( controller + 1 )
+ for controller in main.activeNodes:
+ controllerStr = str( main.activeNodes[controller] + 1 )
if devices[ controller ] and ports[ controller ] and\
"Error" not in devices[ controller ] and\
"Error" not in ports[ controller ]:
@@ -1792,11 +1852,10 @@
labels.append( "Restart" )
data.append( str( main.restartTime ) )
- # FIXME: revisit test plan for election with madan
# Rerun for election on restarted nodes
runResults = main.TRUE
for cli in main.CLIs:
- run = main.CLIs[0].electionTestRun()
+ run = cli.electionTestRun()
if run != main.TRUE:
main.log.error( "Error running for election on " + cli.name )
runResults = runResults and run
@@ -1806,9 +1865,10 @@
# TODO: Make this configurable
time.sleep( 60 )
- main.log.debug( main.CLIs[0].nodes( jsonFormat=False ) )
- main.log.debug( main.CLIs[0].leaders( jsonFormat=False ) )
- main.log.debug( main.CLIs[0].partitions( jsonFormat=False ) )
+ node = main.activeNodes[0]
+ main.log.debug( main.CLIs[node].nodes( jsonFormat=False ) )
+ main.log.debug( main.CLIs[node].leaders( jsonFormat=False ) )
+ main.log.debug( main.CLIs[node].partitions( jsonFormat=False ) )
def CASE7( self, main ):
"""
@@ -1826,7 +1886,7 @@
# Assert that each device has a master
rolesNotNull = main.TRUE
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].rolesNotNull,
name="rolesNotNull-" + str( i ),
args=[ ] )
@@ -1848,7 +1908,7 @@
consistentMastership = True
rolesResults = True
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].roles,
name="roles-" + str( i ),
args=[] )
@@ -1859,13 +1919,12 @@
t.join()
ONOSMastership.append( t.result )
- for i in range( main.numCtrls ):
+ for i in range( len( ONOSMastership ) ):
+ node = str( main.activeNodes[i] + 1 )
if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
- main.log.error( "Error in getting ONOS" + str( i + 1 ) +
- " roles" )
- main.log.warn(
- "ONOS" + str( i + 1 ) + " mastership response: " +
- repr( ONOSMastership[i] ) )
+ main.log.error( "Error in getting ONOS" + node + " roles" )
+ main.log.warn( "ONOS" + node + " mastership response: " +
+ repr( ONOSMastership[i] ) )
rolesResults = False
utilities.assert_equals(
expect=True,
@@ -1886,8 +1945,9 @@
onfail="ONOS nodes have different views of switch roles" )
if rolesResults and not consistentMastership:
- for i in range( main.numCtrls ):
- main.log.warn( "ONOS" + str( i + 1 ) + " roles: ",
+ for i in range( len( ONOSMastership ) ):
+ node = str( main.activeNodes[i] + 1 )
+ main.log.warn( "ONOS" + node + " roles: ",
json.dumps( json.loads( ONOSMastership[ i ] ),
sort_keys=True,
indent=4,
@@ -1903,7 +1963,7 @@
consistentIntents = True
intentsResults = True
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].intents,
name="intents-" + str( i ),
args=[],
@@ -1915,11 +1975,11 @@
t.join()
ONOSIntents.append( t.result )
- for i in range( main.numCtrls ):
+ for i in range( len( ONOSIntents) ):
+ node = str( main.activeNodes[i] + 1 )
if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
- main.log.error( "Error in getting ONOS" + str( i + 1 ) +
- " intents" )
- main.log.warn( "ONOS" + str( i + 1 ) + " intents response: " +
+ main.log.error( "Error in getting ONOS" + node + " intents" )
+ main.log.warn( "ONOS" + node + " intents response: " +
repr( ONOSIntents[ i ] ) )
intentsResults = False
utilities.assert_equals(
@@ -1942,7 +2002,7 @@
# ... ... ...
# ... ... ...
title = " ID"
- for n in range( main.numCtrls ):
+ for n in main.activeNodes:
title += " " * 10 + "ONOS" + str( n + 1 )
main.log.warn( title )
# get all intent keys in the cluster
@@ -1982,8 +2042,9 @@
main.log.info( dict( out ) )
if intentsResults and not consistentIntents:
- for i in range( main.numCtrls ):
- main.log.warn( "ONOS" + str( i + 1 ) + " intents: " )
+ for i in range( len( main.activeNodes ) ):
+ node = str( main.activeNodes[i] + 1 )
+ main.log.warn( "ONOS" + node + " intents: " )
main.log.warn( json.dumps(
json.loads( ONOSIntents[ i ] ),
sort_keys=True,
@@ -1999,45 +2060,50 @@
# NOTE: this requires case 5 to pass for intentState to be set.
# maybe we should stop the test if that fails?
sameIntents = main.FALSE
- if intentState and intentState == ONOSIntents[ 0 ]:
- sameIntents = main.TRUE
- main.log.info( "Intents are consistent with before failure" )
- # TODO: possibly the states have changed? we may need to figure out
- # what the acceptable states are
- elif len( intentState ) == len( ONOSIntents[ 0 ] ):
- sameIntents = main.TRUE
- try:
- before = json.loads( intentState )
- after = json.loads( ONOSIntents[ 0 ] )
- for intent in before:
- if intent not in after:
- sameIntents = main.FALSE
- main.log.debug( "Intent is not currently in ONOS " +
- "(at least in the same form):" )
- main.log.debug( json.dumps( intent ) )
- except ( ValueError, TypeError ):
- main.log.exception( "Exception printing intents" )
- main.log.debug( repr( ONOSIntents[0] ) )
- main.log.debug( repr( intentState ) )
- if sameIntents == main.FALSE:
- try:
- main.log.debug( "ONOS intents before: " )
- main.log.debug( json.dumps( json.loads( intentState ),
- sort_keys=True, indent=4,
- separators=( ',', ': ' ) ) )
- main.log.debug( "Current ONOS intents: " )
- main.log.debug( json.dumps( json.loads( ONOSIntents[ 0 ] ),
- sort_keys=True, indent=4,
- separators=( ',', ': ' ) ) )
- except ( ValueError, TypeError ):
- main.log.exception( "Exception printing intents" )
- main.log.debug( repr( ONOSIntents[0] ) )
- main.log.debug( repr( intentState ) )
- utilities.assert_equals(
- expect=main.TRUE,
- actual=sameIntents,
- onpass="Intents are consistent with before failure",
- onfail="The Intents changed during failure" )
+ try:
+ intentState
+ except NameError:
+ main.log.warn( "No previous intent state was saved" )
+ else:
+ if intentState and intentState == ONOSIntents[ 0 ]:
+ sameIntents = main.TRUE
+ main.log.info( "Intents are consistent with before failure" )
+ # TODO: possibly the states have changed? we may need to figure out
+ # what the acceptable states are
+ elif len( intentState ) == len( ONOSIntents[ 0 ] ):
+ sameIntents = main.TRUE
+ try:
+ before = json.loads( intentState )
+ after = json.loads( ONOSIntents[ 0 ] )
+ for intent in before:
+ if intent not in after:
+ sameIntents = main.FALSE
+ main.log.debug( "Intent is not currently in ONOS " +
+ "(at least in the same form):" )
+ main.log.debug( json.dumps( intent ) )
+ except ( ValueError, TypeError ):
+ main.log.exception( "Exception printing intents" )
+ main.log.debug( repr( ONOSIntents[0] ) )
+ main.log.debug( repr( intentState ) )
+ if sameIntents == main.FALSE:
+ try:
+ main.log.debug( "ONOS intents before: " )
+ main.log.debug( json.dumps( json.loads( intentState ),
+ sort_keys=True, indent=4,
+ separators=( ',', ': ' ) ) )
+ main.log.debug( "Current ONOS intents: " )
+ main.log.debug( json.dumps( json.loads( ONOSIntents[ 0 ] ),
+ sort_keys=True, indent=4,
+ separators=( ',', ': ' ) ) )
+ except ( ValueError, TypeError ):
+ main.log.exception( "Exception printing intents" )
+ main.log.debug( repr( ONOSIntents[0] ) )
+ main.log.debug( repr( intentState ) )
+ utilities.assert_equals(
+ expect=main.TRUE,
+ actual=sameIntents,
+ onpass="Intents are consistent with before failure",
+ onfail="The Intents changed during failure" )
intentCheck = intentCheck and sameIntents
"""
main.step( "Get the OF Table entries and compare to before " +
@@ -2092,7 +2158,9 @@
# Test of LeadershipElection
leaderList = []
leaderResult = main.TRUE
- for cli in main.CLIs:
+
+ for i in main.activeNodes:
+ cli = main.CLIs[i]
leaderN = cli.electionTestLeader()
leaderList.append( leaderN )
if leaderN == main.FALSE:
@@ -2147,7 +2215,7 @@
cliStart = time.time()
devices = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=utilities.retry,
name="devices-" + str( i ),
args=[ main.CLIs[i].devices, [ None ] ],
@@ -2162,7 +2230,7 @@
hosts = []
ipResult = main.TRUE
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=utilities.retry,
name="hosts-" + str( i ),
args=[ main.CLIs[i].hosts, [ None ] ],
@@ -2180,7 +2248,7 @@
main.log.error( repr( t.result ) )
hosts.append( None )
for controller in range( 0, len( hosts ) ):
- controllerStr = str( controller + 1 )
+ controllerStr = str( main.activeNodes[controller] + 1 )
if hosts[ controller ]:
for host in hosts[ controller ]:
if host is None or host.get( 'ipAddresses', [] ) == []:
@@ -2190,7 +2258,7 @@
ipResult = main.FALSE
ports = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=utilities.retry,
name="ports-" + str( i ),
args=[ main.CLIs[i].ports, [ None ] ],
@@ -2204,7 +2272,7 @@
ports.append( t.result )
links = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=utilities.retry,
name="links-" + str( i ),
args=[ main.CLIs[i].links, [ None ] ],
@@ -2218,7 +2286,7 @@
links.append( t.result )
clusters = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=utilities.retry,
name="clusters-" + str( i ),
args=[ main.CLIs[i].clusters, [ None ] ],
@@ -2248,8 +2316,8 @@
mnSwitches = main.Mininet1.getSwitches()
mnLinks = main.Mininet1.getLinks()
mnHosts = main.Mininet1.getHosts()
- for controller in range( main.numCtrls ):
- controllerStr = str( controller + 1 )
+ for controller in range( len( main.activeNodes ) ):
+ controllerStr = str( main.activeNodes[controller] + 1 )
if devices[ controller ] and ports[ controller ] and\
"Error" not in devices[ controller ] and\
"Error" not in ports[ controller ]:
@@ -2299,7 +2367,7 @@
" hosts don't match Mininet" )
# CHECKING HOST ATTACHMENT POINTS
hostAttachment = True
- noHosts = False
+ zeroHosts = False
# FIXME: topo-HA/obelisk specific mappings:
# key is mac and value is dpid
mappings = {}
@@ -2332,7 +2400,7 @@
if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
if hosts[ controller ] == []:
main.log.warn( "There are no hosts discovered" )
- noHosts = True
+ zeroHosts = True
else:
for host in hosts[ controller ]:
mac = None
@@ -2376,7 +2444,7 @@
main.log.error( "No hosts json output or \"Error\"" +
" in output. hosts = " +
repr( hosts[ controller ] ) )
- if noHosts is False:
+ if zeroHosts is False:
# TODO: Find a way to know if there should be hosts in a
# given point of the test
hostAttachment = True
@@ -2402,7 +2470,7 @@
main.step( "Hosts view is consistent across all ONOS nodes" )
consistentHostsResult = main.TRUE
for controller in range( len( hosts ) ):
- controllerStr = str( controller + 1 )
+ controllerStr = str( main.activeNodes[controller] + 1 )
if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
if hosts[ controller ] == hosts[ 0 ]:
continue
@@ -2444,7 +2512,7 @@
main.step( "Clusters view is consistent across all ONOS nodes" )
consistentClustersResult = main.TRUE
for controller in range( len( clusters ) ):
- controllerStr = str( controller + 1 )
+ controllerStr = str( main.activeNodes[controller] + 1 )
if "Error" not in clusters[ controller ]:
if clusters[ controller ] == clusters[ 0 ]:
continue
@@ -2473,6 +2541,7 @@
except ( ValueError, TypeError ):
main.log.exception( "Error parsing clusters[0]: " +
repr( clusters[0] ) )
+ numClusters = "ERROR"
clusterResults = main.FALSE
if numClusters == 1:
clusterResults = main.TRUE
@@ -2510,12 +2579,19 @@
onpass="Link are correct",
onfail="Links are incorrect" )
+ main.step( "Hosts are correct" )
+ utilities.assert_equals(
+ expect=main.TRUE,
+ actual=hostsResults,
+ onpass="Hosts are correct",
+ onfail="Hosts are incorrect" )
+
# FIXME: move this to an ONOS state case
main.step( "Checking ONOS nodes" )
nodesOutput = []
nodeResults = main.TRUE
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].nodes,
name="nodes-" + str( i ),
args=[ ] )
@@ -2525,7 +2601,7 @@
for t in threads:
t.join()
nodesOutput.append( t.result )
- ips = [ node.ip_address for node in main.nodes ]
+ ips = [ main.nodes[node].ip_address for node in main.activeNodes ]
ips.sort()
for i in nodesOutput:
try:
@@ -2546,6 +2622,11 @@
utilities.assert_equals( expect=main.TRUE, actual=nodeResults,
onpass="Nodes check successful",
onfail="Nodes check NOT successful" )
+ if not nodeResults:
+ for cli in main.CLIs:
+ main.log.debug( "{} components not ACTIVE: \n{}".format(
+ cli.name,
+ cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
def CASE9( self, main ):
"""
@@ -2618,6 +2699,7 @@
switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
description = "Killing a switch to ensure it is discovered correctly"
+ onosCli = main.CLIs[ main.activeNodes[0] ]
main.case( description )
switch = main.params[ 'kill' ][ 'switch' ]
switchDPID = main.params[ 'kill' ][ 'dpid' ]
@@ -2629,7 +2711,7 @@
main.log.info( "Waiting " + str( switchSleep ) +
" seconds for switch down to be discovered" )
time.sleep( switchSleep )
- device = main.ONOScli1.getDevice( dpid=switchDPID )
+ device = onosCli.getDevice( dpid=switchDPID )
# Peek at the deleted switch
main.log.warn( str( device ) )
result = main.FALSE
@@ -2662,6 +2744,7 @@
switch = main.params[ 'kill' ][ 'switch' ]
switchDPID = main.params[ 'kill' ][ 'dpid' ]
links = main.params[ 'kill' ][ 'links' ].split()
+ onosCli = main.CLIs[ main.activeNodes[0] ]
description = "Adding a switch to ensure it is discovered correctly"
main.case( description )
@@ -2669,14 +2752,12 @@
main.Mininet1.addSwitch( switch, dpid=switchDPID )
for peer in links:
main.Mininet1.addLink( switch, peer )
- ipList = []
- for i in range( main.numCtrls ):
- ipList.append( main.nodes[ i ].ip_address )
+ ipList = [ node.ip_address for node in main.nodes ]
main.Mininet1.assignSwController( sw=switch, ip=ipList )
main.log.info( "Waiting " + str( switchSleep ) +
" seconds for switch up to be discovered" )
time.sleep( switchSleep )
- device = main.ONOScli1.getDevice( dpid=switchDPID )
+ device = onosCli.getDevice( dpid=switchDPID )
# Peek at the deleted switch
main.log.warn( str( device ) )
result = main.FALSE
@@ -2769,7 +2850,8 @@
main.case("Start Leadership Election app")
main.step( "Install leadership election app" )
- appResult = main.ONOScli1.activateApp( "org.onosproject.election" )
+ onosCli = main.CLIs[ main.activeNodes[0] ]
+ appResult = onosCli.activateApp( "org.onosproject.election" )
utilities.assert_equals(
expect=main.TRUE,
actual=appResult,
@@ -2779,9 +2861,10 @@
main.step( "Run for election on each node" )
leaderResult = main.TRUE
leaders = []
- for cli in main.CLIs:
- cli.electionTestRun()
- for cli in main.CLIs:
+ for i in main.activeNodes:
+ main.CLIs[i].electionTestRun()
+ for i in main.activeNodes:
+ cli = main.CLIs[i]
leader = cli.electionTestLeader()
if leader is None or leader == main.FALSE:
main.log.error( cli.name + ": Leader for the election app " +
@@ -2831,16 +2914,10 @@
description = "Check that Leadership Election is still functional"
main.case( description )
- # NOTE: Need to re-run since being a canidate is not persistant
- # TODO: add check for "Command not found:" in the driver, this
- # means the election test app isn't loaded
+ # NOTE: Need to re-run after restarts since being a candidate is not persistent
- oldLeaders = [] # leaders by node before withdrawl from candidates
- newLeaders = [] # leaders by node after withdrawl from candidates
- oldAllCandidates = [] # list of lists of each nodes' candidates before
- newAllCandidates = [] # list of lists of each nodes' candidates after
- oldCandidates = [] # list of candidates from node 0 before withdrawl
- newCandidates = [] # list of candidates from node 0 after withdrawl
+ oldLeaders = [] # list of each node's leaderboard before withdrawal
+ newLeaders = [] # list of each node's leaderboard after withdrawal
oldLeader = '' # the old leader from oldLeaders, None if not same
newLeader = '' # the new leaders fron newLoeaders, None if not same
oldLeaderCLI = None # the CLI of the old leader used for re-electing
@@ -2851,10 +2928,9 @@
main.step( "Run for election on each node" )
electionResult = main.TRUE
- for cli in main.CLIs: # run test election on each node
- if cli.electionTestRun() == main.FALSE:
+ for i in main.activeNodes: # run test election on each node
+ if main.CLIs[i].electionTestRun() == main.FALSE:
electionResult = main.FALSE
-
utilities.assert_equals(
expect=main.TRUE,
actual=electionResult,
@@ -2867,42 +2943,36 @@
main.skipCase()
main.step( "Check that each node shows the same leader and candidates" )
- sameResult = main.TRUE
- failMessage = "Nodes have different leaders"
- for cli in main.CLIs:
- node = cli.specificLeaderCandidate( 'org.onosproject.election' )
- oldAllCandidates.append( node )
- if node:
- oldLeaders.append( node[ 0 ] )
- else:
- oldLeaders.append( None )
- oldCandidates = oldAllCandidates[ 0 ]
- if oldCandidates is None:
- oldCandidates = [ None ]
-
- # Check that each node has the same leader. Defines oldLeader
- if len( set( oldLeaders ) ) != 1:
- sameResult = main.FALSE
- main.log.error( "More than one leader present:" + str( oldLeaders ) )
- oldLeader = None
+ failMessage = "Nodes have different leaderboards"
+ def consistentLeaderboards( nodes ):
+ TOPIC = 'org.onosproject.election'
+ # FIXME: use threads
+ #FIXME: should we retry outside the function?
+ for n in range( 5 ): # Retry in case election is still happening
+ leaderList = []
+ # Get all leaderboards
+ for cli in nodes:
+ leaderList.append( cli.specificLeaderCandidate( TOPIC ) )
+ # Compare leaderboards
+ result = all( i == leaderList[0] for i in leaderList ) and\
+ leaderList is not None
+ main.log.debug( leaderList )
+ main.log.warn( result )
+ if result:
+ return ( result, leaderList )
+ time.sleep(5) #TODO: parameterize
+ main.log.error( "Inconsistent leaderboards:" + str( leaderList ) )
+ activeCLIs = [ main.CLIs[i] for i in main.activeNodes ]
+ sameResult, oldLeaders = consistentLeaderboards( activeCLIs )
+ if sameResult:
+ oldLeader = oldLeaders[ 0 ][ 0 ]
+ main.log.warn( oldLeader )
else:
- oldLeader = oldLeaders[ 0 ]
-
- # Check that each node's candidate list is the same
- candidateDiscrepancy = False # Boolean of candidate mismatches
- for candidates in oldAllCandidates:
- if candidates is None:
- main.log.warn( "Error getting candidates" )
- candidates = [ None ]
- if set( candidates ) != set( oldCandidates ):
- sameResult = main.FALSE
- candidateDiscrepancy = True
- if candidateDiscrepancy:
- failMessage += " and candidates"
+ oldLeader = None
utilities.assert_equals(
- expect=main.TRUE,
+ expect=True,
actual=sameResult,
- onpass="Leadership is consistent for the election topic",
+ onpass="Leaderboards are consistent for the election topic",
onfail=failMessage )
main.step( "Find current leader and withdraw" )
@@ -2912,7 +2982,7 @@
main.log.error( "Leadership isn't consistent." )
withdrawResult = main.FALSE
# Get the CLI of the oldLeader
- for i in range( len( main.CLIs ) ):
+ for i in main.activeNodes:
if oldLeader == main.nodes[ i ].ip_address:
oldLeaderCLI = main.CLIs[ i ]
break
@@ -2927,55 +2997,30 @@
onfail="Node was not withdrawn from election" )
main.step( "Check that a new node was elected leader" )
- # FIXME: use threads
- newLeaderResult = main.TRUE
failMessage = "Nodes have different leaders"
-
# Get new leaders and candidates
- for cli in main.CLIs:
- node = cli.specificLeaderCandidate( 'org.onosproject.election' )
- # elections might no have finished yet
- if node[ 0 ] == 'none' and not expectNoLeader:
- main.log.info( "Node has no leader, waiting 5 seconds to be " +
- "sure elections are complete." )
- time.sleep(5)
- node = cli.specificLeaderCandidate( 'org.onosproject.election' )
- # election still isn't done or there is a problem
- if node[ 0 ] == 'none':
- main.log.error( "No leader was elected on at least 1 node" )
- newLeaderResult = main.FALSE
- newAllCandidates.append( node )
- newLeaders.append( node[ 0 ] )
- newCandidates = newAllCandidates[ 0 ]
-
- # Check that each node has the same leader. Defines newLeader
- if len( set( newLeaders ) ) != 1:
- newLeaderResult = main.FALSE
- main.log.error( "Nodes have different leaders: " +
- str( newLeaders ) )
- newLeader = None
+ newLeaderResult, newLeaders = consistentLeaderboards( activeCLIs )
+ if newLeaders[ 0 ][ 0 ] == 'none':
+ main.log.error( "No leader was elected on at least 1 node" )
+ if not expectNoLeader:
+ newLeaderResult = False
+ if newLeaderResult:
+ newLeader = newLeaders[ 0 ][ 0 ]
else:
- newLeader = newLeaders[ 0 ]
-
- # Check that each node's candidate list is the same
- for candidates in newAllCandidates:
- if set( candidates ) != set( newCandidates ):
- newLeaderResult = main.FALSE
- main.log.error( "Discrepancy in candidate lists detected" )
+ newLeader = None
# Check that the new leader is not the older leader, which was withdrawn
if newLeader == oldLeader:
- newLeaderResult = main.FALSE
+ newLeaderResult = False
main.log.error( "All nodes still see old leader: " + str( oldLeader ) +
" as the current leader" )
-
utilities.assert_equals(
- expect=main.TRUE,
+ expect=True,
actual=newLeaderResult,
onpass="Leadership election passed",
onfail="Something went wrong with Leadership election" )
- main.step( "Check that that new leader was the candidate of old leader")
+ main.step( "Check that that new leader was the candidate of old leader" )
# candidates[ 2 ] should become the top candidate after withdrawl
correctCandidateResult = main.TRUE
if expectNoLeader:
@@ -2985,12 +3030,17 @@
else:
main.log.info( "Expected no leader, got: " + str( newLeader ) )
correctCandidateResult = main.FALSE
- elif len( oldCandidates ) >= 3 and newLeader != oldCandidates[ 2 ]:
- correctCandidateResult = main.FALSE
- main.log.error( "Candidate {} was elected. {} should have had priority.".format(
- newLeader, oldCandidates[ 2 ] ) )
+ elif len( oldLeaders[0] ) >= 3:
+ if newLeader == oldLeaders[ 0 ][ 2 ]:
+ # correct leader was elected
+ correctCandidateResult = main.TRUE
+ else:
+ correctCandidateResult = main.FALSE
+ main.log.error( "Candidate {} was elected. {} should have had priority.".format(
+ newLeader, oldLeaders[ 0 ][ 2 ] ) )
else:
main.log.warn( "Could not determine who should be the correct leader" )
+ main.log.debug( oldLeaders[ 0 ] )
correctCandidateResult = main.FALSE
utilities.assert_equals(
expect=main.TRUE,
@@ -3010,54 +3060,23 @@
actual=runResult,
onpass="App re-ran for election",
onfail="App failed to run for election" )
+
main.step(
"Check that oldLeader is a candidate, and leader if only 1 node" )
# verify leader didn't just change
- positionResult = main.TRUE
- # Get new leaders and candidates, wait if oldLeader is not a candidate yet
-
- # Reset and reuse the new candidate and leaders lists
- newAllCandidates = []
- newCandidates = []
- newLeaders = []
- for cli in main.CLIs:
- node = cli.specificLeaderCandidate( 'org.onosproject.election' )
- if oldLeader not in node: # election might no have finished yet
- main.log.info( "Old Leader not elected, waiting 5 seconds to " +
- "be sure elections are complete" )
- time.sleep(5)
- node = cli.specificLeaderCandidate( 'org.onosproject.election' )
- if oldLeader not in node: # election still isn't done, errors
- main.log.error(
- "Old leader was not elected on at least one node" )
- positionResult = main.FALSE
- newAllCandidates.append( node )
- newLeaders.append( node[ 0 ] )
- newCandidates = newAllCandidates[ 0 ]
-
- # Check that each node has the same leader. Defines newLeader
- if len( set( newLeaders ) ) != 1:
- positionResult = main.FALSE
- main.log.error( "Nodes have different leaders: " +
- str( newLeaders ) )
- newLeader = None
- else:
- newLeader = newLeaders[ 0 ]
-
- # Check that each node's candidate list is the same
- for candidates in newAllCandidates:
- if set( candidates ) != set( newCandidates ):
- newLeaderResult = main.FALSE
- main.log.error( "Discrepancy in candidate lists detected" )
+ # Get new leaders and candidates
+ reRunLeaders = []
+ time.sleep( 5 ) # Parameterize
+ positionResult, reRunLeaders = consistentLeaderboards( activeCLIs )
# Check that the re-elected node is last on the candidate List
- if oldLeader != newCandidates[ -1 ]:
- main.log.error( "Old Leader (" + str( oldLeader ) + ") not in the proper position " +
- str( newCandidates ) )
+ if oldLeader != reRunLeaders[ 0 ][ -1 ]:
+ main.log.error( "Old Leader ({}) not in the proper position: {} ".format( str( oldLeader),
+ str( reRunLeaders[ 0 ] ) ) )
positionResult = main.FALSE
utilities.assert_equals(
- expect=main.TRUE,
+ expect=True,
actual=positionResult,
onpass="Old leader successfully re-ran for election",
onfail="Something went wrong with Leadership election after " +
@@ -3076,15 +3095,11 @@
# Variables for the distributed primitives tests
global pCounterName
- global iCounterName
global pCounterValue
- global iCounterValue
global onosSet
global onosSetName
pCounterName = "TestON-Partitions"
- iCounterName = "TestON-inMemory"
pCounterValue = 0
- iCounterValue = 0
onosSet = set([])
onosSetName = "TestON-set"
@@ -3092,7 +3107,8 @@
main.case( description )
main.step( "Install Primitives app" )
appName = "org.onosproject.distributedprimitives"
- appResults = main.CLIs[0].activateApp( appName )
+ node = main.activeNodes[0]
+ appResults = main.CLIs[node].activateApp( appName )
utilities.assert_equals( expect=main.TRUE,
actual=appResults,
onpass="Primitives app activated",
@@ -3110,7 +3126,6 @@
assert main.CLIs, "main.CLIs not defined"
assert main.nodes, "main.nodes not defined"
assert pCounterName, "pCounterName not defined"
- assert iCounterName, "iCounterName not defined"
assert onosSetName, "onosSetName not defined"
# NOTE: assert fails if value is 0/None/Empty/False
try:
@@ -3119,11 +3134,6 @@
main.log.error( "pCounterValue not defined, setting to 0" )
pCounterValue = 0
try:
- iCounterValue
- except NameError:
- main.log.error( "iCounterValue not defined, setting to 0" )
- iCounterValue = 0
- try:
onosSet
except NameError:
main.log.error( "onosSet not defined, setting to empty Set" )
@@ -3144,7 +3154,7 @@
pCounters = []
threads = []
addedPValues = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
name="counterAddAndGet-" + str( i ),
args=[ pCounterName ] )
@@ -3174,7 +3184,7 @@
pCounters = []
threads = []
addedPValues = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
name="counterGetAndAdd-" + str( i ),
args=[ pCounterName ] )
@@ -3211,7 +3221,7 @@
pCounters = []
threads = []
addedPValues = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
name="counterIncrement-" + str( i ),
args=[ pCounterName ],
@@ -3242,7 +3252,7 @@
pCounters = []
threads = []
addedPValues = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
name="counterIncrement-" + str( i ),
args=[ pCounterName ],
@@ -3273,7 +3283,7 @@
pCounters = []
threads = []
addedPValues = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
name="counterIncrement-" + str( i ),
args=[ pCounterName ],
@@ -3307,199 +3317,12 @@
onpass="Added counters are correct",
onfail="Added counters are incorrect" )
- # In-Memory counters
- main.step( "Increment and get an in-memory counter on each node" )
- iCounters = []
- addedIValues = []
- threads = []
- for i in range( main.numCtrls ):
- t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
- name="icounterIncrement-" + str( i ),
- args=[ iCounterName ],
- kwargs={ "inMemory": True } )
- iCounterValue += 1
- addedIValues.append( iCounterValue )
- threads.append( t )
- t.start()
-
- for t in threads:
- t.join()
- iCounters.append( t.result )
- # Check that counter incremented numController times
- iCounterResults = True
- for i in addedIValues:
- tmpResult = i in iCounters
- iCounterResults = iCounterResults and tmpResult
- if not tmpResult:
- main.log.error( str( i ) + " is not in the in-memory "
- "counter incremented results" )
- utilities.assert_equals( expect=True,
- actual=iCounterResults,
- onpass="In-memory counter incremented",
- onfail="Error incrementing in-memory" +
- " counter" )
-
- main.step( "Get then Increment a in-memory counter on each node" )
- iCounters = []
- threads = []
- addedIValues = []
- for i in range( main.numCtrls ):
- t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
- name="counterGetAndAdd-" + str( i ),
- args=[ iCounterName ],
- kwargs={ "inMemory": True } )
- addedIValues.append( iCounterValue )
- iCounterValue += 1
- threads.append( t )
- t.start()
-
- for t in threads:
- t.join()
- iCounters.append( t.result )
- # Check that counter incremented numController times
- iCounterResults = True
- for i in addedIValues:
- tmpResult = i in iCounters
- iCounterResults = iCounterResults and tmpResult
- if not tmpResult:
- main.log.error( str( i ) + " is not in in-memory "
- "counter incremented results" )
- utilities.assert_equals( expect=True,
- actual=iCounterResults,
- onpass="In-memory counter incremented",
- onfail="Error incrementing in-memory" +
- " counter" )
-
- main.step( "Counters we added have the correct values" )
- incrementCheck = main.Counters.counterCheck( iCounterName, iCounterValue )
- utilities.assert_equals( expect=main.TRUE,
- actual=incrementCheck,
- onpass="Added counters are correct",
- onfail="Added counters are incorrect" )
-
- main.step( "Add -8 to then get a in-memory counter on each node" )
- iCounters = []
- threads = []
- addedIValues = []
- for i in range( main.numCtrls ):
- t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
- name="counterIncrement-" + str( i ),
- args=[ iCounterName ],
- kwargs={ "delta": -8, "inMemory": True } )
- iCounterValue += -8
- addedIValues.append( iCounterValue )
- threads.append( t )
- t.start()
-
- for t in threads:
- t.join()
- iCounters.append( t.result )
- # Check that counter incremented numController times
- iCounterResults = True
- for i in addedIValues:
- tmpResult = i in iCounters
- iCounterResults = iCounterResults and tmpResult
- if not tmpResult:
- main.log.error( str( i ) + " is not in in-memory "
- "counter incremented results" )
- utilities.assert_equals( expect=True,
- actual=pCounterResults,
- onpass="In-memory counter incremented",
- onfail="Error incrementing in-memory" +
- " counter" )
-
- main.step( "Add 5 to then get a in-memory counter on each node" )
- iCounters = []
- threads = []
- addedIValues = []
- for i in range( main.numCtrls ):
- t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
- name="counterIncrement-" + str( i ),
- args=[ iCounterName ],
- kwargs={ "delta": 5, "inMemory": True } )
- iCounterValue += 5
- addedIValues.append( iCounterValue )
- threads.append( t )
- t.start()
-
- for t in threads:
- t.join()
- iCounters.append( t.result )
- # Check that counter incremented numController times
- iCounterResults = True
- for i in addedIValues:
- tmpResult = i in iCounters
- iCounterResults = iCounterResults and tmpResult
- if not tmpResult:
- main.log.error( str( i ) + " is not in in-memory "
- "counter incremented results" )
- utilities.assert_equals( expect=True,
- actual=pCounterResults,
- onpass="In-memory counter incremented",
- onfail="Error incrementing in-memory" +
- " counter" )
-
- main.step( "Get then add 5 to a in-memory counter on each node" )
- iCounters = []
- threads = []
- addedIValues = []
- for i in range( main.numCtrls ):
- t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
- name="counterIncrement-" + str( i ),
- args=[ iCounterName ],
- kwargs={ "delta": 5, "inMemory": True } )
- addedIValues.append( iCounterValue )
- iCounterValue += 5
- threads.append( t )
- t.start()
-
- for t in threads:
- t.join()
- iCounters.append( t.result )
- # Check that counter incremented numController times
- iCounterResults = True
- for i in addedIValues:
- tmpResult = i in iCounters
- iCounterResults = iCounterResults and tmpResult
- if not tmpResult:
- main.log.error( str( i ) + " is not in in-memory "
- "counter incremented results" )
- utilities.assert_equals( expect=True,
- actual=iCounterResults,
- onpass="In-memory counter incremented",
- onfail="Error incrementing in-memory" +
- " counter" )
-
- main.step( "Counters we added have the correct values" )
- incrementCheck = main.Counters.counterCheck( iCounterName, iCounterValue )
- utilities.assert_equals( expect=main.TRUE,
- actual=incrementCheck,
- onpass="Added counters are correct",
- onfail="Added counters are incorrect" )
-
- main.step( "Check counters are consistant across nodes" )
- onosCounters, consistentCounterResults = main.Counters.consistentCheck()
- utilities.assert_equals( expect=main.TRUE,
- actual=consistentCounterResults,
- onpass="ONOS counters are consistent " +
- "across nodes",
- onfail="ONOS Counters are inconsistent " +
- "across nodes" )
-
- main.step( "Counters we added have the correct values" )
- incrementCheck = main.Counters.counterCheck( iCounterName, iCounterValue )
- incrementCheck = incrementCheck and \
- main.Counters.counterCheck( iCounterName, iCounterValue )
- utilities.assert_equals( expect=main.TRUE,
- actual=incrementCheck,
- onpass="Added counters are correct",
- onfail="Added counters are incorrect" )
# DISTRIBUTED SETS
main.step( "Distributed Set get" )
size = len( onosSet )
getResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestGet,
name="setTestGet-" + str( i ),
args=[ onosSetName ] )
@@ -3510,8 +3333,8 @@
getResponses.append( t.result )
getResults = main.TRUE
- for i in range( main.numCtrls ):
- node = str( i + 1 )
+ for i in range( len( main.activeNodes ) ):
+ node = str( main.activeNodes[i] + 1 )
if isinstance( getResponses[ i ], list):
current = set( getResponses[ i ] )
if len( current ) == len( getResponses[ i ] ):
@@ -3541,7 +3364,7 @@
main.step( "Distributed Set size" )
sizeResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestSize,
name="setTestSize-" + str( i ),
args=[ onosSetName ] )
@@ -3552,8 +3375,8 @@
sizeResponses.append( t.result )
sizeResults = main.TRUE
- for i in range( main.numCtrls ):
- node = str( i + 1 )
+ for i in range( len( main.activeNodes ) ):
+ node = str( main.activeNodes[i] + 1 )
if size != sizeResponses[ i ]:
sizeResults = main.FALSE
main.log.error( "ONOS" + node +
@@ -3569,7 +3392,7 @@
onosSet.add( addValue )
addResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestAdd,
name="setTestAdd-" + str( i ),
args=[ onosSetName, addValue ] )
@@ -3583,7 +3406,7 @@
# main.FALSE = action resulted in no change in set
# main.ERROR - Some error in executing the function
addResults = main.TRUE
- for i in range( main.numCtrls ):
+ for i in range( len( main.activeNodes ) ):
if addResponses[ i ] == main.TRUE:
# All is well
pass
@@ -3603,7 +3426,7 @@
size = len( onosSet )
getResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestGet,
name="setTestGet-" + str( i ),
args=[ onosSetName ] )
@@ -3613,8 +3436,8 @@
t.join()
getResponses.append( t.result )
getResults = main.TRUE
- for i in range( main.numCtrls ):
- node = str( i + 1 )
+ for i in range( len( main.activeNodes ) ):
+ node = str( main.activeNodes[i] + 1 )
if isinstance( getResponses[ i ], list):
current = set( getResponses[ i ] )
if len( current ) == len( getResponses[ i ] ):
@@ -3636,7 +3459,7 @@
getResults = main.FALSE
sizeResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestSize,
name="setTestSize-" + str( i ),
args=[ onosSetName ] )
@@ -3646,8 +3469,8 @@
t.join()
sizeResponses.append( t.result )
sizeResults = main.TRUE
- for i in range( main.numCtrls ):
- node = str( i + 1 )
+ for i in range( len( main.activeNodes ) ):
+ node = str( main.activeNodes[i] + 1 )
if size != sizeResponses[ i ]:
sizeResults = main.FALSE
main.log.error( "ONOS" + node +
@@ -3664,7 +3487,7 @@
onosSet.update( addAllValue.split() )
addResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestAdd,
name="setTestAddAll-" + str( i ),
args=[ onosSetName, addAllValue ] )
@@ -3678,7 +3501,7 @@
# main.FALSE = action resulted in no change in set
# main.ERROR - Some error in executing the function
addAllResults = main.TRUE
- for i in range( main.numCtrls ):
+ for i in range( len( main.activeNodes ) ):
if addResponses[ i ] == main.TRUE:
# All is well
pass
@@ -3698,7 +3521,7 @@
size = len( onosSet )
getResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestGet,
name="setTestGet-" + str( i ),
args=[ onosSetName ] )
@@ -3708,8 +3531,8 @@
t.join()
getResponses.append( t.result )
getResults = main.TRUE
- for i in range( main.numCtrls ):
- node = str( i + 1 )
+ for i in range( len( main.activeNodes ) ):
+ node = str( main.activeNodes[i] + 1 )
if isinstance( getResponses[ i ], list):
current = set( getResponses[ i ] )
if len( current ) == len( getResponses[ i ] ):
@@ -3733,7 +3556,7 @@
getResults = main.FALSE
sizeResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestSize,
name="setTestSize-" + str( i ),
args=[ onosSetName ] )
@@ -3743,8 +3566,8 @@
t.join()
sizeResponses.append( t.result )
sizeResults = main.TRUE
- for i in range( main.numCtrls ):
- node = str( i + 1 )
+ for i in range( len( main.activeNodes ) ):
+ node = str( main.activeNodes[i] + 1 )
if size != sizeResponses[ i ]:
sizeResults = main.FALSE
main.log.error( "ONOS" + node +
@@ -3760,7 +3583,7 @@
main.step( "Distributed Set contains()" )
containsResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestGet,
name="setContains-" + str( i ),
args=[ onosSetName ],
@@ -3773,7 +3596,7 @@
containsResponses.append( t.result )
containsResults = main.TRUE
- for i in range( main.numCtrls ):
+ for i in range( len( main.activeNodes ) ):
if containsResponses[ i ] == main.ERROR:
containsResults = main.FALSE
else:
@@ -3787,7 +3610,7 @@
main.step( "Distributed Set containsAll()" )
containsAllResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestGet,
name="setContainsAll-" + str( i ),
args=[ onosSetName ],
@@ -3800,7 +3623,7 @@
containsAllResponses.append( t.result )
containsAllResults = main.TRUE
- for i in range( main.numCtrls ):
+ for i in range( len( main.activeNodes ) ):
if containsResponses[ i ] == main.ERROR:
containsResults = main.FALSE
else:
@@ -3815,7 +3638,7 @@
onosSet.remove( addValue )
removeResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestRemove,
name="setTestRemove-" + str( i ),
args=[ onosSetName, addValue ] )
@@ -3829,7 +3652,7 @@
# main.FALSE = action resulted in no change in set
# main.ERROR - Some error in executing the function
removeResults = main.TRUE
- for i in range( main.numCtrls ):
+ for i in range( len( main.activeNodes ) ):
if removeResponses[ i ] == main.TRUE:
# All is well
pass
@@ -3849,7 +3672,7 @@
size = len( onosSet )
getResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestGet,
name="setTestGet-" + str( i ),
args=[ onosSetName ] )
@@ -3859,8 +3682,8 @@
t.join()
getResponses.append( t.result )
getResults = main.TRUE
- for i in range( main.numCtrls ):
- node = str( i + 1 )
+ for i in range( len( main.activeNodes ) ):
+ node = str( main.activeNodes[i] + 1 )
if isinstance( getResponses[ i ], list):
current = set( getResponses[ i ] )
if len( current ) == len( getResponses[ i ] ):
@@ -3884,7 +3707,7 @@
getResults = main.FALSE
sizeResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestSize,
name="setTestSize-" + str( i ),
args=[ onosSetName ] )
@@ -3894,8 +3717,8 @@
t.join()
sizeResponses.append( t.result )
sizeResults = main.TRUE
- for i in range( main.numCtrls ):
- node = str( i + 1 )
+ for i in range( len( main.activeNodes ) ):
+ node = str( main.activeNodes[i] + 1 )
if size != sizeResponses[ i ]:
sizeResults = main.FALSE
main.log.error( "ONOS" + node +
@@ -3913,7 +3736,7 @@
removeAllResponses = []
threads = []
try:
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestRemove,
name="setTestRemoveAll-" + str( i ),
args=[ onosSetName, addAllValue ] )
@@ -3929,7 +3752,7 @@
# main.FALSE = action resulted in no change in set
# main.ERROR - Some error in executing the function
removeAllResults = main.TRUE
- for i in range( main.numCtrls ):
+ for i in range( len( main.activeNodes ) ):
if removeAllResponses[ i ] == main.TRUE:
# All is well
pass
@@ -3949,7 +3772,7 @@
size = len( onosSet )
getResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestGet,
name="setTestGet-" + str( i ),
args=[ onosSetName ] )
@@ -3959,8 +3782,8 @@
t.join()
getResponses.append( t.result )
getResults = main.TRUE
- for i in range( main.numCtrls ):
- node = str( i + 1 )
+ for i in range( len( main.activeNodes ) ):
+ node = str( main.activeNodes[i] + 1 )
if isinstance( getResponses[ i ], list):
current = set( getResponses[ i ] )
if len( current ) == len( getResponses[ i ] ):
@@ -3984,7 +3807,7 @@
getResults = main.FALSE
sizeResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestSize,
name="setTestSize-" + str( i ),
args=[ onosSetName ] )
@@ -3994,8 +3817,8 @@
t.join()
sizeResponses.append( t.result )
sizeResults = main.TRUE
- for i in range( main.numCtrls ):
- node = str( i + 1 )
+ for i in range( len( main.activeNodes ) ):
+ node = str( main.activeNodes[i] + 1 )
if size != sizeResponses[ i ]:
sizeResults = main.FALSE
main.log.error( "ONOS" + node +
@@ -4012,7 +3835,7 @@
onosSet.update( addAllValue.split() )
addResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestAdd,
name="setTestAddAll-" + str( i ),
args=[ onosSetName, addAllValue ] )
@@ -4026,7 +3849,7 @@
# main.FALSE = action resulted in no change in set
# main.ERROR - Some error in executing the function
addAllResults = main.TRUE
- for i in range( main.numCtrls ):
+ for i in range( len( main.activeNodes ) ):
if addResponses[ i ] == main.TRUE:
# All is well
pass
@@ -4046,7 +3869,7 @@
size = len( onosSet )
getResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestGet,
name="setTestGet-" + str( i ),
args=[ onosSetName ] )
@@ -4056,8 +3879,8 @@
t.join()
getResponses.append( t.result )
getResults = main.TRUE
- for i in range( main.numCtrls ):
- node = str( i + 1 )
+ for i in range( len( main.activeNodes ) ):
+ node = str( main.activeNodes[i] + 1 )
if isinstance( getResponses[ i ], list):
current = set( getResponses[ i ] )
if len( current ) == len( getResponses[ i ] ):
@@ -4081,7 +3904,7 @@
getResults = main.FALSE
sizeResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestSize,
name="setTestSize-" + str( i ),
args=[ onosSetName ] )
@@ -4091,8 +3914,8 @@
t.join()
sizeResponses.append( t.result )
sizeResults = main.TRUE
- for i in range( main.numCtrls ):
- node = str( i + 1 )
+ for i in range( len( main.activeNodes ) ):
+ node = str( main.activeNodes[i] + 1 )
if size != sizeResponses[ i ]:
sizeResults = main.FALSE
main.log.error( "ONOS" + node +
@@ -4109,7 +3932,7 @@
onosSet.clear()
clearResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestRemove,
name="setTestClear-" + str( i ),
args=[ onosSetName, " "], # Values doesn't matter
@@ -4124,7 +3947,7 @@
# main.FALSE = action resulted in no change in set
# main.ERROR - Some error in executing the function
clearResults = main.TRUE
- for i in range( main.numCtrls ):
+ for i in range( len( main.activeNodes ) ):
if clearResponses[ i ] == main.TRUE:
# All is well
pass
@@ -4144,7 +3967,7 @@
size = len( onosSet )
getResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestGet,
name="setTestGet-" + str( i ),
args=[ onosSetName ] )
@@ -4154,8 +3977,8 @@
t.join()
getResponses.append( t.result )
getResults = main.TRUE
- for i in range( main.numCtrls ):
- node = str( i + 1 )
+ for i in range( len( main.activeNodes ) ):
+ node = str( main.activeNodes[i] + 1 )
if isinstance( getResponses[ i ], list):
current = set( getResponses[ i ] )
if len( current ) == len( getResponses[ i ] ):
@@ -4179,7 +4002,7 @@
getResults = main.FALSE
sizeResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestSize,
name="setTestSize-" + str( i ),
args=[ onosSetName ] )
@@ -4189,8 +4012,8 @@
t.join()
sizeResponses.append( t.result )
sizeResults = main.TRUE
- for i in range( main.numCtrls ):
- node = str( i + 1 )
+ for i in range( len( main.activeNodes ) ):
+ node = str( main.activeNodes[i] + 1 )
if size != sizeResponses[ i ]:
sizeResults = main.FALSE
main.log.error( "ONOS" + node +
@@ -4207,7 +4030,7 @@
onosSet.update( addAllValue.split() )
addResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestAdd,
name="setTestAddAll-" + str( i ),
args=[ onosSetName, addAllValue ] )
@@ -4221,7 +4044,7 @@
# main.FALSE = action resulted in no change in set
# main.ERROR - Some error in executing the function
addAllResults = main.TRUE
- for i in range( main.numCtrls ):
+ for i in range( len( main.activeNodes ) ):
if addResponses[ i ] == main.TRUE:
# All is well
pass
@@ -4241,7 +4064,7 @@
size = len( onosSet )
getResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestGet,
name="setTestGet-" + str( i ),
args=[ onosSetName ] )
@@ -4251,8 +4074,8 @@
t.join()
getResponses.append( t.result )
getResults = main.TRUE
- for i in range( main.numCtrls ):
- node = str( i + 1 )
+ for i in range( len( main.activeNodes ) ):
+ node = str( main.activeNodes[i] + 1 )
if isinstance( getResponses[ i ], list):
current = set( getResponses[ i ] )
if len( current ) == len( getResponses[ i ] ):
@@ -4276,7 +4099,7 @@
getResults = main.FALSE
sizeResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestSize,
name="setTestSize-" + str( i ),
args=[ onosSetName ] )
@@ -4286,8 +4109,8 @@
t.join()
sizeResponses.append( t.result )
sizeResults = main.TRUE
- for i in range( main.numCtrls ):
- node = str( i + 1 )
+ for i in range( len( main.activeNodes ) ):
+ node = str( main.activeNodes[i] + 1 )
if size != sizeResponses[ i ]:
sizeResults = main.FALSE
main.log.error( "ONOS" + node +
@@ -4304,7 +4127,7 @@
onosSet.intersection_update( retainValue.split() )
retainResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestRemove,
name="setTestRetain-" + str( i ),
args=[ onosSetName, retainValue ],
@@ -4319,7 +4142,7 @@
# main.FALSE = action resulted in no change in set
# main.ERROR - Some error in executing the function
retainResults = main.TRUE
- for i in range( main.numCtrls ):
+ for i in range( len( main.activeNodes ) ):
if retainResponses[ i ] == main.TRUE:
# All is well
pass
@@ -4339,7 +4162,7 @@
size = len( onosSet )
getResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestGet,
name="setTestGet-" + str( i ),
args=[ onosSetName ] )
@@ -4349,8 +4172,8 @@
t.join()
getResponses.append( t.result )
getResults = main.TRUE
- for i in range( main.numCtrls ):
- node = str( i + 1 )
+ for i in range( len( main.activeNodes ) ):
+ node = str( main.activeNodes[i] + 1 )
if isinstance( getResponses[ i ], list):
current = set( getResponses[ i ] )
if len( current ) == len( getResponses[ i ] ):
@@ -4374,7 +4197,7 @@
getResults = main.FALSE
sizeResponses = []
threads = []
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].setTestSize,
name="setTestSize-" + str( i ),
args=[ onosSetName ] )
@@ -4384,8 +4207,8 @@
t.join()
sizeResponses.append( t.result )
sizeResults = main.TRUE
- for i in range( main.numCtrls ):
- node = str( i + 1 )
+ for i in range( len( main.activeNodes ) ):
+ node = str( main.activeNodes[i] + 1 )
if size != sizeResponses[ i ]:
sizeResults = main.FALSE
main.log.error( "ONOS" + node + " expected a size of " +
@@ -4402,7 +4225,8 @@
tMapValue = "Testing"
numKeys = 100
putResult = True
- putResponses = main.CLIs[ 0 ].transactionalMapPut( numKeys, tMapValue )
+ node = main.activeNodes[0]
+ putResponses = main.CLIs[node].transactionalMapPut( numKeys, tMapValue )
if putResponses and len( putResponses ) == 100:
for i in putResponses:
if putResponses[ i ][ 'value' ] != tMapValue:
@@ -4422,7 +4246,7 @@
getResponses = []
threads = []
valueCheck = True
- for i in range( main.numCtrls ):
+ for i in main.activeNodes:
t = main.Thread( target=main.CLIs[i].transactionalMapGet,
name="TMap-get-" + str( i ),
args=[ "Key" + str( n ) ] )
@@ -4442,49 +4266,3 @@
actual=getCheck,
onpass="Partitioned Transactional Map get values were correct",
onfail="Partitioned Transactional Map values incorrect" )
-
- main.step( "In-memory Transactional maps put" )
- tMapValue = "Testing"
- numKeys = 100
- putResult = True
- putResponses = main.CLIs[ 0 ].transactionalMapPut( numKeys, tMapValue, inMemory=True )
- if len( putResponses ) == 100:
- for i in putResponses:
- if putResponses[ i ][ 'value' ] != tMapValue:
- putResult = False
- else:
- putResult = False
- if not putResult:
- main.log.debug( "Put response values: " + str( putResponses ) )
- utilities.assert_equals( expect=True,
- actual=putResult,
- onpass="In-Memory Transactional Map put successful",
- onfail="In-Memory Transactional Map put values are incorrect" )
-
- main.step( "In-Memory Transactional maps get" )
- getCheck = True
- for n in range( 1, numKeys + 1 ):
- getResponses = []
- threads = []
- valueCheck = True
- for i in range( main.numCtrls ):
- t = main.Thread( target=main.CLIs[i].transactionalMapGet,
- name="TMap-get-" + str( i ),
- args=[ "Key" + str( n ) ],
- kwargs={ "inMemory": True } )
- threads.append( t )
- t.start()
- for t in threads:
- t.join()
- getResponses.append( t.result )
- for node in getResponses:
- if node != tMapValue:
- valueCheck = False
- if not valueCheck:
- main.log.warn( "Values for key 'Key" + str( n ) + "' do not match:" )
- main.log.warn( getResponses )
- getCheck = getCheck and valueCheck
- utilities.assert_equals( expect=True,
- actual=getCheck,
- onpass="In-Memory Transactional Map get values were correct",
- onfail="In-Memory Transactional Map values incorrect" )