[ONOS-6593] Review and refactor ONOS startup procedures in TestON
Change-Id: I509a8ee7a26c198957bebf59da5c85a0edb8b995
diff --git a/TestON/tests/HA/HAsingleInstanceRestart/HAsingleInstanceRestart.params b/TestON/tests/HA/HAsingleInstanceRestart/HAsingleInstanceRestart.params
index 9e02df9..65ec6c6 100644
--- a/TestON/tests/HA/HAsingleInstanceRestart/HAsingleInstanceRestart.params
+++ b/TestON/tests/HA/HAsingleInstanceRestart/HAsingleInstanceRestart.params
@@ -32,8 +32,10 @@
<cellName>HA</cellName>
<appString>drivers,openflow,proxyarp,mobility</appString>
</ENV>
- <Git> False </Git>
- <branch> master </branch>
+ <GIT>
+ <pull>False</pull>
+ <branch>master</branch>
+ </GIT>
<num_controllers> 1 </num_controllers>
<tcpdump> False </tcpdump>
diff --git a/TestON/tests/HA/HAsingleInstanceRestart/HAsingleInstanceRestart.py b/TestON/tests/HA/HAsingleInstanceRestart/HAsingleInstanceRestart.py
index 73ec921..3af5a66 100644
--- a/TestON/tests/HA/HAsingleInstanceRestart/HAsingleInstanceRestart.py
+++ b/TestON/tests/HA/HAsingleInstanceRestart/HAsingleInstanceRestart.py
@@ -57,20 +57,22 @@
# load some variables from the params file
PULLCODE = False
- if main.params[ 'Git' ] == 'True':
+ if main.params[ 'GIT' ][ 'pull' ] == 'True':
PULLCODE = True
- gitBranch = main.params[ 'branch' ]
+ gitBranch = main.params[ 'GIT' ][ 'branch' ]
cellName = main.params[ 'ENV' ][ 'cellName' ]
main.numCtrls = int( main.params[ 'num_controllers' ] )
if main.ONOSbench.maxNodes:
if main.ONOSbench.maxNodes < main.numCtrls:
main.numCtrls = int( main.ONOSbench.maxNodes )
-
+ # These are for csv plotting in jenkins
+ main.HAlabels = []
+ main.HAdata = []
try:
from tests.HA.dependencies.HA import HA
main.HA = HA()
- except Exception as e:
+ except ImportError as e:
main.log.exception( e )
main.cleanup()
main.exit()
@@ -112,19 +114,7 @@
gitPullResult = main.TRUE
- main.step( "Starting Mininet" )
- # scp topo file to mininet
- # TODO: move to params?
- topoName = "obelisk.py"
- filePath = main.ONOSbench.home + "/tools/test/topos/"
- main.ONOSbench.scp( main.Mininet1,
- filePath + topoName,
- main.Mininet1.home,
- direction="to" )
- mnResult = main.Mininet1.startNet()
- utilities.assert_equals( expect=main.TRUE, actual=mnResult,
- onpass="Mininet Started",
- onfail="Error starting Mininet" )
+ main.HA.startingMininet()
main.step( "Git checkout and pull " + gitBranch )
if PULLCODE:
@@ -136,24 +126,7 @@
onfail="Git pull failed" )
main.ONOSbench.getVersion( report=True )
- # GRAPHS
- # NOTE: important params here:
- # job = name of Jenkins job
- # Plot Name = Plot-HA, only can be used if multiple plots
- # index = The number of the graph under plot name
- job = "HAsingleInstanceRestart"
- plotName = "Plot-HA"
- index = "2"
- graphs = '<ac:structured-macro ac:name="html">\n'
- graphs += '<ac:plain-text-body><![CDATA[\n'
- graphs += '<iframe src="https://onos-jenkins.onlab.us/job/' + job +\
- '/plot/' + plotName + '/getPlot?index=' + index +\
- '&width=500&height=300"' +\
- 'noborder="0" width="500" height="300" scrolling="yes" ' +\
- 'seamless="seamless"></iframe>\n'
- graphs += ']]></ac:plain-text-body>\n'
- graphs += '</ac:structured-macro>\n'
- main.log.wiki( graphs )
+ main.HA.generateGraph( "HAsingleInstanceRestart" )
main.CLIs = []
main.nodes = []
@@ -223,928 +196,31 @@
onpass="ONOS cli startup successful",
onfail="ONOS cli startup failed" )
- # Create a list of active nodes for use when some nodes are stopped
- main.activeNodes = [ i for i in range( 0, len( main.CLIs ) ) ]
-
- if main.params[ 'tcpdump' ].lower() == "true":
- main.step( "Start Packet Capture MN" )
- main.Mininet2.startTcpdump(
- str( main.params[ 'MNtcpdump' ][ 'folder' ] ) + str( main.TEST )
- + "-MN.pcap",
- intf=main.params[ 'MNtcpdump' ][ 'intf' ],
- port=main.params[ 'MNtcpdump' ][ 'port' ] )
-
- main.step( "Checking ONOS nodes" )
- nodeResults = utilities.retry( main.HA.nodesCheck,
- False,
- args=[ main.activeNodes ],
- attempts=5 )
-
- utilities.assert_equals( expect=True, actual=nodeResults,
- onpass="Nodes check successful",
- onfail="Nodes check NOT successful" )
-
- if not nodeResults:
- for i in main.activeNodes:
- cli = main.CLIs[ i ]
- main.log.debug( "{} components not ACTIVE: \n{}".format(
- cli.name,
- cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
- main.log.error( "Failed to start ONOS, stopping test" )
- main.cleanup()
- main.exit()
-
- main.step( "Activate apps defined in the params file" )
- # get data from the params
- apps = main.params.get( 'apps' )
- if apps:
- apps = apps.split( ',' )
- main.log.warn( apps )
- activateResult = True
- for app in apps:
- main.CLIs[ 0 ].app( app, "Activate" )
- # TODO: check this worked
- time.sleep( 10 ) # wait for apps to activate
- for app in apps:
- state = main.CLIs[ 0 ].appStatus( app )
- if state == "ACTIVE":
- activateResult = activateResult and True
- else:
- main.log.error( "{} is in {} state".format( app, state ) )
- activateResult = False
- utilities.assert_equals( expect=True,
- actual=activateResult,
- onpass="Successfully activated apps",
- onfail="Failed to activate apps" )
- else:
- main.log.warn( "No apps were specified to be loaded after startup" )
-
- main.step( "Set ONOS configurations" )
- config = main.params.get( 'ONOS_Configuration' )
- if config:
- main.log.debug( config )
- checkResult = main.TRUE
- for component in config:
- for setting in config[ component ]:
- value = config[ component ][ setting ]
- check = main.CLIs[ 0 ].setCfg( component, setting, value )
- main.log.info( "Value was changed? {}".format( main.TRUE == check ) )
- checkResult = check and checkResult
- utilities.assert_equals( expect=main.TRUE,
- actual=checkResult,
- onpass="Successfully set config",
- onfail="Failed to set config" )
- else:
- main.log.warn( "No configurations were specified to be changed after startup" )
-
- main.step( "App Ids check" )
- appCheck = main.TRUE
- threads = []
- for i in main.activeNodes:
- t = main.Thread( target=main.CLIs[ i ].appToIDCheck,
- name="appToIDCheck-" + str( i ),
- args=[] )
- threads.append( t )
- t.start()
-
- for t in threads:
- t.join()
- appCheck = appCheck and t.result
- if appCheck != main.TRUE:
- node = main.activeNodes[ 0 ]
- main.log.warn( main.CLIs[ node ].apps() )
- main.log.warn( main.CLIs[ node ].appIDs() )
- utilities.assert_equals( expect=main.TRUE, actual=appCheck,
- onpass="App Ids seem to be correct",
- onfail="Something is wrong with app Ids" )
+ main.HA.initialSetUp()
def CASE2( self, main ):
"""
Assign devices to controllers
"""
- import re
- assert main.numCtrls, "main.numCtrls not defined"
- assert main, "main not defined"
- assert utilities.assert_equals, "utilities.assert_equals not defined"
-
- main.case( "Assigning devices to controllers" )
- main.caseExplanation = "Assign switches to ONOS using 'ovs-vsctl' " +\
- "and check that an ONOS node becomes the " +\
- "master of the device."
- main.step( "Assign switches to controllers" )
-
- ipList = []
- for i in range( main.numCtrls ):
- ipList.append( main.nodes[ i ].ip_address )
- swList = []
- for i in range( 1, 29 ):
- swList.append( "s" + str( i ) )
- main.Mininet1.assignSwController( sw=swList, ip=ipList )
-
- mastershipCheck = main.TRUE
- for i in range( 1, 29 ):
- response = main.Mininet1.getSwController( "s" + str( i ) )
- try:
- main.log.info( str( response ) )
- except Exception:
- main.log.info( repr( response ) )
- for node in main.nodes:
- if re.search( "tcp:" + node.ip_address, response ):
- mastershipCheck = mastershipCheck and main.TRUE
- else:
- main.log.error( "Error, node " + node.ip_address + " is " +
- "not in the list of controllers s" +
- str( i ) + " is connecting to." )
- mastershipCheck = main.FALSE
- utilities.assert_equals(
- expect=main.TRUE,
- actual=mastershipCheck,
- onpass="Switch mastership assigned correctly",
- onfail="Switches not assigned correctly to controllers" )
+ main.HA.assignDevices( main )
def CASE21( self, main ):
"""
Assign mastership to controllers
"""
- assert main.numCtrls, "main.numCtrls not defined"
- assert main, "main not defined"
- assert utilities.assert_equals, "utilities.assert_equals not defined"
- assert main.CLIs, "main.CLIs not defined"
- assert main.nodes, "main.nodes not defined"
-
- main.case( "Assigning Controller roles for switches" )
- main.caseExplanation = "Check that ONOS is connected to each " +\
- "device. Then manually assign" +\
- " mastership to specific ONOS nodes using" +\
- " 'device-role'"
- main.step( "Assign mastership of switches to specific controllers" )
- # Manually assign mastership to the controller we want
- roleCall = main.TRUE
-
- ipList = []
- deviceList = []
- onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
- try:
- # Assign mastership to specific controllers. This assignment was
- # determined for a 7 node cluser, but will work with any sized
- # cluster
- for i in range( 1, 29 ): # switches 1 through 28
- # set up correct variables:
- if i == 1:
- c = 0
- ip = main.nodes[ c ].ip_address # ONOS1
- deviceId = onosCli.getDevice( "1000" ).get( 'id' )
- elif i == 2:
- c = 1 % main.numCtrls
- ip = main.nodes[ c ].ip_address # ONOS2
- deviceId = onosCli.getDevice( "2000" ).get( 'id' )
- elif i == 3:
- c = 1 % main.numCtrls
- ip = main.nodes[ c ].ip_address # ONOS2
- deviceId = onosCli.getDevice( "3000" ).get( 'id' )
- elif i == 4:
- c = 3 % main.numCtrls
- ip = main.nodes[ c ].ip_address # ONOS4
- deviceId = onosCli.getDevice( "3004" ).get( 'id' )
- elif i == 5:
- c = 2 % main.numCtrls
- ip = main.nodes[ c ].ip_address # ONOS3
- deviceId = onosCli.getDevice( "5000" ).get( 'id' )
- elif i == 6:
- c = 2 % main.numCtrls
- ip = main.nodes[ c ].ip_address # ONOS3
- deviceId = onosCli.getDevice( "6000" ).get( 'id' )
- elif i == 7:
- c = 5 % main.numCtrls
- ip = main.nodes[ c ].ip_address # ONOS6
- deviceId = onosCli.getDevice( "6007" ).get( 'id' )
- elif i >= 8 and i <= 17:
- c = 4 % main.numCtrls
- ip = main.nodes[ c ].ip_address # ONOS5
- dpid = '3' + str( i ).zfill( 3 )
- deviceId = onosCli.getDevice( dpid ).get( 'id' )
- elif i >= 18 and i <= 27:
- c = 6 % main.numCtrls
- ip = main.nodes[ c ].ip_address # ONOS7
- dpid = '6' + str( i ).zfill( 3 )
- deviceId = onosCli.getDevice( dpid ).get( 'id' )
- elif i == 28:
- c = 0
- ip = main.nodes[ c ].ip_address # ONOS1
- deviceId = onosCli.getDevice( "2800" ).get( 'id' )
- else:
- main.log.error( "You didn't write an else statement for " +
- "switch s" + str( i ) )
- roleCall = main.FALSE
- # Assign switch
- assert deviceId, "No device id for s" + str( i ) + " in ONOS"
- # TODO: make this controller dynamic
- roleCall = roleCall and onosCli.deviceRole( deviceId, ip )
- ipList.append( ip )
- deviceList.append( deviceId )
- except ( AttributeError, AssertionError ):
- main.log.exception( "Something is wrong with ONOS device view" )
- main.log.info( onosCli.devices() )
- utilities.assert_equals(
- expect=main.TRUE,
- actual=roleCall,
- onpass="Re-assigned switch mastership to designated controller",
- onfail="Something wrong with deviceRole calls" )
-
- main.step( "Check mastership was correctly assigned" )
- roleCheck = main.TRUE
- # NOTE: This is due to the fact that device mastership change is not
- # atomic and is actually a multi step process
- time.sleep( 5 )
- for i in range( len( ipList ) ):
- ip = ipList[ i ]
- deviceId = deviceList[ i ]
- # Check assignment
- master = onosCli.getRole( deviceId ).get( 'master' )
- if ip in master:
- roleCheck = roleCheck and main.TRUE
- else:
- roleCheck = roleCheck and main.FALSE
- main.log.error( "Error, controller " + ip + " is not" +
- " master " + "of device " +
- str( deviceId ) + ". Master is " +
- repr( master ) + "." )
- utilities.assert_equals(
- expect=main.TRUE,
- actual=roleCheck,
- onpass="Switches were successfully reassigned to designated " +
- "controller",
- onfail="Switches were not successfully reassigned" )
+ main.HA.assignMastership( main )
def CASE3( self, main ):
"""
Assign intents
"""
- import time
- import json
- assert main.numCtrls, "main.numCtrls not defined"
- assert main, "main not defined"
- assert utilities.assert_equals, "utilities.assert_equals not defined"
- # NOTE: we must reinstall intents until we have a persistant intent
- # datastore!
- main.case( "Adding host Intents" )
- main.caseExplanation = "Discover hosts by using pingall then " +\
- "assign predetermined host-to-host intents." +\
- " After installation, check that the intent" +\
- " is distributed to all nodes and the state" +\
- " is INSTALLED"
-
- # install onos-app-fwd
- main.step( "Install reactive forwarding app" )
- onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
- installResults = onosCli.activateApp( "org.onosproject.fwd" )
- utilities.assert_equals( expect=main.TRUE, actual=installResults,
- onpass="Install fwd successful",
- onfail="Install fwd failed" )
-
- main.step( "Check app ids" )
- appCheck = main.TRUE
- threads = []
- for i in main.activeNodes:
- t = main.Thread( target=main.CLIs[ i ].appToIDCheck,
- name="appToIDCheck-" + str( i ),
- args=[] )
- threads.append( t )
- t.start()
-
- for t in threads:
- t.join()
- appCheck = appCheck and t.result
- if appCheck != main.TRUE:
- main.log.warn( onosCli.apps() )
- main.log.warn( onosCli.appIDs() )
- utilities.assert_equals( expect=main.TRUE, actual=appCheck,
- onpass="App Ids seem to be correct",
- onfail="Something is wrong with app Ids" )
-
- main.step( "Discovering Hosts( Via pingall for now )" )
- # FIXME: Once we have a host discovery mechanism, use that instead
- # REACTIVE FWD test
- pingResult = main.FALSE
- passMsg = "Reactive Pingall test passed"
- time1 = time.time()
- pingResult = main.Mininet1.pingall()
- time2 = time.time()
- if not pingResult:
- main.log.warn( "First pingall failed. Trying again..." )
- pingResult = main.Mininet1.pingall()
- passMsg += " on the second try"
- utilities.assert_equals(
- expect=main.TRUE,
- actual=pingResult,
- onpass=passMsg,
- onfail="Reactive Pingall failed, " +
- "one or more ping pairs failed" )
- main.log.info( "Time for pingall: %2f seconds" %
- ( time2 - time1 ) )
- # timeout for fwd flows
- time.sleep( 11 )
- # uninstall onos-app-fwd
- main.step( "Uninstall reactive forwarding app" )
- node = main.activeNodes[ 0 ]
- uninstallResult = main.CLIs[ node ].deactivateApp( "org.onosproject.fwd" )
- utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
- onpass="Uninstall fwd successful",
- onfail="Uninstall fwd failed" )
-
- main.step( "Check app ids" )
- threads = []
- appCheck2 = main.TRUE
- for i in main.activeNodes:
- t = main.Thread( target=main.CLIs[ i ].appToIDCheck,
- name="appToIDCheck-" + str( i ),
- args=[] )
- threads.append( t )
- t.start()
-
- for t in threads:
- t.join()
- appCheck2 = appCheck2 and t.result
- if appCheck2 != main.TRUE:
- node = main.activeNodes[ 0 ]
- main.log.warn( main.CLIs[ node ].apps() )
- main.log.warn( main.CLIs[ node ].appIDs() )
- utilities.assert_equals( expect=main.TRUE, actual=appCheck2,
- onpass="App Ids seem to be correct",
- onfail="Something is wrong with app Ids" )
-
- main.step( "Add host intents via cli" )
- intentIds = []
- # TODO: move the host numbers to params
- # Maybe look at all the paths we ping?
- intentAddResult = True
- hostResult = main.TRUE
- for i in range( 8, 18 ):
- main.log.info( "Adding host intent between h" + str( i ) +
- " and h" + str( i + 10 ) )
- host1 = "00:00:00:00:00:" + \
- str( hex( i )[ 2: ] ).zfill( 2 ).upper()
- host2 = "00:00:00:00:00:" + \
- str( hex( i + 10 )[ 2: ] ).zfill( 2 ).upper()
- # NOTE: getHost can return None
- host1Dict = onosCli.getHost( host1 )
- host2Dict = onosCli.getHost( host2 )
- host1Id = None
- host2Id = None
- if host1Dict and host2Dict:
- host1Id = host1Dict.get( 'id', None )
- host2Id = host2Dict.get( 'id', None )
- if host1Id and host2Id:
- nodeNum = ( i % len( main.activeNodes ) )
- node = main.activeNodes[ nodeNum ]
- tmpId = main.CLIs[ node ].addHostIntent( host1Id, host2Id )
- if tmpId:
- main.log.info( "Added intent with id: " + tmpId )
- intentIds.append( tmpId )
- else:
- main.log.error( "addHostIntent returned: " +
- repr( tmpId ) )
- else:
- main.log.error( "Error, getHost() failed for h" + str( i ) +
- " and/or h" + str( i + 10 ) )
- node = main.activeNodes[ 0 ]
- hosts = main.CLIs[ node ].hosts()
- main.log.warn( "Hosts output: " )
- try:
- main.log.warn( json.dumps( json.loads( hosts ),
- sort_keys=True,
- indent=4,
- separators=( ',', ': ' ) ) )
- except ( ValueError, TypeError ):
- main.log.warn( repr( hosts ) )
- hostResult = main.FALSE
- utilities.assert_equals( expect=main.TRUE, actual=hostResult,
- onpass="Found a host id for each host",
- onfail="Error looking up host ids" )
-
- intentStart = time.time()
- onosIds = onosCli.getAllIntentsId()
- main.log.info( "Submitted intents: " + str( intentIds ) )
- main.log.info( "Intents in ONOS: " + str( onosIds ) )
- for intent in intentIds:
- if intent in onosIds:
- pass # intent submitted is in onos
- else:
- intentAddResult = False
- if intentAddResult:
- intentStop = time.time()
- else:
- intentStop = None
- # Print the intent states
- intents = onosCli.intents()
- intentStates = []
- installedCheck = True
- main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
- count = 0
- try:
- for intent in json.loads( intents ):
- state = intent.get( 'state', None )
- if "INSTALLED" not in state:
- installedCheck = False
- intentId = intent.get( 'id', None )
- intentStates.append( ( intentId, state ) )
- except ( ValueError, TypeError ):
- main.log.exception( "Error parsing intents" )
- # add submitted intents not in the store
- tmplist = [ i for i, s in intentStates ]
- missingIntents = False
- for i in intentIds:
- if i not in tmplist:
- intentStates.append( ( i, " - " ) )
- missingIntents = True
- intentStates.sort()
- for i, s in intentStates:
- count += 1
- main.log.info( "%-6s%-15s%-15s" %
- ( str( count ), str( i ), str( s ) ) )
- leaders = onosCli.leaders()
- try:
- missing = False
- if leaders:
- parsedLeaders = json.loads( leaders )
- main.log.warn( json.dumps( parsedLeaders,
- sort_keys=True,
- indent=4,
- separators=( ',', ': ' ) ) )
- # check for all intent partitions
- topics = []
- for i in range( 14 ):
- topics.append( "work-partition-" + str( i ) )
- main.log.debug( topics )
- ONOStopics = [ j[ 'topic' ] for j in parsedLeaders ]
- for topic in topics:
- if topic not in ONOStopics:
- main.log.error( "Error: " + topic +
- " not in leaders" )
- missing = True
- else:
- main.log.error( "leaders() returned None" )
- except ( ValueError, TypeError ):
- main.log.exception( "Error parsing leaders" )
- main.log.error( repr( leaders ) )
- # Check all nodes
- if missing:
- for i in main.activeNodes:
- response = main.CLIs[ i ].leaders( jsonFormat=False )
- main.log.warn( str( main.CLIs[ i ].name ) + " leaders output: \n" +
- str( response ) )
-
- partitions = onosCli.partitions()
- try:
- if partitions:
- parsedPartitions = json.loads( partitions )
- main.log.warn( json.dumps( parsedPartitions,
- sort_keys=True,
- indent=4,
- separators=( ',', ': ' ) ) )
- # TODO check for a leader in all paritions
- # TODO check for consistency among nodes
- else:
- main.log.error( "partitions() returned None" )
- except ( ValueError, TypeError ):
- main.log.exception( "Error parsing partitions" )
- main.log.error( repr( partitions ) )
- pendingMap = onosCli.pendingMap()
- try:
- if pendingMap:
- parsedPending = json.loads( pendingMap )
- main.log.warn( json.dumps( parsedPending,
- sort_keys=True,
- indent=4,
- separators=( ',', ': ' ) ) )
- # TODO check something here?
- else:
- main.log.error( "pendingMap() returned None" )
- except ( ValueError, TypeError ):
- main.log.exception( "Error parsing pending map" )
- main.log.error( repr( pendingMap ) )
-
- intentAddResult = bool( intentAddResult and not missingIntents and
- installedCheck )
- if not intentAddResult:
- main.log.error( "Error in pushing host intents to ONOS" )
-
- main.step( "Intent Anti-Entropy dispersion" )
- for j in range( 100 ):
- correct = True
- main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
- for i in main.activeNodes:
- onosIds = []
- ids = main.CLIs[ i ].getAllIntentsId()
- onosIds.append( ids )
- main.log.debug( "Intents in " + main.CLIs[ i ].name + ": " +
- str( sorted( onosIds ) ) )
- if sorted( ids ) != sorted( intentIds ):
- main.log.warn( "Set of intent IDs doesn't match" )
- correct = False
- break
- else:
- intents = json.loads( main.CLIs[ i ].intents() )
- for intent in intents:
- if intent[ 'state' ] != "INSTALLED":
- main.log.warn( "Intent " + intent[ 'id' ] +
- " is " + intent[ 'state' ] )
- correct = False
- break
- if correct:
- break
- else:
- time.sleep( 1 )
- if not intentStop:
- intentStop = time.time()
- global gossipTime
- gossipTime = intentStop - intentStart
- main.log.info( "It took about " + str( gossipTime ) +
- " seconds for all intents to appear in each node" )
- gossipPeriod = int( main.params[ 'timers' ][ 'gossip' ] )
- maxGossipTime = gossipPeriod * len( main.activeNodes )
- utilities.assert_greater_equals(
- expect=maxGossipTime, actual=gossipTime,
- onpass="ECM anti-entropy for intents worked within " +
- "expected time",
- onfail="Intent ECM anti-entropy took too long. " +
- "Expected time:{}, Actual time:{}".format( maxGossipTime,
- gossipTime ) )
- if gossipTime <= maxGossipTime:
- intentAddResult = True
-
- if not intentAddResult or "key" in pendingMap:
- import time
- installedCheck = True
- main.log.info( "Sleeping 60 seconds to see if intents are found" )
- time.sleep( 60 )
- onosIds = onosCli.getAllIntentsId()
- main.log.info( "Submitted intents: " + str( intentIds ) )
- main.log.info( "Intents in ONOS: " + str( onosIds ) )
- # Print the intent states
- intents = onosCli.intents()
- intentStates = []
- main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
- count = 0
- try:
- for intent in json.loads( intents ):
- # Iter through intents of a node
- state = intent.get( 'state', None )
- if "INSTALLED" not in state:
- installedCheck = False
- intentId = intent.get( 'id', None )
- intentStates.append( ( intentId, state ) )
- except ( ValueError, TypeError ):
- main.log.exception( "Error parsing intents" )
- # add submitted intents not in the store
- tmplist = [ i for i, s in intentStates ]
- for i in intentIds:
- if i not in tmplist:
- intentStates.append( ( i, " - " ) )
- intentStates.sort()
- for i, s in intentStates:
- count += 1
- main.log.info( "%-6s%-15s%-15s" %
- ( str( count ), str( i ), str( s ) ) )
- leaders = onosCli.leaders()
- try:
- missing = False
- if leaders:
- parsedLeaders = json.loads( leaders )
- main.log.warn( json.dumps( parsedLeaders,
- sort_keys=True,
- indent=4,
- separators=( ',', ': ' ) ) )
- # check for all intent partitions
- # check for election
- topics = []
- for i in range( 14 ):
- topics.append( "work-partition-" + str( i ) )
- # FIXME: this should only be after we start the app
- topics.append( "org.onosproject.election" )
- main.log.debug( topics )
- ONOStopics = [ j[ 'topic' ] for j in parsedLeaders ]
- for topic in topics:
- if topic not in ONOStopics:
- main.log.error( "Error: " + topic +
- " not in leaders" )
- missing = True
- else:
- main.log.error( "leaders() returned None" )
- except ( ValueError, TypeError ):
- main.log.exception( "Error parsing leaders" )
- main.log.error( repr( leaders ) )
- # Check all nodes
- if missing:
- for i in main.activeNodes:
- node = main.CLIs[ i ]
- response = node.leaders( jsonFormat=False )
- main.log.warn( str( node.name ) + " leaders output: \n" +
- str( response ) )
-
- partitions = onosCli.partitions()
- try:
- if partitions:
- parsedPartitions = json.loads( partitions )
- main.log.warn( json.dumps( parsedPartitions,
- sort_keys=True,
- indent=4,
- separators=( ',', ': ' ) ) )
- # TODO check for a leader in all paritions
- # TODO check for consistency among nodes
- else:
- main.log.error( "partitions() returned None" )
- except ( ValueError, TypeError ):
- main.log.exception( "Error parsing partitions" )
- main.log.error( repr( partitions ) )
- pendingMap = onosCli.pendingMap()
- try:
- if pendingMap:
- parsedPending = json.loads( pendingMap )
- main.log.warn( json.dumps( parsedPending,
- sort_keys=True,
- indent=4,
- separators=( ',', ': ' ) ) )
- # TODO check something here?
- else:
- main.log.error( "pendingMap() returned None" )
- except ( ValueError, TypeError ):
- main.log.exception( "Error parsing pending map" )
- main.log.error( repr( pendingMap ) )
+ main.HA.assignIntents( main )
def CASE4( self, main ):
"""
Ping across added host intents
"""
- import json
- import time
- assert main.numCtrls, "main.numCtrls not defined"
- assert main, "main not defined"
- assert utilities.assert_equals, "utilities.assert_equals not defined"
- main.case( "Verify connectivity by sending traffic across Intents" )
- main.caseExplanation = "Ping across added host intents to check " +\
- "functionality and check the state of " +\
- "the intent"
-
- onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
- main.step( "Check Intent state" )
- installedCheck = True
- # Print the intent states
- intents = main.ONOScli1.intents()
- intentStates = []
- main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
- count = 0
- # Iter through intents of a node
- try:
- for intent in json.loads( intents ):
- state = intent.get( 'state', None )
- if "INSTALLED" not in state:
- installedCheck = False
- intentId = intent.get( 'id', None )
- intentStates.append( ( intentId, state ) )
- except ( ValueError, TypeError ):
- main.log.exception( "Error parsing intents." )
- # Print states
- intentStates.sort()
- for i, s in intentStates:
- count += 1
- main.log.info( "%-6s%-15s%-15s" %
- ( str( count ), str( i ), str( s ) ) )
- utilities.assert_equals( expect=True, actual=installedCheck,
- onpass="Intents are all INSTALLED",
- onfail="Intents are not all in " +
- "INSTALLED state" )
-
- main.step( "Ping across added host intents" )
- PingResult = main.TRUE
- for i in range( 8, 18 ):
- ping = main.Mininet1.pingHost( src="h" + str( i ),
- target="h" + str( i + 10 ) )
- PingResult = PingResult and ping
- if ping == main.FALSE:
- main.log.warn( "Ping failed between h" + str( i ) +
- " and h" + str( i + 10 ) )
- elif ping == main.TRUE:
- main.log.info( "Ping test passed!" )
- # Don't set PingResult or you'd override failures
- if PingResult == main.FALSE:
- main.log.error(
- "Intents have not been installed correctly, pings failed." )
- # TODO: pretty print
- main.log.warn( "ONOS1 intents: " )
- try:
- tmpIntents = onosCli.intents()
- main.log.warn( json.dumps( json.loads( tmpIntents ),
- sort_keys=True,
- indent=4,
- separators=( ',', ': ' ) ) )
- except ( ValueError, TypeError ):
- main.log.warn( repr( tmpIntents ) )
- utilities.assert_equals(
- expect=main.TRUE,
- actual=PingResult,
- onpass="Intents have been installed correctly and pings work",
- onfail="Intents have not been installed correctly, pings failed." )
-
- main.step( "Check leadership of topics" )
- leaders = onosCli.leaders()
- topicCheck = main.TRUE
- try:
- if leaders:
- parsedLeaders = json.loads( leaders )
- main.log.warn( json.dumps( parsedLeaders,
- sort_keys=True,
- indent=4,
- separators=( ',', ': ' ) ) )
- # check for all intent partitions
- # check for election
- # TODO: Look at Devices as topics now that it uses this system
- topics = []
- for i in range( 14 ):
- topics.append( "work-partition-" + str( i ) )
- # FIXME: this should only be after we start the app
- # FIXME: topics.append( "org.onosproject.election" )
- # Print leaders output
- main.log.debug( topics )
- ONOStopics = [ j[ 'topic' ] for j in parsedLeaders ]
- for topic in topics:
- if topic not in ONOStopics:
- main.log.error( "Error: " + topic +
- " not in leaders" )
- topicCheck = main.FALSE
- else:
- main.log.error( "leaders() returned None" )
- topicCheck = main.FALSE
- except ( ValueError, TypeError ):
- topicCheck = main.FALSE
- main.log.exception( "Error parsing leaders" )
- main.log.error( repr( leaders ) )
- # TODO: Check for a leader of these topics
- # Check all nodes
- if topicCheck:
- for i in main.activeNodes:
- node = main.CLIs[ i ]
- response = node.leaders( jsonFormat=False )
- main.log.warn( str( node.name ) + " leaders output: \n" +
- str( response ) )
-
- utilities.assert_equals( expect=main.TRUE, actual=topicCheck,
- onpass="intent Partitions is in leaders",
- onfail="Some topics were lost " )
- # Print partitions
- partitions = onosCli.partitions()
- try:
- if partitions:
- parsedPartitions = json.loads( partitions )
- main.log.warn( json.dumps( parsedPartitions,
- sort_keys=True,
- indent=4,
- separators=( ',', ': ' ) ) )
- # TODO check for a leader in all paritions
- # TODO check for consistency among nodes
- else:
- main.log.error( "partitions() returned None" )
- except ( ValueError, TypeError ):
- main.log.exception( "Error parsing partitions" )
- main.log.error( repr( partitions ) )
- # Print Pending Map
- pendingMap = onosCli.pendingMap()
- try:
- if pendingMap:
- parsedPending = json.loads( pendingMap )
- main.log.warn( json.dumps( parsedPending,
- sort_keys=True,
- indent=4,
- separators=( ',', ': ' ) ) )
- # TODO check something here?
- else:
- main.log.error( "pendingMap() returned None" )
- except ( ValueError, TypeError ):
- main.log.exception( "Error parsing pending map" )
- main.log.error( repr( pendingMap ) )
-
- if not installedCheck:
- main.log.info( "Waiting 60 seconds to see if the state of " +
- "intents change" )
- time.sleep( 60 )
- # Print the intent states
- intents = onosCli.intents()
- intentStates = []
- main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
- count = 0
- # Iter through intents of a node
- try:
- for intent in json.loads( intents ):
- state = intent.get( 'state', None )
- if "INSTALLED" not in state:
- installedCheck = False
- intentId = intent.get( 'id', None )
- intentStates.append( ( intentId, state ) )
- except ( ValueError, TypeError ):
- main.log.exception( "Error parsing intents." )
- intentStates.sort()
- for i, s in intentStates:
- count += 1
- main.log.info( "%-6s%-15s%-15s" %
- ( str( count ), str( i ), str( s ) ) )
- leaders = onosCli.leaders()
- try:
- missing = False
- if leaders:
- parsedLeaders = json.loads( leaders )
- main.log.warn( json.dumps( parsedLeaders,
- sort_keys=True,
- indent=4,
- separators=( ',', ': ' ) ) )
- # check for all intent partitions
- # check for election
- topics = []
- for i in range( 14 ):
- topics.append( "work-partition-" + str( i ) )
- # FIXME: this should only be after we start the app
- topics.append( "org.onosproject.election" )
- main.log.debug( topics )
- ONOStopics = [ j[ 'topic' ] for j in parsedLeaders ]
- for topic in topics:
- if topic not in ONOStopics:
- main.log.error( "Error: " + topic +
- " not in leaders" )
- missing = True
- else:
- main.log.error( "leaders() returned None" )
- except ( ValueError, TypeError ):
- main.log.exception( "Error parsing leaders" )
- main.log.error( repr( leaders ) )
- if missing:
- for i in main.activeNodes:
- node = main.CLIs[ i ]
- response = node.leaders( jsonFormat=False )
- main.log.warn( str( node.name ) + " leaders output: \n" +
- str( response ) )
-
- partitions = onosCli.partitions()
- try:
- if partitions:
- parsedPartitions = json.loads( partitions )
- main.log.warn( json.dumps( parsedPartitions,
- sort_keys=True,
- indent=4,
- separators=( ',', ': ' ) ) )
- # TODO check for a leader in all paritions
- # TODO check for consistency among nodes
- else:
- main.log.error( "partitions() returned None" )
- except ( ValueError, TypeError ):
- main.log.exception( "Error parsing partitions" )
- main.log.error( repr( partitions ) )
- pendingMap = onosCli.pendingMap()
- try:
- if pendingMap:
- parsedPending = json.loads( pendingMap )
- main.log.warn( json.dumps( parsedPending,
- sort_keys=True,
- indent=4,
- separators=( ',', ': ' ) ) )
- # TODO check something here?
- else:
- main.log.error( "pendingMap() returned None" )
- except ( ValueError, TypeError ):
- main.log.exception( "Error parsing pending map" )
- main.log.error( repr( pendingMap ) )
- # Print flowrules
- node = main.activeNodes[ 0 ]
- main.log.debug( main.CLIs[ node ].flows( jsonFormat=False ) )
- main.step( "Wait a minute then ping again" )
- # the wait is above
- PingResult = main.TRUE
- for i in range( 8, 18 ):
- ping = main.Mininet1.pingHost( src="h" + str( i ),
- target="h" + str( i + 10 ) )
- PingResult = PingResult and ping
- if ping == main.FALSE:
- main.log.warn( "Ping failed between h" + str( i ) +
- " and h" + str( i + 10 ) )
- elif ping == main.TRUE:
- main.log.info( "Ping test passed!" )
- # Don't set PingResult or you'd override failures
- if PingResult == main.FALSE:
- main.log.error(
- "Intents have not been installed correctly, pings failed." )
- # TODO: pretty print
- main.log.warn( "ONOS1 intents: " )
- try:
- tmpIntents = onosCli.intents()
- main.log.warn( json.dumps( json.loads( tmpIntents ),
- sort_keys=True,
- indent=4,
- separators=( ',', ': ' ) ) )
- except ( ValueError, TypeError ):
- main.log.warn( repr( tmpIntents ) )
- utilities.assert_equals(
- expect=main.TRUE,
- actual=PingResult,
- onpass="Intents have been installed correctly and pings work",
- onfail="Intents have not been installed correctly, pings failed." )
+ main.HA.pingAcrossHostIntent( main, False, True )
def CASE5( self, main ):
"""
@@ -1856,238 +932,41 @@
"""
Link s3-s28 down
"""
- import time
- assert main.numCtrls, "main.numCtrls not defined"
- assert main, "main not defined"
- assert utilities.assert_equals, "utilities.assert_equals not defined"
- # NOTE: You should probably run a topology check after this
-
- linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
-
- description = "Turn off a link to ensure that Link Discovery " +\
- "is working properly"
- main.case( description )
-
- main.step( "Kill Link between s3 and s28" )
- LinkDown = main.Mininet1.link( END1="s3", END2="s28", OPTION="down" )
- main.log.info( "Waiting " + str( linkSleep ) +
- " seconds for link down to be discovered" )
- time.sleep( linkSleep )
- utilities.assert_equals( expect=main.TRUE, actual=LinkDown,
- onpass="Link down successful",
- onfail="Failed to bring link down" )
- # TODO do some sort of check here
+ main.HA.linkDown( main )
def CASE10( self, main ):
"""
Link s3-s28 up
"""
- import time
- assert main.numCtrls, "main.numCtrls not defined"
- assert main, "main not defined"
- assert utilities.assert_equals, "utilities.assert_equals not defined"
- # NOTE: You should probably run a topology check after this
-
- linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
-
- description = "Restore a link to ensure that Link Discovery is " + \
- "working properly"
- main.case( description )
-
- main.step( "Bring link between s3 and s28 back up" )
- LinkUp = main.Mininet1.link( END1="s3", END2="s28", OPTION="up" )
- main.log.info( "Waiting " + str( linkSleep ) +
- " seconds for link up to be discovered" )
- time.sleep( linkSleep )
- utilities.assert_equals( expect=main.TRUE, actual=LinkUp,
- onpass="Link up successful",
- onfail="Failed to bring link up" )
- # TODO do some sort of check here
+ main.HA.linkUp( main )
def CASE11( self, main ):
"""
Switch Down
"""
# NOTE: You should probably run a topology check after this
- import time
- assert main.numCtrls, "main.numCtrls not defined"
- assert main, "main not defined"
- assert utilities.assert_equals, "utilities.assert_equals not defined"
-
- switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
-
- description = "Killing a switch to ensure it is discovered correctly"
- onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
- main.case( description )
- switch = main.params[ 'kill' ][ 'switch' ]
- switchDPID = main.params[ 'kill' ][ 'dpid' ]
-
- # TODO: Make this switch parameterizable
- main.step( "Kill " + switch )
- main.log.info( "Deleting " + switch )
- main.Mininet1.delSwitch( switch )
- main.log.info( "Waiting " + str( switchSleep ) +
- " seconds for switch down to be discovered" )
- time.sleep( switchSleep )
- device = onosCli.getDevice( dpid=switchDPID )
- # Peek at the deleted switch
- main.log.warn( str( device ) )
- result = main.FALSE
- if device and device[ 'available' ] is False:
- result = main.TRUE
- utilities.assert_equals( expect=main.TRUE, actual=result,
- onpass="Kill switch successful",
- onfail="Failed to kill switch?" )
+ main.HA.switchDown( main )
def CASE12( self, main ):
"""
Switch Up
"""
# NOTE: You should probably run a topology check after this
- import time
- assert main.numCtrls, "main.numCtrls not defined"
- assert main, "main not defined"
- assert utilities.assert_equals, "utilities.assert_equals not defined"
-
- switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
- switch = main.params[ 'kill' ][ 'switch' ]
- switchDPID = main.params[ 'kill' ][ 'dpid' ]
- links = main.params[ 'kill' ][ 'links' ].split()
- onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
- description = "Adding a switch to ensure it is discovered correctly"
- main.case( description )
-
- main.step( "Add back " + switch )
- main.Mininet1.addSwitch( switch, dpid=switchDPID )
- for peer in links:
- main.Mininet1.addLink( switch, peer )
- ipList = [ node.ip_address for node in main.nodes ]
- main.Mininet1.assignSwController( sw=switch, ip=ipList )
- main.log.info( "Waiting " + str( switchSleep ) +
- " seconds for switch up to be discovered" )
- time.sleep( switchSleep )
- device = onosCli.getDevice( dpid=switchDPID )
- # Peek at the deleted switch
- main.log.warn( str( device ) )
- result = main.FALSE
- if device and device[ 'available' ]:
- result = main.TRUE
- utilities.assert_equals( expect=main.TRUE, actual=result,
- onpass="add switch successful",
- onfail="Failed to add switch?" )
+ main.HA.switchUp( main )
def CASE13( self, main ):
"""
Clean up
"""
- import os
- import time
- assert main.numCtrls, "main.numCtrls not defined"
- assert main, "main not defined"
- assert utilities.assert_equals, "utilities.assert_equals not defined"
- # printing colors to terminal
- colors = { 'cyan': '\033[96m', 'purple': '\033[95m',
- 'blue': '\033[94m', 'green': '\033[92m',
- 'yellow': '\033[93m', 'red': '\033[91m', 'end': '\033[0m' }
- main.case( "Test Cleanup" )
- main.step( "Killing tcpdumps" )
- main.Mininet2.stopTcpdump()
-
- testname = main.TEST
- if main.params[ 'BACKUP' ][ 'ENABLED' ] == "True":
- main.step( "Copying MN pcap and ONOS log files to test station" )
- teststationUser = main.params[ 'BACKUP' ][ 'TESTONUSER' ]
- teststationIP = main.params[ 'BACKUP' ][ 'TESTONIP' ]
- # NOTE: MN Pcap file is being saved to logdir.
- # We scp this file as MN and TestON aren't necessarily the same vm
-
- # FIXME: To be replaced with a Jenkin's post script
- # TODO: Load these from params
- # NOTE: must end in /
- logFolder = "/opt/onos/log/"
- logFiles = [ "karaf.log", "karaf.log.1" ]
- # NOTE: must end in /
- for f in logFiles:
- for node in main.nodes:
- dstName = main.logdir + "/" + node.name + "-" + f
- main.ONOSbench.secureCopy( node.user_name, node.ip_address,
- logFolder + f, dstName )
- # std*.log's
- # NOTE: must end in /
- logFolder = "/opt/onos/var/"
- logFiles = [ "stderr.log", "stdout.log" ]
- # NOTE: must end in /
- for f in logFiles:
- for node in main.nodes:
- dstName = main.logdir + "/" + node.name + "-" + f
- main.ONOSbench.secureCopy( node.user_name, node.ip_address,
- logFolder + f, dstName )
- else:
- main.log.debug( "skipping saving log files" )
-
- main.step( "Stopping Mininet" )
- mnResult = main.Mininet1.stopNet()
- utilities.assert_equals( expect=main.TRUE, actual=mnResult,
- onpass="Mininet stopped",
- onfail="MN cleanup NOT successful" )
-
- main.step( "Checking ONOS Logs for errors" )
- for node in main.nodes:
- main.log.debug( "Checking logs for errors on " + node.name + ":" )
- main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
-
- try:
- timerLog = open( main.logdir + "/Timers.csv", 'w' )
- # Overwrite with empty line and close
- labels = "Gossip Intents, Restart"
- data = str( gossipTime ) + ", " + str( main.restartTime )
- timerLog.write( labels + "\n" + data )
- timerLog.close()
- except NameError as e:
- main.log.exception( e )
+ main.HAlabels.append( "Restart" )
+ main.HAdata.append( str( main.restartTime ) )
+ main.HA.cleanUp( main )
def CASE14( self, main ):
"""
start election app on all onos nodes
"""
- assert main.numCtrls, "main.numCtrls not defined"
- assert main, "main not defined"
- assert utilities.assert_equals, "utilities.assert_equals not defined"
-
- main.case( "Start Leadership Election app" )
- main.step( "Install leadership election app" )
- onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
- appResult = onosCli.activateApp( "org.onosproject.election" )
- utilities.assert_equals(
- expect=main.TRUE,
- actual=appResult,
- onpass="Election app installed",
- onfail="Something went wrong with installing Leadership election" )
-
- main.step( "Run for election on each node" )
- for i in main.activeNodes:
- main.CLIs[ i ].electionTestRun()
- time.sleep( 5 )
- activeCLIs = [ main.CLIs[ i ] for i in main.activeNodes ]
- sameResult, leaders = main.HA.consistentLeaderboards( activeCLIs )
- utilities.assert_equals(
- expect=True,
- actual=sameResult,
- onpass="All nodes see the same leaderboards",
- onfail="Inconsistent leaderboards" )
-
- if sameResult:
- leader = leaders[ 0 ][ 0 ]
- if main.nodes[ main.activeNodes[ 0 ] ].ip_address in leader:
- correctLeader = True
- else:
- correctLeader = False
- main.step( "First node was elected leader" )
- utilities.assert_equals(
- expect=True,
- actual=correctLeader,
- onpass="Correct leader was elected",
- onfail="Incorrect leader" )
+ main.HA.startElectionApp( main )
def CASE15( self, main ):
"""
@@ -2104,197 +983,16 @@
old and new variable prefixes refer to data from before vs after
withdrawl and later before withdrawl vs after re-election
"""
- import time
- assert main.numCtrls, "main.numCtrls not defined"
- assert main, "main not defined"
- assert utilities.assert_equals, "utilities.assert_equals not defined"
- assert main.CLIs, "main.CLIs not defined"
- assert main.nodes, "main.nodes not defined"
-
- description = "Check that Leadership Election is still functional"
- main.case( description )
- # NOTE: Need to re-run after restarts since being a canidate is not persistant
-
- oldLeaders = [] # list of lists of each nodes' candidates before
- newLeaders = [] # list of lists of each nodes' candidates after
- oldLeader = '' # the old leader from oldLeaders, None if not same
- newLeader = '' # the new leaders fron newLoeaders, None if not same
- oldLeaderCLI = None # the CLI of the old leader used for re-electing
- expectNoLeader = False # True when there is only one leader
- if main.numCtrls == 1:
- expectNoLeader = True
-
- main.step( "Run for election on each node" )
- electionResult = main.TRUE
-
- for i in main.activeNodes: # run test election on each node
- if main.CLIs[ i ].electionTestRun() == main.FALSE:
- electionResult = main.FALSE
- utilities.assert_equals(
- expect=main.TRUE,
- actual=electionResult,
- onpass="All nodes successfully ran for leadership",
- onfail="At least one node failed to run for leadership" )
-
- if electionResult == main.FALSE:
- main.log.error(
- "Skipping Test Case because Election Test App isn't loaded" )
- main.skipCase()
-
- main.step( "Check that each node shows the same leader and candidates" )
- failMessage = "Nodes have different leaderboards"
- activeCLIs = [ main.CLIs[ i ] for i in main.activeNodes ]
- sameResult, oldLeaders = main.HA.consistentLeaderboards( activeCLIs )
- if sameResult:
- oldLeader = oldLeaders[ 0 ][ 0 ]
- main.log.warn( oldLeader )
- else:
- oldLeader = None
- utilities.assert_equals(
- expect=True,
- actual=sameResult,
- onpass="Leaderboards are consistent for the election topic",
- onfail=failMessage )
-
- main.step( "Find current leader and withdraw" )
- withdrawResult = main.TRUE
- # do some sanity checking on leader before using it
- if oldLeader is None:
- main.log.error( "Leadership isn't consistent." )
- withdrawResult = main.FALSE
- # Get the CLI of the oldLeader
- for i in main.activeNodes:
- if oldLeader == main.nodes[ i ].ip_address:
- oldLeaderCLI = main.CLIs[ i ]
- break
- else: # FOR/ELSE statement
- main.log.error( "Leader election, could not find current leader" )
- if oldLeader:
- withdrawResult = oldLeaderCLI.electionTestWithdraw()
- utilities.assert_equals(
- expect=main.TRUE,
- actual=withdrawResult,
- onpass="Node was withdrawn from election",
- onfail="Node was not withdrawn from election" )
-
- main.step( "Check that a new node was elected leader" )
- failMessage = "Nodes have different leaders"
- # Get new leaders and candidates
- newLeaderResult, newLeaders = main.HA.consistentLeaderboards( activeCLIs )
- newLeader = None
- if newLeaderResult:
- if newLeaders[ 0 ][ 0 ] == 'none':
- main.log.error( "No leader was elected on at least 1 node" )
- if not expectNoLeader:
- newLeaderResult = False
- newLeader = newLeaders[ 0 ][ 0 ]
-
- # Check that the new leader is not the older leader, which was withdrawn
- if newLeader == oldLeader:
- newLeaderResult = False
- main.log.error( "All nodes still see old leader: " + str( oldLeader ) +
- " as the current leader" )
- utilities.assert_equals(
- expect=True,
- actual=newLeaderResult,
- onpass="Leadership election passed",
- onfail="Something went wrong with Leadership election" )
-
- main.step( "Check that that new leader was the candidate of old leader" )
- # candidates[ 2 ] should become the top candidate after withdrawl
- correctCandidateResult = main.TRUE
- if expectNoLeader:
- if newLeader == 'none':
- main.log.info( "No leader expected. None found. Pass" )
- correctCandidateResult = main.TRUE
- else:
- main.log.info( "Expected no leader, got: " + str( newLeader ) )
- correctCandidateResult = main.FALSE
- elif len( oldLeaders[ 0 ] ) >= 3:
- if newLeader == oldLeaders[ 0 ][ 2 ]:
- # correct leader was elected
- correctCandidateResult = main.TRUE
- else:
- correctCandidateResult = main.FALSE
- main.log.error( "Candidate {} was elected. {} should have had priority.".format(
- newLeader, oldLeaders[ 0 ][ 2 ] ) )
- else:
- main.log.warn( "Could not determine who should be the correct leader" )
- main.log.debug( oldLeaders[ 0 ] )
- correctCandidateResult = main.FALSE
- utilities.assert_equals(
- expect=main.TRUE,
- actual=correctCandidateResult,
- onpass="Correct Candidate Elected",
- onfail="Incorrect Candidate Elected" )
-
- main.step( "Run for election on old leader( just so everyone " +
- "is in the hat )" )
- if oldLeaderCLI is not None:
- runResult = oldLeaderCLI.electionTestRun()
- else:
- main.log.error( "No old leader to re-elect" )
- runResult = main.FALSE
- utilities.assert_equals(
- expect=main.TRUE,
- actual=runResult,
- onpass="App re-ran for election",
- onfail="App failed to run for election" )
-
- main.step(
- "Check that oldLeader is a candidate, and leader if only 1 node" )
- # verify leader didn't just change
- # Get new leaders and candidates
- reRunLeaders = []
- time.sleep( 5 ) # Paremterize
- positionResult, reRunLeaders = main.HA.consistentLeaderboards( activeCLIs )
-
- # Check that the re-elected node is last on the candidate List
- if not reRunLeaders[ 0 ]:
- positionResult = main.FALSE
- elif oldLeader != reRunLeaders[ 0 ][ -1 ]:
- main.log.error( "Old Leader ({}) not in the proper position: {} ".format( str( oldLeader ),
- str( reRunLeaders[ 0 ] ) ) )
- positionResult = main.FALSE
- utilities.assert_equals(
- expect=True,
- actual=positionResult,
- onpass="Old leader successfully re-ran for election",
- onfail="Something went wrong with Leadership election after " +
- "the old leader re-ran for election" )
+ main.HA.isElectionFunctional( main )
def CASE16( self, main ):
"""
Install Distributed Primitives app
"""
- import time
- assert main.numCtrls, "main.numCtrls not defined"
- assert main, "main not defined"
- assert utilities.assert_equals, "utilities.assert_equals not defined"
- assert main.CLIs, "main.CLIs not defined"
- assert main.nodes, "main.nodes not defined"
-
- # Variables for the distributed primitives tests
- main.pCounterName = "TestON-Partitions"
- main.pCounterValue = 0
- main.onosSet = set( [] )
- main.onosSetName = "TestON-set"
-
- description = "Install Primitives app"
- main.case( description )
- main.step( "Install Primitives app" )
- appName = "org.onosproject.distributedprimitives"
- node = main.activeNodes[ 0 ]
- appResults = main.CLIs[ node ].activateApp( appName )
- utilities.assert_equals( expect=main.TRUE,
- actual=appResults,
- onpass="Primitives app activated",
- onfail="Primitives app not activated" )
- # TODO check on all nodes instead of sleeping
- time.sleep( 5 ) # To allow all nodes to activate
+ main.HA.installDistributedPrimitiveApp( main )
def CASE17( self, main ):
"""
Check for basic functionality with distributed primitives
"""
- main.HA.CASE17( main )
+ main.HA.checkDistPrimitivesFunc( main )