Add HApowerFailure test
This requires at least one patch to ONOS for the `onos-power` script to
support non-default cell usernames and another patch to the onos warden
to allow multiple node failures.
Also included:
- logging changes to help debug multithreaded sections of the test.
- Some input validation in functions that don't directly call the cli
- Remove some verbose logging
- Distribute some onos commands amongst the active nodes
- Refactor out clearing the ONOS cli pexpect buffer before sending a
command into its own function
Change-Id: If1b868b399878209ab0394956f3b3918c0176909
diff --git a/TestON/drivers/common/cli/onosclidriver.py b/TestON/drivers/common/cli/onosclidriver.py
index c341d59..4c23ec1 100755
--- a/TestON/drivers/common/cli/onosclidriver.py
+++ b/TestON/drivers/common/cli/onosclidriver.py
@@ -460,24 +460,20 @@
else:
main.cleanAndExit()
- def sendline( self, cmdStr, showResponse=False, debug=False, timeout=10, noExit=False, dollarSign=False ):
+ def clearBuffer( self, debug=False, timeout=10, noExit=False ):
"""
- Send a completely user specified string to
- the onos> prompt. Use this function if you have
- a very specific command to send.
-
- if noExit is True, TestON will not exit, and return None
- if dollarSign is True, TestON will not expect for '$' as a new CLI or onos> prompt
- since '$' can be in the output.
-
- Warning: There are no sanity checking to commands
- sent using this method.
-
+ Test cli connection and clear any left over output in the buffer
+ Optional Arguments:
+ debug - Defaults to False. If True, will enable debug logging.
+ timeout - Defaults to 10. Amount of time in seconds for a command to return
+ before a timeout.
+ noExit - Defaults to False. If True, will not exit TestON in the event of a closed channel, but instead return None
"""
try:
# Try to reconnect if disconnected from cli
self.handle.sendline( "" )
i = self.handle.expect( [ "onos>", self.prompt, pexpect.TIMEOUT ] )
+ response = self.handle.before
if i == 1:
main.log.error( self.name + ": onos cli session closed. " )
if self.onosIp:
@@ -499,15 +495,55 @@
self.handle.send( "\x03" ) # Send ctrl-c to clear previous output
self.handle.expect( "onos>" )
+ response += self.handle.before
if debug:
- # NOTE: This adds and average of .4 seconds per call
+ main.log.debug( self.name + ": Raw output from sending ''" )
+ main.log.debug( self.name + ": " + repr( response ) )
+ except pexpect.TIMEOUT:
+ main.log.error( self.name + ": ONOS timeout" )
+ main.log.debug( self.handle.before )
+ return None
+ except pexpect.EOF:
+ main.log.error( self.name + ": EOF exception found" )
+ main.log.error( self.name + ": " + self.handle.before )
+ if noExit:
+ return None
+ else:
+ main.cleanAndExit()
+ except Exception:
+ main.log.exception( self.name + ": Uncaught exception!" )
+ if noExit:
+ return None
+ else:
+ main.cleanAndExit()
+
+ def sendline( self, cmdStr, showResponse=False, debug=False, timeout=10, noExit=False ):
+ """
+ A wrapper around pexpect's sendline/expect. Will return all the output from a given command
+
+ Required Arguments:
+ cmdStr - String to send to the pexpect session
+
+ Optional Arguments:
+ showResponse - Defaults to False. If True will log the response.
+ debug - Defaults to False. If True, will enable debug logging.
+ timeout - Defaults to 10. Amount of time in seconds for a command to return
+ before a timeout.
+ noExit - Defaults to False. If True, will not exit TestON in the event of a
+ closed channel, but instead return None
+
+ Warning: There is no sanity checking of commands sent using this method.
+
+ """
+ try:
+ # Try to reconnect if disconnected from cli
+ self.clearBuffer( debug=debug, timeout=timeout, noExit=noExit )
+ if debug:
+ # NOTE: This adds an average of .4 seconds per call
logStr = "\"Sending CLI command: '" + cmdStr + "'\""
self.log( logStr, noExit=noExit )
self.handle.sendline( cmdStr )
- if dollarSign:
- i = self.handle.expect( [ "onos>" ], timeout )
- else:
- i = self.handle.expect( [ "onos>", self.prompt ], timeout )
+ i = self.handle.expect( "onos>", timeout )
response = self.handle.before
# TODO: do something with i
main.log.info( "Command '" + str( cmdStr ) + "' sent to "
@@ -538,16 +574,18 @@
# parse for just the output, remove the cmd from response
output = response.split( cmdStr.strip(), 1 )
- if debug:
- main.log.debug( self.name + ": split output" )
- for r in output:
- main.log.debug( self.name + ": " + repr( r ) )
- output = output[ 1 ].strip()
+ if output:
+ if debug:
+ main.log.debug( self.name + ": split output" )
+ for r in output:
+ main.log.debug( self.name + ": " + repr( r ) )
+ output = output[ 1 ].strip()
if showResponse:
main.log.info( "Response from ONOS: {}".format( output ) )
+ self.clearBuffer( debug=debug, timeout=timeout, noExit=noExit )
return output
except pexpect.TIMEOUT:
- main.log.error( self.name + ":ONOS timeout" )
+ main.log.error( self.name + ": ONOS timeout" )
if debug:
main.log.debug( self.handle.before )
return None
@@ -595,7 +633,7 @@
assert handle is not None, "Error in sendline"
assert "Command not found:" not in handle, handle
if re.search( "Error", handle ):
- main.log.error( "Error in adding node" )
+ main.log.error( self.name + ": Error in adding node" )
main.log.error( handle )
return main.FALSE
else:
@@ -629,7 +667,7 @@
assert handle is not None, "Error in sendline"
assert "Command not found:" not in handle, handle
if re.search( "Error", handle ):
- main.log.error( "Error in removing node" )
+ main.log.error( self.name + ": Error in removing node" )
main.log.error( handle )
return main.FALSE
else:
@@ -717,7 +755,7 @@
assert handle is not None, "Error in sendline"
assert "Command not found:" not in handle, handle
if re.search( "Error", handle ):
- main.log.error( "Error in removing device" )
+ main.log.error( self.name + ": Error in removing device" )
main.log.error( handle )
return main.FALSE
else:
@@ -776,7 +814,7 @@
assert handle is not None, "Error in sendline"
assert "Command not found:" not in handle, handle
if re.search( "Error", handle ):
- main.log.error( "Error in balancing masters" )
+ main.log.error( self.name + ": Error in balancing masters" )
main.log.error( handle )
return main.FALSE
else:
@@ -1022,7 +1060,7 @@
assert handle is not None, "Error in sendline"
assert "Command not found:" not in handle, handle
if re.search( "Error", handle ):
- main.log.error( "Error in getting paths" )
+ main.log.error( self.name + ": Error in getting paths" )
return ( handle, "Error" )
else:
path = handle.split( ";" )[ 0 ]
@@ -1062,7 +1100,7 @@
assert "java.lang.IllegalStateException" not in handle
return handle
except AssertionError:
- main.log.exception( "Error in processing '" + cmdStr + "' " +
+ main.log.exception( self.name + ": Error in processing '" + cmdStr + "' " +
"command: " + str( handle ) )
return None
except TypeError:
@@ -1283,7 +1321,7 @@
assert handle is not None, "Error in sendline"
assert "Command not found:" not in handle, handle
if re.search( "Error", handle ):
- main.log.error( "Error in adding Host intent" )
+ main.log.error( self.name + ": Error in adding Host intent" )
main.log.debug( "Response from ONOS was: " + repr( handle ) )
return None
else:
@@ -1331,7 +1369,7 @@
assert "Command not found:" not in handle, handle
# If error, return error message
if re.search( "Error", handle ):
- main.log.error( "Error in adding Optical intent" )
+ main.log.error( self.name + ": Error in adding Optical intent" )
return None
else:
main.log.info( "Optical intent installed between " +
@@ -1470,7 +1508,7 @@
assert "Command not found:" not in handle, handle
# If error, return error message
if re.search( "Error", handle ):
- main.log.error( "Error in adding point-to-point intent" )
+ main.log.error( self.name + ": Error in adding point-to-point intent" )
return None
else:
# TODO: print out all the options in this message?
@@ -1631,7 +1669,7 @@
assert "Command not found:" not in handle, handle
# If error, return error message
if re.search( "Error", handle ):
- main.log.error( "Error in adding multipoint-to-singlepoint " +
+ main.log.error( self.name + ": Error in adding multipoint-to-singlepoint " +
"intent" )
return None
else:
@@ -1790,7 +1828,7 @@
assert "Command not found:" not in handle, handle
# If error, return error message
if re.search( "Error", handle ):
- main.log.error( "Error in adding singlepoint-to-multipoint " +
+ main.log.error( self.name + ": Error in adding singlepoint-to-multipoint " +
"intent" )
return None
else:
@@ -1920,7 +1958,7 @@
assert "Command not found:" not in handle, handle
# If error, return error message
if re.search( "Error", handle ):
- main.log.error( "Error in adding mpls intent" )
+ main.log.error( self.name + ": Error in adding mpls intent" )
return None
else:
# TODO: print out all the options in this message?
@@ -1971,7 +2009,7 @@
assert handle is not None, "Error in sendline"
assert "Command not found:" not in handle, handle
if re.search( "Error", handle ):
- main.log.error( "Error in removing intent" )
+ main.log.error( self.name + ": Error in removing intent" )
return main.FALSE
else:
# TODO: Should this be main.TRUE
@@ -2013,7 +2051,7 @@
assert handle is not None, "Error in sendline"
assert "Command not found:" not in handle, handle
if re.search( "Error", handle ):
- main.log.error( "Error in removing intent" )
+ main.log.error( self.name + ": Error in removing intent" )
return main.FALSE
else:
return main.TRUE
@@ -2041,7 +2079,7 @@
assert handle is not None, "Error in sendline"
assert "Command not found:" not in handle, handle
if re.search( "Error", handle ):
- main.log.error( "Error in purging intents" )
+ main.log.error( self.name + ": Error in purging intents" )
return main.FALSE
else:
return main.TRUE
@@ -2143,7 +2181,7 @@
main.cleanAndExit()
# =============Function to check Bandwidth allocation========
- def allocations( self, jsonFormat = True, dollarSign = True ):
+ def allocations( self, jsonFormat = True ):
"""
Description:
Obtain Bandwidth Allocation Information from ONOS cli.
@@ -2152,7 +2190,7 @@
cmdStr = "allocations"
if jsonFormat:
cmdStr += " -j"
- handle = self.sendline( cmdStr, timeout=300, dollarSign=True )
+ handle = self.sendline( cmdStr, timeout=300 )
assert handle is not None, "Error in sendline"
assert "Command not found:" not in handle, handle
return handle
@@ -3293,7 +3331,7 @@
self.name )
return None
# error
- main.log.error( "Error in electionTestLeader on " + self.name +
+ main.log.error( self.name + ": Error in electionTestLeader on " + self.name +
": " + "unexpected response" )
main.log.error( repr( response ) )
return main.FALSE
@@ -3332,7 +3370,7 @@
"for the Election app." )
return main.TRUE
# error
- main.log.error( "Error in electionTestRun on " + self.name +
+ main.log.error( self.name + ": Error in electionTestRun on " + self.name +
": " + "unexpected response" )
main.log.error( repr( response ) )
return main.FALSE
@@ -3371,7 +3409,7 @@
"elections for the Election app." )
return main.TRUE
# error
- main.log.error( "Error in electionTestWithdraw on " +
+ main.log.error( self.name + ": Error in electionTestWithdraw on " +
self.name + ": " + "unexpected response" )
main.log.error( repr( response ) )
return main.FALSE
@@ -3400,7 +3438,7 @@
assert output is not None, "Error in sendline"
assert "Command not found:" not in output, output
if re.search( "No such device", output ):
- main.log.error( "Error in getting ports" )
+ main.log.error( self.name + ": Error in getting ports" )
return ( output, "Error" )
return output
except AssertionError:
@@ -3428,7 +3466,7 @@
assert output is not None, "Error in sendline"
assert "Command not found:" not in output, output
if re.search( "No such device", output ):
- main.log.error( "Error in getting ports " )
+ main.log.error( self.name + ": Error in getting ports " )
return ( output, "Error " )
return output
except AssertionError:
@@ -3455,7 +3493,7 @@
assert output is not None, "Error in sendline"
assert "Command not found:" not in output, output
if re.search( "Error", output ):
- main.log.error( "Error in getting ports" )
+ main.log.error( self.name + ": Error in getting ports" )
return ( output, "Error" )
return output
except AssertionError:
@@ -3676,7 +3714,7 @@
return output
# FIXME: look at specific exceptions/Errors
except AssertionError:
- main.log.exception( "Error in processing onos:app command." )
+ main.log.exception( self.name + ": Error in processing onos:app command." )
return None
except TypeError:
main.log.exception( self.name + ": Object not as expected" )
@@ -3767,7 +3805,7 @@
assert output is not None, "Error in sendline"
assert "Command not found:" not in output, output
if "Error executing command" in output:
- main.log.error( "Error in processing onos:app command: " +
+ main.log.error( self.name + ": Error in processing onos:app command: " +
str( output ) )
return main.FALSE
elif "No such application" in output:
@@ -3775,7 +3813,7 @@
"' is not installed in ONOS" )
return main.FALSE
elif "Command not found:" in output:
- main.log.error( "Error in processing onos:app command: " +
+ main.log.error( self.name + ": Error in processing onos:app command: " +
str( output ) )
return main.FALSE
elif "Unsupported command:" in output:
@@ -3976,7 +4014,7 @@
assert "Error executing command" not in output, output
return output
except AssertionError:
- main.log.exception( "Error in processing onos:app-ids command." )
+ main.log.exception( self.name + ": Error in processing onos:app-ids command." )
return None
except TypeError:
main.log.exception( self.name + ": Object not as expected" )
@@ -4003,21 +4041,22 @@
main.ERROR if there is some error in processing the test
"""
try:
- bail = False
+ # Grab IDs
rawJson = self.appIDs( jsonFormat=True )
if rawJson:
ids = json.loads( rawJson )
else:
- main.log.error( "app-ids returned nothing:" + repr( rawJson ) )
- bail = True
+ main.log.error( "app-ids returned nothing: " + repr( rawJson ) )
+ return main.FALSE
+
+ # Grab Apps
rawJson = self.apps( jsonFormat=True )
if rawJson:
apps = json.loads( rawJson )
else:
main.log.error( "apps returned nothing:" + repr( rawJson ) )
- bail = True
- if bail:
return main.FALSE
+
result = main.TRUE
for app in apps:
appID = app.get( 'id' )
@@ -4048,6 +4087,7 @@
" but 'apps' has " + str( appName ) )
else:
pass # id and name match!
+
# now make sure that app-ids has no duplicates
idsList = []
namesList = []
@@ -4110,7 +4150,7 @@
assert "Error executing command" not in output, output
return output
except AssertionError:
- main.log.exception( "Error in processing 'cfg get' command." )
+ main.log.exception( self.name + ": Error in processing 'cfg get' command." )
return None
except TypeError:
main.log.exception( self.name + ": Object not as expected" )
@@ -4165,7 +4205,7 @@
return main.FALSE
return main.TRUE
except AssertionError:
- main.log.exception( "Error in processing 'cfg set' command." )
+ main.log.exception( self.name + ": Error in processing 'cfg set' command." )
return main.FALSE
except ( TypeError, ValueError ):
main.log.exception( "{}: Object not as expected: {!r}".format( self.name, results ) )
@@ -4201,7 +4241,7 @@
# Node not leader
assert "java.lang.IllegalStateException" not in output
except AssertionError:
- main.log.error( "Error in processing '" + cmd + "' " +
+ main.log.error( self.name + ": Error in processing '" + cmd + "' " +
"command: " + str( output ) )
retryTime = 30 # Conservative time, given by Madan
main.log.info( "Waiting " + str( retryTime ) +
@@ -4214,7 +4254,7 @@
main.log.info( self.name + ": " + output )
return output
except AssertionError:
- main.log.exception( "Error in processing '" + cmd + "' command." )
+ main.log.exception( self.name + ": Error in processing '" + cmd + "' command." )
return None
except TypeError:
main.log.exception( self.name + ": Object not as expected" )
@@ -4425,19 +4465,20 @@
cmdStr = "set-test-get -s "
cmdStr += setName
output = self.distPrimitivesSend( cmdStr )
- match = re.search( pattern, output )
- if match:
- setSize = int( match.group( 1 ) )
- setMatch = match.group( 2 )
- if len( setMatch.split() ) == setSize:
- main.log.info( "The size returned by " + self.name +
- " matches the number of elements in " +
- "the returned set" )
- else:
- main.log.error( "The size returned by " + self.name +
- " does not match the number of " +
- "elements in the returned set." )
- return setSize
+ if output:
+ match = re.search( pattern, output )
+ if match:
+ setSize = int( match.group( 1 ) )
+ setMatch = match.group( 2 )
+ if len( setMatch.split() ) == setSize:
+ main.log.info( "The size returned by " + self.name +
+ " matches the number of elements in " +
+ "the returned set" )
+ else:
+ main.log.error( "The size returned by " + self.name +
+ " does not match the number of " +
+ "elements in the returned set." )
+ return setSize
else: # no match
main.log.error( self.name + ": setTestGet did not" +
" match expected output" )
@@ -4471,7 +4512,7 @@
main.log.info( self.name + ": " + output )
return output
except AssertionError:
- main.log.exception( "Error in processing 'counters' command." )
+ main.log.exception( self.name + ": Error in processing 'counters' command." )
return None
except TypeError:
main.log.exception( self.name + ": Object not as expected" )
@@ -4839,7 +4880,7 @@
updatedPattern ) )
main.log.debug( self.name + " actual: " + repr( output ) )
return results
- except TypeError:
+ except ( TypeError, AttributeError ):
main.log.exception( self.name + ": Object not as expected" )
return None
except Exception:
diff --git a/TestON/drivers/common/cli/onosclusterdriver.py b/TestON/drivers/common/cli/onosclusterdriver.py
index f8badd7..5983855 100755
--- a/TestON/drivers/common/cli/onosclusterdriver.py
+++ b/TestON/drivers/common/cli/onosclusterdriver.py
@@ -60,18 +60,18 @@
if hasattr( self.REST, name ):
if not usedDriver:
usedDriver = True
- main.log.debug("Using Rest driver's attribute for '%s'" % (name))
- f = getattr( self.REST, name)
+ main.log.debug( "%s: Using Rest driver's attribute for '%s'" % ( self.name, name ) )
+ f = getattr( self.REST, name )
if hasattr( self.CLI, name ):
if not usedDriver:
usedDriver = True
- main.log.debug("Using CLI driver's attribute for '%s'" % (name))
- f = getattr( self.CLI, name)
+ main.log.debug( "%s: Using CLI driver's attribute for '%s'" % ( self.name, name ) )
+ f = getattr( self.CLI, name )
if hasattr( self.Bench, name ):
if not usedDriver:
usedDriver = True
- main.log.debug("Using Bench driver's attribute for '%s'" % (name))
- f = getattr( self.Bench, name)
+ main.log.debug( "%s: Using Bench driver's attribute for '%s'" % ( self.name, name ) )
+ f = getattr( self.Bench, name )
if usedDriver:
return f
raise AttributeError( "Could not find the attribute %s in %r or it's component handles" % ( name, self ) )
@@ -128,9 +128,9 @@
elif key == "cluster_name":
prefix = self.options[ key ]
- self.home = self.checkOptions(self.home, "~/onos" )
- self.karafUser = self.checkOptions(self.karafUser, self.user_name)
- self.karafPass = self.checkOptions(self.karafPass, self.pwd )
+ self.home = self.checkOptions( self.home, "~/onos" )
+ self.karafUser = self.checkOptions( self.karafUser, self.user_name )
+ self.karafPass = self.checkOptions( self.karafPass, self.pwd )
prefix = self.checkOptions( prefix, "ONOS" )
self.name = self.options[ 'name' ]
diff --git a/TestON/drivers/common/cli/onosdriver.py b/TestON/drivers/common/cli/onosdriver.py
index 0cb30a5..be64d99 100755
--- a/TestON/drivers/common/cli/onosdriver.py
+++ b/TestON/drivers/common/cli/onosdriver.py
@@ -2557,7 +2557,7 @@
"""
Run onos-diagnostics with given ONOS instance IPs and save output to dstDir
with suffix specified E.g. onos-diags-suffix.tar.gz
- required argDuments:
+ required arguments:
onosIPs - list of ONOS IPs for collecting diags
dstDir - diags file will be saved under the directory specified
suffix - diags file will be named with the suffix specified
@@ -2599,3 +2599,41 @@
except Exception:
main.log.exception( self.name + ": Uncaught exception!" )
main.cleanAndExit()
+
+ def onosPower( self, onosIP, toggle, userName=None ):
+ """
+ Run onos-power script to tell the cell warden to simulate a power failure
+ for the given container.
+ required :
+ onosIP - ONOS node IP
+ toggle - either "off" or "on", used to indicate whether
+ the node should be powered off or on
+ returns:
+ main.FALSE if there's an error executing the command, and main.TRUE otherwise
+ """
+ try:
+ cmd = "onos-power {} {}".format( onosIP, toggle )
+ if userName:
+ cmd += " {}".format( userName )
+ self.handle.sendline( cmd )
+ self.handle.expect( self.prompt )
+ handle = self.handle.before
+ main.log.debug( handle )
+ assert handle is not None, "Error in sendline"
+ assert "Command not found:" not in handle, handle
+ assert "Exception:" not in handle, handle
+ assert "usage:" not in handle, handle
+ return main.TRUE
+ except AssertionError:
+ main.log.exception( "{} Error in onos-power output:".format( self.name ) )
+ return main.FALSE
+ except TypeError:
+ main.log.exception( self.name + ": Object not as expected" )
+ return main.FALSE
+ except pexpect.EOF:
+ main.log.error( self.name + ": EOF exception found" )
+ main.log.error( self.name + ": " + self.handle.before )
+ main.cleanAndExit()
+ except Exception:
+ main.log.exception( self.name + ": Uncaught exception!" )
+ main.cleanAndExit()
diff --git a/TestON/drivers/common/clidriver.py b/TestON/drivers/common/clidriver.py
index 43352c5..0b34df4 100644
--- a/TestON/drivers/common/clidriver.py
+++ b/TestON/drivers/common/clidriver.py
@@ -520,3 +520,37 @@
main.log.error( self.name + ": EOF exception found" )
main.log.error( self.name + ": " + self.handle.before )
main.cleanAndExit()
+
+ def setEnv( self, variable, value=None ):
+ """
+ Sets the environment variable to the given value for the current shell session.
+ If value is None, will unset the variable.
+
+ Required Arguments:
+ variable - The name of the environment variable to set.
+
+ Optional Arguments:
+ value - The value to set the variable to. ( Defaults to None, which unsets the variable )
+
+ Returns True if no errors are detected else returns False
+ """
+ try:
+ if value:
+ cmd = "export {}={}".format( variable, value )
+ else:
+ cmd = "unset {}".format( variable )
+ self.handle.sendline( cmd )
+ self.handle.expect( self.prompt )
+ main.log.debug( self.handle.before )
+ return True
+ except AssertionError:
+ main.log.error( self.name + ": Could not execute command: " + cmd )
+ return False
+ except pexpect.TIMEOUT:
+ main.log.exception( self.name + ": TIMEOUT exception found" )
+ main.log.error( self.name + ": " + self.handle.before )
+ return False
+ except pexpect.EOF:
+ main.log.error( self.name + ": EOF exception found" )
+ main.log.error( self.name + ": " + self.handle.before )
+ main.cleanAndExit()
diff --git a/TestON/tests/HA/HApowerFailure/HApowerFailure.params b/TestON/tests/HA/HApowerFailure/HApowerFailure.params
new file mode 100644
index 0000000..0a8fefe
--- /dev/null
+++ b/TestON/tests/HA/HApowerFailure/HApowerFailure.params
@@ -0,0 +1,110 @@
+<PARAMS>
+ #List of test cases:
+ #CASE1: Compile ONOS and push it to the test machines
+ #CASE2: Assign mastership to controllers
+ #CASE21: Assign mastership to controllers
+ #CASE102: Start Spine-Leaf Topology in Mininet
+ #CASE3: Assign intents
+ #CASE4: Ping across added host intents
+ #CASE104: Ping between all hosts
+ #CASE5: Reading state of ONOS
+ #CASE61: Kill a container
+ #CASE62: Restart a container
+ #CASE7: Check state after control plane failure
+ #CASE8: Compare topo
+ #CASE9: Link down
+ #CASE10: Link up
+ #CASE11: Switch down
+ #CASE12: Switch up
+ #CASE13: Clean up
+ #CASE14: Start election app on all onos nodes
+ #CASE15: Check that Leadership Election is still functional
+ #CASE16: Install Distributed Primitives app
+ #CASE17: Check for basic functionality with distributed primitives
+ <testcases>1,2,8,21,3,4,5,14,15,16,17,[61,17,17,8,7,4,15,17,62],8,7,4,15,17,9,8,4,10,8,4,11,8,4,12,8,4,13</testcases>
+
+ <GRAPH>
+ <nodeCluster>VM</nodeCluster>
+ <builds>20</builds>
+ </GRAPH>
+
+ <apps></apps>
+ <ONOS_Configuration>
+ <org.onosproject.net.intent.impl.compiler.IntentConfigurableRegistrator>
+ <useFlowObjectives>false</useFlowObjectives>
+ </org.onosproject.net.intent.impl.compiler.IntentConfigurableRegistrator>
+ <org.onosproject.store.flow.impl.ECFlowRuleStore>
+ <backupCount>3</backupCount>
+ </org.onosproject.store.flow.impl.ECFlowRuleStore>
+ </ONOS_Configuration>
+ <ONOS_Logging>
+ <org.onosproject.events>TRACE</org.onosproject.events>
+ </ONOS_Logging>
+ <ENV>
+ <cellName>HA</cellName>
+ <appString>events,drivers,openflow,proxyarp,mobility</appString>
+ </ENV>
+ <GIT>
+ <pull>False</pull>
+ <branch>master</branch>
+ </GIT>
+ <num_controllers> 7 </num_controllers>
+ <tcpdump> False </tcpdump>
+
+ <CTRL>
+ <port1>6653</port1>
+ <port2>6653</port2>
+ <port3>6653</port3>
+ <port4>6653</port4>
+ <port5>6653</port5>
+ <port6>6653</port6>
+ <port7>6653</port7>
+ </CTRL>
+ <BACKUP>
+ <ENABLED> False </ENABLED>
+ <TESTONUSER>sdn</TESTONUSER>
+ <TESTONIP>10.128.30.9</TESTONIP>
+ </BACKUP>
+ <PING>
+ <source1>h8</source1>
+ <source2>h9</source2>
+ <source3>h10</source3>
+ <source4>h11</source4>
+ <source5>h12</source5>
+ <source6>h13</source6>
+ <source7>h14</source7>
+ <source8>h15</source8>
+ <source9>h16</source9>
+ <source10>h17</source10>
+ <target1>10.0.0.18</target1>
+ <target2>10.0.0.19</target2>
+ <target3>10.0.0.20</target3>
+ <target4>10.0.0.21</target4>
+ <target5>10.0.0.22</target5>
+ <target6>10.0.0.23</target6>
+ <target7>10.0.0.24</target7>
+ <target8>10.0.0.25</target8>
+ <target9>10.0.0.26</target9>
+ <target10>10.0.0.27</target10>
+ </PING>
+ <timers>
+ <LinkDiscovery>12</LinkDiscovery>
+ <SwitchDiscovery>12</SwitchDiscovery>
+ <gossip>5</gossip>
+ </timers>
+ <kill>
+ <linkSrc> s28 </linkSrc>
+ <linkDst> s3 </linkDst>
+ <switch> s5 </switch>
+ <dpid> 0000000000005000 </dpid>
+ <links> h5 s2 s1 s6 </links>
+ </kill>
+ <MNtcpdump>
+ <intf>eth0</intf>
+ <port> </port>
+ <folder>~/packet_captures/</folder>
+ </MNtcpdump>
+ <cell>
+ <user>jon</user>
+ </cell>
+</PARAMS>
diff --git a/TestON/tests/HA/HApowerFailure/HApowerFailure.params.fabric b/TestON/tests/HA/HApowerFailure/HApowerFailure.params.fabric
new file mode 100644
index 0000000..210d216
--- /dev/null
+++ b/TestON/tests/HA/HApowerFailure/HApowerFailure.params.fabric
@@ -0,0 +1,99 @@
+<PARAMS>
+ #List of test cases:
+ #CASE1: Compile ONOS and push it to the test machines
+ #CASE2: Assign mastership to controllers
+ #CASE21: Assign mastership to controllers
+ #CASE102: Start Spine-Leaf Topology in Mininet
+ #CASE3: Assign intents
+ #CASE4: Ping across added host intents
+ #CASE104: Ping between all hosts
+ #CASE5: Reading state of ONOS
+ #CASE61: Kill a container
+ #CASE62: Restart a container
+ #CASE7: Check state after control plane failure
+ #CASE8: Compare topo
+ #CASE9: Link down
+ #CASE10: Link up
+ #CASE11: Switch down
+ #CASE12: Switch up
+ #CASE13: Clean up
+ #CASE14: Start election app on all onos nodes
+ #CASE15: Check that Leadership Election is still functional
+ #CASE16: Install Distributed Primitives app
+ #CASE17: Check for basic functionality with distributed primitives
+ <testcases>1,102,8,104,5,14,15,16,17,[61,8,7,104,15,17,62],8,7,8,104,15,17,9,8,104,10,8,104,[11,8,104,12,8,104]*0,13</testcases>
+
+ <GRAPH>
+ <nodeCluster>VM</nodeCluster>
+ <builds>20</builds>
+ </GRAPH>
+
+ <apps></apps>
+ <ONOS_Configuration>
+ <org.onosproject.store.flow.impl.ECFlowRuleStore>
+ <backupCount>3</backupCount>
+ </org.onosproject.store.flow.impl.ECFlowRuleStore>
+ </ONOS_Configuration>
+ <ONOS_Logging>
+ <org.onosproject.events>TRACE</org.onosproject.events>
+ <org.onosproject.segmentrouting>DEBUG</org.onosproject.segmentrouting>
+ <org.onosproject.driver.pipeline>DEBUG</org.onosproject.driver.pipeline>
+ <org.onosproject.store.group.impl>DEBUG</org.onosproject.store.group.impl>
+ <org.onosproject.net.flowobjective.impl>DEBUG</org.onosproject.net.flowobjective.impl>
+ </ONOS_Logging>
+ <ENV>
+ <cellName>HA</cellName>
+ <appString>events,drivers,openflow,segmentrouting,netcfghostprovider</appString>
+ </ENV>
+ <GIT>
+ <pull>False</pull>
+ <branch>master</branch>
+ </GIT>
+ <num_controllers> 7 </num_controllers>
+ <tcpdump> False </tcpdump>
+
+ <topology>
+ <files>
+ <topo>~/TestON/tests/USECASE/SegmentRouting/dependencies/fabric.py</topo>
+ <dep1>~/TestON/tests/USECASE/SegmentRouting/dependencies/trellislib.py</dep1>
+ <dep2>~/TestON/tests/USECASE/SegmentRouting/dependencies/routinglib.py</dep2>
+ </files>
+ <topoFile>fabric.py</topoFile>
+ <args> --dhcp=0 --routers=0 --ipv6=0 --ipv4=1 </args>
+ <configPath>/HA/dependencies/json/</configPath>
+ <configName>TRELLIS_CONFIG_ipv4=1_ipv6=0_dhcp=0_routers=0.json</configName>
+ </topology>
+ <CTRL>
+ <port1>6653</port1>
+ <port2>6653</port2>
+ <port3>6653</port3>
+ <port4>6653</port4>
+ <port5>6653</port5>
+ <port6>6653</port6>
+ <port7>6653</port7>
+ </CTRL>
+ <BACKUP>
+ <ENABLED> False </ENABLED>
+ <TESTONUSER>sdn</TESTONUSER>
+ <TESTONIP>10.128.30.9</TESTONIP>
+ </BACKUP>
+ <timers>
+ <NetCfg>5</NetCfg>
+ <SRSetup>60</SRSetup>
+ <LinkDiscovery>60</LinkDiscovery>
+ <SwitchDiscovery>60</SwitchDiscovery>
+ <gossip>5</gossip>
+ </timers>
+ <kill>
+ <linkSrc>spine102</linkSrc>
+ <linkDst>leaf1</linkDst>
+ <switch> spine101 </switch>
+ <dpid> 0000000000000101 </dpid>
+ <links> leaf1 leaf2 leaf2 leaf3 leaf3 leaf4 leaf4 leaf5 leaf5 </links>
+ </kill>
+ <MNtcpdump>
+ <intf>eth0</intf>
+ <port> </port>
+ <folder>~/packet_captures/</folder>
+ </MNtcpdump>
+</PARAMS>
diff --git a/TestON/tests/HA/HApowerFailure/HApowerFailure.params.intents b/TestON/tests/HA/HApowerFailure/HApowerFailure.params.intents
new file mode 100644
index 0000000..1ddb8c1
--- /dev/null
+++ b/TestON/tests/HA/HApowerFailure/HApowerFailure.params.intents
@@ -0,0 +1,108 @@
+<PARAMS>
+ #List of test cases:
+ #CASE1: Compile ONOS and push it to the test machines
+ #CASE2: Assign mastership to controllers
+ #CASE21: Assign mastership to controllers
+ #CASE102: Start Spine-Leaf Topology in Mininet
+ #CASE3: Assign intents
+ #CASE4: Ping across added host intents
+ #CASE104: Ping between all hosts
+ #CASE5: Reading state of ONOS
+ #CASE61: Kill a container
+ #CASE62: Restart a container
+ #CASE7: Check state after control plane failure
+ #CASE8: Compare topo
+ #CASE9: Link down
+ #CASE10: Link up
+ #CASE11: Switch down
+ #CASE12: Switch up
+ #CASE13: Clean up
+ #CASE14: Start election app on all onos nodes
+ #CASE15: Check that Leadership Election is still functional
+ #CASE16: Install Distributed Primitives app
+ #CASE17: Check for basic functionality with distributed primitives
+ <testcases>1,2,8,21,3,4,5,14,15,16,17,[61,8,7,4,15,17,62],8,7,4,15,17,9,8,4,10,8,4,11,8,4,12,8,4,13</testcases>
+
+ <GRAPH>
+ <nodeCluster>VM</nodeCluster>
+ <builds>20</builds>
+ </GRAPH>
+
+ <apps></apps>
+ <ONOS_Configuration>
+ <org.onosproject.net.intent.impl.compiler.IntentConfigurableRegistrator>
+ <useFlowObjectives>false</useFlowObjectives>
+ <defaultFlowObjectiveCompiler>org.onosproject.net.intent.impl.compiler.LinkCollectionIntentObjectiveCompiler</defaultFlowObjectiveCompiler>
+ </org.onosproject.net.intent.impl.compiler.IntentConfigurableRegistrator>
+ <org.onosproject.store.flow.impl.ECFlowRuleStore>
+ <backupCount>3</backupCount>
+ </org.onosproject.store.flow.impl.ECFlowRuleStore>
+ </ONOS_Configuration>
+ <ONOS_Logging>
+ <org.onosproject.events>TRACE</org.onosproject.events>
+ </ONOS_Logging>
+ <ENV>
+ <cellName>HA</cellName>
+ <appString>events,drivers,openflow,proxyarp,mobility</appString>
+ </ENV>
+ <GIT>
+ <pull>False</pull>
+ <branch>master</branch>
+ </GIT>
+ <num_controllers> 7 </num_controllers>
+ <tcpdump> False </tcpdump>
+
+ <CTRL>
+ <port1>6653</port1>
+ <port2>6653</port2>
+ <port3>6653</port3>
+ <port4>6653</port4>
+ <port5>6653</port5>
+ <port6>6653</port6>
+ <port7>6653</port7>
+ </CTRL>
+ <BACKUP>
+ <ENABLED> False </ENABLED>
+ <TESTONUSER>sdn</TESTONUSER>
+ <TESTONIP>10.128.30.9</TESTONIP>
+ </BACKUP>
+ <PING>
+ <source1>h8</source1>
+ <source2>h9</source2>
+ <source3>h10</source3>
+ <source4>h11</source4>
+ <source5>h12</source5>
+ <source6>h13</source6>
+ <source7>h14</source7>
+ <source8>h15</source8>
+ <source9>h16</source9>
+ <source10>h17</source10>
+ <target1>10.0.0.18</target1>
+ <target2>10.0.0.19</target2>
+ <target3>10.0.0.20</target3>
+ <target4>10.0.0.21</target4>
+ <target5>10.0.0.22</target5>
+ <target6>10.0.0.23</target6>
+ <target7>10.0.0.24</target7>
+ <target8>10.0.0.25</target8>
+ <target9>10.0.0.26</target9>
+ <target10>10.0.0.27</target10>
+ </PING>
+ <timers>
+ <LinkDiscovery>12</LinkDiscovery>
+ <SwitchDiscovery>12</SwitchDiscovery>
+ <gossip>5</gossip>
+ </timers>
+ <kill>
+ <linkSrc> s28 </linkSrc>
+ <linkDst> s3 </linkDst>
+ <switch> s5 </switch>
+ <dpid> 0000000000005000 </dpid>
+ <links> h5 s2 s1 s6 </links>
+ </kill>
+ <MNtcpdump>
+ <intf>eth0</intf>
+ <port> </port>
+ <folder>~/packet_captures/</folder>
+ </MNtcpdump>
+</PARAMS>
diff --git a/TestON/tests/HA/HApowerFailure/HApowerFailure.py b/TestON/tests/HA/HApowerFailure/HApowerFailure.py
new file mode 100644
index 0000000..dd40e9c
--- /dev/null
+++ b/TestON/tests/HA/HApowerFailure/HApowerFailure.py
@@ -0,0 +1,375 @@
+"""
+Copyright 2018 Open Networking Foundation ( ONF )
+
+Please refer questions to either the onos test mailing list at <onos-test@onosproject.org>,
+the System Testing Plans and Results wiki page at <https://wiki.onosproject.org/x/voMg>,
+or the System Testing Guide page at <https://wiki.onosproject.org/x/WYQg>
+
+ TestON is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 2 of the License, or
+ ( at your option ) any later version.
+
+ TestON is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with TestON. If not, see <http://www.gnu.org/licenses/>.
+"""
+"""
+Description: This test is to determine if ONOS can handle
+ a minority of its nodes restarting
+
+List of test cases:
+CASE1: Compile ONOS and push it to the test machines
+CASE2: Assign devices to controllers
+CASE21: Assign mastership to controllers
+CASE3: Assign intents
+CASE4: Ping across added host intents
+CASE5: Reading state of ONOS
+CASE61: The Failure inducing case.
+CASE62: The Failure recovery case.
+CASE7: Check state after control plane failure
+CASE8: Compare topo
+CASE9: Link s3-s28 down
+CASE10: Link s3-s28 up
+CASE11: Switch down
+CASE12: Switch up
+CASE13: Clean up
+CASE14: start election app on all onos nodes
+CASE15: Check that Leadership Election is still functional
+CASE16: Install Distributed Primitives app
+CASE17: Check for basic functionality with distributed primitives
+"""
+class HApowerFailure:
+
+ def __init__( self ):
+ self.default = ''
+
+ def CASE1( self, main ):
+ """
+ CASE1 is to compile ONOS and push it to the test machines
+
+ Startup sequence:
+ cell <name>
+ onos-verify-cell
+ NOTE: temporary - onos-remove-raft-logs
+ onos-uninstall
+ start mininet
+ git pull
+ mvn clean install
+ onos-package
+ onos-install -f
+ onos-wait-for-start
+ start cli sessions
+ start tcpdump
+ """
+ main.log.info( "ONOS HA test: Simulate a power failure on a minority of ONOS nodes - " +
+ "initialization" )
+ # These are for csv plotting in jenkins
+ main.HAlabels = []
+ main.HAdata = []
+ try:
+ from tests.dependencies.ONOSSetup import ONOSSetup
+ main.testSetUp = ONOSSetup()
+ except ImportError:
+ main.log.error( "ONOSSetup not found. exiting the test" )
+ main.cleanAndExit()
+ main.testSetUp.envSetupDescription()
+ try:
+ from tests.HA.dependencies.HA import HA
+ main.HA = HA()
+ cellName = main.params[ 'ENV' ][ 'cellName' ]
+ main.apps = main.params[ 'ENV' ][ 'appString' ]
+ stepResult = main.testSetUp.envSetup( includeCaseDesc=False )
+ except Exception as e:
+ main.testSetUp.envSetupException( e )
+ main.testSetUp.evnSetupConclusion( stepResult )
+
+ applyFuncs = [ main.HA.customizeOnosGenPartitions,
+ main.HA.copyBackupConfig,
+ main.ONOSbench.preventAutoRespawn ]
+ applyArgs = [ None, None, None ]
+ try:
+ if main.params[ 'topology' ][ 'topoFile' ]:
+ main.log.info( 'Skipping start of Mininet in this case, make sure you start it elsewhere' )
+ else:
+ applyFuncs.append( main.HA.startingMininet )
+ applyArgs.append( None )
+ except (KeyError, IndexError):
+ applyFuncs.append( main.HA.startingMininet )
+ applyArgs.append( None )
+
+ main.testSetUp.ONOSSetUp( main.Cluster, cellName=cellName, removeLog=True,
+ extraApply=applyFuncs,
+ applyArgs=applyArgs,
+ extraClean=main.HA.cleanUpGenPartition,
+ includeCaseDesc=False )
+ main.HA.initialSetUp( serviceClean=True )
+
+ main.step( 'Set logging levels' )
+ logging = True
+ try:
+ logs = main.params.get( 'ONOS_Logging', False )
+ if logs:
+ for namespace, level in logs.items():
+ for ctrl in main.Cluster.active():
+ ctrl.CLI.logSet( level, namespace )
+ except AttributeError:
+ logging = False
+ utilities.assert_equals( expect=True, actual=logging,
+ onpass="Set log levels",
+ onfail="Failed to set log levels" )
+
+ def CASE2( self, main ):
+ """
+ Assign devices to controllers
+ """
+ main.HA.assignDevices( main )
+
+ def CASE102( self, main ):
+ """
+ Set up Spine-Leaf fabric topology in Mininet
+ """
+ main.HA.startTopology( main )
+
+ def CASE21( self, main ):
+ """
+ Assign mastership to controllers
+ """
+ main.HA.assignMastership( main )
+
+ def CASE3( self, main ):
+ """
+ Assign intents
+ """
+ main.HA.assignIntents( main )
+
+ def CASE4( self, main ):
+ """
+ Ping across added host intents
+ """
+ main.HA.pingAcrossHostIntent( main )
+
+ def CASE104( self, main ):
+ """
+ Ping Hosts
+ """
+ main.case( "Check connectivity" )
+ main.step( "Ping between all hosts" )
+ pingResult = main.Mininet1.pingall()
+ utilities.assert_equals( expect=main.TRUE, actual=pingResult,
+ onpass="All Pings Passed",
+ onfail="Failed to ping between all hosts" )
+
+ def CASE5( self, main ):
+ """
+ Reading state of ONOS
+ """
+ main.HA.readingState( main )
+
+ def CASE61( self, main ):
+ """
+ The Failure case.
+ """
+ assert main, "main not defined"
+ assert utilities.assert_equals, "utilities.assert_equals not defined"
+ main.case( "Simulate a power failure on a minority of ONOS nodes" )
+
+ main.step( "Checking ONOS Logs for errors" )
+ for ctrl in main.Cluster.active():
+ main.log.debug( "Checking logs for errors on " + ctrl.name + ":" )
+ main.log.warn( ctrl.checkLogs( ctrl.ipAddress ) )
+
+ main.kill = [ main.Cluster.runningNodes[ 0 ] ] # ONOS node to kill, listed by index in main.nodes
+ n = len( main.Cluster.runningNodes ) # Number of nodes
+ p = ( ( n + 1 ) / 2 ) + 1 # Number of partitions
+ if n > 3:
+ main.kill.append( main.Cluster.runningNodes[ p - 1 ] )
+ # NOTE: This only works for cluster sizes of 3,5, or 7.
+
+ # NOTE: This is to fix an issue with wiki formatting
+ nodeNames = [ node.name for node in main.kill ]
+ # Set the env variables so we actually use the warden power ON/OFF functionality
+ # NOTE: Only works with warden
+ main.ONOSbench.setEnv( "HARD_POWER_OFF", "True" )
+ main.ONOSbench.setEnv( "ONOS_CELL", "borrow" )
+ main.step( "Killing nodes: " + str( nodeNames ) )
+ killResults = main.TRUE
+ userName = main.params[ 'cell' ][ 'user' ]
+ for ctrl in main.kill:
+ killResults = killResults and\
+ main.ONOSbench.onosPower( ctrl.ipAddress, "off", userName )
+ ctrl.active = False
+ main.Cluster.reset()
+ utilities.assert_equals( expect=main.TRUE, actual=killResults,
+ onpass="ONOS nodes killed successfully",
+ onfail="ONOS nodes NOT successfully killed" )
+
+ main.step( "Checking ONOS nodes" )
+ nodeResults = utilities.retry( main.Cluster.nodesCheck,
+ False,
+ sleep=15,
+ attempts=5 )
+
+ utilities.assert_equals( expect=True, actual=nodeResults,
+ onpass="Nodes check successful",
+ onfail="Nodes check NOT successful" )
+
+ if not nodeResults:
+ for ctrl in main.Cluster.active():
+ main.log.debug( "{} components not ACTIVE: \n{}".format(
+ ctrl.name,
+ ctrl.CLI.sendline( "scr:list | grep -v ACTIVE" ) ) )
+ main.log.error( "Failed to start ONOS, stopping test" )
+ main.cleanAndExit()
+
+ for i in range( 1, 100 ):
+ main.Cluster.next().summary()
+ for i in range( 1, 100 ):
+ main.Cluster.next().partitions()
+ for ctrl in main.Cluster.active():
+ main.log.warn( repr( ctrl ) )
+
+ def CASE62( self, main ):
+ """
+ Bring up the stopped ONOS nodes
+ """
+ userName = main.params[ 'cell' ][ 'user' ]
+ # NOTE: The warden will actually power up in reverse alphabetical order of container
+ # names in a cell, ignoring the ip given.
+ for ctrl in main.kill:
+ main.ONOSbench.onosPower( ctrl.ipAddress, "on", userName )
+ for component in [ ctrl.CLI, ctrl.server ]:
+ component.connect()
+ main.HA.bringUpStoppedNodes( main )
+ for ctrl in main.Cluster.active():
+ main.log.warn( repr( ctrl ) )
+
+ def CASE7( self, main ):
+ """
+ Check state after ONOS failure
+ """
+ try:
+ main.kill
+ except AttributeError:
+ main.kill = []
+
+ main.HA.checkStateAfterEvent( main, afterWhich=0 )
+ main.step( "Leadership Election is still functional" )
+ # Test of LeadershipElection
+ leaderList = []
+
+ restarted = []
+ for ctrl in main.kill:
+ restarted.append( ctrl.ipAddress )
+ leaderResult = main.TRUE
+
+ for ctrl in main.Cluster.active():
+ leaderN = ctrl.electionTestLeader()
+ leaderList.append( leaderN )
+ if leaderN == main.FALSE:
+ # error in response
+ main.log.error( "Something is wrong with " +
+ "electionTestLeader function, check the" +
+ " error logs" )
+ leaderResult = main.FALSE
+ elif leaderN is None:
+ main.log.error( ctrl.name +
+ " shows no leader for the election-app was" +
+ " elected after the old one died" )
+ leaderResult = main.FALSE
+ elif leaderN in restarted:
+ main.log.error( ctrl.name + " shows " + str( leaderN ) +
+ " as leader for the election-app, but it " +
+ "was restarted" )
+ leaderResult = main.FALSE
+ if len( set( leaderList ) ) != 1:
+ leaderResult = main.FALSE
+ main.log.error(
+ "Inconsistent view of leader for the election test app" )
+ main.log.debug( leaderList )
+ utilities.assert_equals(
+ expect=main.TRUE,
+ actual=leaderResult,
+ onpass="Leadership election passed",
+ onfail="Something went wrong with Leadership election" )
+
+ def CASE8( self, main ):
+ """
+ Compare topo
+ """
+ main.HA.compareTopo( main )
+
+ def CASE9( self, main ):
+ """
+ Link down
+ """
+ src = main.params['kill']['linkSrc']
+ dst = main.params['kill']['linkDst']
+ main.HA.linkDown( main, src, dst )
+
+ def CASE10( self, main ):
+ """
+ Link up
+ """
+ src = main.params['kill']['linkSrc']
+ dst = main.params['kill']['linkDst']
+ main.HA.linkUp( main, src, dst )
+
+ def CASE11( self, main ):
+ """
+ Switch Down
+ """
+ # NOTE: You should probably run a topology check after this
+ main.HA.switchDown( main )
+
+ def CASE12( self, main ):
+ """
+ Switch Up
+ """
+ # NOTE: You should probably run a topology check after this
+ main.HA.switchUp( main )
+
+ def CASE13( self, main ):
+ """
+ Clean up
+ """
+ main.HA.cleanUp( main )
+
+ def CASE14( self, main ):
+ """
+ Start election app on all onos nodes
+ """
+ main.HA.startElectionApp( main )
+
+ def CASE15( self, main ):
+ """
+ Check that Leadership Election is still functional
+ 15.1 Run election on each node
+ 15.2 Check that each node has the same leaders and candidates
+ 15.3 Find current leader and withdraw
+ 15.4 Check that a new node was elected leader
+ 15.5 Check that that new leader was the candidate of old leader
+ 15.6 Run for election on old leader
+ 15.7 Check that oldLeader is a candidate, and leader if only 1 node
+ 15.8 Make sure that the old leader was added to the candidate list
+
+ old and new variable prefixes refer to data from before vs after
+ withdrawal and later before withdrawal vs after re-election
+ """
+ main.HA.isElectionFunctional( main )
+
+ def CASE16( self, main ):
+ """
+ Install Distributed Primitives app
+ """
+ main.HA.installDistributedPrimitiveApp( main )
+
+ def CASE17( self, main ):
+ """
+ Check for basic functionality with distributed primitives
+ """
+ main.HA.checkDistPrimitivesFunc( main )
diff --git a/TestON/tests/HA/HApowerFailure/HApowerFailure.topo b/TestON/tests/HA/HApowerFailure/HApowerFailure.topo
new file mode 100644
index 0000000..4bf4bd4
--- /dev/null
+++ b/TestON/tests/HA/HApowerFailure/HApowerFailure.topo
@@ -0,0 +1,53 @@
+<TOPOLOGY>
+ <COMPONENT>
+
+ <ONOScell>
+ <host>localhost</host> # ONOS "bench" machine
+ <user>sdn</user>
+ <password>rocks</password>
+ <type>OnosClusterDriver</type>
+ <connect_order>1</connect_order>
+ <COMPONENTS>
+ <cluster_name></cluster_name> # Used as a prefix for cluster components. Defaults to 'ONOS'
+ <diff_clihost></diff_clihost> # if it has different host other than localhost for CLI. True or empty. OC# will be used if True.
+ <karaf_username></karaf_username>
+ <karaf_password></karaf_password>
+ <web_user></web_user>
+ <web_pass></web_pass>
+ <rest_port></rest_port>
+ <prompt></prompt> # TODO: we technically need a few of these, one per component
+ <onos_home></onos_home> # defines where onos home is
+ <nodes> 7 </nodes> # number of nodes in the cluster
+ </COMPONENTS>
+ </ONOScell>
+
+ <Mininet1>
+ <host>OCN</host>
+ <user>sdn</user>
+ <password>rocks</password>
+ <type>MininetCliDriver</type>
+ <connect_order>2</connect_order>
+ <COMPONENTS>
+ #Specify the Option for mininet
+ <arg1> --custom ~/mininet/custom/obelisk.py </arg1>
+ <arg2> --topo obelisk </arg2>
+ <arg3> --switch ovs,protocols=OpenFlow13 </arg3>
+ <controller> none </controller>
+ <home>~/mininet/custom/</home>
+ <prompt></prompt>
+ </COMPONENTS>
+ </Mininet1>
+
+ <Mininet2>
+ <host>OCN</host>
+ <user>sdn</user>
+ <password>rocks</password>
+ <type>RemoteMininetDriver</type>
+ <connect_order>3</connect_order>
+ <COMPONENTS>
+ <prompt></prompt>
+ </COMPONENTS>
+ </Mininet2>
+
+ </COMPONENT>
+</TOPOLOGY>
diff --git a/TestON/tests/HA/HApowerFailure/README b/TestON/tests/HA/HApowerFailure/README
new file mode 100644
index 0000000..069e5af
--- /dev/null
+++ b/TestON/tests/HA/HApowerFailure/README
@@ -0,0 +1,26 @@
+This test is designed to verify that an ONOS cluster behaves correctly when
+ONOS nodes die due to power failures. Currently, we will kill nodes so that
+each raft partition will lose a member, but we make sure that there is always
+a majority of nodes available in each partition.
+
+As written, the test only supports an ONOS cluster of 3,5, or 7 nodes.
+This is because the test doesn't apply to a single node cluster, ONOS clusters
+should be deployed in odd numbers, and the partition generation and node
+killing scheme used doesn't give the same properties for clusters of more
+than 7 nodes. Namely, each partition won't have exactly one node killed.
+
+The general structure for the test:
+- Startup
+- Assign switches
+- Verify ONOS state and functionality
+ - Device mastership
+ - Intents
+ - Leadership election
+ - Distributed Primitives
+- Kill some ONOS nodes
+- Verify ONOS state and functionality
+- Restart ONOS nodes
+- Verify ONOS state and functionality
+- Dataplane failures
+ - link down and up
+ - switch down and up
diff --git a/TestON/tests/HA/HApowerFailure/__init__.py b/TestON/tests/HA/HApowerFailure/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/TestON/tests/HA/HApowerFailure/__init__.py
diff --git a/TestON/tests/HA/HApowerFailure/dependencies/__init__.py b/TestON/tests/HA/HApowerFailure/dependencies/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/TestON/tests/HA/HApowerFailure/dependencies/__init__.py
diff --git a/TestON/tests/HA/HAupgrade/HAupgrade.py b/TestON/tests/HA/HAupgrade/HAupgrade.py
index 8122044..3eb6c94 100644
--- a/TestON/tests/HA/HAupgrade/HAupgrade.py
+++ b/TestON/tests/HA/HAupgrade/HAupgrade.py
@@ -280,11 +280,17 @@
main.case( "Commit upgrade" )
main.step( "Send the command to commit the upgrade" )
+ for ctrl in main.Cluster.active():
+ status = ctrl.issu()
+ main.log.debug( status )
ctrl = main.Cluster.next().CLI
committed = ctrl.issuCommit()
utilities.assert_equals( expect=main.TRUE, actual=committed,
onpass="Upgrade has been committed",
onfail="Error committing the upgrade" )
+ for ctrl in main.Cluster.active():
+ status = ctrl.issu()
+ main.log.debug( status )
main.step( "Check the status of the upgrade" )
ctrl = main.Cluster.next().CLI
@@ -300,7 +306,6 @@
onpass="Nodes check successful",
onfail="Nodes check NOT successful" )
-
def CASE7( self, main ):
"""
Check state after ONOS failure
diff --git a/TestON/tests/HA/dependencies/HA.py b/TestON/tests/HA/dependencies/HA.py
index 0e5da1d..8581248 100644
--- a/TestON/tests/HA/dependencies/HA.py
+++ b/TestON/tests/HA/dependencies/HA.py
@@ -49,7 +49,7 @@
main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
main.ONOSbench.handle.sendline( "git checkout -- tools/test/bin/onos-gen-partitions" )
main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
- main.log.info( " Cleaning custom gen partitions file, response was: \n" +
+ main.log.info( "Cleaning custom gen partitions file, response was: \n" +
str( main.ONOSbench.handle.before ) )
except ( pexpect.TIMEOUT, pexpect.EOF ):
main.log.exception( "ONOSbench: pexpect exception found:" +
@@ -134,12 +134,10 @@
ip = main.ONOSbench.getIpAddr( iface=iface )
metaFile = "cluster.json"
javaArgs = r"-Donos.cluster.metadata.uri=http:\/\/{}:{}\/{}".format( ip, main.serverPort, metaFile )
- main.log.warn( javaArgs )
main.log.warn( repr( javaArgs ) )
handle = main.ONOSbench.handle
sed = r"sed -i 's/bash/bash\nexport JAVA_OPTS=${{JAVA_OPTS:-{}}}\n/' {}".format( javaArgs,
main.onosServicepath )
- main.log.warn( sed )
main.log.warn( repr( sed ) )
handle.sendline( sed )
handle.expect( metaFile )
@@ -188,7 +186,7 @@
main.log.error( "Could not parse counters response from " +
str( main.Cluster.active( i ) ) )
main.log.warn( repr( onosCountersRaw[ i ] ) )
- onosCounters.append( [] )
+ onosCounters.append( {} )
testCounters = {}
# make a list of all the "TestON-*" counters in ONOS
@@ -201,7 +199,7 @@
for controller in enumerate( onosCounters ):
for key, value in controller[ 1 ].iteritems():
if 'TestON' in key:
- node = str( main.Cluster.active( controller[ 0 ] ) )
+ node = main.Cluster.active( controller[ 0 ] )
try:
testCounters[ node ].append( { key: value } )
except KeyError:
@@ -213,7 +211,7 @@
consistent = main.TRUE
else:
consistent = main.FALSE
- main.log.error( "ONOS nodes have different values for counters:\n" +
+ main.log.error( "ONOS nodes have different values for counters: %s",
testCounters )
return ( onosCounters, consistent )
except Exception:
@@ -231,19 +229,19 @@
onosCounters, consistent = self.consistentCheck()
# Check for correct values
for i in range( len( main.Cluster.active() ) ):
+ node = str( main.Cluster.active( i ) )
current = onosCounters[ i ]
onosValue = None
try:
onosValue = current.get( counterName )
except AttributeError:
- node = str( main.Cluster.active( i ) )
main.log.exception( node + " counters result " +
"is not as expected" )
correctResults = main.FALSE
if onosValue == counterValue:
- main.log.info( counterName + " counter value is correct" )
+ main.log.info( "{}: {} counter value is correct".format( node, counterName ) )
else:
- main.log.error( counterName +
+ main.log.error( node + ": " + counterName +
" counter value is incorrect," +
" expected value: " + str( counterValue ) +
" current value: " + str( onosValue ) )
@@ -265,19 +263,12 @@
# Compare leaderboards
result = all( i == leaderList[ 0 ] for i in leaderList ) and\
leaderList is not None
- main.log.debug( leaderList )
- main.log.warn( result )
if result:
return ( result, leaderList )
time.sleep( 5 ) # TODO: paramerterize
main.log.error( "Inconsistent leaderboards:" + str( leaderList ) )
return ( result, leaderList )
- def generateGraph( self, testName, plotName="Plot-HA", index=2 ):
- # DEPRECATED: ONOSSetup.py now creates these graphs.
-
- main.log.debug( "HA.generateGraph() is deprecated; ONOSSetup now creates these graphs." )
-
def initialSetUp( self, serviceClean=False ):
"""
rest of initialSetup
@@ -384,13 +375,11 @@
sort_keys=True,
indent=4,
separators=( ',', ': ' ) )
- main.log.debug( "Leaders: " + output )
# check for all intent partitions
topics = []
for i in range( 14 ):
topics.append( "work-partition-" + str( i ) )
topics += extraTopics
- main.log.debug( topics )
ONOStopics = [ j[ 'topic' ] for j in parsedLeaders ]
for topic in topics:
if topic not in ONOStopics:
@@ -600,8 +589,7 @@
# install onos-app-fwd
main.step( "Install reactive forwarding app" )
- onosCli = main.Cluster.next()
- installResults = onosCli.CLI.activateApp( "org.onosproject.fwd" )
+ installResults = main.Cluster.next().CLI.activateApp( "org.onosproject.fwd" )
utilities.assert_equals( expect=main.TRUE, actual=installResults,
onpass="Install fwd successful",
onfail="Install fwd failed" )
@@ -638,7 +626,7 @@
time.sleep( 11 )
# uninstall onos-app-fwd
main.step( "Uninstall reactive forwarding app" )
- uninstallResult = onosCli.CLI.deactivateApp( "org.onosproject.fwd" )
+ uninstallResult = main.Cluster.next().CLI.deactivateApp( "org.onosproject.fwd" )
utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
onpass="Uninstall fwd successful",
onfail="Uninstall fwd failed" )
@@ -663,8 +651,8 @@
host2 = "00:00:00:00:00:" + \
str( hex( i + 10 )[ 2: ] ).zfill( 2 ).upper()
# NOTE: getHost can return None
- host1Dict = onosCli.CLI.getHost( host1 )
- host2Dict = onosCli.CLI.getHost( host2 )
+ host1Dict = main.Cluster.next().CLI.getHost( host1 )
+ host2Dict = main.Cluster.next().CLI.getHost( host2 )
host1Id = None
host2Id = None
if host1Dict and host2Dict:
@@ -698,7 +686,7 @@
onfail="Error looking up host ids" )
intentStart = time.time()
- onosIds = onosCli.getAllIntentsId()
+ onosIds = main.Cluster.next().getAllIntentsId()
main.log.info( "Submitted intents: " + str( intentIds ) )
main.log.info( "Intents in ONOS: " + str( onosIds ) )
for intent in intentIds:
@@ -711,7 +699,7 @@
else:
intentStop = None
# Print the intent states
- intents = onosCli.CLI.intents()
+ intents = main.Cluster.next().CLI.intents()
intentStates = []
installedCheck = True
main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
@@ -804,11 +792,11 @@
installedCheck = True
main.log.info( "Sleeping 60 seconds to see if intents are found" )
time.sleep( 60 )
- onosIds = onosCli.getAllIntentsId()
+ onosIds = main.Cluster.next().getAllIntentsId()
main.log.info( "Submitted intents: " + str( intentIds ) )
main.log.info( "Intents in ONOS: " + str( onosIds ) )
# Print the intent states
- intents = onosCli.CLI.intents()
+ intents = main.Cluster.next().CLI.intents()
intentStates = []
main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
count = 0
@@ -950,8 +938,6 @@
( str( count ), str( i ), str( s ) ) )
self.commonChecks()
- # Print flowrules
- main.log.debug( onosCli.CLI.flows() )
main.step( "Wait a minute then ping again" )
# the wait is above
PingResult = main.TRUE
@@ -1609,7 +1595,7 @@
args=[ main.onosSetName ] )
getResults = main.TRUE
for i in range( len( main.Cluster.active() ) ):
- node = main.Cluster.active( i )
+ node = str( main.Cluster.active( i ) )
if isinstance( getResponses[ i ], list ):
current = set( getResponses[ i ] )
if len( current ) == len( getResponses[ i ] ):
@@ -1641,7 +1627,7 @@
args=[ main.onosSetName ] )
sizeResults = main.TRUE
for i in range( len( main.Cluster.active() ) ):
- node = main.Cluster.active( i )
+ node = str( main.Cluster.active( i ) )
if size != sizeResponses[ i ]:
sizeResults = main.FALSE
main.log.error( node +
@@ -1683,7 +1669,7 @@
args=[ main.onosSetName ] )
getResults = main.TRUE
for i in range( len( main.Cluster.active() ) ):
- node = main.Cluster.active( i )
+ node = str( main.Cluster.active( i ) )
if isinstance( getResponses[ i ], list ):
current = set( getResponses[ i ] )
if len( current ) == len( getResponses[ i ] ):
@@ -1707,7 +1693,7 @@
args=[ main.onosSetName ] )
sizeResults = main.TRUE
for i in range( len( main.Cluster.active() ) ):
- node = main.Cluster.active( i )
+ node = str( main.Cluster.active( i ) )
if size != sizeResponses[ i ]:
sizeResults = main.FALSE
main.log.error( node + " expected a size of " +
@@ -1749,7 +1735,7 @@
args=[ main.onosSetName ] )
getResults = main.TRUE
for i in range( len( main.Cluster.active() ) ):
- node = main.Cluster.active( i )
+ node = str( main.Cluster.active( i ) )
if isinstance( getResponses[ i ], list ):
current = set( getResponses[ i ] )
if len( current ) == len( getResponses[ i ] ):
@@ -1773,7 +1759,7 @@
args=[ main.onosSetName ] )
sizeResults = main.TRUE
for i in range( len( main.Cluster.active() ) ):
- node = main.Cluster.active( i )
+ node = str( main.Cluster.active( i ) )
if size != sizeResponses[ i ]:
sizeResults = main.FALSE
main.log.error( node + " expected a size of " + str( size ) +
@@ -1847,7 +1833,7 @@
args=[ main.onosSetName ] )
getResults = main.TRUE
for i in range( len( main.Cluster.active() ) ):
- node = main.Cluster.active( i )
+ node = str( main.Cluster.active( i ) )
if isinstance( getResponses[ i ], list ):
current = set( getResponses[ i ] )
if len( current ) == len( getResponses[ i ] ):
@@ -1871,7 +1857,7 @@
args=[ main.onosSetName ] )
sizeResults = main.TRUE
for i in range( len( main.Cluster.active() ) ):
- node = main.Cluster.active( i )
+ node = str( main.Cluster.active( i ) )
if size != sizeResponses[ i ]:
sizeResults = main.FALSE
main.log.error( node + " expected a size of " + str( size ) +
@@ -1913,7 +1899,7 @@
args=[ main.onosSetName ] )
getResults = main.TRUE
for i in range( len( main.Cluster.active() ) ):
- node = main.Cluster.active( i )
+ node = str( main.Cluster.active( i ) )
if isinstance( getResponses[ i ], list ):
current = set( getResponses[ i ] )
if len( current ) == len( getResponses[ i ] ):
@@ -1937,7 +1923,7 @@
args=[ main.onosSetName ] )
sizeResults = main.TRUE
for i in range( len( main.Cluster.active() ) ):
- node = main.Cluster.active( i )
+ node = str( main.Cluster.active( i ) )
if size != sizeResponses[ i ]:
sizeResults = main.FALSE
main.log.error( node + " expected a size of " + str( size ) +
@@ -1979,7 +1965,7 @@
args=[ main.onosSetName ] )
getResults = main.TRUE
for i in range( len( main.Cluster.active() ) ):
- node = main.Cluster.active( i )
+ node = str( main.Cluster.active( i ) )
if isinstance( getResponses[ i ], list ):
current = set( getResponses[ i ] )
if len( current ) == len( getResponses[ i ] ):
@@ -2003,7 +1989,7 @@
args=[ main.onosSetName ] )
sizeResults = main.TRUE
for i in range( len( main.Cluster.active() ) ):
- node = main.Cluster.active( i )
+ node = str( main.Cluster.active( i ) )
if size != sizeResponses[ i ]:
sizeResults = main.FALSE
main.log.error( node + " expected a size of " + str( size ) +
@@ -2046,7 +2032,7 @@
args=[ main.onosSetName ] )
getResults = main.TRUE
for i in range( len( main.Cluster.active() ) ):
- node = main.Cluster.active( i )
+ node = str( main.Cluster.active( i ) )
if isinstance( getResponses[ i ], list ):
current = set( getResponses[ i ] )
if len( current ) == len( getResponses[ i ] ):
@@ -2070,7 +2056,7 @@
args=[ main.onosSetName ] )
sizeResults = main.TRUE
for i in range( len( main.Cluster.active() ) ):
- node = main.Cluster.active( i )
+ node = str( main.Cluster.active( i ) )
if size != sizeResponses[ i ]:
sizeResults = main.FALSE
main.log.error( node + " expected a size of " + str( size ) +
@@ -2112,7 +2098,7 @@
args=[ main.onosSetName ] )
getResults = main.TRUE
for i in range( len( main.Cluster.active() ) ):
- node = main.Cluster.active( i )
+ node = str( main.Cluster.active( i ) )
if isinstance( getResponses[ i ], list ):
current = set( getResponses[ i ] )
if len( current ) == len( getResponses[ i ] ):
@@ -2136,7 +2122,7 @@
args=[ main.onosSetName ] )
sizeResults = main.TRUE
for i in range( len( main.Cluster.active() ) ):
- node = main.Cluster.active( i )
+ node = str( main.Cluster.active( i ) )
if size != sizeResponses[ i ]:
sizeResults = main.FALSE
main.log.error( node + " expected a size of " + str( size ) +
@@ -2179,7 +2165,7 @@
args=[ main.onosSetName ] )
getResults = main.TRUE
for i in range( len( main.Cluster.active() ) ):
- node = main.Cluster.active( i )
+ node = str( main.Cluster.active( i ) )
if isinstance( getResponses[ i ], list ):
current = set( getResponses[ i ] )
if len( current ) == len( getResponses[ i ] ):
@@ -2203,7 +2189,7 @@
args=[ main.onosSetName ] )
sizeResults = main.TRUE
for i in range( len( main.Cluster.active() ) ):
- node = main.Cluster.active( i )
+ node = str( main.Cluster.active( i ) )
if size != sizeResponses[ i ]:
sizeResults = main.FALSE
main.log.error( node + " expected a size of " +
@@ -2248,7 +2234,7 @@
if node != tMapValue:
valueCheck = False
if not valueCheck:
- main.log.warn( "Values for key 'Key" + str(n) + "' do not match:" )
+ main.log.warn( "Values for key 'Key" + str( n ) + "' do not match:" )
main.log.warn( getResponses )
getCheck = getCheck and valueCheck
utilities.assert_equals( expect=True,
@@ -2611,7 +2597,6 @@
ipList = []
deviceList = []
- onosCli = main.Cluster.next()
try:
# Assign mastership to specific controllers. This assignment was
# determined for a 7 node cluser, but will work with any sized
@@ -2621,45 +2606,45 @@
if i == 1:
c = 0
ip = main.Cluster.active( c ).ip_address # ONOS1
- deviceId = onosCli.getDevice( "1000" ).get( 'id' )
+ deviceId = main.Cluster.next().getDevice( "1000" ).get( 'id' )
elif i == 2:
c = 1 % main.Cluster.numCtrls
ip = main.Cluster.active( c ).ip_address # ONOS2
- deviceId = onosCli.getDevice( "2000" ).get( 'id' )
+ deviceId = main.Cluster.next().getDevice( "2000" ).get( 'id' )
elif i == 3:
c = 1 % main.Cluster.numCtrls
ip = main.Cluster.active( c ).ip_address # ONOS2
- deviceId = onosCli.getDevice( "3000" ).get( 'id' )
+ deviceId = main.Cluster.next().getDevice( "3000" ).get( 'id' )
elif i == 4:
c = 3 % main.Cluster.numCtrls
ip = main.Cluster.active( c ).ip_address # ONOS4
- deviceId = onosCli.getDevice( "3004" ).get( 'id' )
+ deviceId = main.Cluster.next().getDevice( "3004" ).get( 'id' )
elif i == 5:
c = 2 % main.Cluster.numCtrls
ip = main.Cluster.active( c ).ip_address # ONOS3
- deviceId = onosCli.getDevice( "5000" ).get( 'id' )
+ deviceId = main.Cluster.next().getDevice( "5000" ).get( 'id' )
elif i == 6:
c = 2 % main.Cluster.numCtrls
ip = main.Cluster.active( c ).ip_address # ONOS3
- deviceId = onosCli.getDevice( "6000" ).get( 'id' )
+ deviceId = main.Cluster.next().getDevice( "6000" ).get( 'id' )
elif i == 7:
c = 5 % main.Cluster.numCtrls
ip = main.Cluster.active( c ).ip_address # ONOS6
- deviceId = onosCli.getDevice( "6007" ).get( 'id' )
+ deviceId = main.Cluster.next().getDevice( "6007" ).get( 'id' )
elif i >= 8 and i <= 17:
c = 4 % main.Cluster.numCtrls
ip = main.Cluster.active( c ).ip_address # ONOS5
dpid = '3' + str( i ).zfill( 3 )
- deviceId = onosCli.getDevice( dpid ).get( 'id' )
+ deviceId = main.Cluster.next().getDevice( dpid ).get( 'id' )
elif i >= 18 and i <= 27:
c = 6 % main.Cluster.numCtrls
ip = main.Cluster.active( c ).ip_address # ONOS7
dpid = '6' + str( i ).zfill( 3 )
- deviceId = onosCli.getDevice( dpid ).get( 'id' )
+ deviceId = main.Cluster.next().getDevice( dpid ).get( 'id' )
elif i == 28:
c = 0
ip = main.Cluster.active( c ).ip_address # ONOS1
- deviceId = onosCli.getDevice( "2800" ).get( 'id' )
+ deviceId = main.Cluster.next().getDevice( "2800" ).get( 'id' )
else:
main.log.error( "You didn't write an else statement for " +
"switch s" + str( i ) )
@@ -2667,12 +2652,12 @@
# Assign switch
assert deviceId, "No device id for s" + str( i ) + " in ONOS"
# TODO: make this controller dynamic
- roleCall = roleCall and onosCli.deviceRole( deviceId, ip )
+ roleCall = roleCall and main.Cluster.next().deviceRole( deviceId, ip )
ipList.append( ip )
deviceList.append( deviceId )
except ( AttributeError, AssertionError ):
main.log.exception( "Something is wrong with ONOS device view" )
- main.log.info( onosCli.devices() )
+ main.log.info( main.Cluster.next().devices() )
utilities.assert_equals(
expect=main.TRUE,
actual=roleCall,
@@ -2688,7 +2673,7 @@
ip = ipList[ i ]
deviceId = deviceList[ i ]
# Check assignment
- master = onosCli.getRole( deviceId ).get( 'master' )
+ master = main.Cluster.next().getRole( deviceId ).get( 'master' )
if ip in master:
roleCheck = roleCheck and main.TRUE
else:
@@ -3258,8 +3243,6 @@
port = locations[0].get( 'port' )
assert port, "port field could not be found for this host location object"
- main.log.debug( "Host: {}\nmac: {}\n location(s): {}\ndevice: {}\n port: {}".format(
- ctrl.pprint( host ), mac, ctrl.pprint( locations ), device, port ) )
# Now check if this matches where they should be
if mac and device and port:
@@ -3508,7 +3491,6 @@
switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
description = "Killing a switch to ensure it is discovered correctly"
- onosCli = main.Cluster.next()
main.case( description )
switch = main.params[ 'kill' ][ 'switch' ]
switchDPID = main.params[ 'kill' ][ 'dpid' ]
@@ -3520,7 +3502,7 @@
main.log.info( "Waiting " + str( switchSleep ) +
" seconds for switch down to be discovered" )
time.sleep( switchSleep )
- device = onosCli.getDevice( dpid=switchDPID )
+ device = main.Cluster.next().getDevice( dpid=switchDPID )
# Peek at the deleted switch
main.log.warn( "Bringing down switch " + str( device ) )
result = main.FALSE
@@ -3543,7 +3525,6 @@
switch = main.params[ 'kill' ][ 'switch' ]
switchDPID = main.params[ 'kill' ][ 'dpid' ]
links = main.params[ 'kill' ][ 'links' ].split()
- onosCli = main.Cluster.next()
description = "Adding a switch to ensure it is discovered correctly"
main.case( description )
@@ -3556,7 +3537,7 @@
main.log.info( "Waiting " + str( switchSleep ) +
" seconds for switch up to be discovered" )
time.sleep( switchSleep )
- device = onosCli.getDevice( dpid=switchDPID )
+ device = main.Cluster.next().getDevice( dpid=switchDPID )
# Peek at the deleted switch
main.log.debug( "Added device: " + str( device ) )
result = main.FALSE
@@ -3575,8 +3556,7 @@
main.case( "Start Leadership Election app" )
main.step( "Install leadership election app" )
- onosCli = main.Cluster.next()
- appResult = onosCli.CLI.activateApp( "org.onosproject.election" )
+ appResult = main.Cluster.next().CLI.activateApp( "org.onosproject.election" )
utilities.assert_equals(
expect=main.TRUE,
actual=appResult,
@@ -3584,7 +3564,7 @@
onfail="Something went wrong with installing Leadership election" )
main.step( "Run for election on each node" )
- onosCli.electionTestRun()
+ main.Cluster.next().electionTestRun()
main.Cluster.command( "electionTestRun" )
time.sleep( 5 )
sameResult, leaders = main.HA.consistentLeaderboards( main.Cluster.active() )
@@ -3901,7 +3881,7 @@
if pushedHost != onosHost:
cfgResult = False
main.log.error( "Pushed Network configuration does not match what is in " +
- "ONOS:\nPushed: {}\nONOS: {}".format( ctrl.pprint( pushedHost),
+ "ONOS:\nPushed: {}\nONOS: {}".format( ctrl.pprint( pushedHost ),
ctrl.pprint( onosHost ) ) )
utilities.assert_equals( expect=True,
actual=cfgResult,