Merge "Activate intentperf app later to make sure that it gets all cluster nodes"
diff --git a/TestON/drivers/common/cli/emulator/mininetclidriver.py b/TestON/drivers/common/cli/emulator/mininetclidriver.py
index 7541780..3a29d00 100644
--- a/TestON/drivers/common/cli/emulator/mininetclidriver.py
+++ b/TestON/drivers/common/cli/emulator/mininetclidriver.py
@@ -830,7 +830,7 @@
isReachable = main.FALSE
except pexpect.TIMEOUT:
main.log.exception( self.name + ": TIMEOUT exception" )
- self.exitFromCmd( self.hostPrompt )
+ self.exitFromCmd( [ self.hostPrompt, self.bashPrompt ] )
isReachable = main.FALSE
except pexpect.EOF:
main.log.error( self.name + ": EOF exception found" )
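
The TIMEOUT handler above now hands exitFromCmd both prompts, so after the hung command is interrupted the driver resynchronizes on whichever prompt returns first, the Mininet host prompt or the underlying bash prompt. A minimal standalone sketch of that pattern with plain pexpect; the prompt regexes and the ping_host helper are assumptions for illustration, not the driver's actual code:

    import pexpect

    HOST_PROMPT = "mininet>"    # assumed prompts; the real driver stores its own
    BASH_PROMPT = r"\$"

    def ping_host( handle, target ):
        # handle is assumed to be pexpect.spawn( ..., encoding="utf-8" )
        try:
            handle.sendline( "h1 ping -c 1 " + target )
            handle.expect( HOST_PROMPT, timeout=10 )
            return ", 0% packet loss" in handle.before
        except pexpect.TIMEOUT:
            # The command hung: interrupt it, then resync on whichever prompt
            # appears first so the next command starts from a known state.
            handle.send( "\x03" )
            handle.expect( [ HOST_PROMPT, BASH_PROMPT, pexpect.TIMEOUT ], timeout=5 )
            return False
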
@@ -2234,7 +2234,9 @@
except pexpect.EOF:
main.log.error( self.name + ": EOF exception found" )
main.log.error( self.name + ": " + self.handle.before )
- main.cleanAndExit()
+ # Do not exit the entire test when pexpect.EOF is caught
+ # FIXME: We might need to do something else here
+ return main.ERROR
except Exception:
main.log.exception( self.name + ": Uncaught exception!" )
main.cleanAndExit()
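
The second hunk turns an EOF from a fatal condition into a value the caller can act on. A standalone sketch of the same idea with plain pexpect; ping_all, the ERROR constant, and the expected output string are assumptions for illustration, not the method actually modified at line 2234:

    import pexpect

    ERROR = -1    # stand-in for TestON's main.ERROR result constant

    def ping_all( handle, prompt ):
        # handle is assumed to be pexpect.spawn( ..., encoding="utf-8" )
        try:
            handle.sendline( "pingall" )
            handle.expect( prompt, timeout=60 )
            return "Results: 0% dropped" in handle.before
        except pexpect.EOF:
            # The CLI session is gone; report it and let the caller decide
            # whether to reconnect or abort, instead of ending the whole run here.
            return ERROR
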
diff --git a/TestON/drivers/common/clidriver.py b/TestON/drivers/common/clidriver.py
index 6f8e257..fbd6bd8 100644
--- a/TestON/drivers/common/clidriver.py
+++ b/TestON/drivers/common/clidriver.py
@@ -561,18 +561,19 @@
command. It will retry multiple times until the running command is
completely killed and expected string is returned from the handle.
Required:
- expect: the expected string which indicates that the previous command
- was killed successfully.
+        expect: the expected string, or a list of such strings, that
+        indicates the previous command was killed successfully.
Optional:
retry: maximum number of ctrl+c that will be sent.
"""
+ expect = [ expect ] if isinstance( expect, str ) else expect
try:
while retry >= 0:
main.log.debug( self.name + ": sending ctrl+c to kill the command" )
self.handle.send( "\x03" )
- i = self.handle.expect( [ expect, pexpect.TIMEOUT ], timeout=3 )
+ i = self.handle.expect( expect + [ pexpect.TIMEOUT ], timeout=3 )
main.log.debug( self.handle.before )
- if i == 0:
+ if i < len( expect ):
main.log.debug( self.name + ": successfully killed the command" )
return main.TRUE
retry -= 1
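
With the normalization above, exitFromCmd accepts either a single prompt or a list of candidate prompts, and the TIMEOUT sentinel is always appended as the last entry of the expect list. The same logic as a standalone sketch with plain pexpect (the function name, return values, and retry default are illustrative, not the TestON signature):

    import pexpect

    def exit_from_cmd( handle, expect, retry=10 ):
        """Send ctrl+c until one of the expected prompts is seen."""
        # Accept a single prompt string or a list of prompts.
        expect = [ expect ] if isinstance( expect, str ) else expect
        while retry >= 0:
            handle.send( "\x03" )                                # ctrl+c
            i = handle.expect( expect + [ pexpect.TIMEOUT ], timeout=3 )
            if i < len( expect ):                                # a prompt matched
                return True
            retry -= 1
        return False

    # Both call forms are now accepted:
    #   exit_from_cmd( handle, "mininet>" )
    #   exit_from_cmd( handle, [ "mininet>", r"\$" ] )
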
diff --git a/TestON/tests/SCPF/SCPFmastershipFailoverLat/SCPFmastershipFailoverLat.py b/TestON/tests/SCPF/SCPFmastershipFailoverLat/SCPFmastershipFailoverLat.py
index 208ab19..24d10a6 100644
--- a/TestON/tests/SCPF/SCPFmastershipFailoverLat/SCPFmastershipFailoverLat.py
+++ b/TestON/tests/SCPF/SCPFmastershipFailoverLat/SCPFmastershipFailoverLat.py
@@ -185,21 +185,21 @@
eventOutput = main.Cluster.active( CLInum ).CLI.events( args='-a' ).split( "\r\n" )
for line in reversed( eventOutput ):
timestamp = line[ :23 ] if line[ 19 ] != '-' else line[ :19 ] + '.000'
- if "INSTANCE_DEACTIVATED" in line and len( instanceDeactivatedLats ) == CLInum:
- deactivateTime = float( datetime.datetime.strptime(
- timestamp, "%Y-%m-%dT%H:%M:%S.%f" ).strftime( '%s.%f' ) ) * 1000.0
- instanceDeactivatedLats.append( deactivateTime - time1 )
- elif "MASTER_CHANGED" in line and len( masterChangedLats ) == CLInum:
- changedTime = float( datetime.datetime.strptime(
- timestamp, "%Y-%m-%dT%H:%M:%S.%f" ).strftime( '%s.%f' ) ) * 1000.0
- masterChangedLats.append( changedTime - time1 )
- if len( instanceDeactivatedLats ) > CLInum and len( masterChangedLats ) > CLInum:
+ timestamp = float( datetime.datetime.strptime( timestamp, "%Y-%m-%dT%H:%M:%S.%f" ).strftime( '%s.%f' ) ) * 1000.0
+ if timestamp - time1 >= 0:
+ if "INSTANCE_DEACTIVATED" in line:
+ instanceDeactivatedLats.append( timestamp - time1 )
+ elif "MASTER_CHANGED" in line:
+ masterChangedLats.append( timestamp - time1 )
+ else:
break
instanceDeactivatedLats.sort()
+ masterChangedLats.sort()
instanceDeactivated = instanceDeactivatedLats[ 0 ]
-        eventLatCheck = True if masterChangedLats and instanceDeactivated else False
+        masterChanged = masterChangedLats[ 0 ]
+        eventLatCheck = True if masterChanged and instanceDeactivated else False
if not eventLatCheck:
main.log.warn( "Latencies were NOT obtained from 'events' successfully." )
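
The rewritten loop walks the 'events' output newest-first, converts each timestamp to epoch milliseconds, collects every INSTANCE_DEACTIVATED and MASTER_CHANGED latency that falls at or after the kill time time1, and stops at the first older event; the earliest value of each kind becomes the measured latency. A standalone sketch of that parsing (the function name and the empty-list guard are added for illustration; the test itself indexes the sorted lists directly):

    import datetime

    def earliest_latencies( event_lines, time1 ):
        """Return the earliest INSTANCE_DEACTIVATED and MASTER_CHANGED latencies in ms."""
        deactivated, changed = [], []
        for line in reversed( event_lines ):       # walk newest-first
            # Normalize the timestamp so '%f' always has digits to match.
            ts = line[ :23 ] if line[ 19 ] != '-' else line[ :19 ] + '.000'
            # '%s' (epoch seconds) is a platform strftime extension; works on Linux.
            ts = float( datetime.datetime.strptime(
                ts, "%Y-%m-%dT%H:%M:%S.%f" ).strftime( '%s.%f' ) ) * 1000.0
            if ts - time1 < 0:
                break                              # older events predate the kill
            if "INSTANCE_DEACTIVATED" in line:
                deactivated.append( ts - time1 )
            elif "MASTER_CHANGED" in line:
                changed.append( ts - time1 )
        return ( min( deactivated ) if deactivated else None,
                 min( changed ) if changed else None )
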
@@ -221,20 +221,26 @@
validDataCheck = False
if tsharkLatCheck:
main.log.info( "instanceDeactivated: " + str( instanceDeactivated ) )
- main.log.info( "roleRequestLat - instanceDeactivated: " + str( roleRequestLat - instanceDeactivated ) )
+ main.log.info( "masterChanged: " + str( masterChanged ) )
+ main.log.info( "roleRequestLat: " + str( roleRequestLat ) )
if iteration >= main.warmUp:
main.log.info( "Verifying that the data are valid." ) # Don't record data during a warm-up
- validDataCheck = roleRequestLat - instanceDeactivated >= 0 and \
- instanceDeactivated >= 0
+ validDataCheck = roleRequestLat >= 0 and \
+ instanceDeactivated >= 0 and \
+ masterChanged >= 0
if not validDataCheck:
main.log.warn( "Data are NOT valid." )
if eventLatCheck and tsharkLatCheck and validDataCheck:
main.log.info( "Saving data..." )
- main.latencyData[ 'kill_to_deactivation' ]\
- .append( instanceDeactivated )
- main.latencyData[ 'deactivation_to_role_request' ]\
- .append( roleRequestLat - instanceDeactivated )
+ if roleRequestLat >= instanceDeactivated:
+ main.latencyData[ 'kill_to_deactivation' ].append( instanceDeactivated )
+ main.latencyData[ 'deactivation_to_role_request' ].append( roleRequestLat - instanceDeactivated )
+ else:
+ main.latencyData[ 'kill_to_deactivation' ].append( roleRequestLat )
+ main.latencyData[ 'deactivation_to_role_request' ].append( 0 )
+ main.log.info( "kill_to_deactivation: " + str( main.latencyData[ 'kill_to_deactivation' ][ -1 ] ) )
+ main.log.info( "deactivation_to_role_request: " + str( main.latencyData[ 'deactivation_to_role_request' ][ -1 ] ) )
# Restart ONOS node
main.log.info( "Restart ONOS node " + strNodeNumToKill + " and checking status of restart." )
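
The saving branch above now derives both recorded metrics from roleRequestLat and instanceDeactivated, clamping deactivation_to_role_request to zero when the role request was captured before the deactivation event. That rule in isolation (the helper name is illustrative):

    def split_latency( instance_deactivated, role_request_lat ):
        """Split the measured span into kill->deactivation and deactivation->role-request."""
        if role_request_lat >= instance_deactivated:
            return instance_deactivated, role_request_lat - instance_deactivated
        # Role request observed first: attribute the whole span to the first leg.
        return role_request_lat, 0

    # For example:
    #   split_latency( 12.5, 40.0 ) -> ( 12.5, 27.5 )
    #   split_latency( 12.5,  8.0 ) -> ( 8.0, 0 )
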