Update HA tests: add shared Mininet topology and harden cluster/minority restart checks
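
Add TestON/dependencies/topo-HA.py, a 28-switch Mininet topology used by the
HA tests, and make HATestClusterRestart and HATestMinorityRestart more robust:

- wrap device-role assignment and flow queries in try/except with asserts,
  logging the ONOS device/flow view instead of crashing the case
- record submitted intent IDs, compare them against the IDs ONOS reports,
  and print per-intent states when intent or ping checks fail
- check that every host has an IP and that ONOS reports exactly one SCC,
  and compare ONOS hosts against Mininet in the topology re-check
- parse hosts responses as JSON, stop Mininet during cleanup, wait for the
  shell prompt between scp commands, and clean up indentation and long lines

Not part of the patch, just a hedged sketch of how the new topology could be
sanity-checked on a dev box (assumes Mininet is installed and this runs as
root; the module name 'topoHA' is arbitrary):

    import imp
    from mininet.net import Mininet

    mod = imp.load_source( 'topoHA', 'TestON/dependencies/topo-HA.py' )
    net = Mininet( topo=mod.MyTopo() )  # Mininet's default controller
    net.start()
    # topo-HA.py defines 28 switches with one host each
    assert len( net.switches ) == 28 and len( net.hosts ) == 28
    net.pingAll()
    net.stop()
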
diff --git a/TestON/dependencies/topo-HA.py b/TestON/dependencies/topo-HA.py
new file mode 100644
index 0000000..65613d6
--- /dev/null
+++ b/TestON/dependencies/topo-HA.py
@@ -0,0 +1,63 @@
+from mininet.topo import Topo
+class MyTopo( Topo ):
+    """A 28-switch, 28-host test topology: a six-switch core, two
+       aggregation switches ( s4, s7 ) and ten leaf switches under each."""
+    def __init__( self ):
+        Topo.__init__( self )
+        # Core switches, with fixed DPIDs so the tests can look them up
+        topSwitch = self.addSwitch( 's1', dpid='1000'.zfill( 16 ) )
+        leftTopSwitch = self.addSwitch( 's2', dpid='2000'.zfill( 16 ) )
+        rightTopSwitch = self.addSwitch( 's5', dpid='5000'.zfill( 16 ) )
+        leftBotSwitch = self.addSwitch( 's3', dpid='3000'.zfill( 16 ) )
+        rightBotSwitch = self.addSwitch( 's6', dpid='6000'.zfill( 16 ) )
+        midBotSwitch = self.addSwitch( 's28', dpid='2800'.zfill( 16 ) )
+
+        # One host per core switch
+        topHost = self.addHost( 'h1' )
+        leftTopHost = self.addHost( 'h2' )
+        rightTopHost = self.addHost( 'h5' )
+        leftBotHost = self.addHost( 'h3' )
+        rightBotHost = self.addHost( 'h6' )
+        midBotHost = self.addHost( 'h28' )
+        self.addLink( topSwitch, topHost )
+        self.addLink( leftTopSwitch, leftTopHost )
+        self.addLink( rightTopSwitch, rightTopHost )
+        self.addLink( leftBotSwitch, leftBotHost )
+        self.addLink( rightBotSwitch, rightBotHost )
+        self.addLink( midBotSwitch, midBotHost )
+        self.addLink( leftTopSwitch, rightTopSwitch )
+        self.addLink( topSwitch, leftTopSwitch )
+        self.addLink( topSwitch, rightTopSwitch )
+        self.addLink( leftTopSwitch, leftBotSwitch )
+        self.addLink( rightTopSwitch, rightBotSwitch )
+        self.addLink( leftBotSwitch, midBotSwitch )
+        self.addLink( midBotSwitch, rightBotSwitch )
+
+        # Aggregation switches, one under each bottom core switch
+        agg1Switch = self.addSwitch( 's4', dpid='3004'.zfill( 16 ) )
+        agg2Switch = self.addSwitch( 's7', dpid='6007'.zfill( 16 ) )
+        agg1Host = self.addHost( 'h4' )
+        agg2Host = self.addHost( 'h7' )
+        self.addLink( agg1Switch, agg1Host )
+        self.addLink( agg2Switch, agg2Host )
+        self.addLink( agg1Switch, leftBotSwitch )
+        self.addLink( agg2Switch, rightBotSwitch )
+
+        # Leaf switches: s8-s17 hang off s4, s18-s27 hang off s7
+        for i in range( 10 ):
+            num = str( i + 8 )
+            switch = self.addSwitch(
+                's' + num, dpid=( '30' + num.zfill( 2 ) ).zfill( 16 ) )
+            host = self.addHost( 'h' + num )
+            self.addLink( switch, host )
+            self.addLink( switch, agg1Switch )
+
+        for i in range( 10 ):
+            num = str( i + 18 )
+            switch = self.addSwitch(
+                's' + num, dpid=( '60' + num.zfill( 2 ) ).zfill( 16 ) )
+            host = self.addHost( 'h' + num )
+            self.addLink( switch, host )
+            self.addLink( switch, agg2Switch )
+
+topos = { 'mytopo': ( lambda: MyTopo() ) }
diff --git a/TestON/tests/HATestClusterRestart/HATestClusterRestart.py b/TestON/tests/HATestClusterRestart/HATestClusterRestart.py
index e9beb0c..1f41027 100644
--- a/TestON/tests/HATestClusterRestart/HATestClusterRestart.py
+++ b/TestON/tests/HATestClusterRestart/HATestClusterRestart.py
@@ -31,14 +31,18 @@
CASE1 is to compile ONOS and push it to the test machines
Startup sequence:
- git pull
- mvn clean install
- onos-package
cell <name>
onos-verify-cell
NOTE: temporary - onos-remove-raft-logs
+ onos-uninstall
+ start mininet
+ git pull
+ mvn clean install
+ onos-package
onos-install -f
onos-wait-for-start
+ start cli sessions
+ start tcpdump
"""
main.log.report( "ONOS HA test: Restart all ONOS nodes - " +
"initialization" )
@@ -109,8 +113,7 @@
main.step( "Compiling the latest version of ONOS" )
if PULLCODE:
- # TODO Configure branch in params
- main.step( "Git checkout and pull master" )
+ main.step( "Git checkout and pull " + gitBranch )
main.ONOSbench.gitCheckout( gitBranch )
gitPullResult = main.ONOSbench.gitPull()
@@ -145,7 +148,6 @@
and onos7InstallResult
main.step( "Checking if ONOS is up yet" )
- # TODO check bundle:list?
for i in range( 2 ):
onos1Isup = main.ONOSbench.isup( ONOS1Ip )
if not onos1Isup:
@@ -209,8 +211,8 @@
and onosIsupResult and cliResults )
utilities.assert_equals( expect=main.TRUE, actual=case1Result,
- onpass="Test startup successful",
- onfail="Test startup NOT successful" )
+ onpass="Test startup successful",
+ onfail="Test startup NOT successful" )
if case1Result == main.FALSE:
main.cleanup()
@@ -266,118 +268,131 @@
# Manually assign mastership to the controller we want
roleCall = main.TRUE
roleCheck = main.TRUE
-
- # Assign switch
- deviceId = main.ONOScli1.getDevice( "1000" ).get( 'id' )
- roleCall = roleCall and main.ONOScli1.deviceRole(
- deviceId,
- ONOS1Ip )
- # Check assignment
- if ONOS1Ip in main.ONOScli1.getRole( deviceId ).get( 'master' ):
- roleCheck = roleCheck and main.TRUE
- else:
- roleCheck = roleCheck and main.FALSE
-
- # Assign switch
- deviceId = main.ONOScli1.getDevice( "2800" ).get( 'id' )
- roleCall = roleCall and main.ONOScli1.deviceRole(
- deviceId,
- ONOS1Ip )
- # Check assignment
- if ONOS1Ip in main.ONOScli1.getRole( deviceId ).get( 'master' ):
- roleCheck = roleCheck and main.TRUE
- else:
- roleCheck = roleCheck and main.FALSE
-
- # Assign switch
- deviceId = main.ONOScli1.getDevice( "2000" ).get( 'id' )
- roleCall = roleCall and main.ONOScli1.deviceRole(
- deviceId,
- ONOS2Ip )
- # Check assignment
- if ONOS2Ip in main.ONOScli1.getRole( deviceId ).get( 'master' ):
- roleCheck = roleCheck and main.TRUE
- else:
- roleCheck = roleCheck and main.FALSE
-
- # Assign switch
- deviceId = main.ONOScli1.getDevice( "3000" ).get( 'id' )
- roleCall = roleCall and main.ONOScli1.deviceRole(
- deviceId,
- ONOS2Ip )
- # Check assignment
- if ONOS2Ip in main.ONOScli1.getRole( deviceId ).get( 'master' ):
- roleCheck = roleCheck and main.TRUE
- else:
- roleCheck = roleCheck and main.FALSE
-
- # Assign switch
- deviceId = main.ONOScli1.getDevice( "5000" ).get( 'id' )
- roleCall = roleCall and main.ONOScli1.deviceRole(
- deviceId,
- ONOS3Ip )
- # Check assignment
- if ONOS3Ip in main.ONOScli1.getRole( deviceId ).get( 'master' ):
- roleCheck = roleCheck and main.TRUE
- else:
- roleCheck = roleCheck and main.FALSE
-
- # Assign switch
- deviceId = main.ONOScli1.getDevice( "6000" ).get( 'id' )
- roleCall = roleCall and main.ONOScli1.deviceRole(
- deviceId,
- ONOS3Ip )
- # Check assignment
- if ONOS3Ip in main.ONOScli1.getRole( deviceId ).get( 'master' ):
- roleCheck = roleCheck and main.TRUE
- else:
- roleCheck = roleCheck and main.FALSE
-
- # Assign switch
- deviceId = main.ONOScli1.getDevice( "3004" ).get( 'id' )
- roleCall = roleCall and main.ONOScli1.deviceRole(
- deviceId,
- ONOS4Ip )
- # Check assignment
- if ONOS4Ip in main.ONOScli1.getRole( deviceId ).get( 'master' ):
- roleCheck = roleCheck and main.TRUE
- else:
- roleCheck = roleCheck and main.FALSE
-
- for i in range( 8, 18 ):
- dpid = '3' + str( i ).zfill( 3 )
- deviceId = main.ONOScli1.getDevice( dpid ).get( 'id' )
+ try:
+ # Assign switch
+ deviceId = main.ONOScli1.getDevice( "1000" ).get( 'id' )
+ assert deviceId, "No device id for s1 in ONOS"
roleCall = roleCall and main.ONOScli1.deviceRole(
deviceId,
- ONOS5Ip )
+ ONOS1Ip )
# Check assignment
- if ONOS5Ip in main.ONOScli1.getRole( deviceId ).get( 'master' ):
+ if ONOS1Ip in main.ONOScli1.getRole( deviceId ).get( 'master' ):
roleCheck = roleCheck and main.TRUE
else:
roleCheck = roleCheck and main.FALSE
- deviceId = main.ONOScli1.getDevice( "6007" ).get( 'id' )
- roleCall = roleCall and main.ONOScli1.deviceRole(
- deviceId,
- ONOS6Ip )
- # Check assignment
- if ONOS6Ip in main.ONOScli1.getRole( deviceId ).get( 'master' ):
- roleCheck = roleCheck and main.TRUE
- else:
- roleCheck = roleCheck and main.FALSE
-
- for i in range( 18, 28 ):
- dpid = '6' + str( i ).zfill( 3 )
- deviceId = main.ONOScli1.getDevice( dpid ).get( 'id' )
+ # Assign switch
+ deviceId = main.ONOScli1.getDevice( "2800" ).get( 'id' )
+ assert deviceId, "No device id for s28 in ONOS"
roleCall = roleCall and main.ONOScli1.deviceRole(
deviceId,
- ONOS7Ip )
+ ONOS1Ip )
# Check assignment
- if ONOS7Ip in main.ONOScli1.getRole( deviceId ).get( 'master' ):
+ if ONOS1Ip in main.ONOScli1.getRole( deviceId ).get( 'master' ):
roleCheck = roleCheck and main.TRUE
else:
roleCheck = roleCheck and main.FALSE
+ # Assign switch
+ deviceId = main.ONOScli1.getDevice( "2000" ).get( 'id' )
+ assert deviceId, "No device id for s2 in ONOS"
+ roleCall = roleCall and main.ONOScli1.deviceRole(
+ deviceId,
+ ONOS2Ip )
+ # Check assignment
+ if ONOS2Ip in main.ONOScli1.getRole( deviceId ).get( 'master' ):
+ roleCheck = roleCheck and main.TRUE
+ else:
+ roleCheck = roleCheck and main.FALSE
+
+ # Assign switch
+ deviceId = main.ONOScli1.getDevice( "3000" ).get( 'id' )
+ assert deviceId, "No device id for s3 in ONOS"
+ roleCall = roleCall and main.ONOScli1.deviceRole(
+ deviceId,
+ ONOS2Ip )
+ # Check assignment
+ if ONOS2Ip in main.ONOScli1.getRole( deviceId ).get( 'master' ):
+ roleCheck = roleCheck and main.TRUE
+ else:
+ roleCheck = roleCheck and main.FALSE
+
+ # Assign switch
+ deviceId = main.ONOScli1.getDevice( "5000" ).get( 'id' )
+ assert deviceId, "No device id for s5 in ONOS"
+ roleCall = roleCall and main.ONOScli1.deviceRole(
+ deviceId,
+ ONOS3Ip )
+ # Check assignment
+ if ONOS3Ip in main.ONOScli1.getRole( deviceId ).get( 'master' ):
+ roleCheck = roleCheck and main.TRUE
+ else:
+ roleCheck = roleCheck and main.FALSE
+
+ # Assign switch
+ deviceId = main.ONOScli1.getDevice( "6000" ).get( 'id' )
+ assert deviceId, "No device id for s6 in ONOS"
+ roleCall = roleCall and main.ONOScli1.deviceRole(
+ deviceId,
+ ONOS3Ip )
+ # Check assignment
+ if ONOS3Ip in main.ONOScli1.getRole( deviceId ).get( 'master' ):
+ roleCheck = roleCheck and main.TRUE
+ else:
+ roleCheck = roleCheck and main.FALSE
+
+ # Assign switch
+ deviceId = main.ONOScli1.getDevice( "3004" ).get( 'id' )
+ assert deviceId, "No device id for s4 in ONOS"
+ roleCall = roleCall and main.ONOScli1.deviceRole(
+ deviceId,
+ ONOS4Ip )
+ # Check assignment
+ if ONOS4Ip in main.ONOScli1.getRole( deviceId ).get( 'master' ):
+ roleCheck = roleCheck and main.TRUE
+ else:
+ roleCheck = roleCheck and main.FALSE
+
+ for i in range( 8, 18 ):
+ dpid = '3' + str( i ).zfill( 3 )
+ deviceId = main.ONOScli1.getDevice( dpid ).get( 'id' )
+ assert deviceId, "No device id for s%i in ONOS" % i
+ roleCall = roleCall and main.ONOScli1.deviceRole(
+ deviceId,
+ ONOS5Ip )
+ # Check assignment
+ if ONOS5Ip in main.ONOScli1.getRole( deviceId ).get( 'master' ):
+ roleCheck = roleCheck and main.TRUE
+ else:
+ roleCheck = roleCheck and main.FALSE
+
+ deviceId = main.ONOScli1.getDevice( "6007" ).get( 'id' )
+ assert deviceId, "No device id for s7 in ONOS"
+ roleCall = roleCall and main.ONOScli1.deviceRole(
+ deviceId,
+ ONOS6Ip )
+ # Check assignment
+ if ONOS6Ip in main.ONOScli1.getRole( deviceId ).get( 'master' ):
+ roleCheck = roleCheck and main.TRUE
+ else:
+ roleCheck = roleCheck and main.FALSE
+
+ for i in range( 18, 28 ):
+ dpid = '6' + str( i ).zfill( 3 )
+ deviceId = main.ONOScli1.getDevice( dpid ).get( 'id' )
+ assert deviceId, "No device id for s%i in ONOS" % i
+ roleCall = roleCall and main.ONOScli1.deviceRole(
+ deviceId,
+ ONOS7Ip )
+ # Check assignment
+ if ONOS7Ip in main.ONOScli1.getRole( deviceId ).get( 'master' ):
+ roleCheck = roleCheck and main.TRUE
+ else:
+ roleCheck = roleCheck and main.FALSE
+ except ( AttributeError, AssertionError ):
+ main.log.exception( "Something is wrong with ONOS device view" )
+ main.log.info( main.ONOScli1.devices() )
+
utilities.assert_equals(
expect=main.TRUE,
actual=roleCall,
@@ -403,6 +418,7 @@
# FIXME: we must reinstall intents until we have a persistant
# datastore!
import time
+ import json
main.log.report( "Adding host intents" )
main.case( "Adding host Intents" )
@@ -444,8 +460,11 @@
time.sleep( 10 )
main.step( "Add host intents" )
+ intentIds = []
# TODO: move the host numbers to params
+ # Maybe look at all the paths we ping?
intentAddResult = True
+ hostResult = main.TRUE
for i in range( 8, 18 ):
main.log.info( "Adding host intent between h" + str( i ) +
" and h" + str( i + 10 ) )
@@ -463,45 +482,95 @@
host2Id = host2Dict.get( 'id', None )
if host1Id and host2Id:
- tmpResult = main.ONOScli1.addHostIntent(
+ tmpId = main.ONOScli1.addHostIntent(
host1Id,
host2Id )
+ main.log.info( "Added intent with id: " + tmpId )
+ intentIds.append( tmpId )
else:
main.log.error( "Error, getHost() failed" )
main.log.warn( json.dumps( json.loads( main.ONOScli1.hosts() ),
sort_keys=True,
indent=4,
separators=( ',', ': ' ) ) )
- tmpResult = main.FALSE
- intentAddResult = bool( pingResult and intentAddResult
- and tmpResult )
- # TODO Check that intents were added?
+ hostResult = main.FALSE
+ onosIds = main.ONOScli1.getAllIntentsId()
+ main.log.info( "Submitted intents: " + str( intentIds ) )
+ main.log.info( "Intents in ONOS: " + str( onosIds ) )
+ for intent in intentIds:
+ if intent in onosIds:
+ pass # intent submitted is still in onos
+ else:
+ intentAddResult = False
# Print the intent states
- intents = main.ONOScli1.intents( )
+ intents = main.ONOScli1.intents()
intentStates = []
+ main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
+ count = 0
for intent in json.loads( intents ): # Iter through intents of a node
- intentStates.append( intent.get( 'state', None ) )
- out = [ (i, intentStates.count( i ) ) for i in set( intentStates ) ]
- main.log.info( dict( out ) )
-
+ state = intent.get( 'state', None )
+ intentId = intent.get( 'id', None )
+ intentStates.append( ( intentId, state ) )
+ # add submitted intents not in the store
+ tmplist = [ i for i, s in intentStates ]
+ missingIntents = False
+ for i in intentIds:
+ if i not in tmplist:
+ intentStates.append( ( i, " - " ) )
+ missingIntents = True
+ intentStates.sort()
+ for i, s in intentStates:
+ count += 1
+ main.log.info( "%-6s%-15s%-15s" %
+ ( str( count ), str( i ), str( s ) ) )
+ intentAddResult = bool( pingResult and hostResult and intentAddResult
+ and not missingIntents )
utilities.assert_equals(
expect=True,
actual=intentAddResult,
onpass="Pushed host intents to ONOS",
onfail="Error in pushing host intents to ONOS" )
- # TODO Check if intents all exist in datastore
+
+ if not intentAddResult:
+ import time
+ main.log.info( "Sleeping 60 seconds to see if intents are found" )
+ time.sleep( 60 )
+ onosIds = main.ONOScli1.getAllIntentsId()
+ main.log.info( "Submitted intents: " + str( intentIds ) )
+ main.log.info( "Intents in ONOS: " + str( onosIds ) )
+ # Print the intent states
+ intents = main.ONOScli1.intents()
+ intentStates = []
+ main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
+ count = 0
+ for intent in json.loads( intents ):
+ # Iter through intents of a node
+ state = intent.get( 'state', None )
+ intentId = intent.get( 'id', None )
+ intentStates.append( ( intentId, state ) )
+ # add submitted intents not in the store
+ tmplist = [ i for i, s in intentStates ]
+ for i in intentIds:
+ if i not in tmplist:
+ intentStates.append( ( i, " - " ) )
+ intentStates.sort()
+ for i, s in intentStates:
+ count += 1
+ main.log.info( "%-6s%-15s%-15s" %
+ ( str( count ), str( i ), str( s ) ) )
def CASE4( self, main ):
"""
Ping across added host intents
"""
+ import json
description = " Ping across added host intents"
main.log.report( description )
main.case( description )
PingResult = main.TRUE
for i in range( 8, 18 ):
- ping = main.Mininet1.pingHost(
- src="h" + str( i ), target="h" + str( i + 10 ) )
+ ping = main.Mininet1.pingHost( src="h" + str( i ),
+ target="h" + str( i + 10 ) )
PingResult = PingResult and ping
if ping == main.FALSE:
main.log.warn( "Ping failed between h" + str( i ) +
@@ -512,7 +581,7 @@
if PingResult == main.FALSE:
main.log.report(
"Intents have not been installed correctly, pings failed." )
- #TODO: pretty print
+ # TODO: pretty print
main.log.warn( "ONSO1 intents: " )
main.log.warn( json.dumps( json.loads( main.ONOScli1.intents() ),
sort_keys=True,
@@ -526,6 +595,22 @@
actual=PingResult,
onpass="Intents have been installed correctly and pings work",
onfail="Intents have not been installed correctly, pings failed." )
+ if PingResult is not main.TRUE:
+ # Print the intent states
+ intents = main.ONOScli1.intents()
+ intentStates = []
+ main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
+ count = 0
+ # Iter through intents of a node
+ for intent in json.loads( intents ):
+ state = intent.get( 'state', None )
+ intentId = intent.get( 'id', None )
+ intentStates.append( ( intentId, state ) )
+ intentStates.sort()
+ for i, s in intentStates:
+ count += 1
+ main.log.info( "%-6s%-15s%-15s" %
+ ( str( count ), str( i ), str( s ) ) )
def CASE5( self, main ):
"""
@@ -789,29 +874,39 @@
main.step( "Get the flows from each controller" )
global flowState
flowState = []
- ONOS1Flows = main.ONOScli1.flows( jsonFormat=True )
- ONOS2Flows = main.ONOScli2.flows( jsonFormat=True )
- ONOS3Flows = main.ONOScli3.flows( jsonFormat=True )
- ONOS4Flows = main.ONOScli4.flows( jsonFormat=True )
- ONOS5Flows = main.ONOScli5.flows( jsonFormat=True )
- ONOS6Flows = main.ONOScli6.flows( jsonFormat=True )
- ONOS7Flows = main.ONOScli7.flows( jsonFormat=True )
- ONOS1FlowsJson = json.loads( ONOS1Flows )
- ONOS2FlowsJson = json.loads( ONOS2Flows )
- ONOS3FlowsJson = json.loads( ONOS3Flows )
- ONOS4FlowsJson = json.loads( ONOS4Flows )
- ONOS5FlowsJson = json.loads( ONOS5Flows )
- ONOS6FlowsJson = json.loads( ONOS6Flows )
- ONOS7FlowsJson = json.loads( ONOS7Flows )
flowCheck = main.FALSE
- if "Error" in ONOS1Flows or not ONOS1Flows\
- or "Error" in ONOS2Flows or not ONOS2Flows\
- or "Error" in ONOS3Flows or not ONOS3Flows\
- or "Error" in ONOS4Flows or not ONOS4Flows\
- or "Error" in ONOS5Flows or not ONOS5Flows\
- or "Error" in ONOS6Flows or not ONOS6Flows\
- or "Error" in ONOS7Flows or not ONOS7Flows:
- main.log.report( "Error in getting ONOS intents" )
+ try:
+ ONOS1Flows = main.ONOScli1.flows( jsonFormat=True )
+ ONOS2Flows = main.ONOScli2.flows( jsonFormat=True )
+ ONOS3Flows = main.ONOScli3.flows( jsonFormat=True )
+ ONOS4Flows = main.ONOScli4.flows( jsonFormat=True )
+ ONOS5Flows = main.ONOScli5.flows( jsonFormat=True )
+ ONOS6Flows = main.ONOScli6.flows( jsonFormat=True )
+ ONOS7Flows = main.ONOScli7.flows( jsonFormat=True )
+ assert ONOS1Flows, "ONOS1 Flows should not be empty"
+ assert ONOS2Flows, "ONOS2 Flows should not be empty"
+ assert ONOS3Flows, "ONOS3 Flows should not be empty"
+ assert ONOS4Flows, "ONOS4 Flows should not be empty"
+ assert ONOS5Flows, "ONOS5 Flows should not be empty"
+ assert ONOS6Flows, "ONOS6 Flows should not be empty"
+ assert ONOS7Flows, "ONOS7 Flows should not be empty"
+ assert "Error" not in ONOS1Flows, "ONOS1 Flows contains 'Error'"
+ assert "Error" not in ONOS2Flows, "ONOS2 Flows contains 'Error'"
+ assert "Error" not in ONOS3Flows, "ONOS3 Flows contains 'Error'"
+ assert "Error" not in ONOS4Flows, "ONOS4 Flows contains 'Error'"
+ assert "Error" not in ONOS5Flows, "ONOS5 Flows contains 'Error'"
+ assert "Error" not in ONOS6Flows, "ONOS6 Flows contains 'Error'"
+ assert "Error" not in ONOS7Flows, "ONOS7 Flows contains 'Error'"
+ ONOS1FlowsJson = json.loads( ONOS1Flows )
+ ONOS2FlowsJson = json.loads( ONOS2Flows )
+ ONOS3FlowsJson = json.loads( ONOS3Flows )
+ ONOS4FlowsJson = json.loads( ONOS4Flows )
+ ONOS5FlowsJson = json.loads( ONOS5Flows )
+ ONOS6FlowsJson = json.loads( ONOS6Flows )
+ ONOS7FlowsJson = json.loads( ONOS7Flows )
+ except ( ValueError, AssertionError ): # From json.loads, or asserts
+ main.log.exception( "One or more 'flows' responses from " +
+ "ONOS couldn't be decoded." )
main.log.warn( "ONOS1 flows repsponse: " + ONOS1Flows )
main.log.warn( "ONOS2 flows repsponse: " + ONOS2Flows )
main.log.warn( "ONOS3 flows repsponse: " + ONOS3Flows )
@@ -819,38 +914,48 @@
main.log.warn( "ONOS5 flows repsponse: " + ONOS5Flows )
main.log.warn( "ONOS6 flows repsponse: " + ONOS6Flows )
main.log.warn( "ONOS7 flows repsponse: " + ONOS7Flows )
- elif len( ONOS1FlowsJson ) == len( ONOS2FlowsJson )\
- and len( ONOS1FlowsJson ) == len( ONOS3FlowsJson )\
- and len( ONOS1FlowsJson ) == len( ONOS4FlowsJson )\
- and len( ONOS1FlowsJson ) == len( ONOS5FlowsJson )\
- and len( ONOS1FlowsJson ) == len( ONOS6FlowsJson )\
- and len( ONOS1FlowsJson ) == len( ONOS7FlowsJson ):
+ else: # No exceptions
+ if len( ONOS1FlowsJson ) == len( ONOS2FlowsJson )\
+ and len( ONOS1FlowsJson ) == len( ONOS3FlowsJson )\
+ and len( ONOS1FlowsJson ) == len( ONOS4FlowsJson )\
+ and len( ONOS1FlowsJson ) == len( ONOS5FlowsJson )\
+ and len( ONOS1FlowsJson ) == len( ONOS6FlowsJson )\
+ and len( ONOS1FlowsJson ) == len( ONOS7FlowsJson ):
# TODO: Do a better check, maybe compare flows on switches?
- flowState = ONOS1Flows
- flowCheck = main.TRUE
- main.log.report( "Flow count is consistent across all ONOS nodes" )
- else:
- main.log.warn( "ONOS1 flows: " +
- json.dumps( ONOS1FlowsJson, sort_keys=True,
- indent=4, separators=( ',', ': ' ) ) )
- main.log.warn( "ONOS2 flows: " +
- json.dumps( ONOS2FlowsJson, sort_keys=True,
- indent=4, separators=( ',', ': ' ) ) )
- main.log.warn( "ONOS3 flows: " +
- json.dumps( ONOS3FlowsJson, sort_keys=True,
- indent=4, separators=( ',', ': ' ) ) )
- main.log.warn( "ONOS4 flows: " +
- json.dumps( ONOS4FlowsJson, sort_keys=True,
- indent=4, separators=( ',', ': ' ) ) )
- main.log.warn( "ONOS5 flows: " +
- json.dumps( ONOS5FlowsJson, sort_keys=True,
- indent=4, separators=( ',', ': ' ) ) )
- main.log.warn( "ONOS6 flows: " +
- json.dumps( ONOS6FlowsJson, sort_keys=True,
- indent=4, separators=( ',', ': ' ) ) )
- main.log.warn( "ONOS7 flows: " +
- json.dumps( ONOS7FlowsJson, sort_keys=True,
- indent=4, separators=( ',', ': ' ) ) )
+ # NOTE Possible issue with this not always being set?
+ flowState = ONOS1Flows
+ flowCheck = main.TRUE
+ main.log.report( "Flow count is consistent across all" +
+ " ONOS nodes" )
+ else:
+ main.log.warn( "ONOS1 flows: " +
+ json.dumps( ONOS1FlowsJson, sort_keys=True,
+ indent=4,
+ separators=( ',', ': ' ) ) )
+ main.log.warn( "ONOS2 flows: " +
+ json.dumps( ONOS2FlowsJson, sort_keys=True,
+ indent=4,
+ separators=( ',', ': ' ) ) )
+ main.log.warn( "ONOS3 flows: " +
+ json.dumps( ONOS3FlowsJson, sort_keys=True,
+ indent=4,
+ separators=( ',', ': ' ) ) )
+ main.log.warn( "ONOS4 flows: " +
+ json.dumps( ONOS4FlowsJson, sort_keys=True,
+ indent=4,
+ separators=( ',', ': ' ) ) )
+ main.log.warn( "ONOS5 flows: " +
+ json.dumps( ONOS5FlowsJson, sort_keys=True,
+ indent=4,
+ separators=( ',', ': ' ) ) )
+ main.log.warn( "ONOS6 flows: " +
+ json.dumps( ONOS6FlowsJson, sort_keys=True,
+ indent=4,
+ separators=( ',', ': ' ) ) )
+ main.log.warn( "ONOS7 flows: " +
+ json.dumps( ONOS7FlowsJson, sort_keys=True,
+ indent=4,
+ separators=( ',', ': ' ) ) )
utilities.assert_equals(
expect=main.TRUE,
actual=flowCheck,
@@ -862,7 +967,9 @@
flows = []
for i in range( 1, 29 ):
flows.append( main.Mininet2.getFlowTable( 1.3, "s" + str( i ) ) )
-
+ if flowCheck == main.FALSE:
+ for table in flows:
+ main.log.warn( table )
# TODO: Compare switch flow tables with ONOS flow tables
main.step( "Start continuous pings" )
@@ -936,13 +1043,13 @@
devices.append( main.ONOScli6.devices() )
devices.append( main.ONOScli7.devices() )
hosts = []
- hosts.append( main.ONOScli1.hosts() )
- hosts.append( main.ONOScli2.hosts() )
- hosts.append( main.ONOScli3.hosts() )
- hosts.append( main.ONOScli4.hosts() )
- hosts.append( main.ONOScli5.hosts() )
- hosts.append( main.ONOScli6.hosts() )
- hosts.append( main.ONOScli7.hosts() )
+ hosts.append( json.loads( main.ONOScli1.hosts() ) )
+ hosts.append( json.loads( main.ONOScli2.hosts() ) )
+ hosts.append( json.loads( main.ONOScli3.hosts() ) )
+ hosts.append( json.loads( main.ONOScli4.hosts() ) )
+ hosts.append( json.loads( main.ONOScli5.hosts() ) )
+ hosts.append( json.loads( main.ONOScli6.hosts() ) )
+ hosts.append( json.loads( main.ONOScli7.hosts() ) )
ports = []
ports.append( main.ONOScli1.ports() )
ports.append( main.ONOScli2.ports() )
@@ -996,6 +1103,21 @@
onpass="Hosts view is consistent across all ONOS nodes",
onfail="ONOS nodes have different views of hosts" )
+ ipResult = main.TRUE
+ for controller in range( 0, len( hosts ) ):
+ controllerStr = str( controller + 1 )
+ for host in hosts[ controller ]:
+ if host.get( 'ips', [] ) == []:
+ main.log.error(
+ "DEBUG:Error with host ips on controller" +
+ controllerStr + ": " + str( host ) )
+ ipResult = main.FALSE
+ utilities.assert_equals(
+ expect=main.TRUE,
+ actual=ipResult,
+ onpass="The ips of the hosts aren't empty",
+ onfail="The ip of at least one host is missing" )
+
# Strongly connected clusters of devices
consistentClustersResult = main.TRUE
for controller in range( len( clusters ) ):
@@ -1022,13 +1144,14 @@
onfail="ONOS nodes have different views of clusters" )
# there should always only be one cluster
numClusters = len( json.loads( clusters[ 0 ] ) )
+ clusterResults = main.FALSE
+ if numClusters == 1:
+ clusterResults = main.TRUE
utilities.assert_equals(
expect=1,
actual=numClusters,
onpass="ONOS shows 1 SCC",
- onfail="ONOS shows " +
- str( numClusters ) +
- " SCCs" )
+ onfail="ONOS shows " + str( numClusters ) + " SCCs" )
main.step( "Comparing ONOS topology to MN" )
devicesResults = main.TRUE
@@ -1044,11 +1167,11 @@
else:
currentDevicesResult = main.FALSE
utilities.assert_equals( expect=main.TRUE,
- actual=currentDevicesResult,
- onpass="ONOS" + controllerStr +
- " Switches view is correct",
- onfail="ONOS" + controllerStr +
- " Switches view is incorrect" )
+ actual=currentDevicesResult,
+ onpass="ONOS" + controllerStr +
+ " Switches view is correct",
+ onfail="ONOS" + controllerStr +
+ " Switches view is incorrect" )
if ports[ controller ] or "Error" not in ports[ controller ]:
currentPortsResult = main.Mininet1.comparePorts(
@@ -1058,11 +1181,11 @@
else:
currentPortsResult = main.FALSE
utilities.assert_equals( expect=main.TRUE,
- actual=currentPortsResult,
- onpass="ONOS" + controllerStr +
- " ports view is correct",
- onfail="ONOS" + controllerStr +
- " ports view is incorrect" )
+ actual=currentPortsResult,
+ onpass="ONOS" + controllerStr +
+ " ports view is correct",
+ onfail="ONOS" + controllerStr +
+ " ports view is incorrect" )
if links[ controller ] or "Error" not in links[ controller ]:
currentLinksResult = main.Mininet1.compareLinks(
@@ -1072,28 +1195,29 @@
else:
currentLinksResult = main.FALSE
utilities.assert_equals( expect=main.TRUE,
- actual=currentLinksResult,
- onpass="ONOS" + controllerStr +
- " links view is correct",
- onfail="ONOS" + controllerStr +
- " links view is incorrect" )
+ actual=currentLinksResult,
+ onpass="ONOS" + controllerStr +
+ " links view is correct",
+ onfail="ONOS" + controllerStr +
+ " links view is incorrect" )
devicesResults = devicesResults and currentDevicesResult
portsResults = portsResults and currentPortsResult
linksResults = linksResults and currentLinksResult
topoResult = devicesResults and portsResults and linksResults\
- and consistentHostsResult and consistentClustersResult
+ and consistentHostsResult and consistentClustersResult\
+ and clusterResults and ipResult
utilities.assert_equals( expect=main.TRUE, actual=topoResult,
- onpass="Topology Check Test successful",
- onfail="Topology Check Test NOT successful" )
+ onpass="Topology Check Test successful",
+ onfail="Topology Check Test NOT successful" )
finalAssert = main.TRUE
finalAssert = finalAssert and topoResult and flowCheck \
- and intentCheck and consistentMastership and rolesNotNull
+ and intentCheck and consistentMastership and rolesNotNull
utilities.assert_equals( expect=main.TRUE, actual=finalAssert,
- onpass="State check successful",
- onfail="State check NOT successful" )
+ onpass="State check successful",
+ onfail="State check NOT successful" )
def CASE6( self, main ):
"""
@@ -1138,8 +1262,8 @@
caseResults = main.TRUE and onosIsupResult and cliResults
utilities.assert_equals( expect=main.TRUE, actual=caseResults,
- onpass="ONOS restart successful",
- onfail="ONOS restart NOT successful" )
+ onpass="ONOS restart successful",
+ onfail="ONOS restart NOT successful" )
def CASE7( self, main ):
"""
@@ -1337,15 +1461,15 @@
intentStates = []
for node in intents: # Iter through ONOS nodes
nodeStates = []
- for intent in json.loads( node ): # Iter through intents of a node
+ # Iter through intents of a node
+ for intent in json.loads( node ):
nodeStates.append( intent[ 'state' ] )
intentStates.append( nodeStates )
out = [ (i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
main.log.info( dict( out ) )
-
- # NOTE: Hazelcast has no durability, so intents are lost across system
- # restarts
+ # NOTE: The intent store has no durability, so intents are lost across system
+ # restarts
"""
main.step( "Compare current intents with intents before the failure" )
# NOTE: this requires case 5 to pass for intentState to be set.
@@ -1424,7 +1548,8 @@
actual=LossInPings,
onpass="No Loss of connectivity",
onfail="Loss of dataplane connectivity detected" )
- # NOTE: Since intents are not persisted with Hazelcast, we expect this
+ # NOTE: Since intents are not persisted in the intent store,
+ # we expect loss in dataplane connectivity
LossInPings = main.FALSE
# Test of LeadershipElection
@@ -1467,8 +1592,8 @@
if result == main.TRUE:
main.log.report( "Constant State Tests Passed" )
utilities.assert_equals( expect=main.TRUE, actual=result,
- onpass="Constant State Tests Passed",
- onfail="Constant state tests failed" )
+ onpass="Constant State Tests Passed",
+ onfail="Constant state tests failed" )
def CASE8( self, main ):
"""
@@ -1508,6 +1633,7 @@
devicesResults = main.TRUE
portsResults = main.TRUE
linksResults = main.TRUE
+ hostsResults = main.TRUE
topoResult = main.FALSE
elapsed = 0
count = 0
@@ -1518,9 +1644,7 @@
count = count + 1
if count > 1:
# TODO: Depricate STS usage
- MNTopo = TestONTopology(
- main.Mininet1,
- ctrls )
+ MNTopo = TestONTopology( main.Mininet1, ctrls )
cliStart = time.time()
devices = []
devices.append( main.ONOScli1.devices() )
@@ -1538,13 +1662,15 @@
hosts.append( json.loads( main.ONOScli5.hosts() ) )
hosts.append( json.loads( main.ONOScli6.hosts() ) )
hosts.append( json.loads( main.ONOScli7.hosts() ) )
+ ipResult = main.TRUE
for controller in range( 0, len( hosts ) ):
controllerStr = str( controller + 1 )
for host in hosts[ controller ]:
- if host[ 'ips' ] == []:
+ if host is None or host.get( 'ips', [] ) == []:
main.log.error(
"DEBUG:Error with host ips on controller" +
controllerStr + ": " + str( host ) )
+ ipResult = main.FALSE
ports = []
ports.append( main.ONOScli1.ports() )
ports.append( main.ONOScli2.ports() )
@@ -1580,47 +1706,58 @@
controller ]:
currentDevicesResult = main.Mininet1.compareSwitches(
MNTopo,
- json.loads(
- devices[ controller ] ) )
+ json.loads( devices[ controller ] ) )
else:
currentDevicesResult = main.FALSE
utilities.assert_equals( expect=main.TRUE,
- actual=currentDevicesResult,
- onpass="ONOS" + controllerStr +
- " Switches view is correct",
- onfail="ONOS" + controllerStr +
- " Switches view is incorrect" )
+ actual=currentDevicesResult,
+ onpass="ONOS" + controllerStr +
+ " Switches view is correct",
+ onfail="ONOS" + controllerStr +
+ " Switches view is incorrect" )
if ports[ controller ] or "Error" not in ports[ controller ]:
currentPortsResult = main.Mininet1.comparePorts(
MNTopo,
- json.loads(
- ports[ controller ] ) )
+ json.loads( ports[ controller ] ) )
else:
currentPortsResult = main.FALSE
utilities.assert_equals( expect=main.TRUE,
- actual=currentPortsResult,
- onpass="ONOS" + controllerStr +
- " ports view is correct",
- onfail="ONOS" + controllerStr +
- " ports view is incorrect" )
+ actual=currentPortsResult,
+ onpass="ONOS" + controllerStr +
+ " ports view is correct",
+ onfail="ONOS" + controllerStr +
+ " ports view is incorrect" )
if links[ controller ] or "Error" not in links[ controller ]:
currentLinksResult = main.Mininet1.compareLinks(
MNTopo,
- json.loads(
- links[ controller ] ) )
+ json.loads( links[ controller ] ) )
else:
currentLinksResult = main.FALSE
utilities.assert_equals( expect=main.TRUE,
- actual=currentLinksResult,
- onpass="ONOS" + controllerStr +
- " links view is correct",
- onfail="ONOS" + controllerStr +
- " links view is incorrect" )
- devicesResults = devicesResults and currentDevicesResult
- portsResults = portsResults and currentPortsResult
- linksResults = linksResults and currentLinksResult
+ actual=currentLinksResult,
+ onpass="ONOS" + controllerStr +
+ " links view is correct",
+ onfail="ONOS" + controllerStr +
+ " links view is incorrect" )
+
+ if hosts[ controller ] or "Error" not in hosts[ controller ]:
+ currentHostsResult = main.Mininet1.compareHosts(
+ MNTopo, hosts[ controller ] )
+ else:
+ currentHostsResult = main.FALSE
+ utilities.assert_equals( expect=main.TRUE,
+ actual=currentHostsResult,
+ onpass="ONOS" + controllerStr +
+ " hosts exist in Mininet",
+ onfail="ONOS" + controllerStr +
+ " hosts don't match Mininet" )
+
+ devicesResults = devicesResults and currentDevicesResult
+ portsResults = portsResults and currentPortsResult
+ linksResults = linksResults and currentLinksResult
+ hostsResults = hostsResults and currentHostsResult
# Compare json objects for hosts and dataplane clusters
@@ -1677,17 +1814,19 @@
onfail="ONOS nodes have different views of clusters" )
# there should always only be one cluster
numClusters = len( json.loads( clusters[ 0 ] ) )
+ clusterResults = main.FALSE
+ if numClusters == 1:
+ clusterResults = main.TRUE
utilities.assert_equals(
expect=1,
actual=numClusters,
onpass="ONOS shows 1 SCC",
- onfail="ONOS shows " +
- str( numClusters ) +
- " SCCs" )
+ onfail="ONOS shows " + str( numClusters ) + " SCCs" )
topoResult = ( devicesResults and portsResults and linksResults
- and consistentHostsResult
- and consistentClustersResult )
+ and hostsResults and consistentHostsResult
+ and consistentClustersResult and clusterResults
+ and ipResult )
topoResult = topoResult and int( count <= 2 )
note = "note it takes about " + str( int( cliTime ) ) + \
@@ -1698,8 +1837,8 @@
str( note ) + " ): " + str( elapsed ) + " seconds, " +
str( count ) + " tries" )
utilities.assert_equals( expect=main.TRUE, actual=topoResult,
- onpass="Topology Check Test successful",
- onfail="Topology Check Test NOT successful" )
+ onpass="Topology Check Test successful",
+ onfail="Topology Check Test NOT successful" )
if topoResult == main.TRUE:
main.log.report( "ONOS topology view matches Mininet topology" )
@@ -1713,20 +1852,18 @@
linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
description = "Turn off a link to ensure that Link Discovery " +\
- "is working properly"
+ "is working properly"
main.log.report( description )
main.case( description )
main.step( "Kill Link between s3 and s28" )
LinkDown = main.Mininet1.link( END1="s3", END2="s28", OPTION="down" )
- main.log.info(
- "Waiting " +
- str( linkSleep ) +
- " seconds for link down to be discovered" )
+ main.log.info( "Waiting " + str( linkSleep ) +
+ " seconds for link down to be discovered" )
time.sleep( linkSleep )
utilities.assert_equals( expect=main.TRUE, actual=LinkDown,
- onpass="Link down succesful",
- onfail="Failed to bring link down" )
+ onpass="Link down succesful",
+ onfail="Failed to bring link down" )
# TODO do some sort of check here
def CASE10( self, main ):
@@ -1745,14 +1882,12 @@
main.step( "Bring link between s3 and s28 back up" )
LinkUp = main.Mininet1.link( END1="s3", END2="s28", OPTION="up" )
- main.log.info(
- "Waiting " +
- str( linkSleep ) +
- " seconds for link up to be discovered" )
+ main.log.info( "Waiting " + str( linkSleep ) +
+ " seconds for link up to be discovered" )
time.sleep( linkSleep )
utilities.assert_equals( expect=main.TRUE, actual=LinkUp,
- onpass="Link up succesful",
- onfail="Failed to bring link up" )
+ onpass="Link up succesful",
+ onfail="Failed to bring link up" )
# TODO do some sort of check here
def CASE11( self, main ):
@@ -1784,8 +1919,8 @@
if device and device[ 'available' ] is False:
result = main.TRUE
utilities.assert_equals( expect=main.TRUE, actual=result,
- onpass="Kill switch succesful",
- onfail="Failed to kill switch?" )
+ onpass="Kill switch succesful",
+ onfail="Failed to kill switch?" )
def CASE12( self, main ):
"""
@@ -1808,27 +1943,24 @@
# TODO: New dpid or same? Ask Thomas?
for peer in links:
main.Mininet1.addLink( switch, peer )
- main.Mininet1.assignSwController(
- sw=switch.split( 's' )[ 1 ],
- count=numControllers,
- ip1=ONOS1Ip,
- port1=ONOS1Port,
- ip2=ONOS2Ip,
- port2=ONOS2Port,
- ip3=ONOS3Ip,
- port3=ONOS3Port,
- ip4=ONOS4Ip,
- port4=ONOS4Port,
- ip5=ONOS5Ip,
- port5=ONOS5Port,
- ip6=ONOS6Ip,
- port6=ONOS6Port,
- ip7=ONOS7Ip,
- port7=ONOS7Port )
- main.log.info(
- "Waiting " +
- str( switchSleep ) +
- " seconds for switch up to be discovered" )
+ main.Mininet1.assignSwController( sw=switch.split( 's' )[ 1 ],
+ count=numControllers,
+ ip1=ONOS1Ip,
+ port1=ONOS1Port,
+ ip2=ONOS2Ip,
+ port2=ONOS2Port,
+ ip3=ONOS3Ip,
+ port3=ONOS3Port,
+ ip4=ONOS4Ip,
+ port4=ONOS4Port,
+ ip5=ONOS5Ip,
+ port5=ONOS5Port,
+ ip6=ONOS6Ip,
+ port6=ONOS6Port,
+ ip7=ONOS7Ip,
+ port7=ONOS7Port )
+ main.log.info( "Waiting " + str( switchSleep ) +
+ " seconds for switch up to be discovered" )
time.sleep( switchSleep )
device = main.ONOScli1.getDevice( dpid=switchDPID )
# Peek at the deleted switch
@@ -1837,8 +1969,8 @@
if device and device[ 'available' ]:
result = main.TRUE
utilities.assert_equals( expect=main.TRUE, actual=result,
- onpass="add switch succesful",
- onfail="Failed to add switch?" )
+ onpass="add switch succesful",
+ onfail="Failed to add switch?" )
def CASE13( self, main ):
"""
@@ -1900,6 +2032,8 @@
dstDir + str( testname ) +
"-ONOS" + str( i + 1 ) + "-" +
f )
+ main.ONOSbench.handle.expect( "\$" )
+
# std*.log's
# NOTE: must end in /
logFolder = "/opt/onos/var/"
@@ -1915,15 +2049,17 @@
dstDir + str( testname ) +
"-ONOS" + str( i + 1 ) + "-" +
f )
+ main.ONOSbench.handle.expect( "\$" )
# sleep so scp can finish
time.sleep( 10 )
+ main.Mininet1.stopNet()
main.step( "Packing and rotating pcap archives" )
os.system( "~/TestON/dependencies/rotate.sh " + str( testname ) )
# TODO: actually check something here
utilities.assert_equals( expect=main.TRUE, actual=main.TRUE,
- onpass="Test cleanup successful",
- onfail="Test cleanup NOT successful" )
+ onpass="Test cleanup successful",
+ onfail="Test cleanup NOT successful" )
def CASE14( self, main ):
"""
@@ -2024,9 +2160,7 @@
elif leader is None or leader == main.FALSE:
main.log.report(
"Leader for the election app should be an ONOS node," +
- "instead got '" +
- str( leader ) +
- "'" )
+ "instead got '" + str( leader ) + "'" )
leaderResult = main.FALSE
withdrawResult = oldLeader.electionTestWithdraw()
utilities.assert_equals(
@@ -2044,10 +2178,8 @@
for leaderN in leaderList:
if leaderN == leader:
main.log.report(
- "ONOS" +
- str( controller ) +
- " still sees " +
- str( leader ) +
+ "ONOS" + str( controller ) +
+ " still sees " + str( leader ) +
" as leader after they withdrew" )
leaderResult = main.FALSE
elif leaderN == main.FALSE:
@@ -2081,8 +2213,8 @@
onpass="Leadership election passed",
onfail="Something went wrong with Leadership election" )
- main.step(
- "Run for election on old leader( just so everyone is in the hat )" )
+ main.step( "Run for election on old leader( just so everyone " +
+ "is in the hat )" )
runResult = oldLeader.electionTestRun()
utilities.assert_equals(
expect=main.TRUE,
diff --git a/TestON/tests/HATestMinorityRestart/HATestMinorityRestart.py b/TestON/tests/HATestMinorityRestart/HATestMinorityRestart.py
index de26e67..a87e862 100644
--- a/TestON/tests/HATestMinorityRestart/HATestMinorityRestart.py
+++ b/TestON/tests/HATestMinorityRestart/HATestMinorityRestart.py
@@ -31,14 +31,18 @@
CASE1 is to compile ONOS and push it to the test machines
Startup sequence:
- git pull
- mvn clean install
- onos-package
cell <name>
onos-verify-cell
NOTE: temporary - onos-remove-raft-logs
+ onos-uninstall
+ start mininet
+ git pull
+ mvn clean install
+ onos-package
onos-install -f
onos-wait-for-start
+ start cli sessions
+ start tcpdump
"""
main.log.report(
"ONOS HA test: Restart minority of ONOS nodes - initialization" )
@@ -109,8 +113,7 @@
main.step( "Compiling the latest version of ONOS" )
if PULLCODE:
- # TODO Configure branch in params
- main.step( "Git checkout and pull master" )
+ main.step( "Git checkout and pull " + gitBranch )
main.ONOSbench.gitCheckout( gitBranch )
gitPullResult = main.ONOSbench.gitPull()
@@ -145,7 +148,6 @@
and onos7InstallResult
main.step( "Checking if ONOS is up yet" )
- # TODO check bundle:list?
for i in range( 2 ):
onos1Isup = main.ONOSbench.isup( ONOS1Ip )
if not onos1Isup:
@@ -209,8 +211,8 @@
and onosIsupResult and cliResults )
utilities.assert_equals( expect=main.TRUE, actual=case1Result,
- onpass="Test startup successful",
- onfail="Test startup NOT successful" )
+ onpass="Test startup successful",
+ onfail="Test startup NOT successful" )
if case1Result == main.FALSE:
main.cleanup()
@@ -266,118 +268,131 @@
# Manually assign mastership to the controller we want
roleCall = main.TRUE
roleCheck = main.TRUE
-
- # Assign switch
- deviceId = main.ONOScli1.getDevice( "1000" ).get( 'id' )
- roleCall = roleCall and main.ONOScli1.deviceRole(
- deviceId,
- ONOS1Ip )
- # Check assignment
- if ONOS1Ip in main.ONOScli1.getRole( deviceId ).get( 'master' ):
- roleCheck = roleCheck and main.TRUE
- else:
- roleCheck = roleCheck and main.FALSE
-
- # Assign switch
- deviceId = main.ONOScli1.getDevice( "2800" ).get( 'id' )
- roleCall = roleCall and main.ONOScli1.deviceRole(
- deviceId,
- ONOS1Ip )
- # Check assignment
- if ONOS1Ip in main.ONOScli1.getRole( deviceId ).get( 'master' ):
- roleCheck = roleCheck and main.TRUE
- else:
- roleCheck = roleCheck and main.FALSE
-
- # Assign switch
- deviceId = main.ONOScli1.getDevice( "2000" ).get( 'id' )
- roleCall = roleCall and main.ONOScli1.deviceRole(
- deviceId,
- ONOS2Ip )
- # Check assignment
- if ONOS2Ip in main.ONOScli1.getRole( deviceId ).get( 'master' ):
- roleCheck = roleCheck and main.TRUE
- else:
- roleCheck = roleCheck and main.FALSE
-
- # Assign switch
- deviceId = main.ONOScli1.getDevice( "3000" ).get( 'id' )
- roleCall = roleCall and main.ONOScli1.deviceRole(
- deviceId,
- ONOS2Ip )
- # Check assignment
- if ONOS2Ip in main.ONOScli1.getRole( deviceId ).get( 'master' ):
- roleCheck = roleCheck and main.TRUE
- else:
- roleCheck = roleCheck and main.FALSE
-
- # Assign switch
- deviceId = main.ONOScli1.getDevice( "5000" ).get( 'id' )
- roleCall = roleCall and main.ONOScli1.deviceRole(
- deviceId,
- ONOS3Ip )
- # Check assignment
- if ONOS3Ip in main.ONOScli1.getRole( deviceId ).get( 'master' ):
- roleCheck = roleCheck and main.TRUE
- else:
- roleCheck = roleCheck and main.FALSE
-
- # Assign switch
- deviceId = main.ONOScli1.getDevice( "6000" ).get( 'id' )
- roleCall = roleCall and main.ONOScli1.deviceRole(
- deviceId,
- ONOS3Ip )
- # Check assignment
- if ONOS3Ip in main.ONOScli1.getRole( deviceId ).get( 'master' ):
- roleCheck = roleCheck and main.TRUE
- else:
- roleCheck = roleCheck and main.FALSE
-
- # Assign switch
- deviceId = main.ONOScli1.getDevice( "3004" ).get( 'id' )
- roleCall = roleCall and main.ONOScli1.deviceRole(
- deviceId,
- ONOS4Ip )
- # Check assignment
- if ONOS4Ip in main.ONOScli1.getRole( deviceId ).get( 'master' ):
- roleCheck = roleCheck and main.TRUE
- else:
- roleCheck = roleCheck and main.FALSE
-
- for i in range( 8, 18 ):
- dpid = '3' + str( i ).zfill( 3 )
- deviceId = main.ONOScli1.getDevice( dpid ).get( 'id' )
+ try:
+ # Assign switch
+ deviceId = main.ONOScli1.getDevice( "1000" ).get( 'id' )
+ assert deviceId, "No device id for s1 in ONOS"
roleCall = roleCall and main.ONOScli1.deviceRole(
deviceId,
- ONOS5Ip )
+ ONOS1Ip )
# Check assignment
- if ONOS5Ip in main.ONOScli1.getRole( deviceId ).get( 'master' ):
+ if ONOS1Ip in main.ONOScli1.getRole( deviceId ).get( 'master' ):
roleCheck = roleCheck and main.TRUE
else:
roleCheck = roleCheck and main.FALSE
- deviceId = main.ONOScli1.getDevice( "6007" ).get( 'id' )
- roleCall = roleCall and main.ONOScli1.deviceRole(
- deviceId,
- ONOS6Ip )
- # Check assignment
- if ONOS6Ip in main.ONOScli1.getRole( deviceId ).get( 'master' ):
- roleCheck = roleCheck and main.TRUE
- else:
- roleCheck = roleCheck and main.FALSE
-
- for i in range( 18, 28 ):
- dpid = '6' + str( i ).zfill( 3 )
- deviceId = main.ONOScli1.getDevice( dpid ).get( 'id' )
+ # Assign switch
+ deviceId = main.ONOScli1.getDevice( "2800" ).get( 'id' )
+ assert deviceId, "No device id for s28 in ONOS"
roleCall = roleCall and main.ONOScli1.deviceRole(
deviceId,
- ONOS7Ip )
+ ONOS1Ip )
# Check assignment
- if ONOS7Ip in main.ONOScli1.getRole( deviceId ).get( 'master' ):
+ if ONOS1Ip in main.ONOScli1.getRole( deviceId ).get( 'master' ):
roleCheck = roleCheck and main.TRUE
else:
roleCheck = roleCheck and main.FALSE
+ # Assign switch
+ deviceId = main.ONOScli1.getDevice( "2000" ).get( 'id' )
+ assert deviceId, "No device id for s2 in ONOS"
+ roleCall = roleCall and main.ONOScli1.deviceRole(
+ deviceId,
+ ONOS2Ip )
+ # Check assignment
+ if ONOS2Ip in main.ONOScli1.getRole( deviceId ).get( 'master' ):
+ roleCheck = roleCheck and main.TRUE
+ else:
+ roleCheck = roleCheck and main.FALSE
+
+ # Assign switch
+ deviceId = main.ONOScli1.getDevice( "3000" ).get( 'id' )
+ assert deviceId, "No device id for s3 in ONOS"
+ roleCall = roleCall and main.ONOScli1.deviceRole(
+ deviceId,
+ ONOS2Ip )
+ # Check assignment
+ if ONOS2Ip in main.ONOScli1.getRole( deviceId ).get( 'master' ):
+ roleCheck = roleCheck and main.TRUE
+ else:
+ roleCheck = roleCheck and main.FALSE
+
+ # Assign switch
+ deviceId = main.ONOScli1.getDevice( "5000" ).get( 'id' )
+ assert deviceId, "No device id for s5 in ONOS"
+ roleCall = roleCall and main.ONOScli1.deviceRole(
+ deviceId,
+ ONOS3Ip )
+ # Check assignment
+ if ONOS3Ip in main.ONOScli1.getRole( deviceId ).get( 'master' ):
+ roleCheck = roleCheck and main.TRUE
+ else:
+ roleCheck = roleCheck and main.FALSE
+
+ # Assign switch
+ deviceId = main.ONOScli1.getDevice( "6000" ).get( 'id' )
+ assert deviceId, "No device id for s6 in ONOS"
+ roleCall = roleCall and main.ONOScli1.deviceRole(
+ deviceId,
+ ONOS3Ip )
+ # Check assignment
+ if ONOS3Ip in main.ONOScli1.getRole( deviceId ).get( 'master' ):
+ roleCheck = roleCheck and main.TRUE
+ else:
+ roleCheck = roleCheck and main.FALSE
+
+ # Assign switch
+ deviceId = main.ONOScli1.getDevice( "3004" ).get( 'id' )
+ assert deviceId, "No device id for s4 in ONOS"
+ roleCall = roleCall and main.ONOScli1.deviceRole(
+ deviceId,
+ ONOS4Ip )
+ # Check assignment
+ if ONOS4Ip in main.ONOScli1.getRole( deviceId ).get( 'master' ):
+ roleCheck = roleCheck and main.TRUE
+ else:
+ roleCheck = roleCheck and main.FALSE
+
+ for i in range( 8, 18 ):
+ dpid = '3' + str( i ).zfill( 3 )
+ deviceId = main.ONOScli1.getDevice( dpid ).get( 'id' )
+ assert deviceId, "No device id for s%i in ONOS" % i
+ roleCall = roleCall and main.ONOScli1.deviceRole(
+ deviceId,
+ ONOS5Ip )
+ # Check assignment
+ if ONOS5Ip in main.ONOScli1.getRole( deviceId ).get( 'master' ):
+ roleCheck = roleCheck and main.TRUE
+ else:
+ roleCheck = roleCheck and main.FALSE
+
+ deviceId = main.ONOScli1.getDevice( "6007" ).get( 'id' )
+ assert deviceId, "No device id for s7 in ONOS"
+ roleCall = roleCall and main.ONOScli1.deviceRole(
+ deviceId,
+ ONOS6Ip )
+ # Check assignment
+ if ONOS6Ip in main.ONOScli1.getRole( deviceId ).get( 'master' ):
+ roleCheck = roleCheck and main.TRUE
+ else:
+ roleCheck = roleCheck and main.FALSE
+
+ for i in range( 18, 28 ):
+ dpid = '6' + str( i ).zfill( 3 )
+ deviceId = main.ONOScli1.getDevice( dpid ).get( 'id' )
+ assert deviceId, "No device id for s%i in ONOS" % i
+ roleCall = roleCall and main.ONOScli1.deviceRole(
+ deviceId,
+ ONOS7Ip )
+ # Check assignment
+ if ONOS7Ip in main.ONOScli1.getRole( deviceId ).get( 'master' ):
+ roleCheck = roleCheck and main.TRUE
+ else:
+ roleCheck = roleCheck and main.FALSE
+ except ( AttributeError, AssertionError ):
+ main.log.exception( "Something is wrong with ONOS device view" )
+ main.log.info( main.ONOScli1.devices() )
+
utilities.assert_equals(
expect=main.TRUE,
actual=roleCall,
@@ -401,6 +416,7 @@
Assign intents
"""
import time
+ import json
main.log.report( "Adding host intents" )
main.case( "Adding host Intents" )
@@ -442,8 +458,11 @@
time.sleep( 10 )
main.step( "Add host intents" )
+ intentIds = []
# TODO: move the host numbers to params
+ # Maybe look at all the paths we ping?
intentAddResult = True
+ hostResult = main.TRUE
for i in range( 8, 18 ):
main.log.info( "Adding host intent between h" + str( i ) +
" and h" + str( i + 10 ) )
@@ -460,39 +479,89 @@
host1Id = host1Dict.get( 'id', None )
host2Id = host2Dict.get( 'id', None )
if host1Id and host2Id:
- #Changed onos node to test something
- tmpResult = main.ONOScli4.addHostIntent(
+ # Changed onos node to test something
+ tmpId = main.ONOScli4.addHostIntent(
host1Id,
host2Id )
+ main.log.info( "Added intent with id: " + tmpId )
+ intentIds.append( tmpId )
else:
main.log.error( "Error, getHost() failed" )
main.log.warn( json.dumps( json.loads( main.ONOScli1.hosts() ),
sort_keys=True,
indent=4,
separators=( ',', ': ' ) ) )
- tmpResult = main.FALSE
- intentAddResult = bool( pingResult and intentAddResult
- and tmpResult )
- # TODO Check that intents were added?
+ hostResult = main.FALSE
+ onosIds = main.ONOScli1.getAllIntentsId()
+ main.log.info( "Submitted intents: " + str( intentIds ) )
+ main.log.info( "Intents in ONOS: " + str( onosIds ) )
+ for intent in intentIds:
+ if intent in onosIds:
+ pass # intent submitted is still in onos
+ else:
+ intentAddResult = False
# Print the intent states
- intents = main.ONOScli1.intents( )
+ intents = main.ONOScli1.intents()
intentStates = []
+ main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
+ count = 0
for intent in json.loads( intents ): # Iter through intents of a node
- intentStates.append( intent.get( 'state', None ) )
- out = [ (i, intentStates.count( i ) ) for i in set( intentStates ) ]
- main.log.info( dict( out ) )
-
+ state = intent.get( 'state', None )
+ intentId = intent.get( 'id', None )
+ intentStates.append( ( intentId, state ) )
+ # add submitted intents not in the store
+ tmplist = [ i for i, s in intentStates ]
+ missingIntents = False
+ for i in intentIds:
+ if i not in tmplist:
+ intentStates.append( ( i, " - " ) )
+ missingIntents = True
+ intentStates.sort()
+ for i, s in intentStates:
+ count += 1
+ main.log.info( "%-6s%-15s%-15s" %
+ ( str( count ), str( i ), str( s ) ) )
+ intentAddResult = bool( pingResult and hostResult and intentAddResult
+ and not missingIntents )
utilities.assert_equals(
expect=True,
actual=intentAddResult,
onpass="Pushed host intents to ONOS",
onfail="Error in pushing host intents to ONOS" )
- # TODO Check if intents all exist in datastore
+
+ if not intentAddResult:
+ import time
+ main.log.info( "Sleeping 60 seconds to see if intents are found" )
+ time.sleep( 60 )
+ onosIds = main.ONOScli1.getAllIntentsId()
+ main.log.info( "Submitted intents: " + str( intentIds ) )
+ main.log.info( "Intents in ONOS: " + str( onosIds ) )
+ # Print the intent states
+ intents = main.ONOScli1.intents()
+ intentStates = []
+ main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
+ count = 0
+ for intent in json.loads( intents ):
+ # Iter through intents of a node
+ state = intent.get( 'state', None )
+ intentId = intent.get( 'id', None )
+ intentStates.append( ( intentId, state ) )
+ # add submitted intents not in the store
+ tmplist = [ i for i, s in intentStates ]
+ for i in intentIds:
+ if i not in tmplist:
+ intentStates.append( ( i, " - " ) )
+ intentStates.sort()
+ for i, s in intentStates:
+ count += 1
+ main.log.info( "%-6s%-15s%-15s" %
+ ( str( count ), str( i ), str( s ) ) )
def CASE4( self, main ):
"""
Ping across added host intents
"""
+ import json
description = " Ping across added host intents"
main.log.report( description )
main.case( description )
@@ -510,7 +579,7 @@
if PingResult == main.FALSE:
main.log.report(
"Intents have not been installed correctly, pings failed." )
- #TODO: pretty print
+ # TODO: pretty print
main.log.warn( "ONSO1 intents: " )
main.log.warn( json.dumps( json.loads( main.ONOScli1.intents() ),
sort_keys=True,
@@ -524,6 +593,22 @@
actual=PingResult,
onpass="Intents have been installed correctly and pings work",
onfail="Intents have not been installed correctly, pings failed." )
+ if PingResult is not main.TRUE:
+ # Print the intent states
+ intents = main.ONOScli1.intents()
+ intentStates = []
+ main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
+ count = 0
+ # Iter through intents of a node
+ for intent in json.loads( intents ):
+ state = intent.get( 'state', None )
+ intentId = intent.get( 'id', None )
+ intentStates.append( ( intentId, state ) )
+ intentStates.sort()
+ for i, s in intentStates:
+ count += 1
+ main.log.info( "%-6s%-15s%-15s" %
+ ( str( count ), str( i ), str( s ) ) )
def CASE5( self, main ):
"""
@@ -787,29 +872,39 @@
main.step( "Get the flows from each controller" )
global flowState
flowState = []
- ONOS1Flows = main.ONOScli1.flows( jsonFormat=True )
- ONOS2Flows = main.ONOScli2.flows( jsonFormat=True )
- ONOS3Flows = main.ONOScli3.flows( jsonFormat=True )
- ONOS4Flows = main.ONOScli4.flows( jsonFormat=True )
- ONOS5Flows = main.ONOScli5.flows( jsonFormat=True )
- ONOS6Flows = main.ONOScli6.flows( jsonFormat=True )
- ONOS7Flows = main.ONOScli7.flows( jsonFormat=True )
- ONOS1FlowsJson = json.loads( ONOS1Flows )
- ONOS2FlowsJson = json.loads( ONOS2Flows )
- ONOS3FlowsJson = json.loads( ONOS3Flows )
- ONOS4FlowsJson = json.loads( ONOS4Flows )
- ONOS5FlowsJson = json.loads( ONOS5Flows )
- ONOS6FlowsJson = json.loads( ONOS6Flows )
- ONOS7FlowsJson = json.loads( ONOS7Flows )
flowCheck = main.FALSE
- if "Error" in ONOS1Flows or not ONOS1Flows\
- or "Error" in ONOS2Flows or not ONOS2Flows\
- or "Error" in ONOS3Flows or not ONOS3Flows\
- or "Error" in ONOS4Flows or not ONOS4Flows\
- or "Error" in ONOS5Flows or not ONOS5Flows\
- or "Error" in ONOS6Flows or not ONOS6Flows\
- or "Error" in ONOS7Flows or not ONOS7Flows:
- main.log.report( "Error in getting ONOS intents" )
+ try:
+ ONOS1Flows = main.ONOScli1.flows( jsonFormat=True )
+ ONOS2Flows = main.ONOScli2.flows( jsonFormat=True )
+ ONOS3Flows = main.ONOScli3.flows( jsonFormat=True )
+ ONOS4Flows = main.ONOScli4.flows( jsonFormat=True )
+ ONOS5Flows = main.ONOScli5.flows( jsonFormat=True )
+ ONOS6Flows = main.ONOScli6.flows( jsonFormat=True )
+ ONOS7Flows = main.ONOScli7.flows( jsonFormat=True )
+ assert ONOS1Flows, "ONOS1 Flows should not be empty"
+ assert ONOS2Flows, "ONOS2 Flows should not be empty"
+ assert ONOS3Flows, "ONOS3 Flows should not be empty"
+ assert ONOS4Flows, "ONOS4 Flows should not be empty"
+ assert ONOS5Flows, "ONOS5 Flows should not be empty"
+ assert ONOS6Flows, "ONOS6 Flows should not be empty"
+ assert ONOS7Flows, "ONOS7 Flows should not be empty"
+ assert "Error" not in ONOS1Flows, "ONOS1 Flows contains 'Error'"
+ assert "Error" not in ONOS2Flows, "ONOS2 Flows contains 'Error'"
+ assert "Error" not in ONOS3Flows, "ONOS3 Flows contains 'Error'"
+ assert "Error" not in ONOS4Flows, "ONOS4 Flows contains 'Error'"
+ assert "Error" not in ONOS5Flows, "ONOS5 Flows contains 'Error'"
+ assert "Error" not in ONOS6Flows, "ONOS6 Flows contains 'Error'"
+ assert "Error" not in ONOS7Flows, "ONOS7 Flows contains 'Error'"
+ ONOS1FlowsJson = json.loads( ONOS1Flows )
+ ONOS2FlowsJson = json.loads( ONOS2Flows )
+ ONOS3FlowsJson = json.loads( ONOS3Flows )
+ ONOS4FlowsJson = json.loads( ONOS4Flows )
+ ONOS5FlowsJson = json.loads( ONOS5Flows )
+ ONOS6FlowsJson = json.loads( ONOS6Flows )
+ ONOS7FlowsJson = json.loads( ONOS7Flows )
+ except ( ValueError, AssertionError ): # From json.loads, or asserts
+ main.log.exception( "One or more 'flows' responses from " +
+ "ONOS couldn't be decoded." )
main.log.warn( "ONOS1 flows repsponse: " + ONOS1Flows )
main.log.warn( "ONOS2 flows repsponse: " + ONOS2Flows )
main.log.warn( "ONOS3 flows repsponse: " + ONOS3Flows )
@@ -817,38 +912,48 @@
main.log.warn( "ONOS5 flows repsponse: " + ONOS5Flows )
main.log.warn( "ONOS6 flows repsponse: " + ONOS6Flows )
main.log.warn( "ONOS7 flows repsponse: " + ONOS7Flows )
- elif len( ONOS1FlowsJson ) == len( ONOS2FlowsJson )\
- and len( ONOS1FlowsJson ) == len( ONOS3FlowsJson )\
- and len( ONOS1FlowsJson ) == len( ONOS4FlowsJson )\
- and len( ONOS1FlowsJson ) == len( ONOS5FlowsJson )\
- and len( ONOS1FlowsJson ) == len( ONOS6FlowsJson )\
- and len( ONOS1FlowsJson ) == len( ONOS7FlowsJson ):
+ else: # No exceptions
+ if len( ONOS1FlowsJson ) == len( ONOS2FlowsJson )\
+ and len( ONOS1FlowsJson ) == len( ONOS3FlowsJson )\
+ and len( ONOS1FlowsJson ) == len( ONOS4FlowsJson )\
+ and len( ONOS1FlowsJson ) == len( ONOS5FlowsJson )\
+ and len( ONOS1FlowsJson ) == len( ONOS6FlowsJson )\
+ and len( ONOS1FlowsJson ) == len( ONOS7FlowsJson ):
# TODO: Do a better check, maybe compare flows on switches?
- flowState = ONOS1Flows
- flowCheck = main.TRUE
- main.log.report( "Flow count is consistent across all ONOS nodes" )
- else:
- main.log.warn( "ONOS1 flows: " +
- json.dumps( ONOS1FlowsJson, sort_keys=True,
- indent=4, separators=( ',', ': ' ) ) )
- main.log.warn( "ONOS2 flows: " +
- json.dumps( ONOS2FlowsJson, sort_keys=True,
- indent=4, separators=( ',', ': ' ) ) )
- main.log.warn( "ONOS3 flows: " +
- json.dumps( ONOS3FlowsJson, sort_keys=True,
- indent=4, separators=( ',', ': ' ) ) )
- main.log.warn( "ONOS4 flows: " +
- json.dumps( ONOS4FlowsJson, sort_keys=True,
- indent=4, separators=( ',', ': ' ) ) )
- main.log.warn( "ONOS5 flows: " +
- json.dumps( ONOS5FlowsJson, sort_keys=True,
- indent=4, separators=( ',', ': ' ) ) )
- main.log.warn( "ONOS6 flows: " +
- json.dumps( ONOS6FlowsJson, sort_keys=True,
- indent=4, separators=( ',', ': ' ) ) )
- main.log.warn( "ONOS7 flows: " +
- json.dumps( ONOS7FlowsJson, sort_keys=True,
- indent=4, separators=( ',', ': ' ) ) )
+ # NOTE Possible issue with this not always being set?
+ flowState = ONOS1Flows
+ flowCheck = main.TRUE
+ main.log.report( "Flow count is consistent across all" +
+ " ONOS nodes" )
+ else:
+ main.log.warn( "ONOS1 flows: " +
+ json.dumps( ONOS1FlowsJson, sort_keys=True,
+ indent=4,
+ separators=( ',', ': ' ) ) )
+ main.log.warn( "ONOS2 flows: " +
+ json.dumps( ONOS2FlowsJson, sort_keys=True,
+ indent=4,
+ separators=( ',', ': ' ) ) )
+ main.log.warn( "ONOS3 flows: " +
+ json.dumps( ONOS3FlowsJson, sort_keys=True,
+ indent=4,
+ separators=( ',', ': ' ) ) )
+ main.log.warn( "ONOS4 flows: " +
+ json.dumps( ONOS4FlowsJson, sort_keys=True,
+ indent=4,
+ separators=( ',', ': ' ) ) )
+ main.log.warn( "ONOS5 flows: " +
+ json.dumps( ONOS5FlowsJson, sort_keys=True,
+ indent=4,
+ separators=( ',', ': ' ) ) )
+ main.log.warn( "ONOS6 flows: " +
+ json.dumps( ONOS6FlowsJson, sort_keys=True,
+ indent=4,
+ separators=( ',', ': ' ) ) )
+ main.log.warn( "ONOS7 flows: " +
+ json.dumps( ONOS7FlowsJson, sort_keys=True,
+ indent=4,
+ separators=( ',', ': ' ) ) )
utilities.assert_equals(
expect=main.TRUE,
actual=flowCheck,
@@ -860,7 +965,9 @@
flows = []
for i in range( 1, 29 ):
flows.append( main.Mininet2.getFlowTable( 1.3, "s" + str( i ) ) )
-
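+ # When the flow check failed, dump the raw switch flow tables for debugging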
+ if flowCheck == main.FALSE:
+ for table in flows:
+ main.log.warn( table )
# TODO: Compare switch flow tables with ONOS flow tables
main.step( "Start continuous pings" )
@@ -934,13 +1041,13 @@
devices.append( main.ONOScli6.devices() )
devices.append( main.ONOScli7.devices() )
hosts = []
- hosts.append( main.ONOScli1.hosts() )
- hosts.append( main.ONOScli2.hosts() )
- hosts.append( main.ONOScli3.hosts() )
- hosts.append( main.ONOScli4.hosts() )
- hosts.append( main.ONOScli5.hosts() )
- hosts.append( main.ONOScli6.hosts() )
- hosts.append( main.ONOScli7.hosts() )
+ hosts.append( json.loads( main.ONOScli1.hosts() ) )
+ hosts.append( json.loads( main.ONOScli2.hosts() ) )
+ hosts.append( json.loads( main.ONOScli3.hosts() ) )
+ hosts.append( json.loads( main.ONOScli4.hosts() ) )
+ hosts.append( json.loads( main.ONOScli5.hosts() ) )
+ hosts.append( json.loads( main.ONOScli6.hosts() ) )
+ hosts.append( json.loads( main.ONOScli7.hosts() ) )
ports = []
ports.append( main.ONOScli1.ports() )
ports.append( main.ONOScli2.ports() )
@@ -994,6 +1101,21 @@
onpass="Hosts view is consistent across all ONOS nodes",
onfail="ONOS nodes have different views of hosts" )
+ ipResult = main.TRUE
+ for controller in range( 0, len( hosts ) ):
+ controllerStr = str( controller + 1 )
+ for host in hosts[ controller ]:
+ if host.get( 'ips', [] ) == []:
+ main.log.error(
+ "DEBUG:Error with host ips on controller" +
+ controllerStr + ": " + str( host ) )
+ ipResult = main.FALSE
+ utilities.assert_equals(
+ expect=main.TRUE,
+ actual=ipResult,
+ onpass="The ips of the hosts aren't empty",
+ onfail="The ip of at least one host is missing" )
+
# Strongly connected clusters of devices
consistentClustersResult = main.TRUE
for controller in range( len( clusters ) ):
@@ -1020,13 +1142,14 @@
onfail="ONOS nodes have different views of clusters" )
# there should always only be one cluster
numClusters = len( json.loads( clusters[ 0 ] ) )
+ clusterResults = main.FALSE
+ if numClusters == 1:
+ clusterResults = main.TRUE
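+ # Record whether there is exactly one SCC so it can be folded into topoResult below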
utilities.assert_equals(
expect=1,
actual=numClusters,
onpass="ONOS shows 1 SCC",
- onfail="ONOS shows " +
- str( numClusters ) +
- " SCCs" )
+ onfail="ONOS shows " + str( numClusters ) + " SCCs" )
main.step( "Comparing ONOS topology to MN" )
devicesResults = main.TRUE
@@ -1042,11 +1165,11 @@
else:
currentDevicesResult = main.FALSE
utilities.assert_equals( expect=main.TRUE,
- actual=currentDevicesResult,
- onpass="ONOS" + controllerStr +
- " Switches view is correct",
- onfail="ONOS" + controllerStr +
- " Switches view is incorrect" )
+ actual=currentDevicesResult,
+ onpass="ONOS" + controllerStr +
+ " Switches view is correct",
+ onfail="ONOS" + controllerStr +
+ " Switches view is incorrect" )
if ports[ controller ] or "Error" not in ports[ controller ]:
currentPortsResult = main.Mininet1.comparePorts(
@@ -1056,11 +1179,11 @@
else:
currentPortsResult = main.FALSE
utilities.assert_equals( expect=main.TRUE,
- actual=currentPortsResult,
- onpass="ONOS" + controllerStr +
- " ports view is correct",
- onfail="ONOS" + controllerStr +
- " ports view is incorrect" )
+ actual=currentPortsResult,
+ onpass="ONOS" + controllerStr +
+ " ports view is correct",
+ onfail="ONOS" + controllerStr +
+ " ports view is incorrect" )
if links[ controller ] or "Error" not in links[ controller ]:
currentLinksResult = main.Mininet1.compareLinks(
@@ -1070,28 +1193,29 @@
else:
currentLinksResult = main.FALSE
utilities.assert_equals( expect=main.TRUE,
- actual=currentLinksResult,
- onpass="ONOS" + controllerStr +
- " links view is correct",
- onfail="ONOS" + controllerStr +
- " links view is incorrect" )
+ actual=currentLinksResult,
+ onpass="ONOS" + controllerStr +
+ " links view is correct",
+ onfail="ONOS" + controllerStr +
+ " links view is incorrect" )
devicesResults = devicesResults and currentDevicesResult
portsResults = portsResults and currentPortsResult
linksResults = linksResults and currentLinksResult
topoResult = devicesResults and portsResults and linksResults\
- and consistentHostsResult and consistentClustersResult
+ and consistentHostsResult and consistentClustersResult\
+ and clusterResults and ipResult
utilities.assert_equals( expect=main.TRUE, actual=topoResult,
- onpass="Topology Check Test successful",
- onfail="Topology Check Test NOT successful" )
+ onpass="Topology Check Test successful",
+ onfail="Topology Check Test NOT successful" )
finalAssert = main.TRUE
finalAssert = finalAssert and topoResult and flowCheck \
- and intentCheck and consistentMastership and rolesNotNull
+ and intentCheck and consistentMastership and rolesNotNull
utilities.assert_equals( expect=main.TRUE, actual=finalAssert,
- onpass="State check successful",
- onfail="State check NOT successful" )
+ onpass="State check successful",
+ onfail="State check NOT successful" )
def CASE6( self, main ):
"""
@@ -1128,8 +1252,8 @@
main.restartTime = time.time()
caseResults = main.TRUE and onosIsupResult and cliResults
utilities.assert_equals( expect=main.TRUE, actual=caseResults,
- onpass="ONOS restart successful",
- onfail="ONOS restart NOT successful" )
+ onpass="ONOS restart successful",
+ onfail="ONOS restart NOT successful" )
def CASE7( self, main ):
"""
@@ -1256,147 +1380,133 @@
# NOTE: we expect mastership to change on controller failure
mastershipCheck = consistentMastership
- while True:
- whileTime = time.time() - main.restartTime
- # Gossip store
- main.step( "Get the intents and compare across all nodes" )
- ONOS1Intents = main.ONOScli1.intents( jsonFormat=True )
- ONOS2Intents = main.ONOScli2.intents( jsonFormat=True )
- ONOS3Intents = main.ONOScli3.intents( jsonFormat=True )
- ONOS4Intents = main.ONOScli4.intents( jsonFormat=True )
- ONOS5Intents = main.ONOScli5.intents( jsonFormat=True )
- ONOS6Intents = main.ONOScli6.intents( jsonFormat=True )
- ONOS7Intents = main.ONOScli7.intents( jsonFormat=True )
- intentCheck = main.FALSE
- if "Error" in ONOS1Intents or not ONOS1Intents\
- or "Error" in ONOS2Intents or not ONOS2Intents\
- or "Error" in ONOS3Intents or not ONOS3Intents\
- or "Error" in ONOS4Intents or not ONOS4Intents\
- or "Error" in ONOS5Intents or not ONOS5Intents\
- or "Error" in ONOS6Intents or not ONOS6Intents\
- or "Error" in ONOS7Intents or not ONOS7Intents:
- main.log.report( "Error in getting ONOS intents" )
- main.log.warn( "ONOS1 intents response: " +
- repr( ONOS1Intents ) )
- main.log.warn( "ONOS2 intents response: " +
- repr( ONOS2Intents ) )
- main.log.warn( "ONOS3 intents response: " +
- repr( ONOS3Intents ) )
- main.log.warn( "ONOS4 intents response: " +
- repr( ONOS4Intents ) )
- main.log.warn( "ONOS5 intents response: " +
- repr( ONOS5Intents ) )
- main.log.warn( "ONOS6 intents response: " +
- repr( ONOS6Intents ) )
- main.log.warn( "ONOS7 intents response: " +
- repr( ONOS7Intents ) )
- elif ONOS1Intents == ONOS2Intents\
- and ONOS1Intents == ONOS3Intents\
- and ONOS1Intents == ONOS4Intents\
- and ONOS1Intents == ONOS5Intents\
- and ONOS1Intents == ONOS6Intents\
- and ONOS1Intents == ONOS7Intents:
- intentCheck = main.TRUE
- main.log.report( "Intents are consistent across all" +
- " ONOS nodes" )
- else:
- main.log.warn( "ONOS1 intents: " )
- print json.dumps( json.loads( ONOS1Intents ), sort_keys=True,
- indent=4, separators=( ',', ': ' ) )
- main.log.warn( "ONOS2 intents: " )
- print json.dumps( json.loads( ONOS2Intents ), sort_keys=True,
- indent=4, separators=( ',', ': ' ) )
- main.log.warn( "ONOS3 intents: " )
- print json.dumps( json.loads( ONOS3Intents ), sort_keys=True,
- indent=4, separators=( ',', ': ' ) )
- main.log.warn( "ONOS4 intents: " )
- print json.dumps( json.loads( ONOS4Intents ), sort_keys=True,
- indent=4, separators=( ',', ': ' ) )
- main.log.warn( "ONOS5 intents: " )
- print json.dumps( json.loads( ONOS5Intents ), sort_keys=True,
- indent=4, separators=( ',', ': ' ) )
- main.log.warn( "ONOS6 intents: " )
- print json.dumps( json.loads( ONOS6Intents ), sort_keys=True,
- indent=4, separators=( ',', ': ' ) )
- main.log.warn( "ONOS7 intents: " )
- print json.dumps( json.loads( ONOS7Intents ), sort_keys=True,
- indent=4, separators=( ',', ': ' ) )
- utilities.assert_equals(
- expect=main.TRUE,
- actual=intentCheck,
- onpass="Intents are consistent across all ONOS nodes",
- onfail="ONOS nodes have different views of intents" )
- # Print the intent states
- intents = []
- intents.append( ONOS1Intents )
- intents.append( ONOS2Intents )
- intents.append( ONOS3Intents )
- intents.append( ONOS4Intents )
- intents.append( ONOS5Intents )
- intents.append( ONOS6Intents )
- intents.append( ONOS7Intents )
- intentStates = []
- for node in intents: # Iter through ONOS nodes
- nodeStates = []
- # Iter through intents of a node
- for intent in json.loads( node ):
- nodeStates.append( intent[ 'state' ] )
- intentStates.append( nodeStates )
- out = [ (i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
- main.log.info( dict( out ) )
+ main.step( "Get the intents and compare across all nodes" )
+ ONOS1Intents = main.ONOScli1.intents( jsonFormat=True )
+ ONOS2Intents = main.ONOScli2.intents( jsonFormat=True )
+ ONOS3Intents = main.ONOScli3.intents( jsonFormat=True )
+ ONOS4Intents = main.ONOScli4.intents( jsonFormat=True )
+ ONOS5Intents = main.ONOScli5.intents( jsonFormat=True )
+ ONOS6Intents = main.ONOScli6.intents( jsonFormat=True )
+ ONOS7Intents = main.ONOScli7.intents( jsonFormat=True )
+ intentCheck = main.FALSE
+ if "Error" in ONOS1Intents or not ONOS1Intents\
+ or "Error" in ONOS2Intents or not ONOS2Intents\
+ or "Error" in ONOS3Intents or not ONOS3Intents\
+ or "Error" in ONOS4Intents or not ONOS4Intents\
+ or "Error" in ONOS5Intents or not ONOS5Intents\
+ or "Error" in ONOS6Intents or not ONOS6Intents\
+ or "Error" in ONOS7Intents or not ONOS7Intents:
+ main.log.report( "Error in getting ONOS intents" )
+ main.log.warn( "ONOS1 intents response: " + repr( ONOS1Intents ) )
+ main.log.warn( "ONOS2 intents response: " + repr( ONOS2Intents ) )
+ main.log.warn( "ONOS3 intents response: " + repr( ONOS3Intents ) )
+ main.log.warn( "ONOS4 intents response: " + repr( ONOS4Intents ) )
+ main.log.warn( "ONOS5 intents response: " + repr( ONOS5Intents ) )
+ main.log.warn( "ONOS6 intents response: " + repr( ONOS6Intents ) )
+ main.log.warn( "ONOS7 intents response: " + repr( ONOS7Intents ) )
+ elif ONOS1Intents == ONOS2Intents\
+ and ONOS1Intents == ONOS3Intents\
+ and ONOS1Intents == ONOS4Intents\
+ and ONOS1Intents == ONOS5Intents\
+ and ONOS1Intents == ONOS6Intents\
+ and ONOS1Intents == ONOS7Intents:
+ intentCheck = main.TRUE
+ main.log.report( "Intents are consistent across all ONOS nodes" )
+ else:
+ main.log.warn( "ONOS1 intents: " )
+ print json.dumps( json.loads( ONOS1Intents ), sort_keys=True,
+ indent=4, separators=( ',', ': ' ) )
+ main.log.warn( "ONOS2 intents: " )
+ print json.dumps( json.loads( ONOS2Intents ), sort_keys=True,
+ indent=4, separators=( ',', ': ' ) )
+ main.log.warn( "ONOS3 intents: " )
+ print json.dumps( json.loads( ONOS3Intents ), sort_keys=True,
+ indent=4, separators=( ',', ': ' ) )
+ main.log.warn( "ONOS4 intents: " )
+ print json.dumps( json.loads( ONOS4Intents ), sort_keys=True,
+ indent=4, separators=( ',', ': ' ) )
+ main.log.warn( "ONOS5 intents: " )
+ print json.dumps( json.loads( ONOS5Intents ), sort_keys=True,
+ indent=4, separators=( ',', ': ' ) )
+ main.log.warn( "ONOS6 intents: " )
+ print json.dumps( json.loads( ONOS6Intents ), sort_keys=True,
+ indent=4, separators=( ',', ': ' ) )
+ main.log.warn( "ONOS7 intents: " )
+ print json.dumps( json.loads( ONOS7Intents ), sort_keys=True,
+ indent=4, separators=( ',', ': ' ) )
+ utilities.assert_equals(
+ expect=main.TRUE,
+ actual=intentCheck,
+ onpass="Intents are consistent across all ONOS nodes",
+ onfail="ONOS nodes have different views of intents" )
+ # Print the intent states
+ intents = []
+ intents.append( ONOS1Intents )
+ intents.append( ONOS2Intents )
+ intents.append( ONOS3Intents )
+ intents.append( ONOS4Intents )
+ intents.append( ONOS5Intents )
+ intents.append( ONOS6Intents )
+ intents.append( ONOS7Intents )
+ intentStates = []
+ for node in intents: # Iter through ONOS nodes
+ nodeStates = []
+ # Iter through intents of a node
+ for intent in json.loads( node ):
+ nodeStates.append( intent[ 'state' ] )
+ intentStates.append( nodeStates )
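+ # Tally how many of this node's intents are in each state, e.g. {'INSTALLED': 25}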
+ out = [ (i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
+ main.log.info( dict( out ) )
-
- # NOTE: Store has no durability, so intents are lost across system
- # restarts
- main.step( "Compare current intents with intents before the failure" )
- # NOTE: this requires case 5 to pass for intentState to be set.
- # maybe we should stop the test if that fails?
+ # NOTE: Store has no durability, so intents are lost across system
+ # restarts
+ main.step( "Compare current intents with intents before the failure" )
+ # NOTE: this requires case 5 to pass for intentState to be set.
+ # maybe we should stop the test if that fails?
+ sameIntents = main.TRUE
+ if intentState and intentState == ONOS1Intents:
sameIntents = main.TRUE
- if intentState and intentState == ONOS1Intents:
- sameIntents = main.TRUE
- main.log.report( "Intents are consistent with before failure" )
- # TODO: possibly the states have changed? we may need to figure out
- # what the aceptable states are
- else:
- try:
- main.log.warn( "ONOS1 intents: " )
- print json.dumps( json.loads( ONOS1Intents ),
- sort_keys=True, indent=4,
- separators=( ',', ': ' ) )
- except:
- pass
- sameIntents = main.FALSE
- utilities.assert_equals(
- expect=main.TRUE,
- actual=sameIntents,
- onpass="Intents are consistent with before failure",
- onfail="The Intents changed during failure" )
- intentCheck = intentCheck and sameIntents
+ main.log.report( "Intents are consistent with before failure" )
+ # TODO: possibly the states have changed? we may need to figure out
+ # what the acceptable states are
+ else:
+ try:
+ main.log.warn( "ONOS1 intents: " )
+ print json.dumps( json.loads( ONOS1Intents ),
+ sort_keys=True, indent=4,
+ separators=( ',', ': ' ) )
+ except:
+ pass
+ sameIntents = main.FALSE
+ utilities.assert_equals(
+ expect=main.TRUE,
+ actual=sameIntents,
+ onpass="Intents are consistent with before failure",
+ onfail="The Intents changed during failure" )
+ intentCheck = intentCheck and sameIntents
- main.step( "Get the OF Table entries and compare to before " +
- "component failure" )
- FlowTables = main.TRUE
- flows2 = []
- for i in range( 28 ):
- main.log.info( "Checking flow table on s" + str( i + 1 ) )
- tmpFlows = main.Mininet2.getFlowTable( 1.3, "s" + str( i + 1 ) )
- flows2.append( tmpFlows )
- tempResult = main.Mininet2.flowComp(
- flow1=flows[ i ],
- flow2=tmpFlows )
- FlowTables = FlowTables and tempResult
- if FlowTables == main.FALSE:
- main.log.info( "Differences in flow table for switch: s" +
- str( i + 1 ) )
- if FlowTables == main.TRUE:
- main.log.report( "No changes were found in the flow tables" )
- utilities.assert_equals(
- expect=main.TRUE,
- actual=FlowTables,
- onpass="No changes were found in the flow tables",
- onfail="Changes were found in the flow tables" )
- if topoResult == main.TRUE or ( whileTime > 10 ) :
- break
+ main.step( "Get the OF Table entries and compare to before " +
+ "component failure" )
+ FlowTables = main.TRUE
+ flows2 = []
+ for i in range( 28 ):
+ main.log.info( "Checking flow table on s" + str( i + 1 ) )
+ tmpFlows = main.Mininet2.getFlowTable( 1.3, "s" + str( i + 1 ) )
+ flows2.append( tmpFlows )
+ tempResult = main.Mininet2.flowComp(
+ flow1=flows[ i ],
+ flow2=tmpFlows )
+ FlowTables = FlowTables and tempResult
+ if FlowTables == main.FALSE:
+ main.log.info( "Differences in flow table for switch: s" +
+ str( i + 1 ) )
+ if FlowTables == main.TRUE:
+ main.log.report( "No changes were found in the flow tables" )
+ utilities.assert_equals(
+ expect=main.TRUE,
+ actual=FlowTables,
+ onpass="No changes were found in the flow tables",
+ onfail="Changes were found in the flow tables" )
main.step( "Check the continuous pings to ensure that no packets " +
"were dropped during component failure" )
@@ -1469,14 +1579,14 @@
onpass="Leadership election passed",
onfail="Something went wrong with Leadership election" )
- result = mastershipCheck and intentCheck and FlowTables and\
- ( not LossInPings ) and rolesNotNull and leaderResult
+ result = ( mastershipCheck and intentCheck and FlowTables and
+ ( not LossInPings ) and rolesNotNull and leaderResult )
result = int( result )
if result == main.TRUE:
main.log.report( "Constant State Tests Passed" )
utilities.assert_equals( expect=main.TRUE, actual=result,
- onpass="Constant State Tests Passed",
- onfail="Constant state tests failed" )
+ onpass="Constant State Tests Passed",
+ onfail="Constant state tests failed" )
def CASE8( self, main ):
"""
@@ -1516,6 +1626,7 @@
devicesResults = main.TRUE
portsResults = main.TRUE
linksResults = main.TRUE
+ hostsResults = main.TRUE
topoResult = main.FALSE
elapsed = 0
count = 0
@@ -1526,9 +1637,7 @@
count = count + 1
if count > 1:
# TODO: Depricate STS usage
- MNTopo = TestONTopology(
- main.Mininet1,
- ctrls )
+ MNTopo = TestONTopology( main.Mininet1, ctrls )
cliStart = time.time()
devices = []
devices.append( main.ONOScli1.devices() )
@@ -1546,13 +1655,15 @@
hosts.append( json.loads( main.ONOScli5.hosts() ) )
hosts.append( json.loads( main.ONOScli6.hosts() ) )
hosts.append( json.loads( main.ONOScli7.hosts() ) )
+ ipResult = main.TRUE
for controller in range( 0, len( hosts ) ):
controllerStr = str( controller + 1 )
for host in hosts[ controller ]:
- if host[ 'ips' ] == []:
+ if host is None or host.get( 'ips', [] ) == []:
main.log.error(
"DEBUG:Error with host ips on controller" +
controllerStr + ": " + str( host ) )
+ ipResult = main.FALSE
ports = []
ports.append( main.ONOScli1.ports() )
ports.append( main.ONOScli2.ports() )
@@ -1588,47 +1699,58 @@
controller ]:
currentDevicesResult = main.Mininet1.compareSwitches(
MNTopo,
- json.loads(
- devices[ controller ] ) )
+ json.loads( devices[ controller ] ) )
else:
currentDevicesResult = main.FALSE
utilities.assert_equals( expect=main.TRUE,
- actual=currentDevicesResult,
- onpass="ONOS" + controllerStr +
- " Switches view is correct",
- onfail="ONOS" + controllerStr +
- " Switches view is incorrect" )
+ actual=currentDevicesResult,
+ onpass="ONOS" + controllerStr +
+ " Switches view is correct",
+ onfail="ONOS" + controllerStr +
+ " Switches view is incorrect" )
if ports[ controller ] or "Error" not in ports[ controller ]:
currentPortsResult = main.Mininet1.comparePorts(
MNTopo,
- json.loads(
- ports[ controller ] ) )
+ json.loads( ports[ controller ] ) )
else:
currentPortsResult = main.FALSE
utilities.assert_equals( expect=main.TRUE,
- actual=currentPortsResult,
- onpass="ONOS" + controllerStr +
- " ports view is correct",
- onfail="ONOS" + controllerStr +
- " ports view is incorrect" )
+ actual=currentPortsResult,
+ onpass="ONOS" + controllerStr +
+ " ports view is correct",
+ onfail="ONOS" + controllerStr +
+ " ports view is incorrect" )
if links[ controller ] or "Error" not in links[ controller ]:
currentLinksResult = main.Mininet1.compareLinks(
MNTopo,
- json.loads(
- links[ controller ] ) )
+ json.loads( links[ controller ] ) )
else:
currentLinksResult = main.FALSE
utilities.assert_equals( expect=main.TRUE,
- actual=currentLinksResult,
- onpass="ONOS" + controllerStr +
- " links view is correct",
- onfail="ONOS" + controllerStr +
- " links view is incorrect" )
- devicesResults = devicesResults and currentDevicesResult
- portsResults = portsResults and currentPortsResult
- linksResults = linksResults and currentLinksResult
+ actual=currentLinksResult,
+ onpass="ONOS" + controllerStr +
+ " links view is correct",
+ onfail="ONOS" + controllerStr +
+ " links view is incorrect" )
+
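+ # Also check that the hosts ONOS reports actually exist in the Mininet topology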
+ if hosts[ controller ] or "Error" not in hosts[ controller ]:
+ currentHostsResult = main.Mininet1.compareHosts(
+ MNTopo, hosts[ controller ] )
+ else:
+ currentHostsResult = main.FALSE
+ utilities.assert_equals( expect=main.TRUE,
+ actual=currentHostsResult,
+ onpass="ONOS" + controllerStr +
+ " hosts exist in Mininet",
+ onfail="ONOS" + controllerStr +
+ " hosts don't match Mininet" )
+
+ devicesResults = devicesResults and currentDevicesResult
+ portsResults = portsResults and currentPortsResult
+ linksResults = linksResults and currentLinksResult
+ hostsResults = hostsResults and currentHostsResult
# Compare json objects for hosts and dataplane clusters
@@ -1685,17 +1807,19 @@
onfail="ONOS nodes have different views of clusters" )
# there should always only be one cluster
numClusters = len( json.loads( clusters[ 0 ] ) )
+ clusterResults = main.FALSE
+ if numClusters == 1:
+ clusterResults = main.TRUE
utilities.assert_equals(
expect=1,
actual=numClusters,
onpass="ONOS shows 1 SCC",
- onfail="ONOS shows " +
- str( numClusters ) +
- " SCCs" )
+ onfail="ONOS shows " + str( numClusters ) + " SCCs" )
topoResult = ( devicesResults and portsResults and linksResults
- and consistentHostsResult
- and consistentClustersResult )
+ and hostsResults and consistentHostsResult
+ and consistentClustersResult and clusterResults
+ and ipResult )
topoResult = topoResult and int( count <= 2 )
note = "note it takes about " + str( int( cliTime ) ) + \
@@ -1706,8 +1830,8 @@
str( note ) + " ): " + str( elapsed ) + " seconds, " +
str( count ) + " tries" )
utilities.assert_equals( expect=main.TRUE, actual=topoResult,
- onpass="Topology Check Test successful",
- onfail="Topology Check Test NOT successful" )
+ onpass="Topology Check Test successful",
+ onfail="Topology Check Test NOT successful" )
if topoResult == main.TRUE:
main.log.report( "ONOS topology view matches Mininet topology" )
@@ -1721,20 +1845,18 @@
linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
description = "Turn off a link to ensure that Link Discovery " +\
- "is working properly"
+ "is working properly"
main.log.report( description )
main.case( description )
main.step( "Kill Link between s3 and s28" )
LinkDown = main.Mininet1.link( END1="s3", END2="s28", OPTION="down" )
- main.log.info(
- "Waiting " +
- str( linkSleep ) +
- " seconds for link down to be discovered" )
+ main.log.info( "Waiting " + str( linkSleep ) +
+ " seconds for link down to be discovered" )
time.sleep( linkSleep )
utilities.assert_equals( expect=main.TRUE, actual=LinkDown,
- onpass="Link down succesful",
- onfail="Failed to bring link down" )
+ onpass="Link down succesful",
+ onfail="Failed to bring link down" )
# TODO do some sort of check here
def CASE10( self, main ):
@@ -1753,14 +1875,12 @@
main.step( "Bring link between s3 and s28 back up" )
LinkUp = main.Mininet1.link( END1="s3", END2="s28", OPTION="up" )
- main.log.info(
- "Waiting " +
- str( linkSleep ) +
- " seconds for link up to be discovered" )
+ main.log.info( "Waiting " + str( linkSleep ) +
+ " seconds for link up to be discovered" )
time.sleep( linkSleep )
utilities.assert_equals( expect=main.TRUE, actual=LinkUp,
- onpass="Link up succesful",
- onfail="Failed to bring link up" )
+ onpass="Link up succesful",
+ onfail="Failed to bring link up" )
# TODO do some sort of check here
def CASE11( self, main ):
@@ -1792,8 +1912,8 @@
if device and device[ 'available' ] is False:
result = main.TRUE
utilities.assert_equals( expect=main.TRUE, actual=result,
- onpass="Kill switch succesful",
- onfail="Failed to kill switch?" )
+ onpass="Kill switch succesful",
+ onfail="Failed to kill switch?" )
def CASE12( self, main ):
"""
@@ -1816,27 +1936,24 @@
# TODO: New dpid or same? Ask Thomas?
for peer in links:
main.Mininet1.addLink( switch, peer )
- main.Mininet1.assignSwController(
- sw=switch.split( 's' )[ 1 ],
- count=numControllers,
- ip1=ONOS1Ip,
- port1=ONOS1Port,
- ip2=ONOS2Ip,
- port2=ONOS2Port,
- ip3=ONOS3Ip,
- port3=ONOS3Port,
- ip4=ONOS4Ip,
- port4=ONOS4Port,
- ip5=ONOS5Ip,
- port5=ONOS5Port,
- ip6=ONOS6Ip,
- port6=ONOS6Port,
- ip7=ONOS7Ip,
- port7=ONOS7Port )
- main.log.info(
- "Waiting " +
- str( switchSleep ) +
- " seconds for switch up to be discovered" )
+ main.Mininet1.assignSwController( sw=switch.split( 's' )[ 1 ],
+ count=numControllers,
+ ip1=ONOS1Ip,
+ port1=ONOS1Port,
+ ip2=ONOS2Ip,
+ port2=ONOS2Port,
+ ip3=ONOS3Ip,
+ port3=ONOS3Port,
+ ip4=ONOS4Ip,
+ port4=ONOS4Port,
+ ip5=ONOS5Ip,
+ port5=ONOS5Port,
+ ip6=ONOS6Ip,
+ port6=ONOS6Port,
+ ip7=ONOS7Ip,
+ port7=ONOS7Port )
+ main.log.info( "Waiting " + str( switchSleep ) +
+ " seconds for switch up to be discovered" )
time.sleep( switchSleep )
device = main.ONOScli1.getDevice( dpid=switchDPID )
# Peek at the deleted switch
@@ -1845,8 +1962,8 @@
if device and device[ 'available' ]:
result = main.TRUE
utilities.assert_equals( expect=main.TRUE, actual=result,
- onpass="add switch succesful",
- onfail="Failed to add switch?" )
+ onpass="add switch succesful",
+ onfail="Failed to add switch?" )
def CASE13( self, main ):
"""
@@ -1908,6 +2025,8 @@
dstDir + str( testname ) +
"-ONOS" + str( i + 1 ) + "-" +
f )
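+ # Wait for the bench shell prompt to return before continuing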
+ main.ONOSbench.handle.expect( "\$" )
+
# std*.log's
# NOTE: must end in /
logFolder = "/opt/onos/var/"
@@ -1923,15 +2042,17 @@
dstDir + str( testname ) +
"-ONOS" + str( i + 1 ) + "-" +
f )
+ main.ONOSbench.handle.expect( "\$" )
# sleep so scp can finish
time.sleep( 10 )
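+ # Tear down the Mininet topology now that the logs have been collected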
+ main.Mininet1.stopNet()
main.step( "Packing and rotating pcap archives" )
os.system( "~/TestON/dependencies/rotate.sh " + str( testname ) )
# TODO: actually check something here
utilities.assert_equals( expect=main.TRUE, actual=main.TRUE,
- onpass="Test cleanup successful",
- onfail="Test cleanup NOT successful" )
+ onpass="Test cleanup successful",
+ onfail="Test cleanup NOT successful" )
def CASE14( self, main ):
"""
@@ -2032,9 +2153,7 @@
elif leader is None or leader == main.FALSE:
main.log.report(
"Leader for the election app should be an ONOS node," +
- "instead got '" +
- str( leader ) +
- "'" )
+ "instead got '" + str( leader ) + "'" )
leaderResult = main.FALSE
withdrawResult = oldLeader.electionTestWithdraw()
utilities.assert_equals(
@@ -2052,10 +2171,8 @@
for leaderN in leaderList:
if leaderN == leader:
main.log.report(
- "ONOS" +
- str( controller ) +
- " still sees " +
- str( leader ) +
+ "ONOS" + str( controller ) +
+ " still sees " + str( leader ) +
" as leader after they withdrew" )
leaderResult = main.FALSE
elif leaderN == main.FALSE:
@@ -2089,8 +2206,8 @@
onpass="Leadership election passed",
onfail="Something went wrong with Leadership election" )
- main.step(
- "Run for election on old leader( just so everyone is in the hat )" )
+ main.step( "Run for election on old leader( just so everyone " +
+ "is in the hat )" )
runResult = oldLeader.electionTestRun()
utilities.assert_equals(
expect=main.TRUE,
diff --git a/TestON/tests/HATestSanity/HATestSanity.py b/TestON/tests/HATestSanity/HATestSanity.py
index 590ff56..dfa6a03 100644
--- a/TestON/tests/HATestSanity/HATestSanity.py
+++ b/TestON/tests/HATestSanity/HATestSanity.py
@@ -32,14 +32,18 @@
CASE1 is to compile ONOS and push it to the test machines
Startup sequence:
- git pull
- mvn clean install
- onos-package
cell <name>
onos-verify-cell
NOTE: temporary - onos-remove-raft-logs
+ onos-uninstall
+ start mininet
+ git pull
+ mvn clean install
+ onos-package
onos-install -f
onos-wait-for-start
+ start cli sessions
+ start tcpdump
"""
main.log.report( "ONOS HA Sanity test - initialization" )
main.case( "Setting up test environment" )
@@ -109,8 +113,7 @@
main.step( "Compiling the latest version of ONOS" )
if PULLCODE:
- # TODO Configure branch in params
- main.step( "Git checkout and pull master" )
+ main.step( "Git checkout and pull " + gitBranch )
main.ONOSbench.gitCheckout( gitBranch )
gitPullResult = main.ONOSbench.gitPull()
@@ -145,7 +148,6 @@
and onos7InstallResult
main.step( "Checking if ONOS is up yet" )
- # TODO check bundle:list?
for i in range( 2 ):
onos1Isup = main.ONOSbench.isup( ONOS1Ip )
if not onos1Isup:
@@ -209,8 +211,8 @@
and onosIsupResult and cliResults )
utilities.assert_equals( expect=main.TRUE, actual=case1Result,
- onpass="Test startup successful",
- onfail="Test startup NOT successful" )
+ onpass="Test startup successful",
+ onfail="Test startup NOT successful" )
if case1Result == main.FALSE:
main.cleanup()
@@ -266,118 +268,131 @@
# Manually assign mastership to the controller we want
roleCall = main.TRUE
roleCheck = main.TRUE
-
- # Assign switch
- deviceId = main.ONOScli1.getDevice( "1000" ).get( 'id' )
- roleCall = roleCall and main.ONOScli1.deviceRole(
- deviceId,
- ONOS1Ip )
- # Check assignment
- if ONOS1Ip in main.ONOScli1.getRole( deviceId ).get( 'master' ):
- roleCheck = roleCheck and main.TRUE
- else:
- roleCheck = roleCheck and main.FALSE
-
- # Assign switch
- deviceId = main.ONOScli1.getDevice( "2800" ).get( 'id' )
- roleCall = roleCall and main.ONOScli1.deviceRole(
- deviceId,
- ONOS1Ip )
- # Check assignment
- if ONOS1Ip in main.ONOScli1.getRole( deviceId ).get( 'master' ):
- roleCheck = roleCheck and main.TRUE
- else:
- roleCheck = roleCheck and main.FALSE
-
- # Assign switch
- deviceId = main.ONOScli1.getDevice( "2000" ).get( 'id' )
- roleCall = roleCall and main.ONOScli1.deviceRole(
- deviceId,
- ONOS2Ip )
- # Check assignment
- if ONOS2Ip in main.ONOScli1.getRole( deviceId ).get( 'master' ):
- roleCheck = roleCheck and main.TRUE
- else:
- roleCheck = roleCheck and main.FALSE
-
- # Assign switch
- deviceId = main.ONOScli1.getDevice( "3000" ).get( 'id' )
- roleCall = roleCall and main.ONOScli1.deviceRole(
- deviceId,
- ONOS2Ip )
- # Check assignment
- if ONOS2Ip in main.ONOScli1.getRole( deviceId ).get( 'master' ):
- roleCheck = roleCheck and main.TRUE
- else:
- roleCheck = roleCheck and main.FALSE
-
- # Assign switch
- deviceId = main.ONOScli1.getDevice( "5000" ).get( 'id' )
- roleCall = roleCall and main.ONOScli1.deviceRole(
- deviceId,
- ONOS3Ip )
- # Check assignment
- if ONOS3Ip in main.ONOScli1.getRole( deviceId ).get( 'master' ):
- roleCheck = roleCheck and main.TRUE
- else:
- roleCheck = roleCheck and main.FALSE
-
- # Assign switch
- deviceId = main.ONOScli1.getDevice( "6000" ).get( 'id' )
- roleCall = roleCall and main.ONOScli1.deviceRole(
- deviceId,
- ONOS3Ip )
- # Check assignment
- if ONOS3Ip in main.ONOScli1.getRole( deviceId ).get( 'master' ):
- roleCheck = roleCheck and main.TRUE
- else:
- roleCheck = roleCheck and main.FALSE
-
- # Assign switch
- deviceId = main.ONOScli1.getDevice( "3004" ).get( 'id' )
- roleCall = roleCall and main.ONOScli1.deviceRole(
- deviceId,
- ONOS4Ip )
- # Check assignment
- if ONOS4Ip in main.ONOScli1.getRole( deviceId ).get( 'master' ):
- roleCheck = roleCheck and main.TRUE
- else:
- roleCheck = roleCheck and main.FALSE
-
- for i in range( 8, 18 ):
- dpid = '3' + str( i ).zfill( 3 )
- deviceId = main.ONOScli1.getDevice( dpid ).get( 'id' )
+ try:
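+ # getDevice() may return None before ONOS discovers a switch, so guard every lookup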
+ # Assign switch
+ deviceId = main.ONOScli1.getDevice( "1000" ).get( 'id' )
+ assert deviceId, "No device id for s1 in ONOS"
roleCall = roleCall and main.ONOScli1.deviceRole(
deviceId,
- ONOS5Ip )
+ ONOS1Ip )
# Check assignment
- if ONOS5Ip in main.ONOScli1.getRole( deviceId ).get( 'master' ):
+ if ONOS1Ip in main.ONOScli1.getRole( deviceId ).get( 'master' ):
roleCheck = roleCheck and main.TRUE
else:
roleCheck = roleCheck and main.FALSE
- deviceId = main.ONOScli1.getDevice( "6007" ).get( 'id' )
- roleCall = roleCall and main.ONOScli1.deviceRole(
- deviceId,
- ONOS6Ip )
- # Check assignment
- if ONOS6Ip in main.ONOScli1.getRole( deviceId ).get( 'master' ):
- roleCheck = roleCheck and main.TRUE
- else:
- roleCheck = roleCheck and main.FALSE
-
- for i in range( 18, 28 ):
- dpid = '6' + str( i ).zfill( 3 )
- deviceId = main.ONOScli1.getDevice( dpid ).get( 'id' )
+ # Assign switch
+ deviceId = main.ONOScli1.getDevice( "2800" ).get( 'id' )
+ assert deviceId, "No device id for s28 in ONOS"
roleCall = roleCall and main.ONOScli1.deviceRole(
deviceId,
- ONOS7Ip )
+ ONOS1Ip )
# Check assignment
- if ONOS7Ip in main.ONOScli1.getRole( deviceId ).get( 'master' ):
+ if ONOS1Ip in main.ONOScli1.getRole( deviceId ).get( 'master' ):
roleCheck = roleCheck and main.TRUE
else:
roleCheck = roleCheck and main.FALSE
+ # Assign switch
+ deviceId = main.ONOScli1.getDevice( "2000" ).get( 'id' )
+ assert deviceId, "No device id for s2 in ONOS"
+ roleCall = roleCall and main.ONOScli1.deviceRole(
+ deviceId,
+ ONOS2Ip )
+ # Check assignment
+ if ONOS2Ip in main.ONOScli1.getRole( deviceId ).get( 'master' ):
+ roleCheck = roleCheck and main.TRUE
+ else:
+ roleCheck = roleCheck and main.FALSE
+
+ # Assign switch
+ deviceId = main.ONOScli1.getDevice( "3000" ).get( 'id' )
+ assert deviceId, "No device id for s3 in ONOS"
+ roleCall = roleCall and main.ONOScli1.deviceRole(
+ deviceId,
+ ONOS2Ip )
+ # Check assignment
+ if ONOS2Ip in main.ONOScli1.getRole( deviceId ).get( 'master' ):
+ roleCheck = roleCheck and main.TRUE
+ else:
+ roleCheck = roleCheck and main.FALSE
+
+ # Assign switch
+ deviceId = main.ONOScli1.getDevice( "5000" ).get( 'id' )
+ assert deviceId, "No device id for s5 in ONOS"
+ roleCall = roleCall and main.ONOScli1.deviceRole(
+ deviceId,
+ ONOS3Ip )
+ # Check assignment
+ if ONOS3Ip in main.ONOScli1.getRole( deviceId ).get( 'master' ):
+ roleCheck = roleCheck and main.TRUE
+ else:
+ roleCheck = roleCheck and main.FALSE
+
+ # Assign switch
+ deviceId = main.ONOScli1.getDevice( "6000" ).get( 'id' )
+ assert deviceId, "No device id for s6 in ONOS"
+ roleCall = roleCall and main.ONOScli1.deviceRole(
+ deviceId,
+ ONOS3Ip )
+ # Check assignment
+ if ONOS3Ip in main.ONOScli1.getRole( deviceId ).get( 'master' ):
+ roleCheck = roleCheck and main.TRUE
+ else:
+ roleCheck = roleCheck and main.FALSE
+
+ # Assign switch
+ deviceId = main.ONOScli1.getDevice( "3004" ).get( 'id' )
+ assert deviceId, "No device id for s4 in ONOS"
+ roleCall = roleCall and main.ONOScli1.deviceRole(
+ deviceId,
+ ONOS4Ip )
+ # Check assignment
+ if ONOS4Ip in main.ONOScli1.getRole( deviceId ).get( 'master' ):
+ roleCheck = roleCheck and main.TRUE
+ else:
+ roleCheck = roleCheck and main.FALSE
+
+ for i in range( 8, 18 ):
+ dpid = '3' + str( i ).zfill( 3 )
+ deviceId = main.ONOScli1.getDevice( dpid ).get( 'id' )
+ assert deviceId, "No device id for s%i in ONOS" % i
+ roleCall = roleCall and main.ONOScli1.deviceRole(
+ deviceId,
+ ONOS5Ip )
+ # Check assignment
+ if ONOS5Ip in main.ONOScli1.getRole( deviceId ).get( 'master' ):
+ roleCheck = roleCheck and main.TRUE
+ else:
+ roleCheck = roleCheck and main.FALSE
+
+ deviceId = main.ONOScli1.getDevice( "6007" ).get( 'id' )
+ assert deviceId, "No device id for s7 in ONOS"
+ roleCall = roleCall and main.ONOScli1.deviceRole(
+ deviceId,
+ ONOS6Ip )
+ # Check assignment
+ if ONOS6Ip in main.ONOScli1.getRole( deviceId ).get( 'master' ):
+ roleCheck = roleCheck and main.TRUE
+ else:
+ roleCheck = roleCheck and main.FALSE
+
+ for i in range( 18, 28 ):
+ dpid = '6' + str( i ).zfill( 3 )
+ deviceId = main.ONOScli1.getDevice( dpid ).get( 'id' )
+ assert deviceId, "No device id for s%i in ONOS" % i
+ roleCall = roleCall and main.ONOScli1.deviceRole(
+ deviceId,
+ ONOS7Ip )
+ # Check assignment
+ if ONOS7Ip in main.ONOScli1.getRole( deviceId ).get( 'master' ):
+ roleCheck = roleCheck and main.TRUE
+ else:
+ roleCheck = roleCheck and main.FALSE
+ except ( AttributeError, AssertionError ):
+ main.log.exception( "Something is wrong with ONOS device view" )
+ main.log.info( main.ONOScli1.devices() )
+
utilities.assert_equals(
expect=main.TRUE,
actual=roleCall,
@@ -401,6 +416,7 @@
Assign intents
"""
import time
+ import json
main.log.report( "Adding host intents" )
main.case( "Adding host Intents" )
@@ -442,8 +458,11 @@
time.sleep( 10 )
main.step( "Add host intents" )
+ intentIds = []
# TODO: move the host numbers to params
+ # Maybe look at all the paths we ping?
intentAddResult = True
+ hostResult = main.TRUE
for i in range( 8, 18 ):
main.log.info( "Adding host intent between h" + str( i ) +
" and h" + str( i + 10 ) )
@@ -460,46 +479,100 @@
host1Id = host1Dict.get( 'id', None )
host2Id = host2Dict.get( 'id', None )
if host1Id and host2Id:
- # TODO: distribute the intents across onos nodes
- tmpResult = main.ONOScli1.addHostIntent(
+ '''
+ nodeNum = ( i % 7 ) + 1
+ node = getattr( main, ( 'ONOScli' + str( nodeNum ) ) )
+ tmpId = node.addHostIntent(
+ '''
+ tmpId = main.ONOScli1.addHostIntent(
host1Id,
host2Id )
+ main.log.info( "Added intent with id: " + tmpId )
+ intentIds.append( tmpId )
else:
main.log.error( "Error, getHost() failed" )
main.log.warn( json.dumps( json.loads( main.ONOScli1.hosts() ),
sort_keys=True,
indent=4,
separators=( ',', ': ' ) ) )
- tmpResult = main.FALSE
- intentAddResult = bool( pingResult and intentAddResult
- and tmpResult )
- # TODO Check that intents were added?
+ hostResult = main.FALSE
+ onosIds = main.ONOScli1.getAllIntentsId()
+ main.log.info( "Submitted intents: " + str( intentIds ) )
+ main.log.info( "Intents in ONOS: " + str( onosIds ) )
+ for intent in intentIds:
+ if intent in onosIds:
+ pass # intent submitted is still in onos
+ else:
+ intentAddResult = False
# Print the intent states
- intents = main.ONOScli1.intents( )
+ intents = main.ONOScli1.intents()
intentStates = []
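+ # Log a count/id/state table so stuck or missing intents are easy to spot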
+ main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
+ count = 0
for intent in json.loads( intents ): # Iter through intents of a node
- intentStates.append( intent.get( 'state', None ) )
- out = [ (i, intentStates.count( i ) ) for i in set( intentStates ) ]
- main.log.info( dict( out ) )
-
+ state = intent.get( 'state', None )
+ intentId = intent.get( 'id', None )
+ intentStates.append( ( intentId, state ) )
+ # add submitted intents not in the store
+ tmplist = [ i for i, s in intentStates ]
+ missingIntents = False
+ for i in intentIds:
+ if i not in tmplist:
+ intentStates.append( ( i, " - " ) )
+ missingIntents = True
+ intentStates.sort()
+ for i, s in intentStates:
+ count += 1
+ main.log.info( "%-6s%-15s%-15s" %
+ ( str( count ), str( i ), str( s ) ) )
+ intentAddResult = bool( pingResult and hostResult and intentAddResult
+ and not missingIntents )
utilities.assert_equals(
expect=True,
actual=intentAddResult,
onpass="Pushed host intents to ONOS",
onfail="Error in pushing host intents to ONOS" )
- # TODO Check if intents all exist in datastore
+
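+ # If any intents were missing, wait a minute and re-check in case they were just slow to appear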
+ if not intentAddResult:
+ import time
+ main.log.info( "Sleeping 60 seconds to see if intents are found" )
+ time.sleep( 60 )
+ onosIds = main.ONOScli1.getAllIntentsId()
+ main.log.info( "Submitted intents: " + str( intentIds ) )
+ main.log.info( "Intents in ONOS: " + str( onosIds ) )
+ # Print the intent states
+ intents = main.ONOScli1.intents()
+ intentStates = []
+ main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
+ count = 0
+ for intent in json.loads( intents ):
+ # Iter through intents of a node
+ state = intent.get( 'state', None )
+ intentId = intent.get( 'id', None )
+ intentStates.append( ( intentId, state ) )
+ # add submitted intents not in the store
+ tmplist = [ i for i, s in intentStates ]
+ for i in intentIds:
+ if i not in tmplist:
+ intentStates.append( ( i, " - " ) )
+ intentStates.sort()
+ for i, s in intentStates:
+ count += 1
+ main.log.info( "%-6s%-15s%-15s" %
+ ( str( count ), str( i ), str( s ) ) )
def CASE4( self, main ):
"""
Ping across added host intents
"""
+ import json
description = " Ping across added host intents"
main.log.report( description )
main.case( description )
PingResult = main.TRUE
for i in range( 8, 18 ):
- ping = main.Mininet1.pingHost(
- src="h" + str( i ), target="h" + str( i + 10 ) )
+ ping = main.Mininet1.pingHost( src="h" + str( i ),
+ target="h" + str( i + 10 ) )
PingResult = PingResult and ping
if ping == main.FALSE:
main.log.warn( "Ping failed between h" + str( i ) +
@@ -510,7 +583,7 @@
if PingResult == main.FALSE:
main.log.report(
"Intents have not been installed correctly, pings failed." )
- #TODO: pretty print
+ # TODO: pretty print
main.log.warn( "ONSO1 intents: " )
main.log.warn( json.dumps( json.loads( main.ONOScli1.intents() ),
sort_keys=True,
@@ -524,6 +597,22 @@
actual=PingResult,
onpass="Intents have been installed correctly and pings work",
onfail="Intents have not been installed correctly, pings failed." )
+ if PingResult is not main.TRUE:
+ # Print the intent states
+ intents = main.ONOScli1.intents()
+ intentStates = []
+ main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
+ count = 0
+ # Iter through intents of a node
+ for intent in json.loads( intents ):
+ state = intent.get( 'state', None )
+ intentId = intent.get( 'id', None )
+ intentStates.append( ( intentId, state ) )
+ intentStates.sort()
+ for i, s in intentStates:
+ count += 1
+ main.log.info( "%-6s%-15s%-15s" %
+ ( str( count ), str( i ), str( s ) ) )
def CASE5( self, main ):
"""
@@ -787,29 +876,39 @@
main.step( "Get the flows from each controller" )
global flowState
flowState = []
- ONOS1Flows = main.ONOScli1.flows( jsonFormat=True )
- ONOS2Flows = main.ONOScli2.flows( jsonFormat=True )
- ONOS3Flows = main.ONOScli3.flows( jsonFormat=True )
- ONOS4Flows = main.ONOScli4.flows( jsonFormat=True )
- ONOS5Flows = main.ONOScli5.flows( jsonFormat=True )
- ONOS6Flows = main.ONOScli6.flows( jsonFormat=True )
- ONOS7Flows = main.ONOScli7.flows( jsonFormat=True )
- ONOS1FlowsJson = json.loads( ONOS1Flows )
- ONOS2FlowsJson = json.loads( ONOS2Flows )
- ONOS3FlowsJson = json.loads( ONOS3Flows )
- ONOS4FlowsJson = json.loads( ONOS4Flows )
- ONOS5FlowsJson = json.loads( ONOS5Flows )
- ONOS6FlowsJson = json.loads( ONOS6Flows )
- ONOS7FlowsJson = json.loads( ONOS7Flows )
flowCheck = main.FALSE
- if "Error" in ONOS1Flows or not ONOS1Flows\
- or "Error" in ONOS2Flows or not ONOS2Flows\
- or "Error" in ONOS3Flows or not ONOS3Flows\
- or "Error" in ONOS4Flows or not ONOS4Flows\
- or "Error" in ONOS5Flows or not ONOS5Flows\
- or "Error" in ONOS6Flows or not ONOS6Flows\
- or "Error" in ONOS7Flows or not ONOS7Flows:
- main.log.report( "Error in getting ONOS intents" )
+ try:
+ ONOS1Flows = main.ONOScli1.flows( jsonFormat=True )
+ ONOS2Flows = main.ONOScli2.flows( jsonFormat=True )
+ ONOS3Flows = main.ONOScli3.flows( jsonFormat=True )
+ ONOS4Flows = main.ONOScli4.flows( jsonFormat=True )
+ ONOS5Flows = main.ONOScli5.flows( jsonFormat=True )
+ ONOS6Flows = main.ONOScli6.flows( jsonFormat=True )
+ ONOS7Flows = main.ONOScli7.flows( jsonFormat=True )
+ assert ONOS1Flows, "ONOS1 Flows should not be empty"
+ assert ONOS2Flows, "ONOS2 Flows should not be empty"
+ assert ONOS3Flows, "ONOS3 Flows should not be empty"
+ assert ONOS4Flows, "ONOS4 Flows should not be empty"
+ assert ONOS5Flows, "ONOS5 Flows should not be empty"
+ assert ONOS6Flows, "ONOS6 Flows should not be empty"
+ assert ONOS7Flows, "ONOS7 Flows should not be empty"
+ assert "Error" not in ONOS1Flows, "ONOS1 Flows contains 'Error'"
+ assert "Error" not in ONOS2Flows, "ONOS2 Flows contains 'Error'"
+ assert "Error" not in ONOS3Flows, "ONOS3 Flows contains 'Error'"
+ assert "Error" not in ONOS4Flows, "ONOS4 Flows contains 'Error'"
+ assert "Error" not in ONOS5Flows, "ONOS5 Flows contains 'Error'"
+ assert "Error" not in ONOS6Flows, "ONOS6 Flows contains 'Error'"
+ assert "Error" not in ONOS7Flows, "ONOS7 Flows contains 'Error'"
+ ONOS1FlowsJson = json.loads( ONOS1Flows )
+ ONOS2FlowsJson = json.loads( ONOS2Flows )
+ ONOS3FlowsJson = json.loads( ONOS3Flows )
+ ONOS4FlowsJson = json.loads( ONOS4Flows )
+ ONOS5FlowsJson = json.loads( ONOS5Flows )
+ ONOS6FlowsJson = json.loads( ONOS6Flows )
+ ONOS7FlowsJson = json.loads( ONOS7Flows )
+ except ( ValueError, AssertionError ): # From json.loads, or asserts
+ main.log.exception( "One or more 'flows' responses from " +
+ "ONOS couldn't be decoded." )
main.log.warn( "ONOS1 flows repsponse: " + ONOS1Flows )
main.log.warn( "ONOS2 flows repsponse: " + ONOS2Flows )
main.log.warn( "ONOS3 flows repsponse: " + ONOS3Flows )
@@ -817,38 +916,48 @@
main.log.warn( "ONOS5 flows repsponse: " + ONOS5Flows )
main.log.warn( "ONOS6 flows repsponse: " + ONOS6Flows )
main.log.warn( "ONOS7 flows repsponse: " + ONOS7Flows )
- elif len( ONOS1FlowsJson ) == len( ONOS2FlowsJson )\
- and len( ONOS1FlowsJson ) == len( ONOS3FlowsJson )\
- and len( ONOS1FlowsJson ) == len( ONOS4FlowsJson )\
- and len( ONOS1FlowsJson ) == len( ONOS5FlowsJson )\
- and len( ONOS1FlowsJson ) == len( ONOS6FlowsJson )\
- and len( ONOS1FlowsJson ) == len( ONOS7FlowsJson ):
+ else: # No exceptions
+ if len( ONOS1FlowsJson ) == len( ONOS2FlowsJson )\
+ and len( ONOS1FlowsJson ) == len( ONOS3FlowsJson )\
+ and len( ONOS1FlowsJson ) == len( ONOS4FlowsJson )\
+ and len( ONOS1FlowsJson ) == len( ONOS5FlowsJson )\
+ and len( ONOS1FlowsJson ) == len( ONOS6FlowsJson )\
+ and len( ONOS1FlowsJson ) == len( ONOS7FlowsJson ):
# TODO: Do a better check, maybe compare flows on switches?
- flowState = ONOS1Flows
- flowCheck = main.TRUE
- main.log.report( "Flow count is consistent across all ONOS nodes" )
- else:
- main.log.warn( "ONOS1 flows: " +
- json.dumps( ONOS1FlowsJson, sort_keys=True,
- indent=4, separators=( ',', ': ' ) ) )
- main.log.warn( "ONOS2 flows: " +
- json.dumps( ONOS2FlowsJson, sort_keys=True,
- indent=4, separators=( ',', ': ' ) ) )
- main.log.warn( "ONOS3 flows: " +
- json.dumps( ONOS3FlowsJson, sort_keys=True,
- indent=4, separators=( ',', ': ' ) ) )
- main.log.warn( "ONOS4 flows: " +
- json.dumps( ONOS4FlowsJson, sort_keys=True,
- indent=4, separators=( ',', ': ' ) ) )
- main.log.warn( "ONOS5 flows: " +
- json.dumps( ONOS5FlowsJson, sort_keys=True,
- indent=4, separators=( ',', ': ' ) ) )
- main.log.warn( "ONOS6 flows: " +
- json.dumps( ONOS6FlowsJson, sort_keys=True,
- indent=4, separators=( ',', ': ' ) ) )
- main.log.warn( "ONOS7 flows: " +
- json.dumps( ONOS7FlowsJson, sort_keys=True,
- indent=4, separators=( ',', ': ' ) ) )
+ # NOTE Possible issue with this not always being set?
+ flowState = ONOS1Flows
+ flowCheck = main.TRUE
+ main.log.report( "Flow count is consistent across all" +
+ " ONOS nodes" )
+ else:
+ main.log.warn( "ONOS1 flows: " +
+ json.dumps( ONOS1FlowsJson, sort_keys=True,
+ indent=4,
+ separators=( ',', ': ' ) ) )
+ main.log.warn( "ONOS2 flows: " +
+ json.dumps( ONOS2FlowsJson, sort_keys=True,
+ indent=4,
+ separators=( ',', ': ' ) ) )
+ main.log.warn( "ONOS3 flows: " +
+ json.dumps( ONOS3FlowsJson, sort_keys=True,
+ indent=4,
+ separators=( ',', ': ' ) ) )
+ main.log.warn( "ONOS4 flows: " +
+ json.dumps( ONOS4FlowsJson, sort_keys=True,
+ indent=4,
+ separators=( ',', ': ' ) ) )
+ main.log.warn( "ONOS5 flows: " +
+ json.dumps( ONOS5FlowsJson, sort_keys=True,
+ indent=4,
+ separators=( ',', ': ' ) ) )
+ main.log.warn( "ONOS6 flows: " +
+ json.dumps( ONOS6FlowsJson, sort_keys=True,
+ indent=4,
+ separators=( ',', ': ' ) ) )
+ main.log.warn( "ONOS7 flows: " +
+ json.dumps( ONOS7FlowsJson, sort_keys=True,
+ indent=4,
+ separators=( ',', ': ' ) ) )
utilities.assert_equals(
expect=main.TRUE,
actual=flowCheck,
@@ -860,7 +969,9 @@
flows = []
for i in range( 1, 29 ):
flows.append( main.Mininet2.getFlowTable( 1.3, "s" + str( i ) ) )
-
+ if flowCheck == main.FALSE:
+ for table in flows:
+ main.log.warn( table )
# TODO: Compare switch flow tables with ONOS flow tables
main.step( "Start continuous pings" )
@@ -934,13 +1045,13 @@
devices.append( main.ONOScli6.devices() )
devices.append( main.ONOScli7.devices() )
hosts = []
- hosts.append( main.ONOScli1.hosts() )
- hosts.append( main.ONOScli2.hosts() )
- hosts.append( main.ONOScli3.hosts() )
- hosts.append( main.ONOScli4.hosts() )
- hosts.append( main.ONOScli5.hosts() )
- hosts.append( main.ONOScli6.hosts() )
- hosts.append( main.ONOScli7.hosts() )
+ hosts.append( json.loads( main.ONOScli1.hosts() ) )
+ hosts.append( json.loads( main.ONOScli2.hosts() ) )
+ hosts.append( json.loads( main.ONOScli3.hosts() ) )
+ hosts.append( json.loads( main.ONOScli4.hosts() ) )
+ hosts.append( json.loads( main.ONOScli5.hosts() ) )
+ hosts.append( json.loads( main.ONOScli6.hosts() ) )
+ hosts.append( json.loads( main.ONOScli7.hosts() ) )
ports = []
ports.append( main.ONOScli1.ports() )
ports.append( main.ONOScli2.ports() )
@@ -994,6 +1105,21 @@
onpass="Hosts view is consistent across all ONOS nodes",
onfail="ONOS nodes have different views of hosts" )
+ ipResult = main.TRUE
+ for controller in range( 0, len( hosts ) ):
+ controllerStr = str( controller + 1 )
+ for host in hosts[ controller ]:
+ if host.get( 'ips', [] ) == []:
+ main.log.error(
+ "DEBUG:Error with host ips on controller" +
+ controllerStr + ": " + str( host ) )
+ ipResult = main.FALSE
+ utilities.assert_equals(
+ expect=main.TRUE,
+ actual=ipResult,
+ onpass="The ips of the hosts aren't empty",
+ onfail="The ip of at least one host is missing" )
+
# Strongly connected clusters of devices
consistentClustersResult = main.TRUE
for controller in range( len( clusters ) ):
@@ -1020,13 +1146,14 @@
onfail="ONOS nodes have different views of clusters" )
# there should always only be one cluster
numClusters = len( json.loads( clusters[ 0 ] ) )
+ clusterResults = main.FALSE
+ if numClusters == 1:
+ clusterResults = main.TRUE
utilities.assert_equals(
expect=1,
actual=numClusters,
onpass="ONOS shows 1 SCC",
- onfail="ONOS shows " +
- str( numClusters ) +
- " SCCs" )
+ onfail="ONOS shows " + str( numClusters ) + " SCCs" )
main.step( "Comparing ONOS topology to MN" )
devicesResults = main.TRUE
@@ -1042,11 +1169,11 @@
else:
currentDevicesResult = main.FALSE
utilities.assert_equals( expect=main.TRUE,
- actual=currentDevicesResult,
- onpass="ONOS" + controllerStr +
- " Switches view is correct",
- onfail="ONOS" + controllerStr +
- " Switches view is incorrect" )
+ actual=currentDevicesResult,
+ onpass="ONOS" + controllerStr +
+ " Switches view is correct",
+ onfail="ONOS" + controllerStr +
+ " Switches view is incorrect" )
if ports[ controller ] or "Error" not in ports[ controller ]:
currentPortsResult = main.Mininet1.comparePorts(
@@ -1056,11 +1183,11 @@
else:
currentPortsResult = main.FALSE
utilities.assert_equals( expect=main.TRUE,
- actual=currentPortsResult,
- onpass="ONOS" + controllerStr +
- " ports view is correct",
- onfail="ONOS" + controllerStr +
- " ports view is incorrect" )
+ actual=currentPortsResult,
+ onpass="ONOS" + controllerStr +
+ " ports view is correct",
+ onfail="ONOS" + controllerStr +
+ " ports view is incorrect" )
if links[ controller ] or "Error" not in links[ controller ]:
currentLinksResult = main.Mininet1.compareLinks(
@@ -1070,28 +1197,29 @@
else:
currentLinksResult = main.FALSE
utilities.assert_equals( expect=main.TRUE,
- actual=currentLinksResult,
- onpass="ONOS" + controllerStr +
- " links view is correct",
- onfail="ONOS" + controllerStr +
- " links view is incorrect" )
+ actual=currentLinksResult,
+ onpass="ONOS" + controllerStr +
+ " links view is correct",
+ onfail="ONOS" + controllerStr +
+ " links view is incorrect" )
devicesResults = devicesResults and currentDevicesResult
portsResults = portsResults and currentPortsResult
linksResults = linksResults and currentLinksResult
topoResult = devicesResults and portsResults and linksResults\
- and consistentHostsResult and consistentClustersResult
+ and consistentHostsResult and consistentClustersResult\
+ and clusterResults and ipResult
utilities.assert_equals( expect=main.TRUE, actual=topoResult,
- onpass="Topology Check Test successful",
- onfail="Topology Check Test NOT successful" )
+ onpass="Topology Check Test successful",
+ onfail="Topology Check Test NOT successful" )
finalAssert = main.TRUE
finalAssert = finalAssert and topoResult and flowCheck \
- and intentCheck and consistentMastership and rolesNotNull
+ and intentCheck and consistentMastership and rolesNotNull
utilities.assert_equals( expect=main.TRUE, actual=finalAssert,
- onpass="State check successful",
- onfail="State check NOT successful" )
+ onpass="State check successful",
+ onfail="State check NOT successful" )
def CASE6( self, main ):
"""
@@ -1301,15 +1429,15 @@
intentStates = []
for node in intents: # Iter through ONOS nodes
nodeStates = []
- for intent in json.loads( node ): # Iter through intents of a node
+ # Iter through intents of a node
+ for intent in json.loads( node ):
nodeStates.append( intent[ 'state' ] )
intentStates.append( nodeStates )
out = [ (i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
main.log.info( dict( out ) )
-
- # NOTE: Hazelcast has no durability, so intents are lost across system
- # restarts
+ # NOTE: Store has no durability, so intents are lost across system
+ # restarts
main.step( "Compare current intents with intents before the failure" )
# NOTE: this requires case 5 to pass for intentState to be set.
# maybe we should stop the test if that fails?
@@ -1390,7 +1518,7 @@
# Test of LeadershipElection
# NOTE: this only works for the sanity test. In case of failures,
- # leader will likely change
+ # leader will likely change
leader = ONOS1Ip
leaderResult = main.TRUE
for controller in range( 1, numControllers + 1 ):
@@ -1406,8 +1534,8 @@
elif leaderN == main.FALSE:
# error in response
main.log.report( "Something is wrong with " +
- "electionTestLeader function," +
- " check the error logs" )
+ "electionTestLeader function, check the" +
+ " error logs" )
leaderResult = main.FALSE
elif leader != leaderN:
leaderResult = main.FALSE
@@ -1425,14 +1553,14 @@
onpass="Leadership election passed",
onfail="Something went wrong with Leadership election" )
- result = mastershipCheck and intentCheck and FlowTables and\
- ( not LossInPings ) and rolesNotNull and leaderResult
+ result = ( mastershipCheck and intentCheck and FlowTables and
+ ( not LossInPings ) and rolesNotNull and leaderResult )
result = int( result )
if result == main.TRUE:
main.log.report( "Constant State Tests Passed" )
utilities.assert_equals( expect=main.TRUE, actual=result,
- onpass="Constant State Tests Passed",
- onfail="Constant state tests failed" )
+ onpass="Constant State Tests Passed",
+ onfail="Constant state tests failed" )
def CASE8( self, main ):
"""
@@ -1472,6 +1600,7 @@
devicesResults = main.TRUE
portsResults = main.TRUE
linksResults = main.TRUE
+ hostsResults = main.TRUE
topoResult = main.FALSE
elapsed = 0
count = 0
@@ -1482,9 +1611,7 @@
count = count + 1
if count > 1:
# TODO: Depricate STS usage
- MNTopo = TestONTopology(
- main.Mininet1,
- ctrls )
+ MNTopo = TestONTopology( main.Mininet1, ctrls )
cliStart = time.time()
devices = []
devices.append( main.ONOScli1.devices() )
@@ -1502,13 +1629,15 @@
hosts.append( json.loads( main.ONOScli5.hosts() ) )
hosts.append( json.loads( main.ONOScli6.hosts() ) )
hosts.append( json.loads( main.ONOScli7.hosts() ) )
+ ipResult = main.TRUE
for controller in range( 0, len( hosts ) ):
controllerStr = str( controller + 1 )
for host in hosts[ controller ]:
- if host[ 'ips' ] == []:
+ if host is None or host.get( 'ips', [] ) == []:
main.log.error(
"DEBUG:Error with host ips on controller" +
controllerStr + ": " + str( host ) )
+ ipResult = main.FALSE
ports = []
ports.append( main.ONOScli1.ports() )
ports.append( main.ONOScli2.ports() )
@@ -1544,47 +1673,58 @@
controller ]:
currentDevicesResult = main.Mininet1.compareSwitches(
MNTopo,
- json.loads(
- devices[ controller ] ) )
+ json.loads( devices[ controller ] ) )
else:
currentDevicesResult = main.FALSE
utilities.assert_equals( expect=main.TRUE,
- actual=currentDevicesResult,
- onpass="ONOS" + controllerStr +
- " Switches view is correct",
- onfail="ONOS" + controllerStr +
- " Switches view is incorrect" )
+ actual=currentDevicesResult,
+ onpass="ONOS" + controllerStr +
+ " Switches view is correct",
+ onfail="ONOS" + controllerStr +
+ " Switches view is incorrect" )
if ports[ controller ] or "Error" not in ports[ controller ]:
currentPortsResult = main.Mininet1.comparePorts(
MNTopo,
- json.loads(
- ports[ controller ] ) )
+ json.loads( ports[ controller ] ) )
else:
currentPortsResult = main.FALSE
utilities.assert_equals( expect=main.TRUE,
- actual=currentPortsResult,
- onpass="ONOS" + controllerStr +
- " ports view is correct",
- onfail="ONOS" + controllerStr +
- " ports view is incorrect" )
+ actual=currentPortsResult,
+ onpass="ONOS" + controllerStr +
+ " ports view is correct",
+ onfail="ONOS" + controllerStr +
+ " ports view is incorrect" )
if links[ controller ] or "Error" not in links[ controller ]:
currentLinksResult = main.Mininet1.compareLinks(
MNTopo,
- json.loads(
- links[ controller ] ) )
+ json.loads( links[ controller ] ) )
else:
currentLinksResult = main.FALSE
utilities.assert_equals( expect=main.TRUE,
- actual=currentLinksResult,
- onpass="ONOS" + controllerStr +
- " links view is correct",
- onfail="ONOS" + controllerStr +
- " links view is incorrect" )
- devicesResults = devicesResults and currentDevicesResult
- portsResults = portsResults and currentPortsResult
- linksResults = linksResults and currentLinksResult
+ actual=currentLinksResult,
+ onpass="ONOS" + controllerStr +
+ " links view is correct",
+ onfail="ONOS" + controllerStr +
+ " links view is incorrect" )
+
+ if hosts[ controller ] or "Error" not in hosts[ controller ]:
+ currentHostsResult = main.Mininet1.compareHosts(
+ MNTopo, hosts[ controller ] )
+ else:
+ currentHostsResult = main.FALSE
+ utilities.assert_equals( expect=main.TRUE,
+ actual=currentHostsResult,
+ onpass="ONOS" + controllerStr +
+ " hosts exist in Mininet",
+ onfail="ONOS" + controllerStr +
+ " hosts don't match Mininet" )
+
+ devicesResults = devicesResults and currentDevicesResult
+ portsResults = portsResults and currentPortsResult
+ linksResults = linksResults and currentLinksResult
+ hostsResults = hostsResults and currentHostsResult
# Compare json objects for hosts and dataplane clusters
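
Each controller's devices, ports, links and now hosts view is compared against Mininet and the per-controller verdicts are ANDed into running totals, so one bad view fails the whole step. A minimal sketch of that pattern; views is a hypothetical list of per-controller dicts holding the raw CLI output (devices/ports/links as JSON strings, hosts already parsed), and the compare* calls mirror the Mininet driver methods used above:

import json

def compareAllControllers( mn, mnTopo, views ):
    devicesOk = portsOk = linksOk = hostsOk = True
    for view in views:
        devicesOk = devicesOk and mn.compareSwitches( mnTopo, json.loads( view[ 'devices' ] ) )
        portsOk = portsOk and mn.comparePorts( mnTopo, json.loads( view[ 'ports' ] ) )
        linksOk = linksOk and mn.compareLinks( mnTopo, json.loads( view[ 'links' ] ) )
        hostsOk = hostsOk and mn.compareHosts( mnTopo, view[ 'hosts' ] )
    return devicesOk and portsOk and linksOk and hostsOk
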
@@ -1641,17 +1781,19 @@
onfail="ONOS nodes have different views of clusters" )
# there should always only be one cluster
numClusters = len( json.loads( clusters[ 0 ] ) )
+ clusterResults = main.FALSE
+ if numClusters == 1:
+ clusterResults = main.TRUE
utilities.assert_equals(
expect=1,
actual=numClusters,
onpass="ONOS shows 1 SCC",
- onfail="ONOS shows " +
- str( numClusters ) +
- " SCCs" )
+ onfail="ONOS shows " + str( numClusters ) + " SCCs" )
topoResult = ( devicesResults and portsResults and linksResults
- and consistentHostsResult
- and consistentClustersResult )
+ and hostsResults and consistentHostsResult
+ and consistentClustersResult and clusterResults
+ and ipResult )
topoResult = topoResult and int( count <= 2 )
note = "note it takes about " + str( int( cliTime ) ) + \
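
The topology check now also requires the clusters() output to describe exactly one strongly connected component before topoResult can pass. A minimal sketch of that check; clustersJson is a hypothetical stand-in for the clusters() response:

import json

def hasSingleCluster( clustersJson ):
    # The dataplane should always form exactly one SCC.
    numClusters = len( json.loads( clustersJson ) )
    if numClusters != 1:
        print( "ONOS shows " + str( numClusters ) + " SCCs, expected 1" )
    return numClusters == 1
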
@@ -1662,8 +1804,8 @@
str( note ) + " ): " + str( elapsed ) + " seconds, " +
str( count ) + " tries" )
utilities.assert_equals( expect=main.TRUE, actual=topoResult,
- onpass="Topology Check Test successful",
- onfail="Topology Check Test NOT successful" )
+ onpass="Topology Check Test successful",
+ onfail="Topology Check Test NOT successful" )
if topoResult == main.TRUE:
main.log.report( "ONOS topology view matches Mininet topology" )
@@ -1677,20 +1819,18 @@
linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
description = "Turn off a link to ensure that Link Discovery " +\
- "is working properly"
+ "is working properly"
main.log.report( description )
main.case( description )
main.step( "Kill Link between s3 and s28" )
LinkDown = main.Mininet1.link( END1="s3", END2="s28", OPTION="down" )
- main.log.info(
- "Waiting " +
- str( linkSleep ) +
- " seconds for link down to be discovered" )
+ main.log.info( "Waiting " + str( linkSleep ) +
+ " seconds for link down to be discovered" )
time.sleep( linkSleep )
utilities.assert_equals( expect=main.TRUE, actual=LinkDown,
- onpass="Link down succesful",
- onfail="Failed to bring link down" )
+ onpass="Link down successful",
+ onfail="Failed to bring link down" )
# TODO do some sort of check here
def CASE10( self, main ):
@@ -1709,14 +1849,12 @@
main.step( "Bring link between s3 and s28 back up" )
LinkUp = main.Mininet1.link( END1="s3", END2="s28", OPTION="up" )
- main.log.info(
- "Waiting " +
- str( linkSleep ) +
- " seconds for link up to be discovered" )
+ main.log.info( "Waiting " + str( linkSleep ) +
+ " seconds for link up to be discovered" )
time.sleep( linkSleep )
utilities.assert_equals( expect=main.TRUE, actual=LinkUp,
- onpass="Link up succesful",
- onfail="Failed to bring link up" )
+ onpass="Link up successful",
+ onfail="Failed to bring link up" )
# TODO do some sort of check here
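
CASE9 and CASE10 only toggle the s3-s28 link and sleep; the TODO above asks for an actual check. One possible follow-up, sketched under the assumption that each entry in the links() JSON carries a 'state' field and that the expected ACTIVE link count is known to the caller (both hypothetical here):

import json

def activeLinkCountMatches( onosCli, expectedLinks ):
    # Count links ONOS reports as ACTIVE after the toggle settles.
    links = json.loads( onosCli.links() )
    active = [ link for link in links if link.get( 'state' ) == 'ACTIVE' ]
    return len( active ) == expectedLinks
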
def CASE11( self, main ):
@@ -1748,8 +1886,8 @@
if device and device[ 'available' ] is False:
result = main.TRUE
utilities.assert_equals( expect=main.TRUE, actual=result,
- onpass="Kill switch succesful",
- onfail="Failed to kill switch?" )
+ onpass="Kill switch successful",
+ onfail="Failed to kill switch?" )
def CASE12( self, main ):
"""
@@ -1772,27 +1910,24 @@
# TODO: New dpid or same? Ask Thomas?
for peer in links:
main.Mininet1.addLink( switch, peer )
- main.Mininet1.assignSwController(
- sw=switch.split( 's' )[ 1 ],
- count=numControllers,
- ip1=ONOS1Ip,
- port1=ONOS1Port,
- ip2=ONOS2Ip,
- port2=ONOS2Port,
- ip3=ONOS3Ip,
- port3=ONOS3Port,
- ip4=ONOS4Ip,
- port4=ONOS4Port,
- ip5=ONOS5Ip,
- port5=ONOS5Port,
- ip6=ONOS6Ip,
- port6=ONOS6Port,
- ip7=ONOS7Ip,
- port7=ONOS7Port )
- main.log.info(
- "Waiting " +
- str( switchSleep ) +
- " seconds for switch up to be discovered" )
+ main.Mininet1.assignSwController( sw=switch.split( 's' )[ 1 ],
+ count=numControllers,
+ ip1=ONOS1Ip,
+ port1=ONOS1Port,
+ ip2=ONOS2Ip,
+ port2=ONOS2Port,
+ ip3=ONOS3Ip,
+ port3=ONOS3Port,
+ ip4=ONOS4Ip,
+ port4=ONOS4Port,
+ ip5=ONOS5Ip,
+ port5=ONOS5Port,
+ ip6=ONOS6Ip,
+ port6=ONOS6Port,
+ ip7=ONOS7Ip,
+ port7=ONOS7Port )
+ main.log.info( "Waiting " + str( switchSleep ) +
+ " seconds for switch up to be discovered" )
time.sleep( switchSleep )
device = main.ONOScli1.getDevice( dpid=switchDPID )
# Peek at the deleted switch
@@ -1801,8 +1936,8 @@
if device and device[ 'available' ]:
result = main.TRUE
utilities.assert_equals( expect=main.TRUE, actual=result,
- onpass="add switch succesful",
- onfail="Failed to add switch?" )
+ onpass="add switch successful",
+ onfail="Failed to add switch?" )
def CASE13( self, main ):
"""
@@ -1864,6 +1999,8 @@
dstDir + str( testname ) +
"-ONOS" + str( i + 1 ) + "-" +
f )
+ main.ONOSbench.handle.expect( "\$" )
+
# std*.log's
# NOTE: must end in /
logFolder = "/opt/onos/var/"
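
The expect( "\$" ) added after each scp makes the test wait for the shell prompt so the next copy does not start while the previous transfer is still running. A minimal sketch of that copy-then-wait pattern; handle is a hypothetical pexpect spawn of the bench shell and the paths are illustrative:

def scpAndWait( handle, src, dst, prompt=r"\$" ):
    # Issue the copy and block until the shell prompt returns.
    handle.sendline( "scp " + src + " " + dst )
    handle.expect( prompt )
    return handle.before  # transfer transcript, useful for logging
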
@@ -1879,15 +2016,17 @@
dstDir + str( testname ) +
"-ONOS" + str( i + 1 ) + "-" +
f )
+ main.ONOSbench.handle.expect( "\$" )
# sleep so scp can finish
time.sleep( 10 )
+ main.Mininet1.stopNet()
main.step( "Packing and rotating pcap archives" )
os.system( "~/TestON/dependencies/rotate.sh " + str( testname ) )
# TODO: actually check something here
utilities.assert_equals( expect=main.TRUE, actual=main.TRUE,
- onpass="Test cleanup successful",
- onfail="Test cleanup NOT successful" )
+ onpass="Test cleanup successful",
+ onfail="Test cleanup NOT successful" )
def CASE14( self, main ):
"""
@@ -1988,9 +2127,7 @@
elif leader is None or leader == main.FALSE:
main.log.report(
"Leader for the election app should be an ONOS node," +
- "instead got '" +
- str( leader ) +
- "'" )
+ "instead got '" + str( leader ) + "'" )
leaderResult = main.FALSE
withdrawResult = oldLeader.electionTestWithdraw()
utilities.assert_equals(
@@ -2008,10 +2145,8 @@
for leaderN in leaderList:
if leaderN == leader:
main.log.report(
- "ONOS" +
- str( controller ) +
- " still sees " +
- str( leader ) +
+ "ONOS" + str( controller ) +
+ " still sees " + str( leader ) +
" as leader after they withdrew" )
leaderResult = main.FALSE
elif leaderN == main.FALSE:
@@ -2045,8 +2180,8 @@
onpass="Leadership election passed",
onfail="Something went wrong with Leadership election" )
- main.step(
- "Run for election on old leader( just so everyone is in the hat )" )
+ main.step( "Run for election on old leader( just so everyone " +
+ "is in the hat )" )
runResult = oldLeader.electionTestRun()
utilities.assert_equals(
expect=main.TRUE,
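
CASE15 withdraws the old leader from the election, verifies that no node still reports it as leader, and then has it run again so every node is back in the hat. A minimal sketch of that sequence; oldLeader, oldLeaderIp and nodes are hypothetical CLI handles mirroring the electionTest* driver calls used above:

def cycleLeadership( oldLeader, oldLeaderIp, nodes ):
    if not oldLeader.electionTestWithdraw():
        return False
    # No node should still see the withdrawn node as leader.
    stale = [ n for n in nodes if n.electionTestLeader() == oldLeaderIp ]
    ok = not stale
    # Re-enter the election so the old leader is a candidate again.
    ok = oldLeader.electionTestRun() and ok
    return ok
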
diff --git a/TestON/tests/HATestSingleInstanceRestart/HATestSingleInstanceRestart.params b/TestON/tests/HATestSingleInstanceRestart/HATestSingleInstanceRestart.params
index 80de267..d3eb8dd 100644
--- a/TestON/tests/HATestSingleInstanceRestart/HATestSingleInstanceRestart.params
+++ b/TestON/tests/HATestSingleInstanceRestart/HATestSingleInstanceRestart.params
@@ -1,4 +1,21 @@
<PARAMS>
+ #CASE1: Compile ONOS and push it to the test machines
+ #CASE2: Assign mastership to controllers
+ #CASE3: Assign intents
+ #CASE4: Ping across added host intents
+ #CASE5: Reading state of ONOS
+ #CASE6: The Failure case. In this test, we restart the single ONOS node.
+ #CASE7: Check state after control plane failure
+ #CASE8: Compare topo
+ #CASE9: Link s3-s28 down
+ #CASE10: Link s3-s28 up
+ #CASE11: Switch down
+ #CASE12: Switch up
+ #CASE13: Clean up
+ #CASE14: start election app on all onos nodes
+ #CASE15: Check that Leadership Election is still functional
+ #1,2,8,3,4,5,14,[6],8,3,7,4,15,9,8,4,10,8,4,11,8,4,12,8,4,13
+ #extra hosts test 1,2,8,11,8,12,8
<testcases>1,2,8,3,4,5,14,[6],8,3,7,4,15,9,8,4,10,8,4,11,8,4,12,8,4,13</testcases>
<ENV>
<cellName>HA</cellName>
diff --git a/TestON/tests/HATestSingleInstanceRestart/HATestSingleInstanceRestart.py b/TestON/tests/HATestSingleInstanceRestart/HATestSingleInstanceRestart.py
index 44ce741..59a37e4 100644
--- a/TestON/tests/HATestSingleInstanceRestart/HATestSingleInstanceRestart.py
+++ b/TestON/tests/HATestSingleInstanceRestart/HATestSingleInstanceRestart.py
@@ -31,14 +31,18 @@
CASE1 is to compile ONOS and push it to the test machines
Startup sequence:
- git pull
- mvn clean install
- onos-package
cell <name>
onos-verify-cell
NOTE: temporary - onos-remove-raft-logs
+ onos-uninstall
+ start mininet
+ git pull
+ mvn clean install
+ onos-package
onos-install -f
onos-wait-for-start
+ start cli sessions
+ start tcpdump
"""
main.log.report( "ONOS Single node cluster restart " +
"HA test - initialization" )
@@ -109,8 +113,7 @@
main.step( "Compiling the latest version of ONOS" )
if PULLCODE:
- # TODO Configure branch in params
- main.step( "Git checkout and pull master" )
+ main.step( "Git checkout and pull " + gitBranch )
main.ONOSbench.gitCheckout( gitBranch )
gitPullResult = main.ONOSbench.gitPull()
@@ -131,7 +134,6 @@
node=ONOS1Ip )
main.step( "Checking if ONOS is up yet" )
- # TODO check bundle:list?
for i in range( 2 ):
onos1Isup = main.ONOSbench.isup( ONOS1Ip )
if onos1Isup:
@@ -153,8 +155,8 @@
and onos1Isup and cliResult )
utilities.assert_equals( expect=main.TRUE, actual=case1Result,
- onpass="Test startup successful",
- onfail="Test startup NOT successful" )
+ onpass="Test startup successful",
+ onfail="Test startup NOT successful" )
if case1Result == main.FALSE:
main.cleanup()
@@ -201,6 +203,7 @@
# FIXME: we must reinstall intents until we have a persistant
# datastore!
import time
+ import json
main.log.report( "Adding host intents" )
main.case( "Adding host Intents" )
@@ -230,8 +233,11 @@
time.sleep( 10 )
main.step( "Add host intents" )
+ intentIds = []
# TODO: move the host numbers to params
+ # Maybe look at all the paths we ping?
intentAddResult = True
+ hostResult = main.TRUE
for i in range( 8, 18 ):
main.log.info( "Adding host intent between h" + str( i ) +
" and h" + str( i + 10 ) )
@@ -248,45 +254,95 @@
host1Id = host1Dict.get( 'id', None )
host2Id = host2Dict.get( 'id', None )
if host1Id and host2Id:
- tmpResult = main.ONOScli1.addHostIntent(
+ tmpId = main.ONOScli1.addHostIntent(
host1Id,
host2Id )
+ main.log.info( "Added intent with id: " + tmpId )
+ intentIds.append( tmpId )
else:
main.log.error( "Error, getHost() failed" )
main.log.warn( json.dumps( json.loads( main.ONOScli1.hosts() ),
sort_keys=True,
indent=4,
separators=( ',', ': ' ) ) )
- tmpResult = main.FALSE
- intentAddResult = bool( pingResult and intentAddResult
- and tmpResult )
- # TODO Check that intents were added?
+ hostResult = main.FALSE
+ onosIds = main.ONOScli1.getAllIntentsId()
+ main.log.info( "Submitted intents: " + str( intentIds ) )
+ main.log.info( "Intents in ONOS: " + str( onosIds ) )
+ for intent in intentIds:
+ if intent in onosIds:
+ pass # intent submitted is still in onos
+ else:
+ intentAddResult = False
# Print the intent states
- intents = main.ONOScli1.intents( )
+ intents = main.ONOScli1.intents()
intentStates = []
+ main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
+ count = 0
for intent in json.loads( intents ): # Iter through intents of a node
- intentStates.append( intent.get( 'state', None ) )
- out = [ (i, intentStates.count( i ) ) for i in set( intentStates ) ]
- main.log.info( dict( out ) )
-
+ state = intent.get( 'state', None )
+ intentId = intent.get( 'id', None )
+ intentStates.append( ( intentId, state ) )
+ # add submitted intents not in the store
+ tmplist = [ i for i, s in intentStates ]
+ missingIntents = False
+ for i in intentIds:
+ if i not in tmplist:
+ intentStates.append( ( i, " - " ) )
+ missingIntents = True
+ intentStates.sort()
+ for i, s in intentStates:
+ count += 1
+ main.log.info( "%-6s%-15s%-15s" %
+ ( str( count ), str( i ), str( s ) ) )
+ intentAddResult = bool( pingResult and hostResult and intentAddResult
+ and not missingIntents )
utilities.assert_equals(
expect=True,
actual=intentAddResult,
onpass="Pushed host intents to ONOS",
onfail="Error in pushing host intents to ONOS" )
- # TODO Check if intents all exist in datastore
+
+ if not intentAddResult:
+ import time
+ main.log.info( "Sleeping 60 seconds to see if intents are found" )
+ time.sleep( 60 )
+ onosIds = main.ONOScli1.getAllIntentsId()
+ main.log.info( "Submitted intents: " + str( intentIds ) )
+ main.log.info( "Intents in ONOS: " + str( onosIds ) )
+ # Print the intent states
+ intents = main.ONOScli1.intents()
+ intentStates = []
+ main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
+ count = 0
+ for intent in json.loads( intents ):
+ # Iter through intents of a node
+ state = intent.get( 'state', None )
+ intentId = intent.get( 'id', None )
+ intentStates.append( ( intentId, state ) )
+ # add submitted intents not in the store
+ tmplist = [ i for i, s in intentStates ]
+ for i in intentIds:
+ if i not in tmplist:
+ intentStates.append( ( i, " - " ) )
+ intentStates.sort()
+ for i, s in intentStates:
+ count += 1
+ main.log.info( "%-6s%-15s%-15s" %
+ ( str( count ), str( i ), str( s ) ) )
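
The reworked CASE3 records every intent ID returned by addHostIntent, checks that each one shows up in getAllIntentsId(), and prints a Count/ID/State table that also lists submitted IDs missing from the store. A minimal sketch of that reconciliation; submittedIds, onosIds and intentsJson are hypothetical stand-ins for the values the test collects:

import json

def reconcileIntents( submittedIds, onosIds, intentsJson ):
    missing = [ i for i in submittedIds if i not in onosIds ]
    rows = [ ( str( intent.get( 'id' ) ), str( intent.get( 'state' ) ) )
             for intent in json.loads( intentsJson ) ]
    rows += [ ( i, " - " ) for i in missing ]  # submitted but not in the store
    rows.sort()
    print( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
    for count, ( intentId, state ) in enumerate( rows, start=1 ):
        print( "%-6s%-15s%-15s" % ( count, intentId, state ) )
    return not missing
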
def CASE4( self, main ):
"""
Ping across added host intents
"""
+ import json
description = " Ping across added host intents"
main.log.report( description )
main.case( description )
PingResult = main.TRUE
for i in range( 8, 18 ):
- ping = main.Mininet1.pingHost(
- src="h" + str( i ), target="h" + str( i + 10 ) )
+ ping = main.Mininet1.pingHost( src="h" + str( i ),
+ target="h" + str( i + 10 ) )
PingResult = PingResult and ping
if ping == main.FALSE:
main.log.warn( "Ping failed between h" + str( i ) +
@@ -297,7 +353,7 @@
if PingResult == main.FALSE:
main.log.report(
"Intents have not been installed correctly, pings failed." )
- #TODO: pretty print
+ # TODO: pretty print
main.log.warn( "ONSO1 intents: " )
main.log.warn( json.dumps( json.loads( main.ONOScli1.intents() ),
sort_keys=True,
@@ -311,6 +367,22 @@
actual=PingResult,
onpass="Intents have been installed correctly and pings work",
onfail="Intents have not been installed correctly, pings failed." )
+ if PingResult is not main.TRUE:
+ # Print the intent states
+ intents = main.ONOScli1.intents()
+ intentStates = []
+ main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
+ count = 0
+ # Iter through intents of a node
+ for intent in json.loads( intents ):
+ state = intent.get( 'state', None )
+ intentId = intent.get( 'id', None )
+ intentStates.append( ( intentId, state ) )
+ intentStates.sort()
+ for i, s in intentStates:
+ count += 1
+ main.log.info( "%-6s%-15s%-15s" %
+ ( str( count ), str( i ), str( s ) ) )
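
CASE4 pings each h<i>/h<i+10> pair that should be connected by a host intent and, when anything fails, dumps the intent states for debugging. A minimal sketch of the ping sweep; mn is a hypothetical handle mirroring main.Mininet1 and a falsy return from pingHost is treated as a failed ping:

def pingIntentPairs( mn ):
    failed = []
    for i in range( 8, 18 ):
        src, dst = "h" + str( i ), "h" + str( i + 10 )
        if not mn.pingHost( src=src, target=dst ):
            failed.append( ( src, dst ) )
    return failed  # empty list means every intent carried traffic
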
def CASE5( self, main ):
"""
@@ -364,10 +436,10 @@
main.step( "Get the flows from each controller" )
global flowState
flowState = []
- ONOS1Flows = main.ONOScli1.flows( jsonFormat=True )
flowCheck = main.FALSE
+ ONOS1Flows = main.ONOScli1.flows( jsonFormat=True )
if "Error" in ONOS1Flows or not ONOS1Flows:
- main.log.report( "Error in getting ONOS intents" )
+ main.log.report( "Error in getting ONOS flows" )
main.log.warn( "ONOS1 flows repsponse: " + ONOS1Flows )
else:
# TODO: Do a better check, maybe compare flows on switches?
@@ -379,7 +451,9 @@
flows = []
for i in range( 1, 29 ):
flows.append( main.Mininet2.getFlowTable( 1.3, "s" + str( i ) ) )
-
+ if flowCheck == main.FALSE:
+ for table in flows:
+ main.log.warn( table )
# TODO: Compare switch flow tables with ONOS flow tables
main.step( "Create TestONTopology object" )
@@ -399,78 +473,110 @@
main.step( "Collecting topology information from ONOS" )
devices = []
devices.append( main.ONOScli1.devices() )
- """
hosts = []
- hosts.append( main.ONOScli1.hosts() )
- """
+ hosts.append( json.loads( main.ONOScli1.hosts() ) )
ports = []
ports.append( main.ONOScli1.ports() )
links = []
links.append( main.ONOScli1.links() )
+ clusters = []
+ clusters.append( main.ONOScli1.clusters() )
+ ipResult = main.TRUE
+ for controller in range( 0, len( hosts ) ):
+ controllerStr = str( controller + 1 )
+ for host in hosts[ controller ]:
+ if host is None or host.get( 'ips', [] ) == []:
+ main.log.error(
+ "DEBUG:Error with host ips on controller" +
+ controllerStr + ": " + str( host ) )
+ ipResult = main.FALSE
+
+ # there should always only be one cluster
+ numClusters = len( json.loads( clusters[ 0 ] ) )
+ clusterResults = main.FALSE
+ if numClusters == 1:
+ clusterResults = main.TRUE
+ utilities.assert_equals(
+ expect=1,
+ actual=numClusters,
+ onpass="ONOS shows 1 SCC",
+ onfail="ONOS shows " + str( numClusters ) + " SCCs" )
main.step( "Comparing ONOS topology to MN" )
devicesResults = main.TRUE
portsResults = main.TRUE
linksResults = main.TRUE
+ hostsResults = main.TRUE
for controller in range( numControllers ):
controllerStr = str( controller + 1 )
if devices[ controller ] or "Error" not in devices[ controller ]:
currentDevicesResult = main.Mininet1.compareSwitches(
MNTopo,
- json.loads(
- devices[ controller ] ) )
+ json.loads( devices[ controller ] ) )
else:
currentDevicesResult = main.FALSE
utilities.assert_equals( expect=main.TRUE,
- actual=currentDevicesResult,
- onpass="ONOS" + controllerStr +
- " Switches view is correct",
- onfail="ONOS" + controllerStr +
- " Switches view is incorrect" )
+ actual=currentDevicesResult,
+ onpass="ONOS" + controllerStr +
+ " Switches view is correct",
+ onfail="ONOS" + controllerStr +
+ " Switches view is incorrect" )
if ports[ controller ] or "Error" not in ports[ controller ]:
currentPortsResult = main.Mininet1.comparePorts(
MNTopo,
- json.loads(
- ports[ controller ] ) )
+ json.loads( ports[ controller ] ) )
else:
currentPortsResult = main.FALSE
utilities.assert_equals( expect=main.TRUE,
- actual=currentPortsResult,
- onpass="ONOS" + controllerStr +
- " ports view is correct",
- onfail="ONOS" + controllerStr +
- " ports view is incorrect" )
+ actual=currentPortsResult,
+ onpass="ONOS" + controllerStr +
+ " ports view is correct",
+ onfail="ONOS" + controllerStr +
+ " ports view is incorrect" )
if links[ controller ] or "Error" not in links[ controller ]:
currentLinksResult = main.Mininet1.compareLinks(
MNTopo,
- json.loads(
- links[ controller ] ) )
+ json.loads( links[ controller ] ) )
else:
currentLinksResult = main.FALSE
utilities.assert_equals( expect=main.TRUE,
- actual=currentLinksResult,
- onpass="ONOS" + controllerStr +
- " links view is correct",
- onfail="ONOS" + controllerStr +
- " links view is incorrect" )
+ actual=currentLinksResult,
+ onpass="ONOS" + controllerStr +
+ " links view is correct",
+ onfail="ONOS" + controllerStr +
+ " links view is incorrect" )
+
+ if hosts[ controller ] or "Error" not in hosts[ controller ]:
+ currentHostsResult = main.Mininet1.compareHosts(
+ MNTopo, hosts[ controller ] )
+ else:
+ currentHostsResult = main.FALSE
+ utilities.assert_equals( expect=main.TRUE,
+ actual=currentHostsResult,
+ onpass="ONOS" + controllerStr +
+ " hosts exist in Mininet",
+ onfail="ONOS" + controllerStr +
+ " hosts don't match Mininet" )
devicesResults = devicesResults and currentDevicesResult
portsResults = portsResults and currentPortsResult
linksResults = linksResults and currentLinksResult
+ hostsResults = hostsResults and currentHostsResult
- topoResult = devicesResults and portsResults and linksResults
+ topoResult = devicesResults and portsResults and linksResults\
+ and clusterResults and ipResult and hostsResults
utilities.assert_equals( expect=main.TRUE, actual=topoResult,
- onpass="Topology Check Test successful",
- onfail="Topology Check Test NOT successful" )
+ onpass="Topology Check Test successful",
+ onfail="Topology Check Test NOT successful" )
finalAssert = main.TRUE
finalAssert = finalAssert and topoResult and flowCheck \
- and intentCheck and consistentMastership and rolesNotNull
+ and intentCheck and consistentMastership and rolesNotNull
utilities.assert_equals( expect=main.TRUE, actual=finalAssert,
- onpass="State check successful",
- onfail="State check NOT successful" )
+ onpass="State check successful",
+ onfail="State check NOT successful" )
def CASE6( self, main ):
"""
@@ -497,11 +603,10 @@
caseResults = main.TRUE and onos1Isup and cliResult
utilities.assert_equals( expect=main.TRUE, actual=caseResults,
- onpass="ONOS restart successful",
- onfail="ONOS restart NOT successful" )
- main.log.info(
- "ESTIMATE: ONOS took %s seconds to restart" %
- str( elapsed ) )
+ onpass="ONOS restart successful",
+ onfail="ONOS restart NOT successful" )
+ main.log.info( "ESTIMATE: ONOS took %s seconds to restart" %
+ str( elapsed ) )
time.sleep( 5 )
def CASE7( self, main ):
@@ -524,9 +629,8 @@
# FIXME: Refactor this whole case for single instance
if "Error" in ONOS1Mastership or not ONOS1Mastership:
main.log.report( "Error in getting ONOS mastership" )
- main.log.warn(
- "ONOS1 mastership response: " +
- repr( ONOS1Mastership ) )
+ main.log.warn( "ONOS1 mastership response: " +
+ repr( ONOS1Mastership ) )
consistentMastership = main.FALSE
else:
consistentMastership = main.TRUE
@@ -546,9 +650,7 @@
mastershipCheck = main.TRUE
for i in range( 1, 29 ):
switchDPID = str(
- main.Mininet1.getSwitchDPID(
- switch="s" +
- str( i ) ) )
+ main.Mininet1.getSwitchDPID( switch="s" + str( i ) ) )
current = [ switch[ 'master' ] for switch in currentJson
if switchDPID in switch[ 'id' ] ]
@@ -588,15 +690,15 @@
intentStates = []
for node in intents: # Iter through ONOS nodes
nodeStates = []
- for intent in json.loads( node ): # Iter through intents of a node
+ # Iter through intents of a node
+ for intent in json.loads( node ):
nodeStates.append( intent[ 'state' ] )
intentStates.append( nodeStates )
out = [ (i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
main.log.info( dict( out ) )
-
- # NOTE: Hazelcast has no durability, so intents are lost across system
- # restarts
+ # NOTE: Store has no durability, so intents are lost across system
+ # restarts
"""
main.step( "Compare current intents with intents before the failure" )
# NOTE: this requires case 5 to pass for intentState to be set.
@@ -668,10 +770,10 @@
leaderResult = main.FALSE
elif leader != leaderN:
leaderResult = main.FALSE
- main.log.report( "ONOS" + str( controller ) +
- " sees " + str( leaderN ) +
- " as the leader of the election app." +
- " Leader should be " + str( leader ) )
+ main.log.report( "ONOS" + str( controller ) + " sees " +
+ str( leaderN ) +
+ " as the leader of the election app. " +
+ "Leader should be " + str( leader ) )
if leaderResult:
main.log.report( "Leadership election tests passed( consistent " +
"view of leader across listeners and a new " +
@@ -688,8 +790,8 @@
if result == main.TRUE:
main.log.report( "Constant State Tests Passed" )
utilities.assert_equals( expect=main.TRUE, actual=result,
- onpass="Constant State Tests Passed",
- onfail="Constant state tests failed" )
+ onpass="Constant State Tests Passed",
+ onfail="Constant state tests failed" )
def CASE8( self, main ):
"""
@@ -724,28 +826,39 @@
devicesResults = main.TRUE
portsResults = main.TRUE
linksResults = main.TRUE
+ hostsResults = main.TRUE
topoResult = main.FALSE
elapsed = 0
count = 0
main.step( "Collecting topology information from ONOS" )
startTime = time.time()
+ # Give time for Gossip to work
while topoResult == main.FALSE and elapsed < 60:
count = count + 1
if count > 1:
- MNTopo = TestONTopology(
- main.Mininet1,
- ctrls )
+ # TODO: Deprecate STS usage
+ MNTopo = TestONTopology( main.Mininet1, ctrls )
cliStart = time.time()
devices = []
devices.append( main.ONOScli1.devices() )
- """
hosts = []
- hosts.append( main.ONOScli1.hosts() )
- """
+ hosts.append( json.loads( main.ONOScli1.hosts() ) )
+ ipResult = main.TRUE
+ for controller in range( 0, len( hosts ) ):
+ controllerStr = str( controller + 1 )
+ for host in hosts[ controller ]:
+ if host is None or host.get( 'ips', [] ) == []:
+ main.log.error(
+ "DEBUG:Error with host ips on controller" +
+ controllerStr + ": " + str( host ) )
+ ipResult = main.FALSE
ports = []
ports.append( main.ONOScli1.ports() )
links = []
links.append( main.ONOScli1.links() )
+ clusters = []
+ clusters.append( main.ONOScli1.clusters() )
+
elapsed = time.time() - startTime
cliTime = time.time() - cliStart
print "CLI time: " + str( cliTime )
@@ -756,48 +869,72 @@
controller ]:
currentDevicesResult = main.Mininet1.compareSwitches(
MNTopo,
- json.loads(
- devices[ controller ] ) )
+ json.loads( devices[ controller ] ) )
else:
currentDevicesResult = main.FALSE
utilities.assert_equals( expect=main.TRUE,
- actual=currentDevicesResult,
- onpass="ONOS" + controllerStr +
- " Switches view is correct",
- onfail="ONOS" + controllerStr +
- " Switches view is incorrect" )
+ actual=currentDevicesResult,
+ onpass="ONOS" + controllerStr +
+ " Switches view is correct",
+ onfail="ONOS" + controllerStr +
+ " Switches view is incorrect" )
if ports[ controller ] or "Error" not in ports[ controller ]:
currentPortsResult = main.Mininet1.comparePorts(
MNTopo,
- json.loads(
- ports[ controller ] ) )
+ json.loads( ports[ controller ] ) )
else:
currentPortsResult = main.FALSE
utilities.assert_equals( expect=main.TRUE,
- actual=currentPortsResult,
- onpass="ONOS" + controllerStr +
- " ports view is correct",
- onfail="ONOS" + controllerStr +
- " ports view is incorrect" )
+ actual=currentPortsResult,
+ onpass="ONOS" + controllerStr +
+ " ports view is correct",
+ onfail="ONOS" + controllerStr +
+ " ports view is incorrect" )
if links[ controller ] or "Error" not in links[ controller ]:
currentLinksResult = main.Mininet1.compareLinks(
MNTopo,
- json.loads(
- links[ controller ] ) )
+ json.loads( links[ controller ] ) )
else:
currentLinksResult = main.FALSE
utilities.assert_equals( expect=main.TRUE,
- actual=currentLinksResult,
- onpass="ONOS" + controllerStr +
- " links view is correct",
- onfail="ONOS" + controllerStr +
- " links view is incorrect" )
- devicesResults = devicesResults and currentDevicesResult
- portsResults = portsResults and currentPortsResult
- linksResults = linksResults and currentLinksResult
- topoResult = devicesResults and portsResults and linksResults
+ actual=currentLinksResult,
+ onpass="ONOS" + controllerStr +
+ " links view is correct",
+ onfail="ONOS" + controllerStr +
+ " links view is incorrect" )
+
+ if hosts[ controller ] or "Error" not in hosts[ controller ]:
+ currentHostsResult = main.Mininet1.compareHosts(
+ MNTopo, hosts[ controller ] )
+ else:
+ currentHostsResult = main.FALSE
+ utilities.assert_equals( expect=main.TRUE,
+ actual=currentHostsResult,
+ onpass="ONOS" + controllerStr +
+ " hosts exist in Mininet",
+ onfail="ONOS" + controllerStr +
+ " hosts don't match Mininet" )
+
+ devicesResults = devicesResults and currentDevicesResult
+ portsResults = portsResults and currentPortsResult
+ linksResults = linksResults and currentLinksResult
+ hostsResults = hostsResults and currentHostsResult
+
+ # there should always only be one cluster
+ numClusters = len( json.loads( clusters[ 0 ] ) )
+ clusterResults = main.FALSE
+ if numClusters == 1:
+ clusterResults = main.TRUE
+ utilities.assert_equals(
+ expect=1,
+ actual=numClusters,
+ onpass="ONOS shows 1 SCC",
+ onfail="ONOS shows " + str( numClusters ) + " SCCs" )
+
+ topoResult = ( devicesResults and portsResults and linksResults
+ and hostsResults and ipResult and clusterResults )
topoResult = topoResult and int( count <= 2 )
note = "note it takes about " + str( int( cliTime ) ) + \
@@ -807,11 +944,15 @@
"Very crass estimate for topology discovery/convergence( " +
str( note ) + " ): " + str( elapsed ) + " seconds, " +
str( count ) + " tries" )
- if elapsed > 60:
- main.log.report( "Giving up on topology convergence" )
utilities.assert_equals( expect=main.TRUE, actual=topoResult,
- onpass="Topology Check Test successful",
- onfail="Topology Check Test NOT successful" )
+ onpass="Topology Check Test successful",
+ onfail="Topology Check Test NOT successful" )
+ # this is temporary
+ # main.Mininet1.handle.sendline( "py [(s.intfs[i], s.intfs[i].mac) for s in net.switches for i in s.intfs]" )
+ # main.Mininet1.handle.expect( "mininet>" )
+ # main.log.error( main.Mininet1.handle.before )
+ # main.log.error( main.ONOScli1.hosts() )
+
if topoResult == main.TRUE:
main.log.report( "ONOS topology view matches Mininet topology" )
@@ -825,20 +966,18 @@
linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
description = "Turn off a link to ensure that Link Discovery " +\
- "is working properly"
+ "is working properly"
main.log.report( description )
main.case( description )
main.step( "Kill Link between s3 and s28" )
LinkDown = main.Mininet1.link( END1="s3", END2="s28", OPTION="down" )
- main.log.info(
- "Waiting " +
- str( linkSleep ) +
- " seconds for link down to be discovered" )
+ main.log.info( "Waiting " + str( linkSleep ) +
+ " seconds for link down to be discovered" )
time.sleep( linkSleep )
utilities.assert_equals( expect=main.TRUE, actual=LinkDown,
- onpass="Link down succesful",
- onfail="Failed to bring link down" )
+ onpass="Link down successful",
+ onfail="Failed to bring link down" )
# TODO do some sort of check here
def CASE10( self, main ):
@@ -851,20 +990,18 @@
linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
description = "Restore a link to ensure that Link Discovery is " + \
- "working properly"
+ "working properly"
main.log.report( description )
main.case( description )
main.step( "Bring link between s3 and s28 back up" )
LinkUp = main.Mininet1.link( END1="s3", END2="s28", OPTION="up" )
- main.log.info(
- "Waiting " +
- str( linkSleep ) +
- " seconds for link up to be discovered" )
+ main.log.info( "Waiting " + str( linkSleep ) +
+ " seconds for link up to be discovered" )
time.sleep( linkSleep )
utilities.assert_equals( expect=main.TRUE, actual=LinkUp,
- onpass="Link up succesful",
- onfail="Failed to bring link up" )
+ onpass="Link up successful",
+ onfail="Failed to bring link up" )
# TODO do some sort of check here
def CASE11( self, main ):
@@ -896,8 +1033,8 @@
if device and device[ 'available' ] is False:
result = main.TRUE
utilities.assert_equals( expect=main.TRUE, actual=result,
- onpass="Kill switch succesful",
- onfail="Failed to kill switch?" )
+ onpass="Kill switch successful",
+ onfail="Failed to kill switch?" )
def CASE12( self, main ):
"""
@@ -921,11 +1058,10 @@
for peer in links:
main.Mininet1.addLink( switch, peer )
main.Mininet1.assignSwController( sw=switch.split( 's' )[ 1 ],
- ip1=ONOS1Ip, port1=ONOS1Port )
- main.log.info(
- "Waiting " +
- str( switchSleep ) +
- " seconds for switch up to be discovered" )
+ ip1=ONOS1Ip,
+ port1=ONOS1Port )
+ main.log.info( "Waiting " + str( switchSleep ) +
+ " seconds for switch up to be discovered" )
time.sleep( switchSleep )
device = main.ONOScli1.getDevice( dpid=switchDPID )
# Peek at the deleted switch
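
After re-adding the switch and pointing it at the controller, CASE12 sleeps and then peeks at the device to see whether its 'available' flag is set. A minimal sketch that polls instead of relying on a single fixed sleep; the getDevice call mirrors the driver method used above and the timeout is hypothetical:

import time

def waitForAvailability( onosCli, dpid, expected, timeout=30 ):
    # Poll the device's 'available' flag until it matches or we give up.
    start = time.time()
    while time.time() - start < timeout:
        device = onosCli.getDevice( dpid=dpid )
        if device and device.get( 'available' ) == expected:
            return True
        time.sleep( 2 )
    return False
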
@@ -934,8 +1070,8 @@
if device and device[ 'available' ]:
result = main.TRUE
utilities.assert_equals( expect=main.TRUE, actual=result,
- onpass="add switch succesful",
- onfail="Failed to add switch?" )
+ onpass="add switch successful",
+ onfail="Failed to add switch?" )
def CASE13( self, main ):
"""
@@ -983,7 +1119,6 @@
teststationIP + ":" + dstDir +
str( testname ) + "-ONOS1-" + f )
main.ONOSbench.handle.expect( "\$" )
- print main.ONOSbench.handle.before
# std*.log's
# NOTE: must end in /
@@ -997,15 +1132,17 @@
teststationUser + "@" +
teststationIP + ":" + dstDir +
str( testname ) + "-ONOS1-" + f )
+ main.ONOSbench.handle.expect( "\$" )
# sleep so scp can finish
time.sleep( 10 )
+ main.Mininet1.stopNet()
main.step( "Packing and rotating pcap archives" )
os.system( "~/TestON/dependencies/rotate.sh " + str( testname ) )
# TODO: actually check something here
utilities.assert_equals( expect=main.TRUE, actual=main.TRUE,
- onpass="Test cleanup successful",
- onfail="Test cleanup NOT successful" )
+ onpass="Test cleanup successful",
+ onfail="Test cleanup NOT successful" )
def CASE14( self, main ):
"""
@@ -1094,9 +1231,7 @@
elif leader is None or leader == main.FALSE:
main.log.report(
"Leader for the election app should be an ONOS node," +
- "instead got '" +
- str( leader ) +
- "'" )
+ "instead got '" + str( leader ) + "'" )
leaderResult = main.FALSE
withdrawResult = oldLeader.electionTestWithdraw()
utilities.assert_equals(
@@ -1131,8 +1266,8 @@
onpass="Leadership election passed",
onfail="Something went wrong with Leadership election" )
- main.step(
- "Run for election on old leader( just so everyone is in the hat )" )
+ main.step( "Run for election on old leader( just so everyone " +
+ "is in the hat )" )
runResult = oldLeader.electionTestRun()
utilities.assert_equals(
expect=main.TRUE,