Initial implementation of ONOS cluster driver
- Create CLI, REST, and "Bench" components for a cluster
- Return driver object when it is created
- Add __str__ and __repr__ implementations for drivers
- Add first pass at a cluster class
- Prototype with clustered Sample test
- Prototype with HAsanity test
- Add new Exception class for SkipCase
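
An illustrative usage sketch (hypothetical test snippet; assumes a .topo
file defining an <ONOScell> component of type OnosClusterDriver, and that
summary() exists on each node's REST or CLI driver):

    from dependencies.Cluster import Cluster

    main.Cluster = Cluster( main.ONOScell.nodes )
    for ctrl in main.Cluster.active():
        # Attribute lookups on a node fall through REST -> CLI -> Bench
        main.log.info( ctrl.summary() )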
Change-Id: I32ee7cf655ab9a2a5cfccf5f891ca71a6a70c1ee
diff --git a/TestON/core/teston.py b/TestON/core/teston.py
index 74a4eac..3030f3f 100644
--- a/TestON/core/teston.py
+++ b/TestON/core/teston.py
@@ -55,6 +55,9 @@
from core.utilities import Utilities
from core.Thread import Thread
+class SkipCase( Exception ):
+    """
+    Raised to signal that the remaining steps of the current test case
+    should be skipped.
+    """
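+# A test step can abort the remainder of its case via main.skipCase(),
+# which raises this exception; e.g. (illustrative):
+#     if setupResult == main.FALSE:
+#         main.skipCase( "Setup failed" )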
+
class TestON:
'''
TestON will initiate the specified test.
@@ -197,6 +200,7 @@
vars( self )[component] = driverObject
self.initiated = True
+ return driverObject
def run( self ):
'''
@@ -328,7 +332,7 @@
exec code[testCaseNumber][step] in module.__dict__
self.stepCount = self.stepCount + 1
self.parseStepResults( testCaseNumber )
- except StopIteration: # Raised in self.skipCase()
+ except SkipCase: # Raised in self.skipCase()
self.log.warn( "Skipping the rest of CASE" +
str( testCaseNumber ) )
self.parseStepResults( testCaseNumber )
@@ -426,7 +430,7 @@
self.onFailMsg = "Skipping the rest of this case. "
if msg:
self.onFailMsg += str( msg )
- raise StopIteration
+ raise SkipCase
def addCaseHeader( self ):
caseHeader = "\n" + "*" * 30 + "\n Result summary for Testcase" +\
diff --git a/TestON/drivers/common/cli/emulator/remotemininetdriver.py b/TestON/drivers/common/cli/emulator/remotemininetdriver.py
index 6a8e8f8..7566a85 100644
--- a/TestON/drivers/common/cli/emulator/remotemininetdriver.py
+++ b/TestON/drivers/common/cli/emulator/remotemininetdriver.py
@@ -470,7 +470,7 @@
main.cleanup()
main.exit()
- def runOpticalMnScript( self,name = 'onos', ctrllerIP = None ):
+ def runOpticalMnScript( self, name='onos', ctrllerIP=None ):
import time
import types
"""
diff --git a/TestON/drivers/common/cli/onosclusterdriver.py b/TestON/drivers/common/cli/onosclusterdriver.py
new file mode 100755
index 0000000..0a99d05
--- /dev/null
+++ b/TestON/drivers/common/cli/onosclusterdriver.py
@@ -0,0 +1,394 @@
+#!/usr/bin/env python
+"""
+Copyright 2017 Open Networking Foundation (ONF)
+
+Please refer questions to either the onos test mailing list at <onos-test@onosproject.org>,
+the System Testing Plans and Results wiki page at <https://wiki.onosproject.org/x/voMg>,
+or the System Testing Guide page at <https://wiki.onosproject.org/x/WYQg>
+
+ TestON is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 2 of the License, or
+ (at your option) any later version.
+
+ TestON is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with TestON. If not, see <http://www.gnu.org/licenses/>.
+
+
+This driver is used to interact with an ONOS cluster. It handles creating
+the necessary components for interacting with each specific ONOS node.
+
+Please refer questions to either the onos test mailing list at <onos-test@onosproject.org>,
+the System Testing Plans and Results wiki page at <https://wiki.onosproject.org/x/voMg>,
+or the System Testing Guide page at <https://wiki.onosproject.org/x/WYQg>
+
+"""
+import pexpect
+import os
+from drivers.common.clidriver import CLI
+
+# FIXME: Move this to its own file?
+class Controller():
+ def __str__( self ):
+ return self.name
+ def __repr__( self ):
+        # TODO: use repr() for components?
+ return "%s<IP=%s, CLI=%s, REST=%s, Bench=%s >" % ( self.name,
+ self.ipAddress,
+ self.CLI,
+ self.REST,
+ self.Bench )
+
+ def __getattr__( self, name ):
+ """
+ Called when an attribute lookup has not found the attribute
+ in the usual places (i.e. it is not an instance attribute nor
+ is it found in the class tree for self). name is the attribute
+ name. This method should return the (computed) attribute value
+ or raise an AttributeError exception.
+
+        We will look in each of the node's component handles for the
+        attribute, checking REST first, then CLI, then Bench.
+ """
+        if hasattr( self.REST, name ):
+            main.log.debug( "Using REST driver's attribute for '%s'" % ( name ) )
+            return getattr( self.REST, name )
+        if hasattr( self.CLI, name ):
+            main.log.debug( "Using CLI driver's attribute for '%s'" % ( name ) )
+            return getattr( self.CLI, name )
+        if hasattr( self.Bench, name ):
+            main.log.debug( "Using Bench driver's attribute for '%s'" % ( name ) )
+            return getattr( self.Bench, name )
+        raise AttributeError( "Could not find the attribute %s in %s or its component handles" % ( name, self ) )
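+    # Illustrative delegation example (handles are assumed to have been
+    # created by OnosClusterDriver.createComponents()):
+    #     ctrl = Controller( "ONOS1", "10.0.0.1", CLI=cli, REST=rest, Bench=bench )
+    #     ctrl.intents()  # resolves to rest.intents() if the REST driver has it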
+
+    def __init__( self, name, ipAddress, CLI=None, REST=None, Bench=None ):
+        # TODO: validate these arguments
+ self.name = str( name )
+ self.ipAddress = ipAddress
+ self.CLI = CLI
+ self.REST = REST
+ self.Bench = Bench
+ self.active = False
+
+
+class OnosClusterDriver( CLI ):
+
+ def __init__( self ):
+ """
+ Initialize client
+ """
+ self.name = None
+ self.home = None
+ self.handle = None
+ self.nodes = []
+ super( OnosClusterDriver, self ).__init__()
+
+    def checkOptions( self, var, defaultVar ):
+        """
+        Return var, or defaultVar if var is None or an empty string.
+        """
+        if var is None or var == "":
+            return defaultVar
+        return var
+
+ def connect( self, **connectargs ):
+ """
+ Creates ssh handle for ONOS "bench".
+ NOTE:
+        The ip_address comes from the host tag in the topo file. The value
+        can be an environment variable or "localhost"; it is resolved to
+        the IP address used to ssh to the "bench" machine.
+ """
+ try:
+ for key in connectargs:
+ vars( self )[ key ] = connectargs[ key ]
+ self.home = "~/onos"
+ for key in self.options:
+ if key == "home":
+ self.home = self.options[ 'home' ]
+ elif key == "karaf_username":
+ self.karafUser = self.options[ key ]
+ elif key == "karaf_password":
+ self.karafPass = self.options[ key ]
+ elif key == "cluster_name":
+ prefix = self.options[ key ]
+
+ self.home = self.checkOptions(self.home, "~/onos")
+ self.karafUser = self.checkOptions(self.karafUser, self.user_name)
+ self.karafPass = self.checkOptions(self.karafPass, self.pwd )
+ prefix = self.checkOptions( prefix, "ONOS" )
+
+ self.name = self.options[ 'name' ]
+
+            # The 'nodes' tag is optional in the .topo file
+            for key in self.options:
+                if key == "nodes":
+                    # Maximum number of ONOS nodes to run, if specified
+                    self.maxNodes = int( self.options[ key ] )
+                    break
+            else:  # No 'nodes' tag was found
+                self.maxNodes = None
+
+ if self.maxNodes is None or self.maxNodes == "":
+ self.maxNodes = 100
+
+ # Grabs all OC environment variables based on max number of nodes
+            # TODO: Also support giving an ip range as a component option
+            self.onosIps = {}  # Dictionary of all possible ONOS IPs
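+            # For example (hypothetical cell): with OC1=10.128.10.1 and
+            # OC2=10.128.10.2 exported, the loop below yields
+            # onosIps == { 'OC1': '10.128.10.1', 'OC2': '10.128.10.2' }
+            # and sets maxNodes to 2 at the first unset OC# variable.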
+
+ try:
+ if self.maxNodes:
+ for i in range( self.maxNodes ):
+ envString = "OC" + str( i + 1 )
+                        # Stop at the first unset OC# variable
+ if os.getenv( envString ):
+ self.onosIps[ envString ] = os.getenv( envString )
+ else:
+ self.maxNodes = len( self.onosIps )
+                            main.log.info( self.name +
+                                           ": Created cluster data with a" +
+                                           " maximum of " +
+                                           str( self.maxNodes ) +
+                                           " nodes" )
+ break
+
+ if not self.onosIps:
+ main.log.info( "Could not read any environment variable"
+ + " please load a cell file with all" +
+ " onos IP" )
+ self.maxNodes = None
+ else:
+ main.log.info( self.name + ": Found " +
+ str( self.onosIps.values() ) +
+ " ONOS IPs" )
+ except KeyError:
+ main.log.info( "Invalid environment variable" )
+ except Exception as inst:
+ main.log.error( "Uncaught exception: " + str( inst ) )
+
+ try:
+ if os.getenv( str( self.ip_address ) ) is not None:
+ self.ip_address = os.getenv( str( self.ip_address ) )
+ else:
+ main.log.info( self.name +
+ ": Trying to connect to " +
+ self.ip_address )
+ except KeyError:
+ main.log.info( "Invalid host name," +
+ " connecting to local host instead" )
+ self.ip_address = 'localhost'
+ except Exception as inst:
+ main.log.error( "Uncaught exception: " + str( inst ) )
+
+ self.handle = super( OnosClusterDriver, self ).connect(
+ user_name=self.user_name,
+ ip_address=self.ip_address,
+ port=self.port,
+ pwd=self.pwd,
+ home=self.home )
+
+ if self.handle:
+ self.handle.sendline( "cd " + self.home )
+ self.handle.expect( "\$" )
+ self.createComponents( prefix=prefix )
+ return self.handle
+ else:
+ main.log.info( "Failed to create ONOS handle" )
+ return main.FALSE
+ except pexpect.EOF:
+ main.log.error( self.name + ": EOF exception found" )
+ main.log.error( self.name + ": " + self.handle.before )
+ main.cleanup()
+ main.exit()
+ except Exception:
+ main.log.exception( self.name + ": Uncaught exception!" )
+ main.cleanup()
+ main.exit()
+
+ def disconnect( self ):
+ """
+ Called when Test is complete to disconnect the ONOS handle.
+ """
+ response = main.TRUE
+ try:
+ if self.handle:
+ self.handle.sendline( "" )
+ self.handle.expect( "\$" )
+ self.handle.sendline( "exit" )
+ self.handle.expect( "closed" )
+ except pexpect.EOF:
+ main.log.error( self.name + ": EOF exception found" )
+ main.log.error( self.name + ": " + self.handle.before )
+ except ValueError:
+ main.log.exception( "Exception in disconnect of " + self.name )
+ response = main.TRUE
+ except Exception:
+ main.log.exception( self.name + ": Connection failed to the host" )
+ response = main.FALSE
+ return response
+
+ def setCliOptions( self, name ):
+ """
+ Parse the cluster options to create an ONOS cli component with the given name
+ """
+ main.componentDictionary[name] = main.componentDictionary[self.name].copy()
+ main.componentDictionary[name]['type'] = "OnosCliDriver"
+ main.componentDictionary[name]['connect_order'] = str( int( main.componentDictionary[name]['connect_order'] ) + 1 )
+ main.log.debug( main.componentDictionary[name] )
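+        # Illustrative result: for name="ONOScli1", the new entry is a copy
+        # of this component's options with, e.g.,
+        #     { 'type': 'OnosCliDriver', 'connect_order': '2', ... }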
+
+ def createCliComponent( self, name ):
+ """
+ Creates a new onos cli component.
+
+ Arguments:
+ name - The string of the name of this component. The new component
+ will be assigned to main.<name> .
+ In addition, main.<name>.name = str( name )
+ """
+ try:
+ # look to see if this component already exists
+ getattr( main, name )
+ except AttributeError:
+ # namespace is clear, creating component
+ self.setCliOptions( name )
+ return main.componentInit( name )
+ except pexpect.EOF:
+ main.log.error( self.name + ": EOF exception found" )
+ main.log.error( self.name + ": " + self.handle.before )
+ main.cleanup()
+ main.exit()
+ except Exception:
+ main.log.exception( self.name + ": Uncaught exception!" )
+ main.cleanup()
+ main.exit()
+ else:
+ # namespace is not clear!
+ main.log.error( name + " component already exists!" )
+ main.cleanup()
+ main.exit()
+
+ def setRestOptions( self, name, host ):
+ """
+        Parse the cluster options to create an ONOS REST component with the given name
+ """
+ main.componentDictionary[name] = main.componentDictionary[self.name].copy()
+ main.log.debug( main.componentDictionary[name] )
+ user = main.componentDictionary[name]['COMPONENTS'].get( "web_user", "onos" )
+ main.componentDictionary[name]['user'] = self.checkOptions( user, "onos" )
+ password = main.componentDictionary[name]['COMPONENTS'].get( "web_pass", "rocks" )
+ main.componentDictionary[name]['pass'] = self.checkOptions( password, "rocks" )
+ main.componentDictionary[name]['host'] = host
+ port = main.componentDictionary[name]['COMPONENTS'].get( "rest_port", "8181" )
+ main.componentDictionary[name]['port'] = self.checkOptions( port, "8181" )
+ main.componentDictionary[name]['type'] = "OnosRestDriver"
+ main.componentDictionary[name]['connect_order'] = str( int( main.componentDictionary[name]['connect_order'] ) + 1 )
+ main.log.debug( main.componentDictionary[name] )
+
+ def createRestComponent( self, name, ipAddress ):
+ """
+ Creates a new onos rest component.
+
+ Arguments:
+ name - The string of the name of this component. The new component
+ will be assigned to main.<name> .
+ In addition, main.<name>.name = str( name )
+ """
+ try:
+ # look to see if this component already exists
+ getattr( main, name )
+ except AttributeError:
+ # namespace is clear, creating component
+ self.setRestOptions( name, ipAddress )
+ return main.componentInit( name )
+ except pexpect.EOF:
+ main.log.error( self.name + ": EOF exception found" )
+ main.log.error( self.name + ": " + self.handle.before )
+ main.cleanup()
+ main.exit()
+ except Exception:
+ main.log.exception( self.name + ": Uncaught exception!" )
+ main.cleanup()
+ main.exit()
+ else:
+ # namespace is not clear!
+ main.log.error( name + " component already exists!" )
+ main.cleanup()
+ main.exit()
+
+ def setBenchOptions( self, name ):
+ """
+ Parse the cluster options to create an ONOS "bench" component with the given name
+ """
+ main.componentDictionary[name] = main.componentDictionary[self.name].copy()
+ main.componentDictionary[name]['type'] = "OnosDriver"
+ home = main.componentDictionary[name]['COMPONENTS'].get( "onos_home", None )
+ main.componentDictionary[name]['home'] = self.checkOptions( home, None )
+ main.componentDictionary[name]['connect_order'] = str( int( main.componentDictionary[name]['connect_order'] ) + 1 )
+ main.log.debug( main.componentDictionary[name] )
+
+ def createBenchComponent( self, name ):
+ """
+ Creates a new onos "bench" component.
+
+ Arguments:
+ name - The string of the name of this component. The new component
+ will be assigned to main.<name> .
+ In addition, main.<name>.name = str( name )
+ """
+ try:
+ # look to see if this component already exists
+ getattr( main, name )
+ except AttributeError:
+ # namespace is clear, creating component
+ self.setBenchOptions( name )
+ return main.componentInit( name )
+ except pexpect.EOF:
+ main.log.error( self.name + ": EOF exception found" )
+ main.log.error( self.name + ": " + self.handle.before )
+ main.cleanup()
+ main.exit()
+ except Exception:
+ main.log.exception( self.name + ": Uncaught exception!" )
+ main.cleanup()
+ main.exit()
+ else:
+ # namespace is not clear!
+ main.log.error( name + " component already exists!" )
+ main.cleanup()
+ main.exit()
+
+ def createComponents( self, prefix='' ):
+ """
+        Creates CLI, REST, and "bench" components for each node in the cluster
+        """
+        # TODO: This needs work to support starting two separate clusters in one test
+ cliPrefix = prefix + "cli"
+ restPrefix = prefix + "rest"
+ benchPrefix = prefix + "bench"
+ for i in xrange( 1, self.maxNodes + 1 ):
+ cliName = cliPrefix + str( i )
+ restName = restPrefix + str( i )
+ benchName = benchPrefix + str( i )
+
+            # Unfortunately this means we need to have a cell set before
+            # running TestON, even if it is just the entire possible cluster size
+ ip = self.onosIps[ 'OC' + str( i ) ]
+
+ cli = self.createCliComponent( cliName )
+ rest = self.createRestComponent( restName, ip )
+ bench = self.createBenchComponent( benchName )
+ self.nodes.append( Controller( prefix + str( i ), ip, cli, rest, bench ) )
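+        # Illustrative result for prefix="ONOS" and a 3-node cell: components
+        # ONOScli1..3, ONOSrest1..3, and ONOSbench1..3 are created and wrapped
+        # in Controller objects named ONOS1..ONOS3 in self.nodes.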
+
+        ## DEBUG ########################################################################
+        print "Printing nodes:"
+        try:
+            print self.nodes
+            for node in self.nodes:
+                main.log.error( repr( node ) )
+                main.log.warn( node )
+        except Exception as e:
+            print repr( e )
+        ## END debug ####################################################################
diff --git a/TestON/drivers/common/clidriver.py b/TestON/drivers/common/clidriver.py
index 4938369..2dc1274 100644
--- a/TestON/drivers/common/clidriver.py
+++ b/TestON/drivers/common/clidriver.py
@@ -33,6 +33,7 @@
"""
def __init__( self ):
super( CLI, self ).__init__()
+
def checkPrompt(self):
for key in self.options:
if key == "prompt" and self.options['prompt'] is not None:
diff --git a/TestON/drivers/component.py b/TestON/drivers/component.py
index 339c0b3..fd8ba67 100644
--- a/TestON/drivers/component.py
+++ b/TestON/drivers/component.py
@@ -28,18 +28,27 @@
"""
This is the template class for components
"""
+    def __str__( self ):
+        try:
+            assert self.name
+        except ( AttributeError, AssertionError ):
+            return repr( self )
+        return str( self.name )
+
def __init__( self ):
self.default = ''
+ self.name = ''
self.wrapped = sys.modules[ __name__ ]
self.count = 0
self.prompt = "\$"
def __getattr__( self, name ):
"""
- This will invoke, if the attribute wasn't found the usual ways.
- Here it will look for assert_attribute and will execute when
- AttributeError occurs.
- It will return the result of the assert_attribute.
+ Called when an attribute lookup has not found the attribute
+ in the usual places (i.e. it is not an instance attribute nor
+ is it found in the class tree for self). name is the attribute
+ name. This method should return the (computed) attribute value
+ or raise an AttributeError exception.
"""
try:
return getattr( self.wrapped, name )
@@ -48,22 +57,7 @@
if "'module' object has no attribute '__path__'" in error:
pass
else:
- main.log.error( str(error.__class__) + " " + str(error) )
- try:
- def experimentHandling( *args, **kwargs ):
- if main.EXPERIMENTAL_MODE == main.TRUE:
- result = self.experimentRun( *args, **kwargs )
- main.log.info( "EXPERIMENTAL MODE. API " +
- str( name ) +
- " not yet implemented. " +
- "Returning dummy values" )
- return result
- else:
- return main.FALSE
- return experimentHandling
- except TypeError as e:
- main.log.error( "Arguments for experimental mode does not" +
- " have key 'retruns'" + e )
+ raise error
def connect( self ):
diff --git a/TestON/tests/HA/HAclusterRestart/HAclusterRestart.py b/TestON/tests/HA/HAclusterRestart/HAclusterRestart.py
index ed3803a..14ffc7c 100644
--- a/TestON/tests/HA/HAclusterRestart/HAclusterRestart.py
+++ b/TestON/tests/HA/HAclusterRestart/HAclusterRestart.py
@@ -125,7 +125,7 @@
"""
Ping across added host intents
"""
- main.HA.pingAcrossHostIntent( main, True, True )
+ main.HA.pingAcrossHostIntent( main )
def CASE5( self, main ):
"""
diff --git a/TestON/tests/HA/HAcontinuousStopNodes/HAcontinuousStopNodes.py b/TestON/tests/HA/HAcontinuousStopNodes/HAcontinuousStopNodes.py
index c5dc29a..92aae09 100644
--- a/TestON/tests/HA/HAcontinuousStopNodes/HAcontinuousStopNodes.py
+++ b/TestON/tests/HA/HAcontinuousStopNodes/HAcontinuousStopNodes.py
@@ -130,7 +130,7 @@
"""
Ping across added host intents
"""
- main.HA.pingAcrossHostIntent( main, True, False )
+ main.HA.pingAcrossHostIntent( main )
def CASE5( self, main ):
"""
diff --git a/TestON/tests/HA/HAfullNetPartition/HAfullNetPartition.py b/TestON/tests/HA/HAfullNetPartition/HAfullNetPartition.py
index 29c82a1..7e10a09 100644
--- a/TestON/tests/HA/HAfullNetPartition/HAfullNetPartition.py
+++ b/TestON/tests/HA/HAfullNetPartition/HAfullNetPartition.py
@@ -130,7 +130,7 @@
"""
Ping across added host intents
"""
- main.HA.pingAcrossHostIntent( main, True, True )
+ main.HA.pingAcrossHostIntent( main )
def CASE5( self, main ):
"""
diff --git a/TestON/tests/HA/HAkillNodes/HAkillNodes.py b/TestON/tests/HA/HAkillNodes/HAkillNodes.py
index 24d5bf8..b2ae8c5 100644
--- a/TestON/tests/HA/HAkillNodes/HAkillNodes.py
+++ b/TestON/tests/HA/HAkillNodes/HAkillNodes.py
@@ -67,13 +67,8 @@
start cli sessions
start tcpdump
"""
- import imp
- import pexpect
- import time
- import json
main.log.info( "ONOS HA test: Restart a minority of ONOS nodes - " +
"initialization" )
- # set global variables
# These are for csv plotting in jenkins
main.HAlabels = []
main.HAdata = []
@@ -85,31 +80,27 @@
main.exit()
main.testSetUp.envSetupDescription()
try:
+ from dependencies.Cluster import Cluster
from tests.HA.dependencies.HA import HA
main.HA = HA()
- # load some variables from the params file
+ main.Cluster = Cluster( main.ONOScell.nodes )
cellName = main.params[ 'ENV' ][ 'cellName' ]
main.apps = main.params[ 'ENV' ][ 'appString' ]
- main.numCtrls = int( main.params[ 'num_controllers' ] )
- if main.ONOSbench.maxNodes and\
- main.ONOSbench.maxNodes < main.numCtrls:
- main.numCtrls = int( main.ONOSbench.maxNodes )
- main.maxNodes = main.numCtrls
- stepResult = main.testSetUp.envSetup( hasNode=True )
+ stepResult = main.testSetUp.envSetup( main.Cluster, hasNode=True )
except Exception as e:
main.testSetUp.envSetupException( e )
main.testSetUp.evnSetupConclusion( stepResult )
main.HA.generateGraph( "HAkillNodes" )
main.step( "Make sure ONOS service doesn't automatically respawn" )
- handle = main.ONOSbench.handle
+    handle = main.Cluster.controllers[ 0 ].Bench.handle
handle.sendline( "sed -i -e 's/^respawn$/#respawn/g' tools/package/init/onos.conf" )
handle.expect( "\$" ) # $ from the command
handle.sendline( "sed -i -e 's/^Restart=always/Restart=no/g' tools/package/init/onos.service" )
handle.expect( "\$" ) # $ from the command
handle.expect( "\$" ) # $ from the prompt
- main.testSetUp.ONOSSetUp( main.Mininet1, cellName=cellName, removeLog=True,
+ main.testSetUp.ONOSSetUp( main.Mininet1, main.Cluster, cellName=cellName, removeLog=True,
extraApply=main.HA.customizeOnosGenPartitions,
extraClean=main.HA.cleanUpGenPartition )
@@ -137,7 +128,7 @@
"""
Ping across added host intents
"""
- main.HA.pingAcrossHostIntent( main, True, False )
+ main.HA.pingAcrossHostIntent( main )
def CASE5( self, main ):
"""
@@ -152,28 +143,26 @@
assert main.numCtrls, "main.numCtrls not defined"
assert main, "main not defined"
assert utilities.assert_equals, "utilities.assert_equals not defined"
- assert main.CLIs, "main.CLIs not defined"
- assert main.nodes, "main.nodes not defined"
main.case( "Kill minority of ONOS nodes" )
main.step( "Checking ONOS Logs for errors" )
- for node in main.nodes:
- main.log.debug( "Checking logs for errors on " + node.name + ":" )
- main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
+ for ctrl in main.Cluster.active():
+ main.log.debug( "Checking logs for errors on " + ctrl.name + ":" )
+ main.log.warn( ctrl.checkLogs( ctrl.ipAddress ) )
- n = len( main.nodes ) # Number of nodes
+ n = len( main.Cluster.controllers ) # Number of nodes
p = ( ( n + 1 ) / 2 ) + 1 # Number of partitions
- main.kill = [ 0 ] # ONOS node to kill, listed by index in main.nodes
+    main.kill = [ main.Cluster.controllers[ 0 ] ]  # List of ONOS controllers to kill
if n > 3:
- main.kill.append( p - 1 )
+ main.kill.append( main.Cluster.controllers[ p - 1 ] )
# NOTE: This only works for cluster sizes of 3,5, or 7.
- main.step( "Kill " + str( len( main.kill ) ) + " ONOS nodes" )
+ main.step( "Killing nodes: " + str( main.kill ) )
killResults = main.TRUE
- for i in main.kill:
+ for ctrl in main.kill:
killResults = killResults and\
- main.ONOSbench.onosKill( main.nodes[ i ].ip_address )
- main.activeNodes.remove( i )
+ ctrl.onosKill( ctrl.ipAddress )
+ ctrl.active = False
utilities.assert_equals( expect=main.TRUE, actual=killResults,
onpass="ONOS nodes killed successfully",
onfail="ONOS nodes NOT successfully killed" )
@@ -181,7 +170,7 @@
main.step( "Checking ONOS nodes" )
nodeResults = utilities.retry( main.HA.nodesCheck,
False,
- args=[ main.activeNodes ],
+ args=[ main.Cluster.active() ],
sleep=15,
attempts=5 )
@@ -190,11 +179,10 @@
onfail="Nodes check NOT successful" )
if not nodeResults:
- for i in main.activeNodes:
- cli = main.CLIs[ i ]
+ for ctrl in main.Cluster.active():
main.log.debug( "{} components not ACTIVE: \n{}".format(
- cli.name,
- cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
+ ctrl.name,
+ ctrl.CLI.sendline( "scr:list | grep -v ACTIVE" ) ) )
main.log.error( "Failed to start ONOS, stopping test" )
main.cleanup()
main.exit()
@@ -203,7 +191,6 @@
"""
Bring up the stopped nodes
"""
-
main.HA.bringUpStoppedNode( main )
def CASE7( self, main ):
@@ -221,13 +208,12 @@
leaderList = []
restarted = []
- for i in main.kill:
- restarted.append( main.nodes[ i ].ip_address )
+ for ctrl in main.kill:
+ restarted.append( ctrl.ipAddress )
leaderResult = main.TRUE
- for i in main.activeNodes:
- cli = main.CLIs[ i ]
- leaderN = cli.electionTestLeader()
+ for ctrl in main.Cluster.active():
+ leaderN = ctrl.electionTestLeader()
leaderList.append( leaderN )
if leaderN == main.FALSE:
# error in response
@@ -236,12 +222,12 @@
" error logs" )
leaderResult = main.FALSE
elif leaderN is None:
- main.log.error( cli.name +
+ main.log.error( ctrl.name +
" shows no leader for the election-app was" +
" elected after the old one died" )
leaderResult = main.FALSE
elif leaderN in restarted:
- main.log.error( cli.name + " shows " + str( leaderN ) +
+ main.log.error( ctrl.name + " shows " + str( leaderN ) +
" as leader for the election-app, but it " +
"was restarted" )
leaderResult = main.FALSE
diff --git a/TestON/tests/HA/HAkillNodes/HAkillNodes.topo b/TestON/tests/HA/HAkillNodes/HAkillNodes.topo
index 7c18a98..f3b9278 100644
--- a/TestON/tests/HA/HAkillNodes/HAkillNodes.topo
+++ b/TestON/tests/HA/HAkillNodes/HAkillNodes.topo
@@ -12,165 +12,24 @@
</COMPONENTS>
</ONOSbench>
- <ONOScli1>
- <host>localhost</host>
+ <ONOScell>
+ <host>localhost</host> # ONOS "bench" machine
<user>sdn</user>
<password>rocks</password>
- <type>OnosCliDriver</type>
- <connect_order>2</connect_order>
+ <type>OnosClusterDriver</type>
+ <connect_order>1</connect_order>
<COMPONENTS>
+ <cluster_name></cluster_name> # Used as a prefix for cluster components. Defaults to 'ONOS'
<karaf_username></karaf_username>
<karaf_password></karaf_password>
- <prompt></prompt>
+ <web_user></web_user>
+ <web_pass></web_pass>
+ <rest_port></rest_port>
+ <prompt></prompt> # TODO: we technically need a few of these, one per component
+ <onos_home></onos_home> # defines where onos home is
+ <nodes> 7 </nodes> # number of nodes in the cluster
</COMPONENTS>
- </ONOScli1>
-
- <ONOScli2>
- <host>localhost</host>
- <user>sdn</user>
- <password>rocks</password>
- <type>OnosCliDriver</type>
- <connect_order>3</connect_order>
- <COMPONENTS>
- <prompt></prompt>
- </COMPONENTS>
- </ONOScli2>
-
- <ONOScli3>
- <host>localhost</host>
- <user>sdn</user>
- <password>rocks</password>
- <type>OnosCliDriver</type>
- <connect_order>4</connect_order>
- <COMPONENTS>
- <prompt></prompt>
- </COMPONENTS>
- </ONOScli3>
-
-
- <ONOScli4>
- <host>localhost</host>
- <user>sdn</user>
- <password>rocks</password>
- <type>OnosCliDriver</type>
- <connect_order>5</connect_order>
- <COMPONENTS>
- <prompt></prompt>
- </COMPONENTS>
- </ONOScli4>
-
-
- <ONOScli5>
- <host>localhost</host>
- <user>sdn</user>
- <password>rocks</password>
- <type>OnosCliDriver</type>
- <connect_order>6</connect_order>
- <COMPONENTS>
- <prompt></prompt>
- </COMPONENTS>
- </ONOScli5>
-
-
- <ONOScli6>
- <host>localhost</host>
- <user>sdn</user>
- <password>rocks</password>
- <type>OnosCliDriver</type>
- <connect_order>7</connect_order>
- <COMPONENTS>
- <prompt></prompt>
- </COMPONENTS>
- </ONOScli6>
-
-
- <ONOScli7>
- <host>localhost</host>
- <user>sdn</user>
- <password>rocks</password>
- <type>OnosCliDriver</type>
- <connect_order>8</connect_order>
- <COMPONENTS>
- <prompt></prompt>
- </COMPONENTS>
- </ONOScli7>
-
- <ONOS1>
- <host>OC1</host>
- <user>sdn</user>
- <password>rocks</password>
- <type>OnosDriver</type>
- <connect_order>9</connect_order>
- <COMPONENTS>
- <prompt></prompt>
- </COMPONENTS>
- </ONOS1>
-
- <ONOS2>
- <host>OC2</host>
- <user>sdn</user>
- <password>rocks</password>
- <type>OnosDriver</type>
- <connect_order>10</connect_order>
- <COMPONENTS>
- <prompt></prompt>
- </COMPONENTS>
- </ONOS2>
-
- <ONOS3>
- <host>OC3</host>
- <user>sdn</user>
- <password>rocks</password>
- <type>OnosDriver</type>
- <connect_order>11</connect_order>
- <COMPONENTS>
- <prompt></prompt>
- </COMPONENTS>
- </ONOS3>
-
- <ONOS4>
- <host>OC4</host>
- <user>sdn</user>
- <password>rocks</password>
- <type>OnosDriver</type>
- <connect_order>12</connect_order>
- <COMPONENTS>
- <prompt></prompt>
- </COMPONENTS>
- </ONOS4>
-
- <ONOS5>
- <host>OC5</host>
- <user>sdn</user>
- <password>rocks</password>
- <type>OnosDriver</type>
- <connect_order>13</connect_order>
- <COMPONENTS>
- <prompt></prompt>
- </COMPONENTS>
- </ONOS5>
-
- <ONOS6>
- <host>OC6</host>
- <user>sdn</user>
- <password>rocks</password>
- <type>OnosDriver</type>
- <connect_order>14</connect_order>
- <COMPONENTS>
- <prompt></prompt>
- </COMPONENTS>
- </ONOS6>
-
- <ONOS7>
- <host>OC7</host>
- <user>sdn</user>
- <password>rocks</password>
- <type>OnosDriver</type>
- <connect_order>15</connect_order>
- <COMPONENTS>
- <prompt></prompt>
- </COMPONENTS>
- </ONOS7>
+ </ONOScell>
<Mininet1>
<host>OCN</host>
diff --git a/TestON/tests/HA/HAsanity/HAsanity.params b/TestON/tests/HA/HAsanity/HAsanity.params
index edd6bfb..ace6b77 100644
--- a/TestON/tests/HA/HAsanity/HAsanity.params
+++ b/TestON/tests/HA/HAsanity/HAsanity.params
@@ -38,7 +38,6 @@
<pull>False</pull>
<branch>master</branch>
</GIT>
- <num_controllers> 7 </num_controllers>
<tcpdump> False </tcpdump>
<CTRL>
diff --git a/TestON/tests/HA/HAsanity/HAsanity.py b/TestON/tests/HA/HAsanity/HAsanity.py
index 981bac4..c70d921 100644
--- a/TestON/tests/HA/HAsanity/HAsanity.py
+++ b/TestON/tests/HA/HAsanity/HAsanity.py
@@ -67,9 +67,6 @@
start cli sessions
start tcpdump
"""
- import imp
- import time
- import json
main.log.info( "ONOS HA Sanity test - initialization" )
# These are for csv plotting in jenkins
main.HAlabels = []
@@ -78,27 +75,23 @@
from tests.dependencies.ONOSSetup import ONOSSetup
main.testSetUp = ONOSSetup()
except ImportError:
- main.log.error( "ONOSSetup not found exiting the test" )
+ main.log.error( "ONOSSetup not found. exiting the test" )
main.exit()
main.testSetUp.envSetupDescription()
try:
+ from dependencies.Cluster import Cluster
from tests.HA.dependencies.HA import HA
main.HA = HA()
- # load some variables from the params file
+ main.Cluster = Cluster( main.ONOScell.nodes )
cellName = main.params[ 'ENV' ][ 'cellName' ]
main.apps = main.params[ 'ENV' ][ 'appString' ]
- main.numCtrls = int( main.params[ 'num_controllers' ] )
- if main.ONOSbench.maxNodes and \
- main.ONOSbench.maxNodes < main.numCtrls:
- main.numCtrls = int( main.ONOSbench.maxNodes )
- main.maxNodes = main.numCtrls
- stepResult = main.testSetUp.envSetup( hasNode=True )
+ stepResult = main.testSetUp.envSetup( main.Cluster, hasNode=True )
except Exception as e:
main.testSetUp.envSetupException( e )
main.testSetUp.evnSetupConclusion( stepResult )
main.HA.generateGraph( "HAsanity" )
- main.testSetUp.ONOSSetUp( main.Mininet1, cellName=cellName, removeLog=True,
+ main.testSetUp.ONOSSetUp( main.Mininet1, main.Cluster, cellName=cellName, removeLog=True,
extraApply=main.HA.startingMininet )
main.HA.initialSetUp()
@@ -125,8 +118,7 @@
"""
Ping across added host intents
"""
-
- main.HA.pingAcrossHostIntent( main, True, True )
+ main.HA.pingAcrossHostIntent( main )
def CASE5( self, main ):
"""
@@ -142,8 +134,6 @@
assert main.numCtrls, "main.numCtrls not defined"
assert main, "main not defined"
assert utilities.assert_equals, "utilities.assert_equals not defined"
- assert main.CLIs, "main.CLIs not defined"
- assert main.nodes, "main.nodes not defined"
main.case( "Wait 60 seconds instead of inducing a failure" )
time.sleep( 60 )
utilities.assert_equals(
@@ -164,12 +154,11 @@
# NOTE: this only works for the sanity test. In case of failures,
# leader will likely change
- leader = main.nodes[ main.activeNodes[ 0 ] ].ip_address
+ leader = main.Cluster.testLeader
leaderResult = main.TRUE
- for i in main.activeNodes:
- cli = main.CLIs[ i ]
- leaderN = cli.electionTestLeader()
+ for ctrl in main.Cluster.active():
+ leaderN = ctrl.electionTestLeader()
leaderList.append( leaderN )
# verify leader is ONOS1
if leaderN == leader:
@@ -192,7 +181,7 @@
leaderResult = main.FALSE
main.log.error(
"Inconsistent view of leader for the election test app" )
- # TODO: print the list
+ main.log.debug( leaderList )
utilities.assert_equals(
expect=main.TRUE,
actual=leaderResult,
diff --git a/TestON/tests/HA/HAsanity/HAsanity.topo b/TestON/tests/HA/HAsanity/HAsanity.topo
index 7c18a98..f3b9278 100644
--- a/TestON/tests/HA/HAsanity/HAsanity.topo
+++ b/TestON/tests/HA/HAsanity/HAsanity.topo
@@ -12,165 +12,24 @@
</COMPONENTS>
</ONOSbench>
- <ONOScli1>
- <host>localhost</host>
+ <ONOScell>
+ <host>localhost</host> # ONOS "bench" machine
<user>sdn</user>
<password>rocks</password>
- <type>OnosCliDriver</type>
- <connect_order>2</connect_order>
+ <type>OnosClusterDriver</type>
+ <connect_order>1</connect_order>
<COMPONENTS>
+ <cluster_name></cluster_name> # Used as a prefix for cluster components. Defaults to 'ONOS'
<karaf_username></karaf_username>
<karaf_password></karaf_password>
- <prompt></prompt>
+ <web_user></web_user>
+ <web_pass></web_pass>
+ <rest_port></rest_port>
+ <prompt></prompt> # TODO: we technically need a few of these, one per component
+ <onos_home></onos_home> # defines where onos home is
+ <nodes> 7 </nodes> # number of nodes in the cluster
</COMPONENTS>
- </ONOScli1>
-
- <ONOScli2>
- <host>localhost</host>
- <user>sdn</user>
- <password>rocks</password>
- <type>OnosCliDriver</type>
- <connect_order>3</connect_order>
- <COMPONENTS>
- <prompt></prompt>
- </COMPONENTS>
- </ONOScli2>
-
- <ONOScli3>
- <host>localhost</host>
- <user>sdn</user>
- <password>rocks</password>
- <type>OnosCliDriver</type>
- <connect_order>4</connect_order>
- <COMPONENTS>
- <prompt></prompt>
- </COMPONENTS>
- </ONOScli3>
-
-
- <ONOScli4>
- <host>localhost</host>
- <user>sdn</user>
- <password>rocks</password>
- <type>OnosCliDriver</type>
- <connect_order>5</connect_order>
- <COMPONENTS>
- <prompt></prompt>
- </COMPONENTS>
- </ONOScli4>
-
-
- <ONOScli5>
- <host>localhost</host>
- <user>sdn</user>
- <password>rocks</password>
- <type>OnosCliDriver</type>
- <connect_order>6</connect_order>
- <COMPONENTS>
- <prompt></prompt>
- </COMPONENTS>
- </ONOScli5>
-
-
- <ONOScli6>
- <host>localhost</host>
- <user>sdn</user>
- <password>rocks</password>
- <type>OnosCliDriver</type>
- <connect_order>7</connect_order>
- <COMPONENTS>
- <prompt></prompt>
- </COMPONENTS>
- </ONOScli6>
-
-
- <ONOScli7>
- <host>localhost</host>
- <user>sdn</user>
- <password>rocks</password>
- <type>OnosCliDriver</type>
- <connect_order>8</connect_order>
- <COMPONENTS>
- <prompt></prompt>
- </COMPONENTS>
- </ONOScli7>
-
- <ONOS1>
- <host>OC1</host>
- <user>sdn</user>
- <password>rocks</password>
- <type>OnosDriver</type>
- <connect_order>9</connect_order>
- <COMPONENTS>
- <prompt></prompt>
- </COMPONENTS>
- </ONOS1>
-
- <ONOS2>
- <host>OC2</host>
- <user>sdn</user>
- <password>rocks</password>
- <type>OnosDriver</type>
- <connect_order>10</connect_order>
- <COMPONENTS>
- <prompt></prompt>
- </COMPONENTS>
- </ONOS2>
-
- <ONOS3>
- <host>OC3</host>
- <user>sdn</user>
- <password>rocks</password>
- <type>OnosDriver</type>
- <connect_order>11</connect_order>
- <COMPONENTS>
- <prompt></prompt>
- </COMPONENTS>
- </ONOS3>
-
- <ONOS4>
- <host>OC4</host>
- <user>sdn</user>
- <password>rocks</password>
- <type>OnosDriver</type>
- <connect_order>12</connect_order>
- <COMPONENTS>
- <prompt></prompt>
- </COMPONENTS>
- </ONOS4>
-
- <ONOS5>
- <host>OC5</host>
- <user>sdn</user>
- <password>rocks</password>
- <type>OnosDriver</type>
- <connect_order>13</connect_order>
- <COMPONENTS>
- <prompt></prompt>
- </COMPONENTS>
- </ONOS5>
-
- <ONOS6>
- <host>OC6</host>
- <user>sdn</user>
- <password>rocks</password>
- <type>OnosDriver</type>
- <connect_order>14</connect_order>
- <COMPONENTS>
- <prompt></prompt>
- </COMPONENTS>
- </ONOS6>
-
- <ONOS7>
- <host>OC7</host>
- <user>sdn</user>
- <password>rocks</password>
- <type>OnosDriver</type>
- <connect_order>15</connect_order>
- <COMPONENTS>
- <prompt></prompt>
- </COMPONENTS>
- </ONOS7>
+ </ONOScell>
<Mininet1>
<host>OCN</host>
diff --git a/TestON/tests/HA/HAscaling/HAscaling.py b/TestON/tests/HA/HAscaling/HAscaling.py
index 5ffef36..f35fa57 100644
--- a/TestON/tests/HA/HAscaling/HAscaling.py
+++ b/TestON/tests/HA/HAscaling/HAscaling.py
@@ -133,7 +133,7 @@
"""
Ping across added host intents
"""
- main.HA.pingAcrossHostIntent( main, True, True )
+ main.HA.pingAcrossHostIntent( main )
def CASE5( self, main ):
"""
diff --git a/TestON/tests/HA/HAsingleInstanceRestart/HAsingleInstanceRestart.py b/TestON/tests/HA/HAsingleInstanceRestart/HAsingleInstanceRestart.py
index 1f96ed1..dbc004e 100644
--- a/TestON/tests/HA/HAsingleInstanceRestart/HAsingleInstanceRestart.py
+++ b/TestON/tests/HA/HAsingleInstanceRestart/HAsingleInstanceRestart.py
@@ -241,7 +241,7 @@
"""
Ping across added host intents
"""
- main.HA.pingAcrossHostIntent( main, False, True )
+ main.HA.pingAcrossHostIntent( main )
def CASE5( self, main ):
"""
diff --git a/TestON/tests/HA/HAstopNodes/HAstopNodes.py b/TestON/tests/HA/HAstopNodes/HAstopNodes.py
index e1a66a3..c17e11d 100644
--- a/TestON/tests/HA/HAstopNodes/HAstopNodes.py
+++ b/TestON/tests/HA/HAstopNodes/HAstopNodes.py
@@ -67,13 +67,8 @@
start cli sessions
start tcpdump
"""
- import imp
- import pexpect
- import time
- import json
main.log.info( "ONOS HA test: Stop a minority of ONOS nodes - " +
"initialization" )
- # set global variables
# These are for csv plotting in jenkins
main.HAlabels = []
main.HAdata = []
@@ -85,25 +80,21 @@
main.exit()
main.testSetUp.envSetupDescription()
try:
+ from dependencies.Cluster import Cluster
from tests.HA.dependencies.HA import HA
main.HA = HA()
- # load some variables from the params file
+ main.Cluster = Cluster( main.ONOScell.nodes )
cellName = main.params[ 'ENV' ][ 'cellName' ]
main.apps = main.params[ 'ENV' ][ 'appString' ]
- main.numCtrls = int( main.params[ 'num_controllers' ] )
- if main.ONOSbench.maxNodes and\
- main.ONOSbench.maxNodes < main.numCtrls:
- main.numCtrls = int( main.ONOSbench.maxNodes )
- main.maxNodes = main.numCtrls
- stepResult = main.testSetUp.envSetup( hasNode=True )
+ stepResult = main.testSetUp.envSetup( main.Cluster, hasNode=True )
except Exception as e:
main.testSetUp.envSetupException( e )
main.testSetUp.evnSetupConclusion( stepResult )
main.HA.generateGraph( "HAstopNodes" )
- main.testSetUp.ONOSSetUp( main.Mininet1, cellName=cellName, removeLog=True,
- extraApply=main.HA.customizeOnosGenPartitions,
- extraClean=main.HA.cleanUpGenPartition )
+ main.testSetUp.ONOSSetUp( main.Mininet1, main.Cluster, cellName=cellName, removeLog=True,
+ extraApply=main.HA.customizeOnosGenPartitions,
+ extraClean=main.HA.cleanUpGenPartition )
main.HA.initialSetUp()
@@ -119,20 +110,17 @@
"""
main.HA.assignMastership( main )
-
def CASE3( self, main ):
"""
Assign intents
"""
main.HA.assignIntents( main )
-
-
def CASE4( self, main ):
"""
Ping across added host intents
"""
- main.HA.pingAcrossHostIntent( main, True, False )
+ main.HA.pingAcrossHostIntent( main )
def CASE5( self, main ):
"""
@@ -147,28 +135,26 @@
assert main.numCtrls, "main.numCtrls not defined"
assert main, "main not defined"
assert utilities.assert_equals, "utilities.assert_equals not defined"
- assert main.CLIs, "main.CLIs not defined"
- assert main.nodes, "main.nodes not defined"
main.case( "Stop minority of ONOS nodes" )
main.step( "Checking ONOS Logs for errors" )
- for node in main.nodes:
- main.log.debug( "Checking logs for errors on " + node.name + ":" )
- main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
+ for ctrl in main.Cluster.active():
+ main.log.debug( "Checking logs for errors on " + ctrl.name + ":" )
+ main.log.warn( ctrl.checkLogs( ctrl.ipAddress ) )
- n = len( main.nodes ) # Number of nodes
+ n = len( main.Cluster.controllers ) # Number of nodes
p = ( ( n + 1 ) / 2 ) + 1 # Number of partitions
- main.kill = [ 0 ] # ONOS node to kill, listed by index in main.nodes
+        main.kill = [ main.Cluster.controllers[ 0 ] ]  # List of ONOS controllers to kill
if n > 3:
- main.kill.append( p - 1 )
+ main.kill.append( main.Cluster.controllers[ p - 1 ] )
# NOTE: This only works for cluster sizes of 3,5, or 7.
- main.step( "Stopping " + str( len( main.kill ) ) + " ONOS nodes" )
+ main.step( "Stopping nodes: " + str( main.kill ) )
killResults = main.TRUE
- for i in main.kill:
+ for ctrl in main.kill:
killResults = killResults and\
- main.ONOSbench.onosStop( main.nodes[ i ].ip_address )
- main.activeNodes.remove( i )
+ ctrl.onosStop( ctrl.ipAddress )
+ ctrl.active = False
utilities.assert_equals( expect=main.TRUE, actual=killResults,
onpass="ONOS nodes stopped successfully",
onfail="ONOS nodes NOT successfully stopped" )
@@ -176,7 +162,7 @@
main.step( "Checking ONOS nodes" )
nodeResults = utilities.retry( main.HA.nodesCheck,
False,
- args=[ main.activeNodes ],
+ args=[ main.Cluster.active() ],
sleep=15,
attempts=5 )
@@ -185,11 +171,10 @@
onfail="Nodes check NOT successful" )
if not nodeResults:
- for i in main.activeNodes:
- cli = main.CLIs[ i ]
+ for ctrl in main.Cluster.active():
main.log.debug( "{} components not ACTIVE: \n{}".format(
- cli.name,
- cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
+ ctrl.name,
+ ctrl.CLI.sendline( "scr:list | grep -v ACTIVE" ) ) )
main.log.error( "Failed to start ONOS, stopping test" )
main.cleanup()
main.exit()
@@ -209,21 +194,18 @@
except AttributeError:
main.kill = []
-
main.HA.checkStateAfterONOS( main, afterWhich=0 )
-
main.step( "Leadership Election is still functional" )
# Test of LeadershipElection
leaderList = []
restarted = []
- for i in main.kill:
- restarted.append( main.nodes[ i ].ip_address )
+ for ctrl in main.kill:
+ restarted.append( ctrl.ipAddress )
leaderResult = main.TRUE
- for i in main.activeNodes:
- cli = main.CLIs[ i ]
- leaderN = cli.electionTestLeader()
+ for ctrl in main.Cluster.active():
+ leaderN = ctrl.electionTestLeader()
leaderList.append( leaderN )
if leaderN == main.FALSE:
# error in response
@@ -232,12 +214,12 @@
" error logs" )
leaderResult = main.FALSE
elif leaderN is None:
- main.log.error( cli.name +
+ main.log.error( ctrl.name +
" shows no leader for the election-app was" +
" elected after the old one died" )
leaderResult = main.FALSE
elif leaderN in restarted:
- main.log.error( cli.name + " shows " + str( leaderN ) +
+ main.log.error( ctrl.name + " shows " + str( leaderN ) +
" as leader for the election-app, but it " +
"was restarted" )
leaderResult = main.FALSE
diff --git a/TestON/tests/HA/HAstopNodes/HAstopNodes.topo b/TestON/tests/HA/HAstopNodes/HAstopNodes.topo
index 7c18a98..f3b9278 100644
--- a/TestON/tests/HA/HAstopNodes/HAstopNodes.topo
+++ b/TestON/tests/HA/HAstopNodes/HAstopNodes.topo
@@ -12,165 +12,24 @@
</COMPONENTS>
</ONOSbench>
- <ONOScli1>
- <host>localhost</host>
+ <ONOScell>
+ <host>localhost</host> # ONOS "bench" machine
<user>sdn</user>
<password>rocks</password>
- <type>OnosCliDriver</type>
- <connect_order>2</connect_order>
+ <type>OnosClusterDriver</type>
+ <connect_order>1</connect_order>
<COMPONENTS>
+ <cluster_name></cluster_name> # Used as a prefix for cluster components. Defaults to 'ONOS'
<karaf_username></karaf_username>
<karaf_password></karaf_password>
- <prompt></prompt>
+ <web_user></web_user>
+ <web_pass></web_pass>
+ <rest_port></rest_port>
+ <prompt></prompt> # TODO: we technically need a few of these, one per component
+ <onos_home></onos_home> # defines where onos home is
+ <nodes> 7 </nodes> # number of nodes in the cluster
</COMPONENTS>
- </ONOScli1>
-
- <ONOScli2>
- <host>localhost</host>
- <user>sdn</user>
- <password>rocks</password>
- <type>OnosCliDriver</type>
- <connect_order>3</connect_order>
- <COMPONENTS>
- <prompt></prompt>
- </COMPONENTS>
- </ONOScli2>
-
- <ONOScli3>
- <host>localhost</host>
- <user>sdn</user>
- <password>rocks</password>
- <type>OnosCliDriver</type>
- <connect_order>4</connect_order>
- <COMPONENTS>
- <prompt></prompt>
- </COMPONENTS>
- </ONOScli3>
-
-
- <ONOScli4>
- <host>localhost</host>
- <user>sdn</user>
- <password>rocks</password>
- <type>OnosCliDriver</type>
- <connect_order>5</connect_order>
- <COMPONENTS>
- <prompt></prompt>
- </COMPONENTS>
- </ONOScli4>
-
-
- <ONOScli5>
- <host>localhost</host>
- <user>sdn</user>
- <password>rocks</password>
- <type>OnosCliDriver</type>
- <connect_order>6</connect_order>
- <COMPONENTS>
- <prompt></prompt>
- </COMPONENTS>
- </ONOScli5>
-
-
- <ONOScli6>
- <host>localhost</host>
- <user>sdn</user>
- <password>rocks</password>
- <type>OnosCliDriver</type>
- <connect_order>7</connect_order>
- <COMPONENTS>
- <prompt></prompt>
- </COMPONENTS>
- </ONOScli6>
-
-
- <ONOScli7>
- <host>localhost</host>
- <user>sdn</user>
- <password>rocks</password>
- <type>OnosCliDriver</type>
- <connect_order>8</connect_order>
- <COMPONENTS>
- <prompt></prompt>
- </COMPONENTS>
- </ONOScli7>
-
- <ONOS1>
- <host>OC1</host>
- <user>sdn</user>
- <password>rocks</password>
- <type>OnosDriver</type>
- <connect_order>9</connect_order>
- <COMPONENTS>
- <prompt></prompt>
- </COMPONENTS>
- </ONOS1>
-
- <ONOS2>
- <host>OC2</host>
- <user>sdn</user>
- <password>rocks</password>
- <type>OnosDriver</type>
- <connect_order>10</connect_order>
- <COMPONENTS>
- <prompt></prompt>
- </COMPONENTS>
- </ONOS2>
-
- <ONOS3>
- <host>OC3</host>
- <user>sdn</user>
- <password>rocks</password>
- <type>OnosDriver</type>
- <connect_order>11</connect_order>
- <COMPONENTS>
- <prompt></prompt>
- </COMPONENTS>
- </ONOS3>
-
- <ONOS4>
- <host>OC4</host>
- <user>sdn</user>
- <password>rocks</password>
- <type>OnosDriver</type>
- <connect_order>12</connect_order>
- <COMPONENTS>
- <prompt></prompt>
- </COMPONENTS>
- </ONOS4>
-
- <ONOS5>
- <host>OC5</host>
- <user>sdn</user>
- <password>rocks</password>
- <type>OnosDriver</type>
- <connect_order>13</connect_order>
- <COMPONENTS>
- <prompt></prompt>
- </COMPONENTS>
- </ONOS5>
-
- <ONOS6>
- <host>OC6</host>
- <user>sdn</user>
- <password>rocks</password>
- <type>OnosDriver</type>
- <connect_order>14</connect_order>
- <COMPONENTS>
- <prompt></prompt>
- </COMPONENTS>
- </ONOS6>
-
- <ONOS7>
- <host>OC7</host>
- <user>sdn</user>
- <password>rocks</password>
- <type>OnosDriver</type>
- <connect_order>15</connect_order>
- <COMPONENTS>
- <prompt></prompt>
- </COMPONENTS>
- </ONOS7>
+ </ONOScell>
<Mininet1>
<host>OCN</host>
diff --git a/TestON/tests/HA/HAswapNodes/HAswapNodes.py b/TestON/tests/HA/HAswapNodes/HAswapNodes.py
index e5e9f46..c1e3006 100644
--- a/TestON/tests/HA/HAswapNodes/HAswapNodes.py
+++ b/TestON/tests/HA/HAswapNodes/HAswapNodes.py
@@ -131,7 +131,7 @@
"""
Ping across added host intents
"""
- main.HA.pingAcrossHostIntent( main, True, True )
+ main.HA.pingAcrossHostIntent( main )
def CASE5( self, main ):
"""
diff --git a/TestON/tests/HA/dependencies/HA.py b/TestON/tests/HA/dependencies/HA.py
index 4b8fae7..d7b64de 100644
--- a/TestON/tests/HA/dependencies/HA.py
+++ b/TestON/tests/HA/dependencies/HA.py
@@ -40,6 +40,7 @@
dstDir,
pwd=main.ONOSbench.pwd,
direction="from" )
+
def cleanUpGenPartition( self ):
# clean up gen-partitions file
try:
@@ -54,6 +55,7 @@
main.ONOSbench.handle.before )
main.cleanup()
main.exit()
+
def startingMininet( self ):
main.step( "Starting Mininet" )
# scp topo file to mininet
@@ -68,6 +70,7 @@
utilities.assert_equals( expect=main.TRUE, actual=mnResult,
onpass="Mininet Started",
onfail="Error starting Mininet" )
+
def scalingMetadata( self ):
import re
main.scaling = main.params[ 'scaling' ].split( "," )
@@ -84,6 +87,7 @@
utilities.assert_equals( expect=main.TRUE, actual=genResult,
onpass="New cluster metadata file generated",
onfail="Failled to generate new metadata file" )
+
def swapNodeMetadata( self ):
if main.numCtrls >= 5:
main.numCtrls -= 2
@@ -93,6 +97,7 @@
utilities.assert_equals( expect=main.TRUE, actual=genResult,
onpass="New cluster metadata file generated",
onfail="Failled to generate new metadata file" )
+
def customizeOnosService( self, metadataMethod ):
import os
main.step( "Setup server for cluster metadata file" )
@@ -148,6 +153,7 @@
main.onosServicepath + ".backup",
main.onosServicepath,
direction="to" )
+
def consistentCheck( self ):
"""
Checks that TestON counters are consistent across all nodes.
@@ -162,10 +168,10 @@
# Get onos counters results
onosCountersRaw = []
threads = []
- for i in main.activeNodes:
+ for ctrl in main.Cluster.active():
t = main.Thread( target=utilities.retry,
- name="counters-" + str( i ),
- args=[ main.CLIs[ i ].counters, [ None ] ],
+ name="counters-" + str( ctrl ),
+ args=[ ctrl.counters, [ None ] ],
kwargs={ 'sleep': 5, 'attempts': 5,
'randomTime': True } )
threads.append( t )
@@ -174,12 +180,12 @@
t.join()
onosCountersRaw.append( t.result )
onosCounters = []
- for i in range( len( main.activeNodes ) ):
+ for i in range( len( onosCountersRaw ) ):
try:
onosCounters.append( json.loads( onosCountersRaw[ i ] ) )
except ( ValueError, TypeError ):
- main.log.error( "Could not parse counters response from ONOS" +
- str( main.activeNodes[ i ] + 1 ) )
+ main.log.error( "Could not parse counters response from " +
+ str( main.Cluster.active()[ i ] ) )
main.log.warn( repr( onosCountersRaw[ i ] ) )
onosCounters.append( [] )
@@ -194,7 +200,7 @@
for controller in enumerate( onosCounters ):
for key, value in controller[ 1 ].iteritems():
if 'TestON' in key:
- node = 'ONOS' + str( controller[ 0 ] + 1 )
+ node = str( main.Cluster.active()[ controller[ 0 ] ] )
try:
testCounters[ node ].append( { key: value } )
except KeyError:
@@ -224,14 +230,14 @@
# Get onos counters results and consistentCheck
onosCounters, consistent = self.consistentCheck()
# Check for correct values
- for i in range( len( main.activeNodes ) ):
+ for i in range( len( main.Cluster.active() ) ):
current = onosCounters[ i ]
onosValue = None
try:
onosValue = current.get( counterName )
except AttributeError:
- node = str( main.activeNodes[ i ] + 1 )
- main.log.exception( "ONOS" + node + " counters result " +
+ node = str( main.Cluster.active()[ i ] )
+ main.log.exception( node + " counters result " +
"is not as expected" )
correctResults = main.FALSE
if onosValue == counterValue:
@@ -272,9 +278,9 @@
nodesOutput = []
results = True
threads = []
- for i in nodes:
- t = main.Thread( target=main.CLIs[ i ].nodes,
- name="nodes-" + str( i ),
+ for node in nodes:
+ t = main.Thread( target=node.nodes,
+ name="nodes-" + str( node ),
args=[] )
threads.append( t )
t.start()
@@ -282,7 +288,7 @@
for t in threads:
t.join()
nodesOutput.append( t.result )
- ips = sorted( [ main.nodes[ node ].ip_address for node in nodes ] )
+ ips = sorted( main.Cluster.getIps( activeOnly=True ) )
for i in nodesOutput:
try:
current = json.loads( i )
@@ -300,6 +306,7 @@
currentResult = False
results = results and currentResult
return results
+
def generateGraph( self, testName, plotName="Plot-HA", index=2 ):
# GRAPHS
# NOTE: important params here:
@@ -317,13 +324,12 @@
graphs += ']]></ac:plain-text-body>\n'
graphs += '</ac:structured-macro>\n'
main.log.wiki( graphs )
+
def initialSetUp( self, serviceClean=False ):
"""
rest of initialSetup
"""
- # Create a list of active nodes for use when some nodes are stopped
- main.activeNodes = [ i for i in range( 0, main.numCtrls ) ]
if main.params[ 'tcpdump' ].lower() == "true":
main.step( "Start Packet Capture MN" )
@@ -335,14 +341,16 @@
if serviceClean:
main.step( "Clean up ONOS service changes" )
- main.ONOSbench.handle.sendline( "git checkout -- tools/package/init/onos.conf" )
- main.ONOSbench.handle.sendline( "git checkout -- tools/package/init/onos.service" )
- main.ONOSbench.handle.expect( "\$" )
+            ONOSbench = main.Cluster.controllers[ 0 ].Bench
+ ONOSbench.handle.sendline( "git checkout -- tools/package/init/onos.conf" )
+ ONOSbench.handle.expect( "\$" )
+ ONOSbench.handle.sendline( "git checkout -- tools/package/init/onos.service" )
+ ONOSbench.handle.expect( "\$" )
main.step( "Checking ONOS nodes" )
nodeResults = utilities.retry( self.nodesCheck,
False,
- args=[ main.activeNodes ],
+ args=[ main.Cluster.active() ],
attempts=5 )
utilities.assert_equals( expect=True, actual=nodeResults,
@@ -350,11 +358,10 @@
onfail="Nodes check NOT successful" )
if not nodeResults:
- for i in main.activeNodes:
- cli = main.CLIs[ i ]
+ for ctrl in main.Cluster.active():
main.log.debug( "{} components not ACTIVE: \n{}".format(
- cli.name,
- cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
+ ctrl.name,
+ ctrl.CLI.sendline( "scr:list | grep -v ACTIVE" ) ) )
main.log.error( "Failed to start ONOS, stopping test" )
main.cleanup()
main.exit()
@@ -364,14 +371,14 @@
apps = main.params.get( 'apps' )
if apps:
apps = apps.split( ',' )
- main.log.warn( apps )
+ main.log.debug( "Apps: " + str( apps ) )
activateResult = True
for app in apps:
- main.CLIs[ 0 ].app( app, "Activate" )
+            main.Cluster.active()[ 0 ].app( app, "Activate" )
# TODO: check this worked
time.sleep( 10 ) # wait for apps to activate
for app in apps:
- state = main.CLIs[ 0 ].appStatus( app )
+            state = main.Cluster.active()[ 0 ].appStatus( app )
if state == "ACTIVE":
activateResult = activateResult and True
else:
@@ -385,6 +392,7 @@
main.log.warn( "No apps were specified to be loaded after startup" )
main.step( "Set ONOS configurations" )
+        # FIXME: This should be part of the general startup sequence
config = main.params.get( 'ONOS_Configuration' )
if config:
main.log.debug( config )
@@ -392,7 +400,7 @@
for component in config:
for setting in config[ component ]:
value = config[ component ][ setting ]
- check = main.CLIs[ 0 ].setCfg( component, setting, value )
+ check = main.Cluster.next().setCfg( component, setting, value )
main.log.info( "Value was changed? {}".format( main.TRUE == check ) )
checkResult = check and checkResult
utilities.assert_equals( expect=main.TRUE,
@@ -402,83 +410,133 @@
else:
main.log.warn( "No configurations were specified to be changed after startup" )
- main.step( "App Ids check" )
- appCheck = main.TRUE
- threads = []
- for i in main.activeNodes:
- t = main.Thread( target=main.CLIs[ i ].appToIDCheck,
- name="appToIDCheck-" + str( i ),
- args=[] )
- threads.append( t )
- t.start()
-
- for t in threads:
- t.join()
- appCheck = appCheck and t.result
- if appCheck != main.TRUE:
- node = main.activeNodes[ 0 ]
- main.log.warn( main.CLIs[ node ].apps() )
- main.log.warn( main.CLIs[ node ].appIDs() )
- utilities.assert_equals( expect=main.TRUE, actual=appCheck,
+ main.step( "Check app ids" )
+ appCheck = self.appCheck()
+ utilities.assert_equals( expect=True, actual=appCheck,
onpass="App Ids seem to be correct",
onfail="Something is wrong with app Ids" )
+ def commonChecks( self ):
+ # TODO: make this assertable or assert in here?
+ self.topicsCheck()
+ self.partitionsCheck()
+ self.pendingMapCheck()
+ self.appCheck()
+
+    def topicsCheck( self, extraTopics=None ):
+        """
+        Check for work partition topics in leaders output
+        """
+        if extraTopics is None:  # Avoid a mutable default argument
+            extraTopics = []
+        leaders = main.Cluster.next().leaders()
+ missing = False
+ try:
+ if leaders:
+ parsedLeaders = json.loads( leaders )
+ output = json.dumps( parsedLeaders,
+ sort_keys=True,
+ indent=4,
+ separators=( ',', ': ' ) )
+ main.log.debug( "Leaders: " + output )
+ # check for all intent partitions
+ topics = []
+ for i in range( 14 ):
+ topics.append( "work-partition-" + str( i ) )
+ topics += extraTopics
+ main.log.debug( topics )
+ ONOStopics = [ j[ 'topic' ] for j in parsedLeaders ]
+ for topic in topics:
+ if topic not in ONOStopics:
+ main.log.error( "Error: " + topic +
+ " not in leaders" )
+ missing = True
+ else:
+ main.log.error( "leaders() returned None" )
+ except ( ValueError, TypeError ):
+ main.log.exception( "Error parsing leaders" )
+ main.log.error( repr( leaders ) )
+ if missing:
+            # NOTE: Can we refactor this into the Cluster class? Maybe an option to print the output of a command from each node?
+ for ctrl in main.Cluster.active():
+ response = ctrl.CLI.leaders( jsonFormat=False )
+ main.log.debug( str( ctrl.name ) + " leaders output: \n" +
+ str( response ) )
+ return missing
+
+ def partitionsCheck( self ):
+ # TODO: return something assertable
+ partitions = main.Cluster.next().partitions()
+ try:
+ if partitions:
+ parsedPartitions = json.loads( partitions )
+ output = json.dumps( parsedPartitions,
+ sort_keys=True,
+ indent=4,
+ separators=( ',', ': ' ) )
+ main.log.debug( "Partitions: " + output )
+                # TODO check for a leader in all partitions
+ # TODO check for consistency among nodes
+ else:
+ main.log.error( "partitions() returned None" )
+ except ( ValueError, TypeError ):
+ main.log.exception( "Error parsing partitions" )
+ main.log.error( repr( partitions ) )
+
+ def pendingMapCheck( self ):
+ pendingMap = main.Cluster.next().pendingMap()
+ try:
+ if pendingMap:
+ parsedPending = json.loads( pendingMap )
+ output = json.dumps( parsedPending,
+ sort_keys=True,
+ indent=4,
+ separators=( ',', ': ' ) )
+ main.log.debug( "Pending map: " + output )
+ # TODO check something here?
+ else:
+ main.log.error( "pendingMap() returned None" )
+ except ( ValueError, TypeError ):
+ main.log.exception( "Error parsing pending map" )
+ main.log.error( repr( pendingMap ) )
+
+ def appCheck( self ):
+ """
+ Check App IDs on all nodes
+ """
+        # FIXME: Rename this to appIDCheck, or add a check for installed apps
+ appResults = main.Cluster.command( "appToIDCheck" )
+ appCheck = all( i == main.TRUE for i in appResults )
+ if not appCheck:
+            ctrl = main.Cluster.active()[ 0 ]
+ main.log.debug( "%s apps: %s" % ( ctrl.name, ctrl.apps() ) )
+ main.log.debug( "%s appIDs: %s" % ( ctrl.name, ctrl.appIDs() ) )
+ return appCheck
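+    # NOTE: main.Cluster.command( name, args=... ) is assumed to invoke the
+    # named driver method on every active node and return a list of per-node
+    # results; the work-queue checks below rely on that behavior.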
+
def workQueueStatsCheck( self, workQueueName, completed, inProgress, pending ):
# Completed
- threads = []
- completedValues = []
- for i in main.activeNodes:
- t = main.Thread( target=main.CLIs[ i ].workQueueTotalCompleted,
- name="WorkQueueCompleted-" + str( i ),
- args=[ workQueueName ] )
- threads.append( t )
- t.start()
-
- for t in threads:
- t.join()
- completedValues.append( int( t.result ) )
+ completedValues = main.Cluster.command( "workQueueTotalCompleted",
+ args=[ workQueueName ] )
# Check the results
- completedResults = [ x == completed for x in completedValues ]
+ completedResults = [ int( x ) == completed for x in completedValues ]
completedResult = all( completedResults )
if not completedResult:
main.log.warn( "Expected Work Queue {} to have {} completed, found {}".format(
workQueueName, completed, completedValues ) )
# In Progress
- threads = []
- inProgressValues = []
- for i in main.activeNodes:
- t = main.Thread( target=main.CLIs[ i ].workQueueTotalInProgress,
- name="WorkQueueInProgress-" + str( i ),
- args=[ workQueueName ] )
- threads.append( t )
- t.start()
-
- for t in threads:
- t.join()
- inProgressValues.append( int( t.result ) )
+ inProgressValues = main.Cluster.command( "workQueueTotalInProgress",
+ args=[ workQueueName ] )
# Check the results
- inProgressResults = [ x == inProgress for x in inProgressValues ]
+ inProgressResults = [ int( x ) == inProgress for x in inProgressValues ]
inProgressResult = all( inProgressResults )
if not inProgressResult:
main.log.warn( "Expected Work Queue {} to have {} inProgress, found {}".format(
workQueueName, inProgress, inProgressValues ) )
# Pending
- threads = []
- pendingValues = []
- for i in main.activeNodes:
- t = main.Thread( target=main.CLIs[ i ].workQueueTotalPending,
- name="WorkQueuePending-" + str( i ),
- args=[ workQueueName ] )
- threads.append( t )
- t.start()
-
- for t in threads:
- t.join()
- pendingValues.append( int( t.result ) )
+ pendingValues = main.Cluster.command( "workQueueTotalPending",
+ args=[ workQueueName ] )
# Check the results
- pendingResults = [ x == pending for x in pendingValues ]
+ pendingResults = [ int( x ) == pending for x in pendingValues ]
pendingResult = all( pendingResults )
if not pendingResult:
main.log.warn( "Expected Work Queue {} to have {} pending, found {}".format(
@@ -493,8 +551,6 @@
assert main.numCtrls, "main.numCtrls not defined"
assert main, "main not defined"
assert utilities.assert_equals, "utilities.assert_equals not defined"
- assert main.CLIs, "main.CLIs not defined"
- assert main.nodes, "main.nodes not defined"
main.case( "Assigning devices to controllers" )
main.caseExplanation = "Assign switches to ONOS using 'ovs-vsctl' " + \
@@ -502,9 +558,7 @@
"master of the device."
main.step( "Assign switches to controllers" )
- ipList = []
- for i in range( main.ONOSbench.maxNodes ):
- ipList.append( main.nodes[ i ].ip_address )
+ ipList = main.Cluster.getIps()
swList = []
for i in range( 1, 29 ):
swList.append( "s" + str( i ) )
@@ -517,11 +571,11 @@
main.log.info( str( response ) )
except Exception:
main.log.info( repr( response ) )
- for node in main.nodes:
- if re.search( "tcp:" + node.ip_address, response ):
+ for ctrl in main.Cluster.controllers:
+ if re.search( "tcp:" + ctrl.ipAddress, response ):
mastershipCheck = mastershipCheck and main.TRUE
else:
- main.log.error( "Error, node " + node.ip_address + " is " +
+ main.log.error( "Error, node " + repr( ctrl ) + " is " +
"not in the list of controllers s" +
str( i ) + " is connecting to." )
mastershipCheck = main.FALSE
@@ -530,6 +584,7 @@
actual=mastershipCheck,
onpass="Switch mastership assigned correctly",
onfail="Switches not assigned correctly to controllers" )
+
def assignIntents( self, main ):
"""
Assign intents
@@ -539,8 +594,6 @@
assert main.numCtrls, "main.numCtrls not defined"
assert main, "main not defined"
assert utilities.assert_equals, "utilities.assert_equals not defined"
- assert main.CLIs, "main.CLIs not defined"
- assert main.nodes, "main.nodes not defined"
try:
main.HAlabels
except ( NameError, AttributeError ):
@@ -560,29 +613,15 @@
# install onos-app-fwd
main.step( "Install reactive forwarding app" )
- onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
+ onosCli = main.Cluster.next()
installResults = onosCli.activateApp( "org.onosproject.fwd" )
utilities.assert_equals( expect=main.TRUE, actual=installResults,
onpass="Install fwd successful",
onfail="Install fwd failed" )
main.step( "Check app ids" )
- appCheck = main.TRUE
- threads = []
- for i in main.activeNodes:
- t = main.Thread( target=main.CLIs[ i ].appToIDCheck,
- name="appToIDCheck-" + str( i ),
- args=[] )
- threads.append( t )
- t.start()
-
- for t in threads:
- t.join()
- appCheck = appCheck and t.result
- if appCheck != main.TRUE:
- main.log.warn( onosCli.apps() )
- main.log.warn( onosCli.appIDs() )
- utilities.assert_equals( expect=main.TRUE, actual=appCheck,
+ appCheck = self.appCheck()
+ utilities.assert_equals( expect=True, actual=appCheck,
onpass="App Ids seem to be correct",
onfail="Something is wrong with app Ids" )
@@ -606,34 +645,21 @@
"one or more ping pairs failed" )
main.log.info( "Time for pingall: %2f seconds" %
( time2 - time1 ) )
+ if not pingResult:
+ main.cleanup()
+ main.exit()
# timeout for fwd flows
time.sleep( 11 )
# uninstall onos-app-fwd
main.step( "Uninstall reactive forwarding app" )
- node = main.activeNodes[ 0 ]
- uninstallResult = main.CLIs[ node ].deactivateApp( "org.onosproject.fwd" )
+ uninstallResult = onosCli.deactivateApp( "org.onosproject.fwd" )
utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
onpass="Uninstall fwd successful",
onfail="Uninstall fwd failed" )
main.step( "Check app ids" )
- threads = []
- appCheck2 = main.TRUE
- for i in main.activeNodes:
- t = main.Thread( target=main.CLIs[ i ].appToIDCheck,
- name="appToIDCheck-" + str( i ),
- args=[] )
- threads.append( t )
- t.start()
-
- for t in threads:
- t.join()
- appCheck2 = appCheck2 and t.result
- if appCheck2 != main.TRUE:
- node = main.activeNodes[ 0 ]
- main.log.warn( main.CLIs[ node ].apps() )
- main.log.warn( main.CLIs[ node ].appIDs() )
- utilities.assert_equals( expect=main.TRUE, actual=appCheck2,
+ appCheck2 = self.appCheck()
+ utilities.assert_equals( expect=True, actual=appCheck2,
onpass="App Ids seem to be correct",
onfail="Something is wrong with app Ids" )
@@ -651,17 +677,17 @@
host2 = "00:00:00:00:00:" + \
str( hex( i + 10 )[ 2: ] ).zfill( 2 ).upper()
# NOTE: getHost can return None
- host1Dict = onosCli.getHost( host1 )
- host2Dict = onosCli.getHost( host2 )
+ host1Dict = onosCli.CLI.getHost( host1 )
+ host2Dict = onosCli.CLI.getHost( host2 )
host1Id = None
host2Id = None
if host1Dict and host2Dict:
host1Id = host1Dict.get( 'id', None )
host2Id = host2Dict.get( 'id', None )
if host1Id and host2Id:
- nodeNum = ( i % len( main.activeNodes ) )
- node = main.activeNodes[ nodeNum ]
- tmpId = main.CLIs[ node ].addHostIntent( host1Id, host2Id )
+ numActive = len( main.Cluster.active() )
+ ctrl = main.Cluster.active()[ i % numActive ]
+ tmpId = ctrl.CLI.addHostIntent( host1Id, host2Id )
if tmpId:
main.log.info( "Added intent with id: " + tmpId )
intentIds.append( tmpId )
@@ -671,16 +697,15 @@
else:
main.log.error( "Error, getHost() failed for h" + str( i ) +
" and/or h" + str( i + 10 ) )
- node = main.activeNodes[ 0 ]
- hosts = main.CLIs[ node ].hosts()
- main.log.warn( "Hosts output: " )
+ hosts = main.Cluster.next().hosts()
try:
- main.log.warn( json.dumps( json.loads( hosts ),
- sort_keys=True,
- indent=4,
- separators=( ',', ': ' ) ) )
+ output = json.dumps( json.loads( hosts ),
+ sort_keys=True,
+ indent=4,
+ separators=( ',', ': ' ) )
except ( ValueError, TypeError ):
- main.log.warn( repr( hosts ) )
+ output = repr( hosts )
+ main.log.debug( "Hosts output: %s" % output )
hostResult = main.FALSE
utilities.assert_equals( expect=main.TRUE, actual=hostResult,
onpass="Found a host id for each host",
@@ -726,67 +751,7 @@
count += 1
main.log.info( "%-6s%-15s%-15s" %
( str( count ), str( i ), str( s ) ) )
- leaders = onosCli.leaders()
- try:
- missing = False
- if leaders:
- parsedLeaders = json.loads( leaders )
- main.log.warn( json.dumps( parsedLeaders,
- sort_keys=True,
- indent=4,
- separators=( ',', ': ' ) ) )
- # check for all intent partitions
- topics = []
- for i in range( 14 ):
- topics.append( "work-partition-" + str( i ) )
- main.log.debug( topics )
- ONOStopics = [ j[ 'topic' ] for j in parsedLeaders ]
- for topic in topics:
- if topic not in ONOStopics:
- main.log.error( "Error: " + topic +
- " not in leaders" )
- missing = True
- else:
- main.log.error( "leaders() returned None" )
- except ( ValueError, TypeError ):
- main.log.exception( "Error parsing leaders" )
- main.log.error( repr( leaders ) )
- # Check all nodes
- if missing:
- for i in main.activeNodes:
- response = main.CLIs[ i ].leaders( jsonFormat=False )
- main.log.warn( str( main.CLIs[ i ].name ) + " leaders output: \n" +
- str( response ) )
-
- partitions = onosCli.partitions()
- try:
- if partitions:
- parsedPartitions = json.loads( partitions )
- main.log.warn( json.dumps( parsedPartitions,
- sort_keys=True,
- indent=4,
- separators=( ',', ': ' ) ) )
- # TODO check for a leader in all paritions
- # TODO check for consistency among nodes
- else:
- main.log.error( "partitions() returned None" )
- except ( ValueError, TypeError ):
- main.log.exception( "Error parsing partitions" )
- main.log.error( repr( partitions ) )
- pendingMap = onosCli.pendingMap()
- try:
- if pendingMap:
- parsedPending = json.loads( pendingMap )
- main.log.warn( json.dumps( parsedPending,
- sort_keys=True,
- indent=4,
- separators=( ',', ': ' ) ) )
- # TODO check something here?
- else:
- main.log.error( "pendingMap() returned None" )
- except ( ValueError, TypeError ):
- main.log.exception( "Error parsing pending map" )
- main.log.error( repr( pendingMap ) )
+ self.commonChecks()
intentAddResult = bool( intentAddResult and not missingIntents and
installedCheck )
@@ -797,18 +762,18 @@
for j in range( 100 ):
correct = True
main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
- for i in main.activeNodes:
+ for ctrl in main.Cluster.active():
onosIds = []
- ids = main.CLIs[ i ].getAllIntentsId()
+ ids = ctrl.getAllIntentsId()
onosIds.append( ids )
- main.log.debug( "Intents in " + main.CLIs[ i ].name + ": " +
+ main.log.debug( "Intents in " + ctrl.name + ": " +
str( sorted( onosIds ) ) )
if sorted( ids ) != sorted( intentIds ):
main.log.warn( "Set of intent IDs doesn't match" )
correct = False
break
else:
- intents = json.loads( main.CLIs[ i ].intents() )
+ intents = json.loads( ctrl.intents() )
for intent in intents:
if intent[ 'state' ] != "INSTALLED":
main.log.warn( "Intent " + intent[ 'id' ] +
@@ -837,7 +802,7 @@
else:
count += 1
gossipPeriod = int( main.params[ 'timers' ][ 'gossip' ] )
- maxGossipTime = gossipPeriod * len( main.activeNodes )
+ maxGossipTime = gossipPeriod * len( main.Cluster.controllers )
utilities.assert_greater_equals(
expect=maxGossipTime, actual=gossipTime,
onpass="ECM anti-entropy for intents worked within " +
@@ -848,6 +813,7 @@
if gossipTime <= maxGossipTime:
intentAddResult = True
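+ # refresh the pending map for the "key" check below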
+ pendingMap = main.Cluster.next().pendingMap()
if not intentAddResult or "key" in pendingMap:
import time
installedCheck = True
@@ -881,72 +847,11 @@
count += 1
main.log.info( "%-6s%-15s%-15s" %
( str( count ), str( i ), str( s ) ) )
- leaders = onosCli.leaders()
- try:
- missing = False
- if leaders:
- parsedLeaders = json.loads( leaders )
- main.log.warn( json.dumps( parsedLeaders,
- sort_keys=True,
- indent=4,
- separators=( ',', ': ' ) ) )
- # check for all intent partitions
- # check for election
- topics = []
- for i in range( 14 ):
- topics.append( "work-partition-" + str( i ) )
- # FIXME: this should only be after we start the app
- topics.append( "org.onosproject.election" )
- main.log.debug( topics )
- ONOStopics = [ j[ 'topic' ] for j in parsedLeaders ]
- for topic in topics:
- if topic not in ONOStopics:
- main.log.error( "Error: " + topic +
- " not in leaders" )
- missing = True
- else:
- main.log.error( "leaders() returned None" )
- except ( ValueError, TypeError ):
- main.log.exception( "Error parsing leaders" )
- main.log.error( repr( leaders ) )
- # Check all nodes
- if missing:
- for i in main.activeNodes:
- node = main.CLIs[ i ]
- response = node.leaders( jsonFormat=False )
- main.log.warn( str( node.name ) + " leaders output: \n" +
- str( response ) )
+ self.topicsCheck( [ "org.onosproject.election" ] )
+ self.partitionsCheck()
+ self.pendingMapCheck()
- partitions = onosCli.partitions()
- try:
- if partitions:
- parsedPartitions = json.loads( partitions )
- main.log.warn( json.dumps( parsedPartitions,
- sort_keys=True,
- indent=4,
- separators=( ',', ': ' ) ) )
- # TODO check for a leader in all paritions
- # TODO check for consistency among nodes
- else:
- main.log.error( "partitions() returned None" )
- except ( ValueError, TypeError ):
- main.log.exception( "Error parsing partitions" )
- main.log.error( repr( partitions ) )
- pendingMap = onosCli.pendingMap()
- try:
- if pendingMap:
- parsedPending = json.loads( pendingMap )
- main.log.warn( json.dumps( parsedPending,
- sort_keys=True,
- indent=4,
- separators=( ',', ': ' ) ) )
- # TODO check something here?
- else:
- main.log.error( "pendingMap() returned None" )
- except ( ValueError, TypeError ):
- main.log.exception( "Error parsing pending map" )
- main.log.error( repr( pendingMap ) )
- def pingAcrossHostIntent( self, main, multiIntentCheck, activateNode ):
+ def pingAcrossHostIntent( self, main ):
"""
Ping across added host intents
"""
@@ -955,21 +860,19 @@
assert main.numCtrls, "main.numCtrls not defined"
assert main, "main not defined"
assert utilities.assert_equals, "utilities.assert_equals not defined"
- assert main.CLIs, "main.CLIs not defined"
- assert main.nodes, "main.nodes not defined"
main.case( "Verify connectivity by sending traffic across Intents" )
main.caseExplanation = "Ping across added host intents to check " +\
"functionality and check the state of " +\
"the intent"
- onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
+ onosCli = main.Cluster.next()
main.step( "Check Intent state" )
installedCheck = False
loopCount = 0
while not installedCheck and loopCount < 40:
installedCheck = True
# Print the intent states
- intents = onosCli.intents() if multiIntentCheck else main.ONOScli1.intents()
+ intents = onosCli.intents()
intentStates = []
main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
count = 0
@@ -989,8 +892,6 @@
count += 1
main.log.info( "%-6s%-15s%-15s" %
( str( count ), str( i ), str( s ) ) )
- if not multiIntentCheck:
- break
if not installedCheck:
time.sleep( 1 )
loopCount += 1
@@ -1015,15 +916,15 @@
main.log.error(
"Intents have not been installed correctly, pings failed." )
# TODO: pretty print
- main.log.warn( "ONOS1 intents: " )
try:
tmpIntents = onosCli.intents()
- main.log.warn( json.dumps( json.loads( tmpIntents ),
- sort_keys=True,
- indent=4,
- separators=( ',', ': ' ) ) )
+ output = json.dumps( json.loads( tmpIntents ),
+ sort_keys=True,
+ indent=4,
+ separators=( ',', ': ' ) )
except ( ValueError, TypeError ):
- main.log.warn( repr( tmpIntents ) )
+ output = repr( tmpIntents )
+ main.log.debug( "ONOS1 intents: " + output )
utilities.assert_equals(
expect=main.TRUE,
actual=PingResult,
@@ -1031,81 +932,12 @@
onfail="Intents have not been installed correctly, pings failed." )
main.step( "Check leadership of topics" )
- leaders = onosCli.leaders()
- topicCheck = main.TRUE
- try:
- if leaders:
- parsedLeaders = json.loads( leaders )
- main.log.warn( json.dumps( parsedLeaders,
- sort_keys=True,
- indent=4,
- separators=( ',', ': ' ) ) )
- # check for all intent partitions
- # check for election
- # TODO: Look at Devices as topics now that it uses this system
- topics = []
- for i in range( 14 ):
- topics.append( "work-partition-" + str( i ) )
- # FIXME: this should only be after we start the app
- # FIXME: topics.append( "org.onosproject.election" )
- # Print leaders output
- main.log.debug( topics )
- ONOStopics = [ j[ 'topic' ] for j in parsedLeaders ]
- for topic in topics:
- if topic not in ONOStopics:
- main.log.error( "Error: " + topic +
- " not in leaders" )
- topicCheck = main.FALSE
- else:
- main.log.error( "leaders() returned None" )
- topicCheck = main.FALSE
- except ( ValueError, TypeError ):
- topicCheck = main.FALSE
- main.log.exception( "Error parsing leaders" )
- main.log.error( repr( leaders ) )
- # TODO: Check for a leader of these topics
- # Check all nodes
- if topicCheck:
- for i in main.activeNodes:
- node = main.CLIs[ i ]
- response = node.leaders( jsonFormat=False )
- main.log.warn( str( node.name ) + " leaders output: \n" +
- str( response ) )
-
- utilities.assert_equals( expect=main.TRUE, actual=topicCheck,
+ missingTopics = self.topicsCheck()
+ utilities.assert_equals( expect=False, actual=missingTopics,
onpass="intent Partitions is in leaders",
- onfail="Some topics were lost " )
- # Print partitions
- partitions = onosCli.partitions()
- try:
- if partitions:
- parsedPartitions = json.loads( partitions )
- main.log.warn( json.dumps( parsedPartitions,
- sort_keys=True,
- indent=4,
- separators=( ',', ': ' ) ) )
- # TODO check for a leader in all paritions
- # TODO check for consistency among nodes
- else:
- main.log.error( "partitions() returned None" )
- except ( ValueError, TypeError ):
- main.log.exception( "Error parsing partitions" )
- main.log.error( repr( partitions ) )
- # Print Pending Map
- pendingMap = onosCli.pendingMap()
- try:
- if pendingMap:
- parsedPending = json.loads( pendingMap )
- main.log.warn( json.dumps( parsedPending,
- sort_keys=True,
- indent=4,
- separators=( ',', ': ' ) ) )
- # TODO check something here?
- else:
- main.log.error( "pendingMap() returned None" )
- except ( ValueError, TypeError ):
- main.log.exception( "Error parsing pending map" )
- main.log.error( repr( pendingMap ) )
+ onfail="Some topics were lost" )
+ self.partitionsCheck()
+ self.pendingMapCheck()
if not installedCheck:
main.log.info( "Waiting 60 seconds to see if the state of " +
@@ -1131,72 +963,10 @@
count += 1
main.log.info( "%-6s%-15s%-15s" %
( str( count ), str( i ), str( s ) ) )
- leaders = onosCli.leaders()
- try:
- missing = False
- if leaders:
- parsedLeaders = json.loads( leaders )
- main.log.warn( json.dumps( parsedLeaders,
- sort_keys=True,
- indent=4,
- separators=( ',', ': ' ) ) )
- # check for all intent partitions
- # check for election
- topics = []
- for i in range( 14 ):
- topics.append( "work-partition-" + str( i ) )
- # FIXME: this should only be after we start the app
- topics.append( "org.onosproject.election" )
- main.log.debug( topics )
- ONOStopics = [ j[ 'topic' ] for j in parsedLeaders ]
- for topic in topics:
- if topic not in ONOStopics:
- main.log.error( "Error: " + topic +
- " not in leaders" )
- missing = True
- else:
- main.log.error( "leaders() returned None" )
- except ( ValueError, TypeError ):
- main.log.exception( "Error parsing leaders" )
- main.log.error( repr( leaders ) )
- if missing:
- for i in main.activeNodes:
- node = main.CLIs[ i ]
- response = node.leaders( jsonFormat=False )
- main.log.warn( str( node.name ) + " leaders output: \n" +
- str( response ) )
+ self.commonChecks()
- partitions = onosCli.partitions()
- try:
- if partitions:
- parsedPartitions = json.loads( partitions )
- main.log.warn( json.dumps( parsedPartitions,
- sort_keys=True,
- indent=4,
- separators=( ',', ': ' ) ) )
- # TODO check for a leader in all paritions
- # TODO check for consistency among nodes
- else:
- main.log.error( "partitions() returned None" )
- except ( ValueError, TypeError ):
- main.log.exception( "Error parsing partitions" )
- main.log.error( repr( partitions ) )
- pendingMap = onosCli.pendingMap()
- try:
- if pendingMap:
- parsedPending = json.loads( pendingMap )
- main.log.warn( json.dumps( parsedPending,
- sort_keys=True,
- indent=4,
- separators=( ',', ': ' ) ) )
- # TODO check something here?
- else:
- main.log.error( "pendingMap() returned None" )
- except ( ValueError, TypeError ):
- main.log.exception( "Error parsing pending map" )
- main.log.error( repr( pendingMap ) )
# Print flowrules
- main.log.debug( main.CLIs[ main.activeNodes[0] ].flows( jsonFormat=False ) if activateNode else onosCli.flows( jsonFormat=False ) )
+ main.log.debug( onosCli.flows() )
main.step( "Wait a minute then ping again" )
# the wait is above
PingResult = main.TRUE
@@ -1214,7 +984,7 @@
main.log.error(
"Intents have not been installed correctly, pings failed." )
# TODO: pretty print
- main.log.warn( "ONOS1 intents: " )
+ main.log.warn( str( onosCli.name ) + " intents: " )
try:
tmpIntents = onosCli.intents()
main.log.warn( json.dumps( json.loads( tmpIntents ),
@@ -1238,8 +1008,6 @@
assert main.numCtrls, "main.numCtrls not defined"
assert main, "main not defined"
assert utilities.assert_equals, "utilities.assert_equals not defined"
- assert main.CLIs, "main.CLIs not defined"
- assert main.nodes, "main.nodes not defined"
try:
from tests.dependencies.topology import Topology
except ImportError:
@@ -1259,45 +1027,23 @@
mastershipState = '[]'
# Assert that each device has a master
- rolesNotNull = main.TRUE
- threads = []
- for i in main.activeNodes:
- t = main.Thread( target=main.CLIs[ i ].rolesNotNull,
- name="rolesNotNull-" + str( i ),
- args=[] )
- threads.append( t )
- t.start()
-
- for t in threads:
- t.join()
- rolesNotNull = rolesNotNull and t.result
+ rolesNotNull = all( i == main.TRUE for i in main.Cluster.command( "rolesNotNull" ) )
utilities.assert_equals(
- expect=main.TRUE,
+ expect=True,
actual=rolesNotNull,
onpass="Each device has a master",
onfail="Some devices don't have a master assigned" )
main.step( "Get the Mastership of each switch from each controller" )
- ONOSMastership = []
+ ONOSMastership = main.Cluster.command( "roles" )
+ mastershipCheck = main.FALSE
consistentMastership = True
rolesResults = True
- threads = []
- for i in main.activeNodes:
- t = main.Thread( target=main.CLIs[ i ].roles,
- name="roles-" + str( i ),
- args=[] )
- threads.append( t )
- t.start()
-
- for t in threads:
- t.join()
- ONOSMastership.append( t.result )
-
for i in range( len( ONOSMastership ) ):
- node = str( main.activeNodes[ i ] + 1 )
+ node = str( main.Cluster.active()[ i ] )
if not ONOSMastership[ i ] or "Error" in ONOSMastership[ i ]:
- main.log.error( "Error in getting ONOS" + node + " roles" )
- main.log.warn( "ONOS" + node + " mastership response: " +
+ main.log.error( "Error in getting " + node + " roles" )
+ main.log.warn( node + " mastership response: " +
repr( ONOSMastership[ i ] ) )
rolesResults = False
utilities.assert_equals(
@@ -1319,11 +1065,11 @@
onfail="ONOS nodes have different views of switch roles" )
if rolesResults and not consistentMastership:
- for i in range( len( main.activeNodes ) ):
- node = str( main.activeNodes[ i ] + 1 )
+ for i in range( len( main.Cluster.active() ) ):
+ node = str( main.Cluster.active()[ i ] )
try:
main.log.warn(
- "ONOS" + node + " roles: ",
+ node + " roles: ",
json.dumps(
json.loads( ONOSMastership[ i ] ),
sort_keys=True,
@@ -1332,32 +1078,21 @@
except ( ValueError, TypeError ):
main.log.warn( repr( ONOSMastership[ i ] ) )
elif rolesResults and consistentMastership:
+ mastershipCheck = main.TRUE
mastershipState = ONOSMastership[ 0 ]
main.step( "Get the intents from each controller" )
global intentState
intentState = []
- ONOSIntents = []
- consistentIntents = True # Are Intents consistent across nodes?
- intentsResults = True # Could we read Intents from ONOS?
- threads = []
- for i in main.activeNodes:
- t = main.Thread( target=main.CLIs[ i ].intents,
- name="intents-" + str( i ),
- args=[],
- kwargs={ 'jsonFormat': True } )
- threads.append( t )
- t.start()
-
- for t in threads:
- t.join()
- ONOSIntents.append( t.result )
-
+ ONOSIntents = main.Cluster.command( "intents" )
+ intentCheck = main.FALSE
+ consistentIntents = True  # Are Intents consistent across nodes?
+ intentsResults = True  # Could we read Intents from ONOS?
for i in range( len( ONOSIntents ) ):
- node = str( main.activeNodes[ i ] + 1 )
+ node = str( main.Cluster.active()[ i ] )
if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
- main.log.error( "Error in getting ONOS" + node + " intents" )
- main.log.warn( "ONOS" + node + " intents response: " +
+ main.log.error( "Error in getting " + node + " intents" )
+ main.log.warn( node + " intents response: " +
repr( ONOSIntents[ i ] ) )
intentsResults = False
utilities.assert_equals(
@@ -1387,8 +1122,8 @@
# ... ... ...
# ... ... ...
title = " Id"
- for n in main.activeNodes:
- title += " " * 10 + "ONOS" + str( n + 1 )
+ for ctrl in main.Cluster.active():
+ title += " " * 10 + ctrl.name
main.log.warn( title )
# get all intent keys in the cluster
keys = []
@@ -1415,55 +1150,38 @@
if intentsResults and not consistentIntents:
# print the json objects
- n = str( main.activeNodes[ -1 ] + 1 )
- main.log.debug( "ONOS" + n + " intents: " )
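+ # NOTE: ctrl is still the last active node from the title loop above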
+ main.log.debug( ctrl.name + " intents: " )
main.log.debug( json.dumps( json.loads( ONOSIntents[ -1 ] ),
sort_keys=True,
indent=4,
separators=( ',', ': ' ) ) )
for i in range( len( ONOSIntents ) ):
- node = str( main.activeNodes[ i ] + 1 )
+ node = str( main.Cluster.active()[ i ] )
if ONOSIntents[ i ] != ONOSIntents[ -1 ]:
- main.log.debug( "ONOS" + node + " intents: " )
+ main.log.debug( node + " intents: " )
main.log.debug( json.dumps( json.loads( ONOSIntents[ i ] ),
sort_keys=True,
indent=4,
separators=( ',', ': ' ) ) )
else:
- main.log.debug( "ONOS" + node + " intents match ONOS" +
- n + " intents" )
+ main.log.debug( node + " intents match " + ctrl.name + " intents" )
elif intentsResults and consistentIntents:
+ intentCheck = main.TRUE
intentState = ONOSIntents[ 0 ]
main.step( "Get the flows from each controller" )
global flowState
flowState = []
- ONOSFlows = []
+ ONOSFlows = main.Cluster.command( "flows" ) # TODO: Possible arg: sleep = 30
ONOSFlowsJson = []
flowCheck = main.FALSE
consistentFlows = True
flowsResults = True
- threads = []
- for i in main.activeNodes:
- t = main.Thread( target=main.CLIs[ i ].flows,
- name="flows-" + str( i ),
- args=[],
- kwargs={ 'jsonFormat': True } )
- threads.append( t )
- t.start()
-
- # NOTE: Flows command can take some time to run
- time.sleep( 30 )
- for t in threads:
- t.join()
- result = t.result
- ONOSFlows.append( result )
-
for i in range( len( ONOSFlows ) ):
- num = str( main.activeNodes[ i ] + 1 )
+ node = str( main.Cluster.active()[ i ] )
if not ONOSFlows[ i ] or "Error" in ONOSFlows[ i ]:
- main.log.error( "Error in getting ONOS" + num + " flows" )
- main.log.warn( "ONOS" + num + " flows response: " +
+ main.log.error( "Error in getting " + node + " flows" )
+ main.log.warn( node + " flows response: " +
repr( ONOSFlows[ i ] ) )
flowsResults = False
ONOSFlowsJson.append( None )
@@ -1472,7 +1190,7 @@
ONOSFlowsJson.append( json.loads( ONOSFlows[ i ] ) )
except ( ValueError, TypeError ):
# FIXME: change this to log.error?
- main.log.exception( "Error in parsing ONOS" + num +
+ main.log.exception( "Error in parsing " + node +
" response as json." )
main.log.error( repr( ONOSFlows[ i ] ) )
ONOSFlowsJson.append( None )
@@ -1497,14 +1215,14 @@
if flowsResults and not consistentFlows:
for i in range( len( ONOSFlows ) ):
- node = str( main.activeNodes[ i ] + 1 )
+ node = str( main.Cluster.active()[ i ] )
try:
main.log.warn(
- "ONOS" + node + " flows: " +
+ node + " flows: " +
json.dumps( json.loads( ONOSFlows[ i ] ), sort_keys=True,
indent=4, separators=( ',', ': ' ) ) )
except ( ValueError, TypeError ):
- main.log.warn( "ONOS" + node + " flows: " +
+ main.log.warn( node + " flows: " +
repr( ONOSFlows[ i ] ) )
elif flowsResults and consistentFlows:
flowCheck = main.TRUE
@@ -1563,33 +1281,33 @@
pingTime=500 )
main.step( "Collecting topology information from ONOS" )
- devices = main.topoRelated.getAllDevices( main.activeNodes, False )
- hosts = main.topoRelated.getAllHosts( main.activeNodes, False, inJson=True )
- ports = main.topoRelated.getAllPorts( main.activeNodes, False )
- links = main.topoRelated.getAllLinks( main.activeNodes, False )
- clusters = main.topoRelated.getAllClusters( main.activeNodes, False )
+ devices = main.topoRelated.getAllDevices( main.Cluster.active(), False )
+ hosts = main.topoRelated.getAllHosts( main.Cluster.active(), False, inJson=True )
+ ports = main.topoRelated.getAllPorts( main.Cluster.active(), False )
+ links = main.topoRelated.getAllLinks( main.Cluster.active(), False )
+ clusters = main.topoRelated.getAllClusters( main.Cluster.active(), False )
# Compare json objects for hosts and dataplane clusters
# hosts
main.step( "Host view is consistent across ONOS nodes" )
consistentHostsResult = main.TRUE
for controller in range( len( hosts ) ):
- controllerStr = str( main.activeNodes[ controller ] + 1 )
+ controllerStr = str( main.Cluster.active()[ controller ] )
if hosts[ controller ] and "Error" not in hosts[ controller ]:
if hosts[ controller ] == hosts[ 0 ]:
continue
else: # hosts not consistent
- main.log.error( "hosts from ONOS" +
+ main.log.error( "hosts from " +
controllerStr +
" is inconsistent with ONOS1" )
main.log.warn( repr( hosts[ controller ] ) )
consistentHostsResult = main.FALSE
else:
- main.log.error( "Error in getting ONOS hosts from ONOS" +
+ main.log.error( "Error in getting ONOS hosts from " +
controllerStr )
consistentHostsResult = main.FALSE
- main.log.warn( "ONOS" + controllerStr +
+ main.log.warn( controllerStr +
" hosts response: " +
repr( hosts[ controller ] ) )
utilities.assert_equals(
@@ -1601,11 +1319,11 @@
main.step( "Each host has an IP address" )
ipResult = main.TRUE
for controller in range( 0, len( hosts ) ):
- controllerStr = str( main.activeNodes[ controller ] + 1 )
+ controllerStr = str( main.Cluster.active()[ controller ] )
if hosts[ controller ]:
for host in hosts[ controller ]:
if not host.get( 'ipAddresses', [] ):
- main.log.error( "Error with host ips on controller" +
+ main.log.error( "Error with host ips on " +
controllerStr + ": " + str( host ) )
ipResult = main.FALSE
utilities.assert_equals(
@@ -1618,20 +1336,20 @@
main.step( "Cluster view is consistent across ONOS nodes" )
consistentClustersResult = main.TRUE
for controller in range( len( clusters ) ):
- controllerStr = str( main.activeNodes[ controller ] + 1 )
+ controllerStr = str( main.Cluster.active()[ controller ] )
if "Error" not in clusters[ controller ]:
if clusters[ controller ] == clusters[ 0 ]:
continue
else: # clusters not consistent
- main.log.error( "clusters from ONOS" + controllerStr +
+ main.log.error( "clusters from " + controllerStr +
" is inconsistent with ONOS1" )
consistentClustersResult = main.FALSE
else:
main.log.error( "Error in getting dataplane clusters " +
- "from ONOS" + controllerStr )
+ "from " + controllerStr )
consistentClustersResult = main.FALSE
- main.log.warn( "ONOS" + controllerStr +
+ main.log.warn( controllerStr +
" clusters response: " +
repr( clusters[ controller ] ) )
utilities.assert_equals(
@@ -1663,16 +1381,16 @@
mnSwitches = main.Mininet1.getSwitches()
mnLinks = main.Mininet1.getLinks()
mnHosts = main.Mininet1.getHosts()
- for controller in main.activeNodes:
- controllerStr = str( main.activeNodes[ controller ] + 1 )
+ for controller in range( len( main.Cluster.active() ) ):
+ controllerStr = str( main.Cluster.active()[ controller ] )
currentDevicesResult = main.topoRelated.compareDevicePort(
main.Mininet1, controller,
mnSwitches, devices, ports )
utilities.assert_equals( expect=main.TRUE,
actual=currentDevicesResult,
- onpass="ONOS" + controllerStr +
+ onpass=controllerStr +
" Switches view is correct",
- onfail="ONOS" + controllerStr +
+ onfail=controllerStr +
" Switches view is incorrect" )
currentLinksResult = main.topoRelated.compareBase( links, controller,
@@ -1680,9 +1398,9 @@
[ mnSwitches, mnLinks ] )
utilities.assert_equals( expect=main.TRUE,
actual=currentLinksResult,
- onpass="ONOS" + controllerStr +
+ onpass=controllerStr +
" links view is correct",
- onfail="ONOS" + controllerStr +
+ onfail=controllerStr +
" links view is incorrect" )
if hosts[ controller ] and "Error" not in hosts[ controller ]:
@@ -1693,9 +1411,9 @@
currentHostsResult = main.FALSE
utilities.assert_equals( expect=main.TRUE,
actual=currentHostsResult,
- onpass="ONOS" + controllerStr +
+ onpass=controllerStr +
" hosts exist in Mininet",
- onfail="ONOS" + controllerStr +
+ onfail=controllerStr +
" hosts don't match Mininet" )
devicesResults = devicesResults and currentDevicesResult
@@ -1732,8 +1450,6 @@
# Make sure variables are defined/set
assert main.numCtrls, "main.numCtrls not defined"
assert utilities.assert_equals, "utilities.assert_equals not defined"
- assert main.CLIs, "main.CLIs not defined"
- assert main.nodes, "main.nodes not defined"
assert main.pCounterName, "main.pCounterName not defined"
assert main.onosSetName, "main.onosSetName not defined"
# NOTE: assert fails if value is 0/None/Empty/False
@@ -1766,22 +1482,13 @@
# DISTRIBUTED ATOMIC COUNTERS
# Partitioned counters
main.step( "Increment then get a default counter on each node" )
- pCounters = []
- threads = []
+ pCounters = main.Cluster.command( "counterTestAddAndGet",
+ args=[ main.pCounterName ] )
addedPValues = []
- for i in main.activeNodes:
- t = main.Thread( target=main.CLIs[ i ].counterTestAddAndGet,
- name="counterAddAndGet-" + str( i ),
- args=[ main.pCounterName ] )
+ for ctrl in main.Cluster.active():
main.pCounterValue += 1
addedPValues.append( main.pCounterValue )
- threads.append( t )
- t.start()
-
- for t in threads:
- t.join()
- pCounters.append( t.result )
- # Check that counter incremented numController times
+ # Check that counter incremented once per controller
pCounterResults = True
for i in addedPValues:
tmpResult = i in pCounters
@@ -1796,21 +1503,12 @@
" counter" )
main.step( "Get then Increment a default counter on each node" )
- pCounters = []
- threads = []
+ pCounters = main.Cluster.command( "counterTestGetAndAdd",
+ args=[ main.pCounterName ] )
addedPValues = []
- for i in main.activeNodes:
- t = main.Thread( target=main.CLIs[ i ].counterTestGetAndAdd,
- name="counterGetAndAdd-" + str( i ),
- args=[ main.pCounterName ] )
+ for ctrl in main.Cluster.active():
addedPValues.append( main.pCounterValue )
main.pCounterValue += 1
- threads.append( t )
- t.start()
-
- for t in threads:
- t.join()
- pCounters.append( t.result )
# Check that counter incremented numController times
pCounterResults = True
for i in addedPValues:
@@ -1833,22 +1531,13 @@
onfail="Added counters are incorrect" )
main.step( "Add -8 to then get a default counter on each node" )
- pCounters = []
- threads = []
+ pCounters = main.Cluster.command( "counterTestAddAndGet",
+ args=[ main.pCounterName ],
+ kwargs={ "delta": -8 } )
addedPValues = []
- for i in main.activeNodes:
- t = main.Thread( target=main.CLIs[ i ].counterTestAddAndGet,
- name="counterIncrement-" + str( i ),
- args=[ main.pCounterName ],
- kwargs={ "delta": -8 } )
+ for ctrl in main.Cluster.active():
main.pCounterValue += -8
addedPValues.append( main.pCounterValue )
- threads.append( t )
- t.start()
-
- for t in threads:
- t.join()
- pCounters.append( t.result )
# Check that counter incremented numController times
pCounterResults = True
for i in addedPValues:
@@ -1864,22 +1553,14 @@
" counter" )
main.step( "Add 5 to then get a default counter on each node" )
- pCounters = []
- threads = []
+ pCounters = main.Cluster.command( "counterTestAddAndGet",
+ args=[ main.pCounterName ],
+ kwargs={ "delta": 5 } )
addedPValues = []
- for i in main.activeNodes:
- t = main.Thread( target=main.CLIs[ i ].counterTestAddAndGet,
- name="counterIncrement-" + str( i ),
- args=[ main.pCounterName ],
- kwargs={ "delta": 5 } )
+ for ctrl in main.Cluster.active():
main.pCounterValue += 5
addedPValues.append( main.pCounterValue )
- threads.append( t )
- t.start()
- for t in threads:
- t.join()
- pCounters.append( t.result )
# Check that counter incremented numController times
pCounterResults = True
for i in addedPValues:
@@ -1895,22 +1576,13 @@
" counter" )
main.step( "Get then add 5 to a default counter on each node" )
- pCounters = []
- threads = []
+ pCounters = main.Cluster.command( "counterTestGetAndAdd",
+ args=[ main.pCounterName ],
+ kwargs={ "delta": 5 } )
addedPValues = []
- for i in main.activeNodes:
- t = main.Thread( target=main.CLIs[ i ].counterTestGetAndAdd,
- name="counterIncrement-" + str( i ),
- args=[ main.pCounterName ],
- kwargs={ "delta": 5 } )
+ for ctrl in main.Cluster.active():
addedPValues.append( main.pCounterValue )
main.pCounterValue += 5
- threads.append( t )
- t.start()
-
- for t in threads:
- t.join()
- pCounters.append( t.result )
# Check that counter incremented numController times
pCounterResults = True
for i in addedPValues:
@@ -1935,27 +1607,17 @@
# DISTRIBUTED SETS
main.step( "Distributed Set get" )
size = len( main.onosSet )
- getResponses = []
- threads = []
- for i in main.activeNodes:
- t = main.Thread( target=main.CLIs[ i ].setTestGet,
- name="setTestGet-" + str( i ),
- args=[ main.onosSetName ] )
- threads.append( t )
- t.start()
- for t in threads:
- t.join()
- getResponses.append( t.result )
-
+ getResponses = main.Cluster.command( "setTestGet",
+ args=[ main.onosSetName ] )
getResults = main.TRUE
- for i in range( len( main.activeNodes ) ):
- node = str( main.activeNodes[ i ] + 1 )
+ for i in range( len( main.Cluster.active() ) ):
+ node = str( main.Cluster.active()[ i ] )
if isinstance( getResponses[ i ], list ):
current = set( getResponses[ i ] )
if len( current ) == len( getResponses[ i ] ):
# no repeats
if main.onosSet != current:
- main.log.error( "ONOS" + node +
+ main.log.error( node +
" has incorrect view" +
" of set " + main.onosSetName + ":\n" +
str( getResponses[ i ] ) )
@@ -1964,7 +1626,7 @@
getResults = main.FALSE
else:
# error, set is not a set
- main.log.error( "ONOS" + node +
+ main.log.error( node +
" has repeat elements in" +
" set " + main.onosSetName + ":\n" +
str( getResponses[ i ] ) )
@@ -1977,24 +1639,14 @@
onfail="Set elements are incorrect" )
main.step( "Distributed Set size" )
- sizeResponses = []
- threads = []
- for i in main.activeNodes:
- t = main.Thread( target=main.CLIs[ i ].setTestSize,
- name="setTestSize-" + str( i ),
- args=[ main.onosSetName ] )
- threads.append( t )
- t.start()
- for t in threads:
- t.join()
- sizeResponses.append( t.result )
-
+ sizeResponses = main.Cluster.command( "setTestSize",
+ args=[ main.onosSetName ] )
sizeResults = main.TRUE
- for i in range( len( main.activeNodes ) ):
- node = str( main.activeNodes[ i ] + 1 )
+ for i in range( len( main.Cluster.active() ) ):
+ node = str( main.Cluster.active()[ i ] )
if size != sizeResponses[ i ]:
sizeResults = main.FALSE
- main.log.error( "ONOS" + node +
+ main.log.error( node +
" expected a size of " + str( size ) +
" for set " + main.onosSetName +
" but got " + str( sizeResponses[ i ] ) )
@@ -2005,23 +1657,13 @@
main.step( "Distributed Set add()" )
main.onosSet.add( addValue )
- addResponses = []
- threads = []
- for i in main.activeNodes:
- t = main.Thread( target=main.CLIs[ i ].setTestAdd,
- name="setTestAdd-" + str( i ),
- args=[ main.onosSetName, addValue ] )
- threads.append( t )
- t.start()
- for t in threads:
- t.join()
- addResponses.append( t.result )
-
+ addResponses = main.Cluster.command( "setTestAdd",
+ args=[ main.onosSetName, addValue ] )
# main.TRUE = successfully changed the set
# main.FALSE = action resulted in no change in set
# main.ERROR - Some error in executing the function
addResults = main.TRUE
- for i in range( len( main.activeNodes ) ):
+ for i in range( len( main.Cluster.active() ) ):
if addResponses[ i ] == main.TRUE:
# All is well
pass
@@ -2039,26 +1681,17 @@
# Check if set is still correct
size = len( main.onosSet )
- getResponses = []
- threads = []
- for i in main.activeNodes:
- t = main.Thread( target=main.CLIs[ i ].setTestGet,
- name="setTestGet-" + str( i ),
- args=[ main.onosSetName ] )
- threads.append( t )
- t.start()
- for t in threads:
- t.join()
- getResponses.append( t.result )
+ getResponses = main.Cluster.command( "setTestGet",
+ args=[ main.onosSetName ] )
getResults = main.TRUE
- for i in range( len( main.activeNodes ) ):
- node = str( main.activeNodes[ i ] + 1 )
+ for i in range( len( main.Cluster.active() ) ):
+ node = str( main.Cluster.active()[ i ] )
if isinstance( getResponses[ i ], list ):
current = set( getResponses[ i ] )
if len( current ) == len( getResponses[ i ] ):
# no repeats
if main.onosSet != current:
- main.log.error( "ONOS" + node + " has incorrect view" +
+ main.log.error( node + " has incorrect view" +
" of set " + main.onosSetName + ":\n" +
str( getResponses[ i ] ) )
main.log.debug( "Expected: " + str( main.onosSet ) )
@@ -2066,31 +1699,21 @@
getResults = main.FALSE
else:
# error, set is not a set
- main.log.error( "ONOS" + node + " has repeat elements in" +
+ main.log.error( node + " has repeat elements in" +
" set " + main.onosSetName + ":\n" +
str( getResponses[ i ] ) )
getResults = main.FALSE
elif getResponses[ i ] == main.ERROR:
getResults = main.FALSE
- sizeResponses = []
- threads = []
- for i in main.activeNodes:
- t = main.Thread( target=main.CLIs[ i ].setTestSize,
- name="setTestSize-" + str( i ),
- args=[ main.onosSetName ] )
- threads.append( t )
- t.start()
- for t in threads:
- t.join()
- sizeResponses.append( t.result )
+ sizeResponses = main.Cluster.command( "setTestSize",
+ args=[ main.onosSetName ] )
sizeResults = main.TRUE
- for i in range( len( main.activeNodes ) ):
- node = str( main.activeNodes[ i ] + 1 )
+ for i in range( len( main.Cluster.active() ) ):
+ node = str( main.Cluster.active()[ i ] )
if size != sizeResponses[ i ]:
sizeResults = main.FALSE
- main.log.error( "ONOS" + node +
- " expected a size of " + str( size ) +
- " for set " + main.onosSetName +
+ main.log.error( node + " expected a size of " +
+ str( size ) + " for set " + main.onosSetName +
" but got " + str( sizeResponses[ i ] ) )
addResults = addResults and getResults and sizeResults
utilities.assert_equals( expect=main.TRUE,
@@ -2100,23 +1723,13 @@
main.step( "Distributed Set addAll()" )
main.onosSet.update( addAllValue.split() )
- addResponses = []
- threads = []
- for i in main.activeNodes:
- t = main.Thread( target=main.CLIs[ i ].setTestAdd,
- name="setTestAddAll-" + str( i ),
- args=[ main.onosSetName, addAllValue ] )
- threads.append( t )
- t.start()
- for t in threads:
- t.join()
- addResponses.append( t.result )
-
+ addResponses = main.Cluster.command( "setTestAdd",
+ args=[ main.onosSetName, addAllValue ] )
# main.TRUE = successfully changed the set
# main.FALSE = action resulted in no change in set
# main.ERROR - Some error in executing the function
addAllResults = main.TRUE
- for i in range( len( main.activeNodes ) ):
+ for i in range( len( main.Cluster.active() ) ):
if addResponses[ i ] == main.TRUE:
# All is well
pass
@@ -2134,27 +1747,17 @@
# Check if set is still correct
size = len( main.onosSet )
- getResponses = []
- threads = []
- for i in main.activeNodes:
- t = main.Thread( target=main.CLIs[ i ].setTestGet,
- name="setTestGet-" + str( i ),
- args=[ main.onosSetName ] )
- threads.append( t )
- t.start()
- for t in threads:
- t.join()
- getResponses.append( t.result )
+ getResponses = main.Cluster.command( "setTestGet",
+ args=[ main.onosSetName ] )
getResults = main.TRUE
- for i in range( len( main.activeNodes ) ):
- node = str( main.activeNodes[ i ] + 1 )
+ for i in range( len( main.Cluster.active() ) ):
+ node = str( main.Cluster.active()[ i ] )
if isinstance( getResponses[ i ], list ):
current = set( getResponses[ i ] )
if len( current ) == len( getResponses[ i ] ):
# no repeats
if main.onosSet != current:
- main.log.error( "ONOS" + node +
- " has incorrect view" +
+ main.log.error( node + " has incorrect view" +
" of set " + main.onosSetName + ":\n" +
str( getResponses[ i ] ) )
main.log.debug( "Expected: " + str( main.onosSet ) )
@@ -2162,31 +1765,20 @@
getResults = main.FALSE
else:
# error, set is not a set
- main.log.error( "ONOS" + node +
- " has repeat elements in" +
+ main.log.error( node + " has repeat elements in" +
" set " + main.onosSetName + ":\n" +
str( getResponses[ i ] ) )
getResults = main.FALSE
elif getResponses[ i ] == main.ERROR:
getResults = main.FALSE
- sizeResponses = []
- threads = []
- for i in main.activeNodes:
- t = main.Thread( target=main.CLIs[ i ].setTestSize,
- name="setTestSize-" + str( i ),
- args=[ main.onosSetName ] )
- threads.append( t )
- t.start()
- for t in threads:
- t.join()
- sizeResponses.append( t.result )
+ sizeResponses = main.Cluster.command( "setTestSize",
+ args=[ main.onosSetName ] )
sizeResults = main.TRUE
- for i in range( len( main.activeNodes ) ):
- node = str( main.activeNodes[ i ] + 1 )
+ for i in range( len( main.Cluster.active() ) ):
+ node = str( main.Cluster.active()[ i ] )
if size != sizeResponses[ i ]:
sizeResults = main.FALSE
- main.log.error( "ONOS" + node +
- " expected a size of " + str( size ) +
+ main.log.error( node + " expected a size of " + str( size ) +
" for set " + main.onosSetName +
" but got " + str( sizeResponses[ i ] ) )
addAllResults = addAllResults and getResults and sizeResults
@@ -2196,22 +1788,11 @@
onfail="Set addAll was incorrect" )
main.step( "Distributed Set contains()" )
- containsResponses = []
- threads = []
- for i in main.activeNodes:
- t = main.Thread( target=main.CLIs[ i ].setTestGet,
- name="setContains-" + str( i ),
- args=[ main.onosSetName ],
- kwargs={ "values": addValue } )
- threads.append( t )
- t.start()
- for t in threads:
- t.join()
- # NOTE: This is the tuple
- containsResponses.append( t.result )
-
+ containsResponses = main.Cluster.command( "setTestGet",
+ args=[ main.onosSetName ],
+ kwargs={ "values": addValue } )
containsResults = main.TRUE
- for i in range( len( main.activeNodes ) ):
+ for i in range( len( main.Cluster.active() ) ):
if containsResponses[ i ] == main.ERROR:
containsResults = main.FALSE
else:
@@ -2223,22 +1804,11 @@
onfail="Set contains failed" )
main.step( "Distributed Set containsAll()" )
- containsAllResponses = []
- threads = []
- for i in main.activeNodes:
- t = main.Thread( target=main.CLIs[ i ].setTestGet,
- name="setContainsAll-" + str( i ),
- args=[ main.onosSetName ],
- kwargs={ "values": addAllValue } )
- threads.append( t )
- t.start()
- for t in threads:
- t.join()
- # NOTE: This is the tuple
- containsAllResponses.append( t.result )
-
+ containsAllResponses = main.Cluster.command( "setTestGet",
+ args=[ main.onosSetName ],
+ kwargs={ "values": addAllValue } )
containsAllResults = main.TRUE
- for i in range( len( main.activeNodes ) ):
+ for i in range( len( main.Cluster.active() ) ):
- if containsResponses[ i ] == main.ERROR:
- containsResults = main.FALSE
+ if containsAllResponses[ i ] == main.ERROR:
+ containsAllResults = main.FALSE
else:
@@ -2251,23 +1821,13 @@
main.step( "Distributed Set remove()" )
main.onosSet.remove( addValue )
- removeResponses = []
- threads = []
- for i in main.activeNodes:
- t = main.Thread( target=main.CLIs[ i ].setTestRemove,
- name="setTestRemove-" + str( i ),
- args=[ main.onosSetName, addValue ] )
- threads.append( t )
- t.start()
- for t in threads:
- t.join()
- removeResponses.append( t.result )
-
+ removeResponses = main.Cluster.command( "setTestRemove",
+ args=[ main.onosSetName, addValue ] )
# main.TRUE = successfully changed the set
# main.FALSE = action resulted in no change in set
# main.ERROR - Some error in executing the function
removeResults = main.TRUE
- for i in range( len( main.activeNodes ) ):
+ for i in range( len( main.Cluster.active() ) ):
if removeResponses[ i ] == main.TRUE:
# All is well
pass
@@ -2285,27 +1845,17 @@
# Check if set is still correct
size = len( main.onosSet )
- getResponses = []
- threads = []
- for i in main.activeNodes:
- t = main.Thread( target=main.CLIs[ i ].setTestGet,
- name="setTestGet-" + str( i ),
- args=[ main.onosSetName ] )
- threads.append( t )
- t.start()
- for t in threads:
- t.join()
- getResponses.append( t.result )
+ getResponses = main.Cluster.command( "setTestGet",
+ args=[ main.onosSetName ] )
getResults = main.TRUE
- for i in range( len( main.activeNodes ) ):
- node = str( main.activeNodes[ i ] + 1 )
+ for i in range( len( main.Cluster.active() ) ):
+ node = str( main.Cluster.active()[ i ] )
if isinstance( getResponses[ i ], list ):
current = set( getResponses[ i ] )
if len( current ) == len( getResponses[ i ] ):
# no repeats
if main.onosSet != current:
- main.log.error( "ONOS" + node +
- " has incorrect view" +
+ main.log.error( node + " has incorrect view" +
" of set " + main.onosSetName + ":\n" +
str( getResponses[ i ] ) )
main.log.debug( "Expected: " + str( main.onosSet ) )
@@ -2313,31 +1863,20 @@
getResults = main.FALSE
else:
# error, set is not a set
- main.log.error( "ONOS" + node +
- " has repeat elements in" +
+ main.log.error( node + " has repeat elements in" +
" set " + main.onosSetName + ":\n" +
str( getResponses[ i ] ) )
getResults = main.FALSE
elif getResponses[ i ] == main.ERROR:
getResults = main.FALSE
- sizeResponses = []
- threads = []
- for i in main.activeNodes:
- t = main.Thread( target=main.CLIs[ i ].setTestSize,
- name="setTestSize-" + str( i ),
- args=[ main.onosSetName ] )
- threads.append( t )
- t.start()
- for t in threads:
- t.join()
- sizeResponses.append( t.result )
+ sizeResponses = main.Cluster.command( "setTestSize",
+ args=[ main.onosSetName ] )
sizeResults = main.TRUE
- for i in range( len( main.activeNodes ) ):
- node = str( main.activeNodes[ i ] + 1 )
+ for i in range( len( main.Cluster.active() ) ):
+ node = str( main.Cluster.active()[ i ] )
if size != sizeResponses[ i ]:
sizeResults = main.FALSE
- main.log.error( "ONOS" + node +
- " expected a size of " + str( size ) +
+ main.log.error( node + " expected a size of " + str( size ) +
" for set " + main.onosSetName +
" but got " + str( sizeResponses[ i ] ) )
removeResults = removeResults and getResults and sizeResults
@@ -2348,26 +1887,13 @@
main.step( "Distributed Set removeAll()" )
main.onosSet.difference_update( addAllValue.split() )
- removeAllResponses = []
- threads = []
- try:
- for i in main.activeNodes:
- t = main.Thread( target=main.CLIs[ i ].setTestRemove,
- name="setTestRemoveAll-" + str( i ),
- args=[ main.onosSetName, addAllValue ] )
- threads.append( t )
- t.start()
- for t in threads:
- t.join()
- removeAllResponses.append( t.result )
- except Exception as e:
- main.log.exception( e )
-
+ removeAllResponses = main.Cluster.command( "setTestRemove",
+ args=[ main.onosSetName, addAllValue ] )
# main.TRUE = successfully changed the set
# main.FALSE = action resulted in no change in set
# main.ERROR - Some error in executing the function
removeAllResults = main.TRUE
- for i in range( len( main.activeNodes ) ):
+ for i in range( len( main.Cluster.active() ) ):
if removeAllResponses[ i ] == main.TRUE:
# All is well
pass
@@ -2385,27 +1911,17 @@
# Check if set is still correct
size = len( main.onosSet )
- getResponses = []
- threads = []
- for i in main.activeNodes:
- t = main.Thread( target=main.CLIs[ i ].setTestGet,
- name="setTestGet-" + str( i ),
- args=[ main.onosSetName ] )
- threads.append( t )
- t.start()
- for t in threads:
- t.join()
- getResponses.append( t.result )
+ getResponses = main.Cluster.command( "setTestGet",
+ args=[ main.onosSetName ] )
getResults = main.TRUE
- for i in range( len( main.activeNodes ) ):
- node = str( main.activeNodes[ i ] + 1 )
+ for i in range( len( main.Cluster.active() ) ):
+ node = str( main.Cluster.active()[ i ] )
if isinstance( getResponses[ i ], list ):
current = set( getResponses[ i ] )
if len( current ) == len( getResponses[ i ] ):
# no repeats
if main.onosSet != current:
- main.log.error( "ONOS" + node +
- " has incorrect view" +
+ main.log.error( node + " has incorrect view" +
" of set " + main.onosSetName + ":\n" +
str( getResponses[ i ] ) )
main.log.debug( "Expected: " + str( main.onosSet ) )
@@ -2413,31 +1929,20 @@
getResults = main.FALSE
else:
# error, set is not a set
- main.log.error( "ONOS" + node +
- " has repeat elements in" +
+ main.log.error( node + " has repeat elements in" +
" set " + main.onosSetName + ":\n" +
str( getResponses[ i ] ) )
getResults = main.FALSE
elif getResponses[ i ] == main.ERROR:
getResults = main.FALSE
- sizeResponses = []
- threads = []
- for i in main.activeNodes:
- t = main.Thread( target=main.CLIs[ i ].setTestSize,
- name="setTestSize-" + str( i ),
- args=[ main.onosSetName ] )
- threads.append( t )
- t.start()
- for t in threads:
- t.join()
- sizeResponses.append( t.result )
+ sizeResponses = main.Cluster.command( "setTestSize",
+ args=[ main.onosSetName ] )
sizeResults = main.TRUE
- for i in range( len( main.activeNodes ) ):
- node = str( main.activeNodes[ i ] + 1 )
+ for i in range( len( main.Cluster.active() ) ):
+ node = str( main.Cluster.active()[ i ] )
if size != sizeResponses[ i ]:
sizeResults = main.FALSE
- main.log.error( "ONOS" + node +
- " expected a size of " + str( size ) +
+ main.log.error( node + " expected a size of " + str( size ) +
" for set " + main.onosSetName +
" but got " + str( sizeResponses[ i ] ) )
removeAllResults = removeAllResults and getResults and sizeResults
@@ -2448,23 +1953,13 @@
main.step( "Distributed Set addAll()" )
main.onosSet.update( addAllValue.split() )
- addResponses = []
- threads = []
- for i in main.activeNodes:
- t = main.Thread( target=main.CLIs[ i ].setTestAdd,
- name="setTestAddAll-" + str( i ),
- args=[ main.onosSetName, addAllValue ] )
- threads.append( t )
- t.start()
- for t in threads:
- t.join()
- addResponses.append( t.result )
-
+ addResponses = main.Cluster.command( "setTestAdd",
+ args=[ main.onosSetName, addAllValue ] )
# main.TRUE = successfully changed the set
# main.FALSE = action resulted in no change in set
# main.ERROR - Some error in executing the function
addAllResults = main.TRUE
- for i in range( len( main.activeNodes ) ):
+ for i in range( len( main.Cluster.active() ) ):
if addResponses[ i ] == main.TRUE:
# All is well
pass
@@ -2482,27 +1977,17 @@
# Check if set is still correct
size = len( main.onosSet )
- getResponses = []
- threads = []
- for i in main.activeNodes:
- t = main.Thread( target=main.CLIs[ i ].setTestGet,
- name="setTestGet-" + str( i ),
- args=[ main.onosSetName ] )
- threads.append( t )
- t.start()
- for t in threads:
- t.join()
- getResponses.append( t.result )
+ getResponses = main.Cluster.command( "setTestGet",
+ args=[ main.onosSetName ] )
getResults = main.TRUE
- for i in range( len( main.activeNodes ) ):
- node = str( main.activeNodes[ i ] + 1 )
+ for i in range( len( main.Cluster.active() ) ):
+ node = str( main.Cluster.active()[ i ] )
if isinstance( getResponses[ i ], list ):
current = set( getResponses[ i ] )
if len( current ) == len( getResponses[ i ] ):
# no repeats
if main.onosSet != current:
- main.log.error( "ONOS" + node +
- " has incorrect view" +
+ main.log.error( node + " has incorrect view" +
" of set " + main.onosSetName + ":\n" +
str( getResponses[ i ] ) )
main.log.debug( "Expected: " + str( main.onosSet ) )
@@ -2510,31 +1995,20 @@
getResults = main.FALSE
else:
# error, set is not a set
- main.log.error( "ONOS" + node +
- " has repeat elements in" +
+ main.log.error( node + " has repeat elements in" +
" set " + main.onosSetName + ":\n" +
str( getResponses[ i ] ) )
getResults = main.FALSE
elif getResponses[ i ] == main.ERROR:
getResults = main.FALSE
- sizeResponses = []
- threads = []
- for i in main.activeNodes:
- t = main.Thread( target=main.CLIs[ i ].setTestSize,
- name="setTestSize-" + str( i ),
- args=[ main.onosSetName ] )
- threads.append( t )
- t.start()
- for t in threads:
- t.join()
- sizeResponses.append( t.result )
+ sizeResponses = main.Cluster.command( "setTestSize",
+ args=[ main.onosSetName ] )
sizeResults = main.TRUE
- for i in range( len( main.activeNodes ) ):
- node = str( main.activeNodes[ i ] + 1 )
+ for i in range( len( main.Cluster.active() ) ):
+ node = str( main.Cluster.active()[ i ] )
if size != sizeResponses[ i ]:
sizeResults = main.FALSE
- main.log.error( "ONOS" + node +
- " expected a size of " + str( size ) +
+ main.log.error( node + " expected a size of " + str( size ) +
" for set " + main.onosSetName +
" but got " + str( sizeResponses[ i ] ) )
addAllResults = addAllResults and getResults and sizeResults
@@ -2545,24 +2019,14 @@
main.step( "Distributed Set clear()" )
main.onosSet.clear()
- clearResponses = []
- threads = []
- for i in main.activeNodes:
- t = main.Thread( target=main.CLIs[ i ].setTestRemove,
- name="setTestClear-" + str( i ),
- args=[ main.onosSetName, " " ], # Values doesn't matter
- kwargs={ "clear": True } )
- threads.append( t )
- t.start()
- for t in threads:
- t.join()
- clearResponses.append( t.result )
-
+ clearResponses = main.Cluster.command( "setTestRemove",
+ args=[ main.onosSetName, " " ], # value doesn't matter
+ kwargs={ "clear": True } )
# main.TRUE = successfully changed the set
# main.FALSE = action resulted in no change in set
# main.ERROR - Some error in executing the function
clearResults = main.TRUE
- for i in range( len( main.activeNodes ) ):
+ for i in range( len( main.Cluster.active() ) ):
if clearResponses[ i ] == main.TRUE:
# All is well
pass
@@ -2580,27 +2044,17 @@
# Check if set is still correct
size = len( main.onosSet )
- getResponses = []
- threads = []
- for i in main.activeNodes:
- t = main.Thread( target=main.CLIs[ i ].setTestGet,
- name="setTestGet-" + str( i ),
- args=[ main.onosSetName ] )
- threads.append( t )
- t.start()
- for t in threads:
- t.join()
- getResponses.append( t.result )
+ getResponses = main.Cluster.command( "setTestGet",
+ args=[ main.onosSetName ] )
getResults = main.TRUE
- for i in range( len( main.activeNodes ) ):
- node = str( main.activeNodes[ i ] + 1 )
+ for i in range( len( main.Cluster.active() ) ):
+ node = str( main.Cluster.active()[ i ] )
if isinstance( getResponses[ i ], list ):
current = set( getResponses[ i ] )
if len( current ) == len( getResponses[ i ] ):
# no repeats
if main.onosSet != current:
- main.log.error( "ONOS" + node +
- " has incorrect view" +
+ main.log.error( node + " has incorrect view" +
" of set " + main.onosSetName + ":\n" +
str( getResponses[ i ] ) )
main.log.debug( "Expected: " + str( main.onosSet ) )
@@ -2608,31 +2062,20 @@
getResults = main.FALSE
else:
# error, set is not a set
- main.log.error( "ONOS" + node +
- " has repeat elements in" +
+ main.log.error( node + " has repeat elements in" +
" set " + main.onosSetName + ":\n" +
str( getResponses[ i ] ) )
getResults = main.FALSE
elif getResponses[ i ] == main.ERROR:
getResults = main.FALSE
- sizeResponses = []
- threads = []
- for i in main.activeNodes:
- t = main.Thread( target=main.CLIs[ i ].setTestSize,
- name="setTestSize-" + str( i ),
- args=[ main.onosSetName ] )
- threads.append( t )
- t.start()
- for t in threads:
- t.join()
- sizeResponses.append( t.result )
+ sizeResponses = main.Cluster.command( "setTestSize",
+ args=[ main.onosSetName ] )
sizeResults = main.TRUE
- for i in range( len( main.activeNodes ) ):
- node = str( main.activeNodes[ i ] + 1 )
+ for i in range( len( main.Cluster.active() ) ):
+ node = str( main.Cluster.active()[ i ] )
if size != sizeResponses[ i ]:
sizeResults = main.FALSE
- main.log.error( "ONOS" + node +
- " expected a size of " + str( size ) +
+ main.log.error( node + " expected a size of " + str( size ) +
" for set " + main.onosSetName +
" but got " + str( sizeResponses[ i ] ) )
clearResults = clearResults and getResults and sizeResults
@@ -2643,23 +2086,13 @@
main.step( "Distributed Set addAll()" )
main.onosSet.update( addAllValue.split() )
- addResponses = []
- threads = []
- for i in main.activeNodes:
- t = main.Thread( target=main.CLIs[ i ].setTestAdd,
- name="setTestAddAll-" + str( i ),
- args=[ main.onosSetName, addAllValue ] )
- threads.append( t )
- t.start()
- for t in threads:
- t.join()
- addResponses.append( t.result )
-
+ addResponses = main.Cluster.command( "setTestAdd",
+ args=[ main.onosSetName, addAllValue ] )
# main.TRUE = successfully changed the set
# main.FALSE = action resulted in no change in set
# main.ERROR - Some error in executing the function
addAllResults = main.TRUE
- for i in range( len( main.activeNodes ) ):
+ for i in range( len( main.Cluster.active() ) ):
if addResponses[ i ] == main.TRUE:
# All is well
pass
@@ -2677,27 +2110,17 @@
# Check if set is still correct
size = len( main.onosSet )
- getResponses = []
- threads = []
- for i in main.activeNodes:
- t = main.Thread( target=main.CLIs[ i ].setTestGet,
- name="setTestGet-" + str( i ),
- args=[ main.onosSetName ] )
- threads.append( t )
- t.start()
- for t in threads:
- t.join()
- getResponses.append( t.result )
+ getResponses = main.Cluster.command( "setTestGet",
+ args=[ main.onosSetName ] )
getResults = main.TRUE
- for i in range( len( main.activeNodes ) ):
- node = str( main.activeNodes[ i ] + 1 )
+ for i in range( len( main.Cluster.active() ) ):
+ node = str( main.Cluster.active()[ i ] )
if isinstance( getResponses[ i ], list ):
current = set( getResponses[ i ] )
if len( current ) == len( getResponses[ i ] ):
# no repeats
if main.onosSet != current:
- main.log.error( "ONOS" + node +
- " has incorrect view" +
+ main.log.error( node + " has incorrect view" +
" of set " + main.onosSetName + ":\n" +
str( getResponses[ i ] ) )
main.log.debug( "Expected: " + str( main.onosSet ) )
@@ -2705,31 +2128,20 @@
getResults = main.FALSE
else:
# error, set is not a set
- main.log.error( "ONOS" + node +
- " has repeat elements in" +
+ main.log.error( node + " has repeat elements in" +
" set " + main.onosSetName + ":\n" +
str( getResponses[ i ] ) )
getResults = main.FALSE
elif getResponses[ i ] == main.ERROR:
getResults = main.FALSE
- sizeResponses = []
- threads = []
- for i in main.activeNodes:
- t = main.Thread( target=main.CLIs[ i ].setTestSize,
- name="setTestSize-" + str( i ),
- args=[ main.onosSetName ] )
- threads.append( t )
- t.start()
- for t in threads:
- t.join()
- sizeResponses.append( t.result )
+ sizeResponses = main.Cluster.command( "setTestSize",
+ args=[ main.onosSetName ] )
sizeResults = main.TRUE
- for i in range( len( main.activeNodes ) ):
- node = str( main.activeNodes[ i ] + 1 )
+ for i in range( len( main.Cluster.active() ) ):
+ node = str( main.Cluster.active()[ i ] )
if size != sizeResponses[ i ]:
sizeResults = main.FALSE
- main.log.error( "ONOS" + node +
- " expected a size of " + str( size ) +
+ main.log.error( node + " expected a size of " + str( size ) +
" for set " + main.onosSetName +
" but got " + str( sizeResponses[ i ] ) )
addAllResults = addAllResults and getResults and sizeResults
@@ -2740,24 +2152,14 @@
main.step( "Distributed Set retain()" )
main.onosSet.intersection_update( retainValue.split() )
- retainResponses = []
- threads = []
- for i in main.activeNodes:
- t = main.Thread( target=main.CLIs[ i ].setTestRemove,
- name="setTestRetain-" + str( i ),
- args=[ main.onosSetName, retainValue ],
- kwargs={ "retain": True } )
- threads.append( t )
- t.start()
- for t in threads:
- t.join()
- retainResponses.append( t.result )
-
+ retainResponses = main.Cluster.command( "setTestRemove",
+ args=[ main.onosSetName, retainValue ],
+ kwargs={ "retain": True } )
# main.TRUE = successfully changed the set
# main.FALSE = action resulted in no change in set
# main.ERROR - Some error in executing the function
retainResults = main.TRUE
- for i in range( len( main.activeNodes ) ):
+ for i in range( len( main.Cluster.active() ) ):
if retainResponses[ i ] == main.TRUE:
# All is well
pass
@@ -2775,27 +2177,17 @@
# Check if set is still correct
size = len( main.onosSet )
- getResponses = []
- threads = []
- for i in main.activeNodes:
- t = main.Thread( target=main.CLIs[ i ].setTestGet,
- name="setTestGet-" + str( i ),
- args=[ main.onosSetName ] )
- threads.append( t )
- t.start()
- for t in threads:
- t.join()
- getResponses.append( t.result )
+ getResponses = main.Cluster.command( "setTestGet",
+ args=[ main.onosSetName ] )
getResults = main.TRUE
- for i in range( len( main.activeNodes ) ):
- node = str( main.activeNodes[ i ] + 1 )
+ for i in range( len( main.Cluster.active() ) ):
+ node = str( main.Cluster.active()[ i ] )
if isinstance( getResponses[ i ], list ):
current = set( getResponses[ i ] )
if len( current ) == len( getResponses[ i ] ):
# no repeats
if main.onosSet != current:
- main.log.error( "ONOS" + node +
- " has incorrect view" +
+ main.log.error( node + " has incorrect view" +
" of set " + main.onosSetName + ":\n" +
str( getResponses[ i ] ) )
main.log.debug( "Expected: " + str( main.onosSet ) )
@@ -2803,30 +2195,20 @@
getResults = main.FALSE
else:
# error, set is not a set
- main.log.error( "ONOS" + node +
- " has repeat elements in" +
+ main.log.error( node + " has repeat elements in" +
" set " + main.onosSetName + ":\n" +
str( getResponses[ i ] ) )
getResults = main.FALSE
elif getResponses[ i ] == main.ERROR:
getResults = main.FALSE
- sizeResponses = []
- threads = []
- for i in main.activeNodes:
- t = main.Thread( target=main.CLIs[ i ].setTestSize,
- name="setTestSize-" + str( i ),
- args=[ main.onosSetName ] )
- threads.append( t )
- t.start()
- for t in threads:
- t.join()
- sizeResponses.append( t.result )
+ sizeResponses = main.Cluster.command( "setTestSize",
+ args=[ main.onosSetName ] )
sizeResults = main.TRUE
- for i in range( len( main.activeNodes ) ):
- node = str( main.activeNodes[ i ] + 1 )
+ for i in range( len( main.Cluster.active() ) ):
+ node = str( main.Cluster.active()[ i ] )
if size != sizeResponses[ i ]:
sizeResults = main.FALSE
- main.log.error( "ONOS" + node + " expected a size of " +
+ main.log.error( node + " expected a size of " +
str( size ) + " for set " + main.onosSetName +
" but got " + str( sizeResponses[ i ] ) )
retainResults = retainResults and getResults and sizeResults
@@ -2840,8 +2222,8 @@
tMapValue = "Testing"
numKeys = 100
putResult = True
- node = main.activeNodes[ 0 ]
- putResponses = main.CLIs[ node ].transactionalMapPut( numKeys, tMapValue )
+ ctrl = main.Cluster.next()
+ putResponses = ctrl.transactionalMapPut( numKeys, tMapValue )
if putResponses and len( putResponses ) == 100:
for i in putResponses:
if putResponses[ i ][ 'value' ] != tMapValue:
@@ -2861,18 +2243,9 @@
getCheck = True
for n in range( 1, numKeys + 1 ):
- getResponses = []
- threads = []
+ getResponses = main.Cluster.command( "transactionalMapGet",
+ args=[ "Key" + str( n ) ] )
valueCheck = True
- for i in main.activeNodes:
- t = main.Thread( target=main.CLIs[ i ].transactionalMapGet,
- name="TMap-get-" + str( i ),
- args=[ "Key" + str( n ) ] )
- threads.append( t )
- t.start()
- for t in threads:
- t.join()
- getResponses.append( t.result )
for node in getResponses:
if node != tMapValue:
valueCheck = False
@@ -2887,18 +2260,8 @@
# DISTRIBUTED ATOMIC VALUE
main.step( "Get the value of a new value" )
- threads = []
- getValues = []
- for i in main.activeNodes:
- t = main.Thread( target=main.CLIs[ i ].valueTestGet,
- name="ValueGet-" + str( i ),
- args=[ valueName ] )
- threads.append( t )
- t.start()
-
- for t in threads:
- t.join()
- getValues.append( t.result )
+ getValues = main.Cluster.command( "valueTestGet",
+ args=[ valueName ] )
main.log.debug( getValues )
# Check the results
atomicValueGetResult = True
@@ -2916,18 +2279,8 @@
main.step( "Atomic Value set()" )
valueValue = "foo"
- threads = []
- setValues = []
- for i in main.activeNodes:
- t = main.Thread( target=main.CLIs[ i ].valueTestSet,
- name="ValueSet-" + str( i ),
- args=[ valueName, valueValue ] )
- threads.append( t )
- t.start()
-
- for t in threads:
- t.join()
- setValues.append( t.result )
+ setValues = main.Cluster.command( "valueTestSet",
+ args=[ valueName, valueValue ] )
main.log.debug( setValues )
# Check the results
atomicValueSetResults = True
@@ -2941,18 +2294,8 @@
str( setValues ) )
main.step( "Get the value after set()" )
- threads = []
- getValues = []
- for i in main.activeNodes:
- t = main.Thread( target=main.CLIs[ i ].valueTestGet,
- name="ValueGet-" + str( i ),
- args=[ valueName ] )
- threads.append( t )
- t.start()
-
- for t in threads:
- t.join()
- getValues.append( t.result )
+ getValues = main.Cluster.command( "valueTestGet",
+ args=[ valueName ] )
main.log.debug( getValues )
# Check the results
atomicValueGetResult = True
@@ -2971,8 +2314,8 @@
main.step( "Atomic Value compareAndSet()" )
oldValue = valueValue
valueValue = "bar"
- i = main.activeNodes[ 0 ]
- CASValue = main.CLIs[ i ].valueTestCompareAndSet( valueName, oldValue, valueValue )
+ ctrl = main.Cluster.next()
+ CASValue = ctrl.valueTestCompareAndSet( valueName, oldValue, valueValue )
main.log.debug( CASValue )
utilities.assert_equals( expect=main.TRUE,
actual=CASValue,
@@ -2981,18 +2324,8 @@
str( CASValue ) )
main.step( "Get the value after compareAndSet()" )
- threads = []
- getValues = []
- for i in main.activeNodes:
- t = main.Thread( target=main.CLIs[ i ].valueTestGet,
- name="ValueGet-" + str( i ),
- args=[ valueName ] )
- threads.append( t )
- t.start()
-
- for t in threads:
- t.join()
- getValues.append( t.result )
+ getValues = main.Cluster.command( "valueTestGet",
+ args=[ valueName ] )
main.log.debug( getValues )
# Check the results
atomicValueGetResult = True
@@ -3011,8 +2344,8 @@
main.step( "Atomic Value getAndSet()" )
oldValue = valueValue
valueValue = "baz"
- i = main.activeNodes[ 0 ]
- GASValue = main.CLIs[ i ].valueTestGetAndSet( valueName, valueValue )
+ ctrl = main.Cluster.next()
+ GASValue = ctrl.valueTestGetAndSet( valueName, valueValue )
main.log.debug( GASValue )
expected = oldValue if oldValue is not None else "null"
utilities.assert_equals( expect=expected,
@@ -3023,18 +2356,8 @@
str( GASValue ) )
main.step( "Get the value after getAndSet()" )
- threads = []
- getValues = []
- for i in main.activeNodes:
- t = main.Thread( target=main.CLIs[ i ].valueTestGet,
- name="ValueGet-" + str( i ),
- args=[ valueName ] )
- threads.append( t )
- t.start()
-
- for t in threads:
- t.join()
- getValues.append( t.result )
+ getValues = main.Cluster.command( "valueTestGet",
+ args=[ valueName ] )
main.log.debug( getValues )
# Check the results
atomicValueGetResult = True
@@ -3052,9 +2375,8 @@
main.step( "Atomic Value destory()" )
valueValue = None
- threads = []
- i = main.activeNodes[ 0 ]
- destroyResult = main.CLIs[ i ].valueTestDestroy( valueName )
+ ctrl = main.Cluster.next()
+ destroyResult = ctrl.valueTestDestroy( valueName )
main.log.debug( destroyResult )
# Check the results
utilities.assert_equals( expect=main.TRUE,
@@ -3063,18 +2385,8 @@
onfail="Error destroying atomic Value" )
main.step( "Get the value after destroy()" )
- threads = []
- getValues = []
- for i in main.activeNodes:
- t = main.Thread( target=main.CLIs[ i ].valueTestGet,
- name="ValueGet-" + str( i ),
- args=[ valueName ] )
- threads.append( t )
- t.start()
-
- for t in threads:
- t.join()
- getValues.append( t.result )
+ getValues = main.Cluster.command( "valueTestGet",
+ args=[ valueName ] )
main.log.debug( getValues )
# Check the results
atomicValueGetResult = True
@@ -3092,9 +2404,8 @@
# WORK QUEUES
main.step( "Work Queue add()" )
- threads = []
- i = main.activeNodes[ 0 ]
- addResult = main.CLIs[ i ].workQueueAdd( workQueueName, 'foo' )
+ ctrl = main.Cluster.next()
+ addResult = ctrl.workQueueAdd( workQueueName, 'foo' )
workQueuePending += 1
main.log.debug( addResult )
# Check the results
@@ -3114,9 +2425,8 @@
onfail="Work Queue stats incorrect " )
main.step( "Work Queue addMultiple()" )
- threads = []
- i = main.activeNodes[ 0 ]
- addMultipleResult = main.CLIs[ i ].workQueueAddMultiple( workQueueName, 'bar', 'baz' )
+ ctrl = main.Cluster.next()
+ addMultipleResult = ctrl.workQueueAddMultiple( workQueueName, 'bar', 'baz' )
workQueuePending += 2
main.log.debug( addMultipleResult )
# Check the results
@@ -3136,10 +2446,9 @@
onfail="Work Queue stats incorrect " )
main.step( "Work Queue takeAndComplete() 1" )
- threads = []
- i = main.activeNodes[ 0 ]
+ ctrl = main.Cluster.next()
number = 1
- take1Result = main.CLIs[ i ].workQueueTakeAndComplete( workQueueName, number )
+ take1Result = ctrl.workQueueTakeAndComplete( workQueueName, number )
workQueuePending -= number
workQueueCompleted += number
main.log.debug( take1Result )
@@ -3160,10 +2469,9 @@
onfail="Work Queue stats incorrect " )
main.step( "Work Queue takeAndComplete() 2" )
- threads = []
- i = main.activeNodes[ 0 ]
+ ctrl = main.Cluster.next()
number = 2
- take2Result = main.CLIs[ i ].workQueueTakeAndComplete( workQueueName, number )
+ take2Result = ctrl.workQueueTakeAndComplete( workQueueName, number )
workQueuePending -= number
workQueueCompleted += number
main.log.debug( take2Result )
@@ -3186,8 +2494,8 @@
main.step( "Work Queue destroy()" )
valueValue = None
threads = []
- i = main.activeNodes[ 0 ]
- destroyResult = main.CLIs[ i ].workQueueDestroy( workQueueName )
+ ctrl = main.Cluster.next()
+ destroyResult = ctrl.workQueueDestroy( workQueueName )
workQueueCompleted = 0
workQueueInProgress = 0
workQueuePending = 0
@@ -3219,8 +2527,6 @@
assert main.numCtrls, "main.numCtrls not defined"
assert main, "main not defined"
assert utilities.assert_equals, "utilities.assert_equals not defined"
- assert main.CLIs, "main.CLIs not defined"
- assert main.nodes, "main.nodes not defined"
# printing colors to terminal
colors = { 'cyan': '\033[96m', 'purple': '\033[95m',
@@ -3245,9 +2551,9 @@
logFiles = [ "karaf.log", "karaf.log.1" ]
# NOTE: must end in /
for f in logFiles:
- for node in main.nodes:
- dstName = main.logdir + "/" + node.name + "-" + f
- main.ONOSbench.secureCopy( node.user_name, node.ip_address,
+ for ctrl in main.Cluster.controllers:
+ dstName = main.logdir + "/" + ctrl.name + "-" + f
+ main.ONOSbench.secureCopy( ctrl.user_name, ctrl.ipAddress,
logFolder + f, dstName )
# std*.log's
# NOTE: must end in /
@@ -3255,9 +2561,9 @@
logFiles = [ "stderr.log", "stdout.log" ]
# NOTE: must end in /
for f in logFiles:
- for node in main.nodes:
- dstName = main.logdir + "/" + node.name + "-" + f
- main.ONOSbench.secureCopy( node.user_name, node.ip_address,
+ for ctrl in main.Cluster.controllers:
+ dstName = main.logdir + "/" + ctrl.name + "-" + f
+ main.ONOSbench.secureCopy( ctrl.user_name, ctrl.ipAddress,
logFolder + f, dstName )
else:
main.log.debug( "skipping saving log files" )
@@ -3269,9 +2575,9 @@
onfail="MN cleanup NOT successful" )
main.step( "Checking ONOS Logs for errors" )
- for node in main.nodes:
- main.log.debug( "Checking logs for errors on " + node.name + ":" )
- main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
+ for ctrl in main.Cluster.controllers:
+ main.log.debug( "Checking logs for errors on " + ctrl.name + ":" )
+ main.log.warn( main.ONOSbench.checkLogs( ctrl.ipAddress ) )
try:
timerLog = open( main.logdir + "/Timers.csv", 'w' )
@@ -3280,6 +2586,7 @@
timerLog.close()
except NameError as e:
main.log.exception( e )
+
def assignMastership( self, main ):
"""
Assign mastership to controllers
@@ -3288,8 +2595,6 @@
assert main.numCtrls, "main.numCtrls not defined"
assert main, "main not defined"
assert utilities.assert_equals, "utilities.assert_equals not defined"
- assert main.CLIs, "main.CLIs not defined"
- assert main.nodes, "main.nodes not defined"
main.case( "Assigning Controller roles for switches" )
main.caseExplanation = "Check that ONOS is connected to each " +\
@@ -3302,7 +2607,7 @@
ipList = []
deviceList = []
- onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
+ onosCli = main.Cluster.next()
try:
# Assign mastership to specific controllers. This assignment was
# determined for a 7 node cluster, but will work with any sized
@@ -3311,45 +2616,45 @@
# set up correct variables:
if i == 1:
c = 0
- ip = main.nodes[ c ].ip_address # ONOS1
+ ip = main.Cluster.active()[ c ].ipAddress # ONOS1
deviceId = onosCli.getDevice( "1000" ).get( 'id' )
elif i == 2:
c = 1 % main.numCtrls
- ip = main.nodes[ c ].ip_address # ONOS2
+ ip = main.Cluster.active()[ c ].ipAddress # ONOS2
deviceId = onosCli.getDevice( "2000" ).get( 'id' )
elif i == 3:
c = 1 % main.numCtrls
- ip = main.nodes[ c ].ip_address # ONOS2
+ ip = main.Cluster.active()[ c ].ipAddress # ONOS2
deviceId = onosCli.getDevice( "3000" ).get( 'id' )
elif i == 4:
c = 3 % main.numCtrls
- ip = main.nodes[ c ].ip_address # ONOS4
+ ip = main.Cluster.active()[ c ].ipAddress # ONOS4
deviceId = onosCli.getDevice( "3004" ).get( 'id' )
elif i == 5:
c = 2 % main.numCtrls
- ip = main.nodes[ c ].ip_address # ONOS3
+ ip = main.Cluster.active()[ c ].ipAddress # ONOS3
deviceId = onosCli.getDevice( "5000" ).get( 'id' )
elif i == 6:
c = 2 % main.numCtrls
- ip = main.nodes[ c ].ip_address # ONOS3
+ ip = main.Cluster.active()[ c ].ipAddress # ONOS3
deviceId = onosCli.getDevice( "6000" ).get( 'id' )
elif i == 7:
c = 5 % main.numCtrls
- ip = main.nodes[ c ].ip_address # ONOS6
+ ip = main.Cluster.active()[ c ].ipAddress # ONOS6
deviceId = onosCli.getDevice( "6007" ).get( 'id' )
elif i >= 8 and i <= 17:
c = 4 % main.numCtrls
- ip = main.nodes[ c ].ip_address # ONOS5
+ ip = main.Cluster.active()[ c ].ipAddress # ONOS5
dpid = '3' + str( i ).zfill( 3 )
deviceId = onosCli.getDevice( dpid ).get( 'id' )
elif i >= 18 and i <= 27:
c = 6 % main.numCtrls
- ip = main.nodes[ c ].ip_address # ONOS7
+ ip = main.Cluster.active()[ c ].ipAddress # ONOS7
dpid = '6' + str( i ).zfill( 3 )
deviceId = onosCli.getDevice( dpid ).get( 'id' )
elif i == 28:
c = 0
- ip = main.nodes[ c ].ip_address # ONOS1
+ ip = main.Cluster.active()[ c ].ipAddress # ONOS1
deviceId = onosCli.getDevice( "2800" ).get( 'id' )
else:
main.log.error( "You didn't write an else statement for " +
@@ -3394,6 +2699,7 @@
onpass="Switches were successfully reassigned to designated " +
"controller",
onfail="Switches were not successfully reassigned" )
+
def bringUpStoppedNode( self, main ):
"""
The bring up stopped nodes
@@ -3402,17 +2708,15 @@
assert main.numCtrls, "main.numCtrls not defined"
assert main, "main not defined"
assert utilities.assert_equals, "utilities.assert_equals not defined"
- assert main.CLIs, "main.CLIs not defined"
- assert main.nodes, "main.nodes not defined"
assert main.kill, "main.kill not defined"
main.case( "Restart minority of ONOS nodes" )
main.step( "Restarting " + str( len( main.kill ) ) + " ONOS nodes" )
startResults = main.TRUE
restartTime = time.time()
- for i in main.kill:
+ for ctrl in main.kill:
startResults = startResults and\
- main.ONOSbench.onosStart( main.nodes[ i ].ip_address )
+ ctrl.onosStart( ctrl.ipAddress )
utilities.assert_equals( expect=main.TRUE, actual=startResults,
onpass="ONOS nodes started successfully",
onfail="ONOS nodes NOT successfully started" )
@@ -3422,31 +2726,23 @@
onosIsupResult = main.FALSE
while onosIsupResult == main.FALSE and count < 10:
onosIsupResult = main.TRUE
- for i in main.kill:
+ for ctrl in main.kill:
onosIsupResult = onosIsupResult and\
- main.ONOSbench.isup( main.nodes[ i ].ip_address )
+ ctrl.isup( ctrl.ipAddress )
count = count + 1
utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
onpass="ONOS restarted successfully",
onfail="ONOS restart NOT successful" )
- main.step( "Restarting ONOS main.CLIs" )
+ main.step( "Restarting ONOS nodes" )
cliResults = main.TRUE
- for i in main.kill:
+ for ctrl in main.kill:
cliResults = cliResults and\
- main.CLIs[ i ].startOnosCli( main.nodes[ i ].ip_address )
- main.activeNodes.append( i )
+ ctrl.startOnosCli( ctrl.ipAddress )
+ ctrl.active = True
utilities.assert_equals( expect=main.TRUE, actual=cliResults,
- onpass="ONOS cli restarted",
- onfail="ONOS cli did not restart" )
- main.activeNodes.sort()
- try:
- assert list( set( main.activeNodes ) ) == main.activeNodes,\
- "List of active nodes has duplicates, this likely indicates something was run out of order"
- except AssertionError:
- main.log.exception( "" )
- main.cleanup()
- main.exit()
+ onpass="ONOS node(s) restarted",
+ onfail="ONOS node(s) did not restart" )
# Grab the time of restart so we chan check how long the gossip
# protocol has had time to work
@@ -3456,7 +2752,7 @@
main.step( "Checking ONOS nodes" )
nodeResults = utilities.retry( self.nodesCheck,
False,
- args=[ main.activeNodes ],
+ args=[ main.Cluster.active() ],
sleep=15,
attempts=5 )
@@ -3465,35 +2761,29 @@
onfail="Nodes check NOT successful" )
if not nodeResults:
- for i in main.activeNodes:
- cli = main.CLIs[ i ]
+ for ctrl in main.Cluster.active():
main.log.debug( "{} components not ACTIVE: \n{}".format(
- cli.name,
- cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
+ ctrl.name,
+ ctrl.CLI.sendline( "scr:list | grep -v ACTIVE" ) ) )
main.log.error( "Failed to start ONOS, stopping test" )
main.cleanup()
main.exit()
- node = main.activeNodes[ 0 ]
- main.log.debug( main.CLIs[ node ].nodes( jsonFormat=False ) )
- main.log.debug( main.CLIs[ node ].leaders( jsonFormat=False ) )
- main.log.debug( main.CLIs[ node ].partitions( jsonFormat=False ) )
- main.log.debug(main.CLIs[node].apps(jsonFormat=False))
+ self.commonChecks()
main.step( "Rerun for election on the node(s) that were killed" )
runResults = main.TRUE
- for i in main.kill:
+ for ctrl in main.kill:
runResults = runResults and\
- main.CLIs[ i ].electionTestRun()
+ ctrl.electionTestRun()
utilities.assert_equals( expect=main.TRUE, actual=runResults,
onpass="ONOS nodes reran for election topic",
onfail="Errror rerunning for election" )
-
def checkStateAfterONOS( self, main, afterWhich, compareSwitch=False, isRestart=False ):
"""
afterWhich :
- 0: failture
+ 0: failure
1: scaling
"""
"""
@@ -3503,53 +2793,29 @@
assert main.numCtrls, "main.numCtrls not defined"
assert main, "main not defined"
assert utilities.assert_equals, "utilities.assert_equals not defined"
- assert main.CLIs, "main.CLIs not defined"
- assert main.nodes, "main.nodes not defined"
main.case( "Running ONOS Constant State Tests" )
OnosAfterWhich = [ "failure" , "scaliing" ]
main.step( "Check that each switch has a master" )
# Assert that each device has a master
- rolesNotNull = main.TRUE
- threads = []
- for i in main.activeNodes:
- t = main.Thread( target=main.CLIs[ i ].rolesNotNull,
- name="rolesNotNull-" + str( i ),
- args=[] )
- threads.append( t )
- t.start()
-
- for t in threads:
- t.join()
- rolesNotNull = rolesNotNull and t.result
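+ # collapse the per-node results of Cluster.command( "rolesNotNull" ) into one boolean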
+ rolesNotNull = all( [ i == main.TRUE for i in main.Cluster.command( "rolesNotNull" ) ] )
utilities.assert_equals(
- expect=main.TRUE,
+ expect=True,
actual=rolesNotNull,
onpass="Each device has a master",
onfail="Some devices don't have a master assigned" )
main.step( "Read device roles from ONOS" )
- ONOSMastership = []
+ ONOSMastership = main.Cluster.command( "roles" )
+ mastershipCheck = main.FALSE
consistentMastership = True
rolesResults = True
- threads = []
- for i in main.activeNodes:
- t = main.Thread( target=main.CLIs[ i ].roles,
- name="roles-" + str( i ),
- args=[] )
- threads.append( t )
- t.start()
-
- for t in threads:
- t.join()
- ONOSMastership.append( t.result )
-
for i in range( len( ONOSMastership ) ):
- node = str( main.activeNodes[ i ] + 1 )
+ node = str( main.Cluster.active()[ i ] )
if not ONOSMastership[ i ] or "Error" in ONOSMastership[ i ]:
- main.log.error( "Error in getting ONOS" + node + " roles" )
- main.log.warn( "ONOS" + node + " mastership response: " +
+ main.log.error( "Error in getting " + node + " roles" )
+ main.log.warn( node + " mastership response: " +
repr( ONOSMastership[ i ] ) )
rolesResults = False
utilities.assert_equals(
@@ -3572,8 +2838,8 @@
if rolesResults and not consistentMastership:
for i in range( len( ONOSMastership ) ):
- node = str( main.activeNodes[ i ] + 1 )
- main.log.warn( "ONOS" + node + " roles: ",
+ node = str( main.Cluster.active()[ i ] )
+ main.log.warn( node + " roles: ",
json.dumps( json.loads( ONOSMastership[ i ] ),
sort_keys=True,
indent=4,
@@ -3588,8 +2854,8 @@
except ( ValueError, TypeError ):
main.log.exception( "Something is wrong with parsing " +
"ONOSMastership[0] or mastershipState" )
- main.log.error( "ONOSMastership[0]: " + repr( ONOSMastership[ 0 ] ) )
- main.log.error( "mastershipState" + repr( mastershipState ) )
+ main.log.debug( "ONOSMastership[0]: " + repr( ONOSMastership[ 0 ] ) )
+ main.log.debug( "mastershipState" + repr( mastershipState ) )
main.cleanup()
main.exit()
mastershipCheck = main.TRUE
@@ -3613,28 +2879,15 @@
# NOTE: we expect mastership to change on controller failure/scaling down
main.step( "Get the intents and compare across all nodes" )
- ONOSIntents = []
+ ONOSIntents = main.Cluster.command( "intents" )
intentCheck = main.FALSE
consistentIntents = True
intentsResults = True
- threads = []
- for i in main.activeNodes:
- t = main.Thread( target=main.CLIs[ i ].intents,
- name="intents-" + str( i ),
- args=[],
- kwargs={ 'jsonFormat': True } )
- threads.append( t )
- t.start()
-
- for t in threads:
- t.join()
- ONOSIntents.append( t.result )
-
for i in range( len( ONOSIntents ) ):
- node = str( main.activeNodes[ i ] + 1 )
if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
- main.log.error( "Error in getting ONOS" + node + " intents" )
- main.log.warn( "ONOS" + node + " intents response: " +
+ ctrl = main.Cluster.active()[ i ]
+ main.log.error( "Error in getting " + ctrl.name + " intents" )
+ main.log.warn( ctrl.name + " intents response: " +
repr( ONOSIntents[ i ] ) )
intentsResults = False
utilities.assert_equals(
@@ -3657,8 +2910,8 @@
# ... ... ...
# ... ... ...
title = " ID"
- for n in main.activeNodes:
- title += " " * 10 + "ONOS" + str( n + 1 )
+ for ctrl in main.Cluster.active():
+ title += " " * 10 + ctrl.name
main.log.warn( title )
# get all intent keys in the cluster
keys = []
@@ -3697,9 +2950,9 @@
main.log.info( dict( out ) )
if intentsResults and not consistentIntents:
- for i in range( len( main.activeNodes ) ):
- node = str( main.activeNodes[ i ] + 1 )
- main.log.warn( "ONOS" + node + " intents: " )
+ for i in range( len( main.Cluster.active() ) ):
+ ctrl = main.Cluster.active()[ i ]
+ main.log.warn( ctrl.name + " intents: " )
main.log.warn( json.dumps(
json.loads( ONOSIntents[ i ] ),
sort_keys=True,
@@ -3777,8 +3030,7 @@
onpass="No changes were found in the flow tables",
onfail="Changes were found in the flow tables" )
- main.Mininet2.pingLongKill()
-
+ main.Mininet2.pingLongKill()
"""
main.step( "Check the continuous pings to ensure that no packets " +
"were dropped during component failure" )
@@ -3819,8 +3071,6 @@
assert main.numCtrls, "main.numCtrls not defined"
assert main, "main not defined"
assert utilities.assert_equals, "utilities.assert_equals not defined"
- assert main.CLIs, "main.CLIs not defined"
- assert main.nodes, "main.nodes not defined"
try:
from tests.dependencies.topology import Topology
except ImportError:
@@ -3847,18 +3097,18 @@
hostAttachmentResults = True
count += 1
cliStart = time.time()
- devices = main.topoRelated.getAllDevices( main.activeNodes, True,
- kwargs={ 'sleep': 5, 'attempts': 5,
- 'randomTime': True } )
+ devices = main.topoRelated.getAllDevices( main.Cluster.active(), True,
+ kwargs={ 'sleep': 5, 'attempts': 5,
+ 'randomTime': True } )
ipResult = main.TRUE
- hosts = main.topoRelated.getAllHosts( main.activeNodes, True,
- kwargs={ 'sleep': 5, 'attempts': 5,
- 'randomTime': True },
- inJson=True )
+ hosts = main.topoRelated.getAllHosts( main.Cluster.active(), True,
+ kwargs={ 'sleep': 5, 'attempts': 5,
+ 'randomTime': True },
+ inJson=True )
for controller in range( 0, len( hosts ) ):
- controllerStr = str( main.activeNodes[ controller ] + 1 )
+ controllerStr = str( main.Cluster.active()[ controller ] )
if hosts[ controller ]:
for host in hosts[ controller ]:
if host is None or host.get( 'ipAddresses', [] ) == []:
@@ -3866,15 +3116,15 @@
"Error with host ipAddresses on controller" +
controllerStr + ": " + str( host ) )
ipResult = main.FALSE
- ports = main.topoRelated.getAllPorts( main.activeNodes , True,
- kwargs={ 'sleep': 5, 'attempts': 5,
- 'randomTime': True } )
- links = main.topoRelated.getAllLinks( main.activeNodes, True,
- kwargs={ 'sleep': 5, 'attempts': 5,
- 'randomTime': True } )
- clusters = main.topoRelated.getAllClusters( main.activeNodes , True,
- kwargs={ 'sleep': 5, 'attempts': 5,
- 'randomTime': True } )
+ ports = main.topoRelated.getAllPorts( main.Cluster.active(), True,
+ kwargs={ 'sleep': 5, 'attempts': 5,
+ 'randomTime': True } )
+ links = main.topoRelated.getAllLinks( main.Cluster.active(), True,
+ kwargs={ 'sleep': 5, 'attempts': 5,
+ 'randomTime': True } )
+ clusters = main.topoRelated.getAllClusters( main.Cluster.active(), True,
+ kwargs={ 'sleep': 5, 'attempts': 5,
+ 'randomTime': True } )
elapsed = time.time() - startTime
cliTime = time.time() - cliStart
@@ -3893,16 +3143,16 @@
mnSwitches = main.Mininet1.getSwitches()
mnLinks = main.Mininet1.getLinks()
mnHosts = main.Mininet1.getHosts()
- for controller in range( len( main.activeNodes ) ):
- controllerStr = str( main.activeNodes[ controller ] + 1 )
+ for controller in range( len( main.Cluster.active() ) ):
+ controllerStr = str( main.Cluster.active()[ controller ] )
currentDevicesResult = main.topoRelated.compareDevicePort( main.Mininet1, controller,
mnSwitches,
devices, ports )
utilities.assert_equals( expect=main.TRUE,
actual=currentDevicesResult,
- onpass="ONOS" + controllerStr +
+ onpass=controllerStr +
" Switches view is correct",
- onfail="ONOS" + controllerStr +
+ onfail=controllerStr +
" Switches view is incorrect" )
@@ -3911,9 +3161,9 @@
[mnSwitches, mnLinks] )
utilities.assert_equals( expect=main.TRUE,
actual=currentLinksResult,
- onpass="ONOS" + controllerStr +
+ onpass=controllerStr +
" links view is correct",
- onfail="ONOS" + controllerStr +
+ onfail=controllerStr +
" links view is incorrect" )
if hosts[ controller ] and "Error" not in hosts[ controller ]:
currentHostsResult = main.Mininet1.compareHosts(
@@ -3925,9 +3175,9 @@
currentHostsResult = main.FALSE
utilities.assert_equals( expect=main.TRUE,
actual=currentHostsResult,
- onpass="ONOS" + controllerStr +
+ onpass=controllerStr +
" hosts exist in Mininet",
- onfail="ONOS" + controllerStr +
+ onfail=controllerStr +
" hosts don't match Mininet" )
# CHECKING HOST ATTACHMENT POINTS
hostAttachment = True
@@ -4034,23 +3284,23 @@
main.step( "Hosts view is consistent across all ONOS nodes" )
consistentHostsResult = main.TRUE
for controller in range( len( hosts ) ):
- controllerStr = str( main.activeNodes[ controller ] + 1 )
+ controllerStr = str( main.Cluster.active()[ controller ] )
if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
if hosts[ controller ] == hosts[ 0 ]:
continue
else: # hosts not consistent
- main.log.error( "hosts from ONOS" + controllerStr +
+ main.log.error( "hosts from " + controllerStr +
" is inconsistent with ONOS1" )
- main.log.warn( repr( hosts[ controller ] ) )
+ main.log.debug( repr( hosts[ controller ] ) )
consistentHostsResult = main.FALSE
else:
- main.log.error( "Error in getting ONOS hosts from ONOS" +
+ main.log.error( "Error in getting ONOS hosts from " +
controllerStr )
consistentHostsResult = main.FALSE
- main.log.warn( "ONOS" + controllerStr +
- " hosts response: " +
- repr( hosts[ controller ] ) )
+ main.log.debug( controllerStr +
+ " hosts response: " +
+ repr( hosts[ controller ] ) )
utilities.assert_equals(
expect=main.TRUE,
actual=consistentHostsResult,
@@ -4076,22 +3326,22 @@
main.step( "Clusters view is consistent across all ONOS nodes" )
consistentClustersResult = main.TRUE
for controller in range( len( clusters ) ):
- controllerStr = str( main.activeNodes[ controller ] + 1 )
+ controllerStr = str( main.Cluster.active()[ controller ] )
if "Error" not in clusters[ controller ]:
if clusters[ controller ] == clusters[ 0 ]:
continue
else: # clusters not consistent
- main.log.error( "clusters from ONOS" +
+ main.log.error( "clusters from " +
controllerStr +
" is inconsistent with ONOS1" )
consistentClustersResult = main.FALSE
else:
main.log.error( "Error in getting dataplane clusters " +
- "from ONOS" + controllerStr )
+ "from " + controllerStr )
consistentClustersResult = main.FALSE
- main.log.warn( "ONOS" + controllerStr +
- " clusters response: " +
- repr( clusters[ controller ] ) )
+ main.log.debug( controllerStr +
+ " clusters response: " +
+ repr( clusters[ controller ] ) )
utilities.assert_equals(
expect=main.TRUE,
actual=consistentClustersResult,
@@ -4100,7 +3350,7 @@
if not consistentClustersResult:
main.log.debug( clusters )
for x in links:
- main.log.warn( "{}: {}".format( len( x ), x ) )
+ main.log.debug( "{}: {}".format( len( x ), x ) )
main.step( "There is only one SCC" )
# there should always only be one cluster
@@ -4158,20 +3408,21 @@
main.step( "Checking ONOS nodes" )
nodeResults = utilities.retry( self.nodesCheck,
False,
- args=[ main.activeNodes ],
+ args=[ main.Cluster.active() ],
attempts=5 )
utilities.assert_equals( expect=True, actual=nodeResults,
onpass="Nodes check successful",
onfail="Nodes check NOT successful" )
if not nodeResults:
- for i in main.activeNodes:
+ for ctrl in main.Cluster.active():
main.log.debug( "{} components not ACTIVE: \n{}".format(
- main.CLIs[ i ].name,
- main.CLIs[ i ].sendline( "scr:list | grep -v ACTIVE" ) ) )
+ ctrl.name,
+ ctrl.CLI.sendline( "scr:list | grep -v ACTIVE" ) ) )
if not topoResult:
main.cleanup()
main.exit()
+
def linkDown( self, main, fromS="s3", toS="s28" ):
"""
Link fromS-toS down
@@ -4180,8 +3431,6 @@
assert main.numCtrls, "main.numCtrls not defined"
assert main, "main not defined"
assert utilities.assert_equals, "utilities.assert_equals not defined"
- assert main.CLIs, "main.CLIs not defined"
- assert main.nodes, "main.nodes not defined"
# NOTE: You should probably run a topology check after this
linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
@@ -4208,8 +3457,6 @@
assert main.numCtrls, "main.numCtrls not defined"
assert main, "main not defined"
assert utilities.assert_equals, "utilities.assert_equals not defined"
- assert main.CLIs, "main.CLIs not defined"
- assert main.nodes, "main.nodes not defined"
# NOTE: You should probably run a topology check after this
linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
@@ -4236,13 +3483,11 @@
assert main.numCtrls, "main.numCtrls not defined"
assert main, "main not defined"
assert utilities.assert_equals, "utilities.assert_equals not defined"
- assert main.CLIs, "main.CLIs not defined"
- assert main.nodes, "main.nodes not defined"
switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
description = "Killing a switch to ensure it is discovered correctly"
- onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
+ onosCli = main.Cluster.next()
main.case( description )
switch = main.params[ 'kill' ][ 'switch' ]
switchDPID = main.params[ 'kill' ][ 'dpid' ]
@@ -4256,13 +3501,14 @@
time.sleep( switchSleep )
device = onosCli.getDevice( dpid=switchDPID )
# Peek at the deleted switch
- main.log.warn( str( device ) )
+ main.log.warn( "Bringing down switch " + str( device ) )
result = main.FALSE
if device and device[ 'available' ] is False:
result = main.TRUE
utilities.assert_equals( expect=main.TRUE, actual=result,
onpass="Kill switch successful",
onfail="Failed to kill switch?" )
+
def switchUp( self, main ):
"""
Switch Up
@@ -4272,14 +3518,12 @@
assert main.numCtrls, "main.numCtrls not defined"
assert main, "main not defined"
assert utilities.assert_equals, "utilities.assert_equals not defined"
- assert main.CLIs, "main.CLIs not defined"
- assert main.nodes, "main.nodes not defined"
switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
switch = main.params[ 'kill' ][ 'switch' ]
switchDPID = main.params[ 'kill' ][ 'dpid' ]
links = main.params[ 'kill' ][ 'links' ].split()
- onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
+ onosCli = main.Cluster.next()
description = "Adding a switch to ensure it is discovered correctly"
main.case( description )
@@ -4287,14 +3531,14 @@
main.Mininet1.addSwitch( switch, dpid=switchDPID )
for peer in links:
main.Mininet1.addLink( switch, peer )
- ipList = [ node.ip_address for node in main.nodes ]
+ ipList = main.Cluster.getIps()
main.Mininet1.assignSwController( sw=switch, ip=ipList )
main.log.info( "Waiting " + str( switchSleep ) +
" seconds for switch up to be discovered" )
time.sleep( switchSleep )
device = onosCli.getDevice( dpid=switchDPID )
# Peek at the added switch
- main.log.warn( str( device ) )
+ main.log.debug( "Added device: " + str( device ) )
result = main.FALSE
if device and device[ 'available' ]:
result = main.TRUE
@@ -4309,12 +3553,10 @@
assert main.numCtrls, "main.numCtrls not defined"
assert main, "main not defined"
assert utilities.assert_equals, "utilities.assert_equals not defined"
- assert main.CLIs, "main.CLIs not defined"
- assert main.nodes, "main.nodes not defined"
main.case( "Start Leadership Election app" )
main.step( "Install leadership election app" )
- onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
+ onosCli = main.Cluster.next()
appResult = onosCli.activateApp( "org.onosproject.election" )
utilities.assert_equals(
expect=main.TRUE,
@@ -4323,11 +3565,10 @@
onfail="Something went wrong with installing Leadership election" )
main.step( "Run for election on each node" )
- for i in main.activeNodes:
- main.CLIs[ i ].electionTestRun()
+ onosCli.electionTestRun()
+ main.Cluster.command( "electionTestRun" )
time.sleep( 5 )
- activeCLIs = [ main.CLIs[ i ] for i in main.activeNodes ]
- sameResult, leaders = self.consistentLeaderboards( activeCLIs )
+ sameResult, leaders = main.HA.consistentLeaderboards( main.Cluster.active() )
utilities.assert_equals(
expect=True,
actual=sameResult,
@@ -4336,7 +3577,7 @@
if sameResult:
leader = leaders[ 0 ][ 0 ]
- if main.nodes[ main.activeNodes[ 0 ] ].ip_address in leader:
+ if onosCli.ipAddress in leader:
correctLeader = True
else:
correctLeader = False
@@ -4346,6 +3587,8 @@
actual=correctLeader,
onpass="Correct leader was elected",
onfail="Incorrect leader" )
+ main.Cluster.testLeader = leader
+
def isElectionFunctional( self, main ):
"""
Check that Leadership Election is still functional
@@ -4365,8 +3608,6 @@
assert main.numCtrls, "main.numCtrls not defined"
assert main, "main not defined"
assert utilities.assert_equals, "utilities.assert_equals not defined"
- assert main.CLIs, "main.CLIs not defined"
- assert main.nodes, "main.nodes not defined"
description = "Check that Leadership Election is still functional"
main.case( description )
@@ -4378,17 +3619,13 @@
newLeader = '' # the new leader from newLeaders, None if not same
oldLeaderCLI = None # the CLI of the old leader used for re-electing
expectNoLeader = False # True when there is only one leader
- if main.numCtrls == 1:
+ if len( main.Cluster.controllers ) == 1:
expectNoLeader = True
main.step( "Run for election on each node" )
- electionResult = main.TRUE
-
- for i in main.activeNodes: # run test election on each node
- if main.CLIs[ i ].electionTestRun() == main.FALSE:
- electionResult = main.FALSE
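+ # run the election on every active node in parallel; all must return main.TRUE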
+ electionResult = all( [ i == main.TRUE for i in main.Cluster.command( "electionTestRun" ) ] )
utilities.assert_equals(
- expect=main.TRUE,
+ expect=True,
actual=electionResult,
onpass="All nodes successfully ran for leadership",
onfail="At least one node failed to run for leadership" )
@@ -4400,11 +3637,11 @@
main.step( "Check that each node shows the same leader and candidates" )
failMessage = "Nodes have different leaderboards"
- activeCLIs = [ main.CLIs[ i ] for i in main.activeNodes ]
- sameResult, oldLeaders = self.consistentLeaderboards( activeCLIs )
+ activeCLIs = main.Cluster.active()
+ sameResult, oldLeaders = main.HA.consistentLeaderboards( activeCLIs )
if sameResult:
oldLeader = oldLeaders[ 0 ][ 0 ]
- main.log.warn( oldLeader )
+ main.log.info( "Old leader: " + oldLeader )
else:
oldLeader = None
utilities.assert_equals(
@@ -4420,9 +3657,9 @@
main.log.error( "Leadership isn't consistent." )
withdrawResult = main.FALSE
# Get the CLI of the oldLeader
- for i in main.activeNodes:
- if oldLeader == main.nodes[ i ].ip_address:
- oldLeaderCLI = main.CLIs[ i ]
+ for ctrl in main.Cluster.active():
+ if oldLeader == ctrl.ipAddress:
+ oldLeaderCLI = ctrl
break
else: # FOR/ELSE statement
main.log.error( "Leader election, could not find current leader" )
@@ -4519,6 +3756,7 @@
onpass="Old leader successfully re-ran for election",
onfail="Something went wrong with Leadership election after " +
"the old leader re-ran for election" )
+
def installDistributedPrimitiveApp( self, main ):
"""
Install Distributed Primitives app
@@ -4527,8 +3765,6 @@
assert main.numCtrls, "main.numCtrls not defined"
assert main, "main not defined"
assert utilities.assert_equals, "utilities.assert_equals not defined"
- assert main.CLIs, "main.CLIs not defined"
- assert main.nodes, "main.nodes not defined"
# Variables for the distributed primitives tests
main.pCounterName = "TestON-Partitions"
@@ -4540,8 +3776,7 @@
main.case( description )
main.step( "Install Primitives app" )
appName = "org.onosproject.distributedprimitives"
- node = main.activeNodes[ 0 ]
- appResults = main.CLIs[ node ].activateApp( appName )
+ appResults = main.Cluster.next().activateApp( appName )
utilities.assert_equals( expect=main.TRUE,
actual=appResults,
onpass="Primitives app activated",
diff --git a/TestON/tests/SAMP/SAMPstartTemplate_3node/SAMPstartTemplate_3node.params b/TestON/tests/SAMP/SAMPstartTemplate_3node/SAMPstartTemplate_3node.params
index 99996dd..fe36d6e 100755
--- a/TestON/tests/SAMP/SAMPstartTemplate_3node/SAMPstartTemplate_3node.params
+++ b/TestON/tests/SAMP/SAMPstartTemplate_3node/SAMPstartTemplate_3node.params
@@ -1,26 +1,12 @@
<PARAMS>
<!--
CASE0: pull onos code - this case should be skipped on Jenkins-driven prod test
- -->
- <!--
CASE1: setup and clean test env
- -->
- <!--
CASE2: get onos warnings, errors from log
- -->
- <!--
CASE10: start a 3-node ONOS Cluster
- -->
- <!--
CASE11: Start Mininet and assign controllers
- -->
- <!--
CASE12: Sample case of using onos cli
- -->
- <!--
CASE22: Sample case of using onos rest
- -->
- <!--
CASE32: Configure fwd apps
-->
@@ -36,7 +22,6 @@
</CASE0>
<CASE1>
- <NodeList>OC1,OC2,OC3</NodeList>
<SleepTimers>
<onosStartup>60</onosStartup>
<onosCfg>5</onosCfg>
@@ -46,7 +31,6 @@
</CASE1>
<CASE10>
- <numNodes>3</numNodes>
<Apps>
org.onosproject.openflow,org.onosproject.fwd
</Apps>
diff --git a/TestON/tests/SAMP/SAMPstartTemplate_3node/SAMPstartTemplate_3node.py b/TestON/tests/SAMP/SAMPstartTemplate_3node/SAMPstartTemplate_3node.py
index e2e63ec..ac5e8b5 100644
--- a/TestON/tests/SAMP/SAMPstartTemplate_3node/SAMPstartTemplate_3node.py
+++ b/TestON/tests/SAMP/SAMPstartTemplate_3node/SAMPstartTemplate_3node.py
@@ -56,10 +56,12 @@
'''
try:
from tests.dependencies.ONOSSetup import ONOSSetup
+ from dependencies.Cluster import Cluster
except ImportError:
main.log.error( "ONOSSetup not found. exiting the test" )
main.exit()
try:
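+ # wrap the nodes created by the OnosClusterDriver in a Cluster object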
+ main.Cluster = Cluster( main.ONOScell.nodes )
main.testSetUp
except ( NameError, AttributeError ):
main.testSetUp = ONOSSetup()
@@ -79,12 +81,14 @@
main.testSetUp.evnSetupConclusion( stepResult )
+
def CASE2( self, main ):
'''
Report errors/warnings/exceptions
'''
main.log.info("Error report: \n" )
- main.ONOSbench.logReport( main.ONOSip[0],
+ ONOSbench = main.ONOScell.nodes[0].Bench
+ ONOSbench.logReport( main.Cluster.controllers[0].ipAddress,
[ "INFO",
"FOLLOWER",
"WARN",
@@ -112,10 +116,12 @@
import time
- main.case( "Start up " + str( main.numCtrls ) + "-node onos cluster.")
+ size = len( main.Cluster.controllers )
+ main.case( "Start up " + str( size ) + "-node onos cluster." )
+
+ ONOSbench = main.ONOScell.nodes[0].Bench # bench handle, as in CASE2
main.step( "Start ONOS cluster with basic (drivers) app.")
- stepResult = main.ONOSbench.startBasicONOS( nodeList=main.ONOSip, opSleep=200,
- onosUser=main.ONOScli1.karafUser )
+ stepResult = ONOSbench.startBasicONOS( nodeList=main.Cluster.getIps(), opSleep=200,
+ onosUser=main.ONOScell.karafUser )
utilities.assert_equals( expect=main.TRUE,
actual=stepResult,
onpass="Successfully started basic ONOS cluster ",
@@ -135,9 +141,9 @@
else:
main.log.warn( "No configurations were specified to be changed after startup" )
utilities.assert_equals( expect=main.TRUE,
- actual=activateResult,
- onpass="Successfully set config",
- onfail="Failed to set config" )
+ actual=activateResult,
+ onpass="Successfully set config",
+ onfail="Failed to set config" )
main.step( "Set ONOS configurations" )
config = main.params['CASE10'].get( 'ONOS_Configuration' )
@@ -147,7 +153,7 @@
for component in config:
for setting in config[component]:
value = config[component][setting]
- check = main.ONOScli1.setCfg( component, setting, value )
+ check = main.Cluster.controllers[0].setCfg( component, setting, value )
main.log.info( "Value was changed? {}".format( main.TRUE == check ) )
checkResult = check and checkResult
utilities.assert_equals( expect=main.TRUE,
@@ -183,8 +189,8 @@
assignResult = main.TRUE
for i in range(1, 8):
assignResult = assignResult & main.Mininet1.assignSwController( sw="s" + str( i ),
- ip=main.ONOSip,
- port='6653' )
+ ip=main.Cluster.getIps(),
+ port='6653' )
time.sleep(main.mnCfgSleep)
utilities.assert_equals( expect=main.TRUE,
actual=stepResult,
@@ -198,8 +204,8 @@
"""
main.case( "Test some onos commands through CLI. ")
- main.log.debug( main.ONOScli1.sendline("summary") )
- main.log.debug( main.ONOScli3.sendline("devices") )
+ main.log.debug( main.Cluster.controllers[1].sendline("summary") )
+ main.log.debug( main.Cluster.controllers[2].sendline("devices") )
def CASE22( self, main ):
"""
@@ -207,8 +213,8 @@
"""
main.case( " Sample tests using ONOS REST API handles. ")
- main.log.debug( main.ONOSrest1.send("/devices") )
- main.log.debug( main.ONOSrest2.apps() )
+ main.log.debug( main.Cluster.controllers[2].send("/devices") )
+ main.log.debug( main.Cluster.controllers[-1].apps() )
def CASE32( self, main ):
"""
@@ -219,7 +225,7 @@
"""
main.case( "Configure onos-app-fwd and check if configuration successful. " )
main.step( "Install reactive forwarding app." )
- installResults = main.ONOScli1.activateApp( "org.onosproject.fwd" )
+ installResults = main.Cluster.controllers[0].activateApp( "org.onosproject.fwd" )
utilities.assert_equals( expect=main.TRUE, actual=installResults,
onpass= "Configure fwd successful", onfail= "Configure fwd failed" )
main.step( "Run pingall to check connectivity. " )
diff --git a/TestON/tests/SAMP/SAMPstartTemplate_3node/SAMPstartTemplate_3node.topo b/TestON/tests/SAMP/SAMPstartTemplate_3node/SAMPstartTemplate_3node.topo
index 9d8298e..5ab7af6 100755
--- a/TestON/tests/SAMP/SAMPstartTemplate_3node/SAMPstartTemplate_3node.topo
+++ b/TestON/tests/SAMP/SAMPstartTemplate_3node/SAMPstartTemplate_3node.topo
@@ -5,99 +5,36 @@
Even with some handles not used in test cases, we want to define
all onos cells here, for cases to set up onos cluster.
-->
- <ONOSbench>
- <host>localhost</host>
+ <ONOScell>
+ <host>localhost</host> # ONOS "bench" machine
<user>sdn</user>
<password>rocks</password>
- <type>OnosDriver</type>
+ <type>OnosClusterDriver</type>
<connect_order>1</connect_order>
<COMPONENTS>
- <home></home> #defines where onos home is
- <prompt></prompt>
- </COMPONENTS>
- </ONOSbench>
-
- <ONOScli1>
- <host>localhost</host>
- <user>sdn</user>
- <password>rocks</password>
- <type>OnosCliDriver</type>
- <connect_order>2</connect_order>
- <COMPONENTS>
+ <cluster_name></cluster_name> # Used as a prefix for cluster components. Defaults to 'ONOS'
<karaf_username></karaf_username>
<karaf_password></karaf_password>
- <prompt></prompt>
+ <web_user></web_user>
+ <web_pass></web_pass>
+ <rest_port></rest_port>
+ <prompt></prompt> # TODO: we technically need a few of these, one per component
+ <onos_home></onos_home> # defines where onos home is
+ <nodes> 3 </nodes> # number of nodes in the cluster
</COMPONENTS>
- </ONOScli1>
-
- <ONOScli2>
- <host>localhost</host>
- <user>sdn</user>
- <password>rocks</password>
- <type>OnosCliDriver</type>
- <connect_order>3</connect_order>
- <COMPONENTS>
- <prompt></prompt>
- </COMPONENTS>
- </ONOScli2>
-
- <ONOScli3>
- <host>localhost</host>
- <user>sdn</user>
- <password>rocks</password>
- <type>OnosCliDriver</type>
- <connect_order>4</connect_order>
- <COMPONENTS>
- <prompt></prompt>
- </COMPONENTS>
- </ONOScli3>
+ </ONOScell>
<Mininet1>
<host>OCN</host>
<user>sdn</user>
<password>rocks</password>
<type>MininetCliDriver</type>
- <connect_order>5</connect_order>
+ <connect_order>3</connect_order>
<COMPONENTS>
<home>~/mininet/custom/</home>
</COMPONENTS>
</Mininet1>
- <ONOSrest1>
- <host>OC1</host>
- <port>8181</port>
- <user>onos</user>
- <password>rocks</password>
- <type>OnosRestDriver</type>
- <connect_order>6</connect_order>
- <COMPONENTS>
- <prompt></prompt>
- </COMPONENTS>
- </ONOSrest1>
-
- <ONOSrest2>
- <host>OC2</host>
- <port>8181</port>
- <user>onos</user>
- <password>rocks</password>
- <type>OnosRestDriver</type>
- <connect_order>7</connect_order>
- <COMPONENTS>
- <prompt></prompt>
- </COMPONENTS>
- </ONOSrest2>
-
- <ONOSrest3>
- <host>OC3</host>
- <port>8181</port>
- <user>onos</user>
- <password>rocks</password>
- <type>OnosRestDriver</type>
- <connect_order>8</connect_order>
- <COMPONENTS>
- <prompt></prompt>
- </COMPONENTS>
- </ONOSrest3>
</COMPONENT>
</TOPOLOGY>
diff --git a/TestON/tests/dependencies/Cluster.py b/TestON/tests/dependencies/Cluster.py
new file mode 100644
index 0000000..01dc767
--- /dev/null
+++ b/TestON/tests/dependencies/Cluster.py
@@ -0,0 +1,131 @@
+"""
+Copyright 2017 Open Networking Foundation (ONF)
+
+Please refer questions to either the onos test mailing list at <onos-test@onosproject.org>,
+the System Testing Plans and Results wiki page at <https://wiki.onosproject.org/x/voMg>,
+or the System Testing Guide page at <https://wiki.onosproject.org/x/WYQg>
+
+ TestON is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 2 of the License, or
+ (at your option) any later version.
+
+ TestON is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with TestON. If not, see <http://www.gnu.org/licenses/>.
+"""
+
+
+class Cluster():
+
+ def __str__( self ):
+ return self.name
+ def __repr__( self ):
+ #TODO use repr of cli's?
+ controllers = []
+ for ctrl in self.controllers:
+ controllers.append( str( ctrl ) )
+ return "%s[%s]" % ( self.name, ", ".join( controllers ) )
+
+
+ def __init__( self, ctrlList=[], name="Cluster" ):
+ #assert isinstance( ctrlList, Controller ), "ctrlList should be a list of ONOS Controllers"
+ self.controllers = ctrlList
+ self.name = str( name )
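+ # round-robin iterator over the active controllers, consumed by next()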
+ self.iterator = iter( self.active() )
+
+ def getIps( self, activeOnly=False):
+ ips = []
+ if activeOnly:
+ nodeList = self.active()
+ else:
+ nodeList = self.controllers
+ for ctrl in nodeList:
+ ips.append( ctrl.ipAddress )
+ return ips
+
+ def active( self ):
+ """
+ Return a list of active controllers in the cluster
+ """
+ return [ ctrl for ctrl in self.controllers
+ if ctrl.active ]
+
+ def next( self ):
+ """
+ An iterator for the cluster's controllers that
+ resets when there are no more elements.
+
+ Returns the next controller in the cluster
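+
+ A minimal usage sketch:
+ ctrl = main.Cluster.next() # e.g. to pick one node for a CLI call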
+ """
+ try:
+ return self.iterator.next()
+ except StopIteration:
+ self.reset()
+ try:
+ return self.iterator.next()
+ except StopIteration:
+ raise RuntimeError( "There are no active nodes in the cluster" )
+
+ def reset( self ):
+ """
+ Resets the cluster iterator.
+
+ This should be used whenever a node's active state is changed
+ and is also used internally when the iterator has been exhausted.
+ """
+ self.iterator = iter( self.active() )
+
+ def install( self ):
+ """
+ Install ONOS on all controller nodes in the cluster
+ """
+ result = main.TRUE
+ # FIXME: use the correct onosdriver function
+ # TODO: Use threads to install in parallel, maybe have an option for sequential installs
+ for ctrl in self.controllers:
+ result &= ctrl.installOnos( ctrl.ipAddress )
+ return result
+
+ def startCLIs( self ):
+ """
+ Start the ONOS cli on all controller nodes in the cluster
+ """
+ cliResults = main.TRUE
+ threads = []
+ for ctrl in self.controllers:
+ t = main.Thread( target=ctrl.CLI.startOnosCli,
+ name="startCli-" + ctrl.name,
+ args=[ ctrl.ipAddress ] )
+ threads.append( t )
+ t.start()
+ ctrl.active = True
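+ # NOTE: the node is marked active as soon as its CLI thread starts;
+ # the joined thread results below determine the returned status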
+
+ for t in threads:
+ t.join()
+ cliResults = cliResults and t.result
+ return cliResults
+
+ def command( self, function, args=(), kwargs={} ):
+ """
+ Send a command to all ONOS nodes and return the results as a list
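+
+ Results are appended in the order of self.active(), so callers can
+ index them against the active controller list.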
+ """
+ threads = []
+ results = []
+ for ctrl in self.active():
+ f = getattr( ctrl, function )
+ t = main.Thread( target=f,
+ name=function + "-" + ctrl.name,
+ args=args,
+ kwargs=kwargs )
+ threads.append( t )
+ t.start()
+
+ for t in threads:
+ t.join()
+ results.append( t.result )
+ return results
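+
+# A usage sketch (main.onosSetName comes from the HA tests above):
+# getResponses = main.Cluster.command( "setTestGet",
+# args=[ main.onosSetName ] )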
diff --git a/TestON/tests/dependencies/ONOSSetup.py b/TestON/tests/dependencies/ONOSSetup.py
index 70fe3c2..bf3bfd1 100644
--- a/TestON/tests/dependencies/ONOSSetup.py
+++ b/TestON/tests/dependencies/ONOSSetup.py
@@ -38,83 +38,34 @@
else:
main.log.info( "Skipped git checkout and pull as they are disabled in params file" )
- return main.TRUE
-
- def setRest( self, hasRest, i ):
- if hasRest:
- main.RESTs.append( getattr( main, "ONOSrest" + str( i ) ) )
-
- def setNode( self, hasNode, i ):
- if hasNode:
- main.nodes.append( getattr( main, 'ONOS' + str(i)) )
-
- def setCli( self, hasCli, i ):
- if hasCli:
- main.CLIs.append( getattr ( main, "ONOScli" + str( i ) ) )
-
- def getNumNode( self, hasCli, hasNode, hasRest ):
- if hasCli:
- return len( main.CLIs )
- if hasNode:
- return len( main.nodes )
- if hasRest:
- return len( main.RESTs )
-
- def envSetup ( self, hasMultiNodeRounds=False, hasRest=False, hasNode=False,
- hasCli=True, specificIp="", includeGitPull=True, makeMaxNodes=True ):
+ def envSetup( self, cluster, hasMultiNodeRounds=False, hasRest=False, hasNode=False,
+ hasCli=True, specificIp="", includeGitPull=True ):
if includeGitPull :
self.gitPulling()
- if main.ONOSbench.maxNodes:
- main.maxNodes = int( main.ONOSbench.maxNodes )
+
+ ONOSbench = cluster.controllers[0].Bench
+ if ONOSbench.maxNodes:
+ main.maxNodes = int( ONOSbench.maxNodes )
else:
main.maxNodes = 0
main.cellData = {} # For creating cell file
- if hasCli:
- main.CLIs = []
- if hasRest:
- main.RESTs = []
- if hasNode:
- main.nodes = []
- main.ONOSip = [] # List of IPs of active ONOS nodes. CASE 2
+        main.ONOSip = cluster.getIps()  # List of IPs of the cluster's ONOS nodes. CASE 2
- if specificIp == "":
- if makeMaxNodes:
- main.ONOSip = main.ONOSbench.getOnosIps()
- else:
+ # FIXME: Do we need this?
+ # We should be able to just use Cluster.getIps()
+ if specificIp != "":
main.ONOSip.append( specificIp )
# Assigning ONOS cli handles to a list
- try:
- for i in range( 1, ( main.maxNodes if makeMaxNodes else main.numCtrls ) + 1 ):
- self.setCli( hasCli, i )
- self.setRest( hasRest, i )
- self.setNode( hasNode, i )
- if not makeMaxNodes:
- main.ONOSip.append( main.ONOSbench.getOnosIps()[ i - 1 ] )
- except AttributeError:
- numNode = self.getNumNode( hasCli, hasNode, hasRest )
- main.log.warn( "A " + str( main.maxNodes ) + " node cluster " +
- "was defined in env variables, but only " +
- str( numNode ) +
- " nodes were defined in the .topo file. " +
- "Using " + str( numNode ) +
- " nodes for the test." )
- main.maxNodes = numNode
+ main.maxNodes = len( cluster.controllers )
+ return main.TRUE
- main.log.debug( "Found ONOS ips: {}".format( main.ONOSip ) )
- if ( not hasCli or main.CLIs ) and ( not hasRest or main.RESTs )\
- and ( not hasNode or main.nodes ):
- return main.TRUE
- else:
- main.log.error( "Did not properly created list of ONOS CLI handle" )
- return main.FALSE
-
- def envSetupException ( self, e ):
+ def envSetupException( self, e ):
main.log.exception( e )
main.cleanup()
main.exit()
- def evnSetupConclusion ( self, stepResult ):
+ def evnSetupConclusion( self, stepResult ):
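+        # TODO: rename to envSetupConclusion; the "evn" typo predates this patch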
utilities.assert_equals( expect=main.TRUE,
actual=stepResult,
onpass="Successfully construct " +
@@ -244,22 +195,9 @@
onfail="ONOS service did not start properly on all nodes" )
return stepResult
- def startOnosClis( self ):
- startCliResult = main.TRUE
+ def startOnosClis( self, cluster ):
main.step( "Starting ONOS CLI sessions" )
- pool = []
- main.threadID = 0
- for i in range( main.numCtrls ):
- t = main.Thread( target=main.CLIs[ i ].startOnosCli,
- threadID=main.threadID,
- name="startOnosCli-" + str( i ),
- args=[ main.ONOSip[ i ] ] )
- pool.append( t )
- t.start()
- main.threadID = main.threadID + 1
- for t in pool:
- t.join()
- startCliResult = startCliResult and t.result
+ startCliResult = cluster.startCLIs()
if not startCliResult:
main.log.info( "ONOS CLI did not start up properly" )
main.cleanup()
@@ -272,7 +210,7 @@
onfail="Failed to start ONOS cli" )
return startCliResult
- def ONOSSetUp( self, Mininet, hasMultiNodeRounds=False, hasCli=True, newCell=True,
+ def ONOSSetUp( self, Mininet, cluster, hasMultiNodeRounds=False, hasCli=True, newCell=True,
cellName="temp", removeLog=False, extraApply=None, arg=None, extraClean=None,
skipPack=False, installMax=False, useSSH=True, killRemoveMax=True,
CtrlsSet=True, stopOnos=False ):
@@ -315,7 +253,7 @@
onosServiceResult = self.checkOnosService()
if hasCli:
- onosCliResult = self.startOnosClis()
+ onosCliResult = self.startOnosClis( cluster )
return cellResult and packageResult and onosUninstallResult and \
- onosInstallResult and secureSshResult and onosServiceResult and onosCliResult
\ No newline at end of file
+ onosInstallResult and secureSshResult and onosServiceResult and onosCliResult
diff --git a/TestON/tests/dependencies/topology.py b/TestON/tests/dependencies/topology.py
index f9ce3ff..7819fec 100644
--- a/TestON/tests/dependencies/topology.py
+++ b/TestON/tests/dependencies/topology.py
@@ -15,10 +15,10 @@
"""
devices = []
threads = []
- for i in ( range ( numNode ) if isinstance( numNode, int ) else numNode ):
- t = main.Thread( target=utilities.retry if needRetry else main.CLIs[ i ].devices,
- name="devices-" + str( i ),
- args=[main.CLIs[ i ].devices, [ None ] ] if needRetry else [],
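+        # When needRetry is set, utilities.retry wraps the per-node call and
+        # re-invokes it while it returns None; otherwise the call runs once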
+ for ctrl in main.Cluster.active():
+ t = main.Thread( target=utilities.retry if needRetry else ctrl.devices,
+ name="devices-" + str( ctrl ),
+ args=[ ctrl.devices, [ None ] ] if needRetry else [],
kwargs=kwargs )
threads.append( t )
t.start()
@@ -35,10 +35,10 @@
hosts = []
ipResult = main.TRUE
threads = []
- for i in ( range ( numNode ) if isinstance( numNode, int ) else numNode ):
- t = main.Thread( target=utilities.retry if needRetry else main.CLIs[ i ].hosts,
- name="hosts-" + str( i ),
- args=[main.CLIs[ i ].hosts, [ None ] ] if needRetry else [],
+ for ctrl in main.Cluster.active():
+ t = main.Thread( target=utilities.retry if needRetry else ctrl.hosts,
+ name="hosts-" + str( ctrl ),
+ args=[ ctrl.hosts, [ None ] ] if needRetry else [],
kwargs=kwargs )
threads.append( t )
t.start()
@@ -62,10 +62,10 @@
"""
ports = []
threads = []
- for i in ( range ( numNode ) if isinstance( numNode, int ) else numNode ):
- t = main.Thread( target=utilities.retry if needRetry else main.CLIs[ i ].ports,
- name="ports-" + str( i ),
- args=[ main.CLIs[ i ].ports, [ None ] ] if needRetry else [],
+ for ctrl in main.Cluster.active():
+ t = main.Thread( target=utilities.retry if needRetry else ctrl.ports,
+ name="ports-" + str( ctrl ),
+ args=[ ctrl.ports, [ None ] ] if needRetry else [],
kwargs=kwargs )
threads.append( t )
t.start()
@@ -81,11 +81,10 @@
"""
links = []
threads = []
- print numNode
- for i in ( range ( numNode ) if isinstance( numNode, int ) else numNode ):
- t = main.Thread( target=utilities.retry if needRetry else main.CLIs[ i ].links,
- name="links-" + str( i ),
- args=[main.CLIs[ i ].links, [ None ] ] if needRetry else [],
+ for ctrl in main.Cluster.active():
+ t = main.Thread( target=utilities.retry if needRetry else ctrl.links,
+ name="links-" + str( ctrl ),
+ args=[ ctrl.links, [ None ] ] if needRetry else [],
kwargs=kwargs )
threads.append( t )
t.start()
@@ -102,10 +101,10 @@
"""
clusters = []
threads = []
- for i in ( range ( numNode ) if isinstance( numNode, int ) else numNode ):
- t = main.Thread( target=utilities.retry if needRetry else main.CLIs[ i ].clusters,
- name="clusters-" + str( i ),
- args=[main.CLIs[ i ].clusters, [ None ] ] if needRetry else [],
+ for ctrl in main.Cluster.active():
+ t = main.Thread( target=utilities.retry if needRetry else ctrl.clusters,
+ name="clusters-" + str( ctrl ),
+ args=[ ctrl.clusters, [ None ] ] if needRetry else [],
kwargs=kwargs )
threads.append( t )
t.start()
@@ -124,10 +123,10 @@
mnSwitches,
json.loads( devices[ controller ] ),
json.loads( ports[ controller ] ) )
- except(TypeError, ValueError):
+ except ( TypeError, ValueError ):
main.log.error(
- "Could not load json: {0} or {1}".format( str( devices[ controller ] )
- , str( ports[ controller ] ) ) )
+ "Could not load json: {0} or {1}".format( str( devices[ controller ] ),
+ str( ports[ controller ] ) ) )
currentDevicesResult = main.FALSE
else:
currentDevicesResult = main.FALSE
@@ -193,15 +192,15 @@
controllerStr = str( controller + 1 ) # ONOS node number
# Compare Devices
currentDevicesResult = self.compareDevicePort( Mininet, controller,
- mnSwitches,
- devices, ports )
+ mnSwitches,
+ devices, ports )
if not currentDevicesResult:
deviceFails.append( controllerStr )
devicesResults = devicesResults and currentDevicesResult
# Compare Links
currentLinksResult = self.compareBase( links, controller,
- Mininet.compareLinks,
- [ mnSwitches, mnLinks ] )
+ Mininet.compareLinks,
+ [ mnSwitches, mnLinks ] )
if not currentLinksResult:
linkFails.append( controllerStr )
linksResults = linksResults and currentLinksResult
diff --git a/TestON/tests/dependencies/utils.py b/TestON/tests/dependencies/utils.py
index d82ae04..075b9bd 100644
--- a/TestON/tests/dependencies/utils.py
+++ b/TestON/tests/dependencies/utils.py
@@ -21,6 +21,7 @@
"""
Copy the karaf.log files after each testcase cycle
"""
+ # TODO: Also grab the rotated karaf logs
main.log.report( "Copy karaf logs" )
main.case( "Copy karaf logs" )
main.caseExplanation = "Copying the karaf logs to preserve them through" +\
@@ -29,17 +30,14 @@
stepResult = main.TRUE
scpResult = main.TRUE
copyResult = main.TRUE
- for i in range( main.numCtrls ):
- main.node = main.CLIs[ i ]
- ip = main.ONOSip[ i ]
- main.node.ip_address = ip
- scpResult = scpResult and main.ONOSbench.scp( main.node,
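+        # Pull karaf.log from each node over scp ( direction="from" ), then
+        # store a per-node, per-cycle copy in the test's log directory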
+ for ctrl in main.Cluster.controllers:
+ scpResult = scpResult and main.ONOSbench.scp( ctrl.node,
"/opt/onos/log/karaf.log",
"/tmp/karaf.log",
direction="from" )
copyResult = copyResult and main.ONOSbench.cpLogsToDir( "/tmp/karaf.log", main.logdir,
- copyFileName=( "karaf.log.node{0}.cycle{1}".format(
- str( i + 1 ), str( main.cycle ) ) ) )
+ copyFileName=( "karaf.log.{0}.cycle{1}".format(
+ str( ctrl ), str( main.cycle ) ) ) )
if scpResult and copyResult:
stepResult = main.TRUE and stepResult
else:
@@ -47,4 +45,4 @@
utilities.assert_equals( expect=main.TRUE,
actual=stepResult,
onpass="Successfully copied remote ONOS logs",
- onfail="Failed to copy remote ONOS logs" )
\ No newline at end of file
+ onfail="Failed to copy remote ONOS logs" )