Merge pull request #118 from opennetworkinglab/driver_change_pingallHosts
pingallHosts - allows users to specify hosts and pingtype to ping bet…
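Note: the pingallHosts driver change named in the title lies in the truncated portion of this diff, so the lines below are only a hypothetical sketch of how a TestON test case might call it, assuming the new Mininet driver method accepts a list of host names plus a ping type. The argument names are assumptions for illustration, not taken from this patch.

    # Hypothetical usage sketch -- method arguments are assumptions, not from this diff
    hostList = [ "h8", "h9", "h10" ]  # subset of hosts to ping between
    pingResult = main.Mininet1.pingallHosts( hostList, pingType="ipv4" )
    utilities.assert_equals( expect=main.TRUE, actual=pingResult,
                             onpass="Selected hosts can ping each other",
                             onfail="Ping failed between selected hosts" )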
diff --git a/TestON/core/teston.py b/TestON/core/teston.py
index 7199280..a15bb8c 100644
--- a/TestON/core/teston.py
+++ b/TestON/core/teston.py
@@ -78,8 +78,6 @@
self.testResult = "Summary"
self.stepName = ""
self.stepCache = ""
- # make this into two lists? one for step names, one for results?
- # this way, the case result could be a true AND of these results
self.EXPERIMENTAL_MODE = False
self.test_target = None
self.lastcommand = None
diff --git a/TestON/core/testparser.py b/TestON/core/testparser.py
index 37f50f0..24b1ca2 100644
--- a/TestON/core/testparser.py
+++ b/TestON/core/testparser.py
@@ -16,7 +16,7 @@
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with TestON. If not, see <http://www.gnu.org/licenses/>.
+ along with TestON. If not, see <http://www.gnu.org/licenses/>.
'''
@@ -26,21 +26,21 @@
def __init__(self,testFile):
try :
testFileHandler = open(testFile, 'r')
- except IOError:
+ except IOError:
print "No such file "+testFile
sys.exit(0)
-
+
testFileList = testFileHandler.readlines()
- self.testscript = testFileList
+ self.testscript = testFileList
self.caseCode = {}
self.caseBlock = ''
self.statementsList = []
- index = 0
+ index = 0
self.statementsList = []
#initialSpaces = len(line) -len(line.lstrip())
while index < len(testFileList):
testFileList[index] = re.sub("^\s{8}|^\s{4}", "", testFileList[index])
- # Skip multiline comments
+ # Skip multiline comments
if re.match('^(\'\'\')|^(\"\"\")',testFileList[index],0) :
index = index + 1
try :
@@ -48,16 +48,15 @@
index = index + 1
except IndexError,e:
print ''
-
- # skip empty lines and single line comments
+ # skip empty lines and single line comments
elif not re.match('#|^\s*$',testFileList[index],0):
self.statementsList.append(testFileList[index])
index = index + 1
-
+
def case_code(self):
- index = 0
- statementsList = self.statementsList
+ index = 0
+ statementsList = self.statementsList
while index < len(statementsList):
#print statementsList[index]
m= re.match('def\s+CASE(\d+)',statementsList[index],0)
@@ -76,18 +75,16 @@
except IndexError,e:
#print 'IndexError'
print ''
-
self.caseCode [str(m.group(1))] = self.caseBlock
#print "Case CODE "+self.caseCode [str(m.group(1))]
index = index + 1
-
- return self.caseCode
-
+ return self.caseCode
+
def step_code(self,caseStatements):
index = 0
- step = 0
- stepCode = {}
- step_flag = False
+ step = 0
+ stepCode = {}
+ step_flag = False
while index < len(caseStatements):
m= re.match('main\.step',caseStatements[index],0)
stepBlock = ''
@@ -99,13 +96,13 @@
while i < index :
block += caseStatements[i]
i = i + 1
- stepCode[step] = block
+ stepCode[step] = block
step = step + 1
- stepBlock= stepBlock + caseStatements[index]
+ stepBlock = stepBlock + caseStatements[index]
index = index + 1
try :
while not re.match('main\.step',caseStatements[index],0) :
- stepBlock= stepBlock + caseStatements[index]
+ stepBlock = stepBlock + caseStatements[index]
if index < len(caseStatements)-1:
index = index + 1
else :
@@ -121,11 +118,10 @@
if not step_flag :
stepCode[step] = "".join(caseStatements)
return stepCode
-
+
def getStepCode(self):
case_step_code = {}
case_block = self.case_code()
-
for case in case_block :
case_step_code[case] = {}
step_block = self.step_code(case_block[case])
diff --git a/TestON/drivers/common/cli/emulator/mininetclidriver.py b/TestON/drivers/common/cli/emulator/mininetclidriver.py
index 7e02cbd..8f22fcf 100644
--- a/TestON/drivers/common/cli/emulator/mininetclidriver.py
+++ b/TestON/drivers/common/cli/emulator/mininetclidriver.py
@@ -1615,7 +1615,7 @@
if switchesJson == "": # if rest call fails
main.log.error(
self.name +
- ".compare_switches(): Empty JSON object given from ONOS" )
+ ".compareSwitches(): Empty JSON object given from ONOS" )
return main.FALSE
onos = switchesJson
onosDPIDs = []
@@ -1924,7 +1924,7 @@
if onosMAC == mnIntf[ 'hw_addr' ].lower() :
match = True
for ip in mnIntf[ 'ips' ]:
- if ip in onosHost[ 'ips' ]:
+ if ip in onosHost[ 'ipAddresses' ]:
pass # all is well
else:
# misssing ip
diff --git a/TestON/drivers/common/cli/emulator/remotemininetdriver.py b/TestON/drivers/common/cli/emulator/remotemininetdriver.py
index e95e5c1..0958531 100644
--- a/TestON/drivers/common/cli/emulator/remotemininetdriver.py
+++ b/TestON/drivers/common/cli/emulator/remotemininetdriver.py
@@ -92,7 +92,7 @@
return main.ERROR
else:
main.log.error( "Error, unexpected output in the ping file" )
- main.log.warn( outputs )
+ #main.log.warn( outputs )
return main.TRUE
def pingLong( self, **pingParams ):
diff --git a/TestON/drivers/common/cli/onosclidriver.py b/TestON/drivers/common/cli/onosclidriver.py
index a67205c..c7e25e0 100644
--- a/TestON/drivers/common/cli/onosclidriver.py
+++ b/TestON/drivers/common/cli/onosclidriver.py
@@ -1820,7 +1820,7 @@
if flow.get( 'state' ) != 'ADDED' and flow.get( 'state' ) != \
'PENDING_ADD':
main.log.info( self.name + ": flow Id: " +
- flow.get( 'flowId' ) +
+ flow.get( 'id' ) +
" | state:" + flow.get( 'state' ) )
returnValue = main.FALSE
return returnValue
@@ -3129,6 +3129,20 @@
try:
cmdStr = "set-test-add " + str( setName ) + " " + str( values )
output = self.sendline( cmdStr )
+ try:
+ # TODO: Maybe make this less hardcoded
+ # ConsistentMap Exceptions
+ assert "org.onosproject.store.service" not in output
+ # Node not leader
+ assert "java.lang.IllegalStateException" not in output
+ except AssertionError:
+ main.log.error( "Error in processing 'set-test-add' " +
+ "command: " + str( output ) )
+ retryTime = 30 # Conservative time, given by Madan
+ main.log.info( "Waiting " + str( retryTime ) +
+                               " seconds before retrying." )
+ time.sleep( retryTime ) # Due to change in mastership
+ output = self.sendline( cmdStr )
assert "Error executing command" not in output
positiveMatch = "\[(.*)\] was added to the set " + str( setName )
negativeMatch = "\[(.*)\] was already in set " + str( setName )
@@ -3184,6 +3198,20 @@
else:
cmdStr += str( setName ) + " " + str( values )
output = self.sendline( cmdStr )
+ try:
+ # TODO: Maybe make this less hardcoded
+ # ConsistentMap Exceptions
+ assert "org.onosproject.store.service" not in output
+ # Node not leader
+ assert "java.lang.IllegalStateException" not in output
+ except AssertionError:
+                main.log.error( "Error in processing '" + cmdStr +
+                                "' command: " + str( output ) )
+ retryTime = 30 # Conservative time, given by Madan
+ main.log.info( "Waiting " + str( retryTime ) +
+                               " seconds before retrying." )
+ time.sleep( retryTime ) # Due to change in mastership
+ output = self.sendline( cmdStr )
assert "Error executing command" not in output
main.log.info( self.name + ": " + output )
if clear:
@@ -3269,6 +3297,20 @@
cmdStr = "set-test-get "
cmdStr += setName + " " + values
output = self.sendline( cmdStr )
+ try:
+ # TODO: Maybe make this less hardcoded
+ # ConsistentMap Exceptions
+ assert "org.onosproject.store.service" not in output
+ # Node not leader
+ assert "java.lang.IllegalStateException" not in output
+ except AssertionError:
+                main.log.error( "Error in processing '" + cmdStr +
+                                "' command: " + str( output ) )
+ retryTime = 30 # Conservative time, given by Madan
+ main.log.info( "Waiting " + str( retryTime ) +
+                               " seconds before retrying." )
+ time.sleep( retryTime ) # Due to change in mastership
+ output = self.sendline( cmdStr )
assert "Error executing command" not in output
main.log.info( self.name + ": " + output )
@@ -3334,7 +3376,7 @@
Required arguments:
setName - The name of the set to remove from.
returns:
- The integer value of the size returned or
+ The integer value of the size returned or
None on error
"""
try:
@@ -3348,6 +3390,20 @@
cmdStr = "set-test-get -s "
cmdStr += setName
output = self.sendline( cmdStr )
+ try:
+ # TODO: Maybe make this less hardcoded
+ # ConsistentMap Exceptions
+ assert "org.onosproject.store.service" not in output
+ # Node not leader
+ assert "java.lang.IllegalStateException" not in output
+ except AssertionError:
+                main.log.error( "Error in processing '" + cmdStr +
+                                "' command: " + str( output ) )
+ retryTime = 30 # Conservative time, given by Madan
+ main.log.info( "Waiting " + str( retryTime ) +
+                               " seconds before retrying." )
+ time.sleep( retryTime ) # Due to change in mastership
+ output = self.sendline( cmdStr )
assert "Error executing command" not in output
main.log.info( self.name + ": " + output )
match = re.search( pattern, output )
@@ -3450,6 +3506,20 @@
cmdStr += "-i "
cmdStr += counter
output = self.sendline( cmdStr )
+ try:
+ # TODO: Maybe make this less hardcoded
+ # ConsistentMap Exceptions
+ assert "org.onosproject.store.service" not in output
+ # Node not leader
+ assert "java.lang.IllegalStateException" not in output
+ except AssertionError:
+                main.log.error( "Error in processing '" + cmdStr +
+                                "' command: " + str( output ) )
+ retryTime = 30 # Conservative time, given by Madan
+ main.log.info( "Waiting " + str( retryTime ) +
+                               " seconds before retrying." )
+ time.sleep( retryTime ) # Due to change in mastership
+ output = self.sendline( cmdStr )
assert "Error executing command" not in output
main.log.info( self.name + ": " + output )
pattern = counter + " was incremented to (\d+)"
diff --git a/TestON/drivers/common/cli/onosdriver.py b/TestON/drivers/common/cli/onosdriver.py
index a78f4f2..a0a1b44 100644
--- a/TestON/drivers/common/cli/onosdriver.py
+++ b/TestON/drivers/common/cli/onosdriver.py
@@ -20,6 +20,7 @@
import time
import pexpect
import os.path
+from requests.models import Response
sys.path.append( "../" )
from drivers.common.clidriver import CLI
@@ -483,6 +484,22 @@
main.cleanup()
main.exit()
+ def getBranchName( self ):
+ self.handle.sendline( "cd " + self.home )
+ self.handle.expect( "ONOS\$" )
+ self.handle.sendline( "git name-rev --name-only HEAD" )
+ self.handle.expect( "git name-rev --name-only HEAD" )
+ self.handle.expect( "\$" )
+
+ lines = self.handle.before.splitlines()
+ if lines[1] == "master":
+ return "master"
+ elif lines[1] == "onos-1.0":
+ return "onos-1.0"
+ else:
+ main.log.info( lines[1] )
+ return "unexpected ONOS branch for SDN-IP test"
+
def getVersion( self, report=False ):
"""
Writes the COMMIT number to the report to be parsed
@@ -926,7 +943,7 @@
"""
try:
self.handle.sendline( "" )
- self.handle.expect( "\$" )
+ self.handle.expect( "\$", timeout=60 )
self.handle.sendline( "onos-uninstall " + str( nodeIp ) )
self.handle.expect( "\$" )
@@ -935,6 +952,9 @@
# onos-uninstall command does not return any text
return main.TRUE
+ except pexpect.TIMEOUT:
+ main.log.exception( self.name + ": Timeout in onosUninstall" )
+ return main.FALSE
except pexpect.EOF:
main.log.error( self.name + ": EOF exception found" )
main.log.error( self.name + ": " + self.handle.before )
diff --git a/TestON/drivers/common/cli/quaggaclidriver.py b/TestON/drivers/common/cli/quaggaclidriver.py
index 1c63206..f391e3a 100644
--- a/TestON/drivers/common/cli/quaggaclidriver.py
+++ b/TestON/drivers/common/cli/quaggaclidriver.py
@@ -40,9 +40,9 @@
ip_address="1.1.1.1",
port=self.port,
pwd=self.pwd )
- main.log.info( "connect parameters:" + str( self.user_name ) + ";"
- + str( self.ip_address ) + ";" + str( self.port )
- + ";" + str(self.pwd ) )
+ #main.log.info( "connect parameters:" + str( self.user_name ) + ";"
+ # + str( self.ip_address ) + ";" + str( self.port )
+ # + ";" + str(self.pwd ) )
if self.handle:
# self.handle.expect( "",timeout=10 )
@@ -186,7 +186,7 @@
return intents
# This method extracts all actual routes from ONOS CLI
- def extractActualRoutes( self, getRoutesResult ):
+ def extractActualRoutesOneDotZero( self, getRoutesResult ):
routesJsonObj = json.loads( getRoutesResult )
allRoutesActual = []
@@ -199,6 +199,18 @@
return sorted( allRoutesActual )
+ def extractActualRoutesMaster( self, getRoutesResult ):
+ routesJsonObj = json.loads( getRoutesResult )
+
+ allRoutesActual = []
+ for route in routesJsonObj['routes4']:
+ if route[ 'prefix' ] == '172.16.10.0/24':
+ continue
+ allRoutesActual.append(
+ route[ 'prefix' ] + "/" + route[ 'nextHop' ] )
+
+ return sorted( allRoutesActual )
+
# This method extracts all actual route intents from ONOS CLI
def extractActualRouteIntents( self, getIntentsResult ):
intents = []
@@ -489,7 +501,7 @@
if routesAdded == numRoutes:
return main.TRUE
return main.FALSE
-
+
# Please use deleteRoutes method instead of this one!
def delRoute( self, net, numRoutes, routeRate ):
try:
@@ -562,7 +574,7 @@
child.expect( "Flow table show" )
count = 0
while True:
- i = child.expect( [ '17\d\.\d{1,3}\.\d{1,3}\.\d{1,3}',
+ i = child.expect( [ '17\d\.\d{1,3}\.\d{1,3}\.\d{1,3}',
'CLI#', pexpect.TIMEOUT ] )
if i == 0:
count = count + 1
diff --git a/TestON/tests/HATestClusterRestart/HATestClusterRestart.py b/TestON/tests/HATestClusterRestart/HATestClusterRestart.py
index 2499625..bb69fcc 100644
--- a/TestON/tests/HATestClusterRestart/HATestClusterRestart.py
+++ b/TestON/tests/HATestClusterRestart/HATestClusterRestart.py
@@ -49,6 +49,9 @@
main.log.report( "ONOS HA test: Restart all ONOS nodes - " +
"initialization" )
main.case( "Setting up test environment" )
+ main.caseExplaination = "Setup the test environment including " +\
+ "installing ONOS, starting Mininet and ONOS" +\
+ "cli sessions."
# TODO: save all the timers and output them for plotting
# load some variables from the params file
@@ -115,26 +118,27 @@
onpass="Mininet Started",
onfail="Error starting Mininet" )
- main.step( "Compiling the latest version of ONOS" )
+ main.step( "Git checkout and pull " + gitBranch )
if PULLCODE:
- main.step( "Git checkout and pull " + gitBranch )
main.ONOSbench.gitCheckout( gitBranch )
gitPullResult = main.ONOSbench.gitPull()
# values of 1 or 3 are good
utilities.assert_lesser( expect=0, actual=gitPullResult,
onpass="Git pull successful",
onfail="Git pull failed" )
-
- main.step( "Using mvn clean and install" )
- cleanInstallResult = main.ONOSbench.cleanInstall()
- utilities.assert_equals( expect=main.TRUE,
- actual=cleanInstallResult,
- onpass="MCI successful",
- onfail="MCI failed" )
else:
main.log.warn( "Did not pull new code so skipping mvn " +
"clean install" )
main.ONOSbench.getVersion( report=True )
+
+ main.step( "Using mvn clean install" )
+ cleanInstallResult = main.TRUE
+ if gitPullResult == main.TRUE:
+ cleanInstallResult = main.ONOSbench.cleanInstall()
+ utilities.assert_equals( expect=main.TRUE,
+ actual=cleanInstallResult,
+ onpass="MCI successful",
+ onfail="MCI failed" )
# GRAPHS
# NOTE: important params here:
# job = name of Jenkins job
@@ -227,10 +231,8 @@
onpass="App Ids seem to be correct",
onfail="Something is wrong with app Ids" )
- case1Result = ( cleanInstallResult and packageResult and
- cellResult and verifyResult and onosInstallResult
- and onosIsupResult and cliResults )
- if case1Result == main.FALSE:
+ if cliResults == main.FALSE:
+ main.log.error( "Failed to start ONOS, stopping test" )
main.cleanup()
main.exit()
@@ -253,8 +255,12 @@
assert ONOS6Port, "ONOS6Port not defined"
assert ONOS7Port, "ONOS7Port not defined"
- main.log.report( "Assigning switches to controllers" )
main.case( "Assigning Controllers" )
+ main.caseExplaination = "Assign switches to ONOS using 'ovs-vsctl' " +\
+ "and check that an ONOS node becomes the " +\
+ "master of the device. Then manually assign" +\
+ " mastership to specific ONOS nodes using" +\
+ " 'device-role'"
main.step( "Assign switches to controllers" )
# TODO: rewrite this function to take lists of ips and ports?
@@ -388,8 +394,6 @@
"""
Assign intents
"""
- # FIXME: we must reinstall intents until we have a persistant
- # datastore!
import time
import json
assert numControllers, "numControllers not defined"
@@ -397,11 +401,14 @@
assert utilities.assert_equals, "utilities.assert_equals not defined"
assert CLIs, "CLIs not defined"
assert nodes, "nodes not defined"
- main.log.report( "Adding host intents" )
+        # NOTE: we must reinstall intents until we have a persistent intent
+ # datastore!
main.case( "Adding host Intents" )
-
- main.step( "Discovering Hosts( Via pingall for now )" )
- # FIXME: Once we have a host discovery mechanism, use that instead
+ main.caseExplaination = "Discover hosts by using pingall then " +\
+ "assign predetermined host-to-host intents." +\
+ " After installation, check that the intent" +\
+ " is distributed to all nodes and the state" +\
+ " is INSTALLED"
# install onos-app-fwd
main.step( "Install reactive forwarding app" )
@@ -410,6 +417,7 @@
onpass="Install fwd successful",
onfail="Install fwd failed" )
+ main.step( "Check app ids" )
appCheck = main.TRUE
threads = []
for i in range( numControllers ):
@@ -429,6 +437,8 @@
onpass="App Ids seem to be correct",
onfail="Something is wrong with app Ids" )
+ main.step( "Discovering Hosts( Via pingall for now )" )
+ # FIXME: Once we have a host discovery mechanism, use that instead
# REACTIVE FWD test
pingResult = main.FALSE
for i in range(2): # Retry if pingall fails first time
@@ -451,8 +461,10 @@
utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
onpass="Uninstall fwd successful",
onfail="Uninstall fwd failed" )
- main.step( "Check app ids check" )
+
+ main.step( "Check app ids" )
threads = []
+ appCheck2 = main.TRUE
for i in range( numControllers ):
t = main.Thread( target=CLIs[i].appToIDCheck,
name="appToIDCheck-" + str( i ),
@@ -462,15 +474,15 @@
for t in threads:
t.join()
- appCheck = appCheck and t.result
- if appCheck != main.TRUE:
+ appCheck2 = appCheck2 and t.result
+ if appCheck2 != main.TRUE:
main.log.warn( CLIs[0].apps() )
main.log.warn( CLIs[0].appIDs() )
- utilities.assert_equals( expect=main.TRUE, actual=appCheck,
+ utilities.assert_equals( expect=main.TRUE, actual=appCheck2,
onpass="App Ids seem to be correct",
onfail="Something is wrong with app Ids" )
- main.step( "Add host intents" )
+ main.step( "Add host intents via cli" )
intentIds = []
# TODO: move the host numbers to params
# Maybe look at all the paths we ping?
@@ -513,7 +525,10 @@
except ( ValueError, TypeError ):
main.log.warn( repr( hosts ) )
hostResult = main.FALSE
- # FIXME: DEBUG
+ utilities.assert_equals( expect=main.TRUE, actual=hostResult,
+ onpass="Found a host id for each host",
+ onfail="Error looking up host ids" )
+
intentStart = time.time()
onosIds = main.ONOScli1.getAllIntentsId()
main.log.info( "Submitted intents: " + str( intentIds ) )
@@ -608,13 +623,11 @@
main.log.exception( "Error parsing pending map" )
main.log.error( repr( pendingMap ) )
- intentAddResult = bool( pingResult and hostResult and intentAddResult
- and not missingIntents and installedCheck )
- utilities.assert_equals(
- expect=True,
- actual=intentAddResult,
- onpass="Pushed host intents to ONOS",
- onfail="Error in pushing host intents to ONOS" )
+ intentAddResult = bool( intentAddResult and not missingIntents and
+ installedCheck )
+ if not intentAddResult:
+ main.log.error( "Error in pushing host intents to ONOS" )
+
main.step( "Intent Anti-Entropy dispersion" )
for i in range(100):
correct = True
@@ -747,9 +760,11 @@
assert utilities.assert_equals, "utilities.assert_equals not defined"
assert CLIs, "CLIs not defined"
assert nodes, "nodes not defined"
- description = " Ping across added host intents"
- main.log.report( description )
- main.case( description )
+        main.case( "Verify connectivity by sending traffic across Intents" )
+ main.caseExplaination = "Ping across added host intents to check " +\
+ "functionality and check the state of " +\
+ "the intent"
+ main.step( "Ping across added host intents" )
PingResult = main.TRUE
for i in range( 8, 18 ):
ping = main.Mininet1.pingHost( src="h" + str( i ),
@@ -783,83 +798,102 @@
onpass="Intents have been installed correctly and pings work",
onfail="Intents have not been installed correctly, pings failed." )
+ main.step( "Check Intent state" )
installedCheck = True
- if PingResult is not main.TRUE:
- # Print the intent states
- intents = main.ONOScli1.intents()
- intentStates = []
- main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
- count = 0
- # Iter through intents of a node
- try:
- for intent in json.loads( intents ):
- state = intent.get( 'state', None )
- if "INSTALLED" not in state:
- installedCheck = False
- intentId = intent.get( 'id', None )
- intentStates.append( ( intentId, state ) )
- except ( ValueError, TypeError ):
- main.log.exception( "Error parsing intents." )
- intentStates.sort()
- for i, s in intentStates:
- count += 1
- main.log.info( "%-6s%-15s%-15s" %
- ( str( count ), str( i ), str( s ) ) )
- leaders = main.ONOScli1.leaders()
- try:
- if leaders:
- parsedLeaders = json.loads( leaders )
- main.log.warn( json.dumps( parsedLeaders,
- sort_keys=True,
- indent=4,
- separators=( ',', ': ' ) ) )
- # check for all intent partitions
- # check for election
- topics = []
- for i in range( 14 ):
- topics.append( "intent-partition-" + str( i ) )
- # FIXME: this should only be after we start the app
- topics.append( "org.onosproject.election" )
- main.log.debug( topics )
- ONOStopics = [ j['topic'] for j in parsedLeaders ]
- for topic in topics:
- if topic not in ONOStopics:
- main.log.error( "Error: " + topic +
- " not in leaders" )
- else:
- main.log.error( "leaders() returned None" )
- except ( ValueError, TypeError ):
- main.log.exception( "Error parsing leaders" )
- main.log.error( repr( leaders ) )
- partitions = main.ONOScli1.partitions()
- try:
- if partitions :
- parsedPartitions = json.loads( partitions )
- main.log.warn( json.dumps( parsedPartitions,
- sort_keys=True,
- indent=4,
- separators=( ',', ': ' ) ) )
- # TODO check for a leader in all paritions
- # TODO check for consistency among nodes
- else:
- main.log.error( "partitions() returned None" )
- except ( ValueError, TypeError ):
- main.log.exception( "Error parsing partitions" )
- main.log.error( repr( partitions ) )
- pendingMap = main.ONOScli1.pendingMap()
- try:
- if pendingMap :
- parsedPending = json.loads( pendingMap )
- main.log.warn( json.dumps( parsedPending,
- sort_keys=True,
- indent=4,
- separators=( ',', ': ' ) ) )
- # TODO check something here?
- else:
- main.log.error( "pendingMap() returned None" )
- except ( ValueError, TypeError ):
- main.log.exception( "Error parsing pending map" )
- main.log.error( repr( pendingMap ) )
+ # Print the intent states
+ intents = main.ONOScli1.intents()
+ intentStates = []
+ main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
+ count = 0
+ # Iter through intents of a node
+ try:
+ for intent in json.loads( intents ):
+ state = intent.get( 'state', None )
+ if "INSTALLED" not in state:
+ installedCheck = False
+ intentId = intent.get( 'id', None )
+ intentStates.append( ( intentId, state ) )
+ except ( ValueError, TypeError ):
+ main.log.exception( "Error parsing intents." )
+ # Print states
+ intentStates.sort()
+ for i, s in intentStates:
+ count += 1
+ main.log.info( "%-6s%-15s%-15s" %
+ ( str( count ), str( i ), str( s ) ) )
+ utilities.assert_equals( expect=True, actual=installedCheck,
+ onpass="Intents are all INSTALLED",
+ onfail="Intents are not all in " +\
+ "INSTALLED state" )
+
+ main.step( "Check leadership of topics" )
+ leaders = main.ONOScli1.leaders()
+ topicCheck = main.TRUE
+ try:
+ if leaders:
+ parsedLeaders = json.loads( leaders )
+ main.log.warn( json.dumps( parsedLeaders,
+ sort_keys=True,
+ indent=4,
+ separators=( ',', ': ' ) ) )
+ # check for all intent partitions
+ # check for election
+ # TODO: Look at Devices as topics now that it uses this system
+ topics = []
+ for i in range( 14 ):
+ topics.append( "intent-partition-" + str( i ) )
+ # FIXME: this should only be after we start the app
+ # FIXME: topics.append( "org.onosproject.election" )
+ # Print leaders output
+ main.log.debug( topics )
+ ONOStopics = [ j['topic'] for j in parsedLeaders ]
+ for topic in topics:
+ if topic not in ONOStopics:
+ main.log.error( "Error: " + topic +
+ " not in leaders" )
+ topicCheck = main.FALSE
+ else:
+ main.log.error( "leaders() returned None" )
+ topicCheck = main.FALSE
+ except ( ValueError, TypeError ):
+ topicCheck = main.FALSE
+ main.log.exception( "Error parsing leaders" )
+ main.log.error( repr( leaders ) )
+ # TODO: Check for a leader of these topics
+ utilities.assert_equals( expect=main.TRUE, actual=topicCheck,
+                                 onpass="intent partitions are in leaders",
+ onfail="Some topics were lost " )
+ # Print partitions
+ partitions = main.ONOScli1.partitions()
+ try:
+ if partitions :
+ parsedPartitions = json.loads( partitions )
+ main.log.warn( json.dumps( parsedPartitions,
+ sort_keys=True,
+ indent=4,
+ separators=( ',', ': ' ) ) )
+ # TODO check for a leader in all paritions
+ # TODO check for consistency among nodes
+ else:
+ main.log.error( "partitions() returned None" )
+ except ( ValueError, TypeError ):
+ main.log.exception( "Error parsing partitions" )
+ main.log.error( repr( partitions ) )
+ # Print Pending Map
+ pendingMap = main.ONOScli1.pendingMap()
+ try:
+ if pendingMap :
+ parsedPending = json.loads( pendingMap )
+ main.log.warn( json.dumps( parsedPending,
+ sort_keys=True,
+ indent=4,
+ separators=( ',', ': ' ) ) )
+ # TODO check something here?
+ else:
+ main.log.error( "pendingMap() returned None" )
+ except ( ValueError, TypeError ):
+ main.log.exception( "Error parsing pending map" )
+ main.log.error( repr( pendingMap ) )
if not installedCheck:
main.log.info( "Waiting 60 seconds to see if the state of " +
@@ -942,6 +976,37 @@
main.log.error( repr( pendingMap ) )
main.log.debug( CLIs[0].flows( jsonFormat=False ) )
+ main.step( "Wait a minute then ping again" )
+ PingResult = main.TRUE
+ for i in range( 8, 18 ):
+ ping = main.Mininet1.pingHost( src="h" + str( i ),
+ target="h" + str( i + 10 ) )
+ PingResult = PingResult and ping
+ if ping == main.FALSE:
+ main.log.warn( "Ping failed between h" + str( i ) +
+ " and h" + str( i + 10 ) )
+ elif ping == main.TRUE:
+ main.log.info( "Ping test passed!" )
+ # Don't set PingResult or you'd override failures
+ if PingResult == main.FALSE:
+ main.log.report(
+ "Intents have not been installed correctly, pings failed." )
+ # TODO: pretty print
+ main.log.warn( "ONOS1 intents: " )
+ try:
+ tmpIntents = main.ONOScli1.intents()
+ main.log.warn( json.dumps( json.loads( tmpIntents ),
+ sort_keys=True,
+ indent=4,
+ separators=( ',', ': ' ) ) )
+ except ( ValueError, TypeError ):
+ main.log.warn( repr( tmpIntents ) )
+ utilities.assert_equals(
+ expect=main.TRUE,
+ actual=PingResult,
+ onpass="Intents have been installed correctly and pings work",
+ onfail="Intents have not been installed correctly, pings failed." )
+
def CASE5( self, main ):
"""
Reading state of ONOS
@@ -956,7 +1021,6 @@
# assumes that sts is already in you PYTHONPATH
from sts.topology.teston_topology import TestONTopology
- main.log.report( "Setting up and gathering data for current state" )
main.case( "Setting up and gathering data for current state" )
# The general idea for this test case is to pull the state of
# ( intents,flows, topology,... ) from each ONOS node
@@ -1378,7 +1442,7 @@
for controller in range( 0, len( hosts ) ):
controllerStr = str( controller + 1 )
for host in hosts[ controller ]:
- if not host.get( 'ips', [ ] ):
+ if not host.get( 'ipAddresses', [ ] ):
main.log.error( "DEBUG:Error with host ips on controller" +
controllerStr + ": " + str( host ) )
ipResult = main.FALSE
@@ -1489,6 +1553,7 @@
"""
The Failure case.
"""
+ import time
assert numControllers, "numControllers not defined"
assert main, "main not defined"
assert utilities.assert_equals, "utilities.assert_equals not defined"
@@ -1505,6 +1570,7 @@
main.case( "Restart entire ONOS cluster" )
main.step( "Killing ONOS nodes" )
killResults = main.TRUE
+ killTime = time.time()
for node in nodes:
killed = main.ONOSbench.onosKill( node.ip_address )
killResults = killResults and killed
@@ -1542,6 +1608,29 @@
utilities.assert_equals( expect=main.TRUE, actual=cliResults,
onpass="ONOS cli started",
onfail="ONOS clis did not restart" )
+
+ # Grab the time of restart so we chan check how long the gossip
+ # protocol has had time to work
+ main.restartTime = time.time() - killTime
+ main.log.debug( "Restart time: " + str( main.restartTime ) )
+ '''
+ # FIXME: revisit test plan for election with madan
+ # Rerun for election on restarted nodes
+ run1 = CLIs[0].electionTestRun()
+ run2 = CLIs[1].electionTestRun()
+ run3 = CLIs[2].electionTestRun()
+ ...
+ ...
+ runResults = run1 and run2 and run3
+ utilities.assert_equals( expect=main.TRUE, actual=runResults,
+ onpass="Reran for election",
+ onfail="Failed to rerun for election" )
+ '''
+ # TODO: Make this configurable
+ time.sleep( 60 )
+ main.log.debug( CLIs[0].nodes( jsonFormat=False ) )
+ main.log.debug( CLIs[0].leaders( jsonFormat=False ) )
+ main.log.debug( CLIs[0].partitions( jsonFormat=False ) )
def CASE7( self, main ):
"""
@@ -1630,6 +1719,7 @@
elif rolesResults and not consistentMastership:
mastershipCheck = main.TRUE
+ '''
description2 = "Compare switch roles from before failure"
main.step( description2 )
try:
@@ -1655,15 +1745,13 @@
else:
main.log.warn( "Mastership of switch %s changed" % switchDPID )
mastershipCheck = main.FALSE
- if mastershipCheck == main.TRUE:
- main.log.report( "Mastership of Switches was not changed" )
utilities.assert_equals(
expect=main.TRUE,
actual=mastershipCheck,
onpass="Mastership of Switches was not changed",
onfail="Mastership of some switches changed" )
+ '''
# NOTE: we expect mastership to change on controller failure
- mastershipCheck = mastershipCheck and consistentMastership
main.step( "Get the intents and compare across all nodes" )
ONOSIntents = []
@@ -1769,7 +1857,7 @@
sameIntents = main.TRUE
if intentState and intentState == ONOSIntents[ 0 ]:
sameIntents = main.TRUE
- main.log.report( "Intents are consistent with before failure" )
+ main.log.info( "Intents are consistent with before failure" )
# TODO: possibly the states have changed? we may need to figure out
# what the acceptable states are
else:
@@ -1804,8 +1892,6 @@
if FlowTables == main.FALSE:
main.log.info( "Differences in flow table for switch: s" +
str( i + 1 ) )
- if FlowTables == main.TRUE:
- main.log.report( "No changes were found in the flow tables" )
utilities.assert_equals(
expect=main.TRUE,
actual=FlowTables,
@@ -1849,7 +1935,7 @@
leaderN = cli.electionTestLeader()
leaderList.append( leaderN )
if leaderN == main.FALSE:
- # error in response
+ # error in response
main.log.report( "Something is wrong with " +
"electionTestLeader function, check the" +
" error logs" )
@@ -1864,10 +1950,6 @@
main.log.error(
"Inconsistent view of leader for the election test app" )
# TODO: print the list
- if leaderResult:
- main.log.report( "Leadership election tests passed( consistent " +
- "view of leader across listeners and a new " +
- "leader was re-elected if applicable )" )
utilities.assert_equals(
expect=main.TRUE,
actual=leaderResult,
@@ -1891,16 +1973,26 @@
assert CLIs, "CLIs not defined"
assert nodes, "nodes not defined"
- description = "Compare ONOS Topology view to Mininet topology"
- main.case( description )
- main.log.report( description )
+ main.case( "Compare ONOS Topology view to Mininet topology" )
+ main.caseExplaination = "Compare topology objects between Mininet" +\
+ " and ONOS"
main.step( "Create TestONTopology object" )
- ctrls = []
- for node in nodes:
- temp = ( node, node.name, node.ip_address, 6633 )
- ctrls.append( temp )
- MNTopo = TestONTopology( main.Mininet1, ctrls )
+ try:
+ ctrls = []
+ for node in nodes:
+ temp = ( node, node.name, node.ip_address, 6633 )
+ ctrls.append( temp )
+ MNTopo = TestONTopology( main.Mininet1, ctrls )
+ except Exception:
+ objResult = main.FALSE
+ else:
+ objResult = main.TRUE
+ utilities.assert_equals( expect=main.TRUE, actual=objResult,
+ onpass="Created TestONTopology object",
+ onfail="Exception while creating " +
+ "TestONTopology object" )
+ main.step( "Comparing ONOS topology to MN" )
devicesResults = main.TRUE
portsResults = main.TRUE
linksResults = main.TRUE
@@ -1949,9 +2041,9 @@
for controller in range( 0, len( hosts ) ):
controllerStr = str( controller + 1 )
for host in hosts[ controller ]:
- if host is None or host.get( 'ips', [] ) == []:
+ if host is None or host.get( 'ipAddresses', [] ) == []:
main.log.error(
- "DEBUG:Error with host ips on controller" +
+ "DEBUG:Error with host ipAddresses on controller" +
controllerStr + ": " + str( host ) )
ipResult = main.FALSE
ports = []
@@ -2142,8 +2234,6 @@
utilities.assert_equals( expect=main.TRUE, actual=topoResult,
onpass="Topology Check Test successful",
onfail="Topology Check Test NOT successful" )
- if topoResult == main.TRUE:
- main.log.report( "ONOS topology view matches Mininet topology" )
# FIXME: move this to an ONOS state case
main.step( "Checking ONOS nodes" )
@@ -2420,6 +2510,15 @@
main.step( "Packing and rotating pcap archives" )
os.system( "~/TestON/dependencies/rotate.sh " + str( testname ) )
+ try:
+ timerLog = open( main.logdir + "/Timers.csv", 'w')
+            # Overwrite the file with the restart time and close
+ timerLog.write( "Restart\n" )
+ timerLog.write( str( main.restartTime ) )
+ timerLog.close()
+ except NameError, e:
+ main.log.exception(e)
+
def CASE14( self, main ):
"""
start election app on all onos nodes
@@ -2430,10 +2529,17 @@
assert CLIs, "CLIs not defined"
assert nodes, "nodes not defined"
- leaderResult = main.TRUE
main.case("Start Leadership Election app")
main.step( "Install leadership election app" )
- main.ONOScli1.activateApp( "org.onosproject.election" )
+ appResult = main.ONOScli1.activateApp( "org.onosproject.election" )
+ utilities.assert_equals(
+ expect=main.TRUE,
+ actual=appResult,
+ onpass="Election app installed",
+ onfail="Something went wrong with installing Leadership election" )
+
+ main.step( "Run for election on each node" )
+ leaderResult = main.TRUE
leaders = []
for cli in CLIs:
cli.electionTestRun()
@@ -2445,19 +2551,23 @@
str( leader ) + "'" )
leaderResult = main.FALSE
leaders.append( leader )
- if len( set( leaders ) ) != 1:
- leaderResult = main.FALSE
- main.log.error( "Results of electionTestLeader is order of CLIs:" +
- str( leaders ) )
- if leaderResult:
- main.log.report( "Leadership election tests passed( consistent " +
- "view of leader across listeners and a leader " +
- "was elected )" )
utilities.assert_equals(
expect=main.TRUE,
actual=leaderResult,
- onpass="Leadership election passed",
- onfail="Something went wrong with Leadership election" )
+ onpass="Successfully ran for leadership",
+ onfail="Failed to run for leadership" )
+
+ main.step( "Check that each node shows the same leader" )
+ sameLeader = main.TRUE
+ if len( set( leaders ) ) != 1:
+ sameLeader = main.FALSE
+ main.log.error( "Results of electionTestLeader is order of CLIs:" +
+ str( leaders ) )
+ utilities.assert_equals(
+ expect=main.TRUE,
+ actual=sameLeader,
+ onpass="Leadership is consistent for the election topic",
+ onfail="Nodes have different leaders" )
def CASE15( self, main ):
"""
@@ -2474,12 +2584,44 @@
description = "Check that Leadership Election is still functional"
main.log.report( description )
main.case( description )
+        # NOTE: Need to re-run since being a candidate is not persistent
+ main.step( "Run for election on each node" )
+ leaderResult = main.TRUE
+ leaders = []
+ for cli in CLIs:
+ cli.electionTestRun()
+ for cli in CLIs:
+ leader = cli.electionTestLeader()
+ if leader is None or leader == main.FALSE:
+ main.log.report( cli.name + ": Leader for the election app " +
+ "should be an ONOS node, instead got '" +
+ str( leader ) + "'" )
+ leaderResult = main.FALSE
+ leaders.append( leader )
+ utilities.assert_equals(
+ expect=main.TRUE,
+ actual=leaderResult,
+ onpass="Successfully ran for leadership",
+ onfail="Failed to run for leadership" )
+
+ main.step( "Check that each node shows the same leader" )
+ sameLeader = main.TRUE
+ if len( set( leaders ) ) != 1:
+ sameLeader = main.FALSE
+ main.log.error( "Results of electionTestLeader is order of CLIs:" +
+ str( leaders ) )
+ utilities.assert_equals(
+ expect=main.TRUE,
+ actual=sameLeader,
+ onpass="Leadership is consistent for the election topic",
+ onfail="Nodes have different leaders" )
+
main.step( "Find current leader and withdraw" )
leader = main.ONOScli1.electionTestLeader()
# do some sanity checking on leader before using it
withdrawResult = main.FALSE
if leader is None or leader == main.FALSE:
- main.log.report(
+ main.log.error(
"Leader for the election app should be an ONOS node," +
"instead got '" + str( leader ) + "'" )
leaderResult = main.FALSE
@@ -2495,8 +2637,8 @@
utilities.assert_equals(
expect=main.TRUE,
actual=withdrawResult,
- onpass="App was withdrawn from election",
- onfail="App was not withdrawn from election" )
+ onpass="Node was withdrawn from election",
+ onfail="Node was not withdrawn from election" )
main.step( "Make sure new leader is elected" )
# FIXME: use threads
@@ -2535,11 +2677,6 @@
main.log.report( "ONOS" + str( n + 1 ) + " response: " +
str( leaderList[ n ] ) )
leaderResult = leaderResult and consistentLeader
- if leaderResult:
- main.log.report( "Leadership election tests passed( consistent " +
- "view of leader across listeners and a new " +
- "leader was elected when the old leader " +
- "resigned )" )
utilities.assert_equals(
expect=main.TRUE,
actual=leaderResult,
@@ -2558,6 +2695,7 @@
onpass="App re-ran for election",
onfail="App failed to run for election" )
+ main.step( "Leader did not change when old leader re-ran" )
afterRun = main.ONOScli1.electionTestLeader()
# verify leader didn't just change
if afterRun == leaderList[ 0 ]:
@@ -2572,14 +2710,6 @@
onfail="Something went wrong with Leadership election after " +
"the old leader re-ran for election" )
- case15Result = withdrawResult and leaderResult and runResult and\
- afterResult
- utilities.assert_equals(
- expect=main.TRUE,
- actual=case15Result,
- onpass="Leadership election is still functional",
- onfail="Leadership Election is no longer functional" )
-
def CASE16( self, main ):
"""
Install Distributed Primitives app
@@ -2613,6 +2743,7 @@
actual=appResults,
onpass="Primitives app activated",
onfail="Primitives app not activated" )
+        time.sleep( 5 )  # To allow all nodes to activate
def CASE17( self, main ):
"""
@@ -2656,11 +2787,13 @@
main.step( "Increment and get a default counter on each node" )
pCounters = []
threads = []
+ addedPValues = []
for i in range( numControllers ):
t = main.Thread( target=CLIs[i].counterTestIncrement,
name="counterIncrement-" + str( i ),
args=[ pCounterName ] )
pCounterValue += 1
+ addedPValues.append( pCounterValue )
threads.append( t )
t.start()
@@ -2669,8 +2802,12 @@
pCounters.append( t.result )
# Check that counter incremented numController times
pCounterResults = True
- for i in range( numControllers ):
- pCounterResults and ( i + 1 ) in pCounters
+ for i in addedPValues:
+ tmpResult = i in pCounters
+ pCounterResults = pCounterResults and tmpResult
+ if not tmpResult:
+ main.log.error( str( i ) + " is not in partitioned "
+ "counter incremented results" )
utilities.assert_equals( expect=True,
actual=pCounterResults,
onpass="Default counter incremented",
@@ -2679,6 +2816,7 @@
main.step( "Increment and get an in memory counter on each node" )
iCounters = []
+ addedIValues = []
threads = []
for i in range( numControllers ):
t = main.Thread( target=CLIs[i].counterTestIncrement,
@@ -2686,6 +2824,7 @@
args=[ iCounterName ],
kwargs={ "inMemory": True } )
iCounterValue += 1
+ addedIValues.append( iCounterValue )
threads.append( t )
t.start()
@@ -2694,8 +2833,12 @@
iCounters.append( t.result )
# Check that counter incremented numController times
iCounterResults = True
- for i in range( numControllers ):
- iCounterResults and ( i + 1 ) in iCounters
+ for i in addedIValues:
+ tmpResult = i in iCounters
+ iCounterResults = iCounterResults and tmpResult
+ if not tmpResult:
+ main.log.error( str( i ) + " is not in the in-memory "
+ "counter incremented results" )
utilities.assert_equals( expect=True,
actual=iCounterResults,
onpass="In memory counter incremented",
diff --git a/TestON/tests/HATestMinorityRestart/HATestMinorityRestart.py b/TestON/tests/HATestMinorityRestart/HATestMinorityRestart.py
index 0dd40ed..256561a 100644
--- a/TestON/tests/HATestMinorityRestart/HATestMinorityRestart.py
+++ b/TestON/tests/HATestMinorityRestart/HATestMinorityRestart.py
@@ -49,6 +49,9 @@
main.log.report( "ONOS HA test: Restart minority of ONOS nodes - " +
"initialization" )
main.case( "Setting up test environment" )
+ main.caseExplaination = "Setup the test environment including " +\
+ "installing ONOS, starting Mininet and ONOS" +\
+ "cli sessions."
# TODO: save all the timers and output them for plotting
# load some variables from the params file
@@ -115,26 +118,27 @@
onpass="Mininet Started",
onfail="Error starting Mininet" )
- main.step( "Compiling the latest version of ONOS" )
+ main.step( "Git checkout and pull " + gitBranch )
if PULLCODE:
- main.step( "Git checkout and pull " + gitBranch )
main.ONOSbench.gitCheckout( gitBranch )
gitPullResult = main.ONOSbench.gitPull()
# values of 1 or 3 are good
utilities.assert_lesser( expect=0, actual=gitPullResult,
onpass="Git pull successful",
onfail="Git pull failed" )
-
- main.step( "Using mvn clean and install" )
- cleanInstallResult = main.ONOSbench.cleanInstall()
- utilities.assert_equals( expect=main.TRUE,
- actual=cleanInstallResult,
- onpass="MCI successful",
- onfail="MCI failed" )
else:
main.log.warn( "Did not pull new code so skipping mvn " +
"clean install" )
main.ONOSbench.getVersion( report=True )
+
+ main.step( "Using mvn clean install" )
+ cleanInstallResult = main.TRUE
+ if gitPullResult == main.TRUE:
+ cleanInstallResult = main.ONOSbench.cleanInstall()
+ utilities.assert_equals( expect=main.TRUE,
+ actual=cleanInstallResult,
+ onpass="MCI successful",
+ onfail="MCI failed" )
# GRAPHS
# NOTE: important params here:
# job = name of Jenkins job
@@ -227,10 +231,8 @@
onpass="App Ids seem to be correct",
onfail="Something is wrong with app Ids" )
- case1Result = ( cleanInstallResult and packageResult and
- cellResult and verifyResult and onosInstallResult
- and onosIsupResult and cliResults )
- if case1Result == main.FALSE:
+ if cliResults == main.FALSE:
+ main.log.error( "Failed to start ONOS, stopping test" )
main.cleanup()
main.exit()
@@ -253,8 +255,12 @@
assert ONOS6Port, "ONOS6Port not defined"
assert ONOS7Port, "ONOS7Port not defined"
- main.log.report( "Assigning switches to controllers" )
main.case( "Assigning Controllers" )
+ main.caseExplaination = "Assign switches to ONOS using 'ovs-vsctl' " +\
+ "and check that an ONOS node becomes the " +\
+ "master of the device. Then manually assign" +\
+ " mastership to specific ONOS nodes using" +\
+ " 'device-role'"
main.step( "Assign switches to controllers" )
# TODO: rewrite this function to take lists of ips and ports?
@@ -286,8 +292,6 @@
"not in the list of controllers s" +
str( i ) + " is connecting to." )
mastershipCheck = main.FALSE
- if mastershipCheck == main.TRUE:
- main.log.report( "Switch mastership assigned correctly" )
utilities.assert_equals(
expect=main.TRUE,
actual=mastershipCheck,
@@ -395,11 +399,12 @@
assert utilities.assert_equals, "utilities.assert_equals not defined"
assert CLIs, "CLIs not defined"
assert nodes, "nodes not defined"
- main.log.report( "Adding host intents" )
main.case( "Adding host Intents" )
-
- main.step( "Discovering Hosts( Via pingall for now )" )
- # FIXME: Once we have a host discovery mechanism, use that instead
+ main.caseExplaination = "Discover hosts by using pingall then " +\
+ "assign predetermined host-to-host intents." +\
+ " After installation, check that the intent" +\
+ " is distributed to all nodes and the state" +\
+ " is INSTALLED"
# install onos-app-fwd
main.step( "Install reactive forwarding app" )
@@ -408,6 +413,7 @@
onpass="Install fwd successful",
onfail="Install fwd failed" )
+ main.step( "Check app ids" )
appCheck = main.TRUE
threads = []
for i in range( numControllers ):
@@ -427,6 +433,8 @@
onpass="App Ids seem to be correct",
onfail="Something is wrong with app Ids" )
+ main.step( "Discovering Hosts( Via pingall for now )" )
+ # FIXME: Once we have a host discovery mechanism, use that instead
# REACTIVE FWD test
pingResult = main.FALSE
for i in range(2): # Retry if pingall fails first time
@@ -449,8 +457,10 @@
utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
onpass="Uninstall fwd successful",
onfail="Uninstall fwd failed" )
- main.step( "Check app ids check" )
+
+ main.step( "Check app ids" )
threads = []
+ appCheck2 = main.TRUE
for i in range( numControllers ):
t = main.Thread( target=CLIs[i].appToIDCheck,
name="appToIDCheck-" + str( i ),
@@ -460,15 +470,15 @@
for t in threads:
t.join()
- appCheck = appCheck and t.result
- if appCheck != main.TRUE:
+ appCheck2 = appCheck2 and t.result
+ if appCheck2 != main.TRUE:
main.log.warn( CLIs[0].apps() )
main.log.warn( CLIs[0].appIDs() )
- utilities.assert_equals( expect=main.TRUE, actual=appCheck,
+ utilities.assert_equals( expect=main.TRUE, actual=appCheck2,
onpass="App Ids seem to be correct",
onfail="Something is wrong with app Ids" )
- main.step( "Add host intents" )
+ main.step( "Add host intents via cli" )
intentIds = []
# TODO: move the host numbers to params
# Maybe look at all the paths we ping?
@@ -511,7 +521,10 @@
except ( ValueError, TypeError ):
main.log.warn( repr( hosts ) )
hostResult = main.FALSE
- # FIXME: DEBUG
+ utilities.assert_equals( expect=main.TRUE, actual=hostResult,
+ onpass="Found a host id for each host",
+ onfail="Error looking up host ids" )
+
intentStart = time.time()
onosIds = main.ONOScli1.getAllIntentsId()
main.log.info( "Submitted intents: " + str( intentIds ) )
@@ -606,13 +619,11 @@
main.log.exception( "Error parsing pending map" )
main.log.error( repr( pendingMap ) )
- intentAddResult = bool( pingResult and hostResult and intentAddResult
- and not missingIntents and installedCheck )
- utilities.assert_equals(
- expect=True,
- actual=intentAddResult,
- onpass="Pushed host intents to ONOS",
- onfail="Error in pushing host intents to ONOS" )
+ intentAddResult = bool( intentAddResult and not missingIntents and
+ installedCheck )
+ if not intentAddResult:
+ main.log.error( "Error in pushing host intents to ONOS" )
+
main.step( "Intent Anti-Entropy dispersion" )
for i in range(100):
correct = True
@@ -745,9 +756,11 @@
assert utilities.assert_equals, "utilities.assert_equals not defined"
assert CLIs, "CLIs not defined"
assert nodes, "nodes not defined"
- description = " Ping across added host intents"
- main.log.report( description )
- main.case( description )
+        main.case( "Verify connectivity by sending traffic across Intents" )
+ main.caseExplaination = "Ping across added host intents to check " +\
+ "functionality and check the state of " +\
+ "the intent"
+ main.step( "Ping across added host intents" )
PingResult = main.TRUE
for i in range( 8, 18 ):
ping = main.Mininet1.pingHost( src="h" + str( i ),
@@ -781,83 +794,102 @@
onpass="Intents have been installed correctly and pings work",
onfail="Intents have not been installed correctly, pings failed." )
+ main.step( "Check Intent state" )
installedCheck = True
- if PingResult is not main.TRUE:
- # Print the intent states
- intents = main.ONOScli1.intents()
- intentStates = []
- main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
- count = 0
- # Iter through intents of a node
- try:
- for intent in json.loads( intents ):
- state = intent.get( 'state', None )
- if "INSTALLED" not in state:
- installedCheck = False
- intentId = intent.get( 'id', None )
- intentStates.append( ( intentId, state ) )
- except ( ValueError, TypeError ):
- main.log.exception( "Error parsing intents." )
- intentStates.sort()
- for i, s in intentStates:
- count += 1
- main.log.info( "%-6s%-15s%-15s" %
- ( str( count ), str( i ), str( s ) ) )
- leaders = main.ONOScli1.leaders()
- try:
- if leaders:
- parsedLeaders = json.loads( leaders )
- main.log.warn( json.dumps( parsedLeaders,
- sort_keys=True,
- indent=4,
- separators=( ',', ': ' ) ) )
- # check for all intent partitions
- # check for election
- topics = []
- for i in range( 14 ):
- topics.append( "intent-partition-" + str( i ) )
- # FIXME: this should only be after we start the app
- topics.append( "org.onosproject.election" )
- main.log.debug( topics )
- ONOStopics = [ j['topic'] for j in parsedLeaders ]
- for topic in topics:
- if topic not in ONOStopics:
- main.log.error( "Error: " + topic +
- " not in leaders" )
- else:
- main.log.error( "leaders() returned None" )
- except ( ValueError, TypeError ):
- main.log.exception( "Error parsing leaders" )
- main.log.error( repr( leaders ) )
- partitions = main.ONOScli1.partitions()
- try:
- if partitions :
- parsedPartitions = json.loads( partitions )
- main.log.warn( json.dumps( parsedPartitions,
- sort_keys=True,
- indent=4,
- separators=( ',', ': ' ) ) )
- # TODO check for a leader in all paritions
- # TODO check for consistency among nodes
- else:
- main.log.error( "partitions() returned None" )
- except ( ValueError, TypeError ):
- main.log.exception( "Error parsing partitions" )
- main.log.error( repr( partitions ) )
- pendingMap = main.ONOScli1.pendingMap()
- try:
- if pendingMap :
- parsedPending = json.loads( pendingMap )
- main.log.warn( json.dumps( parsedPending,
- sort_keys=True,
- indent=4,
- separators=( ',', ': ' ) ) )
- # TODO check something here?
- else:
- main.log.error( "pendingMap() returned None" )
- except ( ValueError, TypeError ):
- main.log.exception( "Error parsing pending map" )
- main.log.error( repr( pendingMap ) )
+ # Print the intent states
+ intents = main.ONOScli1.intents()
+ intentStates = []
+ main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
+ count = 0
+ # Iter through intents of a node
+ try:
+ for intent in json.loads( intents ):
+ state = intent.get( 'state', None )
+ if "INSTALLED" not in state:
+ installedCheck = False
+ intentId = intent.get( 'id', None )
+ intentStates.append( ( intentId, state ) )
+ except ( ValueError, TypeError ):
+ main.log.exception( "Error parsing intents." )
+ # Print states
+ intentStates.sort()
+ for i, s in intentStates:
+ count += 1
+ main.log.info( "%-6s%-15s%-15s" %
+ ( str( count ), str( i ), str( s ) ) )
+ utilities.assert_equals( expect=True, actual=installedCheck,
+ onpass="Intents are all INSTALLED",
+ onfail="Intents are not all in " +\
+ "INSTALLED state" )
+
+ main.step( "Check leadership of topics" )
+ leaders = main.ONOScli1.leaders()
+ topicCheck = main.TRUE
+ try:
+ if leaders:
+ parsedLeaders = json.loads( leaders )
+ main.log.warn( json.dumps( parsedLeaders,
+ sort_keys=True,
+ indent=4,
+ separators=( ',', ': ' ) ) )
+ # check for all intent partitions
+ # check for election
+ # TODO: Look at Devices as topics now that it uses this system
+ topics = []
+ for i in range( 14 ):
+ topics.append( "intent-partition-" + str( i ) )
+ # FIXME: this should only be after we start the app
+ # FIXME: topics.append( "org.onosproject.election" )
+ # Print leaders output
+ main.log.debug( topics )
+ ONOStopics = [ j['topic'] for j in parsedLeaders ]
+ for topic in topics:
+ if topic not in ONOStopics:
+ main.log.error( "Error: " + topic +
+ " not in leaders" )
+ topicCheck = main.FALSE
+ else:
+ main.log.error( "leaders() returned None" )
+ topicCheck = main.FALSE
+ except ( ValueError, TypeError ):
+ topicCheck = main.FALSE
+ main.log.exception( "Error parsing leaders" )
+ main.log.error( repr( leaders ) )
+ # TODO: Check for a leader of these topics
+ utilities.assert_equals( expect=main.TRUE, actual=topicCheck,
+                                 onpass="intent partitions are in leaders",
+ onfail="Some topics were lost " )
+ # Print partitions
+ partitions = main.ONOScli1.partitions()
+ try:
+ if partitions :
+ parsedPartitions = json.loads( partitions )
+ main.log.warn( json.dumps( parsedPartitions,
+ sort_keys=True,
+ indent=4,
+ separators=( ',', ': ' ) ) )
+ # TODO check for a leader in all paritions
+ # TODO check for consistency among nodes
+ else:
+ main.log.error( "partitions() returned None" )
+ except ( ValueError, TypeError ):
+ main.log.exception( "Error parsing partitions" )
+ main.log.error( repr( partitions ) )
+ # Print Pending Map
+ pendingMap = main.ONOScli1.pendingMap()
+ try:
+ if pendingMap :
+ parsedPending = json.loads( pendingMap )
+ main.log.warn( json.dumps( parsedPending,
+ sort_keys=True,
+ indent=4,
+ separators=( ',', ': ' ) ) )
+ # TODO check something here?
+ else:
+ main.log.error( "pendingMap() returned None" )
+ except ( ValueError, TypeError ):
+ main.log.exception( "Error parsing pending map" )
+ main.log.error( repr( pendingMap ) )
if not installedCheck:
main.log.info( "Waiting 60 seconds to see if the state of " +
@@ -940,6 +972,37 @@
main.log.error( repr( pendingMap ) )
main.log.debug( CLIs[0].flows( jsonFormat=False ) )
+ main.step( "Wait a minute then ping again" )
+ PingResult = main.TRUE
+ for i in range( 8, 18 ):
+ ping = main.Mininet1.pingHost( src="h" + str( i ),
+ target="h" + str( i + 10 ) )
+ PingResult = PingResult and ping
+ if ping == main.FALSE:
+ main.log.warn( "Ping failed between h" + str( i ) +
+ " and h" + str( i + 10 ) )
+ elif ping == main.TRUE:
+ main.log.info( "Ping test passed!" )
+ # Don't set PingResult or you'd override failures
+ if PingResult == main.FALSE:
+ main.log.report(
+ "Intents have not been installed correctly, pings failed." )
+ # TODO: pretty print
+ main.log.warn( "ONOS1 intents: " )
+ try:
+ tmpIntents = main.ONOScli1.intents()
+ main.log.warn( json.dumps( json.loads( tmpIntents ),
+ sort_keys=True,
+ indent=4,
+ separators=( ',', ': ' ) ) )
+ except ( ValueError, TypeError ):
+ main.log.warn( repr( tmpIntents ) )
+ utilities.assert_equals(
+ expect=main.TRUE,
+ actual=PingResult,
+ onpass="Intents have been installed correctly and pings work",
+ onfail="Intents have not been installed correctly, pings failed." )
+
def CASE5( self, main ):
"""
Reading state of ONOS
@@ -954,7 +1017,6 @@
# assumes that sts is already in you PYTHONPATH
from sts.topology.teston_topology import TestONTopology
- main.log.report( "Setting up and gathering data for current state" )
main.case( "Setting up and gathering data for current state" )
# The general idea for this test case is to pull the state of
# ( intents,flows, topology,... ) from each ONOS node
@@ -1376,7 +1438,7 @@
for controller in range( 0, len( hosts ) ):
controllerStr = str( controller + 1 )
for host in hosts[ controller ]:
- if not host.get( 'ips', [ ] ):
+ if not host.get( 'ipAddresses', [ ] ):
main.log.error( "DEBUG:Error with host ips on controller" +
controllerStr + ": " + str( host ) )
ipResult = main.FALSE
@@ -1495,6 +1557,7 @@
assert nodes, "nodes not defined"
main.case( "Restart minority of ONOS nodes" )
main.step( "Killing 3 ONOS nodes" )
+ killTime = time.time()
# TODO: Randomize these nodes or base this on partitions
# TODO: use threads in this case
killResults = main.ONOSbench.onosKill( nodes[0].ip_address )
@@ -1533,7 +1596,28 @@
# Grab the time of restart so we chan check how long the gossip
# protocol has had time to work
- main.restartTime = time.time()
+ main.restartTime = time.time() - killTime
+ main.log.debug( "Restart time: " + str( main.restartTime ) )
+ '''
+ # FIXME: revisit test plan for election with madan
+ # Rerun for election on restarted nodes
+ run1 = CLIs[0].electionTestRun()
+ run2 = CLIs[1].electionTestRun()
+ run3 = CLIs[2].electionTestRun()
+ runResults = run1 and run2 and run3
+ utilities.assert_equals( expect=main.TRUE, actual=runResults,
+ onpass="Reran for election",
+ onfail="Failed to rerun for election" )
+ '''
+        # TODO: Make this configurable. Also, we are breaking the above timer
+ time.sleep( 60 )
+ main.log.debug( CLIs[0].nodes( jsonFormat=False ) )
+ main.log.debug( CLIs[0].leaders( jsonFormat=False ) )
+ main.log.debug( CLIs[0].partitions( jsonFormat=False ) )
+ time.sleep( 200 )
+ main.log.debug( CLIs[0].nodes( jsonFormat=False ) )
+ main.log.debug( CLIs[0].leaders( jsonFormat=False ) )
+ main.log.debug( CLIs[0].partitions( jsonFormat=False ) )
def CASE7( self, main ):
"""
@@ -1569,7 +1653,6 @@
main.step( "Read device roles from ONOS" )
ONOSMastership = []
- mastershipCheck = main.FALSE
consistentMastership = True
rolesResults = True
threads = []
@@ -1619,9 +1702,9 @@
sort_keys=True,
indent=4,
separators=( ',', ': ' ) ) )
- elif rolesResults and not consistentMastership:
- mastershipCheck = main.TRUE
+ # NOTE: we expect mastership to change on controller failure
+ '''
description2 = "Compare switch roles from before failure"
main.step( description2 )
try:
@@ -1647,15 +1730,12 @@
else:
main.log.warn( "Mastership of switch %s changed" % switchDPID )
mastershipCheck = main.FALSE
- if mastershipCheck == main.TRUE:
- main.log.report( "Mastership of Switches was not changed" )
utilities.assert_equals(
expect=main.TRUE,
actual=mastershipCheck,
onpass="Mastership of Switches was not changed",
onfail="Mastership of some switches changed" )
- # NOTE: we expect mastership to change on controller failure
- mastershipCheck = consistentMastership
+ '''
main.step( "Get the intents and compare across all nodes" )
ONOSIntents = []
@@ -1760,7 +1840,7 @@
sameIntents = main.TRUE
if intentState and intentState == ONOSIntents[ 0 ]:
sameIntents = main.TRUE
- main.log.report( "Intents are consistent with before failure" )
+ main.log.info( "Intents are consistent with before failure" )
# TODO: possibly the states have changed? we may need to figure out
# what the acceptable states are
else:
@@ -1795,8 +1875,6 @@
if FlowTables == main.FALSE:
main.log.info( "Differences in flow table for switch: s" +
str( i + 1 ) )
- if FlowTables == main.TRUE:
- main.log.report( "No changes were found in the flow tables" )
utilities.assert_equals(
expect=main.TRUE,
actual=FlowTables,
@@ -1841,7 +1919,7 @@
leaderN = cli.electionTestLeader()
leaderList.append( leaderN )
if leaderN == main.FALSE:
- # error in response
+ # error in response
main.log.report( "Something is wrong with " +
"electionTestLeader function, check the" +
" error logs" )
@@ -1861,10 +1939,6 @@
main.log.error(
"Inconsistent view of leader for the election test app" )
# TODO: print the list
- if leaderResult:
- main.log.report( "Leadership election tests passed( consistent " +
- "view of leader across listeners and a new " +
- "leader was re-elected if applicable )" )
utilities.assert_equals(
expect=main.TRUE,
actual=leaderResult,
@@ -1888,16 +1962,26 @@
assert CLIs, "CLIs not defined"
assert nodes, "nodes not defined"
- description = "Compare ONOS Topology view to Mininet topology"
- main.case( description )
- main.log.report( description )
+ main.case( "Compare ONOS Topology view to Mininet topology" )
+ main.caseExplaination = "Compare topology objects between Mininet" +\
+ " and ONOS"
main.step( "Create TestONTopology object" )
- ctrls = []
- for node in nodes:
- temp = ( node, node.name, node.ip_address, 6633 )
- ctrls.append( temp )
- MNTopo = TestONTopology( main.Mininet1, ctrls )
+ try:
+ ctrls = []
+ for node in nodes:
+ temp = ( node, node.name, node.ip_address, 6633 )
+ ctrls.append( temp )
+ MNTopo = TestONTopology( main.Mininet1, ctrls )
+ except Exception:
+ objResult = main.FALSE
+ else:
+ objResult = main.TRUE
+ utilities.assert_equals( expect=main.TRUE, actual=objResult,
+ onpass="Created TestONTopology object",
+ onfail="Exception while creating " +
+ "TestONTopology object" )
+ main.step( "Comparing ONOS topology to MN" )
devicesResults = main.TRUE
portsResults = main.TRUE
linksResults = main.TRUE
@@ -1946,7 +2030,7 @@
for controller in range( 0, len( hosts ) ):
controllerStr = str( controller + 1 )
for host in hosts[ controller ]:
- if host is None or host.get( 'ips', [] ) == []:
+ if host is None or host.get( 'ipAddresses', [] ) == []:
main.log.error(
"DEBUG:Error with host ips on controller" +
controllerStr + ": " + str( host ) )
@@ -2139,8 +2223,6 @@
utilities.assert_equals( expect=main.TRUE, actual=topoResult,
onpass="Topology Check Test successful",
onfail="Topology Check Test NOT successful" )
- if topoResult == main.TRUE:
- main.log.report( "ONOS topology view matches Mininet topology" )
# FIXME: move this to an ONOS state case
main.step( "Checking ONOS nodes" )
@@ -2417,6 +2499,15 @@
main.step( "Packing and rotating pcap archives" )
os.system( "~/TestON/dependencies/rotate.sh " + str( testname ) )
+ try:
+ timerLog = open( main.logdir + "/Timers.csv", 'w')
+ # Overwrite the file with the restart time and close
+ timerLog.write( "Restart\n" )
+ timerLog.write( str( main.restartTime ) )
+ timerLog.close()
+ except NameError, e:
+ main.log.exception(e)
+
def CASE14( self, main ):
"""
start election app on all onos nodes
@@ -2427,10 +2518,17 @@
assert CLIs, "CLIs not defined"
assert nodes, "nodes not defined"
- leaderResult = main.TRUE
main.case("Start Leadership Election app")
main.step( "Install leadership election app" )
- main.ONOScli1.activateApp( "org.onosproject.election" )
+ appResult = main.ONOScli1.activateApp( "org.onosproject.election" )
+ utilities.assert_equals(
+ expect=main.TRUE,
+ actual=appResult,
+ onpass="Election app installed",
+ onfail="Something went wrong with installing Leadership election" )
+
+ main.step( "Run for election on each node" )
+ leaderResult = main.TRUE
leaders = []
for cli in CLIs:
cli.electionTestRun()
@@ -2442,19 +2540,23 @@
str( leader ) + "'" )
leaderResult = main.FALSE
leaders.append( leader )
- if len( set( leaders ) ) != 1:
- leaderResult = main.FALSE
- main.log.error( "Results of electionTestLeader is order of CLIs:" +
- str( leaders ) )
- if leaderResult:
- main.log.report( "Leadership election tests passed( consistent " +
- "view of leader across listeners and a leader " +
- "was elected )" )
utilities.assert_equals(
expect=main.TRUE,
actual=leaderResult,
- onpass="Leadership election passed",
- onfail="Something went wrong with Leadership election" )
+ onpass="Successfully ran for leadership",
+ onfail="Failed to run for leadership" )
+
+ main.step( "Check that each node shows the same leader" )
+ sameLeader = main.TRUE
+ if len( set( leaders ) ) != 1:
+ sameLeader = main.FALSE
+ main.log.error( "Results of electionTestLeader is order of CLIs:" +
+ str( leaders ) )
+ utilities.assert_equals(
+ expect=main.TRUE,
+ actual=sameLeader,
+ onpass="Leadership is consistent for the election topic",
+ onfail="Nodes have different leaders" )
def CASE15( self, main ):
"""
@@ -2471,12 +2573,29 @@
description = "Check that Leadership Election is still functional"
main.log.report( description )
main.case( description )
+
+ main.step( "Check that each node shows the same leader" )
+ sameLeader = main.TRUE
+ leaders = []
+ for cli in CLIs:
+ leader = cli.electionTestLeader()
+ leaders.append( leader )
+ if len( set( leaders ) ) != 1:
+ sameLeader = main.FALSE
+ main.log.error( "Results of electionTestLeader is order of CLIs:" +
+ str( leaders ) )
+ utilities.assert_equals(
+ expect=main.TRUE,
+ actual=sameLeader,
+ onpass="Leadership is consistent for the election topic",
+ onfail="Nodes have different leaders" )
+
main.step( "Find current leader and withdraw" )
leader = main.ONOScli1.electionTestLeader()
# do some sanity checking on leader before using it
withdrawResult = main.FALSE
if leader is None or leader == main.FALSE:
- main.log.report(
+ main.log.error(
"Leader for the election app should be an ONOS node," +
"instead got '" + str( leader ) + "'" )
leaderResult = main.FALSE
@@ -2492,8 +2611,8 @@
utilities.assert_equals(
expect=main.TRUE,
actual=withdrawResult,
- onpass="App was withdrawn from election",
- onfail="App was not withdrawn from election" )
+ onpass="Node was withdrawn from election",
+ onfail="Node was not withdrawn from election" )
main.step( "Make sure new leader is elected" )
# FIXME: use threads
@@ -2532,11 +2651,6 @@
main.log.report( "ONOS" + str( n + 1 ) + " response: " +
str( leaderList[ n ] ) )
leaderResult = leaderResult and consistentLeader
- if leaderResult:
- main.log.report( "Leadership election tests passed( consistent " +
- "view of leader across listeners and a new " +
- "leader was elected when the old leader " +
- "resigned )" )
utilities.assert_equals(
expect=main.TRUE,
actual=leaderResult,
@@ -2555,6 +2669,7 @@
onpass="App re-ran for election",
onfail="App failed to run for election" )
+ main.step( "Leader did not change when old leader re-ran" )
afterRun = main.ONOScli1.electionTestLeader()
# verify leader didn't just change
if afterRun == leaderList[ 0 ]:
@@ -2569,14 +2684,6 @@
onfail="Something went wrong with Leadership election after " +
"the old leader re-ran for election" )
- case15Result = withdrawResult and leaderResult and runResult and\
- afterResult
- utilities.assert_equals(
- expect=main.TRUE,
- actual=case15Result,
- onpass="Leadership election is still functional",
- onfail="Leadership Election is no longer functional" )
-
def CASE16( self, main ):
"""
Install Distributed Primitives app
@@ -2610,6 +2717,7 @@
actual=appResults,
onpass="Primitives app activated",
onfail="Primitives app not activated" )
+ time.sleep( 5 )  # To allow all nodes to activate
def CASE17( self, main ):
"""
@@ -2653,11 +2761,13 @@
main.step( "Increment and get a default counter on each node" )
pCounters = []
threads = []
+ addedPValues = []
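+ # Track the expected counter values so each increment can be verified in the results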
for i in range( numControllers ):
t = main.Thread( target=CLIs[i].counterTestIncrement,
name="counterIncrement-" + str( i ),
args=[ pCounterName ] )
pCounterValue += 1
+ addedPValues.append( pCounterValue )
threads.append( t )
t.start()
@@ -2666,8 +2776,12 @@
pCounters.append( t.result )
# Check that counter incremented numController times
pCounterResults = True
- for i in range( numControllers ):
- pCounterResults and ( i + 1 ) in pCounters
+ for i in addedPValues:
+ tmpResult = i in pCounters
+ pCounterResults = pCounterResults and tmpResult
+ if not tmpResult:
+ main.log.error( str( i ) + " is not in partitioned "
+ "counter incremented results" )
utilities.assert_equals( expect=True,
actual=pCounterResults,
onpass="Default counter incremented",
@@ -2676,6 +2790,7 @@
main.step( "Increment and get an in memory counter on each node" )
iCounters = []
+ addedIValues = []
threads = []
for i in range( numControllers ):
t = main.Thread( target=CLIs[i].counterTestIncrement,
@@ -2683,6 +2798,7 @@
args=[ iCounterName ],
kwargs={ "inMemory": True } )
iCounterValue += 1
+ addedIValues.append( iCounterValue )
threads.append( t )
t.start()
@@ -2691,8 +2807,12 @@
iCounters.append( t.result )
# Check that counter incremented numController times
iCounterResults = True
- for i in range( numControllers ):
- iCounterResults and ( i + 1 ) in iCounters
+ for i in addedIValues:
+ tmpResult = i in iCounters
+ iCounterResults = iCounterResults and tmpResult
+ if not tmpResult:
+ main.log.error( str( i ) + " is not in the in-memory "
+ "counter incremented results" )
utilities.assert_equals( expect=True,
actual=iCounterResults,
onpass="In memory counter incremented",
diff --git a/TestON/tests/HATestNetworkPartition/HATestNetworkPartition.py b/TestON/tests/HATestNetworkPartition/HATestNetworkPartition.py
index 3c4541c..e4658e7 100644
--- a/TestON/tests/HATestNetworkPartition/HATestNetworkPartition.py
+++ b/TestON/tests/HATestNetworkPartition/HATestNetworkPartition.py
@@ -120,12 +120,27 @@
main.ONOSbench.gitCheckout( gitBranch )
gitPullResult = main.ONOSbench.gitPull()
- main.step( "Using mvn clean & install" )
+ main.step( "Using mvn clean and install" )
cleanInstallResult = main.ONOSbench.cleanInstall()
else:
main.log.warn( "Did not pull new code so skipping mvn " +
"clean install" )
main.ONOSbench.getVersion( report=True )
+ # GRAPHS
+ # NOTE: important params here:
+ # job = name of Jenkins job
+ # Plot Name = Plot-HA, only can be used if multiple plots
+ # index = The number of the graph under plot name
+ job = "HANetworkPartition"
+ graphs = '<ac:structured-macro ac:name="html">\n'
+ graphs += '<ac:plain-text-body><![CDATA[\n'
+ graphs += '<iframe src="https://onos-jenkins.onlab.us/job/' + job +\
+ '/plot/getPlot?index=0&width=500&height=300"' +\
+ ' noborder="0" width="500" height="300" scrolling="yes" ' +\
+ 'seamless="seamless"></iframe>\n'
+ graphs += ']]></ac:plain-text-body>\n'
+ graphs += '</ac:structured-macro>\n'
+ main.log.wiki(graphs)
main.step( "Creating ONOS package" )
packageResult = main.ONOSbench.onosPackage()
@@ -218,6 +233,7 @@
onfail="Test startup NOT successful" )
if case1Result == main.FALSE:
+ main.log.error( "Failed to start ONOS, stopping test" )
main.cleanup()
main.exit()
@@ -1414,7 +1430,7 @@
for controller in range( 0, len( hosts ) ):
controllerStr = str( controller + 1 )
for host in hosts[ controller ]:
- if host[ 'ips' ] == []:
+ if host[ 'ipAddresses' ] == []:
main.log.error(
"DEBUG:Error with host ips on controller" +
controllerStr + ": " + str( host ) )
diff --git a/TestON/tests/HATestSanity/HATestSanity.params b/TestON/tests/HATestSanity/HATestSanity.params
index dace37a..2440ec6 100644
--- a/TestON/tests/HATestSanity/HATestSanity.params
+++ b/TestON/tests/HATestSanity/HATestSanity.params
@@ -17,7 +17,6 @@
#CASE15: Check that Leadership Election is still functional
#CASE16: Install Distributed Primitives app
#CASE17: Check for basic functionality with distributed primitives
- #1,2,8,3,4,5,14,[6],8,7,4,15,9,8,4,10,8,4,11,8,4,12,8,4,13
#1,2,8,3,4,5,14,16,17,[6],8,7,4,15,17,9,8,4,10,8,4,11,8,4,12,8,4,13
<testcases>1,2,8,3,4,5,14,16,17,[6],8,7,4,15,17,9,8,4,10,8,4,11,8,4,12,8,4,13</testcases>
<ENV>
diff --git a/TestON/tests/HATestSanity/HATestSanity.py b/TestON/tests/HATestSanity/HATestSanity.py
index 8bc21b0..fe513ac 100644
--- a/TestON/tests/HATestSanity/HATestSanity.py
+++ b/TestON/tests/HATestSanity/HATestSanity.py
@@ -49,6 +49,9 @@
"""
main.log.report( "ONOS HA Sanity test - initialization" )
main.case( "Setting up test environment" )
+ main.caseExplaination = "Setup the test environment including " +\
+ "installing ONOS, starting Mininet and ONOS" +\
+ "cli sessions."
# TODO: save all the timers and output them for plotting
# load some variables from the params file
@@ -115,26 +118,27 @@
onpass="Mininet Started",
onfail="Error starting Mininet" )
- main.step( "Compiling the latest version of ONOS" )
+ main.step( "Git checkout and pull " + gitBranch )
if PULLCODE:
- main.step( "Git checkout and pull " + gitBranch )
main.ONOSbench.gitCheckout( gitBranch )
gitPullResult = main.ONOSbench.gitPull()
# values of 1 or 3 are good
utilities.assert_lesser( expect=0, actual=gitPullResult,
onpass="Git pull successful",
onfail="Git pull failed" )
-
- main.step( "Using mvn clean and install" )
- cleanInstallResult = main.ONOSbench.cleanInstall()
- utilities.assert_equals( expect=main.TRUE,
- actual=cleanInstallResult,
- onpass="MCI successful",
- onfail="MCI failed" )
else:
main.log.warn( "Did not pull new code so skipping mvn " +
"clean install" )
main.ONOSbench.getVersion( report=True )
+
+ main.step( "Using mvn clean install" )
+ cleanInstallResult = main.TRUE
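+ # Only run mvn clean install when new code was actually pulled; otherwise keep the default of TRUE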
+ if gitPullResult == main.TRUE:
+ cleanInstallResult = main.ONOSbench.cleanInstall()
+ utilities.assert_equals( expect=main.TRUE,
+ actual=cleanInstallResult,
+ onpass="MCI successful",
+ onfail="MCI failed" )
# GRAPHS
# NOTE: important params here:
# job = name of Jenkins job
@@ -229,10 +233,8 @@
onpass="App Ids seem to be correct",
onfail="Something is wrong with app Ids" )
- case1Result = ( cleanInstallResult and packageResult and
- cellResult and verifyResult and onosInstallResult
- and onosIsupResult and cliResults )
- if case1Result == main.FALSE:
+ if cliResults == main.FALSE:
+ main.log.error( "Failed to start ONOS, stopping test" )
main.cleanup()
main.exit()
@@ -255,8 +257,12 @@
assert ONOS6Port, "ONOS6Port not defined"
assert ONOS7Port, "ONOS7Port not defined"
- main.log.report( "Assigning switches to controllers" )
main.case( "Assigning Controllers" )
+ main.caseExplaination = "Assign switches to ONOS using 'ovs-vsctl' " +\
+ "and check that an ONOS node becomes the " +\
+ "master of the device. Then manually assign" +\
+ " mastership to specific ONOS nodes using" +\
+ " 'device-role'"
main.step( "Assign switches to controllers" )
# TODO: rewrite this function to take lists of ips and ports?
@@ -397,11 +403,12 @@
assert utilities.assert_equals, "utilities.assert_equals not defined"
assert CLIs, "CLIs not defined"
assert nodes, "nodes not defined"
- main.log.report( "Adding host intents" )
main.case( "Adding host Intents" )
-
- main.step( "Discovering Hosts( Via pingall for now )" )
- # FIXME: Once we have a host discovery mechanism, use that instead
+ main.caseExplaination = "Discover hosts by using pingall then " +\
+ "assign predetermined host-to-host intents." +\
+ " After installation, check that the intent" +\
+ " is distributed to all nodes and the state" +\
+ " is INSTALLED"
# install onos-app-fwd
main.step( "Install reactive forwarding app" )
@@ -410,6 +417,7 @@
onpass="Install fwd successful",
onfail="Install fwd failed" )
+ main.step( "Check app ids" )
appCheck = main.TRUE
threads = []
for i in range( numControllers ):
@@ -429,6 +437,8 @@
onpass="App Ids seem to be correct",
onfail="Something is wrong with app Ids" )
+ main.step( "Discovering Hosts( Via pingall for now )" )
+ # FIXME: Once we have a host discovery mechanism, use that instead
# REACTIVE FWD test
pingResult = main.FALSE
for i in range(2): # Retry if pingall fails first time
@@ -451,8 +461,15 @@
utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
onpass="Uninstall fwd successful",
onfail="Uninstall fwd failed" )
- main.step( "Check app ids check" )
+ '''
+ main.Mininet1.handle.sendline( "py [ h.cmd( \"arping -c 1 10.1.1.1 \" ) for h in net.hosts ] ")
+ import time
+ time.sleep(60)
+ '''
+
+ main.step( "Check app ids" )
threads = []
+ appCheck2 = main.TRUE
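+ # Use a fresh result variable so this check is independent of the earlier app id check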
for i in range( numControllers ):
t = main.Thread( target=CLIs[i].appToIDCheck,
name="appToIDCheck-" + str( i ),
@@ -462,15 +479,15 @@
for t in threads:
t.join()
- appCheck = appCheck and t.result
- if appCheck != main.TRUE:
+ appCheck2 = appCheck2 and t.result
+ if appCheck2 != main.TRUE:
main.log.warn( CLIs[0].apps() )
main.log.warn( CLIs[0].appIDs() )
- utilities.assert_equals( expect=main.TRUE, actual=appCheck,
+ utilities.assert_equals( expect=main.TRUE, actual=appCheck2,
onpass="App Ids seem to be correct",
onfail="Something is wrong with app Ids" )
- main.step( "Add host intents" )
+ main.step( "Add host intents via cli" )
intentIds = []
# TODO: move the host numbers to params
# Maybe look at all the paths we ping?
@@ -513,7 +530,10 @@
except ( ValueError, TypeError ):
main.log.warn( repr( hosts ) )
hostResult = main.FALSE
- # FIXME: DEBUG
+ utilities.assert_equals( expect=main.TRUE, actual=hostResult,
+ onpass="Found a host id for each host",
+ onfail="Error looking up host ids" )
+
intentStart = time.time()
onosIds = main.ONOScli1.getAllIntentsId()
main.log.info( "Submitted intents: " + str( intentIds ) )
@@ -608,13 +628,11 @@
main.log.exception( "Error parsing pending map" )
main.log.error( repr( pendingMap ) )
- intentAddResult = bool( pingResult and hostResult and intentAddResult
- and not missingIntents and installedCheck )
- utilities.assert_equals(
- expect=True,
- actual=intentAddResult,
- onpass="Pushed host intents to ONOS",
- onfail="Error in pushing host intents to ONOS" )
+ intentAddResult = bool( intentAddResult and not missingIntents and
+ installedCheck )
+ if not intentAddResult:
+ main.log.error( "Error in pushing host intents to ONOS" )
+
main.step( "Intent Anti-Entropy dispersion" )
for i in range(100):
correct = True
@@ -747,9 +765,11 @@
assert utilities.assert_equals, "utilities.assert_equals not defined"
assert CLIs, "CLIs not defined"
assert nodes, "nodes not defined"
- description = " Ping across added host intents"
- main.log.report( description )
- main.case( description )
+ main.case( "Verify connectivity by sendind traffic across Intents" )
+ main.caseExplaination = "Ping across added host intents to check " +\
+ "functionality and check the state of " +\
+ "the intent"
+ main.step( "Ping across added host intents" )
PingResult = main.TRUE
for i in range( 8, 18 ):
ping = main.Mininet1.pingHost( src="h" + str( i ),
@@ -783,83 +803,102 @@
onpass="Intents have been installed correctly and pings work",
onfail="Intents have not been installed correctly, pings failed." )
+ main.step( "Check Intent state" )
installedCheck = True
- if PingResult is not main.TRUE:
- # Print the intent states
- intents = main.ONOScli1.intents()
- intentStates = []
- main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
- count = 0
- # Iter through intents of a node
- try:
- for intent in json.loads( intents ):
- state = intent.get( 'state', None )
- if "INSTALLED" not in state:
- installedCheck = False
- intentId = intent.get( 'id', None )
- intentStates.append( ( intentId, state ) )
- except ( ValueError, TypeError ):
- main.log.exception( "Error parsing intents." )
- intentStates.sort()
- for i, s in intentStates:
- count += 1
- main.log.info( "%-6s%-15s%-15s" %
- ( str( count ), str( i ), str( s ) ) )
- leaders = main.ONOScli1.leaders()
- try:
- if leaders:
- parsedLeaders = json.loads( leaders )
- main.log.warn( json.dumps( parsedLeaders,
- sort_keys=True,
- indent=4,
- separators=( ',', ': ' ) ) )
- # check for all intent partitions
- # check for election
- topics = []
- for i in range( 14 ):
- topics.append( "intent-partition-" + str( i ) )
- # FIXME: this should only be after we start the app
- topics.append( "org.onosproject.election" )
- main.log.debug( topics )
- ONOStopics = [ j['topic'] for j in parsedLeaders ]
- for topic in topics:
- if topic not in ONOStopics:
- main.log.error( "Error: " + topic +
- " not in leaders" )
- else:
- main.log.error( "leaders() returned None" )
- except ( ValueError, TypeError ):
- main.log.exception( "Error parsing leaders" )
- main.log.error( repr( leaders ) )
- partitions = main.ONOScli1.partitions()
- try:
- if partitions :
- parsedPartitions = json.loads( partitions )
- main.log.warn( json.dumps( parsedPartitions,
- sort_keys=True,
- indent=4,
- separators=( ',', ': ' ) ) )
- # TODO check for a leader in all paritions
- # TODO check for consistency among nodes
- else:
- main.log.error( "partitions() returned None" )
- except ( ValueError, TypeError ):
- main.log.exception( "Error parsing partitions" )
- main.log.error( repr( partitions ) )
- pendingMap = main.ONOScli1.pendingMap()
- try:
- if pendingMap :
- parsedPending = json.loads( pendingMap )
- main.log.warn( json.dumps( parsedPending,
- sort_keys=True,
- indent=4,
- separators=( ',', ': ' ) ) )
- # TODO check something here?
- else:
- main.log.error( "pendingMap() returned None" )
- except ( ValueError, TypeError ):
- main.log.exception( "Error parsing pending map" )
- main.log.error( repr( pendingMap ) )
+ # Print the intent states
+ intents = main.ONOScli1.intents()
+ intentStates = []
+ main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
+ count = 0
+ # Iter through intents of a node
+ try:
+ for intent in json.loads( intents ):
+ state = intent.get( 'state', None )
+ if "INSTALLED" not in state:
+ installedCheck = False
+ intentId = intent.get( 'id', None )
+ intentStates.append( ( intentId, state ) )
+ except ( ValueError, TypeError ):
+ main.log.exception( "Error parsing intents." )
+ # Print states
+ intentStates.sort()
+ for i, s in intentStates:
+ count += 1
+ main.log.info( "%-6s%-15s%-15s" %
+ ( str( count ), str( i ), str( s ) ) )
+ utilities.assert_equals( expect=True, actual=installedCheck,
+ onpass="Intents are all INSTALLED",
+ onfail="Intents are not all in " +\
+ "INSTALLED state" )
+
+ main.step( "Check leadership of topics" )
+ leaders = main.ONOScli1.leaders()
+ topicCheck = main.TRUE
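+ # topicCheck fails if any expected intent partition topic is missing from the leaders output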
+ try:
+ if leaders:
+ parsedLeaders = json.loads( leaders )
+ main.log.warn( json.dumps( parsedLeaders,
+ sort_keys=True,
+ indent=4,
+ separators=( ',', ': ' ) ) )
+ # check for all intent partitions
+ # check for election
+ # TODO: Look at Devices as topics now that it uses this system
+ topics = []
+ for i in range( 14 ):
+ topics.append( "intent-partition-" + str( i ) )
+ # FIXME: this should only be after we start the app
+ # FIXME: topics.append( "org.onosproject.election" )
+ # Print leaders output
+ main.log.debug( topics )
+ ONOStopics = [ j['topic'] for j in parsedLeaders ]
+ for topic in topics:
+ if topic not in ONOStopics:
+ main.log.error( "Error: " + topic +
+ " not in leaders" )
+ topicCheck = main.FALSE
+ else:
+ main.log.error( "leaders() returned None" )
+ topicCheck = main.FALSE
+ except ( ValueError, TypeError ):
+ topicCheck = main.FALSE
+ main.log.exception( "Error parsing leaders" )
+ main.log.error( repr( leaders ) )
+ # TODO: Check for a leader of these topics
+ utilities.assert_equals( expect=main.TRUE, actual=topicCheck,
+ onpass="intent Partitions is in leaders",
+ onfail="Some topics were lost " )
+ # Print partitions
+ partitions = main.ONOScli1.partitions()
+ try:
+ if partitions :
+ parsedPartitions = json.loads( partitions )
+ main.log.warn( json.dumps( parsedPartitions,
+ sort_keys=True,
+ indent=4,
+ separators=( ',', ': ' ) ) )
+ # TODO check for a leader in all partitions
+ # TODO check for consistency among nodes
+ else:
+ main.log.error( "partitions() returned None" )
+ except ( ValueError, TypeError ):
+ main.log.exception( "Error parsing partitions" )
+ main.log.error( repr( partitions ) )
+ # Print Pending Map
+ pendingMap = main.ONOScli1.pendingMap()
+ try:
+ if pendingMap :
+ parsedPending = json.loads( pendingMap )
+ main.log.warn( json.dumps( parsedPending,
+ sort_keys=True,
+ indent=4,
+ separators=( ',', ': ' ) ) )
+ # TODO check something here?
+ else:
+ main.log.error( "pendingMap() returned None" )
+ except ( ValueError, TypeError ):
+ main.log.exception( "Error parsing pending map" )
+ main.log.error( repr( pendingMap ) )
if not installedCheck:
main.log.info( "Waiting 60 seconds to see if the state of " +
@@ -942,6 +981,46 @@
main.log.error( repr( pendingMap ) )
main.log.debug( CLIs[0].flows( jsonFormat=False ) )
+ main.step( "Wait a minute then ping again" )
+ PingResult = main.TRUE
+ for i in range( 8, 18 ):
+ ping = main.Mininet1.pingHost( src="h" + str( i ),
+ target="h" + str( i + 10 ) )
+ PingResult = PingResult and ping
+ if ping == main.FALSE:
+ main.log.warn( "Ping failed between h" + str( i ) +
+ " and h" + str( i + 10 ) )
+ elif ping == main.TRUE:
+ main.log.info( "Ping test passed!" )
+ # Don't set PingResult to TRUE here or it would hide earlier failures
+ if PingResult == main.FALSE:
+ main.log.report(
+ "Intents have not been installed correctly, pings failed." )
+ # TODO: pretty print
+ main.log.warn( "ONOS1 intents: " )
+ try:
+ tmpIntents = main.ONOScli1.intents()
+ main.log.warn( json.dumps( json.loads( tmpIntents ),
+ sort_keys=True,
+ indent=4,
+ separators=( ',', ': ' ) ) )
+ except ( ValueError, TypeError ):
+ main.log.warn( repr( tmpIntents ) )
+ utilities.assert_equals(
+ expect=main.TRUE,
+ actual=PingResult,
+ onpass="Intents have been installed correctly and pings work",
+ onfail="Intents have not been installed correctly, pings failed." )
+
+
+
+ '''
+ #DEBUG
+ if PingResult == main.FALSE:
+ import time
+ time.sleep( 100000 )
+ '''
+
def CASE5( self, main ):
"""
Reading state of ONOS
@@ -956,7 +1035,6 @@
# assumes that sts is already in you PYTHONPATH
from sts.topology.teston_topology import TestONTopology
- main.log.report( "Setting up and gathering data for current state" )
main.case( "Setting up and gathering data for current state" )
# The general idea for this test case is to pull the state of
# ( intents,flows, topology,... ) from each ONOS node
@@ -1378,7 +1456,7 @@
for controller in range( 0, len( hosts ) ):
controllerStr = str( controller + 1 )
for host in hosts[ controller ]:
- if not host.get( 'ips', [ ] ):
+ if not host.get( 'ipAddresses', [ ] ):
main.log.error( "DEBUG:Error with host ips on controller" +
controllerStr + ": " + str( host ) )
ipResult = main.FALSE
@@ -1615,8 +1693,6 @@
else:
main.log.warn( "Mastership of switch %s changed" % switchDPID )
mastershipCheck = main.FALSE
- if mastershipCheck == main.TRUE:
- main.log.report( "Mastership of Switches was not changed" )
utilities.assert_equals(
expect=main.TRUE,
actual=mastershipCheck,
@@ -1727,7 +1803,7 @@
sameIntents = main.TRUE
if intentState and intentState == ONOSIntents[ 0 ]:
sameIntents = main.TRUE
- main.log.report( "Intents are consistent with before failure" )
+ main.log.info( "Intents are consistent with before failure" )
# TODO: possibly the states have changed? we may need to figure out
# what the acceptable states are
else:
@@ -1762,8 +1838,6 @@
if FlowTables == main.FALSE:
main.log.info( "Differences in flow table for switch: s" +
str( i + 1 ) )
- if FlowTables == main.TRUE:
- main.log.report( "No changes were found in the flow tables" )
utilities.assert_equals(
expect=main.TRUE,
actual=FlowTables,
@@ -1821,10 +1895,6 @@
main.log.report( cli.name + " sees " + str( leaderN ) +
" as the leader of the election app. " +
"Leader should be " + str( leader ) )
- if leaderResult:
- main.log.report( "Leadership election tests passed( consistent " +
- "view of leader across listeners and a new " +
- "leader was re-elected if applicable )" )
utilities.assert_equals(
expect=main.TRUE,
actual=leaderResult,
@@ -1848,16 +1918,26 @@
assert CLIs, "CLIs not defined"
assert nodes, "nodes not defined"
- description = "Compare ONOS Topology view to Mininet topology"
- main.case( description )
- main.log.report( description )
+ main.case( "Compare ONOS Topology view to Mininet topology" )
+ main.caseExplaination = "Compare topology objects between Mininet" +\
+ " and ONOS"
main.step( "Create TestONTopology object" )
- ctrls = []
- for node in nodes:
- temp = ( node, node.name, node.ip_address, 6633 )
- ctrls.append( temp )
- MNTopo = TestONTopology( main.Mininet1, ctrls )
+ try:
+ ctrls = []
+ for node in nodes:
+ temp = ( node, node.name, node.ip_address, 6633 )
+ ctrls.append( temp )
+ MNTopo = TestONTopology( main.Mininet1, ctrls )
+ except Exception:
+ objResult = main.FALSE
+ else:
+ objResult = main.TRUE
+ utilities.assert_equals( expect=main.TRUE, actual=objResult,
+ onpass="Created TestONTopology object",
+ onfail="Exception while creating " +
+ "TestONTopology object" )
+ main.step( "Comparing ONOS topology to MN" )
devicesResults = main.TRUE
portsResults = main.TRUE
linksResults = main.TRUE
@@ -1906,7 +1986,7 @@
for controller in range( 0, len( hosts ) ):
controllerStr = str( controller + 1 )
for host in hosts[ controller ]:
- if host is None or host.get( 'ips', [] ) == []:
+ if host is None or host.get( 'ipAddresses', [] ) == []:
main.log.error(
"DEBUG:Error with host ips on controller" +
controllerStr + ": " + str( host ) )
@@ -2099,8 +2179,6 @@
utilities.assert_equals( expect=main.TRUE, actual=topoResult,
onpass="Topology Check Test successful",
onfail="Topology Check Test NOT successful" )
- if topoResult == main.TRUE:
- main.log.report( "ONOS topology view matches Mininet topology" )
# FIXME: move this to an ONOS state case
main.step( "Checking ONOS nodes" )
@@ -2378,11 +2456,11 @@
os.system( "~/TestON/dependencies/rotate.sh " + str( testname ) )
try:
- gossipIntentLog = open( main.logdir + "/Timers.csv", 'w')
+ timerLog = open( main.logdir + "/Timers.csv", 'w')
# Overwrite with empty line and close
- gossipIntentLog.write( "Gossip Intents\n" )
- gossipIntentLog.write( str( gossipTime ) )
- gossipIntentLog.close()
+ timerLog.write( "Gossip Intents\n" )
+ timerLog.write( str( gossipTime ) )
+ timerLog.close()
except NameError, e:
main.log.exception(e)
@@ -2396,10 +2474,17 @@
assert CLIs, "CLIs not defined"
assert nodes, "nodes not defined"
- leaderResult = main.TRUE
main.case("Start Leadership Election app")
main.step( "Install leadership election app" )
- main.ONOScli1.activateApp( "org.onosproject.election" )
+ appResult = main.ONOScli1.activateApp( "org.onosproject.election" )
+ utilities.assert_equals(
+ expect=main.TRUE,
+ actual=appResult,
+ onpass="Election app installed",
+ onfail="Something went wrong with installing Leadership election" )
+
+ main.step( "Run for election on each node" )
+ leaderResult = main.TRUE
leaders = []
for cli in CLIs:
cli.electionTestRun()
@@ -2411,19 +2496,23 @@
str( leader ) + "'" )
leaderResult = main.FALSE
leaders.append( leader )
- if len( set( leaders ) ) != 1:
- leaderResult = main.FALSE
- main.log.error( "Results of electionTestLeader is order of CLIs:" +
- str( leaders ) )
- if leaderResult:
- main.log.report( "Leadership election tests passed( consistent " +
- "view of leader across listeners and a leader " +
- "was elected )" )
utilities.assert_equals(
expect=main.TRUE,
actual=leaderResult,
- onpass="Leadership election passed",
- onfail="Something went wrong with Leadership election" )
+ onpass="Successfully ran for leadership",
+ onfail="Failed to run for leadership" )
+
+ main.step( "Check that each node shows the same leader" )
+ sameLeader = main.TRUE
+ if len( set( leaders ) ) != 1:
+ sameLeader = main.FALSE
+ main.log.error( "Results of electionTestLeader is order of CLIs:" +
+ str( leaders ) )
+ utilities.assert_equals(
+ expect=main.TRUE,
+ actual=sameLeader,
+ onpass="Leadership is consistent for the election topic",
+ onfail="Nodes have different leaders" )
def CASE15( self, main ):
"""
@@ -2440,12 +2529,29 @@
description = "Check that Leadership Election is still functional"
main.log.report( description )
main.case( description )
+
+ main.step( "Check that each node shows the same leader" )
+ sameLeader = main.TRUE
+ leaders = []
+ for cli in CLIs:
+ leader = cli.electionTestLeader()
+ leaders.append( leader )
+ if len( set( leaders ) ) != 1:
+ sameLeader = main.FALSE
+ main.log.error( "Results of electionTestLeader is order of CLIs:" +
+ str( leaders ) )
+ utilities.assert_equals(
+ expect=main.TRUE,
+ actual=sameLeader,
+ onpass="Leadership is consistent for the election topic",
+ onfail="Nodes have different leaders" )
+
main.step( "Find current leader and withdraw" )
leader = main.ONOScli1.electionTestLeader()
# do some sanity checking on leader before using it
withdrawResult = main.FALSE
if leader is None or leader == main.FALSE:
- main.log.report(
+ main.log.error(
"Leader for the election app should be an ONOS node," +
"instead got '" + str( leader ) + "'" )
leaderResult = main.FALSE
@@ -2461,8 +2567,8 @@
utilities.assert_equals(
expect=main.TRUE,
actual=withdrawResult,
- onpass="App was withdrawn from election",
- onfail="App was not withdrawn from election" )
+ onpass="Node was withdrawn from election",
+ onfail="Node was not withdrawn from election" )
main.step( "Make sure new leader is elected" )
# FIXME: use threads
@@ -2501,11 +2607,6 @@
main.log.report( "ONOS" + str( n + 1 ) + " response: " +
str( leaderList[ n ] ) )
leaderResult = leaderResult and consistentLeader
- if leaderResult:
- main.log.report( "Leadership election tests passed( consistent " +
- "view of leader across listeners and a new " +
- "leader was elected when the old leader " +
- "resigned )" )
utilities.assert_equals(
expect=main.TRUE,
actual=leaderResult,
@@ -2524,6 +2625,7 @@
onpass="App re-ran for election",
onfail="App failed to run for election" )
+ main.step( "Leader did not change when old leader re-ran" )
afterRun = main.ONOScli1.electionTestLeader()
# verify leader didn't just change
if afterRun == leaderList[ 0 ]:
@@ -2538,14 +2640,6 @@
onfail="Something went wrong with Leadership election after " +
"the old leader re-ran for election" )
- case15Result = withdrawResult and leaderResult and runResult and\
- afterResult
- utilities.assert_equals(
- expect=main.TRUE,
- actual=case15Result,
- onpass="Leadership election is still functional",
- onfail="Leadership Election is no longer functional" )
-
def CASE16( self, main ):
"""
Install Distributed Primitives app
@@ -2579,6 +2673,7 @@
actual=appResults,
onpass="Primitives app activated",
onfail="Primitives app not activated" )
+ time.sleep( 5 )  # To allow all nodes to activate
def CASE17( self, main ):
"""
@@ -2622,11 +2717,13 @@
main.step( "Increment and get a default counter on each node" )
pCounters = []
threads = []
+ addedPValues = []
for i in range( numControllers ):
t = main.Thread( target=CLIs[i].counterTestIncrement,
name="counterIncrement-" + str( i ),
args=[ pCounterName ] )
pCounterValue += 1
+ addedPValues.append( pCounterValue )
threads.append( t )
t.start()
@@ -2635,8 +2732,12 @@
pCounters.append( t.result )
# Check that counter incremented numController times
pCounterResults = True
- for i in range( numControllers ):
- pCounterResults and ( i + 1 ) in pCounters
+ for i in addedPValues:
+ tmpResult = i in pCounters
+ pCounterResults = pCounterResults and tmpResult
+ if not tmpResult:
+ main.log.error( str( i ) + " is not in partitioned "
+ "counter incremented results" )
utilities.assert_equals( expect=True,
actual=pCounterResults,
onpass="Default counter incremented",
@@ -2645,6 +2746,7 @@
main.step( "Increment and get an in memory counter on each node" )
iCounters = []
+ addedIValues = []
threads = []
for i in range( numControllers ):
t = main.Thread( target=CLIs[i].counterTestIncrement,
@@ -2652,6 +2754,7 @@
args=[ iCounterName ],
kwargs={ "inMemory": True } )
iCounterValue += 1
+ addedIValues.append( iCounterValue )
threads.append( t )
t.start()
@@ -2660,8 +2763,12 @@
iCounters.append( t.result )
# Check that counter incremented numController times
iCounterResults = True
- for i in range( numControllers ):
- iCounterResults and ( i + 1 ) in iCounters
+ for i in addedIValues:
+ tmpResult = i in iCounters
+ iCounterResults = iCounterResults and tmpResult
+ if not tmpResult:
+ main.log.error( str( i ) + " is not in the in-memory "
+ "counter incremented results" )
utilities.assert_equals( expect=True,
actual=iCounterResults,
onpass="In memory counter incremented",
diff --git a/TestON/tests/HATestSingleInstanceRestart/HATestSingleInstanceRestart.params b/TestON/tests/HATestSingleInstanceRestart/HATestSingleInstanceRestart.params
index d027c36..f057592 100644
--- a/TestON/tests/HATestSingleInstanceRestart/HATestSingleInstanceRestart.params
+++ b/TestON/tests/HATestSingleInstanceRestart/HATestSingleInstanceRestart.params
@@ -16,7 +16,7 @@
#CASE15: Check that Leadership Election is still functional
#1,2,8,3,4,5,14,[6],8,3,7,4,15,9,8,4,10,8,4,11,8,4,12,8,4,13
#extra hosts test 1,2,8,11,8,12,8
- <testcases>1,2,8,3,4,5,14,16,17,[6],8,3,7,4,15,17,9,8,4,10,8,4,11,8,4,12,8,4,13</testcases>
+ <testcases>1,2,8,3,4,5,14,15,16,17,[6],8,3,7,4,15,17,9,8,4,10,8,4,11,8,4,12,8,4,13</testcases>
<ENV>
<cellName>HA</cellName>
</ENV>
diff --git a/TestON/tests/HATestSingleInstanceRestart/HATestSingleInstanceRestart.py b/TestON/tests/HATestSingleInstanceRestart/HATestSingleInstanceRestart.py
index 0f68a02..7194a3b 100644
--- a/TestON/tests/HATestSingleInstanceRestart/HATestSingleInstanceRestart.py
+++ b/TestON/tests/HATestSingleInstanceRestart/HATestSingleInstanceRestart.py
@@ -49,6 +49,9 @@
main.log.report( "ONOS Single node cluster restart " +
"HA test - initialization" )
main.case( "Setting up test environment" )
+ main.caseExplaination = "Setup the test environment including " +\
+ "installing ONOS, starting Mininet and ONOS" +\
+ "cli sessions."
# TODO: save all the timers and output them for plotting
# load some variables from the params file
@@ -92,15 +95,15 @@
verifyResult = main.ONOSbench.verifyCell()
# FIXME:this is short term fix
- main.log.report( "Removing raft logs" )
+ main.log.info( "Removing raft logs" )
main.ONOSbench.onosRemoveRaftLogs()
- main.log.report( "Uninstalling ONOS" )
+ main.log.info( "Uninstalling ONOS" )
for node in nodes:
main.ONOSbench.onosUninstall( node.ip_address )
# Make sure ONOS is DEAD
- main.log.report( "Killing any ONOS processes" )
+ main.log.info( "Killing any ONOS processes" )
killResults = main.TRUE
for node in nodes:
killed = main.ONOSbench.onosKill( node.ip_address )
@@ -115,26 +118,27 @@
onpass="Mininet Started",
onfail="Error starting Mininet" )
- main.step( "Compiling the latest version of ONOS" )
+ main.step( "Git checkout and pull " + gitBranch )
if PULLCODE:
- main.step( "Git checkout and pull " + gitBranch )
main.ONOSbench.gitCheckout( gitBranch )
gitPullResult = main.ONOSbench.gitPull()
# values of 1 or 3 are good
utilities.assert_lesser( expect=0, actual=gitPullResult,
onpass="Git pull successful",
onfail="Git pull failed" )
-
- main.step( "Using mvn clean and install" )
- cleanInstallResult = main.ONOSbench.cleanInstall()
- utilities.assert_equals( expect=main.TRUE,
- actual=cleanInstallResult,
- onpass="MCI successful",
- onfail="MCI failed" )
else:
main.log.warn( "Did not pull new code so skipping mvn " +
"clean install" )
main.ONOSbench.getVersion( report=True )
+
+ main.step( "Using mvn clean install" )
+ cleanInstallResult = main.TRUE
+ if gitPullResult == main.TRUE:
+ cleanInstallResult = main.ONOSbench.cleanInstall()
+ utilities.assert_equals( expect=main.TRUE,
+ actual=cleanInstallResult,
+ onpass="MCI successful",
+ onfail="MCI failed" )
# GRAPHS
# NOTE: important params here:
# job = name of Jenkins job
@@ -171,8 +175,6 @@
onos1Isup = main.ONOSbench.isup( ONOS1Ip )
if onos1Isup:
break
- if not onos1Isup:
- main.log.report( "ONOS1 didn't start!" )
utilities.assert_equals( expect=main.TRUE, actual=onos1Isup,
onpass="ONOS startup successful",
onfail="ONOS startup failed" )
@@ -191,18 +193,7 @@
port=main.params[ 'MNtcpdump' ][ 'port' ] )
main.step( "App Ids check" )
- appCheck = main.TRUE
- threads = []
- for i in range( numControllers ):
- t = main.Thread( target=CLIs[i].appToIDCheck,
- name="appToIDCheck-" + str( i ),
- args=[] )
- threads.append( t )
- t.start()
-
- for t in threads:
- t.join()
- appCheck = appCheck and t.result
+ appCheck = main.ONOScli1.appToIDCheck()
if appCheck != main.TRUE:
main.log.warn( CLIs[0].apps() )
main.log.warn( CLIs[0].appIDs() )
@@ -210,11 +201,8 @@
onpass="App Ids seem to be correct",
onfail="Something is wrong with app Ids" )
- case1Result = ( cleanInstallResult and packageResult and
- cellResult and verifyResult and onosInstallResult
- and onos1Isup and cliResults )
-
- if case1Result == main.FALSE:
+ if cliResults == main.FALSE:
+ main.log.error( "Failed to start ONOS, stopping test" )
main.cleanup()
main.exit()
@@ -234,8 +222,10 @@
assert ONOS6Port, "ONOS6Port not defined"
assert ONOS7Port, "ONOS7Port not defined"
- main.log.report( "Assigning switches to controllers" )
main.case( "Assigning Controllers" )
+ main.caseExplaination = "Assign switches to ONOS using 'ovs-vsctl' " +\
+ "and check that an ONOS node becomes the " +\
+ "master of the device."
main.step( "Assign switches to controllers" )
for i in range( 1, 29 ):
@@ -255,7 +245,7 @@
else:
mastershipCheck = main.FALSE
if mastershipCheck == main.TRUE:
- main.log.report( "Switch mastership assigned correctly" )
+ main.log.info( "Switch mastership assigned correctly" )
utilities.assert_equals(
expect=main.TRUE,
actual=mastershipCheck,
@@ -340,13 +330,14 @@
assert numControllers, "numControllers not defined"
assert main, "main not defined"
assert utilities.assert_equals, "utilities.assert_equals not defined"
- # FIXME: we must reinstall intents until we have a persistant
- # datastore!
- main.log.report( "Adding host intents" )
+ # NOTE: we must reinstall intents until we have a persistent intent
+ # datastore!
main.case( "Adding host Intents" )
-
- main.step( "Discovering Hosts( Via pingall for now )" )
- # FIXME: Once we have a host discovery mechanism, use that instead
+ main.caseExplaination = "Discover hosts by using pingall then " +\
+ "assign predetermined host-to-host intents." +\
+ " After installation, check that the intent" +\
+ " is distributed to all nodes and the state" +\
+ " is INSTALLED"
# install onos-app-fwd
main.step( "Install reactive forwarding app" )
@@ -355,6 +346,7 @@
onpass="Install fwd successful",
onfail="Install fwd failed" )
+ main.step( "Check app ids" )
appCheck = main.ONOScli1.appToIDCheck()
if appCheck != main.TRUE:
main.log.warn( CLIs[0].apps() )
@@ -363,6 +355,8 @@
onpass="App Ids seem to be correct",
onfail="Something is wrong with app Ids" )
+ main.step( "Discovering Hosts( Via pingall for now )" )
+ # FIXME: Once we have a host discovery mechanism, use that instead
# REACTIVE FWD test
pingResult = main.FALSE
for i in range(2): # Retry if pingall fails first time
@@ -385,6 +379,8 @@
utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
onpass="Uninstall fwd successful",
onfail="Uninstall fwd failed" )
+
+ main.step( "Check app ids" )
appCheck2 = main.ONOScli1.appToIDCheck()
if appCheck2 != main.TRUE:
main.log.warn( CLIs[0].apps() )
@@ -393,7 +389,7 @@
onpass="App Ids seem to be correct",
onfail="Something is wrong with app Ids" )
- main.step( "Add host intents" )
+ main.step( "Add host intents via cli" )
intentIds = []
# TODO: move the host numbers to params
# Maybe look at all the paths we ping?
@@ -435,7 +431,10 @@
except ( ValueError, TypeError ):
main.log.warn( repr( hosts ) )
hostResult = main.FALSE
- # FIXME: DEBUG
+ utilities.assert_equals( expect=main.TRUE, actual=hostResult,
+ onpass="Found a host id for each host",
+ onfail="Error looking up host ids" )
+
intentStart = time.time()
onosIds = main.ONOScli1.getAllIntentsId()
main.log.info( "Submitted intents: " + str( intentIds ) )
@@ -530,13 +529,11 @@
main.log.exception( "Error parsing pending map" )
main.log.error( repr( pendingMap ) )
- intentAddResult = bool( pingResult and hostResult and intentAddResult
- and not missingIntents and installedCheck )
- utilities.assert_equals(
- expect=True,
- actual=intentAddResult,
- onpass="Pushed host intents to ONOS",
- onfail="Error in pushing host intents to ONOS" )
+ intentAddResult = bool( intentAddResult and not missingIntents and
+ installedCheck )
+ if not intentAddResult:
+ main.log.error( "Error in pushing host intents to ONOS" )
+
main.step( "Intent Anti-Entropy dispersion" )
for i in range(100):
correct = True
@@ -667,9 +664,11 @@
assert numControllers, "numControllers not defined"
assert main, "main not defined"
assert utilities.assert_equals, "utilities.assert_equals not defined"
- description = " Ping across added host intents"
- main.log.report( description )
- main.case( description )
+ main.case( "Verify connectivity by sendind traffic across Intents" )
+ main.caseExplaination = "Ping across added host intents to check " +\
+ "functionality and check the state of " +\
+ "the intent"
+ main.step( "Ping across added host intents" )
PingResult = main.TRUE
for i in range( 8, 18 ):
ping = main.Mininet1.pingHost( src="h" + str( i ),
@@ -695,7 +694,7 @@
except ( ValueError, TypeError ):
main.log.warn( repr( tmpIntents ) )
if PingResult == main.TRUE:
- main.log.report(
+ main.log.info(
"Intents have been installed correctly and verified by pings" )
utilities.assert_equals(
expect=main.TRUE,
@@ -703,83 +702,102 @@
onpass="Intents have been installed correctly and pings work",
onfail="Intents have not been installed correctly, pings failed." )
+ main.step( "Check Intent state" )
installedCheck = True
- if PingResult is not main.TRUE:
- # Print the intent states
- intents = main.ONOScli1.intents()
- intentStates = []
- main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
- count = 0
- # Iter through intents of a node
- try:
- for intent in json.loads( intents ):
- state = intent.get( 'state', None )
- if "INSTALLED" not in state:
- installedCheck = False
- intentId = intent.get( 'id', None )
- intentStates.append( ( intentId, state ) )
- except ( ValueError, TypeError ):
- main.log.exception( "Error parsing intents." )
- intentStates.sort()
- for i, s in intentStates:
- count += 1
- main.log.info( "%-6s%-15s%-15s" %
- ( str( count ), str( i ), str( s ) ) )
- leaders = main.ONOScli1.leaders()
- try:
- if leaders:
- parsedLeaders = json.loads( leaders )
- main.log.warn( json.dumps( parsedLeaders,
- sort_keys=True,
- indent=4,
- separators=( ',', ': ' ) ) )
- # check for all intent partitions
- # check for election
- topics = []
- for i in range( 14 ):
- topics.append( "intent-partition-" + str( i ) )
- # FIXME: this should only be after we start the app
- topics.append( "org.onosproject.election" )
- main.log.debug( topics )
- ONOStopics = [ j['topic'] for j in parsedLeaders ]
- for topic in topics:
- if topic not in ONOStopics:
- main.log.error( "Error: " + topic +
- " not in leaders" )
- else:
- main.log.error( "leaders() returned None" )
- except ( ValueError, TypeError ):
- main.log.exception( "Error parsing leaders" )
- main.log.error( repr( leaders ) )
- partitions = main.ONOScli1.partitions()
- try:
- if partitions :
- parsedPartitions = json.loads( partitions )
- main.log.warn( json.dumps( parsedPartitions,
- sort_keys=True,
- indent=4,
- separators=( ',', ': ' ) ) )
- # TODO check for a leader in all paritions
- # TODO check for consistency among nodes
- else:
- main.log.error( "partitions() returned None" )
- except ( ValueError, TypeError ):
- main.log.exception( "Error parsing partitions" )
- main.log.error( repr( partitions ) )
- pendingMap = main.ONOScli1.pendingMap()
- try:
- if pendingMap :
- parsedPending = json.loads( pendingMap )
- main.log.warn( json.dumps( parsedPending,
- sort_keys=True,
- indent=4,
- separators=( ',', ': ' ) ) )
- # TODO check something here?
- else:
- main.log.error( "pendingMap() returned None" )
- except ( ValueError, TypeError ):
- main.log.exception( "Error parsing pending map" )
- main.log.error( repr( pendingMap ) )
+ # Print the intent states
+ intents = main.ONOScli1.intents()
+ intentStates = []
+ main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
+ count = 0
+ # Iter through intents of a node
+ try:
+ for intent in json.loads( intents ):
+ state = intent.get( 'state', None )
+ if "INSTALLED" not in state:
+ installedCheck = False
+ intentId = intent.get( 'id', None )
+ intentStates.append( ( intentId, state ) )
+ except ( ValueError, TypeError ):
+ main.log.exception( "Error parsing intents." )
+ # Print states
+ intentStates.sort()
+ for i, s in intentStates:
+ count += 1
+ main.log.info( "%-6s%-15s%-15s" %
+ ( str( count ), str( i ), str( s ) ) )
+ utilities.assert_equals( expect=True, actual=installedCheck,
+ onpass="Intents are all INSTALLED",
+ onfail="Intents are not all in " +\
+ "INSTALLED state" )
+
+ main.step( "Check leadership of topics" )
+ leaders = main.ONOScli1.leaders()
+ topicCheck = main.TRUE
+ try:
+ if leaders:
+ parsedLeaders = json.loads( leaders )
+ main.log.warn( json.dumps( parsedLeaders,
+ sort_keys=True,
+ indent=4,
+ separators=( ',', ': ' ) ) )
+ # check for all intent partitions
+ # check for election
+ # TODO: Look at Devices as topics now that it uses this system
+ topics = []
+ for i in range( 14 ):
+ topics.append( "intent-partition-" + str( i ) )
+ # FIXME: this should only be after we start the app
+ # FIXME: topics.append( "org.onosproject.election" )
+ # Print leaders output
+ main.log.debug( topics )
+ ONOStopics = [ j['topic'] for j in parsedLeaders ]
+ for topic in topics:
+ if topic not in ONOStopics:
+ main.log.error( "Error: " + topic +
+ " not in leaders" )
+ topicCheck = main.FALSE
+ else:
+ main.log.error( "leaders() returned None" )
+ topicCheck = main.FALSE
+ except ( ValueError, TypeError ):
+ topicCheck = main.FALSE
+ main.log.exception( "Error parsing leaders" )
+ main.log.error( repr( leaders ) )
+ # TODO: Check for a leader of these topics
+ utilities.assert_equals( expect=main.TRUE, actual=topicCheck,
+ onpass="intent Partitions is in leaders",
+ onfail="Some topics were lost " )
+ # Print partitions
+ partitions = main.ONOScli1.partitions()
+ try:
+ if partitions :
+ parsedPartitions = json.loads( partitions )
+ main.log.warn( json.dumps( parsedPartitions,
+ sort_keys=True,
+ indent=4,
+ separators=( ',', ': ' ) ) )
+ # TODO check for a leader in all partitions
+ # TODO check for consistency among nodes
+ else:
+ main.log.error( "partitions() returned None" )
+ except ( ValueError, TypeError ):
+ main.log.exception( "Error parsing partitions" )
+ main.log.error( repr( partitions ) )
+ # Print Pending Map
+ pendingMap = main.ONOScli1.pendingMap()
+ try:
+ if pendingMap :
+ parsedPending = json.loads( pendingMap )
+ main.log.warn( json.dumps( parsedPending,
+ sort_keys=True,
+ indent=4,
+ separators=( ',', ': ' ) ) )
+ # TODO check something here?
+ else:
+ main.log.error( "pendingMap() returned None" )
+ except ( ValueError, TypeError ):
+ main.log.exception( "Error parsing pending map" )
+ main.log.error( repr( pendingMap ) )
if not installedCheck:
main.log.info( "Waiting 60 seconds to see if the state of " +
@@ -862,6 +880,37 @@
main.log.error( repr( pendingMap ) )
main.log.debug( main.ONOScli1.flows( jsonFormat=False ) )
+ main.step( "Wait a minute then ping again" )
+ PingResult = main.TRUE
+ for i in range( 8, 18 ):
+ ping = main.Mininet1.pingHost( src="h" + str( i ),
+ target="h" + str( i + 10 ) )
+ PingResult = PingResult and ping
+ if ping == main.FALSE:
+ main.log.warn( "Ping failed between h" + str( i ) +
+ " and h" + str( i + 10 ) )
+ elif ping == main.TRUE:
+ main.log.info( "Ping test passed!" )
+ # Don't set PingResult to TRUE here or it would hide earlier failures
+ if PingResult == main.FALSE:
+ main.log.report(
+ "Intents have not been installed correctly, pings failed." )
+ # TODO: pretty print
+ main.log.warn( "ONOS1 intents: " )
+ try:
+ tmpIntents = main.ONOScli1.intents()
+ main.log.warn( json.dumps( json.loads( tmpIntents ),
+ sort_keys=True,
+ indent=4,
+ separators=( ',', ': ' ) ) )
+ except ( ValueError, TypeError ):
+ main.log.warn( repr( tmpIntents ) )
+ utilities.assert_equals(
+ expect=main.TRUE,
+ actual=PingResult,
+ onpass="Intents have been installed correctly and pings work",
+ onfail="Intents have not been installed correctly, pings failed." )
+
def CASE5( self, main ):
"""
Reading state of ONOS
@@ -873,7 +922,6 @@
# assumes that sts is already in you PYTHONPATH
from sts.topology.teston_topology import TestONTopology
- main.log.report( "Setting up and gathering data for current state" )
main.case( "Setting up and gathering data for current state" )
# The general idea for this test case is to pull the state of
# ( intents,flows, topology,... ) from each ONOS node
@@ -961,7 +1009,7 @@
for controller in range( 0, len( hosts ) ):
controllerStr = str( controller + 1 )
for host in hosts[ controller ]:
- if host is None or host.get( 'ips', [] ) == []:
+ if host is None or host.get( 'ipAddresses', [] ) == []:
main.log.error(
"DEBUG:Error with host ips on controller" +
controllerStr + ": " + str( host ) )
@@ -1074,7 +1122,9 @@
iCounterValue = 0
main.log.report( "Restart ONOS node" )
- main.log.case( "Restart ONOS node" )
+ main.case( "Restart ONOS node" )
+ main.caseExplaination = "Killing ONOS process and restart cli " +\
+ "sessions once onos is up."
main.step( "Killing ONOS processes" )
killResult = main.ONOSbench.onosKill( ONOS1Ip )
start = time.time()
@@ -1105,6 +1155,8 @@
main.log.info( "ESTIMATE: ONOS took %s seconds to restart" %
str( elapsed ) )
time.sleep( 5 )
+ # Rerun for election on the restarted node
+ main.ONOScli1.electionTestRun()
def CASE7( self, main ):
"""
@@ -1115,7 +1167,6 @@
assert main, "main not defined"
assert utilities.assert_equals, "utilities.assert_equals not defined"
main.case( "Running ONOS Constant State Tests" )
-
main.step( "Check that each switch has a master" )
# Assert that each device has a master
rolesNotNull = main.ONOScli1.rolesNotNull()
@@ -1162,8 +1213,6 @@
else:
main.log.warn( "Mastership of switch %s changed" % switchDPID )
mastershipCheck = main.FALSE
- if mastershipCheck == main.TRUE:
- main.log.report( "Mastership of Switches was not changed" )
utilities.assert_equals(
expect=main.TRUE,
actual=mastershipCheck,
@@ -1207,9 +1256,9 @@
sameIntents = main.TRUE
if intentState and intentState == ONOS1Intents:
sameIntents = main.TRUE
- main.log.report( "Intents are consistent with before failure" )
+ main.log.info( "Intents are consistent with before failure" )
# TODO: possibly the states have changed? we may need to figure out
- # what the aceptable states are
+ # what the acceptable states are
else:
try:
main.log.warn( "ONOS1 intents: " )
@@ -1241,14 +1290,13 @@
if FlowTables == main.FALSE:
main.log.info( "Differences in flow table for switch: s" +
str( i + 1 ) )
- if FlowTables == main.TRUE:
- main.log.report( "No changes were found in the flow tables" )
utilities.assert_equals(
expect=main.TRUE,
actual=FlowTables,
onpass="No changes were found in the flow tables",
onfail="Changes were found in the flow tables" )
+ main.step( "Leadership Election is still functional" )
# Test of LeadershipElection
leader = ONOS1Ip
@@ -1264,7 +1312,7 @@
# all is well
pass
elif leaderN == main.FALSE:
- # error in response
+ # error in response
main.log.report( "Something is wrong with " +
"electionTestLeader function, check the" +
" error logs" )
@@ -1275,25 +1323,12 @@
str( leaderN ) +
" as the leader of the election app. " +
"Leader should be " + str( leader ) )
- if leaderResult:
- main.log.report( "Leadership election tests passed( consistent " +
- "view of leader across listeners and a new " +
- "leader was re-elected if applicable )" )
utilities.assert_equals(
expect=main.TRUE,
actual=leaderResult,
onpass="Leadership election passed",
onfail="Something went wrong with Leadership election" )
- result = ( mastershipCheck and intentCheck and FlowTables and
- rolesNotNull and leaderResult )
- result = int( result )
- if result == main.TRUE:
- main.log.report( "Constant State Tests Passed" )
- utilities.assert_equals( expect=main.TRUE, actual=result,
- onpass="Constant State Tests Passed",
- onfail="Constant state tests failed" )
-
def CASE8( self, main ):
"""
Compare topo
@@ -1309,15 +1344,24 @@
assert main, "main not defined"
assert utilities.assert_equals, "utilities.assert_equals not defined"
- description = "Compare ONOS Topology view to Mininet topology"
- main.case( description )
- main.log.report( description )
+ main.case( "Compare ONOS Topology view to Mininet topology" )
+ main.caseExplaination = "Compare topology objects between Mininet" +\
+ " and ONOS"
main.step( "Create TestONTopology object" )
- ctrls = []
- node = main.ONOS1
- temp = ( node, node.name, node.ip_address, 6633 )
- ctrls.append( temp )
- MNTopo = TestONTopology( main.Mininet1, ctrls )
+ try:
+ ctrls = []
+ node = main.ONOS1
+ temp = ( node, node.name, node.ip_address, 6633 )
+ ctrls.append( temp )
+ MNTopo = TestONTopology( main.Mininet1, ctrls )
+ except Exception:
+ objResult = main.FALSE
+ else:
+ objResult = main.TRUE
+ utilities.assert_equals( expect=main.TRUE, actual=objResult,
+ onpass="Created TestONTopology object",
+ onfail="Exception while creating " +
+ "TestONTopology object" )
main.step( "Comparing ONOS topology to MN" )
devicesResults = main.TRUE
@@ -1344,7 +1388,7 @@
for controller in range( 0, len( hosts ) ):
controllerStr = str( controller + 1 )
for host in hosts[ controller ]:
- if host is None or host.get( 'ips', [] ) == []:
+ if host is None or host.get( 'ipAddresses', [] ) == []:
main.log.error(
"DEBUG:Error with host ips on controller" +
controllerStr + ": " + str( host ) )
@@ -1445,8 +1489,6 @@
utilities.assert_equals( expect=main.TRUE, actual=topoResult,
onpass="Topology Check Test successful",
onfail="Topology Check Test NOT successful" )
- if topoResult == main.TRUE:
- main.log.report( "ONOS topology view matches Mininet topology" )
def CASE9( self, main ):
"""
@@ -1662,10 +1704,17 @@
assert main, "main not defined"
assert utilities.assert_equals, "utilities.assert_equals not defined"
- leaderResult = main.TRUE
main.case("Start Leadership Election app")
main.step( "Install leadership election app" )
- main.ONOScli1.activateApp( "org.onosproject.election" )
+ appResult = main.ONOScli1.activateApp( "org.onosproject.election" )
+ utilities.assert_equals(
+ expect=main.TRUE,
+ actual=appResult,
+ onpass="Election app installed",
+ onfail="Something went wrong with installing Leadership election" )
+
+ main.step( "Run for election on each node" )
+ leaderResult = main.ONOScli1.electionTestRun()
# check for leader
leader = main.ONOScli1.electionTestLeader()
# verify leader is ONOS1
@@ -1674,32 +1723,27 @@
pass
elif leader is None:
# No leader elected
- main.log.report( "No leader was elected" )
+ main.log.error( "No leader was elected" )
leaderResult = main.FALSE
elif leader == main.FALSE:
# error in response
# TODO: add check for "Command not found:" in the driver, this
# means the app isn't loaded
- main.log.report( "Something is wrong with electionTestLeader" +
+ main.log.error( "Something is wrong with electionTestLeader" +
" function, check the error logs" )
leaderResult = main.FALSE
else:
# error in response
- main.log.report(
+ main.log.error(
"Unexpected response from electionTestLeader function:'" +
str( leader ) +
"'" )
leaderResult = main.FALSE
-
- if leaderResult:
- main.log.report( "Leadership election tests passed( consistent " +
- "view of leader across listeners and a leader " +
- "was elected )" )
utilities.assert_equals(
expect=main.TRUE,
actual=leaderResult,
- onpass="Leadership election passed",
- onfail="Something went wrong with Leadership election" )
+ onpass="Successfully ran for leadership",
+ onfail="Failed to run for leadership" )
def CASE15( self, main ):
"""
@@ -1726,13 +1770,15 @@
oldLeader = None
else:
main.log.error( "Leader election --- why am I HERE?!?")
+ leaderResult = main.FALSE
+ oldLeader = None
if oldLeader:
withdrawResult = oldLeader.electionTestWithdraw()
utilities.assert_equals(
expect=main.TRUE,
actual=withdrawResult,
- onpass="App was withdrawn from election",
- onfail="App was not withdrawn from election" )
+ onpass="Node was withdrawn from election",
+ onfail="Node was not withdrawn from election" )
main.step( "Make sure new leader is elected" )
leaderN = main.ONOScli1.electionTestLeader()
@@ -1750,9 +1796,7 @@
elif leaderN is None:
main.log.info(
"There is no leader after the app withdrew from election" )
- if leaderResult:
- main.log.report( "Leadership election tests passed( There is no " +
- "leader after the old leader resigned )" )
+ leaderResult = main.TRUE
utilities.assert_equals(
expect=main.TRUE,
actual=leaderResult,
@@ -1770,20 +1814,21 @@
actual=runResult,
onpass="App re-ran for election",
onfail="App failed to run for election" )
- leader = main.ONOScli1.electionTestLeader()
+
+ main.step( "Node became leader when it ran for election" )
+ afterRun = main.ONOScli1.electionTestLeader()
# verify leader is ONOS1
- if leader == ONOS1Ip:
- leaderResult = main.TRUE
+ if afterRun == ONOS1Ip:
+ afterResult = main.TRUE
else:
- leaderResult = main.FALSE
- # TODO: assert on run and withdraw results?
+ afterResult = main.FALSE
utilities.assert_equals(
expect=main.TRUE,
- actual=leaderResult,
- onpass="Leadership election passed",
- onfail="ONOS1's election app was not leader after it re-ran " +
- "for election" )
+ actual=afterResult,
+ onpass="Old leader successfully re-ran for election",
+ onfail="Something went wrong with Leadership election after " +
+ "the old leader re-ran for election" )
def CASE16( self, main ):
"""
@@ -1861,11 +1906,13 @@
main.step( "Increment and get a default counter on each node" )
pCounters = []
threads = []
+ addedPValues = []
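+ # Record the value each increment is expected to return so it can be verified below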
for i in range( numControllers ):
t = main.Thread( target=CLIs[i].counterTestIncrement,
name="counterIncrement-" + str( i ),
args=[ pCounterName ] )
pCounterValue += 1
+ addedPValues.append( pCounterValue )
threads.append( t )
t.start()
@@ -1874,8 +1921,12 @@
pCounters.append( t.result )
# Check that counter incremented numController times
pCounterResults = True
- for i in range( numControllers ):
- pCounterResults and ( i + 1 ) in pCounters
+ for i in addedPValues:
+ tmpResult = i in pCounters
+ pCounterResults = pCounterResults and tmpResult
+ if not tmpResult:
+ main.log.error( str( i ) + " is not in partitioned "
+ "counter incremented results" )
utilities.assert_equals( expect=True,
actual=pCounterResults,
onpass="Default counter incremented",
@@ -1884,6 +1935,7 @@
main.step( "Increment and get an in memory counter on each node" )
iCounters = []
+ addedIValues = []
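+ # Record the expected values for the in-memory counter as well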
threads = []
for i in range( numControllers ):
t = main.Thread( target=CLIs[i].counterTestIncrement,
@@ -1891,6 +1943,7 @@
args=[ iCounterName ],
kwargs={ "inMemory": True } )
iCounterValue += 1
+ addedIValues.append( iCounterValue )
threads.append( t )
t.start()
@@ -1899,8 +1952,12 @@
iCounters.append( t.result )
# Check that counter incremented numController times
iCounterResults = True
- for i in range( numControllers ):
- iCounterResults and ( i + 1 ) in iCounters
+ for i in addedIValues:
+ tmpResult = i in iCounters
+ iCounterResults = iCounterResults and tmpResult
+ if not tmpResult:
+ main.log.error( str( i ) + " is not in the in-memory "
+ "counter incremented results" )
utilities.assert_equals( expect=True,
actual=iCounterResults,
onpass="In memory counter incremented",
diff --git a/TestON/tests/SdnIpTest/SdnIpTest.params b/TestON/tests/SdnIpTest/SdnIpTest.params
index 7f7ef2d..e47adf6 100755
--- a/TestON/tests/SdnIpTest/SdnIpTest.params
+++ b/TestON/tests/SdnIpTest/SdnIpTest.params
@@ -1,6 +1,6 @@
<PARAMS>
- <testcases>4</testcases>
+ <testcases>100, [4]*5</testcases>
#Environment variables
<ENV>
@@ -8,13 +8,15 @@
</ENV>
<CTRL>
+ <numCtrl>1</numCtrl>
<ip1>10.128.4.52</ip1>
<port1>6633</port1>
</CTRL>
<GIT>
- <autoPull>off</autoPull>
- <checkout>master</checkout>
+ <autoPull>on</autoPull>
+ <branch1>master</branch1>
+ <branch2>onos-1.0</branch2>
</GIT>
<JSON>
diff --git a/TestON/tests/SdnIpTest/SdnIpTest.py b/TestON/tests/SdnIpTest/SdnIpTest.py
index 85ac21e..ebbd57e 100644
--- a/TestON/tests/SdnIpTest/SdnIpTest.py
+++ b/TestON/tests/SdnIpTest/SdnIpTest.py
@@ -1,22 +1,86 @@
-# from cupshelpers.config import prefix
-# Testing the basic functionality of SDN-IP
-
-
+# Testing the functionality of SDN-IP with single ONOS instance
class SdnIpTest:
def __init__( self ):
self.default = ''
+ global branchName
-# from cupshelpers.config import prefix
+ # This case sets up ONOS
+ def CASE100( self, main ):
+ """
+ CASE100 is to compile ONOS and push it to the test machines
+ Startup sequence:
+ git pull
+ mvn clean install
+ onos-package
+ cell <name>
+ onos-verify-cell
+ onos-install -f
+ onos-wait-for-start
+ """
+ main.case( "Setting up test environment" )
-# Testing the basic functionality of SDN-IP
+ cellName = main.params[ 'ENV' ][ 'cellName' ]
+ ONOS1Ip = main.params[ 'CTRL' ][ 'ip1' ]
+ main.step( "Applying cell variable to environment" )
+ cellResult = main.ONOSbench.setCell( cellName )
+ verifyResult = main.ONOSbench.verifyCell()
-class SdnIpTest:
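+ # Remember the branch name; CASE4 uses it to pick the matching routes parser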
+ branchName = main.ONOSbench.getBranchName()
+ main.log.info( "ONOS is on branch: " + branchName )
- def __init__( self ):
- self.default = ''
+ main.log.report( "Uninstalling ONOS" )
+ main.ONOSbench.onosUninstall( ONOS1Ip )
+
+ cleanInstallResult = main.TRUE
+ gitPullResult = main.TRUE
+
+ main.step( "Git pull" )
+ gitPullResult = main.ONOSbench.gitPull()
+
+ main.step( "Using mvn clean & install" )
+ cleanInstallResult = main.TRUE
+# if gitPullResult == main.TRUE:
+# cleanInstallResult = main.ONOSbench.cleanInstall()
+# else:
+# main.log.warn( "Did not pull new code so skipping mvn " +
+# "clean install" )
+ cleanInstallResult = main.ONOSbench.cleanInstall()
+ main.ONOSbench.getVersion( report=True )
+
+ #cellResult = main.ONOSbench.setCell( cellName )
+ #verifyResult = main.ONOSbench.verifyCell()
+ main.step( "Creating ONOS package" )
+ packageResult = main.ONOSbench.onosPackage()
+
+ main.step( "Installing ONOS package" )
+ onos1InstallResult = main.ONOSbench.onosInstall( options="-f",
+ node=ONOS1Ip )
+
+ main.step( "Checking if ONOS is up yet" )
+ for i in range( 2 ):
+ onos1Isup = main.ONOSbench.isup( ONOS1Ip )
+ if onos1Isup:
+ break
+ if not onos1Isup:
+ main.log.report( "ONOS1 didn't start!" )
+
+ cliResult = main.ONOScli.startOnosCli( ONOS1Ip )
+
+ case1Result = ( cleanInstallResult and packageResult and
+ cellResult and verifyResult and
+ onos1InstallResult and
+ onos1Isup and cliResult )
+
+ utilities.assert_equals( expect=main.TRUE, actual=case1Result,
+ onpass="ONOS startup successful",
+ onfail="ONOS startup NOT successful" )
+
+ if case1Result == main.FALSE:
+ main.cleanup()
+ main.exit()
def CASE4( self, main ):
"""
@@ -33,14 +97,12 @@
import time
import json
from operator import eq
- # from datetime import datetime
from time import localtime, strftime
- main.case("The test case is to help to setup the TestON environment \
- and test new drivers" )
- # SDNIPJSONFILEPATH = "../tests/SdnIpTest/sdnip.json"
+ main.case("This case is to testing the functionality of SDN-IP with \
+ single ONOS instance" )
SDNIPJSONFILEPATH = \
- "/home/admin/workspace/onos/tools/package/config/sdnip.json"
+ "/home/admin/ONOS/tools/package/config/sdnip.json"
# all expected routes for all BGP peers
allRoutesExpected = []
main.step( "Start to generate routes for all BGP peers" )
@@ -79,31 +141,6 @@
routeIntentsExpected = routeIntentsExpectedHost3 + \
routeIntentsExpectedHost4 + routeIntentsExpectedHost5
- cellName = main.params[ 'ENV' ][ 'cellName' ]
- ONOS1Ip = main.params[ 'CTRL' ][ 'ip1' ]
- main.step( "Set cell for ONOS-cli environment" )
- main.ONOScli.setCell( cellName )
- verifyResult = main.ONOSbench.verifyCell()
-
- main.log.report( "Removing raft logs" )
- main.ONOSbench.onosRemoveRaftLogs()
- main.log.report( "Uninstalling ONOS" )
- main.ONOSbench.onosUninstall( ONOS1Ip )
-
- main.step( "Installing ONOS package" )
- onos1InstallResult = main.ONOSbench.onosInstall(
- options="-f", node=ONOS1Ip )
-
- main.step( "Checking if ONOS is up yet" )
- time.sleep( 150 )
- onos1Isup = main.ONOSbench.isup( ONOS1Ip )
- if not onos1Isup:
- main.log.report( "ONOS1 didn't start!" )
-
- main.step( "Start ONOS-cli" )
-
- main.ONOScli.startOnosCli( ONOS1Ip )
-
main.step( "Get devices in the network" )
listResult = main.ONOScli.devices( jsonFormat=False )
main.log.info( listResult )
@@ -138,8 +175,8 @@
prefixesHostX = main.QuaggaCliHost.generatePrefixes( str( i ), 10 )
main.log.info( prefixesHostX )
for prefix in prefixesHostX:
- allRoutesExpected.append(
- prefix + "/" + "192.168.40." + str( i - 100 ) )
+ allRoutesExpected.append( prefix + "/" + "192.168.40."
+ + str( i - 100 ) )
routeIntentsExpectedHostX = \
main.QuaggaCliHost.generateExpectedOnePeerRouteIntents(
@@ -158,13 +195,19 @@
QuaggaCliHostX.addRoutes( prefixesHostX, 1 )
time.sleep( 60 )
-
# get routes inside SDN-IP
getRoutesResult = main.ONOScli.routes( jsonFormat=True )
# parse routes from ONOS CLI
- allRoutesActual = \
- main.QuaggaCliHost3.extractActualRoutes( getRoutesResult )
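+ # Pick the routes parser that matches the ONOS branch, since the output format differs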
+ if branchName == "master":
+ allRoutesActual = \
+ main.QuaggaCliHost3.extractActualRoutesMaster( getRoutesResult )
+ elif branchName == "onos-1.0":
+ allRoutesActual = \
+ main.QuaggaCliHost3.extractActualRoutesOneDotZero( getRoutesResult )
+ else:
+ main.log("ONOS is on wrong branch")
+ exit
allRoutesStrExpected = str( sorted( allRoutesExpected ) )
allRoutesStrActual = str( allRoutesActual ).replace( 'u', "" )
@@ -189,7 +232,7 @@
main.step( "Check MultiPointToSinglePointIntent intents installed" )
# routeIntentsExpected are generated when generating routes
- # get rpoute intents from ONOS CLI
+ # get route intents from ONOS CLI
routeIntentsActual = \
main.QuaggaCliHost3.extractActualRouteIntents(
getIntentsResult )
@@ -243,7 +286,7 @@
"***PointToPointIntent Intents in SDN-IP are wrong!***" )
#============================= Ping Test ========================
- # wait until all MultiPointToSinglePoint
+ # Wait until all MultiPointToSinglePoint intents are in system
time.sleep( 20 )
pingTestScript = "~/SDNIP/test-tools/CASE4-ping-as2host.sh"
pingTestResultsFile = \
@@ -306,22 +349,19 @@
time.sleep( 20 )
pingTestScript = "~/SDNIP/test-tools/CASE4-ping-as2host.sh"
pingTestResultsFile = \
- "~/SDNIP/SdnIpIntentDemo/log/CASE4-ping-results-after-delete-routes-" \
+ "~/SDNIP/SdnIpIntentDemo/log/CASE4-ping-results-after-delete-routes-"\
+ strftime( "%Y-%m-%d_%H:%M:%S", localtime() ) + ".txt"
pingTestResults = main.QuaggaCliHost.pingTest(
"1.168.30.100", pingTestScript, pingTestResultsFile )
main.log.info( pingTestResults )
time.sleep( 100 )
- # main.step( "Test whether Mininet is started" )
- # main.Mininet2.handle.sendline( "xterm host1" )
- # main.Mininet2.handle.expect( "mininet>" )
-
def CASE3( self, main ):
"""
Test the SDN-IP functionality
allRoutesExpected: all expected routes for all BGP peers
- routeIntentsExpected: all expected MultiPointToSinglePointIntent intents
+ routeIntentsExpected: all expected MultiPointToSinglePointIntent \
+ intents
bgpIntentsExpected: expected PointToPointIntent intents
allRoutesActual: all routes from ONOS CLI
routeIntentsActual: actual MultiPointToSinglePointIntent intents from \
@@ -338,7 +378,7 @@
environment and test new drivers" )
# SDNIPJSONFILEPATH = "../tests/SdnIpTest/sdnip.json"
SDNIPJSONFILEPATH = \
- "/home/admin/workspace/onos/tools/package/config/sdnip.json"
+ "/home/admin/ONOS/tools/package/config/sdnip.json"
# all expected routes for all BGP peers
allRoutesExpected = []
main.step( "Start to generate routes for all BGP peers" )
diff --git a/TestON/tests/TopoPerfNext/Backup/TopoPerfNext.py b/TestON/tests/TopoPerfNext/Backup/TopoPerfNext.py
deleted file mode 100644
index cc40b94..0000000
--- a/TestON/tests/TopoPerfNext/Backup/TopoPerfNext.py
+++ /dev/null
@@ -1,1710 +0,0 @@
-#TopoPerfNext
-#
-#Topology Performance test for ONOS-next
-#
-#andrew@onlab.us
-#
-#If your machine does not come with numpy
-#run the following command:
-#sudo apt-get install python-numpy python-scipy
-
-import time
-import sys
-import os
-import re
-
-class TopoPerfNext:
- def __init__(self):
- self.default = ''
-
- def CASE1(self, main):
- '''
- ONOS startup sequence
- '''
- import time
-
- ## Global cluster count for scale-out purposes
- global cluster_count
- #Set initial cluster count
- cluster_count = 1
- ##
-
- cell_name = main.params['ENV']['cellName']
-
- git_pull = main.params['GIT']['autoPull']
- checkout_branch = main.params['GIT']['checkout']
-
- ONOS1_ip = main.params['CTRL']['ip1']
- ONOS2_ip = main.params['CTRL']['ip2']
- ONOS3_ip = main.params['CTRL']['ip3']
-
- #### Hardcoded ONOS nodes particular to my env ####
- ONOS4_ip = "10.128.174.4"
- ONOS5_ip = "10.128.174.5"
- ONOS6_ip = "10.128.174.6"
- ONOS7_ip = "10.128.174.7"
- #### ####
-
- MN1_ip = main.params['MN']['ip1']
- BENCH_ip = main.params['BENCH']['ip']
-
- topo_cfg_file = main.params['TEST']['topo_config_file']
- topo_cfg_name = main.params['TEST']['topo_config_name']
-
- main.case("Setting up test environment")
- main.log.info("Copying topology event accumulator config"+\
- " to ONOS /package/etc")
- main.ONOSbench.handle.sendline("cp ~/"+\
- topo_cfg_file+\
- " ~/ONOS/tools/package/etc/"+\
- topo_cfg_name)
- main.ONOSbench.handle.expect("\$")
-
- main.log.report("Setting up test environment")
-
- main.step("Cleaning previously installed ONOS if any")
- main.ONOSbench.onos_uninstall(node_ip=ONOS2_ip)
- main.ONOSbench.onos_uninstall(node_ip=ONOS3_ip)
- main.ONOSbench.onos_uninstall(node_ip=ONOS4_ip)
- main.ONOSbench.onos_uninstall(node_ip=ONOS5_ip)
- main.ONOSbench.onos_uninstall(node_ip=ONOS6_ip)
- main.ONOSbench.onos_uninstall(node_ip=ONOS7_ip)
-
- main.step("Creating cell file")
- cell_file_result = main.ONOSbench.create_cell_file(
- BENCH_ip, cell_name, MN1_ip, "onos-core,onos-app-metrics",
- ONOS1_ip)
-
- main.step("Applying cell file to environment")
- cell_apply_result = main.ONOSbench.set_cell(cell_name)
- verify_cell_result = main.ONOSbench.verify_cell()
-
- #NOTE: This step may be removed after proper
- # copy cat log functionality
- main.step("Removing raft/copy-cat logs from ONOS nodes")
- main.ONOSbench.onos_remove_raft_logs()
- time.sleep(30)
-
- main.step("Git checkout and pull "+checkout_branch)
- if git_pull == 'on':
- checkout_result = \
- main.ONOSbench.git_checkout(checkout_branch)
- pull_result = main.ONOSbench.git_pull()
- else:
- checkout_result = main.TRUE
- pull_result = main.TRUE
- main.log.info("Skipped git checkout and pull")
-
- main.log.report("Commit information - ")
- main.ONOSbench.get_version(report=True)
-
- main.step("Using mvn clean & install")
- mvn_result = main.ONOSbench.clean_install()
- mvn_result = main.TRUE
-
- main.step("Set cell for ONOS cli env")
- main.ONOS1cli.set_cell(cell_name)
- #main.ONOS2cli.set_cell(cell_name)
- #main.ONOS3cli.set_cell(cell_name)
-
- main.step("Creating ONOS package")
- package_result = main.ONOSbench.onos_package()
-
- main.step("Installing ONOS package")
- install1_result = main.ONOSbench.onos_install(node=ONOS1_ip)
- #install2_result = main.ONOSbench.onos_install(node=ONOS2_ip)
- #install3_result = main.ONOSbench.onos_install(node=ONOS3_ip)
-
- time.sleep(10)
-
- main.step("Start onos cli")
- cli1 = main.ONOS1cli.start_onos_cli(ONOS1_ip)
- #cli2 = main.ONOS2cli.start_onos_cli(ONOS2_ip)
- #cli3 = main.ONOS3cli.start_onos_cli(ONOS3_ip)
-
- utilities.assert_equals(expect=main.TRUE,
- actual= cell_file_result and cell_apply_result and\
- verify_cell_result and checkout_result and\
- pull_result and mvn_result and\
- install1_result, #and install2_result and\
- #install3_result,
- onpass="Test Environment setup successful",
- onfail="Failed to setup test environment")
-
- def CASE2(self, main):
- '''
- Assign s1 to ONOS1 and measure latency
-
- There are 4 levels of latency measurements to this test:
- 1) End-to-end measurement: Complete end-to-end measurement
- from TCP (SYN/ACK) handshake to Graph change
- 2) OFP-to-graph measurement: 'ONOS processing' snippet of
- measurement from OFP Vendor message to Graph change
- 3) OFP-to-device measurement: 'ONOS processing without
- graph change' snippet of measurement from OFP vendor
- message to Device change timestamp
- 4) T0-to-device measurement: Measurement that includes
- the switch handshake to devices timestamp without
- the graph view change. (TCP handshake -> Device
- change)
- '''
- import time
- import subprocess
- import json
- import requests
- import os
- import numpy
- global cluster_count
-
- ONOS1_ip = main.params['CTRL']['ip1']
- ONOS2_ip = main.params['CTRL']['ip2']
- ONOS3_ip = main.params['CTRL']['ip3']
- ONOS4_ip = main.params['CTRL']['ip4']
- ONOS5_ip = main.params['CTRL']['ip5']
- ONOS6_ip = main.params['CTRL']['ip6']
- ONOS7_ip = main.params['CTRL']['ip7']
-
- ONOS_user = main.params['CTRL']['user']
-
- default_sw_port = main.params['CTRL']['port1']
-
- #Number of iterations of case
- num_iter = main.params['TEST']['numIter']
- #Number of first 'x' iterations to ignore:
- iter_ignore = int(main.params['TEST']['iterIgnore'])
-
- #Timestamp 'keys' for json metrics output.
- #These are subject to change, hence moved into params
- deviceTimestamp = main.params['JSON']['deviceTimestamp']
- graphTimestamp = main.params['JSON']['graphTimestamp']
-
- debug_mode = main.params['TEST']['debugMode']
- onos_log = main.params['TEST']['onosLogFile']
-
- #Threshold for the test
- threshold_str = main.params['TEST']['singleSwThreshold']
- threshold_obj = threshold_str.split(",")
- threshold_min = int(threshold_obj[0])
- threshold_max = int(threshold_obj[1])
-
- #List of switch add latency collected from
- #all iterations
- latency_end_to_end_list = []
- latency_ofp_to_graph_list = []
- latency_ofp_to_device_list = []
- latency_t0_to_device_list = []
- latency_tcp_to_ofp_list = []
-
- #Directory/file to store tshark results
- tshark_of_output = "/tmp/tshark_of_topo.txt"
- tshark_tcp_output = "/tmp/tshark_tcp_topo.txt"
-
- #String to grep in tshark output
- tshark_tcp_string = "TCP 74 "+default_sw_port
- tshark_of_string = "OFP 86 Vendor"
-
- #Initialize assertion to TRUE
- assertion = main.TRUE
-
- local_time = time.strftime('%x %X')
- local_time = local_time.replace("/","")
- local_time = local_time.replace(" ","_")
- local_time = local_time.replace(":","")
- if debug_mode == 'on':
- main.ONOS1.tshark_pcap("eth0",
- "/tmp/single_sw_lat_pcap_"+local_time)
-
- main.log.info("Debug mode is on")
-
- main.log.report("Latency of adding one switch to controller")
- main.log.report("First "+str(iter_ignore)+" iterations ignored"+
- " for jvm warmup time")
- main.log.report("Total iterations of test: "+str(num_iter))
-
- for i in range(0, int(num_iter)):
- main.log.info("Starting tshark capture")
-
- #* TCP [ACK, SYN] is used as t0_a, the
- # very first "exchange" between ONOS and
- # the switch for end-to-end measurement
- #* OFP [Stats Reply] is used for t0_b
- # the very last OFP message between ONOS
- # and the switch for ONOS measurement
- main.ONOS1.tshark_grep(tshark_tcp_string,
- tshark_tcp_output)
- main.ONOS1.tshark_grep(tshark_of_string,
- tshark_of_output)
-
- #Wait and ensure tshark is started and
- #capturing
- time.sleep(10)
-
- main.log.info("Assigning s1 to controller")
-
- main.Mininet1.assign_sw_controller(sw="1",
- ip1=ONOS1_ip, port1=default_sw_port)
-
- #Wait and ensure switch is assigned
- #before stopping tshark
- time.sleep(30)
-
- main.log.info("Stopping all Tshark processes")
- main.ONOS1.stop_tshark()
-
- #tshark output is saved in ONOS. Use subprocess
- #to copy over files to TestON for parsing
- main.log.info("Copying over tshark files")
-
- #TCP CAPTURE ****
- #Copy the tshark output from ONOS machine to
- #TestON machine in tshark_tcp_output directory>file
- os.system("scp "+ONOS_user+"@"+ONOS1_ip+":"+
- tshark_tcp_output+" /tmp/")
- tcp_file = open(tshark_tcp_output, 'r')
- temp_text = tcp_file.readline()
- temp_text = temp_text.split(" ")
-
- main.log.info("Object read in from TCP capture: "+
- str(temp_text))
- if len(temp_text) > 1:
- t0_tcp = float(temp_text[1])*1000.0
- else:
- main.log.error("Tshark output file for TCP"+
- " returned unexpected results")
- t0_tcp = 0
- assertion = main.FALSE
-
- tcp_file.close()
- #****************
-
- #OF CAPTURE ****
- os.system("scp "+ONOS_user+"@"+ONOS1_ip+":"+
- tshark_of_output+" /tmp/")
- of_file = open(tshark_of_output, 'r')
-
- line_ofp = ""
- #Read until last line of file
- while True:
- temp_text = of_file.readline()
- if temp_text !='':
- line_ofp = temp_text
- else:
- break
- obj = line_ofp.split(" ")
-
- main.log.info("Object read in from OFP capture: "+
- str(line_ofp))
-
- if len(line_ofp) > 1:
- t0_ofp = float(obj[1])*1000.0
- else:
- main.log.error("Tshark output file for OFP"+
- " returned unexpected results")
- t0_ofp = 0
- assertion = main.FALSE
-
- of_file.close()
- #****************
-
- json_str_1 = main.ONOS1cli.topology_events_metrics()
- #Initialize scale-out variables
- json_str_2 = ""
- json_str_3 = ""
- json_str_4 = ""
- json_str_5 = ""
- json_str_6 = ""
- json_str_7 = ""
-
- json_obj_1 = json.loads(json_str_1)
- json_obj_2 = json.loads(json_str_2)
- json_obj_3 = json.loads(json_str_3)
- #Initialize scale-out variables
- json_obj_4 = ""
- json_obj_5 = ""
- json_obj_6 = ""
- json_obj_7 = ""
-
- #Include scale-out measurements when applicable
- if cluster_count == 5:
- json_str_4 = main.ONOS4cli.topology_events_metrics()
- json_str_5 = main.ONOS5cli.topology_events_metrics()
-
- json_obj_4 = json.loads(json_str_4)
- json_obj_5 = json.loads(json_str_5)
- elif cluster_count == 6:
- main.log.info("TODO: create even number cluster events")
- elif cluster_count == 7:
- json_str_6 = main.ONOS6cli.topology_events_metrics()
- json_str_7 = main.ONOS7cli.topology_events_metrics()
-
- json_obj_6 = json.loads(json_str_6)
- json_obj_7 = json.loads(json_str_7)
-
- #Obtain graph timestamp. This timestamp captures
- #the epoch time at which the topology graph was updated.
- graph_timestamp_1 = \
- json_obj_1[graphTimestamp]['value']
- graph_timestamp_2 = \
- json_obj_2[graphTimestamp]['value']
- graph_timestamp_3 = \
- json_obj_3[graphTimestamp]['value']
-
- #Obtain device timestamp. This timestamp captures
- #the epoch time at which the device event happened
- device_timestamp_1 = \
- json_obj_1[deviceTimestamp]['value']
- device_timestamp_2 = \
- json_obj_2[deviceTimestamp]['value']
- device_timestamp_3 = \
- json_obj_3[deviceTimestamp]['value']
-
- #t0 to device processing latency
- delta_device_1 = int(device_timestamp_1) - int(t0_tcp)
- delta_device_2 = int(device_timestamp_2) - int(t0_tcp)
- delta_device_3 = int(device_timestamp_3) - int(t0_tcp)
-
- #Get average of delta from all instances
- avg_delta_device = \
- (int(delta_device_1)+\
- int(delta_device_2)+\
- int(delta_device_3)) / 3
-
- #Ensure avg delta meets the threshold before appending
- if avg_delta_device > 0.0 and avg_delta_device < 10000\
- and int(i) > iter_ignore:
- latency_t0_to_device_list.append(avg_delta_device)
- else:
- main.log.info("Results for t0-to-device ignored"+\
- "due to excess in threshold / warmup iteration.")
-
- #t0 to graph processing latency (end-to-end)
- delta_graph_1 = int(graph_timestamp_1) - int(t0_tcp)
- delta_graph_2 = int(graph_timestamp_2) - int(t0_tcp)
- delta_graph_3 = int(graph_timestamp_3) - int(t0_tcp)
-
- #Get average of delta from all instances
- #TODO: use max delta graph
- #max_delta_graph = max(three)
- avg_delta_graph = \
- (int(delta_graph_1)+\
- int(delta_graph_2)+\
- int(delta_graph_3)) / 3
-
- #Ensure avg delta meets the threshold before appending
- if avg_delta_graph > 0.0 and avg_delta_graph < 10000\
- and int(i) > iter_ignore:
- latency_end_to_end_list.append(avg_delta_graph)
- else:
- main.log.info("Results for end-to-end ignored"+\
- "due to excess in threshold")
-
- #ofp to graph processing latency (ONOS processing)
- delta_ofp_graph_1 = int(graph_timestamp_1) - int(t0_ofp)
- delta_ofp_graph_2 = int(graph_timestamp_2) - int(t0_ofp)
- delta_ofp_graph_3 = int(graph_timestamp_3) - int(t0_ofp)
-
- avg_delta_ofp_graph = \
- (int(delta_ofp_graph_1)+\
- int(delta_ofp_graph_2)+\
- int(delta_ofp_graph_3)) / 3
-
- if avg_delta_ofp_graph > threshold_min \
- and avg_delta_ofp_graph < threshold_max\
- and int(i) > iter_ignore:
- latency_ofp_to_graph_list.append(avg_delta_ofp_graph)
- elif avg_delta_ofp_graph > (-10) and \
- avg_delta_ofp_graph < 0.0 and\
- int(i) > iter_ignore:
- main.log.info("Sub-millisecond result likely; "+
- "negative result was rounded to 0")
- #NOTE: Current metrics framework does not
- #support sub-millisecond accuracy. Therefore,
- #if the result is negative, we can reasonably
- #conclude sub-millisecond results and just
- #append the best rounded effort - 0 ms.
- latency_ofp_to_graph_list.append(0)
- else:
- main.log.info("Results for ofp-to-graph "+\
- "ignored due to excess in threshold")
-
- #ofp to device processing latency (ONOS processing)
- delta_ofp_device_1 = float(device_timestamp_1) - float(t0_ofp)
- delta_ofp_device_2 = float(device_timestamp_2) - float(t0_ofp)
- delta_ofp_device_3 = float(device_timestamp_3) - float(t0_ofp)
-
- avg_delta_ofp_device = \
- (float(delta_ofp_device_1)+\
- float(delta_ofp_device_2)+\
- float(delta_ofp_device_3)) / 3
-
- #NOTE: ofp - delta measurements are occasionally negative
- # due to system time misalignment.
- latency_ofp_to_device_list.append(avg_delta_ofp_device)
-
- delta_ofp_tcp = int(t0_ofp) - int(t0_tcp)
- if delta_ofp_tcp > threshold_min \
- and delta_ofp_tcp < threshold_max and\
- int(i) > iter_ignore:
- latency_tcp_to_ofp_list.append(delta_ofp_tcp)
- else:
- main.log.info("Results fo tcp-to-ofp "+\
- "ignored due to excess in threshold")
-
- #TODO:
- #Fetch logs upon threshold excess
-
- main.log.info("ONOS1 delta end-to-end: "+
- str(delta_graph_1) + " ms")
- main.log.info("ONOS2 delta end-to-end: "+
- str(delta_graph_2) + " ms")
- main.log.info("ONOS3 delta end-to-end: "+
- str(delta_graph_3) + " ms")
-
- main.log.info("ONOS1 delta OFP - graph: "+
- str(delta_ofp_graph_1) + " ms")
- main.log.info("ONOS2 delta OFP - graph: "+
- str(delta_ofp_graph_2) + " ms")
- main.log.info("ONOS3 delta OFP - graph: "+
- str(delta_ofp_graph_3) + " ms")
-
- main.log.info("ONOS1 delta device - t0: "+
- str(delta_device_1) + " ms")
- main.log.info("ONOS2 delta device - t0: "+
- str(delta_device_2) + " ms")
- main.log.info("ONOS3 delta device - t0: "+
- str(delta_device_3) + " ms")
-
- main.log.info("TCP to OFP delta: "+
- str(delta_ofp_tcp) + " ms")
- #main.log.info("ONOS1 delta OFP - device: "+
- # str(delta_ofp_device_1) + " ms")
- #main.log.info("ONOS2 delta OFP - device: "+
- # str(delta_ofp_device_2) + " ms")
- #main.log.info("ONOS3 delta OFP - device: "+
- # str(delta_ofp_device_3) + " ms")
-
- main.step("Remove switch from controller")
- main.Mininet1.delete_sw_controller("s1")
-
- time.sleep(5)
-
- #END of for loop iteration
-
- #If there is at least 1 element in each list,
- #pass the test case
- if len(latency_end_to_end_list) > 0 and\
- len(latency_ofp_to_graph_list) > 0 and\
- len(latency_ofp_to_device_list) > 0 and\
- len(latency_t0_to_device_list) > 0 and\
- len(latency_tcp_to_ofp_list) > 0:
- assertion = main.TRUE
- elif len(latency_end_to_end_list) == 0:
- #The appending of 0 here is to prevent
- #the min,max,sum functions from failing
- #below
- latency_end_to_end_list.append(0)
- assertion = main.FALSE
- elif len(latency_ofp_to_graph_list) == 0:
- latency_ofp_to_graph_list.append(0)
- assertion = main.FALSE
- elif len(latency_ofp_to_device_list) == 0:
- latency_ofp_to_device_list.append(0)
- assertion = main.FALSE
- elif len(latency_t0_to_device_list) == 0:
- latency_t0_to_device_list.append(0)
- assertion = main.FALSE
- elif len(latency_tcp_to_ofp_list) == 0:
- latency_tcp_to_ofp_list.append(0)
- assertion = main.FALSE
-
- #Calculate min, max, avg of latency lists
- latency_end_to_end_max = \
- int(max(latency_end_to_end_list))
- latency_end_to_end_min = \
- int(min(latency_end_to_end_list))
- latency_end_to_end_avg = \
- (int(sum(latency_end_to_end_list)) / \
- len(latency_end_to_end_list))
- latency_end_to_end_std_dev = \
- str(round(numpy.std(latency_end_to_end_list),1))
-
- latency_ofp_to_graph_max = \
- int(max(latency_ofp_to_graph_list))
- latency_ofp_to_graph_min = \
- int(min(latency_ofp_to_graph_list))
- latency_ofp_to_graph_avg = \
- (int(sum(latency_ofp_to_graph_list)) / \
- len(latency_ofp_to_graph_list))
- latency_ofp_to_graph_std_dev = \
- str(round(numpy.std(latency_ofp_to_graph_list),1))
-
- latency_ofp_to_device_max = \
- int(max(latency_ofp_to_device_list))
- latency_ofp_to_device_min = \
- int(min(latency_ofp_to_device_list))
- latency_ofp_to_device_avg = \
- (int(sum(latency_ofp_to_device_list)) / \
- len(latency_ofp_to_device_list))
- latency_ofp_to_device_std_dev = \
- str(round(numpy.std(latency_ofp_to_device_list),1))
-
- latency_t0_to_device_max = \
- int(max(latency_t0_to_device_list))
- latency_t0_to_device_min = \
- int(min(latency_t0_to_device_list))
- latency_t0_to_device_avg = \
- (int(sum(latency_t0_to_device_list)) / \
- len(latency_t0_to_device_list))
- latency_ofp_to_device_std_dev = \
- str(round(numpy.std(latency_t0_to_device_list),1))
-
- latency_tcp_to_ofp_max = \
- int(max(latency_tcp_to_ofp_list))
- latency_tcp_to_ofp_min = \
- int(min(latency_tcp_to_ofp_list))
- latency_tcp_to_ofp_avg = \
- (int(sum(latency_tcp_to_ofp_list)) / \
- len(latency_tcp_to_ofp_list))
- latency_tcp_to_ofp_std_dev = \
- str(round(numpy.std(latency_tcp_to_ofp_list),1))
-
- main.log.report("Switch add - End-to-end latency: "+\
- "Avg: "+str(latency_end_to_end_avg)+" ms "+
- "Std Deviation: "+latency_end_to_end_std_dev+" ms")
- main.log.report("Switch add - OFP-to-Graph latency: "+\
- "Note: results are not accurate to sub-millisecond. "+
- "Any sub-millisecond results are rounded to 0 ms. ")
- main.log.report("Avg: "+str(latency_ofp_to_graph_avg)+" ms "+
- "Std Deviation: "+latency_ofp_to_graph_std_dev+" ms")
- main.log.report("Switch add - TCP-to-OFP latency: "+\
- "Avg: "+str(latency_tcp_to_ofp_avg)+" ms "+
- "Std Deviation: "+latency_tcp_to_ofp_std_dev+" ms")
-
- if debug_mode == 'on':
- main.ONOS1.cp_logs_to_dir("/opt/onos/log/karaf.log",
- "/tmp/", copy_file_name="sw_lat_karaf")
-
- utilities.assert_equals(expect=main.TRUE, actual=assertion,
- onpass="Switch latency test successful",
- onfail="Switch latency test failed")
-
- def CASE3(self, main):
- '''
- Bring port up / down and measure latency.
- Port enable / disable is simulated by ifconfig up / down
-
- In ONOS-next, we must ensure that the port we are
- manipulating is connected to another switch with a valid
- connection. Otherwise, graph view will not be updated.
- '''
- import time
- import subprocess
- import os
- import requests
- import json
- import numpy
-
- ONOS1_ip = main.params['CTRL']['ip1']
- ONOS2_ip = main.params['CTRL']['ip2']
- ONOS3_ip = main.params['CTRL']['ip3']
- ONOS_user = main.params['CTRL']['user']
-
- default_sw_port = main.params['CTRL']['port1']
-
- assertion = main.TRUE
- #Number of iterations of case
- num_iter = main.params['TEST']['numIter']
-
- #Timestamp 'keys' for json metrics output.
- #These are subject to change, hence moved into params
- deviceTimestamp = main.params['JSON']['deviceTimestamp']
- graphTimestamp = main.params['JSON']['graphTimestamp']
-
- debug_mode = main.params['TEST']['debugMode']
-
- local_time = time.strftime('%x %X')
- local_time = local_time.replace("/","")
- local_time = local_time.replace(" ","_")
- local_time = local_time.replace(":","")
- if debug_mode == 'on':
- main.ONOS1.tshark_pcap("eth0",
- "/tmp/port_lat_pcap_"+local_time)
-
- #Threshold for this test case
- up_threshold_str = main.params['TEST']['portUpThreshold']
- down_threshold_str = main.params['TEST']['portDownThreshold']
-
- up_threshold_obj = up_threshold_str.split(",")
- down_threshold_obj = down_threshold_str.split(",")
-
- up_threshold_min = int(up_threshold_obj[0])
- up_threshold_max = int(up_threshold_obj[1])
-
- down_threshold_min = int(down_threshold_obj[0])
- down_threshold_max = int(down_threshold_obj[1])
-
- #NOTE: Some hardcoded variables you may need to configure
- # besides the params
-
- tshark_port_status = "OFP 130 Port Status"
-
- tshark_port_up = "/tmp/tshark_port_up.txt"
- tshark_port_down = "/tmp/tshark_port_down.txt"
- interface_config = "s1-eth1"
-
- main.log.report("Port enable / disable latency")
- main.log.report("Simulated by ifconfig up / down")
- main.log.report("Total iterations of test: "+str(num_iter))
-
- main.step("Assign switches s1 and s2 to controller 1")
- main.Mininet1.assign_sw_controller(sw="1",ip1=ONOS1_ip,
- port1=default_sw_port)
- main.Mininet1.assign_sw_controller(sw="2",ip1=ONOS1_ip,
- port1=default_sw_port)
-
- #Give enough time for metrics to propagate the
- #assign controller event. Otherwise, these events may
- #carry over to our measurements
- time.sleep(15)
-
- port_up_device_to_ofp_list = []
- port_up_graph_to_ofp_list = []
- port_down_device_to_ofp_list = []
- port_down_graph_to_ofp_list = []
-
- for i in range(0, int(num_iter)):
- main.step("Starting wireshark capture for port status down")
- main.ONOS1.tshark_grep(tshark_port_status,
- tshark_port_down)
-
- time.sleep(5)
-
- #Disable interface that is connected to switch 2
- main.step("Disable port: "+interface_config)
- main.Mininet1.handle.sendline("sh ifconfig "+
- interface_config+" down")
- main.Mininet1.handle.expect("mininet>")
-
- time.sleep(3)
- main.ONOS1.tshark_stop()
-
- main.step("Obtain t1 by metrics call")
- json_str_up_1 = main.ONOS1cli.topology_events_metrics()
- json_str_up_2 = main.ONOS2cli.topology_events_metrics()
- json_str_up_3 = main.ONOS3cli.topology_events_metrics()
-
- json_obj_1 = json.loads(json_str_up_1)
- json_obj_2 = json.loads(json_str_up_2)
- json_obj_3 = json.loads(json_str_up_3)
-
- #Copy tshark output file from ONOS to TestON instance
- #/tmp directory
- os.system("scp "+ONOS_user+"@"+ONOS1_ip+":"+
- tshark_port_down+" /tmp/")
-
- f_port_down = open(tshark_port_down, 'r')
- #Get first line of port down event from tshark
- f_line = f_port_down.readline()
- obj_down = f_line.split(" ")
- if len(f_line) > 0:
- timestamp_begin_pt_down = int(float(obj_down[1])*1000)
- main.log.info("Port down begin timestamp: "+
- str(timestamp_begin_pt_down))
- else:
- main.log.info("Tshark output file returned unexpected"+
- " results: "+str(obj_down))
- timestamp_begin_pt_down = 0
-
- f_port_down.close()
-
- main.log.info("TEST tshark obj: "+str(obj_down))
-
- time.sleep(3)
-
- #Obtain graph timestamp. This timestamp captures
- #the epoch time at which the topology graph was updated.
- graph_timestamp_1 = \
- json_obj_1[graphTimestamp]['value']
- graph_timestamp_2 = \
- json_obj_2[graphTimestamp]['value']
- graph_timestamp_3 = \
- json_obj_3[graphTimestamp]['value']
-
- main.log.info("TEST graph timestamp ONOS1: "+
- str(graph_timestamp_1))
-
- #Obtain device timestamp. This timestamp captures
- #the epoch time at which the device event happened
- device_timestamp_1 = \
- json_obj_1[deviceTimestamp]['value']
- device_timestamp_2 = \
- json_obj_2[deviceTimestamp]['value']
- device_timestamp_3 = \
- json_obj_3[deviceTimestamp]['value']
-
- #Get delta between graph event and OFP
- pt_down_graph_to_ofp_1 = int(graph_timestamp_1) -\
- int(timestamp_begin_pt_down)
- pt_down_graph_to_ofp_2 = int(graph_timestamp_2) -\
- int(timestamp_begin_pt_down)
- pt_down_graph_to_ofp_3 = int(graph_timestamp_3) -\
- int(timestamp_begin_pt_down)
-
- #Get delta between device event and OFP
- pt_down_device_to_ofp_1 = int(device_timestamp_1) -\
- int(timestamp_begin_pt_down)
- pt_down_device_to_ofp_2 = int(device_timestamp_2) -\
- int(timestamp_begin_pt_down)
- pt_down_device_to_ofp_3 = int(device_timestamp_3) -\
- int(timestamp_begin_pt_down)
-
- #Calculate average across clusters
- pt_down_graph_to_ofp_avg =\
- (int(pt_down_graph_to_ofp_1) +
- int(pt_down_graph_to_ofp_2) +
- int(pt_down_graph_to_ofp_3)) / 3
- pt_down_device_to_ofp_avg = \
- (int(pt_down_device_to_ofp_1) +
- int(pt_down_device_to_ofp_2) +
- int(pt_down_device_to_ofp_3)) / 3
-
- if pt_down_graph_to_ofp_avg > down_threshold_min and \
- pt_down_graph_to_ofp_avg < down_threshold_max:
- port_down_graph_to_ofp_list.append(
- pt_down_graph_to_ofp_avg)
- main.log.info("Port down: graph to ofp avg: "+
- str(pt_down_graph_to_ofp_avg) + " ms")
- else:
- main.log.info("Average port down graph-to-ofp result" +
- " exceeded the threshold: "+
- str(pt_down_graph_to_ofp_avg))
-
- if pt_down_device_to_ofp_avg > 0 and \
- pt_down_device_to_ofp_avg < 1000:
- port_down_device_to_ofp_list.append(
- pt_down_device_to_ofp_avg)
- main.log.info("Port down: device to ofp avg: "+
- str(pt_down_device_to_ofp_avg) + " ms")
- else:
- main.log.info("Average port down device-to-ofp result" +
- " exceeded the threshold: "+
- str(pt_down_device_to_ofp_avg))
-
- #Port up events
- main.step("Enable port and obtain timestamp")
- main.step("Starting wireshark capture for port status up")
- main.ONOS1.tshark_grep(tshark_port_status, tshark_port_up)
- time.sleep(5)
-
- main.Mininet1.handle.sendline("sh ifconfig "+
- interface_config+" up")
- main.Mininet1.handle.expect("mininet>")
-
- #Allow time for tshark to capture event
- time.sleep(3)
- main.ONOS1.tshark_stop()
-
- #Obtain metrics shortly afterwards
- #This timestamp captures
- #the epoch time at which the topology graph was updated.
- main.step("Obtain t1 by REST call")
- json_str_up_1 = main.ONOS1cli.topology_events_metrics()
- json_str_up_2 = main.ONOS2cli.topology_events_metrics()
- json_str_up_3 = main.ONOS3cli.topology_events_metrics()
-
- json_obj_1 = json.loads(json_str_up_1)
- json_obj_2 = json.loads(json_str_up_2)
- json_obj_3 = json.loads(json_str_up_3)
-
- os.system("scp "+ONOS_user+"@"+ONOS1_ip+":"+
- tshark_port_up+" /tmp/")
-
- f_port_up = open(tshark_port_up, 'r')
- f_line = f_port_up.readline()
- obj_up = f_line.split(" ")
- if len(f_line) > 0:
- timestamp_begin_pt_up = int(float(obj_up[1])*1000)
- main.log.info("Port up begin timestamp: "+
- str(timestamp_begin_pt_up))
- else:
- main.log.info("Tshark output file returned unexpected"+
- " results.")
- timestamp_begin_pt_up = 0
-
- f_port_up.close()
-
- graph_timestamp_1 = \
- json_obj_1[graphTimestamp]['value']
- graph_timestamp_2 = \
- json_obj_2[graphTimestamp]['value']
- graph_timestamp_3 = \
- json_obj_3[graphTimestamp]['value']
-
- #Obtain device timestamp. This timestamp captures
- #the epoch time at which the device event happened
- device_timestamp_1 = \
- json_obj_1[deviceTimestamp]['value']
- device_timestamp_2 = \
- json_obj_2[deviceTimestamp]['value']
- device_timestamp_3 = \
- json_obj_3[deviceTimestamp]['value']
-
- #Get delta between graph event and OFP
- pt_up_graph_to_ofp_1 = int(graph_timestamp_1) -\
- int(timestamp_begin_pt_up)
- pt_up_graph_to_ofp_2 = int(graph_timestamp_2) -\
- int(timestamp_begin_pt_up)
- pt_up_graph_to_ofp_3 = int(graph_timestamp_3) -\
- int(timestamp_begin_pt_up)
-
- #Get delta between device event and OFP
- pt_up_device_to_ofp_1 = int(device_timestamp_1) -\
- int(timestamp_begin_pt_up)
- pt_up_device_to_ofp_2 = int(device_timestamp_2) -\
- int(timestamp_begin_pt_up)
- pt_up_device_to_ofp_3 = int(device_timestamp_3) -\
- int(timestamp_begin_pt_up)
-
- main.log.info("ONOS1 delta G2O: "+str(pt_up_graph_to_ofp_1))
- main.log.info("ONOS2 delta G2O: "+str(pt_up_graph_to_ofp_2))
- main.log.info("ONOS3 delta G2O: "+str(pt_up_graph_to_ofp_3))
-
- main.log.info("ONOS1 delta D2O: "+str(pt_up_device_to_ofp_1))
- main.log.info("ONOS2 delta D2O: "+str(pt_up_device_to_ofp_2))
- main.log.info("ONOS3 delta D2O: "+str(pt_up_device_to_ofp_3))
-
- pt_up_graph_to_ofp_avg = \
- (int(pt_up_graph_to_ofp_1) +
- int(pt_up_graph_to_ofp_2) +
- int(pt_up_graph_to_ofp_3)) / 3
-
- pt_up_device_to_ofp_avg = \
- (int(pt_up_device_to_ofp_1) +
- int(pt_up_device_to_ofp_2) +
- int(pt_up_device_to_ofp_3)) / 3
-
- if pt_up_graph_to_ofp_avg > up_threshold_min and \
- pt_up_graph_to_ofp_avg < up_threshold_max:
- port_up_graph_to_ofp_list.append(
- pt_up_graph_to_ofp_avg)
- main.log.info("Port down: graph to ofp avg: "+
- str(pt_up_graph_to_ofp_avg) + " ms")
- else:
- main.log.info("Average port up graph-to-ofp result"+
- " exceeded the threshold: "+
- str(pt_up_graph_to_ofp_avg))
-
- if pt_up_device_to_ofp_avg > up_threshold_min and \
- pt_up_device_to_ofp_avg < up_threshold_max:
- port_up_device_to_ofp_list.append(
- pt_up_device_to_ofp_avg)
- main.log.info("Port up: device to ofp avg: "+
- str(pt_up_device_to_ofp_avg) + " ms")
- else:
- main.log.info("Average port up device-to-ofp result"+
- " exceeded the threshold: "+
- str(pt_up_device_to_ofp_avg))
-
- #END ITERATION FOR LOOP
-
- #Check all list for latency existence and set assertion
- if (port_down_graph_to_ofp_list and port_down_device_to_ofp_list\
- and port_up_graph_to_ofp_list and port_up_device_to_ofp_list):
- assertion = main.TRUE
-
- #Calculate and report latency measurements
- port_down_graph_to_ofp_min = min(port_down_graph_to_ofp_list)
- port_down_graph_to_ofp_max = max(port_down_graph_to_ofp_list)
- port_down_graph_to_ofp_avg = \
- (sum(port_down_graph_to_ofp_list) /
- len(port_down_graph_to_ofp_list))
- port_down_graph_to_ofp_std_dev = \
- str(round(numpy.std(port_down_graph_to_ofp_list),1))
-
- main.log.report("Port down graph-to-ofp "+
- "Avg: "+str(port_down_graph_to_ofp_avg)+" ms "+
- "Std Deviation: "+port_down_graph_to_ofp_std_dev+" ms")
-
- port_down_device_to_ofp_min = min(port_down_device_to_ofp_list)
- port_down_device_to_ofp_max = max(port_down_device_to_ofp_list)
- port_down_device_to_ofp_avg = \
- (sum(port_down_device_to_ofp_list) /\
- len(port_down_device_to_ofp_list))
- port_down_device_to_ofp_std_dev = \
- str(round(numpy.std(port_down_device_to_ofp_list),1))
-
- main.log.report("Port down device-to-ofp "+
- "Avg: "+str(port_down_device_to_ofp_avg)+" ms "+
- "Std Deviation: "+port_down_device_to_ofp_std_dev+" ms")
-
- port_up_graph_to_ofp_min = min(port_up_graph_to_ofp_list)
- port_up_graph_to_ofp_max = max(port_up_graph_to_ofp_list)
- port_up_graph_to_ofp_avg = \
- (sum(port_up_graph_to_ofp_list) /\
- len(port_up_graph_to_ofp_list))
- port_up_graph_to_ofp_std_dev = \
- str(round(numpy.std(port_up_graph_to_ofp_list),1))
-
- main.log.report("Port up graph-to-ofp "+
- "Avg: "+str(port_up_graph_to_ofp_avg)+" ms "+
- "Std Deviation: "+port_up_graph_to_ofp_std_dev+" ms")
-
- port_up_device_to_ofp_min = min(port_up_device_to_ofp_list)
- port_up_device_to_ofp_max = max(port_up_device_to_ofp_list)
- port_up_device_to_ofp_avg = \
- (sum(port_up_device_to_ofp_list) /\
- len(port_up_device_to_ofp_list))
- port_up_device_to_ofp_std_dev = \
- str(round(numpy.std(port_up_device_to_ofp_list),1))
-
- main.log.report("Port up device-to-ofp "+
- "Avg: "+str(port_up_device_to_ofp_avg)+" ms "+
- "Std Deviation: "+port_up_device_to_ofp_std_dev+" ms")
-
- utilities.assert_equals(expect=main.TRUE, actual=assertion,
- onpass="Port discovery latency calculation successful",
- onfail="Port discovery test failed")
-
- def CASE4(self, main):
- '''
- Link down event using loss rate 100%
-
- Important:
- Use a simple 2 switch topology with 1 link between
- the two switches. Ensure that mac addresses of the
- switches are 1 / 2 respectively
- '''
- import time
- import subprocess
- import os
- import requests
- import json
- import numpy
-
- ONOS1_ip = main.params['CTRL']['ip1']
- ONOS2_ip = main.params['CTRL']['ip2']
- ONOS3_ip = main.params['CTRL']['ip3']
- ONOS_user = main.params['CTRL']['user']
-
- default_sw_port = main.params['CTRL']['port1']
-
- #Number of iterations of case
- num_iter = main.params['TEST']['numIter']
-
- #Timestamp 'keys' for json metrics output.
- #These are subject to change, hence moved into params
- deviceTimestamp = main.params['JSON']['deviceTimestamp']
- linkTimestamp = main.params['JSON']['linkTimestamp']
- graphTimestamp = main.params['JSON']['graphTimestamp']
-
- debug_mode = main.params['TEST']['debugMode']
-
- local_time = time.strftime('%x %X')
- local_time = local_time.replace("/","")
- local_time = local_time.replace(" ","_")
- local_time = local_time.replace(":","")
- if debug_mode == 'on':
- main.ONOS1.tshark_pcap("eth0",
- "/tmp/link_lat_pcap_"+local_time)
-
- #Threshold for this test case
- up_threshold_str = main.params['TEST']['linkUpThreshold']
- down_threshold_str = main.params['TEST']['linkDownThreshold']
-
- up_threshold_obj = up_threshold_str.split(",")
- down_threshold_obj = down_threshold_str.split(",")
-
- up_threshold_min = int(up_threshold_obj[0])
- up_threshold_max = int(up_threshold_obj[1])
-
- down_threshold_min = int(down_threshold_obj[0])
- down_threshold_max = int(down_threshold_obj[1])
-
- assertion = main.TRUE
- #Link event timestamp to system time list
- link_down_link_to_system_list = []
- link_up_link_to_system_list = []
- #Graph event timestamp to system time list
- link_down_graph_to_system_list = []
- link_up_graph_to_system_list = []
-
- main.log.report("Link up / down discovery latency between "+
- "two switches")
- main.log.report("Simulated by setting loss-rate 100%")
- main.log.report("'tc qdisc add dev <intfs> root netem loss 100%'")
- main.log.report("Total iterations of test: "+str(num_iter))
-
- main.step("Assign all switches")
- main.Mininet1.assign_sw_controller(sw="1",
- ip1=ONOS1_ip, port1=default_sw_port)
- main.Mininet1.assign_sw_controller(sw="2",
- ip1=ONOS1_ip, port1=default_sw_port)
-
- main.step("Verifying switch assignment")
- result_s1 = main.Mininet1.get_sw_controller(sw="s1")
- result_s2 = main.Mininet1.get_sw_controller(sw="s2")
-
- #Allow time for events to finish before taking measurements
- time.sleep(10)
-
- link_down1 = False
- link_down2 = False
- link_down3 = False
- #Start iteration of link event test
- for i in range(0, int(num_iter)):
- main.step("Getting initial system time as t0")
-
- #System time in epoch ms
- timestamp_link_down_t0 = time.time() * 1000
- #Link down is simulated by 100% loss rate using traffic
- #control command
- main.Mininet1.handle.sendline(
- "sh tc qdisc add dev s1-eth1 root netem loss 100%")
-
- #TODO: Iterate through 'links' command to verify that
- # link s1 -> s2 went down (loop timeout 30 seconds)
- # on all 3 ONOS instances
- main.log.info("Checking ONOS for link update")
- loop_count = 0
- while( not (link_down1 and link_down2 and link_down3)\
- and loop_count < 30 ):
- json_str1 = main.ONOS1cli.links()
- json_str2 = main.ONOS2cli.links()
- json_str3 = main.ONOS3cli.links()
-
- if not (json_str1 and json_str2 and json_str3):
- main.log.error("CLI command returned error ")
- break
- else:
- json_obj1 = json.loads(json_str1)
- json_obj2 = json.loads(json_str2)
- json_obj3 = json.loads(json_str3)
- for obj1 in json_obj1:
- if '01' not in obj1['src']['device']:
- link_down1 = True
- main.log.info("Link down from "+
- "s1 -> s2 on ONOS1 detected")
- for obj2 in json_obj2:
- if '01' not in obj2['src']['device']:
- link_down2 = True
- main.log.info("Link down from "+
- "s1 -> s2 on ONOS2 detected")
- for obj3 in json_obj3:
- if '01' not in obj3['src']['device']:
- link_down3 = True
- main.log.info("Link down from "+
- "s1 -> s2 on ONOS3 detected")
-
- loop_count += 1
- #If CLI doesn't like the continuous requests
- #and exits in this loop, increase the sleep here.
- #Consequently, while loop timeout will increase
- time.sleep(1)
-
- #Give time for metrics measurement to catch up
- #NOTE: May need to be configured more accurately
- time.sleep(10)
- #If we exited the while loop and link down 1,2,3 are still
- #false, then ONOS has failed to discover link down event
- if not (link_down1 and link_down2 and link_down3):
- main.log.info("Link down discovery failed")
-
- link_down_lat_graph1 = 0
- link_down_lat_graph2 = 0
- link_down_lat_graph3 = 0
- link_down_lat_device1 = 0
- link_down_lat_device2 = 0
- link_down_lat_device3 = 0
-
- assertion = main.FALSE
- else:
- json_topo_metrics_1 =\
- main.ONOS1cli.topology_events_metrics()
- json_topo_metrics_2 =\
- main.ONOS2cli.topology_events_metrics()
- json_topo_metrics_3 =\
- main.ONOS3cli.topology_events_metrics()
- json_topo_metrics_1 = json.loads(json_topo_metrics_1)
- json_topo_metrics_2 = json.loads(json_topo_metrics_2)
- json_topo_metrics_3 = json.loads(json_topo_metrics_3)
-
- main.log.info("Obtaining graph and device timestamp")
- graph_timestamp_1 = \
- json_topo_metrics_1[graphTimestamp]['value']
- graph_timestamp_2 = \
- json_topo_metrics_2[graphTimestamp]['value']
- graph_timestamp_3 = \
- json_topo_metrics_3[graphTimestamp]['value']
-
- link_timestamp_1 = \
- json_topo_metrics_1[linkTimestamp]['value']
- link_timestamp_2 = \
- json_topo_metrics_2[linkTimestamp]['value']
- link_timestamp_3 = \
- json_topo_metrics_3[linkTimestamp]['value']
-
- if graph_timestamp_1 and graph_timestamp_2 and\
- graph_timestamp_3 and link_timestamp_1 and\
- link_timestamp_2 and link_timestamp_3:
- link_down_lat_graph1 = int(graph_timestamp_1) -\
- int(timestamp_link_down_t0)
- link_down_lat_graph2 = int(graph_timestamp_2) -\
- int(timestamp_link_down_t0)
- link_down_lat_graph3 = int(graph_timestamp_3) -\
- int(timestamp_link_down_t0)
-
- link_down_lat_link1 = int(link_timestamp_1) -\
- int(timestamp_link_down_t0)
- link_down_lat_link2 = int(link_timestamp_2) -\
- int(timestamp_link_down_t0)
- link_down_lat_link3 = int(link_timestamp_3) -\
- int(timestamp_link_down_t0)
- else:
- main.log.error("There was an error calculating"+
- " the delta for link down event")
- link_down_lat_graph1 = 0
- link_down_lat_graph2 = 0
- link_down_lat_graph3 = 0
-
- link_down_lat_device1 = 0
- link_down_lat_device2 = 0
- link_down_lat_device3 = 0
-
- main.log.info("Link down latency ONOS1 iteration "+
- str(i)+" (end-to-end): "+
- str(link_down_lat_graph1)+" ms")
- main.log.info("Link down latency ONOS2 iteration "+
- str(i)+" (end-to-end): "+
- str(link_down_lat_graph2)+" ms")
- main.log.info("Link down latency ONOS3 iteration "+
- str(i)+" (end-to-end): "+
- str(link_down_lat_graph3)+" ms")
-
- main.log.info("Link down latency ONOS1 iteration "+
- str(i)+" (link-event-to-system-timestamp): "+
- str(link_down_lat_link1)+" ms")
- main.log.info("Link down latency ONOS2 iteration "+
- str(i)+" (link-event-to-system-timestamp): "+
- str(link_down_lat_link2)+" ms")
- main.log.info("Link down latency ONOS3 iteration "+
- str(i)+" (link-event-to-system-timestamp): "+
- str(link_down_lat_link3))
-
- #Calculate avg of node calculations
- link_down_lat_graph_avg =\
- (link_down_lat_graph1 +
- link_down_lat_graph2 +
- link_down_lat_graph3) / 3
- link_down_lat_link_avg =\
- (link_down_lat_link1 +
- link_down_lat_link2 +
- link_down_lat_link3) / 3
-
- #Set threshold and append latency to list
- if link_down_lat_graph_avg > down_threshold_min and\
- link_down_lat_graph_avg < down_threshold_max:
- link_down_graph_to_system_list.append(
- link_down_lat_graph_avg)
- else:
- main.log.info("Link down latency exceeded threshold")
- main.log.info("Results for iteration "+str(i)+
- "have been omitted")
- if link_down_lat_link_avg > down_threshold_min and\
- link_down_lat_link_avg < down_threshold_max:
- link_down_link_to_system_list.append(
- link_down_lat_link_avg)
- else:
- main.log.info("Link down latency exceeded threshold")
- main.log.info("Results for iteration "+str(i)+
- "have been omitted")
-
- #NOTE: To remove loss rate and measure latency:
- # 'sh tc qdisc del dev s1-eth1 root'
- timestamp_link_up_t0 = time.time() * 1000
- main.Mininet1.handle.sendline("sh tc qdisc del dev "+
- "s1-eth1 root")
- main.Mininet1.handle.expect("mininet>")
-
- main.log.info("Checking ONOS for link update")
-
- link_down1 = True
- link_down2 = True
- link_down3 = True
- loop_count = 0
- while( (link_down1 and link_down2 and link_down3)\
- and loop_count < 30 ):
- json_str1 = main.ONOS1cli.links()
- json_str2 = main.ONOS2cli.links()
- json_str3 = main.ONOS3cli.links()
- if not (json_str1 and json_str2 and json_str3):
- main.log.error("CLI command returned error ")
- break
- else:
- json_obj1 = json.loads(json_str1)
- json_obj2 = json.loads(json_str2)
- json_obj3 = json.loads(json_str3)
-
- for obj1 in json_obj1:
- if '01' in obj1['src']['device']:
- link_down1 = False
- main.log.info("Link up from "+
- "s1 -> s2 on ONOS1 detected")
- for obj2 in json_obj2:
- if '01' in obj2['src']['device']:
- link_down2 = False
- main.log.info("Link up from "+
- "s1 -> s2 on ONOS2 detected")
- for obj3 in json_obj3:
- if '01' in obj3['src']['device']:
- link_down3 = False
- main.log.info("Link up from "+
- "s1 -> s2 on ONOS3 detected")
-
- loop_count += 1
- time.sleep(1)
-
- if (link_down1 and link_down2 and link_down3):
- main.log.info("Link up discovery failed")
-
- link_up_lat_graph1 = 0
- link_up_lat_graph2 = 0
- link_up_lat_graph3 = 0
- link_up_lat_device1 = 0
- link_up_lat_device2 = 0
- link_up_lat_device3 = 0
-
- assertion = main.FALSE
- else:
- json_topo_metrics_1 =\
- main.ONOS1cli.topology_events_metrics()
- json_topo_metrics_2 =\
- main.ONOS2cli.topology_events_metrics()
- json_topo_metrics_3 =\
- main.ONOS3cli.topology_events_metrics()
- json_topo_metrics_1 = json.loads(json_topo_metrics_1)
- json_topo_metrics_2 = json.loads(json_topo_metrics_2)
- json_topo_metrics_3 = json.loads(json_topo_metrics_3)
-
- main.log.info("Obtaining graph and device timestamp")
- graph_timestamp_1 = \
- json_topo_metrics_1[graphTimestamp]['value']
- graph_timestamp_2 = \
- json_topo_metrics_2[graphTimestamp]['value']
- graph_timestamp_3 = \
- json_topo_metrics_3[graphTimestamp]['value']
-
- link_timestamp_1 = \
- json_topo_metrics_1[linkTimestamp]['value']
- link_timestamp_2 = \
- json_topo_metrics_2[linkTimestamp]['value']
- link_timestamp_3 = \
- json_topo_metrics_3[linkTimestamp]['value']
-
- if graph_timestamp_1 and graph_timestamp_2 and\
- graph_timestamp_3 and link_timestamp_1 and\
- link_timestamp_2 and link_timestamp_3:
- link_up_lat_graph1 = int(graph_timestamp_1) -\
- int(timestamp_link_up_t0)
- link_up_lat_graph2 = int(graph_timestamp_2) -\
- int(timestamp_link_up_t0)
- link_up_lat_graph3 = int(graph_timestamp_3) -\
- int(timestamp_link_up_t0)
-
- link_up_lat_link1 = int(link_timestamp_1) -\
- int(timestamp_link_up_t0)
- link_up_lat_link2 = int(link_timestamp_2) -\
- int(timestamp_link_up_t0)
- link_up_lat_link3 = int(link_timestamp_3) -\
- int(timestamp_link_up_t0)
- else:
- main.log.error("There was an error calculating"+
-                            " the delta for link up event")
- link_up_lat_graph1 = 0
- link_up_lat_graph2 = 0
- link_up_lat_graph3 = 0
-
-                link_up_lat_link1 = 0
-                link_up_lat_link2 = 0
-                link_up_lat_link3 = 0
-
- if debug_mode == 'on':
- main.log.info("Link up latency ONOS1 iteration "+
- str(i)+" (end-to-end): "+
- str(link_up_lat_graph1)+" ms")
- main.log.info("Link up latency ONOS2 iteration "+
- str(i)+" (end-to-end): "+
- str(link_up_lat_graph2)+" ms")
- main.log.info("Link up latency ONOS3 iteration "+
- str(i)+" (end-to-end): "+
- str(link_up_lat_graph3)+" ms")
-
- main.log.info("Link up latency ONOS1 iteration "+
- str(i)+" (link-event-to-system-timestamp): "+
- str(link_up_lat_link1)+" ms")
- main.log.info("Link up latency ONOS2 iteration "+
- str(i)+" (link-event-to-system-timestamp): "+
- str(link_up_lat_link2)+" ms")
- main.log.info("Link up latency ONOS3 iteration "+
- str(i)+" (link-event-to-system-timestamp): "+
-                        str(link_up_lat_link3)+" ms")
-
- #Calculate avg of node calculations
- link_up_lat_graph_avg =\
- (link_up_lat_graph1 +
- link_up_lat_graph2 +
- link_up_lat_graph3) / 3
- link_up_lat_link_avg =\
- (link_up_lat_link1 +
- link_up_lat_link2 +
- link_up_lat_link3) / 3
-
- #Set threshold and append latency to list
- if link_up_lat_graph_avg > up_threshold_min and\
- link_up_lat_graph_avg < up_threshold_max:
- link_up_graph_to_system_list.append(
- link_up_lat_graph_avg)
- else:
- main.log.info("Link up latency exceeded threshold")
- main.log.info("Results for iteration "+str(i)+
-                        " have been omitted")
- if link_up_lat_link_avg > up_threshold_min and\
- link_up_lat_link_avg < up_threshold_max:
- link_up_link_to_system_list.append(
- link_up_lat_link_avg)
- else:
- main.log.info("Link up latency exceeded threshold")
- main.log.info("Results for iteration "+str(i)+
-                        " have been omitted")
-
- #Calculate min, max, avg of list and report
- link_down_min = min(link_down_graph_to_system_list)
- link_down_max = max(link_down_graph_to_system_list)
- link_down_avg = sum(link_down_graph_to_system_list) / \
- len(link_down_graph_to_system_list)
- link_up_min = min(link_up_graph_to_system_list)
- link_up_max = max(link_up_graph_to_system_list)
- link_up_avg = sum(link_up_graph_to_system_list) / \
- len(link_up_graph_to_system_list)
- link_down_std_dev = \
- str(round(numpy.std(link_down_graph_to_system_list),1))
- link_up_std_dev = \
- str(round(numpy.std(link_up_graph_to_system_list),1))
-
- main.log.report("Link down latency " +
- "Avg: "+str(link_down_avg)+" ms "+
- "Std Deviation: "+link_down_std_dev+" ms")
- main.log.report("Link up latency "+
- "Avg: "+str(link_up_avg)+" ms "+
- "Std Deviation: "+link_up_std_dev+" ms")
-
- utilities.assert_equals(expect=main.TRUE, actual=assertion,
- onpass="Link discovery latency calculation successful",
- onfail="Link discovery latency case failed")
-
- def CASE5(self, main):
- '''
- 100 Switch discovery latency
-
-        Important:
-            This test case can be dangerous if your machine
-            has previously set iptables rules.
-            One of the steps of the test case will flush
-            all existing iptables rules.
-        Note:
-            You can specify the number of switches in the
-            params file to adjust the switch discovery size
-            (and specify the corresponding topology in the
-            Mininet1 .topo file)
- '''
- import time
- import subprocess
- import os
- import requests
- import json
-
- ONOS1_ip = main.params['CTRL']['ip1']
- ONOS2_ip = main.params['CTRL']['ip2']
- ONOS3_ip = main.params['CTRL']['ip3']
- MN1_ip = main.params['MN']['ip1']
- ONOS_user = main.params['CTRL']['user']
-
- default_sw_port = main.params['CTRL']['port1']
-
- #Number of iterations of case
- num_iter = main.params['TEST']['numIter']
- num_sw = main.params['TEST']['numSwitch']
-
- #Timestamp 'keys' for json metrics output.
- #These are subject to change, hence moved into params
- deviceTimestamp = main.params['JSON']['deviceTimestamp']
- graphTimestamp = main.params['JSON']['graphTimestamp']
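-        #Illustrative (assumed) shape of the metrics json consumed below:
-        #  { <graphTimestamp key>: {"value": <epoch ms>},
-        #    <deviceTimestamp key>: {"value": <epoch ms>}, ... }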
-
- debug_mode = main.params['TEST']['debugMode']
-
- local_time = time.strftime('%X')
- local_time = local_time.replace("/","")
- local_time = local_time.replace(" ","_")
- local_time = local_time.replace(":","")
- if debug_mode == 'on':
- main.ONOS1.tshark_pcap("eth0",
- "/tmp/100_sw_lat_pcap_"+local_time)
-
- #Threshold for this test case
- sw_disc_threshold_str = main.params['TEST']['swDisc100Threshold']
- sw_disc_threshold_obj = sw_disc_threshold_str.split(",")
- sw_disc_threshold_min = int(sw_disc_threshold_obj[0])
- sw_disc_threshold_max = int(sw_disc_threshold_obj[1])
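-        #Expected params format (illustrative): swDisc100Threshold = "0,10000"
-        #i.e. minimum and maximum acceptable latency in ms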
-
- tshark_ofp_output = "/tmp/tshark_ofp_"+num_sw+"sw.txt"
- tshark_tcp_output = "/tmp/tshark_tcp_"+num_sw+"sw.txt"
-
- tshark_ofp_result_list = []
- tshark_tcp_result_list = []
-
- sw_discovery_lat_list = []
-
- main.case(num_sw+" Switch discovery latency")
- main.step("Assigning all switches to ONOS1")
- for i in range(1, int(num_sw)+1):
- main.Mininet1.assign_sw_controller(
- sw=str(i),
- ip1=ONOS1_ip,
- port1=default_sw_port)
-
- #Ensure that nodes are configured with ptpd
- #Just a warning message
- main.log.info("Please check ptpd configuration to ensure"+\
-                " all nodes' system times are in sync")
- time.sleep(5)
-
- for i in range(0, int(num_iter)):
-
- main.step("Set iptables rule to block incoming sw connections")
- #Set iptables rule to block incoming switch connections
- #The rule description is as follows:
- # Append to INPUT rule,
- # behavior DROP that matches following:
- # * packet type: tcp
- # * source IP: MN1_ip
- # * destination PORT: 6633
- main.ONOS1.handle.sendline(
- "sudo iptables -A INPUT -p tcp -s "+MN1_ip+
- " --dport "+default_sw_port+" -j DROP")
- main.ONOS1.handle.expect("\$")
- # Append to OUTPUT rule,
- # behavior DROP that matches following:
- # * packet type: tcp
- # * source IP: MN1_ip
- # * destination PORT: 6633
- main.ONOS1.handle.sendline(
- "sudo iptables -A OUTPUT -p tcp -s "+MN1_ip+
- " --dport "+default_sw_port+" -j DROP")
- main.ONOS1.handle.expect("\$")
- #Give time to allow rule to take effect
- #NOTE: Sleep period may need to be configured
- # based on the number of switches in the topology
- main.log.info("Please wait for switch connection to "+
- "time out")
- time.sleep(60)
-
- #Gather vendor OFP with tshark
- main.ONOS1.tshark_grep("OFP 86 Vendor",
- tshark_ofp_output)
- main.ONOS1.tshark_grep("TCP 74 ",
- tshark_tcp_output)
-
- #NOTE: Remove all iptables rule quickly (flush)
- # Before removal, obtain TestON timestamp at which
- # removal took place
- # (ensuring nodes are configured via ptp)
- # sudo iptables -F
-
- t0_system = time.time() * 1000
- main.ONOS1.handle.sendline(
- "sudo iptables -F")
-
- #Counter to track loop count
- counter_loop = 0
- counter_avail1 = 0
- counter_avail2 = 0
- counter_avail3 = 0
- onos1_dev = False
- onos2_dev = False
- onos3_dev = False
- while counter_loop < 60:
- #Continue to check devices for all device
- #availability. When all devices in all 3
- #ONOS instances indicate that devices are available
- #obtain graph event timestamp for t1.
- device_str_obj1 = main.ONOS1cli.devices()
- device_str_obj2 = main.ONOS2cli.devices()
- device_str_obj3 = main.ONOS3cli.devices()
-
- device_json1 = json.loads(device_str_obj1)
- device_json2 = json.loads(device_str_obj2)
- device_json3 = json.loads(device_str_obj3)
-
- for device1 in device_json1:
- if device1['available'] == True:
- counter_avail1 += 1
- if counter_avail1 == int(num_sw):
- onos1_dev = True
- main.log.info("All devices have been "+
- "discovered on ONOS1")
- else:
- counter_avail1 = 0
- for device2 in device_json2:
- if device2['available'] == True:
- counter_avail2 += 1
- if counter_avail2 == int(num_sw):
- onos2_dev = True
- main.log.info("All devices have been "+
- "discovered on ONOS2")
- else:
- counter_avail2 = 0
- for device3 in device_json3:
- if device3['available'] == True:
- counter_avail3 += 1
- if counter_avail3 == int(num_sw):
- onos3_dev = True
- main.log.info("All devices have been "+
- "discovered on ONOS3")
- else:
- counter_avail3 = 0
-
- if onos1_dev and onos2_dev and onos3_dev:
- main.log.info("All devices have been discovered "+
- "on all ONOS instances")
- json_str_topology_metrics_1 =\
- main.ONOS1cli.topology_events_metrics()
- json_str_topology_metrics_2 =\
- main.ONOS2cli.topology_events_metrics()
- json_str_topology_metrics_3 =\
- main.ONOS3cli.topology_events_metrics()
-
- #Exit while loop if all devices discovered
- break
-
- counter_loop += 1
- #Give some time in between CLI calls
- #(will not affect measurement)
- time.sleep(3)
-
- main.ONOS1.tshark_stop()
-
- os.system("scp "+ONOS_user+"@"+ONOS1_ip+":"+
- tshark_ofp_output+" /tmp/")
- os.system("scp "+ONOS_user+"@"+ONOS1_ip+":"+
- tshark_tcp_output+" /tmp/")
-
- #TODO: Automate OFP output analysis
- #Debug mode - print out packets captured at runtime
- if debug_mode == 'on':
- ofp_file = open(tshark_ofp_output, 'r')
- main.log.info("Tshark OFP Vendor output: ")
- for line in ofp_file:
- tshark_ofp_result_list.append(line)
- main.log.info(line)
- ofp_file.close()
-
- tcp_file = open(tshark_tcp_output, 'r')
- main.log.info("Tshark TCP 74 output: ")
- for line in tcp_file:
- tshark_tcp_result_list.append(line)
- main.log.info(line)
- tcp_file.close()
-
- json_obj_1 = json.loads(json_str_topology_metrics_1)
- json_obj_2 = json.loads(json_str_topology_metrics_2)
- json_obj_3 = json.loads(json_str_topology_metrics_3)
-
- graph_timestamp_1 = \
- json_obj_1[graphTimestamp]['value']
- graph_timestamp_2 = \
- json_obj_2[graphTimestamp]['value']
- graph_timestamp_3 = \
- json_obj_3[graphTimestamp]['value']
-
- graph_lat_1 = int(graph_timestamp_1) - int(t0_system)
- graph_lat_2 = int(graph_timestamp_2) - int(t0_system)
- graph_lat_3 = int(graph_timestamp_3) - int(t0_system)
-
- avg_graph_lat = \
- (int(graph_lat_1) +\
- int(graph_lat_2) +\
- int(graph_lat_3)) / 3
-
- if avg_graph_lat > sw_disc_threshold_min \
- and avg_graph_lat < sw_disc_threshold_max:
- sw_discovery_lat_list.append(
- avg_graph_lat)
- else:
-                main.log.info(num_sw+" switch discovery latency "+
-                        "exceeded the threshold.")
-
- #END ITERATION FOR LOOP
-
- sw_lat_min = min(sw_discovery_lat_list)
- sw_lat_max = max(sw_discovery_lat_list)
- sw_lat_avg = sum(sw_discovery_lat_list) /\
- len(sw_discovery_lat_list)
-
-        main.log.report(num_sw+" switch discovery latency "+\
-                "Min: "+str(sw_lat_min)+" ms "+\
-                "Max: "+str(sw_lat_max)+" ms "+\
-                "Avg: "+str(sw_lat_avg)+" ms")
-
- def CASE6(self, main):
- '''
- Increase number of nodes and initiate CLI
- '''
- import time
-
- ONOS1_ip = main.params['CTRL']['ip1']
- ONOS2_ip = main.params['CTRL']['ip2']
- ONOS3_ip = main.params['CTRL']['ip3']
- ONOS4_ip = main.params['CTRL']['ip4']
- ONOS5_ip = main.params['CTRL']['ip5']
- ONOS6_ip = main.params['CTRL']['ip6']
- ONOS7_ip = main.params['CTRL']['ip7']
-
- cell_name = main.params['ENV']['cellName']
-
- global cluster_count
-
-        #Cluster size increases by 2 every time this case is run
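-        #Assuming an initial cluster_count of 3, successive runs scale
-        #the cluster 3 -> 5 -> 7 nodes, matching the branches below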
- cluster_count += 2
-
- main.log.report("Increasing cluster size to "+
- str(cluster_count))
-
- install_result = main.FALSE
- if cluster_count == 5:
- main.log.info("Installing nodes 4 and 5")
- node4_result = \
- main.ONOSbench.onos_install(node=ONOS4_ip)
- node5_result = \
- main.ONOSbench.onos_install(node=ONOS5_ip)
- install_result = node4_result and node5_result
-
- time.sleep(5)
-
- main.ONOS4cli.start_onos_cli(ONOS4_ip)
- main.ONOS5cli.start_onos_cli(ONOS5_ip)
-
- elif cluster_count == 7:
-            main.log.info("Installing nodes 6 and 7")
- node6_result = \
- main.ONOSbench.onos_install(node=ONOS6_ip)
- node7_result = \
- main.ONOSbench.onos_install(node=ONOS7_ip)
- install_result = node6_result and node7_result
-
- time.sleep(5)
-
- main.ONOS6cli.start_onos_cli(ONOS6_ip)
- main.ONOS7cli.start_onos_cli(ONOS7_ip)
-
-
-
-
diff --git a/TestON/tests/TopoPerfNext/TopoPerfNext.py b/TestON/tests/TopoPerfNext/TopoPerfNext.py
deleted file mode 100644
index 0f37909..0000000
--- a/TestON/tests/TopoPerfNext/TopoPerfNext.py
+++ /dev/null
@@ -1,2006 +0,0 @@
-# TopoPerfNext
-#
-# Topology Performance test for ONOS-next
-#
-# andrew@onlab.us
-#
-# If your machine does not come with numpy
-# run the following command:
-# sudo apt-get install python-numpy python-scipy
-
-import time
-import sys
-import os
-import re
-
-
-class TopoPerfNext:
-
- def __init__( self ):
- self.default = ''
-
- def CASE1( self, main ):
- """
- ONOS startup sequence
- """
- import time
-
- # Global cluster count for scale-out purposes
- global clusterCount
- #TODO: fix run number implementation
- global runNum
- global timeToPost
-
- #Test run time
- timeToPost = time.strftime("%Y-%m-%d %H:%M:%S")
- # Set initial cluster count
- clusterCount = 1
- ##
-
- runNum = time.strftime("%d%H%M%S")
-
- cellName = main.params[ 'ENV' ][ 'cellName' ]
-
- gitPull = main.params[ 'GIT' ][ 'autoPull' ]
- checkoutBranch = main.params[ 'GIT' ][ 'checkout' ]
-
- ONOS1Ip = main.params[ 'CTRL' ][ 'ip1' ]
- ONOS2Ip = main.params[ 'CTRL' ][ 'ip2' ]
- ONOS3Ip = main.params[ 'CTRL' ][ 'ip3' ]
- ONOS4Ip = main.params[ 'CTRL' ][ 'ip4' ]
- ONOS5Ip = main.params[ 'CTRL' ][ 'ip5' ]
- ONOS6Ip = main.params[ 'CTRL' ][ 'ip6' ]
- ONOS7Ip = main.params[ 'CTRL' ][ 'ip7' ]
-
- MN1Ip = main.params[ 'MN' ][ 'ip1' ]
- BENCHIp = main.params[ 'BENCH' ][ 'ip' ]
-
- topoCfgFile = main.params[ 'TEST' ][ 'topoConfigFile' ]
- topoCfgName = main.params[ 'TEST' ][ 'topoConfigName' ]
-
- portEventResultPath = main.params[ 'DB' ][ 'portEventResultPath' ]
- switchEventResultPath = main.params[ 'DB' ][ 'switchEventResultPath' ]
-
- mvnCleanInstall = main.params[ 'TEST' ][ 'mci' ]
-
- main.case( "Setting up test environment" )
- main.log.info( "Copying topology event accumulator config" +
- " to ONOS /package/etc" )
- main.ONOSbench.handle.sendline( "cp ~/" +
- topoCfgFile +
- " ~/ONOS/tools/package/etc/" +
- topoCfgName )
- main.ONOSbench.handle.expect( "\$" )
-
- main.log.report( "Setting up test environment" )
-
- main.step( "Starting mininet topology " )
- main.Mininet1.startNet()
-
- main.step( "Cleaning previously installed ONOS if any" )
- main.ONOSbench.onosUninstall( nodeIp=ONOS2Ip )
- main.ONOSbench.onosUninstall( nodeIp=ONOS3Ip )
- main.ONOSbench.onosUninstall( nodeIp=ONOS4Ip )
- main.ONOSbench.onosUninstall( nodeIp=ONOS5Ip )
- main.ONOSbench.onosUninstall( nodeIp=ONOS6Ip )
- main.ONOSbench.onosUninstall( nodeIp=ONOS7Ip )
-
- main.step( "Clearing previous DB log files" )
- fPortLog = open(portEventResultPath, 'w')
- fPortLog.write('')
- fPortLog.close()
- fSwitchLog = open(switchEventResultPath, 'w')
- fSwitchLog.write('')
- fSwitchLog.close()
-
- main.step( "Creating cell file" )
- cellFileResult = main.ONOSbench.createCellFile(
- BENCHIp, cellName, MN1Ip,
- ("onos-core,onos-api,webconsole,onos-app-metrics,onos-app-gui,"
- "onos-cli,onos-openflow"),
- ONOS1Ip )
-
- main.step( "Applying cell file to environment" )
- cellApplyResult = main.ONOSbench.setCell( cellName )
- verifyCellResult = main.ONOSbench.verifyCell()
-
- # NOTE: This step may be removed after proper
- # copy cat log functionality
- main.step( "Removing raft/copy-cat logs from ONOS nodes" )
- main.ONOSbench.onosRemoveRaftLogs()
- time.sleep( 30 )
-
- main.step( "Git checkout and pull " + checkoutBranch )
- if gitPull == 'on':
- # checkoutResult = \
- # main.ONOSbench.gitCheckout( checkoutBranch )
- checkoutResult = main.TRUE
- pullResult = main.ONOSbench.gitPull()
- else:
- checkoutResult = main.TRUE
- pullResult = main.TRUE
- main.log.info( "Skipped git checkout and pull" )
-
- main.log.report( "Commit information - " )
- main.ONOSbench.getVersion( report=True )
-
- main.step( "Using mvn clean & install" )
- if mvnCleanInstall == 'on':
- mvnResult = main.ONOSbench.cleanInstall()
- elif mvnCleanInstall == 'off':
- main.log.info("mci turned off by settings")
- mvnResult = main.TRUE
-
- main.step( "Set cell for ONOS cli env" )
- main.ONOS1cli.setCell( cellName )
-
- main.step( "Creating ONOS package" )
- packageResult = main.ONOSbench.onosPackage()
-
- main.step( "Installing ONOS package" )
- install1Result = main.ONOSbench.onosInstall( node=ONOS1Ip )
-
- time.sleep( 10 )
-
- main.step( "Start onos cli" )
- cli1 = main.ONOS1cli.startOnosCli( ONOS1Ip )
-
- utilities.assert_equals( expect=main.TRUE,
- actual=cellFileResult and cellApplyResult and
- verifyCellResult and checkoutResult and
- pullResult and mvnResult and
- install1Result, # and install2Result and
- # install3Result,
- onpass="Test Environment setup successful",
- onfail="Failed to setup test environment" )
-
- def CASE2( self, main ):
- """
- Assign s1 to ONOS1 and measure latency
-
- There are 4 levels of latency measurements to this test:
- 1 ) End-to-end measurement: Complete end-to-end measurement
- from TCP ( SYN/ACK ) handshake to Graph change
- 2 ) OFP-to-graph measurement: 'ONOS processing' snippet of
- measurement from OFP Vendor message to Graph change
- 3 ) OFP-to-device measurement: 'ONOS processing without
- graph change' snippet of measurement from OFP vendor
- message to Device change timestamp
- 4 ) T0-to-device measurement: Measurement that includes
- the switch handshake to devices timestamp without
- the graph view change. ( TCP handshake -> Device
- change )
- """
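-        # Deltas computed below ( all timestamps in epoch ms ):
-        #   end-to-end    = graphTimestamp  - t0Tcp
-        #   ofp-to-graph  = graphTimestamp  - t0Ofp
-        #   ofp-to-device = deviceTimestamp - t0Ofp
-        #   t0-to-device  = deviceTimestamp - t0Tcp
-        #   tcp-to-ofp    = t0Ofp           - t0Tcp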
- import time
- import subprocess
- import json
- import requests
- import os
- import numpy
- global clusterCount
- global timeToPost
-
- ONOS1Ip = main.params[ 'CTRL' ][ 'ip1' ]
- ONOS2Ip = main.params[ 'CTRL' ][ 'ip2' ]
- ONOS3Ip = main.params[ 'CTRL' ][ 'ip3' ]
- ONOS4Ip = main.params[ 'CTRL' ][ 'ip4' ]
- ONOS5Ip = main.params[ 'CTRL' ][ 'ip5' ]
- ONOS6Ip = main.params[ 'CTRL' ][ 'ip6' ]
- ONOS7Ip = main.params[ 'CTRL' ][ 'ip7' ]
-
- ONOSUser = main.params[ 'CTRL' ][ 'user' ]
-
- defaultSwPort = main.params[ 'CTRL' ][ 'port1' ]
-
- # Number of iterations of case
- numIter = main.params[ 'TEST' ][ 'numIter' ]
- # Number of first 'x' iterations to ignore:
- iterIgnore = int( main.params[ 'TEST' ][ 'iterIgnore' ] )
-
- # Timestamp 'keys' for json metrics output.
- # These are subject to change, hence moved into params
- deviceTimestamp = main.params[ 'JSON' ][ 'deviceTimestamp' ]
- graphTimestamp = main.params[ 'JSON' ][ 'graphTimestamp' ]
-
- debugMode = main.params[ 'TEST' ][ 'debugMode' ]
- onosLog = main.params[ 'TEST' ][ 'onosLogFile' ]
- resultPath = main.params[ 'DB' ][ 'switchEventResultPath' ]
-
- # Threshold for the test
- thresholdStr = main.params[ 'TEST' ][ 'singleSwThreshold' ]
- thresholdObj = thresholdStr.split( "," )
- thresholdMin = int( thresholdObj[ 0 ] )
- thresholdMax = int( thresholdObj[ 1 ] )
-
- # List of switch add latency collected from
- # all iterations
- latencyEndToEndList = []
- latencyOfpToGraphList = []
- latencyOfpToDeviceList = []
- latencyT0ToDeviceList = []
- latencyTcpToOfpList = []
-
- # Initialize 2d array for [node][iteration] storage
- endToEndLatNodeIter = numpy.zeros(( clusterCount, int(numIter) ))
- ofpToGraphLatNodeIter = numpy.zeros(( clusterCount, int(numIter) ))
- # tcp-to-ofp measurements are same throughout each iteration
- tcpToOfpLatIter = []
-
- # Directory/file to store tshark results
- tsharkOfOutput = "/tmp/tshark_of_topo.txt"
- tsharkTcpOutput = "/tmp/tshark_tcp_topo.txt"
-
- # String to grep in tshark output
- tsharkTcpString = "TCP 74 " + defaultSwPort
- tsharkOfString = "OFP 86 Vendor"
-
- # Initialize assertion to TRUE
- assertion = main.TRUE
-
- localTime = time.strftime( '%x %X' )
- localTime = localTime.replace( "/", "" )
- localTime = localTime.replace( " ", "_" )
- localTime = localTime.replace( ":", "" )
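-        # e.g. ( illustrative, locale-dependent ) '01/01/14 12:00:00'
-        # becomes '010114_120000' after the replacements above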
- if debugMode == 'on':
- main.ONOS1.tsharkPcap( "eth0",
- "/tmp/single_sw_lat_pcap_" + localTime )
-
- main.log.info( "Debug mode is on" )
-
- main.log.report( "Latency of adding one switch to controller" )
- main.log.report( "First " + str( iterIgnore ) + " iterations ignored" +
- " for jvm warmup time" )
- main.log.report( "Total iterations of test: " + str( numIter ) )
-
- for i in range( 0, int( numIter ) ):
- main.log.info( "Starting tshark capture" )
-
- #* TCP [ ACK, SYN ] is used as t0A, the
- # very first "exchange" between ONOS and
- # the switch for end-to-end measurement
- #* OFP [ Stats Reply ] is used for t0B
- # the very last OFP message between ONOS
- # and the switch for ONOS measurement
- main.ONOS1.tsharkGrep( tsharkTcpString,
- tsharkTcpOutput )
- main.ONOS1.tsharkGrep( tsharkOfString,
- tsharkOfOutput )
-
- # Wait and ensure tshark is started and
- # capturing
- time.sleep( 10 )
-
- main.log.info( "Assigning s1 to controller" )
-
- main.Mininet1.assignSwController(
- sw="1",
- ip1=ONOS1Ip,
- port1=defaultSwPort )
-
- # Wait and ensure switch is assigned
- # before stopping tshark
- time.sleep( 30 )
-
- main.log.info( "Stopping all Tshark processes" )
- main.ONOS1.stopTshark()
-
- # tshark output is saved in ONOS. Use subprocess
- # to copy over files to TestON for parsing
- main.log.info( "Copying over tshark files" )
-
- # TCP CAPTURE ****
- # Copy the tshark output from ONOS machine to
- # TestON machine in tsharkTcpOutput directory>file
- os.system( "scp " + ONOSUser + "@" + ONOS1Ip + ":" +
- tsharkTcpOutput + " /tmp/" )
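-            # NOTE ( assumption ): each captured line is expected to look
-            # roughly like "<frame#> <epoch seconds> <src> -> <dst> ...",
-            # so field [ 1 ] is read as the epoch time in seconds and
-            # converted to ms below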
- tcpFile = open( tsharkTcpOutput, 'r' )
- tempText = tcpFile.readline()
- tempText = tempText.split( " " )
-
- main.log.info( "Object read in from TCP capture: " +
- str( tempText ) )
- if len( tempText ) > 1:
- t0Tcp = float( tempText[ 1 ] ) * 1000.0
- else:
- main.log.error( "Tshark output file for TCP" +
- " returned unexpected results" )
- t0Tcp = 0
- assertion = main.FALSE
-
- tcpFile.close()
- #****************
-
- # OF CAPTURE ****
- os.system( "scp " + ONOSUser + "@" + ONOS1Ip + ":" +
- tsharkOfOutput + " /tmp/" )
- ofFile = open( tsharkOfOutput, 'r' )
-
- lineOfp = ""
- # Read until last line of file
- while True:
- tempText = ofFile.readline()
- if tempText != '':
- lineOfp = tempText
- else:
- break
- obj = lineOfp.split( " " )
-
- main.log.info( "Object read in from OFP capture: " +
- str( lineOfp ) )
-
- if len( obj ) > 1:
- t0Ofp = float( obj[ 1 ] ) * 1000.0
- else:
- main.log.error( "Tshark output file for OFP" +
- " returned unexpected results" )
- t0Ofp = 0
- assertion = main.FALSE
-
- ofFile.close()
- #****************
-
- jsonStr1 = main.ONOS1cli.topologyEventsMetrics()
- # Initialize scale-out variables
- jsonStr2 = ""
- jsonStr3 = ""
- jsonStr4 = ""
- jsonStr5 = ""
- jsonStr6 = ""
- jsonStr7 = ""
-
- jsonObj1 = json.loads( jsonStr1 )
- # Initialize scale-out variables
- jsonObj2 = ""
- jsonObj3 = ""
- jsonObj4 = ""
- jsonObj5 = ""
- jsonObj6 = ""
- jsonObj7 = ""
-
-            # Obtain graph timestamp. This timestamp captures
- # the epoch time at which the topology graph was updated.
- graphTimestamp1 = \
- jsonObj1[ graphTimestamp ][ 'value' ]
- # Obtain device timestamp. This timestamp captures
- # the epoch time at which the device event happened
- deviceTimestamp1 = \
- jsonObj1[ deviceTimestamp ][ 'value' ]
-
- # t0 to device processing latency
- deltaDevice1 = int( deviceTimestamp1 ) - int( t0Tcp )
- # t0 to graph processing latency ( end-to-end )
- deltaGraph1 = int( graphTimestamp1 ) - int( t0Tcp )
- # ofp to graph processing latency ( ONOS processing )
- deltaOfpGraph1 = int( graphTimestamp1 ) - int( t0Ofp )
- # ofp to device processing latency ( ONOS processing )
- deltaOfpDevice1 = float( deviceTimestamp1 ) - float( t0Ofp )
- # tcp to ofp processing latency ( switch connection )
- deltaTcpOfp1 = int(t0Ofp) - int(t0Tcp)
-
- if deltaTcpOfp1 > thresholdMin and deltaTcpOfp1 < thresholdMax\
- and i >= iterIgnore:
- tcpToOfpLatIter.append(deltaTcpOfp1)
- main.log.info("iter"+str(i)+" tcp-to-ofp: "+
- str(deltaTcpOfp1)+" ms")
- else:
- tcpToOfpLatIter.append(0)
- main.log.info("iter"+str(i)+" tcp-to-ofp: "+
- str(deltaTcpOfp1)+" ms - ignored this iteration")
-
- # Store initial measurements in data array
- #This measurement is for node 1
-
- if deltaGraph1 > thresholdMin and deltaGraph1 < thresholdMax\
- and i >= iterIgnore:
- endToEndLatNodeIter[0][i] = deltaGraph1
- main.log.info("ONOS1 iter"+str(i)+" end-to-end: "+
- str(deltaGraph1)+" ms")
- else:
- main.log.info("ONOS1 iter"+str(i)+" end-to-end: "+
- str(deltaGraph1)+" ms - ignored this iteration")
-
-
- if deltaOfpGraph1 > thresholdMin and deltaOfpGraph1 < thresholdMax\
- and i >= iterIgnore:
- ofpToGraphLatNodeIter[0][i] = deltaOfpGraph1
-
- main.log.info("ONOS1 iter"+str(i)+" ofp-to-graph: "+
- str(deltaOfpGraph1)+" ms")
-
- # TODO: Create even cluster number events
-
- # Include scale-out measurements when applicable
- if clusterCount >= 3:
- jsonStr2 = main.ONOS2cli.topologyEventsMetrics()
- jsonStr3 = main.ONOS3cli.topologyEventsMetrics()
- jsonObj2 = json.loads( jsonStr2 )
- jsonObj3 = json.loads( jsonStr3 )
- graphTimestamp2 = \
- jsonObj2[ graphTimestamp ][ 'value' ]
- graphTimestamp3 = \
- jsonObj3[ graphTimestamp ][ 'value' ]
- deviceTimestamp2 = \
- jsonObj2[ deviceTimestamp ][ 'value' ]
- deviceTimestamp3 = \
- jsonObj3[ deviceTimestamp ][ 'value' ]
- deltaDevice2 = int( deviceTimestamp2 ) - int( t0Tcp )
- deltaDevice3 = int( deviceTimestamp3 ) - int( t0Tcp )
- deltaGraph2 = int( graphTimestamp2 ) - int( t0Tcp )
- deltaGraph3 = int( graphTimestamp3 ) - int( t0Tcp )
- deltaOfpGraph2 = int( graphTimestamp2 ) - int( t0Ofp )
- deltaOfpGraph3 = int( graphTimestamp3 ) - int( t0Ofp )
- deltaOfpDevice2 = float( deviceTimestamp2 ) -\
- float( t0Ofp )
- deltaOfpDevice3 = float( deviceTimestamp3 ) -\
- float( t0Ofp )
-
- if deltaGraph2 > thresholdMin and\
- deltaGraph2 < thresholdMax and i >= iterIgnore:
- endToEndLatNodeIter[1][i] = deltaGraph2
- main.log.info("ONOS2 iter"+str(i)+" end-to-end: "+
- str(deltaGraph2)+" ms")
-
- if deltaOfpGraph2 > thresholdMin and\
- deltaOfpGraph2 < thresholdMax and i >= iterIgnore:
- ofpToGraphLatNodeIter[1][i] = deltaOfpGraph2
- main.log.info("ONOS2 iter"+str(i)+" ofp-to-graph: "+
- str(deltaOfpGraph2)+" ms")
-
- if deltaGraph3 > thresholdMin and\
- deltaGraph3 < thresholdMax and i >= iterIgnore:
- endToEndLatNodeIter[2][i] = deltaGraph3
- main.log.info("ONOS3 iter"+str(i)+" end-to-end: "+
- str(deltaGraph3)+" ms")
-
- if deltaOfpGraph3 > thresholdMin and\
- deltaOfpGraph3 < thresholdMax and i >= iterIgnore:
- ofpToGraphLatNodeIter[2][i] = deltaOfpGraph3
- main.log.info("ONOS3 iter"+str(i)+" ofp-to-graph: "+
- str(deltaOfpGraph3)+" ms")
-
- if clusterCount >= 5:
- jsonStr4 = main.ONOS4cli.topologyEventsMetrics()
- jsonStr5 = main.ONOS5cli.topologyEventsMetrics()
- jsonObj4 = json.loads( jsonStr4 )
- jsonObj5 = json.loads( jsonStr5 )
- graphTimestamp4 = \
- jsonObj4[ graphTimestamp ][ 'value' ]
- graphTimestamp5 = \
- jsonObj5[ graphTimestamp ][ 'value' ]
- deviceTimestamp4 = \
- jsonObj4[ deviceTimestamp ][ 'value' ]
- deviceTimestamp5 = \
- jsonObj5[ deviceTimestamp ][ 'value' ]
- deltaDevice4 = int( deviceTimestamp4 ) - int( t0Tcp )
- deltaDevice5 = int( deviceTimestamp5 ) - int( t0Tcp )
- deltaGraph4 = int( graphTimestamp4 ) - int( t0Tcp )
- deltaGraph5 = int( graphTimestamp5 ) - int( t0Tcp )
- deltaOfpGraph4 = int( graphTimestamp4 ) - int( t0Ofp )
- deltaOfpGraph5 = int( graphTimestamp5 ) - int( t0Ofp )
- deltaOfpDevice4 = float( deviceTimestamp4 ) -\
- float( t0Ofp )
- deltaOfpDevice5 = float( deviceTimestamp5 ) -\
- float( t0Ofp )
-
- if deltaGraph4 > thresholdMin and\
- deltaGraph4 < thresholdMax and i >= iterIgnore:
- endToEndLatNodeIter[3][i] = deltaGraph4
- main.log.info("ONOS4 iter"+str(i)+" end-to-end: "+
- str(deltaGraph4)+" ms")
-
- #TODO:
- if deltaOfpGraph4 > thresholdMin and\
- deltaOfpGraph4 < thresholdMax and i >= iterIgnore:
- ofpToGraphLatNodeIter[3][i] = deltaOfpGraph4
- main.log.info("ONOS4 iter"+str(i)+" ofp-to-graph: "+
- str(deltaOfpGraph4)+" ms")
-
- if deltaGraph5 > thresholdMin and\
- deltaGraph5 < thresholdMax and i >= iterIgnore:
- endToEndLatNodeIter[4][i] = deltaGraph5
- main.log.info("ONOS5 iter"+str(i)+" end-to-end: "+
- str(deltaGraph5)+" ms")
-
- if deltaOfpGraph5 > thresholdMin and\
- deltaOfpGraph5 < thresholdMax and i >= iterIgnore:
- ofpToGraphLatNodeIter[4][i] = deltaOfpGraph5
- main.log.info("ONOS5 iter"+str(i)+" ofp-to-graph: "+
- str(deltaOfpGraph5)+" ms")
-
- if clusterCount >= 7:
- jsonStr6 = main.ONOS6cli.topologyEventsMetrics()
- jsonStr7 = main.ONOS7cli.topologyEventsMetrics()
- jsonObj6 = json.loads( jsonStr6 )
- jsonObj7 = json.loads( jsonStr7 )
- graphTimestamp6 = \
- jsonObj6[ graphTimestamp ][ 'value' ]
- graphTimestamp7 = \
- jsonObj7[ graphTimestamp ][ 'value' ]
- deviceTimestamp6 = \
- jsonObj6[ deviceTimestamp ][ 'value' ]
- deviceTimestamp7 = \
- jsonObj7[ deviceTimestamp ][ 'value' ]
- deltaDevice6 = int( deviceTimestamp6 ) - int( t0Tcp )
- deltaDevice7 = int( deviceTimestamp7 ) - int( t0Tcp )
- deltaGraph6 = int( graphTimestamp6 ) - int( t0Tcp )
- deltaGraph7 = int( graphTimestamp7 ) - int( t0Tcp )
- deltaOfpGraph6 = int( graphTimestamp6 ) - int( t0Ofp )
- deltaOfpGraph7 = int( graphTimestamp7 ) - int( t0Ofp )
- deltaOfpDevice6 = float( deviceTimestamp6 ) -\
- float( t0Ofp )
- deltaOfpDevice7 = float( deviceTimestamp7 ) -\
- float( t0Ofp )
-
- if deltaGraph6 > thresholdMin and\
- deltaGraph6 < thresholdMax and i >= iterIgnore:
- endToEndLatNodeIter[5][i] = deltaGraph6
- main.log.info("ONOS6 iter"+str(i)+" end-to-end: "+
- str(deltaGraph6)+" ms")
-
- #TODO:
- if deltaOfpGraph6 > thresholdMin and\
- deltaOfpGraph6 < thresholdMax and i >= iterIgnore:
- ofpToGraphLatNodeIter[5][i] = deltaOfpGraph6
- main.log.info("ONOS6 iter"+str(i)+" ofp-to-graph: "+
- str(deltaOfpGraph6)+" ms")
-
- if deltaGraph7 > thresholdMin and\
- deltaGraph7 < thresholdMax and i >= iterIgnore:
- endToEndLatNodeIter[6][i] = deltaGraph7
- main.log.info("ONOS7 iter"+str(i)+" end-to-end: "+
- str(deltaGraph7)+" ms")
-
- if deltaOfpGraph7 > thresholdMin and\
- deltaOfpGraph7 < thresholdMax and i >= iterIgnore:
- ofpToGraphLatNodeIter[6][i] = deltaOfpGraph7
- main.log.info("ONOS7 iter"+str(i)+" ofp-to-graph: "+
- str(deltaOfpGraph7)+" ms")
-
- main.log.info("Switch up discovery latency")
-
- main.log.info("Starting tshark capture")
-
- main.step( "Remove switch from controller" )
- main.Mininet1.deleteSwController( "s1" )
-
- #TODO: del controller does not have an OFP message.
- # However, we can capture TCP Fin,Ack as T0
-
- time.sleep( 5 )
-
- # END of for loop iteration
-
- #str( round( numpy.std( latencyT0ToDeviceList ), 1 ) )
-
- endToEndAvg = 0
- ofpToGraphAvg = 0
- endToEndList = []
- ofpToGraphList = []
- dbCmdList = []
-
- for node in range( 0, clusterCount ):
- # The latency 2d array was initialized to 0.
- # If an iteration was ignored, then we have some 0's in
- # our calculation. To avoid having this interfere with our
- # results, we must delete any index where 0 is found...
- # WARN: Potentially, we could have latency that hovers at
- # 0 ms once we have optimized code. FIXME for when this is
- # the case. Being able to obtain sub-millisecond accuracy
- # can prevent this from happening
- for item in endToEndLatNodeIter[node]:
- if item > 0.0:
- endToEndList.append(item)
- for item in ofpToGraphLatNodeIter[node]:
- if item > 0.0:
- ofpToGraphList.append(item)
-
- endToEndAvg = round(numpy.mean(endToEndList), 2)
- ofpToGraphAvg = numpy.mean(ofpToGraphList)
- endToEndStd = round(numpy.std(endToEndList), 2)
-
- main.log.report( " - Node "+str(node+1)+" Summary - " )
- main.log.report( " End-to-end Avg: "+
- str(round(endToEndAvg,2))+" ms"+
- " End-to-end Std dev: "+
- str(round(endToEndStd,2))+" ms")
-
- dbCmdList.append(
- "INSERT INTO switch_latency_tests VALUES("
- "'"+timeToPost+"','switch_latency_results',"
- ""+runNum+","+str(clusterCount)+",'baremetal"+str(node+1)+"',"
- ""+str(endToEndAvg)+","+str(endToEndStd)+",0,0);"
- )
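-            # Illustrative ( hypothetical values ) resulting statement:
-            # INSERT INTO switch_latency_tests VALUES('2015-01-01 12:00:00',
-            #     'switch_latency_results',01120000,3,'baremetal1',25.5,1.2,0,0);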
- #main.log.report( " Ofp-to-graph Avg: "+
- # str(round(ofpToGraphAvg,2))+" ms"+
- # " Ofp-to-graph Std dev: "+
- # str(round(numpy.std(ofpToGraphList),2))+
- # " ms")
-
- if debugMode == 'on':
- main.ONOS1.cpLogsToDir( "/opt/onos/log/karaf.log",
- "/tmp/", copyFileName="sw_lat_karaf" )
-
- #Write to file for posting to DB
- fResult = open(resultPath, 'a')
- for line in dbCmdList:
- if line:
- fResult.write(line+"\n")
- fResult.close()
-
- #TODO: correct assert
- assertion = main.TRUE
-
- utilities.assert_equals( expect=main.TRUE, actual=assertion,
- onpass="Switch latency test successful",
- onfail="Switch latency test failed" )
-
- def CASE3( self, main ):
- """
- Bring port up / down and measure latency.
- Port enable / disable is simulated by ifconfig up / down
-
- In ONOS-next, we must ensure that the port we are
- manipulating is connected to another switch with a valid
- connection. Otherwise, graph view will not be updated.
- """
- import time
- import subprocess
- import os
- import requests
- import json
- import numpy
- global clusterCount
- global runNum
- global timeToPost
-
- ONOS1Ip = main.params[ 'CTRL' ][ 'ip1' ]
- ONOS2Ip = main.params[ 'CTRL' ][ 'ip2' ]
- ONOS3Ip = main.params[ 'CTRL' ][ 'ip3' ]
- ONOSUser = main.params[ 'CTRL' ][ 'user' ]
-
- defaultSwPort = main.params[ 'CTRL' ][ 'port1' ]
-
- assertion = main.TRUE
- # Number of iterations of case
- numIter = main.params[ 'TEST' ][ 'numIter' ]
- iterIgnore = int( main.params[ 'TEST' ][ 'iterIgnore' ] )
-
- # Timestamp 'keys' for json metrics output.
- # These are subject to change, hence moved into params
- deviceTimestamp = main.params[ 'JSON' ][ 'deviceTimestamp' ]
- graphTimestamp = main.params[ 'JSON' ][ 'graphTimestamp' ]
-
- debugMode = main.params[ 'TEST' ][ 'debugMode' ]
- postToDB = main.params[ 'DB' ][ 'postToDB' ]
- resultPath = main.params[ 'DB' ][ 'portEventResultPath' ]
-
- localTime = time.strftime( '%x %X' )
- localTime = localTime.replace( "/", "" )
- localTime = localTime.replace( " ", "_" )
- localTime = localTime.replace( ":", "" )
- if debugMode == 'on':
- main.ONOS1.tsharkPcap( "eth0",
- "/tmp/port_lat_pcap_" + localTime )
-
- # Threshold for this test case
- upThresholdStr = main.params[ 'TEST' ][ 'portUpThreshold' ]
- downThresholdStr = main.params[ 'TEST' ][ 'portDownThreshold' ]
-
- upThresholdObj = upThresholdStr.split( "," )
- downThresholdObj = downThresholdStr.split( "," )
-
- upThresholdMin = int( upThresholdObj[ 0 ] )
- upThresholdMax = int( upThresholdObj[ 1 ] )
-
- downThresholdMin = int( downThresholdObj[ 0 ] )
- downThresholdMax = int( downThresholdObj[ 1 ] )
-
- # NOTE: Some hardcoded variables you may need to configure
- # besides the params
-
- tsharkPortStatus = "OFP 130 Port Status"
-
- tsharkPortUp = "/tmp/tshark_port_up.txt"
- tsharkPortDown = "/tmp/tshark_port_down.txt"
- interfaceConfig = "s1-eth1"
-
- main.log.report( "Port enable / disable latency" )
- main.log.report( "Simulated by ifconfig up / down" )
- main.log.report( "Total iterations of test: " + str( numIter ) )
-
- main.step( "Assign switches s1 and s2 to controller 1" )
- main.Mininet1.assignSwController( sw="1", ip1=ONOS1Ip,
- port1=defaultSwPort )
- main.Mininet1.assignSwController( sw="2", ip1=ONOS1Ip,
- port1=defaultSwPort )
-
- # Give enough time for metrics to propagate the
- # assign controller event. Otherwise, these events may
- # carry over to our measurements
- time.sleep( 15 )
-
- portUpDeviceToOfpList = []
- portUpGraphToOfpList = []
- portDownDeviceToOfpList = []
- portDownGraphToOfpList = []
-
- # Initialize 2d array filled with 0's
- # arraySizeFormat[clusterCount][numIter]
- portUpDevNodeIter = numpy.zeros(( clusterCount, int(numIter) ))
- portUpGraphNodeIter = numpy.zeros(( clusterCount, int(numIter) ))
- portDownDevNodeIter = numpy.zeros(( clusterCount, int(numIter) ))
- portDownGraphNodeIter = numpy.zeros(( clusterCount, int(numIter) ))
-
- for i in range( 0, int( numIter ) ):
- main.step( "Starting wireshark capture for port status down" )
- main.ONOS1.tsharkGrep( tsharkPortStatus,
- tsharkPortDown )
-
- time.sleep( 5 )
-
- # Disable interface that is connected to switch 2
- main.step( "Disable port: " + interfaceConfig )
- main.Mininet1.handle.sendline( "sh ifconfig " +
- interfaceConfig + " down" )
- main.Mininet1.handle.expect( "mininet>" )
-
- time.sleep( 3 )
- main.ONOS1.tsharkStop()
-
- # Copy tshark output file from ONOS to TestON instance
- #/tmp directory
- os.system( "scp " + ONOSUser + "@" + ONOS1Ip + ":" +
- tsharkPortDown + " /tmp/" )
-
- fPortDown = open( tsharkPortDown, 'r' )
- # Get first line of port down event from tshark
- fLine = fPortDown.readline()
- objDown = fLine.split( " " )
- if len( fLine ) > 0:
- # NOTE: objDown[ 1 ] is a very unreliable
- # way to determine the timestamp. If
- # results seem way off, check the object
- # itself by printing it out
- timestampBeginPtDown = int( float( objDown[ 1 ] ) * 1000 )
- # For some reason, wireshark decides to record the
- # timestamp at the 3rd object position instead of
- # 2nd at unpredictable times. This statement is
- # used to capture that odd behavior and use the
- # correct epoch time
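-                # Illustrative ( assumed ) captures:
-                #   "1 1400000123.456 ..."          -> epoch is field [ 1 ]
-                #   "1 0.000000 1400000123.456 ..." -> epoch is field [ 2 ]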
- if timestampBeginPtDown < 1400000000000:
- timestampBeginPtDown = \
- int( float( objDown[ 2 ] ) * 1000 )
-
- main.log.info( "Port down begin timestamp: " +
- str( timestampBeginPtDown ) )
- else:
- main.log.info( "Tshark output file returned unexpected" +
- " results: " + str( objDown ) )
- timestampBeginPtDown = 0
- fPortDown.close()
-
- main.step( "Obtain t1 by metrics call" )
- jsonStrUp1 = main.ONOS1cli.topologyEventsMetrics()
- jsonObj1 = json.loads( jsonStrUp1 )
-            # Obtain graph timestamp. This timestamp captures
- # the epoch time at which the topology graph was updated.
- graphTimestamp1 = \
- jsonObj1[ graphTimestamp ][ 'value' ]
- # Obtain device timestamp. This timestamp captures
- # the epoch time at which the device event happened
- deviceTimestamp1 = \
- jsonObj1[ deviceTimestamp ][ 'value' ]
- # Get delta between graph event and OFP
- ptDownGraphToOfp1 = int( graphTimestamp1 ) -\
- int( timestampBeginPtDown )
- # Get delta between device event and OFP
- ptDownDeviceToOfp1 = int( deviceTimestamp1 ) -\
- int( timestampBeginPtDown )
-
- if ptDownGraphToOfp1 > downThresholdMin and\
- ptDownGraphToOfp1 < downThresholdMax and i > iterIgnore:
- portDownGraphNodeIter[0][i] = ptDownGraphToOfp1
- main.log.info("iter"+str(i)+" port down graph-to-ofp: "+
- str(ptDownGraphToOfp1)+" ms")
- else:
- main.log.info("iter"+str(i)+" skipped. Result: "+
- str(ptDownGraphToOfp1)+" ms")
- if ptDownDeviceToOfp1 > downThresholdMin and\
- ptDownDeviceToOfp1 < downThresholdMax and i > iterIgnore:
- portDownDevNodeIter[0][i] = ptDownDeviceToOfp1
- main.log.info("iter"+str(i)+" port down device-to-ofp: "+
- str(ptDownDeviceToOfp1)+" ms")
- else:
- main.log.info("iter"+str(i)+" skipped. Result: "+
- str(ptDownDeviceToOfp1)+" ms")
-
- if clusterCount >= 3:
- jsonStrUp2 = main.ONOS2cli.topologyEventsMetrics()
- jsonStrUp3 = main.ONOS3cli.topologyEventsMetrics()
- jsonObj2 = json.loads( jsonStrUp2 )
- jsonObj3 = json.loads( jsonStrUp3 )
- graphTimestamp2 = \
- jsonObj2[ graphTimestamp ][ 'value' ]
- graphTimestamp3 = \
- jsonObj3[ graphTimestamp ][ 'value' ]
- deviceTimestamp2 = \
- jsonObj2[ deviceTimestamp ][ 'value' ]
- deviceTimestamp3 = \
- jsonObj3[ deviceTimestamp ][ 'value' ]
- ptDownGraphToOfp2 = int( graphTimestamp2 ) -\
- int( timestampBeginPtDown )
- ptDownGraphToOfp3 = int( graphTimestamp3 ) -\
- int( timestampBeginPtDown )
- ptDownDeviceToOfp2 = int( deviceTimestamp2 ) -\
- int( timestampBeginPtDown )
- ptDownDeviceToOfp3 = int( deviceTimestamp3 ) -\
- int( timestampBeginPtDown )
-
- if ptDownGraphToOfp2 > downThresholdMin and\
- ptDownGraphToOfp2 < downThresholdMax and i > iterIgnore:
- portDownGraphNodeIter[1][i] = ptDownGraphToOfp2
- main.log.info("ONOS2 iter"+str(i)+" graph-to-ofp: "+
- str(ptDownGraphToOfp2)+" ms")
-
- if ptDownDeviceToOfp2 > downThresholdMin and\
- ptDownDeviceToOfp2 < downThresholdMax and i > iterIgnore:
- portDownDevNodeIter[1][i] = ptDownDeviceToOfp2
- main.log.info("ONOS2 iter"+str(i)+" device-to-ofp: "+
- str(ptDownDeviceToOfp2)+" ms")
-
- if ptDownGraphToOfp3 > downThresholdMin and\
- ptDownGraphToOfp3 < downThresholdMax and i > iterIgnore:
- portDownGraphNodeIter[2][i] = ptDownGraphToOfp3
- main.log.info("ONOS3 iter"+str(i)+" graph-to-ofp: "+
- str(ptDownGraphToOfp3)+" ms")
-
- if ptDownDeviceToOfp3 > downThresholdMin and\
- ptDownDeviceToOfp3 < downThresholdMax and i > iterIgnore:
- portDownDevNodeIter[2][i] = ptDownDeviceToOfp3
- main.log.info("ONOS3 iter"+str(i)+" device-to-ofp: "+
- str(ptDownDeviceToOfp3)+" ms")
-
- if clusterCount >= 5:
- jsonStrUp4 = main.ONOS4cli.topologyEventsMetrics()
- jsonStrUp5 = main.ONOS5cli.topologyEventsMetrics()
- jsonObj4 = json.loads( jsonStrUp4 )
- jsonObj5 = json.loads( jsonStrUp5 )
- graphTimestamp4 = \
- jsonObj4[ graphTimestamp ][ 'value' ]
- graphTimestamp5 = \
- jsonObj5[ graphTimestamp ][ 'value' ]
- deviceTimestamp4 = \
- jsonObj4[ deviceTimestamp ][ 'value' ]
- deviceTimestamp5 = \
- jsonObj5[ deviceTimestamp ][ 'value' ]
- ptDownGraphToOfp4 = int( graphTimestamp4 ) -\
- int( timestampBeginPtDown )
- ptDownGraphToOfp5 = int( graphTimestamp5 ) -\
- int( timestampBeginPtDown )
- ptDownDeviceToOfp4 = int( deviceTimestamp4 ) -\
- int( timestampBeginPtDown )
- ptDownDeviceToOfp5 = int( deviceTimestamp5 ) -\
- int( timestampBeginPtDown )
-
- if ptDownGraphToOfp4 > downThresholdMin and\
- ptDownGraphToOfp4 < downThresholdMax and i > iterIgnore:
- portDownGraphNodeIter[3][i] = ptDownGraphToOfp4
- main.log.info("ONOS4 iter"+str(i)+" graph-to-ofp: "+
- str(ptDownGraphToOfp4)+" ms")
-
- if ptDownDeviceToOfp4 > downThresholdMin and\
- ptDownDeviceToOfp4 < downThresholdMax and i > iterIgnore:
- portDownDevNodeIter[3][i] = ptDownDeviceToOfp4
- main.log.info("ONOS4 iter"+str(i)+" device-to-ofp: "+
- str(ptDownDeviceToOfp4)+" ms")
-
- if ptDownGraphToOfp5 > downThresholdMin and\
- ptDownGraphToOfp5 < downThresholdMax and i > iterIgnore:
- portDownGraphNodeIter[4][i] = ptDownGraphToOfp5
- main.log.info("ONOS5 iter"+str(i)+" graph-to-ofp: "+
- str(ptDownGraphToOfp5)+" ms")
-
- if ptDownDeviceToOfp5 > downThresholdMin and\
- ptDownDeviceToOfp5 < downThresholdMax and i > iterIgnore:
- portDownDevNodeIter[4][i] = ptDownDeviceToOfp5
- main.log.info("ONOS5 iter"+str(i)+" device-to-ofp: "+
- str(ptDownDeviceToOfp5)+" ms")
-
- if clusterCount >= 7:
- jsonStrUp6 = main.ONOS6cli.topologyEventsMetrics()
- jsonStrUp7 = main.ONOS7cli.topologyEventsMetrics()
- jsonObj6 = json.loads( jsonStrUp6 )
- jsonObj7 = json.loads( jsonStrUp7 )
- graphTimestamp6 = \
- jsonObj6[ graphTimestamp ][ 'value' ]
- graphTimestamp7 = \
- jsonObj7[ graphTimestamp ][ 'value' ]
- deviceTimestamp6 = \
- jsonObj6[ deviceTimestamp ][ 'value' ]
- deviceTimestamp7 = \
- jsonObj7[ deviceTimestamp ][ 'value' ]
- ptDownGraphToOfp6 = int( graphTimestamp6 ) -\
- int( timestampBeginPtDown )
- ptDownGraphToOfp7 = int( graphTimestamp7 ) -\
- int( timestampBeginPtDown )
- ptDownDeviceToOfp6 = int( deviceTimestamp6 ) -\
- int( timestampBeginPtDown )
- ptDownDeviceToOfp7 = int( deviceTimestamp7 ) -\
- int( timestampBeginPtDown )
-
- if ptDownGraphToOfp6 > downThresholdMin and\
- ptDownGraphToOfp6 < downThresholdMax and i > iterIgnore:
- portDownGraphNodeIter[5][i] = ptDownGraphToOfp6
- main.log.info("ONOS6 iter"+str(i)+" graph-to-ofp: "+
- str(ptDownGraphToOfp6)+" ms")
-
- if ptDownDeviceToOfp6 > downThresholdMin and\
- ptDownDeviceToOfp6 < downThresholdMax and i > iterIgnore:
- portDownDevNodeIter[5][i] = ptDownDeviceToOfp6
- main.log.info("ONOS6 iter"+str(i)+" device-to-ofp: "+
- str(ptDownDeviceToOfp6)+" ms")
-
- if ptDownGraphToOfp7 > downThresholdMin and\
- ptDownGraphToOfp7 < downThresholdMax and i > iterIgnore:
- portDownGraphNodeIter[6][i] = ptDownGraphToOfp7
- main.log.info("ONOS7 iter"+str(i)+" graph-to-ofp: "+
- str(ptDownGraphToOfp7)+" ms")
-
- if ptDownDeviceToOfp7 > downThresholdMin and\
- ptDownDeviceToOfp7 < downThresholdMax and i > iterIgnore:
- portDownDevNodeIter[6][i] = ptDownDeviceToOfp7
- main.log.info("ONOS7 iter"+str(i)+" device-to-ofp: "+
- str(ptDownDeviceToOfp7)+" ms")
-
- time.sleep( 3 )
-
- # Port up events
- main.step( "Enable port and obtain timestamp" )
- main.step( "Starting wireshark capture for port status up" )
- main.ONOS1.tsharkGrep( tsharkPortStatus, tsharkPortUp )
- time.sleep( 5 )
-
- main.Mininet1.handle.sendline( "sh ifconfig " +
- interfaceConfig + " up" )
- main.Mininet1.handle.expect( "mininet>" )
-
- # Allow time for tshark to capture event
- time.sleep( 5 )
- main.ONOS1.tsharkStop()
-
- time.sleep( 3 )
- os.system( "scp " + ONOSUser + "@" + ONOS1Ip + ":" +
- tsharkPortUp + " /tmp/" )
- fPortUp = open( tsharkPortUp, 'r' )
- fLine = fPortUp.readline()
- objUp = fLine.split( " " )
- if len( fLine ) > 0:
- timestampBeginPtUp = int( float( objUp[ 1 ] ) * 1000 )
- if timestampBeginPtUp < 1400000000000:
- timestampBeginPtUp = \
- int( float( objUp[ 2 ] ) * 1000 )
- main.log.info( "Port up begin timestamp: " +
- str( timestampBeginPtUp ) )
- else:
- main.log.info( "Tshark output file returned unexpected" +
- " results." )
- timestampBeginPtUp = 0
- fPortUp.close()
-
- # Obtain metrics shortly afterwards
-            # This timestamp captures
- # the epoch time at which the topology graph was updated.
- main.step( "Obtain t1 by REST call" )
- jsonStrUp1 = main.ONOS1cli.topologyEventsMetrics()
- jsonObj1 = json.loads( jsonStrUp1 )
- graphTimestamp1 = \
- jsonObj1[ graphTimestamp ][ 'value' ]
- # Obtain device timestamp. This timestamp captures
- # the epoch time at which the device event happened
- deviceTimestamp1 = \
- jsonObj1[ deviceTimestamp ][ 'value' ]
- # Get delta between graph event and OFP
- ptUpGraphToOfp1 = int( graphTimestamp1 ) -\
- int( timestampBeginPtUp )
- # Get delta between device event and OFP
- ptUpDeviceToOfp1 = int( deviceTimestamp1 ) -\
- int( timestampBeginPtUp )
-
- if ptUpGraphToOfp1 > upThresholdMin and\
- ptUpGraphToOfp1 < upThresholdMax and i > iterIgnore:
- portUpGraphNodeIter[0][i] = ptUpGraphToOfp1
- main.log.info("iter"+str(i)+" port up graph-to-ofp: "+
- str(ptUpGraphToOfp1)+" ms")
- else:
- main.log.info("iter"+str(i)+" skipped. Result: "+
- str(ptUpGraphToOfp1)+" ms")
-
- if ptUpDeviceToOfp1 > upThresholdMin and\
- ptUpDeviceToOfp1 < upThresholdMax and i > iterIgnore:
- portUpDevNodeIter[0][i] = ptUpDeviceToOfp1
- main.log.info("iter"+str(i)+" port up device-to-ofp: "+
- str(ptUpDeviceToOfp1)+" ms")
- else:
- main.log.info("iter"+str(i)+" skipped. Result: "+
- str(ptUpDeviceToOfp1)+" ms")
-
- if clusterCount >= 3:
- jsonStrUp2 = main.ONOS2cli.topologyEventsMetrics()
- jsonStrUp3 = main.ONOS3cli.topologyEventsMetrics()
- jsonObj2 = json.loads( jsonStrUp2 )
- jsonObj3 = json.loads( jsonStrUp3 )
- graphTimestamp2 = \
- jsonObj2[ graphTimestamp ][ 'value' ]
- graphTimestamp3 = \
- jsonObj3[ graphTimestamp ][ 'value' ]
- deviceTimestamp2 = \
- jsonObj2[ deviceTimestamp ][ 'value' ]
- deviceTimestamp3 = \
- jsonObj3[ deviceTimestamp ][ 'value' ]
- ptUpGraphToOfp2 = int( graphTimestamp2 ) -\
- int( timestampBeginPtUp )
- ptUpGraphToOfp3 = int( graphTimestamp3 ) -\
- int( timestampBeginPtUp )
- ptUpDeviceToOfp2 = int( deviceTimestamp2 ) -\
- int( timestampBeginPtUp )
- ptUpDeviceToOfp3 = int( deviceTimestamp3 ) -\
- int( timestampBeginPtUp )
-
- if ptUpGraphToOfp2 > upThresholdMin and\
- ptUpGraphToOfp2 < upThresholdMax and i > iterIgnore:
- portUpGraphNodeIter[1][i] = ptUpGraphToOfp2
- main.log.info("iter"+str(i)+" port up graph-to-ofp: "+
- str(ptUpGraphToOfp2)+" ms")
-
- if ptUpDeviceToOfp2 > upThresholdMin and\
- ptUpDeviceToOfp2 < upThresholdMax and i > iterIgnore:
- portUpDevNodeIter[1][i] = ptUpDeviceToOfp2
- main.log.info("iter"+str(i)+" port up device-to-ofp: "+
- str(ptUpDeviceToOfp2)+" ms")
-
- if ptUpGraphToOfp3 > upThresholdMin and\
- ptUpGraphToOfp3 < upThresholdMax and i > iterIgnore:
- portUpGraphNodeIter[2][i] = ptUpGraphToOfp3
- main.log.info("iter"+str(i)+" port up graph-to-ofp: "+
- str(ptUpGraphToOfp3)+" ms")
-
- if ptUpDeviceToOfp3 > upThresholdMin and\
- ptUpDeviceToOfp3 < upThresholdMax and i > iterIgnore:
- portUpDevNodeIter[2][i] = ptUpDeviceToOfp3
- main.log.info("iter"+str(i)+" port up device-to-ofp: "+
- str(ptUpDeviceToOfp3)+" ms")
-
- if clusterCount >= 5:
- jsonStrUp4 = main.ONOS4cli.topologyEventsMetrics()
- jsonStrUp5 = main.ONOS5cli.topologyEventsMetrics()
- jsonObj4 = json.loads( jsonStrUp4 )
- jsonObj5 = json.loads( jsonStrUp5 )
- graphTimestamp4 = \
- jsonObj4[ graphTimestamp ][ 'value' ]
- graphTimestamp5 = \
- jsonObj5[ graphTimestamp ][ 'value' ]
- deviceTimestamp4 = \
- jsonObj4[ deviceTimestamp ][ 'value' ]
- deviceTimestamp5 = \
- jsonObj5[ deviceTimestamp ][ 'value' ]
- ptUpGraphToOfp4 = int( graphTimestamp4 ) -\
- int( timestampBeginPtUp )
- ptUpGraphToOfp5 = int( graphTimestamp5 ) -\
- int( timestampBeginPtUp )
- ptUpDeviceToOfp4 = int( deviceTimestamp4 ) -\
- int( timestampBeginPtUp )
- ptUpDeviceToOfp5 = int( deviceTimestamp5 ) -\
- int( timestampBeginPtUp )
-
- if ptUpGraphToOfp4 > upThresholdMin and\
- ptUpGraphToOfp4 < upThresholdMax and i > iterIgnore:
- portUpGraphNodeIter[3][i] = ptUpGraphToOfp4
- main.log.info("iter"+str(i)+" port up graph-to-ofp: "+
- str(ptUpGraphToOfp4)+" ms")
-
- if ptUpDeviceToOfp4 > upThresholdMin and\
- ptUpDeviceToOfp4 < upThresholdMax and i > iterIgnore:
- portUpDevNodeIter[3][i] = ptUpDeviceToOfp4
- main.log.info("iter"+str(i)+" port up device-to-ofp: "+
- str(ptUpDeviceToOfp4)+" ms")
-
- if ptUpGraphToOfp5 > upThresholdMin and\
- ptUpGraphToOfp5 < upThresholdMax and i > iterIgnore:
- portUpGraphNodeIter[4][i] = ptUpGraphToOfp5
- main.log.info("iter"+str(i)+" port up graph-to-ofp: "+
- str(ptUpGraphToOfp5)+" ms")
-
- if ptUpDeviceToOfp5 > upThresholdMin and\
- ptUpDeviceToOfp5 < upThresholdMax and i > iterIgnore:
- portUpDevNodeIter[4][i] = ptUpDeviceToOfp5
- main.log.info("iter"+str(i)+" port up device-to-ofp: "+
- str(ptUpDeviceToOfp5)+" ms")
-
- if clusterCount >= 7:
- jsonStrUp6 = main.ONOS6cli.topologyEventsMetrics()
- jsonStrUp7 = main.ONOS7cli.topologyEventsMetrics()
- jsonObj6 = json.loads( jsonStrUp6 )
- jsonObj7 = json.loads( jsonStrUp7 )
- graphTimestamp6 = \
- jsonObj6[ graphTimestamp ][ 'value' ]
- graphTimestamp7 = \
- jsonObj7[ graphTimestamp ][ 'value' ]
- deviceTimestamp6 = \
- jsonObj6[ deviceTimestamp ][ 'value' ]
- deviceTimestamp7 = \
- jsonObj7[ deviceTimestamp ][ 'value' ]
- ptUpGraphToOfp6 = int( graphTimestamp6 ) -\
- int( timestampBeginPtUp )
- ptUpGraphToOfp7 = int( graphTimestamp7 ) -\
- int( timestampBeginPtUp )
- ptUpDeviceToOfp6 = int( deviceTimestamp6 ) -\
- int( timestampBeginPtUp )
- ptUpDeviceToOfp7 = int( deviceTimestamp7 ) -\
- int( timestampBeginPtUp )
-
- if ptUpGraphToOfp6 > upThresholdMin and\
- ptUpGraphToOfp6 < upThresholdMax and i > iterIgnore:
- portUpGraphNodeIter[5][i] = ptUpGraphToOfp6
- main.log.info("iter"+str(i)+" port up graph-to-ofp: "+
- str(ptUpGraphToOfp6)+" ms")
-
- if ptUpDeviceToOfp6 > upThresholdMin and\
- ptUpDeviceToOfp6 < upThresholdMax and i > iterIgnore:
- portUpDevNodeIter[5][i] = ptUpDeviceToOfp6
- main.log.info("iter"+str(i)+" port up device-to-ofp: "+
- str(ptUpDeviceToOfp6)+" ms")
-
- if ptUpGraphToOfp7 > upThresholdMin and\
- ptUpGraphToOfp7 < upThresholdMax and i > iterIgnore:
- portUpGraphNodeIter[6][i] = ptUpGraphToOfp7
- main.log.info("iter"+str(i)+" port up graph-to-ofp: "+
- str(ptUpGraphToOfp7)+" ms")
-
- if ptUpDeviceToOfp7 > upThresholdMin and\
- ptUpDeviceToOfp7 < upThresholdMax and i > iterIgnore:
- portUpDevNodeIter[6][i] = ptUpDeviceToOfp7
- main.log.info("iter"+str(i)+" port up device-to-ofp: "+
- str(ptUpDeviceToOfp7)+" ms")
-
- # END ITERATION FOR LOOP
-
- portUpDevList = []
- portUpGraphList = []
- portDownDevList = []
- portDownGraphList = []
-
- portUpDevAvg = 0
- portUpGraphAvg = 0
- portDownDevAvg = 0
- portDownGraphAvg = 0
-
- dbCmdList = []
-
- for node in range( 0, clusterCount ):
-
- # NOTE:
- # Currently the 2d array is initialized with 0's.
- # We want to avoid skewing our results if the array
- # was not modified with the correct latency.
- for item in portUpDevNodeIter[node]:
- if item > 0.0:
- portUpDevList.append(item)
- for item in portUpGraphNodeIter[node]:
- if item > 0.0:
- portUpGraphList.append(item)
- for item in portDownDevNodeIter[node]:
- if item > 0.0:
- portDownDevList.append(item)
- for item in portDownGraphNodeIter[node]:
- if item > 0.0:
- portDownGraphList.append(item)
-
- portUpDevAvg = round(numpy.mean(portUpDevList), 2)
- portUpGraphAvg = round(numpy.mean(portUpGraphList), 2)
- portDownDevAvg = round(numpy.mean(portDownDevList), 2)
- portDownGraphAvg = round(numpy.mean(portDownGraphList), 2)
-
- portUpStdDev = round(numpy.std(portUpGraphList),2)
- portDownStdDev = round(numpy.std(portDownGraphList),2)
-
- main.log.report( " - Node "+str(node+1)+" Summary - " )
- #main.log.report( " Port up ofp-to-device "+
- # str(round(portUpDevAvg, 2))+" ms")
- main.log.report( " Port up ofp-to-graph "+
- str(portUpGraphAvg)+" ms")
- #main.log.report( " Port down ofp-to-device "+
- # str(round(portDownDevAvg, 2))+" ms")
- main.log.report( " Port down ofp-to-graph "+
- str(portDownGraphAvg)+" ms")
-
- dbCmdList.append(
- "INSERT INTO port_latency_tests VALUES("
- "'"+timeToPost+"','port_latency_results',"
- ""+runNum+","+str(clusterCount)+",'baremetal"+str(node+1)+"',"
- ""+str(portUpGraphAvg)+","+str(portUpStdDev)+
- ","+str(portDownGraphAvg)+","+str(portDownStdDev)+");"
- )
-
- #Write to file for posting to DB
- fResult = open(resultPath, 'a')
- for line in dbCmdList:
- if line:
- fResult.write(line+"\n")
- fResult.close()
-
- print dbCmdList
-
- # Remove switches from controller for next test
- main.Mininet1.deleteSwController( "s1" )
- main.Mininet1.deleteSwController( "s2" )
-
- #TODO: correct assertion
-
- utilities.assert_equals(
- expect=main.TRUE,
- actual=assertion,
- onpass="Port discovery latency calculation successful",
- onfail="Port discovery test failed" )
-
- def CASE4( self, main ):
- """
- Link down event using loss rate 100%
-
- Important:
- Use a simple 2 switch topology with 1 link between
- the two switches. Ensure that mac addresses of the
- switches are 1 / 2 respectively
- """
- import time
- import subprocess
- import os
- import requests
- import json
- import numpy
-
- ONOS1Ip = main.params[ 'CTRL' ][ 'ip1' ]
- ONOS2Ip = main.params[ 'CTRL' ][ 'ip2' ]
- ONOS3Ip = main.params[ 'CTRL' ][ 'ip3' ]
- ONOSUser = main.params[ 'CTRL' ][ 'user' ]
-
- defaultSwPort = main.params[ 'CTRL' ][ 'port1' ]
-
- # Number of iterations of case
- numIter = main.params[ 'TEST' ][ 'numIter' ]
-
- # Timestamp 'keys' for json metrics output.
- # These are subject to change, hence moved into params
- deviceTimestamp = main.params[ 'JSON' ][ 'deviceTimestamp' ]
- linkTimestamp = main.params[ 'JSON' ][ 'linkTimestamp' ]
- graphTimestamp = main.params[ 'JSON' ][ 'graphTimestamp' ]
-
- debugMode = main.params[ 'TEST' ][ 'debugMode' ]
-
- localTime = time.strftime( '%x %X' )
- localTime = localTime.replace( "/", "" )
- localTime = localTime.replace( " ", "_" )
- localTime = localTime.replace( ":", "" )
- if debugMode == 'on':
- main.ONOS1.tsharkPcap( "eth0",
- "/tmp/link_lat_pcap_" + localTime )
-
- # Threshold for this test case
- upThresholdStr = main.params[ 'TEST' ][ 'linkUpThreshold' ]
- downThresholdStr = main.params[ 'TEST' ][ 'linkDownThreshold' ]
-
- upThresholdObj = upThresholdStr.split( "," )
- downThresholdObj = downThresholdStr.split( "," )
-
- upThresholdMin = int( upThresholdObj[ 0 ] )
- upThresholdMax = int( upThresholdObj[ 1 ] )
-
- downThresholdMin = int( downThresholdObj[ 0 ] )
- downThresholdMax = int( downThresholdObj[ 1 ] )
-
- assertion = main.TRUE
- # Link event timestamp to system time list
- linkDownLinkToSystemList = []
- linkUpLinkToSystemList = []
- # Graph event timestamp to system time list
- linkDownGraphToSystemList = []
- linkUpGraphToSystemList = []
-
- main.log.report( "Link up / down discovery latency between " +
- "two switches" )
- main.log.report( "Simulated by setting loss-rate 100%" )
- main.log.report( "'tc qdisc add dev <intfs> root netem loss 100%'" )
- main.log.report( "Total iterations of test: " + str( numIter ) )
-
- main.step( "Assign all switches" )
- main.Mininet1.assignSwController( sw="1",
- ip1=ONOS1Ip, port1=defaultSwPort )
- main.Mininet1.assignSwController( sw="2",
- ip1=ONOS1Ip, port1=defaultSwPort )
-
- main.step( "Verifying switch assignment" )
- resultS1 = main.Mininet1.getSwController( sw="s1" )
- resultS2 = main.Mininet1.getSwController( sw="s2" )
-
- # Allow time for events to finish before taking measurements
- time.sleep( 10 )
-
- linkDown1 = False
- linkDown2 = False
- linkDown3 = False
- # Start iteration of link event test
- for i in range( 0, int( numIter ) ):
- main.step( "Getting initial system time as t0" )
-
- # System time in epoch ms
- timestampLinkDownT0 = time.time() * 1000
- # Link down is simulated by 100% loss rate using traffic
- # control command
- main.Mininet1.handle.sendline(
- "sh tc qdisc add dev s1-eth1 root netem loss 100%" )
-
- # TODO: Iterate through 'links' command to verify that
- # link s1 -> s2 went down ( loop timeout 30 seconds )
- # on all 3 ONOS instances
- main.log.info( "Checking ONOS for link update" )
- loopCount = 0
- while( not ( linkDown1 and linkDown2 and linkDown3 )
- and loopCount < 30 ):
- jsonStr1 = main.ONOS1cli.links()
- jsonStr2 = main.ONOS2cli.links()
- jsonStr3 = main.ONOS3cli.links()
-
- if not ( jsonStr1 and jsonStr2 and jsonStr3 ):
- main.log.error( "CLI command returned error " )
- break
- else:
- jsonObj1 = json.loads( jsonStr1 )
- jsonObj2 = json.loads( jsonStr2 )
- jsonObj3 = json.loads( jsonStr3 )
- for obj1 in jsonObj1:
- if '01' not in obj1[ 'src' ][ 'device' ]:
- linkDown1 = True
- main.log.info( "Link down from " +
- "s1 -> s2 on ONOS1 detected" )
- for obj2 in jsonObj2:
- if '01' not in obj2[ 'src' ][ 'device' ]:
- linkDown2 = True
- main.log.info( "Link down from " +
- "s1 -> s2 on ONOS2 detected" )
- for obj3 in jsonObj3:
- if '01' not in obj3[ 'src' ][ 'device' ]:
- linkDown3 = True
- main.log.info( "Link down from " +
- "s1 -> s2 on ONOS3 detected" )
-
- loopCount += 1
- # If CLI doesn't like the continuous requests
- # and exits in this loop, increase the sleep here.
- # Consequently, while loop timeout will increase
- time.sleep( 1 )
-
- # Give time for metrics measurement to catch up
- # NOTE: May need to be configured more accurately
- time.sleep( 10 )
- # If we exited the while loop and link down 1,2,3 are still
- # false, then ONOS has failed to discover link down event
- if not ( linkDown1 and linkDown2 and linkDown3 ):
- main.log.info( "Link down discovery failed" )
-
- linkDownLatGraph1 = 0
- linkDownLatGraph2 = 0
- linkDownLatGraph3 = 0
- linkDownLatLink1 = 0
- linkDownLatLink2 = 0
- linkDownLatLink3 = 0
-
- assertion = main.FALSE
- else:
- jsonTopoMetrics1 =\
- main.ONOS1cli.topologyEventsMetrics()
- jsonTopoMetrics2 =\
- main.ONOS2cli.topologyEventsMetrics()
- jsonTopoMetrics3 =\
- main.ONOS3cli.topologyEventsMetrics()
- jsonTopoMetrics1 = json.loads( jsonTopoMetrics1 )
- jsonTopoMetrics2 = json.loads( jsonTopoMetrics2 )
- jsonTopoMetrics3 = json.loads( jsonTopoMetrics3 )
-
- main.log.info( "Obtaining graph and device timestamp" )
- graphTimestamp1 = \
- jsonTopoMetrics1[ graphTimestamp ][ 'value' ]
- graphTimestamp2 = \
- jsonTopoMetrics2[ graphTimestamp ][ 'value' ]
- graphTimestamp3 = \
- jsonTopoMetrics3[ graphTimestamp ][ 'value' ]
-
- linkTimestamp1 = \
- jsonTopoMetrics1[ linkTimestamp ][ 'value' ]
- linkTimestamp2 = \
- jsonTopoMetrics2[ linkTimestamp ][ 'value' ]
- linkTimestamp3 = \
- jsonTopoMetrics3[ linkTimestamp ][ 'value' ]
-
- if graphTimestamp1 and graphTimestamp2 and\
- graphTimestamp3 and linkTimestamp1 and\
- linkTimestamp2 and linkTimestamp3:
- linkDownLatGraph1 = int( graphTimestamp1 ) -\
- int( timestampLinkDownT0 )
- linkDownLatGraph2 = int( graphTimestamp2 ) -\
- int( timestampLinkDownT0 )
- linkDownLatGraph3 = int( graphTimestamp3 ) -\
- int( timestampLinkDownT0 )
-
- linkDownLatLink1 = int( linkTimestamp1 ) -\
- int( timestampLinkDownT0 )
- linkDownLatLink2 = int( linkTimestamp2 ) -\
- int( timestampLinkDownT0 )
- linkDownLatLink3 = int( linkTimestamp3 ) -\
- int( timestampLinkDownT0 )
- else:
- main.log.error( "There was an error calculating" +
- " the delta for link down event" )
- linkDownLatGraph1 = 0
- linkDownLatGraph2 = 0
- linkDownLatGraph3 = 0
-
- linkDownLatLink1 = 0
- linkDownLatLink2 = 0
- linkDownLatLink3 = 0
-
- main.log.info( "Link down latency ONOS1 iteration " +
- str( i ) + " (end-to-end): " +
- str( linkDownLatGraph1 ) + " ms" )
- main.log.info( "Link down latency ONOS2 iteration " +
- str( i ) + " (end-to-end): " +
- str( linkDownLatGraph2 ) + " ms" )
- main.log.info( "Link down latency ONOS3 iteration " +
- str( i ) + " (end-to-end): " +
- str( linkDownLatGraph3 ) + " ms" )
-
- main.log.info( "Link down latency ONOS1 iteration " +
- str( i ) + " (link-event-to-system-timestamp): " +
- str( linkDownLatLink1 ) + " ms" )
- main.log.info( "Link down latency ONOS2 iteration " +
- str( i ) + " (link-event-to-system-timestamp): " +
- str( linkDownLatLink2 ) + " ms" )
- main.log.info( "Link down latency ONOS3 iteration " +
- str( i ) + " (link-event-to-system-timestamp): " +
- str( linkDownLatLink3 ) + " ms" )
-
- # Calculate avg of node calculations
- linkDownLatGraphAvg =\
- ( linkDownLatGraph1 +
- linkDownLatGraph2 +
- linkDownLatGraph3 ) / 3
- linkDownLatLinkAvg =\
- ( linkDownLatLink1 +
- linkDownLatLink2 +
- linkDownLatLink3 ) / 3
-
- # Set threshold and append latency to list
- if linkDownLatGraphAvg > downThresholdMin and\
- linkDownLatGraphAvg < downThresholdMax:
- linkDownGraphToSystemList.append(
- linkDownLatGraphAvg )
- else:
- main.log.info( "Link down latency exceeded threshold" )
- main.log.info( "Results for iteration " + str( i ) +
- " have been omitted" )
- if linkDownLatLinkAvg > downThresholdMin and\
- linkDownLatLinkAvg < downThresholdMax:
- linkDownLinkToSystemList.append(
- linkDownLatLinkAvg )
- else:
- main.log.info( "Link down latency exceeded threshold" )
- main.log.info( "Results for iteration " + str( i ) +
- " have been omitted" )
-
- # NOTE: To remove loss rate and measure latency:
- # 'sh tc qdisc del dev s1-eth1 root'
- timestampLinkUpT0 = time.time() * 1000
- main.Mininet1.handle.sendline( "sh tc qdisc del dev " +
- "s1-eth1 root" )
- main.Mininet1.handle.expect( "mininet>" )
-
- main.log.info( "Checking ONOS for link update" )
-
- linkDown1 = True
- linkDown2 = True
- linkDown3 = True
- loopCount = 0
- while( ( linkDown1 and linkDown2 and linkDown3 )
- and loopCount < 30 ):
- jsonStr1 = main.ONOS1cli.links()
- jsonStr2 = main.ONOS2cli.links()
- jsonStr3 = main.ONOS3cli.links()
- if not ( jsonStr1 and jsonStr2 and jsonStr3 ):
- main.log.error( "CLI command returned error " )
- break
- else:
- jsonObj1 = json.loads( jsonStr1 )
- jsonObj2 = json.loads( jsonStr2 )
- jsonObj3 = json.loads( jsonStr3 )
-
- for obj1 in jsonObj1:
- if '01' in obj1[ 'src' ][ 'device' ]:
- linkDown1 = False
- main.log.info( "Link up from " +
- "s1 -> s2 on ONOS1 detected" )
- for obj2 in jsonObj2:
- if '01' in obj2[ 'src' ][ 'device' ]:
- linkDown2 = False
- main.log.info( "Link up from " +
- "s1 -> s2 on ONOS2 detected" )
- for obj3 in jsonObj3:
- if '01' in obj3[ 'src' ][ 'device' ]:
- linkDown3 = False
- main.log.info( "Link up from " +
- "s1 -> s2 on ONOS3 detected" )
-
- loopCount += 1
- time.sleep( 1 )
-
- if ( linkDown1 and linkDown2 and linkDown3 ):
- main.log.info( "Link up discovery failed" )
-
- linkUpLatGraph1 = 0
- linkUpLatGraph2 = 0
- linkUpLatGraph3 = 0
- linkUpLatLink1 = 0
- linkUpLatLink2 = 0
- linkUpLatLink3 = 0
-
- assertion = main.FALSE
- else:
- jsonTopoMetrics1 =\
- main.ONOS1cli.topologyEventsMetrics()
- jsonTopoMetrics2 =\
- main.ONOS2cli.topologyEventsMetrics()
- jsonTopoMetrics3 =\
- main.ONOS3cli.topologyEventsMetrics()
- jsonTopoMetrics1 = json.loads( jsonTopoMetrics1 )
- jsonTopoMetrics2 = json.loads( jsonTopoMetrics2 )
- jsonTopoMetrics3 = json.loads( jsonTopoMetrics3 )
-
- main.log.info( "Obtaining graph and device timestamp" )
- graphTimestamp1 = \
- jsonTopoMetrics1[ graphTimestamp ][ 'value' ]
- graphTimestamp2 = \
- jsonTopoMetrics2[ graphTimestamp ][ 'value' ]
- graphTimestamp3 = \
- jsonTopoMetrics3[ graphTimestamp ][ 'value' ]
-
- linkTimestamp1 = \
- jsonTopoMetrics1[ linkTimestamp ][ 'value' ]
- linkTimestamp2 = \
- jsonTopoMetrics2[ linkTimestamp ][ 'value' ]
- linkTimestamp3 = \
- jsonTopoMetrics3[ linkTimestamp ][ 'value' ]
-
- if graphTimestamp1 and graphTimestamp2 and\
- graphTimestamp3 and linkTimestamp1 and\
- linkTimestamp2 and linkTimestamp3:
- linkUpLatGraph1 = int( graphTimestamp1 ) -\
- int( timestampLinkUpT0 )
- linkUpLatGraph2 = int( graphTimestamp2 ) -\
- int( timestampLinkUpT0 )
- linkUpLatGraph3 = int( graphTimestamp3 ) -\
- int( timestampLinkUpT0 )
-
- linkUpLatLink1 = int( linkTimestamp1 ) -\
- int( timestampLinkUpT0 )
- linkUpLatLink2 = int( linkTimestamp2 ) -\
- int( timestampLinkUpT0 )
- linkUpLatLink3 = int( linkTimestamp3 ) -\
- int( timestampLinkUpT0 )
- else:
- main.log.error( "There was an error calculating" +
- " the delta for link up event" )
- linkUpLatGraph1 = 0
- linkUpLatGraph2 = 0
- linkUpLatGraph3 = 0
-
- linkUpLatLink1 = 0
- linkUpLatLink2 = 0
- linkUpLatLink3 = 0
-
- if debugMode == 'on':
- main.log.info( "Link up latency ONOS1 iteration " +
- str( i ) + " (end-to-end): " +
- str( linkUpLatGraph1 ) + " ms" )
- main.log.info( "Link up latency ONOS2 iteration " +
- str( i ) + " (end-to-end): " +
- str( linkUpLatGraph2 ) + " ms" )
- main.log.info( "Link up latency ONOS3 iteration " +
- str( i ) + " (end-to-end): " +
- str( linkUpLatGraph3 ) + " ms" )
-
- main.log.info(
- "Link up latency ONOS1 iteration " +
- str( i ) +
- " (link-event-to-system-timestamp): " +
- str( linkUpLatLink1 ) +
- " ms" )
- main.log.info(
- "Link up latency ONOS2 iteration " +
- str( i ) +
- " (link-event-to-system-timestamp): " +
- str( linkUpLatLink2 ) +
- " ms" )
- main.log.info(
- "Link up latency ONOS3 iteration " +
- str( i ) +
- " (link-event-to-system-timestamp): " +
- str( linkUpLatLink3 ) + " ms" )
-
- # Calculate avg of node calculations
- linkUpLatGraphAvg =\
- ( linkUpLatGraph1 +
- linkUpLatGraph2 +
- linkUpLatGraph3 ) / 3
- linkUpLatLinkAvg =\
- ( linkUpLatLink1 +
- linkUpLatLink2 +
- linkUpLatLink3 ) / 3
-
- # Set threshold and append latency to list
- if linkUpLatGraphAvg > upThresholdMin and\
- linkUpLatGraphAvg < upThresholdMax:
- linkUpGraphToSystemList.append(
- linkUpLatGraphAvg )
- else:
- main.log.info( "Link up latency exceeded threshold" )
- main.log.info( "Results for iteration " + str( i ) +
- " have been omitted" )
- if linkUpLatLinkAvg > upThresholdMin and\
- linkUpLatLinkAvg < upThresholdMax:
- linkUpLinkToSystemList.append(
- linkUpLatLinkAvg )
- else:
- main.log.info( "Link up latency exceeded threshold" )
- main.log.info( "Results for iteration " + str( i ) +
- " have been omitted" )
-
- # Calculate min, max, avg of list and report
- linkDownMin = min( linkDownGraphToSystemList )
- linkDownMax = max( linkDownGraphToSystemList )
- linkDownAvg = sum( linkDownGraphToSystemList ) / \
- len( linkDownGraphToSystemList )
- linkUpMin = min( linkUpGraphToSystemList )
- linkUpMax = max( linkUpGraphToSystemList )
- linkUpAvg = sum( linkUpGraphToSystemList ) / \
- len( linkUpGraphToSystemList )
- linkDownStdDev = \
- str( round( numpy.std( linkDownGraphToSystemList ), 1 ) )
- linkUpStdDev = \
- str( round( numpy.std( linkUpGraphToSystemList ), 1 ) )
-
- main.log.report( "Link down latency " +
- "Avg: " + str( linkDownAvg ) + " ms " +
- "Std Deviation: " + linkDownStdDev + " ms" )
- main.log.report( "Link up latency " +
- "Avg: " + str( linkUpAvg ) + " ms " +
- "Std Deviation: " + linkUpStdDev + " ms" )
-
- utilities.assert_equals(
- expect=main.TRUE,
- actual=assertion,
- onpass="Link discovery latency calculation successful",
- onfail="Link discovery latency case failed" )
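
CASE4 above simulates link failure by attaching a netem qdisc with 100% loss to the switch interface, then measures the gap between a system timestamp taken just before the change and the link/graph event timestamps reported by the topology metrics. A simplified sketch of that pattern, assuming placeholder callables for the TestON driver calls (send_to_mininet and get_link_event_ts_ms are not real driver methods):

    # Simplified sketch of the link-down measurement above: record t0,
    # apply 100% loss with tc, poll until the controller reports a link
    # event newer than t0, then return the delta in ms.
    import time

    def measure_link_down_latency(send_to_mininet, get_link_event_ts_ms,
                                  intf="s1-eth1", timeout=30):
        t0_ms = time.time() * 1000
        send_to_mininet("sh tc qdisc add dev %s root netem loss 100%%" % intf)
        for _ in range(timeout):
            event_ts = get_link_event_ts_ms()
            if event_ts and int(event_ts) >= int(t0_ms):
                return int(event_ts) - int(t0_ms)
            time.sleep(1)
        return None  # link-down event was never observed

    # Restoring the link for the link-up measurement, as the test does:
    #     send_to_mininet("sh tc qdisc del dev %s root" % intf)
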
-
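
Each iteration's per-node average in CASE4 is kept only when it falls strictly inside the configured (min, max) threshold window, and the surviving samples are then summarized with min/max/mean plus numpy's standard deviation. A small sketch of that filter-and-summarize pattern (the function names are illustrative; numpy.std matches the call used above):

    # Sketch of the threshold filter and end-of-case summary used above.
    import numpy

    def accept_within(threshold_min, threshold_max, value, samples):
        # Values on or outside either bound are dropped, as in the test
        if threshold_min < value < threshold_max:
            samples.append(value)
            return True
        return False

    def summarize(samples):
        return {
            'min': min(samples),
            'max': max(samples),
            'avg': sum(samples) / len(samples),
            'std': round(numpy.std(samples), 1),
        }
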
- def CASE5( self, main ):
- """
- 100 Switch discovery latency
-
- Important:
- This test case can be dangerous if your machine
- already has iptables rules configured: one of its
- steps flushes all existing iptables rules.
- Note:
- You can specify the number of switches in the
- params file to adjust the switch discovery size
- ( and specify the corresponding topology in the
- Mininet1 section of the .topo file )
- """
- import time
- import subprocess
- import os
- import requests
- import json
-
- ONOS1Ip = main.params[ 'CTRL' ][ 'ip1' ]
- ONOS2Ip = main.params[ 'CTRL' ][ 'ip2' ]
- ONOS3Ip = main.params[ 'CTRL' ][ 'ip3' ]
- MN1Ip = main.params[ 'MN' ][ 'ip1' ]
- ONOSUser = main.params[ 'CTRL' ][ 'user' ]
-
- defaultSwPort = main.params[ 'CTRL' ][ 'port1' ]
-
- # Number of iterations of case
- numIter = main.params[ 'TEST' ][ 'numIter' ]
- numSw = main.params[ 'TEST' ][ 'numSwitch' ]
-
- # Timestamp 'keys' for json metrics output.
- # These are subject to change, hence moved into params
- deviceTimestamp = main.params[ 'JSON' ][ 'deviceTimestamp' ]
- graphTimestamp = main.params[ 'JSON' ][ 'graphTimestamp' ]
-
- debugMode = main.params[ 'TEST' ][ 'debugMode' ]
-
- localTime = time.strftime( '%X' )
- localTime = localTime.replace( "/", "" )
- localTime = localTime.replace( " ", "_" )
- localTime = localTime.replace( ":", "" )
- if debugMode == 'on':
- main.ONOS1.tsharkPcap( "eth0",
- "/tmp/100_sw_lat_pcap_" + localTime )
-
- # Threshold for this test case
- swDiscThresholdStr = main.params[ 'TEST' ][ 'swDisc100Threshold' ]
- swDiscThresholdObj = swDiscThresholdStr.split( "," )
- swDiscThresholdMin = int( swDiscThresholdObj[ 0 ] )
- swDiscThresholdMax = int( swDiscThresholdObj[ 1 ] )
-
- tsharkOfpOutput = "/tmp/tshark_ofp_" + numSw + "sw.txt"
- tsharkTcpOutput = "/tmp/tshark_tcp_" + numSw + "sw.txt"
-
- tsharkOfpResultList = []
- tsharkTcpResultList = []
-
- swDiscoveryLatList = []
-
- main.case( numSw + " Switch discovery latency" )
- main.step( "Assigning all switches to ONOS1" )
- for i in range( 1, int( numSw ) + 1 ):
- main.Mininet1.assignSwController(
- sw=str( i ),
- ip1=ONOS1Ip,
- port1=defaultSwPort )
-
- # Ensure that nodes are configured with ptpd
- # Just a warning message
- main.log.info( "Please check the ptpd configuration to ensure" +
- " all nodes' system times are in sync" )
- time.sleep( 5 )
-
- for i in range( 0, int( numIter ) ):
-
- main.step( "Set iptables rule to block incoming sw connections" )
- # Set iptables rule to block incoming switch connections
- # The rule description is as follows:
- # Append to INPUT rule,
- # behavior DROP that matches following:
- # * packet type: tcp
- # * source IP: MN1Ip
- # * destination PORT: 6633
- main.ONOS1.handle.sendline(
- "sudo iptables -A INPUT -p tcp -s " + MN1Ip +
- " --dport " + defaultSwPort + " -j DROP" )
- main.ONOS1.handle.expect( "\$" )
- # Append to OUTPUT rule,
- # behavior DROP that matches following:
- # * packet type: tcp
- # * source IP: MN1Ip
- # * destination PORT: 6633
- main.ONOS1.handle.sendline(
- "sudo iptables -A OUTPUT -p tcp -s " + MN1Ip +
- " --dport " + defaultSwPort + " -j DROP" )
- main.ONOS1.handle.expect( "\$" )
- # Give time to allow rule to take effect
- # NOTE: Sleep period may need to be configured
- # based on the number of switches in the topology
- main.log.info( "Please wait for switch connection to " +
- "time out" )
- time.sleep( 60 )
-
- # Gather vendor OFP with tshark
- main.ONOS1.tsharkGrep( "OFP 86 Vendor",
- tsharkOfpOutput )
- main.ONOS1.tsharkGrep( "TCP 74 ",
- tsharkTcpOutput )
-
- # NOTE: Remove all iptables rule quickly ( flush )
- # Before removal, obtain TestON timestamp at which
- # removal took place
- # ( ensuring nodes are configured via ptp )
- # sudo iptables -F
-
- t0System = time.time() * 1000
- main.ONOS1.handle.sendline(
- "sudo iptables -F" )
-
- # Counter to track loop count
- counterLoop = 0
- counterAvail1 = 0
- counterAvail2 = 0
- counterAvail3 = 0
- onos1Dev = False
- onos2Dev = False
- onos3Dev = False
- while counterLoop < 60:
- # Keep polling the devices command until all devices
- # on all 3 ONOS instances report as available, then
- # obtain the graph event timestamp as t1.
- deviceStrObj1 = main.ONOS1cli.devices()
- deviceStrObj2 = main.ONOS2cli.devices()
- deviceStrObj3 = main.ONOS3cli.devices()
-
- deviceJson1 = json.loads( deviceStrObj1 )
- deviceJson2 = json.loads( deviceStrObj2 )
- deviceJson3 = json.loads( deviceStrObj3 )
-
- for device1 in deviceJson1:
- if device1[ 'available' ]:
- counterAvail1 += 1
- if counterAvail1 == int( numSw ):
- onos1Dev = True
- main.log.info( "All devices have been " +
- "discovered on ONOS1" )
- else:
- counterAvail1 = 0
- for device2 in deviceJson2:
- if device2[ 'available' ]:
- counterAvail2 += 1
- if counterAvail2 == int( numSw ):
- onos2Dev = True
- main.log.info( "All devices have been " +
- "discovered on ONOS2" )
- else:
- counterAvail2 = 0
- for device3 in deviceJson3:
- if device3[ 'available' ]:
- counterAvail3 += 1
- if counterAvail3 == int( numSw ):
- onos3Dev = True
- main.log.info( "All devices have been " +
- "discovered on ONOS3" )
- else:
- counterAvail3 = 0
-
- if onos1Dev and onos2Dev and onos3Dev:
- main.log.info( "All devices have been discovered " +
- "on all ONOS instances" )
- jsonStrTopologyMetrics1 =\
- main.ONOS1cli.topologyEventsMetrics()
- jsonStrTopologyMetrics2 =\
- main.ONOS2cli.topologyEventsMetrics()
- jsonStrTopologyMetrics3 =\
- main.ONOS3cli.topologyEventsMetrics()
-
- # Exit while loop if all devices discovered
- break
-
- counterLoop += 1
- # Give some time in between CLI calls
- #( will not affect measurement )
- time.sleep( 3 )
-
- main.ONOS1.tsharkStop()
-
- os.system( "scp " + ONOSUser + "@" + ONOS1Ip + ":" +
- tsharkOfpOutput + " /tmp/" )
- os.system( "scp " + ONOSUser + "@" + ONOS1Ip + ":" +
- tsharkTcpOutput + " /tmp/" )
-
- # TODO: Automate OFP output analysis
- # Debug mode - print out packets captured at runtime
- if debugMode == 'on':
- ofpFile = open( tsharkOfpOutput, 'r' )
- main.log.info( "Tshark OFP Vendor output: " )
- for line in ofpFile:
- tsharkOfpResultList.append( line )
- main.log.info( line )
- ofpFile.close()
-
- tcpFile = open( tsharkTcpOutput, 'r' )
- main.log.info( "Tshark TCP 74 output: " )
- for line in tcpFile:
- tsharkTcpResultList.append( line )
- main.log.info( line )
- tcpFile.close()
-
- jsonObj1 = json.loads( jsonStrTopologyMetrics1 )
- jsonObj2 = json.loads( jsonStrTopologyMetrics2 )
- jsonObj3 = json.loads( jsonStrTopologyMetrics3 )
-
- graphTimestamp1 = \
- jsonObj1[ graphTimestamp ][ 'value' ]
- graphTimestamp2 = \
- jsonObj2[ graphTimestamp ][ 'value' ]
- graphTimestamp3 = \
- jsonObj3[ graphTimestamp ][ 'value' ]
-
- graphLat1 = int( graphTimestamp1 ) - int( t0System )
- graphLat2 = int( graphTimestamp2 ) - int( t0System )
- graphLat3 = int( graphTimestamp3 ) - int( t0System )
-
- avgGraphLat = \
- ( int( graphLat1 ) +
- int( graphLat2 ) +
- int( graphLat3 ) ) / 3
-
- if avgGraphLat > swDiscThresholdMin \
- and avgGraphLat < swDiscThresholdMax:
- swDiscoveryLatList.append(
- avgGraphLat )
- else:
- main.log.info( "100 Switch discovery latency " +
- "exceeded the threshold." )
-
- # END ITERATION FOR LOOP
-
- swLatMin = min( swDiscoveryLatList )
- swLatMax = max( swDiscoveryLatList )
- swLatAvg = sum( swDiscoveryLatList ) /\
- len( swDiscoveryLatList )
-
- main.log.report( "100 Switch discovery latency " +
- "Min: " + str( swLatMin ) + " ms " +
- "Max: " + str( swLatMax ) + " ms " +
- "Avg: " + str( swLatAvg ) + " ms" )
-
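
CASE5 gates the switch connections with iptables DROP rules on the OpenFlow port, waits for the existing connections to time out, then flushes the rules and takes t0 at the moment of the flush so that all switches reconnect at once. A sketch of that gate-and-release step, mirroring the commands above (run_on_controller stands in for the SSH handle used by the test):

    # Sketch of the iptables gate used in CASE5 above.
    import time

    def gate_and_release(run_on_controller, mn_ip, of_port="6633"):
        # Drop OpenFlow traffic to/from the Mininet host (same rules as above)
        run_on_controller("sudo iptables -A INPUT -p tcp -s " + mn_ip +
                          " --dport " + of_port + " -j DROP")
        run_on_controller("sudo iptables -A OUTPUT -p tcp -s " + mn_ip +
                          " --dport " + of_port + " -j DROP")
        time.sleep(60)  # let existing switch connections time out
        t0_ms = time.time() * 1000
        run_on_controller("sudo iptables -F")  # flush: switches reconnect
        return t0_ms
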
- def CASE6( self, main ):
- """
- Increase number of nodes and initiate CLI
- """
- import time
-
- ONOS1Ip = main.params[ 'CTRL' ][ 'ip1' ]
- ONOS2Ip = main.params[ 'CTRL' ][ 'ip2' ]
- ONOS3Ip = main.params[ 'CTRL' ][ 'ip3' ]
- ONOS4Ip = main.params[ 'CTRL' ][ 'ip4' ]
- ONOS5Ip = main.params[ 'CTRL' ][ 'ip5' ]
- ONOS6Ip = main.params[ 'CTRL' ][ 'ip6' ]
- ONOS7Ip = main.params[ 'CTRL' ][ 'ip7' ]
-
- cellName = main.params[ 'ENV' ][ 'cellName' ]
-
- global clusterCount
-
- # Cluster size is increased every time this case is run
- clusterCount += 2
-
- main.log.report( "Increasing cluster size to " +
- str( clusterCount ) )
-
- installResult = main.FALSE
- if clusterCount == 3:
- main.log.info( "Installing nodes 2 and 3" )
- node2Result = \
- main.ONOSbench.onosInstall( node=ONOS2Ip )
- node3Result = \
- main.ONOSbench.onosInstall( node=ONOS3Ip )
- installResult = node2Result and node3Result
-
- time.sleep( 5 )
-
- main.ONOS2cli.startOnosCli( ONOS2Ip )
- main.ONOS3cli.startOnosCli( ONOS3Ip )
-
- elif clusterCount == 5:
- main.log.info( "Installing nodes 4 and 5" )
- node4Result = \
- main.ONOSbench.onosInstall( node=ONOS4Ip )
- node5Result = \
- main.ONOSbench.onosInstall( node=ONOS5Ip )
- installResult = node4Result and node5Result
-
- time.sleep( 5 )
-
- main.ONOS4cli.startOnosCli( ONOS4Ip )
- main.ONOS5cli.startOnosCli( ONOS5Ip )
-
- elif clusterCount == 7:
- main.log.info( "Installing nodes 6 and 7" )
- node6Result = \
- main.ONOSbench.onosInstall( node=ONOS6Ip )
- node7Result = \
- main.ONOSbench.onosInstall( node=ONOS7Ip )
- installResult = node6Result and node7Result
-
- time.sleep( 5 )
-
- main.ONOS6cli.startOnosCli( ONOS6Ip )
- main.ONOS7cli.startOnosCli( ONOS7Ip )
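
CASE6 grows the cluster by two nodes per invocation (3, then 5, then 7), installing each new node and starting its CLI. A generic sketch of that scale-up step, where install_node and start_cli stand in for main.ONOSbench.onosInstall and the per-node startOnosCli calls:

    # Sketch of the CASE6 scale-up pattern: install the two nodes just
    # beyond the current cluster size, wait briefly, then start their CLIs.
    import time

    def scale_up(cluster_count, node_ips, install_node, start_cli):
        new_count = cluster_count + 2
        for idx in range(cluster_count + 1, new_count + 1):  # nodes are 1-indexed
            if not install_node(node=node_ips[idx - 1]):
                return cluster_count, False
        time.sleep(5)
        for idx in range(cluster_count + 1, new_count + 1):
            start_cli(node_ips[idx - 1])
        return new_count, True
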
diff --git a/TestON/tests/TopoPerfNext/TopoPerfNext.topo b/TestON/tests/TopoPerfNext/TopoPerfNext.topo
deleted file mode 100644
index f12d192..0000000
--- a/TestON/tests/TopoPerfNext/TopoPerfNext.topo
+++ /dev/null
@@ -1,163 +0,0 @@
-<TOPOLOGY>
- <COMPONENT>
-
- <ONOSbench>
- <host>10.128.174.10</host>
- <user>admin</user>
- <password>onos_test</password>
- <type>OnosDriver</type>
- <connect_order>1</connect_order>
- <COMPONENTS> </COMPONENTS>
- </ONOSbench>
-
- <ONOS1cli>
- <host>10.128.174.10</host>
- <user>admin</user>
- <password>onos_test</password>
- <type>OnosCliDriver</type>
- <connect_order>2</connect_order>
- <COMPONENTS> </COMPONENTS>
- </ONOS1cli>
-
- <ONOS2cli>
- <host>10.128.174.10</host>
- <user>admin</user>
- <password>onos_test</password>
- <type>OnosCliDriver</type>
- <connect_order>3</connect_order>
- <COMPONENTS> </COMPONENTS>
- </ONOS2cli>
-
- <ONOS3cli>
- <host>10.128.174.10</host>
- <user>admin</user>
- <password>onos_test</password>
- <type>OnosCliDriver</type>
- <connect_order>4</connect_order>
- <COMPONENTS> </COMPONENTS>
- </ONOS3cli>
-
- <ONOS4cli>
- <host>10.128.174.10</host>
- <user>admin</user>
- <password>onos_test</password>
- <type>OnosCliDriver</type>
- <connect_order>5</connect_order>
- <COMPONENTS> </COMPONENTS>
- </ONOS4cli>
-
- <ONOS5cli>
- <host>10.128.174.10</host>
- <user>admin</user>
- <password>onos_test</password>
- <type>OnosCliDriver</type>
- <connect_order>6</connect_order>
- <COMPONENTS> </COMPONENTS>
- </ONOS5cli>
-
- <ONOS6cli>
- <host>10.128.174.10</host>
- <user>admin</user>
- <password>onos_test</password>
- <type>OnosCliDriver</type>
- <connect_order>7</connect_order>
- <COMPONENTS> </COMPONENTS>
- </ONOS6cli>
-
- <ONOS7cli>
- <host>10.128.174.10</host>
- <user>admin</user>
- <password>onos_test</password>
- <type>OnosCliDriver</type>
- <connect_order>8</connect_order>
- <COMPONENTS> </COMPONENTS>
- </ONOS7cli>
-
- <ONOS1>
- <host>10.128.174.1</host>
- <user>admin</user>
- <password>onos_test</password>
- <type>OnosDriver</type>
- <connect_order>9</connect_order>
- <COMPONENTS> </COMPONENTS>
- </ONOS1>
-
- <ONOS2>
- <host>10.128.174.2</host>
- <user>admin</user>
- <password>onos_test</password>
- <type>OnosDriver</type>
- <connect_order>10</connect_order>
- <COMPONENTS> </COMPONENTS>
- </ONOS2>
-
- <ONOS3>
- <host>10.128.174.3</host>
- <user>admin</user>
- <password>onos_test</password>
- <type>OnosDriver</type>
- <connect_order>11</connect_order>
- <COMPONENTS> </COMPONENTS>
- </ONOS3>
-
- <ONOS4>
- <host>10.128.174.4</host>
- <user>admin</user>
- <password>onos_test</password>
- <type>OnosDriver</type>
- <connect_order>12</connect_order>
- <COMPONENTS> </COMPONENTS>
- </ONOS4>
-
- <ONOS5>
- <host>10.128.174.5</host>
- <user>admin</user>
- <password>onos_test</password>
- <type>OnosDriver</type>
- <connect_order>13</connect_order>
- <COMPONENTS> </COMPONENTS>
- </ONOS5>
-
- <ONOS6>
- <host>10.128.174.6</host>
- <user>admin</user>
- <password>onos_test</password>
- <type>OnosDriver</type>
- <connect_order>14</connect_order>
- <COMPONENTS> </COMPONENTS>
- </ONOS6>
-
- <ONOS7>
- <host>10.128.174.7</host>
- <user>admin</user>
- <password>onos_test</password>
- <type>OnosDriver</type>
- <connect_order>15</connect_order>
- <COMPONENTS> </COMPONENTS>
- </ONOS7>
-
- <Mininet1>
- <host>10.128.10.90</host>
- <user>admin</user>
- <password>onos_test</password>
- <type>MininetCliDriver</type>
- <connect_order>16</connect_order>
- <COMPONENTS>
- <arg1> --custom topo-perf-2sw.py </arg1>
- <arg2> --arp --mac --topo mytopo</arg2>
- <arg3> </arg3>
- <controller> remote </controller>
- </COMPONENTS>
- </Mininet1>
-
- <Mininet2>
- <host>10.128.10.90</host>
- <user>admin</user>
- <password>onos_test</password>
- <type>RemoteMininetDriver</type>
- <connect_order>17</connect_order>
- <COMPONENTS> </COMPONENTS>
- </Mininet2>
-
- </COMPONENT>
-</TOPOLOGY>
diff --git a/TestON/tests/TopoPerfNextBM/TopoPerfNextBM.params b/TestON/tests/TopoPerfNextBM/TopoPerfNextBM.params
index ab215e9..eba319c 100644
--- a/TestON/tests/TopoPerfNextBM/TopoPerfNextBM.params
+++ b/TestON/tests/TopoPerfNextBM/TopoPerfNextBM.params
@@ -3,7 +3,7 @@
<ENV>
<cellName>topo_perf_test</cellName>
- <cellFeatures>"org.onosproject.metrics,org.onosproject.openflow"</cellFeatures>
+ <cellFeatures>"drivers,metrics,openflow"</cellFeatures>
</ENV>
<GIT>
@@ -13,7 +13,7 @@
</GIT>
<CTRL>
- <user>sdn</user>
+ <user>admin</user>
<ip1>10.254.1.201</ip1>
<port1>6633</port1>
<ip2>10.254.1.202</ip2>
@@ -62,7 +62,7 @@
</topoConfigName>
#Number of times to iterate each case
- <numIter>35</numIter>
+ <numIter>13</numIter>
<numSwitch>2</numSwitch>
#Number of iterations to ignore initially
<iterIgnore>2</iterIgnore>
diff --git a/TestON/tests/TopoPerfNextBM/TopoPerfNextBM.py b/TestON/tests/TopoPerfNextBM/TopoPerfNextBM.py
index 0f0fe55..e2245ba 100644
--- a/TestON/tests/TopoPerfNextBM/TopoPerfNextBM.py
+++ b/TestON/tests/TopoPerfNextBM/TopoPerfNextBM.py
@@ -272,10 +272,10 @@
metricsSwUp = CLIs[node].topologyEventsMetrics()
jsonStr.append(metricsSwUp)
- time.sleep(1)
-
main.log.info('Stopping all Tshark processes')
main.ONOS1.tsharkStop()
+
+ time.sleep(5)
main.log.info('Copying over tshark files')
os.system('scp ' + ONOSUser + '@' + nodeIpList[0] +
@@ -288,7 +288,8 @@
nodeIpList[0] + ':' + tsharkOfOutput + ' /tmp/')
# Get tcp syn / ack output
- # time.sleep(1)
+ time.sleep(1)
+
tcpFile = open(tsharkTcpOutput, 'r')
tempText = tcpFile.readline()
tempText = tempText.split(' ')
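
The hunk above reads the first line of the tshark TCP capture and splits it on spaces to pull out the timestamp of the initial SYN/ACK. A small sketch of that parsing; which field holds the timestamp depends on the tshark column options, so the index used here is an assumption rather than something taken from the diff:

    # Sketch of the tshark line parsing above; the timestamp field index
    # is an assumption (default tshark columns put the relative time in
    # the second field).
    def first_capture_time(tshark_output_path):
        with open(tshark_output_path, 'r') as f:
            fields = [tok for tok in f.readline().split(' ') if tok]
        return float(fields[1]) if len(fields) > 1 else None
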
diff --git a/TestON/tests/TopoPerfNextBM/TopoPerfNextBM.topo b/TestON/tests/TopoPerfNextBM/TopoPerfNextBM.topo
index 6a70f60..570dece 100644
--- a/TestON/tests/TopoPerfNextBM/TopoPerfNextBM.topo
+++ b/TestON/tests/TopoPerfNextBM/TopoPerfNextBM.topo
@@ -2,7 +2,7 @@
<COMPONENT>
<ONOSbench>
- <host>10.128.174.10</host>
+ <host>10.254.1.200</host>
<user>admin</user>
<password>onos_test</password>
<type>OnosDriver</type>
@@ -11,7 +11,7 @@
</ONOSbench>
<ONOS1cli>
- <host>10.128.174.10</host>
+ <host>10.254.1.200</host>
<user>admin</user>
<password>onos_test</password>
<type>OnosCliDriver</type>
@@ -20,7 +20,7 @@
</ONOS1cli>
<ONOS2cli>
- <host>10.128.174.10</host>
+ <host>10.254.1.200</host>
<user>admin</user>
<password>onos_test</password>
<type>OnosCliDriver</type>
@@ -29,7 +29,7 @@
</ONOS2cli>
<ONOS3cli>
- <host>10.128.174.10</host>
+ <host>10.254.1.200</host>
<user>admin</user>
<password>onos_test</password>
<type>OnosCliDriver</type>
@@ -38,7 +38,7 @@
</ONOS3cli>
<ONOS4cli>
- <host>10.128.174.10</host>
+ <host>10.254.1.200</host>
<user>admin</user>
<password>onos_test</password>
<type>OnosCliDriver</type>
@@ -47,7 +47,7 @@
</ONOS4cli>
<ONOS5cli>
- <host>10.128.174.10</host>
+ <host>10.254.1.200</host>
<user>admin</user>
<password>onos_test</password>
<type>OnosCliDriver</type>
@@ -56,7 +56,7 @@
</ONOS5cli>
<ONOS6cli>
- <host>10.128.174.10</host>
+ <host>10.254.1.200</host>
<user>admin</user>
<password>onos_test</password>
<type>OnosCliDriver</type>
@@ -65,7 +65,7 @@
</ONOS6cli>
<ONOS7cli>
- <host>10.128.174.10</host>
+ <host>10.254.1.200</host>
<user>admin</user>
<password>onos_test</password>
<type>OnosCliDriver</type>
@@ -74,9 +74,9 @@
</ONOS7cli>
<ONOS1>
- <host>10.128.174.1</host>
- <user>sdn</user>
- <password>rocks</password>
+ <host>10.254.1.201</host>
+ <user>admin</user>
+ <password>onos_test</password>
<type>OnosDriver</type>
<connect_order>9</connect_order>
<COMPONENTS> </COMPONENTS>
@@ -84,8 +84,8 @@
<ONOS2>
<host>10.254.1.202</host>
- <user>sdn</user>
- <password>rocks</password>
+ <user>admin</user>
+ <password>onos_test</password>
<type>OnosDriver</type>
<connect_order>10</connect_order>
<COMPONENTS> </COMPONENTS>
@@ -93,8 +93,8 @@
<ONOS3>
<host>10.254.1.203</host>
- <user>sdn</user>
- <password>rocks</password>
+ <user>admin</user>
+ <password>onos_test</password>
<type>OnosDriver</type>
<connect_order>11</connect_order>
<COMPONENTS> </COMPONENTS>
@@ -102,8 +102,8 @@
<ONOS4>
<host>10.254.1.204</host>
- <user>sdn</user>
- <password>rocks</password>
+ <user>admin</user>
+ <password>onos_test</password>
<type>OnosDriver</type>
<connect_order>12</connect_order>
<COMPONENTS> </COMPONENTS>
@@ -111,8 +111,8 @@
<ONOS5>
<host>10.254.1.205</host>
- <user>sdn</user>
- <password>rocks</password>
+ <user>admin</user>
+ <password>onos_test</password>
<type>OnosDriver</type>
<connect_order>13</connect_order>
<COMPONENTS> </COMPONENTS>
@@ -120,8 +120,8 @@
<ONOS6>
<host>10.254.1.206</host>
- <user>sdn</user>
- <password>rocks</password>
+ <user>admin</user>
+ <password>onos_test</password>
<type>OnosDriver</type>
<connect_order>14</connect_order>
<COMPONENTS> </COMPONENTS>
@@ -129,8 +129,8 @@
<ONOS7>
<host>10.254.1.207</host>
- <user>sdn</user>
- <password>rocks</password>
+ <user>admin</user>
+ <password>onos_test</password>
<type>OnosDriver</type>
<connect_order>15</connect_order>
<COMPONENTS> </COMPONENTS>
@@ -144,7 +144,7 @@
<connect_order>16</connect_order>
<COMPONENTS>
<arg1> --custom topo-perf-2sw.py </arg1>
- <arg2> --arp --mac --topo mytopo</arg2>
+ <arg2> --topo mytopo</arg2>
<arg3> </arg3>
<controller> remote </controller>
</COMPONENTS>
diff --git a/TestON/tests/TopoPerfNext/__init__.py b/TestON/tests/TopoPerfNextBM/__init__.py
similarity index 100%
rename from TestON/tests/TopoPerfNext/__init__.py
rename to TestON/tests/TopoPerfNextBM/__init__.py
diff --git a/TestON/tests/TopoPerfNext/TopoPerfNext.params b/TestON/tests/TopoPerfNextBM/backup/TopoPerfNextBM.params
similarity index 77%
rename from TestON/tests/TopoPerfNext/TopoPerfNext.params
rename to TestON/tests/TopoPerfNextBM/backup/TopoPerfNextBM.params
index dc2d519..d681433 100644
--- a/TestON/tests/TopoPerfNext/TopoPerfNext.params
+++ b/TestON/tests/TopoPerfNextBM/backup/TopoPerfNextBM.params
@@ -1,8 +1,9 @@
<PARAMS>
- <testcases>1,2,3,6,2,3,6,2,3,6,2,3</testcases>
+ <testcases>1,2,3,4,2,3,4,2,3,4,2,3</testcases>
<ENV>
<cellName>topo_perf_test</cellName>
+ <cellFeatures>"org.onosproject.metrics,org.onosproject.openflow"</cellFeatures>
</ENV>
<GIT>
@@ -12,7 +13,7 @@
</GIT>
<CTRL>
- <user>admin</user>
+ <user>sdn</user>
<ip1>10.128.174.1</ip1>
<port1>6633</port1>
<ip2>10.128.174.2</ip2>
@@ -35,6 +36,15 @@
<ip>10.128.174.10</ip>
</BENCH>
+ <TSHARK>
+ <ofpPortStatus>OF 1.3 146</ofpPortStatus>
+ <ofpRoleReply>OF 1.3 90 of_role_reply</ofpRoleReply>
+ <featureReply>OF 1.3 98 of_features_reply</featureReply>
+ <roleRequest>OF 1.3 90 of_role_request</roleRequest>
+ <tcpSynAck>TCP 74 6633</tcpSynAck>
+ <finAckSequence>FIN</finAckSequence>
+ </TSHARK>
+
<TEST>
#'on' or 'off' debug mode.
#If on, logging will be more verbose and
@@ -70,10 +80,10 @@
<DB>
<postToDB>on</postToDB>
<portEventResultPath>
- /home/admin/ONLabTest/TestON/tests/TopoPerfNext/portEventResultDb.log
+ /home/admin/ONLabTest/TestON/tests/TopoPerfNextBM/portEventResultDb.log
</portEventResultPath>
<switchEventResultPath>
- /home/admin/ONLabTest/TestON/tests/TopoPerfNext/switchEventResultDb.log
+ /home/admin/ONLabTest/TestON/tests/TopoPerfNextBM/switchEventResultDb.log
</switchEventResultPath>
</DB>
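
The new <TSHARK> block moves the capture-filter strings into the params file so test cases can look them up instead of hard-coding them. A sketch of how a case might consume them with the tsharkGrep driver call seen earlier in this diff (output paths are illustrative):

    # Sketch only: reading the <TSHARK> filter strings added above and
    # passing them to tsharkGrep; the output file names are illustrative.
    def start_captures(main, out_dir="/tmp"):
        ofp_filter = main.params['TSHARK']['ofpPortStatus']
        tcp_filter = main.params['TSHARK']['tcpSynAck']
        main.ONOS1.tsharkGrep(ofp_filter, out_dir + "/tshark_ofp_port_status.txt")
        main.ONOS1.tsharkGrep(tcp_filter, out_dir + "/tshark_tcp_syn_ack.txt")
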
diff --git a/TestON/tests/TopoPerfNext/__init__.py b/TestON/tests/TopoPerfNextBM/portEventResultDb.log
similarity index 100%
copy from TestON/tests/TopoPerfNext/__init__.py
copy to TestON/tests/TopoPerfNextBM/portEventResultDb.log
diff --git a/TestON/tests/TopoPerfNext/__init__.py b/TestON/tests/TopoPerfNextBM/switchEventResultDb.log
similarity index 100%
copy from TestON/tests/TopoPerfNext/__init__.py
copy to TestON/tests/TopoPerfNextBM/switchEventResultDb.log