Remove old and deprecated tests

Delete the deprecated ClassTest, IntentPerfNext, LincOETest, and
LinkEventTP test suites from TestON/tests, including their .params,
.topo, dependency, and __init__.py files.
diff --git a/TestON/tests/ClassTest/ClassTest.params b/TestON/tests/ClassTest/ClassTest.params
deleted file mode 100644
index 456881b..0000000
--- a/TestON/tests/ClassTest/ClassTest.params
+++ /dev/null
@@ -1,4 +0,0 @@
-<PARAMS>
- <testcases>1,2</testcases>
-
-</PARAMS>
diff --git a/TestON/tests/ClassTest/ClassTest.py b/TestON/tests/ClassTest/ClassTest.py
deleted file mode 100644
index 0fd8ccc..0000000
--- a/TestON/tests/ClassTest/ClassTest.py
+++ /dev/null
@@ -1,27 +0,0 @@
-
-import time
-import os
-import re
-
-
-class ClassTest:
-
- def __init__( self ):
- self.default = ''
-
- def CASE1( self, main ):
- import time
- import imp
-
- init = imp.load_source(
- 'ClassInit',
- '/home/admin/ONLabTest/TestON/tests/ClassTest/Dependency/ClassInit.py' )
-
- ip1_from_class = init.getIp1()
- init.printMain( main )
-
- main.log.info( ip1_from_class )
-
- def CASE2( self, main ):
-
- main.log.info( "Case 2" )
diff --git a/TestON/tests/ClassTest/ClassTest.topo b/TestON/tests/ClassTest/ClassTest.topo
deleted file mode 100644
index 1c8bc4f..0000000
--- a/TestON/tests/ClassTest/ClassTest.topo
+++ /dev/null
@@ -1,26 +0,0 @@
-<TOPOLOGY>
- <COMPONENT>
- <ONOSbench>
- <host>10.128.10.20</host>
- <user>admin</user>
- <password></password>
- <type>OnosDriver</type>
- <connect_order>1</connect_order>
- <COMPONENTS> </COMPONENTS>
- </ONOSbench>
-
- <Mininet1>
- <host>10.128.10.24</host>
- <user>admin</user>
- <password></password>
- <type>MininetCliDriver</type>
- <connect_order>2</connect_order>
- <COMPONENTS>
- <arg1> --custom topo-perf-2sw.py </arg1>
- <arg2> --arp --mac --topo mytopo </arg2>
- <arg3> </arg3>
- <controller> remote </controller>
- </COMPONENTS>
- </Mininet1>
- </COMPONENT>
-</TOPOLOGY>
diff --git a/TestON/tests/ClassTest/Dependency/ClassInit.py b/TestON/tests/ClassTest/Dependency/ClassInit.py
deleted file mode 100644
index 74ff627..0000000
--- a/TestON/tests/ClassTest/Dependency/ClassInit.py
+++ /dev/null
@@ -1,11 +0,0 @@
-
-
-def __init__(self):
- self._ip1 = '1'
- self._ip2 = '2'
-
-def getIp1():
- print 'some ip'
-
-def printMain(main):
- print main.log.info("Main from classinit")
diff --git a/TestON/tests/ClassTest/Dependency/__init__.py b/TestON/tests/ClassTest/Dependency/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/TestON/tests/ClassTest/Dependency/__init__.py
+++ /dev/null
diff --git a/TestON/tests/ClassTest/Dependencyc b/TestON/tests/ClassTest/Dependencyc
deleted file mode 100644
index bafe4c9..0000000
--- a/TestON/tests/ClassTest/Dependencyc
+++ /dev/null
Binary files differ
diff --git a/TestON/tests/ClassTest/__init__.py b/TestON/tests/ClassTest/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/TestON/tests/ClassTest/__init__.py
+++ /dev/null
diff --git a/TestON/tests/IntentPerfNext/Dependency/IntentClass.py b/TestON/tests/IntentPerfNext/Dependency/IntentClass.py
deleted file mode 100644
index f5b17c2..0000000
--- a/TestON/tests/IntentPerfNext/Dependency/IntentClass.py
+++ /dev/null
@@ -1,56 +0,0 @@
-
-def __init__(self):
- self_ = self
-
-def printLog(main):
- main.log.info("Print log success")
-
-def iptablesDropAllNodes(main, MN_ip, sw_port):
- #INPUT RULES
- main.ONOS1.handle.sendline(
- "sudo iptables -A INPUT -p tcp -s "+
- MN_ip+" --dport "+sw_port+" -j DROP")
- main.ONOS2.handle.sendline(
- "sudo iptables -A INPUT -p tcp -s "+
- MN_ip+" --dport "+sw_port+" -j DROP")
- main.ONOS3.handle.sendline(
- "sudo iptables -A INPUT -p tcp -s "+
- MN_ip+" --dport "+sw_port+" -j DROP")
- main.ONOS4.handle.sendline(
- "sudo iptables -A INPUT -p tcp -s "+
- MN_ip+" --dport "+sw_port+" -j DROP")
- main.ONOS5.handle.sendline(
- "sudo iptables -A INPUT -p tcp -s "+
- MN_ip+" --dport "+sw_port+" -j DROP")
- main.ONOS6.handle.sendline(
- "sudo iptables -A INPUT -p tcp -s "+
- MN_ip+" --dport "+sw_port+" -j DROP")
- main.ONOS7.handle.sendline(
- "sudo iptables -A INPUT -p tcp -s "+
- MN_ip+" --dport "+sw_port+" -j DROP")
-
- main.ONOS1.handle.sendline(
- "sudo iptables -A OUTPUT -p tcp -s "+
- MN_ip+" --dport "+sw_port+" -j DROP")
- main.ONOS2.handle.sendline(
- "sudo iptables -A OUTPUT -p tcp -s "+
- MN_ip+" --dport "+sw_port+" -j DROP")
- main.ONOS3.handle.sendline(
- "sudo iptables -A OUTPUT -p tcp -s "+
- MN_ip+" --dport "+sw_port+" -j DROP")
- main.ONOS4.handle.sendline(
- "sudo iptables -A OUTPUT -p tcp -s "+
- MN_ip+" --dport "+sw_port+" -j DROP")
- main.ONOS5.handle.sendline(
- "sudo iptables -A OUTPUT -p tcp -s "+
- MN_ip+" --dport "+sw_port+" -j DROP")
- main.ONOS6.handle.sendline(
- "sudo iptables -A OUTPUT -p tcp -s "+
- MN_ip+" --dport "+sw_port+" -j DROP")
- main.ONOS7.handle.sendline(
- "sudo iptables -A OUTPUT -p tcp -s "+
- MN_ip+" --dport "+sw_port+" -j DROP")
-
-def uninstallAllNodes(main, node_ip_list):
- for node in node_ip_list:
- main.ONOSbench.onos_uninstall(node_ip = node)
diff --git a/TestON/tests/IntentPerfNext/Dependency/__init__.py b/TestON/tests/IntentPerfNext/Dependency/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/TestON/tests/IntentPerfNext/Dependency/__init__.py
+++ /dev/null
diff --git a/TestON/tests/IntentPerfNext/IntentPerfNext.params b/TestON/tests/IntentPerfNext/IntentPerfNext.params
deleted file mode 100644
index 90ef160..0000000
--- a/TestON/tests/IntentPerfNext/IntentPerfNext.params
+++ /dev/null
@@ -1,62 +0,0 @@
-<PARAMS>
- <testcases>1,4,5,4,5,4,5,4</testcases>
-
- <ENV>
- <cellName>intent_perf_test</cellName>
- </ENV>
-
- <GIT>
- #autoPull 'on' or 'off'
- <autoPull>off</autoPull>
- <checkout>master</checkout>
- </GIT>
-
- <CTRL>
- <user>admin</user>
- <ip1>10.128.174.1</ip1>
- <port1>6633</port1>
- <ip2>10.128.174.2</ip2>
- <port2>6633</port2>
- <ip3>10.128.174.3</ip3>
- <port3>6633</port3>
- <ip4>10.128.174.4</ip4>
- <ip5>10.128.174.5</ip5>
- <ip6>10.128.174.6</ip6>
- <ip7>10.128.174.7</ip7>
- </CTRL>
-
- <MN>
- <ip1>10.128.10.90</ip1>
- <ip2>10.128.10.91</ip2>
- </MN>
-
- <BENCH>
- <ip>10.128.174.10</ip>
- </BENCH>
-
- <TEST>
- #Number of times to iterate each case
- <numIter>8</numIter>
- <numIgnore>2</numIgnore>
- <numSwitch>8</numSwitch>
- <batchThresholdMin>0</batchThresholdMin>
- <batchThresholdMax>1000</batchThresholdMax>
- <batchIntentSize>1</batchIntentSize>
- <numMult>1</numMult>
- #Interface to bring down for intent reroute case
- <intfs>s3-eth2</intfs>
- </TEST>
-
- <DB>
- <intentFilePath>
- /home/admin/ONLabTest/TestON/tests/IntentPerfNext/intentLatencyResultDb.log
- </intentFilePath>
- </DB>
-
- <JSON>
- <submittedTime>intentSubmittedTimestamp</submittedTime>
- <installedTime>intentInstalledTimestamp</installedTime>
- <wdRequestTime>intentWithdrawRequestedTimestamp</wdRequestTime>
- <withdrawnTime>intentWithdrawnTimestamp</withdrawnTime>
- </JSON>
-</PARAMS>
diff --git a/TestON/tests/IntentPerfNext/IntentPerfNext.py b/TestON/tests/IntentPerfNext/IntentPerfNext.py
deleted file mode 100644
index 7807daf..0000000
--- a/TestON/tests/IntentPerfNext/IntentPerfNext.py
+++ /dev/null
@@ -1,1292 +0,0 @@
-# Intent Performance Test for ONOS-next
-#
-# andrew@onlab.us
-#
-# November 5, 2014
-
-
-class IntentPerfNext:
-
- def __init__( self ):
- self.default = ""
-
- def CASE1( self, main ):
- """
- ONOS startup sequence
- """
- import time
- global clusterCount
- global timeToPost
- global runNum
-
- clusterCount = 1
- timeToPost = time.strftime("%Y-%m-%d %H:%M:%S")
- runNum = time.strftime("%d%H%M%S")
-
- cellName = main.params[ 'ENV' ][ 'cellName' ]
-
- gitPull = main.params[ 'GIT' ][ 'autoPull' ]
- checkoutBranch = main.params[ 'GIT' ][ 'checkout' ]
- intentFilePath = main.params[ 'DB' ][ 'intentFilePath' ]
-
- ONOSIp = []
- for i in range(1, 8):
- ONOSIp.append(main.params[ 'CTRL' ][ 'ip'+str(i) ])
- main.ONOSbench.onosUninstall( nodeIp = ONOSIp[i-1] )
-
- MN1Ip = main.params[ 'MN' ][ 'ip1' ]
- BENCHIp = main.params[ 'BENCH' ][ 'ip' ]
-
- main.case( "Setting up test environment" )
-
- main.step( "Clearing previous DB log file" )
- fIntentLog = open(intentFilePath, 'w')
- # Overwrite with empty line and close
- fIntentLog.write('')
- fIntentLog.close()
-
- main.step( "Starting mininet topology" )
- main.Mininet1.startNet()
-
- main.step( "Creating cell file" )
- cellFileResult = main.ONOSbench.createCellFile(
- BENCHIp, cellName, MN1Ip,
- ("onos-core,webconsole,onos-api,onos-app-metrics,onos-gui,"
- "onos-cli,onos-openflow"),
- ONOSIp[0] )
-
- main.step( "Applying cell file to environment" )
- cellApplyResult = main.ONOSbench.setCell( cellName )
- verifyCellResult = main.ONOSbench.verifyCell()
-
- main.step( "Removing raft logs" )
- main.ONOSbench.onosRemoveRaftLogs()
-
- main.step( "Git checkout and pull " + checkoutBranch )
- if gitPull == 'on':
- checkoutResult = \
- main.ONOSbench.gitCheckout( checkoutBranch )
- pullResult = main.ONOSbench.gitPull()
-
- # If you used git pull, auto compile
- main.step( "Using onos-build to compile ONOS" )
- buildResult = main.ONOSbench.onosBuild()
- else:
- checkoutResult = main.TRUE
- pullResult = main.TRUE
- buildResult = main.TRUE
- main.log.info( "Git pull skipped by configuration" )
-
- main.log.report( "Commit information - " )
- main.ONOSbench.getVersion( report=True )
-
- main.step( "Creating ONOS package" )
- packageResult = main.ONOSbench.onosPackage()
-
- main.step( "Installing ONOS package" )
- install1Result = main.ONOSbench.onosInstall( node=ONOSIp[0] )
-
- main.step( "Set cell for ONOScli env" )
- main.ONOS1cli.setCell( cellName )
-
- time.sleep( 5 )
-
- main.step( "Start onos cli" )
- cli1 = main.ONOS1cli.startOnosCli( ONOSIp[0] )
-
- utilities.assert_equals( expect=main.TRUE,
- actual=cellFileResult and cellApplyResult and
- verifyCellResult and checkoutResult and
- pullResult and buildResult and
- install1Result, # and install2Result and
- # install3Result,
- onpass="ONOS started successfully",
- onfail="Failed to start ONOS" )
-
- def CASE2( self, main ):
- """
- Single intent add latency
-
- """
- import time
- import json
- import requests
- import os
- import numpy
- global clusterCount
-
- ONOS1Ip = main.params[ 'CTRL' ][ 'ip1' ]
- ONOS2Ip = main.params[ 'CTRL' ][ 'ip2' ]
- ONOS3Ip = main.params[ 'CTRL' ][ 'ip3' ]
- ONOSIpList = []
- for i in range( 1, 8 ):
- ONOSIpList.append( main.params[ 'CTRL' ][ 'ip' + str( i ) ] )
-
- ONOSUser = main.params[ 'CTRL' ][ 'user' ]
-
- defaultSwPort = main.params[ 'CTRL' ][ 'port1' ]
-
- # number of iterations of case
- numIter = main.params[ 'TEST' ][ 'numIter' ]
- numIgnore = int( main.params[ 'TEST' ][ 'numIgnore' ] )
-
- # Timestamp keys for json metrics output
- submitTime = main.params[ 'JSON' ][ 'submittedTime' ]
- installTime = main.params[ 'JSON' ][ 'installedTime' ]
- wdRequestTime = main.params[ 'JSON' ][ 'wdRequestTime' ]
- withdrawnTime = main.params[ 'JSON' ][ 'withdrawnTime' ]
-
- assertion = main.TRUE
-
- intentAddLatList = []
-
- # Distribute switches according to cluster count
- for i in range( 1, 9 ):
- if clusterCount == 1:
- main.Mininet1.assignSwController(
- sw="s" + str( i ),
- ip=ONOSIpList[ 0 ],
- port=defaultSwPort
- )
- elif clusterCount == 3:
- if i < 3:
- index = 0
- elif i < 6 and i >= 3:
- index = 1
- else:
- index = 2
- main.Mininet1.assignSwController(
- sw="s" + str( i ),
- ip=ONOSIpList[ index ],
- port=defaultSwPort
- )
- elif clusterCount == 5:
- if i < 3:
- index = 0
- elif i < 5 and i >= 3:
- index = 1
- elif i < 7 and i >= 5:
- index = 2
- elif i == 7:
- index = 3
- else:
- index = 4
- main.Mininet1.assignSwController(
- sw="s" + str( i ),
- ip=ONOSIpList[ index ],
- port=defaultSwPort
- )
- elif clusterCount == 7:
- if i < 6:
- index = i
- else:
- index = 6
- main.Mininet1.assignSwController(
- sw="s" + str( i ),
- ip=ONOSIpList[ index ],
- port=defaultSwPort
- )
-
- time.sleep( 10 )
-
- main.log.report( "Single intent add latency test" )
-
- devicesJsonStr = main.ONOS1cli.devices()
- devicesJsonObj = json.loads( devicesJsonStr )
-
- if not devicesJsonObj:
- main.log.report( "Devices not discovered" )
- main.log.report( "Aborting test" )
- main.exit()
- else:
- main.log.info( "Devices discovered successfully" )
-
- deviceIdList = []
-
- # Obtain device id list in ONOS format.
- # They should already be in order ( 1,2,3,10,11,12,13, etc )
- for device in devicesJsonObj:
- deviceIdList.append( device[ 'id' ] )
-
- for i in range( 0, int( numIter ) ):
- # addPointIntent( ingrDevice, egrDevice,
- # ingrPort, egrPort )
- main.ONOS1cli.addPointIntent(
- deviceIdList[ 0 ] + "/2", deviceIdList[ 7 ] + "/2" )
-
- # Allow some time for intents to propagate
- time.sleep( 5 )
-
- intentsStr = main.ONOS1cli.intents( jsonFormat=True )
- intentsObj = json.loads( intentsStr )
- for intent in intentsObj:
- if intent[ 'state' ] == "INSTALLED":
- main.log.info( "Intent installed successfully" )
- intentId = intent[ 'id' ]
- main.log.info( "Intent id: " + str( intentId ) )
- else:
- # TODO: Add error handling
- main.log.info( "Intent installation failed" )
- intentId = ""
-
- # Obtain metrics from ONOS 1, 2, 3
- intentsJsonStr1 = main.ONOS1cli.intentsEventsMetrics()
- intentsJsonObj1 = json.loads( intentsJsonStr1 )
- # Parse values from the json object
- intentSubmit1 = \
- intentsJsonObj1[ submitTime ][ 'value' ]
- intentInstall1 = \
- intentsJsonObj1[ installTime ][ 'value' ]
- intentInstallLat1 = \
- int( intentInstall1 ) - int( intentSubmit1 )
-
- if clusterCount == 3:
- intentsJsonStr2 = main.ONOS2cli.intentsEventsMetrics()
- intentsJsonStr3 = main.ONOS3cli.intentsEventsMetrics()
- intentsJsonObj2 = json.loads( intentsJsonStr2 )
- intentsJsonObj3 = json.loads( intentsJsonStr3 )
- intentSubmit2 = \
- intentsJsonObj2[ submitTime ][ 'value' ]
- intentSubmit3 = \
- intentsJsonObj3[ submitTime ][ 'value' ]
- intentInstall2 = \
- intentsJsonObj2[ installTime ][ 'value' ]
- intentInstall3 = \
- intentsJsonObj3[ installTime ][ 'value' ]
- intentInstallLat2 = \
- int( intentInstall2 ) - int( intentSubmit2 )
- intentInstallLat3 = \
- int( intentInstall3 ) - int( intentSubmit3 )
- else:
- intentInstallLat2 = 0
- intentInstallLat3 = 0
-
- if clusterCount == 5:
- intentsJsonStr4 = main.ONOS4cli.intentsEventsMetrics()
- intentsJsonStr5 = main.ONOS5cli.intentsEventsMetrics()
- intentsJsonObj4 = json.loads( intentsJsonStr4 )
- intentsJsonObj5 = json.loads( intentsJsonStr5 )
- intentSubmit4 = \
- intentsJsonObj4[ submitTime ][ 'value' ]
- intentSubmit5 = \
- intentsJsonObj5[ submitTime ][ 'value' ]
- intentInstall4 = \
- intentsJsonObj5[ installTime ][ 'value' ]
- intentInstall5 = \
- intentsJsonObj5[ installTime ][ 'value' ]
- intentInstallLat4 = \
- int( intentInstall4 ) - int( intentSubmit4 )
- intentInstallLat5 = \
- int( intentInstall5 ) - int( intentSubmit5 )
- else:
- intentInstallLat4 = 0
- intentInstallLat5 = 0
-
- if clusterCount == 7:
- intentsJsonStr6 = main.ONOS6cli.intentsEventsMetrics()
- intentsJsonStr7 = main.ONOS7cli.intentsEventsMetrics()
- intentsJsonObj6 = json.loads( intentsJsonStr6 )
- intentsJsonObj7 = json.loads( intentsJsonStr7 )
- intentSubmit6 = \
- intentsJsonObj6[ submitTime ][ 'value' ]
- intentSubmit7 = \
- intentsJsonObj6[ submitTime ][ 'value' ]
- intentInstall6 = \
- intentsJsonObj6[ installTime ][ 'value' ]
- intentInstall7 = \
- intentsJsonObj7[ installTime ][ 'value' ]
- intentInstallLat6 = \
- int( intentInstall6 ) - int( intentSubmit6 )
- intentInstallLat7 = \
- int( intentInstall7 ) - int( intentSubmit7 )
- else:
- intentInstallLat6 = 0
- intentInstallLat7 = 0
-
- intentInstallLatAvg = \
- ( intentInstallLat1 +
- intentInstallLat2 +
- intentInstallLat3 +
- intentInstallLat4 +
- intentInstallLat5 +
- intentInstallLat6 +
- intentInstallLat7 ) / clusterCount
-
- main.log.info( "Intent add latency avg for iteration " + str( i ) +
- ": " + str( intentInstallLatAvg ) + " ms" )
-
- if intentInstallLatAvg > 0.0 and \
- intentInstallLatAvg < 1000 and i > numIgnore:
- intentAddLatList.append( intentInstallLatAvg )
- else:
- main.log.info( "Intent add latency exceeded " +
- "threshold. Skipping iteration " + str( i ) )
-
- time.sleep( 3 )
-
- # TODO: Only remove intents that were installed
- # in this case... Otherwise many other intents
- # may show up distorting the results
- main.log.info( "Removing intents for next iteration" )
- jsonTemp = \
- main.ONOS1cli.intents( jsonFormat=True )
- jsonObjIntents = json.loads( jsonTemp )
- if jsonObjIntents:
- for intents in jsonObjIntents:
- tempId = intents[ 'id' ]
- # main.ONOS1cli.removeIntent( tempId )
- main.log.info( "Removing intent id: " +
- str( tempId ) )
- main.ONOS1cli.removeIntent( tempId )
- else:
- main.log.info( "Intents were not installed correctly" )
-
- time.sleep( 5 )
-
- if intentAddLatList:
- intentAddLatAvg = sum( intentAddLatList ) /\
- len( intentAddLatList )
- else:
- main.log.report( "Intent installation latency test failed" )
- intentAddLatAvg = "NA"
- assertion = main.FALSE
-
- intentAddLatStd = \
- round( numpy.std( intentAddLatList ), 1 )
- # END ITERATION FOR LOOP
- main.log.report( "Single intent add latency - " )
- main.log.report( "Avg: " + str( intentAddLatAvg ) + " ms" )
- main.log.report( "Std Deviation: " + str( intentAddLatStd ) + " ms" )
-
- utilities.assert_equals(
- expect=main.TRUE,
- actual=assertion,
- onpass="Single intent install latency test successful",
- onfail="Single intent install latency test failed" )
-
- def CASE3( self, main ):
- """
- Intent Reroute latency
- """
- import time
- import json
- import requests
- import os
- import numpy
- global clusterCount
-
- ONOS1Ip = main.params[ 'CTRL' ][ 'ip1' ]
- ONOS2Ip = main.params[ 'CTRL' ][ 'ip2' ]
- ONOS3Ip = main.params[ 'CTRL' ][ 'ip3' ]
- ONOSUser = main.params[ 'CTRL' ][ 'user' ]
-
- ONOSIpList = []
- for i in range( 1, 8 ):
- ONOSIpList.append( main.params[ 'CTRL' ][ 'ip' + str( i ) ] )
-
- defaultSwPort = main.params[ 'CTRL' ][ 'port1' ]
-
- # number of iterations of case
- numIter = main.params[ 'TEST' ][ 'numIter' ]
- numIgnore = int( main.params[ 'TEST' ][ 'numIgnore' ] )
- assertion = main.TRUE
-
- # Timestamp keys for json metrics output
- submitTime = main.params[ 'JSON' ][ 'submittedTime' ]
- installTime = main.params[ 'JSON' ][ 'installedTime' ]
- wdRequestTime = main.params[ 'JSON' ][ 'wdRequestTime' ]
- withdrawnTime = main.params[ 'JSON' ][ 'withdrawnTime' ]
-
- # NOTE: May need to configure interface depending on topology
- intfs = main.params[ 'TEST' ][ 'intfs' ]
-
- # Distribute switches according to cluster count
- for i in range( 1, 9 ):
- if clusterCount == 1:
- main.Mininet1.assignSwController(
- sw="s" + str( i ),
- ip=ONOSIpList[ 0 ],
- port=defaultSwPort
- )
- elif clusterCount == 3:
- if i < 3:
- index = 0
- elif i < 6 and i >= 3:
- index = 1
- else:
- index = 2
- main.Mininet1.assignSwController(
- sw="s" + str( i ),
- ip=ONOSIpList[ index ],
- port=defaultSwPort
- )
- elif clusterCount == 5:
- if i < 3:
- index = 0
- elif i < 5 and i >= 3:
- index = 1
- elif i < 7 and i >= 5:
- index = 2
- elif i == 7:
- index = 3
- else:
- index = 4
- main.Mininet1.assignSwController(
- sw="s" + str( i ),
- ip=ONOSIpList[ index ],
- port=defaultSwPort
- )
- elif clusterCount == 7:
- if i < 6:
- index = i
- else:
- index = 6
- main.Mininet1.assignSwController(
- sw="s" + str( i ),
- ip=ONOSIpList[ index ],
- port=defaultSwPort
- )
-
- time.sleep(10)
-
- devicesJsonStr = main.ONOS1cli.devices()
- devicesJsonObj = json.loads( devicesJsonStr )
-
- deviceIdList = []
-
- # Obtain device id list in ONOS format.
- # They should already be in order ( 1,2,3,10,11,12,13, etc )
- for device in devicesJsonObj:
- deviceIdList.append( device[ 'id' ] )
-
- intentRerouteLatList = []
-
- for i in range( 0, int( numIter ) ):
- # addPointIntent( ingrDevice, ingrPort,
- # egrDevice, egrPort )
- if len( deviceIdList ) > 0:
- main.ONOS1cli.addPointIntent(
- deviceIdList[ 0 ] + "/2", deviceIdList[ 7 ] + "/2" )
- else:
- main.log.info( "Failed to fetch devices from ONOS" )
-
- time.sleep( 5 )
-
- intentsStr = main.ONOS1cli.intents( jsonFormat=True )
- intentsObj = json.loads( intentsStr )
- for intent in intentsObj:
- main.log.info(intent)
- if intent[ 'state' ] == "INSTALLED":
- main.log.info( "Intent installed successfully" )
- intentId = intent[ 'id' ]
- main.log.info( "Intent id: " + str( intentId ) )
- #else:
- #TODO: Add error handling
- #main.log.info( "Intent installation failed" )
- #intentId = ""
-
- main.log.info( "Disabling interface " + intfs )
- t0System = time.time() * 1000
- main.Mininet1.handle.sendline(
- "sh ifconfig " + intfs + " down" )
- main.Mininet1.handle.expect( "mininet>" )
-
- # TODO: Check for correct intent reroute
- time.sleep( 1 )
-
- # Obtain metrics from ONOS 1, 2, 3
- intentsJsonStr1 = main.ONOS1cli.intentsEventsMetrics()
- intentsJsonObj1 = json.loads( intentsJsonStr1 )
- # Parse values from the json object
- intentInstall1 = \
- intentsJsonObj1[ installTime ][ 'value' ]
- intentRerouteLat1 = \
- int( intentInstall1 ) - int( t0System )
-
- if clusterCount == 3:
- intentsJsonStr2 = main.ONOS2cli.intentsEventsMetrics()
- intentsJsonStr3 = main.ONOS3cli.intentsEventsMetrics()
-
- intentsJsonObj2 = json.loads( intentsJsonStr2 )
- intentsJsonObj3 = json.loads( intentsJsonStr3 )
- intentInstall2 = \
- intentsJsonObj2[ installTime ][ 'value' ]
- intentInstall3 = \
- intentsJsonObj3[ installTime ][ 'value' ]
- intentRerouteLat2 = \
- int( intentInstall2 ) - int( t0System )
- intentRerouteLat3 = \
- int( intentInstall3 ) - int( t0System )
- else:
- intentRerouteLat2 = 0
- intentRerouteLat3 = 0
-
- if clusterCount == 5:
- intentsJsonStr4 = main.ONOS4cli.intentsEventsMetrics()
- intentsJsonStr5 = main.ONOS5cli.intentsEventsMetrics()
-
- intentsJsonObj4 = json.loads( intentsJsonStr4 )
- intentsJsonObj5 = json.loads( intentsJsonStr5 )
- intentInstall4 = \
- intentsJsonObj4[ installTime ][ 'value' ]
- intentInstall5 = \
- intentsJsonObj5[ installTime ][ 'value' ]
- intentRerouteLat4 = \
- int( intentInstall4 ) - int( t0System )
- intentRerouteLat5 = \
- int( intentInstall5 ) - int( t0System )
- else:
- intentRerouteLat4 = 0
- intentRerouteLat5 = 0
-
- if clusterCount == 7:
- intentsJsonStr6 = main.ONOS6cli.intentsEventsMetrics()
- intentsJsonStr7 = main.ONOS7cli.intentsEventsMetrics()
-
- intentsJsonObj6 = json.loads( intentsJsonStr6 )
- intentsJsonObj7 = json.loads( intentsJsonStr7 )
- intentInstall6 = \
- intentsJsonObj6[ installTime ][ 'value' ]
- intentInstall7 = \
- intentsJsonObj7[ installTime ][ 'value' ]
- intentRerouteLat6 = \
- int( intentInstall6 ) - int( t0System )
- intentRerouteLat7 = \
- int( intentInstall7 ) - int( t0System )
- else:
- intentRerouteLat6 = 0
- intentRerouteLat7 = 0
-
- intentRerouteLatAvg = \
- ( intentRerouteLat1 +
- intentRerouteLat2 +
- intentRerouteLat3 +
- intentRerouteLat4 +
- intentRerouteLat5 +
- intentRerouteLat6 +
- intentRerouteLat7 ) / clusterCount
-
- main.log.info( "Intent reroute latency avg for iteration " +
- str( i ) + ": " + str( intentRerouteLatAvg )+ " ms")
-
- if intentRerouteLatAvg > 0.0 and \
- intentRerouteLatAvg < 1000 and i > numIgnore:
- intentRerouteLatList.append( intentRerouteLatAvg )
- else:
- main.log.info( "Intent reroute latency exceeded " +
- "threshold. Skipping iteration " + str( i ) )
-
- main.log.info( "Removing intents for next iteration" )
- main.ONOS1cli.removeIntent( intentId )
-
- main.log.info( "Bringing Mininet interface up for next " +
- "iteration" )
- main.Mininet1.handle.sendline(
- "sh ifconfig " + intfs + " up" )
- main.Mininet1.handle.expect( "mininet>" )
-
- if intentRerouteLatList:
- intentRerouteLatAvg = sum( intentRerouteLatList ) /\
- len( intentRerouteLatList )
- else:
- main.log.report( "Intent reroute test failed. Results NA" )
- intentRerouteLatAvg = "NA"
- # NOTE: fails test when list is empty
- assertion = main.FALSE
-
- intentRerouteLatStd = \
- round( numpy.std( intentRerouteLatList ), 1 )
- # END ITERATION FOR LOOP
- main.log.report( "Single intent reroute latency - " )
- main.log.report( "Avg: " + str( intentRerouteLatAvg ) + " ms" )
- main.log.report(
- "Std Deviation: " +
- str( intentRerouteLatStd ) +
- " ms" )
-
- utilities.assert_equals(
- expect=main.TRUE,
- actual=assertion,
- onpass="Single intent reroute latency test successful",
- onfail="Single intent reroute latency test failed" )
-
- def CASE4( self, main ):
- """
- Batch intent install
-
- Supports scale-out scenarios and increasing
- number of intents within each iteration
- """
- import time
- import json
- import requests
- import os
- import numpy
- global clusterCount
- global timeToPost
-
- ONOS1Ip = main.params[ 'CTRL' ][ 'ip1' ]
- ONOS2Ip = main.params[ 'CTRL' ][ 'ip2' ]
- ONOS3Ip = main.params[ 'CTRL' ][ 'ip3' ]
- ONOS4Ip = main.params[ 'CTRL' ][ 'ip4' ]
- ONOS5Ip = main.params[ 'CTRL' ][ 'ip5' ]
- ONOS6Ip = main.params[ 'CTRL' ][ 'ip6' ]
- ONOS7Ip = main.params[ 'CTRL' ][ 'ip7' ]
-
- assertion = main.TRUE
-
- ONOSIpList = []
- for i in range( 1, 8 ):
- ONOSIpList.append( main.params[ 'CTRL' ][ 'ip' + str( i ) ] )
-
- ONOSUser = main.params[ 'CTRL' ][ 'user' ]
-
- defaultSwPort = main.params[ 'CTRL' ][ 'port1' ]
-
- batchIntentSize = int( main.params[ 'TEST' ][ 'batchIntentSize' ] )
- batchThreshMin = int( main.params[ 'TEST' ][ 'batchThresholdMin' ] )
- batchThreshMax = int( main.params[ 'TEST' ][ 'batchThresholdMax' ] )
-
- # number of iterations of case
- numIter = main.params[ 'TEST' ][ 'numIter' ]
- numIgnore = int( main.params[ 'TEST' ][ 'numIgnore' ] )
- numSwitch = int( main.params[ 'TEST' ][ 'numSwitch' ] )
- nThread = main.params[ 'TEST' ][ 'numMult' ]
- #nThread = 105
-
- # DB operation variables
- intentFilePath = main.params[ 'DB' ][ 'intentFilePath' ]
-
- # Switch assignment NOTE: hardcoded
- if clusterCount == 1:
- for i in range( 1, numSwitch + 1 ):
- main.Mininet1.assignSwController(
- sw="s" + str( i ),
- ip=ONOS1Ip,
- port=defaultSwPort )
- if clusterCount == 3:
- for i in range( 1, 3 ):
- main.Mininet1.assignSwController(
- sw="s" + str( i ),
- ip=ONOS1Ip,
- port=defaultSwPort )
- for i in range( 3, 6 ):
- main.Mininet1.assignSwController(
- sw="s" + str( i ),
- ip=ONOS2Ip,
- port=defaultSwPort )
- for i in range( 6, 9 ):
- main.Mininet1.assignSwController(
- sw="s" + str( i ),
- ip=ONOS3Ip,
- port=defaultSwPort )
- if clusterCount == 5:
- main.Mininet1.assignSwController(
- sw="s1",
- ip=ONOS1Ip,
- port=defaultSwPort )
- main.Mininet1.assignSwController(
- sw="s2",
- ip=ONOS1Ip,
- port=defaultSwPort )
- for i in range( 3, 6 ):
- main.Mininet1.assignSwController(
- sw="s" + str( i ),
- ip=ONOS3Ip,
- port=defaultSwPort )
- main.Mininet1.assignSwController(
- sw="s6",
- ip=ONOS4Ip,
- port=defaultSwPort )
- main.Mininet1.assignSwController(
- sw="s7",
- ip=ONOS5Ip,
- port=defaultSwPort )
- main.Mininet1.assignSwController(
- sw="s8",
- ip=ONOS5Ip,
- port=defaultSwPort )
-
- if clusterCount == 7:
- for i in range( 1, 9 ):
- if i < 8:
- main.Mininet1.assignSwController(
- sw="s" + str( i ),
- ip=ONOSIpList[ i - 1 ],
- port=defaultSwPort )
- elif i >= 8:
- main.Mininet1.assignSwController(
- sw="s" + str( i ),
- ip=ONOSIpList[ 6 ],
- port=defaultSwPort )
-
- time.sleep( 20 )
-
- main.log.report( "Batch intent installation test of " +
- str( batchIntentSize ) + " intent(s)" )
-
- batchResultList = []
-
- main.log.info( "Getting list of available devices" )
- deviceIdList = []
- jsonStr = main.ONOS1cli.devices()
- jsonObj = json.loads( jsonStr )
- for device in jsonObj:
- deviceIdList.append( device[ 'id' ] )
-
- # List of install / witdhraw latencies for each batch
- batchInstallLat = []
- batchWithdrawLat = []
-
- sleepTime = 10
-
- baseDir = "/tmp/"
-
- # Batch size increase loop
- for batch in range( 0, 5 ):
- # Max intent install measurement of all nodes
- # Resets after each batch calculation
- maxInstallLat = []
- maxWithdrawLat = []
- # Max single intent install measurement of all nodes
- # For example, if batch size is 1000, result latency
- # will be divided by 1000
- maxSingleInstallLat = []
- maxSingleWithdrawLat = []
- # Statistical gathering loop over number of iterations
- for i in range( 0, int( numIter ) ):
- main.log.info( "Pushing " +
- str( int( batchIntentSize ) * int( nThread ) ) +
- " intents. Iteration " + str( i ) )
-
- for node in range( 1, clusterCount + 1 ):
- saveDir = baseDir + "batch_intent_" + str( node ) + ".txt"
- main.ONOSbench.pushTestIntentsShell(
- deviceIdList[ 0 ] + "/2",
- deviceIdList[ 7 ] + "/2",
- batchIntentSize,
- saveDir, ONOSIpList[ node - 1 ],
- numMult=nThread )
-
- # Wait sufficient time for intents to start
- # installing
- time.sleep( sleepTime )
-
- intent = ""
- counter = 300
- while len( intent ) > 0 and counter > 0:
- main.ONOS1cli.handle.sendline(
- "intents | wc -l" )
- main.ONOS1cli.handle.expect(
- "intents | wc -l" )
- main.ONOS1cli.handle.expect(
- "onos>" )
- intentTemp = main.ONOS1cli.handle.before()
- intent = main.ONOS1cli.intents()
- intent = json.loads( intent )
- counter = counter - 1
- time.sleep( 1 )
-
- time.sleep( 5 )
-
- for node in range( 1, clusterCount + 1 ):
- saveDir = baseDir + "batch_intent_" + str( node ) + ".txt"
- with open( saveDir ) as fOnos:
- lineCount = 0
- for line in fOnos:
- line_temp = ""
- main.log.info( "Line read: " + str( line ) )
- line_temp = line[ 1: ]
- line_temp = line_temp.split( ": " )
- #Prevent split method if line doesn't have
- #space
- if " " in str(line_temp):
- result = line_temp[ 1 ].split( " " )[ 0 ]
- else:
- main.log.warn( "Empty line read" )
- result = 0
- # TODO: add parameters before appending latency
- if lineCount == 0:
- if "Failure" in str(line):
- main.log.warn("Intent installation failed")
- result = 'NA'
- else:
- main.log.info("Install result: "+result)
- batchInstallLat.append( int( result ) )
- installResult = result
- elif lineCount == 1:
- if "Failure" in str(line):
- main.log.warn("Intent withdraw failed")
- result = 'NA'
- else:
- main.log.info("Withdraw result: "+result)
- batchWithdrawLat.append( int( result ) )
- withdrawResult = result
- else:
- main.log.warn("Invalid results: excess lines")
- installResult = 'NA'
- withdrawResult = 'NA'
- lineCount += 1
- main.log.info( "Batch install latency for ONOS" +
- str( node ) + " with " +
- str( batchIntentSize ) + "intents: " +
- str( installResult ) + " ms" )
- main.log.info( "Batch withdraw latency for ONOS" +
- str( node ) + " with " +
- str( batchIntentSize ) + "intents: " +
- str( withdrawResult ) + " ms" )
-
- main.log.info( "Single intent install latency ONOS" +
- str( node ) + " with " +
- str( batchIntentSize ) + "intents: " +
- str( float(installResult) /\
- int(batchIntentSize) ) + " ms" )
- main.log.info( "Single intent withdraw latency ONOS" +
- str( node ) + " with " +
- str( batchIntentSize ) + "intents: " +
- str( float(withdrawResult) /\
- int(batchIntentSize) ) + " ms" )
-
- #NOTE: END node loop
-
- if len( batchInstallLat ) > 0 and int( i ) > numIgnore:
- maxInstallLat.append( max( batchInstallLat ) )
- maxSingleInstallLat.append(
- max( batchInstallLat ) / int( batchIntentSize )
- )
- elif len( batchInstallLat ) == 0:
- # If I failed to read anything from the file,
- # increase the wait time before checking intents
- sleepTime += 30
- if len( batchWithdrawLat ) > 0 and int( i ) > numIgnore:
- maxWithdrawLat.append( max( batchWithdrawLat ) )
- maxSingleWithdrawLat.append(
- max( batchWithdrawLat ) / int( batchIntentSize )
- )
- batchInstallLat = []
- batchWithdrawLat = []
-
- # Sleep in between iterations
- time.sleep( 5 )
-
- #NOTE: END iteration loop
-
- if maxInstallLat:
- avgInstallLat = str( round( numpy.average(maxInstallLat)
- , 2 ))
- stdInstallLat = str( round(
- numpy.std(maxInstallLat), 2))
- avgSingleInstallLat = str( round(
- numpy.average(maxSingleInstallLat)
- , 3 ))
- stdSingleInstallLat = str( round(
- numpy.std(maxSingleInstallLat),
- 3 ))
- else:
- avgInstallLat = "NA"
- stdInstallLat = "NA"
- main.log.report( "Batch installation failed" )
- assertion = main.FALSE
-
- if maxWithdrawLat:
- avgWithdrawLat = str( round( numpy.average(maxWithdrawLat)
- , 2 ))
- stdWithdrawLat = str( round(
- numpy.std(maxWithdrawLat), 2))
- avgSingleWithdrawLat = str( round(
- numpy.average(maxSingleWithdrawLat)
- , 3 ))
- stdSingleWithdrawLat = str( round(
- numpy.std(maxSingleWithdrawLat),
- 3 ))
- else:
- avgWithdrawLat = "NA"
- stdWithdrawLat = "NA"
- main.log.report( "Batch withdraw failed" )
- assertion = main.FALSE
-
- main.log.report( "Avg of batch installation latency " +
- "of size " + str( batchIntentSize ) + ": " +
- str( avgInstallLat ) + " ms" )
- main.log.report( "Std Deviation of batch installation latency " +
- ": " +
- str( stdInstallLat ) + " ms" )
- main.log.report( "Avg of single installation latency " +
- "of size " + str( batchIntentSize ) + ": " +
- str( avgSingleInstallLat ) + " ms" )
- main.log.report( "Std Deviation of single installation latency " +
- ": " +
- str( stdSingleInstallLat ) + " ms" )
-
- main.log.report( "Avg of batch withdraw latency " +
- "of size " + str( batchIntentSize ) + ": " +
- str( avgWithdrawLat ) + " ms" )
- main.log.report( "Std Deviation of batch withdraw latency " +
- ": " +
- str( stdWithdrawLat ) + " ms" )
- main.log.report( "Avg of single withdraw latency " +
- "of size " + str( batchIntentSize ) + ": " +
- str( avgSingleWithdrawLat ) + " ms" )
- main.log.report( "Std Deviation of single withdraw latency " +
- ": " +
- str( stdSingleWithdrawLat ) + " ms" )
-
- dbCmd = (
- "INSERT INTO intents_latency_tests VALUES("
- "'"+timeToPost+"','intents_latency_results',"
- ""+runNum+","+str(clusterCount)+","+str(batchIntentSize)+","
- ""+str(avgInstallLat)+","+str(stdInstallLat)+","
- ""+str(avgWithdrawLat)+","+str(stdWithdrawLat)+");"
- )
-
- # Write result to file (which is posted to DB by jenkins)
- fResult = open(intentFilePath, 'a')
- if dbCmd:
- fResult.write(dbCmd+"\n")
- fResult.close()
-
- if batch == 0:
- batchIntentSize = 10
- elif batch == 1:
- batchIntentSize = 100
- elif batch == 2:
- batchIntentSize = 1000
- elif batch == 3:
- batchIntentSize = 2000
- if batch < 4:
- main.log.report( "Increasing batch intent size to " +
- str(batchIntentSize) )
-
- #NOTE: END batch loop
-
- #main.log.info( "Removing all intents for next test case" )
- #jsonTemp = main.ONOS1cli.intents( jsonFormat=True )
- #jsonObjIntents = json.loads( jsonTemp )
- # if jsonObjIntents:
- # for intents in jsonObjIntents:
- # tempId = intents[ 'id' ]
- # main.ONOS1cli.removeIntent( tempId )
- # main.ONOS1cli.removeIntent( tempId )
-
- utilities.assert_equals(
- expect=main.TRUE,
- actual=assertion,
- onpass="Batch intent install/withdraw test successful",
- onfail="Batch intent install/withdraw test failed" )
-
- def CASE5( self, main ):
- """
- Increase number of nodes and initiate CLI
- """
- import time
- import json
-
- ONOS1Ip = main.params[ 'CTRL' ][ 'ip1' ]
- ONOS2Ip = main.params[ 'CTRL' ][ 'ip2' ]
- ONOS3Ip = main.params[ 'CTRL' ][ 'ip3' ]
- ONOS4Ip = main.params[ 'CTRL' ][ 'ip4' ]
- ONOS5Ip = main.params[ 'CTRL' ][ 'ip5' ]
- ONOS6Ip = main.params[ 'CTRL' ][ 'ip6' ]
- ONOS7Ip = main.params[ 'CTRL' ][ 'ip7' ]
-
- global clusterCount
- clusterCount += 2
- main.log.report( "Increasing cluster size to " +
- str( clusterCount ) )
-
- installResult = main.FALSE
-
- if clusterCount == 3:
- installResult1 = \
- main.ONOSbench.onosInstall( node=ONOS2Ip )
- installResult2 = \
- main.ONOSbench.onosInstall( node=ONOS3Ip )
- time.sleep( 5 )
-
- main.log.info( "Starting ONOS CLI" )
- main.ONOS2cli.startOnosCli( ONOS2Ip )
- main.ONOS3cli.startOnosCli( ONOS3Ip )
-
- installResult = installResult1 and installResult2
-
- if clusterCount == 5:
- main.log.info( "Installing ONOS on node 4 and 5" )
- installResult1 = \
- main.ONOSbench.onosInstall( node=ONOS4Ip )
- installResult2 = \
- main.ONOSbench.onosInstall( node=ONOS5Ip )
-
- main.log.info( "Starting ONOS CLI" )
- main.ONOS4cli.startOnosCli( ONOS4Ip )
- main.ONOS5cli.startOnosCli( ONOS5Ip )
-
- installResult = installResult1 and installResult2
-
- if clusterCount == 7:
- main.log.info( "Installing ONOS on node 6 and 7" )
- installResult1 = \
- main.ONOSbench.onosInstall( node=ONOS6Ip )
- installResult2 = \
- main.ONOSbench.onosInstall( node=ONOS7Ip )
-
- main.log.info( "Starting ONOS CLI" )
- main.ONOS6cli.startOnosCli( ONOS6Ip )
- main.ONOS7cli.startOnosCli( ONOS7Ip )
-
- installResult = installResult1 and installResult2
-
- time.sleep( 5 )
-
- if installResult == main.TRUE:
- assertion = main.TRUE
- else:
- assertion = main.FALSE
-
- utilities.assert_equals( expect=main.TRUE, actual=assertion,
- onpass="Scale out to " + str( clusterCount ) +
- " nodes successful",
- onfail="Scale out to " + str( clusterCount ) +
- " nodes failed" )
-
- def CASE7( self, main ):
- # TODO: Fix for scale-out scenario
- """
- Batch intent reroute latency
- """
- import time
- import json
- import requests
- import os
- import numpy
- global clusterCount
-
- ONOSIpList = []
- for i in range( 1, 8 ):
- ONOSIpList.append( main.params[ 'CTRL' ][ 'ip' + str( i ) ] )
-
- ONOSUser = main.params[ 'CTRL' ][ 'user' ]
- defaultSwPort = main.params[ 'CTRL' ][ 'port1' ]
-
- batchIntentSize = main.params[ 'TEST' ][ 'batchIntentSize' ]
- batchThreshMin = int( main.params[ 'TEST' ][ 'batchThresholdMin' ] )
- batchThreshMax = int( main.params[ 'TEST' ][ 'batchThresholdMax' ] )
- intfs = main.params[ 'TEST' ][ 'intfs' ]
- installTime = main.params[ 'JSON' ][ 'installedTime' ]
-
- # number of iterations of case
- numIter = main.params[ 'TEST' ][ 'numIter' ]
- numIgnore = int( main.params[ 'TEST' ][ 'numIgnore' ] )
- numSwitch = int( main.params[ 'TEST' ][ 'numSwitch' ] )
- nThread = main.params[ 'TEST' ][ 'numMult' ]
-
- main.log.report( "Batch intent installation test of " +
- batchIntentSize + " intents" )
-
- batchResultList = []
-
- time.sleep( 10 )
-
- main.log.info( "Getting list of available devices" )
- deviceIdList = []
- jsonStr = main.ONOS1cli.devices()
- jsonObj = json.loads( jsonStr )
- for device in jsonObj:
- deviceIdList.append( device[ 'id' ] )
-
- batchInstallLat = []
- batchWithdrawLat = []
- sleepTime = 10
-
- baseDir = "/tmp/"
- maxInstallLat = []
-
- for i in range( 0, int( numIter ) ):
- main.log.info( "Pushing " +
- str( int( batchIntentSize ) * int( nThread ) ) +
- " intents. Iteration " + str( i ) )
-
- main.ONOSbench.pushTestIntentsShell(
- deviceIdList[ 0 ] + "/2",
- deviceIdList[ 7 ] + "/2",
- batchIntentSize, "/tmp/batch_install.txt",
- ONOSIpList[ 0 ], numMult="1", appId="1",
- report=False, options="--install" )
- # main.ONOSbench.pushTestIntentsShell(
- # "of:0000000000001002/1",
- # "of:0000000000002002/1",
- # 133, "/tmp/temp2.txt", "10.128.174.2",
- # numMult="6", appId="2",report=False )
-
- # TODO: Check for installation success then proceed
- time.sleep( 30 )
-
- # NOTE: this interface is specific to
- # topo-intentFlower.py topology
- # reroute case.
- main.log.info( "Disabling interface " + intfs )
- main.Mininet1.handle.sendline(
- "sh ifconfig " + intfs + " down" )
- t0System = time.time() * 1000
-
- # TODO: Wait sufficient time for intents to install
- time.sleep( 10 )
-
- # TODO: get intent installation time
-
- # Obtain metrics from ONOS 1, 2, 3
- intentsJsonStr1 = main.ONOS1cli.intentsEventsMetrics()
- intentsJsonObj1 = json.loads( intentsJsonStr1 )
- # Parse values from the json object
- intentInstall1 = \
- intentsJsonObj1[ installTime ][ 'value' ]
- intentRerouteLat1 = \
- int( intentInstall1 ) - int( t0System )
-
- if clusterCount == 3:
- intentsJsonStr2 =\
- main.ONOS2cli.intentsEventsMetrics()
- intentsJsonStr3 =\
- main.ONOS3cli.intentsEventsMetrics()
- intentsJsonObj2 = json.loads( intentsJsonStr2 )
- intentsJsonObj3 = json.loads( intentsJsonStr3 )
- intentInstall2 = \
- intentsJsonObj2[ installTime ][ 'value' ]
- intentInstall3 = \
- intentsJsonObj3[ installTime ][ 'value' ]
- intentRerouteLat2 = \
- int( intentInstall2 ) - int( t0System )
- intentRerouteLat3 = \
- int( intentInstall3 ) - int( t0System )
- else:
- intentRerouteLat2 = 0
- intentRerouteLat3 = 0
-
- if clusterCount == 5:
- intentsJsonStr4 =\
- main.ONOS4cli.intentsEventsMetrics()
- intentsJsonStr5 =\
- main.ONOS5cli.intentsEventsMetrics()
- intentsJsonObj4 = json.loads( intentsJsonStr4 )
- intentsJsonObj5 = json.loads( intentsJsonStr5 )
- intentInstall4 = \
- intentsJsonObj4[ installTime ][ 'value' ]
- intentInstall5 = \
- intentsJsonObj5[ installTime ][ 'value' ]
- intentRerouteLat4 = \
- int( intentInstall4 ) - int( t0System )
- intentRerouteLat5 = \
- int( intentInstall5 ) - int( t0System )
- else:
- intentRerouteLat4 = 0
- intentRerouteLat5 = 0
-
- if clusterCount == 7:
- intentsJsonStr6 =\
- main.ONOS6cli.intentsEventsMetrics()
- intentsJsonStr7 =\
- main.ONOS7cli.intentsEventsMetrics()
- intentsJsonObj6 = json.loads( intentsJsonStr6 )
- intentsJsonObj7 = json.loads( intentsJsonStr7 )
- intentInstall6 = \
- intentsJsonObj6[ installTime ][ 'value' ]
- intentInstall7 = \
- intentsJsonObj7[ installTime ][ 'value' ]
- intentRerouteLat6 = \
- int( intentInstall6 ) - int( t0System )
- intentRerouteLat7 = \
- int( intentInstall7 ) - int( t0System )
- else:
- intentRerouteLat6 = 0
- intentRerouteLat7 = 0
-
- intentRerouteLatAvg = \
- ( intentRerouteLat1 +
- intentRerouteLat2 +
- intentRerouteLat3 +
- intentRerouteLat4 +
- intentRerouteLat5 +
- intentRerouteLat6 +
- intentRerouteLat7 ) / clusterCount
-
- main.log.info( "Intent reroute latency avg for iteration " +
- str( i ) + ": " + str( intentRerouteLatAvg ) )
- # TODO: Remove intents for next iteration
-
- time.sleep( 5 )
-
- intentsStr = main.ONOS1cli.intents()
- intentsJson = json.loads( intentsStr )
- for intents in intentsJson:
- intentId = intents[ 'id' ]
- # TODO: make sure this removes all intents
- # print intentId
- if intentId:
- main.ONOS1cli.removeIntent( intentId )
-
- main.Mininet1.handle.sendline(
- "sh ifconfig " + intfs + " up" )
-
- main.log.info( "Intents removed and port back up" )
-
- def CASE9( self, main ):
- count = 0
- swNum1 = 1
- swNum2 = 1
- appid = 0
- portNum1 = 1
- portNum2 = 1
-
- time.sleep( 30 )
-
- while True:
- # main.ONOS1cli.pushTestIntents(
- #"of:0000000000001001/1",
- #"of:0000000000002001/1",
- # 100, numMult="10", appId="1" )
- # main.ONOS2cli.pushTestIntents(
- # "of:0000000000001002/1",
- # "of:0000000000002002/1",
- # 100, numMult="10", appId="2" )
- # main.ONOS2cli.pushTestIntents(
- # "of:0000000000001003/1",
- # "of:0000000000002003/1",
- # 100, numMult="10", appId="3" )
- count += 1
-
- if count >= 100:
- main.ONOSbench.handle.sendline(
- "onos 10.128.174.1 intents-events-metrics >>" +
- " /tmp/metrics_intents_temp.txt &" )
- count = 0
-
- arg1 = "of:000000000000100" + str( swNum1 ) + "/" + str( portNum1 )
- arg2 = "of:000000000000200" + str( swNum2 ) + "/" + str( portNum2 )
-
- swNum1 += 1
-
- if swNum1 > 7:
- swNum1 = 1
- swNum2 += 1
- if swNum2 > 7:
- appid += 1
-
- if swNum2 > 7:
- swNum2 = 1
-
- main.ONOSbench.pushTestIntentsShell(
- arg1,
- arg2,
- 100, "/tmp/temp.txt", "10.128.174.1",
- numMult="10", appId=appid, report=False )
- # main.ONOSbench.pushTestIntentsShell(
- # "of:0000000000001002/1",
- # "of:0000000000002002/1",
- # 133, "/tmp/temp2.txt", "10.128.174.2",
- # numMult="6", appId="2",report=False )
- # main.ONOSbench.pushTestIntentsShell(
- # "of:0000000000001003/1",
- # "of:0000000000002003/1",
- # 133, "/tmp/temp3.txt", "10.128.174.3",
- # numMult="6", appId="3",report=False )
-
- time.sleep( 0.2 )
diff --git a/TestON/tests/IntentPerfNext/IntentPerfNext.topo b/TestON/tests/IntentPerfNext/IntentPerfNext.topo
deleted file mode 100644
index 5575237..0000000
--- a/TestON/tests/IntentPerfNext/IntentPerfNext.topo
+++ /dev/null
@@ -1,109 +0,0 @@
-<TOPOLOGY>
- <COMPONENT>
-
- <ONOSbench>
- <host>10.128.174.10</host>
- <user>admin</user>
- <password>onos_test</password>
- <type>OnosDriver</type>
- <connect_order>1</connect_order>
- <COMPONENTS> </COMPONENTS>
- </ONOSbench>
-
- <ONOS1cli>
- <host>10.128.174.10</host>
- <user>admin</user>
- <password>onos_test</password>
- <type>OnosCliDriver</type>
- <connect_order>2</connect_order>
- <COMPONENTS> </COMPONENTS>
- </ONOS1cli>
-
- <ONOS2cli>
- <host>10.128.174.10</host>
- <user>admin</user>
- <password>onos_test</password>
- <type>OnosCliDriver</type>
- <connect_order>3</connect_order>
- <COMPONENTS> </COMPONENTS>
- </ONOS2cli>
-
- <ONOS3cli>
- <host>10.128.174.10</host>
- <user>admin</user>
- <password>onos_test</password>
- <type>OnosCliDriver</type>
- <connect_order>4</connect_order>
- <COMPONENTS> </COMPONENTS>
- </ONOS3cli>
-
- <ONOS4cli>
- <host>10.128.174.10</host>
- <user>admin</user>
- <password>onos_test</password>
- <type>OnosCliDriver</type>
- <connect_order>5</connect_order>
- <COMPONENTS> </COMPONENTS>
- </ONOS4cli>
-
- <ONOS5cli>
- <host>10.128.174.10</host>
- <user>admin</user>
- <password>onos_test</password>
- <type>OnosCliDriver</type>
- <connect_order>6</connect_order>
- <COMPONENTS> </COMPONENTS>
- </ONOS5cli>
-
- <ONOS6cli>
- <host>10.128.174.10</host>
- <user>admin</user>
- <password>onos_test</password>
- <type>OnosCliDriver</type>
- <connect_order>7</connect_order>
- <COMPONENTS> </COMPONENTS>
- </ONOS6cli>
-
- <ONOS7cli>
- <host>10.128.174.10</host>
- <user>admin</user>
- <password>onos_test</password>
- <type>OnosCliDriver</type>
- <connect_order>8</connect_order>
- <COMPONENTS> </COMPONENTS>
- </ONOS7cli>
-
- <ONOS1>
- <host>10.128.174.1</host>
- <user>admin</user>
- <password>onos_test</password>
- <type>OnosDriver</type>
- <connect_order>9</connect_order>
- <COMPONENTS> </COMPONENTS>
- </ONOS1>
-
- <Mininet1>
- <host>10.128.10.90</host>
- <user>admin</user>
- <password>onos_test</password>
- <type>MininetCliDriver</type>
- <connect_order>10</connect_order>
- <COMPONENTS>
- <arg1> --custom topo-intent-8sw.py </arg1>
- <arg2> --arp --mac --topo mytopo </arg2>
- <arg3> </arg3>
- <controller> remote </controller>
- </COMPONENTS>
- </Mininet1>
-
- <Mininet2>
- <host>10.128.10.90</host>
- <user>admin</user>
- <password>onos_test</password>
- <type>RemoteMininetDriver</type>
- <connect_order>11</connect_order>
- <COMPONENTS> </COMPONENTS>
- </Mininet2>
-
- </COMPONENT>
-</TOPOLOGY>
diff --git a/TestON/tests/IntentPerfNext/__init__.py b/TestON/tests/IntentPerfNext/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/TestON/tests/IntentPerfNext/__init__.py
+++ /dev/null
diff --git a/TestON/tests/LincOETest/LincOETest.params b/TestON/tests/LincOETest/LincOETest.params
deleted file mode 100755
index 46e4f19..0000000
--- a/TestON/tests/LincOETest/LincOETest.params
+++ /dev/null
@@ -1,20 +0,0 @@
-<PARAMS>
-
- <testcases>1,2</testcases>
-
- #Environment variables
- <ENV>
- <cellName>linc_oe_test</cellName>
- </ENV>
-
- <CTRL>
- <ip1>10.128.174.1</ip1>
- <port1>6633</port1>
- </CTRL>
-
- <GIT>
- <autoPull>off</autoPull>
- <checkout>master</checkout>
- </GIT>
-
-</PARAMS>
diff --git a/TestON/tests/LincOETest/LincOETest.py b/TestON/tests/LincOETest/LincOETest.py
deleted file mode 100644
index b693138..0000000
--- a/TestON/tests/LincOETest/LincOETest.py
+++ /dev/null
@@ -1,118 +0,0 @@
-# LincOETest
-#
-# Packet-Optical Intent Testing
-#
-# andrew@onlab.us
-
-
-import time
-import sys
-import os
-import re
-
-
-class LincOETest:
-
- def __init__( self ):
- self.default = ''
-
- def CASE1( self, main ):
- """
- Startup sequence:
- git pull
- mvn clean install
- onos-package
- cell <name>
- onos-verify-cell
- onos-install -f
- onos-wait-for-start
- """
- import time
-
- cellName = main.params[ 'ENV' ][ 'cellName' ]
-
- ONOS1Ip = main.params[ 'CTRL' ][ 'ip1' ]
- ONOS1Port = main.params[ 'CTRL' ][ 'port1' ]
-
- gitPullTrigger = main.params[ 'GIT' ][ 'autoPull' ]
- gitCheckoutBranch = main.params[ 'GIT' ][ 'checkout' ]
-
- main.case( "Setting up test environment" )
-
- main.step( "Creating cell file" )
- # params: ( bench ip, cell name, mininet ip, *onos ips )
- cellFileResult = main.ONOSbench.createCellFile(
- "10.128.20.10", cellName, "10.128.10.90",
- "onos-core-trivial,onos-app-fwd",
- "10.128.174.1" )
-
- main.step( "Applying cell variable to environment" )
- # cellResult = main.ONOSbench.setCell( cellName )
- cellResult = main.ONOSbench.setCell( "temp_cell_2" )
- verifyResult = main.ONOSbench.verifyCell()
-
- if gitPullTrigger == 'on':
- main.step( "Git checkout and pull master" )
- main.ONOSbench.gitCheckout( gitCheckoutBranch )
- gitPullResult = main.ONOSbench.gitPull()
- else:
- main.log.info( "Git checkout and pull skipped by config" )
- gitPullResult = main.TRUE
-
- main.step( "Using mvn clean & install" )
- # cleanInstallResult = main.ONOSbench.cleanInstall()
- cleanInstallResult = main.TRUE
-
- main.step( "Creating ONOS package" )
- packageResult = main.ONOSbench.onosPackage()
-
- main.step( "Installing ONOS package" )
- onosInstallResult = main.ONOSbench.onosInstall()
- onos1Isup = main.ONOSbench.isup()
-
- main.step( "Starting ONOS service" )
- startResult = main.ONOSbench.onosStart( ONOS1Ip )
-
- main.step( "Setting cell for ONOScli" )
- main.ONOScli.setCell( cellName )
-
- main.step( "Starting ONOScli" )
- main.ONOScli.startOnosCli( ONOS1Ip )
-
- case1Result = ( cleanInstallResult and packageResult and
- cellResult and verifyResult and onosInstallResult and
- onos1Isup and startResult )
- utilities.assertEquals( expect=main.TRUE, actual=case1Result,
- onpass="Test startup successful",
- onfail="Test startup NOT successful" )
-
- time.sleep( 10 )
-
- def CASE2( self, main ):
- """
- Configure topology
- """
- import time
-
- ONOS1Ip = main.params[ 'CTRL' ][ 'ip1' ]
- defaultSwPort = main.params[ 'CTRL' ][ 'port1' ]
-
- # Assign packet level switches to controller
- main.Mininet1.assignSwController(
- sw="1",
- ip1=ONOS1Ip,
- port1=defaultSwPort )
- main.Mininet1.assignSwController(
- sw="2",
- ip1=ONOS1Ip,
- port1=defaultSwPort )
-
- # Check devices in controller
- # This should include Linc-OE devices as well
- devices = main.ONOScli.devices()
- main.log.info( devices )
-
- def CASE3( self, main ):
- """
- Install multi-layer intents
- """
diff --git a/TestON/tests/LincOETest/LincOETest.topo b/TestON/tests/LincOETest/LincOETest.topo
deleted file mode 100755
index 5d572ca..0000000
--- a/TestON/tests/LincOETest/LincOETest.topo
+++ /dev/null
@@ -1,55 +0,0 @@
-<TOPOLOGY>
- <COMPONENT>
-
- <ONOSbench>
- <host>10.128.20.10</host>
- <user>admin</user>
- <password>onos_test</password>
- <type>OnosDriver</type>
- <connect_order>1</connect_order>
- <COMPONENTS> </COMPONENTS>
- </ONOSbench>
-
- <ONOScli>
- <host>10.128.20.10<</host>
- <user>admin</user>
- <password>onos_test</password>
- <type>OnosCliDriver</type>
- <connect_order>2</connect_order>
- <COMPONENTS> </COMPONENTS>
- </ONOScli>
-
- <ONOS1>
- <host>10.128.174.1</host>
- <user>sdn</user>
- <password>rocks</password>
- <type>OnosDriver</type>
- <connect_order>3</connect_order>
- <COMPONENTS> </COMPONENTS>
- </ONOS1>
-
- <LincOE>
- <host>10.128.10.90</host>
- <user>admin</user>
- <password>onos_test</password>
- <type>LincOEDriver</type>
- <connect_order>4</connect_order>
- <COMPONENTS> </COMPONENTS>
- </LincOE>
-
- <Mininet1>
- <host>10.128.10.90</host>
- <user>admin</user>
- <password>onos_test</password>
- <type>MininetCliDriver</type>
- <connect_order>5</connect_order>
- <COMPONENTS>
- <arg1> --custom optical.py </arg1>
- <arg2> --arp --mac</arg2>
- <arg3> --topo optical</arg3>
- <controller> remote </controller>
- </COMPONENTS>
- </Mininet1>
-
- </COMPONENT>
-</TOPOLOGY>
diff --git a/TestON/tests/LincOETest/__init__.py b/TestON/tests/LincOETest/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/TestON/tests/LincOETest/__init__.py
+++ /dev/null
diff --git a/TestON/tests/LinkEventTP/LinkEventTP.params b/TestON/tests/LinkEventTP/LinkEventTP.params
deleted file mode 100644
index 7a47a51..0000000
--- a/TestON/tests/LinkEventTP/LinkEventTP.params
+++ /dev/null
@@ -1,62 +0,0 @@
-<PARAMS>
- <testcases>1,2,3</testcases>
-
- <ENV>
- <cellName>network_tp_test</cellName>
- <cellFeatures>"webconsole,onos-core,onos-api,onos-cli,onos-null,onos-rest,onos-app-metrics,onos-app-metrics-intent,onos-app-metrics-topology"</cellFeatures>
- <onBaremetal>true</onBaremetal> "true"
- </ENV>
-
- <SCALE>6</SCALE>
-
- <availableNodes>7</availableNodes>
-
- <GIT>
- <autopull>off</autopull>
- <checkout>master</checkout>
- </GIT>
-
- <CTRL>
- <USER>admin</USER>
- <ip1>10.254.1.201</ip1>
- <port1>6633</port1>
- <ip2>10.254.1.202</ip2>
- <port2>6633</port2>
- <ip3>10.254.1.203</ip3>
- <port3>6633</port3>
- <ip4>10.254.1.204</ip4>
- <port4>6633</port4>
- <ip5>10.254.1.205</ip5>
- <port5>6633</port5>
- <ip6>10.254.1.206</ip6>
- <port6>6633</port6>
- <ip7>10.254.1.207</ip7>
- <port7>6633</port7>
- </CTRL>
-
- <BENCH>
- <user>admin</user>
- <ip1>localhost</ip1>
- </BENCH>
-
- <TEST> # duration = time the test loop runs
- # log_interval = how often the data is reported
- # wait = time between tests, used to let the averages run down
- <configFile>/onos/tools/package/etc/org.onosproject.net.topology.impl.DefaultTopologyProvider.cfg</configFile>
- <flickerRates>20000,20000,20000,20000,20000,20000,20000</flickerRates>
- <devicesPerNode>40,40,40,40,40,40,40</devicesPerNode>
- <flickerRate>1000</flickerRate>
- <linkgraphdif>.03</linkgraphdif> # 0-1 indicated link/graph rate dif tolerance
- <duration>1800</duration>
- <log_interval>15</log_interval>
- <wait>60</wait>
- <skipCleanInstall>yes</skipCleanInstall>
- <MN>localhost</MN>
- <logFile>link_event_tp_results_LOG</logFile>
- </TEST>
-
- <JSON>
- <intents_rate>intentInstalledRate</intents_rate>
- </JSON>
-
-</PARAMS>
diff --git a/TestON/tests/LinkEventTP/LinkEventTP.py b/TestON/tests/LinkEventTP/LinkEventTP.py
deleted file mode 100644
index a4bc0aa..0000000
--- a/TestON/tests/LinkEventTP/LinkEventTP.py
+++ /dev/null
@@ -1,336 +0,0 @@
-# ScaleOutTemplate --> LinkEventTP
-#
-# CASE1 starts number of nodes specified in param file
-#
-# cameron@onlab.us
-
-import sys
-import os
-
-
-class LinkEventTP:
-
- def __init__( self ):
- self.default = ''
-
- def CASE1( self, main ): #This is the initialization case
- import os.path #this case will clean up all nodes
- #but only node 1 isnodestarted in this case
-
- global clusterCount #number of nodes running
- global ONOSIp #list of ONOS IP addresses
-
- clusterCount = 1
- ONOSIp = [ 0 ]
-
- #Load values from params file
- checkoutBranch = main.params[ 'GIT' ][ 'checkout' ]
- gitPull = main.params[ 'GIT' ][ 'autopull' ]
- cellName = main.params[ 'ENV' ][ 'cellName' ]
- Features= main.params[ 'ENV' ][ 'cellFeatures' ]
- BENCHIp = main.params[ 'BENCH' ][ 'ip1' ]
- BENCHUser = main.params[ 'BENCH' ][ 'user' ]
- maxNodes = int(main.params[ 'availableNodes' ])
- Features = main.params[ 'ENV' ][ 'cellFeatures' ]
- skipMvn = main.params[ 'TEST' ][ 'skipCleanInstall' ]
- flickerRate = main.params[ 'TEST' ][ 'flickerRate']
- deviceDistribution = (main.params[ 'TEST' ][ 'devicesPerNode']).split(",")
- MNip = main.params[ 'TEST' ][ 'MN' ]
- logFileName = main.params[ 'TEST' ][ 'logFile' ]
- onBaremetal = main.params[ 'ENV' ][ 'onBaremetal' ]
-
- main.ONOSbench.handle.sendline("export TERM=vt100")
- main.ONOSbench.handle.expect(":~")
- homeDir = os.path.expanduser('~')
-
- #Populate ONOSIp with ips from params
- for i in range(1, maxNodes + 1):
- ipString = 'ip' + str(i)
- ONOSIp.append(main.params[ 'CTRL' ][ ipString ])
-
- #kill off all onos processes
- main.log.step("Safety check, killing all ONOS processes")
- main.log.step("before initiating enviornment setup")
- for node in range(1, maxNodes + 1):
- main.ONOSbench.onosDie(ONOSIp[node])
-
- #construct the cell file
- main.log.step("Creating cell file")
- cellIp = []
- for node in range (1, clusterCount + 1):
- cellIp.append(ONOSIp[node])
- main.ONOSbench.createCellFile(BENCHIp,cellName,MNip,str(Features), *cellIp)
-
- main.step( "Set Cell" )
- main.ONOSbench.setCell(cellName)
-
- #Uninstall everywhere
- main.log.step( "Cleaning Enviornment..." )
- for i in range(1, maxNodes + 1):
- main.log.info(" Uninstalling ONOS " + str(i) )
- main.ONOSbench.onosUninstall( ONOSIp[i] )
-
- myDistribution = []
- for node in range (1, clusterCount + 1):
- myDistribution.append(deviceDistribution[node-1])
-
- main.ONOSbench.createLinkGraphFile( BENCHIp,cellIp,myDistribution)
- main.ONOSbench.createNullDevProviderFile( BENCHIp, cellIp, myDistribution)
- main.ONOSbench.createNullLinkProviderFile(BENCHIp)
-
- #git step - skipable
- main.step( "Git checkout and pull " + checkoutBranch )
- if gitPull == 'on':
- checkoutResult = main.ONOSbench.gitCheckout( checkoutBranch )
- pullResult = main.ONOSbench.gitPull()
-
- else:
- checkoutResult = main.TRUE
- pullResult = main.TRUE
- main.log.info( "Skipped git checkout and pull" )
-
- #mvn clean install, for debugging set param 'skipCleanInstall' to yes to speed up test
- if skipMvn != "yes":
- mvnResult = main.ONOSbench.cleanInstall()
-
- ### configure event rate file ###
- main.log.step("Writing Default Topology Provider config file")
- localPath = main.params[ 'TEST' ][ 'configFile' ]
- filePath = homeDir + localPath
- main.log.info(filePath)
- configFile = open(filePath, 'w+')
- main.log.info("File Opened")
- configFile.write("maxEvents = 1\n")
- configFile.write("maxIdleMs = 0\n")
- configFile.write("maxBatchMs = 0\n")
- main.log.info("File written and closed")
- configFile.close()
-
- logFile = open(logFileName, 'w+')
- main.log.info("Created log File")
- logFile.close()
-
- if onBaremetal == "true":
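- # On bare metal, overwrite tools/package/bin/onos-service before packaging
- # so that Karaf is started with a larger max heap (-Xmx8G) for this test.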
- filename = "/onos/tools/package/bin/onos-service"
- serviceConfig = open(homeDir + filename, 'w+')
- serviceConfig.write("#!/bin/bash\n ")
- serviceConfig.write("#------------------------------------- \n ")
- serviceConfig.write("# Starts ONOS Apache Karaf container\n ")
- serviceConfig.write("#------------------------------------- \n ")
- serviceConfig.write("#export JAVA_HOME=${JAVA_HOME:-/usr/lib/jvm/java-7-openjdk-amd64/}\n ")
- serviceConfig.write("""export JAVA_OPTS="${JAVA_OPTS:--Xms256m -Xmx8G}" \n """)
- serviceConfig.write("")
- serviceConfig.write("ONOS_HOME=/opt/onos \n ")
- serviceConfig.write("")
- serviceConfig.write("[ -d $ONOS_HOME ] && cd $ONOS_HOME || ONOS_HOME=$(dirname $0)/..\n")
- serviceConfig.write("""${ONOS_HOME}/apache-karaf-$KARAF_VERSION/bin/karaf "$@" \n """)
- serviceConfig.close()
-
- main.step( "Creating ONOS package" )
- packageResult = main.ONOSbench.onosPackage()
-
- main.step( "Installing ONOS package" )
- install1Result = main.ONOSbench.onosInstall( node=ONOSIp[1] )
-
- main.step( "Verify cells" )
- verifyCellResult = main.ONOSbench.verifyCell()
-
- main.step( "Environment setup and verification complete." )
- main.ONOS1cli.startOnosCli( ONOSIp[1] )
- main.step( "ONOS 1 is up and running." )
- main.ONOSbench.handle.expect(":~") #there is a dangling sendline somewhere...
-
- def CASE2( self, main ):
- # This case increases the cluster size by the step size 'scale'
- # Note: 'scale' is the size of the step
- # If scaling is not a part of your test, simply run this case
- # once after CASE1 to set up your environment for your desired
- # cluster size. If scaling is a part of your test, call this case each
- # time you want to increase the cluster size.
-
- """
- Increase number of nodes and initiate CLI
- """
- import time
- global clusterCount
-
- cellName = main.params[ 'ENV' ][ 'cellName' ]
- Features = main.params[ 'ENV' ][ 'cellFeatures' ]
- BENCHIp = main.params[ 'BENCH' ][ 'ip1' ]
- MNip = main.params[ 'TEST' ][ 'MN' ]
- deviceDistribution = (main.params[ 'TEST' ][ 'devicesPerNode']).split(",")
- maxNodes = int(main.params[ 'availableNodes' ]) # CASE1's local value is not visible here
-
- scale = int( main.params[ 'SCALE' ] )
- clusterCount += scale
-
- main.log.step( "Cleaning Environment..." )
- for i in range(1, maxNodes + 1):
- main.ONOSbench.onosDie(ONOSIp[i])
- main.log.info(" Uninstalling ONOS " + str(i) )
- main.ONOSbench.onosUninstall( ONOSIp[i] )
-
- myDistribution = []
- for node in range (1, clusterCount + 1):
- myDistribution.append(deviceDistribution[node-1])
-
- main.log.step("Creating cell file")
- cellIp = []
- for node in range (1, clusterCount + 1):
- cellIp.append(ONOSIp[node])
- main.ONOSbench.createCellFile(BENCHIp,cellName,MNip,str(Features), *cellIp)
-
- main.ONOSbench.createLinkGraphFile( BENCHIp,cellIp,myDistribution)
- main.ONOSbench.createNullDevProviderFile( BENCHIp, cellIp, myDistribution)
- main.ONOSbench.createNullLinkProviderFile(BENCHIp)
-
- main.step( "Set Cell" )
- main.ONOSbench.setCell(cellName)
-
- main.step( "Packaging" )
- main.ONOSbench.onosPackage()
-
- main.log.report( "Increasing cluster size to " + str( clusterCount ) )
- for node in range(1, clusterCount + 1):
- time.sleep(10)
- main.log.info("Starting ONOS " + str(node) + " at IP: " + ONOSIp[node])
- main.ONOSbench.onosInstall( node=ONOSIp[node] )
- nodeCli = getattr( main, "ONOS" + str(node) + "cli" )
- nodeCli.startOnosCli( ONOSIp[node] )
-
- for node in range(1, clusterCount + 1):
- for i in range( 2 ):
- isup = main.ONOSbench.isup( ONOSIp[node] )
- if isup:
- main.log.info("ONOS " + str(node) + " is up\n")
- break
- if not isup:
- main.log.report( "ONOS " + str(node) + " didn't start!" )
-
- def CASE3( self, main ):
- import time
- import json
- import string
- import csv
- import os.path
- import requests
- import numpy
-
- sustainability = float(main.params[ 'TEST' ][ 'linkgraphdif' ])
- flickerRates = (main.params[ 'TEST' ][ 'flickerRates']).split(",")
- homeDir = os.path.expanduser('~')
-
- linkResult = main.FALSE
- scale = int( main.params[ 'SCALE' ] )
-
- testDelay = main.params[ 'TEST' ][ 'wait' ]
-
- for node in range(1, clusterCount + 1):
- main.log.info("Writing flicker file to node " + str(node))
- main.ONOSbench.createNullLinkProviderFile( ONOSIp[node], eventRate=flickerRates[node-1], onNode=True )
-
- testDuration = main.params[ 'TEST' ][ 'duration' ]
- stop = time.time() + float( testDuration )
-
- msg = ( "Starting test loop for " + str(testDuration) + " seconds on a " + str(clusterCount) + " node cluster" )
- main.log.info( msg )
- logInterval = main.params[ 'TEST' ][ 'log_interval' ]
-
- linkResults = [0,0,0,0,0,0,0,0]
- graphResults = [0,0,0,0,0,0,0,0]
- JsonStr = [ 0,0,0,0,0,0,0,0 ]
- JsonObj = [ 0,0,0,0,0,0,0,0 ]
-
- while time.time() < stop:
- time.sleep( float( logInterval ) )
- for node in range(1, clusterCount+1):
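- # 'topology-events-metrics' prints meter stats; the grep/cut below isolates
- # the "Topology Link Events" line and the m1= field (assumed to be the
- # one-minute rate) is used as the per-node link event throughput.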
- main.ONOSbench.handle.sendline("""onos $OC""" + str(node) + """ topology-events-metrics|grep "Topology Link Events"|cut -d ' ' -f7 """)
- main.ONOSbench.handle.expect(":~")
- raw = (main.ONOSbench.handle.before).splitlines()
- myresult = "--"
- for word in raw:
- if "m1" in word:
- myresult = word
- myresult = myresult.replace("m1=","")
- break
- if myresult == "--":
- main.log.error("Parse error or no data error")
- msg = ( "Node " + str(node) + " Link Event TP: " + str(myresult) )
- main.log.info( msg )
- linkResults[node] = round(float(myresult),2)
- myLinkRate = round(float(myresult),2)
-
- main.ONOSbench.handle.sendline("""onos $OC""" + str(node) + """ topology-events-metrics|grep "Topology Graph Events"|cut -d ' ' -f7 """)
- main.ONOSbench.handle.expect(":~")
- raw = (main.ONOSbench.handle.before).splitlines()
- myresult = "--"
- for word in raw:
- if "m1" in word:
- myresult = word
- myresult = myresult.replace("m1=","")
- break
- if myresult == "--":
- main.log.error("Parse error or no data error")
- msg = ( "Node " + str(node) + " Graph Event TP: " + str(myresult) )
- main.log.info( msg )
- graphResults[node] = round(float(myresult),2)
- myGraphRate = round(float(myresult),2)
-
- difLinkGraph = float(myLinkRate - myGraphRate)
- difLinkGraph = numpy.absolute(difLinkGraph)
- main.log.info("Node " + str(node) + " abs(Link event - Graph event) = " + str(difLinkGraph))
- tempx = numpy.divide(difLinkGraph,float(myLinkRate))
- if tempx > sustainability:
- main.log.error("Difference in link event rate and graph event rate above " + str(sustainability) + " tolerance")
- print("")
-
- print("")
- print("")
-
- main.log.report("Final Link Event TP Results on " + str(clusterCount) + " node cluster")
- main.log.report("_______________________________________________")
- for node in range(1, clusterCount+1):
- main.log.report("Node " + str(node) + ": " + str(linkResults[node]))
-
- print("")
- print("")
-
- main.log.report("Final Graph Event TP Results on " + str(clusterCount) + " node cluster")
- main.log.report("_______________________________________________")
- for node in range(1, clusterCount+1):
- main.log.report("Node " + str(node) + ": " + str(graphResults[node]))
-
- #################################################################################
- # Data Logging
-
- logFileName = main.params[ 'TEST' ][ 'logFile' ]
- logFile = open(logFileName, 'a')
- main.log.info("Log file opened")
- flickerRate = main.params[ 'TEST' ][ 'flickerRate']
-
- for node in range (1, clusterCount + 1):
- # replace -> logFile.write( str(clusterCount) + "," + flickerNodes + "," )
- logFile.write("'" + "baremetal" + str(node) + "'," )
- logFile.write( testDuration + "," )
- logFile.write( flickerRate + "," )
- logFile.write( str(linkResults[node]) + "," )
- logFile.write( str(graphResults[node]) + "\n" )
-
- logFile.close()
- main.log.info("Log file closed")
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
diff --git a/TestON/tests/LinkEventTP/LinkEventTP.pyc b/TestON/tests/LinkEventTP/LinkEventTP.pyc
deleted file mode 100644
index 644df6d..0000000
--- a/TestON/tests/LinkEventTP/LinkEventTP.pyc
+++ /dev/null
Binary files differ
diff --git a/TestON/tests/LinkEventTP/LinkEventTP.topo b/TestON/tests/LinkEventTP/LinkEventTP.topo
deleted file mode 100644
index 3c24161..0000000
--- a/TestON/tests/LinkEventTP/LinkEventTP.topo
+++ /dev/null
@@ -1,146 +0,0 @@
-<TOPOLOGY>
-
- <COMPONENT>
-
- <ONOSbench>
- <host>10.254.1.200</host>
- <user>admin</user>
- <password>onos_test</password>
- <type>OnosDriver</type>
- <connect_order>1</connect_order>
- <COMPONENTS>
- <home>~/onos</home>
- </COMPONENTS>
-
- </ONOSbench>
-
- <ONOS1cli>
- <host>10.254.1.200</host>
- <user>admin</user>
- <password>onos_test</password>
- <type>OnosCliDriver</type>
- <connect_order>2</connect_order>
- <COMPONENTS> </COMPONENTS>
- </ONOS1cli>
-
- <ONOS2cli>
- <host>10.254.1.200</host>
- <user>admin</user>
- <password>onos_test</password>
- <type>OnosCliDriver</type>
- <connect_order>3</connect_order>
- <COMPONENTS> </COMPONENTS>
- </ONOS2cli>
-
- <ONOS3cli>
- <host>10.254.1.200</host>
- <user>admin</user>
- <password>onos_test</password>
- <type>OnosCliDriver</type>
- <connect_order>4</connect_order>
- <COMPONENTS> </COMPONENTS>
- </ONOS3cli>
-
- <ONOS4cli>
- <host>10.254.1.200</host>
- <user>admin</user>
- <password>onos_test</password>
- <type>OnosCliDriver</type>
- <connect_order>5</connect_order>
- <COMPONENTS> </COMPONENTS>
- </ONOS4cli>
-
- <ONOS5cli>
- <host>10.254.1.200</host>
- <user>admin</user>
- <password>onos_test</password>
- <type>OnosCliDriver</type>
- <connect_order>6</connect_order>
- <COMPONENTS> </COMPONENTS>
- </ONOS5cli>
-
- <ONOS6cli>
- <host>10.254.1.200</host>
- <user>admin</user>
- <password>onos_test</password>
- <type>OnosCliDriver</type>
- <connect_order>7</connect_order>
- <COMPONENTS> </COMPONENTS>
- </ONOS6cli>
-
- <ONOS7cli>
- <host>10.254.1.200</host>
- <user>admin</user>
- <password>onos_test</password>
- <type>OnosCliDriver</type>
- <connect_order>8</connect_order>
- <COMPONENTS> </COMPONENTS>
- </ONOS7cli>
-
- <ONOS1>
- <host>10.254.1.201</host>
- <user>sdn</user>
- <password>rocks</password>
- <type>OnosDriver</type>
- <connect_order>9</connect_order>
- <COMPONENTS> </COMPONENTS>
- </ONOS1>
-
- <ONOS2>
- <host>10.254.1.202</host>
- <user>sdn</user>
- <password>rocks</password>
- <type>OnosDriver</type>
- <connect_order>10</connect_order>
- <COMPONENTS> </COMPONENTS>
- </ONOS2>
-
- <ONOS3>
- <host>10.254.1.203</host>
- <user>sdn</user>
- <password>rocks</password>
- <type>OnosDriver</type>
- <connect_order>11</connect_order>
- <COMPONENTS> </COMPONENTS>
- </ONOS3>
-
- <ONOS4>
- <host>10.254.1.204</host>
- <user>sdn</user>
- <password>rocks</password>
- <type>OnosDriver</type>
- <connect_order>12</connect_order>
- <COMPONENTS> </COMPONENTS>
- </ONOS4>
-
-
- <ONOS5>
- <host>10.254.1.205</host>
- <user>sdn</user>
- <password>rocks</password>
- <type>OnosDriver</type>
- <connect_order>13</connect_order>
- <COMPONENTS> </COMPONENTS>
- </ONOS5>
-
- <ONOS6>
- <host>10.254.1.206</host>
- <user>sdn</user>
- <password>rocks</password>
- <type>OnosDriver</type>
- <connect_order>14</connect_order>
- <COMPONENTS> </COMPONENTS>
- </ONOS6>
-
- <ONOS7>
- <host>10.254.1.207</host>
- <user>sdn</user>
- <password>rocks</password>
- <type>OnosDriver</type>
- <connect_order>15</connect_order>
- <COMPONENTS> </COMPONENTS>
- </ONOS7>
-
- </COMPONENT>
-
-</TOPOLOGY>
diff --git a/TestON/tests/LinkEventTP/__init__.py b/TestON/tests/LinkEventTP/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/TestON/tests/LinkEventTP/__init__.py
+++ /dev/null
diff --git a/TestON/tests/LinkEventTP/__init__.pyc b/TestON/tests/LinkEventTP/__init__.pyc
deleted file mode 100644
index 62abfab..0000000
--- a/TestON/tests/LinkEventTP/__init__.pyc
+++ /dev/null
Binary files differ
diff --git a/TestON/tests/OpticalFunc13/OpticalFunc13.params b/TestON/tests/OpticalFunc13/OpticalFunc13.params
deleted file mode 100755
index 173d48b..0000000
--- a/TestON/tests/OpticalFunc13/OpticalFunc13.params
+++ /dev/null
@@ -1,47 +0,0 @@
-<PARAMS>
-
- <testcases>1,21,22,23</testcases>
-
- #Environment variables
- <ENV>
- <cellName>driver_test</cellName>
- </ENV>
-
- <CTRL>
- <ip1>10.128.20.11</ip1>
- <port1>6633</port1>
- </CTRL>
-
- <PING>
- <source1>h8</source1>
- <source2>h9</source2>
- <source3>h10</source3>
- <source4>h11</source4>
- <source5>h12</source5>
- <source6>h13</source6>
- <source7>h14</source7>
- <source8>h15</source8>
- <source9>h16</source9>
- <source10>h17</source10>
- <target1>10.0.0.18</target1>
- <target2>10.0.0.19</target2>
- <target3>10.0.0.20</target3>
- <target4>10.0.0.21</target4>
- <target5>10.0.0.22</target5>
- <target6>10.0.0.23</target6>
- <target7>10.0.0.24</target7>
- <target8>10.0.0.25</target8>
- <target9>10.0.0.26</target9>
- <target10>10.0.0.27</target10>
- </PING>
-
- <timers>
- <LinkDiscovery>5</LinkDiscovery>
- <SwitchDiscovery>31</SwitchDiscovery>
- </timers>
-
- <OPTICAL>
- <jsonfile> /home/admin/ONOS/tools/test/topos/oe-nonlinear-4.json </jsonfile>
- </OPTICAL>
-
-</PARAMS>
diff --git a/TestON/tests/OpticalFunc13/OpticalFunc13.py b/TestON/tests/OpticalFunc13/OpticalFunc13.py
deleted file mode 100755
index 7ddd532..0000000
--- a/TestON/tests/OpticalFunc13/OpticalFunc13.py
+++ /dev/null
@@ -1,248 +0,0 @@
-
-#Testing the basic functionality of ONOS Next
- #For sanity and driver functionality exercises only.
-
-import time
-import sys
-import os
-import re
-import time
-import json
-
-time.sleep(1)
-class OpticalFunc13:
- def __init__(self):
- self.default = ''
-
- def CASE1(self, main):
- '''
- Startup sequence:
- git pull
- mvn clean install
- onos-package
- cell <name>
- onos-verify-cell
- onos-install -f
- onos-wait-for-start
- '''
-
- cell_name = main.params['ENV']['cellName']
- ONOS1_ip = main.params['CTRL']['ip1']
- ONOS1_port = main.params['CTRL']['port1']
-
- main.case("Setting up test environment")
-
- main.step("Git checkout and pull master and get version")
- main.ONOSbench.git_checkout("master")
- git_pull_result = main.ONOSbench.git_pull()
- print "git_pull_result = ", git_pull_result
- version_result = main.ONOSbench.get_version()
- main.log.report(main.ONOSbench.get_version())
- if git_pull_result == 1:
- main.step("Using mvn clean & install")
- clean_install_result = main.ONOSbench.clean_install()
- #clean_install_result = main.TRUE
-
- main.step("Applying cell variable to environment")
- cell_result1 = main.ONOSbench.set_cell(cell_name)
- verify_result = main.ONOSbench.verify_cell()
- cell_result2 = main.ONOS2.set_cell(cell_name)
- #verify_result = main.ONOS2.verify_cell()
- main.ONOS2.start_onos_cli(ONOS_ip=main.params['CTRL']['ip1'])
-
- cell_result = cell_result1 and cell_result2
-
- main.step("Creating ONOS package")
- package_result = main.ONOSbench.onos_package()
-
- #main.step("Creating a cell")
- #cell_create_result = main.ONOSbench.create_cell_file(**************)
-
- main.step("Installing ONOS package")
- onos_install_result = main.ONOSbench.onos_install()
- onos1_isup = main.ONOSbench.isup()
-
- main.step("Starting ONOS service")
- start_result = main.ONOSbench.onos_start(ONOS1_ip)
-
- case1_result = (package_result and\
- cell_result and verify_result and onos_install_result and\
- onos1_isup and start_result )
- utilities.assert_equals(expect=main.TRUE, actual=case1_result,
- onpass="Test startup successful",
- onfail="Test startup NOT successful")
-
- def CASE11(self, main):
- '''
- Cleanup sequence:
- onos-service <node_ip> stop
- onos-uninstall
-
- TODO: Define rest of cleanup
-
- '''
-
- ONOS1_ip = main.params['CTRL']['ip1']
-
- main.case("Cleaning up test environment")
-
- main.step("Testing ONOS kill function")
- kill_result = main.ONOSbench.onos_kill(ONOS1_ip)
-
- main.step("Stopping ONOS service")
- stop_result = main.ONOSbench.onos_stop(ONOS1_ip)
-
- main.step("Uninstalling ONOS service")
- uninstall_result = main.ONOSbench.onos_uninstall()
-
-
- def CASE21(self, main):
- import time
- '''
- On the ONOS bench, run this command: ./~/ONOS/tools/test/bin/onos-topo-cfg
- which starts the REST service and copies the links json file to the onos instance.
- Note that in the case of Packet Optical, the links are not learnt from the
- topology; instead they are learnt from the json config file.
- '''
- main.log.report("This testcase starts the packet layer topology and REST")
- main.log.report("_____________________________________________")
- start_console_result = main.LincOE1.start_console()
- optical_mn_script = main.LincOE2.run_optical_mn_script()
- onos_topo_cfg_result = main.ONOSbench.run_onos_topo_cfg(instance_name = main.params['CTRL']['ip1'], json_file = main.params['OPTICAL']['jsonfile'])
-
-
-
- def CASE22(self, main):
- '''
- Currently we use a 4-switch linear optical topology and 2 packet layer mininet switches, each with one host.
- Therefore, the roadmCount variable = 4, packetLayerSWCount variable = 2, hostCount = 2,
- and this is hardcoded in the testcase. If the topology changes, these hardcoded values need to be changed.
- '''
-
- main.log.report("This testcase compares the optical+packet topology against what is expected")
- main.ONOS2.start_onos_cli(ONOS_ip=main.params['CTRL']['ip1'])
- devices_result = main.ONOS2.devices(json_format = False)
-
- print "devices_result = ", devices_result
- devices_linewise = devices_result.split("\n")
- devices_linewise = devices_linewise[1:-1]
- roadmCount = 0
- packetLayerSWCount = 0
- for line in devices_linewise:
- components = line.split(",")
- availability = components[1].split("=")[1]
- type = components[3].split("=")[1]
- if availability == 'true' and type == 'ROADM':
- roadmCount += 1
- elif availability == 'true' and type =='SWITCH':
- packetLayerSWCount += 1
- if roadmCount == 4:
- print "Number of Optical Switches = %d and is correctly detected" %roadmCount
- main.log.info ("Number of Optical Switches = " +str(roadmCount) +" and is correctly detected")
- opticalSW_result = main.TRUE
- else:
- print "Number of Optical Switches = %d and is wrong" %roadmCount
- main.log.info ("Number of Optical Switches = " +str(roadmCount) +" and is wrong")
- opticalSW_result = main.FALSE
-
- if packetLayerSWCount == 2:
- print "Number of Packet layer or mininet Switches = %d and is correctly detected" %packetLayerSWCount
- main.log.info("Number of Packet layer or mininet Switches = " +str(packetLayerSWCount) + " and is correctly detected")
- packetSW_result = main.TRUE
- else:
- print "Number of Packet layer or mininet Switches = %d and is wrong" %packetLayerSWCount
- main.log.info("Number of Packet layer or mininet Switches = " +str(packetLayerSWCount) + " and is wrong")
- packetSW_result = main.FALSE
- print "_________________________________"
-
- links_result = main.ONOS2.links(json_format = False)
- print "links_result = ", links_result
- print "_________________________________"
-
-
-
- #Discover hosts using pingall
- pingall_result = main.LincOE2.pingall()
-
- hosts_result = main.ONOS2.hosts(json_format = False)
- print "hosts_result = ", hosts_result
- print "_________________________________"
- hosts_linewise = hosts_result.split("\n")
- hosts_linewise = hosts_linewise[1:-1]
- hostCount = 0
- for line in hosts_linewise:
- hostid = line.split(",")[0].split("=")[1]
- hostCount +=1
- if hostCount ==2:
- print "Number of hosts = %d and is correctly detected" %hostCount
- main.log.info("Number of hosts = " + str(hostCount) +" and is correctly detected")
- hostDiscovery = main.TRUE
- else:
- print "Number of hosts = %d and is wrong" %hostCount
- main.log.info("Number of hosts = " + str(hostCount) +" and is wrong")
- hostDiscovery = main.FALSE
-
- case22_result = opticalSW_result and packetSW_result and hostDiscovery
- utilities.assert_equals(expect=main.TRUE, actual=case22_result,
- onpass="Packet optical topology discovery successful",
- onfail="Packet optical topology discovery failed")
-
- def CASE23(self, main):
- import time
- '''
- Add bidirectional point intents between 2 packet layer(mininet) devices and
- ping mininet hosts
- '''
- main.log.report("This testcase adds bidirectional point intents between 2 packet layer(mininet) devices and ping mininet hosts")
- ptp_intent_result = main.ONOS2.add_point_intent("of:0000ffffffff0001", 1, "of:0000ffffffff0002", 1)
- if ptp_intent_result == main.TRUE:
- get_intent_result = main.ONOS2.intents()
- main.log.info("Point to point intent install successful")
- main.log.info(get_intent_result)
-
- ptp_intent_result = main.ONOS2.add_point_intent("of:0000ffffffff0002", 1, "of:0000ffffffff0001", 1)
- if ptp_intent_result == main.TRUE:
- get_intent_result = main.ONOS2.intents()
- main.log.info("Point to point intent install successful")
- main.log.info(get_intent_result)
-
- time.sleep(10)
- flowHandle = main.ONOS2.flows()
- #print "flowHandle = ", flowHandle
- main.log.info("flows :" + flowHandle)
- intentHandle = main.ONOS2.intents()
- main.log.info("intents :" + intentHandle)
-
- Ping_Result = main.TRUE
- count = 1
- main.log.info("\n\nh1 is Pinging h2")
- ping = main.LincOE2.pingHostOptical(src="h1", target="h2")
- #ping = main.LincOE2.pinghost()
- if ping == main.FALSE and count<5:
- count+=1
- Ping_Result = main.FALSE
- main.log.report("Ping between h1 and h2 failed. Making attempt number "+str(count) + " in 2 seconds")
- time.sleep(2)
- ping = main.LincOE2.pingHostOptical(src="h1", target="h2")
- #ping = main.LincOE2.pinghost()
- elif ping==main.FALSE:
- main.log.report("All ping attempts between h1 and h2 have failed")
- Ping_Result = main.FALSE
- elif ping==main.TRUE:
- main.log.info("Ping test between h1 and h2 passed!")
- Ping_Result = main.TRUE
- else:
- main.log.info("Unknown error")
- Ping_Result = main.ERROR
-
- if Ping_Result==main.FALSE:
- main.log.report("Point intents for packet optical have not been installed correctly. Cleaning up")
- if Ping_Result==main.TRUE:
- main.log.report("Point Intents for packet optical have been installed correctly")
-
- case23_result = Ping_Result
- utilities.assert_equals(expect=main.TRUE, actual=case23_result,
- onpass="Point intents addition for packet optical and Pingall Test successful",
- onfail="Point intents addition for packet optical and Pingall Test NOT successful")
-
-
diff --git a/TestON/tests/OpticalFunc13/OpticalFunc13.topo b/TestON/tests/OpticalFunc13/OpticalFunc13.topo
deleted file mode 100755
index a3d6cfd..0000000
--- a/TestON/tests/OpticalFunc13/OpticalFunc13.topo
+++ /dev/null
@@ -1,94 +0,0 @@
-<TOPOLOGY>
- <COMPONENT>
-
- <ONOSbench>
- <host>10.128.10.11</host>
- <user>admin</user>
- <password>onos_test</password>
- <type>OnosDriver</type>
- <connect_order>1</connect_order>
- <COMPONENTS> </COMPONENTS>
- </ONOSbench>
-
- <ONOS1>
- <host>10.128.10.11</host>
- <user>sdn</user>
- <password>sdn</password>
- <type>OnosDriver</type>
- <connect_order>2</connect_order>
- <COMPONENTS> </COMPONENTS>
- </ONOS1>
-
- <ONOS2>
- <host>10.128.10.11</host>
- <user>admin</user>
- <password>onos_test</password>
- <type>OnosCliDriver</type>
- <connect_order>3</connect_order>
- <COMPONENTS> </COMPONENTS>
- </ONOS2>
-
- <Mininet1>
- <host>10.128.10.11</host>
- <user>admin</user>
- <password>onos_test</password>
- <type>MininetCliDriver</type>
- <connect_order>4</connect_order>
- <COMPONENTS>
- #Specify the Option for mininet
- <arg1> --custom ~/mininet/custom/topo-HA.py </arg1>
- <arg2> --topo mytopo </arg2>
- <arg3> --switch ovs,protocols=OpenFlow13 </arg3>
- <controller> remote </controller>
- </COMPONENTS>
- </Mininet1>
-
- <Mininet2>
- <host>10.128.10.11</host>
- <user>admin</user>
- <password>onos_test</password>
- <type>RemoteMininetDriver</type>
- <connect_order>5</connect_order>
- <COMPONENTS>
- #Specify the Option for mininet
- <arg1> --custom ~/mininet/custom/topo-HA.py </arg1>
- <arg2> --topo mytopo </arg2>
- <arg3> --switch ovs,protocols=OpenFlow13 </arg3>
- <controller> remote </controller>
- </COMPONENTS>
- </Mininet2>
-
- <LincOE1>
- <host>10.128.20.30</host>
- <user>admin</user>
- <password>onos_test</password>
- <type>LincOEDriver</type>
- <connect_order>6</connect_order>
- <COMPONENTS>
- <arg1> </arg1>
- </COMPONENTS>
- </LincOE1>
-
- <LincOE2>
- <host>10.128.20.30</host>
- <user>admin</user>
- <password>onos_test</password>
- <type>RemoteMininetDriver</type>
- <connect_order>7</connect_order>
- <COMPONENTS>
- <arg1> sudo python /home/admin/optical.py </arg1>
- <arg2> </arg2>
- </COMPONENTS>
- </LincOE2>
-
- <LincOE3>
- <host>10.128.20.30</host>
- <user>admin</user>
- <password>onos_test</password>
- <type>RemoteMininetDriver</type>
- <connect_order>8</connect_order>
- </LincOE3>
-
-
- </COMPONENT>
-</TOPOLOGY>
diff --git a/TestON/tests/OpticalFunc13/__init__.py b/TestON/tests/OpticalFunc13/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/TestON/tests/OpticalFunc13/__init__.py
+++ /dev/null
diff --git a/TestON/tests/ScaleOutTemplate/README b/TestON/tests/ScaleOutTemplate/README
deleted file mode 100644
index 2d5ae1c..0000000
--- a/TestON/tests/ScaleOutTemplate/README
+++ /dev/null
@@ -1,22 +0,0 @@
--------------------
-----Setup Guide----
--------------------
-
-CASE 1: init case; cleans and sets up environment, starts up node 1
-
-CASE 2: Increments scale case; starts up additional nodes, determined by 'SCALE' in params
- Ex: cluster size = 1 and scale = 2 ==> call CASE2 ==> cluster size = 3
-
-Params file:
- SCALE = cluster scale step size
- availableNodes = number of nodes you have provided data for in .topo file
-
- ENV:
- cellName = desired name of cell file to be created at runtime
- cellFeatures = list of features desired
- NOTE: webconsole, onos-api, onos-cli and onos-openflow are loaded automatically.
- Adjust your test and feature list accordingly.
- TEST:
- skipCleanInstall = set yes if you want to skip for the sake of test debugging, otherwise set no
-
-
diff --git a/TestON/tests/ScaleOutTemplate/ScaleOutTemplate.params b/TestON/tests/ScaleOutTemplate/ScaleOutTemplate.params
deleted file mode 100644
index 4e3ffdb..0000000
--- a/TestON/tests/ScaleOutTemplate/ScaleOutTemplate.params
+++ /dev/null
@@ -1,60 +0,0 @@
-<PARAMS>
-
- <testcases>1,2,1,2,1,2,1,2</testcases>
-
- <SCALE>1,3,5,7</SCALE>
- <availableNodes>7</availableNodes>
-
- <ENV>
- <cellName>defaultCell</cellName>
- <cellApps></cellApps>
- </ENV>
-
- <TEST>
- <skipCleanInstall>yes</skipCleanInstall>
- </TEST>
-
- <GIT>
- <autopull>off</autopull>
- <checkout>master</checkout>
- </GIT>
-
- <CTRL>
- <USER>admin</USER>
-
- <ip1>OC1</ip1>
- <port1>6633</port1>
-
- <ip2>OC2</ip2>
- <port2>6633</port2>
-
- <ip3>OC3</ip3>
- <port3>6633</port3>
-
- <ip4>OC4</ip4>
- <port4>6633</port4>
-
- <ip5>OC5</ip5>
- <port5>6633</port5>
-
- <ip6>OC6</ip6>
- <port6>6633</port6>
-
- <ip7>OC7</ip7>
- <port7>6633</port7>
-
- </CTRL>
-
- <MN>
- <ip1>OCN</ip1>
- </MN>
-
- <BENCH>
- <user>admin</user>
- <ip1>OCN</ip1>
- </BENCH>
-
- <JSON>
- </JSON>
-
-</PARAMS>
diff --git a/TestON/tests/ScaleOutTemplate/ScaleOutTemplate.py b/TestON/tests/ScaleOutTemplate/ScaleOutTemplate.py
deleted file mode 100644
index a8d21d2..0000000
--- a/TestON/tests/ScaleOutTemplate/ScaleOutTemplate.py
+++ /dev/null
@@ -1,121 +0,0 @@
-# ScaleOutTemplate
-#
-# CASE1 starts number of nodes specified in param file
-#
-# cameron@onlab.us
-
-import sys
-import os.path
-
-
-class ScaleOutTemplate:
-
- def __init__( self ):
- self.default = ''
-
- def CASE1( self, main ):
-
- import time
- global init
-
- try:
- if type(init) is not bool:
- init = False
- except NameError:
- init = False
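- # 'init' is a module-level flag: the try/except NameError above lets the
- # one-time setup below run only on the first pass through CASE1, so the
- # testcases list can repeat this case once per scale step.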
-
- #Load values from params file
- checkoutBranch = main.params[ 'GIT' ][ 'checkout' ]
- gitPull = main.params[ 'GIT' ][ 'autopull' ]
- cellName = main.params[ 'ENV' ][ 'cellName' ]
- Apps = main.params[ 'ENV' ][ 'cellApps' ]
- BENCHUser = main.params[ 'BENCH' ][ 'user' ]
- maxNodes = int(main.params[ 'availableNodes' ])
- skipMvn = main.params[ 'TEST' ][ 'skipCleanInstall' ]
- cellName = main.params[ 'ENV' ][ 'cellName' ]
-
- # -- INIT SECTION, ONLY RUNS ONCE -- #
- if init == False:
- init = True
- global clusterCount #number of nodes running
- global ONOSIp #list of ONOS IP addresses
- global scale
-
- clusterCount = 0
- ONOSIp = [ 0 ]
- scale = (main.params[ 'SCALE' ]).split(",")
- clusterCount = int(scale[0])
-
- #Populate ONOSIp with ips from params
- ONOSIp = [0]
- ONOSIp.extend(main.ONOSbench.getOnosIps())
- MN1Ip = ONOSIp[len(ONOSIp) -1]
- BENCHIp = ONOSIp[len(ONOSIp) -2]
-
- #git
- main.step( "Git checkout and pull " + checkoutBranch )
- if gitPull == 'on':
- checkoutResult = main.ONOSbench.gitCheckout( checkoutBranch )
- pullResult = main.ONOSbench.gitPull()
- else:
- main.log.info( "Skipped git checkout and pull" )
-
- if skipMvn != "yes":
- mvnResult = main.ONOSbench.cleanInstall()
-
- # -- END OF INIT SECTION --#
-
- clusterCount = int(scale[0])
- scale.remove(scale[0])
-
- #kill off all onos processes
- main.log.step("Safety check, killing all ONOS processes")
- main.log.step("before initiating environment setup")
- for node in range(1, maxNodes + 1):
- main.ONOSbench.onosDie(ONOSIp[node])
-
- #Uninstall everywhere
- main.log.step( "Cleaning Environment..." )
- for i in range(1, maxNodes + 1):
- main.log.info(" Uninstalling ONOS " + str(i) )
- main.ONOSbench.onosUninstall( ONOSIp[i] )
-
- #construct the cell file
- main.log.info("Creating cell file")
- cellIp = []
- for node in range (1, clusterCount + 1):
- cellIp.append(ONOSIp[node])
-
- main.ONOSbench.createCellFile(BENCHIp,cellName,MN1Ip,str(Apps), *cellIp)
-
- main.step( "Set Cell" )
- main.ONOSbench.setCell(cellName)
-
- main.step( "Creating ONOS package" )
- packageResult = main.ONOSbench.onosPackage()
-
- main.step( "verify cells" )
- verifyCellResult = main.ONOSbench.verifyCell()
-
- main.log.report( "Initializing " + str( clusterCount ) + " node cluster." )
- for node in range(1, clusterCount + 1):
- main.log.info("Starting ONOS " + str(node) + " at IP: " + ONOSIp[node])
- main.ONOSbench.onosInstall( ONOSIp[node])
-
- for node in range(1, clusterCount + 1):
- for i in range( 2 ):
- isup = main.ONOSbench.isup( ONOSIp[node] )
- if isup:
- main.log.info("ONOS " + str(node) + " is up\n")
- break
- if not isup:
- main.log.report( "ONOS " + str(node) + " didn't start!" )
- main.log.info("Startup sequence complete")
-
- def CASE2( self, main ):
-
- print ("clusterCount: " + str(clusterCount))
- print ("scale: " + str(scale))
- print ("ONOSIp: " + str(ONOSIp))
- print ("INIT: " + str(init))
-
diff --git a/TestON/tests/ScaleOutTemplate/ScaleOutTemplate.topo b/TestON/tests/ScaleOutTemplate/ScaleOutTemplate.topo
deleted file mode 100644
index d82f3fd..0000000
--- a/TestON/tests/ScaleOutTemplate/ScaleOutTemplate.topo
+++ /dev/null
@@ -1,144 +0,0 @@
-<TOPOLOGY>
-
- <COMPONENT>
-
- <ONOSbench>
- <host>OCN</host>
- <user>admin</user>
- <password>onos_test</password>
- <type>OnosDriver</type>
- <connect_order>1</connect_order>
- <COMPONENTS><home>~/onos</home></COMPONENTS>
- </ONOSbench>
-
- <ONOS1cli>
- <host>OCN</host>
- <user>admin</user>
- <password>onos_test</password>
- <type>OnosCliDriver</type>
- <connect_order>2</connect_order>
- <COMPONENTS> </COMPONENTS>
- </ONOS1cli>
-
- <ONOS2cli>
- <host>OCN</host>
- <user>admin</user>
- <password>onos_test</password>
- <type>OnosCliDriver</type>
- <connect_order>3</connect_order>
- <COMPONENTS> </COMPONENTS>
- </ONOS2cli>
-
- <ONOS3cli>
- <host>OCN</host>
- <user>admin</user>
- <password>onos_test</password>
- <type>OnosCliDriver</type>
- <connect_order>4</connect_order>
- <COMPONENTS> </COMPONENTS>
- </ONOS3cli>
-
- <ONOS4cli>
- <host>OCN</host>
- <user>admin</user>
- <password>onos_test</password>
- <type>OnosCliDriver</type>
- <connect_order>5</connect_order>
- <COMPONENTS> </COMPONENTS>
- </ONOS4cli>
-
- <ONOS5cli>
- <host>OCN</host>
- <user>admin</user>
- <password>onos_test</password>
- <type>OnosCliDriver</type>
- <connect_order>6</connect_order>
- <COMPONENTS> </COMPONENTS>
- </ONOS5cli>
-
- <ONOS6cli>
- <host>OCN</host>
- <user>admin</user>
- <password>onos_test</password>
- <type>OnosCliDriver</type>
- <connect_order>7</connect_order>
- <COMPONENTS> </COMPONENTS>
- </ONOS6cli>
-
- <ONOS7cli>
- <host>OCN</host>
- <user>admin</user>
- <password>onos_test</password>
- <type>OnosCliDriver</type>
- <connect_order>8</connect_order>
- <COMPONENTS> </COMPONENTS>
- </ONOS7cli>
-
- <ONOS1>
- <host>OC1</host>
- <user>sdn</user>
- <password>rocks</password>
- <type>OnosDriver</type>
- <connect_order>9</connect_order>
- <COMPONENTS> </COMPONENTS>
- </ONOS1>
-
- <ONOS2>
- <host>OC2</host>
- <user>sdn</user>
- <password>rocks</password>
- <type>OnosDriver</type>
- <connect_order>10</connect_order>
- <COMPONENTS> </COMPONENTS>
- </ONOS2>
-
- <ONOS3>
- <host>OC3</host>
- <user>sdn</user>
- <password>rocks</password>
- <type>OnosDriver</type>
- <connect_order>11</connect_order>
- <COMPONENTS> </COMPONENTS>
- </ONOS3>
-
- <ONOS4>
- <host>OC4</host>
- <user>sdn</user>
- <password>rocks</password>
- <type>OnosDriver</type>
- <connect_order>12</connect_order>
- <COMPONENTS> </COMPONENTS>
- </ONOS4>
-
-
- <ONOS5>
- <host>OC5</host>
- <user>sdn</user>
- <password>rocks</password>
- <type>OnosDriver</type>
- <connect_order>13</connect_order>
- <COMPONENTS> </COMPONENTS>
- </ONOS5>
-
- <ONOS6>
- <host>OC6</host>
- <user>sdn</user>
- <password>rocks</password>
- <type>OnosDriver</type>
- <connect_order>14</connect_order>
- <COMPONENTS> </COMPONENTS>
- </ONOS6>
-
- <ONOS7>
- <host>OC7</host>
- <user>sdn</user>
- <password>rocks</password>
- <type>OnosDriver</type>
- <connect_order>15</connect_order>
- <COMPONENTS> </COMPONENTS>
- </ONOS7>
-
- </COMPONENT>
-
-</TOPOLOGY>
-
diff --git a/TestON/tests/ScaleOutTemplate/__init__.py b/TestON/tests/ScaleOutTemplate/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/TestON/tests/ScaleOutTemplate/__init__.py
+++ /dev/null
diff --git a/TestON/tests/TopoConvNext/TopoConvNext.params b/TestON/tests/TopoConvNext/TopoConvNext.params
deleted file mode 100644
index 198befb..0000000
--- a/TestON/tests/TopoConvNext/TopoConvNext.params
+++ /dev/null
@@ -1,69 +0,0 @@
-<PARAMS>
- <testcases>1,2,3,2,3,2,3,2,4,2,3,2,3,2,3,2</testcases>
-
- <ENV>
- <cellName>topo_conv_test</cellName>
- </ENV>
-
- <GIT>
- #autoPull 'on' or 'off'
- <autoPull>off</autoPull>
- <checkout>master</checkout>
- </GIT>
-
- <CTRL>
- <user>admin</user>
- <ip1>10.128.174.1</ip1>
- <port1>6633</port1>
- <ip2>10.128.174.2</ip2>
- <port2>6633</port2>
- <ip3>10.128.174.3</ip3>
- <port3>6633</port3>
- <ip4>10.128.174.4</ip4>
- <port4>6633</port4>
- <ip5>10.128.174.5</ip5>
- <port5>6633</port5>
- <ip6>10.128.174.6</ip6>
- <port6>6633</port6>
- <ip7>10.128.174.7</ip7>
- <port7>6633</port7>
- </CTRL>
-
- <MN>
- <ip1>10.128.10.90</ip1>
- <ip2>10.128.10.91</ip2>
- </MN>
-
- <BENCH>
- <ip>10.128.174.10</ip>
- </BENCH>
-
- <TEST>
- <onosLogFile>/opt/onos/log/karaf*</onosLogFile>
-
- #Number of times to iterate each case
- <numIter>3</numIter>
- <numSwitch1>500</numSwitch1>
- <numSwitch2>400</numSwitch2>
- <numSwitch3>200</numSwitch3>
- <numSwitch4>300</numSwitch4>
- #Number of iterations to ignore initially
- <iterIgnore>1</iterIgnore>
-
- <topo_accumulator_config>
- large_topo_event_accumulator.cfg
- </topo_accumulator_config>
- <topo_config_name>
- org.onlab.onos.net.topology.impl.DefaultTopologyProvider.cfg
- </topo_config_name>
-
- <swDisc100Threshold>0,100000</swDisc100Threshold>
- </TEST>
-
- <JSON>
- <deviceTimestamp>topologyDeviceEventTimestamp</deviceTimestamp>
- <hostTimestamp>topologyHostEventTimestamp</hostTimestamp>
- <linkTimestamp>topologyLinkEventTimestamp</linkTimestamp>
- <graphTimestamp>topologyGraphEventTimestamp</graphTimestamp>
- </JSON>
-</PARAMS>
diff --git a/TestON/tests/TopoConvNext/TopoConvNext.py b/TestON/tests/TopoConvNext/TopoConvNext.py
deleted file mode 100644
index c114e48..0000000
--- a/TestON/tests/TopoConvNext/TopoConvNext.py
+++ /dev/null
@@ -1,1379 +0,0 @@
-# TopoConvNext
-#
-# Topology Convergence scale-out test for ONOS-next
-# NOTE: This test supports up to 7 nodes scale-out scenario
-#
-# NOTE: Ensure that you have 'tablet.json' file
-# in the onos/tools/package/config directory
-# NOTE: You must start this test initially with 3 nodes
-#
-# andrew@onlab.us
-
-import time
-import sys
-import os
-import re
-
-
-class TopoConvNext:
-
- def __init__( self ):
- self.default = ''
-
- def CASE1( self, main ):
- """
- ONOS startup sequence
- """
- import time
-
- #******
- # Global cluster count for scale-out purposes
- global cluster_count
- global topo_iteration
- topo_iteration = 1
- cluster_count = 1
- #******
- cell_name = main.params[ 'ENV' ][ 'cellName' ]
-
- git_pull = main.params[ 'GIT' ][ 'autoPull' ]
- checkout_branch = main.params[ 'GIT' ][ 'checkout' ]
-
- ONOS1_ip = main.params[ 'CTRL' ][ 'ip1' ]
- ONOS2_ip = main.params[ 'CTRL' ][ 'ip2' ]
- ONOS3_ip = main.params[ 'CTRL' ][ 'ip3' ]
- ONOS4_ip = main.params[ 'CTRL' ][ 'ip4' ]
- ONOS5_ip = main.params[ 'CTRL' ][ 'ip5' ]
- ONOS6_ip = main.params[ 'CTRL' ][ 'ip6' ]
- ONOS7_ip = main.params[ 'CTRL' ][ 'ip7' ]
- MN1_ip = main.params[ 'MN' ][ 'ip1' ]
- BENCH_ip = main.params[ 'BENCH' ][ 'ip' ]
-
- main.case( "Setting up test environment" )
- main.log.info( "copying topology event accumulator config file" +
- " to ONOS package/etc/ directory" )
- topo_config_name = main.params[ 'TEST' ][ 'topo_config_name' ]
- topo_config =\
- main.params[ 'TEST' ][ 'topo_accumulator_config' ]
- main.ONOSbench.handle.sendline( "cp ~/" + topo_config +
- " ~/ONOS/tools/package/etc/" +
- topo_config_name )
- main.ONOSbench.handle.expect( "\$" )
-
- main.log.info( "Uninstalling previous instances" )
- #main.ONOSbench.onos_uninstall( node_ip=ONOS1_ip )
- main.ONOSbench.onos_uninstall( node_ip=ONOS2_ip )
- main.ONOSbench.onos_uninstall( node_ip=ONOS3_ip )
- main.ONOSbench.onos_uninstall( node_ip=ONOS4_ip )
- main.ONOSbench.onos_uninstall( node_ip=ONOS5_ip )
- main.ONOSbench.onos_uninstall( node_ip=ONOS6_ip )
- main.ONOSbench.onos_uninstall( node_ip=ONOS7_ip )
-
- main.log.report( "Setting up test environment" )
-
- main.step( "Creating cell file" )
- cell_file_result = main.ONOSbench.create_cell_file(
- BENCH_ip, cell_name, MN1_ip,
- "onos-core,onos-app-metrics",
- # ONOS1_ip, ONOS2_ip, ONOS3_ip )
- ONOS1_ip )
-
- main.step( "Applying cell file to environment" )
- cell_apply_result = main.ONOSbench.set_cell( cell_name )
- verify_cell_result = main.ONOSbench.verify_cell()
-
- main.step( "Removing raft logs" )
- main.ONOSbench.onos_remove_raft_logs()
- time.sleep( 10 )
-
- main.step( "Git checkout and pull " + checkout_branch )
- if git_pull == 'on':
- checkout_result = \
- main.ONOSbench.git_checkout( checkout_branch )
- pull_result = main.ONOSbench.git_pull()
- else:
- checkout_result = main.TRUE
- pull_result = main.TRUE
- main.log.info( "Skipped git checkout and pull" )
-
- main.log.report( "Commit information - " )
- main.ONOSbench.get_version()
-
- main.step( "Using mvn clean & install" )
- #mvn_result = main.ONOSbench.clean_install()
- mvn_result = main.TRUE
-
- main.step( "Set cell for ONOS cli env" )
- main.ONOS1cli.set_cell( cell_name )
- # main.ONOS2cli.set_cell( cell_name )
- # main.ONOS3cli.set_cell( cell_name )
-
- main.step( "Creating ONOS package" )
- package_result = main.ONOSbench.onos_package()
-
- # Start test with single node only
- main.step( "Installing ONOS package" )
- install1_result = main.ONOSbench.onos_install( node=ONOS1_ip )
- #install2_result = main.ONOSbench.onos_install( node=ONOS2_ip )
- #install3_result = main.ONOSbench.onos_install( node=ONOS3_ip )
-
- time.sleep( 10 )
-
- main.step( "Start onos cli" )
- cli1 = main.ONOS1cli.start_onos_cli( ONOS1_ip )
- #cli2 = main.ONOS2cli.start_onos_cli( ONOS2_ip )
- #cli3 = main.ONOS3cli.start_onos_cli( ONOS3_ip )
-
- main.step( "Enable metrics feature" )
- # main.ONOS1cli.feature_install( "onos-app-metrics" )
-
- utilities.assert_equals( expect=main.TRUE,
- actual=cell_file_result and cell_apply_result and
- verify_cell_result and checkout_result and
- pull_result and mvn_result and
- install1_result, # and install2_result and
- # install3_result,
- onpass="Test Environment setup successful",
- onfail="Failed to setup test environment" )
-
- def CASE2( self, main ):
- """
- 100 Switch discovery latency
-
- Important:
- This test case can be potentially dangerous if
- your machine has previously set iptables rules.
- One of the steps of the test case will flush
- all existing iptables rules.
- Note:
- You can specify the number of switches in the
- params file to adjust the switch discovery size
- ( and specify the corresponding topology in Mininet1
- .topo file )
- """
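- # Overall flow: block switch TCP connections with iptables, wait for ONOS
- # to mark all devices unavailable, flush iptables and record t0, then poll
- # each node's devices until every switch reappears and use the graph
- # timestamp metric to compute the discovery (convergence) latency.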
- import time
- import subprocess
- import os
- import requests
- import json
- import numpy
-
- ONOS_ip_list = []
- ONOS_ip_list.append( '0' )
- ONOS_ip_list.append( main.params[ 'CTRL' ][ 'ip1' ] )
- ONOS_ip_list.append( main.params[ 'CTRL' ][ 'ip2' ] )
- ONOS_ip_list.append( main.params[ 'CTRL' ][ 'ip3' ] )
- ONOS_ip_list.append( main.params[ 'CTRL' ][ 'ip4' ] )
- ONOS_ip_list.append( main.params[ 'CTRL' ][ 'ip5' ] )
- ONOS_ip_list.append( main.params[ 'CTRL' ][ 'ip6' ] )
- ONOS_ip_list.append( main.params[ 'CTRL' ][ 'ip7' ] )
- MN1_ip = main.params[ 'MN' ][ 'ip1' ]
- ONOS_user = main.params[ 'CTRL' ][ 'user' ]
-
- default_sw_port = main.params[ 'CTRL' ][ 'port1' ]
-
- # Number of iterations of case
- num_iter = main.params[ 'TEST' ][ 'numIter' ]
- iter_ignore = int( main.params[ 'TEST' ][ 'iterIgnore' ] )
-
- #***********
- # Global number of switches that change
- # throughout the test
- global num_sw
- global topo_iteration
- global cluster_count
- if topo_iteration == 1:
- num_sw = main.params[ 'TEST' ][ 'numSwitch1' ]
- elif topo_iteration == 2:
- num_sw = main.params[ 'TEST' ][ 'numSwitch2' ]
- elif topo_iteration == 3:
- num_sw = main.params[ 'TEST' ][ 'numSwitch3' ]
- elif topo_iteration == 4:
- num_sw = main.params[ 'TEST' ][ 'numSwitch4' ]
- #***********
-
- # Timestamp 'keys' for json metrics output.
- # These are subject to change, hence moved into params
- deviceTimestamp = main.params[ 'JSON' ][ 'deviceTimestamp' ]
- graphTimestamp = main.params[ 'JSON' ][ 'graphTimestamp' ]
-
- # Threshold for this test case
- sw_disc_threshold_str = main.params[ 'TEST' ][ 'swDisc100Threshold' ]
- sw_disc_threshold_obj = sw_disc_threshold_str.split( "," )
- sw_disc_threshold_min = int( sw_disc_threshold_obj[ 0 ] )
- sw_disc_threshold_max = int( sw_disc_threshold_obj[ 1 ] )
-
- assertion = main.TRUE
- sw_discovery_lat_list = []
- syn_ack_delta_list = []
-
- main.case( str( num_sw ) + " switches distributed across " +
- str( cluster_count ) + " nodes convergence latency" )
-
- main.log.report( "Large topology convergence and scale-out test" )
- main.log.report( "Currently active ONOS node(s): " )
- report_str = "Node "
- for node in range( 1, cluster_count + 1 ):
- report_str += ( str( node ) + " " )
- main.log.report( report_str )
- main.log.report( "Topology size: " + str( num_sw ) + " switches" )
-
- main.step( "Distributing " + num_sw + " switches to each ONOS" )
- index = 1
- for node in range( 1, cluster_count + 1 ):
- for i in range( index, ( int( num_sw ) / cluster_count ) + index ):
- main.Mininet1.assign_sw_controller(
- sw=str( i ),
- ip1=ONOS_ip_list[ node ],
- port1=default_sw_port )
- index = i + 1
- # for i in range( 1, int( num_sw )+1 ):
- # main.Mininet1.assign_sw_controller(
- # sw=str( i ),
- # ip1="10.128.174.1",
- # port1="6633" )
-
- main.log.info( "Please check ptpd configuration to ensure " +
- "all nodes' system times are in sync" )
-
- time.sleep( 10 )
-
- for i in range( 0, int( num_iter ) ):
- main.step( "Set iptables rule to block sw connections" )
-
- # INPUT rules
- main.ONOS1.handle.sendline(
- "sudo iptables -A INPUT -p tcp -s " +
- MN1_ip + " --dport " + default_sw_port + " -j DROP" )
- main.ONOS2.handle.sendline(
- "sudo iptables -A INPUT -p tcp -s " +
- MN1_ip + " --dport " + default_sw_port + " -j DROP" )
- main.ONOS3.handle.sendline(
- "sudo iptables -A INPUT -p tcp -s " +
- MN1_ip + " --dport " + default_sw_port + " -j DROP" )
- main.ONOS4.handle.sendline(
- "sudo iptables -A INPUT -p tcp -s " +
- MN1_ip + " --dport " + default_sw_port + " -j DROP" )
- main.ONOS5.handle.sendline(
- "sudo iptables -A INPUT -p tcp -s " +
- MN1_ip + " --dport " + default_sw_port + " -j DROP" )
- main.ONOS6.handle.sendline(
- "sudo iptables -A INPUT -p tcp -s " +
- MN1_ip + " --dport " + default_sw_port + " -j DROP" )
- main.ONOS7.handle.sendline(
- "sudo iptables -A INPUT -p tcp -s " +
- MN1_ip + " --dport " + default_sw_port + " -j DROP" )
-
- # OUTPUT rules
- main.ONOS1.handle.sendline(
- "sudo iptables -A OUTPUT -p tcp -s " +
- MN1_ip + " --dport " + default_sw_port + " -j DROP" )
- main.ONOS2.handle.sendline(
- "sudo iptables -A OUTPUT -p tcp -s " +
- MN1_ip + " --dport " + default_sw_port + " -j DROP" )
- main.ONOS3.handle.sendline(
- "sudo iptables -A OUTPUT -p tcp -s " +
- MN1_ip + " --dport " + default_sw_port + " -j DROP" )
- main.ONOS4.handle.sendline(
- "sudo iptables -A OUTPUT -p tcp -s " +
- MN1_ip + " --dport " + default_sw_port + " -j DROP" )
- main.ONOS5.handle.sendline(
- "sudo iptables -A OUTPUT -p tcp -s " +
- MN1_ip + " --dport " + default_sw_port + " -j DROP" )
- main.ONOS6.handle.sendline(
- "sudo iptables -A OUTPUT -p tcp -s " +
- MN1_ip + " --dport " + default_sw_port + " -j DROP" )
- main.ONOS7.handle.sendline(
- "sudo iptables -A OUTPUT -p tcp -s " +
- MN1_ip + " --dport " + default_sw_port + " -j DROP" )
-
- main.log.info( "Please wait for switch connection to timeout" )
-
- # time.sleep( 60 )
- # if cluster_count >= 3:
- # time.sleep( 60 )
- # if cluster_count >= 5:
- # time.sleep( 30 )
- # if cluster_count >= 6:
- # time.sleep( 30 )
-
- if cluster_count >= 3:
- main.ONOS1.handle.sendline(
- "tshark -i eth0 -t e | " +
- "grep 'SYN, ACK' | grep '6633' >" +
- "/tmp/syn_ack_onos1_iter" +
- str( i ) +
- ".txt &" )
- main.ONOS2.handle.sendline(
- "tshark -i eth0 -t e | " +
- "grep 'SYN, ACK' | grep '6633' >" +
- "/tmp/syn_ack_onos2_iter" +
- str( i ) +
- ".txt &" )
- main.ONOS3.handle.sendline(
- "tshark -i eth0 -t e | " +
- "grep 'SYN, ACK' | grep '6633' >" +
- "/tmp/syn_ack_onos3_iter" +
- str( i ) +
- ".txt &" )
- if cluster_count >= 4:
- main.ONOS4.handle.sendline(
- "tshark -i eth0 -t e | " +
- "grep 'SYN, ACK' | grep '6633' >" +
- "/tmp/syn_ack_onos4_iter" +
- str( i ) +
- ".txt &" )
- if cluster_count >= 5:
- main.ONOS5.handle.sendline(
- "tshark -i eth0 -t e | " +
- "grep 'SYN, ACK' | grep '6633' >" +
- "/tmp/syn_ack_onos5_iter" +
- str( i ) +
- ".txt &" )
- if cluster_count >= 6:
- main.ONOS6.handle.sendline(
- "tshark -i eth0 -t e | " +
- "grep 'SYN, ACK' | grep '6633' >" +
- "/tmp/syn_ack_onos6_iter" +
- str( i ) +
- ".txt &" )
- if cluster_count == 7:
- main.ONOS7.handle.sendline(
- "tshark -i eth0 -t e | " +
- "grep 'SYN, ACK' | grep '6633' >" +
- "/tmp/syn_ack_onos7_iter" +
- str( i ) +
- ".txt &" )
-
- # NOTE:
- # Delay before checking devices to
- # help prevent timing out from CLI
- # due to multiple command issuing
- time.sleep( 20 )
-
- loop = True
- loop_count = 0
- device_count = 0
- while loop_count < 60 and loop:
- main.log.info( "Checking devices for device down" )
-
- temp_len = 0
- device_str1 = main.ONOS1cli.devices(
- node_ip=ONOS_ip_list[ 1 ] )
- device_json1 = json.loads( device_str1 )
- json_len = len( device_json1 )
-
- # NOTE: May want to check the rest of
- # the ONOS instances for device down as well
-
- for device1 in device_json1:
- temp_len = temp_len + 1
- if device1[ 'available' ]:
- loop = True
- break
- # if I'm on the last json object and I still haven't
- # broken out of the loop, it means there were
- # no available devices
- elif temp_len == json_len - 1:
- main.log.info( "Temp length: " + str( temp_len ) )
- main.step( "Flushing iptables and obtaining t0" )
- t0_system = time.time() * 1000
-
- main.ONOS1.handle.sendline( "sudo iptables -F" )
- main.ONOS2.handle.sendline( "sudo iptables -F" )
- main.ONOS3.handle.sendline( "sudo iptables -F" )
- main.ONOS4.handle.sendline( "sudo iptables -F" )
- main.ONOS5.handle.sendline( "sudo iptables -F" )
- main.ONOS6.handle.sendline( "sudo iptables -F" )
- main.ONOS7.handle.sendline( "sudo iptables -F" )
-
- loop = False
- break
-
- loop_count += 1
- time.sleep( 1 )
-
- main.log.info( "System time t0: " + str( t0_system ) )
-
- counter_loop = 0
- counter_avail1 = 0
- counter_avail2 = 0
- counter_avail3 = 0
- counter_avail4 = 0
- counter_avail5 = 0
- counter_avail6 = 0
- counter_avail7 = 0
- onos1_dev = False
- onos2_dev = False
- onos3_dev = False
- onos4_dev = False
- onos5_dev = False
- onos6_dev = False
- onos7_dev = False
-
- # TODO: Think of a more elegant way to check all
- # switches across all nodes
- # Good luck debugging this loop
- while counter_loop < 60:
- for node in range( 1, cluster_count + 1 ):
- if node == 1 and not onos1_dev:
- main.log.info( "Checking node 1 for device " +
- "discovery" )
- device_str_obj1 = main.ONOS1cli.devices(
- node_ip=ONOS_ip_list[ 1 ] )
- device_json1 = json.loads( device_str_obj1 )
- for device1 in device_json1:
- if device1[ 'available' ]:
- counter_avail1 += 1
- if counter_avail1 == int( num_sw ):
- onos1_dev = True
- main.log.info( "All devices have been" +
- " discovered on ONOS1" )
- else:
- counter_avail1 = 0
- if node == 2 and not onos2_dev:
- main.log.info( "Checking node 2 for device " +
- "discovery" )
- device_str_obj2 = main.ONOS2cli.devices(
- node_ip=ONOS_ip_list[ 2 ] )
- device_json2 = json.loads( device_str_obj2 )
- for device2 in device_json2:
- if device2[ 'available' ]:
- counter_avail2 += 1
- if counter_avail2 == int( num_sw ):
- onos2_dev = True
- main.log.info( "All devices have been" +
- " discovered on ONOS2" )
- else:
- counter_avail2 = 0
- if node == 3 and not onos3_dev:
- main.log.info( "Checking node 3 for device " +
- "discovery" )
- device_str_obj3 = main.ONOS3cli.devices(
- node_ip=ONOS_ip_list[ 3 ] )
- device_json3 = json.loads( device_str_obj3 )
- for device3 in device_json3:
- if device3[ 'available' ]:
- counter_avail3 += 1
- if counter_avail3 == int( num_sw ):
- onos3_dev = True
- main.log.info( "All devices have been" +
- " discovered on ONOS3" )
- else:
- counter_avail3 = 0
- if node == 4 and not onos4_dev:
- main.log.info( "Checking node 4 for device " +
- "discovery" )
- device_str_obj4 = main.ONOS4cli.devices(
- node_ip=ONOS_ip_list[ 4 ] )
- device_json4 = json.loads( device_str_obj4 )
- for device4 in device_json4:
- if device4[ 'available' ]:
- counter_avail4 += 1
- if counter_avail4 == int( num_sw ):
- onos4_dev = True
- main.log.info( "All devices have been" +
- " discovered on ONOS4" )
- else:
- counter_avail4 = 0
- if node == 5 and not onos5_dev:
- main.log.info( "Checking node 5 for device " +
- "discovery" )
- device_str_obj5 = main.ONOS5cli.devices(
- node_ip=ONOS_ip_list[ 5 ] )
- device_json5 = json.loads( device_str_obj5 )
- for device5 in device_json5:
- if device5[ 'available' ]:
- counter_avail5 += 1
- if counter_avail5 == int( num_sw ):
- onos5_dev = True
- main.log.info( "All devices have been" +
- " discovered on ONOS5" )
- else:
- counter_avail5 = 0
- if node == 6 and not onos6_dev:
- main.log.info( "Checking node 6 for device " +
- "discovery" )
- device_str_obj6 = main.ONOS6cli.devices(
- node_ip=ONOS_ip_list[ 6 ] )
- device_json6 = json.loads( device_str_obj6 )
- for device6 in device_json6:
- if device6[ 'available' ]:
- counter_avail6 += 1
- if counter_avail6 == int( num_sw ):
- onos6_dev = True
- main.log.info( "All devices have been" +
- " discovered on ONOS6" )
- else:
- counter_avail6 = 0
- if node == 7 and not onos7_dev:
- main.log.info( "Checking node 7 for device " +
- "discovery" )
- device_str_obj7 = main.ONOS7cli.devices(
- node_ip=ONOS_ip_list[ 7 ] )
- device_json7 = json.loads( device_str_obj7 )
- for device7 in device_json7:
- if device7[ 'available' ]:
- counter_avail7 += 1
- if counter_avail7 == int( num_sw ):
- onos7_dev = True
- main.log.info( "All devices have been" +
- " discovered on ONOS7" )
- else:
- counter_avail7 = 0
- # END node loop
-
- # TODO: clean up this mess of an if statements if possible
- # Treat each if as a separate test case with the given
- # cluster count. Hence when the cluster count changes
- # the desired calculations will be made
- if cluster_count == 1:
- if onos1_dev:
- main.log.info( "All devices have been discovered" +
- " on all ONOS instances" )
- time.sleep( 5 )
- json_str_metrics_1 =\
- main.ONOS1cli.topology_events_metrics()
- json_obj_1 = json.loads( json_str_metrics_1 )
- graph_timestamp_1 =\
- json_obj_1[ graphTimestamp ][ 'value' ]
-
- graph_lat_1 = \
- int( graph_timestamp_1 ) - int( t0_system )
-
- main.log.info( "Graph Timestamp ONOS1: " +
- str( graph_timestamp_1 ) )
-
- if graph_lat_1 > sw_disc_threshold_min\
- and graph_lat_1 < sw_disc_threshold_max\
- and int( i ) > iter_ignore:
- sw_discovery_lat_list.append(
- graph_lat_1 )
- main.log.info(
- "Sw discovery latency of " +
- str( cluster_count ) +
- " node(s): " +
- str( graph_lat_1 ) +
- " ms" )
- else:
- main.log.info( "Switch discovery latency " +
- "exceeded the threshold." )
- main.log.info( str( graph_lat_1 ) + " ms" )
- # Break while loop
- break
- if cluster_count == 2:
- if onos1_dev and onos2_dev:
- main.log.info( "All devices have been discovered" +
- " on all " + str( cluster_count ) +
- " ONOS instances" )
- time.sleep( 5 )
-
- json_str_metrics_1 =\
- main.ONOS1cli.topology_events_metrics()
- json_str_metrics_2 =\
- main.ONOS2cli.topology_events_metrics()
- json_obj_1 = json.loads( json_str_metrics_1 )
- json_obj_2 = json.loads( json_str_metrics_2 )
- graph_timestamp_1 =\
- json_obj_1[ graphTimestamp ][ 'value' ]
- graph_timestamp_2 =\
- json_obj_2[ graphTimestamp ][ 'value' ]
-
- graph_lat_1 = \
- int( graph_timestamp_1 ) - int( t0_system )
- graph_lat_2 = \
- int( graph_timestamp_2 ) - int( t0_system )
-
- main.log.info( "Graph Timestamp ONOS1: " +
- str( graph_timestamp_1 ) )
- main.log.info( "Graph Timestamp ONOS2: " +
- str( graph_timestamp_2 ) )
-
- max_graph_lat = max( graph_lat_1,
- graph_lat_2 )
-
- if max_graph_lat > sw_disc_threshold_min\
- and max_graph_lat < sw_disc_threshold_max\
- and int( i ) > iter_ignore:
- sw_discovery_lat_list.append(
- max_graph_lat )
- main.log.info(
- "Sw discovery latency of " +
- str( cluster_count ) +
- " node(s): " +
- str( max_graph_lat ) +
- " ms" )
- else:
- main.log.info( "Switch discovery latency " +
- "exceeded the threshold." )
- main.log.info( str( max_graph_lat ) + " ms" )
- break
- if cluster_count == 3:
- if onos1_dev and onos2_dev and onos3_dev:
- main.log.info( "All devices have been discovered" +
- " on all " + str( cluster_count ) +
- " ONOS instances" )
-
- # TODO: Investigate this sleep
- # added to 'pad' the results with
- # plenty of time to 'catch up'
- time.sleep( 5 )
-
- json_str_metrics_1 =\
- main.ONOS1cli.topology_events_metrics()
- json_str_metrics_2 =\
- main.ONOS2cli.topology_events_metrics()
- json_str_metrics_3 =\
- main.ONOS3cli.topology_events_metrics()
- json_obj_1 = json.loads( json_str_metrics_1 )
- json_obj_2 = json.loads( json_str_metrics_2 )
- json_obj_3 = json.loads( json_str_metrics_3 )
- graph_timestamp_1 =\
- json_obj_1[ graphTimestamp ][ 'value' ]
- graph_timestamp_2 =\
- json_obj_2[ graphTimestamp ][ 'value' ]
- graph_timestamp_3 =\
- json_obj_3[ graphTimestamp ][ 'value' ]
-
- graph_lat_1 = \
- int( graph_timestamp_1 ) - int( t0_system )
- graph_lat_2 = \
- int( graph_timestamp_2 ) - int( t0_system )
- graph_lat_3 = \
- int( graph_timestamp_3 ) - int( t0_system )
-
- main.log.info( "Graph Timestamp ONOS1: " +
- str( graph_timestamp_1 ) )
- main.log.info( "Graph Timestamp ONOS2: " +
- str( graph_timestamp_2 ) )
- main.log.info( "Graph Timestamp ONOS3: " +
- str( graph_timestamp_3 ) )
-
- max_graph_lat = max( graph_lat_1,
- graph_lat_2,
- graph_lat_3 )
-
- if max_graph_lat > sw_disc_threshold_min\
- and max_graph_lat < sw_disc_threshold_max\
- and int( i ) > iter_ignore:
- sw_discovery_lat_list.append(
- max_graph_lat )
- main.log.info(
- "Sw discovery latency of " +
- str( cluster_count ) +
- " node(s): " +
- str( max_graph_lat ) +
- " ms" )
- else:
- main.log.info( "Switch discovery latency " +
- "exceeded the threshold." )
- main.log.info( str( max_graph_lat ) + " ms" )
-
- break
- if cluster_count == 4:
- if onos1_dev and onos2_dev and onos3_dev and\
- onos4_dev:
- main.log.info( "All devices have been discovered" +
- " on all ONOS instances" )
- json_str_metrics_1 =\
- main.ONOS1cli.topology_events_metrics()
- json_str_metrics_2 =\
- main.ONOS2cli.topology_events_metrics()
- json_str_metrics_3 =\
- main.ONOS3cli.topology_events_metrics()
- json_str_metrics_4 =\
- main.ONOS4cli.topology_events_metrics()
- json_obj_1 = json.loads( json_str_metrics_1 )
- json_obj_2 = json.loads( json_str_metrics_2 )
- json_obj_3 = json.loads( json_str_metrics_3 )
- json_obj_4 = json.loads( json_str_metrics_4 )
- graph_timestamp_1 =\
- json_obj_1[ graphTimestamp ][ 'value' ]
- graph_timestamp_2 =\
- json_obj_2[ graphTimestamp ][ 'value' ]
- graph_timestamp_3 =\
- json_obj_3[ graphTimestamp ][ 'value' ]
- graph_timestamp_4 =\
- json_obj_4[ graphTimestamp ][ 'value' ]
-
- graph_lat_1 = \
- int( graph_timestamp_1 ) - int( t0_system )
- graph_lat_2 = \
- int( graph_timestamp_2 ) - int( t0_system )
- graph_lat_3 = \
- int( graph_timestamp_3 ) - int( t0_system )
- graph_lat_4 = \
- int( graph_timestamp_4 ) - int( t0_system )
-
- main.log.info( "Graph Timestamp ONOS1: " +
- str( graph_timestamp_1 ) )
- main.log.info( "Graph Timestamp ONOS2: " +
- str( graph_timestamp_2 ) )
- main.log.info( "Graph Timestamp ONOS3: " +
- str( graph_timestamp_3 ) )
- main.log.info( "Graph Timestamp ONOS4: " +
- str( graph_timestamp_4 ) )
-
- max_graph_lat = max( graph_lat_1,
- graph_lat_2,
- graph_lat_3,
- graph_lat_4 )
-
- if max_graph_lat > sw_disc_threshold_min\
- and max_graph_lat < sw_disc_threshold_max\
- and int( i ) > iter_ignore:
- sw_discovery_lat_list.append(
- max_graph_lat )
- main.log.info(
- "Sw discovery latency of " +
- str( cluster_count ) +
- " node(s): " +
- str( max_graph_lat ) +
- " ms" )
- else:
- main.log.info( "Switch discovery latency " +
- "exceeded the threshold." )
- main.log.info( str( max_graph_lat ) + " ms" )
-
- break
- if cluster_count == 5:
- if onos1_dev and onos2_dev and onos3_dev and\
- onos4_dev and onos5_dev:
- main.log.info( "All devices have been discovered" +
- " on all ONOS instances" )
-
- # TODO: Investigate this sleep
- # added to 'pad' the results with
- # plenty of time to 'catch up'
- time.sleep( 5 )
-
- json_str_metrics_1 =\
- main.ONOS1cli.topology_events_metrics()
- json_str_metrics_2 =\
- main.ONOS2cli.topology_events_metrics()
- json_str_metrics_3 =\
- main.ONOS3cli.topology_events_metrics()
- json_str_metrics_4 =\
- main.ONOS4cli.topology_events_metrics()
- json_str_metrics_5 =\
- main.ONOS5cli.topology_events_metrics()
- json_obj_1 = json.loads( json_str_metrics_1 )
- json_obj_2 = json.loads( json_str_metrics_2 )
- json_obj_3 = json.loads( json_str_metrics_3 )
- json_obj_4 = json.loads( json_str_metrics_4 )
- json_obj_5 = json.loads( json_str_metrics_5 )
- graph_timestamp_1 =\
- json_obj_1[ graphTimestamp ][ 'value' ]
- graph_timestamp_2 =\
- json_obj_2[ graphTimestamp ][ 'value' ]
- graph_timestamp_3 =\
- json_obj_3[ graphTimestamp ][ 'value' ]
- graph_timestamp_4 =\
- json_obj_4[ graphTimestamp ][ 'value' ]
- graph_timestamp_5 =\
- json_obj_5[ graphTimestamp ][ 'value' ]
-
- graph_lat_1 = \
- int( graph_timestamp_1 ) - int( t0_system )
- graph_lat_2 = \
- int( graph_timestamp_2 ) - int( t0_system )
- graph_lat_3 = \
- int( graph_timestamp_3 ) - int( t0_system )
- graph_lat_4 = \
- int( graph_timestamp_4 ) - int( t0_system )
- graph_lat_5 = \
- int( graph_timestamp_5 ) - int( t0_system )
-
- main.log.info( "Graph Timestamp ONOS1: " +
- str( graph_timestamp_1 ) )
- main.log.info( "Graph Timestamp ONOS2: " +
- str( graph_timestamp_2 ) )
- main.log.info( "Graph Timestamp ONOS3: " +
- str( graph_timestamp_3 ) )
- main.log.info( "Graph Timestamp ONOS4: " +
- str( graph_timestamp_4 ) )
- main.log.info( "Graph Timestamp ONOS5: " +
- str( graph_timestamp_5 ) )
-
- max_graph_lat = max( graph_lat_1,
- graph_lat_2,
- graph_lat_3,
- graph_lat_4,
- graph_lat_5 )
-
- if max_graph_lat > sw_disc_threshold_min\
- and max_graph_lat < sw_disc_threshold_max\
- and int( i ) > iter_ignore:
- sw_discovery_lat_list.append(
- max_graph_lat )
- main.log.info(
- "Sw discovery latency of " +
- str( cluster_count ) +
- " node(s): " +
- str( max_graph_lat ) +
- " ms" )
- else:
- main.log.info( "Switch discovery latency " +
- "exceeded the threshold." )
- main.log.info( str( max_graph_lat ) + " ms" )
-
- break
- if cluster_count == 6:
- if onos1_dev and onos2_dev and onos3_dev and\
- onos4_dev and onos5_dev and onos6_dev:
- main.log.info( "All devices have been discovered" +
- " on all ONOS instances" )
- json_str_metrics_1 =\
- main.ONOS1cli.topology_events_metrics()
- json_str_metrics_2 =\
- main.ONOS2cli.topology_events_metrics()
- json_str_metrics_3 =\
- main.ONOS3cli.topology_events_metrics()
- json_str_metrics_4 =\
- main.ONOS4cli.topology_events_metrics()
- json_str_metrics_5 =\
- main.ONOS5cli.topology_events_metrics()
- json_str_metrics_6 =\
- main.ONOS6cli.topology_events_metrics()
- json_obj_1 = json.loads( json_str_metrics_1 )
- json_obj_2 = json.loads( json_str_metrics_2 )
- json_obj_3 = json.loads( json_str_metrics_3 )
- json_obj_4 = json.loads( json_str_metrics_4 )
- json_obj_5 = json.loads( json_str_metrics_5 )
- json_obj_6 = json.loads( json_str_metrics_6 )
- graph_timestamp_1 =\
- json_obj_1[ graphTimestamp ][ 'value' ]
- graph_timestamp_2 =\
- json_obj_2[ graphTimestamp ][ 'value' ]
- graph_timestamp_3 =\
- json_obj_3[ graphTimestamp ][ 'value' ]
- graph_timestamp_4 =\
- json_obj_4[ graphTimestamp ][ 'value' ]
- graph_timestamp_5 =\
- json_obj_5[ graphTimestamp ][ 'value' ]
- graph_timestamp_6 =\
- json_obj_6[ graphTimestamp ][ 'value' ]
-
- graph_lat_1 = \
- int( graph_timestamp_1 ) - int( t0_system )
- graph_lat_2 = \
- int( graph_timestamp_2 ) - int( t0_system )
- graph_lat_3 = \
- int( graph_timestamp_3 ) - int( t0_system )
- graph_lat_4 = \
- int( graph_timestamp_4 ) - int( t0_system )
- graph_lat_5 = \
- int( graph_timestamp_5 ) - int( t0_system )
- graph_lat_6 = \
- int( graph_timestamp_6 ) - int( t0_system )
-
- main.log.info( "Graph Timestamp ONOS1: " +
- str( graph_timestamp_1 ) )
- main.log.info( "Graph Timestamp ONOS2: " +
- str( graph_timestamp_2 ) )
- main.log.info( "Graph Timestamp ONOS3: " +
- str( graph_timestamp_3 ) )
- main.log.info( "Graph Timestamp ONOS4: " +
- str( graph_timestamp_4 ) )
- main.log.info( "Graph Timestamp ONOS5: " +
- str( graph_timestamp_5 ) )
- main.log.info( "Graph Timestamp ONOS6: " +
- str( graph_timestamp_6 ) )
-
- max_graph_lat = max( graph_lat_1,
- graph_lat_2,
- graph_lat_3,
- graph_lat_4,
- graph_lat_5,
- graph_lat_6 )
-
- if max_graph_lat > sw_disc_threshold_min\
- and max_graph_lat < sw_disc_threshold_max\
- and int( i ) > iter_ignore:
- sw_discovery_lat_list.append(
- max_graph_lat )
- main.log.info(
- "Sw discovery latency of " +
- str( cluster_count ) +
- " node(s): " +
- str( max_graph_lat ) +
- " ms" )
- else:
- main.log.info( "Switch discovery latency " +
- "exceeded the threshold." )
- main.log.info( str( max_graph_lat ) + " ms" )
-
- break
- if cluster_count == 7:
- if onos1_dev and onos2_dev and onos3_dev and\
- onos4_dev and onos5_dev and onos6_dev and\
- onos7_dev:
- main.log.info( "All devices have been discovered" +
- " on all ONOS instances" )
-
- # TODO: Investigate this sleep
- # added to 'pad' the results with
- # plenty of time to 'catch up'
- time.sleep( 5 )
-
- json_str_metrics_1 =\
- main.ONOS1cli.topology_events_metrics()
- json_str_metrics_2 =\
- main.ONOS2cli.topology_events_metrics()
- json_str_metrics_3 =\
- main.ONOS3cli.topology_events_metrics()
- json_str_metrics_4 =\
- main.ONOS4cli.topology_events_metrics()
- json_str_metrics_5 =\
- main.ONOS5cli.topology_events_metrics()
- json_str_metrics_6 =\
- main.ONOS6cli.topology_events_metrics()
- json_str_metrics_7 =\
- main.ONOS7cli.topology_events_metrics()
- json_obj_1 = json.loads( json_str_metrics_1 )
- json_obj_2 = json.loads( json_str_metrics_2 )
- json_obj_3 = json.loads( json_str_metrics_3 )
- json_obj_4 = json.loads( json_str_metrics_4 )
- json_obj_5 = json.loads( json_str_metrics_5 )
- json_obj_6 = json.loads( json_str_metrics_6 )
- json_obj_7 = json.loads( json_str_metrics_7 )
- graph_timestamp_1 =\
- json_obj_1[ graphTimestamp ][ 'value' ]
- graph_timestamp_2 =\
- json_obj_2[ graphTimestamp ][ 'value' ]
- graph_timestamp_3 =\
- json_obj_3[ graphTimestamp ][ 'value' ]
- graph_timestamp_4 =\
- json_obj_4[ graphTimestamp ][ 'value' ]
- graph_timestamp_5 =\
- json_obj_5[ graphTimestamp ][ 'value' ]
- graph_timestamp_6 =\
- json_obj_6[ graphTimestamp ][ 'value' ]
- graph_timestamp_7 =\
- json_obj_7[ graphTimestamp ][ 'value' ]
-
- graph_lat_1 = \
- int( graph_timestamp_1 ) - int( t0_system )
- graph_lat_2 = \
- int( graph_timestamp_2 ) - int( t0_system )
- graph_lat_3 = \
- int( graph_timestamp_3 ) - int( t0_system )
- graph_lat_4 = \
- int( graph_timestamp_4 ) - int( t0_system )
- graph_lat_5 = \
- int( graph_timestamp_5 ) - int( t0_system )
- graph_lat_6 = \
- int( graph_timestamp_6 ) - int( t0_system )
- graph_lat_7 = \
- int( graph_timestamp_7 ) - int( t0_system )
-
- main.log.info( "Graph Timestamp ONOS1: " +
- str( graph_timestamp_1 ) )
- main.log.info( "Graph Timestamp ONOS2: " +
- str( graph_timestamp_2 ) )
- main.log.info( "Graph Timestamp ONOS3: " +
- str( graph_timestamp_3 ) )
- main.log.info( "Graph Timestamp ONOS4: " +
- str( graph_timestamp_4 ) )
- main.log.info( "Graph Timestamp ONOS5: " +
- str( graph_timestamp_5 ) )
- main.log.info( "Graph Timestamp ONOS6: " +
- str( graph_timestamp_6 ) )
- main.log.info( "Graph Timestamp ONOS7: " +
- str( graph_timestamp_7 ) )
-
- max_graph_lat = max( graph_lat_1,
- graph_lat_2,
- graph_lat_3,
- graph_lat_4,
- graph_lat_5,
- graph_lat_6,
- graph_lat_7 )
-
- if max_graph_lat > sw_disc_threshold_min\
- and max_graph_lat < sw_disc_threshold_max\
- and int( i ) > iter_ignore:
- sw_discovery_lat_list.append(
- max_graph_lat )
- main.log.info(
- "Sw discovery latency of " +
- str( cluster_count ) +
- " node(s): " +
- str( max_graph_lat ) +
- " ms" )
- else:
- main.log.info( "Switch discovery latency " +
- "exceeded the threshold." )
- main.log.info( str( max_graph_lat ) + " ms" )
-
- break
-
- counter_loop += 1
- time.sleep( 3 )
- # END WHILE LOOP
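For reference, the near-identical cluster_count branches above all perform the same measurement against a different number of CLI handles. A minimal consolidation sketch (illustrative only; it assumes each main.ONOS<n>cli driver exposes the same topology_events_metrics() call used above and that device discovery has already been confirmed):

    cli_handles = [ getattr( main, "ONOS%dcli" % n )
                    for n in range( 1, cluster_count + 1 ) ]
    graph_lats = []
    for cli in cli_handles:
        metrics = json.loads( cli.topology_events_metrics() )
        graph_lats.append(
            int( metrics[ graphTimestamp ][ 'value' ] ) - int( t0_system ) )
    max_graph_lat = max( graph_lats )
    if sw_disc_threshold_min < max_graph_lat < sw_disc_threshold_max \
            and int( i ) > iter_ignore:
        sw_discovery_lat_list.append( max_graph_lat )
    else:
        main.log.info( "Switch discovery latency exceeded the threshold: " +
                       str( max_graph_lat ) + " ms" )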
-
- # Below is used for reporting SYN / ACK timing
- # of all switches
- main.ONOS1.tshark_stop()
- syn_ack_timestamp_list = []
- if cluster_count < 3:
- # TODO: capture synack on nodes less than 3
- syn_ack_timestamp_list.append( 0 )
-
- if cluster_count >= 3:
- main.ONOS2.tshark_stop()
- main.ONOS3.tshark_stop()
- time.sleep( 5 )
- os.system( "scp " + ONOS_user + "@" + ONOS1_ip + ":" +
- "/tmp/syn_ack_onos1_iter" + str( i ) + ".txt /tmp/" )
- os.system( "scp " + ONOS_user + "@" + ONOS2_ip + ":" +
- "/tmp/syn_ack_onos2_iter" + str( i ) + ".txt /tmp/" )
- os.system( "scp " + ONOS_user + "@" + ONOS3_ip + ":" +
- "/tmp/syn_ack_onos3_iter" + str( i ) + ".txt /tmp/" )
- time.sleep( 5 )
- # Read each of the files and append all
- # SYN / ACK timestamps to the list
- with open( "/tmp/syn_ack_onos1_iter" + str( i ) + ".txt" ) as\
- f_onos1:
- for line in f_onos1:
- line = line.split( " " )
- try:
- float( line[ 1 ] )
- syn_ack_timestamp_list.append( line[ 1 ] )
- except ValueError:
- main.log.info( "String cannot be converted" )
- with open( "/tmp/syn_ack_onos2_iter" + str( i ) + ".txt" ) as\
- f_onos2:
- for line in f_onos2:
- line = line.split( " " )
- try:
- float( line[ 1 ] )
- syn_ack_timestamp_list.append( line[ 1 ] )
- except ValueError:
- main.log.info( "String cannot be converted" )
- with open( "/tmp/syn_ack_onos3_iter" + str( i ) + ".txt" ) as\
- f_onos3:
- for line in f_onos3:
- line = line.split( " " )
- try:
- float( line[ 1 ] )
- syn_ack_timestamp_list.append( line[ 1 ] )
- except ValueError:
- main.log.info( "String cannot be converted" )
- if cluster_count >= 4:
- main.ONOS4.tshark_stop()
- time.sleep( 5 )
- os.system( "scp " + ONOS_user + "@" + ONOS4_ip + ":" +
- "/tmp/syn_ack_onos4_iter" + str( i ) + ".txt /tmp/" )
- time.sleep( 5 )
- with open( "/tmp/syn_ack_onos4_iter" + str( i ) + ".txt" ) as\
- f_onos4:
- for line in f_onos4:
- line = line.split( " " )
- try:
- float( line[ 1 ] )
- syn_ack_timestamp_list.append( line[ 1 ] )
- except ValueError:
- main.log.info( "String cannot be converted" )
- if cluster_count >= 5:
- main.ONOS5.tshark_stop()
- time.sleep( 5 )
- os.system( "scp " + ONOS_user + "@" + ONOS5_ip + ":" +
- "/tmp/syn_ack_onos5_iter" + str( i ) + ".txt /tmp/" )
- time.sleep( 5 )
- with open( "/tmp/syn_ack_onos5_iter" + str( i ) + ".txt" ) as\
- f_onos5:
- for line in f_onos5:
- line = line.split( " " )
- try:
- float( line[ 1 ] )
- syn_ack_timestamp_list.append( line[ 1 ] )
- except ValueError:
- main.log.info( "String cannot be converted" )
- if cluster_count >= 6:
- main.ONOS6.tshark_stop()
- time.sleep( 5 )
- os.system( "scp " + ONOS_user + "@" + ONOS6_ip + ":" +
- "/tmp/syn_ack_onos6_iter" + str( i ) + ".txt /tmp/" )
- time.sleep( 5 )
- with open( "/tmp/syn_ack_onos6_iter" + str( i ) + ".txt" ) as\
- f_onos6:
- for line in f_onos6:
- line = line.split( " " )
- try:
- float( line[ 1 ] )
- syn_ack_timestamp_list.append( line[ 1 ] )
- except ValueError:
- main.log.info( "String cannot be converted" )
- if cluster_count == 7:
- main.ONOS7.tshark_stop()
- time.sleep( 5 )
- os.system( "scp " + ONOS_user + "@" + ONOS7_ip + ":" +
- "/tmp/syn_ack_onos7_iter" + str( i ) + ".txt /tmp/" )
- time.sleep( 5 )
- with open( "/tmp/syn_ack_onos7_iter" + str( i ) + ".txt" ) as\
- f_onos7:
- for line in f_onos7:
- line = line.split( " " )
- try:
- float( line[ 1 ] )
- syn_ack_timestamp_list.append( line[ 1 ] )
- except ValueError:
- main.log.info( "String cannot be converted" )
-
- # Sort the list by timestamp
- syn_ack_timestamp_list = sorted( syn_ack_timestamp_list )
- print "Sorted SYN / ACK timestamps: " + str( syn_ack_timestamp_list )
-
- syn_ack_delta =\
- int( float( syn_ack_timestamp_list[ -1 ] ) * 1000 ) -\
- int( float( syn_ack_timestamp_list[ 0 ] ) * 1000 )
-
- main.log.info( "Switch connection attempt delta iteration " +
- str( i ) + ": " + str( syn_ack_delta ) )
- syn_ack_delta_list.append( syn_ack_delta )
- # END ITERATION LOOP
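The per-node SYN/ACK handling above repeats the same stop/copy/parse sequence for each ONOS instance. A sketch of a shared helper (illustrative; it assumes the same /tmp file naming and node handles used above, and keeps the timestamps as floats so the later sort is numeric rather than lexical):

    def collect_syn_ack_timestamps( node_handle, node_ip, node_index, iteration ):
        # Stop the capture on the node, pull the file over and parse it.
        node_handle.tshark_stop()
        capture_file = "/tmp/syn_ack_onos" + str( node_index ) + \
                       "_iter" + str( iteration ) + ".txt"
        os.system( "scp " + ONOS_user + "@" + node_ip + ":" +
                   capture_file + " /tmp/" )
        timestamps = []
        with open( capture_file ) as capture:
            for line in capture:
                fields = line.split( " " )
                try:
                    timestamps.append( float( fields[ 1 ] ) )
                except ( ValueError, IndexError ):
                    main.log.info( "String cannot be converted" )
        return timestamps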
- # REPORT HERE
-
- if len( sw_discovery_lat_list ) > 0:
- sw_lat_avg = sum( sw_discovery_lat_list ) / \
- len( sw_discovery_lat_list )
- sw_lat_dev = numpy.std( sw_discovery_lat_list )
- else:
- sw_lat_avg = 0
- sw_lat_dev = 0
- assertion = main.FALSE
-
- main.log.report( "Switch connection attempt time avg " +
- "(last sw SYN/ACK time - first sw SYN/ACK time) " +
- str( sum( syn_ack_delta_list ) /
- len( syn_ack_delta_list ) ) +
- " ms" )
- main.log.report( str( num_sw ) + " Switch discovery latency for " +
- str( cluster_count ) + " instance(s): " )
- main.log.report( "Avg: " +
- str( sw_lat_avg ) +
- " ms " +
- "Std Deviation: " +
- str( round( sw_lat_dev, 1 ) ) +
- " ms" )
-
- utilities.assert_equals(
- expect=main.TRUE,
- actual=assertion,
- onpass="Switch discovery convergence latency" +
- " for " +
- str( cluster_count ) +
- " nodes successful",
- onfail="Switch discovery convergence latency" +
- " test failed" )
-
- def CASE3( self, main ):
- """
- Increase number of nodes and initiate CLI
- """
- import time
- import subprocess
- import os
- import requests
- import json
-
- ONOS1_ip = main.params[ 'CTRL' ][ 'ip1' ]
- ONOS2_ip = main.params[ 'CTRL' ][ 'ip2' ]
- ONOS3_ip = main.params[ 'CTRL' ][ 'ip3' ]
- ONOS4_ip = main.params[ 'CTRL' ][ 'ip4' ]
- ONOS5_ip = main.params[ 'CTRL' ][ 'ip5' ]
- ONOS6_ip = main.params[ 'CTRL' ][ 'ip6' ]
- ONOS7_ip = main.params[ 'CTRL' ][ 'ip7' ]
-
- cell_name = main.params[ 'ENV' ][ 'cellName' ]
-
- MN1_ip = main.params[ 'MN' ][ 'ip1' ]
- BENCH_ip = main.params[ 'BENCH' ][ 'ip' ]
-
- # NOTE: We start with cluster_count at 3. The first
- # case already initialized ONOS1, so we increase the
- # cluster count and start from 3.
- # You can optionally change the increment to
- # test other step sizes, such as 3, 5, 7
-
- global cluster_count
- cluster_count += 2
- main.log.report( "Increasing cluster size to " +
- str( cluster_count ) )
-
- install_result = main.FALSE
- # Supports up to 7 node configuration
- # TODO: Cleanup this ridiculous repetitive code
- if cluster_count == 3:
- install_result2 = \
- main.ONOSbench.onos_install( node=ONOS2_ip )
- install_result3 = \
- main.ONOSbench.onos_install( node=ONOS3_ip )
- time.sleep( 5 )
- main.log.info( "Starting CLI" )
- main.ONOS2cli.start_onos_cli( ONOS2_ip )
- main.ONOS3cli.start_onos_cli( ONOS3_ip )
- main.ONOS1cli.add_node( ONOS2_ip, ONOS2_ip )
- main.ONOS1cli.add_node( ONOS3_ip, ONOS3_ip )
- install_result = install_result2 and install_result3
-
- if cluster_count == 4:
- main.log.info( "Installing ONOS on node 4" )
- install_result = \
- main.ONOSbench.onos_install( node=ONOS4_ip )
- time.sleep( 5 )
- main.log.info( "Starting CLI" )
- main.ONOS4cli.start_onos_cli( ONOS4_ip )
- main.ONOS1cli.add_node( ONOS4_ip, ONOS4_ip )
-
- elif cluster_count == 5:
- main.log.info( "Installing ONOS on nodes 4 and 5" )
- install_result2 = \
- main.ONOSbench.onos_install( options="", node=ONOS4_ip )
- install_result3 = \
- main.ONOSbench.onos_install( options="", node=ONOS5_ip )
- time.sleep( 5 )
- main.log.info( "Starting CLI" )
- main.ONOS4cli.start_onos_cli( ONOS4_ip )
- main.ONOS5cli.start_onos_cli( ONOS5_ip )
- main.ONOS1cli.add_node( ONOS4_ip, ONOS4_ip )
- main.ONOS1cli.add_node( ONOS5_ip, ONOS5_ip )
- install_result = install_result2 and install_result3
-
- elif cluster_count == 6:
- main.log.info( "Installing ONOS on nodes 4, 5, and 6" )
- install_result1 = \
- main.ONOSbench.onos_install( options="", node=ONOS4_ip )
- install_result2 = \
- main.ONOSbench.onos_install( options="", node=ONOS5_ip )
- install_result3 = \
- main.ONOSbench.onos_install( node=ONOS6_ip )
- time.sleep( 5 )
- main.log.info( "Starting CLI" )
- main.ONOS4cli.start_onos_cli( ONOS4_ip )
- main.ONOS5cli.start_onos_cli( ONOS5_ip )
- main.ONOS6cli.start_onos_cli( ONOS6_ip )
- main.ONOS1cli.add_node( ONOS4_ip, ONOS4_ip )
- main.ONOS1cli.add_node( ONOS5_ip, ONOS5_ip )
- main.ONOS1cli.add_node( ONOS6_ip, ONOS6_ip )
- install_result = install_result1 and install_result2 and\
- install_result3
-
- elif cluster_count == 7:
- main.log.info( "Installing ONOS on nodes 6 and 7" )
- install_result3 = \
- main.ONOSbench.onos_install( node=ONOS6_ip )
- install_result4 = \
- main.ONOSbench.onos_install( node=ONOS7_ip )
- main.log.info( "Starting CLI" )
- main.ONOS4cli.start_onos_cli( ONOS4_ip )
- main.ONOS5cli.start_onos_cli( ONOS5_ip )
- main.ONOS6cli.start_onos_cli( ONOS6_ip )
- main.ONOS7cli.start_onos_cli( ONOS7_ip )
- main.ONOS1cli.add_node( ONOS4_ip, ONOS4_ip )
- main.ONOS1cli.add_node( ONOS5_ip, ONOS5_ip )
- main.ONOS1cli.add_node( ONOS6_ip, ONOS6_ip )
- main.ONOS1cli.add_node( ONOS7_ip, ONOS7_ip )
-
- install_result = \
- install_result3 and install_result4
-
- time.sleep( 5 )
-
- if install_result == main.TRUE:
- assertion = main.TRUE
- else:
- assertion = main.FALSE
-
- utilities.assert_equals(
- expect=main.TRUE,
- actual=assertion,
- onpass="Scale out to " +
- str( cluster_count ) +
- " nodes successful",
- onfail="Scale out to " +
- str( cluster_count ) +
- " nodes failed" )
-
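The branch-per-size install logic above (see the TODO about repetitive code) could also be expressed as a single loop over the newly added nodes. A sketch, assuming prev_count holds the cluster size before the increment and that onos_install, start_onos_cli and add_node behave as they are called above:

    install_result = main.TRUE
    for n in range( prev_count + 1, cluster_count + 1 ):
        node_ip = main.params[ 'CTRL' ][ 'ip' + str( n ) ]
        main.log.info( "Installing ONOS on node " + str( n ) )
        install_result = install_result and \
            main.ONOSbench.onos_install( node=node_ip )
        time.sleep( 5 )
        getattr( main, "ONOS" + str( n ) + "cli" ).start_onos_cli( node_ip )
        main.ONOS1cli.add_node( node_ip, node_ip )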
- def CASE4( self, main ):
- """
- Cleanup ONOS nodes and Increase topology size
- """
- # TODO: use meaningful assertion
- assertion = main.TRUE
-
- ONOS1_ip = main.params[ 'CTRL' ][ 'ip1' ]
- ONOS2_ip = main.params[ 'CTRL' ][ 'ip2' ]
- ONOS3_ip = main.params[ 'CTRL' ][ 'ip3' ]
- ONOS4_ip = main.params[ 'CTRL' ][ 'ip4' ]
- ONOS5_ip = main.params[ 'CTRL' ][ 'ip5' ]
- ONOS6_ip = main.params[ 'CTRL' ][ 'ip6' ]
- ONOS7_ip = main.params[ 'CTRL' ][ 'ip7' ]
- MN1_ip = main.params[ 'MN' ][ 'ip1' ]
- BENCH_ip = main.params[ 'BENCH' ][ 'ip' ]
-
- main.log.info( "Uninstalling previous instances" )
- main.ONOSbench.onos_uninstall( node_ip=ONOS2_ip )
- main.ONOSbench.onos_uninstall( node_ip=ONOS3_ip )
- main.ONOSbench.onos_uninstall( node_ip=ONOS4_ip )
- main.ONOSbench.onos_uninstall( node_ip=ONOS5_ip )
- main.ONOSbench.onos_uninstall( node_ip=ONOS6_ip )
- main.ONOSbench.onos_uninstall( node_ip=ONOS7_ip )
-
- global topo_iteration
- global cluster_count
- cluster_count = 1
- topo_iteration += 1
-
- main.log.report( "Increasing topology size" )
- utilities.assert_equals( expect=main.TRUE, actual=assertion,
- onpass="Topology size increased successfully",
- onfail="Topology size was not increased" )
diff --git a/TestON/tests/TopoConvNext/TopoConvNext.topo b/TestON/tests/TopoConvNext/TopoConvNext.topo
deleted file mode 100644
index b7e9e96..0000000
--- a/TestON/tests/TopoConvNext/TopoConvNext.topo
+++ /dev/null
@@ -1,163 +0,0 @@
-<TOPOLOGY>
- <COMPONENT>
-
- <ONOSbench>
- <host>10.128.174.10</host>
- <user>admin</user>
- <password>onos_test</password>
- <type>OnosDriver</type>
- <connect_order>1</connect_order>
- <COMPONENTS> </COMPONENTS>
- </ONOSbench>
-
- <ONOS1cli>
- <host>10.128.174.10</host>
- <user>admin</user>
- <password>onos_test</password>
- <type>OnosCliDriver</type>
- <connect_order>2</connect_order>
- <COMPONENTS> </COMPONENTS>
- </ONOS1cli>
-
- <ONOS2cli>
- <host>10.128.174.10</host>
- <user>admin</user>
- <password>onos_test</password>
- <type>OnosCliDriver</type>
- <connect_order>3</connect_order>
- <COMPONENTS> </COMPONENTS>
- </ONOS2cli>
-
- <ONOS3cli>
- <host>10.128.174.10</host>
- <user>admin</user>
- <password>onos_test</password>
- <type>OnosCliDriver</type>
- <connect_order>4</connect_order>
- <COMPONENTS> </COMPONENTS>
- </ONOS3cli>
-
- <ONOS4cli>
- <host>10.128.174.10</host>
- <user>admin</user>
- <password>onos_test</password>
- <type>OnosCliDriver</type>
- <connect_order>5</connect_order>
- <COMPONENTS> </COMPONENTS>
- </ONOS4cli>
-
- <ONOS5cli>
- <host>10.128.174.10</host>
- <user>admin</user>
- <password>onos_test</password>
- <type>OnosCliDriver</type>
- <connect_order>6</connect_order>
- <COMPONENTS> </COMPONENTS>
- </ONOS5cli>
-
- <ONOS6cli>
- <host>10.128.174.10</host>
- <user>admin</user>
- <password>onos_test</password>
- <type>OnosCliDriver</type>
- <connect_order>7</connect_order>
- <COMPONENTS> </COMPONENTS>
- </ONOS6cli>
-
- <ONOS7cli>
- <host>10.128.174.10</host>
- <user>admin</user>
- <password>onos_test</password>
- <type>OnosCliDriver</type>
- <connect_order>8</connect_order>
- <COMPONENTS> </COMPONENTS>
- </ONOS7cli>
-
- <ONOS1>
- <host>10.128.174.1</host>
- <user>admin</user>
- <password>onos_test</password>
- <type>OnosDriver</type>
- <connect_order>9</connect_order>
- <COMPONENTS> </COMPONENTS>
- </ONOS1>
-
- <ONOS2>
- <host>10.128.174.2</host>
- <user>admin</user>
- <password>onos_test</password>
- <type>OnosDriver</type>
- <connect_order>10</connect_order>
- <COMPONENTS> </COMPONENTS>
- </ONOS2>
-
- <ONOS3>
- <host>10.128.174.3</host>
- <user>admin</user>
- <password>onos_test</password>
- <type>OnosDriver</type>
- <connect_order>11</connect_order>
- <COMPONENTS> </COMPONENTS>
- </ONOS3>
-
- <ONOS4>
- <host>10.128.174.4</host>
- <user>admin</user>
- <password>onos_test</password>
- <type>OnosDriver</type>
- <connect_order>12</connect_order>
- <COMPONENTS> </COMPONENTS>
- </ONOS4>
-
- <ONOS5>
- <host>10.128.174.5</host>
- <user>admin</user>
- <password>onos_test</password>
- <type>OnosDriver</type>
- <connect_order>13</connect_order>
- <COMPONENTS> </COMPONENTS>
- </ONOS5>
-
- <ONOS6>
- <host>10.128.174.6</host>
- <user>admin</user>
- <password>onos_test</password>
- <type>OnosDriver</type>
- <connect_order>14</connect_order>
- <COMPONENTS> </COMPONENTS>
- </ONOS6>
-
- <ONOS7>
- <host>10.128.174.7</host>
- <user>admin</user>
- <password>onos_test</password>
- <type>OnosDriver</type>
- <connect_order>15</connect_order>
- <COMPONENTS> </COMPONENTS>
- </ONOS7>
-
- <Mininet1>
- <host>10.128.10.90</host>
- <user>admin</user>
- <password>onos_test</password>
- <type>MininetCliDriver</type>
- <connect_order>16</connect_order>
- <COMPONENTS>
- <arg1> --custom topo-500sw.py </arg1>
- <arg2> --arp --mac --topo mytopo</arg2>
- <arg3> </arg3>
- <controller> remote </controller>
- </COMPONENTS>
- </Mininet1>
-
- <Mininet2>
- <host>10.128.10.90</host>
- <user>admin</user>
- <password>onos_test</password>
- <type>RemoteMininetDriver</type>
- <connect_order>17</connect_order>
- <COMPONENTS> </COMPONENTS>
- </Mininet2>
-
- </COMPONENT>
-</TOPOLOGY>
diff --git a/TestON/tests/TopoConvNext/__init__.py b/TestON/tests/TopoConvNext/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/TestON/tests/TopoConvNext/__init__.py
+++ /dev/null
diff --git a/TestON/tests/TopoPerfNextSingleNode/TopoPerfNextSingleNode.params b/TestON/tests/TopoPerfNextSingleNode/TopoPerfNextSingleNode.params
deleted file mode 100644
index f797706..0000000
--- a/TestON/tests/TopoPerfNextSingleNode/TopoPerfNextSingleNode.params
+++ /dev/null
@@ -1,45 +0,0 @@
-<PARAMS>
- <testcases>1,2,3,4,5</testcases>
-
- <ENV>
- <cellName>topo_perf_test</cellName>
- </ENV>
-
- <GIT>
- #autoPull 'on' or 'off'
- <autoPull>off</autoPull>
- <checkout>master</checkout>
- </GIT>
-
- <CTRL>
- <user>admin</user>
- <ip1>10.128.174.1</ip1>
- <port1>6633</port1>
- <ip2>10.128.174.2</ip2>
- <port2>6633</port2>
- <ip3>10.128.174.3</ip3>
- <port3>6633</port3>
- </CTRL>
-
- <MN>
- <ip1>10.128.10.90</ip1>
- <ip2>10.128.10.91</ip2>
- </MN>
-
- <BENCH>
- <ip>10.128.174.10</ip>
- </BENCH>
-
- <TEST>
- #Number of times to iterate each case
- <numIter>5</numIter>
- <numSwitch>100</numSwitch>
- </TEST>
-
- <JSON>
- <deviceTimestamp>topologyDeviceEventTimestamp</deviceTimestamp>
- <hostTimestamp>topologyHostEventTimestamp</hostTimestamp>
- <linkTimestamp>topologyLinkEventTimestamp</linkTimestamp>
- <graphTimestamp>topologyGraphEventTimestamp</graphTimestamp>
- </JSON>
-</PARAMS>
diff --git a/TestON/tests/TopoPerfNextSingleNode/TopoPerfNextSingleNode.py b/TestON/tests/TopoPerfNextSingleNode/TopoPerfNextSingleNode.py
deleted file mode 100644
index e81d905..0000000
--- a/TestON/tests/TopoPerfNextSingleNode/TopoPerfNextSingleNode.py
+++ /dev/null
@@ -1,1113 +0,0 @@
-# TopoPerfNext
-#
-# Topology Performance test for ONOS-next
-#*** Revised for single node operation ***
-#
-# andrew@onlab.us
-
-import time
-import sys
-import os
-import re
-
-
-class TopoPerfNextSingleNode:
-
- def __init__( self ):
- self.default = ''
-
- def CASE1( self, main ):
- """
- ONOS startup sequence
- """
- import time
-
- cell_name = main.params[ 'ENV' ][ 'cellName' ]
-
- git_pull = main.params[ 'GIT' ][ 'autoPull' ]
- checkout_branch = main.params[ 'GIT' ][ 'checkout' ]
-
- ONOS1_ip = main.params[ 'CTRL' ][ 'ip1' ]
- MN1_ip = main.params[ 'MN' ][ 'ip1' ]
- BENCH_ip = main.params[ 'BENCH' ][ 'ip' ]
-
- main.case( "Setting up test environment" )
-
- main.step( "Creating cell file" )
- cell_file_result = main.ONOSbench.create_cell_file(
- BENCH_ip, cell_name, MN1_ip, "onos-core",
- ONOS1_ip )
-
- main.step( "Applying cell file to environment" )
- cell_apply_result = main.ONOSbench.set_cell( cell_name )
- verify_cell_result = main.ONOSbench.verify_cell()
-
- main.step( "Git checkout and pull " + checkout_branch )
- if git_pull == 'on':
- checkout_result = \
- main.ONOSbench.git_checkout( checkout_branch )
- pull_result = main.ONOSbench.git_pull()
- else:
- checkout_result = main.TRUE
- pull_result = main.TRUE
- main.log.info( "Skipped git checkout and pull" )
-
- main.step( "Using mvn clean & install" )
- #mvn_result = main.ONOSbench.clean_install()
- mvn_result = main.TRUE
-
- main.step( "Creating ONOS package" )
- package_result = main.ONOSbench.onos_package()
-
- main.step( "Installing ONOS package" )
- install1_result = main.ONOSbench.onos_install( node=ONOS1_ip )
-
- # NOTE: This step may be unnecessary
- #main.step( "Starting ONOS service" )
- #start_result = main.ONOSbench.onos_start( ONOS1_ip )
-
- main.step( "Set cell for ONOS cli env" )
- main.ONOS1cli.set_cell( cell_name )
-
- time.sleep( 10 )
-
- main.step( "Start onos cli" )
- cli1 = main.ONOS1cli.start_onos_cli( ONOS1_ip )
-
- main.step( "Enable metrics feature" )
- main.ONOS1cli.feature_install( "onos-app-metrics" )
-
- utilities.assert_equals( expect=main.TRUE,
- actual=cell_file_result and cell_apply_result and
- verify_cell_result and checkout_result and
- pull_result and mvn_result and
- install1_result,
- onpass="ONOS started successfully",
- onfail="Failed to start ONOS" )
-
- def CASE2( self, main ):
- """
- Assign s1 to ONOS1 and measure latency
-
- There are 4 levels of latency measurements to this test:
- 1 ) End-to-end measurement: Complete end-to-end measurement
- from TCP ( SYN/ACK ) handshake to Graph change
- 2 ) OFP-to-graph measurement: 'ONOS processing' snippet of
- measurement from OFP Vendor message to Graph change
- 3 ) OFP-to-device measurement: 'ONOS processing without
- graph change' snippet of measurement from OFP vendor
- message to Device change timestamp
- 4 ) T0-to-device measurement: Measurement that includes
- the switch handshake to devices timestamp without
- the graph view change. ( TCP handshake -> Device
- change )
- """
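In terms of the timestamps gathered below, the four measurements are simple differences; a small helper makes that explicit (illustrative only — t0_tcp and t0_ofp come from the tshark captures, the device and graph timestamps from the topology events metrics):

    def latency_breakdown( t0_tcp, t0_ofp, device_ts, graph_ts ):
        # All inputs are epoch milliseconds; returns the four figures above.
        return {
            'end_to_end': graph_ts - t0_tcp,       # ( 1 )
            'ofp_to_graph': graph_ts - t0_ofp,     # ( 2 )
            'ofp_to_device': device_ts - t0_ofp,   # ( 3 )
            't0_to_device': device_ts - t0_tcp     # ( 4 )
        }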
- import time
- import subprocess
- import json
- import requests
- import os
-
- ONOS1_ip = main.params[ 'CTRL' ][ 'ip1' ]
- ONOS_user = main.params[ 'CTRL' ][ 'user' ]
-
- default_sw_port = main.params[ 'CTRL' ][ 'port1' ]
-
- # Number of iterations of case
- num_iter = main.params[ 'TEST' ][ 'numIter' ]
-
- # Timestamp 'keys' for json metrics output.
- # These are subject to change, hence moved into params
- deviceTimestamp = main.params[ 'JSON' ][ 'deviceTimestamp' ]
- graphTimestamp = main.params[ 'JSON' ][ 'graphTimestamp' ]
-
- # List of switch add latency collected from
- # all iterations
- latency_end_to_end_list = []
- latency_ofp_to_graph_list = []
- latency_ofp_to_device_list = []
- latency_t0_to_device_list = []
-
- # Directory/file to store tshark results
- tshark_of_output = "/tmp/tshark_of_topo.txt"
- tshark_tcp_output = "/tmp/tshark_tcp_topo.txt"
-
- # String to grep in tshark output
- tshark_tcp_string = "TCP 74 " + default_sw_port
- tshark_of_string = "OFP 86 Vendor"
-
- # Initialize assertion to TRUE
- assertion = main.TRUE
-
- main.log.report( "Latency of adding one switch" )
-
- for i in range( 0, int( num_iter ) ):
- main.log.info( "Starting tshark capture" )
-
- #* TCP [ ACK, SYN ] is used as t0_a, the
- # very first "exchange" between ONOS and
- # the switch for end-to-end measurement
- #* OFP [ Stats Reply ] is used for t0_b
- # the very last OFP message between ONOS
- # and the switch for ONOS measurement
- main.ONOS1.tshark_grep( tshark_tcp_string,
- tshark_tcp_output )
- main.ONOS1.tshark_grep( tshark_of_string,
- tshark_of_output )
-
- # Wait and ensure tshark is started and
- # capturing
- time.sleep( 10 )
-
- main.log.info( "Assigning s1 to controller" )
-
- main.Mininet1.assign_sw_controller(
- sw="1",
- ip1=ONOS1_ip,
- port1=default_sw_port )
-
- # Wait and ensure switch is assigned
- # before stopping tshark
- time.sleep( 30 )
-
- main.log.info( "Stopping all Tshark processes" )
- main.ONOS1.stop_tshark()
-
- # tshark output is saved in ONOS. Use subprocess
- # to copy over files to TestON for parsing
- main.log.info( "Copying over tshark files" )
-
- # TCP CAPTURE ****
- # Copy the tshark output from ONOS machine to
- # TestON machine in tshark_tcp_output directory>file
- os.system( "scp " + ONOS_user + "@" + ONOS1_ip + ":" +
- tshark_tcp_output + " /tmp/" )
- tcp_file = open( tshark_tcp_output, 'r' )
- temp_text = tcp_file.readline()
- temp_text = temp_text.split( " " )
-
- main.log.info( "Object read in from TCP capture: " +
- str( temp_text ) )
- if len( temp_text ) > 1:
- t0_tcp = float( temp_text[ 1 ] ) * 1000.0
- else:
- main.log.error( "Tshark output file for TCP" +
- " returned unexpected results" )
- t0_tcp = 0
- assertion = main.FALSE
-
- tcp_file.close()
- #****************
-
- # OF CAPTURE ****
- os.system( "scp " + ONOS_user + "@" + ONOS1_ip + ":" +
- tshark_of_output + " /tmp/" )
- of_file = open( tshark_of_output, 'r' )
-
- line_ofp = ""
- # Read until last line of file
- while True:
- temp_text = of_file.readline()
- if temp_text != '':
- line_ofp = temp_text
- else:
- break
- obj = line_ofp.split( " " )
-
- main.log.info( "Object read in from OFP capture: " +
- str( line_ofp ) )
-
- if len( obj ) > 1:
- t0_ofp = float( obj[ 1 ] ) * 1000.0
- else:
- main.log.error( "Tshark output file for OFP" +
- " returned unexpected results" )
- t0_ofp = 0
- assertion = main.FALSE
-
- of_file.close()
- #****************
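The two capture blocks above differ only in which file they read and whether the first or last line carries the timestamp of interest. A possible shared helper (a sketch under the same assumptions: the capture lives on ONOS1 and the epoch time is the second whitespace-separated field):

    def capture_timestamp_ms( remote_file, use_last_line=False ):
        # Copy the capture from ONOS1 and return its timestamp in epoch ms,
        # or None if the file holds no parsable entry.
        os.system( "scp " + ONOS_user + "@" + ONOS1_ip + ":" +
                   remote_file + " /tmp/" )
        with open( remote_file ) as capture:
            lines = [ line for line in capture if line.strip() ]
        if not lines:
            return None
        fields = ( lines[ -1 ] if use_last_line else lines[ 0 ] ).split( " " )
        try:
            return float( fields[ 1 ] ) * 1000.0
        except ( ValueError, IndexError ):
            return None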
-
- json_str_1 = main.ONOS1cli.topology_events_metrics()
-
- json_obj_1 = json.loads( json_str_1 )
-
- # Obtain graph timestamp. This timestamp captures
- # the epoch time at which the topology graph was updated.
- graph_timestamp_1 = \
- json_obj_1[ graphTimestamp ][ 'value' ]
-
- # Obtain device timestamp. This timestamp captures
- # the epoch time at which the device event happened
- device_timestamp_1 = \
- json_obj_1[ deviceTimestamp ][ 'value' ]
-
- # t0 to device processing latency
- delta_device_1 = int( device_timestamp_1 ) - int( t0_tcp )
-
- # Get average of delta from all instances
- avg_delta_device = ( int( delta_device_1 ) )
-
- # Ensure avg delta meets the threshold before appending
- if avg_delta_device > 0.0 and avg_delta_device < 10000:
- latency_t0_to_device_list.append( avg_delta_device )
- else:
- main.log.info( "Results for t0-to-device ignored" +
- " due to exceeding the threshold" )
-
- # t0 to graph processing latency ( end-to-end )
- delta_graph_1 = int( graph_timestamp_1 ) - int( t0_tcp )
-
- # Get average of delta from all instances
- avg_delta_graph = int( delta_graph_1 )
-
- # Ensure avg delta meets the threshold before appending
- if avg_delta_graph > 0.0 and avg_delta_graph < 10000:
- latency_end_to_end_list.append( avg_delta_graph )
- else:
- main.log.info( "Results for end-to-end ignored" +
- " due to exceeding the threshold" )
-
- # ofp to graph processing latency ( ONOS processing )
- delta_ofp_graph_1 = int( graph_timestamp_1 ) - int( t0_ofp )
-
- avg_delta_ofp_graph = int( delta_ofp_graph_1 )
-
- if avg_delta_ofp_graph > 0.0 and avg_delta_ofp_graph < 10000:
- latency_ofp_to_graph_list.append( avg_delta_ofp_graph )
- else:
- main.log.info( "Results for ofp-to-graph " +
- "ignored due to exceeding the threshold" )
-
- # ofp to device processing latency ( ONOS processing )
- delta_ofp_device_1 = float( device_timestamp_1 ) - float( t0_ofp )
-
- avg_delta_ofp_device = float( delta_ofp_device_1 )
-
- # NOTE: ofp - delta measurements are occasionally negative
- # due to system time misalignment.
- latency_ofp_to_device_list.append( avg_delta_ofp_device )
-
- # TODO:
- # Fetch logs upon threshold excess
-
- main.log.info( "ONOS1 delta end-to-end: " +
- str( delta_graph_1 ) + " ms" )
-
- main.log.info( "ONOS1 delta OFP - graph: " +
- str( delta_ofp_graph_1 ) + " ms" )
-
- main.log.info( "ONOS1 delta device - t0: " +
- str( delta_device_1 ) + " ms" )
-
- main.step( "Remove switch from controller" )
- main.Mininet1.delete_sw_controller( "s1" )
-
- time.sleep( 5 )
-
- # END of for loop iteration
-
- # If there is at least 1 element in each list,
- # pass the test case
- if len( latency_end_to_end_list ) > 0 and\
- len( latency_ofp_to_graph_list ) > 0 and\
- len( latency_ofp_to_device_list ) > 0 and\
- len( latency_t0_to_device_list ) > 0:
- assertion = main.TRUE
- # The appending of 0 below is to prevent
- # the min,max,sum functions from failing
- # if a list is empty
- if len( latency_end_to_end_list ) == 0:
- latency_end_to_end_list.append( 0 )
- assertion = main.FALSE
- if len( latency_ofp_to_graph_list ) == 0:
- latency_ofp_to_graph_list.append( 0 )
- assertion = main.FALSE
- if len( latency_ofp_to_device_list ) == 0:
- latency_ofp_to_device_list.append( 0 )
- assertion = main.FALSE
- if len( latency_t0_to_device_list ) == 0:
- latency_t0_to_device_list.append( 0 )
- assertion = main.FALSE
-
- # Calculate min, max, avg of latency lists
- latency_end_to_end_max = \
- int( max( latency_end_to_end_list ) )
- latency_end_to_end_min = \
- int( min( latency_end_to_end_list ) )
- latency_end_to_end_avg = \
- ( int( sum( latency_end_to_end_list ) ) /
- len( latency_end_to_end_list ) )
-
- latency_ofp_to_graph_max = \
- int( max( latency_ofp_to_graph_list ) )
- latency_ofp_to_graph_min = \
- int( min( latency_ofp_to_graph_list ) )
- latency_ofp_to_graph_avg = \
- ( int( sum( latency_ofp_to_graph_list ) ) /
- len( latency_ofp_to_graph_list ) )
-
- latency_ofp_to_device_max = \
- int( max( latency_ofp_to_device_list ) )
- latency_ofp_to_device_min = \
- int( min( latency_ofp_to_device_list ) )
- latency_ofp_to_device_avg = \
- ( int( sum( latency_ofp_to_device_list ) ) /
- len( latency_ofp_to_device_list ) )
-
- latency_t0_to_device_max = \
- float( max( latency_t0_to_device_list ) )
- latency_t0_to_device_min = \
- float( min( latency_t0_to_device_list ) )
- latency_t0_to_device_avg = \
- ( float( sum( latency_t0_to_device_list ) ) /
- len( latency_t0_to_device_list ) )
-
- main.log.report( "Switch add - End-to-end latency: \n" +
- "Min: " + str( latency_end_to_end_min ) + "\n" +
- "Max: " + str( latency_end_to_end_max ) + "\n" +
- "Avg: " + str( latency_end_to_end_avg ) )
- main.log.report( "Switch add - OFP-to-Graph latency: \n" +
- "Min: " + str( latency_ofp_to_graph_min ) + "\n" +
- "Max: " + str( latency_ofp_to_graph_max ) + "\n" +
- "Avg: " + str( latency_ofp_to_graph_avg ) )
- main.log.report( "Switch add - t0-to-Device latency: \n" +
- "Min: " + str( latency_t0_to_device_min ) + "\n" +
- "Max: " + str( latency_t0_to_device_max ) + "\n" +
- "Avg: " + str( latency_t0_to_device_avg ) )
-
- utilities.assert_equals( expect=main.TRUE, actual=assertion,
- onpass="Switch latency test successful",
- onfail="Switch latency test failed" )
-
- def CASE3( self, main ):
- """
- Bring port up / down and measure latency.
- Port enable / disable is simulated by ifconfig up / down
-
- In ONOS-next, we must ensure that the port we are
- manipulating is connected to another switch with a valid
- connection. Otherwise, graph view will not be updated.
- """
- import time
- import subprocess
- import os
- import requests
- import json
-
- ONOS1_ip = main.params[ 'CTRL' ][ 'ip1' ]
- ONOS_user = main.params[ 'CTRL' ][ 'user' ]
-
- default_sw_port = main.params[ 'CTRL' ][ 'port1' ]
-
- assertion = main.TRUE
- # Number of iterations of case
- num_iter = main.params[ 'TEST' ][ 'numIter' ]
-
- # Timestamp 'keys' for json metrics output.
- # These are subject to change, hence moved into params
- deviceTimestamp = main.params[ 'JSON' ][ 'deviceTimestamp' ]
- graphTimestamp = main.params[ 'JSON' ][ 'graphTimestamp' ]
-
- # NOTE: Some hardcoded variables you may need to configure
- # besides the params
-
- tshark_port_status = "OFP 130 Port Status"
-
- tshark_port_up = "/tmp/tshark_port_up.txt"
- tshark_port_down = "/tmp/tshark_port_down.txt"
- interface_config = "s1-eth1"
-
- main.log.report( "Port enable / disable latency" )
-
- main.step( "Assign switches s1 and s2 to controller 1" )
- main.Mininet1.assign_sw_controller( sw="1", ip1=ONOS1_ip,
- port1=default_sw_port )
- main.Mininet1.assign_sw_controller( sw="2", ip1=ONOS1_ip,
- port1=default_sw_port )
-
- # Give enough time for metrics to propagate the
- # assign controller event. Otherwise, these events may
- # carry over to our measurements
- time.sleep( 10 )
-
- main.step( "Verify switch is assigned correctly" )
- result_s1 = main.Mininet1.get_sw_controller( sw="s1" )
- result_s2 = main.Mininet1.get_sw_controller( sw="s2" )
- if result_s1 == main.FALSE or result_s2 == main.FALSE:
- main.log.info( "Switch s1 was not assigned correctly" )
- assertion = main.FALSE
- else:
- main.log.info( "Switch s1 was assigned correctly" )
-
- port_up_device_to_ofp_list = []
- port_up_graph_to_ofp_list = []
- port_down_device_to_ofp_list = []
- port_down_graph_to_ofp_list = []
-
- for i in range( 0, int( num_iter ) ):
- main.step( "Starting wireshark capture for port status down" )
- main.ONOS1.tshark_grep( tshark_port_status,
- tshark_port_down )
-
- time.sleep( 10 )
-
- # Disable interface that is connected to switch 2
- main.step( "Disable port: " + interface_config )
- main.Mininet2.handle.sendline( "sudo ifconfig " +
- interface_config + " down" )
- main.Mininet2.handle.expect( "\$" )
- time.sleep( 10 )
-
- main.ONOS1.tshark_stop()
- time.sleep( 5 )
-
- # Copy tshark output file from ONOS to TestON instance
- #/tmp directory
- os.system( "scp " + ONOS_user + "@" + ONOS1_ip + ":" +
- tshark_port_down + " /tmp/" )
-
- f_port_down = open( tshark_port_down, 'r' )
- # Get first line of port down event from tshark
- f_line = f_port_down.readline()
- obj_down = f_line.split( " " )
- if len( obj_down ) > 1:
- timestamp_begin_pt_down = int( float( obj_down[ 1 ] ) * 1000 )
- main.log.info( "Port down begin timestamp: " +
- str( timestamp_begin_pt_down ) )
- else:
- main.log.info( "Tshark output file returned unexpected" +
- " results: " + str( obj_down ) )
- timestamp_begin_pt_down = 0
-
- f_port_down.close()
-
- main.log.info( "TEST tshark obj: " + str( obj_down ) )
-
- main.step( "Obtain t1 by REST call" )
- json_str_1 = main.ONOS1cli.topology_events_metrics()
-
- main.log.info( "TEST json_str 1: " + str( json_str_1 ) )
-
- json_obj_1 = json.loads( json_str_1 )
-
- time.sleep( 5 )
-
- # Obtain graph timestamp. This timestamp captures
- # the epoch time at which the topology graph was updated.
- graph_timestamp_1 = \
- json_obj_1[ graphTimestamp ][ 'value' ]
-
- # Obtain device timestamp. This timestamp captures
- # the epoch time at which the device event happened
- device_timestamp_1 = \
- json_obj_1[ deviceTimestamp ][ 'value' ]
-
- # Get delta between graph event and OFP
- pt_down_graph_to_ofp_1 = int( graph_timestamp_1 ) -\
- int( timestamp_begin_pt_down )
-
- # Get delta between device event and OFP
- pt_down_device_to_ofp_1 = int( device_timestamp_1 ) -\
- int( timestamp_begin_pt_down )
-
- # Calculate average across clusters
- pt_down_graph_to_ofp_avg = int( pt_down_graph_to_ofp_1 )
- pt_down_device_to_ofp_avg = int( pt_down_device_to_ofp_1 )
-
- if pt_down_graph_to_ofp_avg > 0.0 and \
- pt_down_graph_to_ofp_avg < 1000:
- port_down_graph_to_ofp_list.append(
- pt_down_graph_to_ofp_avg )
- main.log.info( "Port down: graph to ofp avg: " +
- str( pt_down_graph_to_ofp_avg ) + " ms" )
- else:
- main.log.info( "Average port down graph-to-ofp result" +
- " exceeded the threshold: " +
- str( pt_down_graph_to_ofp_avg ) )
-
- if pt_down_device_to_ofp_avg > 0 and \
- pt_down_device_to_ofp_avg < 1000:
- port_down_device_to_ofp_list.append(
- pt_down_device_to_ofp_avg )
- main.log.info( "Port down: device to ofp avg: " +
- str( pt_down_device_to_ofp_avg ) + " ms" )
- else:
- main.log.info( "Average port down device-to-ofp result" +
- " exceeded the threshold: " +
- str( pt_down_device_to_ofp_avg ) )
-
- # Port up events
- main.step( "Enable port and obtain timestamp" )
- main.step( "Starting wireshark capture for port status up" )
- main.ONOS1.tshark_grep( "OFP 130 Port Status", tshark_port_up )
- time.sleep( 5 )
-
- main.Mininet2.handle.sendline( "sudo ifconfig " +
- interface_config + " up" )
- main.Mininet2.handle.expect( "\$" )
- time.sleep( 10 )
-
- main.ONOS1.tshark_stop()
-
- os.system( "scp " + ONOS_user + "@" + ONOS1_ip + ":" +
- tshark_port_up + " /tmp/" )
-
- f_port_up = open( tshark_port_up, 'r' )
- f_line = f_port_up.readline()
- obj_up = f_line.split( " " )
- if len( obj_up ) > 1:
- timestamp_begin_pt_up = int( float( obj_up[ 1 ] ) * 1000 )
- main.log.info( "Port up begin timestamp: " +
- str( timestamp_begin_pt_up ) )
- else:
- main.log.info( "Tshark output file returned unexpected" +
- " results." )
- timestamp_begin_pt_up = 0
-
- f_port_up.close()
-
- main.step( "Obtain t1 by REST call" )
- json_str_1 = main.ONOS1cli.topology_events_metrics()
-
- json_obj_1 = json.loads( json_str_1 )
-
- # Obtain graph timestamp. This timestamp captures
- # the epoch time at which the topology graph was updated.
- graph_timestamp_1 = \
- json_obj_1[ graphTimestamp ][ 'value' ]
-
- # Obtain device timestamp. This timestamp captures
- # the epoch time at which the device event happened
- device_timestamp_1 = \
- json_obj_1[ deviceTimestamp ][ 'value' ]
-
- # Get delta between graph event and OFP
- pt_up_graph_to_ofp_1 = int( graph_timestamp_1 ) -\
- int( timestamp_begin_pt_up )
-
- # Get delta between device event and OFP
- pt_up_device_to_ofp_1 = int( device_timestamp_1 ) -\
- int( timestamp_begin_pt_up )
-
- pt_up_graph_to_ofp_avg = float( pt_up_graph_to_ofp_1 )
-
- pt_up_device_to_ofp_avg = float( pt_up_device_to_ofp_1 )
-
- if pt_up_graph_to_ofp_avg > 0 and \
- pt_up_graph_to_ofp_avg < 1000:
- port_up_graph_to_ofp_list.append(
- pt_up_graph_to_ofp_avg )
- main.log.info( "Port up: graph to ofp avg: " +
- str( pt_up_graph_to_ofp_avg ) + " ms" )
- else:
- main.log.info( "Average port up graph-to-ofp result" +
- " exceeded the threshold: " +
- str( pt_up_graph_to_ofp_avg ) )
-
- if pt_up_device_to_ofp_avg > 0 and \
- pt_up_device_to_ofp_avg < 1000:
- port_up_device_to_ofp_list.append(
- pt_up_device_to_ofp_avg )
- main.log.info( "Port up: device to ofp avg: " +
- str( pt_up_device_to_ofp_avg ) + " ms" )
- else:
- main.log.info( "Average port up device-to-ofp result" +
- " exceeded the threshold: " +
- str( pt_up_device_to_ofp_avg ) )
-
- # END ITERATION FOR LOOP
-
- # Check all list for latency existence and set assertion
- if ( port_down_graph_to_ofp_list and port_down_device_to_ofp_list
- and port_up_graph_to_ofp_list and port_up_device_to_ofp_list ):
- assertion = main.TRUE
-
- # Calculate and report latency measurements
- port_down_graph_to_ofp_min = min( port_down_graph_to_ofp_list )
- port_down_graph_to_ofp_max = max( port_down_graph_to_ofp_list )
- port_down_graph_to_ofp_avg = \
- ( sum( port_down_graph_to_ofp_list ) /
- len( port_down_graph_to_ofp_list ) )
-
- main.log.report( "Port down graph-to-ofp Min: " +
- str( port_down_graph_to_ofp_min ) + " ms Max: " +
- str( port_down_graph_to_ofp_max ) + " ms Avg: " +
- str( port_down_graph_to_ofp_avg ) )
-
- port_down_device_to_ofp_min = min( port_down_device_to_ofp_list )
- port_down_device_to_ofp_max = max( port_down_device_to_ofp_list )
- port_down_device_to_ofp_avg = \
- ( sum( port_down_device_to_ofp_list ) /
- len( port_down_device_to_ofp_list ) )
-
- main.log.report( "Port down device-to-ofp Min: " +
- str( port_down_device_to_ofp_min ) + " ms Max: " +
- str( port_down_device_to_ofp_max ) + " ms Avg: " +
- str( port_down_device_to_ofp_avg ) )
-
- port_up_graph_to_ofp_min = min( port_up_graph_to_ofp_list )
- port_up_graph_to_ofp_max = max( port_up_graph_to_ofp_list )
- port_up_graph_to_ofp_avg = \
- ( sum( port_up_graph_to_ofp_list ) /
- len( port_up_graph_to_ofp_list ) )
-
- main.log.report( "Port up graph-to-ofp Min: " +
- str( port_up_graph_to_ofp_min ) + " ms Max: " +
- str( port_up_graph_to_ofp_max ) + " ms Avg: " +
- str( port_up_graph_to_ofp_avg ) )
-
- port_up_device_to_ofp_min = min( port_up_device_to_ofp_list )
- port_up_device_to_ofp_max = max( port_up_device_to_ofp_list )
- port_up_device_to_ofp_avg = \
- ( sum( port_up_device_to_ofp_list ) /
- len( port_up_device_to_ofp_list ) )
-
- main.log.report( "Port up device-to-ofp Min: " +
- str( port_up_device_to_ofp_min ) + " ms Max: " +
- str( port_up_device_to_ofp_max ) + " ms Avg: " +
- str( port_up_device_to_ofp_avg ) )
-
- utilities.assert_equals(
- expect=main.TRUE,
- actual=assertion,
- onpass="Port discovery latency calculation successful",
- onfail="Port discovery test failed" )
-
- def CASE4( self, main ):
- """
- Link down event using loss rate 100%
-
- Important:
- Use a simple 2 switch topology with 1 link between
- the two switches. Ensure that mac addresses of the
- switches are 1 / 2 respectively
- """
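The link failure and recovery used below are emulated purely with traffic control on the Mininet side. A sketch of a helper wrapping the two commands this case sends (assuming the pexpect-style handle and "mininet>" prompt used elsewhere in the test):

    def set_link_loss( mininet_handle, intf, down ):
        # 100% loss makes ONOS see the link as down; deleting the qdisc
        # restores it.
        if down:
            cmd = "sh tc qdisc add dev " + intf + " root netem loss 100%"
        else:
            cmd = "sh tc qdisc del dev " + intf + " root"
        mininet_handle.sendline( cmd )
        mininet_handle.expect( "mininet>" )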
- import time
- import subprocess
- import os
- import requests
- import json
-
- ONOS1_ip = main.params[ 'CTRL' ][ 'ip1' ]
- ONOS_user = main.params[ 'CTRL' ][ 'user' ]
-
- default_sw_port = main.params[ 'CTRL' ][ 'port1' ]
-
- # Number of iterations of case
- num_iter = main.params[ 'TEST' ][ 'numIter' ]
-
- # Timestamp 'keys' for json metrics output.
- # These are subject to change, hence moved into params
- deviceTimestamp = main.params[ 'JSON' ][ 'deviceTimestamp' ]
- linkTimestamp = main.params[ 'JSON' ][ 'linkTimestamp' ]
- graphTimestamp = main.params[ 'JSON' ][ 'graphTimestamp' ]
-
- assertion = main.TRUE
- # Link event timestamp to system time list
- link_down_link_to_system_list = []
- link_up_link_to_system_list = []
- # Graph event timestamp to system time list
- link_down_graph_to_system_list = []
- link_up_graph_to_system_list = []
-
- main.log.report( "Add / remove link latency between " +
- "two switches" )
-
- main.step( "Assign all switches" )
- main.Mininet1.assign_sw_controller(
- sw="1",
- ip1=ONOS1_ip,
- port1=default_sw_port )
- main.Mininet1.assign_sw_controller(
- sw="2",
- ip1=ONOS1_ip,
- port1=default_sw_port )
-
- main.step( "Verifying switch assignment" )
- result_s1 = main.Mininet1.get_sw_controller( sw="s1" )
- result_s2 = main.Mininet1.get_sw_controller( sw="s2" )
-
- # Allow time for events to finish before taking measurements
- time.sleep( 10 )
-
- # Start iteration of link event test
- for i in range( 0, int( num_iter ) ):
- # Reset detection flag for each iteration
- link_down = False
- main.step( "Getting initial system time as t0" )
-
- timestamp_link_down_t0 = time.time() * 1000
- # Link down is simulated by 100% loss rate using traffic
- # control command
- main.Mininet1.handle.sendline(
- "sh tc qdisc add dev s1-eth1 root netem loss 100%" )
-
- # TODO: Iterate through 'links' command to verify that
- # link s1 -> s2 went down ( loop timeout 30 seconds )
- # on the ONOS instance
- main.log.info( "Checking ONOS for link update" )
- loop_count = 0
- while( not link_down and loop_count < 30 ):
- json_str = main.ONOS1cli.links()
-
- if not json_str:
- main.log.error( "CLI command returned error " )
- break
- else:
- json_obj = json.loads( json_str )
- for obj in json_obj:
- if '01' not in obj[ 'src' ][ 'device' ]:
- link_down = True
- main.log.report( "Link down from " +
- "s1 -> s2 on ONOS1 detected" )
- loop_count += 1
- # If CLI doesn't like the continuous requests
- # and exits in this loop, increase the sleep here.
- # Consequently, while loop timeout will increase
- time.sleep( 1 )
-
- # Give time for metrics measurement to catch up
- # NOTE: May need to be configured more accurately
- time.sleep( 10 )
- # If we exited the while loop and link_down is still
- # false, then ONOS has failed to discover the link down event
- if not link_down:
- main.log.info( "Link down discovery failed" )
-
- link_down_lat_graph1 = 0
- link_down_lat_link1 = 0
- assertion = main.FALSE
- else:
- json_topo_metrics_1 =\
- main.ONOS1cli.topology_events_metrics()
- json_topo_metrics_1 = json.loads( json_topo_metrics_1 )
-
- main.log.info( "Obtaining graph and device timestamp" )
- graph_timestamp_1 = \
- json_topo_metrics_1[ graphTimestamp ][ 'value' ]
-
- link_timestamp_1 = \
- json_topo_metrics_1[ linkTimestamp ][ 'value' ]
-
- if graph_timestamp_1 and link_timestamp_1:
- link_down_lat_graph1 = int( graph_timestamp_1 ) -\
- timestamp_link_down_t0
-
- link_down_lat_link1 = int( link_timestamp_1 ) -\
- timestamp_link_down_t0
- else:
- main.log.error( "There was an error calculating" +
- " the delta for link down event" )
- link_down_lat_graph1 = 0
-
- link_down_lat_link1 = 0
-
- main.log.report( "Link down latency ONOS1 iteration " +
- str( i ) + " (end-to-end): " +
- str( link_down_lat_graph1 ) + " ms" )
-
- main.log.report( "Link down latency ONOS1 iteration " +
- str( i ) + " (link-event-to-system-timestamp): " +
- str( link_down_lat_link1 ) + " ms" )
-
- # Calculate avg of node calculations
- link_down_lat_graph_avg = link_down_lat_graph1
- link_down_lat_link_avg = link_down_lat_link1
-
- # Set threshold and append latency to list
- if link_down_lat_graph_avg > 0.0 and\
- link_down_lat_graph_avg < 30000:
- link_down_graph_to_system_list.append(
- link_down_lat_graph_avg )
- else:
- main.log.info( "Link down latency exceeded threshold" )
- main.log.info( "Results for iteration " + str( i ) +
- " have been omitted" )
- if link_down_lat_link_avg > 0.0 and\
- link_down_lat_link_avg < 30000:
- link_down_link_to_system_list.append(
- link_down_lat_link_avg )
- else:
- main.log.info( "Link down latency exceeded threshold" )
- main.log.info( "Results for iteration " + str( i ) +
- " have been omitted" )
-
- # NOTE: To remove loss rate and measure latency:
- # 'sh tc qdisc del dev s1-eth1 root'
- timestamp_link_up_t0 = time.time() * 1000
- main.Mininet1.handle.sendline( "sh tc qdisc del dev " +
- "s1-eth1 root" )
- main.Mininet1.handle.expect( "mininet>" )
-
- main.log.info( "Checking ONOS for link update" )
-
- link_down1 = True
- loop_count = 0
- while( link_down1 and loop_count < 30 ):
- json_str1 = main.ONOS1cli.links()
- if not json_str1:
- main.log.error( "CLI command returned error " )
- break
- else:
- json_obj1 = json.loads( json_str1 )
-
- for obj1 in json_obj1:
- if '01' in obj1[ 'src' ][ 'device' ]:
- link_down1 = False
- main.log.report( "Link up from " +
- "s1 -> s2 on ONOS1 detected" )
- loop_count += 1
- time.sleep( 1 )
-
- if link_down1:
- main.log.info( "Link up discovery failed" )
- link_up_lat_graph1 = 0
- link_up_lat_link1 = 0
- assertion = main.FALSE
- else:
- json_topo_metrics_1 =\
- main.ONOS1cli.topology_events_metrics()
- json_topo_metrics_1 = json.loads( json_topo_metrics_1 )
-
- main.log.info( "Obtaining graph and device timestamp" )
- graph_timestamp_1 = \
- json_topo_metrics_1[ graphTimestamp ][ 'value' ]
-
- link_timestamp_1 = \
- json_topo_metrics_1[ linkTimestamp ][ 'value' ]
-
- if graph_timestamp_1 and link_timestamp_1:
- link_up_lat_graph1 = int( graph_timestamp_1 ) -\
- timestamp_link_up_t0
- link_up_lat_link1 = int( link_timestamp_1 ) -\
- timestamp_link_up_t0
- else:
- main.log.error( "There was an error calculating" +
- " the delta for link up event" )
- link_up_lat_graph1 = 0
- link_up_lat_link1 = 0
-
- main.log.info( "Link up latency ONOS1 iteration " +
- str( i ) + " (end-to-end): " +
- str( link_up_lat_graph1 ) + " ms" )
-
- main.log.info( "Link up latency ONOS1 iteration " +
- str( i ) + " (link-event-to-system-timestamp): " +
- str( link_up_lat_link1 ) + " ms" )
-
- # Calculate avg of node calculations
- link_up_lat_graph_avg = link_up_lat_graph1
- link_up_lat_link_avg = link_up_lat_link1
-
- # Set threshold and append latency to list
- if link_up_lat_graph_avg > 0.0 and\
- link_up_lat_graph_avg < 30000:
- link_up_graph_to_system_list.append(
- link_up_lat_graph_avg )
- else:
- main.log.info( "Link up latency exceeded threshold" )
- main.log.info( "Results for iteration " + str( i ) +
- " have been omitted" )
- if link_up_lat_link_avg > 0.0 and\
- link_up_lat_link_avg < 30000:
- link_up_link_to_system_list.append(
- link_up_lat_link_avg )
- else:
- main.log.info( "Link up latency exceeded threshold" )
- main.log.info( "Results for iteration " + str( i ) +
- " have been omitted" )
-
- # Calculate min, max, avg of list and report
- link_down_min = min( link_down_graph_to_system_list )
- link_down_max = max( link_down_graph_to_system_list )
- link_down_avg = sum( link_down_graph_to_system_list ) / \
- len( link_down_graph_to_system_list )
- link_up_min = min( link_up_graph_to_system_list )
- link_up_max = max( link_up_graph_to_system_list )
- link_up_avg = sum( link_up_graph_to_system_list ) / \
- len( link_up_graph_to_system_list )
-
- main.log.report( "Link down latency - Min: " +
- str( link_down_min ) + "ms Max: " +
- str( link_down_max ) + "ms Avg: " +
- str( link_down_avg ) + "ms" )
- main.log.report( "Link up latency - Min: " +
- str( link_up_min ) + "ms Max: " +
- str( link_up_max ) + "ms Avg: " +
- str( link_up_avg ) + "ms" )
-
- utilities.assert_equals(
- expect=main.TRUE,
- actual=assertion,
- onpass="Link discovery latency calculation successful",
- onfail="Link discovery latency case failed" )
-
- def CASE5( self, main ):
- """
- 100 Switch discovery latency
-
- Important:
- This test case can be potentially dangerous if
- your machine has previously set iptables rules.
- One of the steps of the test case will flush
- all existing iptables rules.
- Note:
- You can specify the number of switches in the
- params file to adjust the switch discovery size
- ( and specify the corresponding topology under the
- Mininet1 component of the .topo file )
- """
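This case gates the switch connections with iptables so that every switch reconnects at the same moment when the rules are flushed. A sketch of a helper covering the commands issued below (hedged — it assumes the same handle, prompt and DROP-rule form used in this case):

    def gate_switch_connections( onos_handle, mn_ip, of_port, block ):
        # Add DROP rules for traffic from the Mininet host, or flush them.
        if block:
            for chain in ( "INPUT", "OUTPUT" ):
                onos_handle.sendline(
                    "sudo iptables -A " + chain + " -p tcp -s " + mn_ip +
                    " --dport " + of_port + " -j DROP" )
                onos_handle.expect( "\$" )
        else:
            onos_handle.sendline( "sudo iptables -F" )
            onos_handle.expect( "\$" )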
- import time
- import subprocess
- import os
- import requests
- import json
-
- ONOS1_ip = main.params[ 'CTRL' ][ 'ip1' ]
- MN1_ip = main.params[ 'MN' ][ 'ip1' ]
- ONOS_user = main.params[ 'CTRL' ][ 'user' ]
-
- default_sw_port = main.params[ 'CTRL' ][ 'port1' ]
-
- # Number of iterations of case
- num_iter = main.params[ 'TEST' ][ 'numIter' ]
- num_sw = main.params[ 'TEST' ][ 'numSwitch' ]
-
- # Timestamp 'keys' for json metrics output.
- # These are subject to change, hence moved into params
- deviceTimestamp = main.params[ 'JSON' ][ 'deviceTimestamp' ]
- graphTimestamp = main.params[ 'JSON' ][ 'graphTimestamp' ]
-
- tshark_ofp_output = "/tmp/tshark_ofp_" + num_sw + "sw.txt"
- tshark_tcp_output = "/tmp/tshark_tcp_" + num_sw + "sw.txt"
-
- tshark_ofp_result_list = []
- tshark_tcp_result_list = []
-
- main.case( num_sw + " Switch discovery latency" )
- main.step( "Assigning all switches to ONOS1" )
- for i in range( 1, int( num_sw ) + 1 ):
- main.Mininet1.assign_sw_controller(
- sw=str( i ),
- ip1=ONOS1_ip,
- port1=default_sw_port )
-
- # Ensure that nodes are configured with ptpd
- # Just a warning message
- main.log.info( "Please check ptpd configuration to ensure" +
- " all nodes' system times are in sync" )
- time.sleep( 5 )
-
- for i in range( 0, int( num_iter ) ):
-
- main.step( "Set iptables rule to block incoming sw connections" )
- # Set iptables rule to block incoming switch connections
- # The rule description is as follows:
- # Append to INPUT rule,
- # behavior DROP that matches following:
- # * packet type: tcp
- # * source IP: MN1_ip
- # * destination PORT: 6633
- main.ONOS1.handle.sendline(
- "sudo iptables -A INPUT -p tcp -s " + MN1_ip +
- " --dport " + default_sw_port + " -j DROP" )
- main.ONOS1.handle.expect( "\$" )
- # Append to OUTPUT rule,
- # behavior DROP that matches following:
- # * packet type: tcp
- # * source IP: MN1_ip
- # * destination PORT: 6633
- main.ONOS1.handle.sendline(
- "sudo iptables -A OUTPUT -p tcp -s " + MN1_ip +
- " --dport " + default_sw_port + " -j DROP" )
- main.ONOS1.handle.expect( "\$" )
- # Give time to allow rule to take effect
- # NOTE: Sleep period may need to be configured
- # based on the number of switches in the topology
- main.log.info( "Please wait for switch connection to " +
- "time out" )
- time.sleep( 60 )
-
- # Capture OFP Vendor and TCP 74 packets with tshark
- main.ONOS1.tshark_grep( "OFP 86 Vendor",
- tshark_ofp_output )
- main.ONOS1.tshark_grep( "TCP 74 ",
- tshark_tcp_output )
-
- # NOTE: Remove all iptables rules quickly ( flush )
- # Before the flush, obtain the TestON timestamp at
- # which it is issued
- # ( this assumes node clocks are synced via ptpd )
- # sudo iptables -F
-
- t0_system = time.time() * 1000
- main.ONOS1.handle.sendline(
- "sudo iptables -F" )
-
- # Counter to track loop count
- counter_loop = 0
- counter_avail1 = 0
- onos1_dev = False
- while counter_loop < 60:
- # Poll the device list until every device reports
- # as available. Once all devices on ONOS1 are
- # available, obtain the graph event timestamp for t1.
- # Reset the per-pass count to avoid stale carry-over
- counter_avail1 = 0
- device_str_obj1 = main.ONOS1cli.devices()
- device_json1 = json.loads( device_str_obj1 )
-
- for device1 in device_json1:
- if device1[ 'available' ]:
- counter_avail1 += 1
- if counter_avail1 == int( num_sw ):
- onos1_dev = True
- main.log.info( "All devices have been " +
- "discovered on ONOS1" )
- else:
- counter_avail1 = 0
-
- if onos1_dev:
- main.log.info( "All devices have been discovered " +
- "on all ONOS instances" )
- json_str_topology_metrics_1 =\
- main.ONOS1cli.topology_events_metrics()
- # Exit while loop if all devices discovered
- break
-
- counter_loop += 1
- # Give some time in between CLI calls
- # ( will not affect measurement )
- time.sleep( 3 )
-
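The availability poll above is hard-wired to ONOS1; if the single-node and multi-node variants of this test were ever merged, the loop could be factored into a helper called once per CLI handle. This is a sketch only: poll_all_devices_available is a hypothetical name, and the 60-attempt / 3-second budget mirrors the loop above.

    import json
    import time

    def poll_all_devices_available( cli, num_sw, attempts=60, interval=3 ):
        """Return True once num_sw devices report 'available' on this CLI."""
        for _ in range( attempts ):
            devices = json.loads( cli.devices() )
            available = [ d for d in devices if d[ 'available' ] ]
            if len( available ) == int( num_sw ):
                return True
            time.sleep( interval )
        return False

The case would then fetch topology_events_metrics() only when the helper returns True, and log an error otherwise.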
- main.ONOS1.tshark_stop()
-
- os.system( "scp " + ONOS_user + "@" + ONOS1_ip + ":" +
- tshark_ofp_output + " /tmp/" )
- os.system( "scp " + ONOS_user + "@" + ONOS1_ip + ":" +
- tshark_tcp_output + " /tmp/" )
- ofp_file = open( tshark_ofp_output, 'r' )
-
- # The following is for information purpose only.
- # TODO: Automate OFP output analysis
- main.log.info( "Tshark OFP Vendor output: " )
- for line in ofp_file:
- tshark_ofp_result_list.append( line )
- main.log.info( line )
-
- ofp_file.close()
-
- tcp_file = open( tshark_tcp_output, 'r' )
- main.log.info( "Tshark TCP 74 output: " )
- for line in tcp_file:
- tshark_tcp_result_list.append( line )
- main.log.info( line )
-
- tcp_file.close()
-
- # Only compute latency if all devices were discovered;
- # otherwise the metrics string was never captured
- if onos1_dev:
- json_obj_1 = json.loads( json_str_topology_metrics_1 )
- graph_timestamp_1 = \
- json_obj_1[ graphTimestamp ][ 'value' ]
- main.log.info( "Switch discovery latency ( graph to system ): " +
- str( int( graph_timestamp_1 ) - int( t0_system ) ) +
- " ms" )
- else:
- main.log.error( "Not all switches were discovered; " +
- "skipping latency calculation" )
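The TODO above leaves the tshark OFP capture unanalyzed; the lines are only appended to a list and logged. Below is a minimal parsing sketch, assuming tshark's default summary output in which the second whitespace-separated column is the capture time in seconds (the exact column layout depends on how tshark_grep invokes tshark, so treat the index as an assumption).

    def first_capture_time( tshark_output_path ):
        """Return the time column of the first captured packet, or None."""
        with open( tshark_output_path, 'r' ) as capture:
            for line in capture:
                fields = line.split()
                if len( fields ) > 1:
                    try:
                        return float( fields[ 1 ] )
                    except ValueError:
                        continue
        return None

Comparing the first "OFP 86 Vendor" time against the first "TCP 74 " time would give a capture-side view of the delay between the TCP handshake and the first vendor message, complementing the graph-timestamp metric computed above.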
diff --git a/TestON/tests/TopoPerfNextSingleNode/TopoPerfNextSingleNode.topo b/TestON/tests/TopoPerfNextSingleNode/TopoPerfNextSingleNode.topo
deleted file mode 100644
index 3fc7bdc..0000000
--- a/TestON/tests/TopoPerfNextSingleNode/TopoPerfNextSingleNode.topo
+++ /dev/null
@@ -1,55 +0,0 @@
-<TOPOLOGY>
- <COMPONENT>
-
- <ONOSbench>
- <host>10.128.174.10</host>
- <user>admin</user>
- <password>onos_test</password>
- <type>OnosDriver</type>
- <connect_order>1</connect_order>
- <COMPONENTS> </COMPONENTS>
- </ONOSbench>
-
- <ONOS1cli>
- <host>10.128.174.10</host>
- <user>admin</user>
- <password>onos_test</password>
- <type>OnosCliDriver</type>
- <connect_order>2</connect_order>
- <COMPONENTS> </COMPONENTS>
- </ONOS1cli>
-
- <ONOS1>
- <host>10.128.174.1</host>
- <user>admin</user>
- <password>onos_test</password>
- <type>OnosDriver</type>
- <connect_order>3</connect_order>
- <COMPONENTS> </COMPONENTS>
- </ONOS1>
-
- <Mininet1>
- <host>10.128.10.90</host>
- <user>admin</user>
- <password>onos_test</password>
- <type>MininetCliDriver</type>
- <connect_order>4</connect_order>
- <COMPONENTS>
- <arg1> --custom topo-100sw.py </arg1>
- <arg2> --arp --mac --topo mytopo</arg2>
- <arg3> </arg3>
- <controller> remote </controller>
- </COMPONENTS>
- </Mininet1>
-
- <Mininet2>
- <host>10.128.10.90</host>
- <user>admin</user>
- <password>onos_test</password>
- <type>RemoteMininetDriver</type>
- <connect_order>5</connect_order>
- <COMPONENTS> </COMPONENTS>
- </Mininet2>
-
- </COMPONENT>
-</TOPOLOGY>
diff --git a/TestON/tests/TopoPerfNextSingleNode/__init__.py b/TestON/tests/TopoPerfNextSingleNode/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/TestON/tests/TopoPerfNextSingleNode/__init__.py
+++ /dev/null