Merge "remove unneeded keyword arguments from send() call"
diff --git a/TestON/drivers/common/api/controller/onosrestdriver.py b/TestON/drivers/common/api/controller/onosrestdriver.py
index 50e4847..1b7c8fd 100644
--- a/TestON/drivers/common/api/controller/onosrestdriver.py
+++ b/TestON/drivers/common/api/controller/onosrestdriver.py
@@ -75,7 +75,7 @@
main.log.exception( "Error parsing jsonObject" )
return None
- def send( self, url, base="/onos/v1", method="GET",
+ def send( self, url, ip = "DEFAULT", port = "DEFAULT", base="/onos/v1", method="GET",
query=None, data=None, debug=False ):
"""
Arguments:
@@ -94,8 +94,13 @@
# TODO: should we maybe just pass kwargs straight to response?
# TODO: Do we need to allow for other protocols besides http?
# ANSWER: Not yet, but potentially https with certificates
- ip = self.ip_address
- port = self.port
+ if ip == "DEFAULT":
+ main.log.warn( "No ip given, reverting to ip from topo file" )
+ ip = self.ip_address
+ if port == "DEFAULT":
+ main.log.warn( "No port given, reverting to port " +
+ "from topo file" )
+ port = self.port
try:
path = "http://" + str( ip ) + ":" + str( port ) + base + url
@@ -140,7 +145,7 @@
main.log.warn( "No port given, reverting to port " +
"from topo file" )
port = self.port
- response = self.send( url="/intents" )
+ response = self.send( url="/intents", ip = ip, port = port )
if response:
if 200 <= response[ 0 ] <= 299:
output = response[ 1 ]
@@ -188,7 +193,7 @@
port = self.port
# NOTE: REST url requires the intent id to be in decimal form
query = "/" + str( appId ) + "/" + str( intentId )
- response = self.send( url="/intents" + query )
+ response = self.send( url="/intents" + query, ip = ip, port = port )
if response:
if 200 <= response[ 0 ] <= 299:
output = response[ 1 ]
@@ -251,7 +256,7 @@
main.log.warn( "No port given, reverting to port " +
"from topo file" )
port = self.port
- response = self.send( url="/applications" )
+ response = self.send( url="/applications", ip = ip, port = port )
if response:
if 200 <= response[ 0 ] <= 299:
output = response[ 1 ]
@@ -295,7 +300,8 @@
port = self.port
query = "/" + str( appName ) + "/active"
response = self.send( method="POST",
- url="/applications" + query )
+ url="/applications" + query,
+ ip = ip, port = port )
if response:
output = response[ 1 ]
app = json.loads( output )
@@ -351,7 +357,8 @@
port = self.port
query = "/" + str( appName ) + "/active"
response = self.send( method="DELETE",
- url="/applications" + query )
+ url="/applications" + query,
+ ip = ip, port = port )
if response:
output = response[ 1 ]
app = json.loads( output )
@@ -404,7 +411,8 @@
"from topo file" )
port = self.port
query = "/" + project + str( appName )
- response = self.send( url="/applications" + query )
+ response = self.send( url="/applications" + query,
+ ip = ip, port = port )
if response:
if 200 <= response[ 0 ] <= 299:
output = response[ 1 ]
@@ -455,7 +463,7 @@
"from topo file" )
port = self.port
response = self.send( method="POST",
- url="/intents",
+ url="/intents", ip = ip, port = port,
data=json.dumps( intentJson ) )
if response:
if 201:
@@ -605,7 +613,7 @@
"from topo file" )
port = self.port
response = self.send( method="POST",
- url="/intents",
+ url="/intents", ip = ip, port = port,
data=json.dumps( intentJson ) )
if response:
if 201:
@@ -644,7 +652,7 @@
# NOTE: REST url requires the intent id to be in decimal form
query = "/" + str( appId ) + "/" + str( int( intentId, 16 ) )
response = self.send( method="DELETE",
- url="/intents" + query )
+ url="/intents" + query, ip = ip, port = port )
if response:
if 200 <= response[ 0 ] <= 299:
return main.TRUE
@@ -744,7 +752,7 @@
main.log.warn( "No port given, reverting to port " +
"from topo file" )
port = self.port
- response = self.send( url="/hosts" )
+ response = self.send( url="/hosts", ip = ip, port = port )
if response:
if 200 <= response[ 0 ] <= 299:
output = response[ 1 ]
@@ -790,7 +798,7 @@
"from topo file" )
port = self.port
query = "/" + mac + "/" + vlan
- response = self.send( url="/hosts" + query )
+ response = self.send( url="/hosts" + query, ip = ip, port = port )
if response:
# NOTE: What if the person wants other values? would it be better
# to have a function that gets a key and return a value instead?
@@ -829,7 +837,7 @@
main.log.warn( "No port given, reverting to port " +
"from topo file" )
port = self.port
- response = self.send( url="/topology" )
+ response = self.send( url="/topology", ip = ip, port = port )
if response:
if 200 <= response[ 0 ] <= 299:
output = response[ 1 ]
@@ -866,7 +874,7 @@
main.log.warn( "No port given, reverting to port " +
"from topo file" )
port = self.port
- response = self.send( url="/devices" )
+ response = self.send( url="/devices", ip = ip, port = port )
if response:
if 200 <= response[ 0 ] <= 299:
output = response[ 1 ]
@@ -1029,7 +1037,7 @@
main.log.warn( "No port given, reverting to port " +
"from topo file" )
port = self.port
- response = self.send( url="/flows" )
+ response = self.send( url="/flows", ip = ip, port = port )
if response:
if 200 <= response[ 0 ] <= 299:
output = response[ 1 ]
@@ -1072,7 +1080,7 @@
if flowId:
url += "/" + str( int( flowId ) )
print url
- response = self.send( url=url )
+ response = self.send( url=url, ip = ip, port = port )
if response:
if 200 <= response[ 0 ] <= 299:
output = response[ 1 ]
@@ -1121,7 +1129,7 @@
port = self.port
url = "/flows/" + deviceId
response = self.send( method="POST",
- url=url,
+ url=url, ip = ip, port = port,
data=json.dumps( flowJson ) )
if response:
if 201:
@@ -1288,7 +1296,7 @@
# NOTE: REST url requires the intent id to be in decimal form
query = "/" + str( deviceId ) + "/" + str( int( flowId ) )
response = self.send( method="DELETE",
- url="/flows" + query )
+ url="/flows" + query, ip = ip, port = port )
if response:
if 200 <= response[ 0 ] <= 299:
return main.TRUE
@@ -1358,7 +1366,7 @@
url += "/" + subjectKey
if configKey:
url += "/" + configKey
- response = self.send( url=url )
+ response = self.send( url=url, ip = ip, port = port )
if response:
if 200 <= response[ 0 ] <= 299:
output = response[ 1 ]
@@ -1409,7 +1417,7 @@
if configKey:
url += "/" + configKey
response = self.send( method="POST",
- url=url,
+ url=url, ip = ip, port = port,
data=json.dumps( cfgJson ) )
if response:
if 200 <= response[ 0 ] <= 299:
@@ -1455,7 +1463,7 @@
if configKey:
url += "/" + configKey
response = self.send( method="DELETE",
- url=url )
+ url=url, ip = ip, port = port )
if response:
if 200 <= response[ 0 ] <= 299:
main.log.info( self.name + ": Successfully delete cfg" )
@@ -1659,7 +1667,7 @@
port = self.port
url = "/flows/"
response = self.send( method="POST",
- url=url,
+ url=url, ip = ip, port = port,
data=json.dumps( batch ) )
#main.log.info("Post response is: ", str(response[0]))
if response[0] == 200:
@@ -1702,7 +1710,7 @@
# NOTE: REST url requires the intent id to be in decimal form
response = self.send( method="DELETE",
- url="/flows/",
+ url="/flows/", ip = ip, port = port,
data = json.dumps(batch) )
if response:
if 200 <= response[ 0 ] <= 299:
diff --git a/TestON/drivers/common/api/dockerapidriver.py b/TestON/drivers/common/api/dockerapidriver.py
index 2c1520d..4a874c6 100644
--- a/TestON/drivers/common/api/dockerapidriver.py
+++ b/TestON/drivers/common/api/dockerapidriver.py
@@ -251,7 +251,7 @@
command = "{}/onos-form-cluster -u {} -p {} {}".format( cmdPath,
user,
passwd,
- onosIps )
+ onosIPs )
result = subprocess.call( command, shell=True )
if result == 0:
return main.TRUE
diff --git a/TestON/drivers/common/cli/emulator/mininetclidriver.py b/TestON/drivers/common/cli/emulator/mininetclidriver.py
index c22f67b..8165d3a 100644
--- a/TestON/drivers/common/cli/emulator/mininetclidriver.py
+++ b/TestON/drivers/common/cli/emulator/mininetclidriver.py
@@ -617,6 +617,60 @@
main.cleanup()
main.exit()
+ def pingHostSetAlternative( self, dstIPList, wait=1, IPv6=False ):
+ """
+ Description:
+ Ping a set of destination hosts from a host CLI.
+ Logging into a Mininet host CLI is required before calling this function.
+ Params:
+ dstIPList is a list of destination ip addresses
+ Returns:
+ main.TRUE if all destination hosts are reachable
+ main.FALSE otherwise
+ """
+ isReachable = main.TRUE
+ wait = int( wait )
+ cmd = "ping"
+ if IPv6:
+ cmd = cmd + "6"
+ cmd = cmd + " -c 1 -i 1 -W " + str( wait )
+ try:
+ for dstIP in dstIPList:
+ pingCmd = cmd + " " + dstIP
+ self.handle.sendline( pingCmd )
+ i = self.handle.expect( [ self.hostPrompt,
+ '\*\*\* Unknown command: ' + pingCmd,
+ pexpect.TIMEOUT ],
+ timeout=wait + 1 )
+ if i == 0:
+ response = self.handle.before
+ if not re.search( ',\s0\%\spacket\sloss', response ):
+ main.log.debug( "Ping failed between %s and %s" % ( self.name, dstIP ) )
+ isReachable = main.FALSE
+ elif i == 1:
+ main.log.error( self.name + ": function should be called from host CLI instead of Mininet CLI" )
+ main.cleanup()
+ main.exit()
+ elif i == 2:
+ main.log.error( self.name + ": timeout when waiting for response" )
+ isReachable = main.FALSE
+ else:
+ main.log.error( self.name + ": unknown response: " + self.handle.before )
+ isReachable = main.FALSE
+ except pexpect.TIMEOUT:
+ main.log.exception( self.name + ": TIMEOUT exception" )
+ isReachable = main.FALSE
+ except pexpect.EOF:
+ main.log.error( self.name + ": EOF exception found" )
+ main.log.error( self.name + ": " + self.handle.before )
+ main.cleanup()
+ main.exit()
+ except Exception:
+ main.log.exception( self.name + ": Uncaught exception!" )
+ main.cleanup()
+ main.exit()
+ return isReachable
+
def checkIP( self, host ):
"""
Verifies the host's ip configured or not."""
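A minimal usage sketch for the new helper (illustrative only: `h1` is assumed to be a Mininet host component whose CLI was already started with startHostCli(), and `main` is TestON's global test object):

    # Ping two hypothetical destination addresses from the h1 host CLI;
    # the helper returns main.FALSE if any destination reports packet loss.
    reachable = h1.pingHostSetAlternative( [ "10.1.0.2", "10.1.0.3" ], wait=1 )
    if reachable == main.TRUE:
        main.log.info( "All destinations reachable from h1" )
    else:
        main.log.warn( "At least one destination was unreachable from h1" )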
diff --git a/TestON/drivers/common/cli/onosclidriver.py b/TestON/drivers/common/cli/onosclidriver.py
index b487096..0822879 100644
--- a/TestON/drivers/common/cli/onosclidriver.py
+++ b/TestON/drivers/common/cli/onosclidriver.py
@@ -382,11 +382,12 @@
main.cleanup()
main.exit()
- def log( self, cmdStr, level="" ):
+ def log( self, cmdStr, level="", noExit=False ):
"""
log the commands in the onos CLI.
returns main.TRUE on success
returns main.FALSE if Error occurred
+ if noExit is True, TestON will clean up but not exit
Available level: DEBUG, TRACE, INFO, WARN, ERROR
Level defaults to INFO
"""
@@ -424,31 +425,47 @@
return main.TRUE
except pexpect.TIMEOUT:
main.log.exception( self.name + ": TIMEOUT exception found" )
- main.cleanup()
- main.exit()
+ if noExit:
+ main.cleanup()
+ return None
+ else:
+ main.cleanup()
+ main.exit()
except pexpect.EOF:
main.log.error( self.name + ": EOF exception found" )
main.log.error( self.name + ": " + self.handle.before )
- main.cleanup()
- main.exit()
+ if noExit:
+ main.cleanup()
+ return None
+ else:
+ main.cleanup()
+ main.exit()
except Exception:
main.log.exception( self.name + ": Uncaught exception!" )
- main.cleanup()
- main.exit()
+ if noExit:
+ main.cleanup()
+ return None
+ else:
+ main.cleanup()
+ main.exit()
- def sendline( self, cmdStr, showResponse=False, debug=False, timeout=10 ):
+ def sendline( self, cmdStr, showResponse=False, debug=False, timeout=10, noExit=False ):
"""
Send a completely user specified string to
the onos> prompt. Use this function if you have
a very specific command to send.
+ if noExit is True, TestON will clean up but not exit
+
Warning: There are no sanity checking to commands
sent using this method.
"""
try:
- logStr = "\"Sending CLI command: '" + cmdStr + "'\""
- self.log( logStr )
+ if debug:
+ # NOTE: This adds an average of .4 seconds per call
+ logStr = "\"Sending CLI command: '" + cmdStr + "'\""
+ self.log( logStr, noExit=noExit )
self.handle.sendline( cmdStr )
i = self.handle.expect( ["onos>", "\$"], timeout )
response = self.handle.before
@@ -503,12 +520,20 @@
except pexpect.EOF:
main.log.error( self.name + ": EOF exception found" )
main.log.error( self.name + ": " + self.handle.before )
- main.cleanup()
- main.exit()
+ if noExit:
+ main.cleanup()
+ return None
+ else:
+ main.cleanup()
+ main.exit()
except Exception:
main.log.exception( self.name + ": Uncaught exception!" )
- main.cleanup()
- main.exit()
+ if noExit:
+ main.cleanup()
+ return None
+ else:
+ main.cleanup()
+ main.exit()
# IMPORTANT NOTE:
# For all cli commands, naming convention should match
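A hedged sketch of how the new noExit flag might be used by a caller (assumes `onosCli` is an OnosCliDriver component and `main` is TestON's global test object):

    # With noExit=True, a pexpect TIMEOUT/EOF leads to cleanup and a None
    # return value instead of aborting the entire test run.
    summary = onosCli.sendline( "summary -j", timeout=30, noExit=True )
    if summary is None:
        main.log.warn( "summary command failed; skipping this check" )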
@@ -1113,11 +1138,13 @@
main.cleanup()
main.exit()
- def addHostIntent( self, hostIdOne, hostIdTwo ):
+ def addHostIntent( self, hostIdOne, hostIdTwo, vlanId="" ):
"""
Required:
* hostIdOne: ONOS host id for host1
* hostIdTwo: ONOS host id for host2
+ Optional:
+ * vlanId: specify a VLAN id for the intent
Description:
Adds a host-to-host intent ( bidirectional ) by
specifying the two hosts.
@@ -1125,8 +1152,10 @@
A string of the intent id or None on Error
"""
try:
- cmdStr = "add-host-intent " + str( hostIdOne ) +\
- " " + str( hostIdTwo )
+ cmdStr = "add-host-intent "
+ if vlanId:
+ cmdStr += "-v " + str( vlanId ) + " "
+ cmdStr += str( hostIdOne ) + " " + str( hostIdTwo )
handle = self.sendline( cmdStr )
assert "Command not found:" not in handle, handle
if re.search( "Error", handle ):
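As an illustration of the new option (a sketch only; `onosCli` is an OnosCliDriver component and the host IDs are placeholders obtained elsewhere, e.g. from hosts()):

    # Builds "add-host-intent -v 100 <hostIdOne> <hostIdTwo>" on the ONOS CLI
    # and returns the intent id string, or None on error.
    intentId = onosCli.addHostIntent( hostIdOne, hostIdTwo, vlanId="100" )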
@@ -1222,7 +1251,8 @@
ipSrc="",
ipDst="",
tcpSrc="",
- tcpDst="" ):
+ tcpDst="",
+ vlanId="" ):
"""
Required:
* ingressDevice: device id of ingress device
@@ -1239,6 +1269,7 @@
* ipDst: specify ip destination address
* tcpSrc: specify tcp source port
* tcpDst: specify tcp destination port
+ * vlanId: specify vlan ID
Description:
Adds a point-to-point intent ( uni-directional ) by
specifying device id's and optional fields
@@ -1280,6 +1311,8 @@
cmd += " --tcpSrc " + str( tcpSrc )
if tcpDst:
cmd += " --tcpDst " + str( tcpDst )
+ if vlanId:
+ cmd += " -v " + str( vlanId )
# Check whether the user appended the port
# or provided it as an input
@@ -1358,7 +1391,8 @@
tcpSrc="",
tcpDst="",
setEthSrc="",
- setEthDst="" ):
+ setEthDst="",
+ vlanId="" ):
"""
Note:
This function assumes the format of all ingress devices
@@ -1383,6 +1417,7 @@
* tcpDst: specify tcp destination port
* setEthSrc: action to Rewrite Source MAC Address
* setEthDst: action to Rewrite Destination MAC Address
+ * vlanId: specify vlan Id
Description:
Adds a multipoint-to-singlepoint intent ( uni-directional ) by
specifying device id's and optional fields
@@ -1429,6 +1464,8 @@
cmd += " --setEthSrc " + str( setEthSrc )
if setEthDst:
cmd += " --setEthDst " + str( setEthDst )
+ if vlanId:
+ cmd += " -v " + str( vlanId )
# Check whether the user appended the port
# or provided it as an input
@@ -1511,7 +1548,8 @@
tcpSrc="",
tcpDst="",
setEthSrc="",
- setEthDst="" ):
+ setEthDst="",
+ vlanId="" ):
"""
Note:
This function assumes the format of all egress devices
@@ -1536,6 +1574,7 @@
* tcpDst: specify tcp destination port
* setEthSrc: action to Rewrite Source MAC Address
* setEthDst: action to Rewrite Destination MAC Address
+ * vlanId: specify vlan Id
Description:
Adds a singlepoint-to-multipoint intent ( uni-directional ) by
specifying device id's and optional fields
@@ -1582,6 +1621,8 @@
cmd += " --setEthSrc " + str( setEthSrc )
if setEthDst:
cmd += " --setEthDst " + str( setEthDst )
+ if vlanId:
+ cmd += " -v " + str( vlanId )
# Check whether the user appended the port
# or provided it as an input
@@ -2182,8 +2223,11 @@
main.log.exception( self.name + ": Uncaught exception!" )
main.cleanup()
main.exit()
+ except pexpect.TIMEOUT:
+ main.log.error( self.name + ": ONOS timeout" )
+ return None
- def flows( self, state="", jsonFormat=True, timeout=60 ):
+ def flows( self, state="", jsonFormat=True, timeout=60, noExit=False ):
"""
Optional:
* jsonFormat: enable output formatting in json
@@ -2195,7 +2239,7 @@
if jsonFormat:
cmdStr += " -j "
cmdStr += state
- handle = self.sendline( cmdStr, timeout=timeout )
+ handle = self.sendline( cmdStr, timeout=timeout, noExit=noExit )
assert "Command not found:" not in handle, handle
if re.search( "Error:", handle ):
main.log.error( self.name + ": flows() response: " +
@@ -2224,7 +2268,7 @@
count = int(self.getTotalFlowsNum( timeout=timeout ))
return count if (count > min) else False
- def checkFlowsState( self, isPENDING=True, timeout=60 ):
+ def checkFlowsState( self, isPENDING=True, timeout=60, noExit=False ):
"""
Description:
Check the if all the current flows are in ADDED state
@@ -2273,9 +2317,13 @@
main.log.exception( self.name + ": Uncaught exception!" )
main.cleanup()
main.exit()
+ except pexpect.TIMEOUT:
+ main.log.error( self.name + ": ONOS timeout" )
+ return None
+
def pushTestIntents( self, ingress, egress, batchSize, offset="",
- options="", timeout=10, background = False ):
+ options="", timeout=10, background = False, noExit=False ):
"""
Description:
Push a number of intents in a batch format to
@@ -2304,7 +2352,7 @@
batchSize,
offset,
back )
- response = self.sendline( cmd, timeout=timeout )
+ response = self.sendline( cmd, timeout=timeout, noExit=noExit )
assert "Command not found:" not in response, response
main.log.info( response )
if response == None:
@@ -2332,7 +2380,7 @@
main.cleanup()
main.exit()
- def getTotalFlowsNum( self, timeout=60 ):
+ def getTotalFlowsNum( self, timeout=60, noExit=False ):
"""
Description:
Get the number of ADDED flows.
@@ -2343,7 +2391,7 @@
try:
# get total added flows number
cmd = "flows -s|grep ADDED|wc -l"
- totalFlows = self.sendline( cmd, timeout=timeout )
+ totalFlows = self.sendline( cmd, timeout=timeout, noExit=noExit )
if totalFlows == None:
# if timeout, we will get total number of all flows, and subtract other states
@@ -2353,7 +2401,7 @@
statesCount = [0, 0, 0, 0]
# get total flows from summary
- response = json.loads( self.sendline( "summary -j", timeout=timeout ) )
+ response = json.loads( self.sendline( "summary -j", timeout=timeout, noExit=noExit ) )
totalFlows = int( response.get("flows") )
for s in states:
@@ -2389,8 +2437,11 @@
main.log.exception( self.name + ": Uncaught exception!" )
main.cleanup()
main.exit()
+ except pexpect.TIMEOUT:
+ main.log.error( self.name + ": ONOS timeout" )
+ return None
- def getTotalIntentsNum( self ):
+ def getTotalIntentsNum( self, timeout=60 ):
"""
Description:
Get the total number of intents, include every states.
@@ -2399,7 +2450,7 @@
"""
try:
cmd = "summary -j"
- response = self.sendline( cmd )
+ response = self.sendline( cmd, timeout=timeout )
if response == None:
return -1
response = json.loads( response )
@@ -4321,6 +4372,7 @@
return None
pattern = "Key-value pair \(" + keyName + ", (?P<value>.+)\) found."
if "Key " + keyName + " not found." in output:
+ main.log.warn( output )
return None
else:
match = re.search( pattern, output )
diff --git a/TestON/tests/CHO/CHOtest/CHOtest.params b/TestON/tests/CHO/CHOtest/CHOtest.params
index 45d0fef..9cf0709 100644
--- a/TestON/tests/CHO/CHOtest/CHOtest.params
+++ b/TestON/tests/CHO/CHOtest/CHOtest.params
@@ -18,7 +18,7 @@
# 19X. IPv6 ping across Point,Multi-single,Single-Multi Intents
<testcases>
- 1,21,3,48,148,[5,61,161,72,172,82,182,10,5,91,191,73,173,83,183,10]*20,200,20,3,47,147,[5,60,160,70,170,80,180,10,5,90,190,71,171,81,181,10]*20,200,22,3,49,149,[5,62,162,74,174,84,184,10,5,92,192,75,175,85,185,10]*20
+ 1,20,3,47,147,[5,60,160,70,170,80,180,10,5,90,190,71,171,81,181,10]*500
</testcases>
<DEPENDENCY>
diff --git a/TestON/tests/CHOTestMonkey/CHOTestMonkey.params b/TestON/tests/CHOTestMonkey/CHOTestMonkey.params
new file mode 100644
index 0000000..7d7457b
--- /dev/null
+++ b/TestON/tests/CHOTestMonkey/CHOTestMonkey.params
@@ -0,0 +1,378 @@
+<PARAMS>
+ # 0. Initialize CHOTestMonkey
+ # 1. Load topology and balances all switches
+ # 2. Collect and store device and link data from ONOS
+ # 3. Collect and store host data from ONOS
+ # 10. Run all enabled checks
+ # 20. Bring down/up links and check topology and ping
+ # 21. Bring down/up a group of links and check topology and ping
+ # 30. Install host intents and check intent states and ping
+ # 31. Uninstall host intents and check intent states
+ # 32. Install point intents and check intent states and ping
+ # 33. Uninstall point intents and check intent states
+ # 40. Randomly bring down one ONOS node
+ # 41. Randomly bring up one ONOS node that is down
+ # 50. Set FlowObjective to True
+ # 51. Set FlowObjective to False
+ # 60. Rebalance devices across controllers
+ # 90. Sleep for some time
+ # 100. Do something else
+ # Sample sequence: 0,1,2,3,[10,30,21,31,10,32,21,33,50,10,30,21,31,10,32,21,33,51,40,60,10,30,21,31,10,32,21,33,50,10,30,21,31,10,32,21,33,51,41,60]*500,100
+ <testcases>
+ 0,1,2,3,100,[10,40,60,10,30,21,31,41,90,60]*500,100
+ </testcases>
+
+ <TEST>
+ <topo>1</topo>
+ <IPv6>on</IPv6>
+ <numCtrl>3</numCtrl>
+ <pauseTest>off</pauseTest>
+ <caseSleep>0</caseSleep>
+ <setIPv6CfgSleep>5</setIPv6CfgSleep>
+ <loadTopoSleep>5</loadTopoSleep>
+ <ipv6Prefix>1000::</ipv6Prefix>
+ <ipv4Prefix>10.1.</ipv4Prefix>
+ <karafCliTimeout>7200000</karafCliTimeout>
+ <testDuration>86400</testDuration>
+ <package>off</package>
+ <autoPull>off</autoPull>
+ <branch>master</branch>
+ </TEST>
+
+ <EVENT>
+ <Event>
+ <status>on</status>
+ <typeIndex>0</typeIndex>
+ <typeString>NULL</typeString>
+ <CLI>null</CLI>
+ <CLIParamNum>0</CLIParamNum>
+ <rerunInterval>5</rerunInterval>
+ <maxRerunNum>5</maxRerunNum>
+ </Event>
+
+ <TestPause>
+ <status>on</status>
+ <typeIndex>1</typeIndex>
+ <typeString>TEST_PAUSE</typeString>
+ <CLI>pause-test</CLI>
+ <CLIParamNum>0</CLIParamNum>
+ </TestPause>
+
+ <TestResume>
+ <status>on</status>
+ <typeIndex>2</typeIndex>
+ <typeString>TEST_RESUME</typeString>
+ <CLI>resume-test</CLI>
+ <CLIParamNum>0</CLIParamNum>
+ </TestResume>
+
+ <TestSleep>
+ <status>on</status>
+ <typeIndex>3</typeIndex>
+ <typeString>TEST_SLEEP</typeString>
+ <CLI>sleep</CLI>
+ <CLIParamNum>1</CLIParamNum>
+ </TestSleep>
+
+ <IntentCheck>
+ <status>on</status>
+ <typeIndex>10</typeIndex>
+ <typeString>CHECK_INTENT</typeString>
+ <CLI>check-intent</CLI>
+ <CLIParamNum>0</CLIParamNum>
+ <rerunInterval>5</rerunInterval>
+ <maxRerunNum>5</maxRerunNum>
+ </IntentCheck>
+
+ <TrafficCheck>
+ <status>on</status>
+ <typeIndex>12</typeIndex>
+ <typeString>CHECK_TRAFFIC</typeString>
+ <CLI>check-traffic</CLI>
+ <CLIParamNum>0</CLIParamNum>
+ <rerunInterval>5</rerunInterval>
+ <maxRerunNum>5</maxRerunNum>
+ <pingWait>1</pingWait>
+ <pingTimeout>10</pingTimeout>
+ </TrafficCheck>
+
+ <TopoCheck>
+ <status>on</status>
+ <typeIndex>13</typeIndex>
+ <typeString>CHECK_TOPO</typeString>
+ <CLI>check-topo</CLI>
+ <CLIParamNum>0</CLIParamNum>
+ <rerunInterval>5</rerunInterval>
+ <maxRerunNum>5</maxRerunNum>
+ </TopoCheck>
+
+ <ONOSCheck>
+ <status>on</status>
+ <typeIndex>14</typeIndex>
+ <typeString>CHECK_ONOS</typeString>
+ <CLI>check-onos</CLI>
+ <CLIParamNum>0</CLIParamNum>
+ <rerunInterval>5</rerunInterval>
+ <maxRerunNum>5</maxRerunNum>
+ </ONOSCheck>
+
+ <LinkDown>
+ <status>on</status>
+ <typeIndex>20</typeIndex>
+ <typeString>NETWORK_LINK_DOWN</typeString>
+ <CLI>link-down</CLI>
+ <CLIParamNum>2</CLIParamNum>
+ </LinkDown>
+
+ <LinkUp>
+ <status>on</status>
+ <typeIndex>21</typeIndex>
+ <typeString>NETWORK_LINK_UP</typeString>
+ <CLI>link-up</CLI>
+ <CLIParamNum>2</CLIParamNum>
+ </LinkUp>
+
+ <DeviceDown>
+ <status>on</status>
+ <typeIndex>22</typeIndex>
+ <typeString>NETWORK_DEVICE_DOWN</typeString>
+ <CLI>device-down</CLI>
+ <CLIParamNum>1</CLIParamNum>
+ </DeviceDown>
+
+ <DeviceUp>
+ <status>on</status>
+ <typeIndex>23</typeIndex>
+ <typeString>NETWORK_DEVICE_UP</typeString>
+ <CLI>device-up</CLI>
+ <CLIParamNum>1</CLIParamNum>
+ </DeviceUp>
+
+ <AddHostIntent>
+ <status>on</status>
+ <typeIndex>30</typeIndex>
+ <typeString>APP_INTENT_HOST_ADD</typeString>
+ <CLI>add-host-intent</CLI>
+ <CLIParamNum>3</CLIParamNum>
+ <rerunInterval>5</rerunInterval>
+ <maxRerunNum>5</maxRerunNum>
+ </AddHostIntent>
+
+ <DelHostIntent>
+ <status>on</status>
+ <typeIndex>31</typeIndex>
+ <typeString>APP_INTENT_HOST_DEL</typeString>
+ <CLI>del-host-intent</CLI>
+ <CLIParamNum>3</CLIParamNum>
+ <rerunInterval>5</rerunInterval>
+ <maxRerunNum>5</maxRerunNum>
+ </DelHostIntent>
+
+ <AddPointIntent>
+ <status>on</status>
+ <typeIndex>32</typeIndex>
+ <typeString>APP_INTENT_POINT_ADD</typeString>
+ <CLI>add-point-intent</CLI>
+ <CLIParamNum>3</CLIParamNum>
+ <rerunInterval>5</rerunInterval>
+ <maxRerunNum>5</maxRerunNum>
+ </AddPointIntent>
+
+ <DelPointIntent>
+ <status>on</status>
+ <typeIndex>33</typeIndex>
+ <typeString>APP_INTENT_POINT_DEL</typeString>
+ <CLI>del-point-intent</CLI>
+ <CLIParamNum>3</CLIParamNum>
+ <rerunInterval>5</rerunInterval>
+ <maxRerunNum>5</maxRerunNum>
+ </DelPointIntent>
+
+ <ONOSDown>
+ <status>on</status>
+ <typeIndex>40</typeIndex>
+ <typeString>ONOS_ONOS_DOWN</typeString>
+ <CLI>onos-down</CLI>
+ <CLIParamNum>1</CLIParamNum>
+ <rerunInterval>5</rerunInterval>
+ <maxRerunNum>5</maxRerunNum>
+ </ONOSDown>
+
+ <ONOSUp>
+ <status>on</status>
+ <typeIndex>41</typeIndex>
+ <typeString>ONOS_ONOS_UP</typeString>
+ <CLI>onos-up</CLI>
+ <CLIParamNum>1</CLIParamNum>
+ <rerunInterval>5</rerunInterval>
+ <maxRerunNum>5</maxRerunNum>
+ </ONOSUp>
+
+ <SetCfg>
+ <status>on</status>
+ <typeIndex>42</typeIndex>
+ <typeString>ONOS_SET_CFG</typeString>
+ <CLI>set-cfg</CLI>
+ <CLIParamNum>3</CLIParamNum>
+ <rerunInterval>5</rerunInterval>
+ <maxRerunNum>5</maxRerunNum>
+ </SetCfg>
+
+ <SetFlowObj>
+ <status>on</status>
+ <typeIndex>43</typeIndex>
+ <typeString>ONOS_SET_FLOWOBJ</typeString>
+ <CLI>set-flowobj</CLI>
+ <CLIParamNum>1</CLIParamNum>
+ <rerunInterval>5</rerunInterval>
+ <maxRerunNum>5</maxRerunNum>
+ </SetFlowObj>
+
+ <BalanceMasters>
+ <status>on</status>
+ <typeIndex>44</typeIndex>
+ <typeString>ONOS_BALANCE_MASTERS</typeString>
+ <CLI>balance-masters</CLI>
+ <CLIParamNum>0</CLIParamNum>
+ <rerunInterval>5</rerunInterval>
+ <maxRerunNum>5</maxRerunNum>
+ </BalanceMasters>
+
+ <addAllChecks>
+ <status>on</status>
+ <typeIndex>110</typeIndex>
+ <typeString>CHECK_ALL</typeString>
+ <CLI>check-all</CLI>
+ <CLIParamNum>0</CLIParamNum>
+ </addAllChecks>
+
+ <randomLinkToggle>
+ <status>on</status>
+ <typeIndex>120</typeIndex>
+ <typeString>NETWORK_LINK_RANDOM_TOGGLE</typeString>
+ <CLI>link-toggle-random</CLI>
+ <CLIParamNum>1</CLIParamNum>
+ <sleepBeforeCheck>10</sleepBeforeCheck>
+ </randomLinkToggle>
+
+ <randomLinkGroupToggle>
+ <status>on</status>
+ <typeIndex>121</typeIndex>
+ <typeString>NETWORK_LINK_GROUP_RANDOM_TOGGLE</typeString>
+ <CLI>link-group-toggle-random</CLI>
+ <CLIParamNum>3</CLIParamNum>
+ <sleepBeforeCheck>10</sleepBeforeCheck>
+ </randomLinkGroupToggle>
+
+ <randomDeviceToggle>
+ <status>on</status>
+ <typeIndex>122</typeIndex>
+ <typeString>NETWORK_DEVICE_RANDOM_TOGGLE</typeString>
+ <CLI>device-toggle-random</CLI>
+ <CLIParamNum>1</CLIParamNum>
+ <sleepBeforeCheck>10</sleepBeforeCheck>
+ </randomDeviceToggle>
+
+ <randomDeviceGroupToggle>
+ <status>on</status>
+ <typeIndex>123</typeIndex>
+ <typeString>NETWORK_DEVICE_GROUP_RANDOM_TOGGLE</typeString>
+ <CLI>device-group-toggle-random</CLI>
+ <CLIParamNum>3</CLIParamNum>
+ <sleepBeforeCheck>10</sleepBeforeCheck>
+ </randomDeviceGroupToggle>
+
+ <installAllHostIntents>
+ <status>on</status>
+ <typeIndex>130</typeIndex>
+ <typeString>APP_INTENT_HOST_ADD_ALL</typeString>
+ <CLI>add-all-host-intents</CLI>
+ <CLIParamNum>0</CLIParamNum>
+ <sleepBeforeCheck>10</sleepBeforeCheck>
+ </installAllHostIntents>
+
+ <removeAllHostIntents>
+ <status>on</status>
+ <typeIndex>131</typeIndex>
+ <typeString>APP_INTENT_HOST_DEL_ALL</typeString>
+ <CLI>del-all-host-intents</CLI>
+ <CLIParamNum>0</CLIParamNum>
+ <sleepBeforeCheck>5</sleepBeforeCheck>
+ </removeAllHostIntents>
+
+ <installAllPointIntents>
+ <status>on</status>
+ <typeIndex>132</typeIndex>
+ <typeString>APP_INTENT_POINT_ADD_ALL</typeString>
+ <CLI>add-all-point-intents</CLI>
+ <CLIParamNum>0</CLIParamNum>
+ <sleepBeforeCheck>10</sleepBeforeCheck>
+ </installAllPointIntents>
+
+ <removeAllPointIntents>
+ <status>on</status>
+ <typeIndex>133</typeIndex>
+ <typeString>APP_INTENT_POINT_DEL_ALL</typeString>
+ <CLI>del-all-point-intents</CLI>
+ <CLIParamNum>0</CLIParamNum>
+ <sleepBeforeCheck>5</sleepBeforeCheck>
+ </removeAllPointIntents>
+
+ <randomONOSToggle>
+ <status>on</status>
+ <typeIndex>140</typeIndex>
+ <typeString>ONOS_ONOS_RANDOM_TOGGLE</typeString>
+ <CLI>onos-toggle-random</CLI>
+ <CLIParamNum>1</CLIParamNum>
+ <sleepBeforeCheck>10</sleepBeforeCheck>
+ </randomONOSToggle>
+ </EVENT>
+
+ <SCHEDULER>
+ <pendingEventsCapacity>1</pendingEventsCapacity>
+ <runningEventsCapacity>10</runningEventsCapacity>
+ <scheduleLoopSleep>0.1</scheduleLoopSleep>
+ </SCHEDULER>
+
+ <GENERATOR>
+ <listenerPort>6000</listenerPort>
+ <insertEventRetryInterval>1</insertEventRetryInterval>
+ </GENERATOR>
+
+ <TOPO>
+ <topo0>
+ <fileName>topoTripleIpv6.py</fileName>
+ </topo0>
+
+ <topo1>
+ <fileName>topoAttIpv6.py</fileName>
+ </topo1>
+
+ <topo2>
+ <fileName>topoChordalIpv6.py</fileName>
+ </topo2>
+
+ <topo3>
+ <fileName>topoSpineIpv6.py</fileName>
+ </topo3>
+
+ <topo4>
+ <fileName>topoRingIpv6.py</fileName>
+ </topo4>
+ </TOPO>
+
+ <CASE20>
+ <linkToggleNum>5</linkToggleNum>
+ <linkDownUpInterval>1</linkDownUpInterval>
+ </CASE20>
+
+ <CASE21>
+ <linkGroupSize>5</linkGroupSize>
+ <linkDownDownInterval>1</linkDownDownInterval>
+ <linkDownUpInterval>1</linkDownUpInterval>
+ </CASE21>
+
+ <CASE90>
+ <sleepSec>60</sleepSec>
+ </CASE90>
+</PARAMS>
diff --git a/TestON/tests/CHOTestMonkey/CHOTestMonkey.py b/TestON/tests/CHOTestMonkey/CHOTestMonkey.py
new file mode 100644
index 0000000..314bde9
--- /dev/null
+++ b/TestON/tests/CHOTestMonkey/CHOTestMonkey.py
@@ -0,0 +1,806 @@
+"""
+CHOTestMonkey class
+Author: you@onlab.us
+"""
+
+import sys
+import os
+import re
+import time
+import json
+import itertools
+
+class CHOTestMonkey:
+
+ def __init__( self ):
+ self.default = ''
+
+ def CASE0( self, main ):
+ """
+ Startup sequence:
+ apply cell <name>
+ git pull
+ mvn clean install
+ onos-package
+ onos-verify-cell
+ onos-uninstall
+ onos-install
+ onos-start-cli
+ Set IPv6 cfg parameters for Neighbor Discovery
+ start event scheduler
+ start event listener
+ """
+ import time
+ from threading import Lock, Condition
+ from tests.CHOTestMonkey.dependencies.elements.ONOSElement import Controller
+ from tests.CHOTestMonkey.dependencies.EventGenerator import EventGenerator
+ from tests.CHOTestMonkey.dependencies.EventScheduler import EventScheduler
+
+ gitPull = main.params[ 'TEST' ][ 'autoPull' ]
+ onosPackage = main.params[ 'TEST' ][ 'package' ]
+ gitBranch = main.params[ 'TEST' ][ 'branch' ]
+ karafTimeout = main.params[ 'TEST' ][ 'karafCliTimeout' ]
+ main.enableIPv6 = main.params[ 'TEST' ][ 'IPv6' ]
+ main.enableIPv6 = True if main.enableIPv6 == "on" else False
+ main.caseSleep = int( main.params[ 'TEST' ][ 'caseSleep' ] )
+ main.numCtrls = main.params[ 'TEST' ][ 'numCtrl' ]
+ main.controllers = []
+ for i in range( 1, int( main.numCtrls ) + 1 ):
+ newController = Controller( i )
+ newController.setCLI( getattr( main, 'ONOScli' + str( i ) ) )
+ main.controllers.append( newController )
+ main.devices = []
+ main.links = []
+ main.hosts = []
+ main.intents = []
+ main.enabledEvents = {}
+ for eventName in main.params[ 'EVENT' ].keys():
+ if main.params[ 'EVENT' ][ eventName ][ 'status' ] == 'on':
+ main.enabledEvents[ int( main.params[ 'EVENT' ][ eventName ][ 'typeIndex' ] ) ] = eventName
+ print main.enabledEvents
+ main.eventScheduler = EventScheduler()
+ main.eventGenerator = EventGenerator()
+ main.variableLock = Lock()
+ main.mininetLock = Lock()
+ main.ONOSbenchLock = Lock()
+ main.threadID = 0
+ main.eventID = 0
+ main.caseResult = main.TRUE
+
+ main.case( "Set up test environment" )
+ main.log.report( "Set up test environment" )
+ main.log.report( "_______________________" )
+
+ main.step( "Apply Cell environment for ONOS" )
+ if ( main.onoscell ):
+ cellName = main.onoscell
+ cellResult = main.ONOSbench.setCell( cellName )
+ utilities.assert_equals( expect=main.TRUE,
+ actual=cellResult,
+ onpass="Test step PASS",
+ onfail="Test step FAIL" )
+ else:
+ main.log.error( "Please provide onoscell option at TestON CLI to run CHO tests" )
+ main.log.error( "Example: ~/TestON/bin/cli.py run CHOTestMonkey onoscell <cellName>" )
+ main.cleanup()
+ main.exit()
+
+ main.step( "Git checkout and pull " + gitBranch )
+ if gitPull == 'on':
+ checkoutResult = main.ONOSbench.gitCheckout( gitBranch )
+ pullResult = main.ONOSbench.gitPull()
+ cpResult = ( checkoutResult and pullResult )
+ else:
+ checkoutResult = main.TRUE
+ pullResult = main.TRUE
+ main.log.info( "Skipped git checkout and pull as they are disabled in params file" )
+ cpResult = ( checkoutResult and pullResult )
+ utilities.assert_equals( expect=main.TRUE,
+ actual=cpResult,
+ onpass="Test step PASS",
+ onfail="Test step FAIL" )
+
+ main.step( "mvn clean & install" )
+ if gitPull == 'on':
+ mvnResult = main.ONOSbench.cleanInstall()
+ else:
+ mvnResult = main.TRUE
+ main.log.info( "Skipped mvn clean install as it is disabled in params file" )
+ utilities.assert_equals( expect=main.TRUE,
+ actual=mvnResult,
+ onpass="Test step PASS",
+ onfail="Test step FAIL" )
+ main.ONOSbench.getVersion( report=True )
+
+ main.step( "Create ONOS package" )
+ if onosPackage == 'on':
+ packageResult = main.ONOSbench.onosPackage()
+ else:
+ packageResult = main.TRUE
+ main.log.info( "Skipped onos package as it is disabled in params file" )
+ utilities.assert_equals( expect=main.TRUE,
+ actual=packageResult,
+ onpass="Test step PASS",
+ onfail="Test step FAIL" )
+
+ main.step( "Uninstall ONOS package on all Nodes" )
+ uninstallResult = main.TRUE
+ for i in range( int( main.numCtrls ) ):
+ main.log.info( "Uninstalling package on ONOS Node IP: " + main.onosIPs[i] )
+ uResult = main.ONOSbench.onosUninstall( main.onosIPs[i] )
+ utilities.assert_equals( expect=main.TRUE,
+ actual=uResult,
+ onpass="Test step PASS",
+ onfail="Test step FAIL" )
+ uninstallResult = ( uninstallResult and uResult )
+
+ main.step( "Install ONOS package on all Nodes" )
+ installResult = main.TRUE
+ for i in range( int( main.numCtrls ) ):
+ main.log.info( "Installing package on ONOS Node IP: " + main.onosIPs[i] )
+ iResult = main.ONOSbench.onosInstall( node=main.onosIPs[i] )
+ utilities.assert_equals( expect=main.TRUE,
+ actual=iResult,
+ onpass="Test step PASS",
+ onfail="Test step FAIL" )
+ installResult = ( installResult and iResult )
+
+ main.step( "Start ONOS CLI on all nodes" )
+ cliResult = main.TRUE
+ startCliResult = main.TRUE
+ pool = []
+ for controller in main.controllers:
+ t = main.Thread( target=controller.startCLI,
+ threadID=main.threadID,
+ name="startOnosCli",
+ args=[ ] )
+ pool.append(t)
+ t.start()
+ main.threadID = main.threadID + 1
+ for t in pool:
+ t.join()
+ startCliResult = startCliResult and t.result
+ if not startCliResult:
+ main.log.info( "ONOS CLI did not start up properly" )
+ main.cleanup()
+ main.exit()
+ else:
+ main.log.info( "Successful CLI startup" )
+ startCliResult = main.TRUE
+ utilities.assert_equals( expect=main.TRUE,
+ actual=startCliResult,
+ onpass="Test step PASS",
+ onfail="Test step FAIL" )
+
+ main.step( "Set IPv6 cfg parameters for Neighbor Discovery" )
+ setIPv6CfgSleep = int( main.params[ 'TEST' ][ 'setIPv6CfgSleep' ] )
+ if main.enableIPv6:
+ time.sleep( setIPv6CfgSleep )
+ cfgResult1 = main.controllers[ 0 ].CLI.setCfg( "org.onosproject.proxyarp.ProxyArp",
+ "ipv6NeighborDiscovery",
+ "true" )
+ time.sleep( setIPv6CfgSleep )
+ cfgResult2 = main.controllers[ 0 ].CLI.setCfg( "org.onosproject.provider.host.impl.HostLocationProvider",
+ "ipv6NeighborDiscovery",
+ "true" )
+ else:
+ main.log.info( "Skipped setting IPv6 cfg parameters as it is disabled in params file" )
+ cfgResult1 = main.TRUE
+ cfgResult2 = main.TRUE
+ cfgResult = cfgResult1 and cfgResult2
+ utilities.assert_equals( expect=main.TRUE,
+ actual=cfgResult,
+ onpass="ipv6NeighborDiscovery cfg is set to true",
+ onfail="Failed to cfg set ipv6NeighborDiscovery" )
+
+ main.step( "Start a thread for the scheduler" )
+ t = main.Thread( target=main.eventScheduler.startScheduler,
+ threadID=main.threadID,
+ name="startScheduler",
+ args=[] )
+ t.start()
+ stepResult = main.TRUE
+ with main.variableLock:
+ main.threadID = main.threadID + 1
+
+ utilities.assert_equals( expect=main.TRUE,
+ actual=stepResult,
+ onpass="Test step PASS",
+ onfail="Test step FAIL" )
+
+ main.step( "Start a thread to listen to and handle network, ONOS and application events" )
+ t = main.Thread( target=main.eventGenerator.startListener,
+ threadID=main.threadID,
+ name="startListener",
+ args=[] )
+ t.start()
+ with main.variableLock:
+ main.threadID = main.threadID + 1
+
+ caseResult = installResult and uninstallResult and startCliResult and cfgResult
+ utilities.assert_equals( expect=main.TRUE,
+ actual=caseResult,
+ onpass="Set up test environment PASS",
+ onfail="Set up test environment FAIL" )
+
+ def CASE1( self, main ):
+ """
+ Load Mininet topology and balances all switches
+ """
+ import re
+ import time
+ import copy
+
+ main.topoIndex = "topo" + str ( main.params[ 'TEST' ][ 'topo' ] )
+
+ main.log.report( "Load Mininet topology and Balance all Mininet switches across controllers" )
+ main.log.report( "________________________________________________________________________" )
+ main.case( "Assign and Balance all Mininet switches across controllers" )
+
+ main.step( "Start Mininet topology" )
+ newTopo = main.params[ 'TOPO' ][ main.topoIndex ][ 'fileName' ]
+ mininetDir = main.Mininet1.home + "/custom/"
+ topoPath = main.testDir + "/" + main.TEST + "/dependencies/topologies/" + newTopo
+ main.ONOSbench.secureCopy( main.Mininet1.user_name, main.Mininet1.ip_address, topoPath, mininetDir, direction="to" )
+ topoPath = mininetDir + newTopo
+ startStatus = main.Mininet1.startNet( topoFile = topoPath )
+ main.mininetSwitches = main.Mininet1.getSwitches()
+ main.mininetHosts = main.Mininet1.getHosts()
+ main.mininetLinks = main.Mininet1.getLinks()
+ utilities.assert_equals( expect=main.TRUE,
+ actual=startStatus,
+ onpass="Start Mininet topology test PASS",
+ onfail="Start Mininet topology test FAIL" )
+
+ main.step( "Assign switches to controllers" )
+ switchMastership = main.TRUE
+ for switchName in main.mininetSwitches.keys():
+ main.Mininet1.assignSwController( sw=switchName, ip=main.onosIPs )
+ response = main.Mininet1.getSwController( switchName )
+ print( "Response is " + str( response ) )
+ if re.search( "tcp:" + main.onosIPs[ 0 ], response ):
+ switchMastership = switchMastership and main.TRUE
+ else:
+ switchMastership = main.FALSE
+ utilities.assert_equals( expect=main.TRUE,
+ actual=switchMastership,
+ onpass="Assign switches to controllers test PASS",
+ onfail="Assign switches to controllers test FAIL" )
+ # Waiting here to make sure topology converges across all nodes
+ sleep = int( main.params[ 'TEST' ][ 'loadTopoSleep' ] )
+ time.sleep( sleep )
+
+ main.step( "Balance devices across controllers" )
+ balanceResult = main.ONOScli1.balanceMasters()
+ # giving some breathing time for ONOS to complete re-balance
+ time.sleep( sleep )
+
+ caseResult = ( startStatus and switchMastership and balanceResult )
+ utilities.assert_equals( expect=main.TRUE,
+ actual=caseResult,
+ onpass="Starting new Att topology test PASS",
+ onfail="Starting new Att topology test FAIL" )
+
+ def CASE2( self, main ):
+ """
+ Collect and store device and link data from ONOS
+ """
+ import json
+ from tests.CHOTestMonkey.dependencies.elements.NetworkElement import Device, Link
+
+ main.log.report( "Collect and Store topology details from ONOS" )
+ main.log.report( "____________________________________________________________________" )
+ main.case( "Collect and Store Topology Details from ONOS" )
+ topoResult = main.TRUE
+ topologyOutput = main.ONOScli1.topology()
+ topologyResult = main.ONOScli1.getTopology( topologyOutput )
+ ONOSDeviceNum = int( topologyResult[ 'devices' ] )
+ ONOSLinkNum = int( topologyResult[ 'links' ] )
+ mininetSwitchNum = len( main.mininetSwitches )
+ mininetLinkNum = ( len( main.mininetLinks ) - len( main.mininetHosts ) ) * 2
+ if mininetSwitchNum == ONOSDeviceNum and mininetLinkNum == ONOSLinkNum:
+ main.step( "Collect and store device data" )
+ stepResult = main.TRUE
+ dpidToName = {}
+ for key, value in main.mininetSwitches.items():
+ dpidToName[ 'of:' + str( value[ 'dpid' ] ) ] = key
+ devicesRaw = main.ONOScli1.devices()
+ devices = json.loads( devicesRaw )
+ deviceInitIndex = 0
+ for device in devices:
+ name = dpidToName[ device[ 'id' ] ]
+ newDevice = Device( deviceInitIndex, name, device[ 'id' ] )
+ print newDevice
+ main.devices.append( newDevice )
+ deviceInitIndex += 1
+ utilities.assert_equals( expect=main.TRUE,
+ actual=stepResult,
+ onpass="Successfully collected and stored device data",
+ onfail="Failed to collect and store device data" )
+
+ main.step( "Collect and store link data" )
+ stepResult = main.TRUE
+ linksRaw = main.ONOScli1.links()
+ links = json.loads( linksRaw )
+ linkInitIndex = 0
+ for link in links:
+ for device in main.devices:
+ if device.dpid == link[ 'src' ][ 'device' ]:
+ deviceA = device
+ elif device.dpid == link[ 'dst' ][ 'device' ]:
+ deviceB = device
+ assert deviceA != None and deviceB != None
+ newLink = Link( linkInitIndex, deviceA, link[ 'src' ][ 'port' ], deviceB, link[ 'dst' ][ 'port' ] )
+ print newLink
+ main.links.append( newLink )
+ linkInitIndex += 1
+ # Set backward links and outgoing links of devices
+ for linkA in main.links:
+ linkA.deviceA.outgoingLinks.append( linkA )
+ if linkA.backwardLink != None:
+ continue
+ for linkB in main.links:
+ if linkB.backwardLink != None:
+ continue
+ if linkA.deviceA == linkB.deviceB and\
+ linkA.deviceB == linkB.deviceA and\
+ linkA.portA == linkB.portB and\
+ linkA.portB == linkB.portA:
+ linkA.setBackwardLink( linkB )
+ linkB.setBackwardLink( linkA )
+ utilities.assert_equals( expect=main.TRUE,
+ actual=stepResult,
+ onpass="Successfully collected and stored link data",
+ onfail="Failed to collect and store link data" )
+ else:
+ main.log.info( "Devices (expected): %s, Links (expected): %s" % ( mininetSwitchNum, mininetLinkNum ) )
+ main.log.info( "Devices (actual): %s, Links (actual): %s" % ( ONOSDeviceNum, ONOSLinkNum ) )
+ topoResult = main.FALSE
+
+ caseResult = topoResult
+ utilities.assert_equals( expect=main.TRUE,
+ actual=caseResult,
+ onpass="Saving ONOS topology data test PASS",
+ onfail="Saving ONOS topology data test FAIL" )
+
+ if not caseResult:
+ main.log.info("Topology does not match, exiting test...")
+ main.cleanup()
+ main.exit()
+
+ def CASE3( self, main ):
+ """
+ Collect and store host data from ONOS
+ """
+ import json
+ from tests.CHOTestMonkey.dependencies.elements.NetworkElement import Host
+
+ main.log.report( "Collect and store host adta from ONOS" )
+ main.log.report( "______________________________________________" )
+ main.case( "Use fwd app and pingall to discover all the hosts, then collect and store host data" )
+
+ main.step( "Enable Reactive forwarding" )
+ appResult = main.controllers[ 0 ].CLI.activateApp( "org.onosproject.fwd" )
+ cfgResult1 = main.TRUE
+ cfgResult2 = main.TRUE
+ if main.enableIPv6:
+ cfgResult1 = main.controllers[ 0 ].CLI.setCfg( "org.onosproject.fwd.ReactiveForwarding", "ipv6Forwarding", "true" )
+ cfgResult2 = main.controllers[ 0 ].CLI.setCfg( "org.onosproject.fwd.ReactiveForwarding", "matchIpv6Address", "true" )
+ stepResult = appResult and cfgResult1 and cfgResult2
+ utilities.assert_equals( expect=main.TRUE,
+ actual=stepResult,
+ onpass="Successfully enabled reactive forwarding",
+ onfail="Failed to enable reactive forwarding" )
+
+ main.step( "Discover hosts using pingall" )
+ stepResult = main.TRUE
+ main.Mininet1.pingall()
+ if main.enableIPv6:
+ ping6Result = main.Mininet1.pingall( protocol="IPv6" )
+ hosts = main.controllers[ 0 ].CLI.hosts()
+ hosts = json.loads( hosts )
+ if not len( hosts ) == len( main.mininetHosts ):
+ stepResult = main.FALSE
+ utilities.assert_equals( expect=main.TRUE,
+ actual=stepResult,
+ onpass="Host discovery PASS",
+ onfail="Host discovery FAIL" )
+ if not stepResult:
+ main.log.debug( hosts )
+ main.cleanup()
+ main.exit()
+
+ main.step( "Disable Reactive forwarding" )
+ appResult = main.controllers[ 0 ].CLI.deactivateApp( "org.onosproject.fwd" )
+ stepResult = appResult
+ utilities.assert_equals( expect=main.TRUE,
+ actual=stepResult,
+ onpass="Successfully deactivated fwd app",
+ onfail="Failed to deactivate fwd app" )
+
+ main.step( "Collect and store host data" )
+ stepResult = main.TRUE
+ macToName = {}
+ for key, value in main.mininetHosts.items():
+ macToName[ value[ 'interfaces' ][ 0 ][ 'mac' ].upper() ] = key
+ dpidToDevice = {}
+ for device in main.devices:
+ dpidToDevice[ device.dpid ] = device
+ hostInitIndex = 0
+ for host in hosts:
+ name = macToName[ host[ 'mac' ] ]
+ dpid = host[ 'location' ][ 'elementId' ]
+ device = dpidToDevice[ dpid ]
+ newHost = Host( hostInitIndex,
+ name, host[ 'id' ], host[ 'mac' ],
+ device, host[ 'location' ][ 'port' ],
+ host[ 'vlan' ], host[ 'ipAddresses' ] )
+ print newHost
+ main.hosts.append( newHost )
+ main.devices[ device.index ].hosts.append( newHost )
+ hostInitIndex += 1
+ utilities.assert_equals( expect=main.TRUE,
+ actual=stepResult,
+ onpass="Successfully collected and stored host data",
+ onfail="Failed to collect and store host data" )
+
+ main.step( "Create one host component for each host and then start host cli" )
+ for host in main.hosts:
+ main.Mininet1.createHostComponent( host.name )
+ hostHandle = getattr( main, host.name )
+ main.log.info( "Starting CLI on host " + str( host.name ) )
+ startCLIResult = hostHandle.startHostCli()
+ host.setHandle( hostHandle )
+ stepResult = startCLIResult
+ utilities.assert_equals( expect=main.TRUE,
+ actual=startCLIResult,
+ onpass="Host CLI started",
+ onfail="Failed to start host CLI" )
+
+ def CASE10( self, main ):
+ """
+ Run all enabled checks
+ """
+ import time
+ from tests.CHOTestMonkey.dependencies.events.Event import EventType
+ from tests.CHOTestMonkey.dependencies.EventScheduler import EventScheduleMethod
+
+ main.log.report( "Run all enabled checks" )
+ main.log.report( "__________________________________________________" )
+ main.case( "Run all enabled checks" )
+ main.step( "Run all enabled checks" )
+ main.caseResult = main.TRUE
+ main.eventGenerator.triggerEvent( EventType().CHECK_ALL, EventScheduleMethod().RUN_BLOCK )
+ # Wait for the scheduler to become idle before going to the next testcase
+ with main.eventScheduler.idleCondition:
+ while not main.eventScheduler.isIdle():
+ main.eventScheduler.idleCondition.wait()
+ utilities.assert_equals( expect=main.TRUE,
+ actual=main.caseResult,
+ onpass="All enabled checks passed",
+ onfail="Not all enabled checks passed" )
+ time.sleep( main.caseSleep )
+
+ def CASE20( self, main ):
+ """
+ Bring down/up links and check topology and ping
+ """
+ import time
+ from tests.CHOTestMonkey.dependencies.events.Event import EventType
+ from tests.CHOTestMonkey.dependencies.EventScheduler import EventScheduleMethod
+
+ main.log.report( "Bring down/up links and check topology and ping" )
+ main.log.report( "__________________________________________________" )
+ main.case( "Bring down/up links and check topology and ping" )
+ main.step( "Bring down/up links and check topology and ping" )
+ main.caseResult = main.TRUE
+ linkToggleNum = int( main.params[ 'CASE20' ][ 'linkToggleNum' ] )
+ linkDownUpInterval = int( main.params[ 'CASE20' ][ 'linkDownUpInterval' ] )
+ for i in range( 0, linkToggleNum ):
+ main.eventGenerator.triggerEvent( EventType().NETWORK_LINK_RANDOM_TOGGLE, EventScheduleMethod().RUN_BLOCK, linkDownUpInterval )
+ with main.eventScheduler.idleCondition:
+ while not main.eventScheduler.isIdle():
+ main.eventScheduler.idleCondition.wait()
+ utilities.assert_equals( expect=main.TRUE,
+ actual=main.caseResult,
+ onpass="Toggle network links test passed",
+ onfail="Toggle network links test failed" )
+ time.sleep( main.caseSleep )
+
+ def CASE21( self, main ):
+ """
+ Bring down/up a group of links and check topology and ping
+ """
+ import time
+ from tests.CHOTestMonkey.dependencies.events.Event import EventType
+ from tests.CHOTestMonkey.dependencies.EventScheduler import EventScheduleMethod
+
+ main.log.report( "Bring down/up a group of links and check topology and ping" )
+ main.log.report( "__________________________________________________" )
+ main.case( "Bring down/up a group of links and check topology and ping" )
+ main.step( "Bring down/up a group of links and check topology and ping" )
+ main.caseResult = main.TRUE
+ linkGroupSize = int( main.params[ 'CASE21' ][ 'linkGroupSize' ] )
+ linkDownDownInterval = int( main.params[ 'CASE21' ][ 'linkDownDownInterval' ] )
+ linkDownUpInterval = int( main.params[ 'CASE21' ][ 'linkDownUpInterval' ] )
+ main.eventGenerator.triggerEvent( EventType().NETWORK_LINK_GROUP_RANDOM_TOGGLE, EventScheduleMethod().RUN_BLOCK, linkGroupSize, linkDownDownInterval, linkDownUpInterval )
+ with main.eventScheduler.idleCondition:
+ while not main.eventScheduler.isIdle():
+ main.eventScheduler.idleCondition.wait()
+ utilities.assert_equals( expect=main.TRUE,
+ actual=main.caseResult,
+ onpass="Toggle network link group test passed",
+ onfail="Toggle network link group test failed" )
+ time.sleep( main.caseSleep )
+
+ def CASE30( self, main ):
+ """
+ Install host intents and check intent states and ping
+ """
+ import time
+ from tests.CHOTestMonkey.dependencies.events.Event import EventType
+ from tests.CHOTestMonkey.dependencies.EventScheduler import EventScheduleMethod
+
+ main.log.report( "Install host intents and check intent states and ping" )
+ main.log.report( "__________________________________________________" )
+ main.case( "Install host intents and check intent states and ping" )
+ main.step( "Install host intents and check intent states and ping" )
+ main.caseResult = main.TRUE
+ main.eventGenerator.triggerEvent( EventType().APP_INTENT_HOST_ADD_ALL, EventScheduleMethod().RUN_BLOCK )
+ with main.eventScheduler.idleCondition:
+ while not main.eventScheduler.isIdle():
+ main.eventScheduler.idleCondition.wait()
+ utilities.assert_equals( expect=main.TRUE,
+ actual=main.caseResult,
+ onpass="Install host intents test passed",
+ onfail="Install host intents test failed" )
+ time.sleep( main.caseSleep )
+
+ def CASE31( self, main ):
+ """
+ Uninstall host intents and check intent states and ping
+ """
+ import time
+ from tests.CHOTestMonkey.dependencies.events.Event import EventType
+ from tests.CHOTestMonkey.dependencies.EventScheduler import EventScheduleMethod
+
+ main.log.report( "Uninstall host intents and check intent states and ping" )
+ main.log.report( "__________________________________________________" )
+ main.case( "Uninstall host intents and check intent states and ping" )
+ main.step( "Uninstall host intents and check intent states and ping" )
+ main.caseResult = main.TRUE
+ main.eventGenerator.triggerEvent( EventType().APP_INTENT_HOST_DEL_ALL, EventScheduleMethod().RUN_BLOCK )
+ with main.eventScheduler.idleCondition:
+ while not main.eventScheduler.isIdle():
+ main.eventScheduler.idleCondition.wait()
+ utilities.assert_equals( expect=main.TRUE,
+ actual=main.caseResult,
+ onpass="Uninstall host intents test passed",
+ onfail="Uninstall host intents test failed" )
+ time.sleep( main.caseSleep )
+
+ def CASE32( self, main ):
+ """
+ Install point intents and check intent states and ping
+ """
+ import time
+ from tests.CHOTestMonkey.dependencies.events.Event import EventType
+ from tests.CHOTestMonkey.dependencies.EventScheduler import EventScheduleMethod
+
+ main.log.report( "Install point intents and check intent states and ping" )
+ main.log.report( "__________________________________________________" )
+ main.case( "Install point intents and check intent states and ping" )
+ main.step( "Install point intents and check intent states and ping" )
+ main.caseResult = main.TRUE
+ main.eventGenerator.triggerEvent( EventType().APP_INTENT_POINT_ADD_ALL, EventScheduleMethod().RUN_BLOCK )
+ with main.eventScheduler.idleCondition:
+ while not main.eventScheduler.isIdle():
+ main.eventScheduler.idleCondition.wait()
+ utilities.assert_equals( expect=main.TRUE,
+ actual=main.caseResult,
+ onpass="Install point intents test passed",
+ onfail="Install point intents test failed" )
+ time.sleep( main.caseSleep )
+
+ def CASE33( self, main ):
+ """
+ Uninstall point intents and check intent states and ping
+ """
+ import time
+ from tests.CHOTestMonkey.dependencies.events.Event import EventType
+ from tests.CHOTestMonkey.dependencies.EventScheduler import EventScheduleMethod
+
+ main.log.report( "Uninstall point intents and check intent states and ping" )
+ main.log.report( "__________________________________________________" )
+ main.case( "Uninstall point intents and check intent states and ping" )
+ main.step( "Uninstall point intents and check intent states and ping" )
+ main.caseResult = main.TRUE
+ main.eventGenerator.triggerEvent( EventType().APP_INTENT_POINT_DEL_ALL, EventScheduleMethod().RUN_BLOCK )
+ with main.eventScheduler.idleCondition:
+ while not main.eventScheduler.isIdle():
+ main.eventScheduler.idleCondition.wait()
+ utilities.assert_equals( expect=main.TRUE,
+ actual=main.caseResult,
+ onpass="Uninstall point intents test passed",
+ onfail="Uninstall point intents test failed" )
+ time.sleep( main.caseSleep )
+
+ def CASE40( self, main ):
+ """
+ Randomly bring down one ONOS node
+ """
+ import time
+ import random
+ from tests.CHOTestMonkey.dependencies.events.Event import EventType
+ from tests.CHOTestMonkey.dependencies.EventScheduler import EventScheduleMethod
+
+ main.log.report( "Randomly bring down one ONOS node" )
+ main.log.report( "__________________________________________________" )
+ main.case( "Randomly bring down one ONOS node" )
+ main.step( "Randomly bring down one ONOS node" )
+ main.caseResult = main.TRUE
+ availableControllers = []
+ for controller in main.controllers:
+ if controller.isUp():
+ availableControllers.append( controller.index )
+ if len( availableControllers ) == 0:
+ main.log.warn( "No available controllers" )
+ main.caseResult = main.FALSE
+ else:
+ index = random.sample( availableControllers, 1 )
+ main.eventGenerator.triggerEvent( EventType().ONOS_ONOS_DOWN, EventScheduleMethod().RUN_BLOCK, index[ 0 ] )
+ with main.eventScheduler.idleCondition:
+ while not main.eventScheduler.isIdle():
+ main.eventScheduler.idleCondition.wait()
+ utilities.assert_equals( expect=main.TRUE,
+ actual=main.caseResult,
+ onpass="Randomly bring down ONOS test passed",
+ onfail="Randomly bring down ONOS test failed" )
+ time.sleep( main.caseSleep )
+
+ def CASE41( self, main ):
+ """
+ Randomly bring up one ONOS node that is down
+ """
+ import time
+ import random
+ from tests.CHOTestMonkey.dependencies.events.Event import EventType
+ from tests.CHOTestMonkey.dependencies.EventScheduler import EventScheduleMethod
+
+ main.log.report( "Randomly bring up one ONOS node that is down" )
+ main.log.report( "__________________________________________________" )
+ main.case( "Randomly bring up one ONOS node that is down" )
+ main.step( "Randomly bring up one ONOS node that is down" )
+ main.caseResult = main.TRUE
+ targetControllers = []
+ for controller in main.controllers:
+ if not controller.isUp():
+ targetControllers.append( controller.index )
+ if len( targetControllers ) == 0:
+ main.log.warn( "All controllers are up" )
+ main.caseResult = main.FALSE
+ else:
+ index = random.sample( targetControllers, 1 )
+ main.eventGenerator.triggerEvent( EventType().ONOS_ONOS_UP, EventScheduleMethod().RUN_BLOCK, index[ 0 ] )
+ with main.eventScheduler.idleCondition:
+ while not main.eventScheduler.isIdle():
+ main.eventScheduler.idleCondition.wait()
+ utilities.assert_equals( expect=main.TRUE,
+ actual=main.caseResult,
+ onpass="Randomly bring up ONOS test passed",
+ onfail="Randomly bring up ONOS test failed" )
+ time.sleep( main.caseSleep )
+
+ def CASE50( self, main ):
+ """
+ Set FlowObjective to True
+ """
+ import time
+ from tests.CHOTestMonkey.dependencies.events.Event import EventType
+ from tests.CHOTestMonkey.dependencies.EventScheduler import EventScheduleMethod
+
+ main.log.report( "Set FlowObjective to True" )
+ main.log.report( "__________________________________________________" )
+ main.case( "Set FlowObjective to True" )
+ main.step( "Set FlowObjective to True" )
+ main.caseResult = main.TRUE
+ main.eventGenerator.triggerEvent( EventType().ONOS_SET_FLOWOBJ, EventScheduleMethod().RUN_BLOCK, 'true' )
+ with main.eventScheduler.idleCondition:
+ while not main.eventScheduler.isIdle():
+ main.eventScheduler.idleCondition.wait()
+ utilities.assert_equals( expect=main.TRUE,
+ actual=main.caseResult,
+ onpass="Set FlowObjective test passed",
+ onfail="Set FlowObjective test failed" )
+ time.sleep( main.caseSleep )
+
+ def CASE51( self, main ):
+ """
+ Set FlowObjective to False
+ """
+ import time
+ from tests.CHOTestMonkey.dependencies.events.Event import EventType
+ from tests.CHOTestMonkey.dependencies.EventScheduler import EventScheduleMethod
+
+ main.log.report( "Set FlowObjective to False" )
+ main.log.report( "__________________________________________________" )
+ main.case( "Set FlowObjective to False" )
+ main.step( "Set FlowObjective to False" )
+ main.caseResult = main.TRUE
+ main.eventGenerator.triggerEvent( EventType().ONOS_SET_FLOWOBJ, EventScheduleMethod().RUN_BLOCK, 'false' )
+ with main.eventScheduler.idleCondition:
+ while not main.eventScheduler.isIdle():
+ main.eventScheduler.idleCondition.wait()
+ utilities.assert_equals( expect=main.TRUE,
+ actual=main.caseResult,
+ onpass="Set FlowObjective test passed",
+ onfail="Set FlowObjective test failed" )
+ time.sleep( main.caseSleep )
+
+ def CASE60( self, main ):
+ """
+ Balance device masters
+ """
+ import time
+ from tests.CHOTestMonkey.dependencies.events.Event import EventType
+ from tests.CHOTestMonkey.dependencies.EventScheduler import EventScheduleMethod
+
+ main.log.report( "Balance device masters" )
+ main.log.report( "__________________________________________________" )
+ main.case( "Balance device masters" )
+ main.step( "Balance device masters" )
+ main.caseResult = main.TRUE
+ main.eventGenerator.triggerEvent( EventType().ONOS_BALANCE_MASTERS, EventScheduleMethod().RUN_BLOCK )
+ with main.eventScheduler.idleCondition:
+ while not main.eventScheduler.isIdle():
+ main.eventScheduler.idleCondition.wait()
+ utilities.assert_equals( expect=main.TRUE,
+ actual=main.caseResult,
+ onpass="Balance masters test passed",
+ onfail="Balance masters test failed" )
+ time.sleep( main.caseSleep )
+
+ def CASE90( self, main ):
+ """
+ Sleep for some time
+ """
+ import time
+ from tests.CHOTestMonkey.dependencies.events.Event import EventType
+ from tests.CHOTestMonkey.dependencies.EventScheduler import EventScheduleMethod
+
+ main.log.report( "Sleep for some time" )
+ main.log.report( "__________________________________________________" )
+ main.case( "Sleep for some time" )
+ main.step( "Sleep for some time" )
+ main.caseResult = main.TRUE
+ sleepSec = int( main.params[ 'CASE90' ][ 'sleepSec' ] )
+ main.eventGenerator.triggerEvent( EventType().TEST_SLEEP, EventScheduleMethod().RUN_BLOCK, sleepSec )
+ with main.eventScheduler.idleCondition:
+ while not main.eventScheduler.isIdle():
+ main.eventScheduler.idleCondition.wait()
+ utilities.assert_equals( expect=main.TRUE,
+ actual=main.caseResult,
+ onpass="Sleep test passed",
+ onfail="Sleep test failed" )
+ time.sleep( main.caseSleep )
+
+ def CASE100( self, main ):
+ """
+ Do something else?
+ """
+ import time
+
+ main.log.report( "Do something else?" )
+ main.log.report( "__________________________________________________" )
+ main.case( "..." )
+
+ main.step( "Wait until the test stops" )
+
+ main.caseResult = main.TRUE
+ utilities.assert_equals( expect=main.TRUE,
+ actual=main.caseResult,
+ onpass="Test PASS",
+ onfail="Test FAIL" )
+
+ testDuration = int( main.params[ 'TEST' ][ 'testDuration' ] )
+ time.sleep( testDuration )
diff --git a/TestON/tests/CHOTestMonkey/CHOTestMonkey.topo b/TestON/tests/CHOTestMonkey/CHOTestMonkey.topo
new file mode 100644
index 0000000..7c7cfd7
--- /dev/null
+++ b/TestON/tests/CHOTestMonkey/CHOTestMonkey.topo
@@ -0,0 +1,50 @@
+<TOPOLOGY>
+ <COMPONENT>
+
+ <ONOSbench>
+ <host>localhost</host>
+ <user>admin</user>
+ <password></password>
+ <type>OnosDriver</type>
+ <connect_order>1</connect_order>
+ <COMPONENTS> </COMPONENTS>
+ </ONOSbench>
+
+ <ONOScli1>
+ <host>localhost</host>
+ <user>admin</user>
+ <password></password>
+ <type>OnosCliDriver</type>
+ <connect_order>2</connect_order>
+ <COMPONENTS> </COMPONENTS>
+ </ONOScli1>
+
+ <ONOScli2>
+ <host>localhost</host>
+ <user>admin</user>
+ <password></password>
+ <type>OnosCliDriver</type>
+ <connect_order>3</connect_order>
+ <COMPONENTS> </COMPONENTS>
+ </ONOScli2>
+
+ <ONOScli3>
+ <host>localhost</host>
+ <user>admin</user>
+ <password></password>
+ <type>OnosCliDriver</type>
+ <connect_order>4</connect_order>
+ <COMPONENTS> </COMPONENTS>
+ </ONOScli3>
+
+ <Mininet1>
+ <host>OCN</host>
+ <user>admin</user>
+ <password></password>
+ <type>MininetCliDriver</type>
+ <connect_order>10</connect_order>
+ <COMPONENTS> </COMPONENTS>
+ </Mininet1>
+
+ </COMPONENT>
+</TOPOLOGY>
\ No newline at end of file
diff --git a/TestON/tests/CHOTestMonkey/README b/TestON/tests/CHOTestMonkey/README
new file mode 100644
index 0000000..cfd85e7
--- /dev/null
+++ b/TestON/tests/CHOTestMonkey/README
@@ -0,0 +1,33 @@
+CHO TEST MONKEY
+
+Summary:
+    This is a long-term regression test designed to run for weeks at a
+    time. Its goal is to find memory leaks or bugs that otherwise
+    cannot be easily seen with short-term tests.
+    CHOTestMonkey is the 2016 version of CHOtest. The suffix "Monkey"
+    refers both to Chaos Monkey style testing and to 2016, the year of
+    the Monkey.
+
+Topology:
+    Att topology - 25 switches and 25 hosts; its structure is
+                   designed to simulate a real-world configuration.
+    Chordal topology - 25 switches and 25 hosts; because of its
+                       chordal graph structure, it is particularly
+                       useful for testing the rerouting capability
+                       of ONOS.
+    Leaf-spine topology - 78 switches and 68 hosts, designed to
+                          simulate modern data centers.
+
+Pre-requisites:
+    To run out-of-the-box, this test requires 3 nodes. The cell file
+    must be passed through the startup test command, e.g.,
+
+    ./cli.py run CHOTestMonkey onoscell <cell name>.
+
+    Passwordless login must be set up from the TestStation "sdn" root
+    user.
+    This test relies on the topology files located in the
+    dependencies/topologies folder to start Mininet. Be sure to check
+    that Mininet can load each topology file properly by using this
+    command:
+
+ sudo ~/dependencies/topologies/<topology name>
+
diff --git a/TestON/tests/CHOTestMonkey/__init__.py b/TestON/tests/CHOTestMonkey/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/TestON/tests/CHOTestMonkey/__init__.py
diff --git a/TestON/tests/CHOTestMonkey/dependencies/EventGenerator.py b/TestON/tests/CHOTestMonkey/dependencies/EventGenerator.py
new file mode 100644
index 0000000..fb72d00
--- /dev/null
+++ b/TestON/tests/CHOTestMonkey/dependencies/EventGenerator.py
@@ -0,0 +1,559 @@
+"""
+This file contains the event generator class for CHOTestMonkey
+Author: you@onlab.us
+"""
+from threading import Lock, Condition
+from tests.CHOTestMonkey.dependencies.events.Event import EventType, EventStates, Event
+from tests.CHOTestMonkey.dependencies.EventScheduler import EventScheduleMethod
+from tests.CHOTestMonkey.dependencies.GraphHelper import GraphHelper
+
+class MessageType:
+ def __init__( self ):
+ self.map = {}
+ # This message type is used for requesting an event injection from outside CHOTestMonkey
+ self.EVENT_REQUEST = 1
+ self.map[ 1 ] = 'EVENT_REQUEST'
+        # This message type will force the event generator to accept the event injection request for debugging purposes
+ self.EVENT_REQUEST_DEBUG = 2
+ self.map[ 2 ] = 'EVENT_REQUEST_DEBUG'
+ # This message type implies the event generator has inserted the event
+ self.EVENT_INSERTED = 10
+ self.map[ 10 ] = 'EVENT_INSERTED'
+ # This message type implies CHOTestMonkey has refused the event injection request
+        # due to, e.g., too many pending events in the scheduler
+ self.EVENT_DENIED = 11
+ self.map[ 11 ] = 'EVENT_DENIED'
+        # The following are error messages
+ self.UNKNOWN_MESSAGE = 20
+ self.map[ 20 ] = 'UNKNOWN_MESSAGE'
+ self.UNKNOWN_EVENT_TYPE = 21
+ self.map[ 21 ] = 'UNKNOWN_EVENT_TYPE'
+ self.UNKNOWN_SCHEDULE_METHOD = 22
+ self.map[ 22 ] = 'UNKNOWN_SCHEDULE_METHOD'
+ self.NOT_ENOUGH_ARGUMENT = 23
+ self.map[ 23 ] = 'NOT_ENOUGH_ARGUMENT'
+
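+# Illustrative sketch of the request protocol handled by
+# EventGenerator.handleConnection() below. A trigger process (see
+# EventTrigger.py and cli.py) connects to the listener port configured in
+# main.params[ 'GENERATOR' ][ 'listenerPort' ] and sends a list; the reply is
+# one of the MessageType codes above. The port and device names here are only
+# examples:
+#
+#     from multiprocessing.connection import Client
+#     conn = Client( ( "localhost", 6000 ) )
+#     # [ messageType, eventTypeString, scheduleMethodString, args... ]
+#     conn.send( [ MessageType().EVENT_REQUEST, 'NETWORK_LINK_DOWN',
+#                  'RUN_BLOCK', 's1', 's2' ] )
+#     response = conn.recv()  # e.g. 10 (EVENT_INSERTED) or 11 (EVENT_DENIED)
+#     conn.close()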
+class EventGenerator:
+ def __init__( self ):
+ self.default = ''
+ self.eventGeneratorLock = Lock()
+
+ def startListener( self ):
+ """
+ Listen to event triggers
+ """
+ from multiprocessing.connection import Listener
+ import time
+
+ host = "localhost"
+ port = int( main.params[ 'GENERATOR' ][ 'listenerPort' ] )
+ address = ( host, port )
+ listener = Listener( address )
+ main.log.info( "Event Generator - Event listener start listening on %s:%s" % ( host, port ) )
+
+ while 1:
+ conn = listener.accept()
+ t = main.Thread( target=self.handleConnection,
+ threadID=main.threadID,
+ name="handleConnection",
+ args=[ conn ])
+ t.start()
+ with main.variableLock:
+ main.threadID += 1
+ listener.close()
+
+ def handleConnection( self, conn ):
+ """
+ Handle connections from event triggers
+ """
+ request = conn.recv()
+ if isinstance( request, list ) and ( request[ 0 ] == MessageType().EVENT_REQUEST or request[ 0 ] == MessageType().EVENT_REQUEST_DEBUG ):
+ if len( request ) < 3:
+ response = MessageType().NOT_ENOUGH_ARGUMENT
+ elif request[ 0 ] == MessageType().EVENT_REQUEST and not main.eventScheduler.isAvailable():
+ response = MessageType().EVENT_DENIED
+ else:
+ typeString = str( request[ 1 ] )
+ scheduleMethodString = str( request[ 2 ] )
+ if len( request ) > 3:
+ args = request[ 3: ]
+ else:
+ args = None
+ for key, value in EventType().map.items():
+ if value == typeString:
+ typeIndex = key
+ break
+ if not value == typeString:
+ response = MessageType().UNKNOWN_EVENT_TYPE
+ else:
+ for key, value in EventScheduleMethod().map.items():
+ if value == scheduleMethodString:
+ scheduleMethod = key
+ break
+ if not value == scheduleMethodString:
+ response = MessageType().UNKNOWN_SCHEDULE_METHOD
+ else:
+ self.insertEvent( typeIndex, scheduleMethod, args )
+ response = MessageType().EVENT_INSERTED
+ else:
+ response = MessageType().UNKNOWN_MESSAGE
+ conn.send( response )
+ conn.close()
+
+ def triggerEvent( self, typeIndex, scheduleMethod, *args ):
+ """
+ This function triggers an event from inside of CHOTestMonkey
+ """
+ import time
+ if not typeIndex in EventType().map.keys():
+ main.log.warn( "Event Generator - Unknown event type: " + str( typeIndex ) )
+ return
+ if not scheduleMethod in EventScheduleMethod().map.keys():
+ main.log.warn( "Event Generator - Unknown event schedule method: " + str( scheduleMethod ) )
+ return
+ while not main.eventScheduler.isAvailable():
+ time.sleep( int( main.params[ 'GENERATOR' ][ 'insertEventRetryInterval' ] ) )
+ self.insertEvent( typeIndex, scheduleMethod, list( args ) )
+
+ def insertEvent( self, typeIndex, scheduleMethod, args=None ):
+ """
+ This function inserts an event into the scheduler
+ """
+ if typeIndex > 100:
+ # Handle group events
+ if not typeIndex in main.enabledEvents.keys():
+ main.log.warn( "Event Generator - event type %s not enabled" % ( typeIndex ) )
+ return
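+            # Group events are dispatched by name: main.enabledEvents maps the
+            # type index to the name of a generator method defined in this
+            # class, which is then resolved via getattr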
+ function = getattr( self, main.enabledEvents[ typeIndex ] )
+            assert function is not None, "Event Generator - function for group event " + str( typeIndex ) + " not found"
+ function( scheduleMethod, args )
+ else:
+ # Add individual events to the scheduler
+ main.eventScheduler.scheduleEvent( typeIndex, scheduleMethod, args )
+
+ def insertAllChecks( self, args=None ):
+ """
+        Acquire eventGeneratorLock before calling this function
+ """
+ for eventType in main.enabledEvents.keys():
+ if eventType < 100 and EventType().map[ eventType ].startswith( 'CHECK' ):
+ main.eventScheduler.scheduleEvent( eventType,
+ EventScheduleMethod().RUN_NON_BLOCK,
+ args )
+
+ def addAllChecks( self, scheduleMethod, args=None ):
+ """
+ The function adds all check events into the scheduler
+ """
+ with self.eventGeneratorLock:
+ if scheduleMethod == EventScheduleMethod().RUN_BLOCK:
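+                # A NULL event scheduled with RUN_BLOCK acts as a barrier: the
+                # scheduler waits for all running events to finish before
+                # starting it, so the checks inserted below cannot overlap
+                # with earlier events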
+ main.eventScheduler.scheduleEvent( EventType().NULL, EventScheduleMethod().RUN_BLOCK )
+ self.insertAllChecks( args )
+ if scheduleMethod == EventScheduleMethod().RUN_BLOCK:
+ main.eventScheduler.scheduleEvent( EventType().NULL, EventScheduleMethod().RUN_BLOCK )
+
+ def randomLinkToggle( self, scheduleMethod, args=[ 5 ], blocking=True ):
+ """
+ The function randomly adds a link down-up event pair into the scheduler
+ After each individual link event, all checks are inserted into the scheduler
+ param:
+ args[0] is the average interval between link down and link up events
+ blocking means blocking other events from being scheduled between link down and link up
+ """
+ import random
+ import time
+
+ if len( args ) < 1:
+ main.log.warn( "Event Generator - Not enough arguments for randomLinkToggle: %s" % ( args ) )
+ elif len( args ) > 1:
+ main.log.warn( "Event Generator - Too many arguments for randomLinkToggle: %s" % ( args ) )
+ else:
+ downUpAvgInterval = int( args[ 0 ] )
+ with main.variableLock:
+ graphHelper = GraphHelper()
+ availableLinks = graphHelper.getNonCutEdges()
+ if len( availableLinks ) == 0:
+ main.log.warn( "All links are cut edges, aborting event" )
+ return
+ link = random.sample( availableLinks, 1 )
+
+ self.eventGeneratorLock.acquire()
+ main.eventScheduler.scheduleEvent( EventType().NETWORK_LINK_DOWN,
+ scheduleMethod,
+ [ link[ 0 ].deviceA.name, link[ 0 ].deviceB.name ] )
+ with main.variableLock:
+ link[ 0 ].setPendingDown()
+ link[ 0 ].backwardLink.setPendingDown()
+ sleepTime = int( main.params[ 'EVENT' ][ 'randomLinkToggle' ][ 'sleepBeforeCheck' ] )
+ main.eventScheduler.scheduleEvent( EventType().TEST_SLEEP, EventScheduleMethod().RUN_BLOCK, [ sleepTime ] )
+ self.insertAllChecks( EventScheduleMethod().RUN_NON_BLOCK )
+ if scheduleMethod == EventScheduleMethod().RUN_BLOCK:
+ # Insert a NULL BLOCK event
+ main.eventScheduler.scheduleEvent( EventType().NULL, EventScheduleMethod().RUN_BLOCK )
+ downUpInterval = abs( random.gauss( downUpAvgInterval, 1 ) )
+ if not blocking:
+ self.eventGeneratorLock.release()
+ time.sleep( downUpInterval )
+ self.eventGeneratorLock.acquire()
+ else:
+ time.sleep( downUpInterval )
+ main.eventScheduler.scheduleEvent( EventType().NETWORK_LINK_UP,
+ scheduleMethod,
+ [ link[ 0 ].deviceA.name, link[ 0 ].deviceB.name ] )
+ main.eventScheduler.scheduleEvent( EventType().TEST_SLEEP, EventScheduleMethod().RUN_BLOCK, [ sleepTime ] )
+ self.insertAllChecks( EventScheduleMethod().RUN_NON_BLOCK )
+ if scheduleMethod == EventScheduleMethod().RUN_BLOCK:
+ main.eventScheduler.scheduleEvent( EventType().NULL, EventScheduleMethod().RUN_BLOCK )
+ self.eventGeneratorLock.release()
+
+ def randomLinkGroupToggle( self, scheduleMethod, args=None, blocking=True ):
+ """
+ The function randomly adds a group of link down-up events into the scheduler
+ After each link down or up, all checks are inserted into the scheduler
+ param:
+ args[0] is the number of links that are to be brought down
+ args[1] is the average interval between link down events
+ args[2] is the average interval between link group down and group up events
+ blocking means blocking other events from being scheduled between link events
+ """
+ import random
+ import time
+
+ if len( args ) < 3:
+ main.log.warn( "Event Generator - Not enough arguments for randomLinkGroupToggle: %s" % ( args ) )
+ elif len( args ) > 3:
+ main.log.warn( "Event Generator - Too many arguments for randomLinkGroupToggle: %s" % ( args ) )
+ else:
+ linkGroupSize = int( args[ 0 ] )
+ downDownAvgInterval = int( args[ 1 ] )
+ downUpAvgInterval = int( args[ 2 ] )
+ downLinks = []
+ for i in range( 0, linkGroupSize ):
+ with main.variableLock:
+ graphHelper = GraphHelper()
+ availableLinks = graphHelper.getNonCutEdges()
+ if len( availableLinks ) == 0:
+ main.log.warn( "All links are cut edges, aborting event" )
+ continue
+ link = random.sample( availableLinks, 1 )
+ if i == 0:
+ self.eventGeneratorLock.acquire()
+ main.eventScheduler.scheduleEvent( EventType().NETWORK_LINK_DOWN,
+ scheduleMethod,
+ [ link[ 0 ].deviceA.name, link[ 0 ].deviceB.name ] )
+ with main.variableLock:
+ link[ 0 ].setPendingDown()
+ link[ 0 ].backwardLink.setPendingDown()
+ downLinks.append( link[ 0 ] )
+ sleepTime = int( main.params[ 'EVENT' ][ 'randomLinkGroupToggle' ][ 'sleepBeforeCheck' ] )
+ main.eventScheduler.scheduleEvent( EventType().TEST_SLEEP, EventScheduleMethod().RUN_BLOCK, [ sleepTime ] )
+ self.insertAllChecks( EventScheduleMethod().RUN_NON_BLOCK )
+ if scheduleMethod == EventScheduleMethod().RUN_BLOCK:
+ # Insert a NULL BLOCK event
+ main.eventScheduler.scheduleEvent( EventType().NULL, EventScheduleMethod().RUN_BLOCK )
+ downDownInterval = abs( random.gauss( downDownAvgInterval, 1 ) )
+ if not blocking:
+ self.eventGeneratorLock.release()
+ time.sleep( downDownInterval )
+ self.eventGeneratorLock.acquire()
+ else:
+ time.sleep( downDownInterval )
+
+ downUpInterval = abs( random.gauss( downUpAvgInterval, 1 ) )
+ if not blocking:
+ self.eventGeneratorLock.release()
+ time.sleep( downUpInterval )
+ self.eventGeneratorLock.acquire()
+ else:
+ time.sleep( downUpInterval )
+
+ for link in downLinks:
+ main.eventScheduler.scheduleEvent( EventType().NETWORK_LINK_UP,
+ scheduleMethod,
+ [ link.deviceA.name, link.deviceB.name ] )
+ main.eventScheduler.scheduleEvent( EventType().TEST_SLEEP, EventScheduleMethod().RUN_BLOCK, [ sleepTime ] )
+ self.insertAllChecks( EventScheduleMethod().RUN_NON_BLOCK )
+ if scheduleMethod == EventScheduleMethod().RUN_BLOCK:
+ main.eventScheduler.scheduleEvent( EventType().NULL, EventScheduleMethod().RUN_BLOCK )
+ upUpInterval = abs( random.gauss( downDownAvgInterval, 1 ) )
+ if not blocking:
+ self.eventGeneratorLock.release()
+ time.sleep( upUpInterval )
+ self.eventGeneratorLock.acquire()
+ else:
+ time.sleep( upUpInterval )
+ self.eventGeneratorLock.release()
+
+ def randomDeviceToggle( self, scheduleMethod, args=[ 5 ], blocking=True ):
+ """
+ The function randomly removes a device and then adds it back
+ After each individual device event, all checks are inserted into the scheduler
+ param:
+ args[0] is the average interval between device down and device up events
+ blocking means blocking other events from being scheduled between device down and device up
+ """
+ import random
+ import time
+
+ if len( args ) < 1:
+ main.log.warn( "Event Generator - Not enough arguments for randomDeviceToggle: %s" % ( args ) )
+ elif len( args ) > 1:
+ main.log.warn( "Event Generator - Too many arguments for randomDeviceToggle: %s" % ( args ) )
+ else:
+ downUpAvgInterval = int( args[ 0 ] )
+ with main.variableLock:
+ graphHelper = GraphHelper()
+ availableDevices = graphHelper.getNonCutVertices()
+ if len( availableDevices ) == 0:
+ main.log.warn( "All devices are Cut vertices, aborting event" )
+ return
+ device = random.sample( availableDevices, 1 )
+
+ self.eventGeneratorLock.acquire()
+ main.eventScheduler.scheduleEvent( EventType().NETWORK_DEVICE_DOWN,
+ scheduleMethod,
+ [ device[ 0 ].name ] )
+ with main.variableLock:
+ device[ 0 ].setPendingDown()
+ sleepTime = int( main.params[ 'EVENT' ][ 'randomLinkToggle' ][ 'sleepBeforeCheck' ] )
+ main.eventScheduler.scheduleEvent( EventType().TEST_SLEEP, EventScheduleMethod().RUN_BLOCK, [ sleepTime ] )
+ self.insertAllChecks( EventScheduleMethod().RUN_NON_BLOCK )
+ if scheduleMethod == EventScheduleMethod().RUN_BLOCK:
+ # Insert a NULL BLOCK event
+ main.eventScheduler.scheduleEvent( EventType().NULL, EventScheduleMethod().RUN_BLOCK )
+ downUpInterval = abs( random.gauss( downUpAvgInterval, 1 ) )
+ if not blocking:
+ self.eventGeneratorLock.release()
+ time.sleep( downUpInterval )
+ self.eventGeneratorLock.acquire()
+ else:
+ time.sleep( downUpInterval )
+ main.eventScheduler.scheduleEvent( EventType().NETWORK_DEVICE_UP,
+ scheduleMethod,
+ [ device[ 0 ].name ] )
+ main.eventScheduler.scheduleEvent( EventType().TEST_SLEEP, EventScheduleMethod().RUN_BLOCK, [ sleepTime ] )
+ self.insertAllChecks( EventScheduleMethod().RUN_NON_BLOCK )
+ if scheduleMethod == EventScheduleMethod().RUN_BLOCK:
+ main.eventScheduler.scheduleEvent( EventType().NULL, EventScheduleMethod().RUN_BLOCK )
+ self.eventGeneratorLock.release()
+
+ def randomDeviceGroupToggle( self, scheduleMethod, args=None, blocking=True ):
+ """
+ The function randomly adds a group of device down-up events into the scheduler
+ After each device down or up, all checks are inserted into the scheduler
+ param:
+ args[0] is the number of devices that are to be brought down
+ args[1] is the average interval between device down events
+ args[2] is the average interval between device group down and group up events
+ blocking means blocking other events from being scheduled between device events
+ """
+ import random
+ import time
+
+ if len( args ) < 3:
+ main.log.warn( "Event Generator - Not enough arguments for randomDeviceGroupToggle: %s" % ( args ) )
+ elif len( args ) > 3:
+ main.log.warn( "Event Generator - Too many arguments for randomDeviceGroupToggle: %s" % ( args ) )
+ else:
+ deviceGroupSize = int( args[ 0 ] )
+ downDownAvgInterval = int( args[ 1 ] )
+ downUpAvgInterval = int( args[ 2 ] )
+ downDevices = []
+ for i in range( 0, deviceGroupSize ):
+ with main.variableLock:
+ graphHelper = GraphHelper()
+ availableDevices = graphHelper.getNonCutVertices()
+ if len( availableDevices ) == 0:
+ main.log.warn( "All devices are cut vertices, aborting event" )
+ continue
+ device = random.sample( availableDevices, 1 )
+ if i == 0:
+ self.eventGeneratorLock.acquire()
+ main.eventScheduler.scheduleEvent( EventType().NETWORK_DEVICE_DOWN,
+ scheduleMethod,
+ [ device[ 0 ].name ] )
+ with main.variableLock:
+ device[ 0 ].setPendingDown()
+ downDevices.append( device[ 0 ] )
+ sleepTime = int( main.params[ 'EVENT' ][ 'randomLinkGroupToggle' ][ 'sleepBeforeCheck' ] )
+ main.eventScheduler.scheduleEvent( EventType().TEST_SLEEP, EventScheduleMethod().RUN_BLOCK, [ sleepTime ] )
+ self.insertAllChecks( EventScheduleMethod().RUN_NON_BLOCK )
+ if scheduleMethod == EventScheduleMethod().RUN_BLOCK:
+ # Insert a NULL BLOCK event
+ main.eventScheduler.scheduleEvent( EventType().NULL, EventScheduleMethod().RUN_BLOCK )
+ downDownInterval = abs( random.gauss( downDownAvgInterval, 1 ) )
+ if not blocking:
+ self.eventGeneratorLock.release()
+ time.sleep( downDownInterval )
+ self.eventGeneratorLock.acquire()
+ else:
+ time.sleep( downDownInterval )
+
+ downUpInterval = abs( random.gauss( downUpAvgInterval, 1 ) )
+ if not blocking:
+ self.eventGeneratorLock.release()
+ time.sleep( downUpInterval )
+ self.eventGeneratorLock.acquire()
+ else:
+ time.sleep( downUpInterval )
+
+ for device in downDevices:
+ main.eventScheduler.scheduleEvent( EventType().NETWORK_DEVICE_UP,
+ scheduleMethod,
+ [ device.name ] )
+ main.eventScheduler.scheduleEvent( EventType().TEST_SLEEP, EventScheduleMethod().RUN_BLOCK, [ sleepTime ] )
+ self.insertAllChecks( EventScheduleMethod().RUN_NON_BLOCK )
+ if scheduleMethod == EventScheduleMethod().RUN_BLOCK:
+ main.eventScheduler.scheduleEvent( EventType().NULL, EventScheduleMethod().RUN_BLOCK )
+ upUpInterval = abs( random.gauss( downDownAvgInterval, 1 ) )
+ if not blocking:
+ self.eventGeneratorLock.release()
+ time.sleep( upUpInterval )
+ self.eventGeneratorLock.acquire()
+ else:
+ time.sleep( upUpInterval )
+ self.eventGeneratorLock.release()
+
+ def installAllHostIntents( self, scheduleMethod, args=None ):
+ """
+ This function installs host intents for all host pairs
+        After all intent events are inserted, this function also inserts intent and traffic checks
+ """
+ import itertools
+
+ with self.eventGeneratorLock:
+ if scheduleMethod == EventScheduleMethod().RUN_BLOCK:
+ main.eventScheduler.scheduleEvent( EventType().NULL, EventScheduleMethod().RUN_BLOCK )
+ availableControllers = []
+ for controller in main.controllers:
+ if controller.isUp():
+ availableControllers.append( controller.index )
+ if len( availableControllers ) == 0:
+ main.log.warn( "Event Generator - No available controllers" )
+ return
+ hostCombos = list( itertools.combinations( main.hosts, 2 ) )
+ for i in xrange( 0, len( hostCombos ), len( availableControllers ) ):
+ for CLIIndex in availableControllers:
+ if i >= len( hostCombos ):
+ break
+ main.eventScheduler.scheduleEvent( EventType().APP_INTENT_HOST_ADD,
+ EventScheduleMethod().RUN_NON_BLOCK,
+ [ hostCombos[ i ][ 0 ].name, hostCombos[ i ][ 1 ].name, CLIIndex ] )
+ i += 1
+ # Pending checks after installing all intents
+ if scheduleMethod == EventScheduleMethod().RUN_BLOCK:
+ main.eventScheduler.scheduleEvent( EventType().NULL, EventScheduleMethod().RUN_BLOCK )
+ sleepTime = int( main.params[ 'EVENT' ][ 'installAllHostIntents' ][ 'sleepBeforeCheck' ] )
+ main.eventScheduler.scheduleEvent( EventType().TEST_SLEEP, EventScheduleMethod().RUN_BLOCK, [ sleepTime ] )
+ main.eventScheduler.scheduleEvent( EventType().CHECK_INTENT, EventScheduleMethod().RUN_NON_BLOCK )
+ main.eventScheduler.scheduleEvent( EventType().CHECK_TRAFFIC, EventScheduleMethod().RUN_NON_BLOCK )
+ if scheduleMethod == EventScheduleMethod().RUN_BLOCK:
+ main.eventScheduler.scheduleEvent( EventType().NULL, EventScheduleMethod().RUN_BLOCK )
+
+ def removeAllHostIntents( self, scheduleMethod, args=None ):
+ """
+ This function removes host intents for all host pairs
+        After all intent events are inserted, this function also inserts intent and traffic checks
+ """
+ import itertools
+
+ with self.eventGeneratorLock:
+ if scheduleMethod == EventScheduleMethod().RUN_BLOCK:
+ main.eventScheduler.scheduleEvent( EventType().NULL, EventScheduleMethod().RUN_BLOCK )
+ availableControllers = []
+ for controller in main.controllers:
+ if controller.isUp():
+ availableControllers.append( controller.index )
+ if len( availableControllers ) == 0:
+ main.log.warn( "Event Generator - No available controllers" )
+ return
+ hostCombos = list( itertools.combinations( main.hosts, 2 ) )
+ for i in xrange( 0, len( hostCombos ), len( availableControllers ) ):
+ for CLIIndex in availableControllers:
+ if i >= len( hostCombos ):
+ break
+ main.eventScheduler.scheduleEvent( EventType().APP_INTENT_HOST_DEL,
+ EventScheduleMethod().RUN_NON_BLOCK,
+ [ hostCombos[ i ][ 0 ].name, hostCombos[ i ][ 1 ].name, CLIIndex ] )
+ i += 1
+ # Pending checks after removing all intents
+ if scheduleMethod == EventScheduleMethod().RUN_BLOCK:
+ main.eventScheduler.scheduleEvent( EventType().NULL, EventScheduleMethod().RUN_BLOCK )
+ sleepTime = int( main.params[ 'EVENT' ][ 'removeAllHostIntents' ][ 'sleepBeforeCheck' ] )
+ main.eventScheduler.scheduleEvent( EventType().TEST_SLEEP, EventScheduleMethod().RUN_BLOCK, [ sleepTime ] )
+ main.eventScheduler.scheduleEvent( EventType().CHECK_INTENT, EventScheduleMethod().RUN_NON_BLOCK )
+ main.eventScheduler.scheduleEvent( EventType().CHECK_TRAFFIC, EventScheduleMethod().RUN_NON_BLOCK )
+ if scheduleMethod == EventScheduleMethod().RUN_BLOCK:
+ main.eventScheduler.scheduleEvent( EventType().NULL, EventScheduleMethod().RUN_BLOCK )
+
+ def installAllPointIntents( self, scheduleMethod, args=None ):
+ """
+ This function installs point intents for all device pairs
+        After all intent events are inserted, this function also inserts intent and traffic checks
+ """
+ import itertools
+
+ with self.eventGeneratorLock:
+ if scheduleMethod == EventScheduleMethod().RUN_BLOCK:
+ main.eventScheduler.scheduleEvent( EventType().NULL, EventScheduleMethod().RUN_BLOCK )
+ availableControllers = []
+ for controller in main.controllers:
+ if controller.isUp():
+ availableControllers.append( controller.index )
+ if len( availableControllers ) == 0:
+ main.log.warn( "Event Generator - No available controllers" )
+ return
+ deviceCombos = list( itertools.permutations( main.devices, 2 ) )
+ for i in xrange( 0, len( deviceCombos ), len( availableControllers ) ):
+ for CLIIndex in availableControllers:
+ if i >= len( deviceCombos ):
+ break
+ main.eventScheduler.scheduleEvent( EventType().APP_INTENT_POINT_ADD,
+ EventScheduleMethod().RUN_NON_BLOCK,
+ [ deviceCombos[ i ][ 0 ].name, deviceCombos[ i ][ 1 ].name, CLIIndex ] )
+ i += 1
+ # Pending checks after installing all intents
+ if scheduleMethod == EventScheduleMethod().RUN_BLOCK:
+ main.eventScheduler.scheduleEvent( EventType().NULL, EventScheduleMethod().RUN_BLOCK )
+ sleepTime = int( main.params[ 'EVENT' ][ 'installAllPointIntents' ][ 'sleepBeforeCheck' ] )
+ main.eventScheduler.scheduleEvent( EventType().TEST_SLEEP, EventScheduleMethod().RUN_BLOCK, [ sleepTime ] )
+ main.eventScheduler.scheduleEvent( EventType().CHECK_INTENT, EventScheduleMethod().RUN_NON_BLOCK )
+ main.eventScheduler.scheduleEvent( EventType().CHECK_TRAFFIC, EventScheduleMethod().RUN_NON_BLOCK )
+ if scheduleMethod == EventScheduleMethod().RUN_BLOCK:
+ main.eventScheduler.scheduleEvent( EventType().NULL, EventScheduleMethod().RUN_BLOCK )
+
+ def removeAllPointIntents( self, scheduleMethod, args=None ):
+ """
+ This function removes point intents for all device pairs
+        After all intent events are inserted, this function also inserts intent and traffic checks
+ """
+ import itertools
+
+ with self.eventGeneratorLock:
+ if scheduleMethod == EventScheduleMethod().RUN_BLOCK:
+ main.eventScheduler.scheduleEvent( EventType().NULL, EventScheduleMethod().RUN_BLOCK )
+ availableControllers = []
+ for controller in main.controllers:
+ if controller.isUp():
+ availableControllers.append( controller.index )
+ if len( availableControllers ) == 0:
+ main.log.warn( "Event Generator - No available controllers" )
+ return
+ deviceCombos = list( itertools.permutations( main.devices, 2 ) )
+ for i in xrange( 0, len( deviceCombos ), len( availableControllers ) ):
+ for CLIIndex in availableControllers:
+ if i >= len( deviceCombos ):
+ break
+ main.eventScheduler.scheduleEvent( EventType().APP_INTENT_POINT_DEL,
+ EventScheduleMethod().RUN_NON_BLOCK,
+ [ deviceCombos[ i ][ 0 ].name, deviceCombos[ i ][ 1 ].name, CLIIndex ] )
+ i += 1
+ # Pending checks after removing all intents
+ if scheduleMethod == EventScheduleMethod().RUN_BLOCK:
+ main.eventScheduler.scheduleEvent( EventType().NULL, EventScheduleMethod().RUN_BLOCK )
+ sleepTime = int( main.params[ 'EVENT' ][ 'removeAllPointIntents' ][ 'sleepBeforeCheck' ] )
+ main.eventScheduler.scheduleEvent( EventType().TEST_SLEEP, EventScheduleMethod().RUN_BLOCK, [ sleepTime ] )
+ main.eventScheduler.scheduleEvent( EventType().CHECK_INTENT, EventScheduleMethod().RUN_NON_BLOCK )
+ main.eventScheduler.scheduleEvent( EventType().CHECK_TRAFFIC, EventScheduleMethod().RUN_NON_BLOCK )
+ if scheduleMethod == EventScheduleMethod().RUN_BLOCK:
+ main.eventScheduler.scheduleEvent( EventType().NULL, EventScheduleMethod().RUN_BLOCK )
diff --git a/TestON/tests/CHOTestMonkey/dependencies/EventScheduler.py b/TestON/tests/CHOTestMonkey/dependencies/EventScheduler.py
new file mode 100644
index 0000000..7e4c95c
--- /dev/null
+++ b/TestON/tests/CHOTestMonkey/dependencies/EventScheduler.py
@@ -0,0 +1,197 @@
+"""
+This file contains the event scheduler class for CHOTestMonkey
+Author: you@onlab.us
+"""
+from threading import Lock, Condition
+from tests.CHOTestMonkey.dependencies.events.Event import EventType, EventStates, Event
+from tests.CHOTestMonkey.dependencies.events.TestEvent import *
+from tests.CHOTestMonkey.dependencies.events.CheckEvent import *
+from tests.CHOTestMonkey.dependencies.events.NetworkEvent import *
+from tests.CHOTestMonkey.dependencies.events.AppEvent import *
+from tests.CHOTestMonkey.dependencies.events.ONOSEvent import *
+
+class EventScheduleMethod:
+ def __init__( self ):
+ self.map = {}
+ self.RUN_NON_BLOCK = 1
+ self.map[ 1 ] = 'RUN_NON_BLOCK'
+ self.RUN_BLOCK = -1
+ self.map[ -1 ] = 'RUN_BLOCK'
+
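+# RUN_NON_BLOCK events are started on their own thread as soon as they are
+# dequeued; a RUN_BLOCK event first waits until runningEvents is empty and
+# then runs inline, blocking every event queued behind it (see startScheduler
+# below).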
+class EventTuple:
+ def __init__( self, id, className, typeString, typeIndex, scheduleMethod, args, rerunInterval, maxRerunNum ):
+ self.default = ''
+        self.id = id
+ self.className = className
+ self.typeString = typeString
+ self.typeIndex = typeIndex
+ self.scheduleMethod = scheduleMethod
+ self.args = args
+ self.rerunInterval = rerunInterval
+ self.maxRerunNum = maxRerunNum
+
+ def startEvent( self ):
+ assert self.className in globals().keys()
+ event = globals()[ self.className ]
+ return event().startEvent( self.args )
+
+class EventScheduler:
+ def __init__( self ):
+ self.default = ''
+ self.pendingEvents = []
+ self.pendingEventsCondition = Condition()
+ self.runningEvents = []
+ self.runningEventsCondition = Condition()
+ self.isRunning = True
+ self.idleCondition = Condition()
+ self.pendingEventsCapacity = int( main.params[ 'SCHEDULER' ][ 'pendingEventsCapacity' ] )
+ self.runningEventsCapacity = int( main.params[ 'SCHEDULER' ][ 'runningEventsCapacity' ] )
+ self.scheduleLoopSleep = float( main.params[ 'SCHEDULER' ][ 'scheduleLoopSleep' ] )
+
+ def scheduleEvent( self, typeIndex, scheduleMethod, args=None, index=-1 ):
+ """
+ Insert an event to pendingEvents
+ param:
+ index: the position to insert into pendingEvents, default value -1 implies the tail of pendingEvents
+ """
+ if not typeIndex in main.enabledEvents.keys():
+ main.log.warn( "Event Scheduler - event type %s not enabled" % ( typeIndex ) )
+ return
+ if main.enabledEvents[ typeIndex ] in main.params[ 'EVENT' ].keys():
+ if 'rerunInterval' in main.params[ 'EVENT' ][ main.enabledEvents[ typeIndex ] ].keys():
+ rerunInterval = int( main.params[ 'EVENT' ][ main.enabledEvents[ typeIndex ] ][ 'rerunInterval' ] )
+ maxRerunNum = int( main.params[ 'EVENT' ][ main.enabledEvents[ typeIndex ] ][ 'maxRerunNum' ] )
+ else:
+ rerunInterval = int( main.params[ 'EVENT' ][ 'Event' ][ 'rerunInterval' ] )
+ maxRerunNum = int( main.params[ 'EVENT' ][ 'Event' ][ 'maxRerunNum' ] )
+ eventTuple = EventTuple( main.eventID, main.enabledEvents[ typeIndex ], EventType().map[ typeIndex ], typeIndex, scheduleMethod, args, rerunInterval, maxRerunNum )
+ with main.variableLock:
+ main.eventID += 1
+ main.log.debug( "Event Scheduler - Event added: %s, %s, %s" % ( typeIndex,
+ scheduleMethod,
+ args ) )
+ with self.pendingEventsCondition:
+ if index == -1:
+ self.pendingEvents.append( eventTuple )
+ elif index > -1 and index <= len( self.pendingEvents ):
+ self.pendingEvents.insert( index, eventTuple )
+ else:
+ main.log.warn( "Event Scheduler - invalid index when isnerting event: %s" % ( index ) )
+ self.pendingEventsCondition.notify()
+ self.printEvents()
+
+ def startScheduler( self ):
+ """
+ Start the loop which schedules the events in pendingEvents
+ """
+ import time
+
+ while 1:
+ with self.pendingEventsCondition:
+ while len( self.pendingEvents ) == 0:
+ self.pendingEventsCondition.wait()
+ eventTuple = self.pendingEvents[ 0 ]
+ main.log.debug( "Event Scheduler - Scheduling event: %s, %s, %s" % ( eventTuple.typeIndex,
+ eventTuple.scheduleMethod,
+ eventTuple.args ) )
+ if eventTuple.scheduleMethod == EventScheduleMethod().RUN_NON_BLOCK:
+ # Run NON_BLOCK events using threads
+ with self.pendingEventsCondition:
+ self.pendingEvents.remove( eventTuple )
+ t = main.Thread( target=self.startEvent,
+ threadID=main.threadID,
+ name="startEvent",
+ args=[ eventTuple ])
+ t.start()
+ with main.variableLock:
+ main.threadID += 1
+ elif eventTuple.scheduleMethod == EventScheduleMethod().RUN_BLOCK:
+ # Wait for all other events before start
+ with self.runningEventsCondition:
+ while not len( self.runningEvents ) == 0:
+ self.runningEventsCondition.wait()
+                # BLOCK events will temporarily block the following events until they finish running
+ with self.pendingEventsCondition:
+ self.pendingEvents.remove( eventTuple )
+ self.startEvent( eventTuple )
+ else:
+ with self.pendingEventsCondition:
+ self.pendingEvents.remove( eventTuple )
+ time.sleep( self.scheduleLoopSleep )
+
+ def startEvent( self, eventTuple ):
+ """
+ Start a network/ONOS/application event
+ """
+ import time
+
+ with self.runningEventsCondition:
+ self.runningEvents.append( eventTuple )
+ self.printEvents()
+ rerunNum = 0
+ result = eventTuple.startEvent()
+ while result == EventStates().FAIL and rerunNum < eventTuple.maxRerunNum:
+ time.sleep( eventTuple.rerunInterval )
+ rerunNum += 1
+ main.log.debug( eventTuple.typeString + ": retry number " + str( rerunNum ) )
+ result = eventTuple.startEvent()
+ if result == EventStates().FAIL:
+ main.log.error( eventTuple.typeString + " failed" )
+ main.caseResult = main.FALSE
+ if main.params[ 'TEST' ][ 'pauseTest' ] == 'on':
+ #self.isRunning = False
+ #main.log.error( "Event Scheduler - Test paused. To resume test, run \'resume-test\' command in CLI debugging mode" )
+ main.stop()
+ with self.runningEventsCondition:
+ self.runningEvents.remove( eventTuple )
+ if len( self.runningEvents ) == 0:
+ self.runningEventsCondition.notify()
+ with self.pendingEventsCondition:
+ if len( self.pendingEvents ) == 0:
+ with self.idleCondition:
+ self.idleCondition.notify()
+ self.printEvents()
+
+ def printEvents( self ):
+ """
+ Print all the events in pendingEvents and runningEvents
+ """
+ events = " ["
+ with self.runningEventsCondition:
+ for index in range( 0, len( self.runningEvents ) - 1 ):
+ events += str( self.runningEvents[ index ].typeIndex )
+ events += ", "
+ if len( self.runningEvents ) > 0:
+ events += str( self.runningEvents[ -1 ].typeIndex )
+ events += "]"
+ events += " ["
+ with self.pendingEventsCondition:
+ for index in range( 0, len( self.pendingEvents ) - 1 ):
+ events += str( self.pendingEvents[ index ].typeIndex )
+ events += ", "
+ if len( self.pendingEvents ) > 0:
+ events += str( self.pendingEvents[ -1 ].typeIndex )
+ events += "]"
+ main.log.debug( "Event Scheduler - Events: " + events )
+
+ def isAvailable( self ):
+ with self.pendingEventsCondition:
+ with self.runningEventsCondition:
+ return len( self.pendingEvents ) < self.pendingEventsCapacity and\
+ len( self.runningEvents ) < self.runningEventsCapacity and\
+ self.isRunning
+
+ def isIdle( self ):
+ with self.pendingEventsCondition:
+ with self.runningEventsCondition:
+ return len( self.pendingEvents ) == 0 and\
+ len( self.runningEvents ) == 0 and\
+ self.isRunning
+
+ def setPendingEventsCapacity( self, capacity ):
+ self.pendingEventsCapacity = capacity
+
+ def setRunningState( self, state ):
+ assert state == True or state == False
+ self.isRunning = state
+
diff --git a/TestON/tests/CHOTestMonkey/dependencies/EventTrigger.py b/TestON/tests/CHOTestMonkey/dependencies/EventTrigger.py
new file mode 100644
index 0000000..0c3e0e0
--- /dev/null
+++ b/TestON/tests/CHOTestMonkey/dependencies/EventTrigger.py
@@ -0,0 +1,68 @@
+"""
+Insert network/ONOS/app events into CHOTestMonkey
+Author: you@onlab.us
+"""
+import time
+import random
+from multiprocessing.connection import Client
+
+def triggerEvent( type, scheduleMethod, *args ):
+ """
+ This function inserts an event into CHOTestMonkey
+ """
+ host = "localhost"
+ port = 6000
+ address = ( host, port )
+ conn = Client( address )
+ request = []
+ request.append( 2 )
+ request.append( type )
+ request.append( scheduleMethod )
+ for arg in args:
+ request.append( arg )
+ conn.send( request )
+ response = conn.recv()
+ while response == 11:
+ time.sleep( 1 )
+ conn.send( request )
+ response = conn.recv()
+ if response == 10:
+ print "Event inserted:", type, scheduleMethod, args
+ elif response == 20:
+ print "Unknown message to server"
+ elif response == 21:
+ print "Unknown event type to server"
+ elif response == 22:
+ print "Unknown schedule method to server"
+ elif response == 23:
+ print "Not enough argument"
+ else:
+ print "Unknown response from server:", response
+ conn.close()
+
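+# The numeric constants above mirror MessageType in EventGenerator.py:
+# 2 = EVENT_REQUEST_DEBUG, 10 = EVENT_INSERTED, 11 = EVENT_DENIED, and
+# 20-23 are error responses.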
+def testLoop( sleepTime=5 ):
+ downLinkNum = 0
+ downDeviceNum = 0
+ while True:
+ r = random.random()
+ if r < 0.2:
+ triggerEvent( 'NETWORK_LINK_DOWN', 'RUN_BLOCK', 'random', 'random' )
+ downLinkNum += 1
+ time.sleep( sleepTime )
+ elif r < 0.4:
+ triggerEvent( 'NETWORK_DEVICE_DOWN', 'RUN_BLOCK', 'random' )
+ downDeviceNum += 1
+ time.sleep( sleepTime * 2 )
+ elif r < 0.7 and downLinkNum > 0:
+ triggerEvent( 'NETWORK_LINK_UP', 'RUN_BLOCK', 'random', 'random' )
+ downLinkNum -= 1
+ time.sleep( sleepTime )
+ elif downDeviceNum > 0:
+ triggerEvent( 'NETWORK_DEVICE_UP', 'RUN_BLOCK', 'random' )
+ downDeviceNum -= 1
+ time.sleep( sleepTime * 2 )
+ else:
+ pass
+
+if __name__ == '__main__':
+ testLoop( 2 )
diff --git a/TestON/tests/CHOTestMonkey/dependencies/GraphHelper.py b/TestON/tests/CHOTestMonkey/dependencies/GraphHelper.py
new file mode 100644
index 0000000..13f110b
--- /dev/null
+++ b/TestON/tests/CHOTestMonkey/dependencies/GraphHelper.py
@@ -0,0 +1,133 @@
+"""
+Graph algorithm implementations for CHOTestMonkey
+Author: you@onlab.us
+"""
+class GraphHelper:
+ """
+ This class implements graph algorithms for CHOTestMonkey.
+ It reads main.devices and main.links as vertices and edges.
+    Currently it offers functions for finding (non-)cut-edges and vertices,
+    which are implemented using the chain-decomposition algorithm
+ """
+ def __init__( self ):
+ # Depth-first index of each node
+ self.DFI = []
+        # Parent vertex and edge of each node in the depth-first search tree
+ self.parentDeviceInDFS = []
+ self.parentLinkInDFS = []
+ # Data structures for chain-decomposition algorithm
+ self.backEdges = {}
+ self.chains = []
+ self.currentDFI = 0
+ self.upDevices = []
+ for device in main.devices:
+ if device.isUp():
+ self.upDevices.append( device )
+ for i in range( len( main.devices ) ):
+ self.DFI.append( -1 )
+ self.parentDeviceInDFS.append( None )
+ self.parentLinkInDFS.append( None )
+
+ def genDFIandBackEdge( self, device ):
+ """
+        This function runs a depth-first search to get the DFI of each node
+        and to collect the back edges
+ """
+ self.DFI[ device.index ] = self.currentDFI
+ self.currentDFI += 1
+ for link in device.outgoingLinks:
+ if not link.isUp():
+ continue
+ backwardLink = link.backwardLink
+ neighbor = link.deviceB
+ if neighbor == self.parentDeviceInDFS[ device.index ]:
+ continue
+ elif self.DFI[ neighbor.index ] == -1:
+ self.parentDeviceInDFS[ neighbor.index ] = device
+ self.parentLinkInDFS[ neighbor.index ] = backwardLink
+ self.genDFIandBackEdge( neighbor )
+ else:
+ key = self.DFI[ neighbor.index ]
+ if key in self.backEdges.keys():
+ if not link in self.backEdges[ key ] and\
+ not backwardLink in self.backEdges[ key ]:
+ self.backEdges[ key ].append( backwardLink )
+ else:
+ tempKey = self.DFI[ device.index ]
+ if tempKey in self.backEdges.keys():
+ if not link in self.backEdges[ tempKey ] and\
+ not backwardLink in self.backEdges[ tempKey ]:
+ self.backEdges[ key ] = [ backwardLink ]
+ else:
+ self.backEdges[ key ] = [ backwardLink ]
+
+ def findChains( self ):
+ """
+ This function finds all the 'chains' for chain-decomposition algorithm
+ """
+ keyList = self.backEdges.keys()
+ keyList.sort()
+ deviceIsVisited = []
+ for i in range( len( main.devices ) ):
+ deviceIsVisited.append( 0 )
+ for key in keyList:
+ backEdgeList = self.backEdges[ key ]
+ for link in backEdgeList:
+ chain = []
+ currentLink = link
+ sourceDevice = link.deviceA
+ while True:
+ currentDevice = currentLink.deviceA
+ nextDevice = currentLink.deviceB
+ deviceIsVisited[ currentDevice.index ] = 1
+ chain.append( currentLink )
+ if nextDevice == sourceDevice or deviceIsVisited[ nextDevice.index ] == 1:
+ break
+ currentLink = self.parentLinkInDFS[ nextDevice.index ]
+ self.chains.append( chain )
+
+ def getNonCutEdges( self ):
+ """
+ This function returns all non-cut-edges of a graph
+ """
+ assert len( self.upDevices ) != 0
+ self.genDFIandBackEdge( self.upDevices[ 0 ] )
+ self.findChains()
+ nonCutEdges = []
+ for chain in self.chains:
+ for link in chain:
+ nonCutEdges.append( link )
+ return nonCutEdges
+
+ def getNonCutVertices( self ):
+ """
+ This function returns all non-cut-vertices of a graph
+ """
+ nonCutEdges = self.getNonCutEdges()
+ nonCutVertices = []
+ for device in self.upDevices:
+ deviceIsNonCut = True
+ for link in device.outgoingLinks:
+ if link.isUp() and not ( link in nonCutEdges or link.backwardLink in nonCutEdges ):
+ deviceIsNonCut = False
+ break
+ if deviceIsNonCut:
+ nonCutVertices.append( device )
+ return nonCutVertices
+
+ def printDFI( self ):
+ print self.DFI
+
+ def printParentInDFS( self ):
+        print self.parentDeviceInDFS
+        print self.parentLinkInDFS
+
+ def printBackEdges( self ):
+ print self.backEdges
+
+ def printChains( self ):
+ chainIndex = 0
+ for chain in self.chains:
+ print chainIndex
+ for link in chain:
+ print link
+ chainIndex += 1
diff --git a/TestON/tests/CHOTestMonkey/dependencies/__init__.py b/TestON/tests/CHOTestMonkey/dependencies/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/TestON/tests/CHOTestMonkey/dependencies/__init__.py
diff --git a/TestON/tests/CHOTestMonkey/dependencies/cli.py b/TestON/tests/CHOTestMonkey/dependencies/cli.py
new file mode 100644
index 0000000..d1a8448
--- /dev/null
+++ b/TestON/tests/CHOTestMonkey/dependencies/cli.py
@@ -0,0 +1,109 @@
+"""
+Start CLI for CHOTestMonkey
+Author: you@onlab.us
+"""
+from multiprocessing.connection import Client
+
+commandMap = {}
+paramNum = {}
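+# commandMap maps a CLI command string to an event typeString, and paramNum
+# records how many arguments the command expects; both are populated from the
+# <EVENT> section of CHOTestMonkey.params in __main__ below.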
+
+def triggerEvent( debugMode, name, scheduleMethod, args ):
+ """
+ This function inserts an event from CLI to CHOTestMonkey
+ """
+ host = "localhost"
+ port = 6000
+ address = ( host, port )
+ conn = Client( address )
+ request = []
+ if debugMode:
+ request.append( 2 )
+ else:
+ request.append( 1 )
+ request.append( name )
+ request.append( scheduleMethod )
+ for arg in args:
+ request.append( arg )
+ conn.send( request )
+ response = conn.recv()
+ return response
+
+def startCLI():
+ debugMode = False
+ while 1:
+ try:
+ if debugMode:
+ cmd = raw_input("CHOTestMonkey-debug>")
+ else:
+ cmd = raw_input("CHOTestMonkey>")
+ except EOFError:
+ print "exit"
+ return
+ except Exception:
+ print "Uncaught exception!"
+ return
+
+ if cmd == 'help':
+ print 'Supported commands:'
+ print 'help'
+ print 'debug'
+ print 'exit'
+ for command in commandMap.keys():
+ print command
+ elif cmd == '':
+ pass
+ elif cmd == 'debug':
+ debugMode = True
+ elif cmd == 'exit':
+ if debugMode:
+ debugMode = False
+ else:
+ return
+ else:
+ cmdList = cmd.split( ' ' )
+ if cmdList[ 0 ] in commandMap.keys():
+ num = paramNum[ cmdList[ 0 ] ]
+ name = commandMap[ cmdList[ 0 ] ]
+ if len( cmdList ) < num + 1:
+                    print 'Not enough arguments'
+ elif len( cmdList ) > num + 1:
+ print 'Too many arguments'
+ else:
+ result = triggerEvent( debugMode, name, 'RUN_BLOCK', cmdList[ 1: ] )
+ if result == 10:
+ pass
+ elif result == 11:
+ print "Scheduler busy...Try later or use debugging mode by entering \'debug\'"
+ elif result == 20:
+ print "Unknown message to server"
+ elif result == 21:
+ print "Unknown event type to server"
+ elif result == 22:
+ print "Unknown schedule method to server"
+ elif result == 23:
+ print "Not enough argument"
+ else:
+ print "Unknown response from server"
+ else:
+ print 'Unknown command'
+
+if __name__ == '__main__':
+ import xml.etree.ElementTree
+ try:
+ root = xml.etree.ElementTree.parse( '../CHOTestMonkey.params' ).getroot()
+ except Exception:
+ print "Uncaught exception!"
+ for child in root:
+ if child.tag == 'EVENT':
+ for event in child:
+ for item in event:
+ if item.tag == 'CLI':
+ CLI = str( item.text )
+ if item.tag == 'typeString':
+ name = str( item.text )
+ if item.tag == 'CLIParamNum':
+ num = int( item.text )
+ commandMap[ CLI ] = name
+ paramNum[ CLI ] = num
+ startCLI()
+
diff --git a/TestON/tests/CHOTestMonkey/dependencies/elements/NetworkElement.py b/TestON/tests/CHOTestMonkey/dependencies/elements/NetworkElement.py
new file mode 100644
index 0000000..2a18ac7
--- /dev/null
+++ b/TestON/tests/CHOTestMonkey/dependencies/elements/NetworkElement.py
@@ -0,0 +1,80 @@
+"""
+This file contains device, host and link class for CHOTestMonkey
+Author: you@onlab.us
+"""
+
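+# The status field takes one of the string values 'up', 'down', 'removed' or
+# 'pending_down'; 'pending_down' marks an element whose down event has been
+# scheduled but has not yet been executed.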
+class NetworkElement:
+ def __init__( self, index ):
+ self.default = ''
+ self.index = index
+ self.status = 'up'
+
+ def isUp( self ):
+ return self.status == 'up'
+
+ def isDown( self ):
+ return self.status == 'down'
+
+ def isRemoved( self ):
+ return self.status == 'removed'
+
+ def setPendingDown( self ):
+ self.status = 'pending_down'
+
+ def setRemoved( self ):
+ self.status = 'removed'
+
+ def bringDown( self ):
+ self.status = 'down'
+
+ def bringUp( self ):
+ self.status = 'up'
+
+class Device( NetworkElement ):
+ def __init__( self, index, name, dpid ):
+ NetworkElement.__init__( self, index )
+ self.name = name
+ self.dpid = dpid
+ self.hosts = []
+ # For each bidirectional link, we only store one direction here
+ self.outgoingLinks = []
+
+ def __str__( self ):
+ return "name: " + self.name + ", dpid: " + self.dpid
+
+class Host( NetworkElement ):
+ def __init__( self, index, name, id, mac, device, devicePort, vlan, ipAddresses ):
+ NetworkElement.__init__( self, index )
+ self.name = name
+ self.id = id
+ self.mac = mac
+ self.device = device
+ self.devicePort = devicePort
+ self.vlan = vlan
+ self.ipAddresses = ipAddresses
+ self.correspondents = []
+ self.handle = None
+
+ def __str__( self ):
+ return "name: " + self.name + ", mac: " + self.mac + ", device: " + self.device.dpid + ", ipAddresses: " + str( self.ipAddresses )
+
+ def setHandle( self, handle ):
+ self.handle = handle
+
+class Link( NetworkElement ):
+ """
+ Unidirectional link
+ """
+ def __init__( self, index, deviceA, portA, deviceB, portB ):
+ NetworkElement.__init__( self, index )
+ self.backwardLink = None
+ self.deviceA = deviceA
+ self.portA = portA
+ self.deviceB = deviceB
+ self.portB = portB
+
+ def __str__( self ):
+ return self.deviceA.dpid + "/" + self.portA + " - " + self.deviceB.dpid + "/" + self.portB
+
+ def setBackwardLink( self, link ):
+ self.backwardLink = link
diff --git a/TestON/tests/CHOTestMonkey/dependencies/elements/ONOSElement.py b/TestON/tests/CHOTestMonkey/dependencies/elements/ONOSElement.py
new file mode 100644
index 0000000..699a17d
--- /dev/null
+++ b/TestON/tests/CHOTestMonkey/dependencies/elements/ONOSElement.py
@@ -0,0 +1,61 @@
+"""
+This file contains intent class for CHOTestMonkey
+Author: you@onlab.us
+"""
+from threading import Lock
+
+class Controller:
+ def __init__( self, index ):
+ self.default = ''
+ self.index = index
+ self.ip = main.onosIPs[ index - 1 ]
+ self.CLI = None
+ self.CLILock = Lock()
+ self.status = 'up'
+
+ def setCLI( self, CLI ):
+ self.CLI = CLI
+
+ def startCLI( self ):
+ return self.CLI.startOnosCli( self.ip )
+
+ def isUp( self ):
+ return self.status == 'up'
+
+ def bringDown( self ):
+ self.status = 'down'
+
+ def bringUp( self ):
+ self.status = 'up'
+
+class Intent:
+ def __init__( self, id ):
+ self.default = ''
+ self.type = 'INTENT'
+ self.id = id
+
+ def isHostIntent( self ):
+ return self.type == 'INTENT_HOST'
+
+ def isPointIntent( self ):
+ return self.type == 'INTENT_POINT'
+
+class HostIntent( Intent ):
+ def __init__( self, id, hostA, hostB ):
+ Intent.__init__( self, id )
+ self.type = 'INTENT_HOST'
+ self.hostA = hostA
+ self.hostB = hostB
+
+ def __str__( self ):
+ return "ID: " + self.id
+
+class PointIntent( Intent ):
+ def __init__( self, id, deviceA, deviceB ):
+ Intent.__init__( self, id )
+ self.type = 'INTENT_POINT'
+ self.deviceA = deviceA
+ self.deviceB = deviceB
+
+ def __str__( self ):
+ return "ID: " + self.id
diff --git a/TestON/tests/CHOTestMonkey/dependencies/elements/__init__.py b/TestON/tests/CHOTestMonkey/dependencies/elements/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/TestON/tests/CHOTestMonkey/dependencies/elements/__init__.py
diff --git a/TestON/tests/CHOTestMonkey/dependencies/events/AppEvent.py b/TestON/tests/CHOTestMonkey/dependencies/events/AppEvent.py
new file mode 100644
index 0000000..7f0ca7c
--- /dev/null
+++ b/TestON/tests/CHOTestMonkey/dependencies/events/AppEvent.py
@@ -0,0 +1,246 @@
+"""
+This file contains classes for CHOTestMonkey that are related to application event
+Author: you@onlab.us
+"""
+from tests.CHOTestMonkey.dependencies.events.Event import EventType, EventStates, Event
+from tests.CHOTestMonkey.dependencies.elements.ONOSElement import HostIntent, PointIntent
+
+class IntentEvent( Event ):
+ def __init__( self ):
+ Event.__init__( self )
+ # The index of the ONOS CLI that is going to run the command
+ self.CLIIndex = 0
+
+class HostIntentEvent( IntentEvent ):
+ def __init__( self ):
+ IntentEvent.__init__( self )
+ self.hostA = None
+ self.hostB = None
+
+ def startHostIntentEvent( self ):
+ return EventStates().PASS
+
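+    # args format: [ <hostA name>, <hostB name>, <CLI index> ]; the CLI index
+    # selects which ONOS controller issues the intent command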
+ def startEvent( self, args ):
+ with self.eventLock:
+ main.log.info( "%s - starting event" % ( self.typeString ) )
+ if self.typeIndex == EventType().APP_INTENT_HOST_ADD or self.typeIndex == EventType().APP_INTENT_HOST_DEL:
+ if len( args ) < 3:
+ main.log.warn( "%s - Not enough arguments: %s" % ( self.typeString, args ) )
+ return EventStates().ABORT
+ elif len( args ) > 3:
+ main.log.warn( "%s - Too many arguments: %s" % ( self.typeString, args ) )
+ return EventStates().ABORT
+ else:
+ if args[ 0 ] == args[ 1 ]:
+ main.log.warn( "%s - invalid argument: %s" % ( self.typeString, index ) )
+ return EventStates().ABORT
+ for host in main.hosts:
+ if host.name == args[ 0 ]:
+ self.hostA = host
+ elif host.name == args[ 1 ]:
+ self.hostB = host
+ if self.hostA != None and self.hostB != None:
+ break
+ if self.hostA == None:
+ main.log.warn( "Host %s does not exist: " % ( args[ 0 ] ) )
+ return EventStates().ABORT
+ if self.hostB == None:
+ main.log.warn( "Host %s does not exist: " % ( args[ 1 ] ) )
+ return EventStates().ABORT
+ index = int( args[ 2 ] )
+ if index < 1 or index > int( main.numCtrls ):
+ main.log.warn( "%s - invalid argument: %s" % ( self.typeString, index ) )
+ return EventStates().ABORT
+ if not main.controllers[ index - 1 ].isUp():
+ main.log.warn( self.typeString + " - invalid argument: onos %s is down" % ( controller.index ) )
+ return EventStates().ABORT
+ self.CLIIndex = index
+ return self.startHostIntentEvent()
+
+class AddHostIntent( HostIntentEvent ):
+ """
+ Add a host-to-host intent (bidirectional)
+ """
+ def __init__( self ):
+ HostIntentEvent.__init__( self )
+ self.typeString = main.params[ 'EVENT' ][ self.__class__.__name__ ][ 'typeString' ]
+        self.typeIndex = int( main.params[ 'EVENT' ][ self.__class__.__name__ ][ 'typeIndex' ] )
+
+ def startHostIntentEvent( self ):
+ assert self.hostA != None and self.hostB != None
+ # Check whether there already exists some intent for the host pair
+ # For now we should avoid installing overlapping intents
+ for intent in main.intents:
+ if not intent.type == 'INTENT_HOST':
+ continue
+ if intent.hostA == self.hostA and intent.hostB == self.hostB or\
+ intent.hostB == self.hostA and intent.hostA == self.hostB:
+ main.log.warn( self.typeString + " - find an exiting intent for the host pair, abort installation" )
+ return EventStates().ABORT
+ controller = main.controllers[ self.CLIIndex - 1 ]
+ with controller.CLILock:
+ id = controller.CLI.addHostIntent( self.hostA.id, self.hostB.id )
+ if id == None:
+ main.log.warn( self.typeString + " - add host intent failed" )
+ return EventStates().FAIL
+ with main.variableLock:
+ newHostIntent = HostIntent( id, self.hostA, self.hostB )
+ main.intents.append( newHostIntent )
+ # Update host connectivity status
+ # TODO: should we check whether hostA and hostB are already correspondents?
+ self.hostB.correspondents.append( self.hostA )
+ self.hostA.correspondents.append( self.hostB )
+ return EventStates().PASS
+
+class DelHostIntent( HostIntentEvent ):
+ """
+ Delete a host-to-host intent (bidirectional)
+ """
+ def __init__( self ):
+ HostIntentEvent.__init__( self )
+ self.typeString = main.params[ 'EVENT' ][ self.__class__.__name__ ][ 'typeString' ]
+        self.typeIndex = int( main.params[ 'EVENT' ][ self.__class__.__name__ ][ 'typeIndex' ] )
+
+ def startHostIntentEvent( self ):
+ assert self.hostA != None and self.hostB != None
+ targetIntent = None
+ for intent in main.intents:
+ if not intent.type == 'INTENT_HOST':
+ continue
+ if intent.hostA == self.hostA and intent.hostB == self.hostB or\
+ intent.hostB == self.hostA and intent.hostA == self.hostB:
+ targetIntent = intent
+ break
+ if targetIntent == None:
+ main.log.warn( self.typeString + " - intent does not exist" )
+ return EventStates().FAIL
+ controller = main.controllers[ self.CLIIndex - 1 ]
+ with controller.CLILock:
+ result = controller.CLI.removeIntent( targetIntent.id, purge=True )
+ if result == None or result == main.FALSE:
+ main.log.warn( self.typeString + " - delete host intent failed" )
+ return EventStates().FAIL
+ with main.variableLock:
+ main.intents.remove( targetIntent )
+ # Update host connectivity status
+ self.hostB.correspondents.remove( self.hostA )
+ self.hostA.correspondents.remove( self.hostB )
+ return EventStates().PASS
+
+class PointIntentEvent( IntentEvent ):
+ def __init__( self ):
+ IntentEvent.__init__( self )
+ self.deviceA = None
+ self.deviceB = None
+
+ def startPointIntentEvent( self ):
+ return EventStates().PASS
+
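+    # args format: [ <deviceA name>, <deviceB name>, <CLI index> ]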
+ def startEvent( self, args ):
+ with self.eventLock:
+ main.log.info( "%s - starting event" % ( self.typeString ) )
+ if self.typeIndex == EventType().APP_INTENT_POINT_ADD or self.typeIndex == EventType().APP_INTENT_POINT_DEL:
+ if len( args ) < 3:
+ main.log.warn( "%s - Not enough arguments: %s" % ( self.typeString, args ) )
+ return EventStates().ABORT
+ elif len( args ) > 3:
+ main.log.warn( "%s - Too many arguments: %s" % ( self.typeString, args ) )
+ return EventStates().ABORT
+ else:
+ for device in main.devices:
+ if device.name == args[ 0 ]:
+ self.deviceA = device
+ elif device.name == args[ 1 ]:
+ self.deviceB = device
+ if self.deviceA != None and self.deviceB != None:
+ break
+ if self.deviceA == None:
+ main.log.warn( "Device %s does not exist: " % ( args[ 0 ] ) )
+ return EventStates().ABORT
+ if self.deviceB == None:
+ main.log.warn( "Device %s does not exist: " % ( args[ 1 ] ) )
+ return EventStates().ABORT
+ index = int( args[ 2 ] )
+ if index < 1 or index > int( main.numCtrls ):
+ main.log.warn( "%s - invalid argument: %s" % ( self.typeString, index ) )
+ return EventStates().ABORT
+ if not main.controllers[ index - 1 ].isUp():
+                        main.log.warn( "%s - invalid argument: onos %s is down" % ( self.typeString, index ) )
+ return EventStates().ABORT
+ self.CLIIndex = index
+ return self.startPointIntentEvent()
+
+class AddPointIntent( PointIntentEvent ):
+ """
+ Add a point-to-point intent
+ """
+ def __init__( self ):
+ PointIntentEvent.__init__( self )
+ self.typeString = main.params[ 'EVENT' ][ self.__class__.__name__ ][ 'typeString' ]
+        self.typeIndex = int( main.params[ 'EVENT' ][ self.__class__.__name__ ][ 'typeIndex' ] )
+
+ def startPointIntentEvent( self ):
+ assert self.deviceA != None and self.deviceB != None
+        # TODO: the following check only works when we use the default port number for point intents
+ # Check whether there already exists some intent for the device pair
+ # For now we should avoid installing overlapping intents
+ for intent in main.intents:
+ if not intent.type == 'INTENT_POINT':
+ continue
+ if intent.deviceA == self.deviceA and intent.deviceB == self.deviceB:
+                main.log.warn( self.typeString + " - found an existing intent for this device pair, aborting installation" )
+ return EventStates().ABORT
+ controller = main.controllers[ self.CLIIndex - 1 ]
+ with controller.CLILock:
+ # TODO: handle the case that multiple hosts attach to one device
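+            # The call below relies on the default port number ( 1 ) on both devices and uses the MAC of the first host attached to each device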
+ id = controller.CLI.addPointIntent( self.deviceA.dpid, self.deviceB.dpid,
+ 1, 1, '',
+ self.deviceA.hosts[ 0 ].mac,
+ self.deviceB.hosts[ 0 ].mac )
+ if id == None:
+ main.log.warn( self.typeString + " - add point intent failed" )
+ return EventStates().FAIL
+ with main.variableLock:
+ newPointIntent = PointIntent( id, self.deviceA, self.deviceB )
+ main.intents.append( newPointIntent )
+ # Update host connectivity status
+ for hostA in self.deviceA.hosts:
+ for hostB in self.deviceB.hosts:
+ hostA.correspondents.append( hostB )
+ return EventStates().PASS
+
+class DelPointIntent( PointIntentEvent ):
+ """
+ Delete a point-to-point intent
+ """
+ def __init__( self ):
+ PointIntentEvent.__init__( self )
+ self.typeString = main.params[ 'EVENT' ][ self.__class__.__name__ ][ 'typeString' ]
+        self.typeIndex = int( main.params[ 'EVENT' ][ self.__class__.__name__ ][ 'typeIndex' ] )
+
+ def startPointIntentEvent( self ):
+ assert self.deviceA != None and self.deviceB != None
+ targetIntent = None
+ for intent in main.intents:
+ if not intent.type == 'INTENT_POINT':
+ continue
+ if intent.deviceA == self.deviceA and intent.deviceB == self.deviceB:
+ targetIntent = intent
+ break
+ if targetIntent == None:
+ main.log.warn( self.typeString + " - intent does not exist" )
+ return EventStates().FAIL
+ controller = main.controllers[ self.CLIIndex - 1 ]
+ with controller.CLILock:
+ result = controller.CLI.removeIntent( targetIntent.id, purge=True )
+ if result == None or result == main.FALSE:
+            main.log.warn( self.typeString + " - delete point intent failed" )
+ return EventStates().FAIL
+ with main.variableLock:
+ main.intents.remove( targetIntent )
+ # Update host connectivity status
+ for hostA in self.deviceA.hosts:
+ for hostB in self.deviceB.hosts:
+ hostA.correspondents.remove( hostB )
+ return EventStates().PASS
diff --git a/TestON/tests/CHOTestMonkey/dependencies/events/CheckEvent.py b/TestON/tests/CHOTestMonkey/dependencies/events/CheckEvent.py
new file mode 100644
index 0000000..e48f674
--- /dev/null
+++ b/TestON/tests/CHOTestMonkey/dependencies/events/CheckEvent.py
@@ -0,0 +1,235 @@
+"""
+This file contains classes for CHOTestMonkey that are related to check events
+Author: you@onlab.us
+"""
+from tests.CHOTestMonkey.dependencies.events.Event import EventType, EventStates, Event
+
+class CheckEvent( Event ):
+ def __init__( self ):
+ Event.__init__( self )
+
+ def startCheckEvent( self ):
+ return EventStates().PASS
+
+ def startEvent( self, args ):
+ with self.eventLock:
+ main.log.info( "%s - starting event" % ( self.typeString ) )
+ result = self.startCheckEvent()
+ return result
+
+class IntentCheck( CheckEvent ):
+ def __init__( self ):
+ CheckEvent.__init__( self )
+ self.typeString = main.params[ 'EVENT' ][ self.__class__.__name__ ][ 'typeString' ]
+ self.typeIndex = int( main.params[ 'EVENT' ][ self.__class__.__name__ ][ 'typeIndex' ] )
+
+ def startCheckEvent( self, args=None ):
+ checkResult = EventStates().PASS
+ intentIDs = []
+ for intent in main.intents:
+ if intent.isHostIntent():
+ deviceA = intent.hostA.device
+ deviceB = intent.hostB.device
+ elif intent.isPointIntent():
+ deviceA = intent.deviceA
+ deviceB = intent.deviceB
+ # Exclude the intents that are to or from removed devices/hosts
+ if not deviceA.isRemoved() and not deviceB.isRemoved():
+ intentIDs.append( intent.id )
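+        # Verify on every active controller that all collected intents are in the INSTALLED state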
+ for controller in main.controllers:
+ if controller.isUp():
+ with controller.CLILock:
+ intentState = controller.CLI.checkIntentState( intentsId=intentIDs )
+ if not intentState:
+ main.log.warn( "Intent Check - Not all intents are in INSTALLED state on ONOS%s" % ( controller.index ) )
+ checkResult = EventStates().FAIL
+ #TODO: check flows?
+ return checkResult
+
+class TopoCheck( CheckEvent ):
+ def __init__( self ):
+ CheckEvent.__init__( self )
+ self.typeString = main.params[ 'EVENT' ][ self.__class__.__name__ ][ 'typeString' ]
+ self.typeIndex = int( main.params[ 'EVENT' ][ self.__class__.__name__ ][ 'typeIndex' ] )
+
+ def startCheckEvent( self, args=None ):
+ import json
+ checkResult = EventStates().PASS
+ upLinkNum = 0
+ upDeviceNum = 0
+ upHostNum = 0
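+        # Count the links, devices and hosts that the test's topology model expects to be up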
+ with main.variableLock:
+ for link in main.links:
+ if not link.isDown() and not link.isRemoved():
+ upLinkNum += 1
+ for device in main.devices:
+ if not device.isDown() and not device.isRemoved():
+ upDeviceNum += 1
+ for host in main.hosts:
+ if not host.isDown() and not host.isRemoved():
+ upHostNum += 1
+ clusterNum = 1
+ for controller in main.controllers:
+ if controller.isUp():
+ with controller.CLILock:
+ topologyOutput = controller.CLI.topology()
+ topoState = controller.CLI.checkStatus( topologyOutput, upDeviceNum, upLinkNum )
+ #if not topoState:
+                #    main.log.warn( "Topo Check - link or device number discovered by ONOS%s is incorrect" % ( controller.index ) )
+ # checkResult = EventStates().FAIL
+ # Check links
+ links = controller.CLI.links()
+ links = json.loads( links )
+ if not len( links ) == upLinkNum:
+ checkResult = EventStates().FAIL
+                    main.log.warn( "Topo Check - link number discovered by ONOS%s is incorrect: %s expected and %s actual" % ( controller.index, upLinkNum, len( links ) ) )
+ # Check devices
+ devices = controller.CLI.devices()
+ devices = json.loads( devices )
+ availableDeviceNum = 0
+ for device in devices:
+ if device[ 'available' ] == True:
+ availableDeviceNum += 1
+ if not availableDeviceNum == upDeviceNum:
+ checkResult = EventStates().FAIL
+                    main.log.warn( "Topo Check - device number discovered by ONOS%s is incorrect: %s expected and %s actual" % ( controller.index, upDeviceNum, availableDeviceNum ) )
+ # Check hosts
+ hosts = controller.CLI.hosts()
+ hosts = json.loads( hosts )
+ if not len( hosts ) == upHostNum:
+ checkResult = EventStates().FAIL
+                    main.log.warn( "Topo Check - host number discovered by ONOS%s is incorrect: %s expected and %s actual" % ( controller.index, upHostNum, len( hosts ) ) )
+ # Check clusters
+ clusters = controller.CLI.clusters()
+ clusters = json.loads( clusters )
+ if not len( clusters ) == clusterNum:
+ checkResult = EventStates().FAIL
+                    main.log.warn( "Topo Check - cluster number discovered by ONOS%s is incorrect: %s expected and %s actual" % ( controller.index, clusterNum, len( clusters ) ) )
+ return checkResult
+
+class ONOSCheck( CheckEvent ):
+ def __init__( self ):
+ CheckEvent.__init__( self )
+ self.typeString = main.params[ 'EVENT' ][ self.__class__.__name__ ][ 'typeString' ]
+        self.typeIndex = int( main.params[ 'EVENT' ][ self.__class__.__name__ ][ 'typeIndex' ] )
+
+ def startCheckEvent( self, args=None ):
+ import json
+ checkResult = EventStates().PASS
+ topics = []
+ # TODO: Other topics?
+ for i in range( 14 ):
+ topics.append( "intent-partition-" + str( i ) )
+ dpidToAvailability = {}
+ dpidToMaster = {}
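+        # Build the expected device availability map and an initially unknown mastership map, both keyed by dpid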
+ for device in main.devices:
+ if device.isDown() or device.isRemoved():
+ dpidToAvailability[ device.dpid ] = False
+ else:
+ dpidToAvailability[ device.dpid ] = True
+ dpidToMaster[ device.dpid ] = 'unknown'
+ # Check mastership, leaders and node states on each controller node
+ for controller in main.controllers:
+ if controller.isUp():
+ # Check mastership
+ with controller.CLILock:
+ roles = controller.CLI.roles()
+ roles = json.loads( roles )
+ for device in roles:
+ dpid = device[ 'id' ]
+ if dpidToMaster[ dpid ] == 'unknown':
+ dpidToMaster[ dpid ] = device[ 'master' ]
+ elif dpidToMaster[ dpid ] != device[ 'master' ]:
+ checkResult = EventStates().FAIL
+                        main.log.warn( "ONOS Check - Mastership of %s on ONOS%s is inconsistent across ONOS nodes" % ( dpid, controller.index ) )
+ if dpidToAvailability[ dpid ] and device[ 'master' ] == "none":
+ checkResult = EventStates().FAIL
+                        main.log.warn( "ONOS Check - Device %s has no master on ONOS%s" % ( dpid, controller.index ) )
+ # Check leaders
+ with controller.CLILock:
+ leaders = controller.CLI.leaders()
+ leaders = json.loads( leaders )
+ ONOSTopics = [ j['topic'] for j in leaders ]
+ for topic in topics:
+ if topic not in ONOSTopics:
+ checkResult = EventStates().FAIL
+ main.log.warn( "ONOS Check - Topic %s not in leaders on ONOS%s" % ( topic, controller.index ) )
+ # Check node state
+ with controller.CLILock:
+ nodes = controller.CLI.nodes()
+ nodes = json.loads( nodes )
+ ipToState = {}
+ for node in nodes:
+ ipToState[ node[ 'ip' ] ] = node[ 'state' ]
+ for c in main.controllers:
+ if c.isUp() and ipToState[ c.ip ] == 'READY':
+ pass
+ elif not c.isUp() and ipToState[ c.ip ] == 'INACTIVE':
+ pass
+ else:
+ checkResult = EventStates().FAIL
+ main.log.warn( "ONOS Check - ONOS%s shows wrong node state: ONOS%s is %s but state is %s" % ( controller.index, c.index, c.status, ipToState[ c.ip ] ) )
+ # TODO: check partitions?
+ return checkResult
+
+class TrafficCheck( CheckEvent ):
+ def __init__( self ):
+ CheckEvent.__init__( self )
+ self.typeString = main.params[ 'EVENT' ][ self.__class__.__name__ ][ 'typeString' ]
+        self.typeIndex = int( main.params[ 'EVENT' ][ self.__class__.__name__ ][ 'typeIndex' ] )
+
+ def startCheckEvent( self, args=None ):
+ checkResult = EventStates().PASS
+ pool = []
+ wait = int( main.params[ 'EVENT' ][ 'TrafficCheck' ][ 'pingWait' ] )
+ timeout = int( main.params[ 'EVENT' ][ 'TrafficCheck' ][ 'pingTimeout' ] )
+ dstIPv4List = {}
+ dstIPv6List = {}
+ upHosts = []
+ for host in main.hosts:
+ if host.isUp():
+ upHosts.append( host )
+ for host in upHosts:
+ dstIPv4List[ host.index ] = []
+ dstIPv6List[ host.index ] = []
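+            # Collect IPv4 and IPv6 ping targets from this host's correspondents that are currently up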
+ for correspondent in host.correspondents:
+ if not correspondent in upHosts:
+ continue
+ for ipAddress in correspondent.ipAddresses:
+ if ipAddress.startswith( str( main.params[ 'TEST' ][ 'ipv6Prefix' ] ) ):
+ dstIPv6List[ host.index ].append( ipAddress )
+ elif ipAddress.startswith( str( main.params[ 'TEST' ][ 'ipv4Prefix' ] ) ):
+ dstIPv4List[ host.index ].append( ipAddress )
+ thread = main.Thread( target=host.handle.pingHostSetAlternative,
+ threadID=main.threadID,
+ name="pingHostSetAlternative",
+ args=[ dstIPv4List[ host.index ], 1 ] )
+ pool.append( thread )
+ thread.start()
+ with main.variableLock:
+ main.threadID += 1
+ for thread in pool:
+ thread.join( 10 )
+ if not thread.result:
+ checkResult = EventStates().FAIL
+ main.log.warn( "Traffic Check - ping failed" )
+
+ if not main.enableIPv6:
+ return checkResult
+        # Check IPv6 ping
+        pool = []
+        for host in upHosts:
+ thread = main.Thread( target=host.handle.pingHostSetAlternative,
+ threadID=main.threadID,
+ name="pingHostSetAlternative",
+ args=[ dstIPv6List[ host.index ], 1, True ] )
+ pool.append( thread )
+ thread.start()
+ with main.variableLock:
+ main.threadID += 1
+ for thread in pool:
+ thread.join( 10 )
+ if not thread.result:
+ checkResult = EventStates().FAIL
+ main.log.warn( "Traffic Check - ping6 failed" )
+ return checkResult
+
diff --git a/TestON/tests/CHOTestMonkey/dependencies/events/Event.py b/TestON/tests/CHOTestMonkey/dependencies/events/Event.py
new file mode 100644
index 0000000..2abd77f
--- /dev/null
+++ b/TestON/tests/CHOTestMonkey/dependencies/events/Event.py
@@ -0,0 +1,43 @@
+"""
+This file contains the Event class for CHOTestMonkey
+Author: you@onlab.us
+"""
+from threading import Lock
+
+class EventType:
+ def __init__( self ):
+ self.map = {}
+ # Group events (>100) should be divided into individual events by the generator before going to the scheduler
+ self.NULL = 0
+ for eventName in main.params[ 'EVENT' ].keys():
+ typeString = main.params[ 'EVENT' ][ eventName ][ 'typeString' ]
+ typeIndex = int( main.params[ 'EVENT' ][ eventName ][ 'typeIndex' ] )
+ setattr( self, typeString, typeIndex )
+ self.map[ typeIndex ] = typeString
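+        # Illustrative example ( actual strings and numbers come from the test's params file ):
+        # an event section with typeString 'NETWORK_LINK_DOWN' and typeIndex '20' results in
+        # EventType().NETWORK_LINK_DOWN == 20 and EventType().map[ 20 ] == 'NETWORK_LINK_DOWN'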
+
+class EventStates:
+ def __init__( self ):
+ self.map = {}
+ self.FAIL = 0
+ self.map[ 0 ] = 'FAIL'
+ self.PASS = 1
+ self.map[ 1 ] = 'PASS'
+ self.ABORT = -1
+ self.map[ -1 ] = 'ABORT'
+
+class Event:
+ """
+ Event class for CHOTestMonkey
+    It is the superclass of all other CHOTestMonkey event classes
+ """
+ def __init__( self ):
+ self.typeString = main.params[ 'EVENT' ][ self.__class__.__name__ ][ 'typeString' ]
+ self.typeIndex = int( main.params[ 'EVENT' ][ self.__class__.__name__ ][ 'typeIndex' ] )
+ self.eventLock = Lock()
+ self.variableLock = Lock()
+
+ def startEvent( self, args=None ):
+ """
+ Start running the event
+ """
+ return EventStates().PASS
diff --git a/TestON/tests/CHOTestMonkey/dependencies/events/NetworkEvent.py b/TestON/tests/CHOTestMonkey/dependencies/events/NetworkEvent.py
new file mode 100644
index 0000000..46b37e7
--- /dev/null
+++ b/TestON/tests/CHOTestMonkey/dependencies/events/NetworkEvent.py
@@ -0,0 +1,289 @@
+"""
+This file contains classes for CHOTestMonkey that are related to network events
+Author: you@onlab.us
+"""
+from tests.CHOTestMonkey.dependencies.events.Event import EventType, EventStates, Event
+from tests.CHOTestMonkey.dependencies.elements.NetworkElement import NetworkElement, Device, Host, Link
+from tests.CHOTestMonkey.dependencies.GraphHelper import GraphHelper
+
+class LinkEvent( Event ):
+ def __init__( self ):
+ Event.__init__( self )
+ self.linkA = None
+ self.linkB = None
+
+ def startLinkEvent( self ):
+ return EventStates().PASS
+
+ def startEvent( self, args ):
+ """
+ args are the names of the two link ends, e.g. ['s1', 's2']
+ """
+ with self.eventLock:
+ main.log.info( "%s - starting event" % ( self.typeString ) )
+ if len( args ) < 2:
+ main.log.warn( "%s - Not enough arguments: %s" % ( self.typeString, args ) )
+ return EventStates().ABORT
+ elif len( args ) > 2:
+ main.log.warn( "%s - Too many arguments: %s" % ( self.typeString, args ) )
+ return EventStates().ABORT
+ if args[ 0 ] == 'random' or args[ 1 ] == 'random':
+ import random
+ if self.typeIndex == EventType().NETWORK_LINK_DOWN:
+ with main.variableLock:
+ graphHelper = GraphHelper()
+ availableLinks = graphHelper.getNonCutEdges()
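+                        # Restrict the choice to non-cut edges so that taking a link down cannot partition the topology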
+ if len( availableLinks ) == 0:
+ main.log.warn( "All links are cut edges, aborting event" )
+ return EventStates().ABORT
+ linkList = random.sample( availableLinks, 1 )
+ self.linkA = linkList[ 0 ]
+ self.linkB = linkList[ 0 ].backwardLink
+ elif self.typeIndex == EventType().NETWORK_LINK_UP:
+ with main.variableLock:
+ downLinks = []
+ for link in main.links:
+ if link.isDown():
+ downLinks.append( link )
+ if len( downLinks ) == 0:
+ main.log.warn( "None of the links are in 'down' state, aborting event" )
+ return EventStates().ABORT
+ linkList = random.sample( downLinks, 1 )
+ self.linkA = linkList[ 0 ]
+ self.linkB = linkList[ 0 ].backwardLink
+ elif args[ 0 ] == args[ 1 ]:
+ main.log.warn( "%s - invalid arguments: %s" % ( self.typeString, args ) )
+ return EventStates().ABORT
+ else:
+ for link in main.links:
+ if link.deviceA.name == args[ 0 ] and link.deviceB.name == args[ 1 ]:
+ self.linkA = link
+ elif link.deviceA.name == args[ 1 ] and link.deviceB.name == args[ 0 ]:
+ self.linkB = link
+ if self.linkA != None and self.linkB != None:
+ break
+ if self.linkA == None or self.linkB == None:
+ main.log.warn( "Bidirectional link %s - %s does not exist: " % ( args[ 0 ], args[ 1 ] ) )
+ return EventStates().ABORT
+ main.log.debug( "%s - %s" % ( self.typeString, self.linkA ) )
+ return self.startLinkEvent()
+
+class LinkDown( LinkEvent ):
+ """
+    Generate a link down event given the two ends of the link
+ """
+ def __init__( self ):
+ LinkEvent.__init__( self )
+ self.typeString = main.params[ 'EVENT' ][ self.__class__.__name__ ][ 'typeString' ]
+ self.typeIndex = int( main.params[ 'EVENT' ][ self.__class__.__name__ ][ 'typeIndex' ] )
+
+ def startLinkEvent( self ):
+ # TODO: do we need to handle a unidirectional link?
+ assert self.linkA != None and self.linkB != None
+ with main.variableLock:
+ if self.linkA.isDown() or self.linkB.isDown():
+ main.log.warn( "Link Down - link already down" )
+ return EventStates().ABORT
+ elif self.linkA.isRemoved() or self.linkB.isRemoved():
+ main.log.warn( "Link Down - link has been removed" )
+ return EventStates().ABORT
+ with main.mininetLock:
+ result = main.Mininet1.link( END1=self.linkA.deviceA.name,
+ END2=self.linkA.deviceB.name,
+ OPTION="down")
+ if not result:
+ main.log.warn( "%s - failed to bring down link" % ( self.typeString ) )
+ return EventStates().FAIL
+ with main.variableLock:
+ self.linkA.bringDown()
+ self.linkB.bringDown()
+ return EventStates().PASS
+
+class LinkUp( LinkEvent ):
+ """
+    Generate a link up event given the two ends of the link
+ """
+ def __init__( self ):
+ LinkEvent.__init__( self )
+ self.typeString = main.params[ 'EVENT' ][ self.__class__.__name__ ][ 'typeString' ]
+ self.typeIndex = int( main.params[ 'EVENT' ][ self.__class__.__name__ ][ 'typeIndex' ] )
+
+ def startLinkEvent( self ):
+ assert self.linkA != None and self.linkB != None
+ with main.variableLock:
+ if self.linkA.isUp() or self.linkB.isUp():
+ main.log.warn( "Link Up - link already up" )
+ return EventStates().ABORT
+ if self.linkA.isRemoved() or self.linkB.isRemoved():
+ main.log.warn( "Link Up - link has been removed" )
+ return EventStates().ABORT
+ with main.mininetLock:
+ result = main.Mininet1.link( END1=self.linkA.deviceA.name,
+ END2=self.linkA.deviceB.name,
+ OPTION="up")
+ if not result:
+ main.log.warn( "%s - failed to bring up link" % ( self.typeString ) )
+ return EventStates().FAIL
+ with main.variableLock:
+ self.linkA.bringUp()
+ self.linkB.bringUp()
+ return EventStates().PASS
+
+class DeviceEvent( Event ):
+ def __init__( self ):
+ Event.__init__( self )
+ self.device = None
+
+ def startDeviceEvent( self ):
+ return EventStates().PASS
+
+ def startEvent( self, args ):
+ """
+        args is a list containing the device name, e.g. [ 's1' ]
+ """
+ with self.eventLock:
+ main.log.info( "%s - starting event" % ( self.typeString ) )
+ if len( args ) < 1:
+ main.log.warn( "%s - Not enough arguments: %s" % ( self.typeString, args ) )
+ return EventStates().ABORT
+ elif len( args ) > 1:
+ main.log.warn( "%s - Too many arguments: %s" % ( self.typeString, args ) )
+ return EventStates().ABORT
+ if args[ 0 ] == 'random':
+ import random
+ if self.typeIndex == EventType().NETWORK_DEVICE_DOWN:
+ with main.variableLock:
+ graphHelper = GraphHelper()
+ availableDevices = graphHelper.getNonCutVertices()
+ if len( availableDevices ) == 0:
+ main.log.warn( "All devices are cut vertices, aborting event" )
+ return EventStates().ABORT
+ deviceList = random.sample( availableDevices, 1 )
+ self.device = deviceList[ 0 ]
+ elif self.typeIndex == EventType().NETWORK_DEVICE_UP:
+ with main.variableLock:
+ removedDevices = []
+ for device in main.devices:
+ if device.isRemoved():
+ removedDevices.append( device )
+ if len( removedDevices ) == 0:
+ main.log.warn( "None of the devices are removed, aborting event" )
+ return EventStates().ABORT
+ deviceList = random.sample( removedDevices, 1 )
+ self.device = deviceList[ 0 ]
+ else:
+ for device in main.devices:
+ if device.name == args[ 0 ]:
+ self.device = device
+ if self.device == None:
+ main.log.warn( "Device %s does not exist: " % ( args[ 0 ] ) )
+ return EventStates().ABORT
+ main.log.debug( "%s - %s" % ( self.typeString, self.device ) )
+ return self.startDeviceEvent()
+
+class DeviceDown( DeviceEvent ):
+ """
+    Generate a device down event (which currently removes the device) given its name
+ """
+ def __init__( self ):
+ DeviceEvent.__init__( self )
+ self.typeString = main.params[ 'EVENT' ][ self.__class__.__name__ ][ 'typeString' ]
+ self.typeIndex = int( main.params[ 'EVENT' ][ self.__class__.__name__ ][ 'typeIndex' ] )
+
+ def startDeviceEvent( self ):
+ assert self.device != None
+ with main.variableLock:
+ if self.device.isRemoved():
+ main.log.warn( "Device Down - device has been removed" )
+ return EventStates().ABORT
+ with main.mininetLock:
+ result = main.Mininet1.delSwitch( self.device.name )
+ if not result:
+ main.log.warn( "%s - failed to bring down device" % ( self.typeString ) )
+ return EventStates().FAIL
+ with main.variableLock:
+ self.device.setRemoved()
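+            # Removing the device also marks its attached hosts and both directions of its links as removed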
+ for link in self.device.outgoingLinks:
+ link.setRemoved()
+ link.backwardLink.setRemoved()
+ for host in self.device.hosts:
+ host.setRemoved()
+ return EventStates().PASS
+
+class DeviceUp( DeviceEvent ):
+ """
+    Generate a device up event (which re-adds the device if it has been removed) given its name
+ """
+ def __init__( self ):
+ DeviceEvent.__init__( self )
+ self.typeString = main.params[ 'EVENT' ][ self.__class__.__name__ ][ 'typeString' ]
+ self.typeIndex = int( main.params[ 'EVENT' ][ self.__class__.__name__ ][ 'typeIndex' ] )
+
+ def startDeviceEvent( self ):
+ assert self.device != None
+ with main.variableLock:
+ if self.device.isUp():
+ main.log.warn( "Device Up - device already up" )
+ return EventStates().ABORT
+ # Re-add the device
+ with main.mininetLock:
+ result = main.Mininet1.addSwitch( self.device.name, dpid=self.device.dpid[3:] )
+ if not result:
+ main.log.warn( "%s - failed to re-add device" % ( self.typeString ) )
+ return EventStates().FAIL
+ with main.variableLock:
+ self.device.bringUp()
+ # Re-add links
+ # We add host-device links first since we did the same in mininet topology file
+        # TODO: a more robust way would be to add links according to the port info of the device
+ for host in self.device.hosts:
+ # Add host-device link
+ with main.mininetLock:
+ result = main.Mininet1.addLink( self.device.name, host.name )
+ if not result:
+ main.log.warn( "%s - failed to re-connect host %s to device" % ( self.typeString, host.name ) )
+ return EventStates().FAIL
+ for link in self.device.outgoingLinks:
+ neighbor = link.deviceB
+            # Skip any link that connects this device to a removed neighbor
+ if neighbor.isRemoved():
+ continue
+ with main.mininetLock:
+ result = main.Mininet1.addLink( self.device.name, neighbor.name )
+ if not result:
+ main.log.warn( "%s - failed to re-add link to %s" % ( self.typeString, neighbor.name ) )
+ return EventStates().FAIL
+ with main.variableLock:
+ link.bringUp()
+ link.backwardLink.bringUp()
+ # Re-assign mastership for the device
+ with main.mininetLock:
+ main.Mininet1.assignSwController( sw=self.device.name, ip=main.onosIPs )
+ # Re-discover hosts
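+        # Each re-added host pings another up host ( or falls back to pingall when none exists ) so ONOS can re-learn its location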
+ for host in self.device.hosts:
+ correspondent = None
+ for h in main.hosts:
+ if h.isUp() and h != host:
+ correspondent = h
+ break
+ if correspondent == None:
+ with main.mininetLock:
+ main.Mininet1.pingall()
+ if main.enableIPv6:
+ main.Mininet1.pingall( protocol="IPv6" )
+ else:
+ ipv4Addr = None
+ ipv6Addr = None
+ for ipAddress in correspondent.ipAddresses:
+ if ipAddress.startswith( str( main.params[ 'TEST' ][ 'ipv6Prefix' ] ) ) and ipv6Addr == None:
+ ipv6Addr = ipAddress
+ elif ipAddress.startswith( str( main.params[ 'TEST' ][ 'ipv4Prefix' ] ) ) and ipv4Addr == None:
+ ipv4Addr = ipAddress
+ assert ipv4Addr != None
+ host.handle.pingHostSetAlternative( [ ipv4Addr ], 1 )
+ if main.enableIPv6:
+ assert ipv6Addr != None
+ host.handle.pingHostSetAlternative( [ ipv6Addr ], 1, True )
+ with main.variableLock:
+ host.bringUp()
+ return EventStates().PASS
diff --git a/TestON/tests/CHOTestMonkey/dependencies/events/ONOSEvent.py b/TestON/tests/CHOTestMonkey/dependencies/events/ONOSEvent.py
new file mode 100644
index 0000000..f4e2a89
--- /dev/null
+++ b/TestON/tests/CHOTestMonkey/dependencies/events/ONOSEvent.py
@@ -0,0 +1,194 @@
+"""
+This file contains classes for CHOTestMonkey that are related to ONOS events
+Author: you@onlab.us
+"""
+from tests.CHOTestMonkey.dependencies.events.Event import EventType, EventStates, Event
+
+class ONOSEvent( Event ):
+ def __init__( self ):
+ Event.__init__( self )
+ self.ONOSIndex = -1
+
+ def startEvent( self, args ):
+ with self.eventLock:
+ main.log.info( "%s - starting event" % ( self.typeString ) )
+ result = EventStates().PASS
+ if self.typeIndex == EventType().ONOS_ONOS_DOWN or self.typeIndex == EventType().ONOS_ONOS_UP:
+ if len( args ) < 1:
+ main.log.warn( "%s - Not enough arguments: %s" % ( self.typeString, args ) )
+ result = EventStates().ABORT
+ elif len( args ) > 1:
+ main.log.warn( "%s - Too many arguments: %s" % ( self.typeString, args ) )
+ result = EventStates().ABORT
+ else:
+ index = int( args[ 0 ] )
+ if index < 1 or index > int( main.numCtrls ):
+ main.log.warn( "%s - invalid argument: %s" % ( self.typeString, index ) )
+ result = EventStates().ABORT
+ else:
+ self.ONOSIndex = index
+ result = self.startONOSEvent()
+ return result
+
+class ONOSDown( ONOSEvent ):
+ def __init__( self ):
+ ONOSEvent.__init__( self )
+ self.typeString = main.params[ 'EVENT' ][ self.__class__.__name__ ][ 'typeString' ]
+        self.typeIndex = int( main.params[ 'EVENT' ][ self.__class__.__name__ ][ 'typeIndex' ] )
+
+ def startONOSEvent( self ):
+ assert self.ONOSIndex != -1
+ with main.variableLock:
+ if not main.controllers[ self.ONOSIndex - 1 ].isUp():
+ main.log.warn( "ONOS Down - ONOS already down" )
+ return EventStates().ABORT
+ with main.ONOSbenchLock:
+ result = main.ONOSbench.onosStop( main.controllers[ self.ONOSIndex - 1 ].ip )
+ if not result:
+ main.log.warn( "%s - failed to bring down ONOS" % ( self.typeString ) )
+ return EventStates().FAIL
+ with main.variableLock:
+ main.controllers[ self.ONOSIndex - 1 ].bringDown()
+ return EventStates().PASS
+
+class ONOSUp( ONOSEvent ):
+ def __init__( self ):
+ ONOSEvent.__init__( self )
+ self.typeString = main.params[ 'EVENT' ][ self.__class__.__name__ ][ 'typeString' ]
+        self.typeIndex = int( main.params[ 'EVENT' ][ self.__class__.__name__ ][ 'typeIndex' ] )
+
+ def startONOSEvent( self ):
+ assert self.ONOSIndex != -1
+ with main.variableLock:
+ if main.controllers[ self.ONOSIndex - 1 ].isUp():
+ main.log.warn( "ONOS Up - ONOS already up" )
+ return EventStates().ABORT
+ with main.ONOSbenchLock:
+ startResult = main.ONOSbench.onosStart( main.controllers[ self.ONOSIndex - 1 ].ip )
+ if not startResult:
+ main.log.warn( "%s - failed to bring up ONOS" % ( self.typeString ) )
+ return EventStates().FAIL
+ else:
+ ONOSState = main.ONOSbench.isup( main.controllers[ self.ONOSIndex - 1 ].ip )
+ if not ONOSState:
+ main.log.warn( "%s - ONOS is not up" % ( self.typeString ) )
+ return EventStates().FAIL
+ else:
+ cliResult = main.controllers[ self.ONOSIndex - 1 ].startCLI()
+ if not cliResult:
+ main.log.warn( "%s - failed to start ONOS cli" % ( self.typeString ) )
+ return EventStates().FAIL
+ else:
+ with main.variableLock:
+ main.controllers[ self.ONOSIndex - 1 ].bringUp()
+ return EventStates().PASS
+
+class CfgEvent( Event ):
+ def __init__( self ):
+ Event.__init__( self )
+ self.component = ''
+ self.propName = ''
+ self.value = ''
+
+ def startEvent( self, args ):
+ with self.eventLock:
+ main.log.info( "%s - starting event" % ( self.typeString ) )
+ result = self.startCfgEvent( args )
+ return result
+
+class SetCfg( CfgEvent ):
+ def __init__( self ):
+ CfgEvent.__init__( self )
+ self.typeString = main.params[ 'EVENT' ][ self.__class__.__name__ ][ 'typeString' ]
+        self.typeIndex = int( main.params[ 'EVENT' ][ self.__class__.__name__ ][ 'typeIndex' ] )
+
+ def startCfgEvent( self, args ):
+ if len( args ) < 3:
+ main.log.warn( "%s - Not enough arguments: %s" % ( self.typeString, args ) )
+ return EventStates().ABORT
+ elif len( args ) > 3:
+ main.log.warn( "%s - Too many arguments: %s" % ( self.typeString, args ) )
+ return EventStates().ABORT
+ else:
+ self.component = str( args[ 0 ] )
+ self.propName = str( args[ 1 ] )
+ self.value = str( args[ 2 ] )
+ assert self.component != '' and self.propName != '' and self.value != ''
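+        # Use the last controller found to be up to issue the cfg command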
+ index = -1
+ for controller in main.controllers:
+ if controller.isUp():
+ index = controller.index
+ if index == -1:
+            main.log.warn( "%s - No available controllers" % ( self.typeString ) )
+ return EventStates().ABORT
+ controller = main.controllers[ index - 1 ]
+ with controller.CLILock:
+ result = controller.CLI.setCfg( component=self.component,
+ propName=self.propName,
+ value=self.value )
+ if not result:
+ main.log.warn( "%s - failed to set configuration" % ( self.typeString ) )
+ return EventStates().FAIL
+ return EventStates().PASS
+
+class SetFlowObj( CfgEvent ):
+ def __init__( self ):
+ CfgEvent.__init__( self )
+ self.typeString = main.params[ 'EVENT' ][ self.__class__.__name__ ][ 'typeString' ]
+        self.typeIndex = int( main.params[ 'EVENT' ][ self.__class__.__name__ ][ 'typeIndex' ] )
+
+ def startCfgEvent( self, args ):
+ if len( args ) < 1:
+ main.log.warn( "%s - Not enough arguments: %s" % ( self.typeString, args ) )
+ return EventStates().ABORT
+ elif len( args ) > 1:
+ main.log.warn( "%s - Too many arguments: %s" % ( self.typeString, args ) )
+ return EventStates().ABORT
+ elif args[ 0 ] != 'true' and args[ 0 ] != 'false':
+ main.log.warn( "%s - Invalid arguments: %s" % ( self.typeString, args ) )
+ return EventStates().ABORT
+ else:
+ self.component = 'org.onosproject.net.intent.impl.compiler.IntentConfigurableRegistrator'
+ self.propName = 'useFlowObjectives'
+ self.value = str( args[ 0 ] )
+ index = -1
+ for controller in main.controllers:
+ if controller.isUp():
+ index = controller.index
+ if index == -1:
+            main.log.warn( "%s - No available controllers" % ( self.typeString ) )
+ return EventStates().ABORT
+ controller = main.controllers[ index - 1 ]
+ with controller.CLILock:
+ result = controller.CLI.setCfg( component=self.component,
+ propName=self.propName,
+ value=self.value )
+ if not result:
+ main.log.warn( "%s - failed to set configuration" % ( self.typeString ) )
+ return EventStates().FAIL
+ return EventStates().PASS
+
+class BalanceMasters( Event ):
+ def __init__( self ):
+ Event.__init__( self )
+ self.typeString = main.params[ 'EVENT' ][ self.__class__.__name__ ][ 'typeString' ]
+        self.typeIndex = int( main.params[ 'EVENT' ][ self.__class__.__name__ ][ 'typeIndex' ] )
+
+ def startEvent( self, args=None ):
+ with self.eventLock:
+ main.log.info( "%s - starting event" % ( self.typeString ) )
+ index = -1
+ for controller in main.controllers:
+ if controller.isUp():
+ index = controller.index
+ if index == -1:
+                main.log.warn( "%s - No available controllers" % ( self.typeString ) )
+ return EventStates().ABORT
+ controller = main.controllers[ index - 1 ]
+ with controller.CLILock:
+ result = controller.CLI.balanceMasters()
+ if not result:
+ main.log.warn( "%s - failed to balance masters" % ( self.typeString ) )
+ return EventStates().FAIL
+ return EventStates().PASS
+
diff --git a/TestON/tests/CHOTestMonkey/dependencies/events/TestEvent.py b/TestON/tests/CHOTestMonkey/dependencies/events/TestEvent.py
new file mode 100644
index 0000000..605e43f
--- /dev/null
+++ b/TestON/tests/CHOTestMonkey/dependencies/events/TestEvent.py
@@ -0,0 +1,59 @@
+"""
+This file contains classes for CHOTestMonkey that are related to test events
+Author: you@onlab.us
+"""
+from tests.CHOTestMonkey.dependencies.events.Event import EventType, EventStates, Event
+
+class TestEvent( Event ):
+ def __init__( self ):
+ Event.__init__( self )
+
+    def startTestEvent( self, args=None ):
+ return EventStates().PASS
+
+ def startEvent( self, args ):
+ with self.eventLock:
+ main.log.info( "%s - starting event" % ( self.typeString ) )
+ result = self.startTestEvent( args )
+ return result
+
+class TestPause( TestEvent ):
+ def __init__( self ):
+ TestEvent.__init__( self )
+ self.typeString = main.params[ 'EVENT' ][ self.__class__.__name__ ][ 'typeString' ]
+ self.typeIndex = int( main.params[ 'EVENT' ][ self.__class__.__name__ ][ 'typeIndex' ] )
+
+ def startTestEvent( self, args=None ):
+ result = EventStates().PASS
+ main.eventScheduler.setRunningState( False )
+ return result
+
+class TestResume( TestEvent ):
+ def __init__( self ):
+ TestEvent.__init__( self )
+ self.typeString = main.params[ 'EVENT' ][ self.__class__.__name__ ][ 'typeString' ]
+ self.typeIndex = int( main.params[ 'EVENT' ][ self.__class__.__name__ ][ 'typeIndex' ] )
+
+ def startTestEvent( self, args=None ):
+ result = EventStates().PASS
+ main.eventScheduler.setRunningState( True )
+ return result
+
+class TestSleep( TestEvent ):
+ def __init__( self ):
+ TestEvent.__init__( self )
+ self.typeString = main.params[ 'EVENT' ][ self.__class__.__name__ ][ 'typeString' ]
+ self.typeIndex = int( main.params[ 'EVENT' ][ self.__class__.__name__ ][ 'typeIndex' ] )
+
+ def startTestEvent( self, args ):
+ import time
+ result = EventStates().PASS
+        if len( args ) < 1:
+            main.log.warn( "%s - Not enough arguments: %s" % ( self.typeString, args ) )
+            return EventStates().ABORT
+        elif len( args ) > 1:
+            main.log.warn( "%s - Too many arguments: %s" % ( self.typeString, args ) )
+            return EventStates().ABORT
+ sleepTime = int( args[ 0 ] )
+ time.sleep( sleepTime )
+ return result
diff --git a/TestON/tests/CHOTestMonkey/dependencies/events/__init__.py b/TestON/tests/CHOTestMonkey/dependencies/events/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/TestON/tests/CHOTestMonkey/dependencies/events/__init__.py
diff --git a/TestON/tests/CHOTestMonkey/dependencies/topologies/__init__.py b/TestON/tests/CHOTestMonkey/dependencies/topologies/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/TestON/tests/CHOTestMonkey/dependencies/topologies/__init__.py
diff --git a/TestON/tests/CHOTestMonkey/dependencies/topologies/topoAtt.py b/TestON/tests/CHOTestMonkey/dependencies/topologies/topoAtt.py
new file mode 100755
index 0000000..4291f8b
--- /dev/null
+++ b/TestON/tests/CHOTestMonkey/dependencies/topologies/topoAtt.py
@@ -0,0 +1,182 @@
+#!/usr/bin/python
+
+"""
+Custom topology for Mininet
+"""
+from mininet.topo import Topo
+from mininet.net import Mininet
+from mininet.node import RemoteController
+from mininet.node import Node
+from mininet.node import CPULimitedHost
+from mininet.link import TCLink
+from mininet.cli import CLI
+from mininet.log import setLogLevel
+from mininet.util import dumpNodeConnections
+from mininet.node import ( UserSwitch, OVSSwitch, IVSSwitch )
+
+class attTopo( Topo ):
+
+ def __init__( self, **opts ):
+ "Create a topology."
+
+ # Initialize Topology
+ Topo.__init__( self, **opts )
+
+ # add nodes, switches first...
+ NY54 = self.addSwitch( 's1' )
+ CMBR = self.addSwitch( 's2' )
+ CHCG = self.addSwitch( 's3' )
+ CLEV = self.addSwitch( 's4' )
+ RLGH = self.addSwitch( 's5' )
+ ATLN = self.addSwitch( 's6' )
+ PHLA = self.addSwitch( 's7' )
+ WASH = self.addSwitch( 's8' )
+ NSVL = self.addSwitch( 's9' )
+ STLS = self.addSwitch( 's10' )
+ NWOR = self.addSwitch( 's11' )
+ HSTN = self.addSwitch( 's12' )
+ SNAN = self.addSwitch( 's13' )
+ DLLS = self.addSwitch( 's14' )
+ ORLD = self.addSwitch( 's15' )
+ DNVR = self.addSwitch( 's16' )
+ KSCY = self.addSwitch( 's17' )
+ SNFN = self.addSwitch( 's18' )
+ SCRM = self.addSwitch( 's19' )
+ PTLD = self.addSwitch( 's20' )
+ STTL = self.addSwitch( 's21' )
+ SLKC = self.addSwitch( 's22' )
+ LA03 = self.addSwitch( 's23' )
+ SNDG = self.addSwitch( 's24' )
+ PHNX = self.addSwitch( 's25' )
+
+ # ... and now hosts
+ NY54_host = self.addHost( 'h1' )
+ CMBR_host = self.addHost( 'h2' )
+ CHCG_host = self.addHost( 'h3' )
+ CLEV_host = self.addHost( 'h4' )
+ RLGH_host = self.addHost( 'h5' )
+ ATLN_host = self.addHost( 'h6' )
+ PHLA_host = self.addHost( 'h7' )
+ WASH_host = self.addHost( 'h8' )
+ NSVL_host = self.addHost( 'h9' )
+ STLS_host = self.addHost( 'h10' )
+ NWOR_host = self.addHost( 'h11' )
+ HSTN_host = self.addHost( 'h12' )
+ SNAN_host = self.addHost( 'h13' )
+ DLLS_host = self.addHost( 'h14' )
+ ORLD_host = self.addHost( 'h15' )
+ DNVR_host = self.addHost( 'h16' )
+ KSCY_host = self.addHost( 'h17' )
+ SNFN_host = self.addHost( 'h18' )
+ SCRM_host = self.addHost( 'h19' )
+ PTLD_host = self.addHost( 'h20' )
+ STTL_host = self.addHost( 'h21' )
+ SLKC_host = self.addHost( 'h22' )
+ LA03_host = self.addHost( 'h23' )
+ SNDG_host = self.addHost( 'h24' )
+ PHNX_host = self.addHost( 'h25' )
+
+ # add edges between switch and corresponding host
+ self.addLink( NY54 , NY54_host )
+ self.addLink( CMBR , CMBR_host )
+ self.addLink( CHCG , CHCG_host )
+ self.addLink( CLEV , CLEV_host )
+ self.addLink( RLGH , RLGH_host )
+ self.addLink( ATLN , ATLN_host )
+ self.addLink( PHLA , PHLA_host )
+ self.addLink( WASH , WASH_host )
+ self.addLink( NSVL , NSVL_host )
+ self.addLink( STLS , STLS_host )
+ self.addLink( NWOR , NWOR_host )
+ self.addLink( HSTN , HSTN_host )
+ self.addLink( SNAN , SNAN_host )
+ self.addLink( DLLS , DLLS_host )
+ self.addLink( ORLD , ORLD_host )
+ self.addLink( DNVR , DNVR_host )
+ self.addLink( KSCY , KSCY_host )
+ self.addLink( SNFN , SNFN_host )
+ self.addLink( SCRM , SCRM_host )
+ self.addLink( PTLD , PTLD_host )
+ self.addLink( STTL , STTL_host )
+ self.addLink( SLKC , SLKC_host )
+ self.addLink( LA03 , LA03_host )
+ self.addLink( SNDG , SNDG_host )
+ self.addLink( PHNX , PHNX_host )
+
+ # add edges between switches
+ self.addLink( NY54 , CMBR, bw=10, delay='0.979030824185ms')
+ self.addLink( NY54 , CHCG, bw=10, delay='0.806374975652ms')
+ self.addLink( NY54 , PHLA, bw=10, delay='0.686192970166ms')
+ self.addLink( NY54 , WASH, bw=10, delay='0.605826192092ms')
+ self.addLink( CMBR , PHLA, bw=10, delay='1.4018238197ms')
+ self.addLink( CHCG , CLEV, bw=10, delay='0.232315346482ms')
+ self.addLink( CHCG , PHLA, bw=10, delay='1.07297714274ms')
+ self.addLink( CHCG , STLS, bw=10, delay='1.12827896944ms')
+ self.addLink( CHCG , DNVR, bw=10, delay='1.35964770335ms')
+ self.addLink( CHCG , KSCY, bw=10, delay='1.5199778541ms')
+ self.addLink( CHCG , SNFN, bw=10, delay='0.620743405435ms')
+ self.addLink( CHCG , STTL, bw=10, delay='0.93027212534ms')
+ self.addLink( CHCG , SLKC, bw=10, delay='0.735621751348ms')
+ self.addLink( CLEV , NSVL, bw=10, delay='0.523419372248ms')
+ self.addLink( CLEV , STLS, bw=10, delay='1.00360290845ms')
+ self.addLink( CLEV , PHLA, bw=10, delay='0.882912133249ms')
+ self.addLink( RLGH , ATLN, bw=10, delay='1.1644489729ms')
+ self.addLink( RLGH , WASH, bw=10, delay='1.48176810502ms')
+ self.addLink( ATLN , WASH, bw=10, delay='0.557636936322ms')
+ self.addLink( ATLN , NSVL, bw=10, delay='1.32869749865ms')
+ self.addLink( ATLN , STLS, bw=10, delay='0.767705554748ms')
+ self.addLink( ATLN , DLLS, bw=10, delay='0.544782086448ms')
+ self.addLink( ATLN , ORLD, bw=10, delay='1.46119152532ms')
+ self.addLink( PHLA , WASH, bw=10, delay='0.372209320106ms')
+ self.addLink( NSVL , STLS, bw=10, delay='1.43250491305ms')
+ self.addLink( NSVL , DLLS, bw=10, delay='1.67698215288ms')
+ self.addLink( STLS , DLLS, bw=10, delay='0.256389964194ms')
+ self.addLink( STLS , KSCY, bw=10, delay='0.395511571791ms')
+ self.addLink( STLS , LA03, bw=10, delay='0.257085227363ms')
+ self.addLink( NWOR , HSTN, bw=10, delay='0.0952906633914ms')
+ self.addLink( NWOR , DLLS, bw=10, delay='1.60231329739ms')
+ self.addLink( NWOR , ORLD, bw=10, delay='0.692731063896ms')
+ self.addLink( HSTN , SNAN, bw=10, delay='0.284150653798ms')
+ self.addLink( HSTN , DLLS, bw=10, delay='1.65690128332ms')
+ self.addLink( HSTN , ORLD, bw=10, delay='0.731886304782ms')
+ self.addLink( SNAN , PHNX, bw=10, delay='1.34258627257ms')
+ self.addLink( SNAN , DLLS, bw=10, delay='1.50063532341ms')
+ self.addLink( DLLS , DNVR, bw=10, delay='0.251471593235ms')
+ self.addLink( DLLS , KSCY, bw=10, delay='0.18026026737ms')
+ self.addLink( DLLS , SNFN, bw=10, delay='0.74304274592ms')
+ self.addLink( DLLS , LA03, bw=10, delay='0.506439293357ms')
+ self.addLink( DNVR , KSCY, bw=10, delay='0.223328790403ms')
+ self.addLink( DNVR , SNFN, bw=10, delay='0.889017541903ms')
+ self.addLink( DNVR , SLKC, bw=10, delay='0.631898982721ms')
+ self.addLink( KSCY , SNFN, bw=10, delay='0.922778522233ms')
+ self.addLink( SNFN , SCRM, bw=10, delay='0.630352278097ms')
+ self.addLink( SNFN , PTLD, bw=10, delay='0.828572513655ms')
+ self.addLink( SNFN , STTL, bw=10, delay='1.54076081649ms')
+ self.addLink( SNFN , SLKC, bw=10, delay='0.621507502625ms')
+ self.addLink( SNFN , LA03, bw=10, delay='0.602936230151ms')
+ self.addLink( SCRM , SLKC, bw=10, delay='0.461350343644ms')
+ self.addLink( PTLD , STTL, bw=10, delay='1.17591515181ms')
+ self.addLink( SLKC , LA03, bw=10, delay='0.243225267023ms')
+ self.addLink( LA03 , SNDG, bw=10, delay='0.681264950821ms')
+ self.addLink( LA03 , PHNX, bw=10, delay='0.343709457969ms')
+ self.addLink( SNDG , PHNX, bw=10, delay='0.345064487693ms')
+
+topos = { 'att': ( lambda: attTopo() ) }
+
+# HERE THE CODE DEFINITION OF THE TOPOLOGY ENDS
+
+def setupNetwork():
+ "Create network"
+ topo = attTopo()
+ #if controller_ip == '':
+ #controller_ip = '10.0.2.2';
+ # controller_ip = '127.0.0.1';
+ network = Mininet(topo=topo, link=TCLink, autoSetMacs=True, controller=None)
+ network.start()
+ CLI( network )
+ network.stop()
+
+if __name__ == '__main__':
+ setLogLevel('info')
+ #setLogLevel('debug')
+ setupNetwork()
diff --git a/TestON/tests/CHOTestMonkey/dependencies/topologies/topoAttIpv6.py b/TestON/tests/CHOTestMonkey/dependencies/topologies/topoAttIpv6.py
new file mode 100755
index 0000000..f135c81
--- /dev/null
+++ b/TestON/tests/CHOTestMonkey/dependencies/topologies/topoAttIpv6.py
@@ -0,0 +1,188 @@
+#!/usr/bin/python
+
+"""
+Custom topology for Mininet
+"""
+from mininet.topo import Topo
+from mininet.net import Mininet
+from mininet.node import Host, RemoteController
+from mininet.node import Node
+from mininet.node import CPULimitedHost
+from mininet.link import TCLink
+from mininet.cli import CLI
+from mininet.log import setLogLevel
+from mininet.util import dumpNodeConnections
+from mininet.node import ( UserSwitch, OVSSwitch, IVSSwitch )
+
+class dualStackHost( Host ):
+ def config( self, v6Addr='1000::1/64', **params ):
+ r = super( Host, self ).config( **params )
+ intf = self.defaultIntf()
+ self.cmd( 'ip -6 addr add %s dev %s' % ( v6Addr, intf ) )
+ return r
+
+class attTopo( Topo ):
+
+ def __init__( self, **opts ):
+ "Create a topology."
+
+ # Initialize Topology
+ Topo.__init__( self, **opts )
+
+ NY54 = self.addSwitch( 's1' )
+ CMBR = self.addSwitch( 's2' )
+ CHCG = self.addSwitch( 's3' )
+ CLEV = self.addSwitch( 's4' )
+ RLGH = self.addSwitch( 's5' )
+ ATLN = self.addSwitch( 's6' )
+ PHLA = self.addSwitch( 's7' )
+ WASH = self.addSwitch( 's8' )
+ NSVL = self.addSwitch( 's9' )
+ STLS = self.addSwitch( 's10' )
+ NWOR = self.addSwitch( 's11' )
+ HSTN = self.addSwitch( 's12' )
+ SNAN = self.addSwitch( 's13' )
+ DLLS = self.addSwitch( 's14' )
+ ORLD = self.addSwitch( 's15' )
+ DNVR = self.addSwitch( 's16' )
+ KSCY = self.addSwitch( 's17' )
+ SNFN = self.addSwitch( 's18' )
+ SCRM = self.addSwitch( 's19' )
+ PTLD = self.addSwitch( 's20' )
+ STTL = self.addSwitch( 's21' )
+ SLKC = self.addSwitch( 's22' )
+ LA03 = self.addSwitch( 's23' )
+ SNDG = self.addSwitch( 's24' )
+ PHNX = self.addSwitch( 's25' )
+
+ # ... and now hosts
+ NY54_host = self.addHost( 'h1', ip='10.1.0.1/24', cls=dualStackHost, v6Addr='1000::1/64' )
+ CMBR_host = self.addHost( 'h2', ip='10.1.0.2/24', cls=dualStackHost, v6Addr='1000::2/64' )
+ CHCG_host = self.addHost( 'h3', ip='10.1.0.3/24', cls=dualStackHost, v6Addr='1000::3/64' )
+ CLEV_host = self.addHost( 'h4', ip='10.1.0.4/24', cls=dualStackHost, v6Addr='1000::4/64' )
+ RLGH_host = self.addHost( 'h5', ip='10.1.0.5/24', cls=dualStackHost, v6Addr='1000::5/64' )
+ ATLN_host = self.addHost( 'h6', ip='10.1.0.6/24', cls=dualStackHost, v6Addr='1000::6/64' )
+ PHLA_host = self.addHost( 'h7', ip='10.1.0.7/24', cls=dualStackHost, v6Addr='1000::7/64' )
+ WASH_host = self.addHost( 'h8', ip='10.1.0.8/24', cls=dualStackHost, v6Addr='1000::8/64' )
+ NSVL_host = self.addHost( 'h9', ip='10.1.0.9/24', cls=dualStackHost, v6Addr='1000::9/64' )
+ STLS_host = self.addHost( 'h10', ip='10.1.0.10/24', cls=dualStackHost, v6Addr='1000::10/64' )
+ NWOR_host = self.addHost( 'h11', ip='10.1.0.11/24', cls=dualStackHost, v6Addr='1000::11/64' )
+ HSTN_host = self.addHost( 'h12', ip='10.1.0.12/24', cls=dualStackHost, v6Addr='1000::12/64' )
+ SNAN_host = self.addHost( 'h13', ip='10.1.0.13/24', cls=dualStackHost, v6Addr='1000::13/64' )
+ DLLS_host = self.addHost( 'h14', ip='10.1.0.14/24', cls=dualStackHost, v6Addr='1000::14/64' )
+ ORLD_host = self.addHost( 'h15', ip='10.1.0.15/24', cls=dualStackHost, v6Addr='1000::15/64' )
+ DNVR_host = self.addHost( 'h16', ip='10.1.0.16/24', cls=dualStackHost, v6Addr='1000::16/64' )
+ KSCY_host = self.addHost( 'h17', ip='10.1.0.17/24', cls=dualStackHost, v6Addr='1000::17/64' )
+ SNFN_host = self.addHost( 'h18', ip='10.1.0.18/24', cls=dualStackHost, v6Addr='1000::18/64' )
+ SCRM_host = self.addHost( 'h19', ip='10.1.0.19/24', cls=dualStackHost, v6Addr='1000::19/64' )
+ PTLD_host = self.addHost( 'h20', ip='10.1.0.20/24', cls=dualStackHost, v6Addr='1000::20/64' )
+ STTL_host = self.addHost( 'h21', ip='10.1.0.21/24', cls=dualStackHost, v6Addr='1000::21/64' )
+ SLKC_host = self.addHost( 'h22', ip='10.1.0.22/24', cls=dualStackHost, v6Addr='1000::22/64' )
+ LA03_host = self.addHost( 'h23', ip='10.1.0.23/24', cls=dualStackHost, v6Addr='1000::23/64' )
+ SNDG_host = self.addHost( 'h24', ip='10.1.0.24/24', cls=dualStackHost, v6Addr='1000::24/64' )
+ PHNX_host = self.addHost( 'h25', ip='10.1.0.25/24', cls=dualStackHost, v6Addr='1000::25/64' )
+
+ # add edges between switch and corresponding host
+ self.addLink( NY54 , NY54_host )
+ self.addLink( CMBR , CMBR_host )
+ self.addLink( CHCG , CHCG_host )
+ self.addLink( CLEV , CLEV_host )
+ self.addLink( RLGH , RLGH_host )
+ self.addLink( ATLN , ATLN_host )
+ self.addLink( PHLA , PHLA_host )
+ self.addLink( WASH , WASH_host )
+ self.addLink( NSVL , NSVL_host )
+ self.addLink( STLS , STLS_host )
+ self.addLink( NWOR , NWOR_host )
+ self.addLink( HSTN , HSTN_host )
+ self.addLink( SNAN , SNAN_host )
+ self.addLink( DLLS , DLLS_host )
+ self.addLink( ORLD , ORLD_host )
+ self.addLink( DNVR , DNVR_host )
+ self.addLink( KSCY , KSCY_host )
+ self.addLink( SNFN , SNFN_host )
+ self.addLink( SCRM , SCRM_host )
+ self.addLink( PTLD , PTLD_host )
+ self.addLink( STTL , STTL_host )
+ self.addLink( SLKC , SLKC_host )
+ self.addLink( LA03 , LA03_host )
+ self.addLink( SNDG , SNDG_host )
+ self.addLink( PHNX , PHNX_host )
+
+ # add edges between switches
+ self.addLink( NY54 , CMBR, bw=10, delay='0.979030824185ms')
+ self.addLink( NY54 , CHCG, bw=10, delay='0.806374975652ms')
+ self.addLink( NY54 , PHLA, bw=10, delay='0.686192970166ms')
+ self.addLink( NY54 , WASH, bw=10, delay='0.605826192092ms')
+ self.addLink( CMBR , PHLA, bw=10, delay='1.4018238197ms')
+ self.addLink( CHCG , CLEV, bw=10, delay='0.232315346482ms')
+ self.addLink( CHCG , PHLA, bw=10, delay='1.07297714274ms')
+ self.addLink( CHCG , STLS, bw=10, delay='1.12827896944ms')
+ self.addLink( CHCG , DNVR, bw=10, delay='1.35964770335ms')
+ self.addLink( CHCG , KSCY, bw=10, delay='1.5199778541ms')
+ self.addLink( CHCG , SNFN, bw=10, delay='0.620743405435ms')
+ self.addLink( CHCG , STTL, bw=10, delay='0.93027212534ms')
+ self.addLink( CHCG , SLKC, bw=10, delay='0.735621751348ms')
+ self.addLink( CLEV , NSVL, bw=10, delay='0.523419372248ms')
+ self.addLink( CLEV , STLS, bw=10, delay='1.00360290845ms')
+ self.addLink( CLEV , PHLA, bw=10, delay='0.882912133249ms')
+ self.addLink( RLGH , ATLN, bw=10, delay='1.1644489729ms')
+ self.addLink( RLGH , WASH, bw=10, delay='1.48176810502ms')
+ self.addLink( ATLN , WASH, bw=10, delay='0.557636936322ms')
+ self.addLink( ATLN , NSVL, bw=10, delay='1.32869749865ms')
+ self.addLink( ATLN , STLS, bw=10, delay='0.767705554748ms')
+ self.addLink( ATLN , DLLS, bw=10, delay='0.544782086448ms')
+ self.addLink( ATLN , ORLD, bw=10, delay='1.46119152532ms')
+ self.addLink( PHLA , WASH, bw=10, delay='0.372209320106ms')
+ self.addLink( NSVL , STLS, bw=10, delay='1.43250491305ms')
+ self.addLink( NSVL , DLLS, bw=10, delay='1.67698215288ms')
+ self.addLink( STLS , DLLS, bw=10, delay='0.256389964194ms')
+ self.addLink( STLS , KSCY, bw=10, delay='0.395511571791ms')
+ self.addLink( STLS , LA03, bw=10, delay='0.257085227363ms')
+ self.addLink( NWOR , HSTN, bw=10, delay='0.0952906633914ms')
+ self.addLink( NWOR , DLLS, bw=10, delay='1.60231329739ms')
+ self.addLink( NWOR , ORLD, bw=10, delay='0.692731063896ms')
+ self.addLink( HSTN , SNAN, bw=10, delay='0.284150653798ms')
+ self.addLink( HSTN , DLLS, bw=10, delay='1.65690128332ms')
+ self.addLink( HSTN , ORLD, bw=10, delay='0.731886304782ms')
+ self.addLink( SNAN , PHNX, bw=10, delay='1.34258627257ms')
+ self.addLink( SNAN , DLLS, bw=10, delay='1.50063532341ms')
+ self.addLink( DLLS , DNVR, bw=10, delay='0.251471593235ms')
+ self.addLink( DLLS , KSCY, bw=10, delay='0.18026026737ms')
+ self.addLink( DLLS , SNFN, bw=10, delay='0.74304274592ms')
+ self.addLink( DLLS , LA03, bw=10, delay='0.506439293357ms')
+ self.addLink( DNVR , KSCY, bw=10, delay='0.223328790403ms')
+ self.addLink( DNVR , SNFN, bw=10, delay='0.889017541903ms')
+ self.addLink( DNVR , SLKC, bw=10, delay='0.631898982721ms')
+ self.addLink( KSCY , SNFN, bw=10, delay='0.922778522233ms')
+ self.addLink( SNFN , SCRM, bw=10, delay='0.630352278097ms')
+ self.addLink( SNFN , PTLD, bw=10, delay='0.828572513655ms')
+ self.addLink( SNFN , STTL, bw=10, delay='1.54076081649ms')
+ self.addLink( SNFN , SLKC, bw=10, delay='0.621507502625ms')
+ self.addLink( SNFN , LA03, bw=10, delay='0.602936230151ms')
+ self.addLink( SCRM , SLKC, bw=10, delay='0.461350343644ms')
+ self.addLink( PTLD , STTL, bw=10, delay='1.17591515181ms')
+ self.addLink( SLKC , LA03, bw=10, delay='0.243225267023ms')
+ self.addLink( LA03 , SNDG, bw=10, delay='0.681264950821ms')
+ self.addLink( LA03 , PHNX, bw=10, delay='0.343709457969ms')
+ self.addLink( SNDG , PHNX, bw=10, delay='0.345064487693ms')
+
+topos = { 'att': ( lambda: attTopo() ) }
+
+# HERE THE CODE DEFINITION OF THE TOPOLOGY ENDS
+
+def setupNetwork():
+ "Create network"
+ topo = attTopo()
+ #if controller_ip == '':
+ #controller_ip = '10.0.2.2';
+ # controller_ip = '127.0.0.1';
+ network = Mininet(topo=topo, link=TCLink, autoSetMacs=True, controller=None)
+ network.start()
+ CLI( network )
+ network.stop()
+
+if __name__ == '__main__':
+ setLogLevel('info')
+ #setLogLevel('debug')
+ setupNetwork()
diff --git a/TestON/tests/CHOTestMonkey/dependencies/topologies/topoChordal.py b/TestON/tests/CHOTestMonkey/dependencies/topologies/topoChordal.py
new file mode 100755
index 0000000..542c3ed
--- /dev/null
+++ b/TestON/tests/CHOTestMonkey/dependencies/topologies/topoChordal.py
@@ -0,0 +1,422 @@
+#!/usr/bin/python
+"""
+Custom topology for Mininet
+"""
+from mininet.topo import Topo
+from mininet.net import Mininet
+from mininet.node import RemoteController
+from mininet.node import Node
+from mininet.node import CPULimitedHost
+from mininet.link import TCLink
+from mininet.cli import CLI
+from mininet.log import setLogLevel
+from mininet.util import dumpNodeConnections
+from mininet.node import ( UserSwitch, OVSSwitch, IVSSwitch )
+
+class chordalTopo( Topo ):
+
+ def __init__( self, **opts ):
+ "Create a topology."
+
+ # Initialize Topology
+ Topo.__init__( self, **opts )
+
+ # add nodes, switches first...
+ s1 = self.addSwitch( 's1' )
+ s2 = self.addSwitch( 's2' )
+ s3 = self.addSwitch( 's3' )
+ s4 = self.addSwitch( 's4' )
+ s5 = self.addSwitch( 's5' )
+ s6 = self.addSwitch( 's6' )
+ s7 = self.addSwitch( 's7' )
+ s8 = self.addSwitch( 's8' )
+ s9 = self.addSwitch( 's9' )
+ s10 = self.addSwitch( 's10' )
+ s11 = self.addSwitch( 's11' )
+ s12 = self.addSwitch( 's12' )
+ s13 = self.addSwitch( 's13' )
+ s14 = self.addSwitch( 's14' )
+ s15 = self.addSwitch( 's15' )
+ s16 = self.addSwitch( 's16' )
+ s17 = self.addSwitch( 's17' )
+ s18 = self.addSwitch( 's18' )
+ s19 = self.addSwitch( 's19' )
+ s20 = self.addSwitch( 's20' )
+ s21 = self.addSwitch( 's21' )
+ s22 = self.addSwitch( 's22' )
+ s23 = self.addSwitch( 's23' )
+ s24 = self.addSwitch( 's24' )
+ s25 = self.addSwitch( 's25' )
+
+ # ... and now hosts
+ s1_host = self.addHost( 'h1' )
+ s2_host = self.addHost( 'h2' )
+ s3_host = self.addHost( 'h3' )
+ s4_host = self.addHost( 'h4' )
+ s5_host = self.addHost( 'h5' )
+ s6_host = self.addHost( 'h6' )
+ s7_host = self.addHost( 'h7' )
+ s8_host = self.addHost( 'h8' )
+ s9_host = self.addHost( 'h9' )
+ s10_host = self.addHost( 'h10' )
+ s11_host = self.addHost( 'h11' )
+ s12_host = self.addHost( 'h12' )
+ s13_host = self.addHost( 'h13' )
+ s14_host = self.addHost( 'h14' )
+ s15_host = self.addHost( 'h15' )
+ s16_host = self.addHost( 'h16' )
+ s17_host = self.addHost( 'h17' )
+ s18_host = self.addHost( 'h18' )
+ s19_host = self.addHost( 'h19' )
+ s20_host = self.addHost( 'h20' )
+ s21_host = self.addHost( 'h21' )
+ s22_host = self.addHost( 'h22' )
+ s23_host = self.addHost( 'h23' )
+ s24_host = self.addHost( 'h24' )
+ s25_host = self.addHost( 'h25' )
+
+ # add edges between switch and corresponding host
+ self.addLink( s1 , s1_host )
+ self.addLink( s2 , s2_host )
+ self.addLink( s3 , s3_host )
+ self.addLink( s4 , s4_host )
+ self.addLink( s5 , s5_host )
+ self.addLink( s6 , s6_host )
+ self.addLink( s7 , s7_host )
+ self.addLink( s8 , s8_host )
+ self.addLink( s9 , s9_host )
+ self.addLink( s10 , s10_host )
+ self.addLink( s11 , s11_host )
+ self.addLink( s12 , s12_host )
+ self.addLink( s13 , s13_host )
+ self.addLink( s14 , s14_host )
+ self.addLink( s15 , s15_host )
+ self.addLink( s16 , s16_host )
+ self.addLink( s17 , s17_host )
+ self.addLink( s18 , s18_host )
+ self.addLink( s19 , s19_host )
+ self.addLink( s20 , s20_host )
+ self.addLink( s21 , s21_host )
+ self.addLink( s22 , s22_host )
+ self.addLink( s23 , s23_host )
+ self.addLink( s24 , s24_host )
+ self.addLink( s25 , s25_host )
+ self.addLink(s1, s2)
+ self.addLink(s1, s3)
+ self.addLink(s1, s4)
+ self.addLink(s1, s5)
+ self.addLink(s1, s6)
+ self.addLink(s1, s7)
+ self.addLink(s1, s8)
+ self.addLink(s1, s9)
+ self.addLink(s1, s10)
+ self.addLink(s1, s11)
+ self.addLink(s1, s12)
+ self.addLink(s1, s13)
+ self.addLink(s1, s14)
+ self.addLink(s1, s15)
+ self.addLink(s1, s16)
+ self.addLink(s1, s17)
+ self.addLink(s1, s18)
+ self.addLink(s1, s19)
+ self.addLink(s1, s20)
+ self.addLink(s1, s21)
+ self.addLink(s1, s22)
+ self.addLink(s1, s23)
+ self.addLink(s1, s24)
+ self.addLink(s1, s25)
+ self.addLink(s2, s3)
+ self.addLink(s2, s4)
+ self.addLink(s2, s5)
+ self.addLink(s2, s6)
+ self.addLink(s2, s7)
+ self.addLink(s2, s8)
+ self.addLink(s2, s9)
+ self.addLink(s2, s10)
+ self.addLink(s2, s11)
+ self.addLink(s2, s12)
+ self.addLink(s2, s13)
+ self.addLink(s2, s14)
+ self.addLink(s2, s15)
+ self.addLink(s2, s16)
+ self.addLink(s2, s17)
+ self.addLink(s2, s18)
+ self.addLink(s2, s19)
+ self.addLink(s2, s20)
+ self.addLink(s2, s21)
+ self.addLink(s2, s22)
+ self.addLink(s2, s23)
+ self.addLink(s2, s24)
+ self.addLink(s2, s25)
+ self.addLink(s3, s4)
+ self.addLink(s3, s5)
+ self.addLink(s3, s6)
+ self.addLink(s3, s7)
+ self.addLink(s3, s8)
+ self.addLink(s3, s9)
+ self.addLink(s3, s10)
+ self.addLink(s3, s11)
+ self.addLink(s3, s12)
+ self.addLink(s3, s13)
+ self.addLink(s3, s14)
+ self.addLink(s3, s15)
+ self.addLink(s3, s16)
+ self.addLink(s3, s17)
+ self.addLink(s3, s18)
+ self.addLink(s3, s19)
+ self.addLink(s3, s20)
+ self.addLink(s3, s21)
+ self.addLink(s3, s22)
+ self.addLink(s3, s23)
+ self.addLink(s3, s24)
+ self.addLink(s3, s25)
+ self.addLink(s4, s5)
+ self.addLink(s4, s6)
+ self.addLink(s4, s7)
+ self.addLink(s4, s8)
+ self.addLink(s4, s9)
+ self.addLink(s4, s10)
+ self.addLink(s4, s11)
+ self.addLink(s4, s12)
+ self.addLink(s4, s13)
+ self.addLink(s4, s14)
+ self.addLink(s4, s15)
+ self.addLink(s4, s16)
+ self.addLink(s4, s17)
+ self.addLink(s4, s18)
+ self.addLink(s4, s19)
+ self.addLink(s4, s20)
+ self.addLink(s4, s21)
+ self.addLink(s4, s22)
+ self.addLink(s4, s23)
+ self.addLink(s4, s24)
+ self.addLink(s4, s25)
+ self.addLink(s5, s6)
+ self.addLink(s5, s7)
+ self.addLink(s5, s8)
+ self.addLink(s5, s9)
+ self.addLink(s5, s10)
+ self.addLink(s5, s11)
+ self.addLink(s5, s12)
+ self.addLink(s5, s13)
+ self.addLink(s5, s14)
+ self.addLink(s5, s15)
+ self.addLink(s5, s16)
+ self.addLink(s5, s17)
+ self.addLink(s5, s18)
+ self.addLink(s5, s19)
+ self.addLink(s5, s20)
+ self.addLink(s5, s21)
+ self.addLink(s5, s22)
+ self.addLink(s5, s23)
+ self.addLink(s5, s24)
+ self.addLink(s5, s25)
+ self.addLink(s6, s7)
+ self.addLink(s6, s8)
+ self.addLink(s6, s9)
+ self.addLink(s6, s10)
+ self.addLink(s6, s11)
+ self.addLink(s6, s12)
+ self.addLink(s6, s13)
+ self.addLink(s6, s14)
+ self.addLink(s6, s15)
+ self.addLink(s6, s16)
+ self.addLink(s6, s17)
+ self.addLink(s6, s18)
+ self.addLink(s6, s19)
+ self.addLink(s6, s20)
+ self.addLink(s6, s21)
+ self.addLink(s6, s22)
+ self.addLink(s6, s23)
+ self.addLink(s6, s24)
+ self.addLink(s6, s25)
+ self.addLink(s7, s8)
+ self.addLink(s7, s9)
+ self.addLink(s7, s10)
+ self.addLink(s7, s11)
+ self.addLink(s7, s12)
+ self.addLink(s7, s13)
+ self.addLink(s7, s14)
+ self.addLink(s7, s15)
+ self.addLink(s7, s16)
+ self.addLink(s7, s17)
+ self.addLink(s7, s18)
+ self.addLink(s7, s19)
+ self.addLink(s7, s20)
+ self.addLink(s7, s21)
+ self.addLink(s7, s22)
+ self.addLink(s7, s23)
+ self.addLink(s7, s24)
+ self.addLink(s7, s25)
+ self.addLink(s8, s9)
+ self.addLink(s8, s10)
+ self.addLink(s8, s11)
+ self.addLink(s8, s12)
+ self.addLink(s8, s13)
+ self.addLink(s8, s14)
+ self.addLink(s8, s15)
+ self.addLink(s8, s16)
+ self.addLink(s8, s17)
+ self.addLink(s8, s18)
+ self.addLink(s8, s19)
+ self.addLink(s8, s20)
+ self.addLink(s8, s21)
+ self.addLink(s8, s22)
+ self.addLink(s8, s23)
+ self.addLink(s8, s24)
+ self.addLink(s8, s25)
+ self.addLink(s9, s10)
+ self.addLink(s9, s11)
+ self.addLink(s9, s12)
+ self.addLink(s9, s13)
+ self.addLink(s9, s14)
+ self.addLink(s9, s15)
+ self.addLink(s9, s16)
+ self.addLink(s9, s17)
+ self.addLink(s9, s18)
+ self.addLink(s9, s19)
+ self.addLink(s9, s20)
+ self.addLink(s9, s21)
+ self.addLink(s9, s22)
+ self.addLink(s9, s23)
+ self.addLink(s9, s24)
+ self.addLink(s9, s25)
+ self.addLink(s10, s11)
+ self.addLink(s10, s12)
+ self.addLink(s10, s13)
+ self.addLink(s10, s14)
+ self.addLink(s10, s15)
+ self.addLink(s10, s16)
+ self.addLink(s10, s17)
+ self.addLink(s10, s18)
+ self.addLink(s10, s19)
+ self.addLink(s10, s20)
+ self.addLink(s10, s21)
+ self.addLink(s10, s22)
+ self.addLink(s10, s23)
+ self.addLink(s10, s24)
+ self.addLink(s10, s25)
+ self.addLink(s11, s12)
+ self.addLink(s11, s13)
+ self.addLink(s11, s14)
+ self.addLink(s11, s15)
+ self.addLink(s11, s16)
+ self.addLink(s11, s17)
+ self.addLink(s11, s18)
+ self.addLink(s11, s19)
+ self.addLink(s11, s20)
+ self.addLink(s11, s21)
+ self.addLink(s11, s22)
+ self.addLink(s11, s23)
+ self.addLink(s11, s24)
+ self.addLink(s11, s25)
+ self.addLink(s12, s13)
+ self.addLink(s12, s14)
+ self.addLink(s12, s15)
+ self.addLink(s12, s16)
+ self.addLink(s12, s17)
+ self.addLink(s12, s18)
+ self.addLink(s12, s19)
+ self.addLink(s12, s20)
+ self.addLink(s12, s21)
+ self.addLink(s12, s22)
+ self.addLink(s12, s23)
+ self.addLink(s12, s24)
+ self.addLink(s12, s25)
+ self.addLink(s13, s14)
+ self.addLink(s13, s15)
+ self.addLink(s13, s16)
+ self.addLink(s13, s17)
+ self.addLink(s13, s18)
+ self.addLink(s13, s19)
+ self.addLink(s13, s20)
+ self.addLink(s13, s21)
+ self.addLink(s13, s22)
+ self.addLink(s13, s23)
+ self.addLink(s13, s24)
+ self.addLink(s13, s25)
+ self.addLink(s14, s15)
+ self.addLink(s14, s16)
+ self.addLink(s14, s17)
+ self.addLink(s14, s18)
+ self.addLink(s14, s19)
+ self.addLink(s14, s20)
+ self.addLink(s14, s21)
+ self.addLink(s14, s22)
+ self.addLink(s14, s23)
+ self.addLink(s14, s24)
+ self.addLink(s14, s25)
+ self.addLink(s15, s16)
+ self.addLink(s15, s17)
+ self.addLink(s15, s18)
+ self.addLink(s15, s19)
+ self.addLink(s15, s20)
+ self.addLink(s15, s21)
+ self.addLink(s15, s22)
+ self.addLink(s15, s23)
+ self.addLink(s15, s24)
+ self.addLink(s15, s25)
+ self.addLink(s16, s17)
+ self.addLink(s16, s18)
+ self.addLink(s16, s19)
+ self.addLink(s16, s20)
+ self.addLink(s16, s21)
+ self.addLink(s16, s22)
+ self.addLink(s16, s23)
+ self.addLink(s16, s24)
+ self.addLink(s16, s25)
+ self.addLink(s17, s18)
+ self.addLink(s17, s19)
+ self.addLink(s17, s20)
+ self.addLink(s17, s21)
+ self.addLink(s17, s22)
+ self.addLink(s17, s23)
+ self.addLink(s17, s24)
+ self.addLink(s17, s25)
+ self.addLink(s18, s19)
+ self.addLink(s18, s20)
+ self.addLink(s18, s21)
+ self.addLink(s18, s22)
+ self.addLink(s18, s23)
+ self.addLink(s18, s24)
+ self.addLink(s18, s25)
+ self.addLink(s19, s20)
+ self.addLink(s19, s21)
+ self.addLink(s19, s22)
+ self.addLink(s19, s23)
+ self.addLink(s19, s24)
+ self.addLink(s19, s25)
+ self.addLink(s20, s21)
+ self.addLink(s20, s22)
+ self.addLink(s20, s23)
+ self.addLink(s20, s24)
+ self.addLink(s20, s25)
+ self.addLink(s21, s22)
+ self.addLink(s21, s23)
+ self.addLink(s21, s24)
+ self.addLink(s21, s25)
+ self.addLink(s22, s23)
+ self.addLink(s22, s24)
+ self.addLink(s22, s25)
+ self.addLink(s23, s24)
+ self.addLink(s23, s25)
+ self.addLink(s24, s25)
+
+topos = { 'chordal': ( lambda: chordalTopo() ) }
+
+# HERE THE CODE DEFINITION OF THE TOPOLOGY ENDS
+
+def setupNetwork():
+ "Create network"
+ topo = chordalTopo()
+ #if controller_ip == '':
+ #controller_ip = '10.0.2.2';
+ # controller_ip = '127.0.0.1';
+ network = Mininet(topo=topo, switch=OVSSwitch, autoSetMacs=True, controller=None)
+ network.start()
+ CLI( network )
+ network.stop()
+
+if __name__ == '__main__':
+ setLogLevel('info')
+ #setLogLevel('debug')
+ setupNetwork()
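
The chordal file above wires every switch pair by hand. For reference, the same full mesh can be produced with a loop; a minimal sketch using the standard Mininet Topo API, with the hypothetical class name chordalLoopTopo (not part of this patch):

from itertools import combinations
from mininet.topo import Topo

class chordalLoopTopo( Topo ):
    "Loop-based equivalent of the 25-switch full mesh above."
    def __init__( self, n=25, **opts ):
        Topo.__init__( self, **opts )
        switches = [ self.addSwitch( 's%d' % i ) for i in range( 1, n + 1 ) ]
        for i, sw in enumerate( switches, 1 ):
            # one host per switch, named to match the explicit version above
            self.addLink( sw, self.addHost( 'h%d' % i ) )
        # full mesh: one link per unordered switch pair, 25 * 24 / 2 = 300 links
        for sw1, sw2 in combinations( switches, 2 ):
            self.addLink( sw1, sw2 )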
diff --git a/TestON/tests/CHOTestMonkey/dependencies/topologies/topoChordalIpv6.py b/TestON/tests/CHOTestMonkey/dependencies/topologies/topoChordalIpv6.py
new file mode 100755
index 0000000..87d5946
--- /dev/null
+++ b/TestON/tests/CHOTestMonkey/dependencies/topologies/topoChordalIpv6.py
@@ -0,0 +1,428 @@
+#!/usr/bin/python
+"""
+"""
+from mininet.topo import Topo
+from mininet.net import Mininet
+from mininet.node import Host, RemoteController
+from mininet.node import Node
+from mininet.node import CPULimitedHost
+from mininet.link import TCLink
+from mininet.cli import CLI
+from mininet.log import setLogLevel
+from mininet.util import dumpNodeConnections
+from mininet.node import ( UserSwitch, OVSSwitch, IVSSwitch )
+
+class dualStackHost( Host ):
+ def config( self, v6Addr='1000::1/64', **params ):
+ r = super( dualStackHost, self ).config( **params )
+ intf = self.defaultIntf()
+ self.cmd( 'ip -6 addr add %s dev %s' % ( v6Addr, intf ) )
+ return r
+
+class chordalTopo( Topo ):
+
+ def __init__( self, **opts ):
+ "Create a topology."
+
+ # Initialize Topology
+ Topo.__init__( self, **opts )
+
+ s1 = self.addSwitch( 's1' )
+ s2 = self.addSwitch( 's2' )
+ s3 = self.addSwitch( 's3' )
+ s4 = self.addSwitch( 's4' )
+ s5 = self.addSwitch( 's5' )
+ s6 = self.addSwitch( 's6' )
+ s7 = self.addSwitch( 's7' )
+ s8 = self.addSwitch( 's8' )
+ s9 = self.addSwitch( 's9' )
+ s10 = self.addSwitch( 's10' )
+ s11 = self.addSwitch( 's11' )
+ s12 = self.addSwitch( 's12' )
+ s13 = self.addSwitch( 's13' )
+ s14 = self.addSwitch( 's14' )
+ s15 = self.addSwitch( 's15' )
+ s16 = self.addSwitch( 's16' )
+ s17 = self.addSwitch( 's17' )
+ s18 = self.addSwitch( 's18' )
+ s19 = self.addSwitch( 's19' )
+ s20 = self.addSwitch( 's20' )
+ s21 = self.addSwitch( 's21' )
+ s22 = self.addSwitch( 's22' )
+ s23 = self.addSwitch( 's23' )
+ s24 = self.addSwitch( 's24' )
+ s25 = self.addSwitch( 's25' )
+
+ # ... and now hosts
+ s1_host = self.addHost( 'h1', ip='10.1.0.1/24', cls=dualStackHost, v6Addr='1000::1/64' )
+ s2_host = self.addHost( 'h2', ip='10.1.0.2/24', cls=dualStackHost, v6Addr='1000::2/64' )
+ s3_host = self.addHost( 'h3', ip='10.1.0.3/24', cls=dualStackHost, v6Addr='1000::3/64' )
+ s4_host = self.addHost( 'h4', ip='10.1.0.4/24', cls=dualStackHost, v6Addr='1000::4/64' )
+ s5_host = self.addHost( 'h5', ip='10.1.0.5/24', cls=dualStackHost, v6Addr='1000::5/64' )
+ s6_host = self.addHost( 'h6', ip='10.1.0.6/24', cls=dualStackHost, v6Addr='1000::6/64' )
+ s7_host = self.addHost( 'h7', ip='10.1.0.7/24', cls=dualStackHost, v6Addr='1000::7/64' )
+ s8_host = self.addHost( 'h8', ip='10.1.0.8/24', cls=dualStackHost, v6Addr='1000::8/64' )
+ s9_host = self.addHost( 'h9', ip='10.1.0.9/24', cls=dualStackHost, v6Addr='1000::9/64' )
+ s10_host = self.addHost( 'h10', ip='10.1.0.10/24', cls=dualStackHost, v6Addr='1000::10/64' )
+ s11_host = self.addHost( 'h11', ip='10.1.0.11/24', cls=dualStackHost, v6Addr='1000::11/64' )
+ s12_host = self.addHost( 'h12', ip='10.1.0.12/24', cls=dualStackHost, v6Addr='1000::12/64' )
+ s13_host = self.addHost( 'h13', ip='10.1.0.13/24', cls=dualStackHost, v6Addr='1000::13/64' )
+ s14_host = self.addHost( 'h14', ip='10.1.0.14/24', cls=dualStackHost, v6Addr='1000::14/64' )
+ s15_host = self.addHost( 'h15', ip='10.1.0.15/24', cls=dualStackHost, v6Addr='1000::15/64' )
+ s16_host = self.addHost( 'h16', ip='10.1.0.16/24', cls=dualStackHost, v6Addr='1000::16/64' )
+ s17_host = self.addHost( 'h17', ip='10.1.0.17/24', cls=dualStackHost, v6Addr='1000::17/64' )
+ s18_host = self.addHost( 'h18', ip='10.1.0.18/24', cls=dualStackHost, v6Addr='1000::18/64' )
+ s19_host = self.addHost( 'h19', ip='10.1.0.19/24', cls=dualStackHost, v6Addr='1000::19/64' )
+ s20_host = self.addHost( 'h20', ip='10.1.0.20/24', cls=dualStackHost, v6Addr='1000::20/64' )
+ s21_host = self.addHost( 'h21', ip='10.1.0.21/24', cls=dualStackHost, v6Addr='1000::21/64' )
+ s22_host = self.addHost( 'h22', ip='10.1.0.22/24', cls=dualStackHost, v6Addr='1000::22/64' )
+ s23_host = self.addHost( 'h23', ip='10.1.0.23/24', cls=dualStackHost, v6Addr='1000::23/64' )
+ s24_host = self.addHost( 'h24', ip='10.1.0.24/24', cls=dualStackHost, v6Addr='1000::24/64' )
+ s25_host = self.addHost( 'h25', ip='10.1.0.25/24', cls=dualStackHost, v6Addr='1000::25/64' )
+
+ # add edges between switch and corresponding host
+ self.addLink( s1 , s1_host )
+ self.addLink( s2 , s2_host )
+ self.addLink( s3 , s3_host )
+ self.addLink( s4 , s4_host )
+ self.addLink( s5 , s5_host )
+ self.addLink( s6 , s6_host )
+ self.addLink( s7 , s7_host )
+ self.addLink( s8 , s8_host )
+ self.addLink( s9 , s9_host )
+ self.addLink( s10 , s10_host )
+ self.addLink( s11 , s11_host )
+ self.addLink( s12 , s12_host )
+ self.addLink( s13 , s13_host )
+ self.addLink( s14 , s14_host )
+ self.addLink( s15 , s15_host )
+ self.addLink( s16 , s16_host )
+ self.addLink( s17 , s17_host )
+ self.addLink( s18 , s18_host )
+ self.addLink( s19 , s19_host )
+ self.addLink( s20 , s20_host )
+ self.addLink( s21 , s21_host )
+ self.addLink( s22 , s22_host )
+ self.addLink( s23 , s23_host )
+ self.addLink( s24 , s24_host )
+ self.addLink( s25 , s25_host )
+ self.addLink(s1, s2)
+ self.addLink(s1, s3)
+ self.addLink(s1, s4)
+ self.addLink(s1, s5)
+ self.addLink(s1, s6)
+ self.addLink(s1, s7)
+ self.addLink(s1, s8)
+ self.addLink(s1, s9)
+ self.addLink(s1, s10)
+ self.addLink(s1, s11)
+ self.addLink(s1, s12)
+ self.addLink(s1, s13)
+ self.addLink(s1, s14)
+ self.addLink(s1, s15)
+ self.addLink(s1, s16)
+ self.addLink(s1, s17)
+ self.addLink(s1, s18)
+ self.addLink(s1, s19)
+ self.addLink(s1, s20)
+ self.addLink(s1, s21)
+ self.addLink(s1, s22)
+ self.addLink(s1, s23)
+ self.addLink(s1, s24)
+ self.addLink(s1, s25)
+ self.addLink(s2, s3)
+ self.addLink(s2, s4)
+ self.addLink(s2, s5)
+ self.addLink(s2, s6)
+ self.addLink(s2, s7)
+ self.addLink(s2, s8)
+ self.addLink(s2, s9)
+ self.addLink(s2, s10)
+ self.addLink(s2, s11)
+ self.addLink(s2, s12)
+ self.addLink(s2, s13)
+ self.addLink(s2, s14)
+ self.addLink(s2, s15)
+ self.addLink(s2, s16)
+ self.addLink(s2, s17)
+ self.addLink(s2, s18)
+ self.addLink(s2, s19)
+ self.addLink(s2, s20)
+ self.addLink(s2, s21)
+ self.addLink(s2, s22)
+ self.addLink(s2, s23)
+ self.addLink(s2, s24)
+ self.addLink(s2, s25)
+ self.addLink(s3, s4)
+ self.addLink(s3, s5)
+ self.addLink(s3, s6)
+ self.addLink(s3, s7)
+ self.addLink(s3, s8)
+ self.addLink(s3, s9)
+ self.addLink(s3, s10)
+ self.addLink(s3, s11)
+ self.addLink(s3, s12)
+ self.addLink(s3, s13)
+ self.addLink(s3, s14)
+ self.addLink(s3, s15)
+ self.addLink(s3, s16)
+ self.addLink(s3, s17)
+ self.addLink(s3, s18)
+ self.addLink(s3, s19)
+ self.addLink(s3, s20)
+ self.addLink(s3, s21)
+ self.addLink(s3, s22)
+ self.addLink(s3, s23)
+ self.addLink(s3, s24)
+ self.addLink(s3, s25)
+ self.addLink(s4, s5)
+ self.addLink(s4, s6)
+ self.addLink(s4, s7)
+ self.addLink(s4, s8)
+ self.addLink(s4, s9)
+ self.addLink(s4, s10)
+ self.addLink(s4, s11)
+ self.addLink(s4, s12)
+ self.addLink(s4, s13)
+ self.addLink(s4, s14)
+ self.addLink(s4, s15)
+ self.addLink(s4, s16)
+ self.addLink(s4, s17)
+ self.addLink(s4, s18)
+ self.addLink(s4, s19)
+ self.addLink(s4, s20)
+ self.addLink(s4, s21)
+ self.addLink(s4, s22)
+ self.addLink(s4, s23)
+ self.addLink(s4, s24)
+ self.addLink(s4, s25)
+ self.addLink(s5, s6)
+ self.addLink(s5, s7)
+ self.addLink(s5, s8)
+ self.addLink(s5, s9)
+ self.addLink(s5, s10)
+ self.addLink(s5, s11)
+ self.addLink(s5, s12)
+ self.addLink(s5, s13)
+ self.addLink(s5, s14)
+ self.addLink(s5, s15)
+ self.addLink(s5, s16)
+ self.addLink(s5, s17)
+ self.addLink(s5, s18)
+ self.addLink(s5, s19)
+ self.addLink(s5, s20)
+ self.addLink(s5, s21)
+ self.addLink(s5, s22)
+ self.addLink(s5, s23)
+ self.addLink(s5, s24)
+ self.addLink(s5, s25)
+ self.addLink(s6, s7)
+ self.addLink(s6, s8)
+ self.addLink(s6, s9)
+ self.addLink(s6, s10)
+ self.addLink(s6, s11)
+ self.addLink(s6, s12)
+ self.addLink(s6, s13)
+ self.addLink(s6, s14)
+ self.addLink(s6, s15)
+ self.addLink(s6, s16)
+ self.addLink(s6, s17)
+ self.addLink(s6, s18)
+ self.addLink(s6, s19)
+ self.addLink(s6, s20)
+ self.addLink(s6, s21)
+ self.addLink(s6, s22)
+ self.addLink(s6, s23)
+ self.addLink(s6, s24)
+ self.addLink(s6, s25)
+ self.addLink(s7, s8)
+ self.addLink(s7, s9)
+ self.addLink(s7, s10)
+ self.addLink(s7, s11)
+ self.addLink(s7, s12)
+ self.addLink(s7, s13)
+ self.addLink(s7, s14)
+ self.addLink(s7, s15)
+ self.addLink(s7, s16)
+ self.addLink(s7, s17)
+ self.addLink(s7, s18)
+ self.addLink(s7, s19)
+ self.addLink(s7, s20)
+ self.addLink(s7, s21)
+ self.addLink(s7, s22)
+ self.addLink(s7, s23)
+ self.addLink(s7, s24)
+ self.addLink(s7, s25)
+ self.addLink(s8, s9)
+ self.addLink(s8, s10)
+ self.addLink(s8, s11)
+ self.addLink(s8, s12)
+ self.addLink(s8, s13)
+ self.addLink(s8, s14)
+ self.addLink(s8, s15)
+ self.addLink(s8, s16)
+ self.addLink(s8, s17)
+ self.addLink(s8, s18)
+ self.addLink(s8, s19)
+ self.addLink(s8, s20)
+ self.addLink(s8, s21)
+ self.addLink(s8, s22)
+ self.addLink(s8, s23)
+ self.addLink(s8, s24)
+ self.addLink(s8, s25)
+ self.addLink(s9, s10)
+ self.addLink(s9, s11)
+ self.addLink(s9, s12)
+ self.addLink(s9, s13)
+ self.addLink(s9, s14)
+ self.addLink(s9, s15)
+ self.addLink(s9, s16)
+ self.addLink(s9, s17)
+ self.addLink(s9, s18)
+ self.addLink(s9, s19)
+ self.addLink(s9, s20)
+ self.addLink(s9, s21)
+ self.addLink(s9, s22)
+ self.addLink(s9, s23)
+ self.addLink(s9, s24)
+ self.addLink(s9, s25)
+ self.addLink(s10, s11)
+ self.addLink(s10, s12)
+ self.addLink(s10, s13)
+ self.addLink(s10, s14)
+ self.addLink(s10, s15)
+ self.addLink(s10, s16)
+ self.addLink(s10, s17)
+ self.addLink(s10, s18)
+ self.addLink(s10, s19)
+ self.addLink(s10, s20)
+ self.addLink(s10, s21)
+ self.addLink(s10, s22)
+ self.addLink(s10, s23)
+ self.addLink(s10, s24)
+ self.addLink(s10, s25)
+ self.addLink(s11, s12)
+ self.addLink(s11, s13)
+ self.addLink(s11, s14)
+ self.addLink(s11, s15)
+ self.addLink(s11, s16)
+ self.addLink(s11, s17)
+ self.addLink(s11, s18)
+ self.addLink(s11, s19)
+ self.addLink(s11, s20)
+ self.addLink(s11, s21)
+ self.addLink(s11, s22)
+ self.addLink(s11, s23)
+ self.addLink(s11, s24)
+ self.addLink(s11, s25)
+ self.addLink(s12, s13)
+ self.addLink(s12, s14)
+ self.addLink(s12, s15)
+ self.addLink(s12, s16)
+ self.addLink(s12, s17)
+ self.addLink(s12, s18)
+ self.addLink(s12, s19)
+ self.addLink(s12, s20)
+ self.addLink(s12, s21)
+ self.addLink(s12, s22)
+ self.addLink(s12, s23)
+ self.addLink(s12, s24)
+ self.addLink(s12, s25)
+ self.addLink(s13, s14)
+ self.addLink(s13, s15)
+ self.addLink(s13, s16)
+ self.addLink(s13, s17)
+ self.addLink(s13, s18)
+ self.addLink(s13, s19)
+ self.addLink(s13, s20)
+ self.addLink(s13, s21)
+ self.addLink(s13, s22)
+ self.addLink(s13, s23)
+ self.addLink(s13, s24)
+ self.addLink(s13, s25)
+ self.addLink(s14, s15)
+ self.addLink(s14, s16)
+ self.addLink(s14, s17)
+ self.addLink(s14, s18)
+ self.addLink(s14, s19)
+ self.addLink(s14, s20)
+ self.addLink(s14, s21)
+ self.addLink(s14, s22)
+ self.addLink(s14, s23)
+ self.addLink(s14, s24)
+ self.addLink(s14, s25)
+ self.addLink(s15, s16)
+ self.addLink(s15, s17)
+ self.addLink(s15, s18)
+ self.addLink(s15, s19)
+ self.addLink(s15, s20)
+ self.addLink(s15, s21)
+ self.addLink(s15, s22)
+ self.addLink(s15, s23)
+ self.addLink(s15, s24)
+ self.addLink(s15, s25)
+ self.addLink(s16, s17)
+ self.addLink(s16, s18)
+ self.addLink(s16, s19)
+ self.addLink(s16, s20)
+ self.addLink(s16, s21)
+ self.addLink(s16, s22)
+ self.addLink(s16, s23)
+ self.addLink(s16, s24)
+ self.addLink(s16, s25)
+ self.addLink(s17, s18)
+ self.addLink(s17, s19)
+ self.addLink(s17, s20)
+ self.addLink(s17, s21)
+ self.addLink(s17, s22)
+ self.addLink(s17, s23)
+ self.addLink(s17, s24)
+ self.addLink(s17, s25)
+ self.addLink(s18, s19)
+ self.addLink(s18, s20)
+ self.addLink(s18, s21)
+ self.addLink(s18, s22)
+ self.addLink(s18, s23)
+ self.addLink(s18, s24)
+ self.addLink(s18, s25)
+ self.addLink(s19, s20)
+ self.addLink(s19, s21)
+ self.addLink(s19, s22)
+ self.addLink(s19, s23)
+ self.addLink(s19, s24)
+ self.addLink(s19, s25)
+ self.addLink(s20, s21)
+ self.addLink(s20, s22)
+ self.addLink(s20, s23)
+ self.addLink(s20, s24)
+ self.addLink(s20, s25)
+ self.addLink(s21, s22)
+ self.addLink(s21, s23)
+ self.addLink(s21, s24)
+ self.addLink(s21, s25)
+ self.addLink(s22, s23)
+ self.addLink(s22, s24)
+ self.addLink(s22, s25)
+ self.addLink(s23, s24)
+ self.addLink(s23, s25)
+ self.addLink(s24, s25)
+
+topos = { 'chordal': ( lambda: chordalTopo() ) }
+
+# HERE THE CODE DEFINITION OF THE TOPOLOGY ENDS
+
+def setupNetwork():
+ "Create network"
+ topo = chordalTopo()
+ #if controller_ip == '':
+ #controller_ip = '10.0.2.2';
+ # controller_ip = '127.0.0.1';
+ network = Mininet(topo=topo, switch=OVSSwitch, autoSetMacs=True, controller=None)
+ network.start()
+ CLI( network )
+ network.stop()
+
+if __name__ == '__main__':
+ setLogLevel('info')
+ #setLogLevel('debug')
+ setupNetwork()
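
setupNetwork() above starts the network with controller=None, leaving the switches unattached. When the topology is driven by an external controller such as ONOS, the RemoteController class these files already import can be attached explicitly; a sketch under the assumption of a single controller at a placeholder address (adjust the IP and OpenFlow port to the actual deployment):

from mininet.net import Mininet
from mininet.node import RemoteController, OVSSwitch

def setupNetworkWithController( ctrlIp='127.0.0.1', ctrlPort=6653 ):
    "Variant of setupNetwork() that attaches the switches to a remote controller."
    # chordalTopo is the class defined in the file above
    net = Mininet( topo=chordalTopo(), switch=OVSSwitch,
                   autoSetMacs=True, controller=None )
    # placeholder address; ONOS commonly listens on 6653 (6633 on older releases)
    net.addController( 'c0', controller=RemoteController, ip=ctrlIp, port=ctrlPort )
    net.start()
    return net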
diff --git a/TestON/tests/CHOTestMonkey/dependencies/topologies/topoRingIpv6.py b/TestON/tests/CHOTestMonkey/dependencies/topologies/topoRingIpv6.py
new file mode 100755
index 0000000..4b81223
--- /dev/null
+++ b/TestON/tests/CHOTestMonkey/dependencies/topologies/topoRingIpv6.py
@@ -0,0 +1,93 @@
+#!/usr/bin/python
+"""
+"""
+from mininet.topo import Topo
+from mininet.net import Mininet
+from mininet.node import Host, RemoteController
+from mininet.node import Node
+from mininet.node import CPULimitedHost
+from mininet.link import TCLink
+from mininet.cli import CLI
+from mininet.log import setLogLevel
+from mininet.util import dumpNodeConnections
+from mininet.node import ( UserSwitch, OVSSwitch, IVSSwitch )
+
+class dualStackHost( Host ):
+ def config( self, v6Addr='1000::1/64', **params ):
+ r = super( dualStackHost, self ).config( **params )
+ intf = self.defaultIntf()
+ self.cmd( 'ip -6 addr add %s dev %s' % ( v6Addr, intf ) )
+ return r
+
+class ringTopo( Topo ):
+
+ def __init__( self, **opts ):
+ "Create a topology."
+
+ # Initialize Topology
+ Topo.__init__( self, **opts )
+
+ s1 = self.addSwitch( 's1' )
+ s2 = self.addSwitch( 's2' )
+ s3 = self.addSwitch( 's3' )
+ s4 = self.addSwitch( 's4' )
+ s5 = self.addSwitch( 's5' )
+ s6 = self.addSwitch( 's6' )
+ s7 = self.addSwitch( 's7' )
+ s8 = self.addSwitch( 's8' )
+ s9 = self.addSwitch( 's9' )
+ s10 = self.addSwitch( 's10' )
+
+ # ... and now hosts
+ s1_host = self.addHost( 'h1', ip='10.1.0.1/24', cls=dualStackHost, v6Addr='1000::1/64' )
+ s2_host = self.addHost( 'h2', ip='10.1.0.2/24', cls=dualStackHost, v6Addr='1000::2/64' )
+ s3_host = self.addHost( 'h3', ip='10.1.0.3/24', cls=dualStackHost, v6Addr='1000::3/64' )
+ s4_host = self.addHost( 'h4', ip='10.1.0.4/24', cls=dualStackHost, v6Addr='1000::4/64' )
+ s5_host = self.addHost( 'h5', ip='10.1.0.5/24', cls=dualStackHost, v6Addr='1000::5/64' )
+ s6_host = self.addHost( 'h6', ip='10.1.0.6/24', cls=dualStackHost, v6Addr='1000::6/64' )
+ s7_host = self.addHost( 'h7', ip='10.1.0.7/24', cls=dualStackHost, v6Addr='1000::7/64' )
+ s8_host = self.addHost( 'h8', ip='10.1.0.8/24', cls=dualStackHost, v6Addr='1000::8/64' )
+ s9_host = self.addHost( 'h9', ip='10.1.0.9/24', cls=dualStackHost, v6Addr='1000::9/64' )
+ s10_host = self.addHost( 'h10', ip='10.1.0.10/24', cls=dualStackHost, v6Addr='1000::10/64' )
+
+ # add edges between switch and corresponding host
+ self.addLink( s1 , s1_host )
+ self.addLink( s2 , s2_host )
+ self.addLink( s3 , s3_host )
+ self.addLink( s4 , s4_host )
+ self.addLink( s5 , s5_host )
+ self.addLink( s6 , s6_host )
+ self.addLink( s7 , s7_host )
+ self.addLink( s8 , s8_host )
+ self.addLink( s9 , s9_host )
+ self.addLink( s10 , s10_host )
+ self.addLink(s1, s2)
+ self.addLink(s2, s3)
+ self.addLink(s3, s4)
+ self.addLink(s4, s5)
+ self.addLink(s5, s6)
+ self.addLink(s6, s7)
+ self.addLink(s7, s8)
+ self.addLink(s8, s9)
+ self.addLink(s9, s10)
+ self.addLink(s10, s1)
+
+topos = { 'ring': ( lambda: ringTopo() ) }
+
+# HERE THE CODE DEFINITION OF THE TOPOLOGY ENDS
+
+def setupNetwork():
+ "Create network"
+ topo = ringTopo()
+ #if controller_ip == '':
+ #controller_ip = '10.0.2.2';
+ # controller_ip = '127.0.0.1';
+ network = Mininet(topo=topo, switch=OVSSwitch, autoSetMacs=True, controller=None)
+ network.start()
+ CLI( network )
+ network.stop()
+
+if __name__ == '__main__':
+ setLogLevel('info')
+ #setLogLevel('debug')
+ setupNetwork()
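
The ring links above follow the pattern s_i to s_(i+1), with s10 closing back to s1, so a modulo index expresses the same wiring. A minimal sketch that reuses the dualStackHost class defined in the file above (the loop-based class itself is hypothetical):

from mininet.topo import Topo

class ringLoopTopo( Topo ):
    "Loop-based equivalent of the 10-switch ring above."
    def __init__( self, n=10, **opts ):
        Topo.__init__( self, **opts )
        switches = [ self.addSwitch( 's%d' % i ) for i in range( 1, n + 1 ) ]
        for i, sw in enumerate( switches, 1 ):
            # dualStackHost is the helper class from topoRingIpv6.py above
            host = self.addHost( 'h%d' % i, ip='10.1.0.%d/24' % i,
                                 cls=dualStackHost, v6Addr='1000::%d/64' % i )
            self.addLink( sw, host )
        # ring: s1-s2, s2-s3, ..., s9-s10, and s10 back to s1
        for i in range( n ):
            self.addLink( switches[ i ], switches[ ( i + 1 ) % n ] )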
diff --git a/TestON/tests/CHOTestMonkey/dependencies/topologies/topoSpine.py b/TestON/tests/CHOTestMonkey/dependencies/topologies/topoSpine.py
new file mode 100755
index 0000000..5787d93
--- /dev/null
+++ b/TestON/tests/CHOTestMonkey/dependencies/topologies/topoSpine.py
@@ -0,0 +1,434 @@
+#!/usr/bin/python
+
+from mininet.topo import Topo
+from mininet.net import Mininet
+from mininet.node import Controller, RemoteController, OVSController
+from mininet.node import CPULimitedHost, Host, Node
+from mininet.cli import CLI
+from mininet.log import setLogLevel, info
+from mininet.link import TCLink, Intf
+from subprocess import call
+from mininet.util import dumpNodeConnections
+from mininet.node import ( UserSwitch, OVSSwitch, IVSSwitch )
+
+class spineTopo( Topo ):
+
+ def __init__( self, **opts ):
+ "Create a topology."
+
+ # Initialize Topology
+ Topo.__init__( self, **opts )
+
+ # add nodes, Leaf switches
+ s1 = self.addSwitch( 's1' )
+ s2 = self.addSwitch( 's2' )
+ s3 = self.addSwitch( 's3' )
+ s4 = self.addSwitch( 's4' )
+ s5 = self.addSwitch( 's5' )
+ s6 = self.addSwitch( 's6' )
+ s7 = self.addSwitch( 's7' )
+ s8 = self.addSwitch( 's8' )
+ s9 = self.addSwitch( 's9' )
+ s10 = self.addSwitch( 's10' )
+ s11 = self.addSwitch( 's11' )
+ s12 = self.addSwitch( 's12' )
+ s13 = self.addSwitch( 's13' )
+ s14 = self.addSwitch( 's14' )
+
+ # add nodes, Spine switches
+ s15 = self.addSwitch( 's15' )
+ s16 = self.addSwitch( 's16' )
+ s17 = self.addSwitch( 's17' )
+ s18 = self.addSwitch( 's18' )
+ s19 = self.addSwitch( 's19' )
+ s20 = self.addSwitch( 's20' )
+ s21 = self.addSwitch( 's21' )
+ s22 = self.addSwitch( 's22' )
+ s23 = self.addSwitch( 's23' )
+ s24 = self.addSwitch( 's24' )
+ s25 = self.addSwitch( 's25' )
+ s26 = self.addSwitch( 's26' )
+ s27 = self.addSwitch( 's27' )
+ s28 = self.addSwitch( 's28' )
+ s29 = self.addSwitch( 's29' )
+ s30 = self.addSwitch( 's30' )
+ s31 = self.addSwitch( 's31' )
+ s32 = self.addSwitch( 's32' )
+ s33 = self.addSwitch( 's33' )
+ s34 = self.addSwitch( 's34' )
+ s35 = self.addSwitch( 's35' )
+ s36 = self.addSwitch( 's36' )
+ s37 = self.addSwitch( 's37' )
+ s38 = self.addSwitch( 's38' )
+ s39 = self.addSwitch( 's39' )
+ s40 = self.addSwitch( 's40' )
+ s41 = self.addSwitch( 's41' )
+ s42 = self.addSwitch( 's42' )
+ s43 = self.addSwitch( 's43' )
+ s44 = self.addSwitch( 's44' )
+ s45 = self.addSwitch( 's45' )
+ s46 = self.addSwitch( 's46' )
+ s47 = self.addSwitch( 's47' )
+ s48 = self.addSwitch( 's48' )
+ s49 = self.addSwitch( 's49' )
+ s50 = self.addSwitch( 's50' )
+ s51 = self.addSwitch( 's51' )
+ s52 = self.addSwitch( 's52' )
+ s53 = self.addSwitch( 's53' )
+ s54 = self.addSwitch( 's54' )
+ s55 = self.addSwitch( 's55' )
+ s56 = self.addSwitch( 's56' )
+ s57 = self.addSwitch( 's57' )
+ s58 = self.addSwitch( 's58' )
+ s59 = self.addSwitch( 's59' )
+ s60 = self.addSwitch( 's60' )
+ s61 = self.addSwitch( 's61' )
+ s62 = self.addSwitch( 's62' )
+ s63 = self.addSwitch( 's63' )
+ s64 = self.addSwitch( 's64' )
+ s65 = self.addSwitch( 's65' )
+ s66 = self.addSwitch( 's66' )
+ s67 = self.addSwitch( 's67' )
+ s68 = self.addSwitch( 's68' )
+ s69 = self.addSwitch( 's69' )
+ s70 = self.addSwitch( 's70' )
+ s71 = self.addSwitch( 's71' )
+ s72 = self.addSwitch( 's72' )
+ s73 = self.addSwitch( 's73' )
+ s74 = self.addSwitch( 's74' )
+ s75 = self.addSwitch( 's75' )
+ s76 = self.addSwitch( 's76' )
+ s77 = self.addSwitch( 's77' )
+ s78 = self.addSwitch( 's78' )
+
+ # ... and now hosts
+ #s1_host = self.addHost( 'h1' )
+ #s2_host = self.addHost( 'h2' )
+ #s3_host = self.addHost( 'h3' )
+ #s4_host = self.addHost( 'h4' )
+ #s5_host = self.addHost( 'h5' )
+ #s6_host = self.addHost( 'h6' )
+ #s7_host = self.addHost( 'h7' )
+ #s8_host = self.addHost( 'h8' )
+ #s9_host = self.addHost( 'h9' )
+ #s10_host = self.addHost( 'h10' )
+ s11_host = self.addHost( 'h11' )
+ s12_host = self.addHost( 'h12' )
+ s13_host = self.addHost( 'h13' )
+ s14_host = self.addHost( 'h14' )
+ s15_host = self.addHost( 'h15' )
+ s16_host = self.addHost( 'h16' )
+ s17_host = self.addHost( 'h17' )
+ s18_host = self.addHost( 'h18' )
+ s19_host = self.addHost( 'h19' )
+ s20_host = self.addHost( 'h20' )
+ s21_host = self.addHost( 'h21' )
+ s22_host = self.addHost( 'h22' )
+ s23_host = self.addHost( 'h23' )
+ s24_host = self.addHost( 'h24' )
+ s25_host = self.addHost( 'h25' )
+ s26_host = self.addHost( 'h26' )
+ s27_host = self.addHost( 'h27' )
+ s28_host = self.addHost( 'h28' )
+ s29_host = self.addHost( 'h29' )
+ s30_host = self.addHost( 'h30' )
+ s31_host = self.addHost( 'h31' )
+ s32_host = self.addHost( 'h32' )
+ s33_host = self.addHost( 'h33' )
+ s34_host = self.addHost( 'h34' )
+ s35_host = self.addHost( 'h35' )
+ s36_host = self.addHost( 'h36' )
+ s37_host = self.addHost( 'h37' )
+ s38_host = self.addHost( 'h38' )
+ s39_host = self.addHost( 'h39' )
+ s40_host = self.addHost( 'h40' )
+ s41_host = self.addHost( 'h41' )
+ s42_host = self.addHost( 'h42' )
+ s43_host = self.addHost( 'h43' )
+ s44_host = self.addHost( 'h44' )
+ s45_host = self.addHost( 'h45' )
+ s46_host = self.addHost( 'h46' )
+ s47_host = self.addHost( 'h47' )
+ s48_host = self.addHost( 'h48' )
+ s49_host = self.addHost( 'h49' )
+ s50_host = self.addHost( 'h50' )
+ s51_host = self.addHost( 'h51' )
+ s52_host = self.addHost( 'h52' )
+ s53_host = self.addHost( 'h53' )
+ s54_host = self.addHost( 'h54' )
+ s55_host = self.addHost( 'h55' )
+ s56_host = self.addHost( 'h56' )
+ s57_host = self.addHost( 'h57' )
+ s58_host = self.addHost( 'h58' )
+ s59_host = self.addHost( 'h59' )
+ s60_host = self.addHost( 'h60' )
+ s61_host = self.addHost( 'h61' )
+ s62_host = self.addHost( 'h62' )
+ s63_host = self.addHost( 'h63' )
+ s64_host = self.addHost( 'h64' )
+ s65_host = self.addHost( 'h65' )
+ s66_host = self.addHost( 'h66' )
+ s67_host = self.addHost( 'h67' )
+ s68_host = self.addHost( 'h68' )
+ s69_host = self.addHost( 'h69' )
+ s70_host = self.addHost( 'h70' )
+ s71_host = self.addHost( 'h71' )
+ s72_host = self.addHost( 'h72' )
+ s73_host = self.addHost( 'h73' )
+ s74_host = self.addHost( 'h74' )
+ s75_host = self.addHost( 'h75' )
+ s76_host = self.addHost( 'h76' )
+ s77_host = self.addHost( 'h77' )
+ s78_host = self.addHost( 'h78' )
+
+ # add edges between switch and corresponding host
+ #self.addLink( s1 , s1_host )
+ #self.addLink( s2 , s2_host )
+ #self.addLink( s3 , s3_host )
+ #self.addLink( s4 , s4_host )
+ #self.addLink( s5 , s5_host )
+ #self.addLink( s6 , s6_host )
+ #self.addLink( s7 , s7_host )
+ #self.addLink( s8 , s8_host )
+ #self.addLink( s9 , s9_host )
+ #self.addLink( s10 , s10_host )
+ self.addLink( s11 , s11_host )
+ self.addLink( s12 , s12_host )
+ self.addLink( s13 , s13_host )
+ self.addLink( s14 , s14_host )
+ self.addLink( s15 , s15_host )
+ self.addLink( s16 , s16_host )
+ self.addLink( s17 , s17_host )
+ self.addLink( s18 , s18_host )
+ self.addLink( s19 , s19_host )
+ self.addLink( s20 , s20_host )
+ self.addLink( s21 , s21_host )
+ self.addLink( s22 , s22_host )
+ self.addLink( s23 , s23_host )
+ self.addLink( s24 , s24_host )
+ self.addLink( s25 , s25_host )
+ self.addLink( s26 , s26_host )
+ self.addLink( s27 , s27_host )
+ self.addLink( s28 , s28_host )
+ self.addLink( s29 , s29_host )
+ self.addLink( s30 , s30_host )
+ self.addLink( s31 , s31_host )
+ self.addLink( s32 , s32_host )
+ self.addLink( s33 , s33_host )
+ self.addLink( s34 , s34_host )
+ self.addLink( s35 , s35_host )
+ self.addLink( s36 , s36_host )
+ self.addLink( s37 , s37_host )
+ self.addLink( s38 , s38_host )
+ self.addLink( s39 , s39_host )
+ self.addLink( s40 , s40_host )
+ self.addLink( s41 , s41_host )
+ self.addLink( s42 , s42_host )
+ self.addLink( s43 , s43_host )
+ self.addLink( s44 , s44_host )
+ self.addLink( s45 , s45_host )
+ self.addLink( s46 , s46_host )
+ self.addLink( s47 , s47_host )
+ self.addLink( s48 , s48_host )
+ self.addLink( s49 , s49_host )
+ self.addLink( s50 , s50_host )
+ self.addLink( s51 , s51_host )
+ self.addLink( s52 , s52_host )
+ self.addLink( s53 , s53_host )
+ self.addLink( s54 , s54_host )
+ self.addLink( s55 , s55_host )
+ self.addLink( s56 , s56_host )
+ self.addLink( s57 , s57_host )
+ self.addLink( s58 , s58_host )
+ self.addLink( s59 , s59_host )
+ self.addLink( s60 , s60_host )
+ self.addLink( s61 , s61_host )
+ self.addLink( s62 , s62_host )
+ self.addLink( s63 , s63_host )
+ self.addLink( s64 , s64_host )
+ self.addLink( s65 , s65_host )
+ self.addLink( s66 , s66_host )
+ self.addLink( s67 , s67_host )
+ self.addLink( s68 , s68_host )
+ self.addLink( s69 , s69_host )
+ self.addLink( s70 , s70_host )
+ self.addLink( s71 , s71_host )
+ self.addLink( s72 , s72_host )
+ self.addLink( s73 , s73_host )
+ self.addLink( s74 , s74_host )
+ self.addLink( s75 , s75_host )
+ self.addLink( s76 , s76_host )
+ self.addLink( s77 , s77_host )
+ self.addLink( s78 , s78_host )
+
+ #info( '*** Add Leaf links\n')
+ self.addLink(s1, s9)
+ self.addLink(s2, s10)
+ self.addLink(s3, s9)
+ self.addLink(s4, s10)
+ self.addLink(s5, s9)
+ self.addLink(s6, s10)
+ self.addLink(s7, s9)
+ self.addLink(s8, s10)
+ self.addLink(s9, s11)
+ self.addLink(s9, s12)
+ self.addLink(s10, s13)
+ self.addLink(s10, s14)
+ self.addLink(s11, s12)
+ self.addLink(s13, s14)
+
+ #info( '*** Add Spine-1 links\n')
+ self.addLink(s15, s1)
+ self.addLink(s15, s2)
+ self.addLink(s16, s1)
+ self.addLink(s16, s2)
+ self.addLink(s17, s1)
+ self.addLink(s17, s2)
+ self.addLink(s18, s1)
+ self.addLink(s18, s2)
+ self.addLink(s19, s1)
+ self.addLink(s19, s2)
+ self.addLink(s20, s1)
+ self.addLink(s20, s2)
+ self.addLink(s21, s1)
+ self.addLink(s21, s2)
+ self.addLink(s22, s1)
+ self.addLink(s22, s2)
+ self.addLink(s23, s1)
+ self.addLink(s23, s2)
+ self.addLink(s24, s1)
+ self.addLink(s24, s2)
+ self.addLink(s25, s1)
+ self.addLink(s25, s2)
+ self.addLink(s26, s1)
+ self.addLink(s26, s2)
+ self.addLink(s27, s1)
+ self.addLink(s27, s2)
+ self.addLink(s28, s1)
+ self.addLink(s28, s2)
+ self.addLink(s29, s1)
+ self.addLink(s29, s2)
+ self.addLink(s30, s1)
+ self.addLink(s30, s2)
+
+ #info( '*** Add Spine-2 links\n')
+ self.addLink(s31, s3)
+ self.addLink(s31, s4)
+ self.addLink(s32, s3)
+ self.addLink(s32, s4)
+ self.addLink(s33, s3)
+ self.addLink(s33, s4)
+ self.addLink(s34, s3)
+ self.addLink(s34, s4)
+ self.addLink(s35, s3)
+ self.addLink(s35, s4)
+ self.addLink(s36, s3)
+ self.addLink(s36, s4)
+ self.addLink(s37, s3)
+ self.addLink(s37, s4)
+ self.addLink(s38, s3)
+ self.addLink(s38, s4)
+ self.addLink(s39, s3)
+ self.addLink(s39, s4)
+ self.addLink(s40, s3)
+ self.addLink(s40, s4)
+ self.addLink(s41, s3)
+ self.addLink(s41, s4)
+ self.addLink(s42, s3)
+ self.addLink(s42, s4)
+ self.addLink(s43, s3)
+ self.addLink(s43, s4)
+ self.addLink(s44, s3)
+ self.addLink(s44, s4)
+ self.addLink(s45, s3)
+ self.addLink(s45, s4)
+ self.addLink(s46, s3)
+ self.addLink(s46, s4)
+
+ #info( '*** Add Spine-3 links\n')
+ self.addLink(s47, s5)
+ self.addLink(s47, s6)
+ self.addLink(s48, s5)
+ self.addLink(s48, s6)
+ self.addLink(s49, s5)
+ self.addLink(s49, s6)
+ self.addLink(s50, s5)
+ self.addLink(s50, s6)
+ self.addLink(s51, s5)
+ self.addLink(s51, s6)
+ self.addLink(s52, s5)
+ self.addLink(s52, s6)
+ self.addLink(s53, s5)
+ self.addLink(s53, s6)
+ self.addLink(s54, s5)
+ self.addLink(s54, s6)
+ self.addLink(s55, s5)
+ self.addLink(s55, s6)
+ self.addLink(s56, s5)
+ self.addLink(s56, s6)
+ self.addLink(s57, s5)
+ self.addLink(s57, s6)
+ self.addLink(s58, s5)
+ self.addLink(s58, s6)
+ self.addLink(s59, s5)
+ self.addLink(s59, s6)
+ self.addLink(s60, s5)
+ self.addLink(s60, s6)
+ self.addLink(s61, s5)
+ self.addLink(s61, s6)
+ self.addLink(s62, s5)
+ self.addLink(s62, s6)
+
+ #info( '*** Add Spine-4 links\n')
+ self.addLink(s63, s7)
+ self.addLink(s63, s8)
+ self.addLink(s64, s7)
+ self.addLink(s64, s8)
+ self.addLink(s65, s7)
+ self.addLink(s65, s8)
+ self.addLink(s66, s7)
+ self.addLink(s66, s8)
+ self.addLink(s67, s7)
+ self.addLink(s67, s8)
+ self.addLink(s68, s7)
+ self.addLink(s68, s8)
+ self.addLink(s69, s7)
+ self.addLink(s69, s8)
+ self.addLink(s70, s7)
+ self.addLink(s70, s8)
+ self.addLink(s71, s7)
+ self.addLink(s71, s8)
+ self.addLink(s72, s7)
+ self.addLink(s72, s8)
+ self.addLink(s73, s7)
+ self.addLink(s73, s8)
+ self.addLink(s74, s7)
+ self.addLink(s74, s8)
+ self.addLink(s75, s7)
+ self.addLink(s75, s8)
+ self.addLink(s76, s7)
+ self.addLink(s76, s8)
+ self.addLink(s77, s7)
+ self.addLink(s77, s8)
+ self.addLink(s78, s7)
+ self.addLink(s78, s8)
+
+topos = { 'spine': ( lambda: spineTopo() ) }
+
+# HERE THE CODE DEFINITION OF THE TOPOLOGY ENDS
+
+def setupNetwork():
+ "Create network"
+ topo = spineTopo()
+ #if controller_ip == '':
+ #controller_ip = '10.0.2.2';
+ # controller_ip = '127.0.0.1';
+ network = Mininet(topo=topo, switch=OVSSwitch, link=TCLink, autoSetMacs=True, controller=None)
+ network.start()
+ CLI( network )
+ network.stop()
+
+if __name__ == '__main__':
+ setLogLevel('info')
+ #setLogLevel('debug')
+ setupNetwork()
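
Each spine block in topoSpine.py repeats one pattern: sixteen consecutive switches are linked to a fixed pair of uplinks (s15-s30 to s1/s2, s31-s46 to s3/s4, s47-s62 to s5/s6, s63-s78 to s7/s8). A loop-based sketch of that fan-out, shown only to illustrate the pattern (addFanout and the switches list are hypothetical names, not part of this patch):

def addFanout( topo, switches, firstIdx, uplinkPair ):
    "Link switches[ firstIdx ] .. switches[ firstIdx + 15 ] to both uplink switches."
    for sw in switches[ firstIdx : firstIdx + 16 ]:
        for uplink in uplinkPair:
            topo.addLink( sw, uplink )

# usage inside __init__, assuming switches = [ s1, s2, ..., s78 ] in order:
# addFanout( self, switches, 14, ( switches[ 0 ], switches[ 1 ] ) )  # s15-s30 -> s1, s2
# addFanout( self, switches, 30, ( switches[ 2 ], switches[ 3 ] ) )  # s31-s46 -> s3, s4
# addFanout( self, switches, 46, ( switches[ 4 ], switches[ 5 ] ) )  # s47-s62 -> s5, s6
# addFanout( self, switches, 62, ( switches[ 6 ], switches[ 7 ] ) )  # s63-s78 -> s7, s8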
diff --git a/TestON/tests/CHOTestMonkey/dependencies/topologies/topoSpineIpv6.py b/TestON/tests/CHOTestMonkey/dependencies/topologies/topoSpineIpv6.py
new file mode 100755
index 0000000..3f35494
--- /dev/null
+++ b/TestON/tests/CHOTestMonkey/dependencies/topologies/topoSpineIpv6.py
@@ -0,0 +1,441 @@
+#!/usr/bin/python
+from mininet.topo import Topo
+from mininet.net import Mininet
+from mininet.node import Host, RemoteController
+from mininet.node import Node
+from mininet.node import CPULimitedHost
+from mininet.link import TCLink
+from mininet.cli import CLI
+from mininet.log import setLogLevel
+from mininet.util import dumpNodeConnections
+from mininet.node import ( UserSwitch, OVSSwitch, IVSSwitch )
+
+class dualStackHost( Host ):
+ def config( self, v6Addr='1000::1/64', **params ):
+ r = super( dualStackHost, self ).config( **params )
+ intf = self.defaultIntf()
+ self.cmd( 'ip -6 addr add %s dev %s' % ( v6Addr, intf ) )
+ return r
+
+class spineTopo( Topo ):
+
+ def __init__( self, **opts ):
+ "Create a topology."
+
+ # Initialize Topology
+ Topo.__init__( self, **opts )
+
+ # add nodes, Leaf switches
+ s1 = self.addSwitch( 's1' )
+ s2 = self.addSwitch( 's2' )
+ s3 = self.addSwitch( 's3' )
+ s4 = self.addSwitch( 's4' )
+ s5 = self.addSwitch( 's5' )
+ s6 = self.addSwitch( 's6' )
+ s7 = self.addSwitch( 's7' )
+ s8 = self.addSwitch( 's8' )
+ s9 = self.addSwitch( 's9' )
+ s10 = self.addSwitch( 's10' )
+ s11 = self.addSwitch( 's11' )
+ s12 = self.addSwitch( 's12' )
+ s13 = self.addSwitch( 's13' )
+ s14 = self.addSwitch( 's14' )
+
+ # add nodes, Spine switches
+ s15 = self.addSwitch( 's15' )
+ s16 = self.addSwitch( 's16' )
+ s17 = self.addSwitch( 's17' )
+ s18 = self.addSwitch( 's18' )
+ s19 = self.addSwitch( 's19' )
+ s20 = self.addSwitch( 's20' )
+ s21 = self.addSwitch( 's21' )
+ s22 = self.addSwitch( 's22' )
+ s23 = self.addSwitch( 's23' )
+ s24 = self.addSwitch( 's24' )
+ s25 = self.addSwitch( 's25' )
+ s26 = self.addSwitch( 's26' )
+ s27 = self.addSwitch( 's27' )
+ s28 = self.addSwitch( 's28' )
+ s29 = self.addSwitch( 's29' )
+ s30 = self.addSwitch( 's30' )
+ s31 = self.addSwitch( 's31' )
+ s32 = self.addSwitch( 's32' )
+ s33 = self.addSwitch( 's33' )
+ s34 = self.addSwitch( 's34' )
+ s35 = self.addSwitch( 's35' )
+ s36 = self.addSwitch( 's36' )
+ s37 = self.addSwitch( 's37' )
+ s38 = self.addSwitch( 's38' )
+ s39 = self.addSwitch( 's39' )
+ s40 = self.addSwitch( 's40' )
+ s41 = self.addSwitch( 's41' )
+ s42 = self.addSwitch( 's42' )
+ s43 = self.addSwitch( 's43' )
+ s44 = self.addSwitch( 's44' )
+ s45 = self.addSwitch( 's45' )
+ s46 = self.addSwitch( 's46' )
+ s47 = self.addSwitch( 's47' )
+ s48 = self.addSwitch( 's48' )
+ s49 = self.addSwitch( 's49' )
+ s50 = self.addSwitch( 's50' )
+ s51 = self.addSwitch( 's51' )
+ s52 = self.addSwitch( 's52' )
+ s53 = self.addSwitch( 's53' )
+ s54 = self.addSwitch( 's54' )
+ s55 = self.addSwitch( 's55' )
+ s56 = self.addSwitch( 's56' )
+ s57 = self.addSwitch( 's57' )
+ s58 = self.addSwitch( 's58' )
+ s59 = self.addSwitch( 's59' )
+ s60 = self.addSwitch( 's60' )
+ s61 = self.addSwitch( 's61' )
+ s62 = self.addSwitch( 's62' )
+ s63 = self.addSwitch( 's63' )
+ s64 = self.addSwitch( 's64' )
+ s65 = self.addSwitch( 's65' )
+ s66 = self.addSwitch( 's66' )
+ s67 = self.addSwitch( 's67' )
+ s68 = self.addSwitch( 's68' )
+ s69 = self.addSwitch( 's69' )
+ s70 = self.addSwitch( 's70' )
+ s71 = self.addSwitch( 's71' )
+ s72 = self.addSwitch( 's72' )
+ s73 = self.addSwitch( 's73' )
+ s74 = self.addSwitch( 's74' )
+ s75 = self.addSwitch( 's75' )
+ s76 = self.addSwitch( 's76' )
+ s77 = self.addSwitch( 's77' )
+ s78 = self.addSwitch( 's78' )
+
+
+ # ... and now hosts
+ # s1_host = self.addHost( 'h1', ip='10.1.0.1/24', cls=dualStackHost, v6Addr='1000::1/64' )
+ # s2_host = self.addHost( 'h2', ip='10.1.0.2/24', cls=dualStackHost, v6Addr='1000::2/64' )
+ # s3_host = self.addHost( 'h3', ip='10.1.0.3/24', cls=dualStackHost, v6Addr='1000::3/64' )
+ # s4_host = self.addHost( 'h4', ip='10.1.0.4/24', cls=dualStackHost, v6Addr='1000::4/64' )
+ # s5_host = self.addHost( 'h5', ip='10.1.0.5/24', cls=dualStackHost, v6Addr='1000::5/64' )
+ # s6_host = self.addHost( 'h6', ip='10.1.0.6/24', cls=dualStackHost, v6Addr='1000::6/64' )
+ # s7_host = self.addHost( 'h7', ip='10.1.0.7/24', cls=dualStackHost, v6Addr='1000::7/64' )
+ # s8_host = self.addHost( 'h8', ip='10.1.0.8/24', cls=dualStackHost, v6Addr='1000::8/64' )
+ # s9_host = self.addHost( 'h9', ip='10.1.0.9/24', cls=dualStackHost, v6Addr='1000::9/64' )
+ # s10_host = self.addHost( 'h10', ip='10.1.0.10/24', cls=dualStackHost, v6Addr='1000::10/64' )
+ s11_host = self.addHost( 'h11', ip='10.1.0.11/24', cls=dualStackHost, v6Addr='1000::11/64' )
+ s12_host = self.addHost( 'h12', ip='10.1.0.12/24', cls=dualStackHost, v6Addr='1000::12/64' )
+ s13_host = self.addHost( 'h13', ip='10.1.0.13/24', cls=dualStackHost, v6Addr='1000::13/64' )
+ s14_host = self.addHost( 'h14', ip='10.1.0.14/24', cls=dualStackHost, v6Addr='1000::14/64' )
+ s15_host = self.addHost( 'h15', ip='10.1.0.15/24', cls=dualStackHost, v6Addr='1000::15/64' )
+ s16_host = self.addHost( 'h16', ip='10.1.0.16/24', cls=dualStackHost, v6Addr='1000::16/64' )
+ s17_host = self.addHost( 'h17', ip='10.1.0.17/24', cls=dualStackHost, v6Addr='1000::17/64' )
+ s18_host = self.addHost( 'h18', ip='10.1.0.18/24', cls=dualStackHost, v6Addr='1000::18/64' )
+ s19_host = self.addHost( 'h19', ip='10.1.0.19/24', cls=dualStackHost, v6Addr='1000::19/64' )
+ s20_host = self.addHost( 'h20', ip='10.1.0.20/24', cls=dualStackHost, v6Addr='1000::20/64' )
+ s21_host = self.addHost( 'h21', ip='10.1.0.21/24', cls=dualStackHost, v6Addr='1000::21/64' )
+ s22_host = self.addHost( 'h22', ip='10.1.0.22/24', cls=dualStackHost, v6Addr='1000::22/64' )
+ s23_host = self.addHost( 'h23', ip='10.1.0.23/24', cls=dualStackHost, v6Addr='1000::23/64' )
+ s24_host = self.addHost( 'h24', ip='10.1.0.24/24', cls=dualStackHost, v6Addr='1000::24/64' )
+ s25_host = self.addHost( 'h25', ip='10.1.0.25/24', cls=dualStackHost, v6Addr='1000::25/64' )
+ s26_host = self.addHost( 'h26', ip='10.1.0.26/24', cls=dualStackHost, v6Addr='1000::26/64' )
+ s27_host = self.addHost( 'h27', ip='10.1.0.27/24', cls=dualStackHost, v6Addr='1000::27/64' )
+ s28_host = self.addHost( 'h28', ip='10.1.0.28/24', cls=dualStackHost, v6Addr='1000::28/64' )
+ s29_host = self.addHost( 'h29', ip='10.1.0.29/24', cls=dualStackHost, v6Addr='1000::29/64' )
+ s30_host = self.addHost( 'h30', ip='10.1.0.30/24', cls=dualStackHost, v6Addr='1000::30/64' )
+ s31_host = self.addHost( 'h31', ip='10.1.0.31/24', cls=dualStackHost, v6Addr='1000::31/64' )
+ s32_host = self.addHost( 'h32', ip='10.1.0.32/24', cls=dualStackHost, v6Addr='1000::32/64' )
+ s33_host = self.addHost( 'h33', ip='10.1.0.33/24', cls=dualStackHost, v6Addr='1000::33/64' )
+ s34_host = self.addHost( 'h34', ip='10.1.0.34/24', cls=dualStackHost, v6Addr='1000::34/64' )
+ s35_host = self.addHost( 'h35', ip='10.1.0.35/24', cls=dualStackHost, v6Addr='1000::35/64' )
+ s36_host = self.addHost( 'h36', ip='10.1.0.36/24', cls=dualStackHost, v6Addr='1000::36/64' )
+ s37_host = self.addHost( 'h37', ip='10.1.0.37/24', cls=dualStackHost, v6Addr='1000::37/64' )
+ s38_host = self.addHost( 'h38', ip='10.1.0.38/24', cls=dualStackHost, v6Addr='1000::38/64' )
+ s39_host = self.addHost( 'h39', ip='10.1.0.39/24', cls=dualStackHost, v6Addr='1000::39/64' )
+ s40_host = self.addHost( 'h40', ip='10.1.0.40/24', cls=dualStackHost, v6Addr='1000::40/64' )
+ s41_host = self.addHost( 'h41', ip='10.1.0.41/24', cls=dualStackHost, v6Addr='1000::41/64' )
+ s42_host = self.addHost( 'h42', ip='10.1.0.42/24', cls=dualStackHost, v6Addr='1000::42/64' )
+ s43_host = self.addHost( 'h43', ip='10.1.0.43/24', cls=dualStackHost, v6Addr='1000::43/64' )
+ s44_host = self.addHost( 'h44', ip='10.1.0.44/24', cls=dualStackHost, v6Addr='1000::44/64' )
+ s45_host = self.addHost( 'h45', ip='10.1.0.45/24', cls=dualStackHost, v6Addr='1000::45/64' )
+ s46_host = self.addHost( 'h46', ip='10.1.0.46/24', cls=dualStackHost, v6Addr='1000::46/64' )
+ s47_host = self.addHost( 'h47', ip='10.1.0.47/24', cls=dualStackHost, v6Addr='1000::47/64' )
+ s48_host = self.addHost( 'h48', ip='10.1.0.48/24', cls=dualStackHost, v6Addr='1000::48/64' )
+ s49_host = self.addHost( 'h49', ip='10.1.0.49/24', cls=dualStackHost, v6Addr='1000::49/64' )
+ s50_host = self.addHost( 'h50', ip='10.1.0.50/24', cls=dualStackHost, v6Addr='1000::50/64' )
+ s51_host = self.addHost( 'h51', ip='10.1.0.51/24', cls=dualStackHost, v6Addr='1000::51/64' )
+ s52_host = self.addHost( 'h52', ip='10.1.0.52/24', cls=dualStackHost, v6Addr='1000::52/64' )
+ s53_host = self.addHost( 'h53', ip='10.1.0.53/24', cls=dualStackHost, v6Addr='1000::53/64' )
+ s54_host = self.addHost( 'h54', ip='10.1.0.54/24', cls=dualStackHost, v6Addr='1000::54/64' )
+ s55_host = self.addHost( 'h55', ip='10.1.0.55/24', cls=dualStackHost, v6Addr='1000::55/64' )
+ s56_host = self.addHost( 'h56', ip='10.1.0.56/24', cls=dualStackHost, v6Addr='1000::56/64' )
+ s57_host = self.addHost( 'h57', ip='10.1.0.57/24', cls=dualStackHost, v6Addr='1000::57/64' )
+ s58_host = self.addHost( 'h58', ip='10.1.0.58/24', cls=dualStackHost, v6Addr='1000::58/64' )
+ s59_host = self.addHost( 'h59', ip='10.1.0.59/24', cls=dualStackHost, v6Addr='1000::59/64' )
+ s60_host = self.addHost( 'h60', ip='10.1.0.60/24', cls=dualStackHost, v6Addr='1000::60/64' )
+ s61_host = self.addHost( 'h61', ip='10.1.0.61/24', cls=dualStackHost, v6Addr='1000::61/64' )
+ s62_host = self.addHost( 'h62', ip='10.1.0.62/24', cls=dualStackHost, v6Addr='1000::62/64' )
+ s63_host = self.addHost( 'h63', ip='10.1.0.63/24', cls=dualStackHost, v6Addr='1000::63/64' )
+ s64_host = self.addHost( 'h64', ip='10.1.0.64/24', cls=dualStackHost, v6Addr='1000::64/64' )
+ s65_host = self.addHost( 'h65', ip='10.1.0.65/24', cls=dualStackHost, v6Addr='1000::65/64' )
+ s66_host = self.addHost( 'h66', ip='10.1.0.66/24', cls=dualStackHost, v6Addr='1000::66/64' )
+ s67_host = self.addHost( 'h67', ip='10.1.0.67/24', cls=dualStackHost, v6Addr='1000::67/64' )
+ s68_host = self.addHost( 'h68', ip='10.1.0.68/24', cls=dualStackHost, v6Addr='1000::68/64' )
+ s69_host = self.addHost( 'h69', ip='10.1.0.69/24', cls=dualStackHost, v6Addr='1000::69/64' )
+ s70_host = self.addHost( 'h70', ip='10.1.0.70/24', cls=dualStackHost, v6Addr='1000::70/64' )
+ s71_host = self.addHost( 'h71', ip='10.1.0.71/24', cls=dualStackHost, v6Addr='1000::71/64' )
+ s72_host = self.addHost( 'h72', ip='10.1.0.72/24', cls=dualStackHost, v6Addr='1000::72/64' )
+ s73_host = self.addHost( 'h73', ip='10.1.0.73/24', cls=dualStackHost, v6Addr='1000::73/64' )
+ s74_host = self.addHost( 'h74', ip='10.1.0.74/24', cls=dualStackHost, v6Addr='1000::74/64' )
+ s75_host = self.addHost( 'h75', ip='10.1.0.75/24', cls=dualStackHost, v6Addr='1000::75/64' )
+ s76_host = self.addHost( 'h76', ip='10.1.0.76/24', cls=dualStackHost, v6Addr='1000::76/64' )
+ s77_host = self.addHost( 'h77', ip='10.1.0.77/24', cls=dualStackHost, v6Addr='1000::77/64' )
+ s78_host = self.addHost( 'h78', ip='10.1.0.78/24', cls=dualStackHost, v6Addr='1000::78/64' )
+
+ # add edges between switch and corresponding host
+ #self.addLink( s1 , s1_host )
+ #self.addLink( s2 , s2_host )
+ #self.addLink( s3 , s3_host )
+ #self.addLink( s4 , s4_host )
+ #self.addLink( s5 , s5_host )
+ #self.addLink( s6 , s6_host )
+ #self.addLink( s7 , s7_host )
+ #self.addLink( s8 , s8_host )
+ #self.addLink( s9 , s9_host )
+ #self.addLink( s10 , s10_host )
+ self.addLink( s11 , s11_host )
+ self.addLink( s12 , s12_host )
+ self.addLink( s13 , s13_host )
+ self.addLink( s14 , s14_host )
+ self.addLink( s15 , s15_host )
+ self.addLink( s16 , s16_host )
+ self.addLink( s17 , s17_host )
+ self.addLink( s18 , s18_host )
+ self.addLink( s19 , s19_host )
+ self.addLink( s20 , s20_host )
+ self.addLink( s21 , s21_host )
+ self.addLink( s22 , s22_host )
+ self.addLink( s23 , s23_host )
+ self.addLink( s24 , s24_host )
+ self.addLink( s25 , s25_host )
+ self.addLink( s26 , s26_host )
+ self.addLink( s27 , s27_host )
+ self.addLink( s28 , s28_host )
+ self.addLink( s29 , s29_host )
+ self.addLink( s30 , s30_host )
+ self.addLink( s31 , s31_host )
+ self.addLink( s32 , s32_host )
+ self.addLink( s33 , s33_host )
+ self.addLink( s34 , s34_host )
+ self.addLink( s35 , s35_host )
+ self.addLink( s36 , s36_host )
+ self.addLink( s37 , s37_host )
+ self.addLink( s38 , s38_host )
+ self.addLink( s39 , s39_host )
+ self.addLink( s40 , s40_host )
+ self.addLink( s41 , s41_host )
+ self.addLink( s42 , s42_host )
+ self.addLink( s43 , s43_host )
+ self.addLink( s44 , s44_host )
+ self.addLink( s45 , s45_host )
+ self.addLink( s46 , s46_host )
+ self.addLink( s47 , s47_host )
+ self.addLink( s48 , s48_host )
+ self.addLink( s49 , s49_host )
+ self.addLink( s50 , s50_host )
+ self.addLink( s51 , s51_host )
+ self.addLink( s52 , s52_host )
+ self.addLink( s53 , s53_host )
+ self.addLink( s54 , s54_host )
+ self.addLink( s55 , s55_host )
+ self.addLink( s56 , s56_host )
+ self.addLink( s57 , s57_host )
+ self.addLink( s58 , s58_host )
+ self.addLink( s59 , s59_host )
+ self.addLink( s60 , s60_host )
+ self.addLink( s61 , s61_host )
+ self.addLink( s62 , s62_host )
+ self.addLink( s63 , s63_host )
+ self.addLink( s64 , s64_host )
+ self.addLink( s65 , s65_host )
+ self.addLink( s66 , s66_host )
+ self.addLink( s67 , s67_host )
+ self.addLink( s68 , s68_host )
+ self.addLink( s69 , s69_host )
+ self.addLink( s70 , s70_host )
+ self.addLink( s71 , s71_host )
+ self.addLink( s72 , s72_host )
+ self.addLink( s73 , s73_host )
+ self.addLink( s74 , s74_host )
+ self.addLink( s75 , s75_host )
+ self.addLink( s76 , s76_host )
+ self.addLink( s77 , s77_host )
+ self.addLink( s78 , s78_host )
+
+ #info( '*** Add Leaf links\n')
+ self.addLink(s1, s9)
+ self.addLink(s2, s10)
+ self.addLink(s3, s9)
+ self.addLink(s4, s10)
+ self.addLink(s5, s9)
+ self.addLink(s6, s10)
+ self.addLink(s7, s9)
+ self.addLink(s8, s10)
+ self.addLink(s9, s11)
+ self.addLink(s9, s12)
+ self.addLink(s10, s13)
+ self.addLink(s10, s14)
+ self.addLink(s11, s12)
+ self.addLink(s13, s14)
+
+ #info( '*** Add Spine-1 links\n')
+ self.addLink(s15, s1)
+ self.addLink(s15, s2)
+ self.addLink(s16, s1)
+ self.addLink(s16, s2)
+ self.addLink(s17, s1)
+ self.addLink(s17, s2)
+ self.addLink(s18, s1)
+ self.addLink(s18, s2)
+ self.addLink(s19, s1)
+ self.addLink(s19, s2)
+ self.addLink(s20, s1)
+ self.addLink(s20, s2)
+ self.addLink(s21, s1)
+ self.addLink(s21, s2)
+ self.addLink(s22, s1)
+ self.addLink(s22, s2)
+ self.addLink(s23, s1)
+ self.addLink(s23, s2)
+ self.addLink(s24, s1)
+ self.addLink(s24, s2)
+ self.addLink(s25, s1)
+ self.addLink(s25, s2)
+ self.addLink(s26, s1)
+ self.addLink(s26, s2)
+ self.addLink(s27, s1)
+ self.addLink(s27, s2)
+ self.addLink(s28, s1)
+ self.addLink(s28, s2)
+ self.addLink(s29, s1)
+ self.addLink(s29, s2)
+ self.addLink(s30, s1)
+ self.addLink(s30, s2)
+
+ #info( '*** Add Spine-2 links\n')
+ self.addLink(s31, s3)
+ self.addLink(s31, s4)
+ self.addLink(s32, s3)
+ self.addLink(s32, s4)
+ self.addLink(s33, s3)
+ self.addLink(s33, s4)
+ self.addLink(s34, s3)
+ self.addLink(s34, s4)
+ self.addLink(s35, s3)
+ self.addLink(s35, s4)
+ self.addLink(s36, s3)
+ self.addLink(s36, s4)
+ self.addLink(s37, s3)
+ self.addLink(s37, s4)
+ self.addLink(s38, s3)
+ self.addLink(s38, s4)
+ self.addLink(s39, s3)
+ self.addLink(s39, s4)
+ self.addLink(s40, s3)
+ self.addLink(s40, s4)
+ self.addLink(s41, s3)
+ self.addLink(s41, s4)
+ self.addLink(s42, s3)
+ self.addLink(s42, s4)
+ self.addLink(s43, s3)
+ self.addLink(s43, s4)
+ self.addLink(s44, s3)
+ self.addLink(s44, s4)
+ self.addLink(s45, s3)
+ self.addLink(s45, s4)
+ self.addLink(s46, s3)
+ self.addLink(s46, s4)
+
+ #info( '*** Add Spine-3 links\n')
+ self.addLink(s47, s5)
+ self.addLink(s47, s6)
+ self.addLink(s48, s5)
+ self.addLink(s48, s6)
+ self.addLink(s49, s5)
+ self.addLink(s49, s6)
+ self.addLink(s50, s5)
+ self.addLink(s50, s6)
+ self.addLink(s51, s5)
+ self.addLink(s51, s6)
+ self.addLink(s52, s5)
+ self.addLink(s52, s6)
+ self.addLink(s53, s5)
+ self.addLink(s53, s6)
+ self.addLink(s54, s5)
+ self.addLink(s54, s6)
+ self.addLink(s55, s5)
+ self.addLink(s55, s6)
+ self.addLink(s56, s5)
+ self.addLink(s56, s6)
+ self.addLink(s57, s5)
+ self.addLink(s57, s6)
+ self.addLink(s58, s5)
+ self.addLink(s58, s6)
+ self.addLink(s59, s5)
+ self.addLink(s59, s6)
+ self.addLink(s60, s5)
+ self.addLink(s60, s6)
+ self.addLink(s61, s5)
+ self.addLink(s61, s6)
+ self.addLink(s62, s5)
+ self.addLink(s62, s6)
+
+ #info( '*** Add Spine-4 links\n')
+ self.addLink(s63, s7)
+ self.addLink(s63, s8)
+ self.addLink(s64, s7)
+ self.addLink(s64, s8)
+ self.addLink(s65, s7)
+ self.addLink(s65, s8)
+ self.addLink(s66, s7)
+ self.addLink(s66, s8)
+ self.addLink(s67, s7)
+ self.addLink(s67, s8)
+ self.addLink(s68, s7)
+ self.addLink(s68, s8)
+ self.addLink(s69, s7)
+ self.addLink(s69, s8)
+ self.addLink(s70, s7)
+ self.addLink(s70, s8)
+ self.addLink(s71, s7)
+ self.addLink(s71, s8)
+ self.addLink(s72, s7)
+ self.addLink(s72, s8)
+ self.addLink(s73, s7)
+ self.addLink(s73, s8)
+ self.addLink(s74, s7)
+ self.addLink(s74, s8)
+ self.addLink(s75, s7)
+ self.addLink(s75, s8)
+ self.addLink(s76, s7)
+ self.addLink(s76, s8)
+ self.addLink(s77, s7)
+ self.addLink(s77, s8)
+ self.addLink(s78, s7)
+ self.addLink(s78, s8)
+
+topos = { 'spine': ( lambda: spineTopo() ) }
+
+# HERE THE CODE DEFINITION OF THE TOPOLOGY ENDS
+
+def setupNetwork():
+ "Create network"
+ topo = spineTopo()
+ #if controller_ip == '':
+ #controller_ip = '10.0.2.2';
+ # controller_ip = '127.0.0.1';
+ network = Mininet(topo=topo, switch=OVSSwitch, link=TCLink, autoSetMacs=True, controller=None)
+ network.start()
+ CLI( network )
+ network.stop()
+
+if __name__ == '__main__':
+ setLogLevel('info')
+ #setLogLevel('debug')
+ setupNetwork()
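
These topology files import dumpNodeConnections but never call it. A small check after network.start() can confirm both the wiring and the dual-stack addressing; a sketch, assuming the usual ping/ping6 utilities are available inside the Mininet host namespaces:

from mininet.util import dumpNodeConnections

def verifyDualStack( network ):
    "Print the wiring and try one IPv4 and one IPv6 ping from h11 toward h12."
    dumpNodeConnections( network.hosts )
    h11 = network.get( 'h11' )
    print( h11.cmd( 'ping -c 1 10.1.0.12' ) )    # IPv4 address passed via ip=...
    print( h11.cmd( 'ping6 -c 1 1000::12' ) )    # IPv6 address added by dualStackHost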
diff --git a/TestON/tests/CHOTestMonkey/dependencies/topologies/topoTripleIpv6.py b/TestON/tests/CHOTestMonkey/dependencies/topologies/topoTripleIpv6.py
new file mode 100755
index 0000000..2a53b3d
--- /dev/null
+++ b/TestON/tests/CHOTestMonkey/dependencies/topologies/topoTripleIpv6.py
@@ -0,0 +1,65 @@
+#!/usr/bin/python
+"""
+"""
+from mininet.topo import Topo
+from mininet.net import Mininet
+from mininet.node import Host, RemoteController
+from mininet.node import Node
+from mininet.node import CPULimitedHost
+from mininet.link import TCLink
+from mininet.cli import CLI
+from mininet.log import setLogLevel
+from mininet.util import dumpNodeConnections
+from mininet.node import ( UserSwitch, OVSSwitch, IVSSwitch )
+
+class dualStackHost( Host ):
+ def config( self, v6Addr='1000::1/64', **params ):
+ r = super( dualStackHost, self ).config( **params )
+ intf = self.defaultIntf()
+ self.cmd( 'ip -6 addr add %s dev %s' % ( v6Addr, intf ) )
+ return r
+
+class tripleTopo( Topo ):
+
+ def __init__( self, **opts ):
+ "Create a topology."
+
+ # Initialize Topology
+ Topo.__init__( self, **opts )
+
+ s1 = self.addSwitch( 's1' )
+ s2 = self.addSwitch( 's2' )
+ s3 = self.addSwitch( 's3' )
+
+ # ... and now hosts
+ s1_host = self.addHost( 'h1', ip='10.1.0.1/24', cls=dualStackHost, v6Addr='1000::1/64' )
+ s2_host = self.addHost( 'h2', ip='10.1.0.2/24', cls=dualStackHost, v6Addr='1000::2/64' )
+ s3_host = self.addHost( 'h3', ip='10.1.0.3/24', cls=dualStackHost, v6Addr='1000::3/64' )
+
+ # add edges between switch and corresponding host
+ self.addLink( s1 , s1_host )
+ self.addLink( s2 , s2_host )
+ self.addLink( s3 , s3_host )
+ self.addLink(s1, s2)
+ self.addLink(s1, s3)
+ self.addLink(s2, s3)
+
+topos = { 'triple': ( lambda: tripleTopo() ) }
+
+# HERE THE CODE DEFINITION OF THE TOPOLOGY ENDS
+
+def setupNetwork():
+ "Create network"
+ topo = tripleTopo()
+ #if controller_ip == '':
+ #controller_ip = '10.0.2.2';
+ # controller_ip = '127.0.0.1';
+ network = Mininet(topo=topo, switch=OVSSwitch, autoSetMacs=True, controller=None)
+ network.start()
+ CLI( network )
+ network.stop()
+
+if __name__ == '__main__':
+ setLogLevel('info')
+ #setLogLevel('debug')
+ setupNetwork()
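
The dualStackHost class is repeated verbatim in every *Ipv6 topology file above. A shared helper module would remove that duplication; a sketch with a hypothetical module name in the same topologies directory:

# dependencies/topologies/dualstack.py  (hypothetical shared module)
from mininet.node import Host

class dualStackHost( Host ):
    "Host with an additional IPv6 address on its default interface."
    def config( self, v6Addr='1000::1/64', **params ):
        r = super( dualStackHost, self ).config( **params )
        intf = self.defaultIntf()
        self.cmd( 'ip -6 addr add %s dev %s' % ( v6Addr, intf ) )
        return r

# each topology file could then do:
# from dualstack import dualStackHost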
diff --git a/TestON/tests/FUNC/FUNCintent/FUNCintent.py b/TestON/tests/FUNC/FUNCintent/FUNCintent.py
index 080aa8f..1bff0f1 100644
--- a/TestON/tests/FUNC/FUNCintent/FUNCintent.py
+++ b/TestON/tests/FUNC/FUNCintent/FUNCintent.py
@@ -836,8 +836,8 @@
main.step( "VLAN1: Add vlan host intents between h4 and h12" )
main.assertReturnString = "Assertion Result vlan IPV4\n"
- host1 = { "name":"h4","id":"00:00:00:00:00:04/100" }
- host2 = { "name":"h12","id":"00:00:00:00:00:0C/100 "}
+ host1 = { "name":"h4","id":"00:00:00:00:00:04/100", "vlan":"100" }
+ host2 = { "name":"h12","id":"00:00:00:00:00:0C/100", "vlan":"100" }
testResult = main.FALSE
installResult = main.FALSE
installResult = main.intentFunction.installHostIntent( main,
@@ -864,36 +864,6 @@
onpass=main.assertReturnString,
onfail=main.assertReturnString)
- main.step( "VLAN2: Add inter vlan host intents between h13 and h20" )
- main.assertReturnString = "Assertion Result different VLAN negative test\n"
- host1 = { "name":"h13" }
- host2 = { "name":"h20" }
- testResult = main.FALSE
- installResult = main.FALSE
- installResult = main.intentFunction.installHostIntent( main,
- name='VLAN2',
- onosNode='0',
- host1=host1,
- host2=host2)
-
- if installResult:
- testResult = main.intentFunction.testHostIntent( main,
- name='VLAN2',
- intentId = installResult,
- onosNode='0',
- host1=host1,
- host2=host2,
- sw1='s5',
- sw2='s2',
- expectedLink = 18)
- else:
- main.CLIs[ 0 ].removeAllIntents( purge=True )
-
- utilities.assert_equals( expect=main.TRUE,
- actual=testResult,
- onpass=main.assertReturnString,
- onfail=main.assertReturnString)
-
main.step( "Confirm that ONOS leadership is unchanged")
intentLeadersNew = main.CLIs[ 0 ].leaderCandidates()
main.intentFunction.checkLeaderChange( intentLeadersOld,
@@ -1184,25 +1154,24 @@
main.step( "VLAN: Add point intents between h5 and h21" )
main.assertReturnString = "Assertion Result for VLAN IPV4 with mac address point intents\n"
senders = [
- { "name":"h5","device":"of:0000000000000005/5","mac":"00:00:00:00:00:05" }
+ { "name":"h5","device":"of:0000000000000005/5","mac":"00:00:00:00:00:05", "vlan":"200" }
]
recipients = [
- { "name":"h21","device":"of:0000000000000007/5","mac":"00:00:00:00:00:15" }
+ { "name":"h21","device":"of:0000000000000007/5","mac":"00:00:00:00:00:15", "vlan":"200" }
]
testResult = main.FALSE
installResult = main.FALSE
installResult = main.intentFunction.installPointIntent(
main,
- name="DUALSTACK1",
+ name="VLAN",
senders=senders,
- recipients=recipients,
- ethType="IPV4" )
+ recipients=recipients)
if installResult:
testResult = main.intentFunction.testPointIntent(
main,
intentId=installResult,
- name="DUALSTACK1",
+ name="VLAN",
senders=senders,
recipients=recipients,
sw1="s5",
@@ -1434,11 +1403,11 @@
main.step( "VLAN: Add single point to multi point intents" )
main.assertReturnString = "Assertion results for IPV4 single to multi point intent with IPV4 type and MAC addresses in the same VLAN\n"
senders = [
- { "name":"h4", "device":"of:0000000000000005/4", "mac":"00:00:00:00:00:04" }
+ { "name":"h4", "device":"of:0000000000000005/4", "mac":"00:00:00:00:00:04", "vlan":"100" }
]
recipients = [
- { "name":"h12", "device":"of:0000000000000006/4", "mac":"00:00:00:00:00:0C" },
- { "name":"h20", "device":"of:0000000000000007/4", "mac":"00:00:00:00:00:14" }
+ { "name":"h12", "device":"of:0000000000000006/4", "mac":"00:00:00:00:00:0C", "vlan":"100" },
+ { "name":"h20", "device":"of:0000000000000007/4", "mac":"00:00:00:00:00:14", "vlan":"100" }
]
badSenders=[ { "name":"h13" } ] # Senders that are not in the intent
badRecipients=[ { "name":"h21" } ] # Recipients that are not in the intent
@@ -1446,10 +1415,9 @@
installResult = main.FALSE
installResult = main.intentFunction.installSingleToMultiIntent(
main,
- name="IPV4",
+            name="VLAN",
senders=senders,
recipients=recipients,
- ethType="IPV4",
sw1="s5",
sw2="s2")
@@ -1457,7 +1425,7 @@
testResult = main.intentFunction.testPointIntent(
main,
intentId=installResult,
- name="IPV4",
+ name="VLAN",
senders=senders,
recipients=recipients,
badSenders=badSenders,
@@ -1658,11 +1626,11 @@
main.step( "VLAN: Add multi point to single point intents" )
main.assertReturnString = "Assertion results for IPV4 multi to single point intent with IPV4 type and no MAC addresses in the same VLAN\n"
senders = [
- { "name":"h13", "device":"of:0000000000000006/5" },
- { "name":"h21", "device":"of:0000000000000007/5" }
+ { "name":"h13", "device":"of:0000000000000006/5", "vlan":"200" },
+ { "name":"h21", "device":"of:0000000000000007/5", "vlan":"200" }
]
recipients = [
- { "name":"h5", "device":"of:0000000000000005/5" }
+ { "name":"h5", "device":"of:0000000000000005/5", "vlan":"200" }
]
badSenders=[ { "name":"h12" } ] # Senders that are not in the intent
badRecipients=[ { "name":"h20" } ] # Recipients that are not in the intent
@@ -1673,7 +1641,6 @@
name="VLAN",
senders=senders,
recipients=recipients,
- ethType="IPV4",
sw1="s5",
sw2="s2")
@@ -1863,156 +1830,8 @@
onpass=main.assertReturnString,
onfail=main.assertReturnString )
- main.step( "IPV4: Add multi point to single point intents" )
- main.assertReturnString = "Assertion results for IPV4 multi to single \
- point intent end point failure with IPV4 type and MAC addresses\n"
- senders = [
- { "name":"h16", "device":"of:0000000000000006/8", "mac":"00:00:00:00:00:10" },
- { "name":"h24", "device":"of:0000000000000007/8", "mac":"00:00:00:00:00:18" }
- ]
- recipients = [
- { "name":"h8", "device":"of:0000000000000005/8", "mac":"00:00:00:00:00:08" }
- ]
- isolatedSenders = [
- { "name":"h24"}
- ]
- isolatedRecipients = []
- testResult = main.FALSE
- installResult = main.FALSE
- installResult = main.intentFunction.installMultiToSingleIntent(
- main,
- name="IPV4",
- senders=senders,
- recipients=recipients,
- ethType="IPV4",
- sw1="s5",
- sw2="s2")
-
- if installResult:
- testResult = main.intentFunction.testEndPointFail(
- main,
- intentId=installResult,
- name="IPV4",
- senders=senders,
- recipients=recipients,
- isolatedSenders=isolatedSenders,
- isolatedRecipients=isolatedRecipients,
- sw1="s6",
- sw2="s2",
- sw3="s4",
- sw4="s1",
- sw5="s3",
- expectedLink1=16,
- expectedLink2=14 )
- else:
- main.CLIs[ 0 ].removeAllIntents( purge=True )
-
- utilities.assert_equals( expect=main.TRUE,
- actual=testResult,
- onpass=main.assertReturnString,
- onfail=main.assertReturnString )
-
- main.step( "IPV4_2: Add multi point to single point intents" )
- main.assertReturnString = "Assertion results for IPV4 multi to single \
- point intent end point failure with IPV4 type and no MAC addresses\n"
- senders = [
- { "name":"h16", "device":"of:0000000000000006/8" },
- { "name":"h24", "device":"of:0000000000000007/8" }
- ]
- recipients = [
- { "name":"h8", "device":"of:0000000000000005/8" }
- ]
- isolatedSenders = [
- { "name":"h24"}
- ]
- isolatedRecipients = []
- testResult = main.FALSE
- installResult = main.FALSE
- installResult = main.intentFunction.installMultiToSingleIntent(
- main,
- name="IPV4_2",
- senders=senders,
- recipients=recipients,
- ethType="IPV4",
- sw1="s5",
- sw2="s2")
-
- if installResult:
- testResult = main.intentFunction.testEndPointFail(
- main,
- intentId=installResult,
- name="IPV4_2",
- senders=senders,
- recipients=recipients,
- isolatedSenders=isolatedSenders,
- isolatedRecipients=isolatedRecipients,
- sw1="s6",
- sw2="s2",
- sw3="s4",
- sw4="s1",
- sw5="s3",
- expectedLink1=16,
- expectedLink2=14 )
- else:
- main.CLIs[ 0 ].removeAllIntents( purge=True )
-
- utilities.assert_equals( expect=main.TRUE,
- actual=testResult,
- onpass=main.assertReturnString,
- onfail=main.assertReturnString )
-
- main.step( "VLAN: Add multi point to single point intents" )
- main.assertReturnString = "Assertion results for IPV4 multi to single \
- point intent end point failure with IPV4 type and no MAC addresses in the same VLAN\n"
- senders = [
- { "name":"h13", "device":"of:0000000000000006/5" },
- { "name":"h21", "device":"of:0000000000000007/5" }
- ]
- recipients = [
- { "name":"h5", "device":"of:0000000000000005/5" }
- ]
- isolatedSenders = [
- { "name":"h21"}
- ]
- isolatedRecipients = []
- testResult = main.FALSE
- installResult = main.FALSE
- installResult = main.intentFunction.installMultiToSingleIntent(
- main,
- name="VLAN",
- senders=senders,
- recipients=recipients,
- ethType="IPV4",
- sw1="s5",
- sw2="s2")
-
- if installResult:
- testResult = main.intentFunction.testEndPointFail(
- main,
- intentId=installResult,
- name="VLAN",
- senders=senders,
- recipients=recipients,
- isolatedSenders=isolatedSenders,
- isolatedRecipients=isolatedRecipients,
- sw1="s6",
- sw2="s2",
- sw3="s4",
- sw4="s1",
- sw5="s3",
- expectedLink1=16,
- expectedLink2=14 )
- else:
- main.CLIs[ 0 ].removeAllIntents( purge=True )
-
- utilities.assert_equals( expect=main.TRUE,
- actual=testResult,
- onpass=main.assertReturnString,
- onfail=main.assertReturnString )
-
main.step( "NOOPTION: Install and test single point to multi point intents" )
- main.assertReturnString = "Assertion results for IPV4 single to multi \
- point intent end point failure with no options set\n"
+ main.assertReturnString = "Assertion results for IPV4 single to multi point intent with no options set\n"
senders = [
{ "name":"h8", "device":"of:0000000000000005/8" }
]
@@ -2020,9 +1839,8 @@
{ "name":"h16", "device":"of:0000000000000006/8" },
{ "name":"h24", "device":"of:0000000000000007/8" }
]
- isolatedSenders = []
- isolatedRecipients = [
- { "name":"h24" }
+ isolatedSenders = [
+ { "name":"h24"}
]
testResult = main.FALSE
installResult = main.FALSE
@@ -2058,151 +1876,4 @@
onpass=main.assertReturnString,
onfail=main.assertReturnString )
- main.step( "IPV4: Install and test single point to multi point intents" )
- main.assertReturnString = "Assertion results for IPV4 single to multi \
- point intent end point failure with IPV4 type and no MAC addresses\n"
- senders = [
- { "name":"h8", "device":"of:0000000000000005/8","mac":"00:00:00:00:00:08" }
- ]
- recipients = [
- { "name":"h16", "device":"of:0000000000000006/8", "mac":"00:00:00:00:00:10" },
- { "name":"h24", "device":"of:0000000000000007/8", "mac":"00:00:00:00:00:18" }
- ]
- isolatedSenders = []
- isolatedRecipients = [
- { "name":"h24" }
- ]
- testResult = main.FALSE
- installResult = main.FALSE
- installResult = main.intentFunction.installSingleToMultiIntent(
- main,
- name="IPV4",
- senders=senders,
- recipients=recipients,
- ethType="IPV4",
- sw1="s5",
- sw2="s2")
-
- if installResult:
- testResult = main.intentFunction.testEndPointFail(
- main,
- intentId=installResult,
- name="IPV4",
- senders=senders,
- recipients=recipients,
- isolatedSenders=isolatedSenders,
- isolatedRecipients=isolatedRecipients,
- sw1="s6",
- sw2="s2",
- sw3="s4",
- sw4="s1",
- sw5="s3",
- expectedLink1=16,
- expectedLink2=14 )
- else:
- main.CLIs[ 0 ].removeAllIntents( purge=True )
-
- utilities.assert_equals( expect=main.TRUE,
- actual=testResult,
- onpass=main.assertReturnString,
- onfail=main.assertReturnString )
-
- main.step( "IPV4_2: Add single point to multi point intents" )
- main.assertReturnString = "Assertion results for IPV4 single to multi\
- point intent endpoint failure with IPV4 type and no MAC addresses\n"
- senders = [
- { "name":"h8", "device":"of:0000000000000005/8" }
- ]
- recipients = [
- { "name":"h16", "device":"of:0000000000000006/8" },
- { "name":"h24", "device":"of:0000000000000007/8" }
- ]
- isolatedSenders = []
- isolatedRecipients = [
- { "name":"h24" }
- ]
- testResult = main.FALSE
- installResult = main.FALSE
- installResult = main.intentFunction.installSingleToMultiIntent(
- main,
- name="IPV4_2",
- senders=senders,
- recipients=recipients,
- ethType="IPV4",
- sw1="s5",
- sw2="s2")
-
- if installResult:
- testResult = main.intentFunction.testEndPointFail(
- main,
- intentId=installResult,
- name="IPV4_2",
- senders=senders,
- recipients=recipients,
- isolatedSenders=isolatedSenders,
- isolatedRecipients=isolatedRecipients,
- sw1="s6",
- sw2="s2",
- sw3="s4",
- sw4="s1",
- sw5="s3",
- expectedLink1=16,
- expectedLink2=14 )
- else:
- main.CLIs[ 0 ].removeAllIntents( purge=True )
-
- utilities.assert_equals( expect=main.TRUE,
- actual=testResult,
- onpass=main.assertReturnString,
- onfail=main.assertReturnString )
-
- main.step( "VLAN: Add single point to multi point intents" )
- main.assertReturnString = "Assertion results for IPV4 single to multi point\
- intent endpoint failure with IPV4 type and MAC addresses in the same VLAN\n"
- senders = [
- { "name":"h4", "device":"of:0000000000000005/4", "mac":"00:00:00:00:00:04" }
- ]
- recipients = [
- { "name":"h12", "device":"of:0000000000000006/4", "mac":"00:00:00:00:00:0C" },
- { "name":"h20", "device":"of:0000000000000007/4", "mac":"00:00:00:00:00:14" }
- ]
- isolatedSenders = []
- isolatedRecipients = [
- { "name":"h20" }
- ]
- testResult = main.FALSE
- installResult = main.FALSE
- installResult = main.intentFunction.installSingleToMultiIntent(
- main,
- name="IPV4",
- senders=senders,
- recipients=recipients,
- ethType="IPV4",
- sw1="s5",
- sw2="s2")
-
- if installResult:
- testResult = main.intentFunction.testEndPointFail(
- main,
- intentId=installResult,
- name="IPV4",
- senders=senders,
- recipients=recipients,
- isolatedSenders=isolatedSenders,
- isolatedRecipients=isolatedRecipients,
- sw1="s6",
- sw2="s2",
- sw3="s4",
- sw4="s1",
- sw5="s3",
- expectedLink1=16,
- expectedLink2=14 )
- else:
- main.CLIs[ 0 ].removeAllIntents( purge=True )
-
- utilities.assert_equals( expect=main.TRUE,
- actual=testResult,
- onpass=main.assertReturnString,
- onfail=main.assertReturnString )
-
main.intentFunction.report( main )
\ No newline at end of file
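The VLAN steps above now carry the 802.1Q tag on the endpoint dictionaries themselves; a minimal sketch of the shapes the helpers in FuncIntentFunction.py (next file) expect, using values taken from the cases above:

# Host-intent endpoints: "vlan" is optional and, when present, is forwarded
# to the ONOS CLI as vlanId by installHostIntent().
host1 = { "name": "h4",  "id": "00:00:00:00:00:04/100", "vlan": "100" }
host2 = { "name": "h12", "id": "00:00:00:00:00:0C/100", "vlan": "100" }

# Point-intent endpoints: the first sender's "vlan" is what the
# install*PointIntent() helpers read and pass along as vlanId.
senders = [ { "name": "h5", "device": "of:0000000000000005/5",
              "mac": "00:00:00:00:00:05", "vlan": "200" } ]
recipients = [ { "name": "h21", "device": "of:0000000000000007/5",
                 "mac": "00:00:00:00:00:15", "vlan": "200" } ]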
diff --git a/TestON/tests/FUNC/FUNCintent/dependencies/FuncIntentFunction.py b/TestON/tests/FUNC/FUNCintent/dependencies/FuncIntentFunction.py
index 88a0cad..dede2c8 100644
--- a/TestON/tests/FUNC/FUNCintent/dependencies/FuncIntentFunction.py
+++ b/TestON/tests/FUNC/FUNCintent/dependencies/FuncIntentFunction.py
@@ -22,7 +22,7 @@
ipAddresses="",
tcp="",
sw1="",
- sw2=""):
+ sw2="" ):
"""
Installs a Host Intent
@@ -75,8 +75,10 @@
host2[ "id" ] = main.hostsData.get( host2.get( "name" ) ).get( "id" )
# Adding point intent
+ vlanId = host1.get( "vlan" )
intentId = main.CLIs[ onosNode ].addHostIntent( hostIdOne=host1.get( "id" ),
- hostIdTwo=host2.get( "id" ) )
+ hostIdTwo=host2.get( "id" ),
+ vlanId=vlanId )
except (KeyError, TypeError):
errorMsg = "There was a problem loading the hosts data."
if intentId:
@@ -168,6 +170,7 @@
senderNames = [ host1.get( "name" ), host2.get( "name" ) ]
recipientNames = [ host1.get( "name" ), host2.get( "name" ) ]
+ vlanId = host1.get( "vlan" )
testResult = main.TRUE
except (KeyError, TypeError):
@@ -191,7 +194,7 @@
testResult = main.FALSE
# Check Connectivity
- if utilities.retry( f=scapyCheckConnection, retValue=main.FALSE, args=( main, senderNames, recipientNames ) ):
+ if utilities.retry( f=scapyCheckConnection, retValue=main.FALSE, args=( main, senderNames, recipientNames, vlanId ) ):
main.assertReturnString += 'Initial Ping Passed\n'
else:
main.assertReturnString += 'Initial Ping Failed\n'
@@ -228,7 +231,7 @@
testResult = main.FALSE
# Check Connection
- if utilities.retry( f=scapyCheckConnection, retValue=main.FALSE, args=( main, senderNames, recipientNames ) ):
+ if utilities.retry( f=scapyCheckConnection, retValue=main.FALSE, args=( main, senderNames, recipientNames, vlanId ) ):
main.assertReturnString += 'Link Down Pingall Passed\n'
else:
main.assertReturnString += 'Link Down Pingall Failed\n'
@@ -266,7 +269,7 @@
testResult = main.FALSE
# Check Connection
- if utilities.retry( f=scapyCheckConnection, retValue=main.FALSE, args=( main, senderNames, recipientNames ) ):
+ if utilities.retry( f=scapyCheckConnection, retValue=main.FALSE, args=( main, senderNames, recipientNames, vlanId ) ):
main.assertReturnString += 'Link Up Pingall Passed\n'
else:
main.assertReturnString += 'Link Up Pingall Failed\n'
@@ -364,6 +367,8 @@
ipSrc = senders[ 0 ].get( "ip" )
ipDst = recipients[ 0 ].get( "ip" )
+ vlanId = senders[ 0 ].get( "vlan" )
+
# Adding point intent
intentId = main.CLIs[ onosNode ].addPointIntent(
ingressDevice=ingressDevice,
@@ -378,7 +383,8 @@
ipSrc=ipSrc,
ipDst=ipDst,
tcpSrc=tcpSrc,
- tcpDst=tcpDst )
+ tcpDst=tcpDst,
+ vlanId=vlanId )
except (KeyError, TypeError):
errorMsg = "There was a problem loading the hosts data."
if intentId:
@@ -758,6 +764,7 @@
portEgressList = None
srcMac = senders[ 0 ].get( "mac" )
+ vlanId = senders[ 0 ].get( "vlan" )
# Adding point intent
intentId = main.CLIs[ onosNode ].addSinglepointToMultipointIntent(
@@ -773,7 +780,8 @@
ipSrc="",
ipDst="",
tcpSrc="",
- tcpDst="" )
+ tcpDst="",
+ vlanId=vlanId )
except (KeyError, TypeError):
errorMsg = "There was a problem loading the hosts data."
if intentId:
@@ -869,6 +877,7 @@
portIngressList = None
dstMac = recipients[ 0 ].get( "mac" )
+ vlanId = senders[ 0 ].get( "vlan" )
# Adding point intent
intentId = main.CLIs[ onosNode ].addMultipointToSinglepointIntent(
@@ -884,7 +893,8 @@
ipSrc="",
ipDst="",
tcpSrc="",
- tcpDst="" )
+ tcpDst="",
+ vlanId=vlanId )
except (KeyError, TypeError):
errorMsg = "There was a problem loading the hosts data."
if intentId:
@@ -993,6 +1003,7 @@
if not recipient.get( "device" ):
main.log.warn( "Device not given for recipient {0}. Loading from main.hostData".format( recipient.get( "name" ) ) )
recipient[ "device" ] = main.hostsData.get( recipient.get( "name" ) ).get( "location" )
+ vlanId = senders[ 0 ].get( "vlan" )
except (KeyError, TypeError):
main.log.error( "There was a problem loading the hosts data." )
return main.FALSE
@@ -1015,7 +1026,7 @@
testResult = main.FALSE
# Check Connectivity
- if utilities.retry( f=scapyCheckConnection, retValue=main.FALSE, args=( main, senderNames, recipientNames ), attempts=3, sleep=5 ):
+ if utilities.retry( f=scapyCheckConnection, retValue=main.FALSE, args=( main, senderNames, recipientNames, vlanId ), attempts=3, sleep=5 ):
main.assertReturnString += 'Initial Ping Passed\n'
else:
main.assertReturnString += 'Initial Ping Failed\n'
@@ -1069,7 +1080,7 @@
testResult = main.FALSE
# Check Connection
- if utilities.retry( f=scapyCheckConnection, retValue=main.FALSE, args=( main, senderNames, recipientNames ) ):
+ if utilities.retry( f=scapyCheckConnection, retValue=main.FALSE, args=( main, senderNames, recipientNames, vlanId ) ):
main.assertReturnString += 'Link Down Pingall Passed\n'
else:
main.assertReturnString += 'Link Down Pingall Failed\n'
@@ -1107,7 +1118,7 @@
testResult = main.FALSE
# Check Connection
- if utilities.retry( f=scapyCheckConnection, retValue=main.FALSE, args=( main, senderNames, recipientNames ) ):
+ if utilities.retry( f=scapyCheckConnection, retValue=main.FALSE, args=( main, senderNames, recipientNames, vlanId ) ):
main.assertReturnString += 'Link Up Scapy Packet Received Passed\n'
else:
            main.assertReturnString += 'Link Up Scapy Packet Received Failed\n'
@@ -1292,7 +1303,7 @@
# Check Connectivity
# First check connectivity of any isolated senders to recipients
if isolatedSenderNames:
- if scapyCheckConnection( main, isolatedSenderNames, recipientNames, None, None, main.TRUE ):
+ if scapyCheckConnection( main, isolatedSenderNames, recipientNames, None, None, None, main.TRUE ):
main.assertReturnString += 'Isolation link Down Connectivity Check Passed\n'
else:
main.assertReturnString += 'Isolation link Down Connectivity Check Failed\n'
@@ -1300,7 +1311,7 @@
# Next check connectivity of senders to any isolated recipients
if isolatedRecipientNames:
- if scapyCheckConnection( main, senderNames, isolatedRecipientNames, None, None, main.TRUE ):
+ if scapyCheckConnection( main, senderNames, isolatedRecipientNames, None, None, None, main.TRUE ):
main.assertReturnString += 'Isolation link Down Connectivity Check Passed\n'
else:
main.assertReturnString += 'Isolation link Down Connectivity Check Failed\n'
@@ -1592,7 +1603,7 @@
linkResult = main.Mininet1.link( end1=sw1, end2=sw2, option=option )
return linkResult
-def scapyCheckConnection( main, senders, recipients, packet=None, packetFilter=None, expectFailure=False ):
+def scapyCheckConnection( main, senders, recipients, vlanId=None, packet=None, packetFilter=None, expectFailure=False ):
"""
Checks the connectivity between all given sender hosts and all given recipient hosts
Packet may be specified. Defaults to Ether/IP packet
@@ -1631,17 +1642,31 @@
connectionsFunctional = main.FALSE
continue
- recipientComp.startFilter( pktFilter = packetFilter.format( senderComp.hostMac ) )
+ if vlanId:
+ recipientComp.startFilter( pktFilter = ( "vlan {}".format( vlanId ) + " && " + packetFilter.format( senderComp.hostMac ) ) )
+ else:
+ recipientComp.startFilter( pktFilter = packetFilter.format( senderComp.hostMac ) )
if not packet:
- pkt = 'Ether( src="{0}", dst="{2}" )/IP( src="{1}", dst="{3}" )'.format(
- senderComp.hostMac,
- senderComp.hostIp,
- recipientComp.hostMac,
- recipientComp.hostIp )
+ if vlanId:
+ pkt = 'Ether( src="{0}", dst="{2}" )/Dot1Q(vlan={4})/IP( src="{1}", dst="{3}" )'.format(
+ senderComp.hostMac,
+ senderComp.hostIp,
+ recipientComp.hostMac,
+ recipientComp.hostIp,
+ vlanId )
+ else:
+ pkt = 'Ether( src="{0}", dst="{2}" )/IP( src="{1}", dst="{3}" )'.format(
+ senderComp.hostMac,
+ senderComp.hostIp,
+ recipientComp.hostMac,
+ recipientComp.hostIp )
else:
pkt = packet
- senderComp.sendPacket( packet = pkt )
+ if vlanId:
+ senderComp.sendPacket( iface=( "{0}-eth0.{1}".format( sender, vlanId ) ), packet = pkt )
+ else:
+ senderComp.sendPacket( packet = pkt )
if recipientComp.checkFilter( timeout ):
if expectFailure:
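For clarity, a standalone Scapy sketch (raw scapy.all, not the TestON scapy component) of the frame and capture filter that scapyCheckConnection() now builds when vlanId is given; the MAC/IP values, the exact MAC filter shape, and the h4-eth0.100 sub-interface name are illustrative assumptions.

from scapy.all import Ether, Dot1Q, IP, sendp

vlanId = 100
senderMac, senderIp = "00:00:00:00:00:04", "10.0.0.4"
recipientMac, recipientIp = "00:00:00:00:00:0C", "10.0.0.12"

# 802.1Q-tagged frame, mirroring the Dot1Q packet string assembled above
pkt = ( Ether( src=senderMac, dst=recipientMac ) /
        Dot1Q( vlan=vlanId ) /
        IP( src=senderIp, dst=recipientIp ) )

# Recipient-side filter: a "vlan <id>" clause prepended to the MAC-based filter
pktFilter = "vlan {} && ether src {}".format( vlanId, senderMac )

# The sender transmits on its tagged sub-interface (name assumed)
sendp( pkt, iface="h4-eth0.100" )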
diff --git a/TestON/tests/HA/HAclusterRestart/HAclusterRestart.py b/TestON/tests/HA/HAclusterRestart/HAclusterRestart.py
index 6bb32e2..69f094d 100644
--- a/TestON/tests/HA/HAclusterRestart/HAclusterRestart.py
+++ b/TestON/tests/HA/HAclusterRestart/HAclusterRestart.py
@@ -266,8 +266,6 @@
main.log.debug( "{} components not ACTIVE: \n{}".format(
cli.name,
cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
-
- if cliResults == main.FALSE:
main.log.error( "Failed to start ONOS, stopping test" )
main.cleanup()
main.exit()
@@ -1681,8 +1679,9 @@
actual=consistentClustersResult,
onpass="Clusters view is consistent across all ONOS nodes",
onfail="ONOS nodes have different views of clusters" )
- if consistentClustersResult != main.TRUE:
+ if not consistentClustersResult:
main.log.debug( clusters )
+
# there should always only be one cluster
main.step( "Cluster view correct across ONOS nodes" )
try:
@@ -2557,6 +2556,8 @@
actual=consistentClustersResult,
onpass="Clusters view is consistent across all ONOS nodes",
onfail="ONOS nodes have different views of clusters" )
+ if not consistentClustersResult:
+ main.log.debug( clusters )
main.step( "There is only one SCC" )
# there should always only be one cluster
@@ -4214,6 +4215,9 @@
onfail="Partitioned Transactional Map put values are incorrect" )
main.step( "Partitioned Transactional maps get" )
+ # FIXME: is this sleep needed?
+ time.sleep( 5 )
+
getCheck = True
for n in range( 1, numKeys + 1 ):
getResponses = []
diff --git a/TestON/tests/HA/HAfullNetPartition/HAfullNetPartition.py b/TestON/tests/HA/HAfullNetPartition/HAfullNetPartition.py
index 6eb81d9..a48a460 100644
--- a/TestON/tests/HA/HAfullNetPartition/HAfullNetPartition.py
+++ b/TestON/tests/HA/HAfullNetPartition/HAfullNetPartition.py
@@ -291,8 +291,6 @@
main.log.debug( "{} components not ACTIVE: \n{}".format(
cli.name,
cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
-
- if cliResults == main.FALSE:
main.log.error( "Failed to start ONOS, stopping test" )
main.cleanup()
main.exit()
@@ -1683,8 +1681,9 @@
actual=consistentClustersResult,
onpass="Clusters view is consistent across all ONOS nodes",
onfail="ONOS nodes have different views of clusters" )
- if consistentClustersResult != main.TRUE:
+ if not consistentClustersResult:
main.log.debug( clusters )
+
# there should always only be one cluster
main.step( "Cluster view correct across ONOS nodes" )
try:
@@ -2541,6 +2540,8 @@
actual=consistentClustersResult,
onpass="Clusters view is consistent across all ONOS nodes",
onfail="ONOS nodes have different views of clusters" )
+ if not consistentClustersResult:
+ main.log.debug( clusters )
main.step( "There is only one SCC" )
# there should always only be one cluster
@@ -4200,6 +4201,9 @@
onfail="Partitioned Transactional Map put values are incorrect" )
main.step( "Partitioned Transactional maps get" )
+ # FIXME: is this sleep needed?
+ time.sleep( 5 )
+
getCheck = True
for n in range( 1, numKeys + 1 ):
getResponses = []
diff --git a/TestON/tests/HA/HAkillNodes/HAkillNodes.py b/TestON/tests/HA/HAkillNodes/HAkillNodes.py
index 62062fc..6fb4d6c 100644
--- a/TestON/tests/HA/HAkillNodes/HAkillNodes.py
+++ b/TestON/tests/HA/HAkillNodes/HAkillNodes.py
@@ -302,8 +302,6 @@
main.log.debug( "{} components not ACTIVE: \n{}".format(
cli.name,
cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
-
- if cliResults == main.FALSE:
main.log.error( "Failed to start ONOS, stopping test" )
main.cleanup()
main.exit()
@@ -1703,8 +1701,9 @@
actual=consistentClustersResult,
onpass="Clusters view is consistent across all ONOS nodes",
onfail="ONOS nodes have different views of clusters" )
- if consistentClustersResult != main.TRUE:
+ if not consistentClustersResult:
main.log.debug( clusters )
+
# there should always only be one cluster
main.step( "Cluster view correct across ONOS nodes" )
try:
@@ -2576,6 +2575,8 @@
actual=consistentClustersResult,
onpass="Clusters view is consistent across all ONOS nodes",
onfail="ONOS nodes have different views of clusters" )
+ if not consistentClustersResult:
+ main.log.debug( clusters )
main.step( "There is only one SCC" )
# there should always only be one cluster
@@ -4235,6 +4236,9 @@
onfail="Partitioned Transactional Map put values are incorrect" )
main.step( "Partitioned Transactional maps get" )
+ # FIXME: is this sleep needed?
+ time.sleep( 5 )
+
getCheck = True
for n in range( 1, numKeys + 1 ):
getResponses = []
diff --git a/TestON/tests/HA/HAsanity/HAsanity.py b/TestON/tests/HA/HAsanity/HAsanity.py
index 13b4084..aa66574 100644
--- a/TestON/tests/HA/HAsanity/HAsanity.py
+++ b/TestON/tests/HA/HAsanity/HAsanity.py
@@ -267,8 +267,6 @@
main.log.debug( "{} components not ACTIVE: \n{}".format(
cli.name,
cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
-
- if cliResults == main.FALSE:
main.log.error( "Failed to start ONOS, stopping test" )
main.cleanup()
main.exit()
@@ -1669,8 +1667,9 @@
actual=consistentClustersResult,
onpass="Clusters view is consistent across all ONOS nodes",
onfail="ONOS nodes have different views of clusters" )
- if consistentClustersResult != main.TRUE:
+ if not consistentClustersResult:
main.log.debug( clusters )
+
# there should always only be one cluster
main.step( "Cluster view correct across ONOS nodes" )
try:
@@ -2477,6 +2476,8 @@
actual=consistentClustersResult,
onpass="Clusters view is consistent across all ONOS nodes",
onfail="ONOS nodes have different views of clusters" )
+ if not consistentClustersResult:
+ main.log.debug( clusters )
main.step( "There is only one SCC" )
# there should always only be one cluster
@@ -4136,6 +4137,9 @@
onfail="Partitioned Transactional Map put values are incorrect" )
main.step( "Partitioned Transactional maps get" )
+ # FIXME: is this sleep needed?
+ time.sleep( 5 )
+
getCheck = True
for n in range( 1, numKeys + 1 ):
getResponses = []
diff --git a/TestON/tests/HA/HAscaling/HAscaling.py b/TestON/tests/HA/HAscaling/HAscaling.py
index fcec126..3eb95c6 100644
--- a/TestON/tests/HA/HAscaling/HAscaling.py
+++ b/TestON/tests/HA/HAscaling/HAscaling.py
@@ -324,8 +324,6 @@
main.log.debug( "{} components not ACTIVE: \n{}".format(
cli.name,
cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
-
- if cliResults == main.FALSE:
main.log.error( "Failed to start ONOS, stopping test" )
main.cleanup()
main.exit()
@@ -1721,8 +1719,9 @@
actual=consistentClustersResult,
onpass="Clusters view is consistent across all ONOS nodes",
onfail="ONOS nodes have different views of clusters" )
- if consistentClustersResult != main.TRUE:
+ if not consistentClustersResult:
main.log.debug( clusters )
+
# there should always only be one cluster
main.step( "Cluster view correct across ONOS nodes" )
try:
@@ -2616,6 +2615,8 @@
actual=consistentClustersResult,
onpass="Clusters view is consistent across all ONOS nodes",
onfail="ONOS nodes have different views of clusters" )
+ if not consistentClustersResult:
+ main.log.debug( clusters )
main.step( "There is only one SCC" )
# there should always only be one cluster
@@ -4264,6 +4265,9 @@
onfail="Partitioned Transactional Map put values are incorrect" )
main.step( "Partitioned Transactional maps get" )
+ # FIXME: is this sleep needed?
+ time.sleep( 5 )
+
getCheck = True
for n in range( 1, numKeys + 1 ):
getResponses = []
diff --git a/TestON/tests/HA/HAsingleInstanceRestart/HAsingleInstanceRestart.py b/TestON/tests/HA/HAsingleInstanceRestart/HAsingleInstanceRestart.py
index 1adb53c..1d4db86 100644
--- a/TestON/tests/HA/HAsingleInstanceRestart/HAsingleInstanceRestart.py
+++ b/TestON/tests/HA/HAsingleInstanceRestart/HAsingleInstanceRestart.py
@@ -256,8 +256,6 @@
main.log.debug( "{} components not ACTIVE: \n{}".format(
cli.name,
cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
-
- if cliResults == main.FALSE:
main.log.error( "Failed to start ONOS, stopping test" )
main.cleanup()
main.exit()
@@ -3426,6 +3424,9 @@
onfail="Partitioned Transactional Map put values are incorrect" )
main.step( "Partitioned Transactional maps get" )
+ # FIXME: is this sleep needed?
+ time.sleep( 5 )
+
getCheck = True
for n in range( 1, numKeys + 1 ):
getResponses = []
diff --git a/TestON/tests/HA/HAstopNodes/HAstopNodes.py b/TestON/tests/HA/HAstopNodes/HAstopNodes.py
index f329fd8..0f33ec0 100644
--- a/TestON/tests/HA/HAstopNodes/HAstopNodes.py
+++ b/TestON/tests/HA/HAstopNodes/HAstopNodes.py
@@ -291,8 +291,6 @@
main.log.debug( "{} components not ACTIVE: \n{}".format(
cli.name,
cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
-
- if cliResults == main.FALSE:
main.log.error( "Failed to start ONOS, stopping test" )
main.cleanup()
main.exit()
@@ -1682,8 +1680,9 @@
actual=consistentClustersResult,
onpass="Clusters view is consistent across all ONOS nodes",
onfail="ONOS nodes have different views of clusters" )
- if consistentClustersResult != main.TRUE:
+ if not consistentClustersResult:
main.log.debug( clusters )
+
# there should always only be one cluster
main.step( "Cluster view correct across ONOS nodes" )
try:
@@ -2553,6 +2552,8 @@
actual=consistentClustersResult,
onpass="Clusters view is consistent across all ONOS nodes",
onfail="ONOS nodes have different views of clusters" )
+ if not consistentClustersResult:
+ main.log.debug( clusters )
main.step( "There is only one SCC" )
# there should always only be one cluster
@@ -4212,6 +4213,9 @@
onfail="Partitioned Transactional Map put values are incorrect" )
main.step( "Partitioned Transactional maps get" )
+ # FIXME: is this sleep needed?
+ time.sleep( 5 )
+
getCheck = True
for n in range( 1, numKeys + 1 ):
getResponses = []
diff --git a/TestON/tests/PLAT/PLATdockertest/PLATdockertest.py b/TestON/tests/PLAT/PLATdockertest/PLATdockertest.py
index 6e4acda..7133931 100755
--- a/TestON/tests/PLAT/PLATdockertest/PLATdockertest.py
+++ b/TestON/tests/PLAT/PLATdockertest/PLATdockertest.py
@@ -163,7 +163,7 @@
main.ONOSbenchDocker.onosFormCluster(cmdPath = clcmdpath, onosIPs=IPlist, user=dkruser, passwd = dkrpasswd)
main.log.info("Wait for cluster to form with sleep time of " + str(startupSleep))
time.sleep(startupSleep)
- status, response = main.ONOSbenchRest.send(ip=IPlist[0],port=8181, url="/cluster")
+ status, response = main.ONOSbenchRest.send(ip=IPlist[0], port=8181, url="/cluster")
main.log.debug("Rest call response: " + str(status) + " - " + response)
if status == 200:
jrsp = json.loads(response)
diff --git a/TestON/tests/SCPF/SCPFscalingMaxIntents/SCPFscalingMaxIntents.params b/TestON/tests/SCPF/SCPFscalingMaxIntents/SCPFscalingMaxIntents.params
index 6efe17e..cb75b01 100644
--- a/TestON/tests/SCPF/SCPFscalingMaxIntents/SCPFscalingMaxIntents.params
+++ b/TestON/tests/SCPF/SCPFscalingMaxIntents/SCPFscalingMaxIntents.params
@@ -13,14 +13,14 @@
<SCALE>1,3,5,7</SCALE>
<DEPENDENCY>
- <path>/tests/SCPFscalingMaxIntents/dependencies/</path>
+ <path>/tests/SCPF/SCPFscalingMaxIntents/dependencies/</path>
<wrapper1>startUp</wrapper1>
<topology>rerouteTopo.py</topology>
</DEPENDENCY>
<ENV>
<cellName>productionCell</cellName>
- <cellApps>drivers</cellApps>
+ <cellApps>drivers,openflow</cellApps>
</ENV>
<GIT>
@@ -61,10 +61,10 @@
<NULL>
# CASE20
<PUSH>
- <batch_size>100</batch_size>
- <min_intents>100</min_intents>
- <max_intents>100000</max_intents>
- <check_interval>100</check_interval>
+ <batch_size>1000</batch_size>
+ <min_intents>10000</min_intents>
+ <max_intents>70000</max_intents>
+ <check_interval>10000</check_interval>
</PUSH>
# if reroute is true
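The test script in the next diff rescales these values per ONOS node before pushing intents; a worked sketch of that arithmetic, assuming the 3-node point from <SCALE>1,3,5,7</SCALE> and the batch_size/check_interval set above.

numCtrls = 3
batchSize = int( 1000 / numCtrls )               # 333 intents per batch per node
checkInterval = 10000
if numCtrls > 1:
    checkInterval = checkInterval / 4            # 2500: poll more often when clustered
    # keep the interval a multiple of the batch size
    checkInterval = int( checkInterval / batchSize ) * batchSize   # 7 * 333 = 2331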
diff --git a/TestON/tests/SCPF/SCPFscalingMaxIntents/SCPFscalingMaxIntents.py b/TestON/tests/SCPF/SCPFscalingMaxIntents/SCPFscalingMaxIntents.py
index 27bc5c2..dff2a22 100644
--- a/TestON/tests/SCPF/SCPFscalingMaxIntents/SCPFscalingMaxIntents.py
+++ b/TestON/tests/SCPF/SCPFscalingMaxIntents/SCPFscalingMaxIntents.py
@@ -54,10 +54,6 @@
main.reroute = False
main.CLIs = []
- main.ONOSip = []
- main.maxNumBatch = 0
- main.ONOSip = main.ONOSbench.getOnosIps()
- main.log.info(main.ONOSip)
main.setupSkipped = False
wrapperFile1 = main.params[ 'DEPENDENCY' ][ 'wrapper1' ]
@@ -82,6 +78,13 @@
# main.scale[ 0 ] determines the current number of ONOS controller
main.CLIs = []
main.numCtrls = int( main.scale[ 0 ] )
+ main.ONOSip = []
+ main.maxNumBatch = 0
+ main.AllONOSip = main.ONOSbench.getOnosIps()
+ for i in range(main.numCtrls):
+ main.ONOSip.append(main.AllONOSip[i])
+ main.log.info(main.ONOSip)
+
main.log.info( "Creating list of ONOS cli handles" )
for i in range(main.numCtrls):
main.CLIs.append( getattr( main, 'ONOScli%s' % (i+1) ) )
@@ -302,19 +305,11 @@
'''
import json
import time
-
- time.sleep(main.startUpSleep)
-
- main.step("Activating openflow")
- appStatus = utilities.retry( main.ONOSrest1.activateApp,
- main.FALSE,
- ['org.onosproject.openflow'],
- sleep=3,
- attempts=3 )
- utilities.assert_equals( expect=main.TRUE,
- actual=appStatus,
- onpass="Successfully activated openflow",
- onfail="Failed activate openflow" )
+
+ devices = []
+ devices = main.CLIs[0].getAllDevicesId()
+ for d in devices:
+ main.CLIs[0].deviceRemove( d )
time.sleep(main.startUpSleep)
main.step('Starting mininet topology')
@@ -333,13 +328,26 @@
onfail="Failed assign switches to masters" )
time.sleep(main.startUpSleep)
+ # Balancing Masters
+ main.step( "Balancing Masters" )
+ stepResult = main.FALSE
+ stepResult = utilities.retry( main.CLIs[0].balanceMasters,
+ main.FALSE,
+ [],
+ sleep=3,
+ attempts=3 )
+
+ utilities.assert_equals( expect=main.TRUE,
+ actual=stepResult,
+                             onpass="Balance masters was successful",
+ onfail="Failed to balance masters" )
main.log.info("Getting default flows")
jsonSum = json.loads(main.CLIs[0].summary())
main.defaultFlows = jsonSum["flows"]
main.step("Check status of Mininet setup")
- caseResult = appStatus and mnStatus and swStatus
+ caseResult = mnStatus and swStatus
utilities.assert_equals( expect=main.TRUE,
actual=caseResult,
onpass="Successfully setup Mininet",
@@ -393,6 +401,7 @@
# keeps track of how many flows have been installed, set to 0 at start
currFlows = 0
# limit for the number of intents that can be installed
+ main.batchSize = int( int(main.batchSize)/int(main.numCtrls))
limit = main.maxIntents / main.batchSize
# total intents installed
totalIntents = 0
@@ -404,7 +413,12 @@
stepResult = main.TRUE
# temp variable to contain the number of flows
flowsNum = 0
+ if main.numCtrls > 1:
+            # with more than one ONOS node, check more frequently
+ main.checkInterval = main.checkInterval/4
+            # make sure checkInterval is a multiple of batchSize
+ main.checkInterval = int( int( main.checkInterval / main.batchSize ) * main.batchSize )
for i in range(limit):
# Threads pool
@@ -424,7 +438,8 @@
kwargs={ "offset": offtmp,
"options": "-i",
"timeout": main.timeout,
- "background":False } )
+ "background":False,
+ "noExit":True} )
pool.append(t)
t.start()
main.threadID = main.threadID + 1
@@ -441,108 +456,45 @@
main.log.info("Verify Intents states")
# k is a control variable for verify retry attempts
k = 1
- intentVerify = main.FALSE
while k <= main.verifyAttempts:
# while loop for check intents by using REST api
time.sleep(5)
temp = 0
- intentsState = json.loads( main.ONOSrest1.intents() )
- for f in intentsState:
- # get INSTALLED intents number
- if f.get("state") == "INSTALLED":
- temp = temp + 1
-
- main.log.info("Total Intents: {} INSTALLED: {}".format(totalIntents, temp))
- if totalIntents == temp:
- intentVerify = main.TRUE
+ intentsState = main.CLIs[0].checkIntentSummary(timeout=600)
+ if intentsState:
+ totalIntents = main.CLIs[0].getTotalIntentsNum(timeout=600)
+ if temp < totalIntents:
+ temp = totalIntents
+ else:
+ totalIntents = temp
break
- intentVerify = main.FALSE
+ main.log.info("Total Intents: {}".format( totalIntents) )
k = k+1
- if not intentVerify:
+
+ if not intentsState:
            # If some intents are not installed, grep the previous flows list and finish this test case
            main.log.warn( "Some intents did not install" )
- # We don't want to check flows if intents not installed, because onos will drop flows
- if currFlows == 0:
- # If currFlows equal 0, which means we failed to install intents at first, or we didn't get
- # the correct number, so we need get flows here.
- flowsState = json.loads( main.ONOSrest1.flows() )
+ main.log.info("Total Intents: {}".format( totalIntents) )
+ temp = 0
+ totalFlows = 0
+ if temp < totalFlows:
+ temp = totalFlows
+ else:
+ totalFlows = main.CLIs[0].getTotalFlowsNum(timeout=600, noExit=True)
break
-
- main.log.info("Verify Flows states")
- k = 1
- flowsVerify = main.TRUE
- while k <= main.verifyAttempts:
- # while loop for check flows by using REST api
- time.sleep(3)
- temp = 0
- flowsStateCount = []
- flowsState = json.loads( main.ONOSrest1.flows() )
- main.log.info("Total flows now: {}".format(len(flowsState)))
- if ( flowsNum < len(flowsState) ):
- flowsNum = len(flowsState)
- print(flowsNum)
- for f in flowsState:
- # get PENDING_ADD flows
- if f.get("state") == "PENDING_ADD":
- temp = temp + 1
-
- flowsStateCount.append(temp)
- temp = 0
-
- for f in flowsState:
- # get PENDING_REMOVE flows
- if f.get("state") == "PENDING_REMOVE":
- temp = temp + 1
-
- flowsStateCount.append(temp)
- temp = 0
-
- for f in flowsState:
- # get REMOVED flows
- if f.get("state") == "REMOVED":
- temp = temp + 1
-
- flowsStateCount.append(temp)
- temp = 0
-
- for f in flowsState:
- # get FAILED flwos
- if f.get("state") == "FAILED":
- temp = temp + 1
-
- flowsStateCount.append(temp)
- temp = 0
- k = k + 1
- for c in flowsStateCount:
- if int(c) > 0:
- flowsVerify = main.FALSE
-
- main.log.info( "Check flows States:" )
- main.log.info( "PENDING_ADD: {}".format( flowsStateCount[0]) )
- main.log.info( "PENDING_REMOVE: {}".format( flowsStateCount[1]) )
- main.log.info( "REMOVED: {}".format( flowsStateCount[2]) )
- main.log.info( "FAILED: {}".format( flowsStateCount[3]) )
-
- if flowsVerify == main.TRUE:
- break
-
+ # We don't want to check flows if intents not installed, because onos will drop flows
del main.scale[0]
utilities.assert_equals( expect = main.TRUE,
- actual = intentVerify,
+ actual = intentsState,
onpass = "Successfully pushed and verified intents",
onfail = "Failed to push and verify intents" )
- # we need the total intents before crash
- totalIntents = len(intentsState)
- totalFlows = flowsNum
-
main.log.info( "Total Intents Installed before crash: {}".format( totalIntents ) )
main.log.info( "Total Flows ADDED before crash: {}".format( totalFlows ) )
main.step('clean up Mininet')
main.Mininet1.stopNet()
-
main.log.info("Writing results to DS file")
with open(main.dbFileName, "a") as dbFile:
# Scale number
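A condensed sketch of the verification pattern the rewritten loop above implements, inside a TestON case and using the same ONOS CLI helpers the patch already calls; the bookkeeping is simplified and the attempt/timeout values mirror the ones above.

import time

verified = main.FALSE
totalIntents = 0
k = 1
while k <= main.verifyAttempts:
    time.sleep( 5 )
    if main.CLIs[ 0 ].checkIntentSummary( timeout=600 ):
        verified = main.TRUE
        totalIntents = main.CLIs[ 0 ].getTotalIntentsNum( timeout=600 )
        break
    k += 1
if not verified:
    main.log.warn( "Some intents did not install" )
main.log.info( "Total Intents: {}".format( totalIntents ) )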
diff --git a/TestON/tests/SCPF/SCPFscalingMaxIntents/dependencies/__init__.py b/TestON/tests/SCPF/SCPFscalingMaxIntents/dependencies/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/TestON/tests/SCPF/SCPFscalingMaxIntents/dependencies/__init__.py
diff --git a/TestON/tests/SCPF/SCPFscalingMaxIntentsWithFlowObj/SCPFscalingMaxIntentsWithFlowObj.params b/TestON/tests/SCPF/SCPFscalingMaxIntentsWithFlowObj/SCPFscalingMaxIntentsWithFlowObj.params
index ed0badf..8083e7a 100644
--- a/TestON/tests/SCPF/SCPFscalingMaxIntentsWithFlowObj/SCPFscalingMaxIntentsWithFlowObj.params
+++ b/TestON/tests/SCPF/SCPFscalingMaxIntentsWithFlowObj/SCPFscalingMaxIntentsWithFlowObj.params
@@ -13,14 +13,14 @@
<SCALE>1,3,5,7</SCALE>
<DEPENDENCY>
- <path>/tests/SCPFscalingMaxIntents/dependencies/</path>
+ <path>/tests/SCPF/SCPFscalingMaxIntentsWithFlowObj/dependencies/</path>
<wrapper1>startUp</wrapper1>
<topology>rerouteTopo.py</topology>
</DEPENDENCY>
<ENV>
<cellName>productionCell</cellName>
- <cellApps>drivers</cellApps>
+ <cellApps>drivers,openflow</cellApps>
</ENV>
<GIT>
@@ -61,10 +61,10 @@
<NULL>
# CASE20
<PUSH>
- <batch_size>100</batch_size>
- <min_intents>100</min_intents>
- <max_intents>100000</max_intents>
- <check_interval>100</check_interval>
+ <batch_size>1000</batch_size>
+ <min_intents>10000</min_intents>
+ <max_intents>70000</max_intents>
+ <check_interval>10000</check_interval>
</PUSH>
# if reroute is true
diff --git a/TestON/tests/SCPF/SCPFscalingMaxIntentsWithFlowObj/SCPFscalingMaxIntentsWithFlowObj.py b/TestON/tests/SCPF/SCPFscalingMaxIntentsWithFlowObj/SCPFscalingMaxIntentsWithFlowObj.py
index e405fd3..3c35012 100644
--- a/TestON/tests/SCPF/SCPFscalingMaxIntentsWithFlowObj/SCPFscalingMaxIntentsWithFlowObj.py
+++ b/TestON/tests/SCPF/SCPFscalingMaxIntentsWithFlowObj/SCPFscalingMaxIntentsWithFlowObj.py
@@ -3,7 +3,7 @@
import time
import os
'''
-SCPFscalingMaxIntentsWithFlowObj
+SCPFscalingMaxIntents
Push test Intents to onos
CASE10: set up Null Provider
CASE11: set up Open Flows
@@ -54,10 +54,6 @@
main.reroute = False
main.CLIs = []
- main.ONOSip = []
- main.maxNumBatch = 0
- main.ONOSip = main.ONOSbench.getOnosIps()
- main.log.info(main.ONOSip)
main.setupSkipped = False
wrapperFile1 = main.params[ 'DEPENDENCY' ][ 'wrapper1' ]
@@ -82,6 +78,13 @@
# main.scale[ 0 ] determines the current number of ONOS controller
main.CLIs = []
main.numCtrls = int( main.scale[ 0 ] )
+ main.ONOSip = []
+ main.maxNumBatch = 0
+ main.AllONOSip = main.ONOSbench.getOnosIps()
+ for i in range(main.numCtrls):
+ main.ONOSip.append(main.AllONOSip[i])
+ main.log.info(main.ONOSip)
+
main.log.info( "Creating list of ONOS cli handles" )
for i in range(main.numCtrls):
main.CLIs.append( getattr( main, 'ONOScli%s' % (i+1) ) )
@@ -301,25 +304,16 @@
Setting up mininet
'''
import json
- import time
+ import time
+ devices = []
+ devices = main.CLIs[0].getAllDevicesId()
+ for d in devices:
+ main.CLIs[0].deviceRemove( d )
- time.sleep(main.startUpSleep)
-
- main.step("Activating openflow")
- appStatus = utilities.retry( main.ONOSrest1.activateApp,
- main.FALSE,
- ['org.onosproject.openflow'],
- sleep=3,
- attempts=3 )
- utilities.assert_equals( expect=main.TRUE,
- actual=appStatus,
- onpass="Successfully activated openflow",
- onfail="Failed activate openflow" )
-
- time.sleep(main.startUpSleep)
main.log.info("Set Intent Compiler use Flow Object")
- main.CLIs[0].setCfg( "org.onosproject.net.intent.impl.compiler.IntentConfigurableRegistrator", "useFlowObjectives", "true" )
-
+ main.CLIs[0].setCfg("org.onosproject.net.intent.impl.compiler.IntentConfigurableRegistrator",
+ "useFlowObjectives", "true")
+ time.sleep(main.startUpSleep)
main.step('Starting mininet topology')
mnStatus = main.Mininet1.startNet(topoFile='~/mininet/custom/rerouteTopo.py')
utilities.assert_equals( expect=main.TRUE,
@@ -336,13 +330,26 @@
onfail="Failed assign switches to masters" )
time.sleep(main.startUpSleep)
+ # Balancing Masters
+ main.step( "Balancing Masters" )
+ stepResult = main.FALSE
+ stepResult = utilities.retry( main.CLIs[0].balanceMasters,
+ main.FALSE,
+ [],
+ sleep=3,
+ attempts=3 )
+
+ utilities.assert_equals( expect=main.TRUE,
+ actual=stepResult,
+                             onpass="Balance masters was successful",
+ onfail="Failed to balance masters" )
main.log.info("Getting default flows")
jsonSum = json.loads(main.CLIs[0].summary())
main.defaultFlows = jsonSum["flows"]
main.step("Check status of Mininet setup")
- caseResult = appStatus and mnStatus and swStatus
+ caseResult = mnStatus and swStatus
utilities.assert_equals( expect=main.TRUE,
actual=caseResult,
onpass="Successfully setup Mininet",
@@ -396,6 +403,7 @@
# keeps track of how many flows have been installed, set to 0 at start
currFlows = 0
# limit for the number of intents that can be installed
+ main.batchSize = int( int(main.batchSize)/int(main.numCtrls))
limit = main.maxIntents / main.batchSize
# total intents installed
totalIntents = 0
@@ -407,7 +415,12 @@
stepResult = main.TRUE
# temp variable to contain the number of flows
flowsNum = 0
+ if main.numCtrls > 1:
+            # with more than one ONOS node, check more frequently
+ main.checkInterval = main.checkInterval/4
+            # make sure checkInterval is a multiple of batchSize
+ main.checkInterval = int( int( main.checkInterval / main.batchSize ) * main.batchSize )
for i in range(limit):
# Threads pool
@@ -427,7 +440,8 @@
kwargs={ "offset": offtmp,
"options": "-i",
"timeout": main.timeout,
- "background":False } )
+ "background":False,
+ "noExit":True} )
pool.append(t)
t.start()
main.threadID = main.threadID + 1
@@ -444,108 +458,45 @@
main.log.info("Verify Intents states")
# k is a control variable for verify retry attempts
k = 1
- intentVerify = main.FALSE
while k <= main.verifyAttempts:
# while loop for check intents by using REST api
time.sleep(5)
temp = 0
- intentsState = json.loads( main.ONOSrest1.intents() )
- for f in intentsState:
- # get INSTALLED intents number
- if f.get("state") == "INSTALLED":
- temp = temp + 1
-
- main.log.info("Total Intents: {} INSTALLED: {}".format(totalIntents, temp))
- if totalIntents == temp:
- intentVerify = main.TRUE
+ intentsState = main.CLIs[0].checkIntentSummary(timeout=600)
+ if intentsState:
+ totalIntents = main.CLIs[0].getTotalIntentsNum(timeout=600)
+ if temp < totalIntents:
+ temp = totalIntents
+ else:
+ totalIntents = temp
break
- intentVerify = main.FALSE
+ main.log.info("Total Intents: {}".format( totalIntents) )
k = k+1
- if not intentVerify:
+
+ if not intentsState:
            # If some intents are not installed, grep the previous flows list and finish this test case
            main.log.warn( "Some intents did not install" )
- # We don't want to check flows if intents not installed, because onos will drop flows
- if currFlows == 0:
- # If currFlows equal 0, which means we failed to install intents at first, or we didn't get
- # the correct number, so we need get flows here.
- flowsState = json.loads( main.ONOSrest1.flows() )
+ main.log.info("Total Intents: {}".format( totalIntents) )
+ temp = 0
+ totalFlows = 0
+ if temp < totalFlows:
+ temp = totalFlows
+ else:
+ totalFlows = main.CLIs[0].getTotalFlowsNum(timeout=600, noExit=True)
break
-
- main.log.info("Verify Flows states")
- k = 1
- flowsVerify = main.TRUE
- while k <= main.verifyAttempts:
- # while loop for check flows by using REST api
- time.sleep(3)
- temp = 0
- flowsStateCount = []
- flowsState = json.loads( main.ONOSrest1.flows() )
- main.log.info("Total flows now: {}".format(len(flowsState)))
- if ( flowsNum < len(flowsState) ):
- flowsNum = len(flowsState)
- print(flowsNum)
- for f in flowsState:
- # get PENDING_ADD flows
- if f.get("state") == "PENDING_ADD":
- temp = temp + 1
-
- flowsStateCount.append(temp)
- temp = 0
-
- for f in flowsState:
- # get PENDING_REMOVE flows
- if f.get("state") == "PENDING_REMOVE":
- temp = temp + 1
-
- flowsStateCount.append(temp)
- temp = 0
-
- for f in flowsState:
- # get REMOVED flows
- if f.get("state") == "REMOVED":
- temp = temp + 1
-
- flowsStateCount.append(temp)
- temp = 0
-
- for f in flowsState:
- # get FAILED flwos
- if f.get("state") == "FAILED":
- temp = temp + 1
-
- flowsStateCount.append(temp)
- temp = 0
- k = k + 1
- for c in flowsStateCount:
- if int(c) > 0:
- flowsVerify = main.FALSE
-
- main.log.info( "Check flows States:" )
- main.log.info( "PENDING_ADD: {}".format( flowsStateCount[0]) )
- main.log.info( "PENDING_REMOVE: {}".format( flowsStateCount[1]) )
- main.log.info( "REMOVED: {}".format( flowsStateCount[2]) )
- main.log.info( "FAILED: {}".format( flowsStateCount[3]) )
-
- if flowsVerify == main.TRUE:
- break
-
+ # We don't want to check flows if intents not installed, because onos will drop flows
del main.scale[0]
utilities.assert_equals( expect = main.TRUE,
- actual = intentVerify,
+ actual = intentsState,
onpass = "Successfully pushed and verified intents",
onfail = "Failed to push and verify intents" )
- # we need the total intents before crash
- totalIntents = len(intentsState)
- totalFlows = flowsNum
-
main.log.info( "Total Intents Installed before crash: {}".format( totalIntents ) )
main.log.info( "Total Flows ADDED before crash: {}".format( totalFlows ) )
main.step('clean up Mininet')
main.Mininet1.stopNet()
-
main.log.info("Writing results to DS file")
with open(main.dbFileName, "a") as dbFile:
# Scale number