Merge "Add exception handling for parseFlowTable in MininetCliDriver"
diff --git a/TestON/drivers/common/api/controller/onosrestdriver.py b/TestON/drivers/common/api/controller/onosrestdriver.py
index 1b7c8fd..08f242d 100644
--- a/TestON/drivers/common/api/controller/onosrestdriver.py
+++ b/TestON/drivers/common/api/controller/onosrestdriver.py
@@ -431,7 +431,7 @@
main.exit()
def addHostIntent( self, hostIdOne, hostIdTwo, appId='org.onosproject.cli',
- ip="DEFAULT", port="DEFAULT" ):
+ ip="DEFAULT", port="DEFAULT", vlanId="" ):
"""
Description:
Adds a host-to-host intent ( bidirectional ) by
@@ -454,6 +454,9 @@
"constraints": [{"type": "LinkTypeConstraint",
"types": ["OPTICAL"],
"inclusive": 'false' }]}
+ if vlanId:
+ intentJson[ 'selector' ][ 'criteria' ].append( { "type":"VLAN_VID",
+ "vlanId":vlanId } )
output = None
if ip == "DEFAULT":
main.log.warn( "No ip given, reverting to ip from topo file" )
@@ -501,7 +504,8 @@
tcpSrc="",
tcpDst="",
ip="DEFAULT",
- port="DEFAULT" ):
+ port="DEFAULT",
+ vlanId="" ):
"""
Description:
Adds a point-to-point intent ( uni-directional ) by
@@ -599,6 +603,10 @@
intentJson[ 'selector' ][ 'criteria' ].append(
{ "type":"IP_PROTO",
"protocol": ipProto } )
+ if vlanId:
+ intentJson[ 'selector' ][ 'criteria' ].append(
+ { "type":"VLAN_VID",
+ "vlanId": vlanId } )
# TODO: Bandwidth and Lambda will be implemented if needed
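
A hedged usage sketch of the extended addHostIntent signature, mirroring how FuncIntentFunction.py invokes it further down (the host IDs and VLAN value are illustrative):

    # main.CLIs[ 0 ] is assumed to hold an instance of this REST driver
    intentId = main.CLIs[ 0 ].addHostIntent( hostIdOne="00:00:00:00:00:04/100",
                                             hostIdTwo="00:00:00:00:00:0C/100",
                                             vlanId="100" )
    # With the default vlanId="", no VLAN_VID criterion is appended
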
diff --git a/TestON/drivers/common/cli/onosdriver.py b/TestON/drivers/common/cli/onosdriver.py
old mode 100644
new mode 100755
index 1aed954..f3de98f
--- a/TestON/drivers/common/cli/onosdriver.py
+++ b/TestON/drivers/common/cli/onosdriver.py
@@ -25,7 +25,6 @@
from requests.models import Response
from drivers.common.clidriver import CLI
-
class OnosDriver( CLI ):
def __init__( self ):
@@ -1547,6 +1546,30 @@
cmd="flows > "+ str( destDir ) + str( filename ) + localtime
return self.onosCli(ONOSIp,cmd)
+ def dumpGroups( self, ONOSIp, destDir, filename="groups" ):
+ """
+ Dump Group Tables to a desired directory.
+ For debugging purposes, you may want to use
+ this function to capture groups at a given point in time.
+ Localtime will be attached to the filename
+
+ Required:
+ * ONOSIp: the IP of the target ONOS instance
+ * destDir: specify directory to copy to.
+ ex ) /tmp/
+ Optional:
+ * filename: Name of the file
+ """
+
+ localtime = time.strftime( '%x %X' )
+ localtime = localtime.replace( "/", "" )
+ localtime = localtime.replace( " ", "_" )
+ localtime = localtime.replace( ":", "" )
+ if destDir[ -1: ] != "/":
+ destDir += "/"
+ cmd="groups > "+ str( destDir ) + str( filename ) + localtime
+ return self.onosCli(ONOSIp,cmd)
+
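
As a quick sanity check of the timestamp suffix built above (the exact value depends on locale and the current time):

    import time
    localtime = time.strftime( '%x %X' )       # e.g. '08/31/16 14:32:05'
    localtime = localtime.replace( "/", "" )   # '083116 14:32:05'
    localtime = localtime.replace( " ", "_" )  # '083116_14:32:05'
    localtime = localtime.replace( ":", "" )   # '083116_143205'
    # so with destDir='/tmp/' the dump lands in /tmp/groups083116_143205
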
def cpLogsToDir( self, logToCopy,
destDir, copyFileName="" ):
"""
@@ -2207,7 +2230,7 @@
return passed
- def getIpAddr( self ):
+ def getIpAddr( self, iface=None ):
"""
Update self.ip_address with numerical ip address. If multiple IP's are
located on the device, will attempt to use self.nicAddr to choose the
@@ -2217,7 +2240,7 @@
ONLY WORKS WITH IPV4 ADDRESSES
"""
try:
- localhost = "127.0.0.1"
+ LOCALHOST = "127.0.0.1"
ipPat = "\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}"
pattern = re.compile( ipPat )
match = re.search( pattern, self.ip_address )
@@ -2234,23 +2257,41 @@
nicMatch = re.search( nicPat, curIp )
if nicMatch:
return self.ip_address
- # ELSE: attempt to get correct address.
- raw = subprocess.check_output( "ifconfig")
+ # ELSE: IF iface, return ip of interface
+ cmd = "ifconfig"
+ if iface:
+ cmd += " " + str( iface )
+ raw = subprocess.check_output( cmd.split() )
ifPat = re.compile( "inet addr:({})".format( ipPat ) )
ips = re.findall( ifPat, raw )
+ if iface:
+ if ips:
+ ip = ips[0]
+ self.ip_address = ip
+ return ip
+ else:
+ main.log.error( "Error finding ip, ifconfig output:".format( raw ) )
+ # ELSE: attempt to get address matching nicPat.
if nicPat:
for ip in ips:
curMatch = re.search( nicPat, ip )
if curMatch:
self.ip_address = ip
return ip
- else:
- tmpList = [ ip for ip in ips if ip is not localhost ]
+ else: # If only one non-localhost ip, return that
+ tmpList = [ ip for ip in ips if ip != LOCALHOST ]
if len(tmpList) == 1:
curIp = tmpList[0]
self.ip_address = curIp
return curIp
- return localhost
+ # Either no non-localhost IPs, or more than 1
+ main.log.warn( "getIpAddr failed to find a public IP address" )
+ return LOCALHOST
+ except subprocess.CalledProcessError:
+ main.log.exception( "Error executing ifconfig" )
+ except IndexError:
+ main.log.exception( "Error getting IP Address" )
except Exception:
main.log.exception( "Uncaught exception" )
diff --git a/TestON/logs/.gitignore b/TestON/logs/.gitignore
new file mode 100644
index 0000000..5e7d273
--- /dev/null
+++ b/TestON/logs/.gitignore
@@ -0,0 +1,4 @@
+# Ignore everything in this directory
+*
+# Except this file
+!.gitignore
diff --git a/TestON/logs/placeHolder.txt b/TestON/logs/placeHolder.txt
deleted file mode 100644
index 3b18e51..0000000
--- a/TestON/logs/placeHolder.txt
+++ /dev/null
@@ -1 +0,0 @@
-hello world
diff --git a/TestON/tests/FUNC/FUNCintentRest/FUNCintentRest.py b/TestON/tests/FUNC/FUNCintentRest/FUNCintentRest.py
index f48ae3b..60fc47f 100644
--- a/TestON/tests/FUNC/FUNCintentRest/FUNCintentRest.py
+++ b/TestON/tests/FUNC/FUNCintentRest/FUNCintentRest.py
@@ -941,8 +941,8 @@
main.step( "VLAN1: Add vlan host intents between h4 and h12" )
main.assertReturnString = "Assertion Result vlan IPV4\n"
- host1 = { "name":"h4","id":"00:00:00:00:00:04/100" }
- host2 = { "name":"h12","id":"00:00:00:00:00:0C/100" }
+ host1 = { "name":"h4","id":"00:00:00:00:00:04/100", "vlanId":"100" }
+ host2 = { "name":"h12","id":"00:00:00:00:00:0C/100", "vlanId":"100" }
testResult = main.FALSE
installResult = main.intentFunction.installHostIntent( main,
name='VLAN1',
@@ -966,32 +966,33 @@
onpass=main.assertReturnString,
onfail=main.assertReturnString )
- main.step( "VLAN2: Add inter vlan host intents between h13 and h20" )
- main.assertReturnString = "Assertion Result different VLAN negative test\n"
- host1 = { "name":"h13" }
- host2 = { "name":"h20" }
- testResult = main.FALSE
- installResult = main.intentFunction.installHostIntent( main,
- name='VLAN2',
- onosNode='0',
- host1=host1,
- host2=host2 )
+ # This step isn't currently possible to perform in the REST API
+ # main.step( "VLAN2: Add inter vlan host intents between h13 and h20" )
+ # main.assertReturnString = "Assertion Result different VLAN negative test\n"
+ # host1 = { "name":"h13" }
+ # host2 = { "name":"h20" }
+ # testResult = main.FALSE
+ # installResult = main.intentFunction.installHostIntent( main,
+ # name='VLAN2',
+ # onosNode='0',
+ # host1=host1,
+ # host2=host2 )
- if installResult:
- testResult = main.intentFunction.testHostIntent( main,
- name='VLAN2',
- intentId = installResult,
- onosNode='0',
- host1=host1,
- host2=host2,
- sw1='s5',
- sw2='s2',
- expectedLink = 18 )
+ # if installResult:
+ # testResult = main.intentFunction.testHostIntent( main,
+ # name='VLAN2',
+ # intentId = installResult,
+ # onosNode='0',
+ # host1=host1,
+ # host2=host2,
+ # sw1='s5',
+ # sw2='s2',
+ # expectedLink = 18 )
- utilities.assert_equals( expect=main.TRUE,
- actual=testResult,
- onpass=main.assertReturnString,
- onfail=main.assertReturnString )
+ # utilities.assert_equals( expect=main.TRUE,
+ # actual=testResult,
+ # onpass=main.assertReturnString,
+ # onfail=main.assertReturnString )
# Change the following to use the REST API when leader checking is
# supported by it
@@ -1194,7 +1195,8 @@
recipients=recipients,
sw1="s5",
sw2="s2",
- expectedLink=18)
+ expectedLink=18,
+ useTCP=True )
utilities.assert_equals( expect=main.TRUE,
actual=testResult,
@@ -1266,23 +1268,22 @@
main.step( "VLAN: Add point intents between h5 and h21" )
main.assertReturnString = "Assertion Result for VLAN IPV4 with mac address point intents\n"
senders = [
- { "name":"h5","device":"of:0000000000000005/5","mac":"00:00:00:00:00:05" }
+ { "name":"h5","device":"of:0000000000000005/5","mac":"00:00:00:00:00:05", "vlanId":"200" }
]
recipients = [
- { "name":"h21","device":"of:0000000000000007/5","mac":"00:00:00:00:00:15" }
+ { "name":"h21","device":"of:0000000000000007/5","mac":"00:00:00:00:00:15", "vlanId":"200" }
]
installResult = main.intentFunction.installPointIntent(
main,
- name="DUALSTACK1",
+ name="VLAN",
senders=senders,
- recipients=recipients,
- ethType="IPV4" )
+ recipients=recipients )
if installResult:
testResult = main.intentFunction.testPointIntent(
main,
intentId=installResult,
- name="DUALSTACK1",
+ name="VLAN",
senders=senders,
recipients=recipients,
sw1="s5",
@@ -1294,6 +1295,8 @@
onpass=main.assertReturnString,
onfail=main.assertReturnString )
+ # TODO: implement VLAN selector REST API intent test once supported
+
main.step( "1HOP: Add point intents between h1 and h3" )
main.assertReturnString = "Assertion Result for 1HOP IPV4 with no mac address point intents\n"
senders = [
diff --git a/TestON/tests/FUNC/FUNCintentRest/dependencies/FuncIntentFunction.py b/TestON/tests/FUNC/FUNCintentRest/dependencies/FuncIntentFunction.py
index 4ad42b2..cefb077 100644
--- a/TestON/tests/FUNC/FUNCintentRest/dependencies/FuncIntentFunction.py
+++ b/TestON/tests/FUNC/FUNCintentRest/dependencies/FuncIntentFunction.py
@@ -75,12 +75,14 @@
if not host2.get( "id" ):
main.log.warn( "ID not given for host2 {0}. Loading from main.hostData".format( host2.get( "name" ) ) )
host2[ "id" ] = main.hostsData.get( host2.get( "name" ) ).get( "id" )
+ vlanId = host1.get( "vlanId" )
# Adding host intents
main.log.info( itemName + ": Adding host intents" )
intent1 = main.CLIs[ onosNode ].addHostIntent( hostIdOne=host1.get( "id" ),
- hostIdTwo=host2.get( "id" ) )
+ hostIdTwo=host2.get( "id" ),
+ vlanId=vlanId )
# Get all intents ID in the system, time delay right after intents are added
time.sleep( main.addIntentSleep )
@@ -175,6 +177,7 @@
senderNames = [ host1.get( "name" ), host2.get( "name" ) ]
recipientNames = [ host1.get( "name" ), host2.get( "name" ) ]
+ vlanId = host1.get( "vlanId" )
except ( KeyError, TypeError ):
main.log.error( "There was a problem loading the hosts data." )
return main.FALSE
@@ -197,7 +200,7 @@
testResult = main.FALSE
# Check Connectivity
- if utilities.retry( f=scapyCheckConnection, retValue=main.FALSE, args=( main, senderNames, recipientNames ) ):
+ if utilities.retry( f=scapyCheckConnection, retValue=main.FALSE, args=( main, senderNames, recipientNames, vlanId ) ):
main.assertReturnString += 'Initial Ping Passed\n'
else:
main.assertReturnString += 'Initial Ping Failed\n'
@@ -234,7 +237,7 @@
testResult = main.FALSE
# Check Connection
- if utilities.retry( f=scapyCheckConnection, retValue=main.FALSE, args=( main, senderNames, recipientNames ) ):
+ if utilities.retry( f=scapyCheckConnection, retValue=main.FALSE, args=( main, senderNames, recipientNames, vlanId ) ):
main.assertReturnString += 'Link Down Pingall Passed\n'
else:
main.assertReturnString += 'Link Down Pingall Failed\n'
@@ -272,7 +275,7 @@
testResult = main.FALSE
# Check Connection
- if utilities.retry( f=scapyCheckConnection, retValue=main.FALSE, args=( main, senderNames, recipientNames ) ):
+ if utilities.retry( f=scapyCheckConnection, retValue=main.FALSE, args=( main, senderNames, recipientNames, vlanId ) ):
main.assertReturnString += 'Link Up Pingall Passed\n'
else:
main.assertReturnString += 'Link Up Pingall Failed\n'
@@ -357,6 +360,7 @@
if not recipient.get( "device" ):
main.log.warn( "Device not given for recipient {0}. Loading from main.hostData".format( recipient.get( "name" ) ) )
recipient[ "device" ] = main.hostsData.get( recipient.get( "name" ) ).get( "location" )
+ vlanId = senders[ 0 ].get( "vlanId" )
ingressDevice = senders[ 0 ].get( "device" )
@@ -387,7 +391,8 @@
ipSrc=ipSrc,
ipDst=ipDst,
tcpSrc=tcpSrc,
- tcpDst=tcpDst )
+ tcpDst=tcpDst,
+ vlanId=vlanId )
time.sleep( main.addIntentSleep )
intentsId = main.CLIs[ 0 ].getIntentsId()
@@ -422,7 +427,8 @@
tcp="",
sw1="s5",
sw2="s2",
- expectedLink=0):
+ expectedLink=0,
+ useTCP=False ):
"""
Test a Point Intent
@@ -500,6 +506,7 @@
if not recipient.get( "device" ):
main.log.warn( "Device not given for recipient {0}. Loading from main.hostData".format( recipient.get( "name" ) ) )
recipient[ "device" ] = main.hostsData.get( recipient.get( "name" ) ).get( "location" )
+ vlanId = senders[ 0 ].get( "vlanId" )
except (KeyError, TypeError):
main.log.error( "There was a problem loading the hosts data." )
return main.FALSE
@@ -522,7 +529,7 @@
testResult = main.FALSE
# Check Connectivity
- if utilities.retry( f=scapyCheckConnection, retValue=main.FALSE, args=( main, senderNames, recipientNames ) ):
+ if utilities.retry( f=scapyCheckConnection, retValue=main.FALSE, args=( main, senderNames, recipientNames, vlanId, useTCP ) ):
main.assertReturnString += 'Initial Ping Passed\n'
else:
main.assertReturnString += 'Initial Ping Failed\n'
@@ -576,7 +583,7 @@
testResult = main.FALSE
# Check Connection
- if utilities.retry( f=scapyCheckConnection, retValue=main.FALSE, args=( main, senderNames, recipientNames ) ):
+ if utilities.retry( f=scapyCheckConnection, retValue=main.FALSE, args=( main, senderNames, recipientNames, vlanId, useTCP ) ):
main.assertReturnString += 'Link Down Pingall Passed\n'
else:
main.assertReturnString += 'Link Down Pingall Failed\n'
@@ -614,7 +621,7 @@
testResult = main.FALSE
# Check Connection
- if utilities.retry( f=scapyCheckConnection, retValue=main.FALSE, args=( main, senderNames, recipientNames ) ):
+ if utilities.retry( f=scapyCheckConnection, retValue=main.FALSE, args=( main, senderNames, recipientNames, vlanId, useTCP ) ):
main.assertReturnString += 'Link Up Scapy Packet Received Passed\n'
else:
+ main.assertReturnString += 'Link Up Scapy Packet Received Failed\n'
@@ -1616,7 +1623,7 @@
main.log.error( "TypeError while populating hostsData" )
return main.FALSE
-def scapyCheckConnection( main, senders, recipients, packet=None, packetFilter=None, expectFailure=False ):
+def scapyCheckConnection( main, senders, recipients, vlanId=None, useTCP=False, packet=None, packetFilter=None, expectFailure=False ):
"""
Checks the connectivity between all given sender hosts and all given recipient hosts
Packet may be specified. Defaults to Ether/IP packet
@@ -1630,7 +1637,8 @@
if not packetFilter:
packetFilter = 'ether host {}'
-
+ if useTCP:
+ packetFilter += ' ip proto \\tcp tcp port {}'.format(main.params[ 'SDNIP' ][ 'dstPort' ])
if expectFailure:
timeout = 1
else:
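
For reference, the composed capture filter when useTCP is set (the port value stands in for main.params[ 'SDNIP' ][ 'dstPort' ] and is illustrative):

    packetFilter = 'ether host {}'
    packetFilter += ' ip proto \\tcp tcp port {}'.format( '5001' )
    print( packetFilter.format( '00:00:00:00:00:05' ) )
    # ether host 00:00:00:00:00:05 ip proto \tcp tcp port 5001
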
@@ -1655,17 +1663,31 @@
connectionsFunctional = main.FALSE
continue
- recipientComp.startFilter( pktFilter = packetFilter.format( senderComp.hostMac ) )
+ if vlanId:
+ recipientComp.startFilter( pktFilter = ( "vlan {}".format( vlanId ) + " && " + packetFilter.format( senderComp.hostMac ) ) )
+ else:
+ recipientComp.startFilter( pktFilter = packetFilter.format( senderComp.hostMac ) )
if not packet:
- pkt = 'Ether( src="{0}", dst="{2}" )/IP( src="{1}", dst="{3}" )'.format(
- senderComp.hostMac,
- senderComp.hostIp,
- recipientComp.hostMac,
- recipientComp.hostIp )
+ if vlanId:
+ pkt = 'Ether( src="{0}", dst="{2}" )/Dot1Q(vlan={4})/IP( src="{1}", dst="{3}" )'.format(
+ senderComp.hostMac,
+ senderComp.hostIp,
+ recipientComp.hostMac,
+ recipientComp.hostIp,
+ vlanId )
+ else:
+ pkt = 'Ether( src="{0}", dst="{2}" )/IP( src="{1}", dst="{3}" )'.format(
+ senderComp.hostMac,
+ senderComp.hostIp,
+ recipientComp.hostMac,
+ recipientComp.hostIp )
else:
pkt = packet
- senderComp.sendPacket( packet = pkt )
+ if vlanId:
+ senderComp.sendPacket( iface=( "{0}-eth0.{1}".format( sender, vlanId ) ), packet = pkt )
+ else:
+ senderComp.sendPacket( packet = pkt )
if recipientComp.checkFilter( timeout ):
if expectFailure:
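
A minimal Scapy sketch of the tagged packet the vlanId branch above constructs (addresses and VLAN value are illustrative; requires scapy):

    from scapy.all import Ether, Dot1Q, IP
    pkt = Ether( src="00:00:00:00:00:05", dst="00:00:00:00:00:15" ) / \
          Dot1Q( vlan=200 ) / \
          IP( src="10.0.0.5", dst="10.0.0.21" )
    # The matching receiver-side filter built by startFilter above would be:
    # "vlan 200 && ether host 00:00:00:00:00:05"
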
diff --git a/TestON/tests/HA/HAscaling/HAscaling.params b/TestON/tests/HA/HAscaling/HAscaling.params
index 5b5358d..388f432 100644
--- a/TestON/tests/HA/HAscaling/HAscaling.params
+++ b/TestON/tests/HA/HAscaling/HAscaling.params
@@ -17,10 +17,13 @@
#CASE15: Check that Leadership Election is still functional
#CASE16: Install Distributed Primitives app
#CASE17: Check for basic functionality with distributed primitives
- <testcases>1,[2,8,21,3,8,4,5,14,16,17]*1,[6,8,3,7,4,15,17,9,8,4,10,8,4,11,8,4,12,8,4]*13,13</testcases>
+ <testcases>1,2,8,21,3,8,4,5,14,16,17,[6,8,7,4,15,17]*9,9,8,4,10,8,4,11,8,4,12,8,4,13</testcases>
- <scaling>1,3b,3,5b,5,7b,7,7b,5,5b,3,3b,1</scaling>
- <serverPort>8000</serverPort>
+ <scaling>1,3b,5b,7b,7,7b,5b,3b,1</scaling>
+ <server>
+ <port>8000</port>
+ <interface>eth0</interface>
+ </server>
<apps></apps>
<ONOS_Configuration>
<org.onosproject.net.intent.impl.compiler.IntentConfigurableRegistrator>
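
TestON exposes the .params XML as nested dictionaries of strings; a sketch of how the new <server> block is read (the dict literal stands in for TestON's parsed params):

    params = { 'server': { 'port': '8000', 'interface': 'eth0' } }
    port = params[ 'server' ][ 'port' ]            # '8000' -- a string, not an int
    iface = params[ 'server' ].get( 'interface' )  # 'eth0', or None if the tag is absent
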
diff --git a/TestON/tests/HA/HAscaling/HAscaling.py b/TestON/tests/HA/HAscaling/HAscaling.py
index 3eb95c6..2064f1e 100644
--- a/TestON/tests/HA/HAscaling/HAscaling.py
+++ b/TestON/tests/HA/HAscaling/HAscaling.py
@@ -130,7 +130,7 @@
killResults = killResults and killed
main.step( "Setup server for cluster metadata file" )
- port = main.params['serverPort']
+ port = main.params['server']['port']
rootDir = os.path.dirname( main.testFile ) + "/dependencies"
main.log.debug( "Root dir: {}".format( rootDir ) )
status = main.Server.start( main.ONOSbench,
@@ -227,7 +227,8 @@
onfail="Copy backup config file failed" )
# we need to modify the onos-service file to use remote metadata file
# url for cluster metadata file
- ip = main.ONOSbench.getIpAddr()
+ iface = main.params['server'].get( 'interface' )
+ ip = main.ONOSbench.getIpAddr( iface=iface )
metaFile = "cluster.json"
javaArgs = r"-Donos.cluster.metadata.uri=http:\/\/{}:{}\/{}".format( ip, port, metaFile )
main.log.warn( javaArgs )
@@ -1835,7 +1836,7 @@
global data
data = []
- main.case( "Restart entire ONOS cluster" )
+ main.case( "Scale the number of nodes in the ONOS cluster" )
main.step( "Checking ONOS Logs for errors" )
for i in main.activeNodes:
diff --git a/TestON/tests/HA/HAswapNodes/HAswapNodes.params b/TestON/tests/HA/HAswapNodes/HAswapNodes.params
new file mode 100644
index 0000000..d3729a4
--- /dev/null
+++ b/TestON/tests/HA/HAswapNodes/HAswapNodes.params
@@ -0,0 +1,92 @@
+<PARAMS>
+ #CASE1: Compile ONOS and push it to the test machines
+ #CASE2: Assign devices to controllers
+ #CASE21: Assign mastership to controllers
+ #CASE3: Assign intents
+ #CASE4: Ping across added host intents
+ #CASE5: Reading state of ONOS
+ #CASE6: Swap nodes
+ #CASE7: Check state after control plane failure
+ #CASE8: Compare topo
+ #CASE9: Link s3-s28 down
+ #CASE10: Link s3-s28 up
+ #CASE11: Switch down
+ #CASE12: Switch up
+ #CASE13: Clean up
+ #CASE14: start election app on all onos nodes
+ #CASE15: Check that Leadership Election is still functional
+ #CASE16: Install Distributed Primitives app
+ #CASE17: Check for basic functionality with distributed primitives
+ <testcases>1,[2,8,21,3,8,4,5,14,16,17]*1,6,8,3,7,4,15,17,9,8,4,10,8,4,11,8,4,12,8,4,13</testcases>
+
+ <server>
+ <port>8000</port>
+ <interface>eth0</interface>
+ </server>
+ <apps></apps>
+ <ONOS_Configuration>
+ <org.onosproject.net.intent.impl.compiler.IntentConfigurableRegistrator>
+ <useFlowObjectives>true</useFlowObjectives>
+ </org.onosproject.net.intent.impl.compiler.IntentConfigurableRegistrator>
+ </ONOS_Configuration>
+ <ENV>
+ <cellName>HA</cellName>
+ <appString>drivers,openflow,proxyarp,mobility</appString>
+ </ENV>
+ <Git> False </Git>
+ <branch> master </branch>
+ <num_controllers> 7 </num_controllers>
+ <tcpdump> False </tcpdump>
+
+ <CTRL>
+ <port1>6653</port1>
+ <port2>6653</port2>
+ <port3>6653</port3>
+ <port4>6653</port4>
+ <port5>6653</port5>
+ <port6>6653</port6>
+ <port7>6653</port7>
+ </CTRL>
+ <BACKUP>
+ <ENABLED> False </ENABLED>
+ <TESTONUSER>sdn</TESTONUSER>
+ <TESTONIP>10.128.30.9</TESTONIP>
+ </BACKUP>
+ <PING>
+ <source1>h8</source1>
+ <source2>h9</source2>
+ <source3>h10</source3>
+ <source4>h11</source4>
+ <source5>h12</source5>
+ <source6>h13</source6>
+ <source7>h14</source7>
+ <source8>h15</source8>
+ <source9>h16</source9>
+ <source10>h17</source10>
+ <target1>10.0.0.18</target1>
+ <target2>10.0.0.19</target2>
+ <target3>10.0.0.20</target3>
+ <target4>10.0.0.21</target4>
+ <target5>10.0.0.22</target5>
+ <target6>10.0.0.23</target6>
+ <target7>10.0.0.24</target7>
+ <target8>10.0.0.25</target8>
+ <target9>10.0.0.26</target9>
+ <target10>10.0.0.27</target10>
+ </PING>
+ <timers>
+ <LinkDiscovery>12</LinkDiscovery>
+ <SwitchDiscovery>12</SwitchDiscovery>
+ <gossip>5</gossip>
+ </timers>
+ <kill>
+ <switch> s5 </switch>
+ <dpid> 0000000000005000 </dpid>
+ <links> h5 s2 s1 s6 </links>
+ </kill>
+ <MNtcpdump>
+ <intf>eth0</intf>
+ <port> </port>
+ <folder>~/packet_captures/</folder>
+ </MNtcpdump>
+</PARAMS>
diff --git a/TestON/tests/HA/HAswapNodes/HAswapNodes.py b/TestON/tests/HA/HAswapNodes/HAswapNodes.py
new file mode 100644
index 0000000..8ea1490
--- /dev/null
+++ b/TestON/tests/HA/HAswapNodes/HAswapNodes.py
@@ -0,0 +1,4289 @@
+"""
+Description: This test is to determine if ONOS can handle
+ dynamic swapping of cluster nodes.
+
+List of test cases:
+CASE1: Compile ONOS and push it to the test machines
+CASE2: Assign devices to controllers
+CASE21: Assign mastership to controllers
+CASE3: Assign intents
+CASE4: Ping across added host intents
+CASE5: Reading state of ONOS
+CASE6: Swap nodes
+CASE7: Check state after control plane failure
+CASE8: Compare topo
+CASE9: Link s3-s28 down
+CASE10: Link s3-s28 up
+CASE11: Switch down
+CASE12: Switch up
+CASE13: Clean up
+CASE14: start election app on all onos nodes
+CASE15: Check that Leadership Election is still functional
+CASE16: Install Distributed Primitives app
+CASE17: Check for basic functionality with distributed primitives
+"""
+
+
+class HAswapNodes:
+
+ def __init__( self ):
+ self.default = ''
+
+ def CASE1( self, main ):
+ """
+ CASE1 is to compile ONOS and push it to the test machines
+
+ Startup sequence:
+ cell <name>
+ onos-verify-cell
+ NOTE: temporary - onos-remove-raft-logs
+ onos-uninstall
+ start mininet
+ git pull
+ mvn clean install
+ onos-package
+ onos-install -f
+ onos-wait-for-start
+ start cli sessions
+ start tcpdump
+ """
+ import time
+ import os
+ import re
+ main.log.info( "ONOS HA test: Restart all ONOS nodes - " +
+ "initialization" )
+ main.case( "Setting up test environment" )
+ main.caseExplanation = "Setup the test environment including " +\
+ "installing ONOS, starting Mininet and ONOS" +\
+ "cli sessions."
+
+ # load some variables from the params file
+ PULLCODE = False
+ if main.params[ 'Git' ] == 'True':
+ PULLCODE = True
+ gitBranch = main.params[ 'branch' ]
+ cellName = main.params[ 'ENV' ][ 'cellName' ]
+
+ main.numCtrls = int( main.params[ 'num_controllers' ] )
+ if main.ONOSbench.maxNodes:
+ if main.ONOSbench.maxNodes < main.numCtrls:
+ main.numCtrls = int( main.ONOSbench.maxNodes )
+ # set global variables
+ # These are for csv plotting in jenkins
+ global labels
+ global data
+ labels = []
+ data = []
+
+ try:
+ from tests.HA.dependencies.HA import HA
+ main.HA = HA()
+ from tests.HA.HAswapNodes.dependencies.Server import Server
+ main.Server = Server()
+ except Exception as e:
+ main.log.exception( e )
+ main.cleanup()
+ main.exit()
+
+ main.CLIs = []
+ main.nodes = []
+ ipList = []
+ for i in range( 1, main.numCtrls + 1 ):
+ try:
+ main.CLIs.append( getattr( main, 'ONOScli' + str( i ) ) )
+ main.nodes.append( getattr( main, 'ONOS' + str( i ) ) )
+ ipList.append( main.nodes[ -1 ].ip_address )
+ except AttributeError:
+ break
+
+ main.step( "Create cell file" )
+ cellAppString = main.params[ 'ENV' ][ 'appString' ]
+ main.ONOSbench.createCellFile( main.ONOSbench.ip_address, cellName,
+ main.Mininet1.ip_address,
+ cellAppString, ipList )
+
+ main.step( "Applying cell variable to environment" )
+ cellResult = main.ONOSbench.setCell( cellName )
+ utilities.assert_equals( expect=main.TRUE, actual=cellResult,
+ onpass="Set cell successfull",
+ onfail="Failled to set cell" )
+
+ main.step( "Verify connectivity to cell" )
+ verifyResult = main.ONOSbench.verifyCell()
+ utilities.assert_equals( expect=main.TRUE, actual=verifyResult,
+ onpass="Verify cell passed",
+ onfail="Failled to verify cell" )
+
+ # FIXME:this is short term fix
+ main.log.info( "Removing raft logs" )
+ main.ONOSbench.onosRemoveRaftLogs()
+
+ main.log.info( "Uninstalling ONOS" )
+ for node in main.nodes:
+ main.ONOSbench.onosUninstall( node.ip_address )
+
+ # Make sure ONOS is DEAD
+ main.log.info( "Killing any ONOS processes" )
+ killResults = main.TRUE
+ for node in main.nodes:
+ killed = main.ONOSbench.onosKill( node.ip_address )
+ killResults = killResults and killed
+
+ main.step( "Setup server for cluster metadata file" )
+ port = main.params['server']['port']
+ rootDir = os.path.dirname( main.testFile ) + "/dependencies"
+ main.log.debug( "Root dir: {}".format( rootDir ) )
+ status = main.Server.start( main.ONOSbench,
+ rootDir,
+ port=port,
+ logDir=main.logdir + "/server.log" )
+ utilities.assert_equals( expect=main.TRUE, actual=status,
+ onpass="Server started",
+ onfail="Failled to start SimpleHTTPServer" )
+
+ main.step( "Generate initial metadata file" )
+ if main.numCtrls >= 5:
+ main.numCtrls -= 2
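+ # NOTE: hold two nodes in reserve; they are installed but not started
+ # below ( the "-nf" option ), presumably so CASE6 can swap them in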
+ else:
+ main.log.error( "Not enough ONOS nodes to run this test. Requires 5 or more" )
+ genResult = main.Server.generateFile( main.numCtrls )
+ utilities.assert_equals( expect=main.TRUE, actual=genResult,
+ onpass="New cluster metadata file generated",
+ onfail="Failled to generate new metadata file" )
+
+ cleanInstallResult = main.TRUE
+ gitPullResult = main.TRUE
+
+ main.step( "Starting Mininet" )
+ # scp topo file to mininet
+ # TODO: move to params?
+ topoName = "obelisk.py"
+ filePath = main.ONOSbench.home + "/tools/test/topos/"
+ main.ONOSbench.scp( main.Mininet1,
+ filePath + topoName,
+ main.Mininet1.home,
+ direction="to" )
+ mnResult = main.Mininet1.startNet( )
+ utilities.assert_equals( expect=main.TRUE, actual=mnResult,
+ onpass="Mininet Started",
+ onfail="Error starting Mininet" )
+
+ main.step( "Git checkout and pull " + gitBranch )
+ if PULLCODE:
+ main.ONOSbench.gitCheckout( gitBranch )
+ gitPullResult = main.ONOSbench.gitPull()
+ # values of 1 or 3 are good
+ utilities.assert_lesser( expect=0, actual=gitPullResult,
+ onpass="Git pull successful",
+ onfail="Git pull failed" )
+ main.ONOSbench.getVersion( report=True )
+
+ main.step( "Using mvn clean install" )
+ cleanInstallResult = main.TRUE
+ if PULLCODE and gitPullResult == main.TRUE:
+ cleanInstallResult = main.ONOSbench.cleanInstall()
+ else:
+ main.log.warn( "Did not pull new code so skipping mvn " +
+ "clean install" )
+ utilities.assert_equals( expect=main.TRUE,
+ actual=cleanInstallResult,
+ onpass="MCI successful",
+ onfail="MCI failed" )
+ # GRAPHS
+ # NOTE: important params here:
+ # job = name of Jenkins job
+ # Plot Name = Plot-HA, only can be used if multiple plots
+ # index = The number of the graph under plot name
+ job = "HAswapNodes"
+ plotName = "Plot-HA"
+ index = "0"
+ graphs = '<ac:structured-macro ac:name="html">\n'
+ graphs += '<ac:plain-text-body><![CDATA[\n'
+ graphs += '<iframe src="https://onos-jenkins.onlab.us/job/' + job +\
+ '/plot/' + plotName + '/getPlot?index=' + index +\
+ '&width=500&height=300"' +\
+ 'noborder="0" width="500" height="300" scrolling="yes" ' +\
+ 'seamless="seamless"></iframe>\n'
+ graphs += ']]></ac:plain-text-body>\n'
+ graphs += '</ac:structured-macro>\n'
+ main.log.wiki(graphs)
+
+ main.step( "Copying backup config files" )
+ path = "~/onos/tools/package/bin/onos-service"
+ cp = main.ONOSbench.scp( main.ONOSbench,
+ path,
+ path + ".backup",
+ direction="to" )
+
+ utilities.assert_equals( expect=main.TRUE,
+ actual=cp,
+ onpass="Copy backup config file succeeded",
+ onfail="Copy backup config file failed" )
+ # we need to modify the onos-service file to use remote metadata file
+ # url for cluster metadata file
+ iface = main.params['server'].get( 'interface' )
+ ip = main.ONOSbench.getIpAddr( iface=iface )
+ metaFile = "cluster.json"
+ javaArgs = r"-Donos.cluster.metadata.uri=http:\/\/{}:{}\/{}".format( ip, port, metaFile )
+ main.log.warn( javaArgs )
+ main.log.warn( repr( javaArgs ) )
+ handle = main.ONOSbench.handle
+ sed = r"sed -i 's/bash/bash\nexport JAVA_OPTS=${{JAVA_OPTS:-{}}}\n/' {}".format( javaArgs, path )
+ main.log.warn( sed )
+ main.log.warn( repr( sed ) )
+ handle.sendline( sed )
+ handle.expect( "\$" )
+ main.log.debug( repr( handle.before ) )
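+ # The sed above injects a line such as ( IP and port illustrative ):
+ # export JAVA_OPTS=${JAVA_OPTS:--Donos.cluster.metadata.uri=http://10.128.30.9:8000/cluster.json}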
+
+ main.step( "Creating ONOS package" )
+ packageResult = main.ONOSbench.onosPackage()
+ utilities.assert_equals( expect=main.TRUE, actual=packageResult,
+ onpass="ONOS package successful",
+ onfail="ONOS package failed" )
+
+ main.step( "Installing ONOS package" )
+ onosInstallResult = main.TRUE
+ for i in range( main.ONOSbench.maxNodes ):
+ node = main.nodes[i]
+ options = "-f"
+ if i >= main.numCtrls:
+ options = "-nf" # Don't start more than the current scale
+ tmpResult = main.ONOSbench.onosInstall( options=options,
+ node=node.ip_address )
+ onosInstallResult = onosInstallResult and tmpResult
+ utilities.assert_equals( expect=main.TRUE, actual=onosInstallResult,
+ onpass="ONOS install successful",
+ onfail="ONOS install failed" )
+
+ # Cleanup custom onos-service file
+ main.ONOSbench.scp( main.ONOSbench,
+ path + ".backup",
+ path,
+ direction="to" )
+
+ main.step( "Checking if ONOS is up yet" )
+ for i in range( 2 ):
+ onosIsupResult = main.TRUE
+ for i in range( main.numCtrls ):
+ node = main.nodes[i]
+ started = main.ONOSbench.isup( node.ip_address )
+ if not started:
+ main.log.error( node.name + " hasn't started" )
+ onosIsupResult = onosIsupResult and started
+ if onosIsupResult == main.TRUE:
+ break
+ utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
+ onpass="ONOS startup successful",
+ onfail="ONOS startup failed" )
+
+ main.step( "Starting ONOS CLI sessions" )
+ cliResults = main.TRUE
+ threads = []
+ for i in range( main.numCtrls ):
+ t = main.Thread( target=main.CLIs[i].startOnosCli,
+ name="startOnosCli-" + str( i ),
+ args=[main.nodes[i].ip_address] )
+ threads.append( t )
+ t.start()
+
+ for t in threads:
+ t.join()
+ cliResults = cliResults and t.result
+ utilities.assert_equals( expect=main.TRUE, actual=cliResults,
+ onpass="ONOS cli startup successful",
+ onfail="ONOS cli startup failed" )
+
+ # Create a list of active nodes for use when some nodes are stopped
+ main.activeNodes = [ i for i in range( 0, main.numCtrls ) ]
+
+ if main.params[ 'tcpdump' ].lower() == "true":
+ main.step( "Start Packet Capture MN" )
+ main.Mininet2.startTcpdump(
+ str( main.params[ 'MNtcpdump' ][ 'folder' ] ) + str( main.TEST )
+ + "-MN.pcap",
+ intf=main.params[ 'MNtcpdump' ][ 'intf' ],
+ port=main.params[ 'MNtcpdump' ][ 'port' ] )
+
+ main.step( "Checking ONOS nodes" )
+ nodeResults = utilities.retry( main.HA.nodesCheck,
+ False,
+ args=[main.activeNodes],
+ attempts=5 )
+ utilities.assert_equals( expect=True, actual=nodeResults,
+ onpass="Nodes check successful",
+ onfail="Nodes check NOT successful" )
+
+ if not nodeResults:
+ for i in main.activeNodes:
+ cli = main.CLIs[i]
+ main.log.debug( "{} components not ACTIVE: \n{}".format(
+ cli.name,
+ cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
+ main.log.error( "Failed to start ONOS, stopping test" )
+ main.cleanup()
+ main.exit()
+
+ main.step( "Activate apps defined in the params file" )
+ # get data from the params
+ apps = main.params.get( 'apps' )
+ if apps:
+ apps = apps.split(',')
+ main.log.warn( apps )
+ activateResult = True
+ for app in apps:
+ main.CLIs[ 0 ].app( app, "Activate" )
+ # TODO: check this worked
+ time.sleep( 10 ) # wait for apps to activate
+ for app in apps:
+ state = main.CLIs[ 0 ].appStatus( app )
+ if state == "ACTIVE":
+ activateResult = activateResult and True
+ else:
+ main.log.error( "{} is in {} state".format( app, state ) )
+ activateResult = False
+ utilities.assert_equals( expect=True,
+ actual=activateResult,
+ onpass="Successfully activated apps",
+ onfail="Failed to activate apps" )
+ else:
+ main.log.warn( "No apps were specified to be loaded after startup" )
+
+ main.step( "Set ONOS configurations" )
+ config = main.params.get( 'ONOS_Configuration' )
+ if config:
+ main.log.debug( config )
+ checkResult = main.TRUE
+ for component in config:
+ for setting in config[component]:
+ value = config[component][setting]
+ check = main.CLIs[ 0 ].setCfg( component, setting, value )
+ main.log.info( "Value was changed? {}".format( main.TRUE == check ) )
+ checkResult = check and checkResult
+ utilities.assert_equals( expect=main.TRUE,
+ actual=checkResult,
+ onpass="Successfully set config",
+ onfail="Failed to set config" )
+ else:
+ main.log.warn( "No configurations were specified to be changed after startup" )
+
+ main.step( "App Ids check" )
+ appCheck = main.TRUE
+ threads = []
+ for i in main.activeNodes:
+ t = main.Thread( target=main.CLIs[i].appToIDCheck,
+ name="appToIDCheck-" + str( i ),
+ args=[] )
+ threads.append( t )
+ t.start()
+
+ for t in threads:
+ t.join()
+ appCheck = appCheck and t.result
+ if appCheck != main.TRUE:
+ node = main.activeNodes[0]
+ main.log.warn( main.CLIs[node].apps() )
+ main.log.warn( main.CLIs[node].appIDs() )
+ utilities.assert_equals( expect=main.TRUE, actual=appCheck,
+ onpass="App Ids seem to be correct",
+ onfail="Something is wrong with app Ids" )
+
+ def CASE2( self, main ):
+ """
+ Assign devices to controllers
+ """
+ import re
+ assert main.numCtrls, "main.numCtrls not defined"
+ assert main, "main not defined"
+ assert utilities.assert_equals, "utilities.assert_equals not defined"
+ assert main.CLIs, "main.CLIs not defined"
+ assert main.nodes, "main.nodes not defined"
+
+ main.case( "Assigning devices to controllers" )
+ main.caseExplanation = "Assign switches to ONOS using 'ovs-vsctl' " +\
+ "and check that an ONOS node becomes the " +\
+ "master of the device."
+ main.step( "Assign switches to controllers" )
+
+ ipList = []
+ for i in range( main.ONOSbench.maxNodes ):
+ ipList.append( main.nodes[ i ].ip_address )
+ swList = []
+ for i in range( 1, 29 ):
+ swList.append( "s" + str( i ) )
+ main.Mininet1.assignSwController( sw=swList, ip=ipList )
+
+ mastershipCheck = main.TRUE
+ for i in range( 1, 29 ):
+ response = main.Mininet1.getSwController( "s" + str( i ) )
+ try:
+ main.log.info( str( response ) )
+ except Exception:
+ main.log.info( repr( response ) )
+ for node in main.nodes:
+ if re.search( "tcp:" + node.ip_address, response ):
+ mastershipCheck = mastershipCheck and main.TRUE
+ else:
+ main.log.error( "Error, node " + node.ip_address + " is " +
+ "not in the list of controllers s" +
+ str( i ) + " is connecting to." )
+ mastershipCheck = main.FALSE
+ utilities.assert_equals(
+ expect=main.TRUE,
+ actual=mastershipCheck,
+ onpass="Switch mastership assigned correctly",
+ onfail="Switches not assigned correctly to controllers" )
+
+ def CASE21( self, main ):
+ """
+ Assign mastership to controllers
+ """
+ import time
+ assert main.numCtrls, "main.numCtrls not defined"
+ assert main, "main not defined"
+ assert utilities.assert_equals, "utilities.assert_equals not defined"
+ assert main.CLIs, "main.CLIs not defined"
+ assert main.nodes, "main.nodes not defined"
+
+ main.case( "Assigning Controller roles for switches" )
+ main.caseExplanation = "Check that ONOS is connected to each " +\
+ "device. Then manually assign" +\
+ " mastership to specific ONOS nodes using" +\
+ " 'device-role'"
+ main.step( "Assign mastership of switches to specific controllers" )
+ # Manually assign mastership to the controller we want
+ roleCall = main.TRUE
+
+ ipList = [ ]
+ deviceList = []
+ onosCli = main.CLIs[ main.activeNodes[0] ]
+ try:
+ # Assign mastership to specific controllers. This assignment was
+ # determined for a 7 node cluster, but will work with any sized
+ # cluster
+ for i in range( 1, 29 ): # switches 1 through 28
+ # set up correct variables:
+ if i == 1:
+ c = 0
+ ip = main.nodes[ c ].ip_address # ONOS1
+ deviceId = onosCli.getDevice( "1000" ).get( 'id' )
+ elif i == 2:
+ c = 1 % main.numCtrls
+ ip = main.nodes[ c ].ip_address # ONOS2
+ deviceId = onosCli.getDevice( "2000" ).get( 'id' )
+ elif i == 3:
+ c = 1 % main.numCtrls
+ ip = main.nodes[ c ].ip_address # ONOS2
+ deviceId = onosCli.getDevice( "3000" ).get( 'id' )
+ elif i == 4:
+ c = 3 % main.numCtrls
+ ip = main.nodes[ c ].ip_address # ONOS4
+ deviceId = onosCli.getDevice( "3004" ).get( 'id' )
+ elif i == 5:
+ c = 2 % main.numCtrls
+ ip = main.nodes[ c ].ip_address # ONOS3
+ deviceId = onosCli.getDevice( "5000" ).get( 'id' )
+ elif i == 6:
+ c = 2 % main.numCtrls
+ ip = main.nodes[ c ].ip_address # ONOS3
+ deviceId = onosCli.getDevice( "6000" ).get( 'id' )
+ elif i == 7:
+ c = 5 % main.numCtrls
+ ip = main.nodes[ c ].ip_address # ONOS6
+ deviceId = onosCli.getDevice( "6007" ).get( 'id' )
+ elif i >= 8 and i <= 17:
+ c = 4 % main.numCtrls
+ ip = main.nodes[ c ].ip_address # ONOS5
+ dpid = '3' + str( i ).zfill( 3 )
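+ # e.g. i=8 gives dpid "3008"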
+ deviceId = onosCli.getDevice( dpid ).get( 'id' )
+ elif i >= 18 and i <= 27:
+ c = 6 % main.numCtrls
+ ip = main.nodes[ c ].ip_address # ONOS7
+ dpid = '6' + str( i ).zfill( 3 )
+ deviceId = onosCli.getDevice( dpid ).get( 'id' )
+ elif i == 28:
+ c = 0
+ ip = main.nodes[ c ].ip_address # ONOS1
+ deviceId = onosCli.getDevice( "2800" ).get( 'id' )
+ else:
+ main.log.error( "You didn't write an else statement for " +
+ "switch s" + str( i ) )
+ roleCall = main.FALSE
+ # Assign switch
+ assert deviceId, "No device id for s" + str( i ) + " in ONOS"
+ # TODO: make this controller dynamic
+ roleCall = roleCall and onosCli.deviceRole( deviceId, ip )
+ ipList.append( ip )
+ deviceList.append( deviceId )
+ except ( AttributeError, AssertionError ):
+ main.log.exception( "Something is wrong with ONOS device view" )
+ main.log.info( onosCli.devices() )
+ utilities.assert_equals(
+ expect=main.TRUE,
+ actual=roleCall,
+ onpass="Re-assigned switch mastership to designated controller",
+ onfail="Something wrong with deviceRole calls" )
+
+ main.step( "Check mastership was correctly assigned" )
+ roleCheck = main.TRUE
+ # NOTE: This is due to the fact that device mastership change is not
+ # atomic and is actually a multi step process
+ time.sleep( 5 )
+ for i in range( len( ipList ) ):
+ ip = ipList[i]
+ deviceId = deviceList[i]
+ # Check assignment
+ master = onosCli.getRole( deviceId ).get( 'master' )
+ if ip in master:
+ roleCheck = roleCheck and main.TRUE
+ else:
+ roleCheck = roleCheck and main.FALSE
+ main.log.error( "Error, controller " + ip + " is not" +
+ " master " + "of device " +
+ str( deviceId ) + ". Master is " +
+ repr( master ) + "." )
+ utilities.assert_equals(
+ expect=main.TRUE,
+ actual=roleCheck,
+ onpass="Switches were successfully reassigned to designated " +
+ "controller",
+ onfail="Switches were not successfully reassigned" )
+
+ def CASE3( self, main ):
+ """
+ Assign intents
+ """
+ import time
+ import json
+ assert main.numCtrls, "main.numCtrls not defined"
+ assert main, "main not defined"
+ assert utilities.assert_equals, "utilities.assert_equals not defined"
+ assert main.CLIs, "main.CLIs not defined"
+ assert main.nodes, "main.nodes not defined"
+ try:
+ labels
+ except NameError:
+ main.log.error( "labels not defined, setting to []" )
+ labels = []
+ try:
+ data
+ except NameError:
+ main.log.error( "data not defined, setting to []" )
+ data = []
+ # NOTE: we must reinstall intents until we have a persistent intent
+ # datastore!
+ main.case( "Adding host Intents" )
+ main.caseExplanation = "Discover hosts by using pingall then " +\
+ "assign predetermined host-to-host intents." +\
+ " After installation, check that the intent" +\
+ " is distributed to all nodes and the state" +\
+ " is INSTALLED"
+
+ # install onos-app-fwd
+ main.step( "Install reactive forwarding app" )
+ onosCli = main.CLIs[ main.activeNodes[0] ]
+ installResults = onosCli.activateApp( "org.onosproject.fwd" )
+ utilities.assert_equals( expect=main.TRUE, actual=installResults,
+ onpass="Install fwd successful",
+ onfail="Install fwd failed" )
+
+ main.step( "Check app ids" )
+ appCheck = main.TRUE
+ threads = []
+ for i in main.activeNodes:
+ t = main.Thread( target=main.CLIs[i].appToIDCheck,
+ name="appToIDCheck-" + str( i ),
+ args=[] )
+ threads.append( t )
+ t.start()
+
+ for t in threads:
+ t.join()
+ appCheck = appCheck and t.result
+ if appCheck != main.TRUE:
+ main.log.warn( onosCli.apps() )
+ main.log.warn( onosCli.appIDs() )
+ utilities.assert_equals( expect=main.TRUE, actual=appCheck,
+ onpass="App Ids seem to be correct",
+ onfail="Something is wrong with app Ids" )
+
+ main.step( "Discovering Hosts( Via pingall for now )" )
+ # FIXME: Once we have a host discovery mechanism, use that instead
+ # REACTIVE FWD test
+ pingResult = main.FALSE
+ passMsg = "Reactive Pingall test passed"
+ time1 = time.time()
+ pingResult = main.Mininet1.pingall()
+ time2 = time.time()
+ if not pingResult:
+ main.log.warn("First pingall failed. Trying again...")
+ pingResult = main.Mininet1.pingall()
+ passMsg += " on the second try"
+ utilities.assert_equals(
+ expect=main.TRUE,
+ actual=pingResult,
+ onpass= passMsg,
+ onfail="Reactive Pingall failed, " +
+ "one or more ping pairs failed" )
+ main.log.info( "Time for pingall: %2f seconds" %
+ ( time2 - time1 ) )
+ # timeout for fwd flows
+ time.sleep( 11 )
+ # uninstall onos-app-fwd
+ main.step( "Uninstall reactive forwarding app" )
+ node = main.activeNodes[0]
+ uninstallResult = main.CLIs[node].deactivateApp( "org.onosproject.fwd" )
+ utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
+ onpass="Uninstall fwd successful",
+ onfail="Uninstall fwd failed" )
+
+ main.step( "Check app ids" )
+ threads = []
+ appCheck2 = main.TRUE
+ for i in main.activeNodes:
+ t = main.Thread( target=main.CLIs[i].appToIDCheck,
+ name="appToIDCheck-" + str( i ),
+ args=[] )
+ threads.append( t )
+ t.start()
+
+ for t in threads:
+ t.join()
+ appCheck2 = appCheck2 and t.result
+ if appCheck2 != main.TRUE:
+ node = main.activeNodes[0]
+ main.log.warn( main.CLIs[node].apps() )
+ main.log.warn( main.CLIs[node].appIDs() )
+ utilities.assert_equals( expect=main.TRUE, actual=appCheck2,
+ onpass="App Ids seem to be correct",
+ onfail="Something is wrong with app Ids" )
+
+ main.step( "Add host intents via cli" )
+ intentIds = []
+ # TODO: move the host numbers to params
+ # Maybe look at all the paths we ping?
+ intentAddResult = True
+ hostResult = main.TRUE
+ for i in range( 8, 18 ):
+ main.log.info( "Adding host intent between h" + str( i ) +
+ " and h" + str( i + 10 ) )
+ host1 = "00:00:00:00:00:" + \
+ str( hex( i )[ 2: ] ).zfill( 2 ).upper()
+ host2 = "00:00:00:00:00:" + \
+ str( hex( i + 10 )[ 2: ] ).zfill( 2 ).upper()
+ # NOTE: getHost can return None
+ host1Dict = onosCli.getHost( host1 )
+ host2Dict = onosCli.getHost( host2 )
+ host1Id = None
+ host2Id = None
+ if host1Dict and host2Dict:
+ host1Id = host1Dict.get( 'id', None )
+ host2Id = host2Dict.get( 'id', None )
+ if host1Id and host2Id:
+ nodeNum = ( i % len( main.activeNodes ) )
+ node = main.activeNodes[nodeNum]
+ tmpId = main.CLIs[node].addHostIntent( host1Id, host2Id )
+ if tmpId:
+ main.log.info( "Added intent with id: " + tmpId )
+ intentIds.append( tmpId )
+ else:
+ main.log.error( "addHostIntent returned: " +
+ repr( tmpId ) )
+ else:
+ main.log.error( "Error, getHost() failed for h" + str( i ) +
+ " and/or h" + str( i + 10 ) )
+ node = main.activeNodes[0]
+ hosts = main.CLIs[node].hosts()
+ main.log.warn( "Hosts output: " )
+ try:
+ main.log.warn( json.dumps( json.loads( hosts ),
+ sort_keys=True,
+ indent=4,
+ separators=( ',', ': ' ) ) )
+ except ( ValueError, TypeError ):
+ main.log.warn( repr( hosts ) )
+ hostResult = main.FALSE
+ utilities.assert_equals( expect=main.TRUE, actual=hostResult,
+ onpass="Found a host id for each host",
+ onfail="Error looking up host ids" )
+
+ intentStart = time.time()
+ onosIds = onosCli.getAllIntentsId()
+ main.log.info( "Submitted intents: " + str( intentIds ) )
+ main.log.info( "Intents in ONOS: " + str( onosIds ) )
+ for intent in intentIds:
+ if intent in onosIds:
+ pass # intent submitted is in onos
+ else:
+ intentAddResult = False
+ if intentAddResult:
+ intentStop = time.time()
+ else:
+ intentStop = None
+ # Print the intent states
+ intents = onosCli.intents()
+ intentStates = []
+ installedCheck = True
+ main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
+ count = 0
+ try:
+ for intent in json.loads( intents ):
+ state = intent.get( 'state', None )
+ if "INSTALLED" not in state:
+ installedCheck = False
+ intentId = intent.get( 'id', None )
+ intentStates.append( ( intentId, state ) )
+ except ( ValueError, TypeError ):
+ main.log.exception( "Error parsing intents" )
+ # add submitted intents not in the store
+ tmplist = [ i for i, s in intentStates ]
+ missingIntents = False
+ for i in intentIds:
+ if i not in tmplist:
+ intentStates.append( ( i, " - " ) )
+ missingIntents = True
+ intentStates.sort()
+ for i, s in intentStates:
+ count += 1
+ main.log.info( "%-6s%-15s%-15s" %
+ ( str( count ), str( i ), str( s ) ) )
+ leaders = onosCli.leaders()
+ try:
+ missing = False
+ if leaders:
+ parsedLeaders = json.loads( leaders )
+ main.log.warn( json.dumps( parsedLeaders,
+ sort_keys=True,
+ indent=4,
+ separators=( ',', ': ' ) ) )
+ # check for all intent partitions
+ topics = []
+ for i in range( 14 ):
+ topics.append( "intent-partition-" + str( i ) )
+ main.log.debug( topics )
+ ONOStopics = [ j['topic'] for j in parsedLeaders ]
+ for topic in topics:
+ if topic not in ONOStopics:
+ main.log.error( "Error: " + topic +
+ " not in leaders" )
+ missing = True
+ else:
+ main.log.error( "leaders() returned None" )
+ except ( ValueError, TypeError ):
+ main.log.exception( "Error parsing leaders" )
+ main.log.error( repr( leaders ) )
+ # Check all nodes
+ if missing:
+ for i in main.activeNodes:
+ response = main.CLIs[i].leaders( jsonFormat=False)
+ main.log.warn( str( main.CLIs[i].name ) + " leaders output: \n" +
+ str( response ) )
+
+ partitions = onosCli.partitions()
+ try:
+ if partitions :
+ parsedPartitions = json.loads( partitions )
+ main.log.warn( json.dumps( parsedPartitions,
+ sort_keys=True,
+ indent=4,
+ separators=( ',', ': ' ) ) )
+ # TODO check for a leader in all partitions
+ # TODO check for consistency among nodes
+ else:
+ main.log.error( "partitions() returned None" )
+ except ( ValueError, TypeError ):
+ main.log.exception( "Error parsing partitions" )
+ main.log.error( repr( partitions ) )
+ pendingMap = onosCli.pendingMap()
+ try:
+ if pendingMap :
+ parsedPending = json.loads( pendingMap )
+ main.log.warn( json.dumps( parsedPending,
+ sort_keys=True,
+ indent=4,
+ separators=( ',', ': ' ) ) )
+ # TODO check something here?
+ else:
+ main.log.error( "pendingMap() returned None" )
+ except ( ValueError, TypeError ):
+ main.log.exception( "Error parsing pending map" )
+ main.log.error( repr( pendingMap ) )
+
+ intentAddResult = bool( intentAddResult and not missingIntents and
+ installedCheck )
+ if not intentAddResult:
+ main.log.error( "Error in pushing host intents to ONOS" )
+
+ main.step( "Intent Anti-Entropy dispersion" )
+ for j in range(100):
+ correct = True
+ main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
+ for i in main.activeNodes:
+ onosIds = []
+ ids = main.CLIs[i].getAllIntentsId()
+ onosIds.append( ids )
+ main.log.debug( "Intents in " + main.CLIs[i].name + ": " +
+ str( sorted( onosIds ) ) )
+ if sorted( ids ) != sorted( intentIds ):
+ main.log.warn( "Set of intent IDs doesn't match" )
+ correct = False
+ break
+ else:
+ intents = json.loads( main.CLIs[i].intents() )
+ for intent in intents:
+ if intent[ 'state' ] != "INSTALLED":
+ main.log.warn( "Intent " + intent[ 'id' ] +
+ " is " + intent[ 'state' ] )
+ correct = False
+ break
+ if correct:
+ break
+ else:
+ time.sleep(1)
+ if not intentStop:
+ intentStop = time.time()
+ global gossipTime
+ gossipTime = intentStop - intentStart
+ main.log.info( "It took about " + str( gossipTime ) +
+ " seconds for all intents to appear in each node" )
+ append = False
+ title = "Gossip Intents"
+ count = 1
+ while append is False:
+ curTitle = title + str( count )
+ if curTitle not in labels:
+ labels.append( curTitle )
+ data.append( str( gossipTime ) )
+ append = True
+ else:
+ count += 1
+ gossipPeriod = int( main.params['timers']['gossip'] )
+ maxGossipTime = gossipPeriod * len( main.activeNodes )
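+ # e.g. with the params above: 5 second gossip period * 5 active nodes = 25 s bound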
+ utilities.assert_greater_equals(
+ expect=maxGossipTime, actual=gossipTime,
+ onpass="ECM anti-entropy for intents worked within " +
+ "expected time",
+ onfail="Intent ECM anti-entropy took too long. " +
+ "Expected time:{}, Actual time:{}".format( maxGossipTime,
+ gossipTime ) )
+ if gossipTime <= maxGossipTime:
+ intentAddResult = True
+
+ if not intentAddResult or "key" in pendingMap:
+ import time
+ installedCheck = True
+ main.log.info( "Sleeping 60 seconds to see if intents are found" )
+ time.sleep( 60 )
+ onosIds = onosCli.getAllIntentsId()
+ main.log.info( "Submitted intents: " + str( intentIds ) )
+ main.log.info( "Intents in ONOS: " + str( onosIds ) )
+ # Print the intent states
+ intents = onosCli.intents()
+ intentStates = []
+ main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
+ count = 0
+ try:
+ for intent in json.loads( intents ):
+ # Iter through intents of a node
+ state = intent.get( 'state', None )
+ if "INSTALLED" not in state:
+ installedCheck = False
+ intentId = intent.get( 'id', None )
+ intentStates.append( ( intentId, state ) )
+ except ( ValueError, TypeError ):
+ main.log.exception( "Error parsing intents" )
+ # add submitted intents not in the store
+ tmplist = [ i for i, s in intentStates ]
+ for i in intentIds:
+ if i not in tmplist:
+ intentStates.append( ( i, " - " ) )
+ intentStates.sort()
+ for i, s in intentStates:
+ count += 1
+ main.log.info( "%-6s%-15s%-15s" %
+ ( str( count ), str( i ), str( s ) ) )
+ leaders = onosCli.leaders()
+ try:
+ missing = False
+ if leaders:
+ parsedLeaders = json.loads( leaders )
+ main.log.warn( json.dumps( parsedLeaders,
+ sort_keys=True,
+ indent=4,
+ separators=( ',', ': ' ) ) )
+ # check for all intent partitions
+ # check for election
+ topics = []
+ for i in range( 14 ):
+ topics.append( "intent-partition-" + str( i ) )
+ # FIXME: this should only be after we start the app
+ topics.append( "org.onosproject.election" )
+ main.log.debug( topics )
+ ONOStopics = [ j['topic'] for j in parsedLeaders ]
+ for topic in topics:
+ if topic not in ONOStopics:
+ main.log.error( "Error: " + topic +
+ " not in leaders" )
+ missing = True
+ else:
+ main.log.error( "leaders() returned None" )
+ except ( ValueError, TypeError ):
+ main.log.exception( "Error parsing leaders" )
+ main.log.error( repr( leaders ) )
+ # Check all nodes
+ if missing:
+ for i in main.activeNodes:
+ node = main.CLIs[i]
+ response = node.leaders( jsonFormat=False)
+ main.log.warn( str( node.name ) + " leaders output: \n" +
+ str( response ) )
+
+ partitions = onosCli.partitions()
+ try:
+ if partitions :
+ parsedPartitions = json.loads( partitions )
+ main.log.warn( json.dumps( parsedPartitions,
+ sort_keys=True,
+ indent=4,
+ separators=( ',', ': ' ) ) )
+ # TODO check for a leader in all partitions
+ # TODO check for consistency among nodes
+ else:
+ main.log.error( "partitions() returned None" )
+ except ( ValueError, TypeError ):
+ main.log.exception( "Error parsing partitions" )
+ main.log.error( repr( partitions ) )
+ pendingMap = onosCli.pendingMap()
+ try:
+ if pendingMap :
+ parsedPending = json.loads( pendingMap )
+ main.log.warn( json.dumps( parsedPending,
+ sort_keys=True,
+ indent=4,
+ separators=( ',', ': ' ) ) )
+ # TODO check something here?
+ else:
+ main.log.error( "pendingMap() returned None" )
+ except ( ValueError, TypeError ):
+ main.log.exception( "Error parsing pending map" )
+ main.log.error( repr( pendingMap ) )
+
+ def CASE4( self, main ):
+ """
+ Ping across added host intents
+ """
+ import json
+ import time
+ assert main.numCtrls, "main.numCtrls not defined"
+ assert main, "main not defined"
+ assert utilities.assert_equals, "utilities.assert_equals not defined"
+ assert main.CLIs, "main.CLIs not defined"
+ assert main.nodes, "main.nodes not defined"
+ main.case( "Verify connectivity by sending traffic across Intents" )
+ main.caseExplanation = "Ping across added host intents to check " +\
+ "functionality and check the state of " +\
+ "the intent"
+
+ onosCli = main.CLIs[ main.activeNodes[0] ]
+ main.step( "Check Intent state" )
+ installedCheck = False
+ loopCount = 0
+ while not installedCheck and loopCount < 40:
+ installedCheck = True
+ # Print the intent states
+ intents = onosCli.intents()
+ intentStates = []
+ main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
+ count = 0
+ # Iter through intents of a node
+ try:
+ for intent in json.loads( intents ):
+ state = intent.get( 'state', None )
+ if "INSTALLED" not in state:
+ installedCheck = False
+ intentId = intent.get( 'id', None )
+ intentStates.append( ( intentId, state ) )
+ except ( ValueError, TypeError ):
+ main.log.exception( "Error parsing intents." )
+ # Print states
+ intentStates.sort()
+ for i, s in intentStates:
+ count += 1
+ main.log.info( "%-6s%-15s%-15s" %
+ ( str( count ), str( i ), str( s ) ) )
+ if not installedCheck:
+ time.sleep( 1 )
+ loopCount += 1
+ utilities.assert_equals( expect=True, actual=installedCheck,
+ onpass="Intents are all INSTALLED",
+ onfail="Intents are not all in " +
+ "INSTALLED state" )
+
+ main.step( "Ping across added host intents" )
+ PingResult = main.TRUE
+ for i in range( 8, 18 ):
+ ping = main.Mininet1.pingHost( src="h" + str( i ),
+ target="h" + str( i + 10 ) )
+ PingResult = PingResult and ping
+ if ping == main.FALSE:
+ main.log.warn( "Ping failed between h" + str( i ) +
+ " and h" + str( i + 10 ) )
+ elif ping == main.TRUE:
+ main.log.info( "Ping test passed!" )
+ # Don't set PingResult or you'd override failures
+ if PingResult == main.FALSE:
+ main.log.error(
+ "Intents have not been installed correctly, pings failed." )
+ # TODO: pretty print
+ main.log.warn( "ONOS1 intents: " )
+ try:
+ tmpIntents = onosCli.intents()
+ main.log.warn( json.dumps( json.loads( tmpIntents ),
+ sort_keys=True,
+ indent=4,
+ separators=( ',', ': ' ) ) )
+ except ( ValueError, TypeError ):
+ main.log.warn( repr( tmpIntents ) )
+ utilities.assert_equals(
+ expect=main.TRUE,
+ actual=PingResult,
+ onpass="Intents have been installed correctly and pings work",
+ onfail="Intents have not been installed correctly, pings failed." )
+
+ main.step( "Check leadership of topics" )
+ leaders = onosCli.leaders()
+ topicCheck = main.TRUE
+ try:
+ if leaders:
+ parsedLeaders = json.loads( leaders )
+ main.log.warn( json.dumps( parsedLeaders,
+ sort_keys=True,
+ indent=4,
+ separators=( ',', ': ' ) ) )
+ # check for all intent partitions
+ # check for election
+ # TODO: Look at Devices as topics now that it uses this system
+ topics = []
+ for i in range( 14 ):
+ topics.append( "intent-partition-" + str( i ) )
+ # FIXME: this should only be after we start the app
+ # FIXME: topics.append( "org.onosproject.election" )
+ # Print leaders output
+ main.log.debug( topics )
+ ONOStopics = [ j['topic'] for j in parsedLeaders ]
+ for topic in topics:
+ if topic not in ONOStopics:
+ main.log.error( "Error: " + topic +
+ " not in leaders" )
+ topicCheck = main.FALSE
+ else:
+ main.log.error( "leaders() returned None" )
+ topicCheck = main.FALSE
+ except ( ValueError, TypeError ):
+ topicCheck = main.FALSE
+ main.log.exception( "Error parsing leaders" )
+ main.log.error( repr( leaders ) )
+ # TODO: Check for a leader of these topics
+ # Check all nodes
+ if topicCheck:
+ for i in main.activeNodes:
+ node = main.CLIs[i]
+ response = node.leaders( jsonFormat=False)
+ main.log.warn( str( node.name ) + " leaders output: \n" +
+ str( response ) )
+
+ utilities.assert_equals( expect=main.TRUE, actual=topicCheck,
+ onpass="intent Partitions is in leaders",
+ onfail="Some topics were lost " )
+ # Print partitions
+ partitions = onosCli.partitions()
+ try:
+ if partitions :
+ parsedPartitions = json.loads( partitions )
+ main.log.warn( json.dumps( parsedPartitions,
+ sort_keys=True,
+ indent=4,
+ separators=( ',', ': ' ) ) )
+ # TODO check for a leader in all partitions
+ # TODO check for consistency among nodes
+ else:
+ main.log.error( "partitions() returned None" )
+ except ( ValueError, TypeError ):
+ main.log.exception( "Error parsing partitions" )
+ main.log.error( repr( partitions ) )
+ # Print Pending Map
+ pendingMap = onosCli.pendingMap()
+ try:
+ if pendingMap :
+ parsedPending = json.loads( pendingMap )
+ main.log.warn( json.dumps( parsedPending,
+ sort_keys=True,
+ indent=4,
+ separators=( ',', ': ' ) ) )
+ # TODO check something here?
+ else:
+ main.log.error( "pendingMap() returned None" )
+ except ( ValueError, TypeError ):
+ main.log.exception( "Error parsing pending map" )
+ main.log.error( repr( pendingMap ) )
+
+ if not installedCheck:
+ main.log.info( "Waiting 60 seconds to see if the state of " +
+ "intents change" )
+ time.sleep( 60 )
+ # Print the intent states
+ intents = onosCli.intents()
+ intentStates = []
+ main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
+ count = 0
+ # Iter through intents of a node
+ try:
+ for intent in json.loads( intents ):
+ state = intent.get( 'state', None )
+ if "INSTALLED" not in state:
+ installedCheck = False
+ intentId = intent.get( 'id', None )
+ intentStates.append( ( intentId, state ) )
+ except ( ValueError, TypeError ):
+ main.log.exception( "Error parsing intents." )
+ intentStates.sort()
+ for i, s in intentStates:
+ count += 1
+ main.log.info( "%-6s%-15s%-15s" %
+ ( str( count ), str( i ), str( s ) ) )
+ leaders = onosCli.leaders()
+ try:
+ missing = False
+ if leaders:
+ parsedLeaders = json.loads( leaders )
+ main.log.warn( json.dumps( parsedLeaders,
+ sort_keys=True,
+ indent=4,
+ separators=( ',', ': ' ) ) )
+ # check for all intent partitions
+ # check for election
+ topics = []
+ for i in range( 14 ):
+ topics.append( "intent-partition-" + str( i ) )
+ # FIXME: this should only be after we start the app
+ topics.append( "org.onosproject.election" )
+ main.log.debug( topics )
+ ONOStopics = [ j['topic'] for j in parsedLeaders ]
+ for topic in topics:
+ if topic not in ONOStopics:
+ main.log.error( "Error: " + topic +
+ " not in leaders" )
+ missing = True
+ else:
+ main.log.error( "leaders() returned None" )
+ except ( ValueError, TypeError ):
+ main.log.exception( "Error parsing leaders" )
+ main.log.error( repr( leaders ) )
+ if missing:
+ for i in main.activeNodes:
+ node = main.CLIs[i]
+ response = node.leaders( jsonFormat=False )
+ main.log.warn( str( node.name ) + " leaders output: \n" +
+ str( response ) )
+
+ partitions = onosCli.partitions()
+ try:
+ if partitions:
+ parsedPartitions = json.loads( partitions )
+ main.log.warn( json.dumps( parsedPartitions,
+ sort_keys=True,
+ indent=4,
+ separators=( ',', ': ' ) ) )
+ # TODO check for a leader in all partitions
+ # TODO check for consistency among nodes
+ else:
+ main.log.error( "partitions() returned None" )
+ except ( ValueError, TypeError ):
+ main.log.exception( "Error parsing partitions" )
+ main.log.error( repr( partitions ) )
+ pendingMap = onosCli.pendingMap()
+ try:
+ if pendingMap:
+ parsedPending = json.loads( pendingMap )
+ main.log.warn( json.dumps( parsedPending,
+ sort_keys=True,
+ indent=4,
+ separators=( ',', ': ' ) ) )
+ # TODO check something here?
+ else:
+ main.log.error( "pendingMap() returned None" )
+ except ( ValueError, TypeError ):
+ main.log.exception( "Error parsing pending map" )
+ main.log.error( repr( pendingMap ) )
+ # Print flowrules
+ node = main.activeNodes[0]
+ main.log.debug( main.CLIs[node].flows( jsonFormat=False ) )
+ main.step( "Wait a minute then ping again" )
+ # the wait is above
+ PingResult = main.TRUE
+ for i in range( 8, 18 ):
+ ping = main.Mininet1.pingHost( src="h" + str( i ),
+ target="h" + str( i + 10 ) )
+ PingResult = PingResult and ping
+ if ping == main.FALSE:
+ main.log.warn( "Ping failed between h" + str( i ) +
+ " and h" + str( i + 10 ) )
+ elif ping == main.TRUE:
+ main.log.info( "Ping test passed!" )
+ # Don't set PingResult or you'd override failures
+ if PingResult == main.FALSE:
+ main.log.error(
+ "Intents have not been installed correctly, pings failed." )
+ # TODO: pretty print
+ main.log.warn( "ONOS1 intents: " )
+ try:
+ tmpIntents = onosCli.intents()
+ main.log.warn( json.dumps( json.loads( tmpIntents ),
+ sort_keys=True,
+ indent=4,
+ separators=( ',', ': ' ) ) )
+ except ( ValueError, TypeError ):
+ main.log.warn( repr( tmpIntents ) )
+ utilities.assert_equals(
+ expect=main.TRUE,
+ actual=PingResult,
+ onpass="Intents have been installed correctly and pings work",
+ onfail="Intents have not been installed correctly, pings failed." )
+
+ def CASE5( self, main ):
+ """
+ Reading state of ONOS
+ """
+ import json
+ import time
+ assert main.numCtrls, "main.numCtrls not defined"
+ assert main, "main not defined"
+ assert utilities.assert_equals, "utilities.assert_equals not defined"
+ assert main.CLIs, "main.CLIs not defined"
+ assert main.nodes, "main.nodes not defined"
+
+ main.case( "Setting up and gathering data for current state" )
+ # The general idea for this test case is to pull the state of
+ # ( intents, flows, topology, ... ) from each ONOS node
+ # We can then compare them with each other and also with past states
+
+ main.step( "Check that each switch has a master" )
+ global mastershipState
+ mastershipState = '[]'
+
+ # Assert that each device has a master
+ rolesNotNull = main.TRUE
+ threads = []
+ for i in main.activeNodes:
+ t = main.Thread( target=main.CLIs[i].rolesNotNull,
+ name="rolesNotNull-" + str( i ),
+ args=[] )
+ threads.append( t )
+ t.start()
+
+ for t in threads:
+ t.join()
+ rolesNotNull = rolesNotNull and t.result
+ utilities.assert_equals(
+ expect=main.TRUE,
+ actual=rolesNotNull,
+ onpass="Each device has a master",
+ onfail="Some devices don't have a master assigned" )
+
+ main.step( "Get the Mastership of each switch from each controller" )
+ ONOSMastership = []
+ consistentMastership = True
+ rolesResults = True
+ threads = []
+ for i in main.activeNodes:
+ t = main.Thread( target=main.CLIs[i].roles,
+ name="roles-" + str( i ),
+ args=[] )
+ threads.append( t )
+ t.start()
+
+ for t in threads:
+ t.join()
+ ONOSMastership.append( t.result )
+
+ for i in range( len( ONOSMastership ) ):
+ node = str( main.activeNodes[i] + 1 )
+ if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
+ main.log.error( "Error in getting ONOS" + node + " roles" )
+ main.log.warn( "ONOS" + node + " mastership response: " +
+ repr( ONOSMastership[i] ) )
+ rolesResults = False
+ utilities.assert_equals(
+ expect=True,
+ actual=rolesResults,
+ onpass="No error in reading roles output",
+ onfail="Error in reading roles from ONOS" )
+
+ main.step( "Check for consistency in roles from each controller" )
+ if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
+ main.log.info(
+ "Switch roles are consistent across all ONOS nodes" )
+ else:
+ consistentMastership = False
+ utilities.assert_equals(
+ expect=True,
+ actual=consistentMastership,
+ onpass="Switch roles are consistent across all ONOS nodes",
+ onfail="ONOS nodes have different views of switch roles" )
+
+ if rolesResults and not consistentMastership:
+ for i in range( len( main.activeNodes ) ):
+ node = str( main.activeNodes[i] + 1 )
+ try:
+ main.log.warn(
+ "ONOS" + node + " roles: ",
+ json.dumps(
+ json.loads( ONOSMastership[ i ] ),
+ sort_keys=True,
+ indent=4,
+ separators=( ',', ': ' ) ) )
+ except ( ValueError, TypeError ):
+ main.log.warn( repr( ONOSMastership[ i ] ) )
+ elif rolesResults and consistentMastership:
+ mastershipState = ONOSMastership[ 0 ]
+
+ main.step( "Get the intents from each controller" )
+ global intentState
+ intentState = []
+ ONOSIntents = []
+ consistentIntents = True # Are Intents consistent across nodes?
+ intentsResults = True # Could we read Intents from ONOS?
+ threads = []
+ for i in main.activeNodes:
+ t = main.Thread( target=main.CLIs[i].intents,
+ name="intents-" + str( i ),
+ args=[],
+ kwargs={ 'jsonFormat': True } )
+ threads.append( t )
+ t.start()
+
+ for t in threads:
+ t.join()
+ ONOSIntents.append( t.result )
+
+ for i in range( len( ONOSIntents ) ):
+ node = str( main.activeNodes[i] + 1 )
+ if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
+ main.log.error( "Error in getting ONOS" + node + " intents" )
+ main.log.warn( "ONOS" + node + " intents response: " +
+ repr( ONOSIntents[ i ] ) )
+ intentsResults = False
+ utilities.assert_equals(
+ expect=True,
+ actual=intentsResults,
+ onpass="No error in reading intents output",
+ onfail="Error in reading intents from ONOS" )
+
+ main.step( "Check for consistency in Intents from each controller" )
+ if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
+ main.log.info( "Intents are consistent across all ONOS " +
+ "nodes" )
+ else:
+ consistentIntents = False
+ main.log.error( "Intents not consistent" )
+ utilities.assert_equals(
+ expect=True,
+ actual=consistentIntents,
+ onpass="Intents are consistent across all ONOS nodes",
+ onfail="ONOS nodes have different views of intents" )
+
+ if intentsResults:
+ # Try to make it easy to figure out what is happening
+ #
+ # Intent ONOS1 ONOS2 ...
+ # 0x01 INSTALLED INSTALLING
+ # ... ... ...
+ # ... ... ...
+ title = " Id"
+ for n in main.activeNodes:
+ title += " " * 10 + "ONOS" + str( n + 1 )
+ main.log.warn( title )
+ # get all intent keys in the cluster
+ keys = []
+ try:
+ # Get the set of all intent keys
+ for nodeStr in ONOSIntents:
+ node = json.loads( nodeStr )
+ for intent in node:
+ keys.append( intent.get( 'id' ) )
+ keys = set( keys )
+ # For each intent key, print the state on each node
+ for key in keys:
+ row = "%-13s" % key
+ for nodeStr in ONOSIntents:
+ node = json.loads( nodeStr )
+ for intent in node:
+ if intent.get( 'id', "Error" ) == key:
+ row += "%-15s" % intent.get( 'state' )
+ main.log.warn( row )
+ # End of intent state table
+ except ( ValueError, TypeError ) as e:
+ main.log.exception( e )
+ main.log.debug( "nodeStr was: " + repr( nodeStr ) )
+
+ if intentsResults and not consistentIntents:
+ # print the json objects
+ n = str( main.activeNodes[-1] + 1 )
+ main.log.debug( "ONOS" + n + " intents: " )
+ main.log.debug( json.dumps( json.loads( ONOSIntents[ -1 ] ),
+ sort_keys=True,
+ indent=4,
+ separators=( ',', ': ' ) ) )
+ for i in range( len( ONOSIntents ) ):
+ node = str( main.activeNodes[i] + 1 )
+ if ONOSIntents[ i ] != ONOSIntents[ -1 ]:
+ main.log.debug( "ONOS" + node + " intents: " )
+ main.log.debug( json.dumps( json.loads( ONOSIntents[i] ),
+ sort_keys=True,
+ indent=4,
+ separators=( ',', ': ' ) ) )
+ else:
+ main.log.debug( "ONOS" + node + " intents match ONOS" +
+ n + " intents" )
+ elif intentsResults and consistentIntents:
+ intentState = ONOSIntents[ 0 ]
+
+ main.step( "Get the flows from each controller" )
+ global flowState
+ flowState = []
+ ONOSFlows = []
+ ONOSFlowsJson = []
+ flowCheck = main.FALSE
+ consistentFlows = True
+ flowsResults = True
+ threads = []
+ for i in main.activeNodes:
+ t = main.Thread( target=main.CLIs[i].flows,
+ name="flows-" + str( i ),
+ args=[],
+ kwargs={ 'jsonFormat': True } )
+ threads.append( t )
+ t.start()
+
+ # NOTE: Flows command can take some time to run
+ time.sleep(30)
+ for t in threads:
+ t.join()
+ result = t.result
+ ONOSFlows.append( result )
+
+ for i in range( len( ONOSFlows ) ):
+ num = str( main.activeNodes[i] + 1 )
+ if not ONOSFlows[ i ] or "Error" in ONOSFlows[ i ]:
+ main.log.error( "Error in getting ONOS" + num + " flows" )
+ main.log.warn( "ONOS" + num + " flows response: " +
+ repr( ONOSFlows[ i ] ) )
+ flowsResults = False
+ ONOSFlowsJson.append( None )
+ else:
+ try:
+ ONOSFlowsJson.append( json.loads( ONOSFlows[ i ] ) )
+ except ( ValueError, TypeError ):
+ # FIXME: change this to log.error?
+ main.log.exception( "Error in parsing ONOS" + num +
+ " response as json." )
+ main.log.error( repr( ONOSFlows[ i ] ) )
+ ONOSFlowsJson.append( None )
+ flowsResults = False
+ utilities.assert_equals(
+ expect=True,
+ actual=flowsResults,
+ onpass="No error in reading flows output",
+ onfail="Error in reading flows from ONOS" )
+
+ main.step( "Check for consistency in Flows from each controller" )
+ tmp = [ len( i ) == len( ONOSFlowsJson[ 0 ] ) for i in ONOSFlowsJson ]
+ if all( tmp ):
+ main.log.info( "Flow count is consistent across all ONOS nodes" )
+ else:
+ consistentFlows = False
+ utilities.assert_equals(
+ expect=True,
+ actual=consistentFlows,
+ onpass="The flow count is consistent across all ONOS nodes",
+ onfail="ONOS nodes have different flow counts" )
+
+ if flowsResults and not consistentFlows:
+ for i in range( len( ONOSFlows ) ):
+ node = str( main.activeNodes[i] + 1 )
+ try:
+ main.log.warn(
+ "ONOS" + node + " flows: " +
+ json.dumps( json.loads( ONOSFlows[i] ), sort_keys=True,
+ indent=4, separators=( ',', ': ' ) ) )
+ except ( ValueError, TypeError ):
+ main.log.warn( "ONOS" + node + " flows: " +
+ repr( ONOSFlows[ i ] ) )
+ elif flowsResults and consistentFlows:
+ flowCheck = main.TRUE
+ flowState = ONOSFlows[ 0 ]
+
+ main.step( "Get the OF Table entries" )
+ global flows
+ flows = []
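+ # Snapshot the OF tables of all 28 switches now; CASE7 compares
+ # these against the tables captured after scaling the cluster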
+ for i in range( 1, 29 ):
+ flows.append( main.Mininet1.getFlowTable( "s" + str( i ), version="1.3", debug=False ) )
+ if flowCheck == main.FALSE:
+ for table in flows:
+ main.log.warn( table )
+ # TODO: Compare switch flow tables with ONOS flow tables
+
+ main.step( "Start continuous pings" )
+ main.Mininet2.pingLong(
+ src=main.params[ 'PING' ][ 'source1' ],
+ target=main.params[ 'PING' ][ 'target1' ],
+ pingTime=500 )
+ main.Mininet2.pingLong(
+ src=main.params[ 'PING' ][ 'source2' ],
+ target=main.params[ 'PING' ][ 'target2' ],
+ pingTime=500 )
+ main.Mininet2.pingLong(
+ src=main.params[ 'PING' ][ 'source3' ],
+ target=main.params[ 'PING' ][ 'target3' ],
+ pingTime=500 )
+ main.Mininet2.pingLong(
+ src=main.params[ 'PING' ][ 'source4' ],
+ target=main.params[ 'PING' ][ 'target4' ],
+ pingTime=500 )
+ main.Mininet2.pingLong(
+ src=main.params[ 'PING' ][ 'source5' ],
+ target=main.params[ 'PING' ][ 'target5' ],
+ pingTime=500 )
+ main.Mininet2.pingLong(
+ src=main.params[ 'PING' ][ 'source6' ],
+ target=main.params[ 'PING' ][ 'target6' ],
+ pingTime=500 )
+ main.Mininet2.pingLong(
+ src=main.params[ 'PING' ][ 'source7' ],
+ target=main.params[ 'PING' ][ 'target7' ],
+ pingTime=500 )
+ main.Mininet2.pingLong(
+ src=main.params[ 'PING' ][ 'source8' ],
+ target=main.params[ 'PING' ][ 'target8' ],
+ pingTime=500 )
+ main.Mininet2.pingLong(
+ src=main.params[ 'PING' ][ 'source9' ],
+ target=main.params[ 'PING' ][ 'target9' ],
+ pingTime=500 )
+ main.Mininet2.pingLong(
+ src=main.params[ 'PING' ][ 'source10' ],
+ target=main.params[ 'PING' ][ 'target10' ],
+ pingTime=500 )
+
+ main.step( "Collecting topology information from ONOS" )
+ devices = []
+ threads = []
+ for i in main.activeNodes:
+ t = main.Thread( target=main.CLIs[i].devices,
+ name="devices-" + str( i ),
+ args=[ ] )
+ threads.append( t )
+ t.start()
+
+ for t in threads:
+ t.join()
+ devices.append( t.result )
+ hosts = []
+ threads = []
+ for i in main.activeNodes:
+ t = main.Thread( target=main.CLIs[i].hosts,
+ name="hosts-" + str( i ),
+ args=[ ] )
+ threads.append( t )
+ t.start()
+
+ for t in threads:
+ t.join()
+ try:
+ hosts.append( json.loads( t.result ) )
+ except ( ValueError, TypeError ):
+ # FIXME: better handling of this, print which node
+ # Maybe use thread name?
+ main.log.exception( "Error parsing json output of hosts" )
+ main.log.warn( repr( t.result ) )
+ hosts.append( None )
+
+ ports = []
+ threads = []
+ for i in main.activeNodes:
+ t = main.Thread( target=main.CLIs[i].ports,
+ name="ports-" + str( i ),
+ args=[ ] )
+ threads.append( t )
+ t.start()
+
+ for t in threads:
+ t.join()
+ ports.append( t.result )
+ links = []
+ threads = []
+ for i in main.activeNodes:
+ t = main.Thread( target=main.CLIs[i].links,
+ name="links-" + str( i ),
+ args=[ ] )
+ threads.append( t )
+ t.start()
+
+ for t in threads:
+ t.join()
+ links.append( t.result )
+ clusters = []
+ threads = []
+ for i in main.activeNodes:
+ t = main.Thread( target=main.CLIs[i].clusters,
+ name="clusters-" + str( i ),
+ args=[ ] )
+ threads.append( t )
+ t.start()
+
+ for t in threads:
+ t.join()
+ clusters.append( t.result )
+ # Compare json objects for hosts and dataplane clusters
+
+ # hosts
+ main.step( "Host view is consistent across ONOS nodes" )
+ consistentHostsResult = main.TRUE
+ for controller in range( len( hosts ) ):
+ controllerStr = str( main.activeNodes[controller] + 1 )
+ if hosts[ controller ] and "Error" not in hosts[ controller ]:
+ if hosts[ controller ] == hosts[ 0 ]:
+ continue
+ else: # hosts not consistent
+ main.log.error( "hosts from ONOS" +
+ controllerStr +
+ " is inconsistent with ONOS1" )
+ main.log.warn( repr( hosts[ controller ] ) )
+ consistentHostsResult = main.FALSE
+
+ else:
+ main.log.error( "Error in getting ONOS hosts from ONOS" +
+ controllerStr )
+ consistentHostsResult = main.FALSE
+ main.log.warn( "ONOS" + controllerStr +
+ " hosts response: " +
+ repr( hosts[ controller ] ) )
+ utilities.assert_equals(
+ expect=main.TRUE,
+ actual=consistentHostsResult,
+ onpass="Hosts view is consistent across all ONOS nodes",
+ onfail="ONOS nodes have different views of hosts" )
+
+ main.step( "Each host has an IP address" )
+ ipResult = main.TRUE
+ for controller in range( 0, len( hosts ) ):
+ controllerStr = str( main.activeNodes[controller] + 1 )
+ if hosts[ controller ]:
+ for host in hosts[ controller ]:
+ if not host.get( 'ipAddresses', [ ] ):
+ main.log.error( "Error with host ips on controller" +
+ controllerStr + ": " + str( host ) )
+ ipResult = main.FALSE
+ utilities.assert_equals(
+ expect=main.TRUE,
+ actual=ipResult,
+ onpass="The ips of the hosts aren't empty",
+ onfail="The ip of at least one host is missing" )
+
+ # Strongly connected clusters of devices
+ main.step( "Cluster view is consistent across ONOS nodes" )
+ consistentClustersResult = main.TRUE
+ for controller in range( len( clusters ) ):
+ controllerStr = str( main.activeNodes[controller] + 1 )
+ if "Error" not in clusters[ controller ]:
+ if clusters[ controller ] == clusters[ 0 ]:
+ continue
+ else: # clusters not consistent
+ main.log.error( "clusters from ONOS" + controllerStr +
+ " is inconsistent with ONOS1" )
+ consistentClustersResult = main.FALSE
+
+ else:
+ main.log.error( "Error in getting dataplane clusters " +
+ "from ONOS" + controllerStr )
+ consistentClustersResult = main.FALSE
+ main.log.warn( "ONOS" + controllerStr +
+ " clusters response: " +
+ repr( clusters[ controller ] ) )
+ utilities.assert_equals(
+ expect=main.TRUE,
+ actual=consistentClustersResult,
+ onpass="Clusters view is consistent across all ONOS nodes",
+ onfail="ONOS nodes have different views of clusters" )
+ if not consistentClustersResult:
+ main.log.debug( clusters )
+
+ # there should always only be one cluster
+ main.step( "Cluster view correct across ONOS nodes" )
+ try:
+ numClusters = len( json.loads( clusters[ 0 ] ) )
+ except ( ValueError, TypeError ):
+ main.log.exception( "Error parsing clusters[0]: " +
+ repr( clusters[ 0 ] ) )
+ numClusters = "ERROR"
+ utilities.assert_equals(
+ expect=1,
+ actual=numClusters,
+ onpass="ONOS shows 1 SCC",
+ onfail="ONOS shows " + str( numClusters ) + " SCCs" )
+
+ main.step( "Comparing ONOS topology to MN" )
+ devicesResults = main.TRUE
+ linksResults = main.TRUE
+ hostsResults = main.TRUE
+ mnSwitches = main.Mininet1.getSwitches()
+ mnLinks = main.Mininet1.getLinks()
+ mnHosts = main.Mininet1.getHosts()
+ for controller in range( len( main.activeNodes ) ):
+ controllerStr = str( main.activeNodes[controller] + 1 )
+ if devices[ controller ] and ports[ controller ] and\
+ "Error" not in devices[ controller ] and\
+ "Error" not in ports[ controller ]:
+ currentDevicesResult = main.Mininet1.compareSwitches(
+ mnSwitches,
+ json.loads( devices[ controller ] ),
+ json.loads( ports[ controller ] ) )
+ else:
+ currentDevicesResult = main.FALSE
+ utilities.assert_equals( expect=main.TRUE,
+ actual=currentDevicesResult,
+ onpass="ONOS" + controllerStr +
+ " Switches view is correct",
+ onfail="ONOS" + controllerStr +
+ " Switches view is incorrect" )
+ if links[ controller ] and "Error" not in links[ controller ]:
+ currentLinksResult = main.Mininet1.compareLinks(
+ mnSwitches, mnLinks,
+ json.loads( links[ controller ] ) )
+ else:
+ currentLinksResult = main.FALSE
+ utilities.assert_equals( expect=main.TRUE,
+ actual=currentLinksResult,
+ onpass="ONOS" + controllerStr +
+ " links view is correct",
+ onfail="ONOS" + controllerStr +
+ " links view is incorrect" )
+
+ if hosts[ controller ] and "Error" not in hosts[ controller ]:
+ currentHostsResult = main.Mininet1.compareHosts(
+ mnHosts,
+ hosts[ controller ] )
+ else:
+ currentHostsResult = main.FALSE
+ utilities.assert_equals( expect=main.TRUE,
+ actual=currentHostsResult,
+ onpass="ONOS" + controllerStr +
+ " hosts exist in Mininet",
+ onfail="ONOS" + controllerStr +
+ " hosts don't match Mininet" )
+
+ devicesResults = devicesResults and currentDevicesResult
+ linksResults = linksResults and currentLinksResult
+ hostsResults = hostsResults and currentHostsResult
+
+ main.step( "Device information is correct" )
+ utilities.assert_equals(
+ expect=main.TRUE,
+ actual=devicesResults,
+ onpass="Device information is correct",
+ onfail="Device information is incorrect" )
+
+ main.step( "Links are correct" )
+ utilities.assert_equals(
+ expect=main.TRUE,
+ actual=linksResults,
+ onpass="Link are correct",
+ onfail="Links are incorrect" )
+
+ main.step( "Hosts are correct" )
+ utilities.assert_equals(
+ expect=main.TRUE,
+ actual=hostsResults,
+ onpass="Hosts are correct",
+ onfail="Hosts are incorrect" )
+
+ def CASE6( self, main ):
+ """
+ The Scaling case.
+ """
+ import time
+ import re
+ assert main.numCtrls, "main.numCtrls not defined"
+ assert main, "main not defined"
+ assert utilities.assert_equals, "utilities.assert_equals not defined"
+ assert main.CLIs, "main.CLIs not defined"
+ assert main.nodes, "main.nodes not defined"
+ try:
+ labels
+ except NameError:
+ main.log.error( "labels not defined, setting to []" )
+ global labels
+ labels = []
+ try:
+ data
+ except NameError:
+ main.log.error( "data not defined, setting to []" )
+ global data
+ data = []
+
+ main.case( "Swap some of the ONOS nodes" )
+
+ main.step( "Checking ONOS Logs for errors" )
+ for i in main.activeNodes:
+ node = main.nodes[i]
+ main.log.debug( "Checking logs for errors on " + node.name + ":" )
+ main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
+
+ main.step( "Generate new metadata file" )
+ old = [ main.activeNodes[0], main.activeNodes[-1] ]
+ new = range( main.ONOSbench.maxNodes )[-2:]
+ assert len( old ) == len( new ), "Length of nodes to swap don't match"
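+ # Swap nodes by re-pointing the bench's $OC<n> environment variables
+ # for the outgoing nodes at the incoming ones; the metadata file
+ # generated below should then list the new node IPs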
+ handle = main.ONOSbench.handle
+ for x, y in zip( old, new ):
+ handle.sendline( "export OC{}=$OC{}".format( x + 1, y + 1 ) )
+ handle.expect( "\$" ) # from the variable
+ ret = handle.before
+ handle.expect( "\$" ) # From the prompt
+ ret += handle.before
+ main.log.debug( ret )
+ main.activeNodes.remove( x )
+ main.activeNodes.append( y )
+
+ genResult = main.Server.generateFile( main.numCtrls )
+ utilities.assert_equals( expect=main.TRUE, actual=genResult,
+ onpass="New cluster metadata file generated",
+ onfail="Failled to generate new metadata file" )
+ time.sleep( 5 ) # Give time for nodes to read new file
+
+ main.step( "Start new nodes" ) # OR stop old nodes?
+ started = main.TRUE
+ for i in new:
+ started = started and main.ONOSbench.onosStart( main.nodes[i].ip_address )
+ utilities.assert_equals( expect=main.TRUE, actual=started,
+ onpass="ONOS started",
+ onfail="ONOS start NOT successful" )
+
+ main.step( "Checking if ONOS is up yet" )
+ for attempt in range( 2 ):
+ onosIsupResult = main.TRUE
+ for i in main.activeNodes:
+ node = main.nodes[i]
+ started = main.ONOSbench.isup( node.ip_address )
+ if not started:
+ main.log.error( node.name + " didn't start!" )
+ onosIsupResult = onosIsupResult and started
+ if onosIsupResult == main.TRUE:
+ break
+ utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
+ onpass="ONOS started",
+ onfail="ONOS start NOT successful" )
+
+ main.log.step( "Starting ONOS CLI sessions" )
+ cliResults = main.TRUE
+ threads = []
+ for i in main.activeNodes:
+ t = main.Thread( target=main.CLIs[i].startOnosCli,
+ name="startOnosCli-" + str( i ),
+ args=[main.nodes[i].ip_address] )
+ threads.append( t )
+ t.start()
+
+ for t in threads:
+ t.join()
+ cliResults = cliResults and t.result
+ utilities.assert_equals( expect=main.TRUE, actual=cliResults,
+ onpass="ONOS cli started",
+ onfail="ONOS clis did not start" )
+
+ main.step( "Checking ONOS nodes" )
+ nodeResults = utilities.retry( main.HA.nodesCheck,
+ False,
+ args=[main.activeNodes],
+ attempts=5 )
+ utilities.assert_equals( expect=True, actual=nodeResults,
+ onpass="Nodes check successful",
+ onfail="Nodes check NOT successful" )
+
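+ # Poll "summary" on every active node, up to 10 times 30 seconds
+ # apart, until all of them respond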
+ for attempt in range( 10 ):
+ ready = True
+ for i in main.activeNodes:
+ cli = main.CLIs[i]
+ output = cli.summary()
+ if not output:
+ ready = False
+ if ready:
+ break
+ time.sleep( 30 )
+ utilities.assert_equals( expect=True, actual=ready,
+ onpass="ONOS summary command succeded",
+ onfail="ONOS summary command failed" )
+ if not ready:
+ main.cleanup()
+ main.exit()
+
+ # Rerun for election on new nodes
+ runResults = main.TRUE
+ for i in main.activeNodes:
+ cli = main.CLIs[i]
+ run = cli.electionTestRun()
+ if run != main.TRUE:
+ main.log.error( "Error running for election on " + cli.name )
+ runResults = runResults and run
+ utilities.assert_equals( expect=main.TRUE, actual=runResults,
+ onpass="Reran for election",
+ onfail="Failed to rerun for election" )
+
+ for node in main.activeNodes:
+ main.log.warn( "\n****************** {} **************".format( main.nodes[node].ip_address ) )
+ main.log.debug( main.CLIs[node].nodes( jsonFormat=False ) )
+ main.log.debug( main.CLIs[node].leaders( jsonFormat=False ) )
+ main.log.debug( main.CLIs[node].partitions( jsonFormat=False ) )
+ main.log.debug( main.CLIs[node].apps( jsonFormat=False ) )
+
+ main.step( "Reapplying cell variable to environment" )
+ cellName = main.params[ 'ENV' ][ 'cellName' ]
+ cellResult = main.ONOSbench.setCell( cellName )
+ utilities.assert_equals( expect=main.TRUE, actual=cellResult,
+ onpass="Set cell successfull",
+ onfail="Failled to set cell" )
+
+ def CASE7( self, main ):
+ """
+ Check state after ONOS scaling
+ """
+ import json
+ assert main.numCtrls, "main.numCtrls not defined"
+ assert main, "main not defined"
+ assert utilities.assert_equals, "utilities.assert_equals not defined"
+ assert main.CLIs, "main.CLIs not defined"
+ assert main.nodes, "main.nodes not defined"
+ main.case( "Running ONOS Constant State Tests" )
+
+ main.step( "Check that each switch has a master" )
+ # Assert that each device has a master
+ rolesNotNull = main.TRUE
+ threads = []
+ for i in main.activeNodes:
+ t = main.Thread( target=main.CLIs[i].rolesNotNull,
+ name="rolesNotNull-" + str( i ),
+ args=[ ] )
+ threads.append( t )
+ t.start()
+
+ for t in threads:
+ t.join()
+ rolesNotNull = rolesNotNull and t.result
+ utilities.assert_equals(
+ expect=main.TRUE,
+ actual=rolesNotNull,
+ onpass="Each device has a master",
+ onfail="Some devices don't have a master assigned" )
+
+ main.step( "Read device roles from ONOS" )
+ ONOSMastership = []
+ consistentMastership = True
+ rolesResults = True
+ threads = []
+ for i in main.activeNodes:
+ t = main.Thread( target=main.CLIs[i].roles,
+ name="roles-" + str( i ),
+ args=[] )
+ threads.append( t )
+ t.start()
+
+ for t in threads:
+ t.join()
+ ONOSMastership.append( t.result )
+
+ for i in range( len( ONOSMastership ) ):
+ node = str( main.activeNodes[i] + 1 )
+ if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
+ main.log.error( "Error in getting ONOS" + node + " roles" )
+ main.log.warn( "ONOS" + node + " mastership response: " +
+ repr( ONOSMastership[i] ) )
+ rolesResults = False
+ utilities.assert_equals(
+ expect=True,
+ actual=rolesResults,
+ onpass="No error in reading roles output",
+ onfail="Error in reading roles from ONOS" )
+
+ main.step( "Check for consistency in roles from each controller" )
+ if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
+ main.log.info(
+ "Switch roles are consistent across all ONOS nodes" )
+ else:
+ consistentMastership = False
+ utilities.assert_equals(
+ expect=True,
+ actual=consistentMastership,
+ onpass="Switch roles are consistent across all ONOS nodes",
+ onfail="ONOS nodes have different views of switch roles" )
+
+ if rolesResults and not consistentMastership:
+ for i in range( len( ONOSMastership ) ):
+ node = str( main.activeNodes[i] + 1 )
+ main.log.warn( "ONOS" + node + " roles: ",
+ json.dumps( json.loads( ONOSMastership[ i ] ),
+ sort_keys=True,
+ indent=4,
+ separators=( ',', ': ' ) ) )
+
+ # NOTE: we expect mastership to change on controller scaling down
+
+ main.step( "Get the intents and compare across all nodes" )
+ ONOSIntents = []
+ intentCheck = main.FALSE
+ consistentIntents = True
+ intentsResults = True
+ threads = []
+ for i in main.activeNodes:
+ t = main.Thread( target=main.CLIs[i].intents,
+ name="intents-" + str( i ),
+ args=[],
+ kwargs={ 'jsonFormat': True } )
+ threads.append( t )
+ t.start()
+
+ for t in threads:
+ t.join()
+ ONOSIntents.append( t.result )
+
+ for i in range( len( ONOSIntents) ):
+ node = str( main.activeNodes[i] + 1 )
+ if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
+ main.log.error( "Error in getting ONOS" + node + " intents" )
+ main.log.warn( "ONOS" + node + " intents response: " +
+ repr( ONOSIntents[ i ] ) )
+ intentsResults = False
+ utilities.assert_equals(
+ expect=True,
+ actual=intentsResults,
+ onpass="No error in reading intents output",
+ onfail="Error in reading intents from ONOS" )
+
+ main.step( "Check for consistency in Intents from each controller" )
+ if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
+ main.log.info( "Intents are consistent across all ONOS " +
+ "nodes" )
+ else:
+ consistentIntents = False
+
+ # Try to make it easy to figure out what is happening
+ #
+ # Intent ONOS1 ONOS2 ...
+ # 0x01 INSTALLED INSTALLING
+ # ... ... ...
+ # ... ... ...
+ title = " ID"
+ for n in main.activeNodes:
+ title += " " * 10 + "ONOS" + str( n + 1 )
+ main.log.warn( title )
+ # get all intent keys in the cluster
+ keys = []
+ for nodeStr in ONOSIntents:
+ node = json.loads( nodeStr )
+ for intent in node:
+ keys.append( intent.get( 'id' ) )
+ keys = set( keys )
+ for key in keys:
+ row = "%-13s" % key
+ for nodeStr in ONOSIntents:
+ node = json.loads( nodeStr )
+ for intent in node:
+ if intent.get( 'id' ) == key:
+ row += "%-15s" % intent.get( 'state' )
+ main.log.warn( row )
+ # End table view
+
+ utilities.assert_equals(
+ expect=True,
+ actual=consistentIntents,
+ onpass="Intents are consistent across all ONOS nodes",
+ onfail="ONOS nodes have different views of intents" )
+ intentStates = []
+ for node in ONOSIntents: # Iter through ONOS nodes
+ nodeStates = []
+ # Iter through intents of a node
+ try:
+ for intent in json.loads( node ):
+ nodeStates.append( intent[ 'state' ] )
+ except ( ValueError, TypeError ):
+ main.log.exception( "Error in parsing intents" )
+ main.log.error( repr( node ) )
+ intentStates.append( nodeStates )
+ out = [ (i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
+ main.log.info( dict( out ) )
+
+ if intentsResults and not consistentIntents:
+ for i in range( len( main.activeNodes ) ):
+ node = str( main.activeNodes[i] + 1 )
+ main.log.warn( "ONOS" + node + " intents: " )
+ main.log.warn( json.dumps(
+ json.loads( ONOSIntents[ i ] ),
+ sort_keys=True,
+ indent=4,
+ separators=( ',', ': ' ) ) )
+ elif intentsResults and consistentIntents:
+ intentCheck = main.TRUE
+
+ main.step( "Compare current intents with intents before the scaling" )
+ # NOTE: this requires case 5 to pass for intentState to be set.
+ # maybe we should stop the test if that fails?
+ sameIntents = main.FALSE
+ try:
+ intentState
+ except NameError:
+ main.log.warn( "No previous intent state was saved" )
+ else:
+ if intentState and intentState == ONOSIntents[ 0 ]:
+ sameIntents = main.TRUE
+ main.log.info( "Intents are consistent with before scaling" )
+ # TODO: possibly the states have changed? we may need to figure out
+ # what the acceptable states are
+ elif len( intentState ) == len( ONOSIntents[ 0 ] ):
+ sameIntents = main.TRUE
+ try:
+ before = json.loads( intentState )
+ after = json.loads( ONOSIntents[ 0 ] )
+ for intent in before:
+ if intent not in after:
+ sameIntents = main.FALSE
+ main.log.debug( "Intent is not currently in ONOS " +
+ "(at least in the same form):" )
+ main.log.debug( json.dumps( intent ) )
+ except ( ValueError, TypeError ):
+ main.log.exception( "Exception printing intents" )
+ main.log.debug( repr( ONOSIntents[0] ) )
+ main.log.debug( repr( intentState ) )
+ if sameIntents == main.FALSE:
+ try:
+ main.log.debug( "ONOS intents before: " )
+ main.log.debug( json.dumps( json.loads( intentState ),
+ sort_keys=True, indent=4,
+ separators=( ',', ': ' ) ) )
+ main.log.debug( "Current ONOS intents: " )
+ main.log.debug( json.dumps( json.loads( ONOSIntents[ 0 ] ),
+ sort_keys=True, indent=4,
+ separators=( ',', ': ' ) ) )
+ except ( ValueError, TypeError ):
+ main.log.exception( "Exception printing intents" )
+ main.log.debug( repr( ONOSIntents[0] ) )
+ main.log.debug( repr( intentState ) )
+ utilities.assert_equals(
+ expect=main.TRUE,
+ actual=sameIntents,
+ onpass="Intents are consistent with before scaling",
+ onfail="The Intents changed during scaling" )
+ intentCheck = intentCheck and sameIntents
+
+ main.step( "Get the OF Table entries and compare to before " +
+ "component scaling" )
+ FlowTables = main.TRUE
+ for i in range( 28 ):
+ main.log.info( "Checking flow table on s" + str( i + 1 ) )
+ tmpFlows = main.Mininet1.getFlowTable( "s" + str( i + 1 ), version="1.3", debug=False )
+ curSwitch = main.Mininet1.flowTableComp( flows[i], tmpFlows )
+ FlowTables = FlowTables and curSwitch
+ if curSwitch == main.FALSE:
+ main.log.warn( "Differences in flow table for switch: s{}".format( i + 1 ) )
+ utilities.assert_equals(
+ expect=main.TRUE,
+ actual=FlowTables,
+ onpass="No changes were found in the flow tables",
+ onfail="Changes were found in the flow tables" )
+
+ main.Mininet2.pingLongKill()
+ '''
+ # main.step( "Check the continuous pings to ensure that no packets " +
+ # "were dropped during component failure" )
+ main.Mininet2.pingKill( main.params[ 'TESTONUSER' ],
+ main.params[ 'TESTONIP' ] )
+ LossInPings = main.FALSE
+ # NOTE: checkForLoss returns main.FALSE with 0% packet loss
+ for i in range( 8, 18 ):
+ main.log.info(
+ "Checking for a loss in pings along flow from s" +
+ str( i ) )
+ LossInPings = main.Mininet2.checkForLoss(
+ "/tmp/ping.h" +
+ str( i ) ) or LossInPings
+ if LossInPings == main.TRUE:
+ main.log.info( "Loss in ping detected" )
+ elif LossInPings == main.ERROR:
+ main.log.info( "There are multiple mininet process running" )
+ elif LossInPings == main.FALSE:
+ main.log.info( "No Loss in the pings" )
+ main.log.info( "No loss of dataplane connectivity" )
+ # utilities.assert_equals(
+ # expect=main.FALSE,
+ # actual=LossInPings,
+ # onpass="No Loss of connectivity",
+ # onfail="Loss of dataplane connectivity detected" )
+
+ # NOTE: Since intents are not persisted with IntnentStore,
+ # we expect loss in dataplane connectivity
+ LossInPings = main.FALSE
+ '''
+
+ main.step( "Leadership Election is still functional" )
+ # Test of LeadershipElection
+ leaderList = []
+ leaderResult = main.TRUE
+
+ for i in main.activeNodes:
+ cli = main.CLIs[i]
+ leaderN = cli.electionTestLeader()
+ leaderList.append( leaderN )
+ if leaderN == main.FALSE:
+ # error in response
+ main.log.error( "Something is wrong with " +
+ "electionTestLeader function, check the" +
+ " error logs" )
+ leaderResult = main.FALSE
+ elif leaderN is None:
+ main.log.error( cli.name +
+ " shows no leader for the election-app." )
+ leaderResult = main.FALSE
+ if len( set( leaderList ) ) != 1:
+ leaderResult = main.FALSE
+ main.log.error(
+ "Inconsistent view of leader for the election test app" )
+ # TODO: print the list
+ utilities.assert_equals(
+ expect=main.TRUE,
+ actual=leaderResult,
+ onpass="Leadership election passed",
+ onfail="Something went wrong with Leadership election" )
+
+ def CASE8( self, main ):
+ """
+ Compare topo
+ """
+ import json
+ import time
+ assert main.numCtrls, "main.numCtrls not defined"
+ assert main, "main not defined"
+ assert utilities.assert_equals, "utilities.assert_equals not defined"
+ assert main.CLIs, "main.CLIs not defined"
+ assert main.nodes, "main.nodes not defined"
+
+ main.case( "Compare ONOS Topology view to Mininet topology" )
+ main.caseExplanation = "Compare topology objects between Mininet" +\
+ " and ONOS"
+ topoResult = main.FALSE
+ topoFailMsg = "ONOS topology don't match Mininet"
+ elapsed = 0
+ count = 0
+ main.step( "Comparing ONOS topology to MN topology" )
+ startTime = time.time()
+ # Give time for Gossip to work
+ while topoResult == main.FALSE and ( elapsed < 60 or count < 3 ):
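+ # Keep polling until the topologies match; the "or" means we keep
+ # retrying until both 60 seconds have elapsed and 3 attempts were made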
+ devicesResults = main.TRUE
+ linksResults = main.TRUE
+ hostsResults = main.TRUE
+ hostAttachmentResults = True
+ count += 1
+ cliStart = time.time()
+ devices = []
+ threads = []
+ for i in main.activeNodes:
+ t = main.Thread( target=utilities.retry,
+ name="devices-" + str( i ),
+ args=[ main.CLIs[i].devices, [ None ] ],
+ kwargs= { 'sleep': 5, 'attempts': 5,
+ 'randomTime': True } )
+ threads.append( t )
+ t.start()
+
+ for t in threads:
+ t.join()
+ devices.append( t.result )
+ hosts = []
+ ipResult = main.TRUE
+ threads = []
+ for i in main.activeNodes:
+ t = main.Thread( target=utilities.retry,
+ name="hosts-" + str( i ),
+ args=[ main.CLIs[i].hosts, [ None ] ],
+ kwargs= { 'sleep': 5, 'attempts': 5,
+ 'randomTime': True } )
+ threads.append( t )
+ t.start()
+
+ for t in threads:
+ t.join()
+ try:
+ hosts.append( json.loads( t.result ) )
+ except ( ValueError, TypeError ):
+ main.log.exception( "Error parsing hosts results" )
+ main.log.error( repr( t.result ) )
+ hosts.append( None )
+ for controller in range( 0, len( hosts ) ):
+ controllerStr = str( main.activeNodes[controller] + 1 )
+ if hosts[ controller ]:
+ for host in hosts[ controller ]:
+ if host is None or host.get( 'ipAddresses', [] ) == []:
+ main.log.error(
+ "Error with host ipAddresses on controller" +
+ controllerStr + ": " + str( host ) )
+ ipResult = main.FALSE
+ ports = []
+ threads = []
+ for i in main.activeNodes:
+ t = main.Thread( target=utilities.retry,
+ name="ports-" + str( i ),
+ args=[ main.CLIs[i].ports, [ None ] ],
+ kwargs= { 'sleep': 5, 'attempts': 5,
+ 'randomTime': True } )
+ threads.append( t )
+ t.start()
+
+ for t in threads:
+ t.join()
+ ports.append( t.result )
+ links = []
+ threads = []
+ for i in main.activeNodes:
+ t = main.Thread( target=utilities.retry,
+ name="links-" + str( i ),
+ args=[ main.CLIs[i].links, [ None ] ],
+ kwargs= { 'sleep': 5, 'attempts': 5,
+ 'randomTime': True } )
+ threads.append( t )
+ t.start()
+
+ for t in threads:
+ t.join()
+ links.append( t.result )
+ clusters = []
+ threads = []
+ for i in main.activeNodes:
+ t = main.Thread( target=utilities.retry,
+ name="clusters-" + str( i ),
+ args=[ main.CLIs[i].clusters, [ None ] ],
+ kwargs= { 'sleep': 5, 'attempts': 5,
+ 'randomTime': True } )
+ threads.append( t )
+ t.start()
+
+ for t in threads:
+ t.join()
+ clusters.append( t.result )
+
+ elapsed = time.time() - startTime
+ cliTime = time.time() - cliStart
+ print "Elapsed time: " + str( elapsed )
+ print "CLI time: " + str( cliTime )
+
+ if all( e is None for e in devices ) and\
+ all( e is None for e in hosts ) and\
+ all( e is None for e in ports ) and\
+ all( e is None for e in links ) and\
+ all( e is None for e in clusters ):
+ topoFailMsg = "Could not get topology from ONOS"
+ main.log.error( topoFailMsg )
+ continue # Try again, No use trying to compare
+
+ mnSwitches = main.Mininet1.getSwitches()
+ mnLinks = main.Mininet1.getLinks()
+ mnHosts = main.Mininet1.getHosts()
+ for controller in range( len( main.activeNodes ) ):
+ controllerStr = str( main.activeNodes[controller] + 1 )
+ if devices[ controller ] and ports[ controller ] and\
+ "Error" not in devices[ controller ] and\
+ "Error" not in ports[ controller ]:
+
+ try:
+ currentDevicesResult = main.Mininet1.compareSwitches(
+ mnSwitches,
+ json.loads( devices[ controller ] ),
+ json.loads( ports[ controller ] ) )
+ except ( TypeError, ValueError ):
+ main.log.exception( "Object not as expected; devices={!r}\nports={!r}".format(
+ devices[ controller ], ports[ controller ] ) )
+ currentDevicesResult = main.FALSE
+ else:
+ currentDevicesResult = main.FALSE
+ utilities.assert_equals( expect=main.TRUE,
+ actual=currentDevicesResult,
+ onpass="ONOS" + controllerStr +
+ " Switches view is correct",
+ onfail="ONOS" + controllerStr +
+ " Switches view is incorrect" )
+
+ if links[ controller ] and "Error" not in links[ controller ]:
+ currentLinksResult = main.Mininet1.compareLinks(
+ mnSwitches, mnLinks,
+ json.loads( links[ controller ] ) )
+ else:
+ currentLinksResult = main.FALSE
+ utilities.assert_equals( expect=main.TRUE,
+ actual=currentLinksResult,
+ onpass="ONOS" + controllerStr +
+ " links view is correct",
+ onfail="ONOS" + controllerStr +
+ " links view is incorrect" )
+ if hosts[ controller ] and "Error" not in hosts[ controller ]:
+ currentHostsResult = main.Mininet1.compareHosts(
+ mnHosts,
+ hosts[ controller ] )
+ elif hosts[ controller ] == []:
+ currentHostsResult = main.TRUE
+ else:
+ currentHostsResult = main.FALSE
+ utilities.assert_equals( expect=main.TRUE,
+ actual=currentHostsResult,
+ onpass="ONOS" + controllerStr +
+ " hosts exist in Mininet",
+ onfail="ONOS" + controllerStr +
+ " hosts don't match Mininet" )
+ # CHECKING HOST ATTACHMENT POINTS
+ hostAttachment = True
+ zeroHosts = False
+ # FIXME: topo-HA/obelisk specific mappings:
+ # key is mac and value is dpid
+ mappings = {}
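+ # e.g. h8 -> MAC 00:00:00:00:00:08, expected on device
+ # 0000000000003008 at port 1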
+ for i in range( 1, 29 ): # hosts 1 through 28
+ # set up correct variables:
+ macId = "00:" * 5 + hex( i ).split( "0x" )[1].upper().zfill(2)
+ if i == 1:
+ deviceId = "1000".zfill(16)
+ elif i == 2:
+ deviceId = "2000".zfill(16)
+ elif i == 3:
+ deviceId = "3000".zfill(16)
+ elif i == 4:
+ deviceId = "3004".zfill(16)
+ elif i == 5:
+ deviceId = "5000".zfill(16)
+ elif i == 6:
+ deviceId = "6000".zfill(16)
+ elif i == 7:
+ deviceId = "6007".zfill(16)
+ elif i >= 8 and i <= 17:
+ dpid = '3' + str( i ).zfill( 3 )
+ deviceId = dpid.zfill(16)
+ elif i >= 18 and i <= 27:
+ dpid = '6' + str( i ).zfill( 3 )
+ deviceId = dpid.zfill(16)
+ elif i == 28:
+ deviceId = "2800".zfill(16)
+ mappings[ macId ] = deviceId
+ if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
+ if hosts[ controller ] == []:
+ main.log.warn( "There are no hosts discovered" )
+ zeroHosts = True
+ else:
+ for host in hosts[ controller ]:
+ mac = None
+ location = None
+ device = None
+ port = None
+ try:
+ mac = host.get( 'mac' )
+ assert mac, "mac field could not be found for this host object"
+
+ location = host.get( 'location' )
+ assert location, "location field could not be found for this host object"
+
+ # Trim the protocol identifier off deviceId
+ device = str( location.get( 'elementId' ) ).split(':')[1]
+ assert device, "elementId field could not be found for this host location object"
+
+ port = location.get( 'port' )
+ assert port, "port field could not be found for this host location object"
+
+ # Now check if this matches where they should be
+ if mac and device and port:
+ if str( port ) != "1":
+ main.log.error( "The attachment port is incorrect for " +
+ "host " + str( mac ) +
+ ". Expected: 1 Actual: " + str( port) )
+ hostAttachment = False
+ if device != mappings[ str( mac ) ]:
+ main.log.error( "The attachment device is incorrect for " +
+ "host " + str( mac ) +
+ ". Expected: " + mappings[ str( mac ) ] +
+ " Actual: " + device )
+ hostAttachment = False
+ else:
+ hostAttachment = False
+ except AssertionError:
+ main.log.exception( "Json object not as expected" )
+ main.log.error( repr( host ) )
+ hostAttachment = False
+ else:
+ main.log.error( "No hosts json output or \"Error\"" +
+ " in output. hosts = " +
+ repr( hosts[ controller ] ) )
+ if zeroHosts is False:
+ # TODO: Find a way to know if there should be hosts in a
+ # given point of the test
+ hostAttachment = True
+
+ # END CHECKING HOST ATTACHMENT POINTS
+ devicesResults = devicesResults and currentDevicesResult
+ linksResults = linksResults and currentLinksResult
+ hostsResults = hostsResults and currentHostsResult
+ hostAttachmentResults = hostAttachmentResults and\
+ hostAttachment
+ topoResult = ( devicesResults and linksResults
+ and hostsResults and ipResult and
+ hostAttachmentResults )
+ utilities.assert_equals( expect=True,
+ actual=topoResult,
+ onpass="ONOS topology matches Mininet",
+ onfail=topoFailMsg )
+ # End of While loop to pull ONOS state
+
+ # Compare json objects for hosts and dataplane clusters
+
+ # hosts
+ main.step( "Hosts view is consistent across all ONOS nodes" )
+ consistentHostsResult = main.TRUE
+ for controller in range( len( hosts ) ):
+ controllerStr = str( main.activeNodes[controller] + 1 )
+ if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
+ if hosts[ controller ] == hosts[ 0 ]:
+ continue
+ else: # hosts not consistent
+ main.log.error( "hosts from ONOS" + controllerStr +
+ " is inconsistent with ONOS1" )
+ main.log.warn( repr( hosts[ controller ] ) )
+ consistentHostsResult = main.FALSE
+
+ else:
+ main.log.error( "Error in getting ONOS hosts from ONOS" +
+ controllerStr )
+ consistentHostsResult = main.FALSE
+ main.log.warn( "ONOS" + controllerStr +
+ " hosts response: " +
+ repr( hosts[ controller ] ) )
+ utilities.assert_equals(
+ expect=main.TRUE,
+ actual=consistentHostsResult,
+ onpass="Hosts view is consistent across all ONOS nodes",
+ onfail="ONOS nodes have different views of hosts" )
+
+ main.step( "Hosts information is correct" )
+ hostsResults = hostsResults and ipResult
+ utilities.assert_equals(
+ expect=main.TRUE,
+ actual=hostsResults,
+ onpass="Host information is correct",
+ onfail="Host information is incorrect" )
+
+ main.step( "Host attachment points to the network" )
+ utilities.assert_equals(
+ expect=True,
+ actual=hostAttachmentResults,
+ onpass="Hosts are correctly attached to the network",
+ onfail="ONOS did not correctly attach hosts to the network" )
+
+ # Strongly connected clusters of devices
+ main.step( "Clusters view is consistent across all ONOS nodes" )
+ consistentClustersResult = main.TRUE
+ for controller in range( len( clusters ) ):
+ controllerStr = str( main.activeNodes[controller] + 1 )
+ if "Error" not in clusters[ controller ]:
+ if clusters[ controller ] == clusters[ 0 ]:
+ continue
+ else: # clusters not consistent
+ main.log.error( "clusters from ONOS" +
+ controllerStr +
+ " is inconsistent with ONOS1" )
+ consistentClustersResult = main.FALSE
+ else:
+ main.log.error( "Error in getting dataplane clusters " +
+ "from ONOS" + controllerStr )
+ consistentClustersResult = main.FALSE
+ main.log.warn( "ONOS" + controllerStr +
+ " clusters response: " +
+ repr( clusters[ controller ] ) )
+ utilities.assert_equals(
+ expect=main.TRUE,
+ actual=consistentClustersResult,
+ onpass="Clusters view is consistent across all ONOS nodes",
+ onfail="ONOS nodes have different views of clusters" )
+ if not consistentClustersResult:
+ main.log.debug( clusters )
+ for x in links:
+ main.log.warn( "{}: {}".format( len( x ), x ) )
+
+ main.step( "There is only one SCC" )
+ # there should always only be one cluster
+ try:
+ numClusters = len( json.loads( clusters[ 0 ] ) )
+ except ( ValueError, TypeError ):
+ main.log.exception( "Error parsing clusters[0]: " +
+ repr( clusters[0] ) )
+ numClusters = "ERROR"
+ clusterResults = main.FALSE
+ if numClusters == 1:
+ clusterResults = main.TRUE
+ utilities.assert_equals(
+ expect=1,
+ actual=numClusters,
+ onpass="ONOS shows 1 SCC",
+ onfail="ONOS shows " + str( numClusters ) + " SCCs" )
+
+ topoResult = ( devicesResults and linksResults
+ and hostsResults and consistentHostsResult
+ and consistentClustersResult and clusterResults
+ and ipResult and hostAttachmentResults )
+
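+ # Also fail the step if convergence took more than 2 polling rounds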
+ topoResult = topoResult and int( count <= 2 )
+ note = "note it takes about " + str( int( cliTime ) ) + \
+ " seconds for the test to make all the cli calls to fetch " +\
+ "the topology from each ONOS instance"
+ main.log.info(
+ "Very crass estimate for topology discovery/convergence( " +
+ str( note ) + " ): " + str( elapsed ) + " seconds, " +
+ str( count ) + " tries" )
+
+ main.step( "Device information is correct" )
+ utilities.assert_equals(
+ expect=main.TRUE,
+ actual=devicesResults,
+ onpass="Device information is correct",
+ onfail="Device information is incorrect" )
+
+ main.step( "Links are correct" )
+ utilities.assert_equals(
+ expect=main.TRUE,
+ actual=linksResults,
+ onpass="Link are correct",
+ onfail="Links are incorrect" )
+
+ main.step( "Hosts are correct" )
+ utilities.assert_equals(
+ expect=main.TRUE,
+ actual=hostsResults,
+ onpass="Hosts are correct",
+ onfail="Hosts are incorrect" )
+
+ # FIXME: move this to an ONOS state case
+ main.step( "Checking ONOS nodes" )
+ nodeResults = utilities.retry( main.HA.nodesCheck,
+ False,
+ args=[main.activeNodes],
+ attempts=5 )
+ utilities.assert_equals( expect=True, actual=nodeResults,
+ onpass="Nodes check successful",
+ onfail="Nodes check NOT successful" )
+ if not nodeResults:
+ for i in main.activeNodes:
+ main.log.debug( "{} components not ACTIVE: \n{}".format(
+ main.CLIs[i].name,
+ main.CLIs[i].sendline( "scr:list | grep -v ACTIVE" ) ) )
+
+ def CASE9( self, main ):
+ """
+ Link s3-s28 down
+ """
+ import time
+ assert main.numCtrls, "main.numCtrls not defined"
+ assert main, "main not defined"
+ assert utilities.assert_equals, "utilities.assert_equals not defined"
+ assert main.CLIs, "main.CLIs not defined"
+ assert main.nodes, "main.nodes not defined"
+ # NOTE: You should probably run a topology check after this
+
+ linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
+
+ description = "Turn off a link to ensure that Link Discovery " +\
+ "is working properly"
+ main.case( description )
+
+ main.step( "Kill Link between s3 and s28" )
+ LinkDown = main.Mininet1.link( END1="s3", END2="s28", OPTION="down" )
+ main.log.info( "Waiting " + str( linkSleep ) +
+ " seconds for link down to be discovered" )
+ time.sleep( linkSleep )
+ utilities.assert_equals( expect=main.TRUE, actual=LinkDown,
+ onpass="Link down successful",
+ onfail="Failed to bring link down" )
+ # TODO do some sort of check here
+
+ def CASE10( self, main ):
+ """
+ Link s3-s28 up
+ """
+ import time
+ assert main.numCtrls, "main.numCtrls not defined"
+ assert main, "main not defined"
+ assert utilities.assert_equals, "utilities.assert_equals not defined"
+ assert main.CLIs, "main.CLIs not defined"
+ assert main.nodes, "main.nodes not defined"
+ # NOTE: You should probably run a topology check after this
+
+ linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
+
+ description = "Restore a link to ensure that Link Discovery is " + \
+ "working properly"
+ main.case( description )
+
+ main.step( "Bring link between s3 and s28 back up" )
+ LinkUp = main.Mininet1.link( END1="s3", END2="s28", OPTION="up" )
+ main.log.info( "Waiting " + str( linkSleep ) +
+ " seconds for link up to be discovered" )
+ time.sleep( linkSleep )
+ utilities.assert_equals( expect=main.TRUE, actual=LinkUp,
+ onpass="Link up successful",
+ onfail="Failed to bring link up" )
+ # TODO do some sort of check here
+
+ def CASE11( self, main ):
+ """
+ Switch Down
+ """
+ # NOTE: You should probably run a topology check after this
+ import time
+ assert main.numCtrls, "main.numCtrls not defined"
+ assert main, "main not defined"
+ assert utilities.assert_equals, "utilities.assert_equals not defined"
+ assert main.CLIs, "main.CLIs not defined"
+ assert main.nodes, "main.nodes not defined"
+
+ switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
+
+ description = "Killing a switch to ensure it is discovered correctly"
+ onosCli = main.CLIs[ main.activeNodes[0] ]
+ main.case( description )
+ switch = main.params[ 'kill' ][ 'switch' ]
+ switchDPID = main.params[ 'kill' ][ 'dpid' ]
+
+ # TODO: Make this switch parameterizable
+ main.step( "Kill " + switch )
+ main.log.info( "Deleting " + switch )
+ main.Mininet1.delSwitch( switch )
+ main.log.info( "Waiting " + str( switchSleep ) +
+ " seconds for switch down to be discovered" )
+ time.sleep( switchSleep )
+ device = onosCli.getDevice( dpid=switchDPID )
+ # Peek at the deleted switch
+ main.log.warn( str( device ) )
+ result = main.FALSE
+ if device and device[ 'available' ] is False:
+ result = main.TRUE
+ utilities.assert_equals( expect=main.TRUE, actual=result,
+ onpass="Kill switch successful",
+ onfail="Failed to kill switch?" )
+
+ def CASE12( self, main ):
+ """
+ Switch Up
+ """
+ # NOTE: You should probably run a topology check after this
+ import time
+ assert main.numCtrls, "main.numCtrls not defined"
+ assert main, "main not defined"
+ assert utilities.assert_equals, "utilities.assert_equals not defined"
+ assert main.CLIs, "main.CLIs not defined"
+ assert main.nodes, "main.nodes not defined"
+
+ switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
+ switch = main.params[ 'kill' ][ 'switch' ]
+ switchDPID = main.params[ 'kill' ][ 'dpid' ]
+ links = main.params[ 'kill' ][ 'links' ].split()
+ onosCli = main.CLIs[ main.activeNodes[0] ]
+ description = "Adding a switch to ensure it is discovered correctly"
+ main.case( description )
+
+ main.step( "Add back " + switch )
+ main.Mininet1.addSwitch( switch, dpid=switchDPID )
+ for peer in links:
+ main.Mininet1.addLink( switch, peer )
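+ # Point the re-added switch at every ONOS node so mastership can be
+ # re-established for it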
+ ipList = [ node.ip_address for node in main.nodes ]
+ main.Mininet1.assignSwController( sw=switch, ip=ipList )
+ main.log.info( "Waiting " + str( switchSleep ) +
+ " seconds for switch up to be discovered" )
+ time.sleep( switchSleep )
+ device = onosCli.getDevice( dpid=switchDPID )
+ # Peek at the re-added switch
+ main.log.warn( str( device ) )
+ result = main.FALSE
+ if device and device[ 'available' ]:
+ result = main.TRUE
+ utilities.assert_equals( expect=main.TRUE, actual=result,
+ onpass="add switch successful",
+ onfail="Failed to add switch?" )
+
+ def CASE13( self, main ):
+ """
+ Clean up
+ """
+ assert main.numCtrls, "main.numCtrls not defined"
+ assert main, "main not defined"
+ assert utilities.assert_equals, "utilities.assert_equals not defined"
+ assert main.CLIs, "main.CLIs not defined"
+ assert main.nodes, "main.nodes not defined"
+
+ main.case( "Test Cleanup" )
+ main.step( "Killing tcpdumps" )
+ main.Mininet2.stopTcpdump()
+
+ if main.params[ 'BACKUP' ][ 'ENABLED' ] == "True":
+ main.step( "Copying MN pcap and ONOS log files to test station" )
+ # NOTE: MN Pcap file is being saved to logdir.
+ # We scp this file as MN and TestON aren't necessarily the same vm
+
+ # FIXME: To be replaced with a Jenkins post-build script
+ # TODO: Load these from params
+ # NOTE: must end in /
+ logFolder = "/opt/onos/log/"
+ logFiles = [ "karaf.log", "karaf.log.1" ]
+ for f in logFiles:
+ for node in main.nodes:
+ dstName = main.logdir + "/" + node.name + "-" + f
+ main.ONOSbench.secureCopy( node.user_name, node.ip_address,
+ logFolder + f, dstName )
+ # std*.log's
+ # NOTE: must end in /
+ logFolder = "/opt/onos/var/"
+ logFiles = [ "stderr.log", "stdout.log" ]
+ for f in logFiles:
+ for node in main.nodes:
+ dstName = main.logdir + "/" + node.name + "-" + f
+ main.ONOSbench.secureCopy( node.user_name, node.ip_address,
+ logFolder + f, dstName )
+ else:
+ main.log.debug( "skipping saving log files" )
+
+ main.step( "Stopping Mininet" )
+ mnResult = main.Mininet1.stopNet()
+ utilities.assert_equals( expect=main.TRUE, actual=mnResult,
+ onpass="Mininet stopped",
+ onfail="MN cleanup NOT successful" )
+
+ main.step( "Checking ONOS Logs for errors" )
+ for node in main.nodes:
+ main.log.debug( "Checking logs for errors on " + node.name + ":" )
+ main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
+
+ try:
+ timerLog = open( main.logdir + "/Timers.csv", 'w')
+ main.log.error( ", ".join( labels ) + "\n" + ", ".join( data ) )
+ timerLog.write( ", ".join( labels ) + "\n" + ", ".join( data ) )
+ timerLog.close()
+ except NameError as e:
+ main.log.exception( e )
+
+ main.step( "Stopping webserver" )
+ status = main.Server.stop( )
+ utilities.assert_equals( expect=main.TRUE, actual=status,
+ onpass="Stop Server",
+ onfail="Failled to stop SimpleHTTPServer" )
+ del main.Server
+
+ def CASE14( self, main ):
+ """
+ start election app on all onos nodes
+ """
+ import time
+ assert main.numCtrls, "main.numCtrls not defined"
+ assert main, "main not defined"
+ assert utilities.assert_equals, "utilities.assert_equals not defined"
+ assert main.CLIs, "main.CLIs not defined"
+ assert main.nodes, "main.nodes not defined"
+
+ main.case("Start Leadership Election app")
+ main.step( "Install leadership election app" )
+ onosCli = main.CLIs[ main.activeNodes[0] ]
+ appResult = onosCli.activateApp( "org.onosproject.election" )
+ utilities.assert_equals(
+ expect=main.TRUE,
+ actual=appResult,
+ onpass="Election app installed",
+ onfail="Something went wrong with installing Leadership election" )
+
+ main.step( "Run for election on each node" )
+ for i in main.activeNodes:
+ main.CLIs[i].electionTestRun()
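+ # Brief pause so the election app can elect a leader before we
+ # compare leaderboards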
+ time.sleep(5)
+ activeCLIs = [ main.CLIs[i] for i in main.activeNodes ]
+ sameResult, leaders = main.HA.consistentLeaderboards( activeCLIs )
+ utilities.assert_equals(
+ expect=True,
+ actual=sameResult,
+ onpass="All nodes see the same leaderboards",
+ onfail="Inconsistent leaderboards" )
+
+ if sameResult:
+ leader = leaders[ 0 ][ 0 ]
+ if main.nodes[ main.activeNodes[0] ].ip_address in leader:
+ correctLeader = True
+ else:
+ correctLeader = False
+ main.step( "First node was elected leader" )
+ utilities.assert_equals(
+ expect=True,
+ actual=correctLeader,
+ onpass="Correct leader was elected",
+ onfail="Incorrect leader" )
+
+ def CASE15( self, main ):
+ """
+ Check that Leadership Election is still functional
+ 15.1 Run election on each node
+ 15.2 Check that each node has the same leaders and candidates
+ 15.3 Find current leader and withdraw
+ 15.4 Check that a new node was elected leader
+        15.5 Check that the new leader was a candidate of the old leader
+ 15.6 Run for election on old leader
+ 15.7 Check that oldLeader is a candidate, and leader if only 1 node
+ 15.8 Make sure that the old leader was added to the candidate list
+
+        The old and new variable prefixes refer to data from before vs. after
+        the withdrawal, and later to before the withdrawal vs. after re-election
+ """
+ import time
+ assert main.numCtrls, "main.numCtrls not defined"
+ assert main, "main not defined"
+ assert utilities.assert_equals, "utilities.assert_equals not defined"
+ assert main.CLIs, "main.CLIs not defined"
+ assert main.nodes, "main.nodes not defined"
+
+ description = "Check that Leadership Election is still functional"
+ main.case( description )
+        # NOTE: Need to re-run after restarts since being a candidate is not persistent
+
+        oldLeaders = []  # list of lists of each node's candidates before
+        newLeaders = []  # list of lists of each node's candidates after
+        oldLeader = ''  # the old leader from oldLeaders, None if not same
+        newLeader = ''  # the new leader from newLeaders, None if not same
+        oldLeaderCLI = None  # the CLI of the old leader used for re-electing
+ expectNoLeader = False # True when there is only one leader
+ if main.numCtrls == 1:
+ expectNoLeader = True
+
+ main.step( "Run for election on each node" )
+ electionResult = main.TRUE
+
+ for i in main.activeNodes: # run test election on each node
+ if main.CLIs[i].electionTestRun() == main.FALSE:
+ electionResult = main.FALSE
+ utilities.assert_equals(
+ expect=main.TRUE,
+ actual=electionResult,
+ onpass="All nodes successfully ran for leadership",
+ onfail="At least one node failed to run for leadership" )
+
+ if electionResult == main.FALSE:
+ main.log.error(
+ "Skipping Test Case because Election Test App isn't loaded" )
+ main.skipCase()
+
+ main.step( "Check that each node shows the same leader and candidates" )
+ failMessage = "Nodes have different leaderboards"
+ activeCLIs = [ main.CLIs[i] for i in main.activeNodes ]
+ sameResult, oldLeaders = main.HA.consistentLeaderboards( activeCLIs )
+ if sameResult:
+ oldLeader = oldLeaders[ 0 ][ 0 ]
+ main.log.warn( oldLeader )
+ else:
+ oldLeader = None
+ utilities.assert_equals(
+ expect=True,
+ actual=sameResult,
+ onpass="Leaderboards are consistent for the election topic",
+ onfail=failMessage )
+
+ main.step( "Find current leader and withdraw" )
+ withdrawResult = main.TRUE
+ # do some sanity checking on leader before using it
+ if oldLeader is None:
+ main.log.error( "Leadership isn't consistent." )
+ withdrawResult = main.FALSE
+ # Get the CLI of the oldLeader
+ for i in main.activeNodes:
+ if oldLeader == main.nodes[ i ].ip_address:
+ oldLeaderCLI = main.CLIs[ i ]
+ break
+ else: # FOR/ELSE statement
+ main.log.error( "Leader election, could not find current leader" )
+ if oldLeader:
+ withdrawResult = oldLeaderCLI.electionTestWithdraw()
+ utilities.assert_equals(
+ expect=main.TRUE,
+ actual=withdrawResult,
+ onpass="Node was withdrawn from election",
+ onfail="Node was not withdrawn from election" )
+
+ main.step( "Check that a new node was elected leader" )
+ failMessage = "Nodes have different leaders"
+ # Get new leaders and candidates
+ newLeaderResult, newLeaders = main.HA.consistentLeaderboards( activeCLIs )
+ newLeader = None
+ if newLeaderResult:
+ if newLeaders[ 0 ][ 0 ] == 'none':
+ main.log.error( "No leader was elected on at least 1 node" )
+ if not expectNoLeader:
+ newLeaderResult = False
+ newLeader = newLeaders[ 0 ][ 0 ]
+
+ # Check that the new leader is not the older leader, which was withdrawn
+ if newLeader == oldLeader:
+ newLeaderResult = False
+ main.log.error( "All nodes still see old leader: " + str( oldLeader ) +
+ " as the current leader" )
+ utilities.assert_equals(
+ expect=True,
+ actual=newLeaderResult,
+ onpass="Leadership election passed",
+ onfail="Something went wrong with Leadership election" )
+
+ main.step( "Check that that new leader was the candidate of old leader" )
+ # candidates[ 2 ] should become the top candidate after withdrawl
+ correctCandidateResult = main.TRUE
+ if expectNoLeader:
+ if newLeader == 'none':
+ main.log.info( "No leader expected. None found. Pass" )
+ correctCandidateResult = main.TRUE
+ else:
+ main.log.info( "Expected no leader, got: " + str( newLeader ) )
+ correctCandidateResult = main.FALSE
+ elif len( oldLeaders[0] ) >= 3:
+ if newLeader == oldLeaders[ 0 ][ 2 ]:
+ # correct leader was elected
+ correctCandidateResult = main.TRUE
+ else:
+ correctCandidateResult = main.FALSE
+ main.log.error( "Candidate {} was elected. {} should have had priority.".format(
+ newLeader, oldLeaders[ 0 ][ 2 ] ) )
+ else:
+ main.log.warn( "Could not determine who should be the correct leader" )
+ main.log.debug( oldLeaders[ 0 ] )
+ correctCandidateResult = main.FALSE
+ utilities.assert_equals(
+ expect=main.TRUE,
+ actual=correctCandidateResult,
+ onpass="Correct Candidate Elected",
+ onfail="Incorrect Candidate Elected" )
+
+ main.step( "Run for election on old leader( just so everyone " +
+ "is in the hat )" )
+ if oldLeaderCLI is not None:
+ runResult = oldLeaderCLI.electionTestRun()
+ else:
+ main.log.error( "No old leader to re-elect" )
+ runResult = main.FALSE
+ utilities.assert_equals(
+ expect=main.TRUE,
+ actual=runResult,
+ onpass="App re-ran for election",
+ onfail="App failed to run for election" )
+
+ main.step(
+ "Check that oldLeader is a candidate, and leader if only 1 node" )
+ # verify leader didn't just change
+ # Get new leaders and candidates
+ reRunLeaders = []
+        time.sleep( 5 )  # TODO: parameterize this sleep
+ positionResult, reRunLeaders = main.HA.consistentLeaderboards( activeCLIs )
+
+ # Check that the re-elected node is last on the candidate List
+ if not reRunLeaders[0]:
+ positionResult = main.FALSE
+ elif oldLeader != reRunLeaders[ 0 ][ -1 ]:
+ main.log.error( "Old Leader ({}) not in the proper position: {} ".format( str( oldLeader),
+ str( reRunLeaders[ 0 ] ) ) )
+ positionResult = main.FALSE
+ utilities.assert_equals(
+ expect=True,
+ actual=positionResult,
+ onpass="Old leader successfully re-ran for election",
+ onfail="Something went wrong with Leadership election after " +
+ "the old leader re-ran for election" )
+
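A note on the structure the two election cases above assume: main.HA.consistentLeaderboards() is taken to return a ( allAgree, boards ) pair, where each board is one node's ordered view of the election topic with the current leader first. A minimal sketch of that shape (the helper is real in this patch series; the literal values below are hypothetical):

    # Hypothetical values -- shape only, mirroring the indexing used above.
    sameResult = True
    boards = [ [ "10.128.30.11", "10.128.30.12", "10.128.30.13" ],   # node 1's view
               [ "10.128.30.11", "10.128.30.12", "10.128.30.13" ] ]  # node 2's view
    leader = boards[ 0 ][ 0 ]          # current leader is listed first
    lastCandidate = boards[ 0 ][ -1 ]  # where a freshly re-run node should land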
+ def CASE16( self, main ):
+ """
+ Install Distributed Primitives app
+ """
+ import time
+ assert main.numCtrls, "main.numCtrls not defined"
+ assert main, "main not defined"
+ assert utilities.assert_equals, "utilities.assert_equals not defined"
+ assert main.CLIs, "main.CLIs not defined"
+ assert main.nodes, "main.nodes not defined"
+
+ # Variables for the distributed primitives tests
+ global pCounterName
+ global pCounterValue
+ global onosSet
+ global onosSetName
+ pCounterName = "TestON-Partitions"
+ pCounterValue = 0
+ onosSet = set([])
+ onosSetName = "TestON-set"
+
+ description = "Install Primitives app"
+ main.case( description )
+ main.step( "Install Primitives app" )
+ appName = "org.onosproject.distributedprimitives"
+ node = main.activeNodes[0]
+ appResults = main.CLIs[node].activateApp( appName )
+ utilities.assert_equals( expect=main.TRUE,
+ actual=appResults,
+ onpass="Primitives app activated",
+ onfail="Primitives app not activated" )
+ time.sleep( 5 ) # To allow all nodes to activate
+
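CASE17 below leans on a difference between the two counter calls: counterTestAddAndGet is expected to return the value after the increment, while counterTestGetAndAdd returns the value before it. That is why the local mirror pCounterValue is bumped before the append in one loop and after it in the other. A sketch of that bookkeeping, stripped of the Thread plumbing (illustrative only):

    # Mirrors the expected CLI semantics; not part of the test itself.
    pCounterValue = 0
    expectedResponses = []
    pCounterValue += 1                         # addAndGet: counter becomes 1...
    expectedResponses.append( pCounterValue )  # ...and the call returns 1
    expectedResponses.append( pCounterValue )  # getAndAdd: the call returns 1...
    pCounterValue += 1                         # ...and the counter becomes 2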
+ def CASE17( self, main ):
+ """
+ Check for basic functionality with distributed primitives
+ """
+ # Make sure variables are defined/set
+ assert main.numCtrls, "main.numCtrls not defined"
+ assert main, "main not defined"
+ assert utilities.assert_equals, "utilities.assert_equals not defined"
+ assert main.CLIs, "main.CLIs not defined"
+ assert main.nodes, "main.nodes not defined"
+ assert pCounterName, "pCounterName not defined"
+ assert onosSetName, "onosSetName not defined"
+ # NOTE: assert fails if value is 0/None/Empty/False
+ try:
+ pCounterValue
+ except NameError:
+ main.log.error( "pCounterValue not defined, setting to 0" )
+ pCounterValue = 0
+ try:
+ onosSet
+ except NameError:
+ main.log.error( "onosSet not defined, setting to empty Set" )
+ onosSet = set([])
+ # Variables for the distributed primitives tests. These are local only
+ addValue = "a"
+ addAllValue = "a b c d e f"
+ retainValue = "c d e f"
+
+ description = "Check for basic functionality with distributed " +\
+ "primitives"
+ main.case( description )
+        main.caseExplanation = "Test the methods of the distributed " +\
+                               "primitives (counters and sets) through the CLI"
+ # DISTRIBUTED ATOMIC COUNTERS
+ # Partitioned counters
+ main.step( "Increment then get a default counter on each node" )
+ pCounters = []
+ threads = []
+ addedPValues = []
+ for i in main.activeNodes:
+ t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
+ name="counterAddAndGet-" + str( i ),
+ args=[ pCounterName ] )
+ pCounterValue += 1
+ addedPValues.append( pCounterValue )
+ threads.append( t )
+ t.start()
+
+ for t in threads:
+ t.join()
+ pCounters.append( t.result )
+ # Check that counter incremented numController times
+ pCounterResults = True
+ for i in addedPValues:
+ tmpResult = i in pCounters
+ pCounterResults = pCounterResults and tmpResult
+ if not tmpResult:
+ main.log.error( str( i ) + " is not in partitioned "
+ "counter incremented results" )
+ utilities.assert_equals( expect=True,
+ actual=pCounterResults,
+ onpass="Default counter incremented",
+ onfail="Error incrementing default" +
+ " counter" )
+
+ main.step( "Get then Increment a default counter on each node" )
+ pCounters = []
+ threads = []
+ addedPValues = []
+ for i in main.activeNodes:
+ t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
+ name="counterGetAndAdd-" + str( i ),
+ args=[ pCounterName ] )
+ addedPValues.append( pCounterValue )
+ pCounterValue += 1
+ threads.append( t )
+ t.start()
+
+ for t in threads:
+ t.join()
+ pCounters.append( t.result )
+ # Check that counter incremented numController times
+ pCounterResults = True
+ for i in addedPValues:
+ tmpResult = i in pCounters
+ pCounterResults = pCounterResults and tmpResult
+ if not tmpResult:
+ main.log.error( str( i ) + " is not in partitioned "
+ "counter incremented results" )
+ utilities.assert_equals( expect=True,
+ actual=pCounterResults,
+ onpass="Default counter incremented",
+ onfail="Error incrementing default" +
+ " counter" )
+
+ main.step( "Counters we added have the correct values" )
+ incrementCheck = main.HA.counterCheck( pCounterName, pCounterValue )
+ utilities.assert_equals( expect=main.TRUE,
+ actual=incrementCheck,
+ onpass="Added counters are correct",
+ onfail="Added counters are incorrect" )
+
+ main.step( "Add -8 to then get a default counter on each node" )
+ pCounters = []
+ threads = []
+ addedPValues = []
+ for i in main.activeNodes:
+ t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
+ name="counterIncrement-" + str( i ),
+ args=[ pCounterName ],
+ kwargs={ "delta": -8 } )
+ pCounterValue += -8
+ addedPValues.append( pCounterValue )
+ threads.append( t )
+ t.start()
+
+ for t in threads:
+ t.join()
+ pCounters.append( t.result )
+ # Check that counter incremented numController times
+ pCounterResults = True
+ for i in addedPValues:
+ tmpResult = i in pCounters
+ pCounterResults = pCounterResults and tmpResult
+ if not tmpResult:
+ main.log.error( str( i ) + " is not in partitioned "
+ "counter incremented results" )
+ utilities.assert_equals( expect=True,
+ actual=pCounterResults,
+ onpass="Default counter incremented",
+ onfail="Error incrementing default" +
+ " counter" )
+
+ main.step( "Add 5 to then get a default counter on each node" )
+ pCounters = []
+ threads = []
+ addedPValues = []
+ for i in main.activeNodes:
+ t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
+ name="counterIncrement-" + str( i ),
+ args=[ pCounterName ],
+ kwargs={ "delta": 5 } )
+ pCounterValue += 5
+ addedPValues.append( pCounterValue )
+ threads.append( t )
+ t.start()
+
+ for t in threads:
+ t.join()
+ pCounters.append( t.result )
+ # Check that counter incremented numController times
+ pCounterResults = True
+ for i in addedPValues:
+ tmpResult = i in pCounters
+ pCounterResults = pCounterResults and tmpResult
+ if not tmpResult:
+ main.log.error( str( i ) + " is not in partitioned "
+ "counter incremented results" )
+ utilities.assert_equals( expect=True,
+ actual=pCounterResults,
+ onpass="Default counter incremented",
+ onfail="Error incrementing default" +
+ " counter" )
+
+ main.step( "Get then add 5 to a default counter on each node" )
+ pCounters = []
+ threads = []
+ addedPValues = []
+ for i in main.activeNodes:
+ t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
+ name="counterIncrement-" + str( i ),
+ args=[ pCounterName ],
+ kwargs={ "delta": 5 } )
+ addedPValues.append( pCounterValue )
+ pCounterValue += 5
+ threads.append( t )
+ t.start()
+
+ for t in threads:
+ t.join()
+ pCounters.append( t.result )
+ # Check that counter incremented numController times
+ pCounterResults = True
+ for i in addedPValues:
+ tmpResult = i in pCounters
+ pCounterResults = pCounterResults and tmpResult
+ if not tmpResult:
+ main.log.error( str( i ) + " is not in partitioned "
+ "counter incremented results" )
+ utilities.assert_equals( expect=True,
+ actual=pCounterResults,
+ onpass="Default counter incremented",
+ onfail="Error incrementing default" +
+ " counter" )
+
+ main.step( "Counters we added have the correct values" )
+ incrementCheck = main.HA.counterCheck( pCounterName, pCounterValue )
+ utilities.assert_equals( expect=main.TRUE,
+ actual=incrementCheck,
+ onpass="Added counters are correct",
+ onfail="Added counters are incorrect" )
+
+ # DISTRIBUTED SETS
+ main.step( "Distributed Set get" )
+ size = len( onosSet )
+ getResponses = []
+ threads = []
+ for i in main.activeNodes:
+ t = main.Thread( target=main.CLIs[i].setTestGet,
+ name="setTestGet-" + str( i ),
+ args=[ onosSetName ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ getResponses.append( t.result )
+
+ getResults = main.TRUE
+ for i in range( len( main.activeNodes ) ):
+ node = str( main.activeNodes[i] + 1 )
+ if isinstance( getResponses[ i ], list):
+ current = set( getResponses[ i ] )
+ if len( current ) == len( getResponses[ i ] ):
+ # no repeats
+ if onosSet != current:
+ main.log.error( "ONOS" + node +
+ " has incorrect view" +
+ " of set " + onosSetName + ":\n" +
+ str( getResponses[ i ] ) )
+ main.log.debug( "Expected: " + str( onosSet ) )
+ main.log.debug( "Actual: " + str( current ) )
+ getResults = main.FALSE
+ else:
+ # error, set is not a set
+ main.log.error( "ONOS" + node +
+ " has repeat elements in" +
+ " set " + onosSetName + ":\n" +
+ str( getResponses[ i ] ) )
+ getResults = main.FALSE
+ elif getResponses[ i ] == main.ERROR:
+ getResults = main.FALSE
+ utilities.assert_equals( expect=main.TRUE,
+ actual=getResults,
+ onpass="Set elements are correct",
+ onfail="Set elements are incorrect" )
+
+ main.step( "Distributed Set size" )
+ sizeResponses = []
+ threads = []
+ for i in main.activeNodes:
+ t = main.Thread( target=main.CLIs[i].setTestSize,
+ name="setTestSize-" + str( i ),
+ args=[ onosSetName ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ sizeResponses.append( t.result )
+
+ sizeResults = main.TRUE
+ for i in range( len( main.activeNodes ) ):
+ node = str( main.activeNodes[i] + 1 )
+ if size != sizeResponses[ i ]:
+ sizeResults = main.FALSE
+ main.log.error( "ONOS" + node +
+ " expected a size of " + str( size ) +
+ " for set " + onosSetName +
+ " but got " + str( sizeResponses[ i ] ) )
+ utilities.assert_equals( expect=main.TRUE,
+ actual=sizeResults,
+ onpass="Set sizes are correct",
+ onfail="Set sizes are incorrect" )
+
+ main.step( "Distributed Set add()" )
+ onosSet.add( addValue )
+ addResponses = []
+ threads = []
+ for i in main.activeNodes:
+ t = main.Thread( target=main.CLIs[i].setTestAdd,
+ name="setTestAdd-" + str( i ),
+ args=[ onosSetName, addValue ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ addResponses.append( t.result )
+
+ # main.TRUE = successfully changed the set
+ # main.FALSE = action resulted in no change in set
+ # main.ERROR - Some error in executing the function
+ addResults = main.TRUE
+ for i in range( len( main.activeNodes ) ):
+ if addResponses[ i ] == main.TRUE:
+ # All is well
+ pass
+ elif addResponses[ i ] == main.FALSE:
+ # Already in set, probably fine
+ pass
+ elif addResponses[ i ] == main.ERROR:
+ # Error in execution
+ addResults = main.FALSE
+ else:
+ # unexpected result
+ addResults = main.FALSE
+ if addResults != main.TRUE:
+ main.log.error( "Error executing set add" )
+
+ # Check if set is still correct
+ size = len( onosSet )
+ getResponses = []
+ threads = []
+ for i in main.activeNodes:
+ t = main.Thread( target=main.CLIs[i].setTestGet,
+ name="setTestGet-" + str( i ),
+ args=[ onosSetName ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ getResponses.append( t.result )
+ getResults = main.TRUE
+ for i in range( len( main.activeNodes ) ):
+ node = str( main.activeNodes[i] + 1 )
+ if isinstance( getResponses[ i ], list):
+ current = set( getResponses[ i ] )
+ if len( current ) == len( getResponses[ i ] ):
+ # no repeats
+ if onosSet != current:
+ main.log.error( "ONOS" + node + " has incorrect view" +
+ " of set " + onosSetName + ":\n" +
+ str( getResponses[ i ] ) )
+ main.log.debug( "Expected: " + str( onosSet ) )
+ main.log.debug( "Actual: " + str( current ) )
+ getResults = main.FALSE
+ else:
+ # error, set is not a set
+ main.log.error( "ONOS" + node + " has repeat elements in" +
+ " set " + onosSetName + ":\n" +
+ str( getResponses[ i ] ) )
+ getResults = main.FALSE
+ elif getResponses[ i ] == main.ERROR:
+ getResults = main.FALSE
+ sizeResponses = []
+ threads = []
+ for i in main.activeNodes:
+ t = main.Thread( target=main.CLIs[i].setTestSize,
+ name="setTestSize-" + str( i ),
+ args=[ onosSetName ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ sizeResponses.append( t.result )
+ sizeResults = main.TRUE
+ for i in range( len( main.activeNodes ) ):
+ node = str( main.activeNodes[i] + 1 )
+ if size != sizeResponses[ i ]:
+ sizeResults = main.FALSE
+ main.log.error( "ONOS" + node +
+ " expected a size of " + str( size ) +
+ " for set " + onosSetName +
+ " but got " + str( sizeResponses[ i ] ) )
+ addResults = addResults and getResults and sizeResults
+ utilities.assert_equals( expect=main.TRUE,
+ actual=addResults,
+ onpass="Set add correct",
+ onfail="Set add was incorrect" )
+
+ main.step( "Distributed Set addAll()" )
+ onosSet.update( addAllValue.split() )
+ addResponses = []
+ threads = []
+ for i in main.activeNodes:
+ t = main.Thread( target=main.CLIs[i].setTestAdd,
+ name="setTestAddAll-" + str( i ),
+ args=[ onosSetName, addAllValue ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ addResponses.append( t.result )
+
+ # main.TRUE = successfully changed the set
+ # main.FALSE = action resulted in no change in set
+ # main.ERROR - Some error in executing the function
+ addAllResults = main.TRUE
+ for i in range( len( main.activeNodes ) ):
+ if addResponses[ i ] == main.TRUE:
+ # All is well
+ pass
+ elif addResponses[ i ] == main.FALSE:
+ # Already in set, probably fine
+ pass
+ elif addResponses[ i ] == main.ERROR:
+ # Error in execution
+ addAllResults = main.FALSE
+ else:
+ # unexpected result
+ addAllResults = main.FALSE
+ if addAllResults != main.TRUE:
+ main.log.error( "Error executing set addAll" )
+
+ # Check if set is still correct
+ size = len( onosSet )
+ getResponses = []
+ threads = []
+ for i in main.activeNodes:
+ t = main.Thread( target=main.CLIs[i].setTestGet,
+ name="setTestGet-" + str( i ),
+ args=[ onosSetName ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ getResponses.append( t.result )
+ getResults = main.TRUE
+ for i in range( len( main.activeNodes ) ):
+ node = str( main.activeNodes[i] + 1 )
+ if isinstance( getResponses[ i ], list):
+ current = set( getResponses[ i ] )
+ if len( current ) == len( getResponses[ i ] ):
+ # no repeats
+ if onosSet != current:
+ main.log.error( "ONOS" + node +
+ " has incorrect view" +
+ " of set " + onosSetName + ":\n" +
+ str( getResponses[ i ] ) )
+ main.log.debug( "Expected: " + str( onosSet ) )
+ main.log.debug( "Actual: " + str( current ) )
+ getResults = main.FALSE
+ else:
+ # error, set is not a set
+ main.log.error( "ONOS" + node +
+ " has repeat elements in" +
+ " set " + onosSetName + ":\n" +
+ str( getResponses[ i ] ) )
+ getResults = main.FALSE
+ elif getResponses[ i ] == main.ERROR:
+ getResults = main.FALSE
+ sizeResponses = []
+ threads = []
+ for i in main.activeNodes:
+ t = main.Thread( target=main.CLIs[i].setTestSize,
+ name="setTestSize-" + str( i ),
+ args=[ onosSetName ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ sizeResponses.append( t.result )
+ sizeResults = main.TRUE
+ for i in range( len( main.activeNodes ) ):
+ node = str( main.activeNodes[i] + 1 )
+ if size != sizeResponses[ i ]:
+ sizeResults = main.FALSE
+ main.log.error( "ONOS" + node +
+ " expected a size of " + str( size ) +
+ " for set " + onosSetName +
+ " but got " + str( sizeResponses[ i ] ) )
+ addAllResults = addAllResults and getResults and sizeResults
+ utilities.assert_equals( expect=main.TRUE,
+ actual=addAllResults,
+ onpass="Set addAll correct",
+ onfail="Set addAll was incorrect" )
+
+ main.step( "Distributed Set contains()" )
+ containsResponses = []
+ threads = []
+ for i in main.activeNodes:
+ t = main.Thread( target=main.CLIs[i].setTestGet,
+ name="setContains-" + str( i ),
+ args=[ onosSetName ],
+ kwargs={ "values": addValue } )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+            # NOTE: t.result here is a tuple; index 1 is the contains result
+ containsResponses.append( t.result )
+
+ containsResults = main.TRUE
+ for i in range( len( main.activeNodes ) ):
+ if containsResponses[ i ] == main.ERROR:
+ containsResults = main.FALSE
+ else:
+ containsResults = containsResults and\
+ containsResponses[ i ][ 1 ]
+ utilities.assert_equals( expect=main.TRUE,
+ actual=containsResults,
+ onpass="Set contains is functional",
+ onfail="Set contains failed" )
+
+ main.step( "Distributed Set containsAll()" )
+ containsAllResponses = []
+ threads = []
+ for i in main.activeNodes:
+ t = main.Thread( target=main.CLIs[i].setTestGet,
+ name="setContainsAll-" + str( i ),
+ args=[ onosSetName ],
+ kwargs={ "values": addAllValue } )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+            # NOTE: t.result here is a tuple; index 1 is the containsAll result
+ containsAllResponses.append( t.result )
+
+ containsAllResults = main.TRUE
+ for i in range( len( main.activeNodes ) ):
+            if containsAllResponses[ i ] == main.ERROR:
+                containsAllResults = main.FALSE
+            else:
+                containsAllResults = containsAllResults and\
+                                     containsAllResponses[ i ][ 1 ]
+ utilities.assert_equals( expect=main.TRUE,
+ actual=containsAllResults,
+ onpass="Set containsAll is functional",
+ onfail="Set containsAll failed" )
+
+ main.step( "Distributed Set remove()" )
+ onosSet.remove( addValue )
+ removeResponses = []
+ threads = []
+ for i in main.activeNodes:
+ t = main.Thread( target=main.CLIs[i].setTestRemove,
+ name="setTestRemove-" + str( i ),
+ args=[ onosSetName, addValue ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ removeResponses.append( t.result )
+
+ # main.TRUE = successfully changed the set
+ # main.FALSE = action resulted in no change in set
+ # main.ERROR - Some error in executing the function
+ removeResults = main.TRUE
+ for i in range( len( main.activeNodes ) ):
+ if removeResponses[ i ] == main.TRUE:
+ # All is well
+ pass
+ elif removeResponses[ i ] == main.FALSE:
+ # not in set, probably fine
+ pass
+ elif removeResponses[ i ] == main.ERROR:
+ # Error in execution
+ removeResults = main.FALSE
+ else:
+ # unexpected result
+ removeResults = main.FALSE
+ if removeResults != main.TRUE:
+ main.log.error( "Error executing set remove" )
+
+ # Check if set is still correct
+ size = len( onosSet )
+ getResponses = []
+ threads = []
+ for i in main.activeNodes:
+ t = main.Thread( target=main.CLIs[i].setTestGet,
+ name="setTestGet-" + str( i ),
+ args=[ onosSetName ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ getResponses.append( t.result )
+ getResults = main.TRUE
+ for i in range( len( main.activeNodes ) ):
+ node = str( main.activeNodes[i] + 1 )
+ if isinstance( getResponses[ i ], list):
+ current = set( getResponses[ i ] )
+ if len( current ) == len( getResponses[ i ] ):
+ # no repeats
+ if onosSet != current:
+ main.log.error( "ONOS" + node +
+ " has incorrect view" +
+ " of set " + onosSetName + ":\n" +
+ str( getResponses[ i ] ) )
+ main.log.debug( "Expected: " + str( onosSet ) )
+ main.log.debug( "Actual: " + str( current ) )
+ getResults = main.FALSE
+ else:
+ # error, set is not a set
+ main.log.error( "ONOS" + node +
+ " has repeat elements in" +
+ " set " + onosSetName + ":\n" +
+ str( getResponses[ i ] ) )
+ getResults = main.FALSE
+ elif getResponses[ i ] == main.ERROR:
+ getResults = main.FALSE
+ sizeResponses = []
+ threads = []
+ for i in main.activeNodes:
+ t = main.Thread( target=main.CLIs[i].setTestSize,
+ name="setTestSize-" + str( i ),
+ args=[ onosSetName ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ sizeResponses.append( t.result )
+ sizeResults = main.TRUE
+ for i in range( len( main.activeNodes ) ):
+ node = str( main.activeNodes[i] + 1 )
+ if size != sizeResponses[ i ]:
+ sizeResults = main.FALSE
+ main.log.error( "ONOS" + node +
+ " expected a size of " + str( size ) +
+ " for set " + onosSetName +
+ " but got " + str( sizeResponses[ i ] ) )
+ removeResults = removeResults and getResults and sizeResults
+ utilities.assert_equals( expect=main.TRUE,
+ actual=removeResults,
+ onpass="Set remove correct",
+ onfail="Set remove was incorrect" )
+
+ main.step( "Distributed Set removeAll()" )
+ onosSet.difference_update( addAllValue.split() )
+ removeAllResponses = []
+ threads = []
+ try:
+ for i in main.activeNodes:
+ t = main.Thread( target=main.CLIs[i].setTestRemove,
+ name="setTestRemoveAll-" + str( i ),
+ args=[ onosSetName, addAllValue ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ removeAllResponses.append( t.result )
+ except Exception, e:
+ main.log.exception(e)
+
+ # main.TRUE = successfully changed the set
+ # main.FALSE = action resulted in no change in set
+ # main.ERROR - Some error in executing the function
+ removeAllResults = main.TRUE
+ for i in range( len( main.activeNodes ) ):
+ if removeAllResponses[ i ] == main.TRUE:
+ # All is well
+ pass
+ elif removeAllResponses[ i ] == main.FALSE:
+ # not in set, probably fine
+ pass
+ elif removeAllResponses[ i ] == main.ERROR:
+ # Error in execution
+ removeAllResults = main.FALSE
+ else:
+ # unexpected result
+ removeAllResults = main.FALSE
+ if removeAllResults != main.TRUE:
+ main.log.error( "Error executing set removeAll" )
+
+ # Check if set is still correct
+ size = len( onosSet )
+ getResponses = []
+ threads = []
+ for i in main.activeNodes:
+ t = main.Thread( target=main.CLIs[i].setTestGet,
+ name="setTestGet-" + str( i ),
+ args=[ onosSetName ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ getResponses.append( t.result )
+ getResults = main.TRUE
+ for i in range( len( main.activeNodes ) ):
+ node = str( main.activeNodes[i] + 1 )
+ if isinstance( getResponses[ i ], list):
+ current = set( getResponses[ i ] )
+ if len( current ) == len( getResponses[ i ] ):
+ # no repeats
+ if onosSet != current:
+ main.log.error( "ONOS" + node +
+ " has incorrect view" +
+ " of set " + onosSetName + ":\n" +
+ str( getResponses[ i ] ) )
+ main.log.debug( "Expected: " + str( onosSet ) )
+ main.log.debug( "Actual: " + str( current ) )
+ getResults = main.FALSE
+ else:
+ # error, set is not a set
+ main.log.error( "ONOS" + node +
+ " has repeat elements in" +
+ " set " + onosSetName + ":\n" +
+ str( getResponses[ i ] ) )
+ getResults = main.FALSE
+ elif getResponses[ i ] == main.ERROR:
+ getResults = main.FALSE
+ sizeResponses = []
+ threads = []
+ for i in main.activeNodes:
+ t = main.Thread( target=main.CLIs[i].setTestSize,
+ name="setTestSize-" + str( i ),
+ args=[ onosSetName ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ sizeResponses.append( t.result )
+ sizeResults = main.TRUE
+ for i in range( len( main.activeNodes ) ):
+ node = str( main.activeNodes[i] + 1 )
+ if size != sizeResponses[ i ]:
+ sizeResults = main.FALSE
+ main.log.error( "ONOS" + node +
+ " expected a size of " + str( size ) +
+ " for set " + onosSetName +
+ " but got " + str( sizeResponses[ i ] ) )
+ removeAllResults = removeAllResults and getResults and sizeResults
+ utilities.assert_equals( expect=main.TRUE,
+ actual=removeAllResults,
+ onpass="Set removeAll correct",
+ onfail="Set removeAll was incorrect" )
+
+ main.step( "Distributed Set addAll()" )
+ onosSet.update( addAllValue.split() )
+ addResponses = []
+ threads = []
+ for i in main.activeNodes:
+ t = main.Thread( target=main.CLIs[i].setTestAdd,
+ name="setTestAddAll-" + str( i ),
+ args=[ onosSetName, addAllValue ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ addResponses.append( t.result )
+
+ # main.TRUE = successfully changed the set
+ # main.FALSE = action resulted in no change in set
+ # main.ERROR - Some error in executing the function
+ addAllResults = main.TRUE
+ for i in range( len( main.activeNodes ) ):
+ if addResponses[ i ] == main.TRUE:
+ # All is well
+ pass
+ elif addResponses[ i ] == main.FALSE:
+ # Already in set, probably fine
+ pass
+ elif addResponses[ i ] == main.ERROR:
+ # Error in execution
+ addAllResults = main.FALSE
+ else:
+ # unexpected result
+ addAllResults = main.FALSE
+ if addAllResults != main.TRUE:
+ main.log.error( "Error executing set addAll" )
+
+ # Check if set is still correct
+ size = len( onosSet )
+ getResponses = []
+ threads = []
+ for i in main.activeNodes:
+ t = main.Thread( target=main.CLIs[i].setTestGet,
+ name="setTestGet-" + str( i ),
+ args=[ onosSetName ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ getResponses.append( t.result )
+ getResults = main.TRUE
+ for i in range( len( main.activeNodes ) ):
+ node = str( main.activeNodes[i] + 1 )
+ if isinstance( getResponses[ i ], list):
+ current = set( getResponses[ i ] )
+ if len( current ) == len( getResponses[ i ] ):
+ # no repeats
+ if onosSet != current:
+ main.log.error( "ONOS" + node +
+ " has incorrect view" +
+ " of set " + onosSetName + ":\n" +
+ str( getResponses[ i ] ) )
+ main.log.debug( "Expected: " + str( onosSet ) )
+ main.log.debug( "Actual: " + str( current ) )
+ getResults = main.FALSE
+ else:
+ # error, set is not a set
+ main.log.error( "ONOS" + node +
+ " has repeat elements in" +
+ " set " + onosSetName + ":\n" +
+ str( getResponses[ i ] ) )
+ getResults = main.FALSE
+ elif getResponses[ i ] == main.ERROR:
+ getResults = main.FALSE
+ sizeResponses = []
+ threads = []
+ for i in main.activeNodes:
+ t = main.Thread( target=main.CLIs[i].setTestSize,
+ name="setTestSize-" + str( i ),
+ args=[ onosSetName ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ sizeResponses.append( t.result )
+ sizeResults = main.TRUE
+ for i in range( len( main.activeNodes ) ):
+ node = str( main.activeNodes[i] + 1 )
+ if size != sizeResponses[ i ]:
+ sizeResults = main.FALSE
+ main.log.error( "ONOS" + node +
+ " expected a size of " + str( size ) +
+ " for set " + onosSetName +
+ " but got " + str( sizeResponses[ i ] ) )
+ addAllResults = addAllResults and getResults and sizeResults
+ utilities.assert_equals( expect=main.TRUE,
+ actual=addAllResults,
+ onpass="Set addAll correct",
+ onfail="Set addAll was incorrect" )
+
+ main.step( "Distributed Set clear()" )
+ onosSet.clear()
+ clearResponses = []
+ threads = []
+ for i in main.activeNodes:
+ t = main.Thread( target=main.CLIs[i].setTestRemove,
+ name="setTestClear-" + str( i ),
+ args=[ onosSetName, " "], # Values doesn't matter
+ kwargs={ "clear": True } )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ clearResponses.append( t.result )
+
+ # main.TRUE = successfully changed the set
+ # main.FALSE = action resulted in no change in set
+ # main.ERROR - Some error in executing the function
+ clearResults = main.TRUE
+ for i in range( len( main.activeNodes ) ):
+ if clearResponses[ i ] == main.TRUE:
+ # All is well
+ pass
+ elif clearResponses[ i ] == main.FALSE:
+ # Nothing set, probably fine
+ pass
+ elif clearResponses[ i ] == main.ERROR:
+ # Error in execution
+ clearResults = main.FALSE
+ else:
+ # unexpected result
+ clearResults = main.FALSE
+ if clearResults != main.TRUE:
+ main.log.error( "Error executing set clear" )
+
+ # Check if set is still correct
+ size = len( onosSet )
+ getResponses = []
+ threads = []
+ for i in main.activeNodes:
+ t = main.Thread( target=main.CLIs[i].setTestGet,
+ name="setTestGet-" + str( i ),
+ args=[ onosSetName ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ getResponses.append( t.result )
+ getResults = main.TRUE
+ for i in range( len( main.activeNodes ) ):
+ node = str( main.activeNodes[i] + 1 )
+ if isinstance( getResponses[ i ], list):
+ current = set( getResponses[ i ] )
+ if len( current ) == len( getResponses[ i ] ):
+ # no repeats
+ if onosSet != current:
+ main.log.error( "ONOS" + node +
+ " has incorrect view" +
+ " of set " + onosSetName + ":\n" +
+ str( getResponses[ i ] ) )
+ main.log.debug( "Expected: " + str( onosSet ) )
+ main.log.debug( "Actual: " + str( current ) )
+ getResults = main.FALSE
+ else:
+ # error, set is not a set
+ main.log.error( "ONOS" + node +
+ " has repeat elements in" +
+ " set " + onosSetName + ":\n" +
+ str( getResponses[ i ] ) )
+ getResults = main.FALSE
+ elif getResponses[ i ] == main.ERROR:
+ getResults = main.FALSE
+ sizeResponses = []
+ threads = []
+ for i in main.activeNodes:
+ t = main.Thread( target=main.CLIs[i].setTestSize,
+ name="setTestSize-" + str( i ),
+ args=[ onosSetName ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ sizeResponses.append( t.result )
+ sizeResults = main.TRUE
+ for i in range( len( main.activeNodes ) ):
+ node = str( main.activeNodes[i] + 1 )
+ if size != sizeResponses[ i ]:
+ sizeResults = main.FALSE
+ main.log.error( "ONOS" + node +
+ " expected a size of " + str( size ) +
+ " for set " + onosSetName +
+ " but got " + str( sizeResponses[ i ] ) )
+ clearResults = clearResults and getResults and sizeResults
+ utilities.assert_equals( expect=main.TRUE,
+ actual=clearResults,
+ onpass="Set clear correct",
+ onfail="Set clear was incorrect" )
+
+ main.step( "Distributed Set addAll()" )
+ onosSet.update( addAllValue.split() )
+ addResponses = []
+ threads = []
+ for i in main.activeNodes:
+ t = main.Thread( target=main.CLIs[i].setTestAdd,
+ name="setTestAddAll-" + str( i ),
+ args=[ onosSetName, addAllValue ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ addResponses.append( t.result )
+
+ # main.TRUE = successfully changed the set
+ # main.FALSE = action resulted in no change in set
+ # main.ERROR - Some error in executing the function
+ addAllResults = main.TRUE
+ for i in range( len( main.activeNodes ) ):
+ if addResponses[ i ] == main.TRUE:
+ # All is well
+ pass
+ elif addResponses[ i ] == main.FALSE:
+ # Already in set, probably fine
+ pass
+ elif addResponses[ i ] == main.ERROR:
+ # Error in execution
+ addAllResults = main.FALSE
+ else:
+ # unexpected result
+ addAllResults = main.FALSE
+ if addAllResults != main.TRUE:
+ main.log.error( "Error executing set addAll" )
+
+ # Check if set is still correct
+ size = len( onosSet )
+ getResponses = []
+ threads = []
+ for i in main.activeNodes:
+ t = main.Thread( target=main.CLIs[i].setTestGet,
+ name="setTestGet-" + str( i ),
+ args=[ onosSetName ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ getResponses.append( t.result )
+ getResults = main.TRUE
+ for i in range( len( main.activeNodes ) ):
+ node = str( main.activeNodes[i] + 1 )
+ if isinstance( getResponses[ i ], list):
+ current = set( getResponses[ i ] )
+ if len( current ) == len( getResponses[ i ] ):
+ # no repeats
+ if onosSet != current:
+ main.log.error( "ONOS" + node +
+ " has incorrect view" +
+ " of set " + onosSetName + ":\n" +
+ str( getResponses[ i ] ) )
+ main.log.debug( "Expected: " + str( onosSet ) )
+ main.log.debug( "Actual: " + str( current ) )
+ getResults = main.FALSE
+ else:
+ # error, set is not a set
+ main.log.error( "ONOS" + node +
+ " has repeat elements in" +
+ " set " + onosSetName + ":\n" +
+ str( getResponses[ i ] ) )
+ getResults = main.FALSE
+ elif getResponses[ i ] == main.ERROR:
+ getResults = main.FALSE
+ sizeResponses = []
+ threads = []
+ for i in main.activeNodes:
+ t = main.Thread( target=main.CLIs[i].setTestSize,
+ name="setTestSize-" + str( i ),
+ args=[ onosSetName ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ sizeResponses.append( t.result )
+ sizeResults = main.TRUE
+ for i in range( len( main.activeNodes ) ):
+ node = str( main.activeNodes[i] + 1 )
+ if size != sizeResponses[ i ]:
+ sizeResults = main.FALSE
+ main.log.error( "ONOS" + node +
+ " expected a size of " + str( size ) +
+ " for set " + onosSetName +
+ " but got " + str( sizeResponses[ i ] ) )
+ addAllResults = addAllResults and getResults and sizeResults
+ utilities.assert_equals( expect=main.TRUE,
+ actual=addAllResults,
+ onpass="Set addAll correct",
+ onfail="Set addAll was incorrect" )
+
+ main.step( "Distributed Set retain()" )
+ onosSet.intersection_update( retainValue.split() )
+ retainResponses = []
+ threads = []
+ for i in main.activeNodes:
+ t = main.Thread( target=main.CLIs[i].setTestRemove,
+ name="setTestRetain-" + str( i ),
+ args=[ onosSetName, retainValue ],
+ kwargs={ "retain": True } )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ retainResponses.append( t.result )
+
+ # main.TRUE = successfully changed the set
+ # main.FALSE = action resulted in no change in set
+ # main.ERROR - Some error in executing the function
+ retainResults = main.TRUE
+ for i in range( len( main.activeNodes ) ):
+ if retainResponses[ i ] == main.TRUE:
+ # All is well
+ pass
+ elif retainResponses[ i ] == main.FALSE:
+ # Already in set, probably fine
+ pass
+ elif retainResponses[ i ] == main.ERROR:
+ # Error in execution
+ retainResults = main.FALSE
+ else:
+ # unexpected result
+ retainResults = main.FALSE
+ if retainResults != main.TRUE:
+ main.log.error( "Error executing set retain" )
+
+ # Check if set is still correct
+ size = len( onosSet )
+ getResponses = []
+ threads = []
+ for i in main.activeNodes:
+ t = main.Thread( target=main.CLIs[i].setTestGet,
+ name="setTestGet-" + str( i ),
+ args=[ onosSetName ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ getResponses.append( t.result )
+ getResults = main.TRUE
+ for i in range( len( main.activeNodes ) ):
+ node = str( main.activeNodes[i] + 1 )
+ if isinstance( getResponses[ i ], list):
+ current = set( getResponses[ i ] )
+ if len( current ) == len( getResponses[ i ] ):
+ # no repeats
+ if onosSet != current:
+ main.log.error( "ONOS" + node +
+ " has incorrect view" +
+ " of set " + onosSetName + ":\n" +
+ str( getResponses[ i ] ) )
+ main.log.debug( "Expected: " + str( onosSet ) )
+ main.log.debug( "Actual: " + str( current ) )
+ getResults = main.FALSE
+ else:
+ # error, set is not a set
+ main.log.error( "ONOS" + node +
+ " has repeat elements in" +
+ " set " + onosSetName + ":\n" +
+ str( getResponses[ i ] ) )
+ getResults = main.FALSE
+ elif getResponses[ i ] == main.ERROR:
+ getResults = main.FALSE
+ sizeResponses = []
+ threads = []
+ for i in main.activeNodes:
+ t = main.Thread( target=main.CLIs[i].setTestSize,
+ name="setTestSize-" + str( i ),
+ args=[ onosSetName ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ sizeResponses.append( t.result )
+ sizeResults = main.TRUE
+ for i in range( len( main.activeNodes ) ):
+ node = str( main.activeNodes[i] + 1 )
+ if size != sizeResponses[ i ]:
+ sizeResults = main.FALSE
+ main.log.error( "ONOS" + node + " expected a size of " +
+ str( size ) + " for set " + onosSetName +
+ " but got " + str( sizeResponses[ i ] ) )
+ retainResults = retainResults and getResults and sizeResults
+ utilities.assert_equals( expect=main.TRUE,
+ actual=retainResults,
+ onpass="Set retain correct",
+ onfail="Set retain was incorrect" )
+
+ # Transactional maps
+ main.step( "Partitioned Transactional maps put" )
+ tMapValue = "Testing"
+ numKeys = 100
+ putResult = True
+ node = main.activeNodes[0]
+ putResponses = main.CLIs[node].transactionalMapPut( numKeys, tMapValue )
+ if putResponses and len( putResponses ) == 100:
+ for i in putResponses:
+ if putResponses[ i ][ 'value' ] != tMapValue:
+ putResult = False
+ else:
+ putResult = False
+ if not putResult:
+ main.log.debug( "Put response values: " + str( putResponses ) )
+ utilities.assert_equals( expect=True,
+ actual=putResult,
+ onpass="Partitioned Transactional Map put successful",
+ onfail="Partitioned Transactional Map put values are incorrect" )
+
+ main.step( "Partitioned Transactional maps get" )
+ getCheck = True
+ for n in range( 1, numKeys + 1 ):
+ getResponses = []
+ threads = []
+ valueCheck = True
+ for i in main.activeNodes:
+ t = main.Thread( target=main.CLIs[i].transactionalMapGet,
+ name="TMap-get-" + str( i ),
+ args=[ "Key" + str( n ) ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ getResponses.append( t.result )
+ for node in getResponses:
+ if node != tMapValue:
+ valueCheck = False
+ if not valueCheck:
+ main.log.warn( "Values for key 'Key" + str( n ) + "' do not match:" )
+ main.log.warn( getResponses )
+ getCheck = getCheck and valueCheck
+ utilities.assert_equals( expect=True,
+ actual=getCheck,
+ onpass="Partitioned Transactional Map get values were correct",
+ onfail="Partitioned Transactional Map values incorrect" )
diff --git a/TestON/tests/HA/HAswapNodes/HAswapNodes.topo b/TestON/tests/HA/HAswapNodes/HAswapNodes.topo
new file mode 100644
index 0000000..81cf47a
--- /dev/null
+++ b/TestON/tests/HA/HAswapNodes/HAswapNodes.topo
@@ -0,0 +1,171 @@
+<TOPOLOGY>
+ <COMPONENT>
+
+ <ONOSbench>
+ <host>localhost</host>
+ <user>sdn</user>
+ <password>rocks</password>
+ <type>OnosDriver</type>
+ <connect_order>1</connect_order>
+ <COMPONENTS>
+ </COMPONENTS>
+ </ONOSbench>
+
+ <ONOScli1>
+ <host>localhost</host>
+ <user>sdn</user>
+ <password>rocks</password>
+ <type>OnosCliDriver</type>
+ <connect_order>2</connect_order>
+ <COMPONENTS> </COMPONENTS>
+ </ONOScli1>
+
+ <ONOScli2>
+ <host>localhost</host>
+ <user>sdn</user>
+ <password>rocks</password>
+ <type>OnosCliDriver</type>
+ <connect_order>3</connect_order>
+ <COMPONENTS> </COMPONENTS>
+ </ONOScli2>
+
+ <ONOScli3>
+ <host>localhost</host>
+ <user>sdn</user>
+ <password>rocks</password>
+ <type>OnosCliDriver</type>
+ <connect_order>4</connect_order>
+ <COMPONENTS> </COMPONENTS>
+ </ONOScli3>
+
+
+ <ONOScli4>
+ <host>localhost</host>
+ <user>sdn</user>
+ <password>rocks</password>
+ <type>OnosCliDriver</type>
+ <connect_order>5</connect_order>
+ <COMPONENTS> </COMPONENTS>
+ </ONOScli4>
+
+
+ <ONOScli5>
+ <host>localhost</host>
+ <user>sdn</user>
+ <password>rocks</password>
+ <type>OnosCliDriver</type>
+ <connect_order>6</connect_order>
+ <COMPONENTS> </COMPONENTS>
+ </ONOScli5>
+
+
+ <ONOScli6>
+ <host>localhost</host>
+ <user>sdn</user>
+ <password>rocks</password>
+ <type>OnosCliDriver</type>
+ <connect_order>7</connect_order>
+ <COMPONENTS> </COMPONENTS>
+ </ONOScli6>
+
+
+ <ONOScli7>
+ <host>localhost</host>
+ <user>sdn</user>
+ <password>rocks</password>
+ <type>OnosCliDriver</type>
+ <connect_order>8</connect_order>
+ <COMPONENTS> </COMPONENTS>
+ </ONOScli7>
+
+ <ONOS1>
+ <host>OC1</host>
+ <user>sdn</user>
+ <password>rocks</password>
+ <type>OnosDriver</type>
+ <connect_order>9</connect_order>
+ <COMPONENTS> </COMPONENTS>
+ </ONOS1>
+
+ <ONOS2>
+ <host>OC2</host>
+ <user>sdn</user>
+ <password>rocks</password>
+ <type>OnosDriver</type>
+ <connect_order>10</connect_order>
+ <COMPONENTS> </COMPONENTS>
+ </ONOS2>
+
+ <ONOS3>
+ <host>OC3</host>
+ <user>sdn</user>
+ <password>rocks</password>
+ <type>OnosDriver</type>
+ <connect_order>11</connect_order>
+ <COMPONENTS> </COMPONENTS>
+ </ONOS3>
+
+ <ONOS4>
+ <host>OC4</host>
+ <user>sdn</user>
+ <password>rocks</password>
+ <type>OnosDriver</type>
+ <connect_order>12</connect_order>
+ <COMPONENTS> </COMPONENTS>
+ </ONOS4>
+
+ <ONOS5>
+ <host>OC5</host>
+ <user>sdn</user>
+ <password>rocks</password>
+ <type>OnosDriver</type>
+ <connect_order>13</connect_order>
+ <COMPONENTS> </COMPONENTS>
+ </ONOS5>
+
+ <ONOS6>
+ <host>OC6</host>
+ <user>sdn</user>
+ <password>rocks</password>
+ <type>OnosDriver</type>
+ <connect_order>14</connect_order>
+ <COMPONENTS> </COMPONENTS>
+ </ONOS6>
+
+ <ONOS7>
+ <host>OC7</host>
+ <user>sdn</user>
+ <password>rocks</password>
+ <type>OnosDriver</type>
+ <connect_order>15</connect_order>
+ <COMPONENTS> </COMPONENTS>
+ </ONOS7>
+
+ <Mininet1>
+ <host>OCN</host>
+ <user>sdn</user>
+ <password>rocks</password>
+ <type>MininetCliDriver</type>
+ <connect_order>16</connect_order>
+ <COMPONENTS>
+ #Specify the Option for mininet
+ <arg1> --custom ~/mininet/custom/obelisk.py </arg1>
+ <arg2> --topo obelisk </arg2>
+ <arg3> --switch ovs,protocols=OpenFlow13 </arg3>
+ <controller> none </controller>
+ <home>~/mininet/custom/</home>
+ </COMPONENTS>
+ </Mininet1>
+
+ <Mininet2>
+ <host>OCN</host>
+ <user>sdn</user>
+ <password>rocks</password>
+ <type>RemoteMininetDriver</type>
+ <connect_order>17</connect_order>
+ <COMPONENTS>
+ </COMPONENTS>
+ </Mininet2>
+
+ </COMPONENT>
+</TOPOLOGY>
diff --git a/TestON/tests/HA/HAswapNodes/README b/TestON/tests/HA/HAswapNodes/README
new file mode 100644
index 0000000..281b7c2
--- /dev/null
+++ b/TestON/tests/HA/HAswapNodes/README
@@ -0,0 +1,20 @@
+This test is designed to verify that an ONOS cluster behaves correctly when
+nodes are added or removed from a cluster dynamically via modifying a remote
+cluster metadata file.
+
+
+The general structure of the test:
+- Startup
+- Assign switches
+- Verify ONOS state and functionality
+ - Device mastership
+ - Intents
+ - Leadership election
+ - Distributed Primitives
+- Scale ONOS cluster size from 1 to 7 to 1 by increments of 2
+ - Modify cluster metadata file
+ - Start or stop ONOS nodes
+ - Verify ONOS state and functionality
+ - Dataplane failures
+ - link down and up
+ - switch down and up
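The cluster metadata file referred to above is the JSON emitted by the bundled onos-gen-partitions script (added below): a cluster name, the node list, and a base partition plus rotated extended partitions. A trimmed, illustrative two-node example (IPs and name hash hypothetical):

    {
        "name": 1234567890,
        "nodes": [ { "id": "10.0.0.1", "ip": "10.0.0.1", "port": 9876 },
                   { "id": "10.0.0.2", "ip": "10.0.0.2", "port": 9876 } ],
        "partitions": [ { "id": 0, "members": [ "10.0.0.1", "10.0.0.2" ] },
                        { "id": 1, "members": [ "10.0.0.1", "10.0.0.2" ] } ]
    }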
diff --git a/TestON/tests/SAMP/SAMPstartTemplate/__init__.py b/TestON/tests/HA/HAswapNodes/__init__.py
similarity index 100%
copy from TestON/tests/SAMP/SAMPstartTemplate/__init__.py
copy to TestON/tests/HA/HAswapNodes/__init__.py
diff --git a/TestON/tests/HA/HAswapNodes/dependencies/Server.py b/TestON/tests/HA/HAswapNodes/dependencies/Server.py
new file mode 100644
index 0000000..bff976f
--- /dev/null
+++ b/TestON/tests/HA/HAswapNodes/dependencies/Server.py
@@ -0,0 +1,148 @@
+"Functions for using the SimpleHTTPServer python module"
+import re
+
+class Server():
+
+ def __init__( self ):
+ self.default = ''
+ self.PID = -1
+ self.component = None
+ self.rootDir = None
+
+ def __del__( self ):
+ self.stop()
+
+ def start( self, component, rootDir, port=8000, logDir=None ):
+ """
+ Start SimpleHTTPServer as a background process from rootDir on the
+ given component. The webserver will listen on port and if specified,
+ output will be redirected to logDir.
+
+ Arguments:
+ - component = The TestON component handle to start the webserver on
+ - rootDir = The root directory for the web content
+ - port = The port number for the webserver to listen on. Defaults to 8000
+ - logDir = If specified, the output of the webserver will be redirected
+ to this file. Note that this should be either an absolute path
+ or relative to rootDir.
+ Returns:
+        main.TRUE if the command succeeds or main.FALSE if there is an error.
+ """
+ retValue = main.TRUE
+ self.rootDir = rootDir
+ try:
+ # Save component for this instance so other functions can use it
+ self.component = component
+ main.log.info( "Starting SimpleHTTPServer on " + component.name )
+ if component.handle:
+ handle = component.handle
+ # cd to rootDir
+ handle.sendline( "cd " + str( rootDir ) )
+ handle.expect( "\$" )
+ # Start server
+ cmd = "python -m SimpleHTTPServer {}".format( port )
+ if logDir:
+ cmd += " &> {}".format( logDir ) # pipe all output to a file
+ else:
+ cmd += "&> {dev/null}" # Throw away all output
+ cmd += " &"
+ handle.sendline( cmd )
+ handle.expect( "\$" )
+ response = handle.before
+ # Return to home dir
+ handle.sendline( "cd " + component.home )
+ handle.expect( "\$" )
+ response += handle.before
+ if "Exit" in response:
+ main.log.error( "Error starting server. Check server log for details" )
+ main.log.debug( handle.before )
+ retValue = main.FALSE
+ # capture PID for later use
+ # EX: [1] 67987
+ match = re.search( "\[\d\] (?P<PID>\d+)", response )
+ if match:
+ self.PID = match.group( "PID" )
+ else:
+ main.log.warn( "Could not find PID" )
+ else:
+ main.log.error( "Component handle is not set" )
+ retValue = main.FALSE
+ except Exception:
+ main.log.exception( "Error starting web server" )
+ retValue = main.FALSE
+ return retValue
+
+ def stop( self ):
+ """
+ Kills the process of the server. Note that this function must be run
+ from the same instance of the server class that the server was started
+ on.
+ """
+ retValue = main.TRUE
+ try:
+ main.log.info( "Stopping Server." )
+ assert self.component, "Component not specified"
+ assert self.PID, "PID not found"
+ if self.component.handle:
+ handle = self.component.handle
+ cmd = "sudo kill {}".format( self.PID )
+ handle.sendline( cmd )
+ handle.expect( "\$" )
+ # TODO: What is bad output? cannot sudo?
+ else:
+ main.log.error( "Component handle is not set" )
+ retValue = main.FALSE
+ except Exception:
+ main.log.exception( "Error stopping web server" )
+ retValue = main.FALSE
+ return retValue
+
+ def generateFile( self, nodes, equal=False, filename="cluster.json" ):
+ """
+ Generate custom metadata file in the root directory using the custom
+ onos-gen-partitions file which should also be located in the root
+ directory.
+
+ Note that this function needs to be run after the start function has
+ been called for this instance.
+
+ Arguments:
+ - nodes = The number of ONOS nodes to include in the cluster. Will
+                  include nodes in ascending order, i.e. OC1, OC2, etc.
+
+ Optional Arguments:
+ - equal = Specifies whether all nodes should participate in every
+ partition. Defaults to False.
+ - filename = The name of the file to save the cluster metadata to.
+ Defaults to "cluster.json".
+ Returns:
+            main.TRUE if the command succeeds or main.FALSE if there is an error.
+ """
+ retValue = main.TRUE
+ try:
+            # Sanity checks must come before dereferencing self.component
+            assert self.component, "Component not specified. Please start the server first"
+            assert self.rootDir, "Root directory not found"
+            if self.component.handle:
+ handle = self.component.handle
+ # cd to rootDir
+ handle.sendline( "cd " + str( self.rootDir ) )
+ handle.expect( "\$" )
+ cmd = "./onos-gen-partitions {} {} ".format( filename, nodes )
+ if equal:
+ cmd += "-e"
+ handle.sendline( cmd )
+ handle.expect( "\$" )
+ response = handle.before
+ # Return to home dir
+ handle.sendline( "cd " + self.component.home )
+ handle.expect( "\$" )
+ response += handle.before
+ if "Traceback" in response:
+ main.log.error( handle.before )
+ retValue = main.FALSE
+ else:
+ main.log.error( "Component handle is not set" )
+ retValue = main.FALSE
+        except Exception:
+            main.log.exception( "Error generating metadata file" )
+            retValue = main.FALSE
+        return retValue
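A minimal usage sketch of this helper, mirroring how the HAswapNodes cases drive it (the component handle and rootDir below are illustrative):

    # Hypothetical driving code -- start the server, regenerate metadata, stop.
    server = Server()
    if server.start( main.ONOSbench, rootDir="~/ha-deps", port=8000 ) == main.TRUE:
        server.generateFile( 3, equal=True )  # cluster.json for 3 equal nodes
    server.stop()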
diff --git a/TestON/tests/SAMP/SAMPstartTemplate/__init__.py b/TestON/tests/HA/HAswapNodes/dependencies/__init__.py
similarity index 100%
copy from TestON/tests/SAMP/SAMPstartTemplate/__init__.py
copy to TestON/tests/HA/HAswapNodes/dependencies/__init__.py
diff --git a/TestON/tests/HA/HAswapNodes/dependencies/onos-gen-partitions b/TestON/tests/HA/HAswapNodes/dependencies/onos-gen-partitions
new file mode 100755
index 0000000..ae7da6c
--- /dev/null
+++ b/TestON/tests/HA/HAswapNodes/dependencies/onos-gen-partitions
@@ -0,0 +1,87 @@
+#!/usr/bin/env python
+"""
+ Generate the partitions json file from the $OC* environment variables
+
+ Usage: onos-gen-partitions output_file [num_nodes] [-e]
+ If output file is not provided, the json is written to stdout.
+"""
+
+from os import environ
+from collections import deque, OrderedDict
+import re
+import json
+import sys
+import hashlib
+
+convert = lambda text: int(text) if text.isdigit() else text.lower()
+alphanum_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key)]
+
+def get_OC_vars():
+ vars = []
+ for var in environ:
+ if re.match(r"OC[0-9]+", var):
+ vars.append(var)
+ return sorted(vars, key=alphanum_key)
+
+def get_nodes(vars, port=9876):
+ node = lambda k: { 'id': k, 'ip': k, 'port': port }
+ return [ node(environ[v]) for v in vars ]
+
+def generate_base_partition(nodes):
+ return {
+ 'id': 0,
+ 'members': nodes
+ }
+
+def generate_extended_partitions_scaling(nodes, k, partitions=3, equal=False):
+ l = deque(nodes)
+ perms = []
+ for i in range(1, partitions + 1):
+ if equal:
+ members = list(l)
+ else:
+ members = list(l)[:k]
+
+ part = {
+ 'id': i,
+ 'members': members
+ }
+ perms.append(part)
+ l.rotate(-2)
+ return perms
+
+if __name__ == '__main__':
+ vars = get_OC_vars()
+ # NOTE: likely prone to errors
+ nodes = get_nodes(vars)
+ num = None
+ equal = False
+ if len(sys.argv) >= 3:
+ num = int(sys.argv[2])
+ try:
+ equal = "-e" in sys.argv[3]
+        except IndexError:
+ equal = False
+ if num:
+ nodes = nodes[:num]
+
+ base_partition = generate_base_partition([v.get('id') for v in nodes])
+ extended_partitions = generate_extended_partitions_scaling([v.get('id') for v in nodes],
+ 3, equal=equal)
+ partitions = []
+ partitions.append(base_partition)
+ partitions.extend(extended_partitions)
+ name = hash("HAScaling")
+ data = {
+ 'name': name,
+ 'nodes': nodes,
+ 'partitions': partitions
+ }
+ output = json.dumps(data, indent=4)
+
+ if len(sys.argv) >= 2:
+ filename = sys.argv[1]
+ with open(filename, 'w') as f:
+ f.write(output)
+ else:
+ print output
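Run standalone, the generator reads the $OC* environment variables; with three nodes and the default three extended partitions, l.rotate(-2) walks the membership between partitions. An illustrative invocation (IPs hypothetical):

    # $ OC1=10.0.0.1 OC2=10.0.0.2 OC3=10.0.0.3 ./onos-gen-partitions cluster.json 3
    # partition 0 (base): all three nodes
    # partition 1: [ OC1, OC2, OC3 ]  -- the deque then rotates left by two
    # partition 2: [ OC3, OC1, OC2 ]
    # partition 3: [ OC2, OC3, OC1 ]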
diff --git a/TestON/tests/MISC/SCPFbatchFlowResp/SCPFbatchFlowResp.py b/TestON/tests/MISC/SCPFbatchFlowResp/SCPFbatchFlowResp.py
index ca5e240..b767835 100755
--- a/TestON/tests/MISC/SCPFbatchFlowResp/SCPFbatchFlowResp.py
+++ b/TestON/tests/MISC/SCPFbatchFlowResp/SCPFbatchFlowResp.py
@@ -184,8 +184,8 @@
cliResult = main.TRUE
for i in range( i, main.numCtrls ):
cliResult = cliResult and \
- main.CLIs[ i ].startOnosCli( ONOSIp=main.ONOSip[ i ] )
- main.log.info("ONOSip is: " + main.ONOSip[i])
+ main.ONOScli1.startCellCli( )
+ main.log.info("ONOSip is: " + main.ONOScli1.ip_address)
stepResult = cliResult
utilities.assert_equals( expect=main.TRUE,
actual=stepResult,
@@ -204,8 +204,7 @@
main.step("Activate openflow-base App")
app = main.params['CASE10']['app']
- stepResult = main.ONOSbench.onosCli( ONOSIp = main.ONOSip[0],
- cmdstr = "app activate " + app )
+ stepResult = main.ONOScli1.activateApp( app )
time.sleep(main.cfgSleep)
main.log.info(stepResult)
utilities.assert_equals( expect=main.TRUE,
@@ -216,9 +215,9 @@
time.sleep(main.cfgSleep)
- main.step( "Disable AdaptiveFlowSampling ")
- stepResult = main.ONOSbench.onosCfgSet( main.ONOSip[0], "org.onosproject.provider.of.flow.impl.OpenFlowRuleProvider",
- "adaptiveFlowSampling " + main.params['CASE10']['adaptiveFlowenabled'])
+ main.step( "Configure AdaptiveFlowSampling ")
+ stepResult = main.ONOScli1.setCfg( component = "org.onosproject.provider.of.flow.impl.OpenFlowRuleProvider",
+ propName = "adaptiveFlowSampling ", value = main.params['CASE10']['adaptiveFlowenabled'])
utilities.assert_equals( expect=main.TRUE,
actual=stepResult,
onpass="App Configuration Succeeded! ",
diff --git a/TestON/tests/SAMP/SAMPstartTemplate/__init__.py b/TestON/tests/MISC/__init__.py
old mode 100644
new mode 100755
similarity index 100%
copy from TestON/tests/SAMP/SAMPstartTemplate/__init__.py
copy to TestON/tests/MISC/__init__.py
diff --git a/TestON/tests/SAMP/SAMPstartTemplate/README b/TestON/tests/SAMP/SAMPstartTemplate/README
deleted file mode 100644
index 359943e..0000000
--- a/TestON/tests/SAMP/SAMPstartTemplate/README
+++ /dev/null
@@ -1,5 +0,0 @@
-Summary:
- This is a Sample test suite that demonstrates starting up ONOS
- and scalling to multiple instances. It also has extra
- functionalilty that allows the tester to bypass the ONOS
- package and install case (case 2) to save time.
diff --git a/TestON/tests/SAMP/SAMPstartTemplate/SAMPstartTemplate.params b/TestON/tests/SAMP/SAMPstartTemplate/SAMPstartTemplate.params
deleted file mode 100755
index 9120c6a..0000000
--- a/TestON/tests/SAMP/SAMPstartTemplate/SAMPstartTemplate.params
+++ /dev/null
@@ -1,34 +0,0 @@
-<PARAMS>
-
- <testcases>1,2,2,2</testcases>
-
- <SCALE>
- <size>1,2,3</size>
- <max>3</max>
- </SCALE>
-
- <DEPENDENCY>
- <path>/tests/SAMPstartTemplate/dependencies/</path>
- <wrapper1>startUp</wrapper1>
- <topology>newFuncTopo.py</topology>
- </DEPENDENCY>
-
- <ENV>
- <cellName>productionCell</cellName>
- <cellApps>drivers,openflow,proxyarp,mobility</cellApps>
- </ENV>
-
- <GIT>
- <pull>False</pull>
- <branch>master</branch>
- </GIT>
-
- <CTRL>
- <port>6653</port>
- </CTRL>
-
- <SLEEP>
- <startup>15</startup>
- </SLEEP>
-
-</PARAMS>
diff --git a/TestON/tests/SAMP/SAMPstartTemplate/SAMPstartTemplate.py b/TestON/tests/SAMP/SAMPstartTemplate/SAMPstartTemplate.py
deleted file mode 100644
index b6184da..0000000
--- a/TestON/tests/SAMP/SAMPstartTemplate/SAMPstartTemplate.py
+++ /dev/null
@@ -1,251 +0,0 @@
-
-# This is a sample template that starts up ONOS cluster, this template
-# is used as a starting script for creating functionality and performance test
-
-class SAMPstartTemplate:
-
- def __init__( self ):
- self.default = ''
-
- def CASE1( self, main ):
- import time
- import os
- import imp
- import re
-
- """
- - Construct tests variables
- - GIT ( optional )
- - Checkout ONOS master branch
- - Pull latest ONOS code
- - Building ONOS ( optional )
- - Install ONOS package
- - Build ONOS package
- """
-
- main.case( "Constructing test variables and building ONOS package" )
- main.step( "Constructing test variables" )
- stepResult = main.FALSE
-
- # Test variables
- main.testOnDirectory = re.sub( "(/tests)$", "", main.testDir )
- main.cellName = main.params[ 'ENV' ][ 'cellName' ]
- main.apps = main.params[ 'ENV' ][ 'cellApps' ]
- gitBranch = main.params[ 'GIT' ][ 'branch' ]
- main.dependencyPath = main.testOnDirectory + \
- main.params[ 'DEPENDENCY' ][ 'path' ]
- main.topology = main.params[ 'DEPENDENCY' ][ 'topology' ]
- main.scale = ( main.params[ 'SCALE' ][ 'size' ] ).split( "," )
- main.maxNodes = int( main.params[ 'SCALE' ][ 'max' ] )
- main.ONOSport = main.params[ 'CTRL' ][ 'port' ]
- wrapperFile1 = main.params[ 'DEPENDENCY' ][ 'wrapper1' ]
- main.startUpSleep = int( main.params[ 'SLEEP' ][ 'startup' ] )
- gitPull = main.params[ 'GIT' ][ 'pull' ]
- main.cellData = {} # for creating cell file
- main.CLIs = []
- main.ONOSip = []
-
- main.ONOSip = main.ONOSbench.getOnosIps()
- print main.ONOSip
-
- # Assigning ONOS cli handles to a list
- for i in range( 1, main.maxNodes + 1 ):
- main.CLIs.append( getattr( main, 'ONOScli' + str( i ) ) )
-
- # -- INIT SECTION, ONLY RUNS ONCE -- #
- main.startUp = imp.load_source( wrapperFile1,
- main.dependencyPath +
- wrapperFile1 +
- ".py" )
-
- copyResult1 = main.ONOSbench.scp( main.Mininet1,
- main.dependencyPath +
- main.topology,
- main.Mininet1.home,
- direction="to" )
- if main.CLIs:
- stepResult = main.TRUE
- else:
- main.log.error( "Did not properly created list of ONOS CLI handle" )
- stepResult = main.FALSE
-
- utilities.assert_equals( expect=main.TRUE,
- actual=stepResult,
- onpass="Successfully construct " +
- "test variables ",
- onfail="Failed to construct test variables" )
-
- if gitPull == 'True':
- main.step( "Building ONOS in " + gitBranch + " branch" )
- onosBuildResult = main.startUp.onosBuild( main, gitBranch )
- stepResult = onosBuildResult
- utilities.assert_equals( expect=main.TRUE,
- actual=stepResult,
- onpass="Successfully compiled " +
- "latest ONOS",
- onfail="Failed to compile " +
- "latest ONOS" )
- else:
- main.log.warn( "Did not pull new code so skipping mvn " +
- "clean install" )
-
- def CASE2( self, main ):
- """
- - Set up cell
- - Create cell file
- - Set cell file
- - Verify cell file
- - Kill ONOS process
- - Uninstall ONOS cluster
- - Verify ONOS start up
- - Install ONOS cluster
- - Connect to cli
- """
-
- # main.scale[ 0 ] determines the current number of ONOS controller
- main.numCtrls = int( main.scale[ 0 ] )
-
- main.case( "Starting up " + str( main.numCtrls ) +
- " node(s) ONOS cluster" )
-
- #kill off all onos processes
- main.log.info( "Safety check, killing all ONOS processes" +
- " before initiating environment setup" )
-
- for i in range( main.maxNodes ):
- main.ONOSbench.onosDie( main.ONOSip[ i ] )
-
- print "NODE COUNT = ", main.numCtrls
-
- tempOnosIp = []
- for i in range( main.numCtrls ):
- tempOnosIp.append( main.ONOSip[i] )
-
- main.ONOSbench.createCellFile( main.ONOSbench.ip_address,
- "temp",
- main.Mininet1.ip_address,
- main.apps,
- tempOnosIp )
-
- main.step( "Apply cell to environment" )
- cellResult = main.ONOSbench.setCell( "temp" )
- verifyResult = main.ONOSbench.verifyCell()
- stepResult = cellResult and verifyResult
- utilities.assert_equals( expect=main.TRUE,
- actual=stepResult,
- onpass="Successfully applied cell to " + \
- "environment",
- onfail="Failed to apply cell to environment " )
-
- main.step( "Creating ONOS package" )
- packageResult = main.ONOSbench.onosPackage()
- stepResult = packageResult
- utilities.assert_equals( expect=main.TRUE,
- actual=stepResult,
- onpass="Successfully created ONOS package",
- onfail="Failed to create ONOS package" )
-
- time.sleep( main.startUpSleep )
- main.step( "Uninstalling ONOS package" )
- onosUninstallResult = main.TRUE
- for i in range( main.numCtrls ):
- onosUninstallResult = onosUninstallResult and \
- main.ONOSbench.onosUninstall( nodeIp=main.ONOSip[ i ] )
- stepResult = onosUninstallResult
- utilities.assert_equals( expect=main.TRUE,
- actual=stepResult,
- onpass="Successfully uninstalled ONOS package",
- onfail="Failed to uninstall ONOS package" )
-
- time.sleep( main.startUpSleep )
- main.step( "Installing ONOS package" )
- onosInstallResult = main.TRUE
- for i in range( main.numCtrls ):
- onosInstallResult = onosInstallResult and \
- main.ONOSbench.onosInstall( node=main.ONOSip[ i ] )
- stepResult = onosInstallResult
- utilities.assert_equals( expect=main.TRUE,
- actual=stepResult,
- onpass="Successfully installed ONOS package",
- onfail="Failed to install ONOS package" )
-
- time.sleep( main.startUpSleep )
- main.step( "Starting ONOS service" )
- stopResult = main.TRUE
- startResult = main.TRUE
- onosIsUp = main.TRUE
-
- for i in range( main.numCtrls ):
- onosIsUp = onosIsUp and main.ONOSbench.isup( main.ONOSip[ i ] )
- if onosIsUp == main.TRUE:
- main.log.report( "ONOS instance is up and ready" )
- else:
- main.log.report( "ONOS instance may not be up, stop and " +
- "start ONOS again " )
- for i in range( main.numCtrls ):
- stopResult = stopResult and \
- main.ONOSbench.onosStop( main.ONOSip[ i ] )
- for i in range( main.numCtrls ):
- startResult = startResult and \
- main.ONOSbench.onosStart( main.ONOSip[ i ] )
- stepResult = onosIsUp and stopResult and startResult
- utilities.assert_equals( expect=main.TRUE,
- actual=stepResult,
- onpass="ONOS service is ready",
- onfail="ONOS service did not start properly" )
-
- main.step( "Start ONOS cli" )
- cliResult = main.TRUE
- for i in range( main.numCtrls ):
- cliResult = cliResult and \
- main.CLIs[ i ].startOnosCli( main.ONOSip[ i ] )
- stepResult = cliResult
- utilities.assert_equals( expect=main.TRUE,
- actual=stepResult,
- onpass="Successfully start ONOS cli",
- onfail="Failed to start ONOS cli" )
-
- # Remove the first element in main.scale list
- main.scale.remove( main.scale[ 0 ] )
-
- def CASE9( self, main ):
- '''
- Report errors/warnings/exceptions
- '''
- main.log.info("Error report: \n" )
- main.ONOSbench.logReport( main.ONOSip[ 0 ],
- [ "INFO",
- "FOLLOWER",
- "WARN",
- "flow",
- "ERROR",
- "Except" ],
- "s" )
-
- def CASE11( self, main ):
- """
- Start mininet
- """
- main.log.report( "Start Mininet topology" )
- main.log.case( "Start Mininet topology" )
-
- main.step( "Starting Mininet Topology" )
- topoResult = main.Mininet1.startNet( topoFile=main.dependencyPath + main.topology )
- stepResult = topoResult
- utilities.assert_equals( expect=main.TRUE,
- actual=stepResult,
- onpass="Successfully loaded topology",
- onfail="Failed to load topology" )
- # Exit if topology did not load properly
- if not topoResult:
- main.cleanup()
- main.exit()
-
- def CASE12( self, main ):
- """
- Test random ONOS command
- """
-
- main.CLIs[ 0 ].startOnosCli( main.ONOSip[ 0 ] )
- print main.CLIs[ 0 ].leaders()
-
diff --git a/TestON/tests/SAMP/SAMPstartTemplate/dependencies/newFuncTopo.py b/TestON/tests/SAMP/SAMPstartTemplate/dependencies/newFuncTopo.py
deleted file mode 100755
index 7fe68c1..0000000
--- a/TestON/tests/SAMP/SAMPstartTemplate/dependencies/newFuncTopo.py
+++ /dev/null
@@ -1,152 +0,0 @@
-#!/usr/bin/python
-
-"""
-Custom topology for Mininet
-"""
-from mininet.topo import Topo
-from mininet.net import Mininet
-from mininet.node import Host, RemoteController
-from mininet.node import Node
-from mininet.node import CPULimitedHost
-from mininet.link import TCLink
-from mininet.cli import CLI
-from mininet.log import setLogLevel
-from mininet.util import dumpNodeConnections
-from mininet.node import ( UserSwitch, OVSSwitch, IVSSwitch )
-
-class VLANHost( Host ):
- def config( self, vlan=100, **params ):
- r = super( Host, self ).config( **params )
- intf = self.defaultIntf()
- self.cmd( 'ifconfig %s inet 0' % intf )
- self.cmd( 'vconfig add %s %d' % ( intf, vlan ) )
- self.cmd( 'ifconfig %s.%d inet %s' % ( intf, vlan, params['ip'] ) )
- newName = '%s.%d' % ( intf, vlan )
- intf.name = newName
- self.nameToIntf[ newName ] = intf
- return r
-
-class IPv6Host( Host ):
- def config( self, v6Addr='1000:1/64', **params ):
- r = super( Host, self ).config( **params )
- intf = self.defaultIntf()
- self.cmd( 'ifconfig %s inet 0' % intf )
- self.cmd( 'ip -6 addr add %s dev %s' % ( v6Addr, intf ) )
- return r
-
-class dualStackHost( Host ):
- def config( self, v6Addr='2000:1/64', **params ):
- r = super( Host, self ).config( **params )
- intf = self.defaultIntf()
- self.cmd( 'ip -6 addr add %s dev %s' % ( v6Addr, intf ) )
- return r
-
-class MyTopo( Topo ):
-
- def __init__( self ):
- # Initialize topology
- Topo.__init__( self )
-
- # Switch S5 Hosts
- host1=self.addHost( 'h1', ip='10.1.0.2/24' )
- host2=self.addHost( 'h2', cls=IPv6Host, v6Addr='1000::2/64' )
- host3=self.addHost( 'h3', ip='10.1.0.3/24', cls=dualStackHost, v6Addr='2000::2/64' )
- #VLAN hosts
- host4=self.addHost( 'h4', ip='100.1.0.2/24', cls=VLANHost, vlan=100 )
- host5=self.addHost( 'h5', ip='200.1.0.2/24', cls=VLANHost, vlan=200 )
- #VPN-1 and VPN-2 Hosts
- host6=self.addHost( 'h6', ip='11.1.0.2/24' )
- host7=self.addHost( 'h7', ip='12.1.0.2/24' )
- #Multicast Sender
- host8=self.addHost( 'h8', ip='10.1.0.4/24' )
-
- # Switch S6 Hosts
- host9=self.addHost( 'h9', ip='10.1.0.5/24' )
- host10=self.addHost( 'h10', cls=IPv6Host, v6Addr='1000::3/64' )
- host11=self.addHost( 'h11', ip='10.1.0.6/24', cls=dualStackHost, v6Addr='2000::3/64' )
- #VLAN hosts
- host12=self.addHost( 'h12', ip='100.1.0.3/24', cls=VLANHost, vlan=100 )
- host13=self.addHost( 'h13', ip='200.1.0.3/24', cls=VLANHost, vlan=200 )
- #VPN-1 and VPN-2 Hosts
- host14=self.addHost( 'h14', ip='11.1.0.3/24' )
- host15=self.addHost( 'h15', ip='12.1.0.3/24' )
- #Multicast Receiver
- host16=self.addHost( 'h16', ip='10.1.0.7/24' )
-
- # Switch S7 Hosts
- host17=self.addHost( 'h17', ip='10.1.0.8/24' )
- host18=self.addHost( 'h18', cls=IPv6Host, v6Addr='1000::4/64' )
- host19=self.addHost( 'h19', ip='10.1.0.9/24', cls=dualStackHost, v6Addr='2000::4/64' )
- #VLAN hosts
- host20=self.addHost( 'h20', ip='100.1.0.4/24', cls=VLANHost, vlan=100 )
- host21=self.addHost( 'h21', ip='200.1.0.4/24', cls=VLANHost, vlan=200 )
- #VPN-1 and VPN-2 Hosts
- host22=self.addHost( 'h22', ip='11.1.0.4/24' )
- host23=self.addHost( 'h23', ip='12.1.0.4/24' )
- #Multicast Receiver
- host24=self.addHost( 'h24', ip='10.1.0.10/24' )
-
- s1 = self.addSwitch( 's1' )
- s2 = self.addSwitch( 's2' )
- s3 = self.addSwitch( 's3' )
- s4 = self.addSwitch( 's4' )
- s5 = self.addSwitch( 's5' )
- s6 = self.addSwitch( 's6' )
- s7 = self.addSwitch( 's7' )
-
- self.addLink(s5,host1)
- self.addLink(s5,host2)
- self.addLink(s5,host3)
- self.addLink(s5,host4)
- self.addLink(s5,host5)
- self.addLink(s5,host6)
- self.addLink(s5,host7)
- self.addLink(s5,host8)
-
- self.addLink(s6,host9)
- self.addLink(s6,host10)
- self.addLink(s6,host11)
- self.addLink(s6,host12)
- self.addLink(s6,host13)
- self.addLink(s6,host14)
- self.addLink(s6,host15)
- self.addLink(s6,host16)
-
- self.addLink(s7,host17)
- self.addLink(s7,host18)
- self.addLink(s7,host19)
- self.addLink(s7,host20)
- self.addLink(s7,host21)
- self.addLink(s7,host22)
- self.addLink(s7,host23)
- self.addLink(s7,host24)
-
- self.addLink(s1,s2)
- self.addLink(s1,s3)
- self.addLink(s1,s4)
- self.addLink(s1,s5)
-
- self.addLink(s2,s3)
- self.addLink(s2,s5)
- self.addLink(s2,s6)
-
- self.addLink(s3,s4)
- self.addLink(s3,s6)
-
- self.addLink(s4,s7)
- topos = { 'mytopo': ( lambda: MyTopo() ) }
-
-# HERE THE CODE DEFINITION OF THE TOPOLOGY ENDS
-
-def setupNetwork():
- "Create network"
- topo = MyTopo()
- network = Mininet(topo=topo, autoSetMacs=True, controller=None)
- network.start()
- CLI( network )
- network.stop()
-
-if __name__ == '__main__':
- setLogLevel('info')
- #setLogLevel('debug')
- setupNetwork()
diff --git a/TestON/tests/SAMP/SAMPstartTemplate/dependencies/startUp.py b/TestON/tests/SAMP/SAMPstartTemplate/dependencies/startUp.py
deleted file mode 100644
index bf2a2b6..0000000
--- a/TestON/tests/SAMP/SAMPstartTemplate/dependencies/startUp.py
+++ /dev/null
@@ -1,38 +0,0 @@
-"""
- This wrapper function is use for starting up onos instance
-"""
-
-import time
-import os
-import json
-
-def onosBuild( main, gitBranch ):
- """
- This includes pulling ONOS and building it using maven install
- """
-
- buildResult = main.FALSE
-
- # Git checkout a branch of ONOS
- checkOutResult = main.ONOSbench.gitCheckout( gitBranch )
- # Does the git pull on the branch that was checked out
- if not checkOutResult:
- main.log.warn( "Failed to checked out " + gitBranch +
- " branch")
- else:
- main.log.info( "Successfully checked out " + gitBranch +
- " branch")
- gitPullResult = main.ONOSbench.gitPull()
- if gitPullResult == main.ERROR:
- main.log.error( "Error pulling git branch" )
- else:
- main.log.info( "Successfully pulled " + gitBranch + " branch" )
-
- # Maven clean install
- buildResult = main.ONOSbench.cleanInstall()
-
- return buildResult
-
-
-
-
diff --git a/TestON/tests/SAMP/SAMPstartTemplate2/__init__.py b/TestON/tests/SAMP/SAMPstartTemplate2/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/TestON/tests/SAMP/SAMPstartTemplate2/__init__.py
+++ /dev/null
diff --git a/TestON/tests/SAMP/SAMPstartTemplate2/Dependency/newFuncTopo.py b/TestON/tests/SAMP/SAMPstartTemplate2_1node/Dependency/newFuncTopo.py
similarity index 100%
rename from TestON/tests/SAMP/SAMPstartTemplate2/Dependency/newFuncTopo.py
rename to TestON/tests/SAMP/SAMPstartTemplate2_1node/Dependency/newFuncTopo.py
diff --git a/TestON/tests/SAMP/SAMPstartTemplate2/README b/TestON/tests/SAMP/SAMPstartTemplate2_1node/README
similarity index 100%
rename from TestON/tests/SAMP/SAMPstartTemplate2/README
rename to TestON/tests/SAMP/SAMPstartTemplate2_1node/README
diff --git a/TestON/tests/SAMP/SAMPstartTemplate2/SAMPstartTemplate2.params b/TestON/tests/SAMP/SAMPstartTemplate2_1node/SAMPstartTemplate2_1node.params
similarity index 93%
copy from TestON/tests/SAMP/SAMPstartTemplate2/SAMPstartTemplate2.params
copy to TestON/tests/SAMP/SAMPstartTemplate2_1node/SAMPstartTemplate2_1node.params
index 8fde55a..1bed38d 100755
--- a/TestON/tests/SAMP/SAMPstartTemplate2/SAMPstartTemplate2.params
+++ b/TestON/tests/SAMP/SAMPstartTemplate2_1node/SAMPstartTemplate2_1node.params
@@ -9,7 +9,7 @@
CASE2: get onos warnings, errors from log
-->
<!--
- CASE10: start a 3-node ONOS Cluster
+ CASE10: start a 1-node ONOS
-->
<!--
CASE11: Start Mininet and assign controllers
@@ -39,7 +39,7 @@
</CASE1>
<CASE10>
- <numNodes>3</numNodes>
+ <numNodes>1</numNodes>
<Apps>
org.onosproject.openflow,org.onosproject.fwd
</Apps>
@@ -51,7 +51,7 @@
</CASE10>
<CASE11>
- <path>~/OnosSystemTest/TestON/tests/SAMP/SAMPstartTemplate2/Dependency/</path>
+ <path>~/OnosSystemTest/TestON/tests/SAMP/SAMPstartTemplate2_1node/Dependency/</path>
<topo>newFuncTopo.py</topo>
</CASE11>
diff --git a/TestON/tests/SAMP/SAMPstartTemplate2/SAMPstartTemplate2.py b/TestON/tests/SAMP/SAMPstartTemplate2_1node/SAMPstartTemplate2_1node.py
similarity index 96%
copy from TestON/tests/SAMP/SAMPstartTemplate2/SAMPstartTemplate2.py
copy to TestON/tests/SAMP/SAMPstartTemplate2_1node/SAMPstartTemplate2_1node.py
index ce057f4..befa623 100644
--- a/TestON/tests/SAMP/SAMPstartTemplate2/SAMPstartTemplate2.py
+++ b/TestON/tests/SAMP/SAMPstartTemplate2_1node/SAMPstartTemplate2_1node.py
@@ -2,7 +2,7 @@
# This is a sample template that starts up ONOS cluster, this template
# can be use as a base script for ONOS System Testing.
-class SAMPstartTemplate2:
+class SAMPstartTemplate2_1node:
def __init__( self ):
self.default = ''
@@ -197,7 +197,7 @@
main.step( "Assign switches to controllers.")
assignResult = main.TRUE
- onosNodes = [ main.ONOScli1.ip_address, main.ONOScli2.ip_address, main.ONOScli3.ip_address ]
+ onosNodes = [ main.ONOScli1.ip_address ]
for i in range(1, 8):
assignResult = assignResult & main.Mininet1.assignSwController( sw="s" + str( i ),
ip=onosNodes,
@@ -216,7 +216,7 @@
main.log.case( "Test some onos commands through CLI. ")
main.log.debug( main.ONOScli1.sendline("summary") )
- main.log.debug( main.ONOScli3.sendline("devices") )
+ main.log.debug( main.ONOScli1.sendline("devices") )
def CASE22( self, main ):
"""
@@ -225,4 +225,4 @@
main.case( " Sample tests using ONOS REST API handles. ")
main.log.debug( main.ONOSrest1.send("/devices") )
- main.log.debug( main.ONOSrest2.apps() )
\ No newline at end of file
+ main.log.debug( main.ONOSrest1.apps() )
\ No newline at end of file
diff --git a/TestON/tests/SAMP/SAMPstartTemplate/SAMPstartTemplate.topo b/TestON/tests/SAMP/SAMPstartTemplate2_1node/SAMPstartTemplate2_1node.topo
similarity index 64%
rename from TestON/tests/SAMP/SAMPstartTemplate/SAMPstartTemplate.topo
rename to TestON/tests/SAMP/SAMPstartTemplate2_1node/SAMPstartTemplate2_1node.topo
index 068bfdd..a804d4a 100755
--- a/TestON/tests/SAMP/SAMPstartTemplate/SAMPstartTemplate.topo
+++ b/TestON/tests/SAMP/SAMPstartTemplate2_1node/SAMPstartTemplate2_1node.topo
@@ -1,6 +1,10 @@
<TOPOLOGY>
<COMPONENT>
-
+ <!--
+ This is a list of all components and their handles in the test setup.
Even if some handles are not used in the test cases, we define
all onos cells here so that cases can set up the onos cluster.
+ -->
<ONOSbench>
<host>localhost</host>
<user>sdn</user>
@@ -8,11 +12,12 @@
<type>OnosDriver</type>
<connect_order>1</connect_order>
<COMPONENTS>
+ <home></home> <!-- defines where onos home is -->
</COMPONENTS>
</ONOSbench>
<ONOScli1>
- <host>localhost</host>
+ <host>OC1</host>
<user>sdn</user>
<password>rocks</password>
<type>OnosCliDriver</type>
@@ -21,28 +26,8 @@
</COMPONENTS>
</ONOScli1>
- <ONOScli2>
- <host>localhost</host>
- <user>sdn</user>
- <password>rocks</password>
- <type>OnosCliDriver</type>
- <connect_order>3</connect_order>
- <COMPONENTS>
- </COMPONENTS>
- </ONOScli2>
-
- <ONOScli3>
- <host>localhost</host>
- <user>sdn</user>
- <password>rocks</password>
- <type>OnosCliDriver</type>
- <connect_order>4</connect_order>
- <COMPONENTS>
- </COMPONENTS>
- </ONOScli3>
-
<Mininet1>
- <host>OCN</host>
+ <host>localhost</host>
<user>sdn</user>
<password>rocks</password>
<type>MininetCliDriver</type>
@@ -52,5 +37,16 @@
</COMPONENTS>
</Mininet1>
+ <ONOSrest1>
+ <host>OC1</host>
+ <port>8181</port>
+ <user>onos</user>
+ <password>rocks</password>
+ <type>OnosRestDriver</type>
+ <connect_order>6</connect_order>
+ <COMPONENTS>
+ </COMPONENTS>
+ </ONOSrest1>
+
</COMPONENT>
</TOPOLOGY>
diff --git a/TestON/tests/SAMP/SAMPstartTemplate/__init__.py b/TestON/tests/SAMP/SAMPstartTemplate2_1node/__init__.py
similarity index 100%
rename from TestON/tests/SAMP/SAMPstartTemplate/__init__.py
rename to TestON/tests/SAMP/SAMPstartTemplate2_1node/__init__.py
diff --git a/TestON/tests/SAMP/SAMPstartTemplate2/Dependency/newFuncTopo.py b/TestON/tests/SAMP/SAMPstartTemplate2_3node/Dependency/newFuncTopo.py
similarity index 100%
copy from TestON/tests/SAMP/SAMPstartTemplate2/Dependency/newFuncTopo.py
copy to TestON/tests/SAMP/SAMPstartTemplate2_3node/Dependency/newFuncTopo.py
diff --git a/TestON/tests/SAMP/SAMPstartTemplate2/README b/TestON/tests/SAMP/SAMPstartTemplate2_3node/README
similarity index 100%
copy from TestON/tests/SAMP/SAMPstartTemplate2/README
copy to TestON/tests/SAMP/SAMPstartTemplate2_3node/README
diff --git a/TestON/tests/SAMP/SAMPstartTemplate2/SAMPstartTemplate2.params b/TestON/tests/SAMP/SAMPstartTemplate2_3node/SAMPstartTemplate2_3node.params
similarity index 97%
rename from TestON/tests/SAMP/SAMPstartTemplate2/SAMPstartTemplate2.params
rename to TestON/tests/SAMP/SAMPstartTemplate2_3node/SAMPstartTemplate2_3node.params
index 8fde55a..41fedd0 100755
--- a/TestON/tests/SAMP/SAMPstartTemplate2/SAMPstartTemplate2.params
+++ b/TestON/tests/SAMP/SAMPstartTemplate2_3node/SAMPstartTemplate2_3node.params
@@ -51,7 +51,7 @@
</CASE10>
<CASE11>
- <path>~/OnosSystemTest/TestON/tests/SAMP/SAMPstartTemplate2/Dependency/</path>
+ <path>~/OnosSystemTest/TestON/tests/SAMP/SAMPstartTemplate2_3node/Dependency/</path>
<topo>newFuncTopo.py</topo>
</CASE11>
diff --git a/TestON/tests/SAMP/SAMPstartTemplate2/SAMPstartTemplate2.py b/TestON/tests/SAMP/SAMPstartTemplate2_3node/SAMPstartTemplate2_3node.py
similarity index 98%
rename from TestON/tests/SAMP/SAMPstartTemplate2/SAMPstartTemplate2.py
rename to TestON/tests/SAMP/SAMPstartTemplate2_3node/SAMPstartTemplate2_3node.py
index ce057f4..49b72b5 100644
--- a/TestON/tests/SAMP/SAMPstartTemplate2/SAMPstartTemplate2.py
+++ b/TestON/tests/SAMP/SAMPstartTemplate2_3node/SAMPstartTemplate2_3node.py
@@ -2,7 +2,7 @@
# This is a sample template that starts up ONOS cluster, this template
# can be use as a base script for ONOS System Testing.
-class SAMPstartTemplate2:
+class SAMPstartTemplate2_3node:
def __init__( self ):
self.default = ''
@@ -225,4 +225,4 @@
main.case( " Sample tests using ONOS REST API handles. ")
main.log.debug( main.ONOSrest1.send("/devices") )
- main.log.debug( main.ONOSrest2.apps() )
\ No newline at end of file
+ main.log.debug( main.ONOSrest2.apps() )
diff --git a/TestON/tests/SAMP/SAMPstartTemplate2/SAMPstartTemplate2.topo b/TestON/tests/SAMP/SAMPstartTemplate2_3node/SAMPstartTemplate2_3node.topo
similarity index 100%
rename from TestON/tests/SAMP/SAMPstartTemplate2/SAMPstartTemplate2.topo
rename to TestON/tests/SAMP/SAMPstartTemplate2_3node/SAMPstartTemplate2_3node.topo
diff --git a/TestON/tests/SAMP/SAMPstartTemplate/__init__.py b/TestON/tests/SAMP/SAMPstartTemplate2_3node/__init__.py
similarity index 100%
copy from TestON/tests/SAMP/SAMPstartTemplate/__init__.py
copy to TestON/tests/SAMP/SAMPstartTemplate2_3node/__init__.py
diff --git a/TestON/tests/USECASE/USECASE_ReactiveRouting/README b/TestON/tests/USECASE/USECASE_ReactiveRouting/README
new file mode 100644
index 0000000..de225b9
--- /dev/null
+++ b/TestON/tests/USECASE/USECASE_ReactiveRouting/README
@@ -0,0 +1,6 @@
+Please ask Pingping (pingping@onlab.us) for the topology figure.
+
+The default route 0.0.0.0 will have a flow entry like the one below inside the internal switch, which connects hosts inside the SDN network:
+ cookie=0x17000048f91093, duration=3134.048s, table=0, n_packets=1978, n_bytes=193613, idle_age=0, priority=100,ip,in_port=3 actions=mod_dl_dst:00:00:00:00:00:04,output:1
+
+00:00:00:00:00:04 is the MAC address of the next-hop peer.
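+
+A sketch for locating that entry from the Mininet machine (the bridge name
+"sw1" is illustrative, and ovs-ofctl normally needs root):
+
+    import subprocess
+    # Dump the flow table and keep entries that rewrite the destination
+    # MAC, i.e. the reactive-routing default route shown above.
+    out = subprocess.check_output(['ovs-ofctl', 'dump-flows', 'sw1']).decode()
+    for line in out.splitlines():
+        if 'mod_dl_dst' in line:
+            print(line.strip())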
diff --git a/TestON/tests/USECASE/USECASE_ReactiveRouting/dependencies/USECASE_ReactiveRoutingI2MN.py b/TestON/tests/USECASE/USECASE_ReactiveRouting/dependencies/USECASE_ReactiveRoutingI2MN.py
new file mode 100755
index 0000000..5fca446
--- /dev/null
+++ b/TestON/tests/USECASE/USECASE_ReactiveRouting/dependencies/USECASE_ReactiveRoutingI2MN.py
@@ -0,0 +1,366 @@
+#!/usr/bin/python
+
+"""
+Set up the SDN-IP topology as same as it on Internet2
+"""
+
+"""
+AS 64513, (SDN AS)
+AS 64514, reachable by 10.0.4.1
+AS 64515, reachable by 10.0.5.1
+AS 64516, reachable by 10.0.6.1
+"""
+
+from mininet.net import Mininet
+from mininet.node import Controller, RemoteController
+from mininet.log import setLogLevel, info
+from mininet.cli import CLI
+from mininet.topo import Topo
+from mininet.util import quietRun
+from mininet.moduledeps import pathCheck
+
+import os.path
+import time
+from subprocess import Popen, STDOUT, PIPE
+
+QUAGGA_DIR = '/usr/lib/quagga'
+QUAGGA_RUN_DIR = '/usr/local/var/run/quagga'
+QUAGGA_CONFIG_DIR = '~/OnosSystemTest/TestON/tests/ReactiveRouting/Dependency/'
+onos1IP = '10.128.4.52'
+numSw = 39
+
+
+class SDNTopo( Topo ):
+ "SDN Topology"
+
+ def __init__( self, *args, **kwargs ):
+
+ Topo.__init__( self, *args, **kwargs )
+
+ # BGP peer hosts
+ peer64514 = self.addHost( 'peer64514' )
+ peer64515 = self.addHost( 'peer64515' )
+ peer64516 = self.addHost( 'peer64516' )
+
+ sw1 = self.addSwitch( 'sw1', dpid = '00000000000000a1' )
+ sw2 = self.addSwitch( 'sw2', dpid = '00000000000000a2' )
+ sw3 = self.addSwitch( 'sw3', dpid = '00000000000000a3' )
+ sw4 = self.addSwitch( 'sw4', dpid = '00000000000000a4' )
+ sw5 = self.addSwitch( 'sw5', dpid = '00000000000000a5' )
+ sw6 = self.addSwitch( 'sw6', dpid = '00000000000000a6' )
+ sw7 = self.addSwitch( 'sw7', dpid = '00000000000000a7' )
+ sw8 = self.addSwitch( 'sw8', dpid = '00000000000000a8' )
+ sw9 = self.addSwitch( 'sw9', dpid = '00000000000000a9' )
+ sw10 = self.addSwitch( 'sw10', dpid = '0000000000000a10' )
+ sw11 = self.addSwitch( 'sw11', dpid = '0000000000000a11' )
+ sw12 = self.addSwitch( 'sw12', dpid = '0000000000000a12' )
+ sw13 = self.addSwitch( 'sw13', dpid = '0000000000000a13' )
+ sw14 = self.addSwitch( 'sw14', dpid = '0000000000000a14' )
+ sw15 = self.addSwitch( 'sw15', dpid = '0000000000000a15' )
+ sw16 = self.addSwitch( 'sw16', dpid = '0000000000000a16' )
+ sw17 = self.addSwitch( 'sw17', dpid = '0000000000000a17' )
+ sw18 = self.addSwitch( 'sw18', dpid = '0000000000000a18' )
+ sw19 = self.addSwitch( 'sw19', dpid = '0000000000000a19' )
+ sw20 = self.addSwitch( 'sw20', dpid = '0000000000000a20' )
+ sw21 = self.addSwitch( 'sw21', dpid = '0000000000000a21' )
+ sw22 = self.addSwitch( 'sw22', dpid = '0000000000000a22' )
+ sw23 = self.addSwitch( 'sw23', dpid = '0000000000000a23' )
+ sw24 = self.addSwitch( 'sw24', dpid = '0000000000000a24' )
+ sw25 = self.addSwitch( 'sw25', dpid = '0000000000000a25' )
+ sw26 = self.addSwitch( 'sw26', dpid = '0000000000000a26' )
+ sw27 = self.addSwitch( 'sw27', dpid = '0000000000000a27' )
+ sw28 = self.addSwitch( 'sw28', dpid = '0000000000000a28' )
+ sw29 = self.addSwitch( 'sw29', dpid = '0000000000000a29' )
+ sw30 = self.addSwitch( 'sw30', dpid = '0000000000000a30' )
+ sw31 = self.addSwitch( 'sw31', dpid = '0000000000000a31' )
+ sw32 = self.addSwitch( 'sw32', dpid = '0000000000000a32' )
+ sw33 = self.addSwitch( 'sw33', dpid = '0000000000000a33' )
+ sw34 = self.addSwitch( 'sw34', dpid = '0000000000000a34' )
+ sw35 = self.addSwitch( 'sw35', dpid = '0000000000000a35' )
+ sw36 = self.addSwitch( 'sw36', dpid = '0000000000000a36' )
+ sw37 = self.addSwitch( 'sw37', dpid = '0000000000000a37' )
+ sw38 = self.addSwitch( 'sw38', dpid = '0000000000000a38' )
+ sw39 = self.addSwitch( 'sw39', dpid = '0000000000000a39' )
+
+
+ # Add a layer2 switch for control plane connectivity
+ # This switch isn't part of the SDN topology
+ # We'll use the ovs-controller to turn this into a learning switch
+ swCtl100 = self.addSwitch( 'swCtl100', dpid = '0000000000000100' )
+
+
+ # BGP speaker hosts
+ speaker1 = self.addHost( 'speaker1' )
+ speaker2 = self.addHost( 'speaker2' )
+
+ root = self.addHost( 'root', inNamespace = False , ip = '0' )
+
+ # hosts behind each AS
+ host64514 = self.addHost( 'host64514' )
+ host64515 = self.addHost( 'host64515' )
+ host64516 = self.addHost( 'host64516' )
+
+ host1 = self.addHost( 'host1' )
+ host2 = self.addHost( 'host2' )
+ host6 = self.addHost( 'host6' )
+ host13 = self.addHost( 'host13' )
+
+ self.addLink( 'speaker1', sw24 )
+ self.addLink( 'speaker2', sw24 )
+
+ # connect all switches
+ self.addLink( sw1, sw2 )
+ self.addLink( sw1, sw6 )
+ self.addLink( sw1, sw8 )
+ self.addLink( sw2, sw3 )
+ self.addLink( sw3, sw4 )
+ self.addLink( sw3, sw5 )
+ self.addLink( sw4, sw8 )
+ self.addLink( sw5, sw7 )
+ self.addLink( sw5, sw9 )
+ self.addLink( sw6, sw13 )
+ self.addLink( sw7, sw8 )
+ self.addLink( sw8, sw11 )
+ self.addLink( sw9, sw10 )
+ self.addLink( sw10, sw12 )
+ self.addLink( sw11, sw12 )
+ self.addLink( sw11, sw14 )
+ self.addLink( sw12, sw17 )
+ self.addLink( sw13, sw14 )
+ self.addLink( sw13, sw21 )
+ self.addLink( sw14, sw15 )
+ self.addLink( sw14, sw18 )
+ self.addLink( sw14, sw23 )
+ self.addLink( sw15, sw16 )
+ self.addLink( sw16, sw17 )
+ self.addLink( sw17, sw19 )
+ self.addLink( sw17, sw20 )
+ self.addLink( sw18, sw23 )
+ self.addLink( sw19, sw27 )
+ self.addLink( sw20, sw28 )
+ self.addLink( sw21, sw22 )
+ self.addLink( sw21, sw29 )
+ self.addLink( sw22, sw23 )
+ self.addLink( sw23, sw24 )
+ self.addLink( sw23, sw31 )
+ self.addLink( sw24, sw25 )
+ self.addLink( sw25, sw26 )
+ self.addLink( sw26, sw27 )
+ self.addLink( sw27, sw28 )
+ self.addLink( sw27, sw34 )
+ self.addLink( sw29, sw30 )
+ self.addLink( sw29, sw35 )
+ self.addLink( sw30, sw31 )
+ self.addLink( sw31, sw32 )
+ self.addLink( sw32, sw33 )
+ self.addLink( sw32, sw39 )
+ self.addLink( sw33, sw34 )
+ self.addLink( sw35, sw36 )
+ self.addLink( sw36, sw37 )
+ self.addLink( sw37, sw38 )
+ self.addLink( sw38, sw39 )
+
+ # connection between switches and peers
+ self.addLink( peer64514, sw32 )
+ self.addLink( peer64515, sw8 )
+ self.addLink( peer64516, sw28 )
+
+ # connection between BGP peer and hosts behind the BGP peer
+ self.addLink( peer64514, host64514 )
+ self.addLink( peer64515, host64515 )
+ self.addLink( peer64516, host64516 )
+
+ self.addLink( sw1, host1 )
+ self.addLink( sw2, host2 )
+ self.addLink( sw6, host6 )
+ self.addLink( sw13, host13 )
+
+ # Internal Connection To Hosts
+ self.addLink( swCtl100, peer64514 )
+ self.addLink( swCtl100, peer64515 )
+ self.addLink( swCtl100, peer64516 )
+ self.addLink( swCtl100, speaker1 )
+ self.addLink( swCtl100, speaker2 )
+
+
+
+ # add host64514 to control plane for ping test
+ self.addLink( swCtl100, host64514 )
+ self.addLink( swCtl100, root )
+
+
+def startsshd( host ):
+ "Start sshd on host"
+ info( '*** Starting sshd\n' )
+ name, intf, ip = host.name, host.defaultIntf(), host.IP()
+ banner = '/tmp/%s.banner' % name
+ host.cmd( 'echo "Welcome to %s at %s" > %s' % ( name, ip, banner ) )
+ host.cmd( '/usr/sbin/sshd -o "Banner %s"' % banner, '-o "UseDNS no"' )
+ info( '***', host.name, 'is running sshd on', intf, 'at', ip, '\n' )
+
+def startsshds ( hosts ):
+ for h in hosts:
+ startsshd( h )
+
+def stopsshd():
+ "Stop *all* sshd processes with a custom banner"
+ info( '*** Shutting down stale sshd/Banner processes ',
+ quietRun( "pkill -9 -f Banner" ), '\n' )
+
+def startquagga( host, num, config_file ):
+ info( '*** Starting Quagga on %s\n' % host )
+ host.cmd( "cd %s" % QUAGGA_CONFIG_DIR )
+ zebra_cmd = \
+ '%s/zebra -d -f ./zebra.conf -z %s/zserv%s.api -i %s/zebra%s.pid'\
+ % ( QUAGGA_DIR, QUAGGA_RUN_DIR, num, QUAGGA_RUN_DIR, num )
+ quagga_cmd = '%s/bgpd -d -f %s -z %s/zserv%s.api -i %s/bgpd%s.pid' \
+ % ( QUAGGA_DIR, config_file, QUAGGA_RUN_DIR, num, QUAGGA_RUN_DIR, num )
+
+ print zebra_cmd
+ print quagga_cmd
+
+ host.cmd( zebra_cmd )
+ host.cmd( quagga_cmd )
+
+def startquaggahost5( host, num ):
+ info( '*** Starting Quagga on %s\n' % host )
+ zebra_cmd = \
+ '%s/zebra -d -f ./zebra.conf -z %s/zserv%s.api -i %s/zebra%s.pid' \
+ % ( QUAGGA_DIR, QUAGGA_RUN_DIR, num, QUAGGA_RUN_DIR, num )
+ quagga_cmd = \
+ '%s/bgpd -d -f ./as4quaggas/quagga%s.conf -z %s/zserv%s.api -i %s/bgpd%s.pid'\
+ % ( QUAGGA_DIR, num, QUAGGA_RUN_DIR, num, QUAGGA_RUN_DIR, num )
+
+ host.cmd( zebra_cmd )
+ host.cmd( quagga_cmd )
+
+
+def stopquagga():
+ quietRun( 'sudo pkill -9 -f bgpd' )
+ quietRun( 'sudo pkill -9 -f zebra' )
+
+def sdn1net():
+ topo = SDNTopo()
+ info( '*** Creating network\n' )
+ # time.sleep( 30 )
+ net = Mininet( topo = topo, controller = RemoteController )
+
+
+ speaker1, speaker2, peer64514, peer64515, peer64516 = \
+ net.get( 'speaker1', 'speaker2' ,
+ 'peer64514', 'peer64515', 'peer64516' )
+
+ # Add addresses to the speaker1 interface connected to sw24
+ # for BGP peering
+ speaker1.setMAC( '00:00:00:00:00:01', 'speaker1-eth0' )
+ speaker1.cmd( 'ip addr add 10.0.4.101/24 dev speaker1-eth0' )
+ speaker1.cmd( 'ip addr add 10.0.5.101/24 dev speaker1-eth0' )
+ speaker1.cmd( 'ip addr add 10.0.6.101/24 dev speaker1-eth0' )
+
+ speaker1.defaultIntf().setIP( '10.1.4.101/24' )
+ speaker1.defaultIntf().setMAC( '00:00:00:00:00:01' )
+
+ # Net has to be started after adding the above links
+ net.start()
+
+ # setup configuration on the interface connected to switch
+ peer64514.cmd( "ifconfig peer64514-eth0 10.0.4.1 up" )
+ peer64514.setMAC( '00:00:00:00:00:04', 'peer64514-eth0' )
+ peer64515.cmd( "ifconfig peer64515-eth0 10.0.5.1 up" )
+ peer64515.setMAC( '00:00:00:00:00:05', 'peer64515-eth0' )
+ peer64516.cmd( "ifconfig peer64516-eth0 10.0.6.1 up" )
+ peer64516.setMAC( '00:00:00:00:00:06', 'peer64516-eth0' )
+
+ # setup configuration on the interface connected to hosts
+ peer64514.setIP( "4.0.0.254", 8, "peer64514-eth1" )
+ peer64514.setMAC( '00:00:00:00:00:44', 'peer64514-eth1' )
+ peer64515.setIP( "5.0.0.254", 8, "peer64515-eth1" )
+ peer64515.setMAC( '00:00:00:00:00:55', 'peer64515-eth1' )
+ peer64516.setIP( "6.0.0.254", 8, "peer64516-eth1" )
+ peer64516.setMAC( '00:00:00:00:00:66', 'peer64516-eth1' )
+
+ # enable forwarding on BGP peer hosts
+ peer64514.cmd( 'sysctl net.ipv4.conf.all.forwarding=1' )
+ peer64515.cmd( 'sysctl net.ipv4.conf.all.forwarding=1' )
+ peer64516.cmd( 'sysctl net.ipv4.conf.all.forwarding=1' )
+
+ # config interface for control plane connectivity
+ peer64514.setIP( "192.168.0.4", 24, "peer64514-eth2" )
+ peer64515.setIP( "192.168.0.5", 24, "peer64515-eth2" )
+ peer64516.setIP( "192.168.0.6", 24, "peer64516-eth2" )
+
+ # Setup hosts in each non-SDN AS
+ host64514, host64515, host64516 = \
+ net.get( 'host64514', 'host64515', 'host64516' )
+ host64514.cmd( 'ifconfig host64514-eth0 4.0.0.1 up' )
+ host64514.cmd( 'ip route add default via 4.0.0.254' )
+ host64514.setIP( '192.168.0.44', 24, 'host64514-eth1' ) # for control plane
+ host64515.cmd( 'ifconfig host64515-eth0 5.0.0.1 up' )
+ host64515.cmd( 'ip route add default via 5.0.0.254' )
+ host64516.cmd( 'ifconfig host64516-eth0 6.0.0.1 up' )
+ host64516.cmd( 'ip route add default via 6.0.0.254' )
+
+ host1, host2, host6, host13 = \
+ net.get( 'host1', 'host2', 'host6', 'host13' )
+ host1.cmd( 'ifconfig host1-eth0 201.0.0.1 up' )
+ host1.cmd( 'route add default gw 201.0.0.254' )
+ host2.cmd( 'ifconfig host2-eth0 202.0.0.1 up' )
+ host2.cmd( 'route add default gw 202.0.0.254' )
+ host6.cmd( 'ifconfig host6-eth0 206.0.0.1 up' )
+ host6.cmd( 'route add default gw 206.0.0.254' )
+ host13.cmd( 'ifconfig host13-eth0 213.0.0.13 up' )
+ host13.cmd( 'route add default gw 213.0.0.254' )
+
+
+ # set up swCtl100 as a learning switch
+ swCtl100 = net.get( 'swCtl100' )
+ swCtl100.cmd( 'ovs-vsctl set-controller swCtl100 none' )
+ swCtl100.cmd( 'ovs-vsctl set-fail-mode swCtl100 standalone' )
+
+ # connect all switches to controller
+
+ for i in range ( 1, numSw + 1 ):
+ swX = net.get( 'sw%s' % ( i ) )
+ swX.cmd( 'ovs-vsctl set-controller sw%s tcp:%s:6653' % ( i, onos1IP ) )
+
+ # Start Quagga on border routers
+ '''
+ for i in range ( 64514, 64516 + 1 ):
+ startquagga( 'peer%s' % ( i ), i, 'quagga%s.conf' % ( i ) )
+ '''
+ startquagga( peer64514, 64514, 'quagga64514.conf' )
+ startquagga( peer64515, 64515, 'quagga64515.conf' )
+ startquagga( peer64516, 64516, 'quagga64516.conf' )
+
+ # start Quagga in SDN network
+ startquagga( speaker1, 64513, 'quagga-sdn.conf' )
+
+
+ root = net.get( 'root' )
+ root.intf( 'root-eth0' ).setIP( '1.1.1.2/24' )
+ root.cmd( 'ip addr add 192.168.0.100/24 dev root-eth0' )
+
+ speaker1.intf( 'speaker1-eth1' ).setIP( '1.1.1.1/24' )
+
+
+ stopsshd()
+
+ hosts = [ peer64514, peer64515, peer64516, host64514 ]
+ startsshds( hosts )
+
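+ # ssh -L listens on 1.1.1.2:2000 and forwards to port 2000 on the ONOS
+ # host, so the speaker's session to neighbor 1.1.1.2 (quagga-sdn.conf)
+ # actually terminates on ONOS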
+ forwarding1 = '%s:2000:%s:2000' % ( '1.1.1.2', onos1IP )
+ root.cmd( 'ssh -nNT -o "PasswordAuthentication no" \
+ -o "StrictHostKeyChecking no" -l sdn -L %s %s & ' % ( forwarding1, onos1IP ) )
+
+ # time.sleep( 3000000000 )
+ CLI( net )
+
+
+ stopsshd()
+ stopquagga()
+ net.stop()
+
+if __name__ == '__main__':
+ setLogLevel( 'debug' )
+ sdn1net()
diff --git a/TestON/tests/USECASE/USECASE_ReactiveRouting/dependencies/quagga-sdn.conf b/TestON/tests/USECASE/USECASE_ReactiveRouting/dependencies/quagga-sdn.conf
new file mode 100644
index 0000000..98f2fa2
--- /dev/null
+++ b/TestON/tests/USECASE/USECASE_ReactiveRouting/dependencies/quagga-sdn.conf
@@ -0,0 +1,44 @@
+! -*- bgp -*-
+!
+! BGPd sample configuration file
+!
+! $Id: bgpd.conf.sample,v 1.1 2002/12/13 20:15:29 paul Exp $
+!
+hostname bgpd
+password hello
+!enable password please-set-at-here
+!
+!bgp multiple-instance
+!
+!
+router bgp 64513
+ bgp router-id 10.0.4.101
+ timers bgp 1 3
+ !timers bgp 3 9
+ neighbor 10.0.4.1 remote-as 64514
+ neighbor 10.0.4.1 ebgp-multihop
+ neighbor 10.0.4.1 timers connect 5
+ neighbor 10.0.5.1 remote-as 64515
+ neighbor 10.0.5.1 ebgp-multihop
+ neighbor 10.0.5.1 timers connect 5
+ neighbor 10.0.6.1 remote-as 64516
+ neighbor 10.0.6.1 ebgp-multihop
+ neighbor 10.0.6.1 timers connect 5
+
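+! iBGP session to the BGP speaker on the ONOS instance, reached through
+! the ssh tunnel on 1.1.1.2 port 2000 set up by the topology script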
+ neighbor 1.1.1.2 remote-as 64513
+ neighbor 1.1.1.2 port 2000
+ neighbor 1.1.1.2 timers connect 5
+
+ network 201.0.0.0/24
+ network 202.0.0.0/24
+ !network 206.0.0.0/24
+!
+! access-list all permit any
+!
+!route-map set-nexthop permit 10
+! match ip address all
+! set ip next-hop 10.0.0.1
+!
+!log file /usr/local/var/log/quagga/bgpd.log
+!
+log stdout
diff --git a/TestON/tests/USECASE/USECASE_ReactiveRouting/dependencies/quagga64514.conf b/TestON/tests/USECASE/USECASE_ReactiveRouting/dependencies/quagga64514.conf
new file mode 100644
index 0000000..09440af
--- /dev/null
+++ b/TestON/tests/USECASE/USECASE_ReactiveRouting/dependencies/quagga64514.conf
@@ -0,0 +1,28 @@
+! -*- bgp -*-
+!
+! BGPd sample configuration file
+!
+! $Id: bgpd.conf.sample,v 1.1 2002/12/13 20:15:29 paul Exp $
+!
+hostname bgpd
+password hello
+!enable password please-set-at-here
+!
+!bgp multiple-instance
+!
+router bgp 64514
+ bgp router-id 10.0.4.1
+! timers bgp 1 3
+ neighbor 10.0.4.101 remote-as 64513
+ network 4.0.0.0/24
+
+!
+! access-list all permit any
+!
+!route-map set-nexthop permit 10
+! match ip address all
+! set ip next-hop 10.0.0.1
+!
+!log file /usr/local/var/log/quagga/bgpd.log
+!
+log stdout
\ No newline at end of file
diff --git a/TestON/tests/USECASE/USECASE_ReactiveRouting/dependencies/quagga64515.conf b/TestON/tests/USECASE/USECASE_ReactiveRouting/dependencies/quagga64515.conf
new file mode 100644
index 0000000..6d0b701
--- /dev/null
+++ b/TestON/tests/USECASE/USECASE_ReactiveRouting/dependencies/quagga64515.conf
@@ -0,0 +1,28 @@
+! -*- bgp -*-
+!
+! BGPd sample configuration file
+!
+! $Id: bgpd.conf.sample,v 1.1 2002/12/13 20:15:29 paul Exp $
+!
+hostname bgpd
+password hello
+!enable password please-set-at-here
+!
+!bgp multiple-instance
+!
+router bgp 64515
+ bgp router-id 10.0.5.1
+! timers bgp 1 3
+ neighbor 10.0.5.101 remote-as 64513
+ network 5.0.0.0/24
+
+!
+! access-list all permit any
+!
+!route-map set-nexthop permit 10
+! match ip address all
+! set ip next-hop 10.0.0.1
+!
+!log file /usr/local/var/log/quagga/bgpd.log
+!
+log stdout
diff --git a/TestON/tests/USECASE/USECASE_ReactiveRouting/dependencies/quagga64516.conf b/TestON/tests/USECASE/USECASE_ReactiveRouting/dependencies/quagga64516.conf
new file mode 100644
index 0000000..5401c05
--- /dev/null
+++ b/TestON/tests/USECASE/USECASE_ReactiveRouting/dependencies/quagga64516.conf
@@ -0,0 +1,31 @@
+! -*- bgp -*-
+!
+! BGPd sample configuration file
+!
+! $Id: bgpd.conf.sample,v 1.1 2002/12/13 20:15:29 paul Exp $
+!
+hostname bgpd
+password hello
+!enable password please-set-at-here
+!
+!bgp multiple-instance
+!
+router bgp 64516
+ bgp router-id 10.0.6.1
+! timers bgp 1 3
+ neighbor 10.0.6.101 remote-as 64513
+ network 6.0.0.0/24
+
+! neighbor 10.0.0.2 route-map set-nexthop out
+! neighbor 10.0.0.2 ebgp-multihop
+! neighbor 10.0.0.2 next-hop-self
+!
+! access-list all permit any
+!
+!route-map set-nexthop permit 10
+! match ip address all
+! set ip next-hop 10.0.0.1
+!
+!log file /usr/local/var/log/quagga/bgpd.log
+!
+log stdout
diff --git a/TestON/tests/USECASE/USECASE_ReactiveRouting/dependencies/zebra.conf b/TestON/tests/USECASE/USECASE_ReactiveRouting/dependencies/zebra.conf
new file mode 100644
index 0000000..517db94
--- /dev/null
+++ b/TestON/tests/USECASE/USECASE_ReactiveRouting/dependencies/zebra.conf
@@ -0,0 +1,26 @@
+! -*- zebra -*-
+!
+! zebra sample configuration file
+!
+! $Id: zebra.conf.sample,v 1.1 2002/12/13 20:15:30 paul Exp $
+!
+hostname zebra
+password hello
+enable password 0fw0rk
+log stdout
+!
+! Interfaces description.
+!
+!interface lo
+! description test of desc.
+!
+!interface sit0
+! multicast
+
+!
+! Static default route sample.
+!
+!ip route 0.0.0.0/0 203.181.89.241
+!
+
+!log file /usr/local/var/log/quagga/zebra.log
diff --git a/TestON/tests/USECASE/USECASE_ReactiveRouting/network-cfg.json b/TestON/tests/USECASE/USECASE_ReactiveRouting/network-cfg.json
new file mode 100644
index 0000000..cd1065f
--- /dev/null
+++ b/TestON/tests/USECASE/USECASE_ReactiveRouting/network-cfg.json
@@ -0,0 +1,76 @@
+{
+ "ports" : {
+ "of:00000000000000a8/5" : {
+ "interfaces" : [
+ {
+ "ips" : [ "10.0.5.101/24" ],
+ "mac" : "00:00:00:00:00:01"
+ }
+ ]
+ },
+ "of:0000000000000a32/4" : {
+ "interfaces" : [
+ {
+ "ips" : [ "10.0.4.101/24" ],
+ "mac" : "00:00:00:00:00:01"
+ }
+ ]
+ },
+ "of:0000000000000a28/3" : {
+ "interfaces" : [
+ {
+ "ips" : [ "10.0.6.101/24" ],
+ "mac" : "00:00:00:00:00:01"
+ }
+ ]
+ },
+ "of:00000000000000a1/4" : {
+ "interfaces" : [
+ {
+ "ips" : [ "201.0.0.200/24" ],
+ "mac" : "00:00:00:00:00:01"
+ }
+ ]
+ },
+ "of:00000000000000a2/3" : {
+ "interfaces" : [
+ {
+ "ips" : [ "202.0.0.200/24" ],
+ "mac" : "00:00:00:00:00:01"
+ }
+ ]
+ },
+ "of:00000000000000a6/3" : {
+ "interfaces" : [
+ {
+ "ips" : [ "206.0.0.200/24" ],
+ "mac" : "00:00:00:00:00:01"
+ }
+ ]
+ },
+ "of:0000000000000a13/4" : {
+ "interfaces" : [
+ {
+ "ips" : [ "213.0.0.200/24" ],
+ "mac" : "00:00:00:00:00:01"
+ }
+ ]
+ }
+ },
+ "apps" : {
+ "org.onosproject.router" : {
+ "bgp" : {
+ "bgpSpeakers" : [
+ {
+ "connectPoint" : "of:0000000000000a24/1",
+ "peers" : [
+ "10.0.4.1",
+ "10.0.5.1",
+ "10.0.6.1"
+ ]
+ }
+ ]
+ }
+ }
+ }
+}
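A quick sanity check of the config above before handing it to ONOS (a
sketch; the path is illustrative):

    import json

    with open('network-cfg.json') as f:
        cfg = json.load(f)

    # connect point -> configured ips / mac
    for port, entry in sorted(cfg['ports'].items()):
        for iface in entry['interfaces']:
            print('%s -> ips=%s mac=%s' % (port, iface['ips'], iface['mac']))

    # BGP speaker connect points
    speakers = cfg['apps']['org.onosproject.router']['bgp']['bgpSpeakers']
    print([s['connectPoint'] for s in speakers])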
diff --git a/TestON/tests/USECASE/USECASE_ReactiveRouting/network-cfg.json.withBGP b/TestON/tests/USECASE/USECASE_ReactiveRouting/network-cfg.json.withBGP
new file mode 100644
index 0000000..cd1065f
--- /dev/null
+++ b/TestON/tests/USECASE/USECASE_ReactiveRouting/network-cfg.json.withBGP
@@ -0,0 +1,76 @@
+{
+ "ports" : {
+ "of:00000000000000a8/5" : {
+ "interfaces" : [
+ {
+ "ips" : [ "10.0.5.101/24" ],
+ "mac" : "00:00:00:00:00:01"
+ }
+ ]
+ },
+ "of:0000000000000a32/4" : {
+ "interfaces" : [
+ {
+ "ips" : [ "10.0.4.101/24" ],
+ "mac" : "00:00:00:00:00:01"
+ }
+ ]
+ },
+ "of:0000000000000a28/3" : {
+ "interfaces" : [
+ {
+ "ips" : [ "10.0.6.101/24" ],
+ "mac" : "00:00:00:00:00:01"
+ }
+ ]
+ },
+ "of:00000000000000a1/4" : {
+ "interfaces" : [
+ {
+ "ips" : [ "201.0.0.200/24" ],
+ "mac" : "00:00:00:00:00:01"
+ }
+ ]
+ },
+ "of:00000000000000a2/3" : {
+ "interfaces" : [
+ {
+ "ips" : [ "202.0.0.200/24" ],
+ "mac" : "00:00:00:00:00:01"
+ }
+ ]
+ },
+ "of:00000000000000a6/3" : {
+ "interfaces" : [
+ {
+ "ips" : [ "206.0.0.200/24" ],
+ "mac" : "00:00:00:00:00:01"
+ }
+ ]
+ },
+ "of:0000000000000a13/4" : {
+ "interfaces" : [
+ {
+ "ips" : [ "213.0.0.200/24" ],
+ "mac" : "00:00:00:00:00:01"
+ }
+ ]
+ }
+ },
+ "apps" : {
+ "org.onosproject.router" : {
+ "bgp" : {
+ "bgpSpeakers" : [
+ {
+ "connectPoint" : "of:0000000000000a24/1",
+ "peers" : [
+ "10.0.4.1",
+ "10.0.5.1",
+ "10.0.6.1"
+ ]
+ }
+ ]
+ }
+ }
+ }
+}
diff --git a/TestON/tests/USECASE/USECASE_ReactiveRouting/network-cfg.json.withoutBGP b/TestON/tests/USECASE/USECASE_ReactiveRouting/network-cfg.json.withoutBGP
new file mode 100644
index 0000000..8d21d37
--- /dev/null
+++ b/TestON/tests/USECASE/USECASE_ReactiveRouting/network-cfg.json.withoutBGP
@@ -0,0 +1,36 @@
+{
+ "ports" : {
+ "of:00000000000000a1/4" : {
+ "interfaces" : [
+ {
+ "ips" : [ "201.0.0.200/24" ],
+ "mac" : "00:00:00:00:00:01"
+ }
+ ]
+ },
+ "of:00000000000000a2/3" : {
+ "interfaces" : [
+ {
+ "ips" : [ "202.0.0.200/24" ],
+ "mac" : "00:00:00:00:00:01"
+ }
+ ]
+ },
+ "of:00000000000000a6/3" : {
+ "interfaces" : [
+ {
+ "ips" : [ "206.0.0.200/24" ],
+ "mac" : "00:00:00:00:00:01"
+ }
+ ]
+ },
+ "of:0000000000000a13/4" : {
+ "interfaces" : [
+ {
+ "ips" : [ "213.0.0.200/24" ],
+ "mac" : "00:00:00:00:00:01"
+ }
+ ]
+ }
+ }
+}
diff --git a/TestON/tests/USECASE/USECASE_SdnipFunctionCluster_fsfw/USECASE_SdnipFunctionCluster_fsfw.topo b/TestON/tests/USECASE/USECASE_SdnipFunctionCluster_fsfw/USECASE_SdnipFunctionCluster_fsfw.topo
index c6d57fc..2dbca1d 100644
--- a/TestON/tests/USECASE/USECASE_SdnipFunctionCluster_fsfw/USECASE_SdnipFunctionCluster_fsfw.topo
+++ b/TestON/tests/USECASE/USECASE_SdnipFunctionCluster_fsfw/USECASE_SdnipFunctionCluster_fsfw.topo
@@ -27,12 +27,21 @@
<COMPONENTS> </COMPONENTS>
</ONOScli2>
+ <ONOScli3>
+ <host>127.0.0.1</host>
+ <user>sdn</user>
+ <password>rocks</password>
+ <type>OnosCliDriver</type>
+ <connect_order>4</connect_order>
+ <COMPONENTS> </COMPONENTS>
+ </ONOScli3>
+
<QuaggaCliSpeaker1>
<host>127.0.0.1</host>
<user>sdn</user>
<password>rocks</password>
<type>QuaggaCliDriver</type>
- <connect_order>4</connect_order>
+ <connect_order>5</connect_order>
<COMPONENTS> </COMPONENTS>
</QuaggaCliSpeaker1>
@@ -41,7 +50,7 @@
<user>sdn</user>
<password>rocks</password>
<type>MininetCliDriver</type>
- <connect_order>5</connect_order>
+ <connect_order>6</connect_order>
<COMPONENTS>
<home>~/Mininet/mininet/custom/</home>
</COMPONENTS>
diff --git a/TestON/tests/USECASE/USECASE_SdnipFunction_fsfw/USECASE_SdnipFunction_fsfw.py b/TestON/tests/USECASE/USECASE_SdnipFunction_fsfw/USECASE_SdnipFunction_fsfw.py
index 9183974..d66f5df 100644
--- a/TestON/tests/USECASE/USECASE_SdnipFunction_fsfw/USECASE_SdnipFunction_fsfw.py
+++ b/TestON/tests/USECASE/USECASE_SdnipFunction_fsfw/USECASE_SdnipFunction_fsfw.py
@@ -154,7 +154,7 @@
onfail="ONOS is NOT up" )
main.step( "Checking if ONOS CLI is ready" )
- cliResult = main.ONOScli.startOnosCli( ONOS1Ip,
+ cliResult = main.ONOScli1.startOnosCli( ONOS1Ip,
commandlineTimeout=100,
onosStartTimeout=600 )
utilities.assert_equals( expect=main.TRUE,
@@ -164,7 +164,7 @@
for i in range( 10 ):
ready = True
- output = main.ONOScli.summary()
+ output = main.ONOScli1.summary()
if not output:
ready = False
if ready:
@@ -181,20 +181,20 @@
main.log.info( "Get links in the network" )
time.sleep( int ( main.params['timers']['TopoDiscovery'] ) )
- summaryResult = main.ONOScli.summary()
+ summaryResult = main.ONOScli1.summary()
linkNum = json.loads( summaryResult )[ "links" ]
- listResult = main.ONOScli.links( jsonFormat=False )
+ listResult = main.ONOScli1.links( jsonFormat=False )
main.log.info( listResult )
if linkNum < 100:
main.log.error( "Link number is wrong!" )
time.sleep( int( main.params['timers']['TopoDiscovery'] ) )
- listResult = main.ONOScli.links( jsonFormat=False )
+ listResult = main.ONOScli1.links( jsonFormat=False )
main.log.info( listResult )
main.cleanup()
main.exit()
main.step( "Activate sdn-ip application" )
- activeSDNIPresult = main.ONOScli.activateApp( "org.onosproject.sdnip" )
+ activeSDNIPresult = main.ONOScli1.activateApp( "org.onosproject.sdnip" )
utilities.assert_equals( expect=main.TRUE,
actual=activeSDNIPresult,
onpass="Activate SDN-IP succeeded",
@@ -248,13 +248,13 @@
% main.params[ 'config' ][ 'peerNum' ] )
main.step( "Check P2P intents number from ONOS CLI" )
- getIntentsResult = main.ONOScli.intents( jsonFormat=True )
+ getIntentsResult = main.ONOScli1.intents( jsonFormat=True )
bgpIntentsActualNum = \
main.QuaggaCliSpeaker1.extractActualBgpIntentNum( getIntentsResult )
bgpIntentsExpectedNum = int( main.params[ 'config' ][ 'peerNum' ] ) * 6
if bgpIntentsActualNum != bgpIntentsExpectedNum:
time.sleep( int( main.params['timers']['RouteDelivery'] ) )
- getIntentsResult = main.ONOScli.intents( jsonFormat=True )
+ getIntentsResult = main.ONOScli1.intents( jsonFormat=True )
bgpIntentsActualNum = \
main.QuaggaCliSpeaker1.extractActualBgpIntentNum( getIntentsResult )
main.log.info( "bgpIntentsExpected num is:" )
@@ -280,14 +280,14 @@
allRoutesExpected.append( "5.0.0.0/24" + "/" + "10.0.5.1" )
allRoutesExpected.append( "6.0.0.0/24" + "/" + "10.0.6.1" )
- getRoutesResult = main.ONOScli.routes( jsonFormat=True )
+ getRoutesResult = main.ONOScli1.routes( jsonFormat=True )
allRoutesActual = \
main.QuaggaCliSpeaker1.extractActualRoutesMaster( getRoutesResult )
allRoutesStrExpected = str( sorted( allRoutesExpected ) )
allRoutesStrActual = str( allRoutesActual ).replace( 'u', "" )
if allRoutesStrActual != allRoutesStrExpected:
time.sleep( int( main.params['timers']['RouteDelivery'] ) )
- getRoutesResult = main.ONOScli.routes( jsonFormat=True )
+ getRoutesResult = main.ONOScli1.routes( jsonFormat=True )
allRoutesActual = \
main.QuaggaCliSpeaker1.extractActualRoutesMaster( getRoutesResult )
allRoutesStrActual = str( allRoutesActual ).replace( 'u', "" )
@@ -303,13 +303,13 @@
onfail="Routes are wrong!" )
main.step( "Check M2S intents installed" )
- getIntentsResult = main.ONOScli.intents( jsonFormat=True )
+ getIntentsResult = main.ONOScli1.intents( jsonFormat=True )
routeIntentsActualNum = \
main.QuaggaCliSpeaker1.extractActualRouteIntentNum( getIntentsResult )
routeIntentsExpectedNum = 3
if routeIntentsActualNum != routeIntentsExpectedNum:
time.sleep( int( main.params['timers']['RouteDelivery'] ) )
- getIntentsResult = main.ONOScli.intents( jsonFormat=True )
+ getIntentsResult = main.ONOScli1.intents( jsonFormat=True )
routeIntentsActualNum = \
main.QuaggaCliSpeaker1.extractActualRouteIntentNum( getIntentsResult )
diff --git a/TestON/tests/USECASE/USECASE_SdnipFunction_fsfw/USECASE_SdnipFunction_fsfw.topo b/TestON/tests/USECASE/USECASE_SdnipFunction_fsfw/USECASE_SdnipFunction_fsfw.topo
index 01be5af..9ba2e9e 100644
--- a/TestON/tests/USECASE/USECASE_SdnipFunction_fsfw/USECASE_SdnipFunction_fsfw.topo
+++ b/TestON/tests/USECASE/USECASE_SdnipFunction_fsfw/USECASE_SdnipFunction_fsfw.topo
@@ -10,14 +10,14 @@
<COMPONENTS> </COMPONENTS>
</ONOSbench>
- <ONOScli>
+ <ONOScli1>
<host>127.0.0.1</host>
<user>sdn</user>
<password>rocks</password>
<type>OnosCliDriver</type>
<connect_order>2</connect_order>
<COMPONENTS> </COMPONENTS>
- </ONOScli>
+ </ONOScli1>
<ONOS1>
<host>OC1</host>
diff --git a/TestON/tests/USECASE/USECASE_SegmentRouting/USECASE_SegmentRouting.py b/TestON/tests/USECASE/USECASE_SegmentRouting/USECASE_SegmentRouting.py
old mode 100644
new mode 100755
index bfdff59..9d96a43
--- a/TestON/tests/USECASE/USECASE_SegmentRouting/USECASE_SegmentRouting.py
+++ b/TestON/tests/USECASE/USECASE_SegmentRouting/USECASE_SegmentRouting.py
@@ -287,6 +287,8 @@
onfail="Flow status is wrong!" )
main.ONOSbench.dumpFlows( main.ONOSip[0],
main.logdir, "flowsBefore" + main.jsonFile)
+ main.ONOSbench.dumpGroups( main.ONOSip[0],
+ main.logdir, "groupsBefore" + main.jsonFile)
#time.sleep( 3*main.startUpSleep)
def CASE4( self, main ):
@@ -301,7 +303,8 @@
# cleanup mininet
main.ONOSbench.dumpFlows( main.ONOSip[0],
main.logdir, "flowsAfter" + main.jsonFile)
-
+ main.ONOSbench.dumpGroups( main.ONOSip[0],
+ main.logdir, "groupsAfter" + main.jsonFile)
main.step("2nd Check full connectivity")
pa = main.Mininet1.pingall()
utilities.assert_equals( expect=main.TRUE, actual=pa,
@@ -311,6 +314,9 @@
main.ONOSbench.dumpFlows( main.ONOSip[0],
main.logdir, "flowsAfter2nd" + main.jsonFile)
+ main.ONOSbench.dumpGroups( main.ONOSip[0],
+ main.logdir, "groupsAfter2nd" + main.jsonFile)
+
main.ONOSbench.onosStop( main.ONOSip[0] )
main.Mininet1.stopNet()