Merge pull request #19 from OPENNETWORKINGLAB/devel/git_checkout_refactor
Refactor git checkout
diff --git a/TestON/dependencies/topo-100sw.py b/TestON/dependencies/topo-100sw.py
new file mode 100644
index 0000000..308a3f1
--- /dev/null
+++ b/TestON/dependencies/topo-100sw.py
@@ -0,0 +1,31 @@
+
+from mininet.topo import Topo
+
+class MyTopo( Topo ):
+ "100 'floating' switch topology"
+
+ def __init__( self ):
+ # Initialize topology
+ Topo.__init__( self )
+
+ sw_list = []
+
+ for i in range(1, 101):
+ sw_list.append(
+ self.addSwitch(
+ 's'+str(i),
+ dpid = str(i).zfill(16)))
+
+
+ #Below connections are used for test cases
+ #that need to test link and port events
+ #Add link between switch 1 and switch 2
+ self.addLink(sw_list[0],sw_list[1])
+
+ #Create hosts and attach to sw 1 and sw 2
+ h1 = self.addHost('h1')
+ h2 = self.addHost('h2')
+ self.addLink(sw_list[0],h1)
+ self.addLink(sw_list[1],h2)
+
+topos = { 'mytopo': ( lambda: MyTopo() ) }
diff --git a/TestON/dependencies/topo-intentFlower.py b/TestON/dependencies/topo-intentFlower.py
new file mode 100644
index 0000000..138c291
--- /dev/null
+++ b/TestON/dependencies/topo-intentFlower.py
@@ -0,0 +1,80 @@
+'''
+Topology with 3 core switches connected linearly.
+
+Each 'core' switch has a 'flower' of 10 switches
+for a total of 33 switches.
+
+Used in conjunction with 'IntentPerfNext' test
+'''
+
+from mininet.topo import Topo
+
+class MyTopo( Topo ):
+
+ def __init__( self ):
+ Topo.__init__( self )
+
+ #Switches are listed out here for better view
+ #of the topology from this code
+ core_sw_list = ['s1','s2','s3']
+
+ #Flower switches for core switch 1
+ flower_sw_list_s1 =\
+ ['s10', 's11', 's12', 's13', 's14',
+ 's15', 's16', 's17', 's18', 's19']
+ #Flower switches for core switch 2
+ flower_sw_list_s2 =\
+ ['s20', 's21', 's22', 's23', 's24',
+ 's25', 's26', 's27', 's28', 's29']
+ #Flower switches for core switch 3
+ flower_sw_list_s3 =\
+ ['s30', 's31', 's32', 's33', 's34',
+ 's35', 's36', 's37', 's38', 's39']
+
+ #Store switch objects in these variables
+ core_switches = []
+ flower_switches_1 = []
+ flower_switches_2 = []
+ flower_switches_3 = []
+
+ #Add switches
+ for sw in core_sw_list:
+ core_switches.append(
+ self.addSwitch(
+ sw,
+ dpid = sw.replace('s','').zfill(16)
+ )
+ )
+ for sw in flower_sw_list_s1:
+ flower_switches_1.append(
+ self.addSwitch(
+ sw,
+ dpid = sw.replace('s','').zfill(16)
+ )
+ )
+ for sw in flower_sw_list_s2:
+ flower_switches_2.append(
+ self.addSwitch(
+ sw,
+ dpid = sw.replace('s','').zfill(16)
+ )
+ )
+ for sw in flower_sw_list_s3:
+ flower_switches_3.append(
+ self.addSwitch(
+ sw,
+ dpid = sw.replace('s','').zfill(16)
+ )
+ )
+
+ self.addLink(core_switches[0], core_switches[1])
+ self.addLink(core_switches[1], core_switches[2])
+
+ for x in range(0, len(flower_sw_list_s1)):
+ self.addLink(core_switches[0], flower_switches_1[x])
+ for x in range(0, len(flower_sw_list_s2)):
+ self.addLink(core_switches[1], flower_switches_2[x])
+ for x in range(0, len(flower_sw_list_s3)):
+ self.addLink(core_switches[2], flower_switches_3[x])
+
+topos = { 'mytopo': ( lambda: MyTopo() ) }
diff --git a/TestON/drivers/common/cli/emulator/mininetclidriver.py b/TestON/drivers/common/cli/emulator/mininetclidriver.py
index 9875064..ad07d95 100644
--- a/TestON/drivers/common/cli/emulator/mininetclidriver.py
+++ b/TestON/drivers/common/cli/emulator/mininetclidriver.py
@@ -115,18 +115,18 @@
if fanout is None: #In tree topology, if fanout arg is not given, by default it is 2
fanout = 2
k = 0
- sum = 0
+ count = 0
while(k <= depth-1):
- sum = sum + pow(fanout,k)
+ count = count + pow(fanout,k)
k = k+1
- num_switches = sum
+ num_switches = count
while(k <= depth-2):
'''depth-2 gives you only core links and not considering edge links as seen by ONOS
If all the links including edge links are required, do depth-1
'''
- sum = sum + pow(fanout,k)
+ count = count + pow(fanout,k)
k = k+1
- num_links = sum * fanout
+ num_links = count * fanout
#print "num_switches for %s(%d,%d) = %d and links=%d" %(topoType,depth,fanout,num_switches,num_links)
elif topoType =='linear':
@@ -387,7 +387,7 @@
return information dict about interfaces connected to the node
'''
if self.handle :
- cmd = 'py "\\n".join(["name=%s,mac=%s,ip=%s,isUp=%s" % (i.name, i.MAC(), i.IP(), i.isUp())'
+ cmd = 'py "\\n".join(["name=%s,mac=%s,ip=%s,enabled=%s" % (i.name, i.MAC(), i.IP(), i.isUp())'
cmd += ' for i in %s.intfs.values()])' % node
try:
response = self.execute(cmd=cmd,prompt="mininet>",timeout=10)
@@ -490,11 +490,11 @@
'''
Bring link(s) between two nodes up or down
'''
- main.log.info('Bring link(s) between two nodes up or down')
args = utilities.parse_args(["END1","END2","OPTION"],**linkargs)
end1 = args["END1"] if args["END1"] != None else ""
end2 = args["END2"] if args["END2"] != None else ""
option = args["OPTION"] if args["OPTION"] != None else ""
+ main.log.info("Bring link between '"+ end1 +"' and '" + end2 + "' '" + option + "'")
command = "link "+str(end1) + " " + str(end2)+ " " + str(option)
try:
#response = self.execute(cmd=command,prompt="mininet>",timeout=10)
@@ -716,7 +716,6 @@
pattern = "flow_count=(\d+)"
result = re.search(pattern, response, re.MULTILINE)
if result is None:
- print "no flow on switch print test"
main.log.info("Couldn't find flows on switch '', found: %s" % (switch, response))
return main.FALSE
return result.group(1)
@@ -811,13 +810,10 @@
#main.log.debug("Switches_json string: ", switches_json)
output = {"switches":[]}
for switch in topo.graph.switches: #iterate through the MN topology and pull out switches and and port info
- #print vars(switch)
ports = []
for port in switch.ports.values():
- #print port.hw_addr.toStr(separator = '')
ports.append({'of_port': port.port_no, 'mac': str(port.hw_addr).replace('\'',''), 'name': port.name})
output['switches'].append({"name": switch.name, "dpid": str(switch.dpid).zfill(16), "ports": ports })
- #print output
#print "mn"
#print json.dumps(output, sort_keys=True,indent=4,separators=(',', ': '))
@@ -830,6 +826,7 @@
for switch in output['switches']:
mnDPIDs.append(switch['dpid'])
mnDPIDs.sort()
+ #print "List of Mininet switch DPID's"
#print mnDPIDs
if switches_json == "":#if rest call fails
main.log.error(self.name + ".compare_switches(): Empty JSON object given from ONOS")
@@ -837,9 +834,13 @@
onos=switches_json
onosDPIDs=[]
for switch in onos:
- onosDPIDs.append(switch['id'].replace(":",'').replace("of",''))
- #print switch
+ if switch['available'] == True:
+ onosDPIDs.append(switch['id'].replace(":",'').replace("of",''))
+ #else:
+ #print "Switch is unavailable:"
+ #print switch
onosDPIDs.sort()
+ #print "List of ONOS switch DPID's"
#print onosDPIDs
if mnDPIDs!=onosDPIDs:
@@ -872,51 +873,68 @@
port_results = main.TRUE
output = {"switches":[]}
for switch in topo.graph.switches: #iterate through the MN topology and pull out switches and and port info
- #print vars(switch)
ports = []
for port in switch.ports.values():
#print port.hw_addr.toStr(separator = '')
- ports.append({'of_port': port.port_no, 'mac': str(port.hw_addr).replace('\'',''), 'name': port.name})
- output['switches'].append({"name": switch.name, "dpid": str(switch.dpid).zfill(16), "ports": ports })
- #print "compare_ports 'output' variable:"
- #print output
+ tmp_port = {}
+ tmp_port['of_port'] = port.port_no
+ tmp_port['mac'] = str(port.hw_addr).replace('\'','')
+ tmp_port['name'] = port.name
+ tmp_port['enabled'] = port.enabled
+
+ ports.append(tmp_port)
+ tmp_switch = {}
+ tmp_switch['name'] = switch.name
+ tmp_switch['dpid'] = str(switch.dpid).zfill(16)
+ tmp_switch['ports'] = ports
+
+ output['switches'].append(tmp_switch)
################ports#############
- for switch in output['switches']:
+ for mn_switch in output['switches']:
mn_ports = []
onos_ports = []
- for port in switch['ports']:
- mn_ports.append(port['of_port'])
+ for port in mn_switch['ports']:
+ if port['enabled'] == True:
+ mn_ports.append(port['of_port'])
for onos_switch in ports_json:
- if onos_switch['device'].replace(':','').replace("of", '') == switch['dpid']:
- for port in onos_switch['ports']:
- onos_ports.append(int(port['port']))
+ #print "Iterating through a new switch as seen by ONOS"
+ #print onos_switch
+ if onos_switch['device']['available'] == True:
+ if onos_switch['device']['id'].replace(':','').replace("of", '') == mn_switch['dpid']:
+ for port in onos_switch['ports']:
+ if port['isEnabled']:
+ #print "Iterating through available ports on the switch"
+ #print port
+ onos_ports.append(int(port['port']))
mn_ports.sort(key=float)
onos_ports.sort(key=float)
#print "\nPorts for Switch %s:" % (switch['name'])
#print "\tmn_ports[] = ", mn_ports
#print "\tonos_ports[] = ", onos_ports
- #if mn_ports == onos_ports:
- #pass #don't set results to true here as this is just one of many checks and it might override a failure
-
- #For OF1.3, the OFP_local port number is 0xfffffffe or 4294967294 instead of 0xfffe or 65534 in OF1.0, ONOS topology
- #sees the correct port number, however MN topology as read from line 151 of https://github.com/ucb-sts/sts/blob/
- #topology_refactoring2/sts/entities/teston_entities.py is 0xfffe which doesn't work correctly with OF1.3 switches.
- #So a short term fix is to ignore the case when mn_port == 65534 and onos_port ==4294967294.
+ #NOTE:For OF1.3, the OFP_local port number is 0xfffffffe or 4294967294 instead of 0xfffe or 65534 in OF1.0,
+ # ONOS topology sees the correct port number, however MN topology as read from line 151 of
+ # https://github.com/ucb-sts/sts/blob/topology_refactoring2/sts/entities/teston_entities.py
+ # is 0xfffe which doesn't work correctly with OF1.3 switches.
- #ONOS-Next is abstracting port numbers to 64bit unsigned number. So we will be converting the OF reserved ports to these numbers
+ #NOTE: ONOS is abstracting port numbers to 64bit unsigned number(long). So we will be converting the
+ # OF reserved ports to these numbers
+
+
#TODO: handle other reserved port numbers besides LOCAL
for mn_port,onos_port in zip(mn_ports,onos_ports):
- if mn_port == onos_port or (mn_port == 65534 and onos_port ==int(uint64(-2))):
+ #print "mn == onos port?"
+ #print mn_port, onos_port
+ if mn_port == onos_port or (mn_port == 65534 and onos_port == long(uint64(-2))):
continue
#don't set results to true here as this is just one of many checks and it might override a failure
else: #the ports of this switch don't match
port_results = main.FALSE
break
if port_results == main.FALSE:
- main.log.report("The list of ports for switch %s(%s) does not match:" % (switch['name'], switch['dpid']) )
+ main.log.report("The list of ports for switch %s(%s) does not match:" % (mn_switch['name'], mn_switch['dpid']) )
main.log.report("mn_ports[] = " + str(mn_ports))
main.log.report("onos_ports[] = " + str(onos_ports))
return port_results
@@ -939,7 +957,8 @@
output = {"switches":[]}
onos = links_json
for switch in topo.graph.switches: #iterate through the MN topology and pull out switches and and port info
- #print vars(switch)
+ # print "Iterating though switches as seen by Mininet"
+ # print switch
ports = []
for port in switch.ports.values():
#print port.hw_addr.toStr(separator = '')
@@ -947,19 +966,21 @@
output['switches'].append({"name": switch.name, "dpid": str(switch.dpid).zfill(16), "ports": ports })
#######Links########
- if 2*len(topo.patch_panel.network_links) == len(onos):
+ mn_links = [link for link in topo.patch_panel.network_links if (link.port1.enabled and link.port2.enabled)]
+ #print "mn_links:"
+ #print mn_links
+ if 2*len(mn_links) == len(onos):
link_results = main.TRUE
else:
link_results = main.FALSE
- main.log.report("Mininet has %i bidirectional links and ONOS has %i unidirectional links" % (len(topo.patch_panel.network_links), len(onos) ))
+ main.log.report("Mininet has %i bidirectional links and ONOS has %i unidirectional links" % (len(mn_links), len(onos) ))
# iterate through MN links and check if an ONOS link exists in both directions
# NOTE: Will currently only show mn links as down if they are cut through STS.
# We can either do everything through STS or wait for up_network_links
# and down_network_links to be fully implemented.
- for link in topo.patch_panel.network_links:
- #print "\n"
+ for link in mn_links:
#print "Link: %s" % link
#TODO: Find a more efficient search method
node1 = None
@@ -1050,6 +1071,26 @@
return host_list
+
+ def update(self):
+ '''
+ updates the port address and status information for each port in mn
+ '''
+ #TODO: Add error checking. currently the mininet command has no output
+        main.log.info("Updating MN port information")
+ self.handle.sendline("")
+ self.handle.expect("mininet>")
+
+ self.handle.sendline("update")
+ self.handle.expect("mininet>")
+
+ self.handle.sendline("")
+ self.handle.expect("mininet>")
+
+ return main.TRUE
+
+
+
if __name__ != "__main__":
import sys
sys.modules[__name__] = MininetCliDriver()
diff --git a/TestON/drivers/common/cli/emulator/remotemininetdriver.py b/TestON/drivers/common/cli/emulator/remotemininetdriver.py
index 2acddb4..ffb5151 100644
--- a/TestON/drivers/common/cli/emulator/remotemininetdriver.py
+++ b/TestON/drivers/common/cli/emulator/remotemininetdriver.py
@@ -416,6 +416,15 @@
main.cleanup()
main.exit()
+ def run_optical_mn_script(self):
+ self.handle.sendline("")
+ self.handle.expect("\$")
+ self.handle.sendline("cd ~")
+ self.handle.expect("\$")
+ self.handle.sendline("sudo python optical.py")
+ self.handle.expect("\$")
+
+
def del_switch(self,sw):
self.handle.sendline("")
self.handle.expect("\$")
diff --git a/TestON/drivers/common/cli/onosclidriver.py b/TestON/drivers/common/cli/onosclidriver.py
index 2404a0d..90514d0 100644
--- a/TestON/drivers/common/cli/onosclidriver.py
+++ b/TestON/drivers/common/cli/onosclidriver.py
@@ -950,7 +950,7 @@
def add_point_intent(self, ingress_device, port_ingress,
egress_device, port_egress, ethType="", ethSrc="",
- ethDst=""):
+ ethDst="", bandwidth="", lambda_alloc=False):
'''
Required:
* ingress_device: device id of ingress device
@@ -959,6 +959,9 @@
* ethType: specify ethType
* ethSrc: specify ethSrc (i.e. src mac addr)
* ethDst: specify ethDst (i.e. dst mac addr)
+ * bandwidth: specify bandwidth capacity of link
+ * lambda_alloc: if True, intent will allocate lambda
+ for the specified intent
Description:
Adds a point-to-point intent (uni-directional) by
specifying device id's and optional fields
@@ -971,7 +974,8 @@
cmd = ""
#If there are no optional arguments
- if not ethType and not ethSrc and not ethDst:
+ if not ethType and not ethSrc and not ethDst\
+ and not bandwidth and not lambda_alloc:
cmd = "add-point-intent "+\
str(ingress_device) + "/" + str(port_ingress) + " " +\
str(egress_device) + "/" + str(port_egress)
@@ -985,13 +989,14 @@
cmd += " --ethSrc " + str(ethSrc)
if ethDst:
cmd += " --ethDst " + str(ethDst)
-
- cmd += " "+str(ingress_device) + "/" + str(port_ingress) + " " +\
- str(egress_device) + "/" + str(port_egress)
+ if bandwidth:
+ cmd += " --bandwidth " + str(bandwidth)
+ if lambda_alloc:
+ cmd += " --lambda "
- #print "cmd = ", cmd
- #self.handle.sendline("")
- #self.handle.expect("onos>")
+ cmd += " "+str(ingress_device) +\
+ "/" + str(port_ingress) + " " +\
+ str(egress_device) + "/" + str(port_egress)
self.handle.sendline(cmd)
i = self.handle.expect([
@@ -1001,12 +1006,6 @@
self.handle.sendline("intents")
self.handle.expect("onos>")
Intenthandle = self.handle.before
- #print "Intenthandle = ", Intenthandle
-
- #self.handle.sendline("flows")
- #self.handle.expect("onos>")
- #Flowhandle = self.handle.before
- #print "Flowhandle = ", Flowhandle
if i == 0:
main.log.error("Error in adding point-to-point intent")
@@ -1072,7 +1071,9 @@
self.handle.expect("intents -j")
self.handle.expect("onos>")
handle = self.handle.before
-
+
+ ansi_escape = re.compile(r'\r\r\n\x1b[^m]*m')
+ handle = ansi_escape.sub('', handle)
else:
self.handle.sendline("")
self.handle.expect("onos>")
@@ -1130,7 +1131,44 @@
main.cleanup()
main.exit()
+ def intents_events_metrics(self, json_format=True):
+ '''
+ Description:Returns topology metrics
+ Optional:
+ * json_format: enable json formatting of output
+ '''
+ try:
+ if json_format:
+ self.handle.sendline("intents-events-metrics -j")
+ self.handle.expect("intents-events-metrics -j")
+ self.handle.expect("onos>")
+
+ handle = self.handle.before
+
+ #Some color thing that we want to escape
+ ansi_escape = re.compile(r'\r\r\n\x1b[^m]*m')
+ handle = ansi_escape.sub('', handle)
+
+ else:
+ self.handle.sendline("intents-events-metrics")
+ self.handle.expect("intents-events-metrics")
+ self.handle.expect("onos>")
+
+ handle = self.handle.before
+ return handle
+
+ except pexpect.EOF:
+ main.log.error(self.name + ": EOF exception found")
+ main.log.error(self.name + ": " + self.handle.before)
+ main.cleanup()
+ main.exit()
+ except:
+ main.log.info(self.name+" ::::::")
+ main.log.error( traceback.print_exc())
+ main.log.info(self.name+" ::::::")
+ main.cleanup()
+ main.exit()
def topology_events_metrics(self, json_format=True):
'''
diff --git a/TestON/drivers/common/cli/onosdriver.py b/TestON/drivers/common/cli/onosdriver.py
index 9238f8a..875707b 100644
--- a/TestON/drivers/common/cli/onosdriver.py
+++ b/TestON/drivers/common/cli/onosdriver.py
@@ -120,6 +120,36 @@
main.cleanup()
main.exit()
+ def onos_build(self):
+ '''
+ Use the pre defined script to build onos via mvn
+ '''
+
+ try:
+ self.handle.sendline("onos-build")
+ self.handle.expect("onos-build")
+ i = self.handle.expect([
+ "BUILD SUCCESS",
+ "ERROR",
+ "BUILD FAILED"], timeout=120)
+ handle = str(self.handle.before)
+
+ main.log.info("onos-build command returned: "+
+ handle)
+
+ if i == 0:
+ return main.TRUE
+ else:
+ return handle
+
+ except pexpect.EOF:
+ main.log.error(self.name + ": EOF exception found")
+ main.log.error(self.name + ": " + self.handle.before)
+ except:
+ main.log.error("Failed to build ONOS")
+ main.cleanup()
+ main.exit()
+
def clean_install(self):
'''
Runs mvn clean install in the root of the ONOS directory.
@@ -354,7 +384,7 @@
main.cleanup()
main.exit()
- def get_version(self):
+ def get_version(self, report=False):
'''
Writes the COMMIT number to the report to be parsed by Jenkins data collecter.
'''
@@ -373,6 +403,9 @@
lines=response.splitlines()
for line in lines:
print line
+ if report:
+ for line in lines[2:-1]:
+ main.log.report(line)
return lines[2]
except pexpect.EOF:
main.log.error(self.name + ": EOF exception found")
@@ -850,7 +883,7 @@
self.handle.sendline("onos-wait-for-start " + node )
self.handle.expect("onos-wait-for-start")
#NOTE: this timeout is arbitrary"
- i = self.handle.expect(["\$", pexpect.TIMEOUT], timeout = 120)
+ i = self.handle.expect(["\$", pexpect.TIMEOUT], timeout = 300)
if i == 0:
main.log.info(self.name + ": " + node + " is up")
return main.TRUE
@@ -986,6 +1019,41 @@
main.cleanup()
main.exit()
+ def tshark_pcap(self, interface, dir_file):
+ '''
+ Capture all packet activity and store in specified
+ directory/file
+
+ Required:
+ * interface: interface to capture
+ * dir: directory/filename to store pcap
+ '''
+ self.handle.sendline("")
+ self.handle.expect("\$")
+
+ self.handle.sendline("tshark -i "+str(interface)+
+ " -t e -w "+str(dir_file))
+ self.handle.expect("Capturing on")
+
+ main.log.info("Tshark started capturing files on "+
+ str(interface)+ " and saving to directory: "+
+                str(dir_file))
+
+
+ def run_onos_topo_cfg(self):
+ '''
+ On ONOS bench, run this command: ./~/ONOS/tools/test/bin/onos-topo-cfg
+ which starts the rest and copies the json file to the onos instance
+ '''
+ self.handle.sendline("")
+ self.handle.expect("\$")
+ self.handle.sendline("cd ~/ONOS/tools/test/bin")
+ self.handle.expect("/bin$")
+ self.handle.sendline("./onos-topo-cfg")
+ self.handle.expect("{}admin@onosTestBench")
+ self.handle.sendline("cd ~")
+ self.handle.expect("\$")
+
def tshark_grep(self, grep, directory, interface='eth0'):
'''
diff --git a/TestON/tests/IntentPerfNext/IntentPerfNext.params b/TestON/tests/IntentPerfNext/IntentPerfNext.params
new file mode 100644
index 0000000..7e1de77
--- /dev/null
+++ b/TestON/tests/IntentPerfNext/IntentPerfNext.params
@@ -0,0 +1,44 @@
+<PARAMS>
+ <testcases>1,2</testcases>
+
+ <ENV>
+ <cellName>intent_perf_test</cellName>
+ </ENV>
+
+ <GIT>
+ #autoPull 'on' or 'off'
+ <autoPull>off</autoPull>
+ <checkout>master</checkout>
+ </GIT>
+
+ <CTRL>
+ <user>admin</user>
+ <ip1>10.128.174.1</ip1>
+ <port1>6633</port1>
+ <ip2>10.128.174.2</ip2>
+ <port2>6633</port2>
+ <ip3>10.128.174.3</ip3>
+ <port3>6633</port3>
+ </CTRL>
+
+ <MN>
+ <ip1>10.128.10.90</ip1>
+ <ip2>10.128.10.91</ip2>
+ </MN>
+
+ <BENCH>
+ <ip>10.128.174.10</ip>
+ </BENCH>
+
+ <TEST>
+ #Number of times to iterate each case
+ <numIter>5</numIter>
+ </TEST>
+
+ <JSON>
+ <submittedTime>intentSubmittedTimestamp</submittedTime>
+ <installedTime>intentInstalledTimestamp</installedTime>
+ <wdRequestTime>intentWithdrawRequestedTimestamp</wdRequestTime>
+ <withdrawnTime>intentWithdrawnTimestamp</withdrawnTime>
+ </JSON>
+</PARAMS>
diff --git a/TestON/tests/IntentPerfNext/IntentPerfNext.py b/TestON/tests/IntentPerfNext/IntentPerfNext.py
new file mode 100644
index 0000000..2fb22dc
--- /dev/null
+++ b/TestON/tests/IntentPerfNext/IntentPerfNext.py
@@ -0,0 +1,218 @@
+#Intent Performance Test for ONOS-next
+#
+#andrew@onlab.us
+#
+#November 5, 2014
+
+class IntentPerfNext:
+ def __init__(self):
+ self.default = ""
+
+ def CASE1(self, main):
+ '''
+ ONOS startup sequence
+ '''
+
+ import time
+
+ cell_name = main.params['ENV']['cellName']
+
+ git_pull = main.params['GIT']['autoPull']
+ checkout_branch = main.params['GIT']['checkout']
+
+ ONOS1_ip = main.params['CTRL']['ip1']
+ ONOS2_ip = main.params['CTRL']['ip2']
+ ONOS3_ip = main.params['CTRL']['ip3']
+
+ MN1_ip = main.params['MN']['ip1']
+ BENCH_ip = main.params['BENCH']['ip']
+
+ main.case("Setting up test environment")
+
+ main.step("Creating cell file")
+ cell_file_result = main.ONOSbench.create_cell_file(
+ BENCH_ip, cell_name, MN1_ip, "onos-core",
+ ONOS1_ip, ONOS2_ip, ONOS3_ip)
+
+ main.step("Applying cell file to environment")
+ cell_apply_result = main.ONOSbench.set_cell(cell_name)
+ verify_cell_result = main.ONOSbench.verify_cell()
+
+ main.step("Git checkout and pull "+checkout_branch)
+ if git_pull == 'on':
+ checkout_result = \
+ main.ONOSbench.git_checkout(checkout_branch)
+ pull_result = main.ONOSbench.git_pull()
+
+ #If you used git pull, auto compile
+ main.step("Using onos-build to compile ONOS")
+ build_result = main.ONOSbench.onos_build()
+ else:
+ checkout_result = main.TRUE
+ pull_result = main.TRUE
+ build_result = main.TRUE
+ main.log.info("Git pull skipped by configuration")
+
+ main.step("Creating ONOS package")
+ package_result = main.ONOSbench.onos_package()
+
+ main.step("Installing ONOS package")
+ install1_result = main.ONOSbench.onos_install(node=ONOS1_ip)
+ install2_result = main.ONOSbench.onos_install(node=ONOS2_ip)
+ install3_result = main.ONOSbench.onos_install(node=ONOS3_ip)
+
+ main.step("Set cell for ONOScli env")
+ main.ONOS1cli.set_cell(cell_name)
+ main.ONOS2cli.set_cell(cell_name)
+ main.ONOS3cli.set_cell(cell_name)
+
+ time.sleep(5)
+
+ main.step("Start onos cli")
+ cli1 = main.ONOS1cli.start_onos_cli(ONOS1_ip)
+ cli2 = main.ONOS2cli.start_onos_cli(ONOS2_ip)
+ cli3 = main.ONOS3cli.start_onos_cli(ONOS3_ip)
+
+ main.step("Enable metrics feature")
+ main.ONOS1cli.feature_install("onos-app-metrics")
+ main.ONOS2cli.feature_install("onos-app-metrics")
+ main.ONOS3cli.feature_install("onos-app-metrics")
+
+ utilities.assert_equals(expect=main.TRUE,
+ actual = cell_file_result and cell_apply_result and\
+ verify_cell_result and checkout_result and\
+ pull_result and build_result and\
+ install1_result and install2_result and\
+ install3_result,
+ onpass="ONOS started successfully",
+ onfail="Failed to start ONOS")
+
+ def CASE2(self, main):
+ '''
+ Single intent add latency
+
+ '''
+ import time
+ import json
+ import requests
+ import os
+
+ ONOS1_ip = main.params['CTRL']['ip1']
+ ONOS2_ip = main.params['CTRL']['ip2']
+ ONOS3_ip = main.params['CTRL']['ip3']
+ ONOS_user = main.params['CTRL']['user']
+
+ default_sw_port = main.params['CTRL']['port1']
+
+ #number of iterations of case
+ num_iter = main.params['TEST']['numIter']
+
+ #Timestamp keys for json metrics output
+ submit_time = main.params['JSON']['submittedTime']
+ install_time = main.params['JSON']['installedTime']
+ wdRequest_time = main.params['JSON']['wdRequestTime']
+ withdrawn_time = main.params['JSON']['withdrawnTime']
+
+ devices_json_str = main.ONOS1cli.devices()
+ devices_json_obj = json.loads(devices_json_str)
+
+ device_id_list = []
+
+ #Obtain device id list in ONOS format.
+ #They should already be in order (1,2,3,10,11,12,13, etc)
+ for device in devices_json_obj:
+ device_id_list.append(device['id'])
+
+ intent_add_lat_list = []
+
+ for i in range(0, int(num_iter)):
+ #add_point_intent(ingr_device, ingr_port,
+ # egr_device, egr_port)
+ main.ONOS1cli.add_point_intent(
+ device_id_list[0], 2,
+ device_id_list[2], 1)
+
+ #Allow some time for intents to propagate
+ time.sleep(5)
+
+ #Obtain metrics from ONOS 1, 2, 3
+ intents_json_str_1 = main.ONOS1cli.intents_events_metrics()
+ intents_json_str_2 = main.ONOS2cli.intents_events_metrics()
+ intents_json_str_3 = main.ONOS3cli.intents_events_metrics()
+
+ intents_json_obj_1 = json.loads(intents_json_str_1)
+ intents_json_obj_2 = json.loads(intents_json_str_2)
+ intents_json_obj_3 = json.loads(intents_json_str_3)
+
+ #Parse values from the json object
+ intent_submit_1 = \
+ intents_json_obj_1[submit_time]['value']
+ intent_submit_2 = \
+ intents_json_obj_2[submit_time]['value']
+ intent_submit_3 = \
+ intents_json_obj_3[submit_time]['value']
+
+ intent_install_1 = \
+ intents_json_obj_1[install_time]['value']
+ intent_install_2 = \
+ intents_json_obj_2[install_time]['value']
+ intent_install_3 = \
+ intents_json_obj_3[install_time]['value']
+
+ intent_install_lat_1 = \
+ int(intent_install_1) - int(intent_submit_1)
+ intent_install_lat_2 = \
+ int(intent_install_2) - int(intent_submit_2)
+ intent_install_lat_3 = \
+ int(intent_install_3) - int(intent_submit_3)
+
+ intent_install_lat_avg = \
+ (intent_install_lat_1 +
+ intent_install_lat_2 +
+ intent_install_lat_3 ) / 3
+
+ main.log.info("Intent add latency avg for iteration "+str(i)+
+ ": "+str(intent_install_lat_avg))
+
+ if intent_install_lat_avg > 0.0 and \
+ intent_install_lat_avg < 1000:
+ intent_add_lat_list.append(intent_install_lat_avg)
+ else:
+ main.log.info("Intent add latency exceeded "+
+ "threshold. Skipping iteration "+str(i))
+
+ time.sleep(3)
+
+ #TODO: Possibly put this in the driver function
+ main.log.info("Removing intents for next iteration")
+ json_temp = \
+ main.ONOS1cli.intents(json_format=True)
+ json_obj_intents = json.loads(json_temp)
+ if json_obj_intents:
+ for intents in json_obj_intents:
+ temp_id = intents['id']
+ main.ONOS1cli.remove_intent(temp_id)
+ main.log.info("Removing intent id: "+
+ str(temp_id))
+                    #NOTE: removed duplicate remove_intent(temp_id) call
+ else:
+ main.log.info("Intents were not installed correctly")
+
+ time.sleep(5)
+
+ intent_add_lat_min = min(intent_add_lat_list)
+ intent_add_lat_max = max(intent_add_lat_list)
+ intent_add_lat_avg = sum(intent_add_lat_list) /\
+ len(intent_add_lat_list)
+ #END ITERATION FOR LOOP
+ main.log.report("Single intent add latency - \n"+
+ "Min: "+str(intent_add_lat_min)+" ms\n"+
+ "Max: "+str(intent_add_lat_max)+" ms\n"+
+ "Avg: "+str(intent_add_lat_avg)+" ms\n")
+
+
+ def CASE3(self, main):
+ '''
+ CASE3 coming soon
+ '''
+
diff --git a/TestON/tests/IntentPerfNext/IntentPerfNext.topo b/TestON/tests/IntentPerfNext/IntentPerfNext.topo
new file mode 100644
index 0000000..fbde0e1
--- /dev/null
+++ b/TestON/tests/IntentPerfNext/IntentPerfNext.topo
@@ -0,0 +1,73 @@
+<TOPOLOGY>
+ <COMPONENT>
+
+ <ONOSbench>
+ <host>10.128.174.10</host>
+ <user>admin</user>
+ <password>onos_test</password>
+ <type>OnosDriver</type>
+ <connect_order>1</connect_order>
+ <COMPONENTS> </COMPONENTS>
+ </ONOSbench>
+
+ <ONOS1cli>
+ <host>10.128.174.10</host>
+ <user>admin</user>
+ <password>onos_test</password>
+ <type>OnosCliDriver</type>
+ <connect_order>2</connect_order>
+ <COMPONENTS> </COMPONENTS>
+ </ONOS1cli>
+
+ <ONOS2cli>
+ <host>10.128.174.10</host>
+ <user>admin</user>
+ <password>onos_test</password>
+ <type>OnosCliDriver</type>
+ <connect_order>2</connect_order>
+ <COMPONENTS> </COMPONENTS>
+ </ONOS2cli>
+
+ <ONOS3cli>
+ <host>10.128.174.10</host>
+ <user>admin</user>
+ <password>onos_test</password>
+ <type>OnosCliDriver</type>
+ <connect_order>2</connect_order>
+ <COMPONENTS> </COMPONENTS>
+ </ONOS3cli>
+
+ <ONOS1>
+ <host>10.128.174.1</host>
+ <user>admin</user>
+ <password>onos_test</password>
+ <type>OnosDriver</type>
+ <connect_order>3</connect_order>
+ <COMPONENTS> </COMPONENTS>
+ </ONOS1>
+
+ <Mininet1>
+ <host>10.128.10.90</host>
+ <user>admin</user>
+ <password>onos_test</password>
+ <type>MininetCliDriver</type>
+ <connect_order>4</connect_order>
+ <COMPONENTS>
+ <arg1> --custom topo-intentFlower.py </arg1>
+ <arg2> --arp --mac --topo mytopo</arg2>
+ <arg3> </arg3>
+ <controller> remote,ip=10.128.174.1 </controller>
+ </COMPONENTS>
+ </Mininet1>
+
+ <Mininet2>
+ <host>10.128.10.90</host>
+ <user>admin</user>
+ <password>onos_test</password>
+ <type>RemoteMininetDriver</type>
+ <connect_order>5</connect_order>
+ <COMPONENTS> </COMPONENTS>
+ </Mininet2>
+
+ </COMPONENT>
+</TOPOLOGY>
diff --git a/TestON/tests/MininetSlicing/MininetSlicing.py b/TestON/tests/MininetSlicing/MininetSlicing.py
index ac723fb..86b3ddc 100644
--- a/TestON/tests/MininetSlicing/MininetSlicing.py
+++ b/TestON/tests/MininetSlicing/MininetSlicing.py
@@ -1,17 +1,4 @@
-'''
-
- * TestON is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or
- * (at your option) any later version.
- * TestON is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
-
-
-'''
class MininetSlicing :
def __init__(self) :
@@ -81,5 +68,4 @@
-
-
+
\ No newline at end of file
diff --git a/TestON/tests/ONOSNextTest/ONOSNextTest.params b/TestON/tests/ONOSNextTest/ONOSNextTest.params
index 019b746..d600eae 100755
--- a/TestON/tests/ONOSNextTest/ONOSNextTest.params
+++ b/TestON/tests/ONOSNextTest/ONOSNextTest.params
@@ -8,7 +8,7 @@
</ENV>
<CTRL>
- <ip1>10.128.20.11</ip1>
+ <ip1>10.128.174.1</ip1>
<port1>6633</port1>
</CTRL>
diff --git a/TestON/tests/ONOSNextTest/ONOSNextTest.py b/TestON/tests/ONOSNextTest/ONOSNextTest.py
index de441ba..3496771 100755
--- a/TestON/tests/ONOSNextTest/ONOSNextTest.py
+++ b/TestON/tests/ONOSNextTest/ONOSNextTest.py
@@ -30,7 +30,7 @@
ONOS1_port = main.params['CTRL']['port1']
git_pull_trigger = main.params['GIT']['autoPull']
- git_checkout_branch = main.params['GIT']['branch']
+ git_checkout_branch = main.params['GIT']['checkout']
main.case("Setting up test environment")
@@ -39,7 +39,7 @@
cell_file_result = main.ONOSbench.create_cell_file(
"10.128.20.10", "temp_cell_2", "10.128.10.90",
"onos-core-trivial,onos-app-fwd",
- "10.128.20.11")
+ "10.128.174.1")
main.step("Applying cell variable to environment")
#cell_result = main.ONOSbench.set_cell(cell_name)
@@ -180,7 +180,8 @@
main.case("Testing the ONOS-cli")
main.step("Set cell for ONOS-cli environment")
- main.ONOScli.set_cell(cell_name)
+ #main.ONOScli.set_cell(cell_name)
+ main.ONOScli.set_cell("temp_cell_2")
main.step("Start ONOS-cli")
main.ONOScli.start_onos_cli(ONOS1_ip)
@@ -198,12 +199,12 @@
main.log.info("Node successfully added")
main.step("Add a correct node")
- node_result = main.ONOScli.add_node("111", "10.128.20.12")
+ node_result = main.ONOScli.add_node("111", "10.128.174.2")
main.step("Assign switches and list devices")
for i in range(1,8):
main.Mininet2.handle.sendline("sh ovs-vsctl set-controller s"+str(i)+
- " tcp:10.128.20.11")
+ " tcp:10.128.174.1")
main.Mininet2.handle.expect("mininet>")
#Need to sleep to allow switch add processing
time.sleep(5)
@@ -315,6 +316,19 @@
main.log.info(get_intent_result)
#*******************************************
+ main.step("Print intents in json format")
+ intents = main.ONOScli.intents(json_format = True)
+ main.log.info(intents)
+
+ main.step("Add eth options in point-to-point intent")
+ ptp_eth = main.ONOScli.add_point_intent(
+ devices_id_list[2], 1, devices_id_list[3], 2,
+ ethSrc = "00:02", ethDst = "00:03")
+ main.log.info(ptp_eth)
+
+ main.step("Print intents with eth options")
+ intents = main.ONOScli.intents()
+ main.log.info(intents)
######
#jhall@onlab.us
#andrew@onlab.us
diff --git a/TestON/tests/ONOSNextTest/ONOSNextTest.topo b/TestON/tests/ONOSNextTest/ONOSNextTest.topo
index 118e57c..0c84cc7 100755
--- a/TestON/tests/ONOSNextTest/ONOSNextTest.topo
+++ b/TestON/tests/ONOSNextTest/ONOSNextTest.topo
@@ -20,7 +20,7 @@
</ONOScli>
<ONOS1>
- <host>10.128.20.11</host>
+ <host>10.128.174.1</host>
<user>sdn</user>
<password>rocks</password>
<type>OnosDriver</type>
diff --git a/TestON/tests/TopoPerfNext/TopoPerfNext.params b/TestON/tests/TopoPerfNext/TopoPerfNext.params
index 39ddc7b..01334c9 100644
--- a/TestON/tests/TopoPerfNext/TopoPerfNext.params
+++ b/TestON/tests/TopoPerfNext/TopoPerfNext.params
@@ -1,5 +1,5 @@
<PARAMS>
- <testcases>1,4</testcases>
+ <testcases>1,2,3,4,5</testcases>
<ENV>
<cellName>topo_perf_test</cellName>
@@ -31,9 +31,22 @@
</BENCH>
<TEST>
+ #'on' or 'off' debug mode.
+ #If on, logging will be more verbose and
+ #tshark pcap will be enabled
+ #pcap file located at /tmp/'capture_name'
+ <debugMode>on</debugMode>
+
#Number of times to iterate each case
- <numIter>2</numIter>
+ <numIter>5</numIter>
<numSwitch>100</numSwitch>
+
+ <singleSwThreshold>0,1000</singleSwThreshold>
+ <portUpThreshold>0,1000</portUpThreshold>
+ <portDownThreshold>0,1000</portDownThreshold>
+ <linkUpThreshold>0,10000</linkUpThreshold>
+ <linkDownThreshold>0,10000</linkDownThreshold>
+ <swDisc100Threshold>0,10000</swDisc100Threshold>
</TEST>
<JSON>
diff --git a/TestON/tests/TopoPerfNext/TopoPerfNext.py b/TestON/tests/TopoPerfNext/TopoPerfNext.py
index adc3b96..9dd828e 100644
--- a/TestON/tests/TopoPerfNext/TopoPerfNext.py
+++ b/TestON/tests/TopoPerfNext/TopoPerfNext.py
@@ -131,6 +131,14 @@
deviceTimestamp = main.params['JSON']['deviceTimestamp']
graphTimestamp = main.params['JSON']['graphTimestamp']
+ debug_mode = main.params['TEST']['debugMode']
+
+ #Threshold for the test
+ threshold_str = main.params['TEST']['singleSwThreshold']
+ threshold_obj = threshold_str.split(",")
+ threshold_min = int(threshold_obj[0])
+ threshold_max = int(threshold_obj[1])
+
#List of switch add latency collected from
#all iterations
latency_end_to_end_list = []
@@ -148,7 +156,12 @@
#Initialize assertion to TRUE
assertion = main.TRUE
-
+
+ local_time = time.strftime('%x %X')
+ if debug_mode == 'on':
+ main.ONOS1.tshark_pcap("eth0",
+ "/tmp/single_sw_lat_pcap"+local_time)
+
main.log.report("Latency of adding one switch")
for i in range(0, int(num_iter)):
@@ -308,7 +321,8 @@
int(delta_ofp_graph_2)+\
int(delta_ofp_graph_3)) / 3
- if avg_delta_ofp_graph > 0.0 and avg_delta_ofp_graph < 10000:
+ if avg_delta_ofp_graph > threshold_min \
+ and avg_delta_ofp_graph < threshold_max:
latency_ofp_to_graph_list.append(avg_delta_ofp_graph)
else:
main.log.info("Results for ofp-to-graph "+\
@@ -326,14 +340,11 @@
#NOTE: ofp - delta measurements are occasionally negative
# due to system time misalignment.
- #TODO: Implement ptp across all clusters
- #Just add the calculation to list for now
latency_ofp_to_device_list.append(avg_delta_ofp_device)
#TODO:
#Fetch logs upon threshold excess
-
main.log.info("ONOS1 delta end-to-end: "+
str(delta_graph_1) + " ms")
main.log.info("ONOS2 delta end-to-end: "+
@@ -355,12 +366,12 @@
main.log.info("ONOS3 delta device - t0: "+
str(delta_device_3) + " ms")
- main.log.info("ONOS1 delta OFP - device: "+
- str(delta_ofp_device_1) + " ms")
- main.log.info("ONOS2 delta OFP - device: "+
- str(delta_ofp_device_2) + " ms")
- main.log.info("ONOS3 delta OFP - device: "+
- str(delta_ofp_device_3) + " ms")
+ #main.log.info("ONOS1 delta OFP - device: "+
+ # str(delta_ofp_device_1) + " ms")
+ #main.log.info("ONOS2 delta OFP - device: "+
+ # str(delta_ofp_device_2) + " ms")
+ #main.log.info("ONOS3 delta OFP - device: "+
+ # str(delta_ofp_device_3) + " ms")
main.step("Remove switch from controller")
main.Mininet1.delete_sw_controller("s1")
@@ -426,21 +437,17 @@
len(latency_ofp_to_device_list))
main.log.report("Switch add - End-to-end latency: \n"+\
- "Min: "+str(latency_end_to_end_min)+"\n"+\
- "Max: "+str(latency_end_to_end_max)+"\n"+\
- "Avg: "+str(latency_end_to_end_avg))
+ "Min: "+str(latency_end_to_end_min)+" mx\n"+\
+ "Max: "+str(latency_end_to_end_max)+" ms\n"+\
+ "Avg: "+str(latency_end_to_end_avg)+" ms")
main.log.report("Switch add - OFP-to-Graph latency: \n"+\
- "Min: "+str(latency_ofp_to_graph_min)+"\n"+\
- "Max: "+str(latency_ofp_to_graph_max)+"\n"+\
- "Avg: "+str(latency_ofp_to_graph_avg))
- main.log.report("Switch add - OFP-to-Device latency: \n"+\
- "Min: "+str(latency_ofp_to_device_min)+"\n"+\
- "Max: "+str(latency_ofp_to_device_max)+"\n"+\
- "Avg: "+str(latency_ofp_to_device_avg))
+ "Min: "+str(latency_ofp_to_graph_min)+" ms \n"+\
+ "Max: "+str(latency_ofp_to_graph_max)+" ms\n"+\
+ "Avg: "+str(latency_ofp_to_graph_avg)+" ms")
main.log.report("Switch add - t0-to-Device latency: \n"+\
- "Min: "+str(latency_t0_to_device_min)+"\n"+\
- "Max: "+str(latency_t0_to_device_max)+"\n"+\
- "Avg: "+str(latency_t0_to_device_avg))
+ "Min: "+str(latency_t0_to_device_min)+" ms\n"+\
+ "Max: "+str(latency_t0_to_device_max)+" ms\n"+\
+ "Avg: "+str(latency_t0_to_device_avg)+" ms")
utilities.assert_equals(expect=main.TRUE, actual=assertion,
onpass="Switch latency test successful",
@@ -467,7 +474,8 @@
ONOS_user = main.params['CTRL']['user']
default_sw_port = main.params['CTRL']['port1']
-
+
+ assertion = main.TRUE
#Number of iterations of case
num_iter = main.params['TEST']['numIter']
@@ -475,6 +483,25 @@
#These are subject to change, hence moved into params
deviceTimestamp = main.params['JSON']['deviceTimestamp']
graphTimestamp = main.params['JSON']['graphTimestamp']
+
+ debug_mode = main.params['TEST']['debugMode']
+
+ local_time = time.strftime('%x %X')
+ if debug_mode == 'on':
+ main.ONOS1.tshark_pcap("eth0",
+ "/tmp/port_lat_pcap"+local_time)
+
+ #Threshold for this test case
+ up_threshold_str = main.params['TEST']['portUpThreshold']
+ down_threshold_str = main.params['TEST']['portDownThreshold']
+ up_threshold_obj = up_threshold_str.split(",")
+ down_threshold_obj = down_threshold_str.split(",")
+
+ up_threshold_min = int(up_threshold_obj[0])
+ up_threshold_max = int(up_threshold_obj[1])
+
+ down_threshold_min = int(down_threshold_obj[0])
+ down_threshold_max = int(down_threshold_obj[1])
#NOTE: Some hardcoded variables you may need to configure
# besides the params
@@ -493,6 +520,11 @@
main.Mininet1.assign_sw_controller(sw="2",ip1=ONOS1_ip,
port1=default_sw_port)
+ #Give enough time for metrics to propagate the
+ #assign controller event. Otherwise, these events may
+ #carry over to our measurements
+ time.sleep(10)
+
main.step("Verify switch is assigned correctly")
result_s1 = main.Mininet1.get_sw_controller(sw="s1")
result_s2 = main.Mininet1.get_sw_controller(sw="s2")
@@ -519,7 +551,7 @@
main.Mininet2.handle.sendline("sudo ifconfig "+
interface_config+" down")
main.Mininet2.handle.expect("\$")
- time.sleep(20)
+ time.sleep(10)
main.ONOS1.tshark_stop()
time.sleep(5)
@@ -597,11 +629,11 @@
pt_down_graph_to_ofp_avg =\
(int(pt_down_graph_to_ofp_1) +
int(pt_down_graph_to_ofp_2) +
- int(pt_down_graph_to_ofp_3)) / 3
+ int(pt_down_graph_to_ofp_3)) / 3.0
pt_down_device_to_ofp_avg = \
(int(pt_down_device_to_ofp_1) +
int(pt_down_device_to_ofp_2) +
- int(pt_down_device_to_ofp_3)) / 3
+ int(pt_down_device_to_ofp_3)) / 3.0
if pt_down_graph_to_ofp_avg > 0.0 and \
pt_down_graph_to_ofp_avg < 1000:
@@ -629,12 +661,14 @@
main.step("Enable port and obtain timestamp")
main.step("Starting wireshark capture for port status up")
main.ONOS1.tshark_grep("OFP 130 Port Status", tshark_port_up)
- time.sleep(10)
+ time.sleep(5)
main.Mininet2.handle.sendline("sudo ifconfig "+
interface_config+" up")
main.Mininet2.handle.expect("\$")
- time.sleep(20)
+ time.sleep(10)
+
+ main.ONOS1.tshark_stop()
os.system("scp "+ONOS_user+"@"+ONOS1_ip+":"+
tshark_port_up+" /tmp/")
@@ -706,8 +740,8 @@
float(pt_up_device_to_ofp_2) +
float(pt_up_device_to_ofp_3)) / 3
- if pt_up_graph_to_ofp_avg > 0 and \
- pt_up_graph_to_ofp_avg < 1000:
+ if pt_up_graph_to_ofp_avg > up_threshold_min and \
+ pt_up_graph_to_ofp_avg < up_threshold_max:
port_up_graph_to_ofp_list.append(
pt_up_graph_to_ofp_avg)
main.log.info("Port down: graph to ofp avg: "+
@@ -717,8 +751,8 @@
" exceeded the threshold: "+
str(pt_up_graph_to_ofp_avg))
- if pt_up_device_to_ofp_avg > 0 and \
- pt_up_device_to_ofp_avg < 1000:
+ if pt_up_device_to_ofp_avg > up_threshold_min and \
+ pt_up_device_to_ofp_avg < up_threshold_max:
port_up_device_to_ofp_list.append(
pt_up_device_to_ofp_avg)
main.log.info("Port up: device to ofp avg: "+
@@ -729,7 +763,12 @@
str(pt_up_device_to_ofp_avg))
#END ITERATION FOR LOOP
-
+
+ #Check all list for latency existence and set assertion
+ if (port_down_graph_to_ofp_list and port_down_device_to_ofp_list\
+ and port_up_graph_to_ofp_list and port_up_device_to_ofp_list):
+ assertion = main.TRUE
+
#Calculate and report latency measurements
port_down_graph_to_ofp_min = min(port_down_graph_to_ofp_list)
port_down_graph_to_ofp_max = max(port_down_graph_to_ofp_list)
@@ -737,12 +776,10 @@
(sum(port_down_graph_to_ofp_list) /
len(port_down_graph_to_ofp_list))
- main.log.report("Port down graph-to-ofp Min: "+
- str(port_down_graph_to_ofp_min))
- main.log.report("Port down graph-to-ofp Max: "+
- str(port_down_graph_to_ofp_max))
- main.log.report("Port down graph-to-ofp Avg: "+
- str(port_down_graph_to_ofp_avg))
+ main.log.report("Port down graph-to-ofp \nMin: "+
+ str(port_down_graph_to_ofp_min)+" ms \nMax: "+
+ str(port_down_graph_to_ofp_max)+" ms \nAvg: "+
+ str(port_down_graph_to_ofp_avg)+" ms")
port_down_device_to_ofp_min = min(port_down_device_to_ofp_list)
port_down_device_to_ofp_max = max(port_down_device_to_ofp_list)
@@ -750,19 +787,36 @@
(sum(port_down_device_to_ofp_list) /\
len(port_down_device_to_ofp_list))
- main.log.report("Port down device-to-ofp Min: "+
- str(port_down_device_to_ofp_min))
- main.log.report("Port down device-to-ofp Max: "+
- str(port_down_device_to_ofp_max))
- main.log.report("Port down device-to-ofp Avg: "+
- str(port_down_device_to_ofp_avg))
+ main.log.report("Port down device-to-ofp \nMin: "+
+ str(port_down_device_to_ofp_min)+" ms \nMax: "+
+ str(port_down_device_to_ofp_max)+" ms \nAvg: "+
+ str(port_down_device_to_ofp_avg)+" ms")
port_up_graph_to_ofp_min = min(port_up_graph_to_ofp_list)
port_up_graph_to_ofp_max = max(port_up_graph_to_ofp_list)
port_up_graph_to_ofp_avg = \
(sum(port_up_graph_to_ofp_list) /\
len(port_up_graph_to_ofp_list))
-
+
+ main.log.report("Port up graph-to-ofp \nMin: "+
+ str(port_up_graph_to_ofp_min)+" ms \nMax: "+
+ str(port_up_graph_to_ofp_max)+" ms \nAvg: "+
+ str(port_up_graph_to_ofp_avg)+" ms")
+
+ port_up_device_to_ofp_min = min(port_up_device_to_ofp_list)
+ port_up_device_to_ofp_max = max(port_up_device_to_ofp_list)
+ port_up_device_to_ofp_avg = \
+ (sum(port_up_device_to_ofp_list) /\
+ len(port_up_device_to_ofp_list))
+
+ main.log.report("Port up device-to-ofp \nMin: "+
+ str(port_up_device_to_ofp_min)+" ms \nMax: "+
+ str(port_up_device_to_ofp_max)+" ms \nAvg: "+
+ str(port_up_device_to_ofp_avg)+" ms")
+
+ utilities.assert_equals(expect=main.TRUE, actual=assertion,
+ onpass="Port discovery latency calculation successful",
+ onfail="Port discovery test failed")
def CASE4(self, main):
'''
@@ -794,6 +848,26 @@
deviceTimestamp = main.params['JSON']['deviceTimestamp']
linkTimestamp = main.params['JSON']['linkTimestamp']
graphTimestamp = main.params['JSON']['graphTimestamp']
+
+ debug_mode = main.params['TEST']['debugMode']
+
+ local_time = time.strftime('%x %X')
+ if debug_mode == 'on':
+ main.ONOS1.tshark_pcap("eth0",
+ "/tmp/link_lat_pcap"+local_time)
+
+ #Threshold for this test case
+ up_threshold_str = main.params['TEST']['linkUpThreshold']
+ down_threshold_str = main.params['TEST']['linkDownThreshold']
+
+ up_threshold_obj = up_threshold_str.split(",")
+ down_threshold_obj = down_threshold_str.split(",")
+
+ up_threshold_min = int(up_threshold_obj[0])
+ up_threshold_max = int(up_threshold_obj[1])
+
+ down_threshold_min = int(down_threshold_obj[0])
+ down_threshold_max = int(down_threshold_obj[1])
assertion = main.TRUE
#Link event timestamp to system time list
@@ -972,14 +1046,22 @@
link_down_lat_link3) / 3.0
#Set threshold and append latency to list
- if link_down_lat_graph_avg > 0.0 and\
- link_down_lat_graph_avg < 30000:
+ if link_down_lat_graph_avg > down_threshold_min and\
+ link_down_lat_graph_avg < down_threshold_max:
link_down_graph_to_system_list.append(
link_down_lat_graph_avg)
- if link_down_lat_link_avg > 0.0 and\
- link_down_lat_link_avg < 30000:
+ else:
+ main.log.info("Link down latency exceeded threshold")
+ main.log.info("Results for iteration "+str(i)+
+ "have been omitted")
+ if link_down_lat_link_avg > down_threshold_min and\
+ link_down_lat_link_avg < down_threshold_max:
link_down_link_to_system_list.append(
link_down_lat_link_avg)
+ else:
+ main.log.info("Link down latency exceeded threshold")
+ main.log.info("Results for iteration "+str(i)+
+ "have been omitted")
#NOTE: To remove loss rate and measure latency:
# 'sh tc qdisc del dev s1-eth1 root'
@@ -1089,24 +1171,25 @@
link_up_lat_device1 = 0
link_up_lat_device2 = 0
link_up_lat_device3 = 0
-
- main.log.info("Link up latency ONOS1 iteration "+
+
+ if debug_mode == 'on':
+ main.log.info("Link up latency ONOS1 iteration "+
str(i)+" (end-to-end): "+
str(link_up_lat_graph1)+" ms")
- main.log.info("Link up latency ONOS2 iteration "+
+ main.log.info("Link up latency ONOS2 iteration "+
str(i)+" (end-to-end): "+
str(link_up_lat_graph2)+" ms")
- main.log.info("Link up latency ONOS3 iteration "+
+ main.log.info("Link up latency ONOS3 iteration "+
str(i)+" (end-to-end): "+
str(link_up_lat_graph3)+" ms")
- main.log.info("Link up latency ONOS1 iteration "+
+ main.log.info("Link up latency ONOS1 iteration "+
str(i)+" (link-event-to-system-timestamp): "+
str(link_up_lat_link1)+" ms")
- main.log.info("Link up latency ONOS2 iteration "+
+ main.log.info("Link up latency ONOS2 iteration "+
str(i)+" (link-event-to-system-timestamp): "+
str(link_up_lat_link2)+" ms")
- main.log.info("Link up latency ONOS3 iteration "+
+ main.log.info("Link up latency ONOS3 iteration "+
str(i)+" (link-event-to-system-timestamp): "+
str(link_up_lat_link3))
@@ -1121,14 +1204,22 @@
link_up_lat_link3) / 3.0
#Set threshold and append latency to list
- if link_up_lat_graph_avg > 0.0 and\
- link_up_lat_graph_avg < 30000:
+ if link_up_lat_graph_avg > up_threshold_min and\
+ link_up_lat_graph_avg < up_threshold_max:
link_up_graph_to_system_list.append(
link_up_lat_graph_avg)
- if link_up_lat_link_avg > 0.0 and\
- link_up_lat_link_avg < 30000:
+ else:
+ main.log.info("Link up latency exceeded threshold")
+ main.log.info("Results for iteration "+str(i)+
+ "have been omitted")
+ if link_up_lat_link_avg > up_threshold_min and\
+ link_up_lat_link_avg < up_threshold_max:
link_up_link_to_system_list.append(
link_up_lat_link_avg)
+ else:
+ main.log.info("Link up latency exceeded threshold")
+ main.log.info("Results for iteration "+str(i)+
+ "have been omitted")
#Calculate min, max, avg of list and report
link_down_min = min(link_down_graph_to_system_list)
@@ -1140,28 +1231,33 @@
link_up_avg = sum(link_up_graph_to_system_list) / \
len(link_up_graph_to_system_list)
- main.log.report("Link down latency - Min: "+
- str(link_down_min)+"ms Max: "+
- str(link_down_max)+"ms Avg: "+
- str(link_down_avg)+"ms")
- main.log.report("Link up latency - Min: "+
- str(link_up_min)+"ms Max: "+
- str(link_up_max)+"ms Avg: "+
- str(link_up_avg)+"ms")
+ main.log.report("Link down latency - \nMin: "+
+ str(link_down_min)+" ms \nMax: "+
+ str(link_down_max)+" ms \nAvg: "+
+ str(link_down_avg)+" ms")
+ main.log.report("Link up latency - \nMin: "+
+ str(link_up_min)+" ms \nMax: "+
+ str(link_up_max)+" ms \nAvg: "+
+ str(link_up_avg)+" ms")
+
+ utilities.assert_equals(expect=main.TRUE, actual=assertion,
+ onpass="Link discovery latency calculation successful",
+ onfail="Link discovery latency case failed")
def CASE5(self, main):
'''
100 Switch discovery latency
Important:
- If a simple topology was used in previous cases,
- you will need to change the topology file in the
- params for this case to proceed
-
This test case can be potentially dangerous if
your machine has previously set iptables rules.
One of the steps of the test case will flush
all existing iptables rules.
+ Note:
+ You can specify the number of switches in the
+ params file to adjust the switch discovery size
+ (and specify the corresponding topology in Mininet1
+ .topo file)
'''
import time
import subprocess
@@ -1185,13 +1281,28 @@
#These are subject to change, hence moved into params
deviceTimestamp = main.params['JSON']['deviceTimestamp']
graphTimestamp = main.params['JSON']['graphTimestamp']
-
+
+ debug_mode = main.params['TEST']['debugMode']
+
+ local_time = time.strftime('%x %X')
+ if debug_mode == 'on':
+ main.ONOS1.tshark_pcap("eth0",
+ "/tmp/100_sw_lat_pcap"+local_time)
+
+ #Threshold for this test case
+ sw_disc_threshold_str = main.params['TEST']['swDisc100Threshold']
+ sw_disc_threshold_obj = sw_disc_threshold_str.split(",")
+ sw_disc_threshold_min = int(sw_disc_threshold_obj[0])
+ sw_disc_threshold_max = int(sw_disc_threshold_obj[1])
+
tshark_ofp_output = "/tmp/tshark_ofp_"+num_sw+"sw.txt"
tshark_tcp_output = "/tmp/tshark_tcp_"+num_sw+"sw.txt"
tshark_ofp_result_list = []
tshark_tcp_result_list = []
+ sw_discovery_lat_list = []
+
main.case(num_sw+" Switch discovery latency")
main.step("Assigning all switches to ONOS1")
for i in range(1, int(num_sw)+1):
@@ -1230,6 +1341,8 @@
" --dport "+default_sw_port+" -j DROP")
main.ONOS1.handle.expect("\$")
#Give time to allow rule to take effect
+ #NOTE: Sleep period may need to be configured
+ # based on the number of switches in the topology
main.log.info("Please wait for switch connection to "+
"time out")
time.sleep(60)
@@ -1323,24 +1436,23 @@
tshark_ofp_output+" /tmp/")
os.system("scp "+ONOS_user+"@"+ONOS1_ip+":"+
tshark_tcp_output+" /tmp/")
- ofp_file = open(tshark_ofp_output, 'r')
- #The following is for information purpose only.
#TODO: Automate OFP output analysis
- main.log.info("Tshark OFP Vendor output: ")
- for line in ofp_file:
- tshark_ofp_result_list.append(line)
- main.log.info(line)
+ #Debug mode - print out packets captured at runtime
+ if debug_mode == 'on':
+ ofp_file = open(tshark_ofp_output, 'r')
+ main.log.info("Tshark OFP Vendor output: ")
+ for line in ofp_file:
+ tshark_ofp_result_list.append(line)
+ main.log.info(line)
+ ofp_file.close()
- ofp_file.close()
-
- tcp_file = open(tshark_tcp_output, 'r')
- main.log.info("Tshark TCP 74 output: ")
- for line in tcp_file:
- tshark_tcp_result_list.append(line)
- main.log.info(line)
-
- tcp_file.close()
+ tcp_file = open(tshark_tcp_output, 'r')
+ main.log.info("Tshark TCP 74 output: ")
+ for line in tcp_file:
+ tshark_tcp_result_list.append(line)
+ main.log.info(line)
+ tcp_file.close()
json_obj_1 = json.loads(json_str_topology_metrics_1)
json_obj_2 = json.loads(json_str_topology_metrics_2)
@@ -1353,16 +1465,33 @@
graph_timestamp_3 = \
json_obj_3[graphTimestamp]['value']
- main.log.info(
- int(graph_timestamp_1) - int(t0_system))
- main.log.info(
- int(graph_timestamp_2) - int(t0_system))
- main.log.info(
- int(graph_timestamp_3) - int(t0_system))
+ graph_lat_1 = int(graph_timestamp_1) - int(t0_system)
+ graph_lat_2 = int(graph_timestamp_2) - int(t0_system)
+ graph_lat_3 = int(graph_timestamp_3) - int(t0_system)
+ avg_graph_lat = \
+ (int(graph_lat_1) +\
+ int(graph_lat_2) +\
+ int(graph_lat_3)) / 3
+
+ if avg_graph_lat > sw_disc_threshold_min \
+ and avg_graph_lat < sw_disc_threshold_max:
+ sw_discovery_lat_list.append(
+ avg_graph_lat)
+ else:
+ main.log.info("100 Switch discovery latency "+
+ "exceeded the threshold.")
+
+ #END ITERATION FOR LOOP
+ sw_lat_min = min(sw_discovery_lat_list)
+ sw_lat_max = max(sw_discovery_lat_list)
+ sw_lat_avg = sum(sw_discovery_lat_list) /\
+ len(sw_discovery_lat_list)
-
-
+ main.log.report("100 Switch discovery lat - \n"+\
+ "Min: "+str(sw_lat_min)+" ms\n"+\
+ "Max: "+str(sw_lat_max)+" ms\n"+\
+ "Avg: "+str(sw_lat_avg)+" ms\n")
diff --git a/TestON/tests/TopoPerfNextSingleNode/TopoPerfNextSingleNode.params b/TestON/tests/TopoPerfNextSingleNode/TopoPerfNextSingleNode.params
new file mode 100644
index 0000000..f797706
--- /dev/null
+++ b/TestON/tests/TopoPerfNextSingleNode/TopoPerfNextSingleNode.params
@@ -0,0 +1,45 @@
+<PARAMS>
+ <testcases>1,2,3,4,5</testcases>
+
+ <ENV>
+ <cellName>topo_perf_test</cellName>
+ </ENV>
+
+ <GIT>
+ #autoPull 'on' or 'off'
+ <autoPull>off</autoPull>
+ <checkout>master</checkout>
+ </GIT>
+
+ <CTRL>
+ <user>admin</user>
+ <ip1>10.128.174.1</ip1>
+ <port1>6633</port1>
+ <ip2>10.128.174.2</ip2>
+ <port2>6633</port2>
+ <ip3>10.128.174.3</ip3>
+ <port3>6633</port3>
+ </CTRL>
+
+ <MN>
+ <ip1>10.128.10.90</ip1>
+ <ip2>10.128.10.91</ip2>
+ </MN>
+
+ <BENCH>
+ <ip>10.128.174.10</ip>
+ </BENCH>
+
+ <TEST>
+ #Number of times to iterate each case
+ <numIter>5</numIter>
+ <numSwitch>100</numSwitch>
+ </TEST>
+
+ <JSON>
+ <deviceTimestamp>topologyDeviceEventTimestamp</deviceTimestamp>
+ <hostTimestamp>topologyHostEventTimestamp</hostTimestamp>
+ <linkTimestamp>topologyLinkEventTimestamp</linkTimestamp>
+ <graphTimestamp>topologyGraphEventTimestamp</graphTimestamp>
+ </JSON>
+</PARAMS>
diff --git a/TestON/tests/TopoPerfNextSingleNode/TopoPerfNextSingleNode.py b/TestON/tests/TopoPerfNextSingleNode/TopoPerfNextSingleNode.py
new file mode 100644
index 0000000..d10c0ee
--- /dev/null
+++ b/TestON/tests/TopoPerfNextSingleNode/TopoPerfNextSingleNode.py
@@ -0,0 +1,1108 @@
+#TopoPerfNext
+#
+#Topology Performance test for ONOS-next
+#*** Revised for single node operation ***
+#
+#andrew@onlab.us
+
+import time
+import sys
+import os
+import re
+
+class TopoPerfNextSingleNode:
+ def __init__(self):
+ self.default = ''
+
+ def CASE1(self, main):
+ '''
+ ONOS startup sequence
+ '''
+ import time
+
+ cell_name = main.params['ENV']['cellName']
+
+ git_pull = main.params['GIT']['autoPull']
+ checkout_branch = main.params['GIT']['checkout']
+
+ ONOS1_ip = main.params['CTRL']['ip1']
+ MN1_ip = main.params['MN']['ip1']
+ BENCH_ip = main.params['BENCH']['ip']
+
+ main.case("Setting up test environment")
+
+ main.step("Creating cell file")
+ cell_file_result = main.ONOSbench.create_cell_file(
+ BENCH_ip, cell_name, MN1_ip, "onos-core",
+ ONOS1_ip)
+
+ main.step("Applying cell file to environment")
+ cell_apply_result = main.ONOSbench.set_cell(cell_name)
+ verify_cell_result = main.ONOSbench.verify_cell()
+
+ main.step("Git checkout and pull "+checkout_branch)
+ if git_pull == 'on':
+ checkout_result = \
+ main.ONOSbench.git_checkout(checkout_branch)
+ pull_result = main.ONOSbench.git_pull()
+ else:
+ checkout_result = main.TRUE
+ pull_result = main.TRUE
+ main.log.info("Skipped git checkout and pull")
+
+ main.step("Using mvn clean & install")
+ #mvn_result = main.ONOSbench.clean_install()
+ mvn_result = main.TRUE
+
+ main.step("Creating ONOS package")
+ package_result = main.ONOSbench.onos_package()
+
+ main.step("Installing ONOS package")
+ install1_result = main.ONOSbench.onos_install(node=ONOS1_ip)
+
+ #NOTE: This step may be unnecessary
+ #main.step("Starting ONOS service")
+ #start_result = main.ONOSbench.onos_start(ONOS1_ip)
+
+ main.step("Set cell for ONOS cli env")
+ main.ONOS1cli.set_cell(cell_name)
+
+ time.sleep(10)
+
+ main.step("Start onos cli")
+ cli1 = main.ONOS1cli.start_onos_cli(ONOS1_ip)
+
+ main.step("Enable metrics feature")
+ main.ONOS1cli.feature_install("onos-app-metrics")
+
+ utilities.assert_equals(expect=main.TRUE,
+ actual= cell_file_result and cell_apply_result and\
+ verify_cell_result and checkout_result and\
+ pull_result and mvn_result and\
+ install1_result,
+ onpass="ONOS started successfully",
+ onfail="Failed to start ONOS")
+
+ def CASE2(self, main):
+ '''
+ Assign s1 to ONOS1 and measure latency
+
+ There are 4 levels of latency measurements to this test:
+ 1) End-to-end measurement: Complete end-to-end measurement
+ from TCP (SYN/ACK) handshake to Graph change
+ 2) OFP-to-graph measurement: 'ONOS processing' snippet of
+ measurement from OFP Vendor message to Graph change
+ 3) OFP-to-device measurement: 'ONOS processing without
+ graph change' snippet of measurement from OFP vendor
+ message to Device change timestamp
+ 4) T0-to-device measurement: Measurement that includes
+ the switch handshake to devices timestamp without
+ the graph view change. (TCP handshake -> Device
+ change)
+ '''
+ import time
+ import subprocess
+ import json
+ import requests
+ import os
+
+ ONOS1_ip = main.params['CTRL']['ip1']
+ ONOS_user = main.params['CTRL']['user']
+
+ default_sw_port = main.params['CTRL']['port1']
+
+ #Number of iterations of case
+ num_iter = main.params['TEST']['numIter']
+
+ #Timestamp 'keys' for json metrics output.
+ #These are subject to change, hence moved into params
+ deviceTimestamp = main.params['JSON']['deviceTimestamp']
+ graphTimestamp = main.params['JSON']['graphTimestamp']
+
+ #List of switch add latency collected from
+ #all iterations
+ latency_end_to_end_list = []
+ latency_ofp_to_graph_list = []
+ latency_ofp_to_device_list = []
+ latency_t0_to_device_list = []
+
+ #Directory/file to store tshark results
+ tshark_of_output = "/tmp/tshark_of_topo.txt"
+ tshark_tcp_output = "/tmp/tshark_tcp_topo.txt"
+
+ #String to grep in tshark output
+ tshark_tcp_string = "TCP 74 "+default_sw_port
+ tshark_of_string = "OFP 86 Vendor"
+
+ #Initialize assertion to TRUE
+ assertion = main.TRUE
+
+ main.log.report("Latency of adding one switch")
+
+ for i in range(0, int(num_iter)):
+ main.log.info("Starting tshark capture")
+
+ #* TCP [ACK, SYN] is used as t0_a, the
+ # very first "exchange" between ONOS and
+ # the switch for end-to-end measurement
+ #* OFP [Stats Reply] is used for t0_b
+ # the very last OFP message between ONOS
+ # and the switch for ONOS measurement
+ main.ONOS1.tshark_grep(tshark_tcp_string,
+ tshark_tcp_output)
+ main.ONOS1.tshark_grep(tshark_of_string,
+ tshark_of_output)
+
+ #Wait and ensure tshark is started and
+ #capturing
+ time.sleep(10)
+
+ main.log.info("Assigning s1 to controller")
+
+ main.Mininet1.assign_sw_controller(sw="1",
+ ip1=ONOS1_ip, port1=default_sw_port)
+
+ #Wait and ensure switch is assigned
+ #before stopping tshark
+ time.sleep(30)
+
+ main.log.info("Stopping all Tshark processes")
+ main.ONOS1.stop_tshark()
+
+ #tshark output is saved in ONOS. Use subprocess
+ #to copy over files to TestON for parsing
+ main.log.info("Copying over tshark files")
+
+ #TCP CAPTURE ****
+ #Copy the tshark output from ONOS machine to
+ #TestON machine in tshark_tcp_output directory>file
+ os.system("scp "+ONOS_user+"@"+ONOS1_ip+":"+
+ tshark_tcp_output+" /tmp/")
+ tcp_file = open(tshark_tcp_output, 'r')
+ temp_text = tcp_file.readline()
+ temp_text = temp_text.split(" ")
+
+ main.log.info("Object read in from TCP capture: "+
+ str(temp_text))
+ if len(temp_text) > 1:
+ t0_tcp = float(temp_text[1])*1000.0
+ else:
+ main.log.error("Tshark output file for TCP"+
+ " returned unexpected results")
+ t0_tcp = 0
+ assertion = main.FALSE
+
+ tcp_file.close()
+ #****************
+
+ #OF CAPTURE ****
+ os.system("scp "+ONOS_user+"@"+ONOS1_ip+":"+
+ tshark_of_output+" /tmp/")
+ of_file = open(tshark_of_output, 'r')
+
+ line_ofp = ""
+ #Read until last line of file
+ while True:
+ temp_text = of_file.readline()
+ if temp_text !='':
+ line_ofp = temp_text
+ else:
+ break
+ obj = line_ofp.split(" ")
+
+ main.log.info("Object read in from OFP capture: "+
+ str(line_ofp))
+
+ if len(line_ofp) > 1:
+ t0_ofp = float(obj[1])*1000.0
+ else:
+ main.log.error("Tshark output file for OFP"+
+ " returned unexpected results")
+ t0_ofp = 0
+ assertion = main.FALSE
+
+ of_file.close()
+ #****************
+
+ json_str_1 = main.ONOS1cli.topology_events_metrics()
+
+ json_obj_1 = json.loads(json_str_1)
+
+ #Obtain graph timestamp. This timestsamp captures
+ #the epoch time at which the topology graph was updated.
+ graph_timestamp_1 = \
+ json_obj_1[graphTimestamp]['value']
+
+ #Obtain device timestamp. This timestamp captures
+ #the epoch time at which the device event happened
+ device_timestamp_1 = \
+ json_obj_1[deviceTimestamp]['value']
+
+ #t0 to device processing latency
+ delta_device_1 = int(device_timestamp_1) - int(t0_tcp)
+
+ #Get average of delta from all instances
+ avg_delta_device = (int(delta_device_1))
+
+ #Ensure avg delta meets the threshold before appending
+ if avg_delta_device > 0.0 and avg_delta_device < 10000:
+ latency_t0_to_device_list.append(avg_delta_device)
+ else:
+ main.log.info("Results for t0-to-device ignored"+\
+ "due to excess in threshold")
+
+ #t0 to graph processing latency (end-to-end)
+ delta_graph_1 = int(graph_timestamp_1) - int(t0_tcp)
+
+ #Get average of delta from all instances
+ avg_delta_graph = int(delta_graph_1)
+
+ #Ensure avg delta meets the threshold before appending
+ if avg_delta_graph > 0.0 and avg_delta_graph < 10000:
+ latency_end_to_end_list.append(avg_delta_graph)
+ else:
+ main.log.info("Results for end-to-end ignored"+\
+ "due to excess in threshold")
+
+ #ofp to graph processing latency (ONOS processing)
+ delta_ofp_graph_1 = int(graph_timestamp_1) - int(t0_ofp)
+
+ avg_delta_ofp_graph = int(delta_ofp_graph_1)
+
+ if avg_delta_ofp_graph > 0.0 and avg_delta_ofp_graph < 10000:
+ latency_ofp_to_graph_list.append(avg_delta_ofp_graph)
+ else:
+ main.log.info("Results for ofp-to-graph "+\
+ "ignored due to excess in threshold")
+
+ #ofp to device processing latency (ONOS processing)
+ delta_ofp_device_1 = float(device_timestamp_1) - float(t0_ofp)
+
+ avg_delta_ofp_device = float(delta_ofp_device_1)
+
+ #NOTE: ofp - delta measurements are occasionally negative
+ # due to system time misalignment.
+ latency_ofp_to_device_list.append(avg_delta_ofp_device)
+
+ #TODO:
+ #Fetch logs upon threshold excess
+
+ main.log.info("ONOS1 delta end-to-end: "+
+ str(delta_graph_1) + " ms")
+
+ main.log.info("ONOS1 delta OFP - graph: "+
+ str(delta_ofp_graph_1) + " ms")
+
+ main.log.info("ONOS1 delta device - t0: "+
+ str(delta_device_1) + " ms")
+
+ main.step("Remove switch from controller")
+ main.Mininet1.delete_sw_controller("s1")
+
+ time.sleep(5)
+
+ #END of for loop iteration
+
+ #If there is at least 1 element in each list,
+ #pass the test case
+ if len(latency_end_to_end_list) > 0 and\
+ len(latency_ofp_to_graph_list) > 0 and\
+ len(latency_ofp_to_device_list) > 0 and\
+ len(latency_t0_to_device_list) > 0:
+ assertion = main.TRUE
+ elif len(latency_end_to_end_list) == 0:
+ #The appending of 0 here is to prevent
+ #the min,max,sum functions from failing
+ #below
+ latency_end_to_end_list.append(0)
+ assertion = main.FALSE
+ elif len(latency_ofp_to_graph_list) == 0:
+ latency_ofp_to_graph_list.append(0)
+ assertion = main.FALSE
+ elif len(latency_ofp_to_device_list) == 0:
+ latency_ofp_to_device_list.append(0)
+ assertion = main.FALSE
+ elif len(latency_t0_to_device_list) == 0:
+ latency_t0_to_device_list.append(0)
+ assertion = main.FALSE
+
+ #Calculate min, max, avg of latency lists
+ latency_end_to_end_max = \
+ int(max(latency_end_to_end_list))
+ latency_end_to_end_min = \
+ int(min(latency_end_to_end_list))
+ latency_end_to_end_avg = \
+ (int(sum(latency_end_to_end_list)) / \
+ len(latency_end_to_end_list))
+
+ latency_ofp_to_graph_max = \
+ int(max(latency_ofp_to_graph_list))
+ latency_ofp_to_graph_min = \
+ int(min(latency_ofp_to_graph_list))
+ latency_ofp_to_graph_avg = \
+ (int(sum(latency_ofp_to_graph_list)) / \
+ len(latency_ofp_to_graph_list))
+
+ latency_ofp_to_device_max = \
+ int(max(latency_ofp_to_device_list))
+ latency_ofp_to_device_min = \
+ int(min(latency_ofp_to_device_list))
+ latency_ofp_to_device_avg = \
+ (int(sum(latency_ofp_to_device_list)) / \
+ len(latency_ofp_to_device_list))
+
+ latency_t0_to_device_max = \
+ float(max(latency_t0_to_device_list))
+ latency_t0_to_device_min = \
+ float(min(latency_t0_to_device_list))
+ latency_t0_to_device_avg = \
+ (float(sum(latency_t0_to_device_list)) / \
+ len(latency_ofp_to_device_list))
+
+ main.log.report("Switch add - End-to-end latency: \n"+\
+ "Min: "+str(latency_end_to_end_min)+"\n"+\
+ "Max: "+str(latency_end_to_end_max)+"\n"+\
+ "Avg: "+str(latency_end_to_end_avg))
+ main.log.report("Switch add - OFP-to-Graph latency: \n"+\
+ "Min: "+str(latency_ofp_to_graph_min)+"\n"+\
+ "Max: "+str(latency_ofp_to_graph_max)+"\n"+\
+ "Avg: "+str(latency_ofp_to_graph_avg))
+ main.log.report("Switch add - t0-to-Device latency: \n"+\
+ "Min: "+str(latency_t0_to_device_min)+"\n"+\
+ "Max: "+str(latency_t0_to_device_max)+"\n"+\
+ "Avg: "+str(latency_t0_to_device_avg))
+
+ utilities.assert_equals(expect=main.TRUE, actual=assertion,
+ onpass="Switch latency test successful",
+ onfail="Switch latency test failed")
+
+ def CASE3(self, main):
+ '''
+ Bring port up / down and measure latency.
+ Port enable / disable is simulated by ifconfig up / down
+
+ In ONOS-next, we must ensure that the port we are
+ manipulating is connected to another switch with a valid
+ connection. Otherwise, graph view will not be updated.
+ '''
+ import time
+ import subprocess
+ import os
+ import requests
+ import json
+
+ ONOS1_ip = main.params['CTRL']['ip1']
+ ONOS_user = main.params['CTRL']['user']
+
+ default_sw_port = main.params['CTRL']['port1']
+
+ assertion = main.TRUE
+ #Number of iterations of case
+ num_iter = main.params['TEST']['numIter']
+
+ #Timestamp 'keys' for json metrics output.
+ #These are subject to change, hence moved into params
+ deviceTimestamp = main.params['JSON']['deviceTimestamp']
+ graphTimestamp = main.params['JSON']['graphTimestamp']
+
+ #NOTE: Some hardcoded variables you may need to configure
+ # besides the params
+
+ tshark_port_status = "OFP 130 Port Status"
+
+ tshark_port_up = "/tmp/tshark_port_up.txt"
+ tshark_port_down = "/tmp/tshark_port_down.txt"
+ interface_config = "s1-eth1"
+
+ main.log.report("Port enable / disable latency")
+
+ main.step("Assign switches s1 and s2 to controller 1")
+ main.Mininet1.assign_sw_controller(sw="1",ip1=ONOS1_ip,
+ port1=default_sw_port)
+ main.Mininet1.assign_sw_controller(sw="2",ip1=ONOS1_ip,
+ port1=default_sw_port)
+
+ #Give enough time for metrics to propagate the
+ #assign controller event. Otherwise, these events may
+ #carry over to our measurements
+ time.sleep(10)
+
+ main.step("Verify switch is assigned correctly")
+ result_s1 = main.Mininet1.get_sw_controller(sw="s1")
+ result_s2 = main.Mininet1.get_sw_controller(sw="s2")
+ if result_s1 == main.FALSE or result_s2 == main.FALSE:
+ main.log.info("Switch s1 was not assigned correctly")
+ assertion = main.FALSE
+ else:
+ main.log.info("Switch s1 was assigned correctly")
+
+ port_up_device_to_ofp_list = []
+ port_up_graph_to_ofp_list = []
+ port_down_device_to_ofp_list = []
+ port_down_graph_to_ofp_list = []
+
+ for i in range(0, int(num_iter)):
+ main.step("Starting wireshark capture for port status down")
+ main.ONOS1.tshark_grep(tshark_port_status,
+ tshark_port_down)
+
+ time.sleep(10)
+
+ #Disable interface that is connected to switch 2
+ main.step("Disable port: "+interface_config)
+ main.Mininet2.handle.sendline("sudo ifconfig "+
+ interface_config+" down")
+ main.Mininet2.handle.expect("\$")
+ time.sleep(10)
+
+ main.ONOS1.tshark_stop()
+ time.sleep(5)
+
+ #Copy tshark output file from ONOS to TestON instance
+ #/tmp directory
+ os.system("scp "+ONOS_user+"@"+ONOS1_ip+":"+
+ tshark_port_down+" /tmp/")
+
+ f_port_down = open(tshark_port_down, 'r')
+ #Get first line of port down event from tshark
+ f_line = f_port_down.readline()
+ obj_down = f_line.split(" ")
+ if len(f_line) > 0:
+ timestamp_begin_pt_down = int(float(obj_down[1]))*1000
+ main.log.info("Port down begin timestamp: "+
+ str(timestamp_begin_pt_down))
+ else:
+ main.log.info("Tshark output file returned unexpected"+
+ " results: "+str(obj_down))
+ timestamp_begin_pt_down = 0
+
+ f_port_down.close()
+
+ main.log.info("TEST tshark obj: "+str(obj_down))
+
+ main.step("Obtain t1 by REST call")
+ json_str_1 = main.ONOS1cli.topology_events_metrics()
+
+ main.log.info("TEST json_str 1: "+str(json_str_1))
+
+ json_obj_1 = json.loads(json_str_1)
+
+ time.sleep(5)
+
+ #Obtain graph timestamp. This timestsamp captures
+ #the epoch time at which the topology graph was updated.
+ graph_timestamp_1 = \
+ json_obj_1[graphTimestamp]['value']
+
+ #Obtain device timestamp. This timestamp captures
+ #the epoch time at which the device event happened
+ device_timestamp_1 = \
+ json_obj_1[deviceTimestamp]['value']
+
+ #Get delta between graph event and OFP
+ pt_down_graph_to_ofp_1 = int(graph_timestamp_1) -\
+ int(timestamp_begin_pt_down)
+
+ #Get delta between device event and OFP
+ pt_down_device_to_ofp_1 = int(device_timestamp_1) -\
+ int(timestamp_begin_pt_down)
+
+ #Caluclate average across clusters
+ pt_down_graph_to_ofp_avg = int(pt_down_graph_to_ofp_1)
+ pt_down_device_to_ofp_avg = int(pt_down_device_to_ofp_1)
+
+ if pt_down_graph_to_ofp_avg > 0.0 and \
+ pt_down_graph_to_ofp_avg < 1000:
+ port_down_graph_to_ofp_list.append(
+ pt_down_graph_to_ofp_avg)
+ main.log.info("Port down: graph to ofp avg: "+
+ str(pt_down_graph_to_ofp_avg) + " ms")
+ else:
+ main.log.info("Average port down graph-to-ofp result" +
+ " exceeded the threshold: "+
+ str(pt_down_graph_to_ofp_avg))
+
+ if pt_down_device_to_ofp_avg > 0 and \
+ pt_down_device_to_ofp_avg < 1000:
+ port_down_device_to_ofp_list.append(
+ pt_down_device_to_ofp_avg)
+ main.log.info("Port down: device to ofp avg: "+
+ str(pt_down_device_to_ofp_avg) + " ms")
+ else:
+ main.log.info("Average port down device-to-ofp result" +
+ " exceeded the threshold: "+
+ str(pt_down_device_to_ofp_avg))
+
+ #Port up events
+ main.step("Enable port and obtain timestamp")
+ main.step("Starting wireshark capture for port status up")
+ main.ONOS1.tshark_grep("OFP 130 Port Status", tshark_port_up)
+ time.sleep(5)
+
+ main.Mininet2.handle.sendline("sudo ifconfig "+
+ interface_config+" up")
+ main.Mininet2.handle.expect("\$")
+ time.sleep(10)
+
+ main.ONOS1.tshark_stop()
+
+ os.system("scp "+ONOS_user+"@"+ONOS1_ip+":"+
+ tshark_port_up+" /tmp/")
+
+ f_port_up = open(tshark_port_up, 'r')
+ f_line = f_port_up.readline()
+ obj_up = f_line.split(" ")
+ if len(f_line) > 0:
+ timestamp_begin_pt_up = int(float(obj_up[1]))*1000
+ main.log.info("Port up begin timestamp: "+
+ str(timestamp_begin_pt_up))
+ else:
+ main.log.info("Tshark output file returned unexpected"+
+ " results.")
+ timestamp_begin_pt_up = 0
+
+ f_port_up.close()
+
+ main.step("Obtain t1 by REST call")
+ json_str_1 = main.ONOS1cli.topology_events_metrics()
+
+ json_obj_1 = json.loads(json_str_1)
+
+ #Obtain graph timestamp. This timestsamp captures
+ #the epoch time at which the topology graph was updated.
+ graph_timestamp_1 = \
+ json_obj_1[graphTimestamp]['value']
+
+ #Obtain device timestamp. This timestamp captures
+ #the epoch time at which the device event happened
+ device_timestamp_1 = \
+ json_obj_1[deviceTimestamp]['value']
+
+ #Get delta between graph event and OFP
+ pt_up_graph_to_ofp_1 = int(graph_timestamp_1) -\
+ int(timestamp_begin_pt_up)
+
+ #Get delta between device event and OFP
+ pt_up_device_to_ofp_1 = int(device_timestamp_1) -\
+ int(timestamp_begin_pt_up)
+
+ pt_up_graph_to_ofp_avg = float(pt_up_graph_to_ofp_1)
+
+ pt_up_device_to_ofp_avg = float(pt_up_device_to_ofp_1)
+
+ if pt_up_graph_to_ofp_avg > 0 and \
+ pt_up_graph_to_ofp_avg < 1000:
+ port_up_graph_to_ofp_list.append(
+ pt_up_graph_to_ofp_avg)
+ main.log.info("Port down: graph to ofp avg: "+
+ str(pt_up_graph_to_ofp_avg) + " ms")
+ else:
+ main.log.info("Average port up graph-to-ofp result"+
+ " exceeded the threshold: "+
+ str(pt_up_graph_to_ofp_avg))
+
+ if pt_up_device_to_ofp_avg > 0 and \
+ pt_up_device_to_ofp_avg < 1000:
+ port_up_device_to_ofp_list.append(
+ pt_up_device_to_ofp_avg)
+ main.log.info("Port up: device to ofp avg: "+
+ str(pt_up_device_to_ofp_avg) + " ms")
+ else:
+ main.log.info("Average port up device-to-ofp result"+
+ " exceeded the threshold: "+
+ str(pt_up_device_to_ofp_avg))
+
+ #END ITERATION FOR LOOP
+
+ #Check all list for latency existence and set assertion
+ if (port_down_graph_to_ofp_list and port_down_device_to_ofp_list\
+ and port_up_graph_to_ofp_list and port_up_device_to_ofp_list):
+ assertion = main.TRUE
+
+ #Calculate and report latency measurements
+ port_down_graph_to_ofp_min = min(port_down_graph_to_ofp_list)
+ port_down_graph_to_ofp_max = max(port_down_graph_to_ofp_list)
+ port_down_graph_to_ofp_avg = \
+ (sum(port_down_graph_to_ofp_list) /
+ len(port_down_graph_to_ofp_list))
+
+ main.log.report("Port down graph-to-ofp Min: "+
+ str(port_down_graph_to_ofp_min)+" ms Max: "+
+ str(port_down_graph_to_ofp_max)+" ms Avg: "+
+ str(port_down_graph_to_ofp_avg))
+
+ port_down_device_to_ofp_min = min(port_down_device_to_ofp_list)
+ port_down_device_to_ofp_max = max(port_down_device_to_ofp_list)
+ port_down_device_to_ofp_avg = \
+ (sum(port_down_device_to_ofp_list) /\
+ len(port_down_device_to_ofp_list))
+
+ main.log.report("Port down device-to-ofp Min: "+
+ str(port_down_device_to_ofp_min)+" ms Max: "+
+ str(port_down_device_to_ofp_max)+" ms Avg: "+
+ str(port_down_device_to_ofp_avg))
+
+ port_up_graph_to_ofp_min = min(port_up_graph_to_ofp_list)
+ port_up_graph_to_ofp_max = max(port_up_graph_to_ofp_list)
+ port_up_graph_to_ofp_avg = \
+ (sum(port_up_graph_to_ofp_list) /\
+ len(port_up_graph_to_ofp_list))
+
+ main.log.report("Port up graph-to-ofp Min: "+
+ str(port_up_graph_to_ofp_min)+" ms Max: "+
+ str(port_up_graph_to_ofp_max)+" ms Avg: "+
+ str(port_up_graph_to_ofp_avg))
+
+ port_up_device_to_ofp_min = min(port_up_device_to_ofp_list)
+ port_up_device_to_ofp_max = max(port_up_device_to_ofp_list)
+ port_up_device_to_ofp_avg = \
+ (sum(port_up_device_to_ofp_list) /\
+ len(port_up_device_to_ofp_list))
+
+ main.log.report("Port up device-to-ofp Min: "+
+ str(port_up_device_to_ofp_min)+" ms Max: "+
+ str(port_up_device_to_ofp_max)+" ms Avg: "+
+ str(port_up_device_to_ofp_avg))
+
+ utilities.assert_equals(expect=main.TRUE, actual=assertion,
+ onpass="Port discovery latency calculation successful",
+ onfail="Port discovery test failed")
+
+ def CASE4(self, main):
+ '''
+ Link down event using loss rate 100%
+
+ Important:
+ Use a simple 2 switch topology with 1 link between
+ the two switches. Ensure that mac addresses of the
+ switches are 1 / 2 respectively
+ '''
+ import time
+ import subprocess
+ import os
+ import requests
+ import json
+
+ ONOS1_ip = main.params['CTRL']['ip1']
+ ONOS_user = main.params['CTRL']['user']
+
+ default_sw_port = main.params['CTRL']['port1']
+
+ #Number of iterations of case
+ num_iter = main.params['TEST']['numIter']
+
+ #Timestamp 'keys' for json metrics output.
+ #These are subject to change, hence moved into params
+ deviceTimestamp = main.params['JSON']['deviceTimestamp']
+ linkTimestamp = main.params['JSON']['linkTimestamp']
+ graphTimestamp = main.params['JSON']['graphTimestamp']
+
+ assertion = main.TRUE
+ #Link event timestamp to system time list
+ link_down_link_to_system_list = []
+ link_up_link_to_system_list = []
+ #Graph event timestamp to system time list
+ link_down_graph_to_system_list = []
+ link_up_graph_to_system_list = []
+
+ main.log.report("Add / remove link latency between "+
+ "two switches")
+
+ main.step("Assign all switches")
+ main.Mininet1.assign_sw_controller(sw="1",
+ ip1=ONOS1_ip, port1=default_sw_port)
+ main.Mininet1.assign_sw_controller(sw="2",
+ ip1=ONOS1_ip, port1=default_sw_port)
+
+ main.step("Verifying switch assignment")
+ result_s1 = main.Mininet1.get_sw_controller(sw="s1")
+ result_s2 = main.Mininet1.get_sw_controller(sw="s2")
+
+ #Allow time for events to finish before taking measurements
+ time.sleep(10)
+
+ link_down = False
+ #Start iteration of link event test
+ for i in range(0, int(num_iter)):
+ main.step("Getting initial system time as t0")
+
+ timestamp_link_down_t0 = time.time() * 1000
+ #Link down is simulated by 100% loss rate using traffic
+ #control command
+ main.Mininet1.handle.sendline(
+ "sh tc qdisc add dev s1-eth1 root netem loss 100%")
+
+ #TODO: Iterate through 'links' command to verify that
+ # link s1 -> s2 went down (loop timeout 30 seconds)
+ # on all 3 ONOS instances
+ main.log.info("Checking ONOS for link update")
+ loop_count = 0
+ while( not link_down and loop_count < 30 ):
+ json_str = main.ONOS1cli.links()
+
+ if not json_str:
+ main.log.error("CLI command returned error ")
+ break
+ else:
+ json_obj = json.loads(json_str)
+ for obj in json_obj:
+ if '01' not in obj['src']['device']:
+ link_down = True
+ main.log.report("Link down from "+
+ "s1 -> s2 on ONOS1 detected")
+ loop_count += 1
+ #If CLI doesn't like the continuous requests
+ #and exits in this loop, increase the sleep here.
+ #Consequently, while loop timeout will increase
+ time.sleep(1)
+
+ #Give time for metrics measurement to catch up
+ #NOTE: May need to be configured more accurately
+ time.sleep(10)
+ #If we exited the while loop and link down 1,2,3 are still
+ #false, then ONOS has failed to discover link down event
+ if not link_down:
+ main.log.info("Link down discovery failed")
+
+ link_down_lat_graph1 = 0
+ link_down_lat_device1 = 0
+ assertion = main.FALSE
+ else:
+ json_topo_metrics_1 =\
+ main.ONOS1cli.topology_events_metrics()
+ json_topo_metrics_1 = json.loads(json_topo_metrics_1)
+
+ main.log.info("Obtaining graph and device timestamp")
+ graph_timestamp_1 = \
+ json_topo_metrics_1[graphTimestamp]['value']
+
+ link_timestamp_1 = \
+ json_topo_metrics_1[linkTimestamp]['value']
+
+ if graph_timestamp_1 and link_timestamp_1:
+ link_down_lat_graph1 = int(graph_timestamp_1) -\
+ timestamp_link_down_t0
+
+ link_down_lat_link1 = int(link_timestamp_1) -\
+ timestamp_link_down_t0
+ else:
+ main.log.error("There was an error calculating"+
+ " the delta for link down event")
+ link_down_lat_graph1 = 0
+
+ link_down_lat_device1 = 0
+
+ main.log.report("Link down latency ONOS1 iteration "+
+ str(i)+" (end-to-end): "+
+ str(link_down_lat_graph1)+" ms")
+
+ main.log.report("Link down latency ONOS1 iteration "+
+ str(i)+" (link-event-to-system-timestamp): "+
+ str(link_down_lat_link1)+" ms")
+
+ #Calculate avg of node calculations
+ link_down_lat_graph_avg = link_down_lat_graph1
+ link_down_lat_link_avg = link_down_lat_link1
+
+ #Set threshold and append latency to list
+ if link_down_lat_graph_avg > 0.0 and\
+ link_down_lat_graph_avg < 30000:
+ link_down_graph_to_system_list.append(
+ link_down_lat_graph_avg)
+ else:
+ main.log.info("Link down latency exceeded threshold")
+ main.log.info("Results for iteration "+str(i)+
+ "have been omitted")
+ if link_down_lat_link_avg > 0.0 and\
+ link_down_lat_link_avg < 30000:
+ link_down_link_to_system_list.append(
+ link_down_lat_link_avg)
+ else:
+ main.log.info("Link down latency exceeded threshold")
+ main.log.info("Results for iteration "+str(i)+
+ "have been omitted")
+
+ #NOTE: To remove loss rate and measure latency:
+ # 'sh tc qdisc del dev s1-eth1 root'
+ timestamp_link_up_t0 = time.time() * 1000
+ main.Mininet1.handle.sendline("sh tc qdisc del dev "+
+ "s1-eth1 root")
+ main.Mininet1.handle.expect("mininet>")
+
+ main.log.info("Checking ONOS for link update")
+
+ link_down1 = True
+ loop_count = 0
+ while( link_down1 and loop_count < 30 ):
+ json_str1 = main.ONOS1cli.links()
+ if not json_str1:
+ main.log.error("CLI command returned error ")
+ break
+ else:
+ json_obj1 = json.loads(json_str1)
+
+ for obj1 in json_obj1:
+ if '01' in obj1['src']['device']:
+ link_down1 = False
+ main.log.report("Link up from "+
+ "s1 -> s2 on ONOS1 detected")
+ loop_count += 1
+ time.sleep(1)
+
+ if link_down1:
+ main.log.info("Link up discovery failed")
+ link_up_lat_graph1 = 0
+ link_up_lat_device1 = 0
+ assertion = main.FALSE
+ else:
+ json_topo_metrics_1 =\
+ main.ONOS1cli.topology_events_metrics()
+ json_topo_metrics_1 = json.loads(json_topo_metrics_1)
+
+ main.log.info("Obtaining graph and device timestamp")
+ graph_timestamp_1 = \
+ json_topo_metrics_1[graphTimestamp]['value']
+
+ link_timestamp_1 = \
+ json_topo_metrics_1[linkTimestamp]['value']
+
+ if graph_timestamp_1 and link_timestamp_1:
+ link_up_lat_graph1 = int(graph_timestamp_1) -\
+ timestamp_link_up_t0
+ link_up_lat_link1 = int(link_timestamp_1) -\
+ timestamp_link_up_t0
+ else:
+ main.log.error("There was an error calculating"+
+ " the delta for link down event")
+ link_up_lat_graph1 = 0
+ link_up_lat_device1 = 0
+
+ main.log.info("Link up latency ONOS1 iteration "+
+ str(i)+" (end-to-end): "+
+ str(link_up_lat_graph1)+" ms")
+
+ main.log.info("Link up latency ONOS1 iteration "+
+ str(i)+" (link-event-to-system-timestamp): "+
+ str(link_up_lat_link1)+" ms")
+
+ #Calculate avg of node calculations
+ link_up_lat_graph_avg = link_up_lat_graph1
+ link_up_lat_link_avg = link_up_lat_link1
+
+ #Set threshold and append latency to list
+ if link_up_lat_graph_avg > 0.0 and\
+ link_up_lat_graph_avg < 30000:
+ link_up_graph_to_system_list.append(
+ link_up_lat_graph_avg)
+ else:
+ main.log.info("Link up latency exceeded threshold")
+ main.log.info("Results for iteration "+str(i)+
+ "have been omitted")
+ if link_up_lat_link_avg > 0.0 and\
+ link_up_lat_link_avg < 30000:
+ link_up_link_to_system_list.append(
+ link_up_lat_link_avg)
+ else:
+ main.log.info("Link up latency exceeded threshold")
+ main.log.info("Results for iteration "+str(i)+
+ "have been omitted")
+
+ #Calculate min, max, avg of list and report
+ link_down_min = min(link_down_graph_to_system_list)
+ link_down_max = max(link_down_graph_to_system_list)
+ link_down_avg = sum(link_down_graph_to_system_list) / \
+ len(link_down_graph_to_system_list)
+ link_up_min = min(link_up_graph_to_system_list)
+ link_up_max = max(link_up_graph_to_system_list)
+ link_up_avg = sum(link_up_graph_to_system_list) / \
+ len(link_up_graph_to_system_list)
+
+ main.log.report("Link down latency - Min: "+
+ str(link_down_min)+"ms Max: "+
+ str(link_down_max)+"ms Avg: "+
+ str(link_down_avg)+"ms")
+ main.log.report("Link up latency - Min: "+
+ str(link_up_min)+"ms Max: "+
+ str(link_up_max)+"ms Avg: "+
+ str(link_up_avg)+"ms")
+
+ utilities.assert_equals(expect=main.TRUE, actual=assertion,
+ onpass="Link discovery latency calculation successful",
+ onfail="Link discovery latency case failed")
+
+ def CASE5(self, main):
+ '''
+ 100 Switch discovery latency
+
+ Important:
+ This test case can be potentially dangerous if
+ your machine has previously set iptables rules.
+ One of the steps of the test case will flush
+ all existing iptables rules.
+ Note:
+ You can specify the number of switches in the
+ params file to adjust the switch discovery size
+ (and specify the corresponding topology in Mininet1
+ .topo file)
+ '''
+ import time
+ import subprocess
+ import os
+ import requests
+ import json
+
+ ONOS1_ip = main.params['CTRL']['ip1']
+ MN1_ip = main.params['MN']['ip1']
+ ONOS_user = main.params['CTRL']['user']
+
+ default_sw_port = main.params['CTRL']['port1']
+
+ #Number of iterations of case
+ num_iter = main.params['TEST']['numIter']
+ num_sw = main.params['TEST']['numSwitch']
+
+ #Timestamp 'keys' for json metrics output.
+ #These are subject to change, hence moved into params
+ deviceTimestamp = main.params['JSON']['deviceTimestamp']
+ graphTimestamp = main.params['JSON']['graphTimestamp']
+
+ tshark_ofp_output = "/tmp/tshark_ofp_"+num_sw+"sw.txt"
+ tshark_tcp_output = "/tmp/tshark_tcp_"+num_sw+"sw.txt"
+
+ tshark_ofp_result_list = []
+ tshark_tcp_result_list = []
+
+ main.case(num_sw+" Switch discovery latency")
+ main.step("Assigning all switches to ONOS1")
+ for i in range(1, int(num_sw)+1):
+ main.Mininet1.assign_sw_controller(
+ sw=str(i),
+ ip1=ONOS1_ip,
+ port1=default_sw_port)
+
+ #Ensure that nodes are configured with ptpd
+ #Just a warning message
+ main.log.info("Please check ptpd configuration to ensure"+\
+ " All nodes' system times are in sync")
+ time.sleep(5)
+
+ for i in range(0, int(num_iter)):
+
+ main.step("Set iptables rule to block incoming sw connections")
+ #Set iptables rule to block incoming switch connections
+ #The rule description is as follows:
+ # Append to INPUT rule,
+ # behavior DROP that matches following:
+ # * packet type: tcp
+ # * source IP: MN1_ip
+ # * destination PORT: 6633
+ main.ONOS1.handle.sendline(
+ "sudo iptables -A INPUT -p tcp -s "+MN1_ip+
+ " --dport "+default_sw_port+" -j DROP")
+ main.ONOS1.handle.expect("\$")
+ # Append to OUTPUT rule,
+ # behavior DROP that matches following:
+ # * packet type: tcp
+ # * source IP: MN1_ip
+ # * destination PORT: 6633
+ main.ONOS1.handle.sendline(
+ "sudo iptables -A OUTPUT -p tcp -s "+MN1_ip+
+ " --dport "+default_sw_port+" -j DROP")
+ main.ONOS1.handle.expect("\$")
+ #Give time to allow rule to take effect
+ #NOTE: Sleep period may need to be configured
+ # based on the number of switches in the topology
+ main.log.info("Please wait for switch connection to "+
+ "time out")
+ time.sleep(60)
+
+ #Gather vendor OFP with tshark
+ main.ONOS1.tshark_grep("OFP 86 Vendor",
+ tshark_ofp_output)
+ main.ONOS1.tshark_grep("TCP 74 ",
+ tshark_tcp_output)
+
+ #NOTE: Remove all iptables rule quickly (flush)
+ # Before removal, obtain TestON timestamp at which
+ # removal took place
+ # (ensuring nodes are configured via ptp)
+ # sudo iptables -F
+
+ t0_system = time.time() * 1000
+ main.ONOS1.handle.sendline(
+ "sudo iptables -F")
+
+ #Counter to track loop count
+ counter_loop = 0
+ counter_avail1 = 0
+ onos1_dev = False
+ while counter_loop < 60:
+ #Continue to check devices for all device
+ #availability. When all devices in all 3
+ #ONOS instances indicate that devices are available
+ #obtain graph event timestamp for t1.
+ device_str_obj1 = main.ONOS1cli.devices()
+ device_json1 = json.loads(device_str_obj1)
+
+ for device1 in device_json1:
+ if device1['available'] == True:
+ counter_avail1 += 1
+ if counter_avail1 == int(num_sw):
+ onos1_dev = True
+ main.log.info("All devices have been "+
+ "discovered on ONOS1")
+ else:
+ counter_avail1 = 0
+
+ if onos1_dev:
+ main.log.info("All devices have been discovered "+
+ "on all ONOS instances")
+ json_str_topology_metrics_1 =\
+ main.ONOS1cli.topology_events_metrics()
+ #Exit while loop if all devices discovered
+ break
+
+ counter_loop += 1
+ #Give some time in between CLI calls
+ #(will not affect measurement)
+ time.sleep(3)
+
+ main.ONOS1.tshark_stop()
+
+ os.system("scp "+ONOS_user+"@"+ONOS1_ip+":"+
+ tshark_ofp_output+" /tmp/")
+ os.system("scp "+ONOS_user+"@"+ONOS1_ip+":"+
+ tshark_tcp_output+" /tmp/")
+ ofp_file = open(tshark_ofp_output, 'r')
+
+ #The following is for information purpose only.
+ #TODO: Automate OFP output analysis
+ main.log.info("Tshark OFP Vendor output: ")
+ for line in ofp_file:
+ tshark_ofp_result_list.append(line)
+ main.log.info(line)
+
+ ofp_file.close()
+
+ tcp_file = open(tshark_tcp_output, 'r')
+ main.log.info("Tshark TCP 74 output: ")
+ for line in tcp_file:
+ tshark_tcp_result_list.append(line)
+ main.log.info(line)
+
+ tcp_file.close()
+
+ json_obj_1 = json.loads(json_str_topology_metrics_1)
+
+ graph_timestamp_1 = \
+ json_obj_1[graphTimestamp]['value']
+
+ main.log.info(
+ int(graph_timestamp_1) - int(t0_system))
+
+
+
+
+
+
+
diff --git a/TestON/tests/TopoPerfNextSingleNode/TopoPerfNextSingleNode.topo b/TestON/tests/TopoPerfNextSingleNode/TopoPerfNextSingleNode.topo
new file mode 100644
index 0000000..3fc7bdc
--- /dev/null
+++ b/TestON/tests/TopoPerfNextSingleNode/TopoPerfNextSingleNode.topo
@@ -0,0 +1,55 @@
+<TOPOLOGY>
+ <COMPONENT>
+
+ <ONOSbench>
+ <host>10.128.174.10</host>
+ <user>admin</user>
+ <password>onos_test</password>
+ <type>OnosDriver</type>
+ <connect_order>1</connect_order>
+ <COMPONENTS> </COMPONENTS>
+ </ONOSbench>
+
+ <ONOS1cli>
+ <host>10.128.174.10</host>
+ <user>admin</user>
+ <password>onos_test</password>
+ <type>OnosCliDriver</type>
+ <connect_order>2</connect_order>
+ <COMPONENTS> </COMPONENTS>
+ </ONOS1cli>
+
+ <ONOS1>
+ <host>10.128.174.1</host>
+ <user>admin</user>
+ <password>onos_test</password>
+ <type>OnosDriver</type>
+ <connect_order>3</connect_order>
+ <COMPONENTS> </COMPONENTS>
+ </ONOS1>
+
+ <Mininet1>
+ <host>10.128.10.90</host>
+ <user>admin</user>
+ <password>onos_test</password>
+ <type>MininetCliDriver</type>
+ <connect_order>4</connect_order>
+ <COMPONENTS>
+ <arg1> --custom topo-100sw.py </arg1>
+ <arg2> --arp --mac --topo mytopo</arg2>
+ <arg3> </arg3>
+ <controller> remote </controller>
+ </COMPONENTS>
+ </Mininet1>
+
+ <Mininet2>
+ <host>10.128.10.90</host>
+ <user>admin</user>
+ <password>onos_test</password>
+ <type>RemoteMininetDriver</type>
+ <connect_order>5</connect_order>
+ <COMPONENTS> </COMPONENTS>
+ </Mininet2>
+
+ </COMPONENT>
+</TOPOLOGY>