Merge branch 'master' of https://github.com/OPENNETWORKINGLAB/ONLabTest
diff --git a/TestON/tests/IntentPerfNext/IntentPerfNext.params b/TestON/tests/IntentPerfNext/IntentPerfNext.params
index dbcd05c..8909059 100644
--- a/TestON/tests/IntentPerfNext/IntentPerfNext.params
+++ b/TestON/tests/IntentPerfNext/IntentPerfNext.params
@@ -36,7 +36,7 @@
<TEST>
#Number of times to iterate each case
- <numIter>12</numIter>
+ <numIter>3</numIter>
<numIgnore>2</numIgnore>
<numSwitch>8</numSwitch>
<batchThresholdMin>0</batchThresholdMin>
diff --git a/TestON/tests/IntentPerfNext/IntentPerfNext.py b/TestON/tests/IntentPerfNext/IntentPerfNext.py
index 74ce298..40a1276 100644
--- a/TestON/tests/IntentPerfNext/IntentPerfNext.py
+++ b/TestON/tests/IntentPerfNext/IntentPerfNext.py
@@ -1,164 +1,165 @@
-#Intent Performance Test for ONOS-next
+# Intent Performance Test for ONOS-next
#
-#andrew@onlab.us
+# andrew@onlab.us
#
-#November 5, 2014
+# November 5, 2014
+
class IntentPerfNext:
- def __init__(self):
+
+ def __init__( self ):
self.default = ""
- def CASE1(self, main):
- '''
+ def CASE1( self, main ):
+ """
ONOS startup sequence
- '''
-
+ """
import time
- global cluster_count
- cluster_count = 1
+ global clusterCount
+ clusterCount = 1
- cell_name = main.params['ENV']['cellName']
+ cellName = main.params[ 'ENV' ][ 'cellName' ]
- git_pull = main.params['GIT']['autoPull']
- checkout_branch = main.params['GIT']['checkout']
+ gitPull = main.params[ 'GIT' ][ 'autoPull' ]
+ checkoutBranch = main.params[ 'GIT' ][ 'checkout' ]
- ONOS1_ip = main.params['CTRL']['ip1']
- ONOS2_ip = main.params['CTRL']['ip2']
- ONOS3_ip = main.params['CTRL']['ip3']
- ONOS4_ip = main.params['CTRL']['ip4']
- ONOS5_ip = main.params['CTRL']['ip5']
- ONOS6_ip = main.params['CTRL']['ip6']
- ONOS7_ip = main.params['CTRL']['ip7']
+ ONOS1Ip = main.params[ 'CTRL' ][ 'ip1' ]
+ ONOS2Ip = main.params[ 'CTRL' ][ 'ip2' ]
+ ONOS3Ip = main.params[ 'CTRL' ][ 'ip3' ]
+ ONOS4Ip = main.params[ 'CTRL' ][ 'ip4' ]
+ ONOS5Ip = main.params[ 'CTRL' ][ 'ip5' ]
+ ONOS6Ip = main.params[ 'CTRL' ][ 'ip6' ]
+ ONOS7Ip = main.params[ 'CTRL' ][ 'ip7' ]
- main.ONOSbench.onos_uninstall(node_ip=ONOS1_ip)
- main.ONOSbench.onos_uninstall(node_ip=ONOS2_ip)
- main.ONOSbench.onos_uninstall(node_ip=ONOS3_ip)
- main.ONOSbench.onos_uninstall(node_ip=ONOS4_ip)
- main.ONOSbench.onos_uninstall(node_ip=ONOS5_ip)
- main.ONOSbench.onos_uninstall(node_ip=ONOS6_ip)
- main.ONOSbench.onos_uninstall(node_ip=ONOS7_ip)
+ main.ONOSbench.onosUninstall( nodeIp=ONOS1Ip )
+ main.ONOSbench.onosUninstall( nodeIp=ONOS2Ip )
+ main.ONOSbench.onosUninstall( nodeIp=ONOS3Ip )
+ main.ONOSbench.onosUninstall( nodeIp=ONOS4Ip )
+ main.ONOSbench.onosUninstall( nodeIp=ONOS5Ip )
+ main.ONOSbench.onosUninstall( nodeIp=ONOS6Ip )
+ main.ONOSbench.onosUninstall( nodeIp=ONOS7Ip )
- MN1_ip = main.params['MN']['ip1']
- BENCH_ip = main.params['BENCH']['ip']
-
- main.case("Setting up test environment")
+ MN1Ip = main.params[ 'MN' ][ 'ip1' ]
+ BENCHIp = main.params[ 'BENCH' ][ 'ip' ]
- main.step("Creating cell file")
- cell_file_result = main.ONOSbench.create_cell_file(
- BENCH_ip, cell_name, MN1_ip,
- "onos-core,onos-app-metrics,onos-gui",
- ONOS1_ip)
+ main.case( "Setting up test environment" )
- main.step("Applying cell file to environment")
- cell_apply_result = main.ONOSbench.set_cell(cell_name)
- verify_cell_result = main.ONOSbench.verify_cell()
+ main.step( "Creating cell file" )
+ cellFileResult = main.ONOSbench.createCellFile(
+ BENCHIp, cellName, MN1Ip,
+ "onos-core,onos-app-metrics,onos-gui",
+ ONOS1Ip )
- main.step("Removing raft logs")
- main.ONOSbench.onos_remove_raft_logs()
+ main.step( "Applying cell file to environment" )
+ cellApplyResult = main.ONOSbench.setCell( cellName )
+ verifyCellResult = main.ONOSbench.verifyCell()
- main.step("Git checkout and pull "+checkout_branch)
- if git_pull == 'on':
- checkout_result = \
- main.ONOSbench.git_checkout(checkout_branch)
- pull_result = main.ONOSbench.git_pull()
-
- #If you used git pull, auto compile
- main.step("Using onos-build to compile ONOS")
- build_result = main.ONOSbench.onos_build()
+ main.step( "Removing raft logs" )
+ main.ONOSbench.onosRemoveRaftLogs()
+
+ main.step( "Git checkout and pull " + checkoutBranch )
+ if gitPull == 'on':
+ checkoutResult = \
+ main.ONOSbench.gitCheckout( checkoutBranch )
+ pullResult = main.ONOSbench.gitPull()
+
+ # If you used git pull, auto compile
+ main.step( "Using onos-build to compile ONOS" )
+ buildResult = main.ONOSbench.onosBuild()
else:
- checkout_result = main.TRUE
- pull_result = main.TRUE
- build_result = main.TRUE
- main.log.info("Git pull skipped by configuration")
+ checkoutResult = main.TRUE
+ pullResult = main.TRUE
+ buildResult = main.TRUE
+ main.log.info( "Git pull skipped by configuration" )
- main.log.report("Commit information - ")
- main.ONOSbench.get_version(report=True)
+ main.log.report( "Commit information - " )
+ main.ONOSbench.getVersion( report=True )
- main.step("Creating ONOS package")
- package_result = main.ONOSbench.onos_package()
+ main.step( "Creating ONOS package" )
+ packageResult = main.ONOSbench.onosPackage()
- main.step("Installing ONOS package")
- install1_result = main.ONOSbench.onos_install(node=ONOS1_ip)
- #install2_result = main.ONOSbench.onos_install(node=ONOS2_ip)
- #install3_result = main.ONOSbench.onos_install(node=ONOS3_ip)
+ main.step( "Installing ONOS package" )
+ install1Result = main.ONOSbench.onosInstall( node=ONOS1Ip )
+ #install2Result = main.ONOSbench.onosInstall( node=ONOS2Ip )
+ #install3Result = main.ONOSbench.onosInstall( node=ONOS3Ip )
- main.step("Set cell for ONOScli env")
- main.ONOS1cli.set_cell(cell_name)
- #main.ONOS2cli.set_cell(cell_name)
- #main.ONOS3cli.set_cell(cell_name)
+ main.step( "Set cell for ONOScli env" )
+ main.ONOS1cli.setCell( cellName )
+ # main.ONOS2cli.setCell( cellName )
+ # main.ONOS3cli.setCell( cellName )
- time.sleep(5)
+ time.sleep( 5 )
- main.step("Start onos cli")
- cli1 = main.ONOS1cli.start_onos_cli(ONOS1_ip)
- #cli2 = main.ONOS2cli.start_onos_cli(ONOS2_ip)
- #cli3 = main.ONOS3cli.start_onos_cli(ONOS3_ip)
+ main.step( "Start onos cli" )
+ cli1 = main.ONOS1cli.startOnosCli( ONOS1Ip )
+ #cli2 = main.ONOS2cli.startOnosCli( ONOS2Ip )
+ #cli3 = main.ONOS3cli.startOnosCli( ONOS3Ip )
- utilities.assert_equals(expect=main.TRUE,
- actual = cell_file_result and cell_apply_result and\
- verify_cell_result and checkout_result and\
- pull_result and build_result and\
- install1_result, #and install2_result and\
- #install3_result,
- onpass="ONOS started successfully",
- onfail="Failed to start ONOS")
+ utilities.assert_equals( expect=main.TRUE,
+ actual=cellFileResult and cellApplyResult and
+ verifyCellResult and checkoutResult and
+ pullResult and buildResult and
+ install1Result, # and install2Result and
+ # install3Result,
+ onpass="ONOS started successfully",
+ onfail="Failed to start ONOS" )
- def CASE2(self, main):
- '''
+ def CASE2( self, main ):
+ """
Single intent add latency
- '''
+ """
import time
import json
import requests
import os
import numpy
- global cluster_count
+ global clusterCount
- ONOS1_ip = main.params['CTRL']['ip1']
- ONOS2_ip = main.params['CTRL']['ip2']
- ONOS3_ip = main.params['CTRL']['ip3']
- ONOS_ip_list = []
- for i in range(1, 8):
- ONOS_ip_list.append(main.params['CTRL']['ip'+str(i)])
-
- ONOS_user = main.params['CTRL']['user']
+ ONOS1Ip = main.params[ 'CTRL' ][ 'ip1' ]
+ ONOS2Ip = main.params[ 'CTRL' ][ 'ip2' ]
+ ONOS3Ip = main.params[ 'CTRL' ][ 'ip3' ]
+ ONOSIpList = []
+ for i in range( 1, 8 ):
+ ONOSIpList.append( main.params[ 'CTRL' ][ 'ip' + str( i ) ] )
- default_sw_port = main.params['CTRL']['port1']
+ ONOSUser = main.params[ 'CTRL' ][ 'user' ]
- #number of iterations of case
- num_iter = main.params['TEST']['numIter']
- num_ignore = int(main.params['TEST']['numIgnore'])
+ defaultSwPort = main.params[ 'CTRL' ][ 'port1' ]
- #Timestamp keys for json metrics output
- submit_time = main.params['JSON']['submittedTime']
- install_time = main.params['JSON']['installedTime']
- wdRequest_time = main.params['JSON']['wdRequestTime']
- withdrawn_time = main.params['JSON']['withdrawnTime']
-
+ # number of iterations of case
+ numIter = main.params[ 'TEST' ][ 'numIter' ]
+ numIgnore = int( main.params[ 'TEST' ][ 'numIgnore' ] )
+
+ # Timestamp keys for json metrics output
+ submitTime = main.params[ 'JSON' ][ 'submittedTime' ]
+ installTime = main.params[ 'JSON' ][ 'installedTime' ]
+ wdRequestTime = main.params[ 'JSON' ][ 'wdRequestTime' ]
+ withdrawnTime = main.params[ 'JSON' ][ 'withdrawnTime' ]
+
assertion = main.TRUE
- intent_add_lat_list = []
-
- #Distribute switches according to cluster count
- for i in range(1, 9):
- if cluster_count == 1:
- main.Mininet1.assign_sw_controller(
- sw=str(i), ip1=ONOS_ip_list[0],
- port1=default_sw_port
- )
- elif cluster_count == 3:
+ intentAddLatList = []
+
+ # Distribute switches according to cluster count
+ for i in range( 1, 9 ):
+ if clusterCount == 1:
+ main.Mininet1.assignSwController(
+ sw=str( i ), ip1=ONOSIpList[ 0 ],
+ port1=defaultSwPort
+ )
+ elif clusterCount == 3:
if i < 3:
index = 0
elif i < 6 and i >= 3:
index = 1
else:
index = 2
- main.Mininet1.assign_sw_controller(
- sw=str(i), ip1=ONOS_ip_list[index],
- port1=default_sw_port
- )
- elif cluster_count == 5:
+ main.Mininet1.assignSwController(
+ sw=str( i ), ip1=ONOSIpList[ index ],
+ port1=defaultSwPort
+ )
+ elif clusterCount == 5:
if i < 3:
index = 0
elif i < 5 and i >= 3:
@@ -169,947 +170,954 @@
index = 3
else:
index = 4
- main.Mininet1.assign_sw_controller(
- sw=str(i), ip1=ONOS_ip_list[index],
- port1=default_sw_port
- )
- elif cluster_count == 7:
+ main.Mininet1.assignSwController(
+ sw=str( i ), ip1=ONOSIpList[ index ],
+ port1=defaultSwPort
+ )
+ elif clusterCount == 7:
if i < 6:
index = i
else:
index = 6
- main.Mininet1.assign_sw_controller(
- sw=str(i), ip1=ONOS_ip_list[index],
- port1=default_sw_port
- )
+ main.Mininet1.assignSwController(
+ sw=str( i ), ip1=ONOSIpList[ index ],
+ port1=defaultSwPort
+ )
- time.sleep(10)
+ time.sleep( 10 )
- main.log.report("Single intent add latency test")
+ main.log.report( "Single intent add latency test" )
- devices_json_str = main.ONOS1cli.devices()
- devices_json_obj = json.loads(devices_json_str)
-
- if not devices_json_obj:
- main.log.report("Devices not discovered")
- main.log.report("Aborting test")
+ devicesJsonStr = main.ONOS1cli.devices()
+ devicesJsonObj = json.loads( devicesJsonStr )
+
+ if not devicesJsonObj:
+ main.log.report( "Devices not discovered" )
+ main.log.report( "Aborting test" )
main.exit()
else:
- main.log.info("Devices discovered successfully")
+ main.log.info( "Devices discovered successfully" )
- device_id_list = []
+ deviceIdList = []
- #Obtain device id list in ONOS format.
- #They should already be in order (1,2,3,10,11,12,13, etc)
- for device in devices_json_obj:
- device_id_list.append(device['id'])
+ # Obtain device id list in ONOS format.
+ # They should already be in order ( 1,2,3,10,11,12,13, etc )
+ for device in devicesJsonObj:
+ deviceIdList.append( device[ 'id' ] )
- for i in range(0, int(num_iter)):
- #add_point_intent(ingr_device, egr_device,
- # ingr_port, egr_port)
- main.ONOS1cli.add_point_intent(
- device_id_list[0]+"/2", device_id_list[7]+"/2")
-
- #Allow some time for intents to propagate
- time.sleep(5)
-
- intents_str = main.ONOS1cli.intents(json_format=True)
- intents_obj = json.loads(intents_str)
- for intent in intents_obj:
- if intent['state'] == "INSTALLED":
- main.log.info("Intent installed successfully")
- intent_id = intent['id']
- main.log.info("Intent id: "+str(intent_id))
+ for i in range( 0, int( numIter ) ):
+ # addPointIntent( ingrDevice, egrDevice,
+ # ingrPort, egrPort )
+ main.ONOS1cli.addPointIntent(
+ deviceIdList[ 0 ] + "/2", deviceIdList[ 7 ] + "/2" )
+
+ # Allow some time for intents to propagate
+ time.sleep( 5 )
+
+ intentsStr = main.ONOS1cli.intents( jsonFormat=True )
+ intentsObj = json.loads( intentsStr )
+ for intent in intentsObj:
+ if intent[ 'state' ] == "INSTALLED":
+ main.log.info( "Intent installed successfully" )
+ intentId = intent[ 'id' ]
+ main.log.info( "Intent id: " + str( intentId ) )
else:
- #TODO: Add error handling
- main.log.info("Intent installation failed")
- intent_id = ""
+ # TODO: Add error handling
+ main.log.info( "Intent installation failed" )
+ intentId = ""
- #Obtain metrics from ONOS 1, 2, 3
- intents_json_str_1 = main.ONOS1cli.intents_events_metrics()
- intents_json_obj_1 = json.loads(intents_json_str_1)
- #Parse values from the json object
- intent_submit_1 = \
- intents_json_obj_1[submit_time]['value']
- intent_install_1 = \
- intents_json_obj_1[install_time]['value']
- intent_install_lat_1 = \
- int(intent_install_1) - int(intent_submit_1)
-
- if cluster_count == 3:
- intents_json_str_2 = main.ONOS2cli.intents_events_metrics()
- intents_json_str_3 = main.ONOS3cli.intents_events_metrics()
- intents_json_obj_2 = json.loads(intents_json_str_2)
- intents_json_obj_3 = json.loads(intents_json_str_3)
- intent_submit_2 = \
- intents_json_obj_2[submit_time]['value']
- intent_submit_3 = \
- intents_json_obj_3[submit_time]['value']
- intent_install_2 = \
- intents_json_obj_2[install_time]['value']
- intent_install_3 = \
- intents_json_obj_3[install_time]['value']
- intent_install_lat_2 = \
- int(intent_install_2) - int(intent_submit_2)
- intent_install_lat_3 = \
- int(intent_install_3) - int(intent_submit_3)
+ # Obtain metrics from ONOS 1, 2, 3
+ intentsJsonStr1 = main.ONOS1cli.intentsEventsMetrics()
+ intentsJsonObj1 = json.loads( intentsJsonStr1 )
+ # Parse values from the json object
+ intentSubmit1 = \
+ intentsJsonObj1[ submitTime ][ 'value' ]
+ intentInstall1 = \
+ intentsJsonObj1[ installTime ][ 'value' ]
+ intentInstallLat1 = \
+ int( intentInstall1 ) - int( intentSubmit1 )
+
+ if clusterCount == 3:
+ intentsJsonStr2 = main.ONOS2cli.intentsEventsMetrics()
+ intentsJsonStr3 = main.ONOS3cli.intentsEventsMetrics()
+ intentsJsonObj2 = json.loads( intentsJsonStr2 )
+ intentsJsonObj3 = json.loads( intentsJsonStr3 )
+ intentSubmit2 = \
+ intentsJsonObj2[ submitTime ][ 'value' ]
+ intentSubmit3 = \
+ intentsJsonObj3[ submitTime ][ 'value' ]
+ intentInstall2 = \
+ intentsJsonObj2[ installTime ][ 'value' ]
+ intentInstall3 = \
+ intentsJsonObj3[ installTime ][ 'value' ]
+ intentInstallLat2 = \
+ int( intentInstall2 ) - int( intentSubmit2 )
+ intentInstallLat3 = \
+ int( intentInstall3 ) - int( intentSubmit3 )
else:
- intent_install_lat_2 = 0
- intent_install_lat_3 = 0
+ intentInstallLat2 = 0
+ intentInstallLat3 = 0
- if cluster_count == 5:
- intents_json_str_4 = main.ONOS4cli.intents_events_metrics()
- intents_json_str_5 = main.ONOS5cli.intents_events_metrics()
- intents_json_obj_4 = json.loads(intents_json_str_4)
- intents_json_obj_5 = json.loads(intents_json_str_5)
- intent_submit_4 = \
- intents_json_obj_4[submit_time]['value']
- intent_submit_5 = \
- intents_json_obj_5[submit_time]['value']
- intent_install_4 = \
- intents_json_obj_5[install_time]['value']
- intent_install_5 = \
- intents_json_obj_5[install_time]['value']
- intent_install_lat_4 = \
- int(intent_install_4) - int(intent_submit_4)
- intent_install_lat_5 = \
- int(intent_install_5) - int(intent_submit_5)
+ if clusterCount == 5:
+ intentsJsonStr4 = main.ONOS4cli.intentsEventsMetrics()
+ intentsJsonStr5 = main.ONOS5cli.intentsEventsMetrics()
+ intentsJsonObj4 = json.loads( intentsJsonStr4 )
+ intentsJsonObj5 = json.loads( intentsJsonStr5 )
+ intentSubmit4 = \
+ intentsJsonObj4[ submitTime ][ 'value' ]
+ intentSubmit5 = \
+ intentsJsonObj5[ submitTime ][ 'value' ]
+ intentInstall4 = \
+                intentsJsonObj4[ installTime ][ 'value' ]
+ intentInstall5 = \
+ intentsJsonObj5[ installTime ][ 'value' ]
+ intentInstallLat4 = \
+ int( intentInstall4 ) - int( intentSubmit4 )
+ intentInstallLat5 = \
+ int( intentInstall5 ) - int( intentSubmit5 )
else:
- intent_install_lat_4 = 0
- intent_install_lat_5 = 0
+ intentInstallLat4 = 0
+ intentInstallLat5 = 0
- if cluster_count == 7:
- intents_json_str_6 = main.ONOS6cli.intents_events_metrics()
- intents_json_str_7 = main.ONOS7cli.intents_events_metrics()
- intents_json_obj_6 = json.loads(intents_json_str_6)
- intents_json_obj_7 = json.loads(intents_json_str_7)
- intent_submit_6 = \
- intents_json_obj_6[submit_time]['value']
- intent_submit_7 = \
- intents_json_obj_6[submit_time]['value']
- intent_install_6 = \
- intents_json_obj_6[install_time]['value']
- intent_install_7 = \
- intents_json_obj_7[install_time]['value']
- intent_install_lat_6 = \
- int(intent_install_6) - int(intent_submit_6)
- intent_install_lat_7 = \
- int(intent_install_7) - int(intent_submit_7)
+ if clusterCount == 7:
+ intentsJsonStr6 = main.ONOS6cli.intentsEventsMetrics()
+ intentsJsonStr7 = main.ONOS7cli.intentsEventsMetrics()
+ intentsJsonObj6 = json.loads( intentsJsonStr6 )
+ intentsJsonObj7 = json.loads( intentsJsonStr7 )
+ intentSubmit6 = \
+ intentsJsonObj6[ submitTime ][ 'value' ]
+ intentSubmit7 = \
+                intentsJsonObj7[ submitTime ][ 'value' ]
+ intentInstall6 = \
+ intentsJsonObj6[ installTime ][ 'value' ]
+ intentInstall7 = \
+ intentsJsonObj7[ installTime ][ 'value' ]
+ intentInstallLat6 = \
+ int( intentInstall6 ) - int( intentSubmit6 )
+ intentInstallLat7 = \
+ int( intentInstall7 ) - int( intentSubmit7 )
else:
- intent_install_lat_6 = 0
- intent_install_lat_7 = 0
+ intentInstallLat6 = 0
+ intentInstallLat7 = 0
- intent_install_lat_avg = \
- (intent_install_lat_1 +
- intent_install_lat_2 +
- intent_install_lat_3 +
- intent_install_lat_4 +
- intent_install_lat_5 +
- intent_install_lat_6 +
- intent_install_lat_7) / cluster_count
+ intentInstallLatAvg = \
+ ( intentInstallLat1 +
+ intentInstallLat2 +
+ intentInstallLat3 +
+ intentInstallLat4 +
+ intentInstallLat5 +
+ intentInstallLat6 +
+ intentInstallLat7 ) / clusterCount
- main.log.info("Intent add latency avg for iteration "+str(i)+
- ": "+str(intent_install_lat_avg)+" ms")
+ main.log.info( "Intent add latency avg for iteration " + str( i ) +
+ ": " + str( intentInstallLatAvg ) + " ms" )
- if intent_install_lat_avg > 0.0 and \
- intent_install_lat_avg < 1000 and i > num_ignore:
- intent_add_lat_list.append(intent_install_lat_avg)
+ if intentInstallLatAvg > 0.0 and \
+ intentInstallLatAvg < 1000 and i > numIgnore:
+ intentAddLatList.append( intentInstallLatAvg )
else:
- main.log.info("Intent add latency exceeded "+
- "threshold. Skipping iteration "+str(i))
+ main.log.info( "Intent add latency exceeded " +
+ "threshold. Skipping iteration " + str( i ) )
- time.sleep(3)
-
- #TODO: Only remove intents that were installed
+ time.sleep( 3 )
+
+ # TODO: Only remove intents that were installed
# in this case... Otherwise many other intents
# may show up distorting the results
- main.log.info("Removing intents for next iteration")
- json_temp = \
- main.ONOS1cli.intents(json_format=True)
- json_obj_intents = json.loads(json_temp)
- if json_obj_intents:
- for intents in json_obj_intents:
- temp_id = intents['id']
- #main.ONOS1cli.remove_intent(temp_id)
- main.log.info("Removing intent id: "+
- str(temp_id))
- main.ONOS1cli.remove_intent(temp_id)
+ main.log.info( "Removing intents for next iteration" )
+ jsonTemp = \
+ main.ONOS1cli.intents( jsonFormat=True )
+ jsonObjIntents = json.loads( jsonTemp )
+ if jsonObjIntents:
+ for intents in jsonObjIntents:
+ tempId = intents[ 'id' ]
+ # main.ONOS1cli.removeIntent( tempId )
+ main.log.info( "Removing intent id: " +
+ str( tempId ) )
+ main.ONOS1cli.removeIntent( tempId )
else:
- main.log.info("Intents were not installed correctly")
+ main.log.info( "Intents were not installed correctly" )
- time.sleep(5)
+ time.sleep( 5 )
- if intent_add_lat_list:
- intent_add_lat_avg = sum(intent_add_lat_list) /\
- len(intent_add_lat_list)
+ if intentAddLatList:
+ intentAddLatAvg = sum( intentAddLatList ) /\
+ len( intentAddLatList )
else:
- main.log.report("Intent installation latency test failed")
- intent_add_lat_avg = "NA"
+ main.log.report( "Intent installation latency test failed" )
+ intentAddLatAvg = "NA"
assertion = main.FALSE
- intent_add_lat_std = \
- round(numpy.std(intent_add_lat_list),1)
- #END ITERATION FOR LOOP
- main.log.report("Single intent add latency - ")
- main.log.report("Avg: "+str(intent_add_lat_avg)+" ms")
- main.log.report("Std Deviation: "+str(intent_add_lat_std)+" ms")
-
- utilities.assert_equals(expect=main.TRUE, actual=assertion,
+ intentAddLatStd = \
+ round( numpy.std( intentAddLatList ), 1 )
+ # END ITERATION FOR LOOP
+ main.log.report( "Single intent add latency - " )
+ main.log.report( "Avg: " + str( intentAddLatAvg ) + " ms" )
+ main.log.report( "Std Deviation: " + str( intentAddLatStd ) + " ms" )
+
+ utilities.assert_equals(
+ expect=main.TRUE,
+ actual=assertion,
onpass="Single intent install latency test successful",
- onfail="Single intent install latency test failed")
+ onfail="Single intent install latency test failed" )
- def CASE3(self, main):
- '''
+ def CASE3( self, main ):
+ """
Intent Reroute latency
- '''
+ """
import time
import json
import requests
import os
import numpy
- global cluster_count
+ global clusterCount
- ONOS1_ip = main.params['CTRL']['ip1']
- ONOS2_ip = main.params['CTRL']['ip2']
- ONOS3_ip = main.params['CTRL']['ip3']
- ONOS_user = main.params['CTRL']['user']
+ ONOS1Ip = main.params[ 'CTRL' ][ 'ip1' ]
+ ONOS2Ip = main.params[ 'CTRL' ][ 'ip2' ]
+ ONOS3Ip = main.params[ 'CTRL' ][ 'ip3' ]
+ ONOSUser = main.params[ 'CTRL' ][ 'user' ]
- default_sw_port = main.params['CTRL']['port1']
+ defaultSwPort = main.params[ 'CTRL' ][ 'port1' ]
- #number of iterations of case
- num_iter = main.params['TEST']['numIter']
- num_ignore = int(main.params['TEST']['numIgnore'])
+ # number of iterations of case
+ numIter = main.params[ 'TEST' ][ 'numIter' ]
+ numIgnore = int( main.params[ 'TEST' ][ 'numIgnore' ] )
assertion = main.TRUE
- #Timestamp keys for json metrics output
- submit_time = main.params['JSON']['submittedTime']
- install_time = main.params['JSON']['installedTime']
- wdRequest_time = main.params['JSON']['wdRequestTime']
- withdrawn_time = main.params['JSON']['withdrawnTime']
+ # Timestamp keys for json metrics output
+ submitTime = main.params[ 'JSON' ][ 'submittedTime' ]
+ installTime = main.params[ 'JSON' ][ 'installedTime' ]
+ wdRequestTime = main.params[ 'JSON' ][ 'wdRequestTime' ]
+ withdrawnTime = main.params[ 'JSON' ][ 'withdrawnTime' ]
- #NOTE: May need to configure interface depending on topology
- intfs = main.params['TEST']['intfs']
+ # NOTE: May need to configure interface depending on topology
+ intfs = main.params[ 'TEST' ][ 'intfs' ]
- devices_json_str = main.ONOS1cli.devices()
- devices_json_obj = json.loads(devices_json_str)
+ devicesJsonStr = main.ONOS1cli.devices()
+ devicesJsonObj = json.loads( devicesJsonStr )
- device_id_list = []
+ deviceIdList = []
- #Obtain device id list in ONOS format.
- #They should already be in order (1,2,3,10,11,12,13, etc)
- for device in devices_json_obj:
- device_id_list.append(device['id'])
+ # Obtain device id list in ONOS format.
+ # They should already be in order ( 1,2,3,10,11,12,13, etc )
+ for device in devicesJsonObj:
+ deviceIdList.append( device[ 'id' ] )
- intent_reroute_lat_list = []
+ intentRerouteLatList = []
- for i in range(0, int(num_iter)):
- #add_point_intent(ingr_device, ingr_port,
- # egr_device, egr_port)
- if len(device_id_list) > 0:
- main.ONOS1cli.add_point_intent(
- device_id_list[0]+"/2", device_id_list[7]+"/2")
+ for i in range( 0, int( numIter ) ):
+ # addPointIntent( ingrDevice, ingrPort,
+ # egrDevice, egrPort )
+ if len( deviceIdList ) > 0:
+ main.ONOS1cli.addPointIntent(
+ deviceIdList[ 0 ] + "/2", deviceIdList[ 7 ] + "/2" )
else:
- main.log.info("Failed to fetch devices from ONOS")
+ main.log.info( "Failed to fetch devices from ONOS" )
- time.sleep(5)
+ time.sleep( 5 )
- intents_str = main.ONOS1cli.intents(json_format=True)
- intents_obj = json.loads(intents_str)
- for intent in intents_obj:
- if intent['state'] == "INSTALLED":
- main.log.info("Intent installed successfully")
- intent_id = intent['id']
- main.log.info("Intent id: "+str(intent_id))
+ intentsStr = main.ONOS1cli.intents( jsonFormat=True )
+ intentsObj = json.loads( intentsStr )
+ for intent in intentsObj:
+ if intent[ 'state' ] == "INSTALLED":
+ main.log.info( "Intent installed successfully" )
+ intentId = intent[ 'id' ]
+ main.log.info( "Intent id: " + str( intentId ) )
else:
- #TODO: Add error handling
- main.log.info("Intent installation failed")
- intent_id = ""
-
- main.log.info("Disabling interface "+intfs)
- t0_system = time.time()*1000
+ # TODO: Add error handling
+ main.log.info( "Intent installation failed" )
+ intentId = ""
+
+ main.log.info( "Disabling interface " + intfs )
+ t0System = time.time() * 1000
main.Mininet1.handle.sendline(
- "sh ifconfig "+intfs+" down")
- main.Mininet1.handle.expect("mininet>")
+ "sh ifconfig " + intfs + " down" )
+ main.Mininet1.handle.expect( "mininet>" )
- #TODO: Check for correct intent reroute
- time.sleep(1)
+ # TODO: Check for correct intent reroute
+ time.sleep( 1 )
- #Obtain metrics from ONOS 1, 2, 3
- intents_json_str_1 = main.ONOS1cli.intents_events_metrics()
- intents_json_obj_1 = json.loads(intents_json_str_1)
- #Parse values from the json object
- intent_install_1 = \
- intents_json_obj_1[install_time]['value']
- intent_reroute_lat_1 = \
- int(intent_install_1) - int(t0_system)
-
- if cluster_count == 3:
- intents_json_str_2 = main.ONOS2cli.intents_events_metrics()
- intents_json_str_3 = main.ONOS3cli.intents_events_metrics()
+ # Obtain metrics from ONOS 1, 2, 3
+ intentsJsonStr1 = main.ONOS1cli.intentsEventsMetrics()
+ intentsJsonObj1 = json.loads( intentsJsonStr1 )
+ # Parse values from the json object
+ intentInstall1 = \
+ intentsJsonObj1[ installTime ][ 'value' ]
+ intentRerouteLat1 = \
+ int( intentInstall1 ) - int( t0System )
- intents_json_obj_2 = json.loads(intents_json_str_2)
- intents_json_obj_3 = json.loads(intents_json_str_3)
- intent_install_2 = \
- intents_json_obj_2[install_time]['value']
- intent_install_3 = \
- intents_json_obj_3[install_time]['value']
- intent_reroute_lat_2 = \
- int(intent_install_2) - int(t0_system)
- intent_reroute_lat_3 = \
- int(intent_install_3) - int(t0_system)
+ if clusterCount == 3:
+ intentsJsonStr2 = main.ONOS2cli.intentsEventsMetrics()
+ intentsJsonStr3 = main.ONOS3cli.intentsEventsMetrics()
+
+ intentsJsonObj2 = json.loads( intentsJsonStr2 )
+ intentsJsonObj3 = json.loads( intentsJsonStr3 )
+ intentInstall2 = \
+ intentsJsonObj2[ installTime ][ 'value' ]
+ intentInstall3 = \
+ intentsJsonObj3[ installTime ][ 'value' ]
+ intentRerouteLat2 = \
+ int( intentInstall2 ) - int( t0System )
+ intentRerouteLat3 = \
+ int( intentInstall3 ) - int( t0System )
else:
- intent_reroute_lat_2 = 0
- intent_reroute_lat_3 = 0
+ intentRerouteLat2 = 0
+ intentRerouteLat3 = 0
- if cluster_count == 5:
- intents_json_str_4 = main.ONOS4cli.intents_events_metrics()
- intents_json_str_5 = main.ONOS5cli.intents_events_metrics()
+ if clusterCount == 5:
+ intentsJsonStr4 = main.ONOS4cli.intentsEventsMetrics()
+ intentsJsonStr5 = main.ONOS5cli.intentsEventsMetrics()
- intents_json_obj_4 = json.loads(intents_json_str_4)
- intents_json_obj_5 = json.loads(intents_json_str_5)
- intent_install_4 = \
- intents_json_obj_4[install_time]['value']
- intent_install_5 = \
- intents_json_obj_5[install_time]['value']
- intent_reroute_lat_4 = \
- int(intent_install_4) - int(t0_system)
- intent_reroute_lat_5 = \
- int(intent_install_5) - int(t0_system)
+ intentsJsonObj4 = json.loads( intentsJsonStr4 )
+ intentsJsonObj5 = json.loads( intentsJsonStr5 )
+ intentInstall4 = \
+ intentsJsonObj4[ installTime ][ 'value' ]
+ intentInstall5 = \
+ intentsJsonObj5[ installTime ][ 'value' ]
+ intentRerouteLat4 = \
+ int( intentInstall4 ) - int( t0System )
+ intentRerouteLat5 = \
+ int( intentInstall5 ) - int( t0System )
else:
- intent_reroute_lat_4 = 0
- intent_reroute_lat_5 = 0
+ intentRerouteLat4 = 0
+ intentRerouteLat5 = 0
- if cluster_count == 7:
- intents_json_str_6 = main.ONOS6cli.intents_events_metrics()
- intents_json_str_7 = main.ONOS7cli.intents_events_metrics()
+ if clusterCount == 7:
+ intentsJsonStr6 = main.ONOS6cli.intentsEventsMetrics()
+ intentsJsonStr7 = main.ONOS7cli.intentsEventsMetrics()
- intents_json_obj_6 = json.loads(intents_json_str_6)
- intents_json_obj_7 = json.loads(intents_json_str_7)
- intent_install_6 = \
- intents_json_obj_6[install_time]['value']
- intent_install_7 = \
- intents_json_obj_7[install_time]['value']
- intent_reroute_lat_6 = \
- int(intent_install_6) - int(t0_system)
- intent_reroute_lat_7 = \
- int(intent_install_7) - int(t0_system)
+ intentsJsonObj6 = json.loads( intentsJsonStr6 )
+ intentsJsonObj7 = json.loads( intentsJsonStr7 )
+ intentInstall6 = \
+ intentsJsonObj6[ installTime ][ 'value' ]
+ intentInstall7 = \
+ intentsJsonObj7[ installTime ][ 'value' ]
+ intentRerouteLat6 = \
+ int( intentInstall6 ) - int( t0System )
+ intentRerouteLat7 = \
+ int( intentInstall7 ) - int( t0System )
else:
- intent_reroute_lat_6 = 0
- intent_reroute_lat_7 = 0
+ intentRerouteLat6 = 0
+ intentRerouteLat7 = 0
- intent_reroute_lat_avg = \
- (intent_reroute_lat_1 +
- intent_reroute_lat_2 +
- intent_reroute_lat_3 +
- intent_reroute_lat_4 +
- intent_reroute_lat_5 +
- intent_reroute_lat_6 +
- intent_reroute_lat_7) / cluster_count
-
- main.log.info("Intent reroute latency avg for iteration "+
- str(i)+": "+str(intent_reroute_lat_avg))
+ intentRerouteLatAvg = \
+ ( intentRerouteLat1 +
+ intentRerouteLat2 +
+ intentRerouteLat3 +
+ intentRerouteLat4 +
+ intentRerouteLat5 +
+ intentRerouteLat6 +
+ intentRerouteLat7 ) / clusterCount
- if intent_reroute_lat_avg > 0.0 and \
- intent_reroute_lat_avg < 1000 and i > num_ignore:
- intent_reroute_lat_list.append(intent_reroute_lat_avg)
+ main.log.info( "Intent reroute latency avg for iteration " +
+ str( i ) + ": " + str( intentRerouteLatAvg ) )
+
+ if intentRerouteLatAvg > 0.0 and \
+ intentRerouteLatAvg < 1000 and i > numIgnore:
+ intentRerouteLatList.append( intentRerouteLatAvg )
else:
- main.log.info("Intent reroute latency exceeded "+
- "threshold. Skipping iteration "+str(i))
+ main.log.info( "Intent reroute latency exceeded " +
+ "threshold. Skipping iteration " + str( i ) )
- main.log.info("Removing intents for next iteration")
- main.ONOS1cli.remove_intent(intent_id)
+ main.log.info( "Removing intents for next iteration" )
+ main.ONOS1cli.removeIntent( intentId )
- main.log.info("Bringing Mininet interface up for next "+
- "iteration")
+ main.log.info( "Bringing Mininet interface up for next " +
+ "iteration" )
main.Mininet1.handle.sendline(
- "sh ifconfig "+intfs+" up")
- main.Mininet1.handle.expect("mininet>")
+ "sh ifconfig " + intfs + " up" )
+ main.Mininet1.handle.expect( "mininet>" )
- if intent_reroute_lat_list:
- intent_reroute_lat_avg = sum(intent_reroute_lat_list) /\
- len(intent_reroute_lat_list)
+ if intentRerouteLatList:
+ intentRerouteLatAvg = sum( intentRerouteLatList ) /\
+ len( intentRerouteLatList )
else:
- main.log.report("Intent reroute test failed. Results NA")
- intent_reroute_lat_avg = "NA"
- #NOTE: fails test when list is empty
+ main.log.report( "Intent reroute test failed. Results NA" )
+ intentRerouteLatAvg = "NA"
+ # NOTE: fails test when list is empty
assertion = main.FALSE
-
- intent_reroute_lat_std = \
- round(numpy.std(intent_reroute_lat_list),1)
- #END ITERATION FOR LOOP
- main.log.report("Single intent reroute latency - ")
- main.log.report("Avg: "+str(intent_reroute_lat_avg)+" ms")
- main.log.report("Std Deviation: "+str(intent_reroute_lat_std)+" ms")
-
- utilities.assert_equals(expect=main.TRUE, actual=assertion,
+
+ intentRerouteLatStd = \
+ round( numpy.std( intentRerouteLatList ), 1 )
+ # END ITERATION FOR LOOP
+ main.log.report( "Single intent reroute latency - " )
+ main.log.report( "Avg: " + str( intentRerouteLatAvg ) + " ms" )
+ main.log.report(
+ "Std Deviation: " +
+ str( intentRerouteLatStd ) +
+ " ms" )
+
+ utilities.assert_equals(
+ expect=main.TRUE,
+ actual=assertion,
onpass="Single intent reroute latency test successful",
- onfail="Single intent reroute latency test failed")
-
- def CASE4(self, main):
- '''
+ onfail="Single intent reroute latency test failed" )
+
+ def CASE4( self, main ):
+ """
Batch intent install
- '''
-
+ """
import time
import json
import requests
import os
import numpy
- global cluster_count
+ global clusterCount
- ONOS1_ip = main.params['CTRL']['ip1']
- ONOS2_ip = main.params['CTRL']['ip2']
- ONOS3_ip = main.params['CTRL']['ip3']
- ONOS4_ip = main.params['CTRL']['ip4']
- ONOS5_ip = main.params['CTRL']['ip5']
- ONOS6_ip = main.params['CTRL']['ip6']
- ONOS7_ip = main.params['CTRL']['ip7']
+ ONOS1Ip = main.params[ 'CTRL' ][ 'ip1' ]
+ ONOS2Ip = main.params[ 'CTRL' ][ 'ip2' ]
+ ONOS3Ip = main.params[ 'CTRL' ][ 'ip3' ]
+ ONOS4Ip = main.params[ 'CTRL' ][ 'ip4' ]
+ ONOS5Ip = main.params[ 'CTRL' ][ 'ip5' ]
+ ONOS6Ip = main.params[ 'CTRL' ][ 'ip6' ]
+ ONOS7Ip = main.params[ 'CTRL' ][ 'ip7' ]
assertion = main.TRUE
- ONOS_ip_list = []
- for i in range(1, 8):
- ONOS_ip_list.append(main.params['CTRL']['ip'+str(i)])
+ ONOSIpList = []
+ for i in range( 1, 8 ):
+ ONOSIpList.append( main.params[ 'CTRL' ][ 'ip' + str( i ) ] )
- ONOS_user = main.params['CTRL']['user']
+ ONOSUser = main.params[ 'CTRL' ][ 'user' ]
- default_sw_port = main.params['CTRL']['port1']
-
- batch_intent_size = main.params['TEST']['batchIntentSize']
- batch_thresh_min = int(main.params['TEST']['batchThresholdMin'])
- batch_thresh_max = int(main.params['TEST']['batchThresholdMax'])
+ defaultSwPort = main.params[ 'CTRL' ][ 'port1' ]
- #number of iterations of case
- num_iter = main.params['TEST']['numIter']
- num_ignore = int(main.params['TEST']['numIgnore'])
- num_switch = int(main.params['TEST']['numSwitch'])
- n_thread = main.params['TEST']['numMult']
- #n_thread = 105
+ batchIntentSize = int( main.params[ 'TEST' ][ 'batchIntentSize' ] )
+ batchThreshMin = int( main.params[ 'TEST' ][ 'batchThresholdMin' ] )
+ batchThreshMax = int( main.params[ 'TEST' ][ 'batchThresholdMax' ] )
- #Switch assignment NOTE: hardcoded
- if cluster_count == 1:
- for i in range(1, num_switch+1):
- main.Mininet1.assign_sw_controller(
- sw=str(i),
- ip1=ONOS1_ip,
- port1=default_sw_port)
- if cluster_count == 3:
- for i in range(1, 3):
- main.Mininet1.assign_sw_controller(
- sw=str(i),
- ip1=ONOS1_ip,
- port1=default_sw_port)
- for i in range(3, 6):
- main.Mininet1.assign_sw_controller(
- sw=str(i),
- ip1=ONOS2_ip,
- port1=default_sw_port)
- for i in range(6, 9):
- main.Mininet1.assign_sw_controller(
- sw=str(i),
- ip1=ONOS3_ip,
- port1=default_sw_port)
- if cluster_count == 5:
- main.Mininet1.assign_sw_controller(
- sw="1",
- ip1=ONOS1_ip,
- port1=default_sw_port)
- main.Mininet1.assign_sw_controller(
- sw="2",
- ip1=ONOS2_ip,
- port1=default_sw_port)
- for i in range(3, 6):
- main.Mininet1.assign_sw_controller(
- sw=str(i),
- ip1=ONOS3_ip,
- port1=default_sw_port)
- main.Mininet1.assign_sw_controller(
- sw="6",
- ip1=ONOS4_ip,
- port1=default_sw_port)
- main.Mininet1.assign_sw_controller(
- sw="7",
- ip1=ONOS5_ip,
- port1=default_sw_port)
- main.Mininet1.assign_sw_controller(
- sw="8",
- ip1=ONOS5_ip,
- port1=default_sw_port)
-
- if cluster_count == 7:
- for i in range(1,9):
+ # number of iterations of case
+ numIter = main.params[ 'TEST' ][ 'numIter' ]
+ numIgnore = int( main.params[ 'TEST' ][ 'numIgnore' ] )
+ numSwitch = int( main.params[ 'TEST' ][ 'numSwitch' ] )
+ nThread = main.params[ 'TEST' ][ 'numMult' ]
+        # nThread = 105
+
+ # Switch assignment NOTE: hardcoded
+ if clusterCount == 1:
+ for i in range( 1, numSwitch + 1 ):
+ main.Mininet1.assignSwController(
+ sw=str( i ),
+ ip1=ONOS1Ip,
+ port1=defaultSwPort )
+ if clusterCount == 3:
+ for i in range( 1, 3 ):
+ main.Mininet1.assignSwController(
+ sw=str( i ),
+ ip1=ONOS1Ip,
+ port1=defaultSwPort )
+ for i in range( 3, 6 ):
+ main.Mininet1.assignSwController(
+ sw=str( i ),
+ ip1=ONOS2Ip,
+ port1=defaultSwPort )
+ for i in range( 6, 9 ):
+ main.Mininet1.assignSwController(
+ sw=str( i ),
+ ip1=ONOS3Ip,
+ port1=defaultSwPort )
+ if clusterCount == 5:
+ main.Mininet1.assignSwController(
+ sw="1",
+ ip1=ONOS1Ip,
+ port1=defaultSwPort )
+ main.Mininet1.assignSwController(
+ sw="2",
+ ip1=ONOS2Ip,
+ port1=defaultSwPort )
+ for i in range( 3, 6 ):
+ main.Mininet1.assignSwController(
+ sw=str( i ),
+ ip1=ONOS3Ip,
+ port1=defaultSwPort )
+ main.Mininet1.assignSwController(
+ sw="6",
+ ip1=ONOS4Ip,
+ port1=defaultSwPort )
+ main.Mininet1.assignSwController(
+ sw="7",
+ ip1=ONOS5Ip,
+ port1=defaultSwPort )
+ main.Mininet1.assignSwController(
+ sw="8",
+ ip1=ONOS5Ip,
+ port1=defaultSwPort )
+
+ if clusterCount == 7:
+ for i in range( 1, 9 ):
if i < 8:
- main.Mininet1.assign_sw_controller(
- sw=str(i),
- ip1=ONOS_ip_list[i-1],
- port1=default_sw_port)
- elif i >= 8:
- main.Mininet1.assign_sw_controller(
- sw=str(i),
- ip1=ONOS_ip_list[6],
- port1=default_sw_port)
+ main.Mininet1.assignSwController(
+ sw=str( i ),
+ ip1=ONOSIpList[ i - 1 ],
+ port1=defaultSwPort )
+ elif i >= 8:
+ main.Mininet1.assignSwController(
+ sw=str( i ),
+ ip1=ONOSIpList[ 6 ],
+ port1=defaultSwPort )
- time.sleep(20)
+ time.sleep( 20 )
- main.log.report("Batch intent installation test of "+
- batch_intent_size +" intent(s)")
+ main.log.report( "Batch intent installation test of " +
+ str( batchIntentSize ) + " intent(s)" )
- batch_result_list = []
+ batchResultList = []
- main.log.info("Getting list of available devices")
- device_id_list = []
- json_str = main.ONOS1cli.devices()
- json_obj = json.loads(json_str)
- for device in json_obj:
- device_id_list.append(device['id'])
+ main.log.info( "Getting list of available devices" )
+ deviceIdList = []
+ jsonStr = main.ONOS1cli.devices()
+ jsonObj = json.loads( jsonStr )
+ for device in jsonObj:
+ deviceIdList.append( device[ 'id' ] )
- batch_install_lat = []
- batch_withdraw_lat = []
-
- #Max intent install measurement of all nodes
- max_install_lat = []
- max_withdraw_lat = []
- sleep_time = 10
-
- base_dir = "/tmp/"
+ batchInstallLat = []
+ batchWithdrawLat = []
- for batch in range(0, 5):
- for i in range(0, int(num_iter)):
- main.log.info("Pushing "+
- str(int(batch_intent_size)*int(n_thread))+
- " intents. Iteration "+str(i))
-
- for node in range(1, cluster_count+1):
- save_dir = base_dir + "batch_intent_"+str(node)+".txt"
- main.ONOSbench.push_test_intents_shell(
- device_id_list[0]+"/2",
- device_id_list[7]+"/2",
- int(batch_intent_size),
- save_dir, ONOS_ip_list[node-1],
- num_mult=n_thread, app_id=node)
-
- #Wait sufficient time for intents to start
- #installing
- time.sleep(sleep_time)
+ # Max intent install measurement of all nodes
+ maxInstallLat = []
+ maxWithdrawLat = []
+ sleepTime = 10
+
+ baseDir = "/tmp/"
+
+ for batch in range( 0, 5 ):
+ for i in range( 0, int( numIter ) ):
+ main.log.info( "Pushing " +
+ str( int( batchIntentSize ) * int( nThread ) ) +
+ " intents. Iteration " + str( i ) )
+
+ for node in range( 1, clusterCount + 1 ):
+ saveDir = baseDir + "batch_intent_" + str( node ) + ".txt"
+ main.ONOSbench.pushTestIntentsShell(
+ deviceIdList[ 0 ] + "/2",
+ deviceIdList[ 7 ] + "/2",
+ batchIntentSize,
+ saveDir, ONOSIpList[ node - 1 ],
+ numMult=nThread, appId=node )
+
+ # Wait sufficient time for intents to start
+ # installing
+ time.sleep( sleepTime )
intent = ""
counter = 300
- while len(intent) > 0 and counter > 0:
+ while len( intent ) > 0 and counter > 0:
main.ONOS1cli.handle.sendline(
- "intents | wc -l")
+ "intents | wc -l" )
main.ONOS1cli.handle.expect(
- "intents | wc -l")
+ "intents | wc -l" )
main.ONOS1cli.handle.expect(
- "onos>")
- intent_temp = main.ONOS1cli.handle.before()
+ "onos>" )
+ intentTemp = main.ONOS1cli.handle.before()
intent = main.ONOS1cli.intents()
- intent = json.loads(intent)
- counter = counter-1
- time.sleep(1)
+ intent = json.loads( intent )
+ counter = counter - 1
+ time.sleep( 1 )
- time.sleep(5)
+ time.sleep( 5 )
- for node in range(1, cluster_count+1):
- save_dir = base_dir + "batch_intent_"+str(node)+".txt"
- with open(save_dir) as f_onos:
- line_count = 0
- for line in f_onos:
- line = line[1:]
- line = line.split(": ")
- main.log.info("Line read: "+str(line))
- result = line[1].split(" ")[0]
- #TODO: add parameters before appending latency
- if line_count == 0:
- batch_install_lat.append(int(result))
- install_result = result
- elif line_count == 1:
- batch_withdraw_lat.append(int(result))
- withdraw_result = result
- line_count += 1
- main.log.info("Batch install latency for ONOS"+
- str(node)+" with "+\
- str(batch_intent_size) + "intents: "+\
- str(install_result)+" ms")
- main.log.info("Batch withdraw latency for ONOS"+
- str(node)+" with "+
- str(batch_intent_size) + "intents: "+
- str(withdraw_result)+" ms")
+ for node in range( 1, clusterCount + 1 ):
+ saveDir = baseDir + "batch_intent_" + str( node ) + ".txt"
+ with open( saveDir ) as fOnos:
+ lineCount = 0
+ for line in fOnos:
+ line = line[ 1: ]
+ line = line.split( ": " )
+ main.log.info( "Line read: " + str( line ) )
+ result = line[ 1 ].split( " " )[ 0 ]
+ # TODO: add parameters before appending latency
+ if lineCount == 0:
+ batchInstallLat.append( int( result ) )
+ installResult = result
+ elif lineCount == 1:
+ batchWithdrawLat.append( int( result ) )
+ withdrawResult = result
+ lineCount += 1
+ main.log.info( "Batch install latency for ONOS" +
+ str( node ) + " with " +
+ str( batchIntentSize ) + "intents: " +
+ str( installResult ) + " ms" )
+ main.log.info( "Batch withdraw latency for ONOS" +
+ str( node ) + " with " +
+ str( batchIntentSize ) + "intents: " +
+ str( withdrawResult ) + " ms" )
- if len(batch_install_lat) > 0 and int(i) > num_ignore:
- max_install_lat.append(max(batch_install_lat))
- elif len(batch_install_lat) == 0:
- #If I failed to read anything from the file,
- #increase the wait time before checking intents
- sleep_time += 30
- if len(batch_withdraw_lat) > 0 and int(i) > num_ignore:
- max_withdraw_lat.append(max(batch_withdraw_lat))
- batch_install_lat = []
- batch_withdraw_lat = []
-
- #Sleep in between iterations
- time.sleep(5)
-
- if max_install_lat:
- avg_install_lat = str(sum(max_install_lat) /\
- len(max_install_lat))
+ if len( batchInstallLat ) > 0 and int( i ) > numIgnore:
+ maxInstallLat.append( max( batchInstallLat ) )
+ elif len( batchInstallLat ) == 0:
+ # If I failed to read anything from the file,
+ # increase the wait time before checking intents
+ sleepTime += 30
+ if len( batchWithdrawLat ) > 0 and int( i ) > numIgnore:
+ maxWithdrawLat.append( max( batchWithdrawLat ) )
+ batchInstallLat = []
+ batchWithdrawLat = []
+
+ # Sleep in between iterations
+ time.sleep( 5 )
+
+ if maxInstallLat:
+ avgInstallLat = str( sum( maxInstallLat ) /
+ len( maxInstallLat ) )
else:
- avg_install_lat = "NA"
- main.log.report("Batch installation failed")
+ avgInstallLat = "NA"
+ main.log.report( "Batch installation failed" )
assertion = main.FALSE
- if max_withdraw_lat:
- avg_withdraw_lat = str(sum(max_withdraw_lat) /\
- len(max_withdraw_lat))
+ if maxWithdrawLat:
+ avgWithdrawLat = str( sum( maxWithdrawLat ) /
+ len( maxWithdrawLat ) )
else:
- avg_withdraw_lat = "NA"
- main.log.report("Batch withdraw failed")
+ avgWithdrawLat = "NA"
+ main.log.report( "Batch withdraw failed" )
assertion = main.FALSE
- main.log.report("Avg of batch installation latency "+
- "of size "+batch_intent_size+": "+
- str(avg_install_lat)+" ms")
- main.log.report("Std Deviation of batch installation latency "+
- ": "+str(numpy.std(max_install_lat))+" ms")
+ main.log.report( "Avg of batch installation latency " +
+ "of size " + str( batchIntentSize ) + ": " +
+ str( avgInstallLat ) + " ms" )
+ main.log.report( "Std Deviation of batch installation latency " +
+ ": " + str( numpy.std( maxInstallLat ) ) + " ms" )
- main.log.report("Avg of batch withdraw latency "+
- "of size "+batch_intent_size+": "+
- str(avg_withdraw_lat)+" ms")
- main.log.report("Std Deviation of batch withdraw latency "+
- ": "+str(numpy.std(max_withdraw_lat))+" ms")
-
- batch_intent_size += 1000
- main.log.report("Increasing batch intent size to "+
- batch_intent_size)
-
- #main.log.info("Removing all intents for next test case")
- #json_temp = main.ONOS1cli.intents(json_format=True)
- #json_obj_intents = json.loads(json_temp)
- #if json_obj_intents:
- # for intents in json_obj_intents:
- # temp_id = intents['id']
- #main.ONOS1cli.remove_intent(temp_id)
- # main.ONOS1cli.remove_intent(temp_id)
-
- utilities.assert_equals(expect=main.TRUE, actual=assertion,
+ main.log.report( "Avg of batch withdraw latency " +
+ "of size " + str( batchIntentSize ) + ": " +
+ str( avgWithdrawLat ) + " ms" )
+ main.log.report( "Std Deviation of batch withdraw latency " +
+ ": " +
+ str( numpy.std( maxWithdrawLat ) ) +
+ " ms" )
+
+ batchIntentSize = batchIntentSize + 1000
+ main.log.report( "Increasing batch intent size to " +
+                         str( batchIntentSize ) )
+
+    # main.log.info( "Removing all intents for next test case" )
+    # jsonTemp = main.ONOS1cli.intents( jsonFormat=True )
+    # jsonObjIntents = json.loads( jsonTemp )
+ # if jsonObjIntents:
+ # for intents in jsonObjIntents:
+ # tempId = intents[ 'id' ]
+ # main.ONOS1cli.removeIntent( tempId )
+ # main.ONOS1cli.removeIntent( tempId )
+
+ utilities.assert_equals(
+ expect=main.TRUE,
+ actual=assertion,
onpass="Batch intent install/withdraw test successful",
- onfail="Batch intent install/withdraw test failed")
+ onfail="Batch intent install/withdraw test failed" )
- def CASE5(self,main):
- '''
+ def CASE5( self, main ):
+ """
Increase number of nodes and initiate CLI
- '''
+ """
import time
import json
- ONOS1_ip = main.params['CTRL']['ip1']
- ONOS2_ip = main.params['CTRL']['ip2']
- ONOS3_ip = main.params['CTRL']['ip3']
- ONOS4_ip = main.params['CTRL']['ip4']
- ONOS5_ip = main.params['CTRL']['ip5']
- ONOS6_ip = main.params['CTRL']['ip6']
- ONOS7_ip = main.params['CTRL']['ip7']
+ ONOS1Ip = main.params[ 'CTRL' ][ 'ip1' ]
+ ONOS2Ip = main.params[ 'CTRL' ][ 'ip2' ]
+ ONOS3Ip = main.params[ 'CTRL' ][ 'ip3' ]
+ ONOS4Ip = main.params[ 'CTRL' ][ 'ip4' ]
+ ONOS5Ip = main.params[ 'CTRL' ][ 'ip5' ]
+ ONOS6Ip = main.params[ 'CTRL' ][ 'ip6' ]
+ ONOS7Ip = main.params[ 'CTRL' ][ 'ip7' ]
- global cluster_count
- cluster_count += 2
- main.log.report("Increasing cluster size to "+
- str(cluster_count))
+ global clusterCount
+ clusterCount += 2
+ main.log.report( "Increasing cluster size to " +
+ str( clusterCount ) )
- install_result = main.FALSE
+ installResult = main.FALSE
- if cluster_count == 3:
- install_result1 = \
- main.ONOSbench.onos_install(node=ONOS2_ip)
- install_result2 = \
- main.ONOSbench.onos_install(node=ONOS3_ip)
- time.sleep(5)
+ if clusterCount == 3:
+ installResult1 = \
+ main.ONOSbench.onosInstall( node=ONOS2Ip )
+ installResult2 = \
+ main.ONOSbench.onosInstall( node=ONOS3Ip )
+ time.sleep( 5 )
- main.log.info("Starting ONOS CLI")
- main.ONOS2cli.start_onos_cli(ONOS2_ip)
- main.ONOS3cli.start_onos_cli(ONOS3_ip)
+ main.log.info( "Starting ONOS CLI" )
+ main.ONOS2cli.startOnosCli( ONOS2Ip )
+ main.ONOS3cli.startOnosCli( ONOS3Ip )
- install_result = install_result1 and install_result2
+ installResult = installResult1 and installResult2
- if cluster_count == 5:
- main.log.info("Installing ONOS on node 4 and 5")
- install_result1 = \
- main.ONOSbench.onos_install(node=ONOS4_ip)
- install_result2 = \
- main.ONOSbench.onos_install(node=ONOS5_ip)
+ if clusterCount == 5:
+ main.log.info( "Installing ONOS on node 4 and 5" )
+ installResult1 = \
+ main.ONOSbench.onosInstall( node=ONOS4Ip )
+ installResult2 = \
+ main.ONOSbench.onosInstall( node=ONOS5Ip )
- main.log.info("Starting ONOS CLI")
- main.ONOS4cli.start_onos_cli(ONOS4_ip)
- main.ONOS5cli.start_onos_cli(ONOS5_ip)
+ main.log.info( "Starting ONOS CLI" )
+ main.ONOS4cli.startOnosCli( ONOS4Ip )
+ main.ONOS5cli.startOnosCli( ONOS5Ip )
- install_result = install_result1 and install_result2
+ installResult = installResult1 and installResult2
- if cluster_count == 7:
- main.log.info("Installing ONOS on node 6 and 7")
- install_result1 = \
- main.ONOSbench.onos_install(node=ONOS6_ip)
- install_result2 = \
- main.ONOSbench.onos_install(node=ONOS7_ip)
+ if clusterCount == 7:
+ main.log.info( "Installing ONOS on node 6 and 7" )
+ installResult1 = \
+ main.ONOSbench.onosInstall( node=ONOS6Ip )
+ installResult2 = \
+ main.ONOSbench.onosInstall( node=ONOS7Ip )
- main.log.info("Starting ONOS CLI")
- main.ONOS6cli.start_onos_cli(ONOS6_ip)
- main.ONOS7cli.start_onos_cli(ONOS7_ip)
+ main.log.info( "Starting ONOS CLI" )
+ main.ONOS6cli.startOnosCli( ONOS6Ip )
+ main.ONOS7cli.startOnosCli( ONOS7Ip )
- install_result = install_result1 and install_result2
+ installResult = installResult1 and installResult2
- time.sleep(5)
+ time.sleep( 5 )
- if install_result == main.TRUE:
+ if installResult == main.TRUE:
assertion = main.TRUE
else:
assertion = main.FALSE
- utilities.assert_equals(expect=main.TRUE, actual=assertion,
- onpass="Scale out to "+str(cluster_count)+\
- " nodes successful",
- onfail="Scale out to "+str(cluster_count)+\
- " nodes failed")
-
- def CASE7(self, main):
- #TODO: Fix for scale-out scenario
+ utilities.assert_equals( expect=main.TRUE, actual=assertion,
+ onpass="Scale out to " + str( clusterCount ) +
+ " nodes successful",
+ onfail="Scale out to " + str( clusterCount ) +
+ " nodes failed" )
- '''
+ def CASE7( self, main ):
+ # TODO: Fix for scale-out scenario
+ """
Batch intent reroute latency
- '''
+ """
import time
import json
import requests
import os
import numpy
- global cluster_count
+ global clusterCount
- ONOS_ip_list = []
- for i in range(1, 8):
- ONOS_ip_list.append(main.params['CTRL']['ip'+str(i)])
+ ONOSIpList = []
+ for i in range( 1, 8 ):
+ ONOSIpList.append( main.params[ 'CTRL' ][ 'ip' + str( i ) ] )
- ONOS_user = main.params['CTRL']['user']
- default_sw_port = main.params['CTRL']['port1']
-
- batch_intent_size = main.params['TEST']['batchIntentSize']
- batch_thresh_min = int(main.params['TEST']['batchThresholdMin'])
- batch_thresh_max = int(main.params['TEST']['batchThresholdMax'])
- intfs = main.params['TEST']['intfs']
- install_time = main.params['JSON']['installedTime']
+ ONOSUser = main.params[ 'CTRL' ][ 'user' ]
+ defaultSwPort = main.params[ 'CTRL' ][ 'port1' ]
- #number of iterations of case
- num_iter = main.params['TEST']['numIter']
- num_ignore = int(main.params['TEST']['numIgnore'])
- num_switch = int(main.params['TEST']['numSwitch'])
- n_thread = main.params['TEST']['numMult']
+ batchIntentSize = main.params[ 'TEST' ][ 'batchIntentSize' ]
+ batchThreshMin = int( main.params[ 'TEST' ][ 'batchThresholdMin' ] )
+ batchThreshMax = int( main.params[ 'TEST' ][ 'batchThresholdMax' ] )
+ intfs = main.params[ 'TEST' ][ 'intfs' ]
+ installTime = main.params[ 'JSON' ][ 'installedTime' ]
- main.log.report("Batch intent installation test of "+
- batch_intent_size +" intents")
+ # number of iterations of case
+ numIter = main.params[ 'TEST' ][ 'numIter' ]
+ numIgnore = int( main.params[ 'TEST' ][ 'numIgnore' ] )
+ numSwitch = int( main.params[ 'TEST' ][ 'numSwitch' ] )
+ nThread = main.params[ 'TEST' ][ 'numMult' ]
- batch_result_list = []
+ main.log.report( "Batch intent installation test of " +
+ batchIntentSize + " intents" )
- time.sleep(10)
+ batchResultList = []
- main.log.info("Getting list of available devices")
- device_id_list = []
- json_str = main.ONOS1cli.devices()
- json_obj = json.loads(json_str)
- for device in json_obj:
- device_id_list.append(device['id'])
+ time.sleep( 10 )
- batch_install_lat = []
- batch_withdraw_lat = []
- sleep_time = 10
-
- base_dir = "/tmp/"
- max_install_lat = []
+ main.log.info( "Getting list of available devices" )
+ deviceIdList = []
+ jsonStr = main.ONOS1cli.devices()
+ jsonObj = json.loads( jsonStr )
+ for device in jsonObj:
+ deviceIdList.append( device[ 'id' ] )
- for i in range(0, int(num_iter)):
- main.log.info("Pushing "+
- str(int(batch_intent_size)*int(n_thread))+
- " intents. Iteration "+str(i))
+ batchInstallLat = []
+ batchWithdrawLat = []
+ sleepTime = 10
- main.ONOSbench.push_test_intents_shell(
- device_id_list[0]+"/2",
- device_id_list[7]+"/2",
- batch_intent_size, "/tmp/batch_install.txt",
- ONOS_ip_list[0], num_mult="1", app_id="1",
- report=False, options="--install")
- #main.ONOSbench.push_test_intents_shell(
+ baseDir = "/tmp/"
+ maxInstallLat = []
+
+ for i in range( 0, int( numIter ) ):
+ main.log.info( "Pushing " +
+ str( int( batchIntentSize ) * int( nThread ) ) +
+ " intents. Iteration " + str( i ) )
+
+ main.ONOSbench.pushTestIntentsShell(
+ deviceIdList[ 0 ] + "/2",
+ deviceIdList[ 7 ] + "/2",
+ batchIntentSize, "/tmp/batch_install.txt",
+ ONOSIpList[ 0 ], numMult="1", appId="1",
+ report=False, options="--install" )
+ # main.ONOSbench.pushTestIntentsShell(
# "of:0000000000001002/1",
# "of:0000000000002002/1",
# 133, "/tmp/temp2.txt", "10.128.174.2",
- # num_mult="6", app_id="2",report=False)
-
- #TODO: Check for installation success then proceed
- time.sleep(30)
-
- #NOTE: this interface is specific to
+ # numMult="6", appId="2",report=False )
+
+ # TODO: Check for installation success then proceed
+ time.sleep( 30 )
+
+ # NOTE: this interface is specific to
# topo-intentFlower.py topology
# reroute case.
- main.log.info("Disabling interface "+intfs)
+ main.log.info( "Disabling interface " + intfs )
main.Mininet1.handle.sendline(
- "sh ifconfig "+intfs+" down")
- t0_system = time.time()*1000
+ "sh ifconfig " + intfs + " down" )
+ t0System = time.time() * 1000
- #TODO: Wait sufficient time for intents to install
- time.sleep(10)
+ # TODO: Wait sufficient time for intents to install
+ time.sleep( 10 )
- #TODO: get intent installation time
-
- #Obtain metrics from ONOS 1, 2, 3
- intents_json_str_1 = main.ONOS1cli.intents_events_metrics()
- intents_json_obj_1 = json.loads(intents_json_str_1)
- #Parse values from the json object
- intent_install_1 = \
- intents_json_obj_1[install_time]['value']
- intent_reroute_lat_1 = \
- int(intent_install_1) - int(t0_system)
-
-
- if cluster_count == 3:
- intents_json_str_2 =\
- main.ONOS2cli.intents_events_metrics()
- intents_json_str_3 =\
- main.ONOS3cli.intents_events_metrics()
- intents_json_obj_2 = json.loads(intents_json_str_2)
- intents_json_obj_3 = json.loads(intents_json_str_3)
- intent_install_2 = \
- intents_json_obj_2[install_time]['value']
- intent_install_3 = \
- intents_json_obj_3[install_time]['value']
- intent_reroute_lat_2 = \
- int(intent_install_2) - int(t0_system)
- intent_reroute_lat_3 = \
- int(intent_install_3) - int(t0_system)
+ # TODO: get intent installation time
+
+ # Obtain metrics from ONOS 1, 2, 3
+ intentsJsonStr1 = main.ONOS1cli.intentsEventsMetrics()
+ intentsJsonObj1 = json.loads( intentsJsonStr1 )
+ # Parse values from the json object
+ intentInstall1 = \
+ intentsJsonObj1[ installTime ][ 'value' ]
+ intentRerouteLat1 = \
+ int( intentInstall1 ) - int( t0System )
+
+ if clusterCount == 3:
+ intentsJsonStr2 =\
+ main.ONOS2cli.intentsEventsMetrics()
+ intentsJsonStr3 =\
+ main.ONOS3cli.intentsEventsMetrics()
+ intentsJsonObj2 = json.loads( intentsJsonStr2 )
+ intentsJsonObj3 = json.loads( intentsJsonStr3 )
+ intentInstall2 = \
+ intentsJsonObj2[ installTime ][ 'value' ]
+ intentInstall3 = \
+ intentsJsonObj3[ installTime ][ 'value' ]
+ intentRerouteLat2 = \
+ int( intentInstall2 ) - int( t0System )
+ intentRerouteLat3 = \
+ int( intentInstall3 ) - int( t0System )
else:
- intent_reroute_lat_2 = 0
- intent_reroute_lat_3 = 0
+ intentRerouteLat2 = 0
+ intentRerouteLat3 = 0
- if cluster_count == 5:
- intents_json_str_4 =\
- main.ONOS4cli.intents_events_metrics()
- intents_json_str_5 =\
- main.ONOS5cli.intents_events_metrics()
- intents_json_obj_4 = json.loads(intents_json_str_4)
- intents_json_obj_5 = json.loads(intents_json_str_5)
- intent_install_4 = \
- intents_json_obj_4[install_time]['value']
- intent_install_5 = \
- intents_json_obj_5[install_time]['value']
- intent_reroute_lat_4 = \
- int(intent_install_4) - int(t0_system)
- intent_reroute_lat_5 = \
- int(intent_install_5) - int(t0_system)
+ if clusterCount == 5:
+ intentsJsonStr4 =\
+ main.ONOS4cli.intentsEventsMetrics()
+ intentsJsonStr5 =\
+ main.ONOS5cli.intentsEventsMetrics()
+ intentsJsonObj4 = json.loads( intentsJsonStr4 )
+ intentsJsonObj5 = json.loads( intentsJsonStr5 )
+ intentInstall4 = \
+ intentsJsonObj4[ installTime ][ 'value' ]
+ intentInstall5 = \
+ intentsJsonObj5[ installTime ][ 'value' ]
+ intentRerouteLat4 = \
+ int( intentInstall4 ) - int( t0System )
+ intentRerouteLat5 = \
+ int( intentInstall5 ) - int( t0System )
else:
- intent_reroute_lat_4 = 0
- intent_reroute_lat_5 = 0
+ intentRerouteLat4 = 0
+ intentRerouteLat5 = 0
- if cluster_count == 7:
- intents_json_str_6 =\
- main.ONOS6cli.intents_events_metrics()
- intents_json_str_7 =\
- main.ONOS7cli.intents_events_metrics()
- intents_json_obj_6 = json.loads(intents_json_str_6)
- intents_json_obj_7 = json.loads(intents_json_str_7)
- intent_install_6 = \
- intents_json_obj_6[install_time]['value']
- intent_install_7 = \
- intents_json_obj_7[install_time]['value']
- intent_reroute_lat_6 = \
- int(intent_install_6) - int(t0_system)
- intent_reroute_lat_7 = \
- int(intent_install_7) - int(t0_system)
+ if clusterCount == 7:
+ intentsJsonStr6 =\
+ main.ONOS6cli.intentsEventsMetrics()
+ intentsJsonStr7 =\
+ main.ONOS7cli.intentsEventsMetrics()
+ intentsJsonObj6 = json.loads( intentsJsonStr6 )
+ intentsJsonObj7 = json.loads( intentsJsonStr7 )
+ intentInstall6 = \
+ intentsJsonObj6[ installTime ][ 'value' ]
+ intentInstall7 = \
+ intentsJsonObj7[ installTime ][ 'value' ]
+ intentRerouteLat6 = \
+ int( intentInstall6 ) - int( t0System )
+ intentRerouteLat7 = \
+ int( intentInstall7 ) - int( t0System )
else:
- intent_reroute_lat_6 = 0
- intent_reroute_lat_7 = 0
+ intentRerouteLat6 = 0
+ intentRerouteLat7 = 0
- intent_reroute_lat_avg = \
- (intent_reroute_lat_1 +
- intent_reroute_lat_2 +
- intent_reroute_lat_3 +
- intent_reroute_lat_4 +
- intent_reroute_lat_5 +
- intent_reroute_lat_6 +
- intent_reroute_lat_7) / cluster_count
-
- main.log.info("Intent reroute latency avg for iteration "+
- str(i)+": "+str(intent_reroute_lat_avg))
- #TODO: Remove intents for next iteration
-
- time.sleep(5)
+ intentRerouteLatAvg = \
+ ( intentRerouteLat1 +
+ intentRerouteLat2 +
+ intentRerouteLat3 +
+ intentRerouteLat4 +
+ intentRerouteLat5 +
+ intentRerouteLat6 +
+ intentRerouteLat7 ) / clusterCount
- intents_str = main.ONOS1cli.intents()
- intents_json = json.loads(intents_str)
- for intents in intents_json:
- intent_id = intents['id']
- #TODO: make sure this removes all intents
- #print intent_id
- if intent_id:
- main.ONOS1cli.remove_intent(intent_id)
+ main.log.info( "Intent reroute latency avg for iteration " +
+ str( i ) + ": " + str( intentRerouteLatAvg ) )
+ # TODO: Remove intents for next iteration
+
+ time.sleep( 5 )
+
+ intentsStr = main.ONOS1cli.intents()
+ intentsJson = json.loads( intentsStr )
+ for intents in intentsJson:
+ intentId = intents[ 'id' ]
+ # TODO: make sure this removes all intents
+ # print intentId
+ if intentId:
+ main.ONOS1cli.removeIntent( intentId )
main.Mininet1.handle.sendline(
- "sh ifconfig "+intfs+" up")
-
- main.log.info("Intents removed and port back up")
+ "sh ifconfig " + intfs + " up" )
- def CASE9(self, main):
+ main.log.info( "Intents removed and port back up" )
+
+ def CASE9( self, main ):
count = 0
- sw_num1 = 1
- sw_num2 = 1
+ swNum1 = 1
+ swNum2 = 1
appid = 0
- port_num1 = 1
- port_num2 = 1
-
- time.sleep(30)
+ portNum1 = 1
+ portNum2 = 1
+
+ time.sleep( 30 )
while True:
- #main.ONOS1cli.push_test_intents(
+ # main.ONOS1cli.pushTestIntents(
#"of:0000000000001001/1",
#"of:0000000000002001/1",
- # 100, num_mult="10", app_id="1")
- #main.ONOS2cli.push_test_intents(
+ # 100, numMult="10", appId="1" )
+ # main.ONOS2cli.pushTestIntents(
# "of:0000000000001002/1",
# "of:0000000000002002/1",
- # 100, num_mult="10", app_id="2")
- #main.ONOS2cli.push_test_intents(
+ # 100, numMult="10", appId="2" )
+ # main.ONOS2cli.pushTestIntents(
# "of:0000000000001003/1",
# "of:0000000000002003/1",
- # 100, num_mult="10", app_id="3")
+ # 100, numMult="10", appId="3" )
count += 1
-
+
if count >= 100:
main.ONOSbench.handle.sendline(
- "onos 10.128.174.1 intents-events-metrics >>"+\
- " /tmp/metrics_intents_temp.txt &")
+ "onos 10.128.174.1 intents-events-metrics >>" +
+ " /tmp/metrics_intents_temp.txt &" )
count = 0
- arg1 = "of:000000000000100"+str(sw_num1)+"/"+str(port_num1)
- arg2 = "of:000000000000200"+str(sw_num2)+"/"+str(port_num2)
-
- sw_num1 += 1
+ arg1 = "of:000000000000100" + str( swNum1 ) + "/" + str( portNum1 )
+ arg2 = "of:000000000000200" + str( swNum2 ) + "/" + str( portNum2 )
- if sw_num1 > 7:
- sw_num1 = 1
- sw_num2 += 1
- if sw_num2 > 7:
+ swNum1 += 1
+
+ if swNum1 > 7:
+ swNum1 = 1
+ swNum2 += 1
+ if swNum2 > 7:
appid += 1
- if sw_num2 > 7:
- sw_num2 = 1
-
- main.ONOSbench.push_test_intents_shell(
+ if swNum2 > 7:
+ swNum2 = 1
+
+ main.ONOSbench.pushTestIntentsShell(
arg1,
- arg2,
+ arg2,
100, "/tmp/temp.txt", "10.128.174.1",
- num_mult="10", app_id=appid,report=False)
- #main.ONOSbench.push_test_intents_shell(
+ numMult="10", appId=appid, report=False )
+ # main.ONOSbench.pushTestIntentsShell(
# "of:0000000000001002/1",
# "of:0000000000002002/1",
# 133, "/tmp/temp2.txt", "10.128.174.2",
- # num_mult="6", app_id="2",report=False)
- #main.ONOSbench.push_test_intents_shell(
+ # numMult="6", appId="2",report=False )
+ # main.ONOSbench.pushTestIntentsShell(
# "of:0000000000001003/1",
# "of:0000000000002003/1",
# 133, "/tmp/temp3.txt", "10.128.174.3",
- # num_mult="6", app_id="3",report=False)
-
- time.sleep(0.2)
+ # numMult="6", appId="3",report=False )
+ time.sleep( 0.2 )
diff --git a/TestON/tests/TopoPerfNext/TopoPerfNext.params b/TestON/tests/TopoPerfNext/TopoPerfNext.params
index 8e31e62..45de48d 100644
--- a/TestON/tests/TopoPerfNext/TopoPerfNext.params
+++ b/TestON/tests/TopoPerfNext/TopoPerfNext.params
@@ -42,16 +42,17 @@
#pcap file located at /tmp/'capture_name'
<debugMode>off</debugMode>
<onosLogFile>/opt/onos/log/karaf*</onosLogFile>
+ <mci>off</mci>
- <topo_config_file>
+ <topoConfigFile>
single_topo_event_accumulator.cfg
- </topo_config_file>
- <topo_config_name>
+ </topoConfigFile>
+ <topoConfigName>
org.onlab.onos.net.topology.impl.DefaultTopologyProvider.cfg
- </topo_config_name>
+ </topoConfigName>
#Number of times to iterate each case
- <numIter>12</numIter>
+ <numIter>5</numIter>
<numSwitch>2</numSwitch>
#Number of iterations to ignore initially
<iterIgnore>2</iterIgnore>
diff --git a/TestON/tests/TopoPerfNext/TopoPerfNext.py b/TestON/tests/TopoPerfNext/TopoPerfNext.py
index b737f4c..12e53e5 100644
--- a/TestON/tests/TopoPerfNext/TopoPerfNext.py
+++ b/TestON/tests/TopoPerfNext/TopoPerfNext.py
@@ -1,1931 +1,1951 @@
-#TopoPerfNext
+# TopoPerfNext
#
-#Topology Performance test for ONOS-next
+# Topology Performance test for ONOS-next
#
-#andrew@onlab.us
+# andrew@onlab.us
#
-#If your machine does not come with numpy
-#run the following command:
-#sudo apt-get install python-numpy python-scipy
+# If your machine does not come with numpy
+# run the following command:
+# sudo apt-get install python-numpy python-scipy
import time
import sys
import os
import re
+
class TopoPerfNext:
- def __init__(self):
+
+ def __init__( self ):
self.default = ''
- def CASE1(self, main):
- '''
+ def CASE1( self, main ):
+ """
ONOS startup sequence
- '''
+ """
import time
-
- ## Global cluster count for scale-out purposes
- global cluster_count
- #Set initial cluster count
- cluster_count = 1
+
+ # Global cluster count for scale-out purposes
+ global clusterCount
+ # Set initial cluster count
+ clusterCount = 1
##
- cell_name = main.params['ENV']['cellName']
+ cellName = main.params[ 'ENV' ][ 'cellName' ]
- git_pull = main.params['GIT']['autoPull']
- checkout_branch = main.params['GIT']['checkout']
+ gitPull = main.params[ 'GIT' ][ 'autoPull' ]
+ checkoutBranch = main.params[ 'GIT' ][ 'checkout' ]
- ONOS1_ip = main.params['CTRL']['ip1']
- ONOS2_ip = main.params['CTRL']['ip2']
- ONOS3_ip = main.params['CTRL']['ip3']
+ ONOS1Ip = main.params[ 'CTRL' ][ 'ip1' ]
+ ONOS2Ip = main.params[ 'CTRL' ][ 'ip2' ]
+ ONOS3Ip = main.params[ 'CTRL' ][ 'ip3' ]
+ ONOS4Ip = main.params[ 'CTRL' ][ 'ip4' ]
+ ONOS5Ip = main.params[ 'CTRL' ][ 'ip5' ]
+ ONOS6Ip = main.params[ 'CTRL' ][ 'ip6' ]
+ ONOS7Ip = main.params[ 'CTRL' ][ 'ip7' ]
+
+ MN1Ip = main.params[ 'MN' ][ 'ip1' ]
+ BENCHIp = main.params[ 'BENCH' ][ 'ip' ]
+
+ topoCfgFile = main.params[ 'TEST' ][ 'topoConfigFile' ]
+ topoCfgName = main.params[ 'TEST' ][ 'topoConfigName' ]
+
+ mvnCleanInstall = main.params[ 'TEST' ][ 'mci' ]
- #### Hardcoded ONOS nodes particular to my env ####
- ONOS4_ip = "10.128.174.4"
- ONOS5_ip = "10.128.174.5"
- ONOS6_ip = "10.128.174.6"
- ONOS7_ip = "10.128.174.7"
- #### ####
+ main.case( "Setting up test environment" )
+ main.log.info( "Copying topology event accumulator config" +
+ " to ONOS /package/etc" )
+ main.ONOSbench.handle.sendline( "cp ~/" +
+ topoCfgFile +
+ " ~/ONOS/tools/package/etc/" +
+ topoCfgName )
+ main.ONOSbench.handle.expect( "\$" )
- MN1_ip = main.params['MN']['ip1']
- BENCH_ip = main.params['BENCH']['ip']
+ main.log.report( "Setting up test environment" )
- topo_cfg_file = main.params['TEST']['topo_config_file']
- topo_cfg_name = main.params['TEST']['topo_config_name']
-
- main.case("Setting up test environment")
- main.log.info("Copying topology event accumulator config"+\
- " to ONOS /package/etc")
- main.ONOSbench.handle.sendline("cp ~/"+\
- topo_cfg_file+\
- " ~/ONOS/tools/package/etc/"+\
- topo_cfg_name)
- main.ONOSbench.handle.expect("\$")
+ main.step( "Cleaning previously installed ONOS if any" )
+ main.ONOSbench.onosUninstall( nodeIp=ONOS2Ip )
+ main.ONOSbench.onosUninstall( nodeIp=ONOS3Ip )
+ main.ONOSbench.onosUninstall( nodeIp=ONOS4Ip )
+ main.ONOSbench.onosUninstall( nodeIp=ONOS5Ip )
+ main.ONOSbench.onosUninstall( nodeIp=ONOS6Ip )
+ #main.ONOSbench.onosUninstall( nodeIp=ONOS7Ip )
- main.log.report("Setting up test environment")
+ main.step( "Creating cell file" )
+ cellFileResult = main.ONOSbench.createCellFile(
+ BENCHIp, cellName, MN1Ip,
+ "onos-core,onos-app-metrics,onos-app-gui",
+ ONOS1Ip )
- main.step("Cleaning previously installed ONOS if any")
- main.ONOSbench.onos_uninstall(node_ip=ONOS2_ip)
- main.ONOSbench.onos_uninstall(node_ip=ONOS3_ip)
- main.ONOSbench.onos_uninstall(node_ip=ONOS4_ip)
- main.ONOSbench.onos_uninstall(node_ip=ONOS5_ip)
- main.ONOSbench.onos_uninstall(node_ip=ONOS6_ip)
- main.ONOSbench.onos_uninstall(node_ip=ONOS7_ip)
+ main.step( "Applying cell file to environment" )
+ cellApplyResult = main.ONOSbench.setCell( cellName )
+ verifyCellResult = main.ONOSbench.verifyCell()
- main.step("Creating cell file")
- cell_file_result = main.ONOSbench.create_cell_file(
- BENCH_ip, cell_name, MN1_ip,
- "onos-core,onos-app-metrics,onos-app-gui",
- ONOS1_ip)
-
- main.step("Applying cell file to environment")
- cell_apply_result = main.ONOSbench.set_cell(cell_name)
- verify_cell_result = main.ONOSbench.verify_cell()
-
- #NOTE: This step may be removed after proper
+ # NOTE: This step may be removed after proper
# copy cat log functionality
- main.step("Removing raft/copy-cat logs from ONOS nodes")
- main.ONOSbench.onos_remove_raft_logs()
- time.sleep(30)
+ main.step( "Removing raft/copy-cat logs from ONOS nodes" )
+ main.ONOSbench.onosRemoveRaftLogs()
+ time.sleep( 30 )
- main.step("Git checkout and pull "+checkout_branch)
- if git_pull == 'on':
- #checkout_result = \
- # main.ONOSbench.git_checkout(checkout_branch)
- checkout_result = main.TRUE
- pull_result = main.ONOSbench.git_pull()
+ main.step( "Git checkout and pull " + checkoutBranch )
+ if gitPull == 'on':
+ # checkoutResult = \
+ # main.ONOSbench.gitCheckout( checkoutBranch )
+ checkoutResult = main.TRUE
+ pullResult = main.ONOSbench.gitPull()
else:
- checkout_result = main.TRUE
- pull_result = main.TRUE
- main.log.info("Skipped git checkout and pull")
+ checkoutResult = main.TRUE
+ pullResult = main.TRUE
+ main.log.info( "Skipped git checkout and pull" )
- main.log.report("Commit information - ")
- main.ONOSbench.get_version(report=True)
+ main.log.report( "Commit information - " )
+ main.ONOSbench.getVersion( report=True )
- main.step("Using mvn clean & install")
- mvn_result = main.ONOSbench.clean_install()
- mvn_result = main.TRUE
+ main.step( "Using mvn clean & install" )
+ if mvnCleanInstall == 'on':
+ mvnResult = main.ONOSbench.cleanInstall()
+ elif mvnCleanInstall == 'off':
+            main.log.info( "mci turned off by settings" )
+ mvnResult = main.TRUE
- main.step("Set cell for ONOS cli env")
- main.ONOS1cli.set_cell(cell_name)
- #main.ONOS2cli.set_cell(cell_name)
- #main.ONOS3cli.set_cell(cell_name)
+ main.step( "Set cell for ONOS cli env" )
+ main.ONOS1cli.setCell( cellName )
+ # main.ONOS2cli.setCell( cellName )
+ # main.ONOS3cli.setCell( cellName )
- main.step("Creating ONOS package")
- package_result = main.ONOSbench.onos_package()
+ main.step( "Creating ONOS package" )
+ packageResult = main.ONOSbench.onosPackage()
- main.step("Installing ONOS package")
- install1_result = main.ONOSbench.onos_install(node=ONOS1_ip)
- #install2_result = main.ONOSbench.onos_install(node=ONOS2_ip)
- #install3_result = main.ONOSbench.onos_install(node=ONOS3_ip)
+ main.step( "Installing ONOS package" )
+ install1Result = main.ONOSbench.onosInstall( node=ONOS1Ip )
+ #install2Result = main.ONOSbench.onosInstall( node=ONOS2Ip )
+ #install3Result = main.ONOSbench.onosInstall( node=ONOS3Ip )
- time.sleep(10)
+ time.sleep( 10 )
- main.step("Start onos cli")
- cli1 = main.ONOS1cli.start_onos_cli(ONOS1_ip)
- #cli2 = main.ONOS2cli.start_onos_cli(ONOS2_ip)
- #cli3 = main.ONOS3cli.start_onos_cli(ONOS3_ip)
+ main.step( "Start onos cli" )
+ cli1 = main.ONOS1cli.startOnosCli( ONOS1Ip )
+ #cli2 = main.ONOS2cli.startOnosCli( ONOS2Ip )
+ #cli3 = main.ONOS3cli.startOnosCli( ONOS3Ip )
- utilities.assert_equals(expect=main.TRUE,
- actual= cell_file_result and cell_apply_result and\
- verify_cell_result and checkout_result and\
- pull_result and mvn_result and\
- install1_result, #and install2_result and\
- #install3_result,
- onpass="Test Environment setup successful",
- onfail="Failed to setup test environment")
+ utilities.assert_equals( expect=main.TRUE,
+ actual=cellFileResult and cellApplyResult and
+ verifyCellResult and checkoutResult and
+ pullResult and mvnResult and
+ install1Result, # and install2Result and
+ # install3Result,
+ onpass="Test Environment setup successful",
+ onfail="Failed to setup test environment" )
- def CASE2(self, main):
- '''
+ def CASE2( self, main ):
+ """
Assign s1 to ONOS1 and measure latency
-
+
There are 4 levels of latency measurements to this test:
- 1) End-to-end measurement: Complete end-to-end measurement
- from TCP (SYN/ACK) handshake to Graph change
- 2) OFP-to-graph measurement: 'ONOS processing' snippet of
+ 1 ) End-to-end measurement: Complete end-to-end measurement
+ from TCP ( SYN/ACK ) handshake to Graph change
+ 2 ) OFP-to-graph measurement: 'ONOS processing' snippet of
measurement from OFP Vendor message to Graph change
- 3) OFP-to-device measurement: 'ONOS processing without
+ 3 ) OFP-to-device measurement: 'ONOS processing without
graph change' snippet of measurement from OFP vendor
message to Device change timestamp
- 4) T0-to-device measurement: Measurement that includes
- the switch handshake to devices timestamp without
- the graph view change. (TCP handshake -> Device
- change)
- '''
+ 4 ) T0-to-device measurement: Measurement that includes
+ the switch handshake to devices timestamp without
+ the graph view change. ( TCP handshake -> Device
+ change )
+ """
import time
import subprocess
import json
import requests
import os
import numpy
- global cluster_count
+ global clusterCount
- ONOS1_ip = main.params['CTRL']['ip1']
- ONOS2_ip = main.params['CTRL']['ip2']
- ONOS3_ip = main.params['CTRL']['ip3']
- ONOS4_ip = main.params['CTRL']['ip4']
- ONOS5_ip = main.params['CTRL']['ip5']
- ONOS6_ip = main.params['CTRL']['ip6']
- ONOS7_ip = main.params['CTRL']['ip7']
+ ONOS1Ip = main.params[ 'CTRL' ][ 'ip1' ]
+ ONOS2Ip = main.params[ 'CTRL' ][ 'ip2' ]
+ ONOS3Ip = main.params[ 'CTRL' ][ 'ip3' ]
+ ONOS4Ip = main.params[ 'CTRL' ][ 'ip4' ]
+ ONOS5Ip = main.params[ 'CTRL' ][ 'ip5' ]
+ ONOS6Ip = main.params[ 'CTRL' ][ 'ip6' ]
+ ONOS7Ip = main.params[ 'CTRL' ][ 'ip7' ]
- ONOS_user = main.params['CTRL']['user']
+ ONOSUser = main.params[ 'CTRL' ][ 'user' ]
- default_sw_port = main.params['CTRL']['port1']
-
- #Number of iterations of case
- num_iter = main.params['TEST']['numIter']
- #Number of first 'x' iterations to ignore:
- iter_ignore = int(main.params['TEST']['iterIgnore'])
+ defaultSwPort = main.params[ 'CTRL' ][ 'port1' ]
- #Timestamp 'keys' for json metrics output.
- #These are subject to change, hence moved into params
- deviceTimestamp = main.params['JSON']['deviceTimestamp']
- graphTimestamp = main.params['JSON']['graphTimestamp']
+ # Number of iterations of case
+ numIter = main.params[ 'TEST' ][ 'numIter' ]
+ # Number of first 'x' iterations to ignore:
+ iterIgnore = int( main.params[ 'TEST' ][ 'iterIgnore' ] )
- debug_mode = main.params['TEST']['debugMode']
- onos_log = main.params['TEST']['onosLogFile']
+ # Timestamp 'keys' for json metrics output.
+ # These are subject to change, hence moved into params
+ deviceTimestamp = main.params[ 'JSON' ][ 'deviceTimestamp' ]
+ graphTimestamp = main.params[ 'JSON' ][ 'graphTimestamp' ]
- #Threshold for the test
- threshold_str = main.params['TEST']['singleSwThreshold']
- threshold_obj = threshold_str.split(",")
- threshold_min = int(threshold_obj[0])
- threshold_max = int(threshold_obj[1])
+ debugMode = main.params[ 'TEST' ][ 'debugMode' ]
+ onosLog = main.params[ 'TEST' ][ 'onosLogFile' ]
- #List of switch add latency collected from
- #all iterations
- latency_end_to_end_list = []
- latency_ofp_to_graph_list = []
- latency_ofp_to_device_list = []
- latency_t0_to_device_list = []
- latency_tcp_to_ofp_list = []
+ # Threshold for the test
+ thresholdStr = main.params[ 'TEST' ][ 'singleSwThreshold' ]
+ thresholdObj = thresholdStr.split( "," )
+ thresholdMin = int( thresholdObj[ 0 ] )
+ thresholdMax = int( thresholdObj[ 1 ] )
- #Directory/file to store tshark results
- tshark_of_output = "/tmp/tshark_of_topo.txt"
- tshark_tcp_output = "/tmp/tshark_tcp_topo.txt"
+ # List of switch add latency collected from
+ # all iterations
+ latencyEndToEndList = []
+ latencyOfpToGraphList = []
+ latencyOfpToDeviceList = []
+ latencyT0ToDeviceList = []
+ latencyTcpToOfpList = []
- #String to grep in tshark output
- tshark_tcp_string = "TCP 74 "+default_sw_port
- tshark_of_string = "OFP 86 Vendor"
-
- #Initialize assertion to TRUE
+ # Directory/file to store tshark results
+ tsharkOfOutput = "/tmp/tshark_of_topo.txt"
+ tsharkTcpOutput = "/tmp/tshark_tcp_topo.txt"
+
+ # String to grep in tshark output
+ tsharkTcpString = "TCP 74 " + defaultSwPort
+ tsharkOfString = "OFP 86 Vendor"
+
+ # Initialize assertion to TRUE
assertion = main.TRUE
-
- local_time = time.strftime('%x %X')
- local_time = local_time.replace("/","")
- local_time = local_time.replace(" ","_")
- local_time = local_time.replace(":","")
- if debug_mode == 'on':
- main.ONOS1.tshark_pcap("eth0",
- "/tmp/single_sw_lat_pcap_"+local_time)
- main.log.info("Debug mode is on")
+ localTime = time.strftime( '%x %X' )
+ localTime = localTime.replace( "/", "" )
+ localTime = localTime.replace( " ", "_" )
+ localTime = localTime.replace( ":", "" )
+ if debugMode == 'on':
+ main.ONOS1.tsharkPcap( "eth0",
+ "/tmp/single_sw_lat_pcap_" + localTime )
- main.log.report("Latency of adding one switch to controller")
- main.log.report("First "+str(iter_ignore)+" iterations ignored"+
- " for jvm warmup time")
- main.log.report("Total iterations of test: "+str(num_iter))
+ main.log.info( "Debug mode is on" )
- for i in range(0, int(num_iter)):
- main.log.info("Starting tshark capture")
+ main.log.report( "Latency of adding one switch to controller" )
+ main.log.report( "First " + str( iterIgnore ) + " iterations ignored" +
+ " for jvm warmup time" )
+ main.log.report( "Total iterations of test: " + str( numIter ) )
- #* TCP [ACK, SYN] is used as t0_a, the
- # very first "exchange" between ONOS and
+ for i in range( 0, int( numIter ) ):
+ main.log.info( "Starting tshark capture" )
+
+ #* TCP [ ACK, SYN ] is used as t0A, the
+ # very first "exchange" between ONOS and
# the switch for end-to-end measurement
- #* OFP [Stats Reply] is used for t0_b
+ #* OFP [ Stats Reply ] is used for t0B
# the very last OFP message between ONOS
# and the switch for ONOS measurement
- main.ONOS1.tshark_grep(tshark_tcp_string,
- tshark_tcp_output)
- main.ONOS1.tshark_grep(tshark_of_string,
- tshark_of_output)
+ main.ONOS1.tsharkGrep( tsharkTcpString,
+ tsharkTcpOutput )
+ main.ONOS1.tsharkGrep( tsharkOfString,
+ tsharkOfOutput )
- #Wait and ensure tshark is started and
- #capturing
- time.sleep(10)
+ # Wait and ensure tshark is started and
+ # capturing
+ time.sleep( 10 )
- main.log.info("Assigning s1 to controller")
+ main.log.info( "Assigning s1 to controller" )
- main.Mininet1.assign_sw_controller(sw="1",
- ip1=ONOS1_ip, port1=default_sw_port)
+ main.Mininet1.assignSwController(
+ sw="1",
+ ip1=ONOS1Ip,
+ port1=defaultSwPort )
- #Wait and ensure switch is assigned
- #before stopping tshark
- time.sleep(30)
-
- main.log.info("Stopping all Tshark processes")
- main.ONOS1.stop_tshark()
+ # Wait and ensure switch is assigned
+ # before stopping tshark
+ time.sleep( 30 )
- #tshark output is saved in ONOS. Use subprocess
- #to copy over files to TestON for parsing
- main.log.info("Copying over tshark files")
-
- #TCP CAPTURE ****
- #Copy the tshark output from ONOS machine to
- #TestON machine in tshark_tcp_output directory>file
- os.system("scp "+ONOS_user+"@"+ONOS1_ip+":"+
- tshark_tcp_output+" /tmp/")
- tcp_file = open(tshark_tcp_output, 'r')
- temp_text = tcp_file.readline()
- temp_text = temp_text.split(" ")
+ main.log.info( "Stopping all Tshark processes" )
+ main.ONOS1.stopTshark()
- main.log.info("Object read in from TCP capture: "+
- str(temp_text))
- if len(temp_text) > 1:
- t0_tcp = float(temp_text[1])*1000.0
+ # tshark output is saved in ONOS. Use subprocess
+ # to copy over files to TestON for parsing
+ main.log.info( "Copying over tshark files" )
+
+ # TCP CAPTURE ****
+ # Copy the tshark output from ONOS machine to
+ # TestON machine in tsharkTcpOutput directory>file
+ os.system( "scp " + ONOSUser + "@" + ONOS1Ip + ":" +
+ tsharkTcpOutput + " /tmp/" )
+ tcpFile = open( tsharkTcpOutput, 'r' )
+ tempText = tcpFile.readline()
+ tempText = tempText.split( " " )
+
+ main.log.info( "Object read in from TCP capture: " +
+ str( tempText ) )
+ if len( tempText ) > 1:
+ t0Tcp = float( tempText[ 1 ] ) * 1000.0
else:
- main.log.error("Tshark output file for TCP"+
- " returned unexpected results")
- t0_tcp = 0
+ main.log.error( "Tshark output file for TCP" +
+ " returned unexpected results" )
+ t0Tcp = 0
assertion = main.FALSE
-
- tcp_file.close()
+
+ tcpFile.close()
#****************
- #OF CAPTURE ****
- os.system("scp "+ONOS_user+"@"+ONOS1_ip+":"+
- tshark_of_output+" /tmp/")
- of_file = open(tshark_of_output, 'r')
-
- line_ofp = ""
- #Read until last line of file
+ # OF CAPTURE ****
+ os.system( "scp " + ONOSUser + "@" + ONOS1Ip + ":" +
+ tsharkOfOutput + " /tmp/" )
+ ofFile = open( tsharkOfOutput, 'r' )
+
+ lineOfp = ""
+ # Read until last line of file
while True:
- temp_text = of_file.readline()
- if temp_text !='':
- line_ofp = temp_text
+ tempText = ofFile.readline()
+ if tempText != '':
+ lineOfp = tempText
else:
- break
- obj = line_ofp.split(" ")
-
- main.log.info("Object read in from OFP capture: "+
- str(line_ofp))
-
- if len(line_ofp) > 1:
- t0_ofp = float(obj[1])*1000.0
+ break
+ obj = lineOfp.split( " " )
+
+ main.log.info( "Object read in from OFP capture: " +
+ str( lineOfp ) )
+
+ if len( lineOfp ) > 1:
+ t0Ofp = float( obj[ 1 ] ) * 1000.0
else:
- main.log.error("Tshark output file for OFP"+
- " returned unexpected results")
- t0_ofp = 0
+ main.log.error( "Tshark output file for OFP" +
+ " returned unexpected results" )
+ t0Ofp = 0
assertion = main.FALSE
-
- of_file.close()
+
+ ofFile.close()
#****************
-
- json_str_1 = main.ONOS1cli.topology_events_metrics()
- #Initialize scale-out variables
- json_str_2 = ""
- json_str_3 = ""
- json_str_4 = ""
- json_str_5 = ""
- json_str_6 = ""
- json_str_7 = ""
- json_obj_1 = json.loads(json_str_1)
- #Initialize scale-out variables
- json_obj_2 = ""
- json_obj_3 = ""
- json_obj_4 = ""
- json_obj_5 = ""
- json_obj_6 = ""
- json_obj_7 = ""
-
- #Obtain graph timestamp. This timestsamp captures
- #the epoch time at which the topology graph was updated.
- graph_timestamp_1 = \
- json_obj_1[graphTimestamp]['value']
- #Obtain device timestamp. This timestamp captures
- #the epoch time at which the device event happened
- device_timestamp_1 = \
- json_obj_1[deviceTimestamp]['value']
-
- #t0 to device processing latency
- delta_device_1 = int(device_timestamp_1) - int(t0_tcp)
-
- #t0 to graph processing latency (end-to-end)
- delta_graph_1 = int(graph_timestamp_1) - int(t0_tcp)
-
- #ofp to graph processing latency (ONOS processing)
- delta_ofp_graph_1 = int(graph_timestamp_1) - int(t0_ofp)
-
- #ofp to device processing latency (ONOS processing)
- delta_ofp_device_1 = float(device_timestamp_1) - float(t0_ofp)
+ jsonStr1 = main.ONOS1cli.topologyEventsMetrics()
+ # Initialize scale-out variables
+ jsonStr2 = ""
+ jsonStr3 = ""
+ jsonStr4 = ""
+ jsonStr5 = ""
+ jsonStr6 = ""
+ jsonStr7 = ""
- #TODO: Create even cluster number events
+ jsonObj1 = json.loads( jsonStr1 )
+ # Initialize scale-out variables
+ jsonObj2 = ""
+ jsonObj3 = ""
+ jsonObj4 = ""
+ jsonObj5 = ""
+ jsonObj6 = ""
+ jsonObj7 = ""
- #Include scale-out measurements when applicable
- if cluster_count >= 3:
- json_str_2 = main.ONOS2cli.topology_events_metrics()
- json_str_3 = main.ONOS3cli.topology_events_metrics()
- json_obj_2 = json.loads(json_str_2)
- json_obj_3 = json.loads(json_str_3)
- graph_timestamp_2 = \
- json_obj_2[graphTimestamp]['value']
- graph_timestamp_3 = \
- json_obj_3[graphTimestamp]['value']
- device_timestamp_2 = \
- json_obj_2[deviceTimestamp]['value']
- device_timestamp_3 = \
- json_obj_3[deviceTimestamp]['value']
- delta_device_2 = int(device_timestamp_2) - int(t0_tcp)
- delta_device_3 = int(device_timestamp_3) - int(t0_tcp)
- delta_graph_2 = int(graph_timestamp_2) - int(t0_tcp)
- delta_graph_3 = int(graph_timestamp_3) - int(t0_tcp)
- delta_ofp_graph_2 = int(graph_timestamp_2) - int(t0_ofp)
- delta_ofp_graph_3 = int(graph_timestamp_3) - int(t0_ofp)
- delta_ofp_device_2 = float(device_timestamp_2) -\
- float(t0_ofp)
- delta_ofp_device_3 = float(device_timestamp_3) -\
- float(t0_ofp)
+            # Obtain graph timestamp. This timestamp captures
+ # the epoch time at which the topology graph was updated.
+ graphTimestamp1 = \
+ jsonObj1[ graphTimestamp ][ 'value' ]
+ # Obtain device timestamp. This timestamp captures
+ # the epoch time at which the device event happened
+ deviceTimestamp1 = \
+ jsonObj1[ deviceTimestamp ][ 'value' ]
+
+ # t0 to device processing latency
+ deltaDevice1 = int( deviceTimestamp1 ) - int( t0Tcp )
+
+ # t0 to graph processing latency ( end-to-end )
+ deltaGraph1 = int( graphTimestamp1 ) - int( t0Tcp )
+
+ # ofp to graph processing latency ( ONOS processing )
+ deltaOfpGraph1 = int( graphTimestamp1 ) - int( t0Ofp )
+
+ # ofp to device processing latency ( ONOS processing )
+ deltaOfpDevice1 = float( deviceTimestamp1 ) - float( t0Ofp )
+
+ # TODO: Create even cluster number events
+
+ # Include scale-out measurements when applicable
+ if clusterCount >= 3:
+ jsonStr2 = main.ONOS2cli.topologyEventsMetrics()
+ jsonStr3 = main.ONOS3cli.topologyEventsMetrics()
+ jsonObj2 = json.loads( jsonStr2 )
+ jsonObj3 = json.loads( jsonStr3 )
+ graphTimestamp2 = \
+ jsonObj2[ graphTimestamp ][ 'value' ]
+ graphTimestamp3 = \
+ jsonObj3[ graphTimestamp ][ 'value' ]
+ deviceTimestamp2 = \
+ jsonObj2[ deviceTimestamp ][ 'value' ]
+ deviceTimestamp3 = \
+ jsonObj3[ deviceTimestamp ][ 'value' ]
+ deltaDevice2 = int( deviceTimestamp2 ) - int( t0Tcp )
+ deltaDevice3 = int( deviceTimestamp3 ) - int( t0Tcp )
+ deltaGraph2 = int( graphTimestamp2 ) - int( t0Tcp )
+ deltaGraph3 = int( graphTimestamp3 ) - int( t0Tcp )
+ deltaOfpGraph2 = int( graphTimestamp2 ) - int( t0Ofp )
+ deltaOfpGraph3 = int( graphTimestamp3 ) - int( t0Ofp )
+ deltaOfpDevice2 = float( deviceTimestamp2 ) -\
+ float( t0Ofp )
+ deltaOfpDevice3 = float( deviceTimestamp3 ) -\
+ float( t0Ofp )
else:
- delta_device_2 = 0
- delta_device_3 = 0
- delta_graph_2 = 0
- delta_graph_3 = 0
- delta_ofp_graph_2 = 0
- delta_ofp_graph_3 = 0
- delta_ofp_device_2 = 0
- delta_ofp_device_3 = 0
+ deltaDevice2 = 0
+ deltaDevice3 = 0
+ deltaGraph2 = 0
+ deltaGraph3 = 0
+ deltaOfpGraph2 = 0
+ deltaOfpGraph3 = 0
+ deltaOfpDevice2 = 0
+ deltaOfpDevice3 = 0
- if cluster_count >= 5:
- json_str_4 = main.ONOS4cli.topology_events_metrics()
- json_str_5 = main.ONOS5cli.topology_events_metrics()
- json_obj_4 = json.loads(json_str_4)
- json_obj_5 = json.loads(json_str_5)
- graph_timestamp_4 = \
- json_obj_4[graphTimestamp]['value']
- graph_timestamp_5 = \
- json_obj_5[graphTimestamp]['value']
- device_timestamp_4 = \
- json_obj_4[deviceTimestamp]['value']
- device_timestamp_5 = \
- json_obj_5[deviceTimestamp]['value']
- delta_device_4 = int(device_timestamp_4) - int(t0_tcp)
- delta_device_5 = int(device_timestamp_5) - int(t0_tcp)
- delta_graph_4 = int(graph_timestamp_4) - int(t0_tcp)
- delta_graph_5 = int(graph_timestamp_5) - int(t0_tcp)
- delta_ofp_graph_4 = int(graph_timestamp_4) - int(t0_ofp)
- delta_ofp_graph_5 = int(graph_timestamp_5) - int(t0_ofp)
- delta_ofp_device_4 = float(device_timestamp_4) -\
- float(t0_ofp)
- delta_ofp_device_5 = float(device_timestamp_5) -\
- float(t0_ofp)
+ if clusterCount >= 5:
+ jsonStr4 = main.ONOS4cli.topologyEventsMetrics()
+ jsonStr5 = main.ONOS5cli.topologyEventsMetrics()
+ jsonObj4 = json.loads( jsonStr4 )
+ jsonObj5 = json.loads( jsonStr5 )
+ graphTimestamp4 = \
+ jsonObj4[ graphTimestamp ][ 'value' ]
+ graphTimestamp5 = \
+ jsonObj5[ graphTimestamp ][ 'value' ]
+ deviceTimestamp4 = \
+ jsonObj4[ deviceTimestamp ][ 'value' ]
+ deviceTimestamp5 = \
+ jsonObj5[ deviceTimestamp ][ 'value' ]
+ deltaDevice4 = int( deviceTimestamp4 ) - int( t0Tcp )
+ deltaDevice5 = int( deviceTimestamp5 ) - int( t0Tcp )
+ deltaGraph4 = int( graphTimestamp4 ) - int( t0Tcp )
+ deltaGraph5 = int( graphTimestamp5 ) - int( t0Tcp )
+ deltaOfpGraph4 = int( graphTimestamp4 ) - int( t0Ofp )
+ deltaOfpGraph5 = int( graphTimestamp5 ) - int( t0Ofp )
+ deltaOfpDevice4 = float( deviceTimestamp4 ) -\
+ float( t0Ofp )
+ deltaOfpDevice5 = float( deviceTimestamp5 ) -\
+ float( t0Ofp )
else:
- delta_device_4 = 0
- delta_device_5 = 0
- delta_graph_4 = 0
- delta_graph_5 = 0
- delta_ofp_graph_4 = 0
- delta_ofp_graph_5 = 0
- delta_ofp_device_4 = 0
- delta_ofp_device_5 = 0
+ deltaDevice4 = 0
+ deltaDevice5 = 0
+ deltaGraph4 = 0
+ deltaGraph5 = 0
+ deltaOfpGraph4 = 0
+ deltaOfpGraph5 = 0
+ deltaOfpDevice4 = 0
+ deltaOfpDevice5 = 0
- if cluster_count >= 7:
- json_str_6 = main.ONOS6cli.topology_events_metrics()
- json_str_7 = main.ONOS7cli.topology_events_metrics()
- json_obj_6 = json.loads(json_str_6)
- json_obj_7 = json.loads(json_str_7)
- graph_timestamp_6 = \
- json_obj_6[graphTimestamp]['value']
- graph_timestamp_7 = \
- json_obj_7[graphTimestamp]['value']
- device_timestamp_6 = \
- json_obj_6[deviceTimestamp]['value']
- device_timestamp_7 = \
- json_obj_7[deviceTimestamp]['value']
- delta_device_6 = int(device_timestamp_6) - int(t0_tcp)
- delta_device_7 = int(device_timestamp_7) - int(t0_tcp)
- delta_graph_6 = int(graph_timestamp_6) - int(t0_tcp)
- delta_graph_7 = int(graph_timestamp_7) - int(t0_tcp)
- delta_ofp_graph_6 = int(graph_timestamp_6) - int(t0_ofp)
- delta_ofp_graph_7 = int(graph_timestamp_7) - int(t0_ofp)
- delta_ofp_device_6 = float(device_timestamp_6) -\
- float(t0_ofp)
- delta_ofp_device_7 = float(device_timestamp_7) -\
- float(t0_ofp)
+ if clusterCount >= 7:
+ jsonStr6 = main.ONOS6cli.topologyEventsMetrics()
+ jsonStr7 = main.ONOS7cli.topologyEventsMetrics()
+ jsonObj6 = json.loads( jsonStr6 )
+ jsonObj7 = json.loads( jsonStr7 )
+ graphTimestamp6 = \
+ jsonObj6[ graphTimestamp ][ 'value' ]
+ graphTimestamp7 = \
+ jsonObj7[ graphTimestamp ][ 'value' ]
+ deviceTimestamp6 = \
+ jsonObj6[ deviceTimestamp ][ 'value' ]
+ deviceTimestamp7 = \
+ jsonObj7[ deviceTimestamp ][ 'value' ]
+ deltaDevice6 = int( deviceTimestamp6 ) - int( t0Tcp )
+ deltaDevice7 = int( deviceTimestamp7 ) - int( t0Tcp )
+ deltaGraph6 = int( graphTimestamp6 ) - int( t0Tcp )
+ deltaGraph7 = int( graphTimestamp7 ) - int( t0Tcp )
+ deltaOfpGraph6 = int( graphTimestamp6 ) - int( t0Ofp )
+ deltaOfpGraph7 = int( graphTimestamp7 ) - int( t0Ofp )
+ deltaOfpDevice6 = float( deviceTimestamp6 ) -\
+ float( t0Ofp )
+ deltaOfpDevice7 = float( deviceTimestamp7 ) -\
+ float( t0Ofp )
else:
- delta_device_6 = 0
- delta_device_7 = 0
- delta_graph_6 = 0
- delta_graph_7 = 0
- delta_ofp_graph_6 = 0
- delta_ofp_graph_7 = 0
- delta_ofp_device_6 = 0
- delta_ofp_device_7 = 0
+ deltaDevice6 = 0
+ deltaDevice7 = 0
+ deltaGraph6 = 0
+ deltaGraph7 = 0
+ deltaOfpGraph6 = 0
+ deltaOfpGraph7 = 0
+ deltaOfpDevice6 = 0
+ deltaOfpDevice7 = 0
- #Get average of delta from all instances
- avg_delta_device = \
- (int(delta_device_1)+\
- int(delta_device_2)+\
- int(delta_device_3)+\
- int(delta_device_4)+\
- int(delta_device_5)+\
- int(delta_device_6)+\
- int(delta_device_7)) / cluster_count
+ # Get average of delta from all instances
+ avgDeltaDevice = \
+ ( int( deltaDevice1 ) +
+ int( deltaDevice2 ) +
+ int( deltaDevice3 ) +
+ int( deltaDevice4 ) +
+ int( deltaDevice5 ) +
+ int( deltaDevice6 ) +
+ int( deltaDevice7 ) ) / clusterCount
- #Ensure avg delta meets the threshold before appending
- if avg_delta_device > 0.0 and avg_delta_device < 10000\
- and int(i) > iter_ignore:
- latency_t0_to_device_list.append(avg_delta_device)
+ # Ensure avg delta meets the threshold before appending
+ if avgDeltaDevice > 0.0 and avgDeltaDevice < 10000\
+ and int( i ) > iterIgnore:
+ latencyT0ToDeviceList.append( avgDeltaDevice )
else:
- main.log.info("Results for t0-to-device ignored"+\
- "due to excess in threshold / warmup iteration.")
+ main.log.info(
+                    "Results for t0-to-device ignored " +
+ "due to excess in threshold / warmup iteration." )
- #Get average of delta from all instances
- #TODO: use max delta graph
- #max_delta_graph = max(three)
- avg_delta_graph = \
- (int(delta_graph_1)+\
- int(delta_graph_2)+\
- int(delta_graph_3)+\
- int(delta_graph_4)+\
- int(delta_graph_5)+\
- int(delta_graph_6)+\
- int(delta_graph_7)) / cluster_count
+ # Get average of delta from all instances
+ # TODO: use max delta graph
+ #maxDeltaGraph = max( three )
+ avgDeltaGraph = \
+ ( int( deltaGraph1 ) +
+ int( deltaGraph2 ) +
+ int( deltaGraph3 ) +
+ int( deltaGraph4 ) +
+ int( deltaGraph5 ) +
+ int( deltaGraph6 ) +
+ int( deltaGraph7 ) ) / clusterCount
- #Ensure avg delta meets the threshold before appending
- if avg_delta_graph > 0.0 and avg_delta_graph < 10000\
- and int(i) > iter_ignore:
- latency_end_to_end_list.append(avg_delta_graph)
+ # Ensure avg delta meets the threshold before appending
+ if avgDeltaGraph > 0.0 and avgDeltaGraph < 10000\
+ and int( i ) > iterIgnore:
+ latencyEndToEndList.append( avgDeltaGraph )
else:
- main.log.info("Results for end-to-end ignored"+\
- "due to excess in threshold")
+                main.log.info( "Results for end-to-end ignored " +
+ "due to excess in threshold" )
-
- avg_delta_ofp_graph = \
- (int(delta_ofp_graph_1)+\
- int(delta_ofp_graph_2)+\
- int(delta_ofp_graph_3)+\
- int(delta_ofp_graph_4)+\
- int(delta_ofp_graph_5)+\
- int(delta_ofp_graph_6)+\
- int(delta_ofp_graph_7)) / cluster_count
-
- if avg_delta_ofp_graph > threshold_min \
- and avg_delta_ofp_graph < threshold_max\
- and int(i) > iter_ignore:
- latency_ofp_to_graph_list.append(avg_delta_ofp_graph)
- elif avg_delta_ofp_graph > (-10) and \
- avg_delta_ofp_graph < 0.0 and\
- int(i) > iter_ignore:
- main.log.info("Sub-millisecond result likely; "+
- "negative result was rounded to 0")
- #NOTE: Current metrics framework does not
- #support sub-millisecond accuracy. Therefore,
- #if the result is negative, we can reasonably
- #conclude sub-millisecond results and just
- #append the best rounded effort - 0 ms.
- latency_ofp_to_graph_list.append(0)
+ avgDeltaOfpGraph = \
+ ( int( deltaOfpGraph1 ) +
+ int( deltaOfpGraph2 ) +
+ int( deltaOfpGraph3 ) +
+ int( deltaOfpGraph4 ) +
+ int( deltaOfpGraph5 ) +
+ int( deltaOfpGraph6 ) +
+ int( deltaOfpGraph7 ) ) / clusterCount
+
+ if avgDeltaOfpGraph > thresholdMin \
+ and avgDeltaOfpGraph < thresholdMax\
+ and int( i ) > iterIgnore:
+ latencyOfpToGraphList.append( avgDeltaOfpGraph )
+ elif avgDeltaOfpGraph > ( -10 ) and \
+ avgDeltaOfpGraph < 0.0 and\
+ int( i ) > iterIgnore:
+ main.log.info( "Sub-millisecond result likely; " +
+ "negative result was rounded to 0" )
+ # NOTE: Current metrics framework does not
+ # support sub-millisecond accuracy. Therefore,
+ # if the result is negative, we can reasonably
+ # conclude sub-millisecond results and just
+ # append the best rounded effort - 0 ms.
+ latencyOfpToGraphList.append( 0 )
else:
- main.log.info("Results for ofp-to-graph "+\
- "ignored due to excess in threshold")
+ main.log.info( "Results for ofp-to-graph " +
+ "ignored due to excess in threshold" )
-
- avg_delta_ofp_device = \
- (float(delta_ofp_device_1)+\
- float(delta_ofp_device_2)+\
- float(delta_ofp_device_3)+\
- float(delta_ofp_device_4)+\
- float(delta_ofp_device_5)+\
- float(delta_ofp_device_6)+\
- float(delta_ofp_device_7)) / cluster_count
-
- #NOTE: ofp - delta measurements are occasionally negative
+ avgDeltaOfpDevice = \
+ ( float( deltaOfpDevice1 ) +
+ float( deltaOfpDevice2 ) +
+ float( deltaOfpDevice3 ) +
+ float( deltaOfpDevice4 ) +
+ float( deltaOfpDevice5 ) +
+ float( deltaOfpDevice6 ) +
+ float( deltaOfpDevice7 ) ) / clusterCount
+
+ # NOTE: ofp - delta measurements are occasionally negative
# due to system time misalignment.
- latency_ofp_to_device_list.append(avg_delta_ofp_device)
+ latencyOfpToDeviceList.append( avgDeltaOfpDevice )
- delta_ofp_tcp = int(t0_ofp) - int(t0_tcp)
- if delta_ofp_tcp > threshold_min \
- and delta_ofp_tcp < threshold_max and\
- int(i) > iter_ignore:
- latency_tcp_to_ofp_list.append(delta_ofp_tcp)
+ deltaOfpTcp = int( t0Ofp ) - int( t0Tcp )
+ if deltaOfpTcp > thresholdMin \
+ and deltaOfpTcp < thresholdMax and\
+ int( i ) > iterIgnore:
+ latencyTcpToOfpList.append( deltaOfpTcp )
else:
- main.log.info("Results fo tcp-to-ofp "+\
- "ignored due to excess in threshold")
+                main.log.info( "Results for tcp-to-ofp " +
+ "ignored due to excess in threshold" )
- #TODO:
- #Fetch logs upon threshold excess
+ # TODO:
+ # Fetch logs upon threshold excess
- main.log.info("ONOS1 delta end-to-end: "+
- str(delta_graph_1) + " ms")
+ main.log.info( "ONOS1 delta end-to-end: " +
+ str( deltaGraph1 ) + " ms" )
- main.log.info("ONOS1 delta OFP - graph: "+
- str(delta_ofp_graph_1) + " ms")
-
- main.log.info("ONOS1 delta device - t0: "+
- str(delta_device_1) + " ms")
-
- main.log.info("TCP to OFP delta: "+
- str(delta_ofp_tcp) + " ms")
+ main.log.info( "ONOS1 delta OFP - graph: " +
+ str( deltaOfpGraph1 ) + " ms" )
- main.step("Remove switch from controller")
- main.Mininet1.delete_sw_controller("s1")
+ main.log.info( "ONOS1 delta device - t0: " +
+ str( deltaDevice1 ) + " ms" )
- time.sleep(5)
+ main.log.info( "TCP to OFP delta: " +
+ str( deltaOfpTcp ) + " ms" )
- #END of for loop iteration
+ main.step( "Remove switch from controller" )
+ main.Mininet1.deleteSwController( "s1" )
- #If there is at least 1 element in each list,
- #pass the test case
- if len(latency_end_to_end_list) > 0 and\
- len(latency_ofp_to_graph_list) > 0 and\
- len(latency_ofp_to_device_list) > 0 and\
- len(latency_t0_to_device_list) > 0 and\
- len(latency_tcp_to_ofp_list) > 0:
+ time.sleep( 5 )
+
+ # END of for loop iteration
+
+ # If there is at least 1 element in each list,
+ # pass the test case
+ if len( latencyEndToEndList ) > 0 and\
+ len( latencyOfpToGraphList ) > 0 and\
+ len( latencyOfpToDeviceList ) > 0 and\
+ len( latencyT0ToDeviceList ) > 0 and\
+ len( latencyTcpToOfpList ) > 0:
assertion = main.TRUE
- elif len(latency_end_to_end_list) == 0:
- #The appending of 0 here is to prevent
- #the min,max,sum functions from failing
- #below
- latency_end_to_end_list.append(0)
+ elif len( latencyEndToEndList ) == 0:
+ # The appending of 0 here is to prevent
+ # the min,max,sum functions from failing
+ # below
+ latencyEndToEndList.append( 0 )
assertion = main.FALSE
- elif len(latency_ofp_to_graph_list) == 0:
- latency_ofp_to_graph_list.append(0)
+ elif len( latencyOfpToGraphList ) == 0:
+ latencyOfpToGraphList.append( 0 )
assertion = main.FALSE
- elif len(latency_ofp_to_device_list) == 0:
- latency_ofp_to_device_list.append(0)
+ elif len( latencyOfpToDeviceList ) == 0:
+ latencyOfpToDeviceList.append( 0 )
assertion = main.FALSE
- elif len(latency_t0_to_device_list) == 0:
- latency_t0_to_device_list.append(0)
+ elif len( latencyT0ToDeviceList ) == 0:
+ latencyT0ToDeviceList.append( 0 )
assertion = main.FALSE
- elif len(latency_tcp_to_ofp_list) == 0:
- latency_tcp_to_ofp_list.append(0)
+ elif len( latencyTcpToOfpList ) == 0:
+ latencyTcpToOfpList.append( 0 )
assertion = main.FALSE
- #Calculate min, max, avg of latency lists
- latency_end_to_end_max = \
- int(max(latency_end_to_end_list))
- latency_end_to_end_min = \
- int(min(latency_end_to_end_list))
- latency_end_to_end_avg = \
- (int(sum(latency_end_to_end_list)) / \
- len(latency_end_to_end_list))
- latency_end_to_end_std_dev = \
- str(round(numpy.std(latency_end_to_end_list),1))
+ # Calculate min, max, avg of latency lists
+ latencyEndToEndMax = \
+ int( max( latencyEndToEndList ) )
+ latencyEndToEndMin = \
+ int( min( latencyEndToEndList ) )
+ latencyEndToEndAvg = \
+ ( int( sum( latencyEndToEndList ) ) /
+ len( latencyEndToEndList ) )
+ latencyEndToEndStdDev = \
+ str( round( numpy.std( latencyEndToEndList ), 1 ) )
- latency_ofp_to_graph_max = \
- int(max(latency_ofp_to_graph_list))
- latency_ofp_to_graph_min = \
- int(min(latency_ofp_to_graph_list))
- latency_ofp_to_graph_avg = \
- (int(sum(latency_ofp_to_graph_list)) / \
- len(latency_ofp_to_graph_list))
- latency_ofp_to_graph_std_dev = \
- str(round(numpy.std(latency_ofp_to_graph_list),1))
+ latencyOfpToGraphMax = \
+ int( max( latencyOfpToGraphList ) )
+ latencyOfpToGraphMin = \
+ int( min( latencyOfpToGraphList ) )
+ latencyOfpToGraphAvg = \
+ ( int( sum( latencyOfpToGraphList ) ) /
+ len( latencyOfpToGraphList ) )
+ latencyOfpToGraphStdDev = \
+ str( round( numpy.std( latencyOfpToGraphList ), 1 ) )
- latency_ofp_to_device_max = \
- int(max(latency_ofp_to_device_list))
- latency_ofp_to_device_min = \
- int(min(latency_ofp_to_device_list))
- latency_ofp_to_device_avg = \
- (int(sum(latency_ofp_to_device_list)) / \
- len(latency_ofp_to_device_list))
- latency_ofp_to_device_std_dev = \
- str(round(numpy.std(latency_ofp_to_device_list),1))
+ latencyOfpToDeviceMax = \
+ int( max( latencyOfpToDeviceList ) )
+ latencyOfpToDeviceMin = \
+ int( min( latencyOfpToDeviceList ) )
+ latencyOfpToDeviceAvg = \
+ ( int( sum( latencyOfpToDeviceList ) ) /
+ len( latencyOfpToDeviceList ) )
+ latencyOfpToDeviceStdDev = \
+ str( round( numpy.std( latencyOfpToDeviceList ), 1 ) )
- latency_t0_to_device_max = \
- int(max(latency_t0_to_device_list))
- latency_t0_to_device_min = \
- int(min(latency_t0_to_device_list))
- latency_t0_to_device_avg = \
- (int(sum(latency_t0_to_device_list)) / \
- len(latency_t0_to_device_list))
- latency_ofp_to_device_std_dev = \
- str(round(numpy.std(latency_t0_to_device_list),1))
+ latencyT0ToDeviceMax = \
+ int( max( latencyT0ToDeviceList ) )
+ latencyT0ToDeviceMin = \
+ int( min( latencyT0ToDeviceList ) )
+ latencyT0ToDeviceAvg = \
+ ( int( sum( latencyT0ToDeviceList ) ) /
+ len( latencyT0ToDeviceList ) )
+ latencyOfpToDeviceStdDev = \
+ str( round( numpy.std( latencyT0ToDeviceList ), 1 ) )
- latency_tcp_to_ofp_max = \
- int(max(latency_tcp_to_ofp_list))
- latency_tcp_to_ofp_min = \
- int(min(latency_tcp_to_ofp_list))
- latency_tcp_to_ofp_avg = \
- (int(sum(latency_tcp_to_ofp_list)) / \
- len(latency_tcp_to_ofp_list))
- latency_tcp_to_ofp_std_dev = \
- str(round(numpy.std(latency_tcp_to_ofp_list),1))
+ latencyTcpToOfpMax = \
+ int( max( latencyTcpToOfpList ) )
+ latencyTcpToOfpMin = \
+ int( min( latencyTcpToOfpList ) )
+ latencyTcpToOfpAvg = \
+ ( int( sum( latencyTcpToOfpList ) ) /
+ len( latencyTcpToOfpList ) )
+ latencyTcpToOfpStdDev = \
+ str( round( numpy.std( latencyTcpToOfpList ), 1 ) )
- main.log.report("Cluster size: "+str(cluster_count)+\
- " node(s)")
- main.log.report("Switch add - End-to-end latency: "+\
- "Avg: "+str(latency_end_to_end_avg)+" ms "+
- "Std Deviation: "+latency_end_to_end_std_dev+" ms")
- main.log.report("Switch add - OFP-to-Graph latency: "+\
- "Note: results are not accurate to sub-millisecond. "+
- "Any sub-millisecond results are rounded to 0 ms. ")
- main.log.report("Avg: "+str(latency_ofp_to_graph_avg)+" ms "+
- "Std Deviation: "+latency_ofp_to_graph_std_dev+" ms")
- main.log.report("Switch add - TCP-to-OFP latency: "+\
- "Avg: "+str(latency_tcp_to_ofp_avg)+" ms "+
- "Std Deviation: "+latency_tcp_to_ofp_std_dev+" ms")
+ main.log.report( "Cluster size: " + str( clusterCount ) +
+ " node(s)" )
+ main.log.report( "Switch add - End-to-end latency: " +
+ "Avg: " + str( latencyEndToEndAvg ) + " ms " +
+ "Std Deviation: " + latencyEndToEndStdDev + " ms" )
+ main.log.report(
+ "Switch add - OFP-to-Graph latency: " +
+ "Note: results are not accurate to sub-millisecond. " +
+ "Any sub-millisecond results are rounded to 0 ms. " )
+ main.log.report( "Avg: " + str( latencyOfpToGraphAvg ) + " ms " +
+ "Std Deviation: " + latencyOfpToGraphStdDev + " ms" )
+ main.log.report( "Switch add - TCP-to-OFP latency: " +
+ "Avg: " + str( latencyTcpToOfpAvg ) + " ms " +
+ "Std Deviation: " + latencyTcpToOfpStdDev + " ms" )
- if debug_mode == 'on':
- main.ONOS1.cp_logs_to_dir("/opt/onos/log/karaf.log",
- "/tmp/", copy_file_name="sw_lat_karaf")
+ if debugMode == 'on':
+ main.ONOS1.cpLogsToDir( "/opt/onos/log/karaf.log",
+ "/tmp/", copyFileName="sw_lat_karaf" )
- utilities.assert_equals(expect=main.TRUE, actual=assertion,
- onpass="Switch latency test successful",
- onfail="Switch latency test failed")
+ utilities.assert_equals( expect=main.TRUE, actual=assertion,
+ onpass="Switch latency test successful",
+ onfail="Switch latency test failed" )
- def CASE3(self, main):
- '''
+ def CASE3( self, main ):
+ """
Bring port up / down and measure latency.
Port enable / disable is simulated by ifconfig up / down
-
- In ONOS-next, we must ensure that the port we are
+
+ In ONOS-next, we must ensure that the port we are
manipulating is connected to another switch with a valid
connection. Otherwise, graph view will not be updated.
- '''
+ """
import time
import subprocess
import os
import requests
import json
import numpy
- global cluster_count
+ global clusterCount
- ONOS1_ip = main.params['CTRL']['ip1']
- ONOS2_ip = main.params['CTRL']['ip2']
- ONOS3_ip = main.params['CTRL']['ip3']
- ONOS_user = main.params['CTRL']['user']
+ ONOS1Ip = main.params[ 'CTRL' ][ 'ip1' ]
+ ONOS2Ip = main.params[ 'CTRL' ][ 'ip2' ]
+ ONOS3Ip = main.params[ 'CTRL' ][ 'ip3' ]
+ ONOSUser = main.params[ 'CTRL' ][ 'user' ]
- default_sw_port = main.params['CTRL']['port1']
-
+ defaultSwPort = main.params[ 'CTRL' ][ 'port1' ]
+
assertion = main.TRUE
- #Number of iterations of case
- num_iter = main.params['TEST']['numIter']
-
- #Timestamp 'keys' for json metrics output.
- #These are subject to change, hence moved into params
- deviceTimestamp = main.params['JSON']['deviceTimestamp']
- graphTimestamp = main.params['JSON']['graphTimestamp']
-
- debug_mode = main.params['TEST']['debugMode']
+ # Number of iterations of case
+ numIter = main.params[ 'TEST' ][ 'numIter' ]
- local_time = time.strftime('%x %X')
- local_time = local_time.replace("/","")
- local_time = local_time.replace(" ","_")
- local_time = local_time.replace(":","")
- if debug_mode == 'on':
- main.ONOS1.tshark_pcap("eth0",
- "/tmp/port_lat_pcap_"+local_time)
+ # Timestamp 'keys' for json metrics output.
+ # These are subject to change, hence moved into params
+ deviceTimestamp = main.params[ 'JSON' ][ 'deviceTimestamp' ]
+ graphTimestamp = main.params[ 'JSON' ][ 'graphTimestamp' ]
- #Threshold for this test case
- up_threshold_str = main.params['TEST']['portUpThreshold']
- down_threshold_str = main.params['TEST']['portDownThreshold']
-
- up_threshold_obj = up_threshold_str.split(",")
- down_threshold_obj = down_threshold_str.split(",")
+ debugMode = main.params[ 'TEST' ][ 'debugMode' ]
- up_threshold_min = int(up_threshold_obj[0])
- up_threshold_max = int(up_threshold_obj[1])
+ localTime = time.strftime( '%x %X' )
+ localTime = localTime.replace( "/", "" )
+ localTime = localTime.replace( " ", "_" )
+ localTime = localTime.replace( ":", "" )
+ if debugMode == 'on':
+ main.ONOS1.tsharkPcap( "eth0",
+ "/tmp/port_lat_pcap_" + localTime )
- down_threshold_min = int(down_threshold_obj[0])
- down_threshold_max = int(down_threshold_obj[1])
+ # Threshold for this test case
+ upThresholdStr = main.params[ 'TEST' ][ 'portUpThreshold' ]
+ downThresholdStr = main.params[ 'TEST' ][ 'portDownThreshold' ]
- #NOTE: Some hardcoded variables you may need to configure
+ upThresholdObj = upThresholdStr.split( "," )
+ downThresholdObj = downThresholdStr.split( "," )
+
+ upThresholdMin = int( upThresholdObj[ 0 ] )
+ upThresholdMax = int( upThresholdObj[ 1 ] )
+
+ downThresholdMin = int( downThresholdObj[ 0 ] )
+ downThresholdMax = int( downThresholdObj[ 1 ] )
+
+ # NOTE: Some hardcoded variables you may need to configure
# besides the params
-
- tshark_port_status = "OFP 130 Port Status"
- tshark_port_up = "/tmp/tshark_port_up.txt"
- tshark_port_down = "/tmp/tshark_port_down.txt"
- interface_config = "s1-eth1"
+ tsharkPortStatus = "OFP 130 Port Status"
- main.log.report("Port enable / disable latency")
- main.log.report("Simulated by ifconfig up / down")
- main.log.report("Total iterations of test: "+str(num_iter))
+ tsharkPortUp = "/tmp/tshark_port_up.txt"
+ tsharkPortDown = "/tmp/tshark_port_down.txt"
+ interfaceConfig = "s1-eth1"
- main.step("Assign switches s1 and s2 to controller 1")
- main.Mininet1.assign_sw_controller(sw="1",ip1=ONOS1_ip,
- port1=default_sw_port)
- main.Mininet1.assign_sw_controller(sw="2",ip1=ONOS1_ip,
- port1=default_sw_port)
+ main.log.report( "Port enable / disable latency" )
+ main.log.report( "Simulated by ifconfig up / down" )
+ main.log.report( "Total iterations of test: " + str( numIter ) )
- #Give enough time for metrics to propagate the
- #assign controller event. Otherwise, these events may
- #carry over to our measurements
- time.sleep(15)
+ main.step( "Assign switches s1 and s2 to controller 1" )
+ main.Mininet1.assignSwController( sw="1", ip1=ONOS1Ip,
+ port1=defaultSwPort )
+ main.Mininet1.assignSwController( sw="2", ip1=ONOS1Ip,
+ port1=defaultSwPort )
- port_up_device_to_ofp_list = []
- port_up_graph_to_ofp_list = []
- port_down_device_to_ofp_list = []
- port_down_graph_to_ofp_list = []
+ # Give enough time for metrics to propagate the
+ # assign controller event. Otherwise, these events may
+ # carry over to our measurements
+ time.sleep( 15 )
- for i in range(0, int(num_iter)):
- main.step("Starting wireshark capture for port status down")
- main.ONOS1.tshark_grep(tshark_port_status,
- tshark_port_down)
-
- time.sleep(5)
+ portUpDeviceToOfpList = []
+ portUpGraphToOfpList = []
+ portDownDeviceToOfpList = []
+ portDownGraphToOfpList = []
- #Disable interface that is connected to switch 2
- main.step("Disable port: "+interface_config)
- main.Mininet1.handle.sendline("sh ifconfig "+
- interface_config+" down")
- main.Mininet1.handle.expect("mininet>")
+ for i in range( 0, int( numIter ) ):
+ main.step( "Starting wireshark capture for port status down" )
+ main.ONOS1.tsharkGrep( tsharkPortStatus,
+ tsharkPortDown )
- time.sleep(3)
- main.ONOS1.tshark_stop()
-
- #Copy tshark output file from ONOS to TestON instance
+ time.sleep( 5 )
+
+ # Disable interface that is connected to switch 2
+ main.step( "Disable port: " + interfaceConfig )
+ main.Mininet1.handle.sendline( "sh ifconfig " +
+ interfaceConfig + " down" )
+ main.Mininet1.handle.expect( "mininet>" )
+
+ time.sleep( 3 )
+ main.ONOS1.tsharkStop()
+
+ # Copy tshark output file from ONOS to TestON instance
#/tmp directory
- os.system("scp "+ONOS_user+"@"+ONOS1_ip+":"+
- tshark_port_down+" /tmp/")
+ os.system( "scp " + ONOSUser + "@" + ONOS1Ip + ":" +
+ tsharkPortDown + " /tmp/" )
- f_port_down = open(tshark_port_down, 'r')
- #Get first line of port down event from tshark
- f_line = f_port_down.readline()
- obj_down = f_line.split(" ")
- if len(f_line) > 0:
- #NOTE: obj_down[1] is a very unreliable
- # way to determine the timestamp. If
- # results seem way off, check the object
+ fPortDown = open( tsharkPortDown, 'r' )
+ # Get first line of port down event from tshark
+ fLine = fPortDown.readline()
+ objDown = fLine.split( " " )
+ if len( fLine ) > 0:
+ # NOTE: objDown[ 1 ] is a very unreliable
+ # way to determine the timestamp. If
+ # results seem way off, check the object
# itself by printing it out
- timestamp_begin_pt_down = int(float(obj_down[1])*1000)
- # For some reason, wireshark decides to record the
+ timestampBeginPtDown = int( float( objDown[ 1 ] ) * 1000 )
+ # For some reason, wireshark decides to record the
# timestamp at the 3rd object position instead of
- # 2nd at unpredictable times. This statement is
+ # 2nd at unpredictable times. This statement is
# used to capture that odd behavior and use the
# correct epoch time
- if timestamp_begin_pt_down < 1400000000000:
- timestamp_begin_pt_down = \
- int(float(obj_down[2])*1000)
+ if timestampBeginPtDown < 1400000000000:
+ timestampBeginPtDown = \
+ int( float( objDown[ 2 ] ) * 1000 )
- main.log.info("Port down begin timestamp: "+
- str(timestamp_begin_pt_down))
+ main.log.info( "Port down begin timestamp: " +
+ str( timestampBeginPtDown ) )
else:
- main.log.info("Tshark output file returned unexpected"+
- " results: "+str(obj_down))
- timestamp_begin_pt_down = 0
- f_port_down.close()
-
- main.step("Obtain t1 by metrics call")
- json_str_up_1 = main.ONOS1cli.topology_events_metrics()
- json_obj_1 = json.loads(json_str_up_1)
- #Obtain graph timestamp. This timestsamp captures
- #the epoch time at which the topology graph was updated.
- graph_timestamp_1 = \
- json_obj_1[graphTimestamp]['value']
- #Obtain device timestamp. This timestamp captures
- #the epoch time at which the device event happened
- device_timestamp_1 = \
- json_obj_1[deviceTimestamp]['value']
- #Get delta between graph event and OFP
- pt_down_graph_to_ofp_1 = int(graph_timestamp_1) -\
- int(timestamp_begin_pt_down)
- #Get delta between device event and OFP
- pt_down_device_to_ofp_1 = int(device_timestamp_1) -\
- int(timestamp_begin_pt_down)
-
- if cluster_count >= 3:
- json_str_up_2 = main.ONOS2cli.topology_events_metrics()
- json_str_up_3 = main.ONOS3cli.topology_events_metrics()
- json_obj_2 = json.loads(json_str_up_2)
- json_obj_3 = json.loads(json_str_up_3)
- graph_timestamp_2 = \
- json_obj_2[graphTimestamp]['value']
- graph_timestamp_3 = \
- json_obj_3[graphTimestamp]['value']
- device_timestamp_2 = \
- json_obj_2[deviceTimestamp]['value']
- device_timestamp_3 = \
- json_obj_3[deviceTimestamp]['value']
- pt_down_graph_to_ofp_2 = int(graph_timestamp_2) -\
- int(timestamp_begin_pt_down)
- pt_down_graph_to_ofp_3 = int(graph_timestamp_3) -\
- int(timestamp_begin_pt_down)
- pt_down_device_to_ofp_2 = int(device_timestamp_2) -\
- int(timestamp_begin_pt_down)
- pt_down_device_to_ofp_3 = int(device_timestamp_3) -\
- int(timestamp_begin_pt_down)
+ main.log.info( "Tshark output file returned unexpected" +
+ " results: " + str( objDown ) )
+ timestampBeginPtDown = 0
+ fPortDown.close()
+
+ main.step( "Obtain t1 by metrics call" )
+ jsonStrUp1 = main.ONOS1cli.topologyEventsMetrics()
+ jsonObj1 = json.loads( jsonStrUp1 )
+            # Obtain graph timestamp. This timestamp captures
+ # the epoch time at which the topology graph was updated.
+ graphTimestamp1 = \
+ jsonObj1[ graphTimestamp ][ 'value' ]
+ # Obtain device timestamp. This timestamp captures
+ # the epoch time at which the device event happened
+ deviceTimestamp1 = \
+ jsonObj1[ deviceTimestamp ][ 'value' ]
+ # Get delta between graph event and OFP
+ ptDownGraphToOfp1 = int( graphTimestamp1 ) -\
+ int( timestampBeginPtDown )
+ # Get delta between device event and OFP
+ ptDownDeviceToOfp1 = int( deviceTimestamp1 ) -\
+ int( timestampBeginPtDown )
+
+ if clusterCount >= 3:
+ jsonStrUp2 = main.ONOS2cli.topologyEventsMetrics()
+ jsonStrUp3 = main.ONOS3cli.topologyEventsMetrics()
+ jsonObj2 = json.loads( jsonStrUp2 )
+ jsonObj3 = json.loads( jsonStrUp3 )
+ graphTimestamp2 = \
+ jsonObj2[ graphTimestamp ][ 'value' ]
+ graphTimestamp3 = \
+ jsonObj3[ graphTimestamp ][ 'value' ]
+ deviceTimestamp2 = \
+ jsonObj2[ deviceTimestamp ][ 'value' ]
+ deviceTimestamp3 = \
+ jsonObj3[ deviceTimestamp ][ 'value' ]
+ ptDownGraphToOfp2 = int( graphTimestamp2 ) -\
+ int( timestampBeginPtDown )
+ ptDownGraphToOfp3 = int( graphTimestamp3 ) -\
+ int( timestampBeginPtDown )
+ ptDownDeviceToOfp2 = int( deviceTimestamp2 ) -\
+ int( timestampBeginPtDown )
+ ptDownDeviceToOfp3 = int( deviceTimestamp3 ) -\
+ int( timestampBeginPtDown )
else:
- pt_down_graph_to_ofp_2 = 0
- pt_down_graph_to_ofp_3 = 0
- pt_down_device_to_ofp_2 = 0
- pt_down_device_to_ofp_3 = 0
+ ptDownGraphToOfp2 = 0
+ ptDownGraphToOfp3 = 0
+ ptDownDeviceToOfp2 = 0
+ ptDownDeviceToOfp3 = 0
- if cluster_count >= 5:
- json_str_up_4 = main.ONOS4cli.topology_events_metrics()
- json_str_up_5 = main.ONOS5cli.topology_events_metrics()
- json_obj_4 = json.loads(json_str_up_4)
- json_obj_5 = json.loads(json_str_up_5)
- graph_timestamp_4 = \
- json_obj_4[graphTimestamp]['value']
- graph_timestamp_5 = \
- json_obj_5[graphTimestamp]['value']
- device_timestamp_4 = \
- json_obj_4[deviceTimestamp]['value']
- device_timestamp_5 = \
- json_obj_5[deviceTimestamp]['value']
- pt_down_graph_to_ofp_4 = int(graph_timestamp_4) -\
- int(timestamp_begin_pt_down)
- pt_down_graph_to_ofp_5 = int(graph_timestamp_5) -\
- int(timestamp_begin_pt_down)
- pt_down_device_to_ofp_4 = int(device_timestamp_4) -\
- int(timestamp_begin_pt_down)
- pt_down_device_to_ofp_5 = int(device_timestamp_5) -\
- int(timestamp_begin_pt_down)
+ if clusterCount >= 5:
+ jsonStrUp4 = main.ONOS4cli.topologyEventsMetrics()
+ jsonStrUp5 = main.ONOS5cli.topologyEventsMetrics()
+ jsonObj4 = json.loads( jsonStrUp4 )
+ jsonObj5 = json.loads( jsonStrUp5 )
+ graphTimestamp4 = \
+ jsonObj4[ graphTimestamp ][ 'value' ]
+ graphTimestamp5 = \
+ jsonObj5[ graphTimestamp ][ 'value' ]
+ deviceTimestamp4 = \
+ jsonObj4[ deviceTimestamp ][ 'value' ]
+ deviceTimestamp5 = \
+ jsonObj5[ deviceTimestamp ][ 'value' ]
+ ptDownGraphToOfp4 = int( graphTimestamp4 ) -\
+ int( timestampBeginPtDown )
+ ptDownGraphToOfp5 = int( graphTimestamp5 ) -\
+ int( timestampBeginPtDown )
+ ptDownDeviceToOfp4 = int( deviceTimestamp4 ) -\
+ int( timestampBeginPtDown )
+ ptDownDeviceToOfp5 = int( deviceTimestamp5 ) -\
+ int( timestampBeginPtDown )
else:
- pt_down_graph_to_ofp_4 = 0
- pt_down_graph_to_ofp_5 = 0
- pt_down_device_to_ofp_4 = 0
- pt_down_device_to_ofp_5 = 0
+ ptDownGraphToOfp4 = 0
+ ptDownGraphToOfp5 = 0
+ ptDownDeviceToOfp4 = 0
+ ptDownDeviceToOfp5 = 0
- if cluster_count >= 7:
- json_str_up_6 = main.ONOS6cli.topology_events_metrics()
- json_str_up_7 = main.ONOS7cli.topology_events_metrics()
- json_obj_6 = json.loads(json_str_up_6)
- json_obj_7 = json.loads(json_str_up_7)
- graph_timestamp_6 = \
- json_obj_6[graphTimestamp]['value']
- graph_timestamp_7 = \
- json_obj_7[graphTimestamp]['value']
- device_timestamp_6 = \
- json_obj_6[deviceTimestamp]['value']
- device_timestamp_7 = \
- json_obj_7[deviceTimestamp]['value']
- pt_down_graph_to_ofp_6 = int(graph_timestamp_6) -\
- int(timestamp_begin_pt_down)
- pt_down_graph_to_ofp_7 = int(graph_timestamp_7) -\
- int(timestamp_begin_pt_down)
- pt_down_device_to_ofp_6 = int(device_timestamp_6) -\
- int(timestamp_begin_pt_down)
- pt_down_device_to_ofp_7 = int(device_timestamp_7) -\
- int(timestamp_begin_pt_down)
+ if clusterCount >= 7:
+ jsonStrUp6 = main.ONOS6cli.topologyEventsMetrics()
+ jsonStrUp7 = main.ONOS7cli.topologyEventsMetrics()
+ jsonObj6 = json.loads( jsonStrUp6 )
+ jsonObj7 = json.loads( jsonStrUp7 )
+ graphTimestamp6 = \
+ jsonObj6[ graphTimestamp ][ 'value' ]
+ graphTimestamp7 = \
+ jsonObj7[ graphTimestamp ][ 'value' ]
+ deviceTimestamp6 = \
+ jsonObj6[ deviceTimestamp ][ 'value' ]
+ deviceTimestamp7 = \
+ jsonObj7[ deviceTimestamp ][ 'value' ]
+ ptDownGraphToOfp6 = int( graphTimestamp6 ) -\
+ int( timestampBeginPtDown )
+ ptDownGraphToOfp7 = int( graphTimestamp7 ) -\
+ int( timestampBeginPtDown )
+ ptDownDeviceToOfp6 = int( deviceTimestamp6 ) -\
+ int( timestampBeginPtDown )
+ ptDownDeviceToOfp7 = int( deviceTimestamp7 ) -\
+ int( timestampBeginPtDown )
else:
- pt_down_graph_to_ofp_6 = 0
- pt_down_graph_to_ofp_7 = 0
- pt_down_device_to_ofp_6 = 0
- pt_down_device_to_ofp_7 = 0
+ ptDownGraphToOfp6 = 0
+ ptDownGraphToOfp7 = 0
+ ptDownDeviceToOfp6 = 0
+ ptDownDeviceToOfp7 = 0
- time.sleep(3)
+ time.sleep( 3 )
- #Caluclate average across clusters
- pt_down_graph_to_ofp_avg =\
- (int(pt_down_graph_to_ofp_1) +
- int(pt_down_graph_to_ofp_2) +
- int(pt_down_graph_to_ofp_3) +
- int(pt_down_graph_to_ofp_4) +
- int(pt_down_graph_to_ofp_5) +
- int(pt_down_graph_to_ofp_6) +
- int(pt_down_graph_to_ofp_7)) / cluster_count
- pt_down_device_to_ofp_avg = \
- (int(pt_down_device_to_ofp_1) +
- int(pt_down_device_to_ofp_2) +
- int(pt_down_device_to_ofp_3) +
- int(pt_down_device_to_ofp_4) +
- int(pt_down_device_to_ofp_5) +
- int(pt_down_device_to_ofp_6) +
- int(pt_down_device_to_ofp_7)) / cluster_count
+            # Calculate average across clusters
+ ptDownGraphToOfpAvg =\
+ ( int( ptDownGraphToOfp1 ) +
+ int( ptDownGraphToOfp2 ) +
+ int( ptDownGraphToOfp3 ) +
+ int( ptDownGraphToOfp4 ) +
+ int( ptDownGraphToOfp5 ) +
+ int( ptDownGraphToOfp6 ) +
+ int( ptDownGraphToOfp7 ) ) / clusterCount
+ ptDownDeviceToOfpAvg = \
+ ( int( ptDownDeviceToOfp1 ) +
+ int( ptDownDeviceToOfp2 ) +
+ int( ptDownDeviceToOfp3 ) +
+ int( ptDownDeviceToOfp4 ) +
+ int( ptDownDeviceToOfp5 ) +
+ int( ptDownDeviceToOfp6 ) +
+ int( ptDownDeviceToOfp7 ) ) / clusterCount
- if pt_down_graph_to_ofp_avg > down_threshold_min and \
- pt_down_graph_to_ofp_avg < down_threshold_max:
- port_down_graph_to_ofp_list.append(
- pt_down_graph_to_ofp_avg)
- main.log.info("Port down: graph to ofp avg: "+
- str(pt_down_graph_to_ofp_avg) + " ms")
+ if ptDownGraphToOfpAvg > downThresholdMin and \
+ ptDownGraphToOfpAvg < downThresholdMax:
+ portDownGraphToOfpList.append(
+ ptDownGraphToOfpAvg )
+ main.log.info( "Port down: graph to ofp avg: " +
+ str( ptDownGraphToOfpAvg ) + " ms" )
else:
- main.log.info("Average port down graph-to-ofp result" +
- " exceeded the threshold: "+
- str(pt_down_graph_to_ofp_avg))
+ main.log.info( "Average port down graph-to-ofp result" +
+ " exceeded the threshold: " +
+ str( ptDownGraphToOfpAvg ) )
- if pt_down_device_to_ofp_avg > 0 and \
- pt_down_device_to_ofp_avg < 1000:
- port_down_device_to_ofp_list.append(
- pt_down_device_to_ofp_avg)
- main.log.info("Port down: device to ofp avg: "+
- str(pt_down_device_to_ofp_avg) + " ms")
+ if ptDownDeviceToOfpAvg > 0 and \
+ ptDownDeviceToOfpAvg < 1000:
+ portDownDeviceToOfpList.append(
+ ptDownDeviceToOfpAvg )
+ main.log.info( "Port down: device to ofp avg: " +
+ str( ptDownDeviceToOfpAvg ) + " ms" )
else:
- main.log.info("Average port down device-to-ofp result" +
- " exceeded the threshold: "+
- str(pt_down_device_to_ofp_avg))
+ main.log.info( "Average port down device-to-ofp result" +
+ " exceeded the threshold: " +
+ str( ptDownDeviceToOfpAvg ) )
- #Port up events
- main.step("Enable port and obtain timestamp")
- main.step("Starting wireshark capture for port status up")
- main.ONOS1.tshark_grep(tshark_port_status, tshark_port_up)
- time.sleep(5)
+ # Port up events
+ main.step( "Enable port and obtain timestamp" )
+ main.step( "Starting wireshark capture for port status up" )
+ main.ONOS1.tsharkGrep( tsharkPortStatus, tsharkPortUp )
+ time.sleep( 5 )
- main.Mininet1.handle.sendline("sh ifconfig "+
- interface_config+" up")
- main.Mininet1.handle.expect("mininet>")
-
- #Allow time for tshark to capture event
- time.sleep(5)
- main.ONOS1.tshark_stop()
-
- time.sleep(3)
- os.system("scp "+ONOS_user+"@"+ONOS1_ip+":"+
- tshark_port_up+" /tmp/")
- f_port_up = open(tshark_port_up, 'r')
- f_line = f_port_up.readline()
- obj_up = f_line.split(" ")
- if len(f_line) > 0:
- timestamp_begin_pt_up = int(float(obj_up[1])*1000)
- if timestamp_begin_pt_up < 1400000000000:
- timestamp_begin_pt_up = \
- int(float(obj_up[2])*1000)
- main.log.info("Port up begin timestamp: "+
- str(timestamp_begin_pt_up))
+ main.Mininet1.handle.sendline( "sh ifconfig " +
+ interfaceConfig + " up" )
+ main.Mininet1.handle.expect( "mininet>" )
+
+ # Allow time for tshark to capture event
+ time.sleep( 5 )
+ main.ONOS1.tsharkStop()
+
+ time.sleep( 3 )
+ os.system( "scp " + ONOSUser + "@" + ONOS1Ip + ":" +
+ tsharkPortUp + " /tmp/" )
+ fPortUp = open( tsharkPortUp, 'r' )
+ fLine = fPortUp.readline()
+ objUp = fLine.split( " " )
+ if len( fLine ) > 0:
+ timestampBeginPtUp = int( float( objUp[ 1 ] ) * 1000 )
+ if timestampBeginPtUp < 1400000000000:
+ timestampBeginPtUp = \
+ int( float( objUp[ 2 ] ) * 1000 )
+ main.log.info( "Port up begin timestamp: " +
+ str( timestampBeginPtUp ) )
else:
- main.log.info("Tshark output file returned unexpected"+
- " results.")
- timestamp_begin_pt_up = 0
- f_port_up.close()
+ main.log.info( "Tshark output file returned unexpected" +
+ " results." )
+ timestampBeginPtUp = 0
+ fPortUp.close()
- #Obtain metrics shortly afterwards
- #This timestsamp captures
- #the epoch time at which the topology graph was updated.
- main.step("Obtain t1 by REST call")
- json_str_up_1 = main.ONOS1cli.topology_events_metrics()
- json_obj_1 = json.loads(json_str_up_1)
- graph_timestamp_1 = \
- json_obj_1[graphTimestamp]['value']
- #Obtain device timestamp. This timestamp captures
- #the epoch time at which the device event happened
- device_timestamp_1 = \
- json_obj_1[deviceTimestamp]['value']
- #Get delta between graph event and OFP
- pt_up_graph_to_ofp_1 = int(graph_timestamp_1) -\
- int(timestamp_begin_pt_up)
- #Get delta between device event and OFP
- pt_up_device_to_ofp_1 = int(device_timestamp_1) -\
- int(timestamp_begin_pt_up)
-
- if cluster_count >= 3:
- json_str_up_2 = main.ONOS2cli.topology_events_metrics()
- json_str_up_3 = main.ONOS3cli.topology_events_metrics()
- json_obj_2 = json.loads(json_str_up_2)
- json_obj_3 = json.loads(json_str_up_3)
- graph_timestamp_2 = \
- json_obj_2[graphTimestamp]['value']
- graph_timestamp_3 = \
- json_obj_3[graphTimestamp]['value']
- device_timestamp_2 = \
- json_obj_2[deviceTimestamp]['value']
- device_timestamp_3 = \
- json_obj_3[deviceTimestamp]['value']
- pt_up_graph_to_ofp_2 = int(graph_timestamp_2) -\
- int(timestamp_begin_pt_up)
- pt_up_graph_to_ofp_3 = int(graph_timestamp_3) -\
- int(timestamp_begin_pt_up)
- pt_up_device_to_ofp_2 = int(device_timestamp_2) -\
- int(timestamp_begin_pt_up)
- pt_up_device_to_ofp_3 = int(device_timestamp_3) -\
- int(timestamp_begin_pt_up)
- else:
- pt_up_graph_to_ofp_2 = 0
- pt_up_graph_to_ofp_3 = 0
- pt_up_device_to_ofp_2 = 0
- pt_up_device_to_ofp_3 = 0
-
- if cluster_count >= 5:
- json_str_up_4 = main.ONOS4cli.topology_events_metrics()
- json_str_up_5 = main.ONOS5cli.topology_events_metrics()
- json_obj_4 = json.loads(json_str_up_4)
- json_obj_5 = json.loads(json_str_up_5)
- graph_timestamp_4 = \
- json_obj_4[graphTimestamp]['value']
- graph_timestamp_5 = \
- json_obj_5[graphTimestamp]['value']
- device_timestamp_4 = \
- json_obj_4[deviceTimestamp]['value']
- device_timestamp_5 = \
- json_obj_5[deviceTimestamp]['value']
- pt_up_graph_to_ofp_4 = int(graph_timestamp_4) -\
- int(timestamp_begin_pt_up)
- pt_up_graph_to_ofp_5 = int(graph_timestamp_5) -\
- int(timestamp_begin_pt_up)
- pt_up_device_to_ofp_4 = int(device_timestamp_4) -\
- int(timestamp_begin_pt_up)
- pt_up_device_to_ofp_5 = int(device_timestamp_5) -\
- int(timestamp_begin_pt_up)
- else:
- pt_up_graph_to_ofp_4 = 0
- pt_up_graph_to_ofp_5 = 0
- pt_up_device_to_ofp_4 = 0
- pt_up_device_to_ofp_5 = 0
+ # Obtain metrics shortly afterwards
+            # This timestamp captures
+ # the epoch time at which the topology graph was updated.
+ main.step( "Obtain t1 by REST call" )
+ jsonStrUp1 = main.ONOS1cli.topologyEventsMetrics()
+ jsonObj1 = json.loads( jsonStrUp1 )
+ graphTimestamp1 = \
+ jsonObj1[ graphTimestamp ][ 'value' ]
+ # Obtain device timestamp. This timestamp captures
+ # the epoch time at which the device event happened
+ deviceTimestamp1 = \
+ jsonObj1[ deviceTimestamp ][ 'value' ]
+ # Get delta between graph event and OFP
+ ptUpGraphToOfp1 = int( graphTimestamp1 ) -\
+ int( timestampBeginPtUp )
+ # Get delta between device event and OFP
+ ptUpDeviceToOfp1 = int( deviceTimestamp1 ) -\
+ int( timestampBeginPtUp )
- if cluster_count >= 7:
- json_str_up_6 = main.ONOS6cli.topology_events_metrics()
- json_str_up_7 = main.ONOS7cli.topology_events_metrics()
- json_obj_6 = json.loads(json_str_up_6)
- json_obj_7 = json.loads(json_str_up_7)
- graph_timestamp_6 = \
- json_obj_6[graphTimestamp]['value']
- graph_timestamp_7 = \
- json_obj_7[graphTimestamp]['value']
- device_timestamp_6 = \
- json_obj_6[deviceTimestamp]['value']
- device_timestamp_7 = \
- json_obj_7[deviceTimestamp]['value']
- pt_up_graph_to_ofp_6 = int(graph_timestamp_6) -\
- int(timestamp_begin_pt_up)
- pt_up_graph_to_ofp_7 = int(graph_timestamp_7) -\
- int(timestamp_begin_pt_up)
- pt_up_device_to_ofp_6 = int(device_timestamp_6) -\
- int(timestamp_begin_pt_up)
- pt_up_device_to_ofp_7 = int(device_timestamp_7) -\
- int(timestamp_begin_pt_up)
+ if clusterCount >= 3:
+ jsonStrUp2 = main.ONOS2cli.topologyEventsMetrics()
+ jsonStrUp3 = main.ONOS3cli.topologyEventsMetrics()
+ jsonObj2 = json.loads( jsonStrUp2 )
+ jsonObj3 = json.loads( jsonStrUp3 )
+ graphTimestamp2 = \
+ jsonObj2[ graphTimestamp ][ 'value' ]
+ graphTimestamp3 = \
+ jsonObj3[ graphTimestamp ][ 'value' ]
+ deviceTimestamp2 = \
+ jsonObj2[ deviceTimestamp ][ 'value' ]
+ deviceTimestamp3 = \
+ jsonObj3[ deviceTimestamp ][ 'value' ]
+ ptUpGraphToOfp2 = int( graphTimestamp2 ) -\
+ int( timestampBeginPtUp )
+ ptUpGraphToOfp3 = int( graphTimestamp3 ) -\
+ int( timestampBeginPtUp )
+ ptUpDeviceToOfp2 = int( deviceTimestamp2 ) -\
+ int( timestampBeginPtUp )
+ ptUpDeviceToOfp3 = int( deviceTimestamp3 ) -\
+ int( timestampBeginPtUp )
else:
- pt_up_graph_to_ofp_6 = 0
- pt_up_graph_to_ofp_7 = 0
- pt_up_device_to_ofp_6 = 0
- pt_up_device_to_ofp_7 = 0
+ ptUpGraphToOfp2 = 0
+ ptUpGraphToOfp3 = 0
+ ptUpDeviceToOfp2 = 0
+ ptUpDeviceToOfp3 = 0
- pt_up_graph_to_ofp_avg = \
- (int(pt_up_graph_to_ofp_1) +
- int(pt_up_graph_to_ofp_2) +
- int(pt_up_graph_to_ofp_3) +
- int(pt_up_graph_to_ofp_4) +
- int(pt_up_graph_to_ofp_5) +
- int(pt_up_graph_to_ofp_6) +
- int(pt_up_graph_to_ofp_7)) / cluster_count
-
- pt_up_device_to_ofp_avg = \
- (int(pt_up_device_to_ofp_1) +
- int(pt_up_device_to_ofp_2) +
- int(pt_up_device_to_ofp_3) +
- int(pt_up_device_to_ofp_4) +
- int(pt_up_device_to_ofp_5) +
- int(pt_up_device_to_ofp_6) +
- int(pt_up_device_to_ofp_7)) / cluster_count
-
- if pt_up_graph_to_ofp_avg > up_threshold_min and \
- pt_up_graph_to_ofp_avg < up_threshold_max:
- port_up_graph_to_ofp_list.append(
- pt_up_graph_to_ofp_avg)
- main.log.info("Port down: graph to ofp avg: "+
- str(pt_up_graph_to_ofp_avg) + " ms")
+ if clusterCount >= 5:
+ jsonStrUp4 = main.ONOS4cli.topologyEventsMetrics()
+ jsonStrUp5 = main.ONOS5cli.topologyEventsMetrics()
+ jsonObj4 = json.loads( jsonStrUp4 )
+ jsonObj5 = json.loads( jsonStrUp5 )
+ graphTimestamp4 = \
+ jsonObj4[ graphTimestamp ][ 'value' ]
+ graphTimestamp5 = \
+ jsonObj5[ graphTimestamp ][ 'value' ]
+ deviceTimestamp4 = \
+ jsonObj4[ deviceTimestamp ][ 'value' ]
+ deviceTimestamp5 = \
+ jsonObj5[ deviceTimestamp ][ 'value' ]
+ ptUpGraphToOfp4 = int( graphTimestamp4 ) -\
+ int( timestampBeginPtUp )
+ ptUpGraphToOfp5 = int( graphTimestamp5 ) -\
+ int( timestampBeginPtUp )
+ ptUpDeviceToOfp4 = int( deviceTimestamp4 ) -\
+ int( timestampBeginPtUp )
+ ptUpDeviceToOfp5 = int( deviceTimestamp5 ) -\
+ int( timestampBeginPtUp )
else:
- main.log.info("Average port up graph-to-ofp result"+
- " exceeded the threshold: "+
- str(pt_up_graph_to_ofp_avg))
-
- if pt_up_device_to_ofp_avg > up_threshold_min and \
- pt_up_device_to_ofp_avg < up_threshold_max:
- port_up_device_to_ofp_list.append(
- pt_up_device_to_ofp_avg)
- main.log.info("Port up: device to ofp avg: "+
- str(pt_up_device_to_ofp_avg) + " ms")
+ ptUpGraphToOfp4 = 0
+ ptUpGraphToOfp5 = 0
+ ptUpDeviceToOfp4 = 0
+ ptUpDeviceToOfp5 = 0
+
+ if clusterCount >= 7:
+ jsonStrUp6 = main.ONOS6cli.topologyEventsMetrics()
+ jsonStrUp7 = main.ONOS7cli.topologyEventsMetrics()
+ jsonObj6 = json.loads( jsonStrUp6 )
+ jsonObj7 = json.loads( jsonStrUp7 )
+ graphTimestamp6 = \
+ jsonObj6[ graphTimestamp ][ 'value' ]
+ graphTimestamp7 = \
+ jsonObj7[ graphTimestamp ][ 'value' ]
+ deviceTimestamp6 = \
+ jsonObj6[ deviceTimestamp ][ 'value' ]
+ deviceTimestamp7 = \
+ jsonObj7[ deviceTimestamp ][ 'value' ]
+ ptUpGraphToOfp6 = int( graphTimestamp6 ) -\
+ int( timestampBeginPtUp )
+ ptUpGraphToOfp7 = int( graphTimestamp7 ) -\
+ int( timestampBeginPtUp )
+ ptUpDeviceToOfp6 = int( deviceTimestamp6 ) -\
+ int( timestampBeginPtUp )
+ ptUpDeviceToOfp7 = int( deviceTimestamp7 ) -\
+ int( timestampBeginPtUp )
else:
- main.log.info("Average port up device-to-ofp result"+
- " exceeded the threshold: "+
- str(pt_up_device_to_ofp_avg))
-
- #END ITERATION FOR LOOP
-
- #Check all list for latency existence and set assertion
- if (port_down_graph_to_ofp_list and port_down_device_to_ofp_list\
- and port_up_graph_to_ofp_list and port_up_device_to_ofp_list):
+ ptUpGraphToOfp6 = 0
+ ptUpGraphToOfp7 = 0
+ ptUpDeviceToOfp6 = 0
+ ptUpDeviceToOfp7 = 0
+
+ ptUpGraphToOfpAvg = \
+ ( int( ptUpGraphToOfp1 ) +
+ int( ptUpGraphToOfp2 ) +
+ int( ptUpGraphToOfp3 ) +
+ int( ptUpGraphToOfp4 ) +
+ int( ptUpGraphToOfp5 ) +
+ int( ptUpGraphToOfp6 ) +
+ int( ptUpGraphToOfp7 ) ) / clusterCount
+
+ ptUpDeviceToOfpAvg = \
+ ( int( ptUpDeviceToOfp1 ) +
+ int( ptUpDeviceToOfp2 ) +
+ int( ptUpDeviceToOfp3 ) +
+ int( ptUpDeviceToOfp4 ) +
+ int( ptUpDeviceToOfp5 ) +
+ int( ptUpDeviceToOfp6 ) +
+ int( ptUpDeviceToOfp7 ) ) / clusterCount
+
+ if ptUpGraphToOfpAvg > upThresholdMin and \
+ ptUpGraphToOfpAvg < upThresholdMax:
+ portUpGraphToOfpList.append(
+ ptUpGraphToOfpAvg )
+ main.log.info( "Port down: graph to ofp avg: " +
+ str( ptUpGraphToOfpAvg ) + " ms" )
+ else:
+ main.log.info( "Average port up graph-to-ofp result" +
+ " exceeded the threshold: " +
+ str( ptUpGraphToOfpAvg ) )
+
+ if ptUpDeviceToOfpAvg > upThresholdMin and \
+ ptUpDeviceToOfpAvg < upThresholdMax:
+ portUpDeviceToOfpList.append(
+ ptUpDeviceToOfpAvg )
+ main.log.info( "Port up: device to ofp avg: " +
+ str( ptUpDeviceToOfpAvg ) + " ms" )
+ else:
+ main.log.info( "Average port up device-to-ofp result" +
+ " exceeded the threshold: " +
+ str( ptUpDeviceToOfpAvg ) )
+
+ # END ITERATION FOR LOOP
+
+ # Check all list for latency existence and set assertion
+ if ( portDownGraphToOfpList and portDownDeviceToOfpList
+ and portUpGraphToOfpList and portUpDeviceToOfpList ):
assertion = main.TRUE
-
- main.log.report("Cluster size: "+str(cluster_count)+\
- " node(s)")
- #Calculate and report latency measurements
- port_down_graph_to_ofp_min = min(port_down_graph_to_ofp_list)
- port_down_graph_to_ofp_max = max(port_down_graph_to_ofp_list)
- port_down_graph_to_ofp_avg = \
- (sum(port_down_graph_to_ofp_list) /
- len(port_down_graph_to_ofp_list))
- port_down_graph_to_ofp_std_dev = \
- str(round(numpy.std(port_down_graph_to_ofp_list),1))
-
- main.log.report("Port down graph-to-ofp "+
- "Avg: "+str(port_down_graph_to_ofp_avg)+" ms "+
- "Std Deviation: "+port_down_graph_to_ofp_std_dev+" ms")
-
- port_down_device_to_ofp_min = min(port_down_device_to_ofp_list)
- port_down_device_to_ofp_max = max(port_down_device_to_ofp_list)
- port_down_device_to_ofp_avg = \
- (sum(port_down_device_to_ofp_list) /\
- len(port_down_device_to_ofp_list))
- port_down_device_to_ofp_std_dev = \
- str(round(numpy.std(port_down_device_to_ofp_list),1))
-
- main.log.report("Port down device-to-ofp "+
- "Avg: "+str(port_down_device_to_ofp_avg)+" ms "+
- "Std Deviation: "+port_down_device_to_ofp_std_dev+" ms")
-
- port_up_graph_to_ofp_min = min(port_up_graph_to_ofp_list)
- port_up_graph_to_ofp_max = max(port_up_graph_to_ofp_list)
- port_up_graph_to_ofp_avg = \
- (sum(port_up_graph_to_ofp_list) /\
- len(port_up_graph_to_ofp_list))
- port_up_graph_to_ofp_std_dev = \
- str(round(numpy.std(port_up_graph_to_ofp_list),1))
-
- main.log.report("Port up graph-to-ofp "+
- "Avg: "+str(port_up_graph_to_ofp_avg)+" ms "+
- "Std Deviation: "+port_up_graph_to_ofp_std_dev+" ms")
-
- port_up_device_to_ofp_min = min(port_up_device_to_ofp_list)
- port_up_device_to_ofp_max = max(port_up_device_to_ofp_list)
- port_up_device_to_ofp_avg = \
- (sum(port_up_device_to_ofp_list) /\
- len(port_up_device_to_ofp_list))
- port_up_device_to_ofp_std_dev = \
- str(round(numpy.std(port_up_device_to_ofp_list),1))
-
- main.log.report("Port up device-to-ofp "+
- "Avg: "+str(port_up_device_to_ofp_avg)+" ms "+
- "Std Deviation: "+port_up_device_to_ofp_std_dev+" ms")
- #Remove switches from controller for next test
- main.Mininet1.delete_sw_controller("s1")
- main.Mininet1.delete_sw_controller("s2")
-
- utilities.assert_equals(expect=main.TRUE, actual=assertion,
- onpass="Port discovery latency calculation successful",
- onfail="Port discovery test failed")
+ main.log.report( "Cluster size: " + str( clusterCount ) +
+ " node(s)" )
+ # Calculate and report latency measurements
+ portDownGraphToOfpMin = min( portDownGraphToOfpList )
+ portDownGraphToOfpMax = max( portDownGraphToOfpList )
+ portDownGraphToOfpAvg = \
+ ( sum( portDownGraphToOfpList ) /
+ len( portDownGraphToOfpList ) )
+ portDownGraphToOfpStdDev = \
+ str( round( numpy.std( portDownGraphToOfpList ), 1 ) )
- def CASE4(self, main):
- '''
+ main.log.report( "Port down graph-to-ofp " +
+ "Avg: " + str( portDownGraphToOfpAvg ) + " ms " +
+ "Std Deviation: " + portDownGraphToOfpStdDev + " ms" )
+
+ portDownDeviceToOfpMin = min( portDownDeviceToOfpList )
+ portDownDeviceToOfpMax = max( portDownDeviceToOfpList )
+ portDownDeviceToOfpAvg = \
+ ( sum( portDownDeviceToOfpList ) /
+ len( portDownDeviceToOfpList ) )
+ portDownDeviceToOfpStdDev = \
+ str( round( numpy.std( portDownDeviceToOfpList ), 1 ) )
+
+ main.log.report(
+ "Port down device-to-ofp " +
+ "Avg: " +
+ str( portDownDeviceToOfpAvg ) +
+ " ms " +
+ "Std Deviation: " +
+ portDownDeviceToOfpStdDev +
+ " ms" )
+
+ portUpGraphToOfpMin = min( portUpGraphToOfpList )
+ portUpGraphToOfpMax = max( portUpGraphToOfpList )
+ portUpGraphToOfpAvg = \
+ ( sum( portUpGraphToOfpList ) /
+ len( portUpGraphToOfpList ) )
+ portUpGraphToOfpStdDev = \
+ str( round( numpy.std( portUpGraphToOfpList ), 1 ) )
+
+ main.log.report( "Port up graph-to-ofp " +
+ "Avg: " + str( portUpGraphToOfpAvg ) + " ms " +
+ "Std Deviation: " + portUpGraphToOfpStdDev + " ms" )
+
+ portUpDeviceToOfpMin = min( portUpDeviceToOfpList )
+ portUpDeviceToOfpMax = max( portUpDeviceToOfpList )
+ portUpDeviceToOfpAvg = \
+ ( sum( portUpDeviceToOfpList ) /
+ len( portUpDeviceToOfpList ) )
+ portUpDeviceToOfpStdDev = \
+ str( round( numpy.std( portUpDeviceToOfpList ), 1 ) )
+
+ main.log.report( "Port up device-to-ofp " +
+ "Avg: " + str( portUpDeviceToOfpAvg ) + " ms " +
+ "Std Deviation: " + portUpDeviceToOfpStdDev + " ms" )
+
+ # Remove switches from controller for next test
+ main.Mininet1.deleteSwController( "s1" )
+ main.Mininet1.deleteSwController( "s2" )
+
+ utilities.assert_equals(
+ expect=main.TRUE,
+ actual=assertion,
+ onpass="Port discovery latency calculation successful",
+ onfail="Port discovery test failed" )
+
+ def CASE4( self, main ):
+ """
Link down event using loss rate 100%
-
+
Important:
Use a simple 2 switch topology with 1 link between
- the two switches. Ensure that mac addresses of the
+ the two switches. Ensure that mac addresses of the
switches are 1 / 2 respectively
- '''
+ """
import time
import subprocess
import os
import requests
import json
- import numpy
-
- ONOS1_ip = main.params['CTRL']['ip1']
- ONOS2_ip = main.params['CTRL']['ip2']
- ONOS3_ip = main.params['CTRL']['ip3']
- ONOS_user = main.params['CTRL']['user']
+ import numpy
- default_sw_port = main.params['CTRL']['port1']
-
- #Number of iterations of case
- num_iter = main.params['TEST']['numIter']
-
- #Timestamp 'keys' for json metrics output.
- #These are subject to change, hence moved into params
- deviceTimestamp = main.params['JSON']['deviceTimestamp']
- linkTimestamp = main.params['JSON']['linkTimestamp']
- graphTimestamp = main.params['JSON']['graphTimestamp']
-
- debug_mode = main.params['TEST']['debugMode']
+ ONOS1Ip = main.params[ 'CTRL' ][ 'ip1' ]
+ ONOS2Ip = main.params[ 'CTRL' ][ 'ip2' ]
+ ONOS3Ip = main.params[ 'CTRL' ][ 'ip3' ]
+ ONOSUser = main.params[ 'CTRL' ][ 'user' ]
- local_time = time.strftime('%x %X')
- local_time = local_time.replace("/","")
- local_time = local_time.replace(" ","_")
- local_time = local_time.replace(":","")
- if debug_mode == 'on':
- main.ONOS1.tshark_pcap("eth0",
- "/tmp/link_lat_pcap_"+local_time)
+ defaultSwPort = main.params[ 'CTRL' ][ 'port1' ]
- #Threshold for this test case
- up_threshold_str = main.params['TEST']['linkUpThreshold']
- down_threshold_str = main.params['TEST']['linkDownThreshold']
+ # Number of iterations of case
+ numIter = main.params[ 'TEST' ][ 'numIter' ]
- up_threshold_obj = up_threshold_str.split(",")
- down_threshold_obj = down_threshold_str.split(",")
+ # Timestamp 'keys' for json metrics output.
+ # These are subject to change, hence moved into params
+ deviceTimestamp = main.params[ 'JSON' ][ 'deviceTimestamp' ]
+ linkTimestamp = main.params[ 'JSON' ][ 'linkTimestamp' ]
+ graphTimestamp = main.params[ 'JSON' ][ 'graphTimestamp' ]
- up_threshold_min = int(up_threshold_obj[0])
- up_threshold_max = int(up_threshold_obj[1])
+ debugMode = main.params[ 'TEST' ][ 'debugMode' ]
- down_threshold_min = int(down_threshold_obj[0])
- down_threshold_max = int(down_threshold_obj[1])
+ localTime = time.strftime( '%x %X' )
+ localTime = localTime.replace( "/", "" )
+ localTime = localTime.replace( " ", "_" )
+ localTime = localTime.replace( ":", "" )
+ if debugMode == 'on':
+ main.ONOS1.tsharkPcap( "eth0",
+ "/tmp/link_lat_pcap_" + localTime )
+
+ # Threshold for this test case
+ upThresholdStr = main.params[ 'TEST' ][ 'linkUpThreshold' ]
+ downThresholdStr = main.params[ 'TEST' ][ 'linkDownThreshold' ]
+
+ upThresholdObj = upThresholdStr.split( "," )
+ downThresholdObj = downThresholdStr.split( "," )
+
+ upThresholdMin = int( upThresholdObj[ 0 ] )
+ upThresholdMax = int( upThresholdObj[ 1 ] )
+
+ downThresholdMin = int( downThresholdObj[ 0 ] )
+ downThresholdMax = int( downThresholdObj[ 1 ] )
assertion = main.TRUE
- #Link event timestamp to system time list
- link_down_link_to_system_list = []
- link_up_link_to_system_list = []
- #Graph event timestamp to system time list
- link_down_graph_to_system_list = []
- link_up_graph_to_system_list = []
+ # Link event timestamp to system time list
+ linkDownLinkToSystemList = []
+ linkUpLinkToSystemList = []
+ # Graph event timestamp to system time list
+ linkDownGraphToSystemList = []
+ linkUpGraphToSystemList = []
- main.log.report("Link up / down discovery latency between "+
- "two switches")
- main.log.report("Simulated by setting loss-rate 100%")
- main.log.report("'tc qdisc add dev <intfs> root netem loss 100%'")
- main.log.report("Total iterations of test: "+str(num_iter))
+ main.log.report( "Link up / down discovery latency between " +
+ "two switches" )
+ main.log.report( "Simulated by setting loss-rate 100%" )
+ main.log.report( "'tc qdisc add dev <intfs> root netem loss 100%'" )
+ main.log.report( "Total iterations of test: " + str( numIter ) )
- main.step("Assign all switches")
- main.Mininet1.assign_sw_controller(sw="1",
- ip1=ONOS1_ip, port1=default_sw_port)
- main.Mininet1.assign_sw_controller(sw="2",
- ip1=ONOS1_ip, port1=default_sw_port)
+ main.step( "Assign all switches" )
+ main.Mininet1.assignSwController( sw="1",
+ ip1=ONOS1Ip, port1=defaultSwPort )
+ main.Mininet1.assignSwController( sw="2",
+ ip1=ONOS1Ip, port1=defaultSwPort )
- main.step("Verifying switch assignment")
- result_s1 = main.Mininet1.get_sw_controller(sw="s1")
- result_s2 = main.Mininet1.get_sw_controller(sw="s2")
-
- #Allow time for events to finish before taking measurements
- time.sleep(10)
+ main.step( "Verifying switch assignment" )
+ resultS1 = main.Mininet1.getSwController( sw="s1" )
+ resultS2 = main.Mininet1.getSwController( sw="s2" )
- link_down1 = False
- link_down2 = False
- link_down3 = False
- #Start iteration of link event test
- for i in range(0, int(num_iter)):
- main.step("Getting initial system time as t0")
-
- #System time in epoch ms
- timestamp_link_down_t0 = time.time() * 1000
- #Link down is simulated by 100% loss rate using traffic
- #control command
+ # Allow time for events to finish before taking measurements
+ time.sleep( 10 )
+
+ linkDown1 = False
+ linkDown2 = False
+ linkDown3 = False
+ # Start iteration of link event test
+ for i in range( 0, int( numIter ) ):
+ main.step( "Getting initial system time as t0" )
+
+ # System time in epoch ms
+ timestampLinkDownT0 = time.time() * 1000
+ # Link down is simulated by 100% loss rate using traffic
+ # control command
main.Mininet1.handle.sendline(
- "sh tc qdisc add dev s1-eth1 root netem loss 100%")
+ "sh tc qdisc add dev s1-eth1 root netem loss 100%" )
- #TODO: Iterate through 'links' command to verify that
- # link s1 -> s2 went down (loop timeout 30 seconds)
+ # TODO: Iterate through 'links' command to verify that
+ # link s1 -> s2 went down ( loop timeout 30 seconds )
# on all 3 ONOS instances
- main.log.info("Checking ONOS for link update")
- loop_count = 0
- while( not (link_down1 and link_down2 and link_down3)\
- and loop_count < 30 ):
- json_str1 = main.ONOS1cli.links()
- json_str2 = main.ONOS2cli.links()
- json_str3 = main.ONOS3cli.links()
-
- if not (json_str1 and json_str2 and json_str3):
- main.log.error("CLI command returned error ")
+ main.log.info( "Checking ONOS for link update" )
+ loopCount = 0
+ while( not ( linkDown1 and linkDown2 and linkDown3 )
+ and loopCount < 30 ):
+ jsonStr1 = main.ONOS1cli.links()
+ jsonStr2 = main.ONOS2cli.links()
+ jsonStr3 = main.ONOS3cli.links()
+
+ if not ( jsonStr1 and jsonStr2 and jsonStr3 ):
+ main.log.error( "CLI command returned error " )
break
else:
- json_obj1 = json.loads(json_str1)
- json_obj2 = json.loads(json_str2)
- json_obj3 = json.loads(json_str3)
- for obj1 in json_obj1:
- if '01' not in obj1['src']['device']:
- link_down1 = True
- main.log.info("Link down from "+
- "s1 -> s2 on ONOS1 detected")
- for obj2 in json_obj2:
- if '01' not in obj2['src']['device']:
- link_down2 = True
- main.log.info("Link down from "+
- "s1 -> s2 on ONOS2 detected")
- for obj3 in json_obj3:
- if '01' not in obj3['src']['device']:
- link_down3 = True
- main.log.info("Link down from "+
- "s1 -> s2 on ONOS3 detected")
-
- loop_count += 1
- #If CLI doesn't like the continuous requests
- #and exits in this loop, increase the sleep here.
- #Consequently, while loop timeout will increase
- time.sleep(1)
-
- #Give time for metrics measurement to catch up
- #NOTE: May need to be configured more accurately
- time.sleep(10)
- #If we exited the while loop and link down 1,2,3 are still
- #false, then ONOS has failed to discover link down event
- if not (link_down1 and link_down2 and link_down3):
- main.log.info("Link down discovery failed")
-
- link_down_lat_graph1 = 0
- link_down_lat_graph2 = 0
- link_down_lat_graph3 = 0
- link_down_lat_device1 = 0
- link_down_lat_device2 = 0
- link_down_lat_device3 = 0
-
+ jsonObj1 = json.loads( jsonStr1 )
+ jsonObj2 = json.loads( jsonStr2 )
+ jsonObj3 = json.loads( jsonStr3 )
+ for obj1 in jsonObj1:
+ if '01' not in obj1[ 'src' ][ 'device' ]:
+ linkDown1 = True
+ main.log.info( "Link down from " +
+ "s1 -> s2 on ONOS1 detected" )
+ for obj2 in jsonObj2:
+ if '01' not in obj2[ 'src' ][ 'device' ]:
+ linkDown2 = True
+ main.log.info( "Link down from " +
+ "s1 -> s2 on ONOS2 detected" )
+ for obj3 in jsonObj3:
+ if '01' not in obj3[ 'src' ][ 'device' ]:
+ linkDown3 = True
+ main.log.info( "Link down from " +
+ "s1 -> s2 on ONOS3 detected" )
+
+ loopCount += 1
+ # If CLI doesn't like the continuous requests
+ # and exits in this loop, increase the sleep here.
+ # Consequently, while loop timeout will increase
+ time.sleep( 1 )
+
+ # Give time for metrics measurement to catch up
+ # NOTE: May need to be configured more accurately
+ time.sleep( 10 )
+ # If we exited the while loop and link down 1,2,3 are still
+ # false, then ONOS has failed to discover link down event
+ if not ( linkDown1 and linkDown2 and linkDown3 ):
+ main.log.info( "Link down discovery failed" )
+
+ linkDownLatGraph1 = 0
+ linkDownLatGraph2 = 0
+ linkDownLatGraph3 = 0
+ linkDownLatDevice1 = 0
+ linkDownLatDevice2 = 0
+ linkDownLatDevice3 = 0
+
assertion = main.FALSE
else:
- json_topo_metrics_1 =\
- main.ONOS1cli.topology_events_metrics()
- json_topo_metrics_2 =\
- main.ONOS2cli.topology_events_metrics()
- json_topo_metrics_3 =\
- main.ONOS3cli.topology_events_metrics()
- json_topo_metrics_1 = json.loads(json_topo_metrics_1)
- json_topo_metrics_2 = json.loads(json_topo_metrics_2)
- json_topo_metrics_3 = json.loads(json_topo_metrics_3)
+ jsonTopoMetrics1 =\
+ main.ONOS1cli.topologyEventsMetrics()
+ jsonTopoMetrics2 =\
+ main.ONOS2cli.topologyEventsMetrics()
+ jsonTopoMetrics3 =\
+ main.ONOS3cli.topologyEventsMetrics()
+ jsonTopoMetrics1 = json.loads( jsonTopoMetrics1 )
+ jsonTopoMetrics2 = json.loads( jsonTopoMetrics2 )
+ jsonTopoMetrics3 = json.loads( jsonTopoMetrics3 )
- main.log.info("Obtaining graph and device timestamp")
- graph_timestamp_1 = \
- json_topo_metrics_1[graphTimestamp]['value']
- graph_timestamp_2 = \
- json_topo_metrics_2[graphTimestamp]['value']
- graph_timestamp_3 = \
- json_topo_metrics_3[graphTimestamp]['value']
+ main.log.info( "Obtaining graph and device timestamp" )
+ graphTimestamp1 = \
+ jsonTopoMetrics1[ graphTimestamp ][ 'value' ]
+ graphTimestamp2 = \
+ jsonTopoMetrics2[ graphTimestamp ][ 'value' ]
+ graphTimestamp3 = \
+ jsonTopoMetrics3[ graphTimestamp ][ 'value' ]
- link_timestamp_1 = \
- json_topo_metrics_1[linkTimestamp]['value']
- link_timestamp_2 = \
- json_topo_metrics_2[linkTimestamp]['value']
- link_timestamp_3 = \
- json_topo_metrics_3[linkTimestamp]['value']
+ linkTimestamp1 = \
+ jsonTopoMetrics1[ linkTimestamp ][ 'value' ]
+ linkTimestamp2 = \
+ jsonTopoMetrics2[ linkTimestamp ][ 'value' ]
+ linkTimestamp3 = \
+ jsonTopoMetrics3[ linkTimestamp ][ 'value' ]
- if graph_timestamp_1 and graph_timestamp_2 and\
- graph_timestamp_3 and link_timestamp_1 and\
- link_timestamp_2 and link_timestamp_3:
- link_down_lat_graph1 = int(graph_timestamp_1) -\
- int(timestamp_link_down_t0)
- link_down_lat_graph2 = int(graph_timestamp_2) -\
- int(timestamp_link_down_t0)
- link_down_lat_graph3 = int(graph_timestamp_3) -\
- int(timestamp_link_down_t0)
-
- link_down_lat_link1 = int(link_timestamp_1) -\
- int(timestamp_link_down_t0)
- link_down_lat_link2 = int(link_timestamp_2) -\
- int(timestamp_link_down_t0)
- link_down_lat_link3 = int(link_timestamp_3) -\
- int(timestamp_link_down_t0)
+ if graphTimestamp1 and graphTimestamp2 and\
+ graphTimestamp3 and linkTimestamp1 and\
+ linkTimestamp2 and linkTimestamp3:
+ linkDownLatGraph1 = int( graphTimestamp1 ) -\
+ int( timestampLinkDownT0 )
+ linkDownLatGraph2 = int( graphTimestamp2 ) -\
+ int( timestampLinkDownT0 )
+ linkDownLatGraph3 = int( graphTimestamp3 ) -\
+ int( timestampLinkDownT0 )
+
+ linkDownLatLink1 = int( linkTimestamp1 ) -\
+ int( timestampLinkDownT0 )
+ linkDownLatLink2 = int( linkTimestamp2 ) -\
+ int( timestampLinkDownT0 )
+ linkDownLatLink3 = int( linkTimestamp3 ) -\
+ int( timestampLinkDownT0 )
else:
- main.log.error("There was an error calculating"+
- " the delta for link down event")
- link_down_lat_graph1 = 0
- link_down_lat_graph2 = 0
- link_down_lat_graph3 = 0
-
- link_down_lat_device1 = 0
- link_down_lat_device2 = 0
- link_down_lat_device3 = 0
-
- main.log.info("Link down latency ONOS1 iteration "+
- str(i)+" (end-to-end): "+
- str(link_down_lat_graph1)+" ms")
- main.log.info("Link down latency ONOS2 iteration "+
- str(i)+" (end-to-end): "+
- str(link_down_lat_graph2)+" ms")
- main.log.info("Link down latency ONOS3 iteration "+
- str(i)+" (end-to-end): "+
- str(link_down_lat_graph3)+" ms")
-
- main.log.info("Link down latency ONOS1 iteration "+
- str(i)+" (link-event-to-system-timestamp): "+
- str(link_down_lat_link1)+" ms")
- main.log.info("Link down latency ONOS2 iteration "+
- str(i)+" (link-event-to-system-timestamp): "+
- str(link_down_lat_link2)+" ms")
- main.log.info("Link down latency ONOS3 iteration "+
- str(i)+" (link-event-to-system-timestamp): "+
- str(link_down_lat_link3))
-
- #Calculate avg of node calculations
- link_down_lat_graph_avg =\
- (link_down_lat_graph1 +
- link_down_lat_graph2 +
- link_down_lat_graph3) / 3
- link_down_lat_link_avg =\
- (link_down_lat_link1 +
- link_down_lat_link2 +
- link_down_lat_link3) / 3
+ main.log.error( "There was an error calculating" +
+ " the delta for link down event" )
+ linkDownLatGraph1 = 0
+ linkDownLatGraph2 = 0
+ linkDownLatGraph3 = 0
- #Set threshold and append latency to list
- if link_down_lat_graph_avg > down_threshold_min and\
- link_down_lat_graph_avg < down_threshold_max:
- link_down_graph_to_system_list.append(
- link_down_lat_graph_avg)
- else:
- main.log.info("Link down latency exceeded threshold")
- main.log.info("Results for iteration "+str(i)+
- "have been omitted")
- if link_down_lat_link_avg > down_threshold_min and\
- link_down_lat_link_avg < down_threshold_max:
- link_down_link_to_system_list.append(
- link_down_lat_link_avg)
- else:
- main.log.info("Link down latency exceeded threshold")
- main.log.info("Results for iteration "+str(i)+
- "have been omitted")
+ linkDownLatDevice1 = 0
+ linkDownLatDevice2 = 0
+ linkDownLatDevice3 = 0
- #NOTE: To remove loss rate and measure latency:
+ main.log.info( "Link down latency ONOS1 iteration " +
+ str( i ) + " (end-to-end): " +
+ str( linkDownLatGraph1 ) + " ms" )
+ main.log.info( "Link down latency ONOS2 iteration " +
+ str( i ) + " (end-to-end): " +
+ str( linkDownLatGraph2 ) + " ms" )
+ main.log.info( "Link down latency ONOS3 iteration " +
+ str( i ) + " (end-to-end): " +
+ str( linkDownLatGraph3 ) + " ms" )
+
+ main.log.info( "Link down latency ONOS1 iteration " +
+ str( i ) + " (link-event-to-system-timestamp): " +
+ str( linkDownLatLink1 ) + " ms" )
+ main.log.info( "Link down latency ONOS2 iteration " +
+ str( i ) + " (link-event-to-system-timestamp): " +
+ str( linkDownLatLink2 ) + " ms" )
+ main.log.info( "Link down latency ONOS3 iteration " +
+ str( i ) + " (link-event-to-system-timestamp): " +
+ str( linkDownLatLink3 ) )
+
+ # Calculate avg of node calculations
+ linkDownLatGraphAvg =\
+ ( linkDownLatGraph1 +
+ linkDownLatGraph2 +
+ linkDownLatGraph3 ) / 3
+ linkDownLatLinkAvg =\
+ ( linkDownLatLink1 +
+ linkDownLatLink2 +
+ linkDownLatLink3 ) / 3
+
+ # Set threshold and append latency to list
+ if linkDownLatGraphAvg > downThresholdMin and\
+ linkDownLatGraphAvg < downThresholdMax:
+ linkDownGraphToSystemList.append(
+ linkDownLatGraphAvg )
+ else:
+ main.log.info( "Link down latency exceeded threshold" )
+ main.log.info( "Results for iteration " + str( i ) +
+ "have been omitted" )
+ if linkDownLatLinkAvg > downThresholdMin and\
+ linkDownLatLinkAvg < downThresholdMax:
+ linkDownLinkToSystemList.append(
+ linkDownLatLinkAvg )
+ else:
+ main.log.info( "Link down latency exceeded threshold" )
+ main.log.info( "Results for iteration " + str( i ) +
+ "have been omitted" )
+
+ # NOTE: To remove loss rate and measure latency:
# 'sh tc qdisc del dev s1-eth1 root'
- timestamp_link_up_t0 = time.time() * 1000
- main.Mininet1.handle.sendline("sh tc qdisc del dev "+
- "s1-eth1 root")
- main.Mininet1.handle.expect("mininet>")
-
- main.log.info("Checking ONOS for link update")
-
- link_down1 = True
- link_down2 = True
- link_down3 = True
- loop_count = 0
- while( (link_down1 and link_down2 and link_down3)\
- and loop_count < 30 ):
- json_str1 = main.ONOS1cli.links()
- json_str2 = main.ONOS2cli.links()
- json_str3 = main.ONOS3cli.links()
- if not (json_str1 and json_str2 and json_str3):
- main.log.error("CLI command returned error ")
+ timestampLinkUpT0 = time.time() * 1000
+ main.Mininet1.handle.sendline( "sh tc qdisc del dev " +
+ "s1-eth1 root" )
+ main.Mininet1.handle.expect( "mininet>" )
+
+ main.log.info( "Checking ONOS for link update" )
+
+ linkDown1 = True
+ linkDown2 = True
+ linkDown3 = True
+ loopCount = 0
+ while( ( linkDown1 and linkDown2 and linkDown3 )
+ and loopCount < 30 ):
+ jsonStr1 = main.ONOS1cli.links()
+ jsonStr2 = main.ONOS2cli.links()
+ jsonStr3 = main.ONOS3cli.links()
+ if not ( jsonStr1 and jsonStr2 and jsonStr3 ):
+ main.log.error( "CLI command returned error " )
break
else:
- json_obj1 = json.loads(json_str1)
- json_obj2 = json.loads(json_str2)
- json_obj3 = json.loads(json_str3)
-
- for obj1 in json_obj1:
- if '01' in obj1['src']['device']:
- link_down1 = False
- main.log.info("Link up from "+
- "s1 -> s2 on ONOS1 detected")
- for obj2 in json_obj2:
- if '01' in obj2['src']['device']:
- link_down2 = False
- main.log.info("Link up from "+
- "s1 -> s2 on ONOS2 detected")
- for obj3 in json_obj3:
- if '01' in obj3['src']['device']:
- link_down3 = False
- main.log.info("Link up from "+
- "s1 -> s2 on ONOS3 detected")
-
- loop_count += 1
- time.sleep(1)
-
- if (link_down1 and link_down2 and link_down3):
- main.log.info("Link up discovery failed")
-
- link_up_lat_graph1 = 0
- link_up_lat_graph2 = 0
- link_up_lat_graph3 = 0
- link_up_lat_device1 = 0
- link_up_lat_device2 = 0
- link_up_lat_device3 = 0
-
+ jsonObj1 = json.loads( jsonStr1 )
+ jsonObj2 = json.loads( jsonStr2 )
+ jsonObj3 = json.loads( jsonStr3 )
+
+ for obj1 in jsonObj1:
+ if '01' in obj1[ 'src' ][ 'device' ]:
+ linkDown1 = False
+ main.log.info( "Link up from " +
+ "s1 -> s2 on ONOS1 detected" )
+ for obj2 in jsonObj2:
+ if '01' in obj2[ 'src' ][ 'device' ]:
+ linkDown2 = False
+ main.log.info( "Link up from " +
+ "s1 -> s2 on ONOS2 detected" )
+ for obj3 in jsonObj3:
+ if '01' in obj3[ 'src' ][ 'device' ]:
+ linkDown3 = False
+ main.log.info( "Link up from " +
+ "s1 -> s2 on ONOS3 detected" )
+
+ loopCount += 1
+ time.sleep( 1 )
+
+ if ( linkDown1 and linkDown2 and linkDown3 ):
+ main.log.info( "Link up discovery failed" )
+
+ linkUpLatGraph1 = 0
+ linkUpLatGraph2 = 0
+ linkUpLatGraph3 = 0
+ linkUpLatDevice1 = 0
+ linkUpLatDevice2 = 0
+ linkUpLatDevice3 = 0
+
assertion = main.FALSE
else:
- json_topo_metrics_1 =\
- main.ONOS1cli.topology_events_metrics()
- json_topo_metrics_2 =\
- main.ONOS2cli.topology_events_metrics()
- json_topo_metrics_3 =\
- main.ONOS3cli.topology_events_metrics()
- json_topo_metrics_1 = json.loads(json_topo_metrics_1)
- json_topo_metrics_2 = json.loads(json_topo_metrics_2)
- json_topo_metrics_3 = json.loads(json_topo_metrics_3)
+ jsonTopoMetrics1 =\
+ main.ONOS1cli.topologyEventsMetrics()
+ jsonTopoMetrics2 =\
+ main.ONOS2cli.topologyEventsMetrics()
+ jsonTopoMetrics3 =\
+ main.ONOS3cli.topologyEventsMetrics()
+ jsonTopoMetrics1 = json.loads( jsonTopoMetrics1 )
+ jsonTopoMetrics2 = json.loads( jsonTopoMetrics2 )
+ jsonTopoMetrics3 = json.loads( jsonTopoMetrics3 )
- main.log.info("Obtaining graph and device timestamp")
- graph_timestamp_1 = \
- json_topo_metrics_1[graphTimestamp]['value']
- graph_timestamp_2 = \
- json_topo_metrics_2[graphTimestamp]['value']
- graph_timestamp_3 = \
- json_topo_metrics_3[graphTimestamp]['value']
+ main.log.info( "Obtaining graph and device timestamp" )
+ graphTimestamp1 = \
+ jsonTopoMetrics1[ graphTimestamp ][ 'value' ]
+ graphTimestamp2 = \
+ jsonTopoMetrics2[ graphTimestamp ][ 'value' ]
+ graphTimestamp3 = \
+ jsonTopoMetrics3[ graphTimestamp ][ 'value' ]
- link_timestamp_1 = \
- json_topo_metrics_1[linkTimestamp]['value']
- link_timestamp_2 = \
- json_topo_metrics_2[linkTimestamp]['value']
- link_timestamp_3 = \
- json_topo_metrics_3[linkTimestamp]['value']
+ linkTimestamp1 = \
+ jsonTopoMetrics1[ linkTimestamp ][ 'value' ]
+ linkTimestamp2 = \
+ jsonTopoMetrics2[ linkTimestamp ][ 'value' ]
+ linkTimestamp3 = \
+ jsonTopoMetrics3[ linkTimestamp ][ 'value' ]
- if graph_timestamp_1 and graph_timestamp_2 and\
- graph_timestamp_3 and link_timestamp_1 and\
- link_timestamp_2 and link_timestamp_3:
- link_up_lat_graph1 = int(graph_timestamp_1) -\
- int(timestamp_link_up_t0)
- link_up_lat_graph2 = int(graph_timestamp_2) -\
- int(timestamp_link_up_t0)
- link_up_lat_graph3 = int(graph_timestamp_3) -\
- int(timestamp_link_up_t0)
-
- link_up_lat_link1 = int(link_timestamp_1) -\
- int(timestamp_link_up_t0)
- link_up_lat_link2 = int(link_timestamp_2) -\
- int(timestamp_link_up_t0)
- link_up_lat_link3 = int(link_timestamp_3) -\
- int(timestamp_link_up_t0)
+ if graphTimestamp1 and graphTimestamp2 and\
+ graphTimestamp3 and linkTimestamp1 and\
+ linkTimestamp2 and linkTimestamp3:
+ linkUpLatGraph1 = int( graphTimestamp1 ) -\
+ int( timestampLinkUpT0 )
+ linkUpLatGraph2 = int( graphTimestamp2 ) -\
+ int( timestampLinkUpT0 )
+ linkUpLatGraph3 = int( graphTimestamp3 ) -\
+ int( timestampLinkUpT0 )
+
+ linkUpLatLink1 = int( linkTimestamp1 ) -\
+ int( timestampLinkUpT0 )
+ linkUpLatLink2 = int( linkTimestamp2 ) -\
+ int( timestampLinkUpT0 )
+ linkUpLatLink3 = int( linkTimestamp3 ) -\
+ int( timestampLinkUpT0 )
else:
- main.log.error("There was an error calculating"+
- " the delta for link down event")
- link_up_lat_graph1 = 0
- link_up_lat_graph2 = 0
- link_up_lat_graph3 = 0
-
- link_up_lat_device1 = 0
- link_up_lat_device2 = 0
- link_up_lat_device3 = 0
-
- if debug_mode == 'on':
- main.log.info("Link up latency ONOS1 iteration "+
- str(i)+" (end-to-end): "+
- str(link_up_lat_graph1)+" ms")
- main.log.info("Link up latency ONOS2 iteration "+
- str(i)+" (end-to-end): "+
- str(link_up_lat_graph2)+" ms")
- main.log.info("Link up latency ONOS3 iteration "+
- str(i)+" (end-to-end): "+
- str(link_up_lat_graph3)+" ms")
-
- main.log.info("Link up latency ONOS1 iteration "+
- str(i)+" (link-event-to-system-timestamp): "+
- str(link_up_lat_link1)+" ms")
- main.log.info("Link up latency ONOS2 iteration "+
- str(i)+" (link-event-to-system-timestamp): "+
- str(link_up_lat_link2)+" ms")
- main.log.info("Link up latency ONOS3 iteration "+
- str(i)+" (link-event-to-system-timestamp): "+
- str(link_up_lat_link3))
-
- #Calculate avg of node calculations
- link_up_lat_graph_avg =\
- (link_up_lat_graph1 +
- link_up_lat_graph2 +
- link_up_lat_graph3) / 3
- link_up_lat_link_avg =\
- (link_up_lat_link1 +
- link_up_lat_link2 +
- link_up_lat_link3) / 3
+ main.log.error( "There was an error calculating" +
+ " the delta for link down event" )
+ linkUpLatGraph1 = 0
+ linkUpLatGraph2 = 0
+ linkUpLatGraph3 = 0
- #Set threshold and append latency to list
- if link_up_lat_graph_avg > up_threshold_min and\
- link_up_lat_graph_avg < up_threshold_max:
- link_up_graph_to_system_list.append(
- link_up_lat_graph_avg)
+                linkUpLatLink1 = 0
+                linkUpLatLink2 = 0
+                linkUpLatLink3 = 0
+
+ if debugMode == 'on':
+ main.log.info( "Link up latency ONOS1 iteration " +
+ str( i ) + " (end-to-end): " +
+ str( linkUpLatGraph1 ) + " ms" )
+ main.log.info( "Link up latency ONOS2 iteration " +
+ str( i ) + " (end-to-end): " +
+ str( linkUpLatGraph2 ) + " ms" )
+ main.log.info( "Link up latency ONOS3 iteration " +
+ str( i ) + " (end-to-end): " +
+ str( linkUpLatGraph3 ) + " ms" )
+
+ main.log.info(
+ "Link up latency ONOS1 iteration " +
+ str( i ) +
+ " (link-event-to-system-timestamp): " +
+ str( linkUpLatLink1 ) +
+ " ms" )
+ main.log.info(
+ "Link up latency ONOS2 iteration " +
+ str( i ) +
+ " (link-event-to-system-timestamp): " +
+ str( linkUpLatLink2 ) +
+ " ms" )
+ main.log.info(
+ "Link up latency ONOS3 iteration " +
+ str( i ) +
+ " (link-event-to-system-timestamp): " +
+ str( linkUpLatLink3 ) )
+
+ # Calculate avg of node calculations
+ linkUpLatGraphAvg =\
+ ( linkUpLatGraph1 +
+ linkUpLatGraph2 +
+ linkUpLatGraph3 ) / 3
+ linkUpLatLinkAvg =\
+ ( linkUpLatLink1 +
+ linkUpLatLink2 +
+ linkUpLatLink3 ) / 3
+
+ # Set threshold and append latency to list
+ if linkUpLatGraphAvg > upThresholdMin and\
+ linkUpLatGraphAvg < upThresholdMax:
+ linkUpGraphToSystemList.append(
+ linkUpLatGraphAvg )
else:
- main.log.info("Link up latency exceeded threshold")
- main.log.info("Results for iteration "+str(i)+
- "have been omitted")
- if link_up_lat_link_avg > up_threshold_min and\
- link_up_lat_link_avg < up_threshold_max:
- link_up_link_to_system_list.append(
- link_up_lat_link_avg)
+ main.log.info( "Link up latency exceeded threshold" )
+                main.log.info( "Results for iteration " + str( i ) +
+                               " have been omitted" )
+ if linkUpLatLinkAvg > upThresholdMin and\
+ linkUpLatLinkAvg < upThresholdMax:
+ linkUpLinkToSystemList.append(
+ linkUpLatLinkAvg )
else:
- main.log.info("Link up latency exceeded threshold")
- main.log.info("Results for iteration "+str(i)+
- "have been omitted")
+ main.log.info( "Link up latency exceeded threshold" )
+                main.log.info( "Results for iteration " + str( i ) +
+                               " have been omitted" )
- #Calculate min, max, avg of list and report
- link_down_min = min(link_down_graph_to_system_list)
- link_down_max = max(link_down_graph_to_system_list)
- link_down_avg = sum(link_down_graph_to_system_list) / \
- len(link_down_graph_to_system_list)
- link_up_min = min(link_up_graph_to_system_list)
- link_up_max = max(link_up_graph_to_system_list)
- link_up_avg = sum(link_up_graph_to_system_list) / \
- len(link_up_graph_to_system_list)
- link_down_std_dev = \
- str(round(numpy.std(link_down_graph_to_system_list),1))
- link_up_std_dev = \
- str(round(numpy.std(link_up_graph_to_system_list),1))
+ # Calculate min, max, avg of list and report
+ linkDownMin = min( linkDownGraphToSystemList )
+ linkDownMax = max( linkDownGraphToSystemList )
+ linkDownAvg = sum( linkDownGraphToSystemList ) / \
+ len( linkDownGraphToSystemList )
+ linkUpMin = min( linkUpGraphToSystemList )
+ linkUpMax = max( linkUpGraphToSystemList )
+ linkUpAvg = sum( linkUpGraphToSystemList ) / \
+ len( linkUpGraphToSystemList )
+ linkDownStdDev = \
+ str( round( numpy.std( linkDownGraphToSystemList ), 1 ) )
+ linkUpStdDev = \
+ str( round( numpy.std( linkUpGraphToSystemList ), 1 ) )
- main.log.report("Link down latency " +
- "Avg: "+str(link_down_avg)+" ms "+
- "Std Deviation: "+link_down_std_dev+" ms")
- main.log.report("Link up latency "+
- "Avg: "+str(link_up_avg)+" ms "+
- "Std Deviation: "+link_up_std_dev+" ms")
+ main.log.report( "Link down latency " +
+ "Avg: " + str( linkDownAvg ) + " ms " +
+ "Std Deviation: " + linkDownStdDev + " ms" )
+ main.log.report( "Link up latency " +
+ "Avg: " + str( linkUpAvg ) + " ms " +
+ "Std Deviation: " + linkUpStdDev + " ms" )
- utilities.assert_equals(expect=main.TRUE, actual=assertion,
- onpass="Link discovery latency calculation successful",
- onfail="Link discovery latency case failed")
+ utilities.assert_equals(
+ expect=main.TRUE,
+ actual=assertion,
+ onpass="Link discovery latency calculation successful",
+ onfail="Link discovery latency case failed" )
- def CASE5(self, main):
- '''
+ def CASE5( self, main ):
+ """
100 Switch discovery latency
Important:
- This test case can be potentially dangerous if
+ This test case can be potentially dangerous if
your machine has previously set iptables rules.
One of the steps of the test case will flush
all existing iptables rules.
Note:
- You can specify the number of switches in the
+ You can specify the number of switches in the
params file to adjust the switch discovery size
- (and specify the corresponding topology in Mininet1
- .topo file)
- '''
+ ( and specify the corresponding topology in Mininet1
+ .topo file )
+ """
import time
import subprocess
import os
import requests
import json
- ONOS1_ip = main.params['CTRL']['ip1']
- ONOS2_ip = main.params['CTRL']['ip2']
- ONOS3_ip = main.params['CTRL']['ip3']
- MN1_ip = main.params['MN']['ip1']
- ONOS_user = main.params['CTRL']['user']
+ ONOS1Ip = main.params[ 'CTRL' ][ 'ip1' ]
+ ONOS2Ip = main.params[ 'CTRL' ][ 'ip2' ]
+ ONOS3Ip = main.params[ 'CTRL' ][ 'ip3' ]
+ MN1Ip = main.params[ 'MN' ][ 'ip1' ]
+ ONOSUser = main.params[ 'CTRL' ][ 'user' ]
- default_sw_port = main.params['CTRL']['port1']
-
- #Number of iterations of case
- num_iter = main.params['TEST']['numIter']
- num_sw = main.params['TEST']['numSwitch']
+ defaultSwPort = main.params[ 'CTRL' ][ 'port1' ]
- #Timestamp 'keys' for json metrics output.
- #These are subject to change, hence moved into params
- deviceTimestamp = main.params['JSON']['deviceTimestamp']
- graphTimestamp = main.params['JSON']['graphTimestamp']
-
- debug_mode = main.params['TEST']['debugMode']
+ # Number of iterations of case
+ numIter = main.params[ 'TEST' ][ 'numIter' ]
+ numSw = main.params[ 'TEST' ][ 'numSwitch' ]
- local_time = time.strftime('%X')
- local_time = local_time.replace("/","")
- local_time = local_time.replace(" ","_")
- local_time = local_time.replace(":","")
- if debug_mode == 'on':
- main.ONOS1.tshark_pcap("eth0",
- "/tmp/100_sw_lat_pcap_"+local_time)
-
- #Threshold for this test case
- sw_disc_threshold_str = main.params['TEST']['swDisc100Threshold']
- sw_disc_threshold_obj = sw_disc_threshold_str.split(",")
- sw_disc_threshold_min = int(sw_disc_threshold_obj[0])
- sw_disc_threshold_max = int(sw_disc_threshold_obj[1])
+ # Timestamp 'keys' for json metrics output.
+ # These are subject to change, hence moved into params
+ deviceTimestamp = main.params[ 'JSON' ][ 'deviceTimestamp' ]
+ graphTimestamp = main.params[ 'JSON' ][ 'graphTimestamp' ]
- tshark_ofp_output = "/tmp/tshark_ofp_"+num_sw+"sw.txt"
- tshark_tcp_output = "/tmp/tshark_tcp_"+num_sw+"sw.txt"
+ debugMode = main.params[ 'TEST' ][ 'debugMode' ]
- tshark_ofp_result_list = []
- tshark_tcp_result_list = []
+ localTime = time.strftime( '%X' )
+ localTime = localTime.replace( "/", "" )
+ localTime = localTime.replace( " ", "_" )
+ localTime = localTime.replace( ":", "" )
+ if debugMode == 'on':
+ main.ONOS1.tsharkPcap( "eth0",
+ "/tmp/100_sw_lat_pcap_" + localTime )
- sw_discovery_lat_list = []
+ # Threshold for this test case
+ swDiscThresholdStr = main.params[ 'TEST' ][ 'swDisc100Threshold' ]
+ swDiscThresholdObj = swDiscThresholdStr.split( "," )
+ swDiscThresholdMin = int( swDiscThresholdObj[ 0 ] )
+ swDiscThresholdMax = int( swDiscThresholdObj[ 1 ] )
- main.case(num_sw+" Switch discovery latency")
- main.step("Assigning all switches to ONOS1")
- for i in range(1, int(num_sw)+1):
- main.Mininet1.assign_sw_controller(
- sw=str(i),
- ip1=ONOS1_ip,
- port1=default_sw_port)
-
- #Ensure that nodes are configured with ptpd
- #Just a warning message
- main.log.info("Please check ptpd configuration to ensure"+\
- " All nodes' system times are in sync")
- time.sleep(5)
+ tsharkOfpOutput = "/tmp/tshark_ofp_" + numSw + "sw.txt"
+ tsharkTcpOutput = "/tmp/tshark_tcp_" + numSw + "sw.txt"
- for i in range(0, int(num_iter)):
-
- main.step("Set iptables rule to block incoming sw connections")
- #Set iptables rule to block incoming switch connections
- #The rule description is as follows:
+ tsharkOfpResultList = []
+ tsharkTcpResultList = []
+
+ swDiscoveryLatList = []
+
+ main.case( numSw + " Switch discovery latency" )
+ main.step( "Assigning all switches to ONOS1" )
+ for i in range( 1, int( numSw ) + 1 ):
+ main.Mininet1.assignSwController(
+ sw=str( i ),
+ ip1=ONOS1Ip,
+ port1=defaultSwPort )
+
+ # Ensure that nodes are configured with ptpd
+ # Just a warning message
+ main.log.info( "Please check ptpd configuration to ensure" +
+ " All nodes' system times are in sync" )
+ time.sleep( 5 )
+
+ for i in range( 0, int( numIter ) ):
+
+ main.step( "Set iptables rule to block incoming sw connections" )
+ # Set iptables rule to block incoming switch connections
+ # The rule description is as follows:
# Append to INPUT rule,
# behavior DROP that matches following:
# * packet type: tcp
- # * source IP: MN1_ip
+ # * source IP: MN1Ip
# * destination PORT: 6633
main.ONOS1.handle.sendline(
- "sudo iptables -A INPUT -p tcp -s "+MN1_ip+
- " --dport "+default_sw_port+" -j DROP")
- main.ONOS1.handle.expect("\$")
- # Append to OUTPUT rule,
+ "sudo iptables -A INPUT -p tcp -s " + MN1Ip +
+ " --dport " + defaultSwPort + " -j DROP" )
+ main.ONOS1.handle.expect( "\$" )
+ # Append to OUTPUT rule,
# behavior DROP that matches following:
# * packet type: tcp
- # * source IP: MN1_ip
+ # * source IP: MN1Ip
# * destination PORT: 6633
main.ONOS1.handle.sendline(
- "sudo iptables -A OUTPUT -p tcp -s "+MN1_ip+
- " --dport "+default_sw_port+" -j DROP")
- main.ONOS1.handle.expect("\$")
- #Give time to allow rule to take effect
- #NOTE: Sleep period may need to be configured
+ "sudo iptables -A OUTPUT -p tcp -s " + MN1Ip +
+ " --dport " + defaultSwPort + " -j DROP" )
+ main.ONOS1.handle.expect( "\$" )
+ # Give time to allow rule to take effect
+ # NOTE: Sleep period may need to be configured
# based on the number of switches in the topology
- main.log.info("Please wait for switch connection to "+
- "time out")
- time.sleep(60)
-
- #Gather vendor OFP with tshark
- main.ONOS1.tshark_grep("OFP 86 Vendor",
- tshark_ofp_output)
- main.ONOS1.tshark_grep("TCP 74 ",
- tshark_tcp_output)
+ main.log.info( "Please wait for switch connection to " +
+ "time out" )
+ time.sleep( 60 )
- #NOTE: Remove all iptables rule quickly (flush)
- # Before removal, obtain TestON timestamp at which
+ # Gather vendor OFP with tshark
+ main.ONOS1.tsharkGrep( "OFP 86 Vendor",
+ tsharkOfpOutput )
+ main.ONOS1.tsharkGrep( "TCP 74 ",
+ tsharkTcpOutput )
+
+ # NOTE: Remove all iptables rule quickly ( flush )
+ # Before removal, obtain TestON timestamp at which
# removal took place
- # (ensuring nodes are configured via ptp)
+ # ( ensuring nodes are configured via ptp )
# sudo iptables -F
-
- t0_system = time.time() * 1000
+
+ t0System = time.time() * 1000
main.ONOS1.handle.sendline(
- "sudo iptables -F")
+ "sudo iptables -F" )
- #Counter to track loop count
- counter_loop = 0
- counter_avail1 = 0
- counter_avail2 = 0
- counter_avail3 = 0
- onos1_dev = False
- onos2_dev = False
- onos3_dev = False
- while counter_loop < 60:
- #Continue to check devices for all device
- #availability. When all devices in all 3
- #ONOS instances indicate that devices are available
- #obtain graph event timestamp for t1.
- device_str_obj1 = main.ONOS1cli.devices()
- device_str_obj2 = main.ONOS2cli.devices()
- device_str_obj3 = main.ONOS3cli.devices()
+ # Counter to track loop count
+ counterLoop = 0
+ counterAvail1 = 0
+ counterAvail2 = 0
+ counterAvail3 = 0
+ onos1Dev = False
+ onos2Dev = False
+ onos3Dev = False
+ while counterLoop < 60:
+ # Continue to check devices for all device
+ # availability. When all devices in all 3
+ # ONOS instances indicate that devices are available
+ # obtain graph event timestamp for t1.
+ deviceStrObj1 = main.ONOS1cli.devices()
+ deviceStrObj2 = main.ONOS2cli.devices()
+ deviceStrObj3 = main.ONOS3cli.devices()
- device_json1 = json.loads(device_str_obj1)
- device_json2 = json.loads(device_str_obj2)
- device_json3 = json.loads(device_str_obj3)
-
- for device1 in device_json1:
- if device1['available'] == True:
- counter_avail1 += 1
- if counter_avail1 == int(num_sw):
- onos1_dev = True
- main.log.info("All devices have been "+
- "discovered on ONOS1")
+ deviceJson1 = json.loads( deviceStrObj1 )
+ deviceJson2 = json.loads( deviceStrObj2 )
+ deviceJson3 = json.loads( deviceStrObj3 )
+
+ for device1 in deviceJson1:
+ if device1[ 'available' ]:
+ counterAvail1 += 1
+ if counterAvail1 == int( numSw ):
+ onos1Dev = True
+ main.log.info( "All devices have been " +
+ "discovered on ONOS1" )
else:
- counter_avail1 = 0
- for device2 in device_json2:
- if device2['available'] == True:
- counter_avail2 += 1
- if counter_avail2 == int(num_sw):
- onos2_dev = True
- main.log.info("All devices have been "+
- "discovered on ONOS2")
+ counterAvail1 = 0
+ for device2 in deviceJson2:
+ if device2[ 'available' ]:
+ counterAvail2 += 1
+ if counterAvail2 == int( numSw ):
+ onos2Dev = True
+ main.log.info( "All devices have been " +
+ "discovered on ONOS2" )
else:
- counter_avail2 = 0
- for device3 in device_json3:
- if device3['available'] == True:
- counter_avail3 += 1
- if counter_avail3 == int(num_sw):
- onos3_dev = True
- main.log.info("All devices have been "+
- "discovered on ONOS3")
+ counterAvail2 = 0
+ for device3 in deviceJson3:
+ if device3[ 'available' ]:
+ counterAvail3 += 1
+ if counterAvail3 == int( numSw ):
+ onos3Dev = True
+ main.log.info( "All devices have been " +
+ "discovered on ONOS3" )
else:
- counter_avail3 = 0
+ counterAvail3 = 0
- if onos1_dev and onos2_dev and onos3_dev:
- main.log.info("All devices have been discovered "+
- "on all ONOS instances")
- json_str_topology_metrics_1 =\
- main.ONOS1cli.topology_events_metrics()
- json_str_topology_metrics_2 =\
- main.ONOS2cli.topology_events_metrics()
- json_str_topology_metrics_3 =\
- main.ONOS3cli.topology_events_metrics()
-
- #Exit while loop if all devices discovered
- break
-
- counter_loop += 1
- #Give some time in between CLI calls
- #(will not affect measurement)
- time.sleep(3)
+ if onos1Dev and onos2Dev and onos3Dev:
+ main.log.info( "All devices have been discovered " +
+ "on all ONOS instances" )
+ jsonStrTopologyMetrics1 =\
+ main.ONOS1cli.topologyEventsMetrics()
+ jsonStrTopologyMetrics2 =\
+ main.ONOS2cli.topologyEventsMetrics()
+ jsonStrTopologyMetrics3 =\
+ main.ONOS3cli.topologyEventsMetrics()
- main.ONOS1.tshark_stop()
-
- os.system("scp "+ONOS_user+"@"+ONOS1_ip+":"+
- tshark_ofp_output+" /tmp/")
- os.system("scp "+ONOS_user+"@"+ONOS1_ip+":"+
- tshark_tcp_output+" /tmp/")
+ # Exit while loop if all devices discovered
+ break
- #TODO: Automate OFP output analysis
- #Debug mode - print out packets captured at runtime
- if debug_mode == 'on':
- ofp_file = open(tshark_ofp_output, 'r')
- main.log.info("Tshark OFP Vendor output: ")
- for line in ofp_file:
- tshark_ofp_result_list.append(line)
- main.log.info(line)
- ofp_file.close()
+ counterLoop += 1
+ # Give some time in between CLI calls
+                # ( will not affect measurement )
+ time.sleep( 3 )
- tcp_file = open(tshark_tcp_output, 'r')
- main.log.info("Tshark TCP 74 output: ")
- for line in tcp_file:
- tshark_tcp_result_list.append(line)
- main.log.info(line)
- tcp_file.close()
+ main.ONOS1.tsharkStop()
- json_obj_1 = json.loads(json_str_topology_metrics_1)
- json_obj_2 = json.loads(json_str_topology_metrics_2)
- json_obj_3 = json.loads(json_str_topology_metrics_3)
+ os.system( "scp " + ONOSUser + "@" + ONOS1Ip + ":" +
+ tsharkOfpOutput + " /tmp/" )
+ os.system( "scp " + ONOSUser + "@" + ONOS1Ip + ":" +
+ tsharkTcpOutput + " /tmp/" )
- graph_timestamp_1 = \
- json_obj_1[graphTimestamp]['value']
- graph_timestamp_2 = \
- json_obj_2[graphTimestamp]['value']
- graph_timestamp_3 = \
- json_obj_3[graphTimestamp]['value']
+ # TODO: Automate OFP output analysis
+ # Debug mode - print out packets captured at runtime
+ if debugMode == 'on':
+ ofpFile = open( tsharkOfpOutput, 'r' )
+ main.log.info( "Tshark OFP Vendor output: " )
+ for line in ofpFile:
+ tsharkOfpResultList.append( line )
+ main.log.info( line )
+ ofpFile.close()
- graph_lat_1 = int(graph_timestamp_1) - int(t0_system)
- graph_lat_2 = int(graph_timestamp_2) - int(t0_system)
- graph_lat_3 = int(graph_timestamp_3) - int(t0_system)
+ tcpFile = open( tsharkTcpOutput, 'r' )
+ main.log.info( "Tshark TCP 74 output: " )
+ for line in tcpFile:
+ tsharkTcpResultList.append( line )
+ main.log.info( line )
+ tcpFile.close()
- avg_graph_lat = \
- (int(graph_lat_1) +\
- int(graph_lat_2) +\
- int(graph_lat_3)) / 3
-
- if avg_graph_lat > sw_disc_threshold_min \
- and avg_graph_lat < sw_disc_threshold_max:
- sw_discovery_lat_list.append(
- avg_graph_lat)
+ jsonObj1 = json.loads( jsonStrTopologyMetrics1 )
+ jsonObj2 = json.loads( jsonStrTopologyMetrics2 )
+ jsonObj3 = json.loads( jsonStrTopologyMetrics3 )
+
+ graphTimestamp1 = \
+ jsonObj1[ graphTimestamp ][ 'value' ]
+ graphTimestamp2 = \
+ jsonObj2[ graphTimestamp ][ 'value' ]
+ graphTimestamp3 = \
+ jsonObj3[ graphTimestamp ][ 'value' ]
+
+ graphLat1 = int( graphTimestamp1 ) - int( t0System )
+ graphLat2 = int( graphTimestamp2 ) - int( t0System )
+ graphLat3 = int( graphTimestamp3 ) - int( t0System )
+
+ avgGraphLat = \
+ ( int( graphLat1 ) +
+ int( graphLat2 ) +
+ int( graphLat3 ) ) / 3
+
+ if avgGraphLat > swDiscThresholdMin \
+ and avgGraphLat < swDiscThresholdMax:
+ swDiscoveryLatList.append(
+ avgGraphLat )
else:
- main.log.info("100 Switch discovery latency "+
- "exceeded the threshold.")
-
- #END ITERATION FOR LOOP
+ main.log.info( "100 Switch discovery latency " +
+ "exceeded the threshold." )
- sw_lat_min = min(sw_discovery_lat_list)
- sw_lat_max = max(sw_discovery_lat_list)
- sw_lat_avg = sum(sw_discovery_lat_list) /\
- len(sw_discovery_lat_list)
+ # END ITERATION FOR LOOP
- main.log.report("100 Switch discovery lat "+\
- "Min: "+str(sw_lat_min)+" ms"+\
- "Max: "+str(sw_lat_max)+" ms"+\
- "Avg: "+str(sw_lat_avg)+" ms")
+ swLatMin = min( swDiscoveryLatList )
+ swLatMax = max( swDiscoveryLatList )
+ swLatAvg = sum( swDiscoveryLatList ) /\
+ len( swDiscoveryLatList )
- def CASE6(self, main):
- '''
+ main.log.report( "100 Switch discovery lat " +
+ "Min: " + str( swLatMin ) + " ms" +
+ "Max: " + str( swLatMax ) + " ms" +
+ "Avg: " + str( swLatAvg ) + " ms" )
+
+ def CASE6( self, main ):
+ """
Increase number of nodes and initiate CLI
- '''
+ """
import time
-
- ONOS1_ip = main.params['CTRL']['ip1']
- ONOS2_ip = main.params['CTRL']['ip2']
- ONOS3_ip = main.params['CTRL']['ip3']
- ONOS4_ip = main.params['CTRL']['ip4']
- ONOS5_ip = main.params['CTRL']['ip5']
- ONOS6_ip = main.params['CTRL']['ip6']
- ONOS7_ip = main.params['CTRL']['ip7']
- cell_name = main.params['ENV']['cellName']
-
- global cluster_count
-
- #Cluster size increased everytime the case is defined
- cluster_count += 2
+ ONOS1Ip = main.params[ 'CTRL' ][ 'ip1' ]
+ ONOS2Ip = main.params[ 'CTRL' ][ 'ip2' ]
+ ONOS3Ip = main.params[ 'CTRL' ][ 'ip3' ]
+ ONOS4Ip = main.params[ 'CTRL' ][ 'ip4' ]
+ ONOS5Ip = main.params[ 'CTRL' ][ 'ip5' ]
+ ONOS6Ip = main.params[ 'CTRL' ][ 'ip6' ]
+ ONOS7Ip = main.params[ 'CTRL' ][ 'ip7' ]
- main.log.report("Increasing cluster size to "+
- str(cluster_count))
+ cellName = main.params[ 'ENV' ][ 'cellName' ]
- install_result = main.FALSE
- if cluster_count == 3:
- main.log.info("Installing nodes 2 and 3")
- node2_result = \
- main.ONOSbench.onos_install(node=ONOS2_ip)
- node3_result = \
- main.ONOSbench.onos_install(node=ONOS3_ip)
- install_result = node2_result and node3_result
-
- time.sleep(5)
+ global clusterCount
- main.ONOS2cli.start_onos_cli(ONOS2_ip)
- main.ONOS3cli.start_onos_cli(ONOS3_ip)
+ # Cluster size increased everytime the case is defined
+ clusterCount += 2
- elif cluster_count == 5:
- main.log.info("Installing nodes 4 and 5")
- node4_result = \
- main.ONOSbench.onos_install(node=ONOS4_ip)
- node5_result = \
- main.ONOSbench.onos_install(node=ONOS5_ip)
- install_result = node4_result and node5_result
+ main.log.report( "Increasing cluster size to " +
+ str( clusterCount ) )
- time.sleep(5)
+ installResult = main.FALSE
+ if clusterCount == 3:
+ main.log.info( "Installing nodes 2 and 3" )
+ node2Result = \
+ main.ONOSbench.onosInstall( node=ONOS2Ip )
+ node3Result = \
+ main.ONOSbench.onosInstall( node=ONOS3Ip )
+ installResult = node2Result and node3Result
- main.ONOS4cli.start_onos_cli(ONOS4_ip)
- main.ONOS5cli.start_onos_cli(ONOS5_ip)
+ time.sleep( 5 )
- elif cluster_count == 7:
- main.log.info("Installing nodes 4 and 5")
- node6_result = \
- main.ONOSbench.onos_install(node=ONOS6_ip)
- node7_result = \
- main.ONOSbench.onos_install(node=ONOS7_ip)
- install_result = node6_result and node7_result
+ main.ONOS2cli.startOnosCli( ONOS2Ip )
+ main.ONOS3cli.startOnosCli( ONOS3Ip )
- time.sleep(5)
+ elif clusterCount == 5:
+ main.log.info( "Installing nodes 4 and 5" )
+ node4Result = \
+ main.ONOSbench.onosInstall( node=ONOS4Ip )
+ node5Result = \
+ main.ONOSbench.onosInstall( node=ONOS5Ip )
+ installResult = node4Result and node5Result
- main.ONOS6cli.start_onos_cli(ONOS6_ip)
- main.ONOS7cli.start_onos_cli(ONOS7_ip)
+ time.sleep( 5 )
+ main.ONOS4cli.startOnosCli( ONOS4Ip )
+ main.ONOS5cli.startOnosCli( ONOS5Ip )
+ elif clusterCount == 7:
+            main.log.info( "Installing nodes 6 and 7" )
+ node6Result = \
+ main.ONOSbench.onosInstall( node=ONOS6Ip )
+ node7Result = \
+ main.ONOSbench.onosInstall( node=ONOS7Ip )
+ installResult = node6Result and node7Result
+ time.sleep( 5 )
+
+ main.ONOS6cli.startOnosCli( ONOS6Ip )
+ main.ONOS7cli.startOnosCli( ONOS7Ip )