WIP: intent performance tests - run across 1/3/5/7-node clusters, parameterize reroute interface, scale batch intent size
diff --git a/TestON/tests/IntentPerfNext/Dependency/IntentClass.py b/TestON/tests/IntentPerfNext/Dependency/IntentClass.py
new file mode 100644
index 0000000..f5b17c2
--- /dev/null
+++ b/TestON/tests/IntentPerfNext/Dependency/IntentClass.py
@@ -0,0 +1,23 @@
+
+def __init__(self):
+    self_ = self
+
+def printLog(main):
+    main.log.info("Print log success")
+
+def iptablesDropAllNodes(main, MN_ip, sw_port):
+    #Append DROP rules to the INPUT and OUTPUT chains of every
+    #ONOS node so that TCP traffic between the Mininet host and
+    #the given switch port is blocked
+    nodes = [main.ONOS1, main.ONOS2, main.ONOS3, main.ONOS4,
+             main.ONOS5, main.ONOS6, main.ONOS7]
+    for chain in ("INPUT", "OUTPUT"):
+        for node in nodes:
+            node.handle.sendline(
+                "sudo iptables -A "+chain+" -p tcp -s "+
+                MN_ip+" --dport "+sw_port+" -j DROP")
+
+def uninstallAllNodes(main, node_ip_list):
+    #Uninstall ONOS from every node in the given ip list
+    for node in node_ip_list:
+        main.ONOSbench.onos_uninstall(node_ip = node)
diff --git a/TestON/tests/IntentPerfNext/Dependency/__init__.py b/TestON/tests/IntentPerfNext/Dependency/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/TestON/tests/IntentPerfNext/Dependency/__init__.py
diff --git a/TestON/tests/IntentPerfNext/IntentPerfNext.params b/TestON/tests/IntentPerfNext/IntentPerfNext.params
index edba27b..dbcd05c 100644
--- a/TestON/tests/IntentPerfNext/IntentPerfNext.params
+++ b/TestON/tests/IntentPerfNext/IntentPerfNext.params
@@ -1,5 +1,5 @@
 <PARAMS>
-    <testcases>1,5,7,2,3</testcases>
+    <testcases>1,4,5,2,3,4,5,2,3,4,5,2,3,4</testcases>
 
     <ENV>
         <cellName>intent_perf_test</cellName>
@@ -36,13 +36,15 @@
 
     <TEST>
         #Number of times to iterate each case
-        <numIter>15</numIter>
-        <numIgnore>3</numIgnore>
-        <numSwitch>9</numSwitch>
+        <numIter>12</numIter>
+        <numIgnore>2</numIgnore>
+        <numSwitch>8</numSwitch>
         <batchThresholdMin>0</batchThresholdMin>
         <batchThresholdMax>1000</batchThresholdMax>
-        <batchIntentSize>200</batchIntentSize>
+        <batchIntentSize>1</batchIntentSize>
         <numMult>1</numMult>
+        #Interface to bring down for intent reroute case
+        <intfs>s3-eth2</intfs>
     </TEST>
 
     <JSON>
diff --git a/TestON/tests/IntentPerfNext/IntentPerfNext.py b/TestON/tests/IntentPerfNext/IntentPerfNext.py
index daa1ae2..74ce298 100644
--- a/TestON/tests/IntentPerfNext/IntentPerfNext.py
+++ b/TestON/tests/IntentPerfNext/IntentPerfNext.py
@@ -1,832 +1,1099 @@
-# Intent Performance Test for ONOS-next
+#Intent Performance Test for ONOS-next
 #
-# andrew@onlab.us
+#andrew@onlab.us
 #
-# November 5, 2014
-
+#November 5, 2014
 
 class IntentPerfNext:
-
-    def __init__( self ):
+    def __init__(self):
         self.default = ""
 
-    def CASE1( self, main ):
-        """
+    def CASE1(self, main):
+        '''
         ONOS startup sequence
-        """
+        '''
+        
         import time
         global cluster_count
-        cluster_count = 1
+        cluster_count = 1 
 
-        cell_name = main.params[ 'ENV' ][ 'cellName' ]
+        cell_name = main.params['ENV']['cellName']
 
-        git_pull = main.params[ 'GIT' ][ 'autoPull' ]
-        checkout_branch = main.params[ 'GIT' ][ 'checkout' ]
+        git_pull = main.params['GIT']['autoPull']
+        checkout_branch = main.params['GIT']['checkout']
 
-        ONOS1_ip = main.params[ 'CTRL' ][ 'ip1' ]
-        ONOS2_ip = main.params[ 'CTRL' ][ 'ip2' ]
-        ONOS3_ip = main.params[ 'CTRL' ][ 'ip3' ]
-        ONOS4_ip = main.params[ 'CTRL' ][ 'ip4' ]
-        ONOS5_ip = main.params[ 'CTRL' ][ 'ip5' ]
-        ONOS6_ip = main.params[ 'CTRL' ][ 'ip6' ]
-        ONOS7_ip = main.params[ 'CTRL' ][ 'ip7' ]
+        ONOS1_ip = main.params['CTRL']['ip1']
+        ONOS2_ip = main.params['CTRL']['ip2']
+        ONOS3_ip = main.params['CTRL']['ip3']
+        ONOS4_ip = main.params['CTRL']['ip4']
+        ONOS5_ip = main.params['CTRL']['ip5']
+        ONOS6_ip = main.params['CTRL']['ip6']
+        ONOS7_ip = main.params['CTRL']['ip7']
 
-        main.ONOSbench.onos_uninstall( node_ip=ONOS1_ip )
-        main.ONOSbench.onos_uninstall( node_ip=ONOS2_ip )
-        main.ONOSbench.onos_uninstall( node_ip=ONOS3_ip )
-        main.ONOSbench.onos_uninstall( node_ip=ONOS4_ip )
-        main.ONOSbench.onos_uninstall( node_ip=ONOS5_ip )
-        main.ONOSbench.onos_uninstall( node_ip=ONOS6_ip )
-        main.ONOSbench.onos_uninstall( node_ip=ONOS7_ip )
+        main.ONOSbench.onos_uninstall(node_ip=ONOS1_ip)
+        main.ONOSbench.onos_uninstall(node_ip=ONOS2_ip)
+        main.ONOSbench.onos_uninstall(node_ip=ONOS3_ip)
+        main.ONOSbench.onos_uninstall(node_ip=ONOS4_ip)
+        main.ONOSbench.onos_uninstall(node_ip=ONOS5_ip)
+        main.ONOSbench.onos_uninstall(node_ip=ONOS6_ip)
+        main.ONOSbench.onos_uninstall(node_ip=ONOS7_ip)
 
-        MN1_ip = main.params[ 'MN' ][ 'ip1' ]
-        BENCH_ip = main.params[ 'BENCH' ][ 'ip' ]
+        MN1_ip = main.params['MN']['ip1']
+        BENCH_ip = main.params['BENCH']['ip']
+    
+        main.case("Setting up test environment")
 
-        main.case( "Setting up test environment" )
-
-        main.step( "Creating cell file" )
+        main.step("Creating cell file")
         cell_file_result = main.ONOSbench.create_cell_file(
-            BENCH_ip, cell_name, MN1_ip,
-            "onos-core,onos-app-metrics,onos-gui",
-            # ONOS1_ip, ONOS2_ip, ONOS3_ip )
-            ONOS1_ip )
+                BENCH_ip, cell_name, MN1_ip,
+                "onos-core,onos-app-metrics,onos-gui",
+                ONOS1_ip)
 
-        main.step( "Applying cell file to environment" )
-        cell_apply_result = main.ONOSbench.set_cell( cell_name )
+        main.step("Applying cell file to environment")
+        cell_apply_result = main.ONOSbench.set_cell(cell_name)
         verify_cell_result = main.ONOSbench.verify_cell()
 
-        main.step( "Removing raft logs" )
+        main.step("Removing raft logs")
         main.ONOSbench.onos_remove_raft_logs()
 
-        main.step( "Git checkout and pull " + checkout_branch )
+        main.step("Git checkout and pull "+checkout_branch)
         if git_pull == 'on':
             checkout_result = \
-                main.ONOSbench.git_checkout( checkout_branch )
+                main.ONOSbench.git_checkout(checkout_branch)
             pull_result = main.ONOSbench.git_pull()
-
-            # If you used git pull, auto compile
-            main.step( "Using onos-build to compile ONOS" )
+           
+            #If you used git pull, auto compile
+            main.step("Using onos-build to compile ONOS")
             build_result = main.ONOSbench.onos_build()
         else:
             checkout_result = main.TRUE
             pull_result = main.TRUE
             build_result = main.TRUE
-            main.log.info( "Git pull skipped by configuration" )
+            main.log.info("Git pull skipped by configuration")
 
-        main.log.report( "Commit information - " )
-        main.ONOSbench.get_version( report=True )
+        main.log.report("Commit information - ")
+        main.ONOSbench.get_version(report=True)
 
-        main.step( "Creating ONOS package" )
+        main.step("Creating ONOS package")
         package_result = main.ONOSbench.onos_package()
 
-        main.step( "Installing ONOS package" )
-        install1_result = main.ONOSbench.onos_install( node=ONOS1_ip )
-        #install2_result = main.ONOSbench.onos_install( node=ONOS2_ip )
-        #install3_result = main.ONOSbench.onos_install( node=ONOS3_ip )
+        main.step("Installing ONOS package")
+        install1_result = main.ONOSbench.onos_install(node=ONOS1_ip)
+        #install2_result = main.ONOSbench.onos_install(node=ONOS2_ip)
+        #install3_result = main.ONOSbench.onos_install(node=ONOS3_ip)
 
-        main.step( "Set cell for ONOScli env" )
-        main.ONOS1cli.set_cell( cell_name )
-        # main.ONOS2cli.set_cell( cell_name )
-        # main.ONOS3cli.set_cell( cell_name )
+        main.step("Set cell for ONOScli env")
+        main.ONOS1cli.set_cell(cell_name)
+        #main.ONOS2cli.set_cell(cell_name)
+        #main.ONOS3cli.set_cell(cell_name)
 
-        time.sleep( 5 )
+        time.sleep(5)
 
-        main.step( "Start onos cli" )
-        cli1 = main.ONOS1cli.start_onos_cli( ONOS1_ip )
-        #cli2 = main.ONOS2cli.start_onos_cli( ONOS2_ip )
-        #cli3 = main.ONOS3cli.start_onos_cli( ONOS3_ip )
+        main.step("Start onos cli")
+        cli1 = main.ONOS1cli.start_onos_cli(ONOS1_ip)
+        #cli2 = main.ONOS2cli.start_onos_cli(ONOS2_ip)
+        #cli3 = main.ONOS3cli.start_onos_cli(ONOS3_ip)
 
-        utilities.assert_equals( expect=main.TRUE,
-                                 actual=cell_file_result and cell_apply_result and
-                                 verify_cell_result and checkout_result and
-                                 pull_result and build_result and
-                                 install1_result,  # and install2_result and
-                                 # install3_result,
-                                 onpass="ONOS started successfully",
-                                 onfail="Failed to start ONOS" )
+        utilities.assert_equals(expect=main.TRUE,
+                actual = cell_file_result and cell_apply_result and\
+                         verify_cell_result and checkout_result and\
+                         pull_result and build_result and\
+                         install1_result, #and install2_result and\
+                         #install3_result,
+                onpass="ONOS started successfully",
+                onfail="Failed to start ONOS")
 
-    def CASE2( self, main ):
-        """
+    def CASE2(self, main):
+        '''
         Single intent add latency
 
-        """
+        '''
         import time
         import json
         import requests
         import os
         import numpy
+        global cluster_count
 
-        ONOS1_ip = main.params[ 'CTRL' ][ 'ip1' ]
-        ONOS2_ip = main.params[ 'CTRL' ][ 'ip2' ]
-        ONOS3_ip = main.params[ 'CTRL' ][ 'ip3' ]
-        ONOS_user = main.params[ 'CTRL' ][ 'user' ]
+        ONOS1_ip = main.params['CTRL']['ip1']
+        ONOS2_ip = main.params['CTRL']['ip2']
+        ONOS3_ip = main.params['CTRL']['ip3']
+        ONOS_ip_list = []
+        for i in range(1, 8):
+            ONOS_ip_list.append(main.params['CTRL']['ip'+str(i)])
+        
+        ONOS_user = main.params['CTRL']['user']
 
-        default_sw_port = main.params[ 'CTRL' ][ 'port1' ]
+        default_sw_port = main.params['CTRL']['port1']
 
-        # number of iterations of case
-        num_iter = main.params[ 'TEST' ][ 'numIter' ]
-        num_ignore = int( main.params[ 'TEST' ][ 'numIgnore' ] )
+        #number of iterations of case
+        num_iter = main.params['TEST']['numIter']
+        num_ignore = int(main.params['TEST']['numIgnore'])
 
-        # Timestamp keys for json metrics output
-        submit_time = main.params[ 'JSON' ][ 'submittedTime' ]
-        install_time = main.params[ 'JSON' ][ 'installedTime' ]
-        wdRequest_time = main.params[ 'JSON' ][ 'wdRequestTime' ]
-        withdrawn_time = main.params[ 'JSON' ][ 'withdrawnTime' ]
+        #Timestamp keys for json metrics output
+        submit_time = main.params['JSON']['submittedTime']
+        install_time = main.params['JSON']['installedTime']
+        wdRequest_time = main.params['JSON']['wdRequestTime']
+        withdrawn_time = main.params['JSON']['withdrawnTime']
+        
+        assertion = main.TRUE
 
         intent_add_lat_list = []
+        
+        #Distribute switches according to cluster count
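+        #Hardcoded mapping: 1 node -> all switches to ONOS1;
+        #3 nodes -> s1-2:ONOS1, s3-5:ONOS2, s6-8:ONOS3;
+        #5 nodes -> s1-2:ONOS1, s3-4:ONOS2, s5-6:ONOS3, s7:ONOS4, s8:ONOS5;
+        #7 nodes -> s1-7 to ONOS1-7 respectively, s8 to ONOS7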
+        for i in range(1, 9):
+            if cluster_count == 1:
+                main.Mininet1.assign_sw_controller(
+                    sw=str(i), ip1=ONOS_ip_list[0],
+                    port1=default_sw_port
+                    )
+            elif cluster_count == 3:
+                if i < 3:
+                    index = 0
+                elif i < 6 and i >= 3:
+                    index = 1
+                else:
+                    index = 2
+                main.Mininet1.assign_sw_controller(
+                    sw=str(i), ip1=ONOS_ip_list[index],
+                    port1=default_sw_port
+                    )
+            elif cluster_count == 5:
+                if i < 3:
+                    index = 0
+                elif i < 5 and i >= 3:
+                    index = 1
+                elif i < 7 and i >= 5:
+                    index = 2
+                elif i == 7:
+                    index = 3
+                else:
+                    index = 4
+                main.Mininet1.assign_sw_controller(
+                    sw=str(i), ip1=ONOS_ip_list[index],
+                    port1=default_sw_port
+                    )
+            elif cluster_count == 7:
+                if i < 8:
+                    index = i - 1
+                else:
+                    index = 6
+                main.Mininet1.assign_sw_controller(
+                    sw=str(i), ip1=ONOS_ip_list[index],
+                    port1=default_sw_port
+                    )
 
-        # Assign 'linear' switch format for basic intent testing
-        main.Mininet1.assign_sw_controller(
-            sw="1", ip1=ONOS1_ip, port1=default_sw_port )
-        main.Mininet1.assign_sw_controller(
-            sw="2", ip1=ONOS2_ip, port1=default_sw_port )
-        main.Mininet1.assign_sw_controller(
-            sw="3", ip1=ONOS2_ip, port1=default_sw_port )
-        main.Mininet1.assign_sw_controller(
-            sw="4", ip1=ONOS2_ip, port1=default_sw_port )
-        main.Mininet1.assign_sw_controller(
-            sw="5", ip1=ONOS3_ip, port1=default_sw_port )
+        time.sleep(10)
 
-        time.sleep( 10 )
-
-        main.log.report( "Single intent add latency test" )
+        main.log.report("Single intent add latency test")
 
         devices_json_str = main.ONOS1cli.devices()
-        devices_json_obj = json.loads( devices_json_str )
+        devices_json_obj = json.loads(devices_json_str)
+        
+        if not devices_json_obj:
+            main.log.report("Devices not discovered")
+            main.log.report("Aborting test")
+            main.exit()
+        else:
+            main.log.info("Devices discovered successfully")
+
         device_id_list = []
 
-        # Obtain device id list in ONOS format.
-        # They should already be in order ( 1,2,3,10,11,12,13, etc )
+        #Obtain device id list in ONOS format.
+        #They should already be in order (1,2,3,10,11,12,13, etc)
         for device in devices_json_obj:
-            device_id_list.append( device[ 'id' ] )
+            device_id_list.append(device['id'])
 
-        for i in range( 0, int( num_iter ) ):
-            # add_point_intent( ingr_device,  egr_device,
-            #                 ingr_port,    egr_port )
+        for i in range(0, int(num_iter)):
+            #add_point_intent(ingr_device,  egr_device,
+            #                 ingr_port,    egr_port)
             main.ONOS1cli.add_point_intent(
-                device_id_list[ 0 ] + "/1", device_id_list[ 4 ] + "/1" )
+                device_id_list[0]+"/2", device_id_list[7]+"/2")
+        
+            #Allow some time for intents to propagate
+            time.sleep(5)
+            
+            intents_str = main.ONOS1cli.intents(json_format=True)
+            intents_obj = json.loads(intents_str)
+            for intent in intents_obj:
+                if intent['state'] == "INSTALLED":
+                    main.log.info("Intent installed successfully")
+                    intent_id = intent['id']
+                    main.log.info("Intent id: "+str(intent_id))
+                else:
+                    #TODO: Add error handling
+                    main.log.info("Intent installation failed")
+                    intent_id = ""
 
-            # Allow some time for intents to propagate
-            time.sleep( 5 )
-
-            # Obtain metrics from ONOS 1, 2, 3
+            #Obtain metrics from ONOS1 (additional nodes queried below based on cluster_count)
             intents_json_str_1 = main.ONOS1cli.intents_events_metrics()
-            intents_json_str_2 = main.ONOS2cli.intents_events_metrics()
-            intents_json_str_3 = main.ONOS3cli.intents_events_metrics()
-
-            intents_json_obj_1 = json.loads( intents_json_str_1 )
-            intents_json_obj_2 = json.loads( intents_json_str_2 )
-            intents_json_obj_3 = json.loads( intents_json_str_3 )
-
-            # Parse values from the json object
+            intents_json_obj_1 = json.loads(intents_json_str_1)
+            #Parse values from the json object
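+            #Install latency per node = installed timestamp - submitted timestamp (ms)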
             intent_submit_1 = \
-                intents_json_obj_1[ submit_time ][ 'value' ]
-            intent_submit_2 = \
-                intents_json_obj_2[ submit_time ][ 'value' ]
-            intent_submit_3 = \
-                intents_json_obj_3[ submit_time ][ 'value' ]
-
+                    intents_json_obj_1[submit_time]['value']
             intent_install_1 = \
-                intents_json_obj_1[ install_time ][ 'value' ]
-            intent_install_2 = \
-                intents_json_obj_2[ install_time ][ 'value' ]
-            intent_install_3 = \
-                intents_json_obj_3[ install_time ][ 'value' ]
-
+                    intents_json_obj_1[install_time]['value']
             intent_install_lat_1 = \
-                int( intent_install_1 ) - int( intent_submit_1 )
-            intent_install_lat_2 = \
-                int( intent_install_2 ) - int( intent_submit_2 )
-            intent_install_lat_3 = \
-                int( intent_install_3 ) - int( intent_submit_3 )
+                    int(intent_install_1) - int(intent_submit_1)
+            
+            if cluster_count >= 3:
+                intents_json_str_2 = main.ONOS2cli.intents_events_metrics()
+                intents_json_str_3 = main.ONOS3cli.intents_events_metrics()
+                intents_json_obj_2 = json.loads(intents_json_str_2)
+                intents_json_obj_3 = json.loads(intents_json_str_3)
+                intent_submit_2 = \
+                    intents_json_obj_2[submit_time]['value']
+                intent_submit_3 = \
+                    intents_json_obj_3[submit_time]['value']
+                intent_install_2 = \
+                    intents_json_obj_2[install_time]['value']
+                intent_install_3 = \
+                    intents_json_obj_3[install_time]['value']
+                intent_install_lat_2 = \
+                    int(intent_install_2) - int(intent_submit_2)
+                intent_install_lat_3 = \
+                    int(intent_install_3) - int(intent_submit_3)
+            else:
+                intent_install_lat_2 = 0
+                intent_install_lat_3 = 0
+
+            if cluster_count >= 5:
+                intents_json_str_4 = main.ONOS4cli.intents_events_metrics()
+                intents_json_str_5 = main.ONOS5cli.intents_events_metrics()
+                intents_json_obj_4 = json.loads(intents_json_str_4)
+                intents_json_obj_5 = json.loads(intents_json_str_5)
+                intent_submit_4 = \
+                    intents_json_obj_4[submit_time]['value']
+                intent_submit_5 = \
+                    intents_json_obj_5[submit_time]['value']
+                intent_install_4 = \
+                    intents_json_obj_4[install_time]['value']
+                intent_install_5 = \
+                    intents_json_obj_5[install_time]['value']
+                intent_install_lat_4 = \
+                    int(intent_install_4) - int(intent_submit_4)
+                intent_install_lat_5 = \
+                    int(intent_install_5) - int(intent_submit_5)
+            else:
+                intent_install_lat_4 = 0
+                intent_install_lat_5 = 0
+
+            if cluster_count >= 7:
+                intents_json_str_6 = main.ONOS6cli.intents_events_metrics()
+                intents_json_str_7 = main.ONOS7cli.intents_events_metrics()
+                intents_json_obj_6 = json.loads(intents_json_str_6)
+                intents_json_obj_7 = json.loads(intents_json_str_7)
+                intent_submit_6 = \
+                    intents_json_obj_6[submit_time]['value']
+                intent_submit_7 = \
+                    intents_json_obj_7[submit_time]['value']
+                intent_install_6 = \
+                    intents_json_obj_6[install_time]['value']
+                intent_install_7 = \
+                    intents_json_obj_7[install_time]['value']
+                intent_install_lat_6 = \
+                    int(intent_install_6) - int(intent_submit_6)
+                intent_install_lat_7 = \
+                    int(intent_install_7) - int(intent_submit_7)
+            else:
+                intent_install_lat_6 = 0
+                intent_install_lat_7 = 0
 
             intent_install_lat_avg = \
-                ( intent_install_lat_1 +
-                  intent_install_lat_2 +
-                  intent_install_lat_3 ) / 3
+                    (intent_install_lat_1 + 
+                     intent_install_lat_2 +
+                     intent_install_lat_3 +
+                     intent_install_lat_4 +
+                     intent_install_lat_5 +
+                     intent_install_lat_6 +
+                     intent_install_lat_7) / cluster_count
 
-            main.log.info( "Intent add latency avg for iteration " + str( i ) +
-                           ": " + str( intent_install_lat_avg ) + " ms" )
+            main.log.info("Intent add latency avg for iteration "+str(i)+
+                    ": "+str(intent_install_lat_avg)+" ms")
 
             if intent_install_lat_avg > 0.0 and \
                intent_install_lat_avg < 1000 and i > num_ignore:
-                intent_add_lat_list.append( intent_install_lat_avg )
+                intent_add_lat_list.append(intent_install_lat_avg)
             else:
-                main.log.info( "Intent add latency exceeded " +
-                               "threshold. Skipping iteration " + str( i ) )
+                main.log.info("Intent add latency exceeded "+
+                        "threshold. Skipping iteration "+str(i))
 
-            time.sleep( 3 )
-
-            # TODO: Possibly put this in the driver function
-            main.log.info( "Removing intents for next iteration" )
+            time.sleep(3)
+            
+            #TODO: Only remove intents that were installed 
+            #      in this case... Otherwise many other intents
+            #      may show up distorting the results
+            main.log.info("Removing intents for next iteration")
             json_temp = \
-                main.ONOS1cli.intents( json_format=True )
-            json_obj_intents = json.loads( json_temp )
+                    main.ONOS1cli.intents(json_format=True)
+            json_obj_intents = json.loads(json_temp)
             if json_obj_intents:
                 for intents in json_obj_intents:
-                    temp_id = intents[ 'id' ]
-                    main.ONOS1cli.remove_intent( temp_id )
-                    main.log.info( "Removing intent id: " +
-                                   str( temp_id ) )
-                    main.ONOS1cli.remove_intent( temp_id )
+                    temp_id = intents['id']
+                    #main.ONOS1cli.remove_intent(temp_id)
+                    main.log.info("Removing intent id: "+
+                        str(temp_id))
+                    main.ONOS1cli.remove_intent(temp_id)
             else:
-                main.log.info( "Intents were not installed correctly" )
+                main.log.info("Intents were not installed correctly")
 
-            time.sleep( 5 )
+            time.sleep(5)
 
-        intent_add_lat_avg = sum( intent_add_lat_list ) /\
-            len( intent_add_lat_list )
+        if intent_add_lat_list:
+            intent_add_lat_avg = sum(intent_add_lat_list) /\
+                             len(intent_add_lat_list)
+        else:
+            main.log.report("Intent installation latency test failed")
+            intent_add_lat_avg = "NA"
+            assertion = main.FALSE
+
         intent_add_lat_std = \
-            round( numpy.std( intent_add_lat_list ), 1 )
-        # END ITERATION FOR LOOP
-        main.log.report( "Single intent add latency - " )
-        main.log.report( "Avg: " + str( intent_add_lat_avg ) + " ms" )
-        main.log.report(
-            "Std Deviation: " +
-            str( intent_add_lat_std ) +
-            " ms" )
+            round(numpy.std(intent_add_lat_list),1)
+        #END ITERATION FOR LOOP
+        main.log.report("Single intent add latency - ")
+        main.log.report("Avg: "+str(intent_add_lat_avg)+" ms")
+        main.log.report("Std Deviation: "+str(intent_add_lat_std)+" ms")
+        
+        utilities.assert_equals(expect=main.TRUE, actual=assertion,
+            onpass="Single intent install latency test successful",
+            onfail="Single intent install latency test failed")
 
-    def CASE3( self, main ):
-        """
+    def CASE3(self, main):
+        '''
         Intent Reroute latency
-        """
+        '''
         import time
         import json
         import requests
         import os
         import numpy
+        global cluster_count
 
-        ONOS1_ip = main.params[ 'CTRL' ][ 'ip1' ]
-        ONOS2_ip = main.params[ 'CTRL' ][ 'ip2' ]
-        ONOS3_ip = main.params[ 'CTRL' ][ 'ip3' ]
-        ONOS_user = main.params[ 'CTRL' ][ 'user' ]
+        ONOS1_ip = main.params['CTRL']['ip1']
+        ONOS2_ip = main.params['CTRL']['ip2']
+        ONOS3_ip = main.params['CTRL']['ip3']
+        ONOS_user = main.params['CTRL']['user']
 
-        default_sw_port = main.params[ 'CTRL' ][ 'port1' ]
+        default_sw_port = main.params['CTRL']['port1']
 
-        # number of iterations of case
-        num_iter = main.params[ 'TEST' ][ 'numIter' ]
-        num_ignore = int( main.params[ 'TEST' ][ 'numIgnore' ] )
+        #number of iterations of case
+        num_iter = main.params['TEST']['numIter']
+        num_ignore = int(main.params['TEST']['numIgnore'])
+        assertion = main.TRUE
 
-        # Timestamp keys for json metrics output
-        submit_time = main.params[ 'JSON' ][ 'submittedTime' ]
-        install_time = main.params[ 'JSON' ][ 'installedTime' ]
-        wdRequest_time = main.params[ 'JSON' ][ 'wdRequestTime' ]
-        withdrawn_time = main.params[ 'JSON' ][ 'withdrawnTime' ]
+        #Timestamp keys for json metrics output
+        submit_time = main.params['JSON']['submittedTime']
+        install_time = main.params['JSON']['installedTime']
+        wdRequest_time = main.params['JSON']['wdRequestTime']
+        withdrawn_time = main.params['JSON']['withdrawnTime']
+
+        #NOTE: May need to configure interface depending on topology
+        intfs = main.params['TEST']['intfs']
 
         devices_json_str = main.ONOS1cli.devices()
-        devices_json_obj = json.loads( devices_json_str )
+        devices_json_obj = json.loads(devices_json_str)
 
         device_id_list = []
 
-        # Obtain device id list in ONOS format.
-        # They should already be in order ( 1,2,3,10,11,12,13, etc )
+        #Obtain device id list in ONOS format.
+        #They should already be in order (1,2,3,10,11,12,13, etc)
         for device in devices_json_obj:
-            device_id_list.append( device[ 'id' ] )
+            device_id_list.append(device['id'])
 
         intent_reroute_lat_list = []
 
-        for i in range( 0, int( num_iter ) ):
-            # add_point_intent( ingr_device, ingr_port,
-            #                 egr_device, egr_port )
-            if len( device_id_list ) > 0:
+        for i in range(0, int(num_iter)):
+            #add_point_intent(ingr_device, ingr_port, 
+            #                 egr_device, egr_port)
+            if len(device_id_list) > 0:
                 main.ONOS1cli.add_point_intent(
-                    device_id_list[ 0 ] + "/2", device_id_list[ 4 ] + "/1" )
+                    device_id_list[0]+"/2", device_id_list[7]+"/2")
             else:
-                main.log.info( "Failed to fetch devices from ONOS" )
+                main.log.info("Failed to fetch devices from ONOS")
 
-            time.sleep( 5 )
+            time.sleep(5)
 
-            intents_str = main.ONOS1cli.intents( json_format=True )
-            intents_obj = json.loads( intents_str )
+            intents_str = main.ONOS1cli.intents(json_format=True)
+            intents_obj = json.loads(intents_str)
             for intent in intents_obj:
-                if intent[ 'state' ] == "INSTALLED":
-                    main.log.info( "Intent installed successfully" )
-                    intent_id = intent[ 'id' ]
+                if intent['state'] == "INSTALLED":
+                    main.log.info("Intent installed successfully")
+                    intent_id = intent['id']
+                    main.log.info("Intent id: "+str(intent_id))
                 else:
-                    # TODO: Add error handling
-                    main.log.info( "Intent installation failed" )
+                    #TODO: Add error handling
+                    main.log.info("Intent installation failed")
                     intent_id = ""
-
-            # NOTE: this interface is specific to
-            #      topo-intentFlower.py topology
-            #      reroute case.
-            main.log.info( "Disabling interface s2-eth3" )
+            
+            main.log.info("Disabling interface "+intfs)
+            t0_system = time.time()*1000
             main.Mininet1.handle.sendline(
-                "sh ifconfig s2-eth3 down" )
-            t0_system = time.time() * 1000
+                    "sh ifconfig "+intfs+" down")
+            main.Mininet1.handle.expect("mininet>")    
 
-            # TODO: Check for correct intent reroute
-            time.sleep( 5 )
+            #TODO: Check for correct intent reroute
+            time.sleep(1)
 
-            # Obtain metrics from ONOS 1, 2, 3
+            #Obtain metrics from ONOS1 (additional nodes queried below based on cluster_count)
             intents_json_str_1 = main.ONOS1cli.intents_events_metrics()
-            intents_json_str_2 = main.ONOS2cli.intents_events_metrics()
-            intents_json_str_3 = main.ONOS3cli.intents_events_metrics()
-
-            intents_json_obj_1 = json.loads( intents_json_str_1 )
-            intents_json_obj_2 = json.loads( intents_json_str_2 )
-            intents_json_obj_3 = json.loads( intents_json_str_3 )
-
-            # Parse values from the json object
+            intents_json_obj_1 = json.loads(intents_json_str_1)
+            #Parse values from the json object
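+            #Reroute latency per node = installed timestamp - t0_system (ms),
+            #where t0_system marks when the interface was brought down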
             intent_install_1 = \
-                intents_json_obj_1[ install_time ][ 'value' ]
-            intent_install_2 = \
-                intents_json_obj_2[ install_time ][ 'value' ]
-            intent_install_3 = \
-                intents_json_obj_3[ install_time ][ 'value' ]
-
+                    intents_json_obj_1[install_time]['value']
             intent_reroute_lat_1 = \
-                int( intent_install_1 ) - int( t0_system )
-            intent_reroute_lat_2 = \
-                int( intent_install_2 ) - int( t0_system )
-            intent_reroute_lat_3 = \
-                int( intent_install_3 ) - int( t0_system )
+                    int(intent_install_1) - int(t0_system)
+            
+            if cluster_count >= 3:
+                intents_json_str_2 = main.ONOS2cli.intents_events_metrics()
+                intents_json_str_3 = main.ONOS3cli.intents_events_metrics()
+
+                intents_json_obj_2 = json.loads(intents_json_str_2)
+                intents_json_obj_3 = json.loads(intents_json_str_3)
+                intent_install_2 = \
+                    intents_json_obj_2[install_time]['value']
+                intent_install_3 = \
+                    intents_json_obj_3[install_time]['value']
+                intent_reroute_lat_2 = \
+                    int(intent_install_2) - int(t0_system)
+                intent_reroute_lat_3 = \
+                    int(intent_install_3) - int(t0_system)
+            else:
+                intent_reroute_lat_2 = 0
+                intent_reroute_lat_3 = 0
+
+            if cluster_count >= 5:
+                intents_json_str_4 = main.ONOS4cli.intents_events_metrics()
+                intents_json_str_5 = main.ONOS5cli.intents_events_metrics()
+
+                intents_json_obj_4 = json.loads(intents_json_str_4)
+                intents_json_obj_5 = json.loads(intents_json_str_5)
+                intent_install_4 = \
+                    intents_json_obj_4[install_time]['value']
+                intent_install_5 = \
+                    intents_json_obj_5[install_time]['value']
+                intent_reroute_lat_4 = \
+                    int(intent_install_4) - int(t0_system)
+                intent_reroute_lat_5 = \
+                    int(intent_install_5) - int(t0_system)
+            else:
+                intent_reroute_lat_4 = 0
+                intent_reroute_lat_5 = 0
+
+            if cluster_count >= 7:
+                intents_json_str_6 = main.ONOS6cli.intents_events_metrics()
+                intents_json_str_7 = main.ONOS7cli.intents_events_metrics()
+
+                intents_json_obj_6 = json.loads(intents_json_str_6)
+                intents_json_obj_7 = json.loads(intents_json_str_7)
+                intent_install_6 = \
+                    intents_json_obj_6[install_time]['value']
+                intent_install_7 = \
+                    intents_json_obj_7[install_time]['value']
+                intent_reroute_lat_6 = \
+                    int(intent_install_6) - int(t0_system)
+                intent_reroute_lat_7 = \
+                    int(intent_install_7) - int(t0_system)
+            else:
+                intent_reroute_lat_6 = 0
+                intent_reroute_lat_7 = 0
 
             intent_reroute_lat_avg = \
-                ( intent_reroute_lat_1 +
-                  intent_reroute_lat_2 +
-                  intent_reroute_lat_3 ) / 3
-
-            main.log.info( "Intent reroute latency avg for iteration " +
-                           str( i ) + ": " + str( intent_reroute_lat_avg ) )
+                    (intent_reroute_lat_1 + 
+                     intent_reroute_lat_2 +
+                     intent_reroute_lat_3 +
+                     intent_reroute_lat_4 +
+                     intent_reroute_lat_5 +
+                     intent_reroute_lat_6 +
+                     intent_reroute_lat_7) / cluster_count 
+    
+            main.log.info("Intent reroute latency avg for iteration "+
+                    str(i)+": "+str(intent_reroute_lat_avg))
 
             if intent_reroute_lat_avg > 0.0 and \
                intent_reroute_lat_avg < 1000 and i > num_ignore:
-                intent_reroute_lat_list.append( intent_reroute_lat_avg )
+                intent_reroute_lat_list.append(intent_reroute_lat_avg)
             else:
-                main.log.info( "Intent reroute latency exceeded " +
-                               "threshold. Skipping iteration " + str( i ) )
+                main.log.info("Intent reroute latency exceeded "+
+                        "threshold. Skipping iteration "+str(i))
 
-            main.log.info( "Removing intents for next iteration" )
-            main.ONOS1cli.remove_intent( intent_id )
+            main.log.info("Removing intents for next iteration")
+            main.ONOS1cli.remove_intent(intent_id)
 
-            main.log.info( "Bringing Mininet interface up for next " +
-                           "iteration" )
+            main.log.info("Bringing Mininet interface up for next "+
+                "iteration")
             main.Mininet1.handle.sendline(
-                "sh ifconfig s2-eth3 up" )
+                    "sh ifconfig "+intfs+" up")
+            main.Mininet1.handle.expect("mininet>")
 
-        intent_reroute_lat_avg = sum( intent_reroute_lat_list ) /\
-            len( intent_reroute_lat_list )
+        if intent_reroute_lat_list:
+            intent_reroute_lat_avg = sum(intent_reroute_lat_list) /\
+                             len(intent_reroute_lat_list)
+        else:
+            main.log.report("Intent reroute test failed. Results NA")
+            intent_reroute_lat_avg = "NA" 
+            #NOTE: fails test when list is empty
+            assertion = main.FALSE
+        
         intent_reroute_lat_std = \
-            round( numpy.std( intent_reroute_lat_list ), 1 )
-        # END ITERATION FOR LOOP
-        main.log.report( "Single intent reroute latency - " )
-        main.log.report( "Avg: " + str( intent_reroute_lat_avg ) + " ms" )
-        main.log.report(
-            "Std Deviation: " +
-            str( intent_reroute_lat_std ) +
-            " ms" )
-
-    def CASE7( self, main ):
-        """
-        Batch intent reroute latency
-        """
-        import time
-        import json
-        import requests
-        import os
-        import numpy
-
-        ONOS_ip_list = []
-        for i in range( 1, 8 ):
-            ONOS_ip_list.append( main.params[ 'CTRL' ][ 'ip' + str( i ) ] )
-
-        ONOS_user = main.params[ 'CTRL' ][ 'user' ]
-        default_sw_port = main.params[ 'CTRL' ][ 'port1' ]
-
-        batch_intent_size = main.params[ 'TEST' ][ 'batchIntentSize' ]
-        batch_thresh_min = int( main.params[ 'TEST' ][ 'batchThresholdMin' ] )
-        batch_thresh_max = int( main.params[ 'TEST' ][ 'batchThresholdMax' ] )
-        install_time = main.params[ 'JSON' ][ 'installedTime' ]
-
-        # number of iterations of case
-        num_iter = main.params[ 'TEST' ][ 'numIter' ]
-        num_ignore = int( main.params[ 'TEST' ][ 'numIgnore' ] )
-        num_switch = int( main.params[ 'TEST' ][ 'numSwitch' ] )
-        n_thread = main.params[ 'TEST' ][ 'numMult' ]
-
-        main.log.report( "Batch intent installation test of " +
-                         batch_intent_size + " intents" )
-
-        batch_result_list = []
-
-        # Assign 'linear' switch format for basic intent testing
-        main.Mininet1.assign_sw_controller(
-            sw="1", ip1=ONOS1_ip, port1=default_sw_port )
-        main.Mininet1.assign_sw_controller(
-            sw="2", ip1=ONOS2_ip, port1=default_sw_port )
-        main.Mininet1.assign_sw_controller(
-            sw="3", ip1=ONOS2_ip, port1=default_sw_port )
-        main.Mininet1.assign_sw_controller(
-            sw="4", ip1=ONOS2_ip, port1=default_sw_port )
-        main.Mininet1.assign_sw_controller(
-            sw="5", ip1=ONOS3_ip, port1=default_sw_port )
-
-        time.sleep( 10 )
-
-        main.log.info( "Getting list of available devices" )
-        device_id_list = []
-        json_str = main.ONOS1cli.devices()
-        json_obj = json.loads( json_str )
-        for device in json_obj:
-            device_id_list.append( device[ 'id' ] )
-
-        batch_install_lat = []
-        batch_withdraw_lat = []
-        sleep_time = 10
-
-        base_dir = "/tmp/"
-        max_install_lat = []
-
-        for i in range( 0, int( num_iter ) ):
-            main.log.info( "Pushing " +
-                           str( int( batch_intent_size ) * int( n_thread ) ) +
-                           " intents. Iteration " + str( i ) )
-
-            main.ONOS1cli.push_test_intents(
-                "of:0000000000000001/1",
-                "of:0000000000000005/1",
-                1000, num_mult="1", app_id="1" )
-
-            # TODO: Check for installation success then proceed
-            time.sleep( 30 )
-
-            # NOTE: this interface is specific to
-            #      topo-intentFlower.py topology
-            #      reroute case.
-            main.log.info( "Disabling interface s2-eth3" )
-            main.Mininet1.handle.sendline(
-                "sh ifconfig s2-eth3 down" )
-            t0_system = time.time() * 1000
-
-            # TODO: Wait sufficient time for intents to install
-            time.sleep( 10 )
-
-            # TODO: get intent installation time
-
-            # Obtain metrics from ONOS 1, 2, 3
-            intents_json_str_1 = main.ONOS1cli.intents_events_metrics()
-            intents_json_str_2 = main.ONOS2cli.intents_events_metrics()
-            intents_json_str_3 = main.ONOS3cli.intents_events_metrics()
-
-            intents_json_obj_1 = json.loads( intents_json_str_1 )
-            intents_json_obj_2 = json.loads( intents_json_str_2 )
-            intents_json_obj_3 = json.loads( intents_json_str_3 )
-
-            # Parse values from the json object
-            intent_install_1 = \
-                intents_json_obj_1[ install_time ][ 'value' ]
-            intent_install_2 = \
-                intents_json_obj_2[ install_time ][ 'value' ]
-            intent_install_3 = \
-                intents_json_obj_3[ install_time ][ 'value' ]
-
-            intent_reroute_lat_1 = \
-                int( intent_install_1 ) - int( t0_system )
-            intent_reroute_lat_2 = \
-                int( intent_install_2 ) - int( t0_system )
-            intent_reroute_lat_3 = \
-                int( intent_install_3 ) - int( t0_system )
-
-            intent_reroute_lat_avg = \
-                ( intent_reroute_lat_1 +
-                  intent_reroute_lat_2 +
-                  intent_reroute_lat_3 ) / 3
-
-            main.log.info( "Intent reroute latency avg for iteration " +
-                           str( i ) + ": " + str( intent_reroute_lat_avg ) )
-            # TODO: Remove intents for next iteration
-
-            time.sleep( 5 )
-
-            intents_str = main.ONOS1cli.intents()
-            intents_json = json.loads( intents_str )
-            for intents in intents_json:
-                intent_id = intents[ 'id' ]
-                if intent_id:
-                    main.ONOS1cli.remove_intent( intent_id )
-
-            main.Mininet1.handle.sendline(
-                "sh ifconfig s2-eth3 up" )
-
-            main.log.info( "Intents removed and port back up" )
-
-    def CASE4( self, main ):
-        """
+            round(numpy.std(intent_reroute_lat_list),1)
+        #END ITERATION FOR LOOP
+        main.log.report("Single intent reroute latency - ")
+        main.log.report("Avg: "+str(intent_reroute_lat_avg)+" ms")
+        main.log.report("Std Deviation: "+str(intent_reroute_lat_std)+" ms")
+       
+        utilities.assert_equals(expect=main.TRUE, actual=assertion,
+            onpass="Single intent reroute latency test successful",
+            onfail="Single intent reroute latency test failed")
+        
+    def CASE4(self, main):
+        '''
         Batch intent install
-        """
+        '''
+        
         import time
         import json
         import requests
         import os
         import numpy
+        global cluster_count
 
-        ONOS1_ip = main.params[ 'CTRL' ][ 'ip1' ]
-        ONOS2_ip = main.params[ 'CTRL' ][ 'ip2' ]
-        ONOS3_ip = main.params[ 'CTRL' ][ 'ip3' ]
-        ONOS4_ip = main.params[ 'CTRL' ][ 'ip4' ]
-        ONOS5_ip = main.params[ 'CTRL' ][ 'ip5' ]
-        ONOS6_ip = main.params[ 'CTRL' ][ 'ip6' ]
-        ONOS7_ip = main.params[ 'CTRL' ][ 'ip7' ]
+        ONOS1_ip = main.params['CTRL']['ip1']
+        ONOS2_ip = main.params['CTRL']['ip2']
+        ONOS3_ip = main.params['CTRL']['ip3']
+        ONOS4_ip = main.params['CTRL']['ip4']
+        ONOS5_ip = main.params['CTRL']['ip5']
+        ONOS6_ip = main.params['CTRL']['ip6']
+        ONOS7_ip = main.params['CTRL']['ip7']
+
+        assertion = main.TRUE
 
         ONOS_ip_list = []
-        for i in range( 1, 8 ):
-            ONOS_ip_list.append( main.params[ 'CTRL' ][ 'ip' + str( i ) ] )
+        for i in range(1, 8):
+            ONOS_ip_list.append(main.params['CTRL']['ip'+str(i)])
 
-        ONOS_user = main.params[ 'CTRL' ][ 'user' ]
+        ONOS_user = main.params['CTRL']['user']
 
-        default_sw_port = main.params[ 'CTRL' ][ 'port1' ]
+        default_sw_port = main.params['CTRL']['port1']
+    
+        batch_intent_size = main.params['TEST']['batchIntentSize']
+        batch_thresh_min = int(main.params['TEST']['batchThresholdMin'])
+        batch_thresh_max = int(main.params['TEST']['batchThresholdMax'])
 
-        batch_intent_size = main.params[ 'TEST' ][ 'batchIntentSize' ]
-        batch_thresh_min = int( main.params[ 'TEST' ][ 'batchThresholdMin' ] )
-        batch_thresh_max = int( main.params[ 'TEST' ][ 'batchThresholdMax' ] )
-
-        # number of iterations of case
-        num_iter = main.params[ 'TEST' ][ 'numIter' ]
-        num_ignore = int( main.params[ 'TEST' ][ 'numIgnore' ] )
-        num_switch = int( main.params[ 'TEST' ][ 'numSwitch' ] )
-        n_thread = main.params[ 'TEST' ][ 'numMult' ]
+        #number of iterations of case
+        num_iter = main.params['TEST']['numIter']
+        num_ignore = int(main.params['TEST']['numIgnore'])
+        num_switch = int(main.params['TEST']['numSwitch'])
+        n_thread = main.params['TEST']['numMult']
         #n_thread = 105
 
-        #*****
-        global cluster_count
-        #*****
-
-        # Switch assignment NOTE: hardcoded
+        #Switch assignment NOTE: hardcoded 
         if cluster_count == 1:
-            for i in range( 1, num_switch + 1 ):
+            for i in range(1, num_switch+1):
                 main.Mininet1.assign_sw_controller(
-                    sw=str( i ),
+                    sw=str(i), 
                     ip1=ONOS1_ip,
-                    port1=default_sw_port )
+                    port1=default_sw_port)
         if cluster_count == 3:
-            for i in range( 1, 3 ):
+            for i in range(1, 3):
                 main.Mininet1.assign_sw_controller(
-                    sw=str( i ),
+                    sw=str(i),
                     ip1=ONOS1_ip,
-                    port1=default_sw_port )
-            for i in range( 3, 6 ):
+                    port1=default_sw_port)
+            for i in range(3, 6):
                 main.Mininet1.assign_sw_controller(
-                    sw=str( i ),
+                    sw=str(i),
                     ip1=ONOS2_ip,
-                    port1=default_sw_port )
-            for i in range( 6, 9 ):
+                    port1=default_sw_port)
+            for i in range(6, 9):
                 main.Mininet1.assign_sw_controller(
-                    sw=str( i ),
+                    sw=str(i),
                     ip1=ONOS3_ip,
-                    port1=default_sw_port )
+                    port1=default_sw_port)
         if cluster_count == 5:
             main.Mininet1.assign_sw_controller(
-                sw="1",
-                ip1=ONOS1_ip,
-                port1=default_sw_port )
+                    sw="1",
+                    ip1=ONOS1_ip,
+                    port1=default_sw_port)
             main.Mininet1.assign_sw_controller(
-                sw="2",
-                ip1=ONOS2_ip,
-                port1=default_sw_port )
-            for i in range( 3, 6 ):
+                    sw="2",
+                    ip1=ONOS2_ip,
+                    port1=default_sw_port)
+            for i in range(3, 6):
                 main.Mininet1.assign_sw_controller(
-                    sw=str( i ),
+                    sw=str(i),
                     ip1=ONOS3_ip,
-                    port1=default_sw_port )
+                    port1=default_sw_port)
             main.Mininet1.assign_sw_controller(
-                sw="6",
-                ip1=ONOS4_ip,
-                port1=default_sw_port )
+                    sw="6",
+                    ip1=ONOS4_ip,
+                    port1=default_sw_port)
             main.Mininet1.assign_sw_controller(
-                sw="7",
-                ip1=ONOS5_ip,
-                port1=default_sw_port )
+                    sw="7",
+                    ip1=ONOS5_ip,
+                    port1=default_sw_port)
             main.Mininet1.assign_sw_controller(
-                sw="8",
-                ip1=ONOS5_ip,
-                port1=default_sw_port )
-
+                    sw="8",
+                    ip1=ONOS5_ip,
+                    port1=default_sw_port)
+        
         if cluster_count == 7:
-            for i in range( 1, 9 ):
+            for i in range(1,9):
                 if i < 8:
                     main.Mininet1.assign_sw_controller(
-                        sw=str( i ),
-                        ip1=ONOS_ip_list[ i - 1 ],
-                        port1=default_sw_port )
-                elif i >= 8:
+                        sw=str(i),
+                        ip1=ONOS_ip_list[i-1],
+                        port1=default_sw_port)
+                elif i >= 8: 
                     main.Mininet1.assign_sw_controller(
-                        sw=str( i ),
-                        ip1=ONOS_ip_list[ 6 ],
-                        port1=default_sw_port )
+                        sw=str(i),
+                        ip1=ONOS_ip_list[6],
+                        port1=default_sw_port)
 
-        time.sleep( 30 )
+        time.sleep(20)
 
-        main.log.report( "Batch intent installation test of " +
-                         batch_intent_size + " intents" )
+        main.log.report("Batch intent installation test of "+
+               batch_intent_size +" intent(s)")
 
         batch_result_list = []
 
-        main.log.info( "Getting list of available devices" )
+        main.log.info("Getting list of available devices")
         device_id_list = []
         json_str = main.ONOS1cli.devices()
-        json_obj = json.loads( json_str )
+        json_obj = json.loads(json_str)
         for device in json_obj:
-            device_id_list.append( device[ 'id' ] )
+            device_id_list.append(device['id'])
 
         batch_install_lat = []
         batch_withdraw_lat = []
-        sleep_time = 10
-
-        base_dir = "/tmp/"
+        
+        #Max intent install measurement of all nodes
         max_install_lat = []
+        max_withdraw_lat = []
+        sleep_time = 10
+        
+        base_dir = "/tmp/"
 
-        for i in range( 0, int( num_iter ) ):
-            main.log.info( "Pushing " +
-                           str( int( batch_intent_size ) * int( n_thread ) ) +
-                           " intents. Iteration " + str( i ) )
+        for batch in range(0, 5):
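+            #Outer loop: repeat the measurement for 5 batch sizes;
+            #batch_intent_size grows by 1000 after each pass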
+            for i in range(0, int(num_iter)):
+                main.log.info("Pushing "+
+                    str(int(batch_intent_size)*int(n_thread))+
+                    " intents. Iteration "+str(i))
+               
+                for node in range(1, cluster_count+1):
+                    save_dir = base_dir + "batch_intent_"+str(node)+".txt" 
+                    main.ONOSbench.push_test_intents_shell(
+                    device_id_list[0]+"/2",
+                    device_id_list[7]+"/2",
+                    int(batch_intent_size),
+                    save_dir, ONOS_ip_list[node-1],
+                    num_mult=n_thread, app_id=node)
+         
+                #Wait sufficient time for intents to start
+                #installing
+                time.sleep(sleep_time)
 
-            for node in range( 1, cluster_count + 1 ):
-                save_dir = base_dir + "batch_intent_" + str( node ) + ".txt"
-                main.ONOSbench.push_test_intents_shell(
-                    "of:0000000000000001/" + str( node ),
-                    "of:0000000000000008/" + str( node ),
-                    int( batch_intent_size ),
-                    save_dir, ONOS_ip_list[ node - 1 ],
-                    num_mult=n_thread, app_id=node )
+                intent = ""
+                counter = 300
+                while len(intent) > 0 and counter > 0:
+                    main.ONOS1cli.handle.sendline(
+                        "intents | wc -l")
+                    main.ONOS1cli.handle.expect(
+                        "intents | wc -l")
+                    main.ONOS1cli.handle.expect(
+                        "onos>")
+                    intent_temp = main.ONOS1cli.handle.before
+                    intent = main.ONOS1cli.intents()
+                    intent = json.loads(intent)
+                    counter = counter-1
+                    time.sleep(1)
 
-            # Wait sufficient time for intents to start
-            # installing
+                time.sleep(5)
 
-            time.sleep( sleep_time )
-            print sleep_time
+                for node in range(1, cluster_count+1):
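+                    #Each results file holds the install latency on its first
+                    #line and the withdraw latency on its second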
+                    save_dir = base_dir + "batch_intent_"+str(node)+".txt"
+                    with open(save_dir) as f_onos:
+                        line_count = 0
+                        for line in f_onos:
+                            line = line[1:]
+                            line = line.split(": ")
+                            main.log.info("Line read: "+str(line))
+                            result = line[1].split(" ")[0]
+                            #TODO: add parameters before appending latency
+                            if line_count == 0:
+                                batch_install_lat.append(int(result))
+                                install_result = result
+                            elif line_count == 1:
+                                batch_withdraw_lat.append(int(result))
+                                withdraw_result = result
+                            line_count += 1
+                    main.log.info("Batch install latency for ONOS"+
+                        str(node)+" with "+\
+                        str(batch_intent_size) + "intents: "+\
+                        str(install_result)+" ms")
+                    main.log.info("Batch withdraw latency for ONOS"+
+                        str(node)+" with "+
+                        str(batch_intent_size) + "intents: "+
+                        str(withdraw_result)+" ms")
 
-            intent = ""
-            counter = 300
-            while len( intent ) > 0 and counter > 0:
-                main.ONOS1cli.handle.sendline(
-                    "intents | wc -l" )
-                main.ONOS1cli.handle.expect(
-                    "intents | wc -l" )
-                main.ONOS1cli.handle.expect(
-                    "onos>" )
-                intent_temp = main.ONOS1cli.handle.before()
-                print intent_temp
+                if len(batch_install_lat) > 0 and int(i) > num_ignore:
+                    max_install_lat.append(max(batch_install_lat))
+                elif len(batch_install_lat) == 0:
+                    #If I failed to read anything from the file,
+                    #increase the wait time before checking intents
+                    sleep_time += 30
+                if len(batch_withdraw_lat) > 0 and int(i) > num_ignore:
+                    max_withdraw_lat.append(max(batch_withdraw_lat))
+                batch_install_lat = []
+                batch_withdraw_lat = []
+    
+                #Sleep in between iterations
+                time.sleep(5)
+            
+            if max_install_lat:
+                avg_install_lat = str(sum(max_install_lat) /\
+                                  len(max_install_lat))
+            else:
+                avg_install_lat = "NA"
+                main.log.report("Batch installation failed")
+                assertion = main.FALSE
 
-                intent = main.ONOS1cli.intents()
-                intent = json.loads( intent )
-                counter = counter - 1
-                time.sleep( 1 )
+            if max_withdraw_lat:
+                avg_withdraw_lat = str(sum(max_withdraw_lat) /\
+                                   len(max_withdraw_lat))
+            else:
+                avg_withdraw_lat = "NA"
+                main.log.report("Batch withdraw failed")
+                assertion = main.FALSE
 
-            time.sleep( 5 )
+            main.log.report("Avg of batch installation latency "+
+                "of size "+batch_intent_size+": "+
+                str(avg_install_lat)+" ms")
+            main.log.report("Std Deviation of batch installation latency "+
+                ": "+str(numpy.std(max_install_lat))+" ms")
 
-            for node in range( 1, cluster_count + 1 ):
-                save_dir = base_dir + "batch_intent_" + str( node ) + ".txt"
-                with open( save_dir ) as f_onos:
-                    line_count = 0
-                    for line in f_onos:
-                        line = line[ 1: ]
-                        line = line.split( ": " )
-                        result = line[ 1 ].split( " " )[ 0 ]
-                        # TODO: add parameters before appending latency
-                        if line_count == 0:
-                            batch_install_lat.append( int( result ) )
-                        elif line_count == 1:
-                            batch_withdraw_lat.append( int( result ) )
-                        line_count += 1
-                main.log.info( "Batch install latency for ONOS" +
-                               str( node ) + " with " +
-                               str( batch_intent_size ) + "intents: " +
-                               str( batch_install_lat ) )
+            main.log.report("Avg of batch withdraw latency "+
+                "of size "+batch_intent_size+": "+
+                str(avg_withdraw_lat)+" ms")
+            main.log.report("Std Deviation of batch withdraw latency "+
+                ": "+str(numpy.std(max_withdraw_lat))+" ms")
+           
+            batch_intent_size = int(batch_intent_size) + 1000
+            main.log.report("Increasing batch intent size to "+
+                             str(batch_intent_size))
+           
+        #main.log.info("Removing all intents for next test case")
+        #json_temp = main.ONOS1cli.intents(json_format=True)
+        #json_obj_intents = json.loads(json_temp)
+        #if json_obj_intents:
+        #    for intents in json_obj_intents:
+        #        temp_id = intents['id']
+        #        main.ONOS1cli.remove_intent(temp_id)
+        
+        utilities.assert_equals(expect=main.TRUE, actual=assertion,
+            onpass="Batch intent install/withdraw test successful",
+            onfail="Batch intent install/withdraw test failed")
 
-            if len( batch_install_lat ) > 0 and int( i ) > num_ignore:
-                max_install_lat.append( max( batch_install_lat ) )
-            elif len( batch_install_lat ) == 0:
-                # If I failed to read anything from the file,
-                # increase the wait time before checking intents
-                sleep_time += 30
-            batch_install_lat = []
-
-            # Sleep in between iterations
-            time.sleep( 5 )
-
-        main.log.report( "Avg of batch installation latency " +
-                         ": " +
-                         str( sum( max_install_lat ) /
-                              len( max_install_lat ) ) )
-        main.log.report( "Std Deviation of batch installation latency " +
-                         ": " +
-                         str( numpy.std( max_install_lat ) ) )
-
-    def CASE5( self, main ):
-        """
+    def CASE5(self,main):
+        '''
         Increase number of nodes and initiate CLI
-        """
+        '''
         import time
         import json
 
-        ONOS1_ip = main.params[ 'CTRL' ][ 'ip1' ]
-        ONOS2_ip = main.params[ 'CTRL' ][ 'ip2' ]
-        ONOS3_ip = main.params[ 'CTRL' ][ 'ip3' ]
-        ONOS4_ip = main.params[ 'CTRL' ][ 'ip4' ]
-        ONOS5_ip = main.params[ 'CTRL' ][ 'ip5' ]
-        ONOS6_ip = main.params[ 'CTRL' ][ 'ip6' ]
-        ONOS7_ip = main.params[ 'CTRL' ][ 'ip7' ]
+        ONOS1_ip = main.params['CTRL']['ip1']
+        ONOS2_ip = main.params['CTRL']['ip2']
+        ONOS3_ip = main.params['CTRL']['ip3']
+        ONOS4_ip = main.params['CTRL']['ip4']
+        ONOS5_ip = main.params['CTRL']['ip5']
+        ONOS6_ip = main.params['CTRL']['ip6']
+        ONOS7_ip = main.params['CTRL']['ip7']
 
         global cluster_count
         cluster_count += 2
-        main.log.report( "Increasing cluster size to " +
-                         str( cluster_count ) )
+        main.log.report("Increasing cluster size to "+
+                str(cluster_count))
 
         install_result = main.FALSE
 
         if cluster_count == 3:
             install_result1 = \
-                main.ONOSbench.onos_install( node=ONOS2_ip )
+                main.ONOSbench.onos_install(node=ONOS2_ip)
             install_result2 = \
-                main.ONOSbench.onos_install( node=ONOS3_ip )
-            time.sleep( 5 )
+                main.ONOSbench.onos_install(node=ONOS3_ip)
+            time.sleep(5)
 
-            main.log.info( "Starting ONOS CLI" )
-            main.ONOS2cli.start_onos_cli( ONOS2_ip )
-            main.ONOS3cli.start_onos_cli( ONOS3_ip )
+            main.log.info("Starting ONOS CLI")
+            main.ONOS2cli.start_onos_cli(ONOS2_ip)
+            main.ONOS3cli.start_onos_cli(ONOS3_ip)
 
             install_result = install_result1 and install_result2
 
         if cluster_count == 5:
-            main.log.info( "Installing ONOS on node 4 and 5" )
+            main.log.info("Installing ONOS on node 4 and 5")
             install_result1 = \
-                main.ONOSbench.onos_install( node=ONOS4_ip )
+                main.ONOSbench.onos_install(node=ONOS4_ip)
             install_result2 = \
-                main.ONOSbench.onos_install( node=ONOS5_ip )
+                main.ONOSbench.onos_install(node=ONOS5_ip)
 
-            main.log.info( "Starting ONOS CLI" )
-            main.ONOS4cli.start_onos_cli( ONOS4_ip )
-            main.ONOS5cli.start_onos_cli( ONOS5_ip )
+            main.log.info("Starting ONOS CLI")
+            main.ONOS4cli.start_onos_cli(ONOS4_ip)
+            main.ONOS5cli.start_onos_cli(ONOS5_ip)
 
             install_result = install_result1 and install_result2
 
         if cluster_count == 7:
-            main.log.info( "Installing ONOS on node 6 and 7" )
+            main.log.info("Installing ONOS on node 6 and 7")
             install_result1 = \
-                main.ONOSbench.onos_install( node=ONOS6_ip )
+                main.ONOSbench.onos_install(node=ONOS6_ip)
             install_result2 = \
-                main.ONOSbench.onos_install( node=ONOS7_ip )
+                main.ONOSbench.onos_install(node=ONOS7_ip)
 
-            main.log.info( "Starting ONOS CLI" )
-            main.ONOS6cli.start_onos_cli( ONOS6_ip )
-            main.ONOS7cli.start_onos_cli( ONOS7_ip )
+            main.log.info("Starting ONOS CLI")
+            main.ONOS6cli.start_onos_cli(ONOS6_ip)
+            main.ONOS7cli.start_onos_cli(ONOS7_ip)
 
             install_result = install_result1 and install_result2
 
-        time.sleep( 5 )
+        time.sleep(5)
 
         if install_result == main.TRUE:
             assertion = main.TRUE
         else:
             assertion = main.FALSE
 
-        utilities.assert_equals(
-            expect=main.TRUE,
-            actual=assertion,
-            onpass="Scale out to " +
-            str( cluster_count ) +
-            " nodes successful",
-            onfail="Scale out to " +
-            str( cluster_count ) +
-            " nodes failed" )
+        utilities.assert_equals(expect=main.TRUE, actual=assertion,
+            onpass="Scale out to "+str(cluster_count)+\
+                   " nodes successful",
+            onfail="Scale out to "+str(cluster_count)+\
+                   " nodes failed")
+    
+    def CASE7(self, main):
+        '''
+        Batch intent reroute latency
+        '''
+        #TODO: Fix for scale-out scenario
 
-    def CASE9( self, main ):
+        import time
+        import json
+        import requests
+        import os
+        import numpy
+        global cluster_count
+
+        ONOS_ip_list = []
+        for i in range(1, 8):
+            ONOS_ip_list.append(main.params['CTRL']['ip'+str(i)])
+
+        ONOS_user = main.params['CTRL']['user']
+        default_sw_port = main.params['CTRL']['port1']
+    
+        batch_intent_size = main.params['TEST']['batchIntentSize']
+        batch_thresh_min = int(main.params['TEST']['batchThresholdMin'])
+        batch_thresh_max = int(main.params['TEST']['batchThresholdMax'])
+        intfs = main.params['TEST']['intfs']
+        install_time = main.params['JSON']['installedTime']
+
+        #number of iterations of case
+        num_iter = main.params['TEST']['numIter']
+        num_ignore = int(main.params['TEST']['numIgnore'])
+        num_switch = int(main.params['TEST']['numSwitch'])
+        n_thread = main.params['TEST']['numMult']
+
+        main.log.report("Batch intent installation test of "+
+               batch_intent_size +" intents")
+
+        batch_result_list = []
+
+        time.sleep(10)
+
+        main.log.info("Getting list of available devices")
+        device_id_list = []
+        json_str = main.ONOS1cli.devices()
+        json_obj = json.loads(json_str)
+        for device in json_obj:
+            device_id_list.append(device['id'])
+
+        batch_install_lat = []
+        batch_withdraw_lat = []
+        sleep_time = 10
+        
+        base_dir = "/tmp/"
+        max_install_lat = []
+
+        for i in range(0, int(num_iter)):
+            main.log.info("Pushing "+
+                    str(int(batch_intent_size)*int(n_thread))+
+                    " intents. Iteration "+str(i))
+
+            main.ONOSbench.push_test_intents_shell(
+                device_id_list[0]+"/2",
+                device_id_list[7]+"/2",
+                batch_intent_size, "/tmp/batch_install.txt",
+                ONOS_ip_list[0], num_mult="1", app_id="1",
+                report=False, options="--install")
+            #main.ONOSbench.push_test_intents_shell(
+            #    "of:0000000000001002/1",
+            #    "of:0000000000002002/1",
+            #    133, "/tmp/temp2.txt", "10.128.174.2",
+            #    num_mult="6", app_id="2",report=False)
+               
+            #TODO: Check for installation success then proceed
+            time.sleep(30)
+            
+            #NOTE: this interface is specific to
+            #      topo-intentFlower.py topology
+            #      reroute case.
+            main.log.info("Disabling interface "+intfs)
+            main.Mininet1.handle.sendline(
+                    "sh ifconfig "+intfs+" down")
+            t0_system = time.time()*1000
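+            #The per-node reroute latency below is taken as the difference
+            #between the 'installed' timestamp reported by
+            #intents-events-metrics and this t0 recorded at link-down time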
+
+            #TODO: Wait sufficient time for intents to install
+            time.sleep(10)
+
+            #TODO: get intent installation time
+            
+            #Obtain metrics from ONOS 1, 2, 3
+            intents_json_str_1 = main.ONOS1cli.intents_events_metrics()
+            intents_json_obj_1 = json.loads(intents_json_str_1)
+            #Parse values from the json object
+            intent_install_1 = \
+                    intents_json_obj_1[install_time]['value']
+            intent_reroute_lat_1 = \
+                    int(intent_install_1) - int(t0_system)
+            
+            
+            if cluster_count >= 3:
+                intents_json_str_2 =\
+                    main.ONOS2cli.intents_events_metrics()
+                intents_json_str_3 =\
+                    main.ONOS3cli.intents_events_metrics()
+                intents_json_obj_2 = json.loads(intents_json_str_2)
+                intents_json_obj_3 = json.loads(intents_json_str_3)
+                intent_install_2 = \
+                    intents_json_obj_2[install_time]['value']
+                intent_install_3 = \
+                    intents_json_obj_3[install_time]['value']
+                intent_reroute_lat_2 = \
+                    int(intent_install_2) - int(t0_system)
+                intent_reroute_lat_3 = \
+                    int(intent_install_3) - int(t0_system)
+            else:
+                intent_reroute_lat_2 = 0
+                intent_reroute_lat_3 = 0
+
+            if cluster_count >= 5:
+                intents_json_str_4 =\
+                    main.ONOS4cli.intents_events_metrics()
+                intents_json_str_5 =\
+                    main.ONOS5cli.intents_events_metrics()
+                intents_json_obj_4 = json.loads(intents_json_str_4)
+                intents_json_obj_5 = json.loads(intents_json_str_5)
+                intent_install_4 = \
+                    intents_json_obj_4[install_time]['value']
+                intent_install_5 = \
+                    intents_json_obj_5[install_time]['value']
+                intent_reroute_lat_4 = \
+                    int(intent_install_4) - int(t0_system)
+                intent_reroute_lat_5 = \
+                    int(intent_install_5) - int(t0_system)
+            else:
+                intent_reroute_lat_4 = 0
+                intent_reroute_lat_5 = 0
+
+            if cluster_count == 7:
+                intents_json_str_6 =\
+                    main.ONOS6cli.intents_events_metrics()
+                intents_json_str_7 =\
+                    main.ONOS7cli.intents_events_metrics()
+                intents_json_obj_6 = json.loads(intents_json_str_6)
+                intents_json_obj_7 = json.loads(intents_json_str_7)
+                intent_install_6 = \
+                    intents_json_obj_6[install_time]['value']
+                intent_install_7 = \
+                    intents_json_obj_7[install_time]['value']
+                intent_reroute_lat_6 = \
+                    int(intent_install_6) - int(t0_system)
+                intent_reroute_lat_7 = \
+                    int(intent_install_7) - int(t0_system)
+            else:
+                intent_reroute_lat_6 = 0
+                intent_reroute_lat_7 = 0
+
+            intent_reroute_lat_avg = \
+                    (intent_reroute_lat_1 + 
+                     intent_reroute_lat_2 +
+                     intent_reroute_lat_3 +
+                     intent_reroute_lat_4 + 
+                     intent_reroute_lat_5 +
+                     intent_reroute_lat_6 +
+                     intent_reroute_lat_7) / cluster_count 
+    
+            main.log.info("Intent reroute latency avg for iteration "+
+                    str(i)+": "+str(intent_reroute_lat_avg))
+            #TODO: Remove intents for next iteration
+            
+            time.sleep(5)
+
+            intents_str = main.ONOS1cli.intents()
+            intents_json = json.loads(intents_str)
+            for intents in intents_json:
+                intent_id = intents['id']
+                #TODO: make sure this removes all intents
+                #print intent_id
+                if intent_id:
+                    main.ONOS1cli.remove_intent(intent_id)
+
+            main.Mininet1.handle.sendline(
+                    "sh ifconfig "+intfs+" up")
+            
+            main.log.info("Intents removed and port back up")
+
+    def CASE9(self, main):
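+        #NOTE (assumption): CASE9 appears to be an open-ended stress loop:
+        #it repeatedly pushes batches of test intents between switch pairs
+        #and, every 100 iterations, appends intents-events-metrics output
+        #to /tmp/metrics_intents_temp.txt for offline inspection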
         count = 0
-        sw_num1 = 1
+        sw_num1 = 1 
         sw_num2 = 1
         appid = 0
         port_num1 = 1
         port_num2 = 1
-
-        time.sleep( 30 )
+       
+        time.sleep(30)
 
         while True:
-            # main.ONOS1cli.push_test_intents(
+            #main.ONOS1cli.push_test_intents(
                     #"of:0000000000001001/1",
                 #"of:0000000000002001/1",
-                #    100, num_mult="10", app_id="1" )
-            # main.ONOS2cli.push_test_intents(
+                #    100, num_mult="10", app_id="1")
+            #main.ONOS2cli.push_test_intents(
             #    "of:0000000000001002/1",
             #    "of:0000000000002002/1",
-            #    100, num_mult="10", app_id="2" )
-            # main.ONOS2cli.push_test_intents(
+            #    100, num_mult="10", app_id="2")
+            #main.ONOS2cli.push_test_intents(
             #    "of:0000000000001003/1",
             #    "of:0000000000002003/1",
-            #    100, num_mult="10", app_id="3" )
+            #    100, num_mult="10", app_id="3")
             count += 1
-
+           
             if count >= 100:
                 main.ONOSbench.handle.sendline(
-                    "onos 10.128.174.1 intents-events-metrics >>" +
-                    " /tmp/metrics_intents_temp.txt &" )
+                    "onos 10.128.174.1 intents-events-metrics >>"+\
+                    " /tmp/metrics_intents_temp.txt &")
                 count = 0
 
-            arg1 = "of:000000000000100" + \
-                str( sw_num1 ) + "/" + str( port_num1 )
-            arg2 = "of:000000000000200" + \
-                str( sw_num2 ) + "/" + str( port_num2 )
-
+            arg1 = "of:000000000000100"+str(sw_num1)+"/"+str(port_num1)
+            arg2 = "of:000000000000200"+str(sw_num2)+"/"+str(port_num2)
+            
             sw_num1 += 1
 
             if sw_num1 > 7:
@@ -837,21 +1094,22 @@
 
             if sw_num2 > 7:
                 sw_num2 = 1
-
+            
             main.ONOSbench.push_test_intents_shell(
                 arg1,
-                arg2,
+                arg2, 
                 100, "/tmp/temp.txt", "10.128.174.1",
-                num_mult="10", app_id=appid, report=False )
-            # main.ONOSbench.push_test_intents_shell(
+                num_mult="10", app_id=appid,report=False)
+            #main.ONOSbench.push_test_intents_shell(
             #    "of:0000000000001002/1",
             #    "of:0000000000002002/1",
             #    133, "/tmp/temp2.txt", "10.128.174.2",
-            #    num_mult="6", app_id="2",report=False )
-            # main.ONOSbench.push_test_intents_shell(
+            #    num_mult="6", app_id="2",report=False)
+            #main.ONOSbench.push_test_intents_shell(
             #    "of:0000000000001003/1",
             #    "of:0000000000002003/1",
             #    133, "/tmp/temp3.txt", "10.128.174.3",
-            #    num_mult="6", app_id="3",report=False )
+            #    num_mult="6", app_id="3",report=False)
+   
+            time.sleep(0.2)
 
-            time.sleep( 0.2 )
diff --git a/TestON/tests/IntentPerfNext/IntentPerfNext.topo b/TestON/tests/IntentPerfNext/IntentPerfNext.topo
index 048695d..9de0291 100644
--- a/TestON/tests/IntentPerfNext/IntentPerfNext.topo
+++ b/TestON/tests/IntentPerfNext/IntentPerfNext.topo
@@ -89,7 +89,7 @@
             <type>MininetCliDriver</type>
             <connect_order>4</connect_order>
             <COMPONENTS>
-                <arg1> --custom topo-intentFlower.py </arg1>
+                <arg1> --custom topo-intent-8sw.py </arg1>
                 <arg2> --arp --mac --topo mytopo </arg2>
                 <arg3> </arg3>
                 <controller> remote </controller>
diff --git a/TestON/tests/TopoPerfNext/Backup/TopoPerfNext.py b/TestON/tests/TopoPerfNext/Backup/TopoPerfNext.py
new file mode 100644
index 0000000..cc40b94
--- /dev/null
+++ b/TestON/tests/TopoPerfNext/Backup/TopoPerfNext.py
@@ -0,0 +1,1710 @@
+#TopoPerfNext
+#
+#Topology Performance test for ONOS-next
+#
+#andrew@onlab.us
+#
+#If your machine does not come with numpy
+#run the following command:
+#sudo apt-get install python-numpy python-scipy 
+
+import time
+import sys
+import os
+import re
+
+class TopoPerfNext:
+    def __init__(self):
+        self.default = ''
+
+    def CASE1(self, main):
+        '''
+        ONOS startup sequence
+        '''
+        import time
+        
+        ## Global cluster count for scale-out purposes
+        global cluster_count
+        #Set initial cluster count
+        cluster_count = 1 
+        ##
+
+        cell_name = main.params['ENV']['cellName']
+
+        git_pull = main.params['GIT']['autoPull']
+        checkout_branch = main.params['GIT']['checkout']
+
+        ONOS1_ip = main.params['CTRL']['ip1']
+        ONOS2_ip = main.params['CTRL']['ip2']
+        ONOS3_ip = main.params['CTRL']['ip3']
+        
+        #### Hardcoded ONOS nodes particular to my env ####
+        ONOS4_ip = "10.128.174.4"
+        ONOS5_ip = "10.128.174.5"
+        ONOS6_ip = "10.128.174.6"
+        ONOS7_ip = "10.128.174.7"
+        #### ####
+
+        MN1_ip = main.params['MN']['ip1']
+        BENCH_ip = main.params['BENCH']['ip']
+
+        topo_cfg_file = main.params['TEST']['topo_config_file']
+        topo_cfg_name = main.params['TEST']['topo_config_name']
+        
+        main.case("Setting up test environment")
+        main.log.info("Copying topology event accumulator config"+\
+            " to ONOS /package/etc")
+        main.ONOSbench.handle.sendline("cp ~/"+\
+            topo_cfg_file+\
+            " ~/ONOS/tools/package/etc/"+\
+            topo_cfg_name)
+        main.ONOSbench.handle.expect("\$")
+
+        main.log.report("Setting up test environment")
+
+        main.step("Cleaning previously installed ONOS if any")
+        main.ONOSbench.onos_uninstall(node_ip=ONOS2_ip)
+        main.ONOSbench.onos_uninstall(node_ip=ONOS3_ip)
+        main.ONOSbench.onos_uninstall(node_ip=ONOS4_ip)
+        main.ONOSbench.onos_uninstall(node_ip=ONOS5_ip)
+        main.ONOSbench.onos_uninstall(node_ip=ONOS6_ip)
+        main.ONOSbench.onos_uninstall(node_ip=ONOS7_ip)
+
+        main.step("Creating cell file")
+        cell_file_result = main.ONOSbench.create_cell_file(
+                BENCH_ip, cell_name, MN1_ip, "onos-core,onos-app-metrics",
+                ONOS1_ip)
+
+        main.step("Applying cell file to environment")
+        cell_apply_result = main.ONOSbench.set_cell(cell_name)
+        verify_cell_result = main.ONOSbench.verify_cell()
+        
+        #NOTE: This step may be removed after proper 
+        #      copy cat log functionality
+        main.step("Removing raft/copy-cat logs from ONOS nodes")
+        main.ONOSbench.onos_remove_raft_logs()
+        time.sleep(30)
+
+        main.step("Git checkout and pull "+checkout_branch)
+        if git_pull == 'on':
+            checkout_result = \
+                    main.ONOSbench.git_checkout(checkout_branch)
+            pull_result = main.ONOSbench.git_pull()
+        else:
+            checkout_result = main.TRUE
+            pull_result = main.TRUE
+            main.log.info("Skipped git checkout and pull")
+
+        main.log.report("Commit information - ")
+        main.ONOSbench.get_version(report=True)
+
+        main.step("Using mvn clean & install")
+        mvn_result = main.ONOSbench.clean_install()
+        mvn_result = main.TRUE
+
+        main.step("Set cell for ONOS cli env")
+        main.ONOS1cli.set_cell(cell_name)
+        #main.ONOS2cli.set_cell(cell_name)
+        #main.ONOS3cli.set_cell(cell_name)
+
+        main.step("Creating ONOS package")
+        package_result = main.ONOSbench.onos_package()
+
+        main.step("Installing ONOS package")
+        install1_result = main.ONOSbench.onos_install(node=ONOS1_ip)
+        #install2_result = main.ONOSbench.onos_install(node=ONOS2_ip)
+        #install3_result = main.ONOSbench.onos_install(node=ONOS3_ip)
+
+        time.sleep(10)
+
+        main.step("Start onos cli")
+        cli1 = main.ONOS1cli.start_onos_cli(ONOS1_ip)
+        #cli2 = main.ONOS2cli.start_onos_cli(ONOS2_ip)
+        #cli3 = main.ONOS3cli.start_onos_cli(ONOS3_ip)
+
+        utilities.assert_equals(expect=main.TRUE,
+                actual= cell_file_result and cell_apply_result and\
+                        verify_cell_result and checkout_result and\
+                        pull_result and mvn_result and\
+                        install1_result, #and install2_result and\
+                        #install3_result,
+                onpass="Test Environment setup successful",
+                onfail="Failed to setup test environment")
+
+    def CASE2(self, main):
+        '''
+        Assign s1 to ONOS1 and measure latency
+        
+        There are 4 levels of latency measurements to this test:
+        1) End-to-end measurement: Complete end-to-end measurement
+           from TCP (SYN/ACK) handshake to Graph change
+        2) OFP-to-graph measurement: 'ONOS processing' snippet of
+           measurement from OFP Vendor message to Graph change
+        3) OFP-to-device measurement: 'ONOS processing without 
+           graph change' snippet of measurement from OFP vendor
+           message to Device change timestamp
+        4) T0-to-device measurement: Measurement that includes
+           the switch handshake to devices timestamp without 
+           the graph view change. (TCP handshake -> Device 
+           change)
+        '''
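+        #In terms of the timestamps gathered below, the four measurements
+        #are computed roughly as (a sketch of the arithmetic only):
+        #  1) end-to-end:     graphTimestamp  - t0 (TCP SYN/ACK)
+        #  2) ofp-to-graph:   graphTimestamp  - t0 (OFP message)
+        #  3) ofp-to-device:  deviceTimestamp - t0 (OFP message)
+        #  4) t0-to-device:   deviceTimestamp - t0 (TCP SYN/ACK)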
+        import time
+        import subprocess
+        import json
+        import requests
+        import os
+        import numpy
+        global cluster_count
+
+        ONOS1_ip = main.params['CTRL']['ip1']
+        ONOS2_ip = main.params['CTRL']['ip2']
+        ONOS3_ip = main.params['CTRL']['ip3']
+        ONOS4_ip = main.params['CTRL']['ip4']
+        ONOS5_ip = main.params['CTRL']['ip5']
+        ONOS6_ip = main.params['CTRL']['ip6']
+        ONOS7_ip = main.params['CTRL']['ip7']
+
+        ONOS_user = main.params['CTRL']['user']
+
+        default_sw_port = main.params['CTRL']['port1']
+       
+        #Number of iterations of case
+        num_iter = main.params['TEST']['numIter']
+        #Number of first 'x' iterations to ignore:
+        iter_ignore = int(main.params['TEST']['iterIgnore'])
+
+        #Timestamp 'keys' for json metrics output.
+        #These are subject to change, hence moved into params
+        deviceTimestamp = main.params['JSON']['deviceTimestamp']
+        graphTimestamp = main.params['JSON']['graphTimestamp']
+
+        debug_mode = main.params['TEST']['debugMode']
+        onos_log = main.params['TEST']['onosLogFile']
+
+        #Threshold for the test
+        threshold_str = main.params['TEST']['singleSwThreshold']
+        threshold_obj = threshold_str.split(",")
+        threshold_min = int(threshold_obj[0])
+        threshold_max = int(threshold_obj[1])
+
+        #List of switch add latency collected from
+        #all iterations
+        latency_end_to_end_list = []
+        latency_ofp_to_graph_list = []
+        latency_ofp_to_device_list = []
+        latency_t0_to_device_list = []
+        latency_tcp_to_ofp_list = []
+
+        #Directory/file to store tshark results
+        tshark_of_output = "/tmp/tshark_of_topo.txt"
+        tshark_tcp_output = "/tmp/tshark_tcp_topo.txt"
+
+        #String to grep in tshark output
+        tshark_tcp_string = "TCP 74 "+default_sw_port
+        tshark_of_string = "OFP 86 Vendor"
+     
+        #Initialize assertion to TRUE
+        assertion = main.TRUE
+      
+        local_time = time.strftime('%x %X')
+        local_time = local_time.replace("/","")
+        local_time = local_time.replace(" ","_")
+        local_time = local_time.replace(":","")
+        if debug_mode == 'on':
+            main.ONOS1.tshark_pcap("eth0",
+                    "/tmp/single_sw_lat_pcap_"+local_time) 
+
+            main.log.info("Debug mode is on")
+
+        main.log.report("Latency of adding one switch to controller")
+        main.log.report("First "+str(iter_ignore)+" iterations ignored"+
+                " for jvm warmup time")
+        main.log.report("Total iterations of test: "+str(num_iter))
+
+        for i in range(0, int(num_iter)):
+            main.log.info("Starting tshark capture")
+
+            #* TCP [ACK, SYN] is used as t0_a, the
+            #  very first "exchange" between ONOS and 
+            #  the switch for end-to-end measurement
+            #* OFP [Stats Reply] is used for t0_b
+            #  the very last OFP message between ONOS
+            #  and the switch for ONOS measurement
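+            #Assumption: each grepped tshark line looks roughly like
+            #"<frame#> <epoch-seconds> <src> <dst> ...", so field [1]
+            #(converted to ms) is used as the capture timestamp below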
+            main.ONOS1.tshark_grep(tshark_tcp_string,
+                    tshark_tcp_output)
+            main.ONOS1.tshark_grep(tshark_of_string,
+                    tshark_of_output)
+
+            #Wait and ensure tshark is started and 
+            #capturing
+            time.sleep(10)
+
+            main.log.info("Assigning s1 to controller")
+
+            main.Mininet1.assign_sw_controller(sw="1",
+                    ip1=ONOS1_ip, port1=default_sw_port)
+
+            #Wait and ensure switch is assigned
+            #before stopping tshark
+            time.sleep(30)
+   
+            main.log.info("Stopping all Tshark processes")
+            main.ONOS1.stop_tshark()
+
+            #tshark output is saved in ONOS. Use subprocess
+            #to copy over files to TestON for parsing
+            main.log.info("Copying over tshark files")
+            
+            #TCP CAPTURE ****
+            #Copy the tshark output from ONOS machine to
+            #TestON machine in tshark_tcp_output directory>file
+            os.system("scp "+ONOS_user+"@"+ONOS1_ip+":"+
+                    tshark_tcp_output+" /tmp/") 
+            tcp_file = open(tshark_tcp_output, 'r')
+            temp_text = tcp_file.readline()
+            temp_text = temp_text.split(" ")
+
+            main.log.info("Object read in from TCP capture: "+
+                    str(temp_text))
+            if len(temp_text) > 1:
+                t0_tcp = float(temp_text[1])*1000.0
+            else:
+                main.log.error("Tshark output file for TCP"+
+                        " returned unexpected results")
+                t0_tcp = 0
+                assertion = main.FALSE
+            
+            tcp_file.close()
+            #****************
+
+            #OF CAPTURE ****
+            os.system("scp "+ONOS_user+"@"+ONOS1_ip+":"+
+                    tshark_of_output+" /tmp/")
+            of_file = open(tshark_of_output, 'r')
+           
+            line_ofp = ""
+            #Read until last line of file
+            while True:
+                temp_text = of_file.readline()
+                if temp_text != '':
+                    line_ofp = temp_text
+                else:
+                    break 
+            obj = line_ofp.split(" ")
+            
+            main.log.info("Object read in from OFP capture: "+
+                    str(line_ofp))
+    
+            if len(line_ofp) > 1:
+                t0_ofp = float(obj[1])*1000.0
+            else:
+                main.log.error("Tshark output file for OFP"+
+                        " returned unexpected results")
+                t0_ofp = 0
+                assertion = main.FALSE
+            
+            of_file.close()
+            #****************
+           
+            #Metrics from ONOS1-3 are required for the per-node
+            #deltas computed below
+            json_str_1 = main.ONOS1cli.topology_events_metrics()
+            json_str_2 = main.ONOS2cli.topology_events_metrics()
+            json_str_3 = main.ONOS3cli.topology_events_metrics()
+            #Initialize scale-out variables
+            json_str_4 = ""
+            json_str_5 = ""
+            json_str_6 = ""
+            json_str_7 = ""
+
+            json_obj_1 = json.loads(json_str_1)
+            json_obj_2 = json.loads(json_str_2)
+            json_obj_3 = json.loads(json_str_3)
+            #Initialize scale-out variables
+            json_obj_4 = ""
+            json_obj_5 = ""
+            json_obj_6 = ""
+            json_obj_7 = ""
+
+            #Include scale-out measurements when applicable
+            if cluster_count == 5:
+                json_str_4 = main.ONOS4cli.topology_events_metrics()
+                json_str_5 = main.ONOS5cli.topology_events_metrics()
+                
+                json_obj_4 = json.loads(json_str_4)
+                json_obj_5 = json.loads(json_str_5)
+            elif cluster_count == 6:
+                main.log.info("TODO: create even number cluster events")
+            elif cluster_count == 7:
+                json_str_6 = main.ONOS6cli.topology_events_metrics()
+                json_str_7 = main.ONOS7cli.topology_events_metrics()
+
+                json_obj_6 = json.loads(json_str_6)
+                json_obj_7 = json.loads(json_str_7)
+
+            #Obtain graph timestamp. This timestamp captures
+            #the epoch time at which the topology graph was updated.
+            graph_timestamp_1 = \
+                    json_obj_1[graphTimestamp]['value']
+            graph_timestamp_2 = \
+                    json_obj_2[graphTimestamp]['value']
+            graph_timestamp_3 = \
+                    json_obj_3[graphTimestamp]['value']
+
+            #Obtain device timestamp. This timestamp captures
+            #the epoch time at which the device event happened
+            device_timestamp_1 = \
+                    json_obj_1[deviceTimestamp]['value'] 
+            device_timestamp_2 = \
+                    json_obj_2[deviceTimestamp]['value'] 
+            device_timestamp_3 = \
+                    json_obj_3[deviceTimestamp]['value'] 
+
+            #t0 to device processing latency 
+            delta_device_1 = int(device_timestamp_1) - int(t0_tcp)
+            delta_device_2 = int(device_timestamp_2) - int(t0_tcp)
+            delta_device_3 = int(device_timestamp_3) - int(t0_tcp)
+        
+            #Get average of delta from all instances
+            avg_delta_device = \
+                    (int(delta_device_1)+\
+                     int(delta_device_2)+\
+                     int(delta_device_3)) / 3
+
+            #Ensure avg delta meets the threshold before appending
+            if avg_delta_device > 0.0 and avg_delta_device < 10000\
+                    and int(i) > iter_ignore:
+                latency_t0_to_device_list.append(avg_delta_device)
+            else:
+                main.log.info("Results for t0-to-device ignored"+\
+                        "due to excess in threshold / warmup iteration.")
+
+            #t0 to graph processing latency (end-to-end)
+            delta_graph_1 = int(graph_timestamp_1) - int(t0_tcp)
+            delta_graph_2 = int(graph_timestamp_2) - int(t0_tcp)
+            delta_graph_3 = int(graph_timestamp_3) - int(t0_tcp)
+        
+            #Get average of delta from all instances
+            #TODO: use max delta graph
+            #max_delta_graph = max(three)
+            avg_delta_graph = \
+                    (int(delta_graph_1)+\
+                     int(delta_graph_2)+\
+                     int(delta_graph_3)) / 3
+
+            #Ensure avg delta meets the threshold before appending
+            if avg_delta_graph > 0.0 and avg_delta_graph < 10000\
+                    and int(i) > iter_ignore:
+                latency_end_to_end_list.append(avg_delta_graph)
+            else:
+                main.log.info("Results for end-to-end ignored"+\
+                        "due to excess in threshold")
+
+            #ofp to graph processing latency (ONOS processing)
+            delta_ofp_graph_1 = int(graph_timestamp_1) - int(t0_ofp)
+            delta_ofp_graph_2 = int(graph_timestamp_2) - int(t0_ofp)
+            delta_ofp_graph_3 = int(graph_timestamp_3) - int(t0_ofp)
+            
+            avg_delta_ofp_graph = \
+                    (int(delta_ofp_graph_1)+\
+                     int(delta_ofp_graph_2)+\
+                     int(delta_ofp_graph_3)) / 3
+            
+            if avg_delta_ofp_graph > threshold_min \
+                    and avg_delta_ofp_graph < threshold_max\
+                    and int(i) > iter_ignore:
+                latency_ofp_to_graph_list.append(avg_delta_ofp_graph)
+            elif avg_delta_ofp_graph > (-10) and \
+                    avg_delta_ofp_graph < 0.0 and\
+                    int(i) > iter_ignore:
+                main.log.info("Sub-millisecond result likely; "+
+                    "negative result was rounded to 0")
+                #NOTE: Current metrics framework does not 
+                #support sub-millisecond accuracy. Therefore,
+                #if the result is negative, we can reasonably
+                #conclude sub-millisecond results and just 
+                #append the best rounded effort - 0 ms. 
+                latency_ofp_to_graph_list.append(0)
+            else:
+                main.log.info("Results for ofp-to-graph "+\
+                        "ignored due to excess in threshold")
+
+            #ofp to device processing latency (ONOS processing)
+            delta_ofp_device_1 = float(device_timestamp_1) - float(t0_ofp)
+            delta_ofp_device_2 = float(device_timestamp_2) - float(t0_ofp)
+            delta_ofp_device_3 = float(device_timestamp_3) - float(t0_ofp)
+            
+            avg_delta_ofp_device = \
+                    (float(delta_ofp_device_1)+\
+                     float(delta_ofp_device_2)+\
+                     float(delta_ofp_device_3)) / 3
+            
+            #NOTE: ofp - delta measurements are occasionally negative
+            #      due to system time misalignment.
+            latency_ofp_to_device_list.append(avg_delta_ofp_device)
+
+            delta_ofp_tcp = int(t0_ofp) - int(t0_tcp)
+            if delta_ofp_tcp > threshold_min \
+                    and delta_ofp_tcp < threshold_max and\
+                    int(i) > iter_ignore:
+                latency_tcp_to_ofp_list.append(delta_ofp_tcp)
+            else:
+                main.log.info("Results fo tcp-to-ofp "+\
+                        "ignored due to excess in threshold")
+
+            #TODO:
+            #Fetch logs upon threshold excess
+
+            main.log.info("ONOS1 delta end-to-end: "+
+                    str(delta_graph_1) + " ms")
+            main.log.info("ONOS2 delta end-to-end: "+
+                    str(delta_graph_2) + " ms")
+            main.log.info("ONOS3 delta end-to-end: "+
+                    str(delta_graph_3) + " ms")
+
+            main.log.info("ONOS1 delta OFP - graph: "+
+                    str(delta_ofp_graph_1) + " ms")
+            main.log.info("ONOS2 delta OFP - graph: "+
+                    str(delta_ofp_graph_2) + " ms")
+            main.log.info("ONOS3 delta OFP - graph: "+
+                    str(delta_ofp_graph_3) + " ms")
+            
+            main.log.info("ONOS1 delta device - t0: "+
+                    str(delta_device_1) + " ms")
+            main.log.info("ONOS2 delta device - t0: "+
+                    str(delta_device_2) + " ms")
+            main.log.info("ONOS3 delta device - t0: "+
+                    str(delta_device_3) + " ms")
+         
+            main.log.info("TCP to OFP delta: "+
+                    str(delta_ofp_tcp) + " ms")
+            #main.log.info("ONOS1 delta OFP - device: "+
+            #        str(delta_ofp_device_1) + " ms")
+            #main.log.info("ONOS2 delta OFP - device: "+
+            #        str(delta_ofp_device_2) + " ms")
+            #main.log.info("ONOS3 delta OFP - device: "+
+            #        str(delta_ofp_device_3) + " ms")
+
+            main.step("Remove switch from controller")
+            main.Mininet1.delete_sw_controller("s1")
+
+            time.sleep(5)
+
+        #END of for loop iteration
+
+        #If there is at least 1 element in each list,
+        #pass the test case
+        if len(latency_end_to_end_list) > 0 and\
+           len(latency_ofp_to_graph_list) > 0 and\
+           len(latency_ofp_to_device_list) > 0 and\
+           len(latency_t0_to_device_list) > 0 and\
+           len(latency_tcp_to_ofp_list) > 0:
+            assertion = main.TRUE
+        #Use independent checks (not elif) so that every empty list
+        #gets a placeholder 0; otherwise the min/max/sum calls below
+        #would fail when more than one list is empty
+        if len(latency_end_to_end_list) == 0:
+            latency_end_to_end_list.append(0)
+            assertion = main.FALSE
+        if len(latency_ofp_to_graph_list) == 0:
+            latency_ofp_to_graph_list.append(0)
+            assertion = main.FALSE
+        if len(latency_ofp_to_device_list) == 0:
+            latency_ofp_to_device_list.append(0)
+            assertion = main.FALSE
+        if len(latency_t0_to_device_list) == 0:
+            latency_t0_to_device_list.append(0)
+            assertion = main.FALSE
+        if len(latency_tcp_to_ofp_list) == 0:
+            latency_tcp_to_ofp_list.append(0)
+            assertion = main.FALSE
+
+        #Calculate min, max, avg of latency lists
+        latency_end_to_end_max = \
+                int(max(latency_end_to_end_list))
+        latency_end_to_end_min = \
+                int(min(latency_end_to_end_list))
+        latency_end_to_end_avg = \
+                (int(sum(latency_end_to_end_list)) / \
+                 len(latency_end_to_end_list))
+        latency_end_to_end_std_dev = \
+                str(round(numpy.std(latency_end_to_end_list),1))
+
+        latency_ofp_to_graph_max = \
+                int(max(latency_ofp_to_graph_list))
+        latency_ofp_to_graph_min = \
+                int(min(latency_ofp_to_graph_list))
+        latency_ofp_to_graph_avg = \
+                (int(sum(latency_ofp_to_graph_list)) / \
+                 len(latency_ofp_to_graph_list))
+        latency_ofp_to_graph_std_dev = \
+                str(round(numpy.std(latency_ofp_to_graph_list),1))
+
+        latency_ofp_to_device_max = \
+                int(max(latency_ofp_to_device_list))
+        latency_ofp_to_device_min = \
+                int(min(latency_ofp_to_device_list))
+        latency_ofp_to_device_avg = \
+                (int(sum(latency_ofp_to_device_list)) / \
+                 len(latency_ofp_to_device_list))
+        latency_ofp_to_device_std_dev = \
+                str(round(numpy.std(latency_ofp_to_device_list),1))
+
+        latency_t0_to_device_max = \
+                int(max(latency_t0_to_device_list))
+        latency_t0_to_device_min = \
+                int(min(latency_t0_to_device_list))
+        latency_t0_to_device_avg = \
+                (int(sum(latency_t0_to_device_list)) / \
+                 len(latency_t0_to_device_list))
+        latency_t0_to_device_std_dev = \
+                str(round(numpy.std(latency_t0_to_device_list),1))
+
+        latency_tcp_to_ofp_max = \
+                int(max(latency_tcp_to_ofp_list))
+        latency_tcp_to_ofp_min = \
+                int(min(latency_tcp_to_ofp_list))
+        latency_tcp_to_ofp_avg = \
+                (int(sum(latency_tcp_to_ofp_list)) / \
+                 len(latency_tcp_to_ofp_list))
+        latency_tcp_to_ofp_std_dev = \
+                str(round(numpy.std(latency_tcp_to_ofp_list),1))
+
+        main.log.report("Switch add - End-to-end latency: "+\
+                "Avg: "+str(latency_end_to_end_avg)+" ms "+
+                "Std Deviation: "+latency_end_to_end_std_dev+" ms")
+        main.log.report("Switch add - OFP-to-Graph latency: "+\
+                "Note: results are not accurate to sub-millisecond. "+
+                "Any sub-millisecond results are rounded to 0 ms. ")
+        main.log.report("Avg: "+str(latency_ofp_to_graph_avg)+" ms "+
+                "Std Deviation: "+latency_ofp_to_graph_std_dev+" ms")
+        main.log.report("Switch add - TCP-to-OFP latency: "+\
+                "Avg: "+str(latency_tcp_to_ofp_avg)+" ms "+
+                "Std Deviation: "+latency_tcp_to_ofp_std_dev+" ms")
+
+        if debug_mode == 'on':
+            main.ONOS1.cp_logs_to_dir("/opt/onos/log/karaf.log",
+                    "/tmp/", copy_file_name="sw_lat_karaf")
+
+        utilities.assert_equals(expect=main.TRUE, actual=assertion,
+                onpass="Switch latency test successful",
+                onfail="Switch latency test failed")
+
+    def CASE3(self, main):
+        '''
+        Bring port up / down and measure latency.
+        Port enable / disable is simulated by ifconfig up / down
+        
+        In ONOS-next, we must ensure that the port we are 
+        manipulating is connected to another switch with a valid
+        connection. Otherwise, graph view will not be updated.
+        '''
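+        #For both the port-down and port-up direction, the latencies below
+        #are computed roughly as (sketch):
+        #  graph-to-ofp  = graphTimestamp  - t(OFP Port Status)
+        #  device-to-ofp = deviceTimestamp - t(OFP Port Status)
+        #where t(OFP Port Status) is taken from the tshark capture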
+        import time
+        import subprocess
+        import os
+        import requests
+        import json
+        import numpy
+
+        ONOS1_ip = main.params['CTRL']['ip1']
+        ONOS2_ip = main.params['CTRL']['ip2']
+        ONOS3_ip = main.params['CTRL']['ip3']
+        ONOS_user = main.params['CTRL']['user']
+
+        default_sw_port = main.params['CTRL']['port1']
+      
+        assertion = main.TRUE
+        #Number of iterations of case
+        num_iter = main.params['TEST']['numIter']
+       
+        #Timestamp 'keys' for json metrics output.
+        #These are subject to change, hence moved into params
+        deviceTimestamp = main.params['JSON']['deviceTimestamp']
+        graphTimestamp = main.params['JSON']['graphTimestamp']
+        
+        debug_mode = main.params['TEST']['debugMode']
+
+        local_time = time.strftime('%x %X')
+        local_time = local_time.replace("/","")
+        local_time = local_time.replace(" ","_")
+        local_time = local_time.replace(":","")
+        if debug_mode == 'on':
+            main.ONOS1.tshark_pcap("eth0",
+                    "/tmp/port_lat_pcap_"+local_time) 
+
+        #Threshold for this test case
+        up_threshold_str = main.params['TEST']['portUpThreshold']
+        down_threshold_str = main.params['TEST']['portDownThreshold']
+        
+        up_threshold_obj = up_threshold_str.split(",")
+        down_threshold_obj = down_threshold_str.split(",")
+
+        up_threshold_min = int(up_threshold_obj[0])
+        up_threshold_max = int(up_threshold_obj[1])
+
+        down_threshold_min = int(down_threshold_obj[0])
+        down_threshold_max = int(down_threshold_obj[1])
+
+        #NOTE: Some hardcoded variables you may need to configure
+        #      besides the params
+            
+        tshark_port_status = "OFP 130 Port Status"
+
+        tshark_port_up = "/tmp/tshark_port_up.txt"
+        tshark_port_down = "/tmp/tshark_port_down.txt"
+        interface_config = "s1-eth1"
+
+        main.log.report("Port enable / disable latency")
+        main.log.report("Simulated by ifconfig up / down")
+        main.log.report("Total iterations of test: "+str(num_iter))
+
+        main.step("Assign switches s1 and s2 to controller 1")
+        main.Mininet1.assign_sw_controller(sw="1",ip1=ONOS1_ip,
+                port1=default_sw_port)
+        main.Mininet1.assign_sw_controller(sw="2",ip1=ONOS1_ip,
+                port1=default_sw_port)
+
+        #Give enough time for metrics to propagate the 
+        #assign controller event. Otherwise, these events may
+        #carry over to our measurements
+        time.sleep(15)
+
+        port_up_device_to_ofp_list = []
+        port_up_graph_to_ofp_list = []
+        port_down_device_to_ofp_list = []
+        port_down_graph_to_ofp_list = []
+
+        for i in range(0, int(num_iter)):
+            main.step("Starting wireshark capture for port status down")
+            main.ONOS1.tshark_grep(tshark_port_status,
+                    tshark_port_down)
+            
+            time.sleep(5)
+
+            #Disable interface that is connected to switch 2
+            main.step("Disable port: "+interface_config)
+            main.Mininet1.handle.sendline("sh ifconfig "+
+                    interface_config+" down")
+            main.Mininet1.handle.expect("mininet>")
+
+            time.sleep(3)
+            main.ONOS1.tshark_stop()
+            
+            main.step("Obtain t1 by metrics call")
+            json_str_up_1 = main.ONOS1cli.topology_events_metrics()
+            json_str_up_2 = main.ONOS2cli.topology_events_metrics()
+            json_str_up_3 = main.ONOS3cli.topology_events_metrics()
+
+            json_obj_1 = json.loads(json_str_up_1)
+            json_obj_2 = json.loads(json_str_up_2)
+            json_obj_3 = json.loads(json_str_up_3)
+            
+            #Copy tshark output file from ONOS to TestON instance
+            #/tmp directory
+            os.system("scp "+ONOS_user+"@"+ONOS1_ip+":"+
+                    tshark_port_down+" /tmp/")
+
+            f_port_down = open(tshark_port_down, 'r')
+            #Get first line of port down event from tshark
+            f_line = f_port_down.readline()
+            obj_down = f_line.split(" ")
+            if len(f_line) > 0:
+                timestamp_begin_pt_down = int(float(obj_down[1])*1000)
+                main.log.info("Port down begin timestamp: "+
+                        str(timestamp_begin_pt_down))
+            else:
+                main.log.info("Tshark output file returned unexpected"+
+                        " results: "+str(obj_down))
+                timestamp_begin_pt_down = 0
+            
+            f_port_down.close()
+
+            main.log.info("TEST tshark obj: "+str(obj_down))
+
+            time.sleep(3)
+
+            #Obtain graph timestamp. This timestamp captures
+            #the epoch time at which the topology graph was updated.
+            graph_timestamp_1 = \
+                    json_obj_1[graphTimestamp]['value']
+            graph_timestamp_2 = \
+                    json_obj_2[graphTimestamp]['value']
+            graph_timestamp_3 = \
+                    json_obj_3[graphTimestamp]['value']
+
+            main.log.info("TEST graph timestamp ONOS1: "+
+                    str(graph_timestamp_1))
+
+            #Obtain device timestamp. This timestamp captures
+            #the epoch time at which the device event happened
+            device_timestamp_1 = \
+                    json_obj_1[deviceTimestamp]['value'] 
+            device_timestamp_2 = \
+                    json_obj_2[deviceTimestamp]['value'] 
+            device_timestamp_3 = \
+                    json_obj_3[deviceTimestamp]['value'] 
+
+            #Get delta between graph event and OFP 
+            pt_down_graph_to_ofp_1 = int(graph_timestamp_1) -\
+                    int(timestamp_begin_pt_down)
+            pt_down_graph_to_ofp_2 = int(graph_timestamp_2) -\
+                    int(timestamp_begin_pt_down)
+            pt_down_graph_to_ofp_3 = int(graph_timestamp_3) -\
+                    int(timestamp_begin_pt_down)
+
+            #Get delta between device event and OFP
+            pt_down_device_to_ofp_1 = int(device_timestamp_1) -\
+                    int(timestamp_begin_pt_down)
+            pt_down_device_to_ofp_2 = int(device_timestamp_2) -\
+                    int(timestamp_begin_pt_down)
+            pt_down_device_to_ofp_3 = int(device_timestamp_3) -\
+                    int(timestamp_begin_pt_down)
+       
+            #Calculate average across clusters
+            pt_down_graph_to_ofp_avg =\
+                    (int(pt_down_graph_to_ofp_1) +
+                     int(pt_down_graph_to_ofp_2) + 
+                     int(pt_down_graph_to_ofp_3)) / 3
+            pt_down_device_to_ofp_avg = \
+                    (int(pt_down_device_to_ofp_1) + 
+                     int(pt_down_device_to_ofp_2) +
+                     int(pt_down_device_to_ofp_3)) / 3
+
+            if pt_down_graph_to_ofp_avg > down_threshold_min and \
+                    pt_down_graph_to_ofp_avg < down_threshold_max:
+                port_down_graph_to_ofp_list.append(
+                    pt_down_graph_to_ofp_avg)
+                main.log.info("Port down: graph to ofp avg: "+
+                    str(pt_down_graph_to_ofp_avg) + " ms")
+            else:
+                main.log.info("Average port down graph-to-ofp result" +
+                        " exceeded the threshold: "+
+                        str(pt_down_graph_to_ofp_avg))
+
+            if pt_down_device_to_ofp_avg > 0 and \
+                    pt_down_device_to_ofp_avg < 1000:
+                port_down_device_to_ofp_list.append(
+                    pt_down_device_to_ofp_avg)
+                main.log.info("Port down: device to ofp avg: "+
+                    str(pt_down_device_to_ofp_avg) + " ms")
+            else:
+                main.log.info("Average port down device-to-ofp result" +
+                        " exceeded the threshold: "+
+                        str(pt_down_device_to_ofp_avg))
+
+            #Port up events 
+            main.step("Enable port and obtain timestamp")
+            main.step("Starting wireshark capture for port status up")
+            main.ONOS1.tshark_grep(tshark_port_status, tshark_port_up)
+            time.sleep(5)
+
+            main.Mininet1.handle.sendline("sh ifconfig "+
+                    interface_config+" up")
+            main.Mininet1.handle.expect("mininet>")
+            
+            #Allow time for tshark to capture event
+            time.sleep(3)
+            main.ONOS1.tshark_stop()
+
+            #Obtain metrics shortly afterwards
+            #This timestamp captures
+            #the epoch time at which the topology graph was updated.
+            main.step("Obtain t1 by REST call")
+            json_str_up_1 = main.ONOS1cli.topology_events_metrics()
+            json_str_up_2 = main.ONOS2cli.topology_events_metrics()
+            json_str_up_3 = main.ONOS3cli.topology_events_metrics()
+            
+            json_obj_1 = json.loads(json_str_up_1)
+            json_obj_2 = json.loads(json_str_up_2)
+            json_obj_3 = json.loads(json_str_up_3)
+
+            os.system("scp "+ONOS_user+"@"+ONOS1_ip+":"+
+                    tshark_port_up+" /tmp/")
+
+            f_port_up = open(tshark_port_up, 'r')
+            f_line = f_port_up.readline()
+            obj_up = f_line.split(" ")
+            if len(f_line) > 0:
+                timestamp_begin_pt_up = int(float(obj_up[1])*1000)
+                main.log.info("Port up begin timestamp: "+
+                        str(timestamp_begin_pt_up))
+            else:
+                main.log.info("Tshark output file returned unexpected"+
+                        " results.")
+                timestamp_begin_pt_up = 0
+            
+            f_port_up.close()
+
+            graph_timestamp_1 = \
+                    json_obj_1[graphTimestamp]['value']
+            graph_timestamp_2 = \
+                    json_obj_2[graphTimestamp]['value']
+            graph_timestamp_3 = \
+                    json_obj_3[graphTimestamp]['value']
+
+            #Obtain device timestamp. This timestamp captures
+            #the epoch time at which the device event happened
+            device_timestamp_1 = \
+                    json_obj_1[deviceTimestamp]['value'] 
+            device_timestamp_2 = \
+                    json_obj_2[deviceTimestamp]['value'] 
+            device_timestamp_3 = \
+                    json_obj_3[deviceTimestamp]['value'] 
+
+            #Get delta between graph event and OFP 
+            pt_up_graph_to_ofp_1 = int(graph_timestamp_1) -\
+                    int(timestamp_begin_pt_up)
+            pt_up_graph_to_ofp_2 = int(graph_timestamp_2) -\
+                    int(timestamp_begin_pt_up)
+            pt_up_graph_to_ofp_3 = int(graph_timestamp_3) -\
+                    int(timestamp_begin_pt_up)
+
+            #Get delta between device event and OFP
+            pt_up_device_to_ofp_1 = int(device_timestamp_1) -\
+                    int(timestamp_begin_pt_up)
+            pt_up_device_to_ofp_2 = int(device_timestamp_2) -\
+                    int(timestamp_begin_pt_up)
+            pt_up_device_to_ofp_3 = int(device_timestamp_3) -\
+                    int(timestamp_begin_pt_up)
+
+            main.log.info("ONOS1 delta G2O: "+str(pt_up_graph_to_ofp_1))
+            main.log.info("ONOS2 delta G2O: "+str(pt_up_graph_to_ofp_2))
+            main.log.info("ONOS3 delta G2O: "+str(pt_up_graph_to_ofp_3))
+
+            main.log.info("ONOS1 delta D2O: "+str(pt_up_device_to_ofp_1))
+            main.log.info("ONOS2 delta D2O: "+str(pt_up_device_to_ofp_2)) 
+            main.log.info("ONOS3 delta D2O: "+str(pt_up_device_to_ofp_3)) 
+
+            pt_up_graph_to_ofp_avg = \
+                    (int(pt_up_graph_to_ofp_1) + 
+                     int(pt_up_graph_to_ofp_2) +
+                     int(pt_up_graph_to_ofp_3)) / 3
+
+            pt_up_device_to_ofp_avg = \
+                    (int(pt_up_device_to_ofp_1) + 
+                     int(pt_up_device_to_ofp_2) +
+                     int(pt_up_device_to_ofp_3)) / 3
+
+            if pt_up_graph_to_ofp_avg > up_threshold_min and \
+                    pt_up_graph_to_ofp_avg < up_threshold_max: 
+                port_up_graph_to_ofp_list.append(
+                        pt_up_graph_to_ofp_avg)
+                main.log.info("Port down: graph to ofp avg: "+
+                    str(pt_up_graph_to_ofp_avg) + " ms")
+            else:
+                main.log.info("Average port up graph-to-ofp result"+
+                        " exceeded the threshold: "+
+                        str(pt_up_graph_to_ofp_avg))
+            
+            if pt_up_device_to_ofp_avg > up_threshold_min and \
+                    pt_up_device_to_ofp_avg < up_threshold_max:
+                port_up_device_to_ofp_list.append(
+                        pt_up_device_to_ofp_avg)
+                main.log.info("Port up: device to ofp avg: "+
+                    str(pt_up_device_to_ofp_avg) + " ms")
+            else:
+                main.log.info("Average port up device-to-ofp result"+
+                        " exceeded the threshold: "+
+                        str(pt_up_device_to_ofp_avg))
+            
+            #END ITERATION FOR LOOP
+        
+        #Check all list for latency existence and set assertion
+        if (port_down_graph_to_ofp_list and port_down_device_to_ofp_list\
+           and port_up_graph_to_ofp_list and port_up_device_to_ofp_list):
+            assertion = main.TRUE
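+        #NOTE: the min/max/avg calculations below assume every list above
+        #kept at least one in-threshold sample; an empty list (all samples
+        #filtered out) would raise an exception at this point.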
+
+        #Calculate and report latency measurements
+        port_down_graph_to_ofp_min = min(port_down_graph_to_ofp_list)
+        port_down_graph_to_ofp_max = max(port_down_graph_to_ofp_list)
+        port_down_graph_to_ofp_avg = \
+                (sum(port_down_graph_to_ofp_list) / 
+                 len(port_down_graph_to_ofp_list))
+        port_down_graph_to_ofp_std_dev = \
+                str(round(numpy.std(port_down_graph_to_ofp_list),1))
+        
+        main.log.report("Port down graph-to-ofp "+
+                "Avg: "+str(port_down_graph_to_ofp_avg)+" ms "+
+                "Std Deviation: "+port_down_graph_to_ofp_std_dev+" ms")
+        
+        port_down_device_to_ofp_min = min(port_down_device_to_ofp_list)
+        port_down_device_to_ofp_max = max(port_down_device_to_ofp_list)
+        port_down_device_to_ofp_avg = \
+                (sum(port_down_device_to_ofp_list) /\
+                 len(port_down_device_to_ofp_list))
+        port_down_device_to_ofp_std_dev = \
+                str(round(numpy.std(port_down_device_to_ofp_list),1))
+        
+        main.log.report("Port down device-to-ofp "+
+                "Avg: "+str(port_down_device_to_ofp_avg)+" ms "+
+                "Std Deviation: "+port_down_device_to_ofp_std_dev+" ms")
+        
+        port_up_graph_to_ofp_min = min(port_up_graph_to_ofp_list)
+        port_up_graph_to_ofp_max = max(port_up_graph_to_ofp_list)
+        port_up_graph_to_ofp_avg = \
+                (sum(port_up_graph_to_ofp_list) /\
+                 len(port_up_graph_to_ofp_list))
+        port_up_graph_to_ofp_std_dev = \
+                str(round(numpy.std(port_up_graph_to_ofp_list),1))
+        
+        main.log.report("Port up graph-to-ofp "+
+                "Avg: "+str(port_up_graph_to_ofp_avg)+" ms "+
+                "Std Deviation: "+port_up_graph_to_ofp_std_dev+" ms")
+          
+        port_up_device_to_ofp_min = min(port_up_device_to_ofp_list)
+        port_up_device_to_ofp_max = max(port_up_device_to_ofp_list)
+        port_up_device_to_ofp_avg = \
+                (sum(port_up_device_to_ofp_list) /\
+                 len(port_up_device_to_ofp_list))
+        port_up_device_to_ofp_std_dev = \
+                str(round(numpy.std(port_up_device_to_ofp_list),1))
+        
+        main.log.report("Port up device-to-ofp "+
+                "Avg: "+str(port_up_device_to_ofp_avg)+" ms "+
+                "Std Deviation: "+port_up_device_to_ofp_std_dev+" ms")
+
+        utilities.assert_equals(expect=main.TRUE, actual=assertion,
+                onpass="Port discovery latency calculation successful",
+                onfail="Port discovery test failed")
+
+    def CASE4(self, main):
+        '''
+        Link down event using loss rate 100%
+        
+        Important:
+            Use a simple 2 switch topology with 1 link between
+            the two switches. Ensure that mac addresses of the 
+            switches are 1 / 2 respectively
+        '''
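+        #NOTE: the link checks below match the substring '01' in the
+        #link's src device id. This assumes switch s1 appears in ONOS
+        #with a device id containing '01' (e.g. of:0000000000000001),
+        #hence the docstring requirement that switch MACs be 1 and 2.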
+        import time
+        import subprocess
+        import os
+        import requests
+        import json
+        import numpy 
+    
+        ONOS1_ip = main.params['CTRL']['ip1']
+        ONOS2_ip = main.params['CTRL']['ip2']
+        ONOS3_ip = main.params['CTRL']['ip3']
+        ONOS_user = main.params['CTRL']['user']
+
+        default_sw_port = main.params['CTRL']['port1']
+       
+        #Number of iterations of case
+        num_iter = main.params['TEST']['numIter']
+       
+        #Timestamp 'keys' for json metrics output.
+        #These are subject to change, hence moved into params
+        deviceTimestamp = main.params['JSON']['deviceTimestamp']
+        linkTimestamp = main.params['JSON']['linkTimestamp'] 
+        graphTimestamp = main.params['JSON']['graphTimestamp']
+        
+        debug_mode = main.params['TEST']['debugMode']
+
+        local_time = time.strftime('%x %X')
+        local_time = local_time.replace("/","")
+        local_time = local_time.replace(" ","_")
+        local_time = local_time.replace(":","")
+        if debug_mode == 'on':
+            main.ONOS1.tshark_pcap("eth0",
+                    "/tmp/link_lat_pcap_"+local_time) 
+
+        #Threshold for this test case
+        up_threshold_str = main.params['TEST']['linkUpThreshold']
+        down_threshold_str = main.params['TEST']['linkDownThreshold']
+
+        up_threshold_obj = up_threshold_str.split(",")
+        down_threshold_obj = down_threshold_str.split(",")
+
+        up_threshold_min = int(up_threshold_obj[0])
+        up_threshold_max = int(up_threshold_obj[1])
+
+        down_threshold_min = int(down_threshold_obj[0])
+        down_threshold_max = int(down_threshold_obj[1])
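+        #The thresholds come from the params file as "min,max" strings,
+        #e.g. <linkDownThreshold>0,10000</linkDownThreshold> (values
+        #shown only as an illustration); averaged latencies outside the
+        #range are discarded below.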
+
+        assertion = main.TRUE
+        #Link event timestamp to system time list
+        link_down_link_to_system_list = []
+        link_up_link_to_system_list = []
+        #Graph event timestamp to system time list
+        link_down_graph_to_system_list = []
+        link_up_graph_to_system_list = [] 
+
+        main.log.report("Link up / down discovery latency between "+
+                "two switches")
+        main.log.report("Simulated by setting loss-rate 100%")
+        main.log.report("'tc qdisc add dev <intfs> root netem loss 100%'") 
+        main.log.report("Total iterations of test: "+str(num_iter))
+
+        main.step("Assign all switches")
+        main.Mininet1.assign_sw_controller(sw="1",
+                ip1=ONOS1_ip, port1=default_sw_port)
+        main.Mininet1.assign_sw_controller(sw="2",
+                ip1=ONOS1_ip, port1=default_sw_port)
+
+        main.step("Verifying switch assignment")
+        result_s1 = main.Mininet1.get_sw_controller(sw="s1")
+        result_s2 = main.Mininet1.get_sw_controller(sw="s2")
+          
+        #Allow time for events to finish before taking measurements
+        time.sleep(10)
+
+        link_down1 = False
+        link_down2 = False
+        link_down3 = False
+        #Start iteration of link event test
+        for i in range(0, int(num_iter)):
+            main.step("Getting initial system time as t0")
+        
+            #System time in epoch ms
+            timestamp_link_down_t0 = time.time() * 1000
+            #Link down is simulated by 100% loss rate using traffic 
+            #control command
+            main.Mininet1.handle.sendline(
+                    "sh tc qdisc add dev s1-eth1 root netem loss 100%")
+
+            #TODO: Iterate through 'links' command to verify that
+            #      link s1 -> s2 went down (loop timeout 30 seconds) 
+            #      on all 3 ONOS instances
+            main.log.info("Checking ONOS for link update")
+            loop_count = 0
+            while( not (link_down1 and link_down2 and link_down3)\
+                    and loop_count < 30 ):
+                json_str1 = main.ONOS1cli.links()
+                json_str2 = main.ONOS2cli.links()
+                json_str3 = main.ONOS3cli.links()
+                
+                if not (json_str1 and json_str2 and json_str3):
+                    main.log.error("CLI command returned error ")
+                    break
+                else:
+                    json_obj1 = json.loads(json_str1)
+                    json_obj2 = json.loads(json_str2)
+                    json_obj3 = json.loads(json_str3)
+                for obj1 in json_obj1:
+                    if '01' not in obj1['src']['device']:
+                        link_down1 = True
+                        main.log.info("Link down from "+
+                                "s1 -> s2 on ONOS1 detected")
+                for obj2 in json_obj2:
+                    if '01' not in obj2['src']['device']:
+                        link_down2 = True
+                        main.log.info("Link down from "+
+                                "s1 -> s2 on ONOS2 detected")
+                for obj3 in json_obj3:
+                    if '01' not in obj3['src']['device']:
+                        link_down3 = True
+                        main.log.info("Link down from "+
+                                "s1 -> s2 on ONOS3 detected")
+                
+                loop_count += 1
+                #If CLI doesn't like the continuous requests
+                #and exits in this loop, increase the sleep here.
+                #Consequently, while loop timeout will increase
+                time.sleep(1)
+    
+            #Give time for metrics measurement to catch up
+            #NOTE: May need to be configured more accurately
+            time.sleep(10)
+            #If we exited the while loop and link down 1,2,3 are still 
+            #false, then ONOS has failed to discover link down event
+            if not (link_down1 and link_down2 and link_down3):
+                main.log.info("Link down discovery failed")
+                
+                link_down_lat_graph1 = 0
+                link_down_lat_graph2 = 0
+                link_down_lat_graph3 = 0
+                link_down_lat_link1 = 0
+                link_down_lat_link2 = 0
+                link_down_lat_link3 = 0
+                
+                assertion = main.FALSE
+            else:
+                json_topo_metrics_1 =\
+                        main.ONOS1cli.topology_events_metrics()
+                json_topo_metrics_2 =\
+                        main.ONOS2cli.topology_events_metrics()
+                json_topo_metrics_3 =\
+                        main.ONOS3cli.topology_events_metrics()
+                json_topo_metrics_1 = json.loads(json_topo_metrics_1)
+                json_topo_metrics_2 = json.loads(json_topo_metrics_2)
+                json_topo_metrics_3 = json.loads(json_topo_metrics_3)
+
+                main.log.info("Obtaining graph and device timestamp")
+                graph_timestamp_1 = \
+                    json_topo_metrics_1[graphTimestamp]['value']
+                graph_timestamp_2 = \
+                    json_topo_metrics_2[graphTimestamp]['value']
+                graph_timestamp_3 = \
+                    json_topo_metrics_3[graphTimestamp]['value']
+
+                link_timestamp_1 = \
+                    json_topo_metrics_1[linkTimestamp]['value']
+                link_timestamp_2 = \
+                    json_topo_metrics_2[linkTimestamp]['value']
+                link_timestamp_3 = \
+                    json_topo_metrics_3[linkTimestamp]['value']
+
+                if graph_timestamp_1 and graph_timestamp_2 and\
+                        graph_timestamp_3 and link_timestamp_1 and\
+                        link_timestamp_2 and link_timestamp_3:
+                    link_down_lat_graph1 = int(graph_timestamp_1) -\
+                            int(timestamp_link_down_t0)
+                    link_down_lat_graph2 = int(graph_timestamp_2) -\
+                            int(timestamp_link_down_t0)
+                    link_down_lat_graph3 = int(graph_timestamp_3) -\
+                            int(timestamp_link_down_t0)
+                
+                    link_down_lat_link1 = int(link_timestamp_1) -\
+                            int(timestamp_link_down_t0)
+                    link_down_lat_link2 = int(link_timestamp_2) -\
+                            int(timestamp_link_down_t0)
+                    link_down_lat_link3 = int(link_timestamp_3) -\
+                            int(timestamp_link_down_t0)
+                else:
+                    main.log.error("There was an error calculating"+
+                        " the delta for link down event")
+                    link_down_lat_graph1 = 0
+                    link_down_lat_graph2 = 0
+                    link_down_lat_graph3 = 0
+                    
+                    link_down_lat_link1 = 0
+                    link_down_lat_link2 = 0
+                    link_down_lat_link3 = 0
+        
+            main.log.info("Link down latency ONOS1 iteration "+
+                    str(i)+" (end-to-end): "+
+                    str(link_down_lat_graph1)+" ms")
+            main.log.info("Link down latency ONOS2 iteration "+
+                    str(i)+" (end-to-end): "+
+                    str(link_down_lat_graph2)+" ms")
+            main.log.info("Link down latency ONOS3 iteration "+
+                    str(i)+" (end-to-end): "+
+                    str(link_down_lat_graph3)+" ms")
+            
+            main.log.info("Link down latency ONOS1 iteration "+
+                    str(i)+" (link-event-to-system-timestamp): "+
+                    str(link_down_lat_link1)+" ms")
+            main.log.info("Link down latency ONOS2 iteration "+
+                    str(i)+" (link-event-to-system-timestamp): "+
+                    str(link_down_lat_link2)+" ms")
+            main.log.info("Link down latency ONOS3 iteration "+
+                    str(i)+" (link-event-to-system-timestamp): "+
+                    str(link_down_lat_link3)+" ms")
+      
+            #Calculate avg of node calculations
+            link_down_lat_graph_avg =\
+                    (link_down_lat_graph1 +
+                     link_down_lat_graph2 +
+                     link_down_lat_graph3) / 3
+            link_down_lat_link_avg =\
+                    (link_down_lat_link1 +
+                     link_down_lat_link2 +
+                     link_down_lat_link3) / 3
+
+            #Set threshold and append latency to list
+            if link_down_lat_graph_avg > down_threshold_min and\
+               link_down_lat_graph_avg < down_threshold_max:
+                link_down_graph_to_system_list.append(
+                        link_down_lat_graph_avg)
+            else:
+                main.log.info("Link down latency exceeded threshold")
+                main.log.info("Results for iteration "+str(i)+
+                        "have been omitted")
+            if link_down_lat_link_avg > down_threshold_min and\
+               link_down_lat_link_avg < down_threshold_max:
+                link_down_link_to_system_list.append(
+                        link_down_lat_link_avg)
+            else:
+                main.log.info("Link down latency exceeded threshold")
+                main.log.info("Results for iteration "+str(i)+
+                        "have been omitted")
+
+            #NOTE: To remove loss rate and measure latency:
+            #       'sh tc qdisc del dev s1-eth1 root'
+            timestamp_link_up_t0 = time.time() * 1000
+            main.Mininet1.handle.sendline("sh tc qdisc del dev "+
+                    "s1-eth1 root")
+            main.Mininet1.handle.expect("mininet>")
+            
+            main.log.info("Checking ONOS for link update")
+            
+            link_down1 = True
+            link_down2 = True
+            link_down3 = True
+            loop_count = 0
+            while( (link_down1 and link_down2 and link_down3)\
+                    and loop_count < 30 ):
+                json_str1 = main.ONOS1cli.links()
+                json_str2 = main.ONOS2cli.links()
+                json_str3 = main.ONOS3cli.links()
+                if not (json_str1 and json_str2 and json_str3):
+                    main.log.error("CLI command returned error ")
+                    break
+                else:
+                    json_obj1 = json.loads(json_str1)
+                    json_obj2 = json.loads(json_str2)
+                    json_obj3 = json.loads(json_str3)
+                
+                for obj1 in json_obj1:
+                    if '01' in obj1['src']['device']:
+                        link_down1 = False 
+                        main.log.info("Link up from "+
+                            "s1 -> s2 on ONOS1 detected")
+                for obj2 in json_obj2:
+                    if '01' in obj2['src']['device']:
+                        link_down2 = False 
+                        main.log.info("Link up from "+
+                            "s1 -> s2 on ONOS2 detected")
+                for obj3 in json_obj3:
+                    if '01' in obj3['src']['device']:
+                        link_down3 = False 
+                        main.log.info("Link up from "+
+                            "s1 -> s2 on ONOS3 detected")
+                
+                loop_count += 1
+                time.sleep(1)
+            
+            if (link_down1 and link_down2 and link_down3):
+                main.log.info("Link up discovery failed")
+                
+                link_up_lat_graph1 = 0
+                link_up_lat_graph2 = 0
+                link_up_lat_graph3 = 0
+                link_up_lat_link1 = 0
+                link_up_lat_link2 = 0
+                link_up_lat_link3 = 0
+                
+                assertion = main.FALSE
+            else:
+                json_topo_metrics_1 =\
+                        main.ONOS1cli.topology_events_metrics()
+                json_topo_metrics_2 =\
+                        main.ONOS2cli.topology_events_metrics()
+                json_topo_metrics_3 =\
+                        main.ONOS3cli.topology_events_metrics()
+                json_topo_metrics_1 = json.loads(json_topo_metrics_1)
+                json_topo_metrics_2 = json.loads(json_topo_metrics_2)
+                json_topo_metrics_3 = json.loads(json_topo_metrics_3)
+
+                main.log.info("Obtaining graph and device timestamp")
+                graph_timestamp_1 = \
+                    json_topo_metrics_1[graphTimestamp]['value']
+                graph_timestamp_2 = \
+                    json_topo_metrics_2[graphTimestamp]['value']
+                graph_timestamp_3 = \
+                    json_topo_metrics_3[graphTimestamp]['value']
+
+                link_timestamp_1 = \
+                    json_topo_metrics_1[linkTimestamp]['value']
+                link_timestamp_2 = \
+                    json_topo_metrics_2[linkTimestamp]['value']
+                link_timestamp_3 = \
+                    json_topo_metrics_3[linkTimestamp]['value']
+
+                if graph_timestamp_1 and graph_timestamp_2 and\
+                        graph_timestamp_3 and link_timestamp_1 and\
+                        link_timestamp_2 and link_timestamp_3:
+                    link_up_lat_graph1 = int(graph_timestamp_1) -\
+                            int(timestamp_link_up_t0)
+                    link_up_lat_graph2 = int(graph_timestamp_2) -\
+                            int(timestamp_link_up_t0)
+                    link_up_lat_graph3 = int(graph_timestamp_3) -\
+                            int(timestamp_link_up_t0)
+                
+                    link_up_lat_link1 = int(link_timestamp_1) -\
+                            int(timestamp_link_up_t0)
+                    link_up_lat_link2 = int(link_timestamp_2) -\
+                            int(timestamp_link_up_t0)
+                    link_up_lat_link3 = int(link_timestamp_3) -\
+                            int(timestamp_link_up_t0)
+                else:
+                    main.log.error("There was an error calculating"+
+                        " the delta for link down event")
+                    link_up_lat_graph1 = 0
+                    link_up_lat_graph2 = 0
+                    link_up_lat_graph3 = 0
+                    
+                    link_up_lat_link1 = 0
+                    link_up_lat_link2 = 0
+                    link_up_lat_link3 = 0
+       
+            if debug_mode == 'on':
+                main.log.info("Link up latency ONOS1 iteration "+
+                    str(i)+" (end-to-end): "+
+                    str(link_up_lat_graph1)+" ms")
+                main.log.info("Link up latency ONOS2 iteration "+
+                    str(i)+" (end-to-end): "+
+                    str(link_up_lat_graph2)+" ms")
+                main.log.info("Link up latency ONOS3 iteration "+
+                    str(i)+" (end-to-end): "+
+                    str(link_up_lat_graph3)+" ms")
+            
+                main.log.info("Link up latency ONOS1 iteration "+
+                    str(i)+" (link-event-to-system-timestamp): "+
+                    str(link_up_lat_link1)+" ms")
+                main.log.info("Link up latency ONOS2 iteration "+
+                    str(i)+" (link-event-to-system-timestamp): "+
+                    str(link_up_lat_link2)+" ms")
+                main.log.info("Link up latency ONOS3 iteration "+
+                    str(i)+" (link-event-to-system-timestamp): "+
+                    str(link_up_lat_link3)+" ms")
+      
+            #Calculate avg of node calculations
+            link_up_lat_graph_avg =\
+                    (link_up_lat_graph1 +
+                     link_up_lat_graph2 +
+                     link_up_lat_graph3) / 3
+            link_up_lat_link_avg =\
+                    (link_up_lat_link1 +
+                     link_up_lat_link2 +
+                     link_up_lat_link3) / 3
+
+            #Set threshold and append latency to list
+            if link_up_lat_graph_avg > up_threshold_min and\
+               link_up_lat_graph_avg < up_threshold_max:
+                link_up_graph_to_system_list.append(
+                        link_up_lat_graph_avg)
+            else:
+                main.log.info("Link up latency exceeded threshold")
+                main.log.info("Results for iteration "+str(i)+
+                        "have been omitted")
+            if link_up_lat_link_avg > up_threshold_min and\
+               link_up_lat_link_avg < up_threshold_max:
+                link_up_link_to_system_list.append(
+                        link_up_lat_link_avg)
+            else:
+                main.log.info("Link up latency exceeded threshold")
+                main.log.info("Results for iteration "+str(i)+
+                        "have been omitted")
+
+        #Calculate min, max, avg of list and report
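+        #NOTE: as with the other cases, this assumes each list gathered
+        #at least one in-threshold sample; an empty list would raise here.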
+        link_down_min = min(link_down_graph_to_system_list)
+        link_down_max = max(link_down_graph_to_system_list)
+        link_down_avg = sum(link_down_graph_to_system_list) / \
+                        len(link_down_graph_to_system_list)
+        link_up_min = min(link_up_graph_to_system_list)
+        link_up_max = max(link_up_graph_to_system_list)
+        link_up_avg = sum(link_up_graph_to_system_list) / \
+                        len(link_up_graph_to_system_list)
+        link_down_std_dev = \
+                str(round(numpy.std(link_down_graph_to_system_list),1))
+        link_up_std_dev = \
+                str(round(numpy.std(link_up_graph_to_system_list),1))
+
+        main.log.report("Link down latency " +
+                "Avg: "+str(link_down_avg)+" ms "+
+                "Std Deviation: "+link_down_std_dev+" ms")
+        main.log.report("Link up latency "+
+                "Avg: "+str(link_up_avg)+" ms "+
+                "Std Deviation: "+link_up_std_dev+" ms")
+
+        utilities.assert_equals(expect=main.TRUE, actual=assertion,
+                onpass="Link discovery latency calculation successful",
+                onfail="Link discovery latency case failed")
+
+    def CASE5(self, main):
+        '''
+        100 Switch discovery latency
+
+        Important:
+            This test case can be potentially dangerous if 
+            your machine has previously set iptables rules.
+            One of the steps of the test case will flush
+            all existing iptables rules.
+        Note:
+            You can specify the number of switches in the 
+            params file to adjust the switch discovery size
+            (and specify the corresponding topology in Mininet1 
+            .topo file)
+        '''
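+        #Measurement sketch for this case: t0 is taken just before the
+        #'iptables -F' flush that re-opens the switch connections, and
+        #t1 is the ONOS graph event timestamp once every node reports
+        #all devices available; latency = t1 - t0, averaged over nodes.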
+        import time
+        import subprocess
+        import os
+        import requests
+        import json
+
+        ONOS1_ip = main.params['CTRL']['ip1']
+        ONOS2_ip = main.params['CTRL']['ip2']
+        ONOS3_ip = main.params['CTRL']['ip3']
+        MN1_ip = main.params['MN']['ip1']
+        ONOS_user = main.params['CTRL']['user']
+
+        default_sw_port = main.params['CTRL']['port1']
+       
+        #Number of iterations of case
+        num_iter = main.params['TEST']['numIter']
+        num_sw = main.params['TEST']['numSwitch']
+
+        #Timestamp 'keys' for json metrics output.
+        #These are subject to change, hence moved into params
+        deviceTimestamp = main.params['JSON']['deviceTimestamp']
+        graphTimestamp = main.params['JSON']['graphTimestamp']
+        
+        debug_mode = main.params['TEST']['debugMode']
+
+        local_time = time.strftime('%X')
+        local_time = local_time.replace("/","")
+        local_time = local_time.replace(" ","_")
+        local_time = local_time.replace(":","")
+        if debug_mode == 'on':
+            main.ONOS1.tshark_pcap("eth0",
+                    "/tmp/100_sw_lat_pcap_"+local_time) 
+ 
+        #Threshold for this test case
+        sw_disc_threshold_str = main.params['TEST']['swDisc100Threshold']
+        sw_disc_threshold_obj = sw_disc_threshold_str.split(",")
+        sw_disc_threshold_min = int(sw_disc_threshold_obj[0])
+        sw_disc_threshold_max = int(sw_disc_threshold_obj[1])
+
+        tshark_ofp_output = "/tmp/tshark_ofp_"+num_sw+"sw.txt"
+        tshark_tcp_output = "/tmp/tshark_tcp_"+num_sw+"sw.txt"
+
+        tshark_ofp_result_list = []
+        tshark_tcp_result_list = []
+
+        sw_discovery_lat_list = []
+
+        main.case(num_sw+" Switch discovery latency")
+        main.step("Assigning all switches to ONOS1")
+        for i in range(1, int(num_sw)+1):
+            main.Mininet1.assign_sw_controller(
+                    sw=str(i),
+                    ip1=ONOS1_ip,
+                    port1=default_sw_port)
+        
+        #Ensure that nodes are configured with ptpd
+        #Just a warning message
+        main.log.info("Please check ptpd configuration to ensure"+\
+                " All nodes' system times are in sync")
+        time.sleep(5)
+
+        for i in range(0, int(num_iter)):
+            
+            main.step("Set iptables rule to block incoming sw connections")
+            #Set iptables rule to block incoming switch connections
+            #The rule description is as follows:
+            #   Append to INPUT rule,
+            #   behavior DROP that matches following:
+            #       * packet type: tcp
+            #       * source IP: MN1_ip
+            #       * destination PORT: 6633
+            main.ONOS1.handle.sendline(
+                    "sudo iptables -A INPUT -p tcp -s "+MN1_ip+
+                    " --dport "+default_sw_port+" -j DROP")
+            main.ONOS1.handle.expect("\$") 
+            #   Append to OUTPUT rule, 
+            #   behavior DROP that matches following:
+            #       * packet type: tcp
+            #       * source IP: MN1_ip
+            #       * destination PORT: 6633
+            main.ONOS1.handle.sendline(
+                    "sudo iptables -A OUTPUT -p tcp -s "+MN1_ip+
+                    " --dport "+default_sw_port+" -j DROP")
+            main.ONOS1.handle.expect("\$")
+            #Give time to allow rule to take effect
+            #NOTE: Sleep period may need to be configured 
+            #      based on the number of switches in the topology
+            main.log.info("Please wait for switch connection to "+
+                    "time out")
+            time.sleep(60)
+            
+            #Gather vendor OFP with tshark
+            main.ONOS1.tshark_grep("OFP 86 Vendor", 
+                    tshark_ofp_output)
+            main.ONOS1.tshark_grep("TCP 74 ",
+                    tshark_tcp_output)
+
+            #NOTE: Remove all iptables rule quickly (flush)
+            #      Before removal, obtain TestON timestamp at which 
+            #      removal took place
+            #      (ensuring nodes are configured via ptp)
+            #      sudo iptables -F
+            
+            t0_system = time.time() * 1000
+            main.ONOS1.handle.sendline(
+                    "sudo iptables -F")
+
+            #Counter to track loop count
+            counter_loop = 0
+            counter_avail1 = 0
+            counter_avail2 = 0
+            counter_avail3 = 0
+            onos1_dev = False
+            onos2_dev = False
+            onos3_dev = False
+            while counter_loop < 60:
+                #Continue to check devices for all device
+                #availability. When all devices in all 3
+                #ONOS instances indicate that devices are available
+                #obtain graph event timestamp for t1.
+                #Reset availability counters each poll so a partial
+                #count does not carry over between passes
+                counter_avail1 = 0
+                counter_avail2 = 0
+                counter_avail3 = 0
+                device_str_obj1 = main.ONOS1cli.devices()
+                device_str_obj2 = main.ONOS2cli.devices()
+                device_str_obj3 = main.ONOS3cli.devices()
+
+                device_json1 = json.loads(device_str_obj1)                
+                device_json2 = json.loads(device_str_obj2)                
+                device_json3 = json.loads(device_str_obj3)           
+                
+                for device1 in device_json1:
+                    if device1['available'] == True:
+                        counter_avail1 += 1
+                        if counter_avail1 == int(num_sw):
+                            onos1_dev = True
+                            main.log.info("All devices have been "+
+                                    "discovered on ONOS1")
+                    else:
+                        counter_avail1 = 0
+                for device2 in device_json2:
+                    if device2['available'] == True:
+                        counter_avail2 += 1
+                        if counter_avail2 == int(num_sw):
+                            onos2_dev = True
+                            main.log.info("All devices have been "+
+                                    "discovered on ONOS2")
+                    else:
+                        counter_avail2 = 0
+                for device3 in device_json3:
+                    if device3['available'] == True:
+                        counter_avail3 += 1
+                        if counter_avail3 == int(num_sw):
+                            onos3_dev = True
+                            main.log.info("All devices have been "+
+                                    "discovered on ONOS3")
+                    else:
+                        counter_avail3 = 0
+
+                if onos1_dev and onos2_dev and onos3_dev:
+                    main.log.info("All devices have been discovered "+
+                            "on all ONOS instances")
+                    json_str_topology_metrics_1 =\
+                        main.ONOS1cli.topology_events_metrics()
+                    json_str_topology_metrics_2 =\
+                        main.ONOS2cli.topology_events_metrics()
+                    json_str_topology_metrics_3 =\
+                        main.ONOS3cli.topology_events_metrics()
+                   
+                    #Exit while loop if all devices discovered
+                    break 
+                
+                counter_loop += 1
+                #Give some time in between CLI calls
+                #(will not affect measurement)
+                time.sleep(3)
+
+            main.ONOS1.tshark_stop()
+            
+            os.system("scp "+ONOS_user+"@"+ONOS1_ip+":"+
+                    tshark_ofp_output+" /tmp/") 
+            os.system("scp "+ONOS_user+"@"+ONOS1_ip+":"+
+                    tshark_tcp_output+" /tmp/")
+
+            #TODO: Automate OFP output analysis
+            #Debug mode - print out packets captured at runtime     
+            if debug_mode == 'on': 
+                ofp_file = open(tshark_ofp_output, 'r')
+                main.log.info("Tshark OFP Vendor output: ")
+                for line in ofp_file:
+                    tshark_ofp_result_list.append(line)
+                    main.log.info(line)
+                ofp_file.close()
+
+                tcp_file = open(tshark_tcp_output, 'r')
+                main.log.info("Tshark TCP 74 output: ")
+                for line in tcp_file:
+                    tshark_tcp_result_list.append(line)
+                    main.log.info(line)
+                tcp_file.close()
+
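+            #NOTE: json_str_topology_metrics_* are only assigned inside
+            #the discovery loop above; if that loop times out before all
+            #devices are discovered, the loads below raise NameError.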
+            json_obj_1 = json.loads(json_str_topology_metrics_1)
+            json_obj_2 = json.loads(json_str_topology_metrics_2)
+            json_obj_3 = json.loads(json_str_topology_metrics_3)
+
+            graph_timestamp_1 = \
+                    json_obj_1[graphTimestamp]['value']
+            graph_timestamp_2 = \
+                    json_obj_2[graphTimestamp]['value']
+            graph_timestamp_3 = \
+                    json_obj_3[graphTimestamp]['value']
+
+            graph_lat_1 = int(graph_timestamp_1) - int(t0_system)
+            graph_lat_2 = int(graph_timestamp_2) - int(t0_system)
+            graph_lat_3 = int(graph_timestamp_3) - int(t0_system)
+
+            avg_graph_lat = \
+                    (int(graph_lat_1) +\
+                     int(graph_lat_2) +\
+                     int(graph_lat_3)) / 3
+    
+            if avg_graph_lat > sw_disc_threshold_min \
+                    and avg_graph_lat < sw_disc_threshold_max:
+                sw_discovery_lat_list.append(
+                        avg_graph_lat)
+            else:
+                main.log.info("100 Switch discovery latency "+
+                        "exceeded the threshold.")
+            
+            #END ITERATION FOR LOOP
+
+        sw_lat_min = min(sw_discovery_lat_list)
+        sw_lat_max = max(sw_discovery_lat_list)
+        sw_lat_avg = sum(sw_discovery_lat_list) /\
+                     len(sw_discovery_lat_list)
+
+        main.log.report("100 Switch discovery lat "+\
+                "Min: "+str(sw_lat_min)+" ms"+\
+                "Max: "+str(sw_lat_max)+" ms"+\
+                "Avg: "+str(sw_lat_avg)+" ms")
+
+    def CASE6(self, main):
+        '''
+        Increase number of nodes and initiate CLI
+        '''
+        import time
+        
+        ONOS1_ip = main.params['CTRL']['ip1']
+        ONOS2_ip = main.params['CTRL']['ip2']
+        ONOS3_ip = main.params['CTRL']['ip3']
+        ONOS4_ip = main.params['CTRL']['ip4']
+        ONOS5_ip = main.params['CTRL']['ip5']
+        ONOS6_ip = main.params['CTRL']['ip6']
+        ONOS7_ip = main.params['CTRL']['ip7']
+
+        cell_name = main.params['ENV']['cellName']
+    
+        global cluster_count
+        
+        #Cluster size is increased by 2 every time this case is run
+        cluster_count += 2 
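+        #NOTE: with cluster_count starting at 1 in CASE1, successive runs
+        #of this case give 3, 5, 7; only the 5 and 7 branches below install
+        #new nodes, so the first scale-out step (3) is assumed to be
+        #handled elsewhere or is still WIP.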
+
+        main.log.report("Increasing cluster size to "+
+                str(cluster_count))
+
+        install_result = main.FALSE
+        if cluster_count == 5:
+            main.log.info("Installing nodes 4 and 5")
+            node4_result = \
+                main.ONOSbench.onos_install(node=ONOS4_ip)
+            node5_result = \
+                main.ONOSbench.onos_install(node=ONOS5_ip)
+            install_result = node4_result and node5_result
+
+            time.sleep(5)
+
+            main.ONOS4cli.start_onos_cli(ONOS4_ip)
+            main.ONOS5cli.start_onos_cli(ONOS5_ip)
+
+        elif cluster_count == 7:
+            main.log.info("Installing nodes 4 and 5")
+            node6_result = \
+                main.ONOSbench.onos_install(node=ONOS6_ip)
+            node7_result = \
+                main.ONOSbench.onos_install(node=ONOS7_ip)
+            install_result = node6_result and node7_result
+
+            time.sleep(5)
+
+            main.ONOS6cli.start_onos_cli(ONOS6_ip)
+            main.ONOS7cli.start_onos_cli(ONOS7_ip)
+
+
+
+
diff --git a/TestON/tests/TopoPerfNext/TopoPerfNext.params b/TestON/tests/TopoPerfNext/TopoPerfNext.params
index 851522c..8e31e62 100644
--- a/TestON/tests/TopoPerfNext/TopoPerfNext.params
+++ b/TestON/tests/TopoPerfNext/TopoPerfNext.params
@@ -1,5 +1,5 @@
 <PARAMS>
-    <testcases>1,2,3</testcases>
+    <testcases>1,2,3,6,2,3,6,2,3,6,2,3</testcases>
 
     <ENV>
         <cellName>topo_perf_test</cellName>
@@ -19,6 +19,11 @@
         <port2>6633</port2>
         <ip3>10.128.174.3</ip3>
         <port3>6633</port3>
+        <ip4>10.128.174.4</ip4>
+        
+        <ip5>10.128.174.5</ip5>
+        <ip6>10.128.174.6</ip6>
+        <ip7>10.128.174.7</ip7>
     </CTRL>
 
     <MN>
@@ -46,7 +51,7 @@
         </topo_config_name>
 
         #Number of times to iterate each case
-        <numIter>20</numIter>
+        <numIter>12</numIter>
         <numSwitch>2</numSwitch>
         #Number of iterations to ignore initially
         <iterIgnore>2</iterIgnore>
diff --git a/TestON/tests/TopoPerfNext/TopoPerfNext.py b/TestON/tests/TopoPerfNext/TopoPerfNext.py
index 87d7378..b737f4c 100644
--- a/TestON/tests/TopoPerfNext/TopoPerfNext.py
+++ b/TestON/tests/TopoPerfNext/TopoPerfNext.py
@@ -1,39 +1,43 @@
-# TopoPerfNext
+#TopoPerfNext
 #
-# Topology Performance test for ONOS-next
+#Topology Performance test for ONOS-next
 #
-# andrew@onlab.us
+#andrew@onlab.us
 #
-# If your machine does not come with numpy
-# run the following command:
-# sudo apt-get install python-numpy python-scipy
+#If your machine does not come with numpy
+#run the following command:
+#sudo apt-get install python-numpy python-scipy 
 
 import time
 import sys
 import os
 import re
 
-
 class TopoPerfNext:
-
-    def __init__( self ):
+    def __init__(self):
         self.default = ''
 
-    def CASE1( self, main ):
-        """
+    def CASE1(self, main):
+        '''
         ONOS startup sequence
-        """
+        '''
         import time
+        
+        ## Global cluster count for scale-out purposes
+        global cluster_count
+        #Set initial cluster count
+        cluster_count = 1 
+        ##
 
-        cell_name = main.params[ 'ENV' ][ 'cellName' ]
+        cell_name = main.params['ENV']['cellName']
 
-        git_pull = main.params[ 'GIT' ][ 'autoPull' ]
-        checkout_branch = main.params[ 'GIT' ][ 'checkout' ]
+        git_pull = main.params['GIT']['autoPull']
+        checkout_branch = main.params['GIT']['checkout']
 
-        ONOS1_ip = main.params[ 'CTRL' ][ 'ip1' ]
-        ONOS2_ip = main.params[ 'CTRL' ][ 'ip2' ]
-        ONOS3_ip = main.params[ 'CTRL' ][ 'ip3' ]
-
+        ONOS1_ip = main.params['CTRL']['ip1']
+        ONOS2_ip = main.params['CTRL']['ip2']
+        ONOS3_ip = main.params['CTRL']['ip3']
+        
         #### Hardcoded ONOS nodes particular to my env ####
         ONOS4_ip = "10.128.174.4"
         ONOS5_ip = "10.128.174.5"
@@ -41,1507 +45,1694 @@
         ONOS7_ip = "10.128.174.7"
         #### ####
 
-        MN1_ip = main.params[ 'MN' ][ 'ip1' ]
-        BENCH_ip = main.params[ 'BENCH' ][ 'ip' ]
+        MN1_ip = main.params['MN']['ip1']
+        BENCH_ip = main.params['BENCH']['ip']
 
-        topo_cfg_file = main.params[ 'TEST' ][ 'topo_config_file' ]
-        topo_cfg_name = main.params[ 'TEST' ][ 'topo_config_name' ]
+        topo_cfg_file = main.params['TEST']['topo_config_file']
+        topo_cfg_name = main.params['TEST']['topo_config_name']
+        
+        main.case("Setting up test environment")
+        main.log.info("Copying topology event accumulator config"+\
+            " to ONOS /package/etc")
+        main.ONOSbench.handle.sendline("cp ~/"+\
+            topo_cfg_file+\
+            " ~/ONOS/tools/package/etc/"+\
+            topo_cfg_name)
+        main.ONOSbench.handle.expect("\$")
 
-        main.case( "Setting up test environment" )
-        main.log.info( "Copying topology event accumulator config" +
-                       " to ONOS /package/etc" )
-        main.ONOSbench.handle.sendline( "cp ~/" +
-                                        topo_cfg_file +
-                                        " ~/ONOS/tools/package/etc/" +
-                                        topo_cfg_name )
-        main.ONOSbench.handle.expect( "\$" )
+        main.log.report("Setting up test environment")
 
-        main.log.report( "Setting up test environment" )
+        main.step("Cleaning previously installed ONOS if any")
+        main.ONOSbench.onos_uninstall(node_ip=ONOS2_ip)
+        main.ONOSbench.onos_uninstall(node_ip=ONOS3_ip)
+        main.ONOSbench.onos_uninstall(node_ip=ONOS4_ip)
+        main.ONOSbench.onos_uninstall(node_ip=ONOS5_ip)
+        main.ONOSbench.onos_uninstall(node_ip=ONOS6_ip)
+        main.ONOSbench.onos_uninstall(node_ip=ONOS7_ip)
 
-        main.step( "Cleaning previously installed ONOS if any" )
-        main.ONOSbench.onos_uninstall( node_ip=ONOS4_ip )
-        main.ONOSbench.onos_uninstall( node_ip=ONOS5_ip )
-        main.ONOSbench.onos_uninstall( node_ip=ONOS6_ip )
-        main.ONOSbench.onos_uninstall( node_ip=ONOS7_ip )
-
-        main.step( "Creating cell file" )
+        main.step("Creating cell file")
         cell_file_result = main.ONOSbench.create_cell_file(
-            BENCH_ip, cell_name, MN1_ip, "onos-core,onos-app-metrics",
-            ONOS1_ip, ONOS2_ip, ONOS3_ip )
+                BENCH_ip, cell_name, MN1_ip, 
+                "onos-core,onos-app-metrics,onos-app-gui",
+                ONOS1_ip)
 
-        main.step( "Applying cell file to environment" )
-        cell_apply_result = main.ONOSbench.set_cell( cell_name )
+        main.step("Applying cell file to environment")
+        cell_apply_result = main.ONOSbench.set_cell(cell_name)
         verify_cell_result = main.ONOSbench.verify_cell()
-
-        # NOTE: This step may be removed after proper
+        
+        #NOTE: This step may be removed after proper 
         #      copy cat log functionality
-        main.step( "Removing raft/copy-cat logs from ONOS nodes" )
+        main.step("Removing raft/copy-cat logs from ONOS nodes")
         main.ONOSbench.onos_remove_raft_logs()
-        time.sleep( 30 )
+        time.sleep(30)
 
-        main.step( "Git checkout and pull " + checkout_branch )
+        main.step("Git checkout and pull "+checkout_branch)
         if git_pull == 'on':
-            checkout_result = \
-                main.ONOSbench.git_checkout( checkout_branch )
+            #checkout_result = \
+                    #        main.ONOSbench.git_checkout(checkout_branch)
+            checkout_result = main.TRUE
             pull_result = main.ONOSbench.git_pull()
         else:
             checkout_result = main.TRUE
             pull_result = main.TRUE
-            main.log.info( "Skipped git checkout and pull" )
+            main.log.info("Skipped git checkout and pull")
 
-        # TODO: Uncomment when wiki posting works
-        #main.log.report( "Commit information - " )
-        # main.ONOSbench.get_version( report=True )
+        main.log.report("Commit information - ")
+        main.ONOSbench.get_version(report=True)
 
-        main.step( "Using mvn clean & install" )
-        #mvn_result = main.ONOSbench.clean_install()
+        main.step("Using mvn clean & install")
+        mvn_result = main.ONOSbench.clean_install()
         mvn_result = main.TRUE
 
-        main.step( "Set cell for ONOS cli env" )
-        main.ONOS1cli.set_cell( cell_name )
-        main.ONOS2cli.set_cell( cell_name )
-        main.ONOS3cli.set_cell( cell_name )
+        main.step("Set cell for ONOS cli env")
+        main.ONOS1cli.set_cell(cell_name)
+        #main.ONOS2cli.set_cell(cell_name)
+        #main.ONOS3cli.set_cell(cell_name)
 
-        main.step( "Creating ONOS package" )
+        main.step("Creating ONOS package")
         package_result = main.ONOSbench.onos_package()
 
-        main.step( "Installing ONOS package" )
-        install1_result = main.ONOSbench.onos_install( node=ONOS1_ip )
-        install2_result = main.ONOSbench.onos_install( node=ONOS2_ip )
-        install3_result = main.ONOSbench.onos_install( node=ONOS3_ip )
+        main.step("Installing ONOS package")
+        install1_result = main.ONOSbench.onos_install(node=ONOS1_ip)
+        #install2_result = main.ONOSbench.onos_install(node=ONOS2_ip)
+        #install3_result = main.ONOSbench.onos_install(node=ONOS3_ip)
 
-        time.sleep( 10 )
+        time.sleep(10)
 
-        main.step( "Start onos cli" )
-        cli1 = main.ONOS1cli.start_onos_cli( ONOS1_ip )
-        cli2 = main.ONOS2cli.start_onos_cli( ONOS2_ip )
-        cli3 = main.ONOS3cli.start_onos_cli( ONOS3_ip )
+        main.step("Start onos cli")
+        cli1 = main.ONOS1cli.start_onos_cli(ONOS1_ip)
+        #cli2 = main.ONOS2cli.start_onos_cli(ONOS2_ip)
+        #cli3 = main.ONOS3cli.start_onos_cli(ONOS3_ip)
 
-        utilities.assert_equals( expect=main.TRUE,
-                                 actual=cell_file_result and cell_apply_result and
-                                 verify_cell_result and checkout_result and
-                                 pull_result and mvn_result and
-                                 install1_result and install2_result and
-                                 install3_result,
-                                 onpass="Test Environment setup successful",
-                                 onfail="Failed to setup test environment" )
+        utilities.assert_equals(expect=main.TRUE,
+                actual= cell_file_result and cell_apply_result and\
+                        verify_cell_result and checkout_result and\
+                        pull_result and mvn_result and\
+                        install1_result, #and install2_result and\
+                        #install3_result,
+                onpass="Test Environment setup successful",
+                onfail="Failed to setup test environment")
 
-    def CASE2( self, main ):
-        """
+    def CASE2(self, main):
+        '''
         Assign s1 to ONOS1 and measure latency
-
+        
         There are 4 levels of latency measurements to this test:
-        1 ) End-to-end measurement: Complete end-to-end measurement
-           from TCP ( SYN/ACK ) handshake to Graph change
-        2 ) OFP-to-graph measurement: 'ONOS processing' snippet of
+        1) End-to-end measurement: Complete end-to-end measurement
+           from TCP (SYN/ACK) handshake to Graph change
+        2) OFP-to-graph measurement: 'ONOS processing' snippet of
            measurement from OFP Vendor message to Graph change
-        3 ) OFP-to-device measurement: 'ONOS processing without
+        3) OFP-to-device measurement: 'ONOS processing without 
            graph change' snippet of measurement from OFP vendor
            message to Device change timestamp
-        4 ) T0-to-device measurement: Measurement that includes
-           the switch handshake to devices timestamp without
-           the graph view change. ( TCP handshake -> Device
-           change )
-        """
+        4) T0-to-device measurement: Measurement that includes
+           the switch handshake to devices timestamp without 
+           the graph view change. (TCP handshake -> Device 
+           change)
+        '''
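+        #In the code below these map roughly to:
+        #  1) graph_timestamp - t0_tcp     2) graph_timestamp - t0_ofp
+        #  3) device_timestamp - t0_ofp    4) device_timestamp - t0_tcp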
         import time
         import subprocess
         import json
         import requests
         import os
         import numpy
+        global cluster_count
 
-        ONOS1_ip = main.params[ 'CTRL' ][ 'ip1' ]
-        ONOS2_ip = main.params[ 'CTRL' ][ 'ip2' ]
-        ONOS3_ip = main.params[ 'CTRL' ][ 'ip3' ]
-        ONOS_user = main.params[ 'CTRL' ][ 'user' ]
+        ONOS1_ip = main.params['CTRL']['ip1']
+        ONOS2_ip = main.params['CTRL']['ip2']
+        ONOS3_ip = main.params['CTRL']['ip3']
+        ONOS4_ip = main.params['CTRL']['ip4']
+        ONOS5_ip = main.params['CTRL']['ip5']
+        ONOS6_ip = main.params['CTRL']['ip6']
+        ONOS7_ip = main.params['CTRL']['ip7']
 
-        default_sw_port = main.params[ 'CTRL' ][ 'port1' ]
+        ONOS_user = main.params['CTRL']['user']
 
-        # Number of iterations of case
-        num_iter = main.params[ 'TEST' ][ 'numIter' ]
-        # Number of first 'x' iterations to ignore:
-        iter_ignore = int( main.params[ 'TEST' ][ 'iterIgnore' ] )
+        default_sw_port = main.params['CTRL']['port1']
+       
+        #Number of iterations of case
+        num_iter = main.params['TEST']['numIter']
+        #Number of first 'x' iterations to ignore:
+        iter_ignore = int(main.params['TEST']['iterIgnore'])
 
-        # Timestamp 'keys' for json metrics output.
-        # These are subject to change, hence moved into params
-        deviceTimestamp = main.params[ 'JSON' ][ 'deviceTimestamp' ]
-        graphTimestamp = main.params[ 'JSON' ][ 'graphTimestamp' ]
+        #Timestamp 'keys' for json metrics output.
+        #These are subject to change, hence moved into params
+        deviceTimestamp = main.params['JSON']['deviceTimestamp']
+        graphTimestamp = main.params['JSON']['graphTimestamp']
 
-        debug_mode = main.params[ 'TEST' ][ 'debugMode' ]
-        onos_log = main.params[ 'TEST' ][ 'onosLogFile' ]
+        debug_mode = main.params['TEST']['debugMode']
+        onos_log = main.params['TEST']['onosLogFile']
 
-        # Threshold for the test
-        threshold_str = main.params[ 'TEST' ][ 'singleSwThreshold' ]
-        threshold_obj = threshold_str.split( "," )
-        threshold_min = int( threshold_obj[ 0 ] )
-        threshold_max = int( threshold_obj[ 1 ] )
+        #Threshold for the test
+        threshold_str = main.params['TEST']['singleSwThreshold']
+        threshold_obj = threshold_str.split(",")
+        threshold_min = int(threshold_obj[0])
+        threshold_max = int(threshold_obj[1])
 
-        # List of switch add latency collected from
-        # all iterations
+        #List of switch add latency collected from
+        #all iterations
         latency_end_to_end_list = []
         latency_ofp_to_graph_list = []
         latency_ofp_to_device_list = []
         latency_t0_to_device_list = []
         latency_tcp_to_ofp_list = []
 
-        # Directory/file to store tshark results
+        #Directory/file to store tshark results
         tshark_of_output = "/tmp/tshark_of_topo.txt"
         tshark_tcp_output = "/tmp/tshark_tcp_topo.txt"
 
-        # String to grep in tshark output
-        tshark_tcp_string = "TCP 74 " + default_sw_port
+        #String to grep in tshark output
+        tshark_tcp_string = "TCP 74 "+default_sw_port
         tshark_of_string = "OFP 86 Vendor"
-
-        # Initialize assertion to TRUE
+     
+        #Initialize assertion to TRUE
         assertion = main.TRUE
-
-        local_time = time.strftime( '%x %X' )
-        local_time = local_time.replace( "/", "" )
-        local_time = local_time.replace( " ", "_" )
-        local_time = local_time.replace( ":", "" )
+      
+        local_time = time.strftime('%x %X')
+        local_time = local_time.replace("/","")
+        local_time = local_time.replace(" ","_")
+        local_time = local_time.replace(":","")
         if debug_mode == 'on':
-            main.ONOS1.tshark_pcap( "eth0",
-                                    "/tmp/single_sw_lat_pcap_" + local_time )
+            main.ONOS1.tshark_pcap("eth0",
+                    "/tmp/single_sw_lat_pcap_"+local_time) 
 
-            main.log.info( "TEST" )
+            main.log.info("Debug mode is on")
 
-        main.log.report( "Latency of adding one switch to controller" )
-        main.log.report(
-            "First " +
-            str( iter_ignore ) +
-            " iterations ignored" +
-            " for jvm warmup time" )
-        main.log.report( "Total iterations of test: " + str( num_iter ) )
+        main.log.report("Latency of adding one switch to controller")
+        main.log.report("First "+str(iter_ignore)+" iterations ignored"+
+                " for jvm warmup time")
+        main.log.report("Total iterations of test: "+str(num_iter))
 
-        for i in range( 0, int( num_iter ) ):
-            main.log.info( "Starting tshark capture" )
+        for i in range(0, int(num_iter)):
+            main.log.info("Starting tshark capture")
 
-            #* TCP [ ACK, SYN ] is used as t0_a, the
-            #  very first "exchange" between ONOS and
+            #* TCP [ACK, SYN] is used as t0_a, the
+            #  very first "exchange" between ONOS and 
             #  the switch for end-to-end measurement
-            #* OFP [ Stats Reply ] is used for t0_b
+            #* OFP [Stats Reply] is used for t0_b
             #  the very last OFP message between ONOS
             #  and the switch for ONOS measurement
-            main.ONOS1.tshark_grep( tshark_tcp_string,
-                                    tshark_tcp_output )
-            main.ONOS1.tshark_grep( tshark_of_string,
-                                    tshark_of_output )
+            main.ONOS1.tshark_grep(tshark_tcp_string,
+                    tshark_tcp_output)
+            main.ONOS1.tshark_grep(tshark_of_string,
+                    tshark_of_output)
 
-            # Wait and ensure tshark is started and
-            # capturing
-            time.sleep( 10 )
+            #Wait and ensure tshark is started and 
+            #capturing
+            time.sleep(10)
 
-            main.log.info( "Assigning s1 to controller" )
+            main.log.info("Assigning s1 to controller")
 
-            main.Mininet1.assign_sw_controller(
-                sw="1",
-                ip1=ONOS1_ip,
-                port1=default_sw_port )
+            main.Mininet1.assign_sw_controller(sw="1",
+                    ip1=ONOS1_ip, port1=default_sw_port)
 
-            # Wait and ensure switch is assigned
-            # before stopping tshark
-            time.sleep( 30 )
-
-            main.log.info( "Stopping all Tshark processes" )
+            #Wait and ensure switch is assigned
+            #before stopping tshark
+            time.sleep(30)
+   
+            main.log.info("Stopping all Tshark processes")
             main.ONOS1.stop_tshark()
 
-            # tshark output is saved in ONOS. Use subprocess
-            # to copy over files to TestON for parsing
-            main.log.info( "Copying over tshark files" )
-
-            # TCP CAPTURE ****
-            # Copy the tshark output from ONOS machine to
-            # TestON machine in tshark_tcp_output directory>file
-            os.system( "scp " + ONOS_user + "@" + ONOS1_ip + ":" +
-                       tshark_tcp_output + " /tmp/" )
-            tcp_file = open( tshark_tcp_output, 'r' )
+            #tshark output is saved in ONOS. Use subprocess
+            #to copy over files to TestON for parsing
+            main.log.info("Copying over tshark files")
+            
+            #TCP CAPTURE ****
+            #Copy the tshark output from ONOS machine to
+            #TestON machine in tshark_tcp_output directory>file
+            os.system("scp "+ONOS_user+"@"+ONOS1_ip+":"+
+                    tshark_tcp_output+" /tmp/") 
+            tcp_file = open(tshark_tcp_output, 'r')
             temp_text = tcp_file.readline()
-            temp_text = temp_text.split( " " )
+            temp_text = temp_text.split(" ")
 
-            main.log.info( "Object read in from TCP capture: " +
-                           str( temp_text ) )
-            if len( temp_text ) > 1:
-                t0_tcp = float( temp_text[ 1 ] ) * 1000.0
+            main.log.info("Object read in from TCP capture: "+
+                    str(temp_text))
+            if len(temp_text) > 1:
+                t0_tcp = float(temp_text[1])*1000.0
             else:
-                main.log.error( "Tshark output file for TCP" +
-                                " returned unexpected results" )
+                main.log.error("Tshark output file for TCP"+
+                        " returned unexpected results")
                 t0_tcp = 0
                 assertion = main.FALSE
-
+            
             tcp_file.close()
             #****************
 
-            # OF CAPTURE ****
-            os.system( "scp " + ONOS_user + "@" + ONOS1_ip + ":" +
-                       tshark_of_output + " /tmp/" )
-            of_file = open( tshark_of_output, 'r' )
-
+            #OF CAPTURE ****
+            os.system("scp "+ONOS_user+"@"+ONOS1_ip+":"+
+                    tshark_of_output+" /tmp/")
+            of_file = open(tshark_of_output, 'r')
+           
             line_ofp = ""
-            # Read until last line of file
+            #Read until last line of file
             while True:
                 temp_text = of_file.readline()
-                if temp_text != '':
+                if temp_text !='':
                     line_ofp = temp_text
                 else:
-                    break
-            obj = line_ofp.split( " " )
-
-            main.log.info( "Object read in from OFP capture: " +
-                           str( line_ofp ) )
-
-            if len( line_ofp ) > 1:
-                t0_ofp = float( obj[ 1 ] ) * 1000.0
+                    break 
+            obj = line_ofp.split(" ")
+            
+            main.log.info("Object read in from OFP capture: "+
+                    str(line_ofp))
+    
+            if len(line_ofp) > 1:
+                t0_ofp = float(obj[1])*1000.0
             else:
-                main.log.error( "Tshark output file for OFP" +
-                                " returned unexpected results" )
+                main.log.error("Tshark output file for OFP"+
+                        " returned unexpected results")
                 t0_ofp = 0
                 assertion = main.FALSE
-
+            
             of_file.close()
             #****************
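+
+            # A possible shared parser (a sketch, defined here but not yet
+            # used): the TCP and OF blocks above both read a tshark grep
+            # capture, split a line on spaces, and convert field 1 (assumed
+            # to hold epoch seconds) into milliseconds; only "first line"
+            # versus "last line" differs between the two.
+            def tshark_timestamp_ms(path, use_last_line=False):
+                # Return 0 if the capture file is empty or malformed
+                ts = 0
+                with open(path, 'r') as f:
+                    lines = [ln for ln in f.readlines() if ln.strip()]
+                if lines:
+                    fields = (lines[-1] if use_last_line else lines[0]).split(" ")
+                    if len(fields) > 1:
+                        ts = float(fields[1]) * 1000.0
+                return ts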
-
+           
             json_str_1 = main.ONOS1cli.topology_events_metrics()
-            json_str_2 = main.ONOS2cli.topology_events_metrics()
-            json_str_3 = main.ONOS3cli.topology_events_metrics()
+            #Initialize scale-out variables 
+            json_str_2 = "" 
+            json_str_3 = ""
+            json_str_4 = ""
+            json_str_5 = ""
+            json_str_6 = ""
+            json_str_7 = ""
 
-            json_obj_1 = json.loads( json_str_1 )
-            json_obj_2 = json.loads( json_str_2 )
-            json_obj_3 = json.loads( json_str_3 )
-
-            # Obtain graph timestamp. This timestsamp captures
-            # the epoch time at which the topology graph was updated.
+            json_obj_1 = json.loads(json_str_1)
+            #Initialize scale-out variables
+            json_obj_2 = ""
+            json_obj_3 = "" 
+            json_obj_4 = ""
+            json_obj_5 = ""
+            json_obj_6 = ""
+            json_obj_7 = ""
+            
+            #Obtain graph timestamp. This timestamp captures
+            #the epoch time at which the topology graph was updated.
             graph_timestamp_1 = \
-                json_obj_1[ graphTimestamp ][ 'value' ]
-            graph_timestamp_2 = \
-                json_obj_2[ graphTimestamp ][ 'value' ]
-            graph_timestamp_3 = \
-                json_obj_3[ graphTimestamp ][ 'value' ]
-
-            # Obtain device timestamp. This timestamp captures
-            # the epoch time at which the device event happened
+                    json_obj_1[graphTimestamp]['value']
+            #Obtain device timestamp. This timestamp captures
+            #the epoch time at which the device event happened
             device_timestamp_1 = \
-                json_obj_1[ deviceTimestamp ][ 'value' ]
-            device_timestamp_2 = \
-                json_obj_2[ deviceTimestamp ][ 'value' ]
-            device_timestamp_3 = \
-                json_obj_3[ deviceTimestamp ][ 'value' ]
+                    json_obj_1[deviceTimestamp]['value'] 
+           
+            #t0 to device processing latency 
+            delta_device_1 = int(device_timestamp_1) - int(t0_tcp)
+            
+            #t0 to graph processing latency (end-to-end)
+            delta_graph_1 = int(graph_timestamp_1) - int(t0_tcp)
+            
+            #ofp to graph processing latency (ONOS processing)
+            delta_ofp_graph_1 = int(graph_timestamp_1) - int(t0_ofp)
+            
+            #ofp to device processing latency (ONOS processing)
+            delta_ofp_device_1 = float(device_timestamp_1) - float(t0_ofp)
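+
+            # Worked example with hypothetical numbers: if t0_tcp = 1000 ms,
+            # t0_ofp = 1003 ms, device_timestamp_1 = 1010 ms and
+            # graph_timestamp_1 = 1015 ms, then the four deltas above are
+            # 10 ms, 15 ms, 12 ms and 7 ms respectively.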
 
-            # t0 to device processing latency
-            delta_device_1 = int( device_timestamp_1 ) - int( t0_tcp )
-            delta_device_2 = int( device_timestamp_2 ) - int( t0_tcp )
-            delta_device_3 = int( device_timestamp_3 ) - int( t0_tcp )
+            #TODO: Add measurements for even cluster sizes (2, 4, 6 nodes)
 
-            # Get average of delta from all instances
+            #Include scale-out measurements when applicable
+            if cluster_count >= 3:
+                json_str_2 = main.ONOS2cli.topology_events_metrics()
+                json_str_3 = main.ONOS3cli.topology_events_metrics()
+                json_obj_2 = json.loads(json_str_2)
+                json_obj_3 = json.loads(json_str_3)
+                graph_timestamp_2 = \
+                    json_obj_2[graphTimestamp]['value']
+                graph_timestamp_3 = \
+                    json_obj_3[graphTimestamp]['value']
+                device_timestamp_2 = \
+                    json_obj_2[deviceTimestamp]['value'] 
+                device_timestamp_3 = \
+                    json_obj_3[deviceTimestamp]['value'] 
+                delta_device_2 = int(device_timestamp_2) - int(t0_tcp)
+                delta_device_3 = int(device_timestamp_3) - int(t0_tcp)
+                delta_graph_2 = int(graph_timestamp_2) - int(t0_tcp)
+                delta_graph_3 = int(graph_timestamp_3) - int(t0_tcp)
+                delta_ofp_graph_2 = int(graph_timestamp_2) - int(t0_ofp)
+                delta_ofp_graph_3 = int(graph_timestamp_3) - int(t0_ofp)
+                delta_ofp_device_2 = float(device_timestamp_2) -\
+                        float(t0_ofp)
+                delta_ofp_device_3 = float(device_timestamp_3) -\
+                        float(t0_ofp)
+            else:
+                delta_device_2 = 0
+                delta_device_3 = 0
+                delta_graph_2 = 0
+                delta_graph_3 = 0
+                delta_ofp_graph_2 = 0
+                delta_ofp_graph_3 = 0
+                delta_ofp_device_2 = 0
+                delta_ofp_device_3 = 0
+
+            if cluster_count >= 5:
+                json_str_4 = main.ONOS4cli.topology_events_metrics()
+                json_str_5 = main.ONOS5cli.topology_events_metrics()
+                json_obj_4 = json.loads(json_str_4)
+                json_obj_5 = json.loads(json_str_5)
+                graph_timestamp_4 = \
+                    json_obj_4[graphTimestamp]['value']
+                graph_timestamp_5 = \
+                    json_obj_5[graphTimestamp]['value']
+                device_timestamp_4 = \
+                    json_obj_4[deviceTimestamp]['value'] 
+                device_timestamp_5 = \
+                    json_obj_5[deviceTimestamp]['value'] 
+                delta_device_4 = int(device_timestamp_4) - int(t0_tcp)
+                delta_device_5 = int(device_timestamp_5) - int(t0_tcp)
+                delta_graph_4 = int(graph_timestamp_4) - int(t0_tcp)
+                delta_graph_5 = int(graph_timestamp_5) - int(t0_tcp)
+                delta_ofp_graph_4 = int(graph_timestamp_4) - int(t0_ofp)
+                delta_ofp_graph_5 = int(graph_timestamp_5) - int(t0_ofp)
+                delta_ofp_device_4 = float(device_timestamp_4) -\
+                        float(t0_ofp)
+                delta_ofp_device_5 = float(device_timestamp_5) -\
+                        float(t0_ofp)
+            else:
+                delta_device_4 = 0
+                delta_device_5 = 0
+                delta_graph_4 = 0
+                delta_graph_5 = 0
+                delta_ofp_graph_4 = 0
+                delta_ofp_graph_5 = 0
+                delta_ofp_device_4 = 0
+                delta_ofp_device_5 = 0
+
+            if cluster_count >= 7:
+                json_str_6 = main.ONOS6cli.topology_events_metrics()
+                json_str_7 = main.ONOS7cli.topology_events_metrics()
+                json_obj_6 = json.loads(json_str_6)
+                json_obj_7 = json.loads(json_str_7)
+                graph_timestamp_6 = \
+                    json_obj_6[graphTimestamp]['value']
+                graph_timestamp_7 = \
+                    json_obj_7[graphTimestamp]['value']
+                device_timestamp_6 = \
+                    json_obj_6[deviceTimestamp]['value'] 
+                device_timestamp_7 = \
+                    json_obj_7[deviceTimestamp]['value'] 
+                delta_device_6 = int(device_timestamp_6) - int(t0_tcp)
+                delta_device_7 = int(device_timestamp_7) - int(t0_tcp)
+                delta_graph_6 = int(graph_timestamp_6) - int(t0_tcp)
+                delta_graph_7 = int(graph_timestamp_7) - int(t0_tcp)
+                delta_ofp_graph_6 = int(graph_timestamp_6) - int(t0_ofp)
+                delta_ofp_graph_7 = int(graph_timestamp_7) - int(t0_ofp)
+                delta_ofp_device_6 = float(device_timestamp_6) -\
+                        float(t0_ofp)
+                delta_ofp_device_7 = float(device_timestamp_7) -\
+                        float(t0_ofp)
+            else:
+                delta_device_6 = 0
+                delta_device_7 = 0
+                delta_graph_6 = 0
+                delta_graph_7 = 0
+                delta_ofp_graph_6 = 0 
+                delta_ofp_graph_7 = 0
+                delta_ofp_device_6 = 0
+                delta_ofp_device_7 = 0
+
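+            # Sketch of a possible consolidation (defined here but not yet
+            # called): the three cluster_count blocks above repeat the same
+            # per-node collection. A loop over the active CLI handles,
+            # assumed reachable as main.ONOS<n>cli as elsewhere in this
+            # test, could produce the graph deltas in one place.
+            def collect_graph_deltas(active_count, t0_ms):
+                deltas = []
+                for n in range(1, active_count + 1):
+                    cli = getattr(main, "ONOS" + str(n) + "cli")
+                    node_json = json.loads(cli.topology_events_metrics())
+                    graph_ts = node_json[graphTimestamp]['value']
+                    deltas.append(int(graph_ts) - int(t0_ms))
+                return deltas
+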
+            #Get average of delta from all instances
             avg_delta_device = \
-                ( int( delta_device_1 ) +
-                  int( delta_device_2 ) +
-                  int( delta_device_3 ) ) / 3
+                    (int(delta_device_1)+\
+                     int(delta_device_2)+\
+                     int(delta_device_3)+\
+                     int(delta_device_4)+\
+                     int(delta_device_5)+\
+                     int(delta_device_6)+\
+                     int(delta_device_7)) / cluster_count 
 
-            # Ensure avg delta meets the threshold before appending
+            #Ensure avg delta meets the threshold before appending
             if avg_delta_device > 0.0 and avg_delta_device < 10000\
-                    and int( i ) > iter_ignore:
-                latency_t0_to_device_list.append( avg_delta_device )
+                    and int(i) > iter_ignore:
+                latency_t0_to_device_list.append(avg_delta_device)
             else:
-                main.log.info(
-                    "Results for t0-to-device ignored" +
-                    "due to excess in threshold / warmup iteration." )
+                main.log.info("Results for t0-to-device ignored"+\
+                        "due to excess in threshold / warmup iteration.")
 
-            # t0 to graph processing latency ( end-to-end )
-            delta_graph_1 = int( graph_timestamp_1 ) - int( t0_tcp )
-            delta_graph_2 = int( graph_timestamp_2 ) - int( t0_tcp )
-            delta_graph_3 = int( graph_timestamp_3 ) - int( t0_tcp )
-
-            # Get average of delta from all instances
+            #Get average of delta from all instances
+            #TODO: use max delta graph
+            #max_delta_graph = max(three)
             avg_delta_graph = \
-                ( int( delta_graph_1 ) +
-                  int( delta_graph_2 ) +
-                  int( delta_graph_3 ) ) / 3
+                    (int(delta_graph_1)+\
+                     int(delta_graph_2)+\
+                     int(delta_graph_3)+\
+                     int(delta_graph_4)+\
+                     int(delta_graph_5)+\
+                     int(delta_graph_6)+\
+                     int(delta_graph_7)) / cluster_count
 
-            # Ensure avg delta meets the threshold before appending
+            #Ensure avg delta meets the threshold before appending
             if avg_delta_graph > 0.0 and avg_delta_graph < 10000\
-                    and int( i ) > iter_ignore:
-                latency_end_to_end_list.append( avg_delta_graph )
+                    and int(i) > iter_ignore:
+                latency_end_to_end_list.append(avg_delta_graph)
             else:
-                main.log.info( "Results for end-to-end ignored" +
-                               "due to excess in threshold" )
+                main.log.info("Results for end-to-end ignored"+\
+                        "due to excess in threshold")
 
-            # ofp to graph processing latency ( ONOS processing )
-            delta_ofp_graph_1 = int( graph_timestamp_1 ) - int( t0_ofp )
-            delta_ofp_graph_2 = int( graph_timestamp_2 ) - int( t0_ofp )
-            delta_ofp_graph_3 = int( graph_timestamp_3 ) - int( t0_ofp )
-
+            
             avg_delta_ofp_graph = \
-                ( int( delta_ofp_graph_1 ) +
-                  int( delta_ofp_graph_2 ) +
-                  int( delta_ofp_graph_3 ) ) / 3
-
+                    (int(delta_ofp_graph_1)+\
+                     int(delta_ofp_graph_2)+\
+                     int(delta_ofp_graph_3)+\
+                     int(delta_ofp_graph_4)+\
+                     int(delta_ofp_graph_5)+\
+                     int(delta_ofp_graph_6)+\
+                     int(delta_ofp_graph_7)) / cluster_count 
+            
             if avg_delta_ofp_graph > threshold_min \
                     and avg_delta_ofp_graph < threshold_max\
-                    and int( i ) > iter_ignore:
-                latency_ofp_to_graph_list.append( avg_delta_ofp_graph )
-            elif avg_delta_ofp_graph > ( -10 ) and \
+                    and int(i) > iter_ignore:
+                latency_ofp_to_graph_list.append(avg_delta_ofp_graph)
+            elif avg_delta_ofp_graph > (-10) and \
                     avg_delta_ofp_graph < 0.0 and\
-                    int( i ) > iter_ignore:
-                main.log.info( "Sub-millisecond result likely; " +
-                               "negative result was rounded to 0" )
-                # NOTE: Current metrics framework does not
-                # support sub-millisecond accuracy. Therefore,
-                # if the result is negative, we can reasonably
-                # conclude sub-millisecond results and just
-                # append the best rounded effort - 0 ms.
-                latency_ofp_to_graph_list.append( 0 )
+                    int(i) > iter_ignore:
+                main.log.info("Sub-millisecond result likely; "+
+                    "negative result was rounded to 0")
+                #NOTE: Current metrics framework does not 
+                #support sub-millisecond accuracy. Therefore,
+                #if the result is negative, we can reasonably
+                #conclude sub-millisecond results and just 
+                #append the best rounded effort - 0 ms. 
+                latency_ofp_to_graph_list.append(0)
             else:
-                main.log.info( "Results for ofp-to-graph " +
-                               "ignored due to excess in threshold" )
+                main.log.info("Results for ofp-to-graph "+\
+                        "ignored due to excess in threshold")
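+
+            # The rounding rule above in one expression (illustrative only;
+            # acceptance against the thresholds is still decided by the
+            # branch above):
+            rounded_ofp_graph = max(0, avg_delta_ofp_graph)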
 
-            # ofp to device processing latency ( ONOS processing )
-            delta_ofp_device_1 = float( device_timestamp_1 ) - float( t0_ofp )
-            delta_ofp_device_2 = float( device_timestamp_2 ) - float( t0_ofp )
-            delta_ofp_device_3 = float( device_timestamp_3 ) - float( t0_ofp )
-
+            
             avg_delta_ofp_device = \
-                ( float( delta_ofp_device_1 ) +
-                  float( delta_ofp_device_2 ) +
-                  float( delta_ofp_device_3 ) ) / 3
-
-            # NOTE: ofp - delta measurements are occasionally negative
+                    (float(delta_ofp_device_1)+\
+                     float(delta_ofp_device_2)+\
+                     float(delta_ofp_device_3)+\
+                     float(delta_ofp_device_4)+\
+                     float(delta_ofp_device_5)+\
+                     float(delta_ofp_device_6)+\
+                     float(delta_ofp_device_7)) / cluster_count 
+            
+            #NOTE: ofp - delta measurements are occasionally negative
             #      due to system time misalignment.
-            latency_ofp_to_device_list.append( avg_delta_ofp_device )
+            latency_ofp_to_device_list.append(avg_delta_ofp_device)
 
-            delta_ofp_tcp = int( t0_ofp ) - int( t0_tcp )
+            delta_ofp_tcp = int(t0_ofp) - int(t0_tcp)
             if delta_ofp_tcp > threshold_min \
                     and delta_ofp_tcp < threshold_max and\
-                    int( i ) > iter_ignore:
-                latency_tcp_to_ofp_list.append( delta_ofp_tcp )
+                    int(i) > iter_ignore:
+                latency_tcp_to_ofp_list.append(delta_ofp_tcp)
             else:
-                main.log.info( "Results fo tcp-to-ofp " +
-                               "ignored due to excess in threshold" )
+                main.log.info("Results fo tcp-to-ofp "+\
+                        "ignored due to excess in threshold")
 
-            # TODO:
-            # Fetch logs upon threshold excess
+            #TODO:
+            #Fetch logs upon threshold excess
 
-            main.log.info( "ONOS1 delta end-to-end: " +
-                           str( delta_graph_1 ) + " ms" )
-            main.log.info( "ONOS2 delta end-to-end: " +
-                           str( delta_graph_2 ) + " ms" )
-            main.log.info( "ONOS3 delta end-to-end: " +
-                           str( delta_graph_3 ) + " ms" )
+            main.log.info("ONOS1 delta end-to-end: "+
+                    str(delta_graph_1) + " ms")
 
-            main.log.info( "ONOS1 delta OFP - graph: " +
-                           str( delta_ofp_graph_1 ) + " ms" )
-            main.log.info( "ONOS2 delta OFP - graph: " +
-                           str( delta_ofp_graph_2 ) + " ms" )
-            main.log.info( "ONOS3 delta OFP - graph: " +
-                           str( delta_ofp_graph_3 ) + " ms" )
+            main.log.info("ONOS1 delta OFP - graph: "+
+                    str(delta_ofp_graph_1) + " ms")
+            
+            main.log.info("ONOS1 delta device - t0: "+
+                    str(delta_device_1) + " ms")
+         
+            main.log.info("TCP to OFP delta: "+
+                    str(delta_ofp_tcp) + " ms")
 
-            main.log.info( "ONOS1 delta device - t0: " +
-                           str( delta_device_1 ) + " ms" )
-            main.log.info( "ONOS2 delta device - t0: " +
-                           str( delta_device_2 ) + " ms" )
-            main.log.info( "ONOS3 delta device - t0: " +
-                           str( delta_device_3 ) + " ms" )
+            main.step("Remove switch from controller")
+            main.Mininet1.delete_sw_controller("s1")
 
-            main.log.info( "TCP to OFP delta: " +
-                           str( delta_ofp_tcp ) + " ms" )
-            # main.log.info( "ONOS1 delta OFP - device: "+
-            #        str( delta_ofp_device_1 ) + " ms" )
-            # main.log.info( "ONOS2 delta OFP - device: "+
-            #        str( delta_ofp_device_2 ) + " ms" )
-            # main.log.info( "ONOS3 delta OFP - device: "+
-            #        str( delta_ofp_device_3 ) + " ms" )
+            time.sleep(5)
 
-            main.step( "Remove switch from controller" )
-            main.Mininet1.delete_sw_controller( "s1" )
+        #END of for loop iteration
 
-            time.sleep( 5 )
-
-        # END of for loop iteration
-
-        # If there is at least 1 element in each list,
-        # pass the test case
-        if len( latency_end_to_end_list ) > 0 and\
-           len( latency_ofp_to_graph_list ) > 0 and\
-           len( latency_ofp_to_device_list ) > 0 and\
-           len( latency_t0_to_device_list ) > 0 and\
-           len( latency_tcp_to_ofp_list ) > 0:
+        #If there is at least 1 element in each list,
+        #pass the test case
+        if len(latency_end_to_end_list) > 0 and\
+           len(latency_ofp_to_graph_list) > 0 and\
+           len(latency_ofp_to_device_list) > 0 and\
+           len(latency_t0_to_device_list) > 0 and\
+           len(latency_tcp_to_ofp_list) > 0:
             assertion = main.TRUE
-        elif len( latency_end_to_end_list ) == 0:
-            # The appending of 0 here is to prevent
-            # the min,max,sum functions from failing
-            # below
-            latency_end_to_end_list.append( 0 )
+        elif len(latency_end_to_end_list) == 0:
+            #The appending of 0 here is to prevent 
+            #the min,max,sum functions from failing 
+            #below
+            latency_end_to_end_list.append(0)
             assertion = main.FALSE
-        elif len( latency_ofp_to_graph_list ) == 0:
-            latency_ofp_to_graph_list.append( 0 )
+        elif len(latency_ofp_to_graph_list) == 0:
+            latency_ofp_to_graph_list.append(0)
             assertion = main.FALSE
-        elif len( latency_ofp_to_device_list ) == 0:
-            latency_ofp_to_device_list.append( 0 )
+        elif len(latency_ofp_to_device_list) == 0:
+            latency_ofp_to_device_list.append(0)
             assertion = main.FALSE
-        elif len( latency_t0_to_device_list ) == 0:
-            latency_t0_to_device_list.append( 0 )
+        elif len(latency_t0_to_device_list) == 0:
+            latency_t0_to_device_list.append(0)
             assertion = main.FALSE
-        elif len( latency_tcp_to_ofp_list ) == 0:
-            latency_tcp_to_ofp_list.append( 0 )
+        elif len(latency_tcp_to_ofp_list) == 0:
+            latency_tcp_to_ofp_list.append(0)
             assertion = main.FALSE
 
-        # Calculate min, max, avg of latency lists
+        #Calculate min, max, avg of latency lists
         latency_end_to_end_max = \
-            int( max( latency_end_to_end_list ) )
+                int(max(latency_end_to_end_list))
         latency_end_to_end_min = \
-            int( min( latency_end_to_end_list ) )
+                int(min(latency_end_to_end_list))
         latency_end_to_end_avg = \
-            ( int( sum( latency_end_to_end_list ) ) /
-              len( latency_end_to_end_list ) )
+                (int(sum(latency_end_to_end_list)) / \
+                 len(latency_end_to_end_list))
         latency_end_to_end_std_dev = \
-            str( round( numpy.std( latency_end_to_end_list ), 1 ) )
+                str(round(numpy.std(latency_end_to_end_list),1))
 
         latency_ofp_to_graph_max = \
-            int( max( latency_ofp_to_graph_list ) )
+                int(max(latency_ofp_to_graph_list))
         latency_ofp_to_graph_min = \
-            int( min( latency_ofp_to_graph_list ) )
+                int(min(latency_ofp_to_graph_list))
         latency_ofp_to_graph_avg = \
-            ( int( sum( latency_ofp_to_graph_list ) ) /
-              len( latency_ofp_to_graph_list ) )
+                (int(sum(latency_ofp_to_graph_list)) / \
+                 len(latency_ofp_to_graph_list))
         latency_ofp_to_graph_std_dev = \
-            str( round( numpy.std( latency_ofp_to_graph_list ), 1 ) )
+                str(round(numpy.std(latency_ofp_to_graph_list),1))
 
         latency_ofp_to_device_max = \
-            int( max( latency_ofp_to_device_list ) )
+                int(max(latency_ofp_to_device_list))
         latency_ofp_to_device_min = \
-            int( min( latency_ofp_to_device_list ) )
+                int(min(latency_ofp_to_device_list))
         latency_ofp_to_device_avg = \
-            ( int( sum( latency_ofp_to_device_list ) ) /
-              len( latency_ofp_to_device_list ) )
+                (int(sum(latency_ofp_to_device_list)) / \
+                 len(latency_ofp_to_device_list))
         latency_ofp_to_device_std_dev = \
-            str( round( numpy.std( latency_ofp_to_device_list ), 1 ) )
+                str(round(numpy.std(latency_ofp_to_device_list),1))
 
         latency_t0_to_device_max = \
-            int( max( latency_t0_to_device_list ) )
+                int(max(latency_t0_to_device_list))
         latency_t0_to_device_min = \
-            int( min( latency_t0_to_device_list ) )
+                int(min(latency_t0_to_device_list))
         latency_t0_to_device_avg = \
-            ( int( sum( latency_t0_to_device_list ) ) /
-              len( latency_t0_to_device_list ) )
+                (int(sum(latency_t0_to_device_list)) / \
+                 len(latency_t0_to_device_list))
-        latency_ofp_to_device_std_dev = \
-            str( round( numpy.std( latency_t0_to_device_list ), 1 ) )
+        latency_t0_to_device_std_dev = \
+                str(round(numpy.std(latency_t0_to_device_list),1))
 
         latency_tcp_to_ofp_max = \
-            int( max( latency_tcp_to_ofp_list ) )
+                int(max(latency_tcp_to_ofp_list))
         latency_tcp_to_ofp_min = \
-            int( min( latency_tcp_to_ofp_list ) )
+                int(min(latency_tcp_to_ofp_list))
         latency_tcp_to_ofp_avg = \
-            ( int( sum( latency_tcp_to_ofp_list ) ) /
-              len( latency_tcp_to_ofp_list ) )
+                (int(sum(latency_tcp_to_ofp_list)) / \
+                 len(latency_tcp_to_ofp_list))
         latency_tcp_to_ofp_std_dev = \
-            str( round( numpy.std( latency_tcp_to_ofp_list ), 1 ) )
+                str(round(numpy.std(latency_tcp_to_ofp_list),1))
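+
+        # Sketch only (defined but not called): the five min/max/avg/std
+        # blocks above follow the same pattern and could be produced by a
+        # single helper per latency list.
+        def summarize_latency(latency_list):
+            return {'max': int(max(latency_list)),
+                    'min': int(min(latency_list)),
+                    'avg': int(sum(latency_list)) / len(latency_list),
+                    'std': str(round(numpy.std(latency_list), 1))}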
 
-        main.log.report(
-            "Switch add - End-to-end latency: " +
-            "Avg: " +
-            str( latency_end_to_end_avg ) +
-            " ms " +
-            "Std Deviation: " +
-            latency_end_to_end_std_dev +
-            " ms" )
-        main.log.report(
-            "Switch add - OFP-to-Graph latency: " +
-            "Note: results are not accurate to sub-millisecond. " +
-            "Any sub-millisecond results are rounded to 0 ms. " )
-        main.log.report(
-            "Avg: " +
-            str( latency_ofp_to_graph_avg ) +
-            " ms " +
-            "Std Deviation: " +
-            latency_ofp_to_graph_std_dev +
-            " ms" )
-        main.log.report(
-            "Switch add - TCP-to-OFP latency: " +
-            "Avg: " +
-            str( latency_tcp_to_ofp_avg ) +
-            " ms " +
-            "Std Deviation: " +
-            latency_tcp_to_ofp_std_dev +
-            " ms" )
+        main.log.report("Cluster size: "+str(cluster_count)+\
+                " node(s)")
+        main.log.report("Switch add - End-to-end latency: "+\
+                "Avg: "+str(latency_end_to_end_avg)+" ms "+
+                "Std Deviation: "+latency_end_to_end_std_dev+" ms")
+        main.log.report("Switch add - OFP-to-Graph latency: "+\
+                "Note: results are not accurate to sub-millisecond. "+
+                "Any sub-millisecond results are rounded to 0 ms. ")
+        main.log.report("Avg: "+str(latency_ofp_to_graph_avg)+" ms "+
+                "Std Deviation: "+latency_ofp_to_graph_std_dev+" ms")
+        main.log.report("Switch add - TCP-to-OFP latency: "+\
+                "Avg: "+str(latency_tcp_to_ofp_avg)+" ms "+
+                "Std Deviation: "+latency_tcp_to_ofp_std_dev+" ms")
 
         if debug_mode == 'on':
-            main.ONOS1.cp_logs_to_dir( "/opt/onos/log/karaf.log",
-                                       "/tmp/", copy_file_name="sw_lat_karaf" )
+            main.ONOS1.cp_logs_to_dir("/opt/onos/log/karaf.log",
+                    "/tmp/", copy_file_name="sw_lat_karaf")
 
-        utilities.assert_equals( expect=main.TRUE, actual=assertion,
-                                 onpass="Switch latency test successful",
-                                 onfail="Switch latency test failed" )
+        utilities.assert_equals(expect=main.TRUE, actual=assertion,
+                onpass="Switch latency test successful",
+                onfail="Switch latency test failed")
 
-    def CASE3( self, main ):
-        """
+    def CASE3(self, main):
+        '''
         Bring port up / down and measure latency.
         Port enable / disable is simulated by ifconfig up / down
-
-        In ONOS-next, we must ensure that the port we are
+        
+        In ONOS-next, we must ensure that the port we are 
         manipulating is connected to another switch with a valid
         connection. Otherwise, graph view will not be updated.
-        """
+        '''
         import time
         import subprocess
         import os
         import requests
         import json
         import numpy
+        global cluster_count
 
-        ONOS1_ip = main.params[ 'CTRL' ][ 'ip1' ]
-        ONOS2_ip = main.params[ 'CTRL' ][ 'ip2' ]
-        ONOS3_ip = main.params[ 'CTRL' ][ 'ip3' ]
-        ONOS_user = main.params[ 'CTRL' ][ 'user' ]
+        ONOS1_ip = main.params['CTRL']['ip1']
+        ONOS2_ip = main.params['CTRL']['ip2']
+        ONOS3_ip = main.params['CTRL']['ip3']
+        ONOS_user = main.params['CTRL']['user']
 
-        default_sw_port = main.params[ 'CTRL' ][ 'port1' ]
-
+        default_sw_port = main.params['CTRL']['port1']
+      
         assertion = main.TRUE
-        # Number of iterations of case
-        num_iter = main.params[ 'TEST' ][ 'numIter' ]
+        #Number of iterations of case
+        num_iter = main.params['TEST']['numIter']
+       
+        #Timestamp 'keys' for json metrics output.
+        #These are subject to change, hence moved into params
+        deviceTimestamp = main.params['JSON']['deviceTimestamp']
+        graphTimestamp = main.params['JSON']['graphTimestamp']
+        
+        debug_mode = main.params['TEST']['debugMode']
 
-        # Timestamp 'keys' for json metrics output.
-        # These are subject to change, hence moved into params
-        deviceTimestamp = main.params[ 'JSON' ][ 'deviceTimestamp' ]
-        graphTimestamp = main.params[ 'JSON' ][ 'graphTimestamp' ]
-
-        debug_mode = main.params[ 'TEST' ][ 'debugMode' ]
-
-        local_time = time.strftime( '%x %X' )
-        local_time = local_time.replace( "/", "" )
-        local_time = local_time.replace( " ", "_" )
-        local_time = local_time.replace( ":", "" )
+        local_time = time.strftime('%x %X')
+        local_time = local_time.replace("/","")
+        local_time = local_time.replace(" ","_")
+        local_time = local_time.replace(":","")
         if debug_mode == 'on':
-            main.ONOS1.tshark_pcap( "eth0",
-                                    "/tmp/port_lat_pcap_" + local_time )
+            main.ONOS1.tshark_pcap("eth0",
+                    "/tmp/port_lat_pcap_"+local_time) 
 
-        # Threshold for this test case
-        up_threshold_str = main.params[ 'TEST' ][ 'portUpThreshold' ]
-        down_threshold_str = main.params[ 'TEST' ][ 'portDownThreshold' ]
+        #Threshold for this test case
+        up_threshold_str = main.params['TEST']['portUpThreshold']
+        down_threshold_str = main.params['TEST']['portDownThreshold']
+        
+        up_threshold_obj = up_threshold_str.split(",")
+        down_threshold_obj = down_threshold_str.split(",")
 
-        up_threshold_obj = up_threshold_str.split( "," )
-        down_threshold_obj = down_threshold_str.split( "," )
+        up_threshold_min = int(up_threshold_obj[0])
+        up_threshold_max = int(up_threshold_obj[1])
 
-        up_threshold_min = int( up_threshold_obj[ 0 ] )
-        up_threshold_max = int( up_threshold_obj[ 1 ] )
+        down_threshold_min = int(down_threshold_obj[0])
+        down_threshold_max = int(down_threshold_obj[1])
 
-        down_threshold_min = int( down_threshold_obj[ 0 ] )
-        down_threshold_max = int( down_threshold_obj[ 1 ] )
-
-        # NOTE: Some hardcoded variables you may need to configure
+        #NOTE: Some hardcoded variables you may need to configure
         #      besides the params
-
+            
         tshark_port_status = "OFP 130 Port Status"
 
         tshark_port_up = "/tmp/tshark_port_up.txt"
         tshark_port_down = "/tmp/tshark_port_down.txt"
         interface_config = "s1-eth1"
 
-        main.log.report( "Port enable / disable latency" )
-        main.log.report( "Simulated by ifconfig up / down" )
-        main.log.report( "Total iterations of test: " + str( num_iter ) )
+        main.log.report("Port enable / disable latency")
+        main.log.report("Simulated by ifconfig up / down")
+        main.log.report("Total iterations of test: "+str(num_iter))
 
-        main.step( "Assign switches s1 and s2 to controller 1" )
-        main.Mininet1.assign_sw_controller( sw="1", ip1=ONOS1_ip,
-                                            port1=default_sw_port )
-        main.Mininet1.assign_sw_controller( sw="2", ip1=ONOS1_ip,
-                                            port1=default_sw_port )
+        main.step("Assign switches s1 and s2 to controller 1")
+        main.Mininet1.assign_sw_controller(sw="1",ip1=ONOS1_ip,
+                port1=default_sw_port)
+        main.Mininet1.assign_sw_controller(sw="2",ip1=ONOS1_ip,
+                port1=default_sw_port)
 
-        # Give enough time for metrics to propagate the
-        # assign controller event. Otherwise, these events may
-        # carry over to our measurements
-        time.sleep( 15 )
+        #Give enough time for metrics to propagate the 
+        #assign controller event. Otherwise, these events may
+        #carry over to our measurements
+        time.sleep(15)
 
         port_up_device_to_ofp_list = []
         port_up_graph_to_ofp_list = []
         port_down_device_to_ofp_list = []
         port_down_graph_to_ofp_list = []
 
-        for i in range( 0, int( num_iter ) ):
-            main.step( "Starting wireshark capture for port status down" )
-            main.ONOS1.tshark_grep( tshark_port_status,
-                                    tshark_port_down )
+        for i in range(0, int(num_iter)):
+            main.step("Starting wireshark capture for port status down")
+            main.ONOS1.tshark_grep(tshark_port_status,
+                    tshark_port_down)
+            
+            time.sleep(5)
 
-            time.sleep( 5 )
+            #Disable interface that is connected to switch 2
+            main.step("Disable port: "+interface_config)
+            main.Mininet1.handle.sendline("sh ifconfig "+
+                    interface_config+" down")
+            main.Mininet1.handle.expect("mininet>")
 
-            # Disable interface that is connected to switch 2
-            main.step( "Disable port: " + interface_config )
-            main.Mininet1.handle.sendline( "sh ifconfig " +
-                                           interface_config + " down" )
-            main.Mininet1.handle.expect( "mininet>" )
-
-            time.sleep( 3 )
+            time.sleep(3)
             main.ONOS1.tshark_stop()
-
-            main.step( "Obtain t1 by metrics call" )
-            json_str_up_1 = main.ONOS1cli.topology_events_metrics()
-            json_str_up_2 = main.ONOS2cli.topology_events_metrics()
-            json_str_up_3 = main.ONOS3cli.topology_events_metrics()
-
-            json_obj_1 = json.loads( json_str_up_1 )
-            json_obj_2 = json.loads( json_str_up_2 )
-            json_obj_3 = json.loads( json_str_up_3 )
-
-            # Copy tshark output file from ONOS to TestON instance
+            
+            #Copy tshark output file from ONOS to TestON instance
             #/tmp directory
-            os.system( "scp " + ONOS_user + "@" + ONOS1_ip + ":" +
-                       tshark_port_down + " /tmp/" )
+            os.system("scp "+ONOS_user+"@"+ONOS1_ip+":"+
+                    tshark_port_down+" /tmp/")
 
-            f_port_down = open( tshark_port_down, 'r' )
-            # Get first line of port down event from tshark
+            f_port_down = open(tshark_port_down, 'r')
+            #Get first line of port down event from tshark
             f_line = f_port_down.readline()
-            obj_down = f_line.split( " " )
-            if len( f_line ) > 0:
-                timestamp_begin_pt_down = int( float( obj_down[ 1 ] ) * 1000 )
-                main.log.info( "Port down begin timestamp: " +
-                               str( timestamp_begin_pt_down ) )
+            obj_down = f_line.split(" ")
+            if len(f_line) > 0:
+                #NOTE: obj_down[1] is a very unreliable
+                #      way to determine the timestamp. If 
+                #      results seem way off, check the object 
+                #      itself by printing it out
+                timestamp_begin_pt_down = int(float(obj_down[1])*1000)
+                # For some reason, wireshark decides to record the 
+                # timestamp at the 3rd object position instead of
+                # 2nd at unpredictable times. This statement is 
+                # used to capture that odd behavior and use the
+                # correct epoch time
+                if timestamp_begin_pt_down < 1400000000000:
+                    timestamp_begin_pt_down = \
+                        int(float(obj_down[2])*1000)
+
+                main.log.info("Port down begin timestamp: "+
+                        str(timestamp_begin_pt_down))
             else:
-                main.log.info( "Tshark output file returned unexpected" +
-                               " results: " + str( obj_down ) )
+                main.log.info("Tshark output file returned unexpected"+
+                        " results: "+str(obj_down))
                 timestamp_begin_pt_down = 0
-
             f_port_down.close()
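+
+            # Sketch (defined but not yet used): the field-position quirk
+            # noted above could be handled in one place for both the
+            # port-down and port-up captures; 1400000000000 ms (~May 2014)
+            # is the same sanity bound used above.
+            def tshark_epoch_ms(fields):
+                ts = int(float(fields[1]) * 1000)
+                if ts < 1400000000000 and len(fields) > 2:
+                    ts = int(float(fields[2]) * 1000)
+                return ts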
-
-            main.log.info( "TEST tshark obj: " + str( obj_down ) )
-
-            time.sleep( 3 )
-
-            # Obtain graph timestamp. This timestsamp captures
-            # the epoch time at which the topology graph was updated.
+            
+            main.step("Obtain t1 by metrics call")
+            json_str_up_1 = main.ONOS1cli.topology_events_metrics()
+            json_obj_1 = json.loads(json_str_up_1)
+            #Obtain graph timestamp. This timestamp captures
+            #the epoch time at which the topology graph was updated.
             graph_timestamp_1 = \
-                json_obj_1[ graphTimestamp ][ 'value' ]
-            graph_timestamp_2 = \
-                json_obj_2[ graphTimestamp ][ 'value' ]
-            graph_timestamp_3 = \
-                json_obj_3[ graphTimestamp ][ 'value' ]
-
-            main.log.info( "TEST graph timestamp ONOS1: " +
-                           str( graph_timestamp_1 ) )
-
-            # Obtain device timestamp. This timestamp captures
-            # the epoch time at which the device event happened
+                    json_obj_1[graphTimestamp]['value']
+            #Obtain device timestamp. This timestamp captures
+            #the epoch time at which the device event happened
             device_timestamp_1 = \
-                json_obj_1[ deviceTimestamp ][ 'value' ]
-            device_timestamp_2 = \
-                json_obj_2[ deviceTimestamp ][ 'value' ]
-            device_timestamp_3 = \
-                json_obj_3[ deviceTimestamp ][ 'value' ]
+                    json_obj_1[deviceTimestamp]['value'] 
+            #Get delta between graph event and OFP 
+            pt_down_graph_to_ofp_1 = int(graph_timestamp_1) -\
+                    int(timestamp_begin_pt_down)
+            #Get delta between device event and OFP
+            pt_down_device_to_ofp_1 = int(device_timestamp_1) -\
+                    int(timestamp_begin_pt_down)
+            
+            if cluster_count >= 3:
+                json_str_up_2 = main.ONOS2cli.topology_events_metrics()
+                json_str_up_3 = main.ONOS3cli.topology_events_metrics()
+                json_obj_2 = json.loads(json_str_up_2)
+                json_obj_3 = json.loads(json_str_up_3)
+                graph_timestamp_2 = \
+                    json_obj_2[graphTimestamp]['value']
+                graph_timestamp_3 = \
+                    json_obj_3[graphTimestamp]['value']
+                device_timestamp_2 = \
+                    json_obj_2[deviceTimestamp]['value'] 
+                device_timestamp_3 = \
+                    json_obj_3[deviceTimestamp]['value'] 
+                pt_down_graph_to_ofp_2 = int(graph_timestamp_2) -\
+                    int(timestamp_begin_pt_down)
+                pt_down_graph_to_ofp_3 = int(graph_timestamp_3) -\
+                    int(timestamp_begin_pt_down)
+                pt_down_device_to_ofp_2 = int(device_timestamp_2) -\
+                    int(timestamp_begin_pt_down)
+                pt_down_device_to_ofp_3 = int(device_timestamp_3) -\
+                    int(timestamp_begin_pt_down)
+            else:
+                pt_down_graph_to_ofp_2 = 0
+                pt_down_graph_to_ofp_3 = 0
+                pt_down_device_to_ofp_2 = 0
+                pt_down_device_to_ofp_3 = 0
 
-            # Get delta between graph event and OFP
-            pt_down_graph_to_ofp_1 = int( graph_timestamp_1 ) -\
-                int( timestamp_begin_pt_down )
-            pt_down_graph_to_ofp_2 = int( graph_timestamp_2 ) -\
-                int( timestamp_begin_pt_down )
-            pt_down_graph_to_ofp_3 = int( graph_timestamp_3 ) -\
-                int( timestamp_begin_pt_down )
+            if cluster_count >= 5:
+                json_str_up_4 = main.ONOS4cli.topology_events_metrics()
+                json_str_up_5 = main.ONOS5cli.topology_events_metrics()
+                json_obj_4 = json.loads(json_str_up_4)
+                json_obj_5 = json.loads(json_str_up_5)
+                graph_timestamp_4 = \
+                    json_obj_4[graphTimestamp]['value']
+                graph_timestamp_5 = \
+                    json_obj_5[graphTimestamp]['value']
+                device_timestamp_4 = \
+                    json_obj_4[deviceTimestamp]['value'] 
+                device_timestamp_5 = \
+                    json_obj_5[deviceTimestamp]['value'] 
+                pt_down_graph_to_ofp_4 = int(graph_timestamp_4) -\
+                    int(timestamp_begin_pt_down)
+                pt_down_graph_to_ofp_5 = int(graph_timestamp_5) -\
+                    int(timestamp_begin_pt_down)
+                pt_down_device_to_ofp_4 = int(device_timestamp_4) -\
+                    int(timestamp_begin_pt_down)
+                pt_down_device_to_ofp_5 = int(device_timestamp_5) -\
+                    int(timestamp_begin_pt_down)
+            else:
+                pt_down_graph_to_ofp_4 = 0
+                pt_down_graph_to_ofp_5 = 0
+                pt_down_device_to_ofp_4 = 0
+                pt_down_device_to_ofp_5 = 0
 
-            # Get delta between device event and OFP
-            pt_down_device_to_ofp_1 = int( device_timestamp_1 ) -\
-                int( timestamp_begin_pt_down )
-            pt_down_device_to_ofp_2 = int( device_timestamp_2 ) -\
-                int( timestamp_begin_pt_down )
-            pt_down_device_to_ofp_3 = int( device_timestamp_3 ) -\
-                int( timestamp_begin_pt_down )
+            if cluster_count >= 7:
+                json_str_up_6 = main.ONOS6cli.topology_events_metrics()
+                json_str_up_7 = main.ONOS7cli.topology_events_metrics()
+                json_obj_6 = json.loads(json_str_up_6)
+                json_obj_7 = json.loads(json_str_up_7)
+                graph_timestamp_6 = \
+                    json_obj_6[graphTimestamp]['value']
+                graph_timestamp_7 = \
+                    json_obj_7[graphTimestamp]['value']
+                device_timestamp_6 = \
+                    json_obj_6[deviceTimestamp]['value'] 
+                device_timestamp_7 = \
+                    json_obj_7[deviceTimestamp]['value'] 
+                pt_down_graph_to_ofp_6 = int(graph_timestamp_6) -\
+                    int(timestamp_begin_pt_down)
+                pt_down_graph_to_ofp_7 = int(graph_timestamp_7) -\
+                    int(timestamp_begin_pt_down)
+                pt_down_device_to_ofp_6 = int(device_timestamp_6) -\
+                    int(timestamp_begin_pt_down)
+                pt_down_device_to_ofp_7 = int(device_timestamp_7) -\
+                    int(timestamp_begin_pt_down)
+            else:
+                pt_down_graph_to_ofp_6 = 0
+                pt_down_graph_to_ofp_7 = 0
+                pt_down_device_to_ofp_6 = 0
+                pt_down_device_to_ofp_7 = 0
 
-            # Caluclate average across clusters
+            time.sleep(3)
+
+            #Calculate average across clusters
             pt_down_graph_to_ofp_avg =\
-                ( int( pt_down_graph_to_ofp_1 ) +
-                  int( pt_down_graph_to_ofp_2 ) +
-                  int( pt_down_graph_to_ofp_3 ) ) / 3
+                    (int(pt_down_graph_to_ofp_1) +
+                     int(pt_down_graph_to_ofp_2) + 
+                     int(pt_down_graph_to_ofp_3) +
+                     int(pt_down_graph_to_ofp_4) +
+                     int(pt_down_graph_to_ofp_5) +
+                     int(pt_down_graph_to_ofp_6) +
+                     int(pt_down_graph_to_ofp_7)) / cluster_count 
             pt_down_device_to_ofp_avg = \
-                ( int( pt_down_device_to_ofp_1 ) +
-                  int( pt_down_device_to_ofp_2 ) +
-                  int( pt_down_device_to_ofp_3 ) ) / 3
+                    (int(pt_down_device_to_ofp_1) + 
+                     int(pt_down_device_to_ofp_2) +
+                     int(pt_down_device_to_ofp_3) +
+                     int(pt_down_device_to_ofp_4) +
+                     int(pt_down_device_to_ofp_5) +
+                     int(pt_down_device_to_ofp_6) +
+                     int(pt_down_device_to_ofp_7)) / cluster_count 
 
             if pt_down_graph_to_ofp_avg > down_threshold_min and \
                     pt_down_graph_to_ofp_avg < down_threshold_max:
                 port_down_graph_to_ofp_list.append(
-                    pt_down_graph_to_ofp_avg )
-                main.log.info( "Port down: graph to ofp avg: " +
-                               str( pt_down_graph_to_ofp_avg ) + " ms" )
+                    pt_down_graph_to_ofp_avg)
+                main.log.info("Port down: graph to ofp avg: "+
+                    str(pt_down_graph_to_ofp_avg) + " ms")
             else:
-                main.log.info( "Average port down graph-to-ofp result" +
-                               " exceeded the threshold: " +
-                               str( pt_down_graph_to_ofp_avg ) )
+                main.log.info("Average port down graph-to-ofp result" +
+                        " exceeded the threshold: "+
+                        str(pt_down_graph_to_ofp_avg))
 
             if pt_down_device_to_ofp_avg > 0 and \
                     pt_down_device_to_ofp_avg < 1000:
                 port_down_device_to_ofp_list.append(
-                    pt_down_device_to_ofp_avg )
-                main.log.info( "Port down: device to ofp avg: " +
-                               str( pt_down_device_to_ofp_avg ) + " ms" )
+                    pt_down_device_to_ofp_avg)
+                main.log.info("Port down: device to ofp avg: "+
+                    str(pt_down_device_to_ofp_avg) + " ms")
             else:
-                main.log.info( "Average port down device-to-ofp result" +
-                               " exceeded the threshold: " +
-                               str( pt_down_device_to_ofp_avg ) )
+                main.log.info("Average port down device-to-ofp result" +
+                        " exceeded the threshold: "+
+                        str(pt_down_device_to_ofp_avg))
 
-            # Port up events
-            main.step( "Enable port and obtain timestamp" )
-            main.step( "Starting wireshark capture for port status up" )
-            main.ONOS1.tshark_grep( tshark_port_status, tshark_port_up )
-            time.sleep( 5 )
+            #Port up events 
+            main.step("Enable port and obtain timestamp")
+            main.step("Starting wireshark capture for port status up")
+            main.ONOS1.tshark_grep(tshark_port_status, tshark_port_up)
+            time.sleep(5)
 
-            main.Mininet1.handle.sendline( "sh ifconfig " +
-                                           interface_config + " up" )
-            main.Mininet1.handle.expect( "mininet>" )
-
-            # Allow time for tshark to capture event
-            time.sleep( 3 )
+            main.Mininet1.handle.sendline("sh ifconfig "+
+                    interface_config+" up")
+            main.Mininet1.handle.expect("mininet>")
+            
+            #Allow time for tshark to capture event
+            time.sleep(5)
             main.ONOS1.tshark_stop()
-
-            # Obtain metrics shortly afterwards
-            # This timestsamp captures
-            # the epoch time at which the topology graph was updated.
-            main.step( "Obtain t1 by REST call" )
-            json_str_up_1 = main.ONOS1cli.topology_events_metrics()
-            json_str_up_2 = main.ONOS2cli.topology_events_metrics()
-            json_str_up_3 = main.ONOS3cli.topology_events_metrics()
-
-            json_obj_1 = json.loads( json_str_up_1 )
-            json_obj_2 = json.loads( json_str_up_2 )
-            json_obj_3 = json.loads( json_str_up_3 )
-
-            os.system( "scp " + ONOS_user + "@" + ONOS1_ip + ":" +
-                       tshark_port_up + " /tmp/" )
-
-            f_port_up = open( tshark_port_up, 'r' )
+            
+            time.sleep(3)
+            os.system("scp "+ONOS_user+"@"+ONOS1_ip+":"+
+                    tshark_port_up+" /tmp/")
+            f_port_up = open(tshark_port_up, 'r')
             f_line = f_port_up.readline()
-            obj_up = f_line.split( " " )
-            if len( f_line ) > 0:
-                timestamp_begin_pt_up = int( float( obj_up[ 1 ] ) * 1000 )
-                main.log.info( "Port up begin timestamp: " +
-                               str( timestamp_begin_pt_up ) )
+            obj_up = f_line.split(" ")
+            if len(f_line) > 0:
+                timestamp_begin_pt_up = int(float(obj_up[1])*1000)
+                if timestamp_begin_pt_up < 1400000000000:
+                    timestamp_begin_pt_up = \
+                        int(float(obj_up[2])*1000)
+                main.log.info("Port up begin timestamp: "+
+                        str(timestamp_begin_pt_up))
             else:
-                main.log.info( "Tshark output file returned unexpected" +
-                               " results." )
+                main.log.info("Tshark output file returned unexpected"+
+                        " results.")
                 timestamp_begin_pt_up = 0
-
             f_port_up.close()
 
+            #Obtain metrics shortly afterwards
+            #This timestamp captures
+            #the epoch time at which the topology graph was updated.
+            main.step("Obtain t1 by REST call")
+            json_str_up_1 = main.ONOS1cli.topology_events_metrics()
+            json_obj_1 = json.loads(json_str_up_1)
             graph_timestamp_1 = \
-                json_obj_1[ graphTimestamp ][ 'value' ]
-            graph_timestamp_2 = \
-                json_obj_2[ graphTimestamp ][ 'value' ]
-            graph_timestamp_3 = \
-                json_obj_3[ graphTimestamp ][ 'value' ]
-
-            # Obtain device timestamp. This timestamp captures
-            # the epoch time at which the device event happened
+                    json_obj_1[graphTimestamp]['value']
+            #Obtain device timestamp. This timestamp captures
+            #the epoch time at which the device event happened
             device_timestamp_1 = \
-                json_obj_1[ deviceTimestamp ][ 'value' ]
-            device_timestamp_2 = \
-                json_obj_2[ deviceTimestamp ][ 'value' ]
-            device_timestamp_3 = \
-                json_obj_3[ deviceTimestamp ][ 'value' ]
+                    json_obj_1[deviceTimestamp]['value'] 
+            #Get delta between graph event and OFP 
+            pt_up_graph_to_ofp_1 = int(graph_timestamp_1) -\
+                    int(timestamp_begin_pt_up)
+            #Get delta between device event and OFP
+            pt_up_device_to_ofp_1 = int(device_timestamp_1) -\
+                    int(timestamp_begin_pt_up)
+            
+            if cluster_count >= 3:
+                json_str_up_2 = main.ONOS2cli.topology_events_metrics()
+                json_str_up_3 = main.ONOS3cli.topology_events_metrics()
+                json_obj_2 = json.loads(json_str_up_2)
+                json_obj_3 = json.loads(json_str_up_3)
+                graph_timestamp_2 = \
+                    json_obj_2[graphTimestamp]['value']
+                graph_timestamp_3 = \
+                    json_obj_3[graphTimestamp]['value']
+                device_timestamp_2 = \
+                    json_obj_2[deviceTimestamp]['value'] 
+                device_timestamp_3 = \
+                    json_obj_3[deviceTimestamp]['value'] 
+                pt_up_graph_to_ofp_2 = int(graph_timestamp_2) -\
+                    int(timestamp_begin_pt_up)
+                pt_up_graph_to_ofp_3 = int(graph_timestamp_3) -\
+                    int(timestamp_begin_pt_up)
+                pt_up_device_to_ofp_2 = int(device_timestamp_2) -\
+                    int(timestamp_begin_pt_up)
+                pt_up_device_to_ofp_3 = int(device_timestamp_3) -\
+                    int(timestamp_begin_pt_up)
+            else:
+                pt_up_graph_to_ofp_2 = 0
+                pt_up_graph_to_ofp_3 = 0
+                pt_up_device_to_ofp_2 = 0
+                pt_up_device_to_ofp_3 = 0
+            
+            if cluster_count >= 5:
+                json_str_up_4 = main.ONOS4cli.topology_events_metrics()
+                json_str_up_5 = main.ONOS5cli.topology_events_metrics()
+                json_obj_4 = json.loads(json_str_up_4)
+                json_obj_5 = json.loads(json_str_up_5)
+                graph_timestamp_4 = \
+                    json_obj_4[graphTimestamp]['value']
+                graph_timestamp_5 = \
+                    json_obj_5[graphTimestamp]['value']
+                device_timestamp_4 = \
+                    json_obj_4[deviceTimestamp]['value'] 
+                device_timestamp_5 = \
+                    json_obj_5[deviceTimestamp]['value'] 
+                pt_up_graph_to_ofp_4 = int(graph_timestamp_4) -\
+                    int(timestamp_begin_pt_up)
+                pt_up_graph_to_ofp_5 = int(graph_timestamp_5) -\
+                    int(timestamp_begin_pt_up)
+                pt_up_device_to_ofp_4 = int(device_timestamp_4) -\
+                    int(timestamp_begin_pt_up)
+                pt_up_device_to_ofp_5 = int(device_timestamp_5) -\
+                    int(timestamp_begin_pt_up)
+            else:
+                pt_up_graph_to_ofp_4 = 0
+                pt_up_graph_to_ofp_5 = 0
+                pt_up_device_to_ofp_4 = 0
+                pt_up_device_to_ofp_5 = 0
 
-            # Get delta between graph event and OFP
-            pt_up_graph_to_ofp_1 = int( graph_timestamp_1 ) -\
-                int( timestamp_begin_pt_up )
-            pt_up_graph_to_ofp_2 = int( graph_timestamp_2 ) -\
-                int( timestamp_begin_pt_up )
-            pt_up_graph_to_ofp_3 = int( graph_timestamp_3 ) -\
-                int( timestamp_begin_pt_up )
-
-            # Get delta between device event and OFP
-            pt_up_device_to_ofp_1 = int( device_timestamp_1 ) -\
-                int( timestamp_begin_pt_up )
-            pt_up_device_to_ofp_2 = int( device_timestamp_2 ) -\
-                int( timestamp_begin_pt_up )
-            pt_up_device_to_ofp_3 = int( device_timestamp_3 ) -\
-                int( timestamp_begin_pt_up )
-
-            main.log.info( "ONOS1 delta G2O: " + str( pt_up_graph_to_ofp_1 ) )
-            main.log.info( "ONOS2 delta G2O: " + str( pt_up_graph_to_ofp_2 ) )
-            main.log.info( "ONOS3 delta G2O: " + str( pt_up_graph_to_ofp_3 ) )
-
-            main.log.info( "ONOS1 delta D2O: " + str( pt_up_device_to_ofp_1 ) )
-            main.log.info( "ONOS2 delta D2O: " + str( pt_up_device_to_ofp_2 ) )
-            main.log.info( "ONOS3 delta D2O: " + str( pt_up_device_to_ofp_3 ) )
+            if cluster_count >= 7:
+                json_str_up_6 = main.ONOS6cli.topology_events_metrics()
+                json_str_up_7 = main.ONOS7cli.topology_events_metrics()
+                json_obj_6 = json.loads(json_str_up_6)
+                json_obj_7 = json.loads(json_str_up_7)
+                graph_timestamp_6 = \
+                    json_obj_6[graphTimestamp]['value']
+                graph_timestamp_7 = \
+                    json_obj_7[graphTimestamp]['value']
+                device_timestamp_6 = \
+                    json_obj_6[deviceTimestamp]['value'] 
+                device_timestamp_7 = \
+                    json_obj_7[deviceTimestamp]['value'] 
+                pt_up_graph_to_ofp_6 = int(graph_timestamp_6) -\
+                    int(timestamp_begin_pt_up)
+                pt_up_graph_to_ofp_7 = int(graph_timestamp_7) -\
+                    int(timestamp_begin_pt_up)
+                pt_up_device_to_ofp_6 = int(device_timestamp_6) -\
+                    int(timestamp_begin_pt_up)
+                pt_up_device_to_ofp_7 = int(device_timestamp_7) -\
+                    int(timestamp_begin_pt_up)
+            else:
+                pt_up_graph_to_ofp_6 = 0
+                pt_up_graph_to_ofp_7 = 0
+                pt_up_device_to_ofp_6 = 0
+                pt_up_device_to_ofp_7 = 0
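+            #Only nodes present at the current cluster size are polled for
+            #metrics; the remaining entries are zero-filled so the sums
+            #below stay uniform and dividing by cluster_count (presumably
+            #scaled in steps of 3, 5 and 7 nodes) gives the per-node average.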
 
             pt_up_graph_to_ofp_avg = \
-                ( int( pt_up_graph_to_ofp_1 ) +
-                  int( pt_up_graph_to_ofp_2 ) +
-                  int( pt_up_graph_to_ofp_3 ) ) / 3
+                    (int(pt_up_graph_to_ofp_1) + 
+                     int(pt_up_graph_to_ofp_2) +
+                     int(pt_up_graph_to_ofp_3) +
+                     int(pt_up_graph_to_ofp_4) +
+                     int(pt_up_graph_to_ofp_5) +
+                     int(pt_up_graph_to_ofp_6) +
+                     int(pt_up_graph_to_ofp_7)) / cluster_count 
 
             pt_up_device_to_ofp_avg = \
-                ( int( pt_up_device_to_ofp_1 ) +
-                  int( pt_up_device_to_ofp_2 ) +
-                  int( pt_up_device_to_ofp_3 ) ) / 3
+                    (int(pt_up_device_to_ofp_1) + 
+                     int(pt_up_device_to_ofp_2) +
+                     int(pt_up_device_to_ofp_3) +
+                     int(pt_up_device_to_ofp_4) +
+                     int(pt_up_device_to_ofp_5) +
+                     int(pt_up_device_to_ofp_6) +
+                     int(pt_up_device_to_ofp_7)) / cluster_count 
 
             if pt_up_graph_to_ofp_avg > up_threshold_min and \
-                    pt_up_graph_to_ofp_avg < up_threshold_max:
+                    pt_up_graph_to_ofp_avg < up_threshold_max: 
                 port_up_graph_to_ofp_list.append(
-                    pt_up_graph_to_ofp_avg )
-                main.log.info( "Port down: graph to ofp avg: " +
-                               str( pt_up_graph_to_ofp_avg ) + " ms" )
+                        pt_up_graph_to_ofp_avg)
+                main.log.info("Port down: graph to ofp avg: "+
+                    str(pt_up_graph_to_ofp_avg) + " ms")
             else:
-                main.log.info( "Average port up graph-to-ofp result" +
-                               " exceeded the threshold: " +
-                               str( pt_up_graph_to_ofp_avg ) )
-
+                main.log.info("Average port up graph-to-ofp result"+
+                        " exceeded the threshold: "+
+                        str(pt_up_graph_to_ofp_avg))
+            
             if pt_up_device_to_ofp_avg > up_threshold_min and \
                     pt_up_device_to_ofp_avg < up_threshold_max:
                 port_up_device_to_ofp_list.append(
-                    pt_up_device_to_ofp_avg )
-                main.log.info( "Port up: device to ofp avg: " +
-                               str( pt_up_device_to_ofp_avg ) + " ms" )
+                        pt_up_device_to_ofp_avg)
+                main.log.info("Port up: device to ofp avg: "+
+                    str(pt_up_device_to_ofp_avg) + " ms")
             else:
-                main.log.info( "Average port up device-to-ofp result" +
-                               " exceeded the threshold: " +
-                               str( pt_up_device_to_ofp_avg ) )
-
-            # END ITERATION FOR LOOP
-
-        # Check all list for latency existence and set assertion
-        if ( port_down_graph_to_ofp_list and port_down_device_to_ofp_list
-                and port_up_graph_to_ofp_list and port_up_device_to_ofp_list ):
+                main.log.info("Average port up device-to-ofp result"+
+                        " exceeded the threshold: "+
+                        str(pt_up_device_to_ofp_avg))
+            
+            #END ITERATION FOR LOOP
+        
+        #Check all lists for latency existence and set assertion
+        if (port_down_graph_to_ofp_list and port_down_device_to_ofp_list\
+           and port_up_graph_to_ofp_list and port_up_device_to_ofp_list):
             assertion = main.TRUE
-
-        # Calculate and report latency measurements
-        port_down_graph_to_ofp_min = min( port_down_graph_to_ofp_list )
-        port_down_graph_to_ofp_max = max( port_down_graph_to_ofp_list )
+        
+        main.log.report("Cluster size: "+str(cluster_count)+\
+                " node(s)")
+        #Calculate and report latency measurements
+        port_down_graph_to_ofp_min = min(port_down_graph_to_ofp_list)
+        port_down_graph_to_ofp_max = max(port_down_graph_to_ofp_list)
         port_down_graph_to_ofp_avg = \
-            ( sum( port_down_graph_to_ofp_list ) /
-              len( port_down_graph_to_ofp_list ) )
+                (sum(port_down_graph_to_ofp_list) / 
+                 len(port_down_graph_to_ofp_list))
         port_down_graph_to_ofp_std_dev = \
-            str( round( numpy.std( port_down_graph_to_ofp_list ), 1 ) )
-
-        main.log.report(
-            "Port down graph-to-ofp " +
-            "Avg: " +
-            str( port_down_graph_to_ofp_avg ) +
-            " ms " +
-            "Std Deviation: " +
-            port_down_graph_to_ofp_std_dev +
-            " ms" )
-
-        port_down_device_to_ofp_min = min( port_down_device_to_ofp_list )
-        port_down_device_to_ofp_max = max( port_down_device_to_ofp_list )
+                str(round(numpy.std(port_down_graph_to_ofp_list),1))
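+        #numpy.std() gives the population standard deviation of the
+        #retained (threshold-filtered) samples, rounded to one decimal.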
+        
+        main.log.report("Port down graph-to-ofp "+
+                "Avg: "+str(port_down_graph_to_ofp_avg)+" ms "+
+                "Std Deviation: "+port_down_graph_to_ofp_std_dev+" ms")
+        
+        port_down_device_to_ofp_min = min(port_down_device_to_ofp_list)
+        port_down_device_to_ofp_max = max(port_down_device_to_ofp_list)
         port_down_device_to_ofp_avg = \
-            ( sum( port_down_device_to_ofp_list ) /
-              len( port_down_device_to_ofp_list ) )
+                (sum(port_down_device_to_ofp_list) /\
+                 len(port_down_device_to_ofp_list))
         port_down_device_to_ofp_std_dev = \
-            str( round( numpy.std( port_down_device_to_ofp_list ), 1 ) )
-
-        main.log.report(
-            "Port down device-to-ofp " +
-            "Avg: " +
-            str( port_down_device_to_ofp_avg ) +
-            " ms " +
-            "Std Deviation: " +
-            port_down_device_to_ofp_std_dev +
-            " ms" )
-
-        port_up_graph_to_ofp_min = min( port_up_graph_to_ofp_list )
-        port_up_graph_to_ofp_max = max( port_up_graph_to_ofp_list )
+                str(round(numpy.std(port_down_device_to_ofp_list),1))
+        
+        main.log.report("Port down device-to-ofp "+
+                "Avg: "+str(port_down_device_to_ofp_avg)+" ms "+
+                "Std Deviation: "+port_down_device_to_ofp_std_dev+" ms")
+        
+        port_up_graph_to_ofp_min = min(port_up_graph_to_ofp_list)
+        port_up_graph_to_ofp_max = max(port_up_graph_to_ofp_list)
         port_up_graph_to_ofp_avg = \
-            ( sum( port_up_graph_to_ofp_list ) /
-              len( port_up_graph_to_ofp_list ) )
+                (sum(port_up_graph_to_ofp_list) /\
+                 len(port_up_graph_to_ofp_list))
         port_up_graph_to_ofp_std_dev = \
-            str( round( numpy.std( port_up_graph_to_ofp_list ), 1 ) )
-
-        main.log.report(
-            "Port up graph-to-ofp " +
-            "Avg: " +
-            str( port_up_graph_to_ofp_avg ) +
-            " ms " +
-            "Std Deviation: " +
-            port_up_graph_to_ofp_std_dev +
-            " ms" )
-
-        port_up_device_to_ofp_min = min( port_up_device_to_ofp_list )
-        port_up_device_to_ofp_max = max( port_up_device_to_ofp_list )
+                str(round(numpy.std(port_up_graph_to_ofp_list),1))
+        
+        main.log.report("Port up graph-to-ofp "+
+                "Avg: "+str(port_up_graph_to_ofp_avg)+" ms "+
+                "Std Deviation: "+port_up_graph_to_ofp_std_dev+" ms")
+          
+        port_up_device_to_ofp_min = min(port_up_device_to_ofp_list)
+        port_up_device_to_ofp_max = max(port_up_device_to_ofp_list)
         port_up_device_to_ofp_avg = \
-            ( sum( port_up_device_to_ofp_list ) /
-              len( port_up_device_to_ofp_list ) )
+                (sum(port_up_device_to_ofp_list) /\
+                 len(port_up_device_to_ofp_list))
         port_up_device_to_ofp_std_dev = \
-            str( round( numpy.std( port_up_device_to_ofp_list ), 1 ) )
+                str(round(numpy.std(port_up_device_to_ofp_list),1))
+        
+        main.log.report("Port up device-to-ofp "+
+                "Avg: "+str(port_up_device_to_ofp_avg)+" ms "+
+                "Std Deviation: "+port_up_device_to_ofp_std_dev+" ms")
 
-        main.log.report(
-            "Port up device-to-ofp " +
-            "Avg: " +
-            str( port_up_device_to_ofp_avg ) +
-            " ms " +
-            "Std Deviation: " +
-            port_up_device_to_ofp_std_dev +
-            " ms" )
+        #Remove switches from controller for next test
+        main.Mininet1.delete_sw_controller("s1")
+        main.Mininet1.delete_sw_controller("s2")
+        
+        utilities.assert_equals(expect=main.TRUE, actual=assertion,
+                onpass="Port discovery latency calculation successful",
+                onfail="Port discovery test failed")
 
-        utilities.assert_equals(
-            expect=main.TRUE,
-            actual=assertion,
-            onpass="Port discovery latency calculation successful",
-            onfail="Port discovery test failed" )
-
-    def CASE4( self, main ):
-        """
+    def CASE4(self, main):
+        '''
         Link down event using loss rate 100%
-
+        
         Important:
             Use a simple 2 switch topology with 1 link between
-            the two switches. Ensure that mac addresses of the
+            the two switches. Ensure that mac addresses of the 
             switches are 1 / 2 respectively
-        """
+        '''
         import time
         import subprocess
         import os
         import requests
         import json
-        import numpy
+        import numpy 
+    
+        ONOS1_ip = main.params['CTRL']['ip1']
+        ONOS2_ip = main.params['CTRL']['ip2']
+        ONOS3_ip = main.params['CTRL']['ip3']
+        ONOS_user = main.params['CTRL']['user']
 
-        ONOS1_ip = main.params[ 'CTRL' ][ 'ip1' ]
-        ONOS2_ip = main.params[ 'CTRL' ][ 'ip2' ]
-        ONOS3_ip = main.params[ 'CTRL' ][ 'ip3' ]
-        ONOS_user = main.params[ 'CTRL' ][ 'user' ]
+        default_sw_port = main.params['CTRL']['port1']
+       
+        #Number of iterations of case
+        num_iter = main.params['TEST']['numIter']
+       
+        #Timestamp 'keys' for json metrics output.
+        #These are subject to change, hence moved into params
+        deviceTimestamp = main.params['JSON']['deviceTimestamp']
+        linkTimestamp = main.params['JSON']['linkTimestamp'] 
+        graphTimestamp = main.params['JSON']['graphTimestamp']
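+        #The metrics CLI output is expected to map each of these keys to
+        #an object carrying a 'value' field (indexed as [key]['value']
+        #below); the exact key strings live in the .params file.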
+        
+        debug_mode = main.params['TEST']['debugMode']
 
-        default_sw_port = main.params[ 'CTRL' ][ 'port1' ]
-
-        # Number of iterations of case
-        num_iter = main.params[ 'TEST' ][ 'numIter' ]
-
-        # Timestamp 'keys' for json metrics output.
-        # These are subject to change, hence moved into params
-        deviceTimestamp = main.params[ 'JSON' ][ 'deviceTimestamp' ]
-        linkTimestamp = main.params[ 'JSON' ][ 'linkTimestamp' ]
-        graphTimestamp = main.params[ 'JSON' ][ 'graphTimestamp' ]
-
-        debug_mode = main.params[ 'TEST' ][ 'debugMode' ]
-
-        local_time = time.strftime( '%x %X' )
-        local_time = local_time.replace( "/", "" )
-        local_time = local_time.replace( " ", "_" )
-        local_time = local_time.replace( ":", "" )
+        local_time = time.strftime('%x %X')
+        local_time = local_time.replace("/","")
+        local_time = local_time.replace(" ","_")
+        local_time = local_time.replace(":","")
         if debug_mode == 'on':
-            main.ONOS1.tshark_pcap( "eth0",
-                                    "/tmp/link_lat_pcap_" + local_time )
+            main.ONOS1.tshark_pcap("eth0",
+                    "/tmp/link_lat_pcap_"+local_time) 
 
-        # Threshold for this test case
-        up_threshold_str = main.params[ 'TEST' ][ 'linkUpThreshold' ]
-        down_threshold_str = main.params[ 'TEST' ][ 'linkDownThreshold' ]
+        #Threshold for this test case
+        up_threshold_str = main.params['TEST']['linkUpThreshold']
+        down_threshold_str = main.params['TEST']['linkDownThreshold']
 
-        up_threshold_obj = up_threshold_str.split( "," )
-        down_threshold_obj = down_threshold_str.split( "," )
+        up_threshold_obj = up_threshold_str.split(",")
+        down_threshold_obj = down_threshold_str.split(",")
 
-        up_threshold_min = int( up_threshold_obj[ 0 ] )
-        up_threshold_max = int( up_threshold_obj[ 1 ] )
+        up_threshold_min = int(up_threshold_obj[0])
+        up_threshold_max = int(up_threshold_obj[1])
 
-        down_threshold_min = int( down_threshold_obj[ 0 ] )
-        down_threshold_max = int( down_threshold_obj[ 1 ] )
+        down_threshold_min = int(down_threshold_obj[0])
+        down_threshold_max = int(down_threshold_obj[1])
 
         assertion = main.TRUE
-        # Link event timestamp to system time list
+        #Link event timestamp to system time list
         link_down_link_to_system_list = []
         link_up_link_to_system_list = []
-        # Graph event timestamp to system time list
+        #Graph event timestamp to system time list
         link_down_graph_to_system_list = []
-        link_up_graph_to_system_list = []
+        link_up_graph_to_system_list = [] 
 
-        main.log.report( "Link up / down discovery latency between " +
-                         "two switches" )
-        main.log.report( "Simulated by setting loss-rate 100%" )
-        main.log.report( "'tc qdisc add dev <intfs> root netem loss 100%'" )
-        main.log.report( "Total iterations of test: " + str( num_iter ) )
+        main.log.report("Link up / down discovery latency between "+
+                "two switches")
+        main.log.report("Simulated by setting loss-rate 100%")
+        main.log.report("'tc qdisc add dev <intfs> root netem loss 100%'") 
+        main.log.report("Total iterations of test: "+str(num_iter))
 
-        main.step( "Assign all switches" )
-        main.Mininet1.assign_sw_controller(
-            sw="1",
-            ip1=ONOS1_ip,
-            port1=default_sw_port )
-        main.Mininet1.assign_sw_controller(
-            sw="2",
-            ip1=ONOS1_ip,
-            port1=default_sw_port )
+        main.step("Assign all switches")
+        main.Mininet1.assign_sw_controller(sw="1",
+                ip1=ONOS1_ip, port1=default_sw_port)
+        main.Mininet1.assign_sw_controller(sw="2",
+                ip1=ONOS1_ip, port1=default_sw_port)
 
-        main.step( "Verifying switch assignment" )
-        result_s1 = main.Mininet1.get_sw_controller( sw="s1" )
-        result_s2 = main.Mininet1.get_sw_controller( sw="s2" )
-
-        # Allow time for events to finish before taking measurements
-        time.sleep( 10 )
+        main.step("Verifying switch assignment")
+        result_s1 = main.Mininet1.get_sw_controller(sw="s1")
+        result_s2 = main.Mininet1.get_sw_controller(sw="s2")
+          
+        #Allow time for events to finish before taking measurements
+        time.sleep(10)
 
         link_down1 = False
         link_down2 = False
         link_down3 = False
-        # Start iteration of link event test
-        for i in range( 0, int( num_iter ) ):
-            main.step( "Getting initial system time as t0" )
-
+        #Start iteration of link event test
+        for i in range(0, int(num_iter)):
+            main.step("Getting initial system time as t0")
+        
+            #System time in epoch ms
             timestamp_link_down_t0 = time.time() * 1000
-            # Link down is simulated by 100% loss rate using traffic
-            # control command
+            #Link down is simulated by 100% loss rate using traffic 
+            #control command
             main.Mininet1.handle.sendline(
-                "sh tc qdisc add dev s1-eth1 root netem loss 100%" )
+                    "sh tc qdisc add dev s1-eth1 root netem loss 100%")
 
-            # TODO: Iterate through 'links' command to verify that
-            #      link s1 -> s2 went down ( loop timeout 30 seconds )
+            #TODO: Iterate through 'links' command to verify that
+            #      link s1 -> s2 went down (loop timeout 30 seconds) 
             #      on all 3 ONOS instances
-            main.log.info( "Checking ONOS for link update" )
+            main.log.info("Checking ONOS for link update")
             loop_count = 0
-            while( not ( link_down1 and link_down2 and link_down3 )
+            while( not (link_down1 and link_down2 and link_down3)\
                     and loop_count < 30 ):
                 json_str1 = main.ONOS1cli.links()
                 json_str2 = main.ONOS2cli.links()
                 json_str3 = main.ONOS3cli.links()
-
-                if not ( json_str1 and json_str2 and json_str3 ):
-                    main.log.error( "CLI command returned error " )
+                
+                if not (json_str1 and json_str2 and json_str3):
+                    main.log.error("CLI command returned error ")
                     break
                 else:
-                    json_obj1 = json.loads( json_str1 )
-                    json_obj2 = json.loads( json_str2 )
-                    json_obj3 = json.loads( json_str3 )
+                    json_obj1 = json.loads(json_str1)
+                    json_obj2 = json.loads(json_str2)
+                    json_obj3 = json.loads(json_str3)
                 for obj1 in json_obj1:
-                    if '01' not in obj1[ 'src' ][ 'device' ]:
+                    if '01' not in obj1['src']['device']:
                         link_down1 = True
-                        main.log.info( "Link down from " +
-                                       "s1 -> s2 on ONOS1 detected" )
+                        main.log.info("Link down from "+
+                                "s1 -> s2 on ONOS1 detected")
                 for obj2 in json_obj2:
-                    if '01' not in obj2[ 'src' ][ 'device' ]:
+                    if '01' not in obj2['src']['device']:
                         link_down2 = True
-                        main.log.info( "Link down from " +
-                                       "s1 -> s2 on ONOS2 detected" )
+                        main.log.info("Link down from "+
+                                "s1 -> s2 on ONOS2 detected")
                 for obj3 in json_obj3:
-                    if '01' not in obj3[ 'src' ][ 'device' ]:
+                    if '01' not in obj3['src']['device']:
                         link_down3 = True
-                        main.log.info( "Link down from " +
-                                       "s1 -> s2 on ONOS3 detected" )
-
+                        main.log.info("Link down from "+
+                                "s1 -> s2 on ONOS3 detected")
+                
                 loop_count += 1
-                # If CLI doesn't like the continuous requests
-                # and exits in this loop, increase the sleep here.
-                # Consequently, while loop timeout will increase
-                time.sleep( 1 )
-
-            # Give time for metrics measurement to catch up
-            # NOTE: May need to be configured more accurately
-            time.sleep( 10 )
-            # If we exited the while loop and link down 1,2,3 are still
-            # false, then ONOS has failed to discover link down event
-            if not ( link_down1 and link_down2 and link_down3 ):
-                main.log.info( "Link down discovery failed" )
-
+                #If CLI doesn't like the continuous requests
+                #and exits in this loop, increase the sleep here.
+                #Consequently, while loop timeout will increase
+                time.sleep(1)
+    
+            #Give time for metrics measurement to catch up
+            #NOTE: May need to be configured more accurately
+            time.sleep(10)
+            #If we exited the while loop and link down 1,2,3 are still 
+            #false, then ONOS has failed to discover link down event
+            if not (link_down1 and link_down2 and link_down3):
+                main.log.info("Link down discovery failed")
+                
                 link_down_lat_graph1 = 0
                 link_down_lat_graph2 = 0
                 link_down_lat_graph3 = 0
                 link_down_lat_device1 = 0
                 link_down_lat_device2 = 0
                 link_down_lat_device3 = 0
-
+                
                 assertion = main.FALSE
             else:
                 json_topo_metrics_1 =\
-                    main.ONOS1cli.topology_events_metrics()
+                        main.ONOS1cli.topology_events_metrics()
                 json_topo_metrics_2 =\
-                    main.ONOS2cli.topology_events_metrics()
+                        main.ONOS2cli.topology_events_metrics()
                 json_topo_metrics_3 =\
-                    main.ONOS3cli.topology_events_metrics()
-                json_topo_metrics_1 = json.loads( json_topo_metrics_1 )
-                json_topo_metrics_2 = json.loads( json_topo_metrics_2 )
-                json_topo_metrics_3 = json.loads( json_topo_metrics_3 )
+                        main.ONOS3cli.topology_events_metrics()
+                json_topo_metrics_1 = json.loads(json_topo_metrics_1)
+                json_topo_metrics_2 = json.loads(json_topo_metrics_2)
+                json_topo_metrics_3 = json.loads(json_topo_metrics_3)
 
-                main.log.info( "Obtaining graph and device timestamp" )
+                main.log.info("Obtaining graph and device timestamp")
                 graph_timestamp_1 = \
-                    json_topo_metrics_1[ graphTimestamp ][ 'value' ]
+                    json_topo_metrics_1[graphTimestamp]['value']
                 graph_timestamp_2 = \
-                    json_topo_metrics_2[ graphTimestamp ][ 'value' ]
+                    json_topo_metrics_2[graphTimestamp]['value']
                 graph_timestamp_3 = \
-                    json_topo_metrics_3[ graphTimestamp ][ 'value' ]
+                    json_topo_metrics_3[graphTimestamp]['value']
 
                 link_timestamp_1 = \
-                    json_topo_metrics_1[ linkTimestamp ][ 'value' ]
+                    json_topo_metrics_1[linkTimestamp]['value']
                 link_timestamp_2 = \
-                    json_topo_metrics_2[ linkTimestamp ][ 'value' ]
+                    json_topo_metrics_2[linkTimestamp]['value']
                 link_timestamp_3 = \
-                    json_topo_metrics_3[ linkTimestamp ][ 'value' ]
+                    json_topo_metrics_3[linkTimestamp]['value']
 
                 if graph_timestamp_1 and graph_timestamp_2 and\
                         graph_timestamp_3 and link_timestamp_1 and\
                         link_timestamp_2 and link_timestamp_3:
-                    link_down_lat_graph1 = int( graph_timestamp_1 ) -\
-                        int( timestamp_link_down_t0 )
-                    link_down_lat_graph2 = int( graph_timestamp_2 ) -\
-                        int( timestamp_link_down_t0 )
-                    link_down_lat_graph3 = int( graph_timestamp_3 ) -\
-                        int( timestamp_link_down_t0 )
-
-                    link_down_lat_link1 = int( link_timestamp_1 ) -\
-                        int( timestamp_link_down_t0 )
-                    link_down_lat_link2 = int( link_timestamp_2 ) -\
-                        int( timestamp_link_down_t0 )
-                    link_down_lat_link3 = int( link_timestamp_3 ) -\
-                        int( timestamp_link_down_t0 )
+                    link_down_lat_graph1 = int(graph_timestamp_1) -\
+                            int(timestamp_link_down_t0)
+                    link_down_lat_graph2 = int(graph_timestamp_2) -\
+                            int(timestamp_link_down_t0)
+                    link_down_lat_graph3 = int(graph_timestamp_3) -\
+                            int(timestamp_link_down_t0)
+                
+                    link_down_lat_link1 = int(link_timestamp_1) -\
+                            int(timestamp_link_down_t0)
+                    link_down_lat_link2 = int(link_timestamp_2) -\
+                            int(timestamp_link_down_t0)
+                    link_down_lat_link3 = int(link_timestamp_3) -\
+                            int(timestamp_link_down_t0)
                 else:
-                    main.log.error( "There was an error calculating" +
-                                    " the delta for link down event" )
+                    main.log.error("There was an error calculating"+
+                        " the delta for link down event")
                     link_down_lat_graph1 = 0
                     link_down_lat_graph2 = 0
                     link_down_lat_graph3 = 0
-
+                    
                     link_down_lat_device1 = 0
                     link_down_lat_device2 = 0
                     link_down_lat_device3 = 0
-
-            main.log.info( "Link down latency ONOS1 iteration " +
-                           str( i ) + " (end-to-end): " +
-                           str( link_down_lat_graph1 ) + " ms" )
-            main.log.info( "Link down latency ONOS2 iteration " +
-                           str( i ) + " (end-to-end): " +
-                           str( link_down_lat_graph2 ) + " ms" )
-            main.log.info( "Link down latency ONOS3 iteration " +
-                           str( i ) + " (end-to-end): " +
-                           str( link_down_lat_graph3 ) + " ms" )
-
-            main.log.info( "Link down latency ONOS1 iteration " +
-                           str( i ) + " (link-event-to-system-timestamp): " +
-                           str( link_down_lat_link1 ) + " ms" )
-            main.log.info( "Link down latency ONOS2 iteration " +
-                           str( i ) + " (link-event-to-system-timestamp): " +
-                           str( link_down_lat_link2 ) + " ms" )
-            main.log.info( "Link down latency ONOS3 iteration " +
-                           str( i ) + " (link-event-to-system-timestamp): " +
-                           str( link_down_lat_link3 ) )
-
-            # Calculate avg of node calculations
+        
+            main.log.info("Link down latency ONOS1 iteration "+
+                    str(i)+" (end-to-end): "+
+                    str(link_down_lat_graph1)+" ms")
+            main.log.info("Link down latency ONOS2 iteration "+
+                    str(i)+" (end-to-end): "+
+                    str(link_down_lat_graph2)+" ms")
+            main.log.info("Link down latency ONOS3 iteration "+
+                    str(i)+" (end-to-end): "+
+                    str(link_down_lat_graph3)+" ms")
+            
+            main.log.info("Link down latency ONOS1 iteration "+
+                    str(i)+" (link-event-to-system-timestamp): "+
+                    str(link_down_lat_link1)+" ms")
+            main.log.info("Link down latency ONOS2 iteration "+
+                    str(i)+" (link-event-to-system-timestamp): "+
+                    str(link_down_lat_link2)+" ms")
+            main.log.info("Link down latency ONOS3 iteration "+
+                    str(i)+" (link-event-to-system-timestamp): "+
+                    str(link_down_lat_link3)+" ms")
+      
+            #Calculate avg of node calculations
             link_down_lat_graph_avg =\
-                ( link_down_lat_graph1 +
-                  link_down_lat_graph2 +
-                  link_down_lat_graph3 ) / 3
+                    (link_down_lat_graph1 +
+                     link_down_lat_graph2 +
+                     link_down_lat_graph3) / 3
             link_down_lat_link_avg =\
-                ( link_down_lat_link1 +
-                  link_down_lat_link2 +
-                  link_down_lat_link3 ) / 3
+                    (link_down_lat_link1 +
+                     link_down_lat_link2 +
+                     link_down_lat_link3) / 3
 
-            # Set threshold and append latency to list
+            #Set threshold and append latency to list
             if link_down_lat_graph_avg > down_threshold_min and\
                link_down_lat_graph_avg < down_threshold_max:
                 link_down_graph_to_system_list.append(
-                    link_down_lat_graph_avg )
+                        link_down_lat_graph_avg)
             else:
-                main.log.info( "Link down latency exceeded threshold" )
-                main.log.info( "Results for iteration " + str( i ) +
-                               "have been omitted" )
+                main.log.info("Link down latency exceeded threshold")
+                main.log.info("Results for iteration "+str(i)+
+                        "have been omitted")
             if link_down_lat_link_avg > down_threshold_min and\
                link_down_lat_link_avg < down_threshold_max:
                 link_down_link_to_system_list.append(
-                    link_down_lat_link_avg )
+                        link_down_lat_link_avg)
             else:
-                main.log.info( "Link down latency exceeded threshold" )
-                main.log.info( "Results for iteration " + str( i ) +
-                               "have been omitted" )
+                main.log.info("Link down latency exceeded threshold")
+                main.log.info("Results for iteration "+str(i)+
+                        "have been omitted")
 
-            # NOTE: To remove loss rate and measure latency:
+            #NOTE: To remove loss rate and measure latency:
             #       'sh tc qdisc del dev s1-eth1 root'
             timestamp_link_up_t0 = time.time() * 1000
-            main.Mininet1.handle.sendline( "sh tc qdisc del dev " +
-                                           "s1-eth1 root" )
-            main.Mininet1.handle.expect( "mininet>" )
-
-            main.log.info( "Checking ONOS for link update" )
-
+            main.Mininet1.handle.sendline("sh tc qdisc del dev "+
+                    "s1-eth1 root")
+            main.Mininet1.handle.expect("mininet>")
+            
+            main.log.info("Checking ONOS for link update")
+            
             link_down1 = True
             link_down2 = True
             link_down3 = True
             loop_count = 0
-            while( ( link_down1 and link_down2 and link_down3 )
+            while( (link_down1 and link_down2 and link_down3)\
                     and loop_count < 30 ):
                 json_str1 = main.ONOS1cli.links()
                 json_str2 = main.ONOS2cli.links()
                 json_str3 = main.ONOS3cli.links()
-                if not ( json_str1 and json_str2 and json_str3 ):
-                    main.log.error( "CLI command returned error " )
+                if not (json_str1 and json_str2 and json_str3):
+                    main.log.error("CLI command returned error ")
                     break
                 else:
-                    json_obj1 = json.loads( json_str1 )
-                    json_obj2 = json.loads( json_str2 )
-                    json_obj3 = json.loads( json_str3 )
-
+                    json_obj1 = json.loads(json_str1)
+                    json_obj2 = json.loads(json_str2)
+                    json_obj3 = json.loads(json_str3)
+                
                 for obj1 in json_obj1:
-                    if '01' in obj1[ 'src' ][ 'device' ]:
-                        link_down1 = False
-                        main.log.info( "Link up from " +
-                                       "s1 -> s2 on ONOS1 detected" )
+                    if '01' in obj1['src']['device']:
+                        link_down1 = False 
+                        main.log.info("Link up from "+
+                            "s1 -> s2 on ONOS1 detected")
                 for obj2 in json_obj2:
-                    if '01' in obj2[ 'src' ][ 'device' ]:
-                        link_down2 = False
-                        main.log.info( "Link up from " +
-                                       "s1 -> s2 on ONOS2 detected" )
+                    if '01' in obj2['src']['device']:
+                        link_down2 = False 
+                        main.log.info("Link up from "+
+                            "s1 -> s2 on ONOS2 detected")
                 for obj3 in json_obj3:
-                    if '01' in obj3[ 'src' ][ 'device' ]:
-                        link_down3 = False
-                        main.log.info( "Link up from " +
-                                       "s1 -> s2 on ONOS3 detected" )
-
+                    if '01' in obj3['src']['device']:
+                        link_down3 = False 
+                        main.log.info("Link up from "+
+                            "s1 -> s2 on ONOS3 detected")
+                
                 loop_count += 1
-                time.sleep( 1 )
-
-            if ( link_down1 and link_down2 and link_down3 ):
-                main.log.info( "Link up discovery failed" )
-
+                time.sleep(1)
+            
+            if (link_down1 and link_down2 and link_down3):
+                main.log.info("Link up discovery failed")
+                
                 link_up_lat_graph1 = 0
                 link_up_lat_graph2 = 0
                 link_up_lat_graph3 = 0
                 link_up_lat_device1 = 0
                 link_up_lat_device2 = 0
                 link_up_lat_device3 = 0
-
+                
                 assertion = main.FALSE
             else:
                 json_topo_metrics_1 =\
-                    main.ONOS1cli.topology_events_metrics()
+                        main.ONOS1cli.topology_events_metrics()
                 json_topo_metrics_2 =\
-                    main.ONOS2cli.topology_events_metrics()
+                        main.ONOS2cli.topology_events_metrics()
                 json_topo_metrics_3 =\
-                    main.ONOS3cli.topology_events_metrics()
-                json_topo_metrics_1 = json.loads( json_topo_metrics_1 )
-                json_topo_metrics_2 = json.loads( json_topo_metrics_2 )
-                json_topo_metrics_3 = json.loads( json_topo_metrics_3 )
+                        main.ONOS3cli.topology_events_metrics()
+                json_topo_metrics_1 = json.loads(json_topo_metrics_1)
+                json_topo_metrics_2 = json.loads(json_topo_metrics_2)
+                json_topo_metrics_3 = json.loads(json_topo_metrics_3)
 
-                main.log.info( "Obtaining graph and device timestamp" )
+                main.log.info("Obtaining graph and device timestamp")
                 graph_timestamp_1 = \
-                    json_topo_metrics_1[ graphTimestamp ][ 'value' ]
+                    json_topo_metrics_1[graphTimestamp]['value']
                 graph_timestamp_2 = \
-                    json_topo_metrics_2[ graphTimestamp ][ 'value' ]
+                    json_topo_metrics_2[graphTimestamp]['value']
                 graph_timestamp_3 = \
-                    json_topo_metrics_3[ graphTimestamp ][ 'value' ]
+                    json_topo_metrics_3[graphTimestamp]['value']
 
                 link_timestamp_1 = \
-                    json_topo_metrics_1[ linkTimestamp ][ 'value' ]
+                    json_topo_metrics_1[linkTimestamp]['value']
                 link_timestamp_2 = \
-                    json_topo_metrics_2[ linkTimestamp ][ 'value' ]
+                    json_topo_metrics_2[linkTimestamp]['value']
                 link_timestamp_3 = \
-                    json_topo_metrics_3[ linkTimestamp ][ 'value' ]
+                    json_topo_metrics_3[linkTimestamp]['value']
 
                 if graph_timestamp_1 and graph_timestamp_2 and\
                         graph_timestamp_3 and link_timestamp_1 and\
                         link_timestamp_2 and link_timestamp_3:
-                    link_up_lat_graph1 = int( graph_timestamp_1 ) -\
-                        int( timestamp_link_up_t0 )
-                    link_up_lat_graph2 = int( graph_timestamp_2 ) -\
-                        int( timestamp_link_up_t0 )
-                    link_up_lat_graph3 = int( graph_timestamp_3 ) -\
-                        int( timestamp_link_up_t0 )
-
-                    link_up_lat_link1 = int( link_timestamp_1 ) -\
-                        int( timestamp_link_up_t0 )
-                    link_up_lat_link2 = int( link_timestamp_2 ) -\
-                        int( timestamp_link_up_t0 )
-                    link_up_lat_link3 = int( link_timestamp_3 ) -\
-                        int( timestamp_link_up_t0 )
+                    link_up_lat_graph1 = int(graph_timestamp_1) -\
+                            int(timestamp_link_up_t0)
+                    link_up_lat_graph2 = int(graph_timestamp_2) -\
+                            int(timestamp_link_up_t0)
+                    link_up_lat_graph3 = int(graph_timestamp_3) -\
+                            int(timestamp_link_up_t0)
+                
+                    link_up_lat_link1 = int(link_timestamp_1) -\
+                            int(timestamp_link_up_t0)
+                    link_up_lat_link2 = int(link_timestamp_2) -\
+                            int(timestamp_link_up_t0)
+                    link_up_lat_link3 = int(link_timestamp_3) -\
+                            int(timestamp_link_up_t0)
                 else:
-                    main.log.error( "There was an error calculating" +
-                                    " the delta for link down event" )
+                    main.log.error("There was an error calculating"+
+                        " the delta for link down event")
                     link_up_lat_graph1 = 0
                     link_up_lat_graph2 = 0
                     link_up_lat_graph3 = 0
-
+                    
                     link_up_lat_device1 = 0
                     link_up_lat_device2 = 0
                     link_up_lat_device3 = 0
-
+       
             if debug_mode == 'on':
-                main.log.info( "Link up latency ONOS1 iteration " +
-                               str( i ) + " (end-to-end): " +
-                               str( link_up_lat_graph1 ) + " ms" )
-                main.log.info( "Link up latency ONOS2 iteration " +
-                               str( i ) + " (end-to-end): " +
-                               str( link_up_lat_graph2 ) + " ms" )
-                main.log.info( "Link up latency ONOS3 iteration " +
-                               str( i ) + " (end-to-end): " +
-                               str( link_up_lat_graph3 ) + " ms" )
-
-                main.log.info(
-                    "Link up latency ONOS1 iteration " +
-                    str( i ) +
-                    " (link-event-to-system-timestamp): " +
-                    str( link_up_lat_link1 ) +
-                    " ms" )
-                main.log.info(
-                    "Link up latency ONOS2 iteration " +
-                    str( i ) +
-                    " (link-event-to-system-timestamp): " +
-                    str( link_up_lat_link2 ) +
-                    " ms" )
-                main.log.info(
-                    "Link up latency ONOS3 iteration " +
-                    str( i ) +
-                    " (link-event-to-system-timestamp): " +
-                    str( link_up_lat_link3 ) )
-
-            # Calculate avg of node calculations
+                main.log.info("Link up latency ONOS1 iteration "+
+                    str(i)+" (end-to-end): "+
+                    str(link_up_lat_graph1)+" ms")
+                main.log.info("Link up latency ONOS2 iteration "+
+                    str(i)+" (end-to-end): "+
+                    str(link_up_lat_graph2)+" ms")
+                main.log.info("Link up latency ONOS3 iteration "+
+                    str(i)+" (end-to-end): "+
+                    str(link_up_lat_graph3)+" ms")
+            
+                main.log.info("Link up latency ONOS1 iteration "+
+                    str(i)+" (link-event-to-system-timestamp): "+
+                    str(link_up_lat_link1)+" ms")
+                main.log.info("Link up latency ONOS2 iteration "+
+                    str(i)+" (link-event-to-system-timestamp): "+
+                    str(link_up_lat_link2)+" ms")
+                main.log.info("Link up latency ONOS3 iteration "+
+                    str(i)+" (link-event-to-system-timestamp): "+
+                    str(link_up_lat_link3)+" ms")
+      
+            #Calculate avg of node calculations
             link_up_lat_graph_avg =\
-                ( link_up_lat_graph1 +
-                  link_up_lat_graph2 +
-                  link_up_lat_graph3 ) / 3
+                    (link_up_lat_graph1 +
+                     link_up_lat_graph2 +
+                     link_up_lat_graph3) / 3
             link_up_lat_link_avg =\
-                ( link_up_lat_link1 +
-                  link_up_lat_link2 +
-                  link_up_lat_link3 ) / 3
+                    (link_up_lat_link1 +
+                     link_up_lat_link2 +
+                     link_up_lat_link3) / 3
 
-            # Set threshold and append latency to list
+            #Set threshold and append latency to list
             if link_up_lat_graph_avg > up_threshold_min and\
                link_up_lat_graph_avg < up_threshold_max:
                 link_up_graph_to_system_list.append(
-                    link_up_lat_graph_avg )
+                        link_up_lat_graph_avg)
             else:
-                main.log.info( "Link up latency exceeded threshold" )
-                main.log.info( "Results for iteration " + str( i ) +
-                               "have been omitted" )
+                main.log.info("Link up latency exceeded threshold")
+                main.log.info("Results for iteration "+str(i)+
+                        "have been omitted")
             if link_up_lat_link_avg > up_threshold_min and\
                link_up_lat_link_avg < up_threshold_max:
                 link_up_link_to_system_list.append(
-                    link_up_lat_link_avg )
+                        link_up_lat_link_avg)
             else:
-                main.log.info( "Link up latency exceeded threshold" )
-                main.log.info( "Results for iteration " + str( i ) +
-                               "have been omitted" )
+                main.log.info("Link up latency exceeded threshold")
+                main.log.info("Results for iteration "+str(i)+
+                        "have been omitted")
 
-        # Calculate min, max, avg of list and report
-        link_down_min = min( link_down_graph_to_system_list )
-        link_down_max = max( link_down_graph_to_system_list )
-        link_down_avg = sum( link_down_graph_to_system_list ) / \
-            len( link_down_graph_to_system_list )
-        link_up_min = min( link_up_graph_to_system_list )
-        link_up_max = max( link_up_graph_to_system_list )
-        link_up_avg = sum( link_up_graph_to_system_list ) / \
-            len( link_up_graph_to_system_list )
+        #Calculate min, max, avg of list and report
+        link_down_min = min(link_down_graph_to_system_list)
+        link_down_max = max(link_down_graph_to_system_list)
+        link_down_avg = sum(link_down_graph_to_system_list) / \
+                        len(link_down_graph_to_system_list)
+        link_up_min = min(link_up_graph_to_system_list)
+        link_up_max = max(link_up_graph_to_system_list)
+        link_up_avg = sum(link_up_graph_to_system_list) / \
+                        len(link_up_graph_to_system_list)
         link_down_std_dev = \
-            str( round( numpy.std( link_down_graph_to_system_list ), 1 ) )
+                str(round(numpy.std(link_down_graph_to_system_list),1))
         link_up_std_dev = \
-            str( round( numpy.std( link_up_graph_to_system_list ), 1 ) )
+                str(round(numpy.std(link_up_graph_to_system_list),1))
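+        #Summary statistics cover only the iterations that passed the
+        #threshold filters above; min/max are computed but not reported.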
 
-        main.log.report( "Link down latency " +
-                         "Avg: " + str( link_down_avg ) + " ms " +
-                         "Std Deviation: " + link_down_std_dev + " ms" )
-        main.log.report( "Link up latency " +
-                         "Avg: " + str( link_up_avg ) + " ms " +
-                         "Std Deviation: " + link_up_std_dev + " ms" )
+        main.log.report("Link down latency " +
+                "Avg: "+str(link_down_avg)+" ms "+
+                "Std Deviation: "+link_down_std_dev+" ms")
+        main.log.report("Link up latency "+
+                "Avg: "+str(link_up_avg)+" ms "+
+                "Std Deviation: "+link_up_std_dev+" ms")
 
-        utilities.assert_equals(
-            expect=main.TRUE,
-            actual=assertion,
-            onpass="Link discovery latency calculation successful",
-            onfail="Link discovery latency case failed" )
+        utilities.assert_equals(expect=main.TRUE, actual=assertion,
+                onpass="Link discovery latency calculation successful",
+                onfail="Link discovery latency case failed")
 
-    def CASE5( self, main ):
-        """
+    def CASE5(self, main):
+        '''
         100 Switch discovery latency
 
         Important:
-            This test case can be potentially dangerous if
+            This test case can be potentially dangerous if 
             your machine has previously set iptables rules.
             One of the steps of the test case will flush
             all existing iptables rules.
         Note:
-            You can specify the number of switches in the
+            You can specify the number of switches in the 
             params file to adjust the switch discovery size
-            ( and specify the corresponding topology in Mininet1
-            .topo file )
-        """
+            (and specify the corresponding topology in Mininet1 
+            .topo file)
+        '''
         import time
         import subprocess
         import os
         import requests
         import json
 
-        ONOS1_ip = main.params[ 'CTRL' ][ 'ip1' ]
-        ONOS2_ip = main.params[ 'CTRL' ][ 'ip2' ]
-        ONOS3_ip = main.params[ 'CTRL' ][ 'ip3' ]
-        MN1_ip = main.params[ 'MN' ][ 'ip1' ]
-        ONOS_user = main.params[ 'CTRL' ][ 'user' ]
+        ONOS1_ip = main.params['CTRL']['ip1']
+        ONOS2_ip = main.params['CTRL']['ip2']
+        ONOS3_ip = main.params['CTRL']['ip3']
+        MN1_ip = main.params['MN']['ip1']
+        ONOS_user = main.params['CTRL']['user']
 
-        default_sw_port = main.params[ 'CTRL' ][ 'port1' ]
+        default_sw_port = main.params['CTRL']['port1']
+       
+        #Number of iterations of case
+        num_iter = main.params['TEST']['numIter']
+        num_sw = main.params['TEST']['numSwitch']
 
-        # Number of iterations of case
-        num_iter = main.params[ 'TEST' ][ 'numIter' ]
-        num_sw = main.params[ 'TEST' ][ 'numSwitch' ]
+        #Timestamp 'keys' for json metrics output.
+        #These are subject to change, hence moved into params
+        deviceTimestamp = main.params['JSON']['deviceTimestamp']
+        graphTimestamp = main.params['JSON']['graphTimestamp']
+        
+        debug_mode = main.params['TEST']['debugMode']
 
-        # Timestamp 'keys' for json metrics output.
-        # These are subject to change, hence moved into params
-        deviceTimestamp = main.params[ 'JSON' ][ 'deviceTimestamp' ]
-        graphTimestamp = main.params[ 'JSON' ][ 'graphTimestamp' ]
-
-        debug_mode = main.params[ 'TEST' ][ 'debugMode' ]
-
-        local_time = time.strftime( '%X' )
-        local_time = local_time.replace( "/", "" )
-        local_time = local_time.replace( " ", "_" )
-        local_time = local_time.replace( ":", "" )
+        local_time = time.strftime('%X')
+        local_time = local_time.replace("/","")
+        local_time = local_time.replace(" ","_")
+        local_time = local_time.replace(":","")
         if debug_mode == 'on':
-            main.ONOS1.tshark_pcap( "eth0",
-                                    "/tmp/100_sw_lat_pcap_" + local_time )
+            main.ONOS1.tshark_pcap("eth0",
+                    "/tmp/100_sw_lat_pcap_"+local_time) 
+ 
+        #Threshold for this test case
+        sw_disc_threshold_str = main.params['TEST']['swDisc100Threshold']
+        sw_disc_threshold_obj = sw_disc_threshold_str.split(",")
+        sw_disc_threshold_min = int(sw_disc_threshold_obj[0])
+        sw_disc_threshold_max = int(sw_disc_threshold_obj[1])
 
-        # Threshold for this test case
-        sw_disc_threshold_str = main.params[ 'TEST' ][ 'swDisc100Threshold' ]
-        sw_disc_threshold_obj = sw_disc_threshold_str.split( "," )
-        sw_disc_threshold_min = int( sw_disc_threshold_obj[ 0 ] )
-        sw_disc_threshold_max = int( sw_disc_threshold_obj[ 1 ] )
-
-        tshark_ofp_output = "/tmp/tshark_ofp_" + num_sw + "sw.txt"
-        tshark_tcp_output = "/tmp/tshark_tcp_" + num_sw + "sw.txt"
+        tshark_ofp_output = "/tmp/tshark_ofp_"+num_sw+"sw.txt"
+        tshark_tcp_output = "/tmp/tshark_tcp_"+num_sw+"sw.txt"
 
         tshark_ofp_result_list = []
         tshark_tcp_result_list = []
 
         sw_discovery_lat_list = []
 
-        main.case( num_sw + " Switch discovery latency" )
-        main.step( "Assigning all switches to ONOS1" )
-        for i in range( 1, int( num_sw ) + 1 ):
+        main.case(num_sw+" Switch discovery latency")
+        main.step("Assigning all switches to ONOS1")
+        for i in range(1, int(num_sw)+1):
             main.Mininet1.assign_sw_controller(
-                sw=str( i ),
-                ip1=ONOS1_ip,
-                port1=default_sw_port )
+                    sw=str(i),
+                    ip1=ONOS1_ip,
+                    port1=default_sw_port)
+        
+        #Ensure that nodes are configured with ptpd
+        #Just a warning message
+        main.log.info("Please check ptpd configuration to ensure"+\
+                " All nodes' system times are in sync")
+        time.sleep(5)
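+        #ptpd keeps the machines' system clocks in sync so that wall-clock
+        #timestamps taken here can meaningfully be compared against metric
+        #timestamps reported by the remote ONOS instances.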
 
-        # Ensure that nodes are configured with ptpd
-        # Just a warning message
-        main.log.info( "Please check ptpd configuration to ensure" +
-                       " All nodes' system times are in sync" )
-        time.sleep( 5 )
-
-        for i in range( 0, int( num_iter ) ):
-
-            main.step( "Set iptables rule to block incoming sw connections" )
-            # Set iptables rule to block incoming switch connections
-            # The rule description is as follows:
+        for i in range(0, int(num_iter)):
+            
+            main.step("Set iptables rule to block incoming sw connections")
+            #Set iptables rule to block incoming switch connections
+            #The rule description is as follows:
             #   Append to INPUT rule,
             #   behavior DROP that matches following:
             #       * packet type: tcp
             #       * source IP: MN1_ip
             #       * destination PORT: 6633
             main.ONOS1.handle.sendline(
-                "sudo iptables -A INPUT -p tcp -s " + MN1_ip +
-                " --dport " + default_sw_port + " -j DROP" )
-            main.ONOS1.handle.expect( "\$" )
-            #   Append to OUTPUT rule,
+                    "sudo iptables -A INPUT -p tcp -s "+MN1_ip+
+                    " --dport "+default_sw_port+" -j DROP")
+            main.ONOS1.handle.expect("\$") 
+            #   Append to OUTPUT rule, 
             #   behavior DROP that matches following:
             #       * packet type: tcp
             #       * source IP: MN1_ip
             #       * destination PORT: 6633
             main.ONOS1.handle.sendline(
-                "sudo iptables -A OUTPUT -p tcp -s " + MN1_ip +
-                " --dport " + default_sw_port + " -j DROP" )
-            main.ONOS1.handle.expect( "\$" )
-            # Give time to allow rule to take effect
-            # NOTE: Sleep period may need to be configured
+                    "sudo iptables -A OUTPUT -p tcp -s "+MN1_ip+
+                    " --dport "+default_sw_port+" -j DROP")
+            main.ONOS1.handle.expect("\$")
+            #Give time to allow rule to take effect
+            #NOTE: Sleep period may need to be configured 
             #      based on the number of switches in the topology
-            main.log.info( "Please wait for switch connection to " +
-                           "time out" )
-            time.sleep( 60 )
+            main.log.info("Please wait for switch connection to "+
+                    "time out")
+            time.sleep(60)
+            
+            #Gather vendor OFP with tshark
+            main.ONOS1.tshark_grep("OFP 86 Vendor", 
+                    tshark_ofp_output)
+            main.ONOS1.tshark_grep("TCP 74 ",
+                    tshark_tcp_output)
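+            #tshark captures the OpenFlow Vendor messages and initial TCP
+            #segments seen after the flush; the output files are copied
+            #back and, in debug mode, dumped for manual analysis (see the
+            #TODO below about automating this).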
 
-            # Gather vendor OFP with tshark
-            main.ONOS1.tshark_grep( "OFP 86 Vendor",
-                                    tshark_ofp_output )
-            main.ONOS1.tshark_grep( "TCP 74 ",
-                                    tshark_tcp_output )
-
-            # NOTE: Remove all iptables rule quickly ( flush )
-            #      Before removal, obtain TestON timestamp at which
+            #NOTE: Remove all iptables rules quickly (flush)
+            #      Before removal, obtain TestON timestamp at which 
             #      removal took place
-            #      ( ensuring nodes are configured via ptp )
+            #      (ensuring nodes are configured via ptp)
             #      sudo iptables -F
-
+            
             t0_system = time.time() * 1000
             main.ONOS1.handle.sendline(
-                "sudo iptables -F" )
+                    "sudo iptables -F")
 
-            # Counter to track loop count
+            #Counter to track loop count
             counter_loop = 0
             counter_avail1 = 0
             counter_avail2 = 0
@@ -1550,124 +1741,191 @@
             onos2_dev = False
             onos3_dev = False
             while counter_loop < 60:
-                # Continue to check devices for all device
-                # availability. When all devices in all 3
-                # ONOS instances indicate that devices are available
-                # obtain graph event timestamp for t1.
+                #Continue to check devices for all device 
+                #availability. When all devices in all 3
+                #ONOS instances indicate that devices are available
+                #obtain graph event timestamp for t1.
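+                #The per-node counters reset to zero whenever an unavailable
+                #device is seen; a node is only marked done once its counter
+                #reaches num_sw consecutive available devices.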
                 device_str_obj1 = main.ONOS1cli.devices()
                 device_str_obj2 = main.ONOS2cli.devices()
                 device_str_obj3 = main.ONOS3cli.devices()
 
-                device_json1 = json.loads( device_str_obj1 )
-                device_json2 = json.loads( device_str_obj2 )
-                device_json3 = json.loads( device_str_obj3 )
-
+                device_json1 = json.loads(device_str_obj1)                
+                device_json2 = json.loads(device_str_obj2)                
+                device_json3 = json.loads(device_str_obj3)           
+                
                 for device1 in device_json1:
-                    if device1[ 'available' ]:
+                    if device1['available']:
                         counter_avail1 += 1
-                        if counter_avail1 == int( num_sw ):
+                        if counter_avail1 == int(num_sw):
                             onos1_dev = True
-                            main.log.info( "All devices have been " +
-                                           "discovered on ONOS1" )
+                            main.log.info("All devices have been "+
+                                    "discovered on ONOS1")
                     else:
                         counter_avail1 = 0
                 for device2 in device_json2:
-                    if device2[ 'available' ]:
+                    if device2['available']:
                         counter_avail2 += 1
-                        if counter_avail2 == int( num_sw ):
+                        if counter_avail2 == int(num_sw):
                             onos2_dev = True
-                            main.log.info( "All devices have been " +
-                                           "discovered on ONOS2" )
+                            main.log.info("All devices have been "+
+                                    "discovered on ONOS2")
                     else:
                         counter_avail2 = 0
                 for device3 in device_json3:
-                    if device3[ 'available' ]:
+                    if device3['available'] == True:
                         counter_avail3 += 1
-                        if counter_avail3 == int( num_sw ):
+                        if counter_avail3 == int(num_sw):
                             onos3_dev = True
-                            main.log.info( "All devices have been " +
-                                           "discovered on ONOS3" )
+                            main.log.info("All devices have been "+
+                                    "discovered on ONOS3")
                     else:
                         counter_avail3 = 0
 
                 if onos1_dev and onos2_dev and onos3_dev:
-                    main.log.info( "All devices have been discovered " +
-                                   "on all ONOS instances" )
+                    main.log.info("All devices have been discovered "+
+                            "on all ONOS instances")
                     json_str_topology_metrics_1 =\
                         main.ONOS1cli.topology_events_metrics()
                     json_str_topology_metrics_2 =\
                         main.ONOS2cli.topology_events_metrics()
                     json_str_topology_metrics_3 =\
                         main.ONOS3cli.topology_events_metrics()
-
-                    # Exit while loop if all devices discovered
-                    break
-
+                   
+                    #Exit while loop if all devices discovered
+                    break 
+                
                 counter_loop += 1
-                # Give some time in between CLI calls
-                #( will not affect measurement )
-                time.sleep( 3 )
+                #Give some time in between CLI calls
+                #(will not affect measurement)
+                time.sleep(3)
 
             main.ONOS1.tshark_stop()
+            
+            os.system("scp "+ONOS_user+"@"+ONOS1_ip+":"+
+                    tshark_ofp_output+" /tmp/") 
+            os.system("scp "+ONOS_user+"@"+ONOS1_ip+":"+
+                    tshark_tcp_output+" /tmp/")
 
-            os.system( "scp " + ONOS_user + "@" + ONOS1_ip + ":" +
-                       tshark_ofp_output + " /tmp/" )
-            os.system( "scp " + ONOS_user + "@" + ONOS1_ip + ":" +
-                       tshark_tcp_output + " /tmp/" )
-
-            # TODO: Automate OFP output analysis
-            # Debug mode - print out packets captured at runtime
-            if debug_mode == 'on':
-                ofp_file = open( tshark_ofp_output, 'r' )
-                main.log.info( "Tshark OFP Vendor output: " )
+            #TODO: Automate OFP output analysis
+            #Debug mode - print out packets captured at runtime     
+            if debug_mode == 'on': 
+                ofp_file = open(tshark_ofp_output, 'r')
+                main.log.info("Tshark OFP Vendor output: ")
                 for line in ofp_file:
-                    tshark_ofp_result_list.append( line )
-                    main.log.info( line )
+                    tshark_ofp_result_list.append(line)
+                    main.log.info(line)
                 ofp_file.close()
 
-                tcp_file = open( tshark_tcp_output, 'r' )
-                main.log.info( "Tshark TCP 74 output: " )
+                tcp_file = open(tshark_tcp_output, 'r')
+                main.log.info("Tshark TCP 74 output: ")
                 for line in tcp_file:
-                    tshark_tcp_result_list.append( line )
-                    main.log.info( line )
+                    tshark_tcp_result_list.append(line)
+                    main.log.info(line)
                 tcp_file.close()
 
-            json_obj_1 = json.loads( json_str_topology_metrics_1 )
-            json_obj_2 = json.loads( json_str_topology_metrics_2 )
-            json_obj_3 = json.loads( json_str_topology_metrics_3 )
+            json_obj_1 = json.loads(json_str_topology_metrics_1)
+            json_obj_2 = json.loads(json_str_topology_metrics_2)
+            json_obj_3 = json.loads(json_str_topology_metrics_3)
 
             graph_timestamp_1 = \
-                json_obj_1[ graphTimestamp ][ 'value' ]
+                    json_obj_1[graphTimestamp]['value']
             graph_timestamp_2 = \
-                json_obj_2[ graphTimestamp ][ 'value' ]
+                    json_obj_2[graphTimestamp]['value']
             graph_timestamp_3 = \
-                json_obj_3[ graphTimestamp ][ 'value' ]
+                    json_obj_3[graphTimestamp]['value']
 
-            graph_lat_1 = int( graph_timestamp_1 ) - int( t0_system )
-            graph_lat_2 = int( graph_timestamp_2 ) - int( t0_system )
-            graph_lat_3 = int( graph_timestamp_3 ) - int( t0_system )
+            graph_lat_1 = int(graph_timestamp_1) - int(t0_system)
+            graph_lat_2 = int(graph_timestamp_2) - int(t0_system)
+            graph_lat_3 = int(graph_timestamp_3) - int(t0_system)
 
             avg_graph_lat = \
-                ( int( graph_lat_1 ) +
-                  int( graph_lat_2 ) +
-                  int( graph_lat_3 ) ) / 3
-
+                    (int(graph_lat_1) +\
+                     int(graph_lat_2) +\
+                     int(graph_lat_3)) / 3
+    
             if avg_graph_lat > sw_disc_threshold_min \
                     and avg_graph_lat < sw_disc_threshold_max:
                 sw_discovery_lat_list.append(
-                    avg_graph_lat )
+                        avg_graph_lat)
             else:
-                main.log.info( "100 Switch discovery latency " +
-                               "exceeded the threshold." )
+                main.log.info("100 Switch discovery latency "+
+                        "exceeded the threshold.")
+            
+            #END ITERATION FOR LOOP
 
-            # END ITERATION FOR LOOP
+        sw_lat_min = min(sw_discovery_lat_list)
+        sw_lat_max = max(sw_discovery_lat_list)
+        sw_lat_avg = sum(sw_discovery_lat_list) /\
+                     len(sw_discovery_lat_list)
 
-        sw_lat_min = min( sw_discovery_lat_list )
-        sw_lat_max = max( sw_discovery_lat_list )
-        sw_lat_avg = sum( sw_discovery_lat_list ) /\
-            len( sw_discovery_lat_list )
+        main.log.report("100 Switch discovery lat "+\
+                "Min: "+str(sw_lat_min)+" ms "+\
+                "Max: "+str(sw_lat_max)+" ms "+\
+                "Avg: "+str(sw_lat_avg)+" ms")
 
-        main.log.report( "100 Switch discovery lat " +
-                         "Min: " + str( sw_lat_min ) + " ms" +
-                         "Max: " + str( sw_lat_max ) + " ms" +
-                         "Avg: " + str( sw_lat_avg ) + " ms" )
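Everything from the flush down to the threshold check follows one pattern: poll each instance's devices output until all num_sw switches report available, then read the topology graph-event timestamp from topology_events_metrics() and subtract t0. A condensed sketch of that polling step, assuming CLI driver objects with the devices() and topology_events_metrics() calls used above (the helper name and the graph_ts_key parameter, which stands in for the graphTimestamp key, are illustrative):

    import json
    import time

    def wait_for_discovery_latency(clis, num_sw, t0_ms, graph_ts_key,
                                   max_loops=60, poll_interval=3):
        # Poll every ONOS CLI until all num_sw devices are 'available', then
        # return the per-node graph-event latency in ms relative to t0_ms.
        for _ in range(max_loops):
            all_ready = True
            for cli in clis:
                devices = json.loads(cli.devices())
                available = sum(1 for d in devices if d['available'])
                if available != num_sw:
                    all_ready = False
            if all_ready:
                latencies = []
                for cli in clis:
                    metrics = json.loads(cli.topology_events_metrics())
                    graph_ts = int(metrics[graph_ts_key]['value'])
                    latencies.append(graph_ts - int(t0_ms))
                return latencies
            # Space out the CLI calls; this does not affect the measurement.
            time.sleep(poll_interval)
        return None

Averaging the returned list and gating it against the min/max thresholds reproduces the sw_discovery_lat_list handling above.
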
+    def CASE6(self, main):
+        '''
+        Increase the cluster size and start the CLI on the newly added nodes
+        '''
+        import time
+        
+        ONOS1_ip = main.params['CTRL']['ip1']
+        ONOS2_ip = main.params['CTRL']['ip2']
+        ONOS3_ip = main.params['CTRL']['ip3']
+        ONOS4_ip = main.params['CTRL']['ip4']
+        ONOS5_ip = main.params['CTRL']['ip5']
+        ONOS6_ip = main.params['CTRL']['ip6']
+        ONOS7_ip = main.params['CTRL']['ip7']
+
+        cell_name = main.params['ENV']['cellName']
+    
+        global cluster_count
+        
+        #Cluster size is increased by two every time this case runs
+        cluster_count += 2 
+
+        main.log.report("Increasing cluster size to "+
+                str(cluster_count))
+
+        install_result = main.FALSE
+        if cluster_count == 3:
+            main.log.info("Installing nodes 2 and 3")
+            node2_result = \
+                main.ONOSbench.onos_install(node=ONOS2_ip)
+            node3_result = \
+                main.ONOSbench.onos_install(node=ONOS3_ip)
+            install_result = node2_result and node3_result
+            
+            time.sleep(5)
+
+            main.ONOS2cli.start_onos_cli(ONOS2_ip)
+            main.ONOS3cli.start_onos_cli(ONOS3_ip)
+
+        elif cluster_count == 5:
+            main.log.info("Installing nodes 4 and 5")
+            node4_result = \
+                main.ONOSbench.onos_install(node=ONOS4_ip)
+            node5_result = \
+                main.ONOSbench.onos_install(node=ONOS5_ip)
+            install_result = node4_result and node5_result
+
+            time.sleep(5)
+
+            main.ONOS4cli.start_onos_cli(ONOS4_ip)
+            main.ONOS5cli.start_onos_cli(ONOS5_ip)
+
+        elif cluster_count == 7:
+            main.log.info("Installing nodes 6 and 7")
+            node6_result = \
+                main.ONOSbench.onos_install(node=ONOS6_ip)
+            node7_result = \
+                main.ONOSbench.onos_install(node=ONOS7_ip)
+            install_result = node6_result and node7_result
+
+            time.sleep(5)
+
+            main.ONOS6cli.start_onos_cli(ONOS6_ip)
+            main.ONOS7cli.start_onos_cli(ONOS7_ip)
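
CASE6 grows the cluster in fixed pairs, one pair per appearance of the case in the <testcases> sequence. The repeated if/elif blocks can be read as the following loop, assuming ordered lists of node IPs and CLI handles (the helper name and list construction are illustrative; onos_install and start_onos_cli are the driver calls used above):

    import time

    def grow_cluster_by_two(main, node_ips, cli_handles, cluster_count):
        # node_ips / cli_handles are ordered, e.g. [ONOS1_ip, ..., ONOS7_ip]
        # and [main.ONOS1cli, ..., main.ONOS7cli]. Install the two nodes that
        # bring the cluster up to cluster_count, then start their CLIs.
        new_indices = [cluster_count - 2, cluster_count - 1]
        install_result = main.TRUE
        for i in new_indices:
            main.log.info("Installing node " + str(i + 1))
            install_result = install_result and \
                main.ONOSbench.onos_install(node=node_ips[i])
        time.sleep(5)
        for i in new_indices:
            cli_handles[i].start_onos_cli(node_ips[i])
        return install_result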
+
+
+
diff --git a/TestON/tests/TopoPerfNext/TopoPerfNext.topo b/TestON/tests/TopoPerfNext/TopoPerfNext.topo
index 4ee44e2..fc70784 100644
--- a/TestON/tests/TopoPerfNext/TopoPerfNext.topo
+++ b/TestON/tests/TopoPerfNext/TopoPerfNext.topo
@@ -36,6 +36,42 @@
             <connect_order>2</connect_order>
             <COMPONENTS> </COMPONENTS>
         </ONOS3cli>
+        
+        <ONOS4cli>
+            <host>10.128.174.10</host>
+            <user>admin</user>
+            <password>onos_test</password>
+            <type>OnosCliDriver</type>
+            <connect_order>2</connect_order>
+            <COMPONENTS> </COMPONENTS>
+        </ONOS4cli>
+        
+        <ONOS5cli>
+            <host>10.128.174.10</host>
+            <user>admin</user>
+            <password>onos_test</password>
+            <type>OnosCliDriver</type>
+            <connect_order>2</connect_order>
+            <COMPONENTS> </COMPONENTS>
+        </ONOS5cli>
+        
+        <ONOS6cli>
+            <host>10.128.174.10</host>
+            <user>admin</user>
+            <password>onos_test</password>
+            <type>OnosCliDriver</type>
+            <connect_order>2</connect_order>
+            <COMPONENTS> </COMPONENTS>
+        </ONOS6cli>
+        
+        <ONOS7cli>
+            <host>10.128.174.10</host>
+            <user>admin</user>
+            <password>onos_test</password>
+            <type>OnosCliDriver</type>
+            <connect_order>2</connect_order>
+            <COMPONENTS> </COMPONENTS>
+        </ONOS7cli>
 
         <ONOS1>
             <host>10.128.174.1</host>
@@ -63,6 +99,42 @@
             <connect_order>3</connect_order>
             <COMPONENTS> </COMPONENTS>
         </ONOS3>
+        
+        <ONOS4>
+            <host>10.128.174.4</host>
+            <user>admin</user>
+            <password>onos_test</password>
+            <type>OnosDriver</type>
+            <connect_order>3</connect_order>
+            <COMPONENTS> </COMPONENTS>
+        </ONOS4>
+        
+        <ONOS5>
+            <host>10.128.174.5</host>
+            <user>admin</user>
+            <password>onos_test</password>
+            <type>OnosDriver</type>
+            <connect_order>3</connect_order>
+            <COMPONENTS> </COMPONENTS>
+        </ONOS5>
+        
+        <ONOS6>
+            <host>10.128.174.6</host>
+            <user>admin</user>
+            <password>onos_test</password>
+            <type>OnosDriver</type>
+            <connect_order>3</connect_order>
+            <COMPONENTS> </COMPONENTS>
+        </ONOS6>
+        
+        <ONOS7>
+            <host>10.128.174.7</host>
+            <user>admin</user>
+            <password>onos_test</password>
+            <type>OnosDriver</type>
+            <connect_order>3</connect_order>
+            <COMPONENTS> </COMPONENTS>
+        </ONOS7>
 
         <Mininet1>
             <host>10.128.10.90</host>