Run codestyle script on HA test suite

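Apply the codestyle script's conventions across the HA tests. The
changes are whitespace and syntax normalizations only: spaces are added
inside brackets and parentheses around indices and arguments (e.g.
main.CLIs[i] -> main.CLIs[ i ], time.sleep(1) -> time.sleep( 1 )),
empty brackets are collapsed (startNet( ) -> startNet(), [ ] -> []),
stray spaces around keyword '=' and before ':' are removed,
continuation-line indentation is corrected, a commented-out block is
requoted from ''' to """, and "except NameError, e" is rewritten as
"except NameError as e". No functional changes are intended.
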
Change-Id: Ib59ce54b5f5596a1d33845f05b86a117d42a4d32
diff --git a/TestON/tests/HA/HAclusterRestart/HAclusterRestart.py b/TestON/tests/HA/HAclusterRestart/HAclusterRestart.py
index 134ac76..a7ecb53 100644
--- a/TestON/tests/HA/HAclusterRestart/HAclusterRestart.py
+++ b/TestON/tests/HA/HAclusterRestart/HAclusterRestart.py
@@ -22,8 +22,6 @@
 CASE16: Install Distributed Primitives app
 CASE17: Check for basic functionality with distributed primitives
 """
-
-
 class HAclusterRestart:
 
     def __init__( self ):
@@ -147,7 +145,7 @@
                             filePath + topoName,
                             main.Mininet1.home,
                             direction="to" )
-        mnResult = main.Mininet1.startNet( )
+        mnResult = main.Mininet1.startNet()
         utilities.assert_equals( expect=main.TRUE, actual=mnResult,
                                  onpass="Mininet Started",
                                  onfail="Error starting Mininet" )
@@ -190,7 +188,7 @@
                   'seamless="seamless"></iframe>\n'
         graphs += ']]></ac:plain-text-body>\n'
         graphs += '</ac:structured-macro>\n'
-        main.log.wiki(graphs)
+        main.log.wiki( graphs )
 
         main.step( "Creating ONOS package" )
         packageResult = main.ONOSbench.buckBuild()
@@ -234,9 +232,9 @@
         cliResults = main.TRUE
         threads = []
         for i in range( main.numCtrls ):
-            t = main.Thread( target=main.CLIs[i].startOnosCli,
+            t = main.Thread( target=main.CLIs[ i ].startOnosCli,
                              name="startOnosCli-" + str( i ),
-                             args=[main.nodes[i].ip_address] )
+                             args=[ main.nodes[ i ].ip_address ] )
             threads.append( t )
             t.start()
 
@@ -261,7 +259,7 @@
         main.step( "Checking ONOS nodes" )
         nodeResults = utilities.retry( main.HA.nodesCheck,
                                        False,
-                                       args=[main.activeNodes],
+                                       args=[ main.activeNodes ],
                                        attempts=5 )
 
         utilities.assert_equals( expect=True, actual=nodeResults,
@@ -270,7 +268,7 @@
 
         if not nodeResults:
             for i in main.activeNodes:
-                cli = main.CLIs[i]
+                cli = main.CLIs[ i ]
                 main.log.debug( "{} components not ACTIVE: \n{}".format(
                     cli.name,
                     cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
@@ -282,7 +280,7 @@
         # get data from the params
         apps = main.params.get( 'apps' )
         if apps:
-            apps = apps.split(',')
+            apps = apps.split( ',' )
             main.log.warn( apps )
             activateResult = True
             for app in apps:
@@ -309,8 +307,8 @@
             main.log.debug( config )
             checkResult = main.TRUE
             for component in config:
-                for setting in config[component]:
-                    value = config[component][setting]
+                for setting in config[ component ]:
+                    value = config[ component ][ setting ]
                     check = main.CLIs[ 0 ].setCfg( component, setting, value )
                     main.log.info( "Value was changed? {}".format( main.TRUE == check ) )
                     checkResult = check and checkResult
@@ -325,7 +323,7 @@
         appCheck = main.TRUE
         threads = []
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].appToIDCheck,
+            t = main.Thread( target=main.CLIs[ i ].appToIDCheck,
                              name="appToIDCheck-" + str( i ),
                              args=[] )
             threads.append( t )
@@ -335,9 +333,9 @@
             t.join()
             appCheck = appCheck and t.result
         if appCheck != main.TRUE:
-            node = main.activeNodes[0]
-            main.log.warn( main.CLIs[node].apps() )
-            main.log.warn( main.CLIs[node].appIDs() )
+            node = main.activeNodes[ 0 ]
+            main.log.warn( main.CLIs[ node ].apps() )
+            main.log.warn( main.CLIs[ node ].appIDs() )
         utilities.assert_equals( expect=main.TRUE, actual=appCheck,
                                  onpass="App Ids seem to be correct",
                                  onfail="Something is wrong with app Ids" )
@@ -422,9 +420,9 @@
         # Manually assign mastership to the controller we want
         roleCall = main.TRUE
 
-        ipList = [ ]
+        ipList = []
         deviceList = []
-        onosCli = main.CLIs[ main.activeNodes[0] ]
+        onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
         try:
             # Assign mastership to specific controllers. This assignment was
             # determined for a 7 node cluser, but will work with any sized
@@ -498,8 +496,8 @@
         #       atomic and is actually a multi step process
         time.sleep( 5 )
         for i in range( len( ipList ) ):
-            ip = ipList[i]
-            deviceId = deviceList[i]
+            ip = ipList[ i ]
+            deviceId = deviceList[ i ]
             # Check assignment
             master = onosCli.getRole( deviceId ).get( 'master' )
             if ip in master:
@@ -549,7 +547,7 @@
 
         # install onos-app-fwd
         main.step( "Install reactive forwarding app" )
-        onosCli = main.CLIs[ main.activeNodes[0] ]
+        onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
         installResults = onosCli.activateApp( "org.onosproject.fwd" )
         utilities.assert_equals( expect=main.TRUE, actual=installResults,
                                  onpass="Install fwd successful",
@@ -559,7 +557,7 @@
         appCheck = main.TRUE
         threads = []
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].appToIDCheck,
+            t = main.Thread( target=main.CLIs[ i ].appToIDCheck,
                              name="appToIDCheck-" + str( i ),
                              args=[] )
             threads.append( t )
@@ -584,13 +582,13 @@
         pingResult = main.Mininet1.pingall()
         time2 = time.time()
         if not pingResult:
-            main.log.warn("First pingall failed. Trying again...")
+            main.log.warn( "First pingall failed. Trying again..." )
             pingResult = main.Mininet1.pingall()
             passMsg += " on the second try"
         utilities.assert_equals(
             expect=main.TRUE,
             actual=pingResult,
-            onpass= passMsg,
+            onpass=passMsg,
             onfail="Reactive Pingall failed, " +
                    "one or more ping pairs failed" )
         main.log.info( "Time for pingall: %2f seconds" %
@@ -599,8 +597,8 @@
         time.sleep( 11 )
         # uninstall onos-app-fwd
         main.step( "Uninstall reactive forwarding app" )
-        node = main.activeNodes[0]
-        uninstallResult = main.CLIs[node].deactivateApp( "org.onosproject.fwd" )
+        node = main.activeNodes[ 0 ]
+        uninstallResult = main.CLIs[ node ].deactivateApp( "org.onosproject.fwd" )
         utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
                                  onpass="Uninstall fwd successful",
                                  onfail="Uninstall fwd failed" )
@@ -609,7 +607,7 @@
         threads = []
         appCheck2 = main.TRUE
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].appToIDCheck,
+            t = main.Thread( target=main.CLIs[ i ].appToIDCheck,
                              name="appToIDCheck-" + str( i ),
                              args=[] )
             threads.append( t )
@@ -619,9 +617,9 @@
             t.join()
             appCheck2 = appCheck2 and t.result
         if appCheck2 != main.TRUE:
-            node = main.activeNodes[0]
-            main.log.warn( main.CLIs[node].apps() )
-            main.log.warn( main.CLIs[node].appIDs() )
+            node = main.activeNodes[ 0 ]
+            main.log.warn( main.CLIs[ node ].apps() )
+            main.log.warn( main.CLIs[ node ].appIDs() )
         utilities.assert_equals( expect=main.TRUE, actual=appCheck2,
                                  onpass="App Ids seem to be correct",
                                  onfail="Something is wrong with app Ids" )
@@ -649,8 +647,8 @@
                 host2Id = host2Dict.get( 'id', None )
             if host1Id and host2Id:
                 nodeNum = ( i % len( main.activeNodes ) )
-                node = main.activeNodes[nodeNum]
-                tmpId = main.CLIs[node].addHostIntent( host1Id, host2Id )
+                node = main.activeNodes[ nodeNum ]
+                tmpId = main.CLIs[ node ].addHostIntent( host1Id, host2Id )
                 if tmpId:
                     main.log.info( "Added intent with id: " + tmpId )
                     intentIds.append( tmpId )
@@ -660,8 +658,8 @@
             else:
                 main.log.error( "Error, getHost() failed for h" + str( i ) +
                                 " and/or h" + str( i + 10 ) )
-                node = main.activeNodes[0]
-                hosts = main.CLIs[node].hosts()
+                node = main.activeNodes[ 0 ]
+                hosts = main.CLIs[ node ].hosts()
                 main.log.warn( "Hosts output: " )
                 try:
                     main.log.warn( json.dumps( json.loads( hosts ),
@@ -729,7 +727,7 @@
                 for i in range( 14 ):
                     topics.append( "work-partition-" + str( i ) )
                 main.log.debug( topics )
-                ONOStopics = [ j['topic'] for j in parsedLeaders ]
+                ONOStopics = [ j[ 'topic' ] for j in parsedLeaders ]
                 for topic in topics:
                     if topic not in ONOStopics:
                         main.log.error( "Error: " + topic +
@@ -743,13 +741,13 @@
         # Check all nodes
         if missing:
             for i in main.activeNodes:
-                response = main.CLIs[i].leaders( jsonFormat=False)
-                main.log.warn( str( main.CLIs[i].name ) + " leaders output: \n" +
+                response = main.CLIs[ i ].leaders( jsonFormat=False )
+                main.log.warn( str( main.CLIs[ i ].name ) + " leaders output: \n" +
                                str( response ) )
 
         partitions = onosCli.partitions()
         try:
-            if partitions :
+            if partitions:
                 parsedPartitions = json.loads( partitions )
                 main.log.warn( json.dumps( parsedPartitions,
                                            sort_keys=True,
@@ -764,7 +762,7 @@
             main.log.error( repr( partitions ) )
         pendingMap = onosCli.pendingMap()
         try:
-            if pendingMap :
+            if pendingMap:
                 parsedPending = json.loads( pendingMap )
                 main.log.warn( json.dumps( parsedPending,
                                            sort_keys=True,
@@ -783,21 +781,21 @@
             main.log.error( "Error in pushing host intents to ONOS" )
 
         main.step( "Intent Anti-Entropy dispersion" )
-        for j in range(100):
+        for j in range( 100 ):
             correct = True
             main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
             for i in main.activeNodes:
                 onosIds = []
-                ids = main.CLIs[i].getAllIntentsId()
+                ids = main.CLIs[ i ].getAllIntentsId()
                 onosIds.append( ids )
-                main.log.debug( "Intents in " + main.CLIs[i].name + ": " +
+                main.log.debug( "Intents in " + main.CLIs[ i ].name + ": " +
                                 str( sorted( onosIds ) ) )
                 if sorted( ids ) != sorted( intentIds ):
                     main.log.warn( "Set of intent IDs doesn't match" )
                     correct = False
                     break
                 else:
-                    intents = json.loads( main.CLIs[i].intents() )
+                    intents = json.loads( main.CLIs[ i ].intents() )
                     for intent in intents:
                         if intent[ 'state' ] != "INSTALLED":
                             main.log.warn( "Intent " + intent[ 'id' ] +
@@ -807,7 +805,7 @@
             if correct:
                 break
             else:
-                time.sleep(1)
+                time.sleep( 1 )
         if not intentStop:
             intentStop = time.time()
         global gossipTime
@@ -825,7 +823,7 @@
                 append = True
             else:
                 count += 1
-        gossipPeriod = int( main.params['timers']['gossip'] )
+        gossipPeriod = int( main.params[ 'timers' ][ 'gossip' ] )
         maxGossipTime = gossipPeriod * len( main.activeNodes )
         utilities.assert_greater_equals(
                 expect=maxGossipTime, actual=gossipTime,
@@ -887,7 +885,7 @@
                     # FIXME: this should only be after we start the app
                     topics.append( "org.onosproject.election" )
                     main.log.debug( topics )
-                    ONOStopics = [ j['topic'] for j in parsedLeaders ]
+                    ONOStopics = [ j[ 'topic' ] for j in parsedLeaders ]
                     for topic in topics:
                         if topic not in ONOStopics:
                             main.log.error( "Error: " + topic +
@@ -901,14 +899,14 @@
             # Check all nodes
             if missing:
                 for i in main.activeNodes:
-                    node = main.CLIs[i]
-                    response = node.leaders( jsonFormat=False)
+                    node = main.CLIs[ i ]
+                    response = node.leaders( jsonFormat=False )
                     main.log.warn( str( node.name ) + " leaders output: \n" +
                                    str( response ) )
 
             partitions = onosCli.partitions()
             try:
-                if partitions :
+                if partitions:
                     parsedPartitions = json.loads( partitions )
                     main.log.warn( json.dumps( parsedPartitions,
                                                sort_keys=True,
@@ -923,7 +921,7 @@
                 main.log.error( repr( partitions ) )
             pendingMap = onosCli.pendingMap()
             try:
-                if pendingMap :
+                if pendingMap:
                     parsedPending = json.loads( pendingMap )
                     main.log.warn( json.dumps( parsedPending,
                                                sort_keys=True,
@@ -952,7 +950,7 @@
                                 "functionality and check the state of " +\
                                 "the intent"
 
-        onosCli = main.CLIs[ main.activeNodes[0] ]
+        onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
         main.step( "Check Intent state" )
         installedCheck = False
         loopCount = 0
@@ -1038,7 +1036,7 @@
                 # FIXME: topics.append( "org.onosproject.election" )
                 # Print leaders output
                 main.log.debug( topics )
-                ONOStopics = [ j['topic'] for j in parsedLeaders ]
+                ONOStopics = [ j[ 'topic' ] for j in parsedLeaders ]
                 for topic in topics:
                     if topic not in ONOStopics:
                         main.log.error( "Error: " + topic +
@@ -1055,8 +1053,8 @@
         # Check all nodes
         if topicCheck:
             for i in main.activeNodes:
-                node = main.CLIs[i]
-                response = node.leaders( jsonFormat=False)
+                node = main.CLIs[ i ]
+                response = node.leaders( jsonFormat=False )
                 main.log.warn( str( node.name ) + " leaders output: \n" +
                                str( response ) )
 
@@ -1066,7 +1064,7 @@
         # Print partitions
         partitions = onosCli.partitions()
         try:
-            if partitions :
+            if partitions:
                 parsedPartitions = json.loads( partitions )
                 main.log.warn( json.dumps( parsedPartitions,
                                            sort_keys=True,
@@ -1082,7 +1080,7 @@
         # Print Pending Map
         pendingMap = onosCli.pendingMap()
         try:
-            if pendingMap :
+            if pendingMap:
                 parsedPending = json.loads( pendingMap )
                 main.log.warn( json.dumps( parsedPending,
                                            sort_keys=True,
@@ -1136,7 +1134,7 @@
                     # FIXME: this should only be after we start the app
                     topics.append( "org.onosproject.election" )
                     main.log.debug( topics )
-                    ONOStopics = [ j['topic'] for j in parsedLeaders ]
+                    ONOStopics = [ j[ 'topic' ] for j in parsedLeaders ]
                     for topic in topics:
                         if topic not in ONOStopics:
                             main.log.error( "Error: " + topic +
@@ -1149,14 +1147,14 @@
                 main.log.error( repr( leaders ) )
             if missing:
                 for i in main.activeNodes:
-                    node = main.CLIs[i]
-                    response = node.leaders( jsonFormat=False)
+                    node = main.CLIs[ i ]
+                    response = node.leaders( jsonFormat=False )
                     main.log.warn( str( node.name ) + " leaders output: \n" +
                                    str( response ) )
 
             partitions = onosCli.partitions()
             try:
-                if partitions :
+                if partitions:
                     parsedPartitions = json.loads( partitions )
                     main.log.warn( json.dumps( parsedPartitions,
                                                sort_keys=True,
@@ -1171,7 +1169,7 @@
                 main.log.error( repr( partitions ) )
             pendingMap = onosCli.pendingMap()
             try:
-                if pendingMap :
+                if pendingMap:
                     parsedPending = json.loads( pendingMap )
                     main.log.warn( json.dumps( parsedPending,
                                                sort_keys=True,
@@ -1184,8 +1182,8 @@
                 main.log.exception( "Error parsing pending map" )
                 main.log.error( repr( pendingMap ) )
         # Print flowrules
-        node = main.activeNodes[0]
-        main.log.debug( main.CLIs[node].flows( jsonFormat=False ) )
+        node = main.activeNodes[ 0 ]
+        main.log.debug( main.CLIs[ node ].flows( jsonFormat=False ) )
         main.step( "Wait a minute then ping again" )
         # the wait is above
         PingResult = main.TRUE
@@ -1243,7 +1241,7 @@
         rolesNotNull = main.TRUE
         threads = []
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].rolesNotNull,
+            t = main.Thread( target=main.CLIs[ i ].rolesNotNull,
                              name="rolesNotNull-" + str( i ),
                              args=[] )
             threads.append( t )
@@ -1265,7 +1263,7 @@
         rolesResults = True
         threads = []
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].roles,
+            t = main.Thread( target=main.CLIs[ i ].roles,
                              name="roles-" + str( i ),
                              args=[] )
             threads.append( t )
@@ -1276,11 +1274,11 @@
             ONOSMastership.append( t.result )
 
         for i in range( len( ONOSMastership ) ):
-            node = str( main.activeNodes[i] + 1 )
-            if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
+            node = str( main.activeNodes[ i ] + 1 )
+            if not ONOSMastership[ i ] or "Error" in ONOSMastership[ i ]:
                 main.log.error( "Error in getting ONOS" + node + " roles" )
                 main.log.warn( "ONOS" + node + " mastership response: " +
-                               repr( ONOSMastership[i] ) )
+                               repr( ONOSMastership[ i ] ) )
                 rolesResults = False
         utilities.assert_equals(
             expect=True,
@@ -1289,7 +1287,7 @@
             onfail="Error in reading roles from ONOS" )
 
         main.step( "Check for consistency in roles from each controller" )
-        if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
+        if all( [ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
             main.log.info(
                 "Switch roles are consistent across all ONOS nodes" )
         else:
@@ -1302,7 +1300,7 @@
 
         if rolesResults and not consistentMastership:
             for i in range( len( main.activeNodes ) ):
-                node = str( main.activeNodes[i] + 1 )
+                node = str( main.activeNodes[ i ] + 1 )
                 try:
                     main.log.warn(
                         "ONOS" + node + " roles: ",
@@ -1326,7 +1324,7 @@
         intentsResults = True
         threads = []
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].intents,
+            t = main.Thread( target=main.CLIs[ i ].intents,
                              name="intents-" + str( i ),
                              args=[],
                              kwargs={ 'jsonFormat': True } )
@@ -1338,7 +1336,7 @@
             ONOSIntents.append( t.result )
 
         for i in range( len( ONOSIntents ) ):
-            node = str( main.activeNodes[i] + 1 )
+            node = str( main.activeNodes[ i ] + 1 )
             if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
                 main.log.error( "Error in getting ONOS" + node + " intents" )
                 main.log.warn( "ONOS" + node + " intents response: " +
@@ -1351,7 +1349,7 @@
             onfail="Error in reading intents from ONOS" )
 
         main.step( "Check for consistency in Intents from each controller" )
-        if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
+        if all( [ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
             main.log.info( "Intents are consistent across all ONOS " +
                              "nodes" )
         else:
@@ -1399,17 +1397,17 @@
 
         if intentsResults and not consistentIntents:
             # print the json objects
-            n = str( main.activeNodes[-1] + 1 )
+            n = str( main.activeNodes[ -1 ] + 1 )
             main.log.debug( "ONOS" + n + " intents: " )
             main.log.debug( json.dumps( json.loads( ONOSIntents[ -1 ] ),
                                         sort_keys=True,
                                         indent=4,
                                         separators=( ',', ': ' ) ) )
             for i in range( len( ONOSIntents ) ):
-                node = str( main.activeNodes[i] + 1 )
+                node = str( main.activeNodes[ i ] + 1 )
                 if ONOSIntents[ i ] != ONOSIntents[ -1 ]:
                     main.log.debug( "ONOS" + node + " intents: " )
-                    main.log.debug( json.dumps( json.loads( ONOSIntents[i] ),
+                    main.log.debug( json.dumps( json.loads( ONOSIntents[ i ] ),
                                                 sort_keys=True,
                                                 indent=4,
                                                 separators=( ',', ': ' ) ) )
@@ -1430,7 +1428,7 @@
         flowsResults = True
         threads = []
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].flows,
+            t = main.Thread( target=main.CLIs[ i ].flows,
                              name="flows-" + str( i ),
                              args=[],
                              kwargs={ 'jsonFormat': True } )
@@ -1438,14 +1436,14 @@
             t.start()
 
         # NOTE: Flows command can take some time to run
-        time.sleep(30)
+        time.sleep( 30 )
         for t in threads:
             t.join()
             result = t.result
             ONOSFlows.append( result )
 
         for i in range( len( ONOSFlows ) ):
-            num = str( main.activeNodes[i] + 1 )
+            num = str( main.activeNodes[ i ] + 1 )
             if not ONOSFlows[ i ] or "Error" in ONOSFlows[ i ]:
                 main.log.error( "Error in getting ONOS" + num + " flows" )
                 main.log.warn( "ONOS" + num + " flows response: " +
@@ -1482,11 +1480,11 @@
 
         if flowsResults and not consistentFlows:
             for i in range( len( ONOSFlows ) ):
-                node = str( main.activeNodes[i] + 1 )
+                node = str( main.activeNodes[ i ] + 1 )
                 try:
                     main.log.warn(
                         "ONOS" + node + " flows: " +
-                        json.dumps( json.loads( ONOSFlows[i] ), sort_keys=True,
+                        json.dumps( json.loads( ONOSFlows[ i ] ), sort_keys=True,
                                     indent=4, separators=( ',', ': ' ) ) )
                 except ( ValueError, TypeError ):
                     main.log.warn( "ONOS" + node + " flows: " +
@@ -1551,9 +1549,9 @@
         devices = []
         threads = []
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].devices,
+            t = main.Thread( target=main.CLIs[ i ].devices,
                              name="devices-" + str( i ),
-                             args=[ ] )
+                             args=[] )
             threads.append( t )
             t.start()
 
@@ -1563,9 +1561,9 @@
         hosts = []
         threads = []
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].hosts,
+            t = main.Thread( target=main.CLIs[ i ].hosts,
                              name="hosts-" + str( i ),
-                             args=[ ] )
+                             args=[] )
             threads.append( t )
             t.start()
 
@@ -1583,9 +1581,9 @@
         ports = []
         threads = []
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].ports,
+            t = main.Thread( target=main.CLIs[ i ].ports,
                              name="ports-" + str( i ),
-                             args=[ ] )
+                             args=[] )
             threads.append( t )
             t.start()
 
@@ -1595,9 +1593,9 @@
         links = []
         threads = []
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].links,
+            t = main.Thread( target=main.CLIs[ i ].links,
                              name="links-" + str( i ),
-                             args=[ ] )
+                             args=[] )
             threads.append( t )
             t.start()
 
@@ -1607,9 +1605,9 @@
         clusters = []
         threads = []
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].clusters,
+            t = main.Thread( target=main.CLIs[ i ].clusters,
                              name="clusters-" + str( i ),
-                             args=[ ] )
+                             args=[] )
             threads.append( t )
             t.start()
 
@@ -1622,7 +1620,7 @@
         main.step( "Host view is consistent across ONOS nodes" )
         consistentHostsResult = main.TRUE
         for controller in range( len( hosts ) ):
-            controllerStr = str( main.activeNodes[controller] + 1 )
+            controllerStr = str( main.activeNodes[ controller ] + 1 )
             if hosts[ controller ] and "Error" not in hosts[ controller ]:
                 if hosts[ controller ] == hosts[ 0 ]:
                     continue
@@ -1649,10 +1647,10 @@
         main.step( "Each host has an IP address" )
         ipResult = main.TRUE
         for controller in range( 0, len( hosts ) ):
-            controllerStr = str( main.activeNodes[controller] + 1 )
+            controllerStr = str( main.activeNodes[ controller ] + 1 )
             if hosts[ controller ]:
                 for host in hosts[ controller ]:
-                    if not host.get( 'ipAddresses', [ ] ):
+                    if not host.get( 'ipAddresses', [] ):
                         main.log.error( "Error with host ips on controller" +
                                         controllerStr + ": " + str( host ) )
                         ipResult = main.FALSE
@@ -1666,7 +1664,7 @@
         main.step( "Cluster view is consistent across ONOS nodes" )
         consistentClustersResult = main.TRUE
         for controller in range( len( clusters ) ):
-            controllerStr = str( main.activeNodes[controller] + 1 )
+            controllerStr = str( main.activeNodes[ controller ] + 1 )
             if "Error" not in clusters[ controller ]:
                 if clusters[ controller ] == clusters[ 0 ]:
                     continue
@@ -1715,14 +1713,14 @@
         mnLinks = main.Mininet1.getLinks()
         mnHosts = main.Mininet1.getHosts()
         for controller in main.activeNodes:
-            controllerStr = str( main.activeNodes[controller] + 1 )
+            controllerStr = str( main.activeNodes[ controller ] + 1 )
             if devices[ controller ] and ports[ controller ] and\
-                "Error" not in devices[ controller ] and\
-                "Error" not in ports[ controller ]:
-                    currentDevicesResult = main.Mininet1.compareSwitches(
-                            mnSwitches,
-                            json.loads( devices[ controller ] ),
-                            json.loads( ports[ controller ] ) )
+                    "Error" not in devices[ controller ] and\
+                    "Error" not in ports[ controller ]:
+                currentDevicesResult = main.Mininet1.compareSwitches(
+                        mnSwitches,
+                        json.loads( devices[ controller ] ),
+                        json.loads( ports[ controller ] ) )
             else:
                 currentDevicesResult = main.FALSE
             utilities.assert_equals( expect=main.TRUE,
@@ -1846,9 +1844,9 @@
         cliResults = main.TRUE
         threads = []
         for i in range( main.numCtrls ):
-            t = main.Thread( target=main.CLIs[i].startOnosCli,
+            t = main.Thread( target=main.CLIs[ i ].startOnosCli,
                              name="startOnosCli-" + str( i ),
-                             args=[main.nodes[i].ip_address] )
+                             args=[ main.nodes[ i ].ip_address ] )
             threads.append( t )
             t.start()
 
@@ -1862,7 +1860,7 @@
         for i in range( 10 ):
             ready = True
             for i in main.activeNodes:
-                cli = main.CLIs[i]
+                cli = main.CLIs[ i ]
                 output = cli.summary()
                 if not output:
                     ready = False
@@ -1886,7 +1884,7 @@
         # Rerun for election on restarted nodes
         runResults = main.TRUE
         for i in main.activeNodes:
-            cli = main.CLIs[i]
+            cli = main.CLIs[ i ]
             run = cli.electionTestRun()
             if run != main.TRUE:
                 main.log.error( "Error running for election on " + cli.name )
@@ -1897,10 +1895,10 @@
 
         # TODO: Make this configurable
         time.sleep( 60 )
-        node = main.activeNodes[0]
-        main.log.debug( main.CLIs[node].nodes( jsonFormat=False ) )
-        main.log.debug( main.CLIs[node].leaders( jsonFormat=False ) )
-        main.log.debug( main.CLIs[node].partitions( jsonFormat=False ) )
+        node = main.activeNodes[ 0 ]
+        main.log.debug( main.CLIs[ node ].nodes( jsonFormat=False ) )
+        main.log.debug( main.CLIs[ node ].leaders( jsonFormat=False ) )
+        main.log.debug( main.CLIs[ node ].partitions( jsonFormat=False ) )
 
     def CASE7( self, main ):
         """
@@ -1919,9 +1917,9 @@
         rolesNotNull = main.TRUE
         threads = []
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].rolesNotNull,
+            t = main.Thread( target=main.CLIs[ i ].rolesNotNull,
                              name="rolesNotNull-" + str( i ),
-                             args=[ ] )
+                             args=[] )
             threads.append( t )
             t.start()
 
@@ -1941,7 +1939,7 @@
         rolesResults = True
         threads = []
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].roles,
+            t = main.Thread( target=main.CLIs[ i ].roles,
                              name="roles-" + str( i ),
                              args=[] )
             threads.append( t )
@@ -1952,11 +1950,11 @@
             ONOSMastership.append( t.result )
 
         for i in range( len( ONOSMastership ) ):
-            node = str( main.activeNodes[i] + 1 )
-            if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
+            node = str( main.activeNodes[ i ] + 1 )
+            if not ONOSMastership[ i ] or "Error" in ONOSMastership[ i ]:
                 main.log.error( "Error in getting ONOS" + node + " roles" )
                 main.log.warn( "ONOS" + node + " mastership response: " +
-                               repr( ONOSMastership[i] ) )
+                               repr( ONOSMastership[ i ] ) )
                 rolesResults = False
         utilities.assert_equals(
             expect=True,
@@ -1965,7 +1963,7 @@
             onfail="Error in reading roles from ONOS" )
 
         main.step( "Check for consistency in roles from each controller" )
-        if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
+        if all( [ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
             main.log.info(
                 "Switch roles are consistent across all ONOS nodes" )
         else:
@@ -1978,7 +1976,7 @@
 
         if rolesResults and not consistentMastership:
             for i in range( len( ONOSMastership ) ):
-                node = str( main.activeNodes[i] + 1 )
+                node = str( main.activeNodes[ i ] + 1 )
                 main.log.warn( "ONOS" + node + " roles: ",
                                json.dumps( json.loads( ONOSMastership[ i ] ),
                                            sort_keys=True,
@@ -1996,7 +1994,7 @@
         intentsResults = True
         threads = []
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].intents,
+            t = main.Thread( target=main.CLIs[ i ].intents,
                              name="intents-" + str( i ),
                              args=[],
                              kwargs={ 'jsonFormat': True } )
@@ -2007,8 +2005,8 @@
             t.join()
             ONOSIntents.append( t.result )
 
-        for i in range( len( ONOSIntents) ):
-            node = str( main.activeNodes[i] + 1 )
+        for i in range( len( ONOSIntents ) ):
+            node = str( main.activeNodes[ i ] + 1 )
             if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
                 main.log.error( "Error in getting ONOS" + node + " intents" )
                 main.log.warn( "ONOS" + node + " intents response: " +
@@ -2021,7 +2019,7 @@
             onfail="Error in reading intents from ONOS" )
 
         main.step( "Check for consistency in Intents from each controller" )
-        if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
+        if all( [ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
             main.log.info( "Intents are consistent across all ONOS " +
                              "nodes" )
         else:
@@ -2070,12 +2068,12 @@
                 main.log.exception( "Error in parsing intents" )
                 main.log.error( repr( node ) )
             intentStates.append( nodeStates )
-            out = [ (i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
+            out = [ ( i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
             main.log.info( dict( out ) )
 
         if intentsResults and not consistentIntents:
             for i in range( len( main.activeNodes ) ):
-                node = str( main.activeNodes[i] + 1 )
+                node = str( main.activeNodes[ i ] + 1 )
                 main.log.warn( "ONOS" + node + " intents: " )
                 main.log.warn( json.dumps(
                     json.loads( ONOSIntents[ i ] ),
@@ -2115,7 +2113,7 @@
                             main.log.debug( json.dumps( intent ) )
                 except ( ValueError, TypeError ):
                     main.log.exception( "Exception printing intents" )
-                    main.log.debug( repr( ONOSIntents[0] ) )
+                    main.log.debug( repr( ONOSIntents[ 0 ] ) )
                     main.log.debug( repr( intentState ) )
             if sameIntents == main.FALSE:
                 try:
@@ -2129,7 +2127,7 @@
                                                 separators=( ',', ': ' ) ) )
                 except ( ValueError, TypeError ):
                     main.log.exception( "Exception printing intents" )
-                    main.log.debug( repr( ONOSIntents[0] ) )
+                    main.log.debug( repr( ONOSIntents[ 0 ] ) )
                     main.log.debug( repr( intentState ) )
             utilities.assert_equals(
                 expect=main.TRUE,
@@ -2144,7 +2142,7 @@
         for i in range( 28 ):
             main.log.info( "Checking flow table on s" + str( i + 1 ) )
             tmpFlows = main.Mininet1.getFlowTable( "s" + str( i + 1 ), version="1.3", debug=False )
-            curSwitch = main.Mininet1.flowTableComp( flows[i], tmpFlows )
+            curSwitch = main.Mininet1.flowTableComp( flows[ i ], tmpFlows )
             FlowTables = FlowTables and curSwitch
             if curSwitch == main.FALSE:
                 main.log.warn( "Differences in flow table for switch: s{}".format( i + 1 ) )
@@ -2155,7 +2153,7 @@
             onfail="Changes were found in the flow tables" )
 
         main.Mininet2.pingLongKill()
-        '''
+        """
         # main.step( "Check the continuous pings to ensure that no packets " +
         #            "were dropped during component failure" )
         main.Mininet2.pingKill( main.params[ 'TESTONUSER' ],
@@ -2185,15 +2183,14 @@
         # NOTE: Since intents are not persisted with IntnentStore,
         #       we expect loss in dataplane connectivity
         LossInPings = main.FALSE
-        '''
-
+        """
         main.step( "Leadership Election is still functional" )
         # Test of LeadershipElection
         leaderList = []
         leaderResult = main.TRUE
 
         for i in main.activeNodes:
-            cli = main.CLIs[i]
+            cli = main.CLIs[ i ]
             leaderN = cli.electionTestLeader()
             leaderList.append( leaderN )
             if leaderN == main.FALSE:
@@ -2251,8 +2248,8 @@
             for i in main.activeNodes:
                 t = main.Thread( target=utilities.retry,
                                  name="devices-" + str( i ),
-                                 args=[ main.CLIs[i].devices, [ None ] ],
-                                 kwargs= { 'sleep': 5, 'attempts': 5,
+                                 args=[ main.CLIs[ i ].devices, [ None ] ],
+                                 kwargs={ 'sleep': 5, 'attempts': 5,
                                            'randomTime': True } )
                 threads.append( t )
                 t.start()
@@ -2266,8 +2263,8 @@
             for i in main.activeNodes:
                 t = main.Thread( target=utilities.retry,
                                  name="hosts-" + str( i ),
-                                 args=[ main.CLIs[i].hosts, [ None ] ],
-                                 kwargs= { 'sleep': 5, 'attempts': 5,
+                                 args=[ main.CLIs[ i ].hosts, [ None ] ],
+                                 kwargs={ 'sleep': 5, 'attempts': 5,
                                            'randomTime': True } )
                 threads.append( t )
                 t.start()
@@ -2281,7 +2278,7 @@
                     main.log.error( repr( t.result ) )
                     hosts.append( None )
             for controller in range( 0, len( hosts ) ):
-                controllerStr = str( main.activeNodes[controller] + 1 )
+                controllerStr = str( main.activeNodes[ controller ] + 1 )
                 if hosts[ controller ]:
                     for host in hosts[ controller ]:
                         if host is None or host.get( 'ipAddresses', [] ) == []:
@@ -2294,8 +2291,8 @@
             for i in main.activeNodes:
                 t = main.Thread( target=utilities.retry,
                                  name="ports-" + str( i ),
-                                 args=[ main.CLIs[i].ports, [ None ] ],
-                                 kwargs= { 'sleep': 5, 'attempts': 5,
+                                 args=[ main.CLIs[ i ].ports, [ None ] ],
+                                 kwargs={ 'sleep': 5, 'attempts': 5,
                                            'randomTime': True } )
                 threads.append( t )
                 t.start()
@@ -2308,8 +2305,8 @@
             for i in main.activeNodes:
                 t = main.Thread( target=utilities.retry,
                                  name="links-" + str( i ),
-                                 args=[ main.CLIs[i].links, [ None ] ],
-                                 kwargs= { 'sleep': 5, 'attempts': 5,
+                                 args=[ main.CLIs[ i ].links, [ None ] ],
+                                 kwargs={ 'sleep': 5, 'attempts': 5,
                                            'randomTime': True } )
                 threads.append( t )
                 t.start()
@@ -2322,8 +2319,8 @@
             for i in main.activeNodes:
                 t = main.Thread( target=utilities.retry,
                                  name="clusters-" + str( i ),
-                                 args=[ main.CLIs[i].clusters, [ None ] ],
-                                 kwargs= { 'sleep': 5, 'attempts': 5,
+                                 args=[ main.CLIs[ i ].clusters, [ None ] ],
+                                 kwargs={ 'sleep': 5, 'attempts': 5,
                                            'randomTime': True } )
                 threads.append( t )
                 t.start()
@@ -2342,18 +2339,18 @@
                all( e is None for e in ports ) and\
                all( e is None for e in links ) and\
                all( e is None for e in clusters ):
-                   topoFailMsg = "Could not get topology from ONOS"
-                   main.log.error( topoFailMsg )
-                   continue  # Try again, No use trying to compare
+                topoFailMsg = "Could not get topology from ONOS"
+                main.log.error( topoFailMsg )
+                continue  # Try again, No use trying to compare
 
             mnSwitches = main.Mininet1.getSwitches()
             mnLinks = main.Mininet1.getLinks()
             mnHosts = main.Mininet1.getHosts()
             for controller in range( len( main.activeNodes ) ):
-                controllerStr = str( main.activeNodes[controller] + 1 )
+                controllerStr = str( main.activeNodes[ controller ] + 1 )
                 if devices[ controller ] and ports[ controller ] and\
-                    "Error" not in devices[ controller ] and\
-                    "Error" not in ports[ controller ]:
+                        "Error" not in devices[ controller ] and\
+                        "Error" not in ports[ controller ]:
 
                     try:
                         currentDevicesResult = main.Mininet1.compareSwitches(
@@ -2406,29 +2403,29 @@
                 mappings = {}
                 for i in range( 1, 29 ):  # hosts 1 through 28
                     # set up correct variables:
-                    macId = "00:" * 5 + hex( i ).split( "0x" )[1].upper().zfill(2)
+                    macId = "00:" * 5 + hex( i ).split( "0x" )[ 1 ].upper().zfill( 2 )
                     if i == 1:
-                        deviceId = "1000".zfill(16)
+                        deviceId = "1000".zfill( 16 )
                     elif i == 2:
-                        deviceId = "2000".zfill(16)
+                        deviceId = "2000".zfill( 16 )
                     elif i == 3:
-                        deviceId = "3000".zfill(16)
+                        deviceId = "3000".zfill( 16 )
                     elif i == 4:
-                        deviceId = "3004".zfill(16)
+                        deviceId = "3004".zfill( 16 )
                     elif i == 5:
-                        deviceId = "5000".zfill(16)
+                        deviceId = "5000".zfill( 16 )
                     elif i == 6:
-                        deviceId = "6000".zfill(16)
+                        deviceId = "6000".zfill( 16 )
                     elif i == 7:
-                        deviceId = "6007".zfill(16)
+                        deviceId = "6007".zfill( 16 )
                     elif i >= 8 and i <= 17:
                         dpid = '3' + str( i ).zfill( 3 )
-                        deviceId = dpid.zfill(16)
+                        deviceId = dpid.zfill( 16 )
                     elif i >= 18 and i <= 27:
                         dpid = '6' + str( i ).zfill( 3 )
-                        deviceId = dpid.zfill(16)
+                        deviceId = dpid.zfill( 16 )
                     elif i == 28:
-                        deviceId = "2800".zfill(16)
+                        deviceId = "2800".zfill( 16 )
                     mappings[ macId ] = deviceId
                 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
                     if hosts[ controller ] == []:
@@ -2448,7 +2445,7 @@
                                 assert location, "location field could not be found for this host object"
 
                                 # Trim the protocol identifier off deviceId
-                                device = str( location.get( 'elementId' ) ).split(':')[1]
+                                device = str( location.get( 'elementId' ) ).split( ':' )[ 1 ]
                                 assert device, "elementId field could not be found for this host location object"
 
                                 port = location.get( 'port' )
@@ -2459,7 +2456,7 @@
                                     if str( port ) != "1":
                                         main.log.error( "The attachment port is incorrect for " +
                                                         "host " + str( mac ) +
-                                                        ". Expected: 1 Actual: " + str( port) )
+                                                        ". Expected: 1 Actual: " + str( port ) )
                                         hostAttachment = False
                                     if device != mappings[ str( mac ) ]:
                                         main.log.error( "The attachment device is incorrect for " +
@@ -2503,7 +2500,7 @@
         main.step( "Hosts view is consistent across all ONOS nodes" )
         consistentHostsResult = main.TRUE
         for controller in range( len( hosts ) ):
-            controllerStr = str( main.activeNodes[controller] + 1 )
+            controllerStr = str( main.activeNodes[ controller ] + 1 )
             if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
                 if hosts[ controller ] == hosts[ 0 ]:
                     continue
@@ -2545,7 +2542,7 @@
         main.step( "Clusters view is consistent across all ONOS nodes" )
         consistentClustersResult = main.TRUE
         for controller in range( len( clusters ) ):
-            controllerStr = str( main.activeNodes[controller] + 1 )
+            controllerStr = str( main.activeNodes[ controller ] + 1 )
             if "Error" not in clusters[ controller ]:
                 if clusters[ controller ] == clusters[ 0 ]:
                     continue
@@ -2575,7 +2572,7 @@
             numClusters = len( json.loads( clusters[ 0 ] ) )
         except ( ValueError, TypeError ):
             main.log.exception( "Error parsing clusters[0]: " +
-                                repr( clusters[0] ) )
+                                repr( clusters[ 0 ] ) )
             numClusters = "ERROR"
         clusterResults = main.FALSE
         if numClusters == 1:
@@ -2625,7 +2622,7 @@
         main.step( "Checking ONOS nodes" )
         nodeResults = utilities.retry( main.HA.nodesCheck,
                                        False,
-                                       args=[main.activeNodes],
+                                       args=[ main.activeNodes ],
                                        attempts=5 )
 
         utilities.assert_equals( expect=True, actual=nodeResults,
@@ -2634,8 +2631,8 @@
         if not nodeResults:
             for i in main.activeNodes:
                 main.log.debug( "{} components not ACTIVE: \n{}".format(
-                    main.CLIs[i].name,
-                    main.CLIs[i].sendline( "scr:list | grep -v ACTIVE" ) ) )
+                    main.CLIs[ i ].name,
+                    main.CLIs[ i ].sendline( "scr:list | grep -v ACTIVE" ) ) )
 
         if not topoResult:
             main.cleanup()
@@ -2712,7 +2709,7 @@
         switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
 
         description = "Killing a switch to ensure it is discovered correctly"
-        onosCli = main.CLIs[ main.activeNodes[0] ]
+        onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
         main.case( description )
         switch = main.params[ 'kill' ][ 'switch' ]
         switchDPID = main.params[ 'kill' ][ 'dpid' ]
@@ -2757,7 +2754,7 @@
         switch = main.params[ 'kill' ][ 'switch' ]
         switchDPID = main.params[ 'kill' ][ 'dpid' ]
         links = main.params[ 'kill' ][ 'links' ].split()
-        onosCli = main.CLIs[ main.activeNodes[0] ]
+        onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
         description = "Adding a switch to ensure it is discovered correctly"
         main.case( description )
 
@@ -2844,12 +2841,12 @@
             main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
 
         try:
-            timerLog = open( main.logdir + "/Timers.csv", 'w')
+            timerLog = open( main.logdir + "/Timers.csv", 'w' )
             main.log.error( ", ".join( labels ) + "\n" + ", ".join( data ) )
             timerLog.write( ", ".join( labels ) + "\n" + ", ".join( data ) )
             timerLog.close()
-        except NameError, e:
-            main.log.exception(e)
+        except NameError as e:
+            main.log.exception( e )
 
     def CASE14( self, main ):
         """
@@ -2861,9 +2858,9 @@
         assert main.CLIs, "main.CLIs not defined"
         assert main.nodes, "main.nodes not defined"
 
-        main.case("Start Leadership Election app")
+        main.case( "Start Leadership Election app" )
         main.step( "Install leadership election app" )
-        onosCli = main.CLIs[ main.activeNodes[0] ]
+        onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
         appResult = onosCli.activateApp( "org.onosproject.election" )
         utilities.assert_equals(
             expect=main.TRUE,
@@ -2873,9 +2870,9 @@
 
         main.step( "Run for election on each node" )
         for i in main.activeNodes:
-            main.CLIs[i].electionTestRun()
-        time.sleep(5)
-        activeCLIs = [ main.CLIs[i] for i in main.activeNodes ]
+            main.CLIs[ i ].electionTestRun()
+        time.sleep( 5 )
+        activeCLIs = [ main.CLIs[ i ] for i in main.activeNodes ]
         sameResult, leaders = main.HA.consistentLeaderboards( activeCLIs )
         utilities.assert_equals(
             expect=True,
@@ -2885,7 +2882,7 @@
 
         if sameResult:
             leader = leaders[ 0 ][ 0 ]
-            if main.nodes[main.activeNodes[0]].ip_address in leader:
+            if main.nodes[ main.activeNodes[ 0 ] ].ip_address in leader:
                 correctLeader = True
             else:
                 correctLeader = False
@@ -2935,7 +2932,7 @@
         electionResult = main.TRUE
 
         for i in main.activeNodes:  # run test election on each node
-            if main.CLIs[i].electionTestRun() == main.FALSE:
+            if main.CLIs[ i ].electionTestRun() == main.FALSE:
                 electionResult = main.FALSE
         utilities.assert_equals(
             expect=main.TRUE,
@@ -2950,7 +2947,7 @@
 
         main.step( "Check that each node shows the same leader and candidates" )
         failMessage = "Nodes have different leaderboards"
-        activeCLIs = [ main.CLIs[i] for i in main.activeNodes ]
+        activeCLIs = [ main.CLIs[ i ] for i in main.activeNodes ]
         sameResult, oldLeaders = main.HA.consistentLeaderboards( activeCLIs )
         if sameResult:
             oldLeader = oldLeaders[ 0 ][ 0 ]
@@ -3000,7 +2997,7 @@
         if newLeader == oldLeader:
             newLeaderResult = False
             main.log.error( "All nodes still see old leader: " + str( oldLeader ) +
-                " as the current leader" )
+                            " as the current leader" )
         utilities.assert_equals(
             expect=True,
             actual=newLeaderResult,
@@ -3017,7 +3014,7 @@
             else:
                 main.log.info( "Expected no leader, got: " + str( newLeader ) )
                 correctCandidateResult = main.FALSE
-        elif len( oldLeaders[0] ) >= 3:
+        elif len( oldLeaders[ 0 ] ) >= 3:
             if newLeader == oldLeaders[ 0 ][ 2 ]:
                 # correct leader was elected
                 correctCandidateResult = main.TRUE
@@ -3057,10 +3054,10 @@
         positionResult, reRunLeaders = main.HA.consistentLeaderboards( activeCLIs )
 
         # Check that the re-elected node is last on the candidate List
-        if not reRunLeaders[0]:
+        if not reRunLeaders[ 0 ]:
             positionResult = main.FALSE
         elif oldLeader != reRunLeaders[ 0 ][ -1 ]:
-            main.log.error( "Old Leader ({}) not in the proper position: {} ".format( str( oldLeader),
+            main.log.error( "Old Leader ({}) not in the proper position: {} ".format( str( oldLeader ),
                                                                                       str( reRunLeaders[ 0 ] ) ) )
             positionResult = main.FALSE
         utilities.assert_equals(
@@ -3084,15 +3081,15 @@
         # Variables for the distributed primitives tests
         main.pCounterName = "TestON-Partitions"
         main.pCounterValue = 0
-        main.onosSet = set([])
+        main.onosSet = set( [] )
         main.onosSetName = "TestON-set"
 
         description = "Install Primitives app"
         main.case( description )
         main.step( "Install Primitives app" )
         appName = "org.onosproject.distributedprimitives"
-        node = main.activeNodes[0]
-        appResults = main.CLIs[node].activateApp( appName )
+        node = main.activeNodes[ 0 ]
+        appResults = main.CLIs[ node ].activateApp( appName )
         utilities.assert_equals( expect=main.TRUE,
                                  actual=appResults,
                                  onpass="Primitives app activated",
diff --git a/TestON/tests/HA/HAfullNetPartition/HAfullNetPartition.py b/TestON/tests/HA/HAfullNetPartition/HAfullNetPartition.py
index 742e340..14d73b9 100644
--- a/TestON/tests/HA/HAfullNetPartition/HAfullNetPartition.py
+++ b/TestON/tests/HA/HAfullNetPartition/HAfullNetPartition.py
@@ -23,8 +23,6 @@
 CASE16: Install Distributed Primitives app
 CASE17: Check for basic functionality with distributed primitives
 """
-
-
 class HAfullNetPartition:
 
     def __init__( self ):
@@ -149,7 +147,7 @@
                             filePath + topoName,
                             main.Mininet1.home,
                             direction="to" )
-        mnResult = main.Mininet1.startNet( )
+        mnResult = main.Mininet1.startNet()
         utilities.assert_equals( expect=main.TRUE, actual=mnResult,
                                  onpass="Mininet Started",
                                  onfail="Error starting Mininet" )
@@ -192,7 +190,7 @@
                   'seamless="seamless"></iframe>\n'
         graphs += ']]></ac:plain-text-body>\n'
         graphs += '</ac:structured-macro>\n'
-        main.log.wiki(graphs)
+        main.log.wiki( graphs )
 
         main.step( "Creating ONOS package" )
         # copy gen-partions file to ONOS
@@ -259,9 +257,9 @@
         cliResults = main.TRUE
         threads = []
         for i in range( main.numCtrls ):
-            t = main.Thread( target=main.CLIs[i].startOnosCli,
+            t = main.Thread( target=main.CLIs[ i ].startOnosCli,
                              name="startOnosCli-" + str( i ),
-                             args=[main.nodes[i].ip_address] )
+                             args=[ main.nodes[ i ].ip_address ] )
             threads.append( t )
             t.start()
 
@@ -286,7 +284,7 @@
         main.step( "Checking ONOS nodes" )
         nodeResults = utilities.retry( main.HA.nodesCheck,
                                        False,
-                                       args=[main.activeNodes],
+                                       args=[ main.activeNodes ],
                                        attempts=5 )
 
         utilities.assert_equals( expect=True, actual=nodeResults,
@@ -295,7 +293,7 @@
 
         if not nodeResults:
             for i in main.activeNodes:
-                cli = main.CLIs[i]
+                cli = main.CLIs[ i ]
                 main.log.debug( "{} components not ACTIVE: \n{}".format(
                     cli.name,
                     cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
@@ -307,7 +305,7 @@
         # get data from the params
         apps = main.params.get( 'apps' )
         if apps:
-            apps = apps.split(',')
+            apps = apps.split( ',' )
             main.log.warn( apps )
             activateResult = True
             for app in apps:
@@ -334,8 +332,8 @@
             main.log.debug( config )
             checkResult = main.TRUE
             for component in config:
-                for setting in config[component]:
-                    value = config[component][setting]
+                for setting in config[ component ]:
+                    value = config[ component ][ setting ]
                     check = main.CLIs[ 0 ].setCfg( component, setting, value )
                     main.log.info( "Value was changed? {}".format( main.TRUE == check ) )
                     checkResult = check and checkResult
@@ -350,7 +348,7 @@
         appCheck = main.TRUE
         threads = []
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].appToIDCheck,
+            t = main.Thread( target=main.CLIs[ i ].appToIDCheck,
                              name="appToIDCheck-" + str( i ),
                              args=[] )
             threads.append( t )
@@ -360,9 +358,9 @@
             t.join()
             appCheck = appCheck and t.result
         if appCheck != main.TRUE:
-            node = main.activeNodes[0]
-            main.log.warn( main.CLIs[node].apps() )
-            main.log.warn( main.CLIs[node].appIDs() )
+            node = main.activeNodes[ 0 ]
+            main.log.warn( main.CLIs[ node ].apps() )
+            main.log.warn( main.CLIs[ node ].appIDs() )
         utilities.assert_equals( expect=main.TRUE, actual=appCheck,
                                  onpass="App Ids seem to be correct",
                                  onfail="Something is wrong with app Ids" )
@@ -447,9 +445,9 @@
         # Manually assign mastership to the controller we want
         roleCall = main.TRUE
 
-        ipList = [ ]
+        ipList = []
         deviceList = []
-        onosCli = main.CLIs[ main.activeNodes[0] ]
+        onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
         try:
             # Assign mastership to specific controllers. This assignment was
-            # determined for a 7 node cluser, but will work with any sized
+            # determined for a 7 node cluster, but will work with any sized
@@ -523,8 +521,8 @@
         #       atomic and is actually a multi step process
         time.sleep( 5 )
         for i in range( len( ipList ) ):
-            ip = ipList[i]
-            deviceId = deviceList[i]
+            ip = ipList[ i ]
+            deviceId = deviceList[ i ]
             # Check assignment
             master = onosCli.getRole( deviceId ).get( 'master' )
             if ip in master:
@@ -562,7 +560,7 @@
 
         # install onos-app-fwd
         main.step( "Install reactive forwarding app" )
-        onosCli = main.CLIs[ main.activeNodes[0] ]
+        onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
         installResults = onosCli.activateApp( "org.onosproject.fwd" )
         utilities.assert_equals( expect=main.TRUE, actual=installResults,
                                  onpass="Install fwd successful",
@@ -572,7 +570,7 @@
         appCheck = main.TRUE
         threads = []
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].appToIDCheck,
+            t = main.Thread( target=main.CLIs[ i ].appToIDCheck,
                              name="appToIDCheck-" + str( i ),
                              args=[] )
             threads.append( t )
@@ -597,13 +595,13 @@
         pingResult = main.Mininet1.pingall()
         time2 = time.time()
         if not pingResult:
-            main.log.warn("First pingall failed. Trying again...")
+            main.log.warn( "First pingall failed. Trying again..." )
             pingResult = main.Mininet1.pingall()
             passMsg += " on the second try"
         utilities.assert_equals(
             expect=main.TRUE,
             actual=pingResult,
-            onpass= passMsg,
+            onpass=passMsg,
             onfail="Reactive Pingall failed, " +
                    "one or more ping pairs failed" )
         main.log.info( "Time for pingall: %2f seconds" %
@@ -612,8 +610,8 @@
         time.sleep( 11 )
         # uninstall onos-app-fwd
         main.step( "Uninstall reactive forwarding app" )
-        node = main.activeNodes[0]
-        uninstallResult = main.CLIs[node].deactivateApp( "org.onosproject.fwd" )
+        node = main.activeNodes[ 0 ]
+        uninstallResult = main.CLIs[ node ].deactivateApp( "org.onosproject.fwd" )
         utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
                                  onpass="Uninstall fwd successful",
                                  onfail="Uninstall fwd failed" )
@@ -622,7 +620,7 @@
         threads = []
         appCheck2 = main.TRUE
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].appToIDCheck,
+            t = main.Thread( target=main.CLIs[ i ].appToIDCheck,
                              name="appToIDCheck-" + str( i ),
                              args=[] )
             threads.append( t )
@@ -632,9 +630,9 @@
             t.join()
             appCheck2 = appCheck2 and t.result
         if appCheck2 != main.TRUE:
-            node = main.activeNodes[0]
-            main.log.warn( main.CLIs[node].apps() )
-            main.log.warn( main.CLIs[node].appIDs() )
+            node = main.activeNodes[ 0 ]
+            main.log.warn( main.CLIs[ node ].apps() )
+            main.log.warn( main.CLIs[ node ].appIDs() )
         utilities.assert_equals( expect=main.TRUE, actual=appCheck2,
                                  onpass="App Ids seem to be correct",
                                  onfail="Something is wrong with app Ids" )
@@ -662,8 +660,8 @@
                 host2Id = host2Dict.get( 'id', None )
             if host1Id and host2Id:
                 nodeNum = ( i % len( main.activeNodes ) )
-                node = main.activeNodes[nodeNum]
-                tmpId = main.CLIs[node].addHostIntent( host1Id, host2Id )
+                node = main.activeNodes[ nodeNum ]
+                tmpId = main.CLIs[ node ].addHostIntent( host1Id, host2Id )
                 if tmpId:
                     main.log.info( "Added intent with id: " + tmpId )
                     intentIds.append( tmpId )
@@ -673,8 +671,8 @@
             else:
                 main.log.error( "Error, getHost() failed for h" + str( i ) +
                                 " and/or h" + str( i + 10 ) )
-                node = main.activeNodes[0]
-                hosts = main.CLIs[node].hosts()
+                node = main.activeNodes[ 0 ]
+                hosts = main.CLIs[ node ].hosts()
                 main.log.warn( "Hosts output: " )
                 try:
                     main.log.warn( json.dumps( json.loads( hosts ),
@@ -742,7 +740,7 @@
                 for i in range( 14 ):
                     topics.append( "work-partition-" + str( i ) )
                 main.log.debug( topics )
-                ONOStopics = [ j['topic'] for j in parsedLeaders ]
+                ONOStopics = [ j[ 'topic' ] for j in parsedLeaders ]
                 for topic in topics:
                     if topic not in ONOStopics:
                         main.log.error( "Error: " + topic +
@@ -756,13 +754,13 @@
         # Check all nodes
         if missing:
             for i in main.activeNodes:
-                response = main.CLIs[i].leaders( jsonFormat=False)
-                main.log.warn( str( main.CLIs[i].name ) + " leaders output: \n" +
+                response = main.CLIs[ i ].leaders( jsonFormat=False )
+                main.log.warn( str( main.CLIs[ i ].name ) + " leaders output: \n" +
                                str( response ) )
 
         partitions = onosCli.partitions()
         try:
-            if partitions :
+            if partitions:
                 parsedPartitions = json.loads( partitions )
                 main.log.warn( json.dumps( parsedPartitions,
                                            sort_keys=True,
@@ -777,7 +775,7 @@
             main.log.error( repr( partitions ) )
         pendingMap = onosCli.pendingMap()
         try:
-            if pendingMap :
+            if pendingMap:
                 parsedPending = json.loads( pendingMap )
                 main.log.warn( json.dumps( parsedPending,
                                            sort_keys=True,
@@ -796,21 +794,21 @@
             main.log.error( "Error in pushing host intents to ONOS" )
 
         main.step( "Intent Anti-Entropy dispersion" )
-        for j in range(100):
+        for j in range( 100 ):
             correct = True
             main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
             for i in main.activeNodes:
                 onosIds = []
-                ids = main.CLIs[i].getAllIntentsId()
+                ids = main.CLIs[ i ].getAllIntentsId()
                 onosIds.append( ids )
-                main.log.debug( "Intents in " + main.CLIs[i].name + ": " +
+                main.log.debug( "Intents in " + main.CLIs[ i ].name + ": " +
                                 str( sorted( onosIds ) ) )
                 if sorted( ids ) != sorted( intentIds ):
                     main.log.warn( "Set of intent IDs doesn't match" )
                     correct = False
                     break
                 else:
-                    intents = json.loads( main.CLIs[i].intents() )
+                    intents = json.loads( main.CLIs[ i ].intents() )
                     for intent in intents:
                         if intent[ 'state' ] != "INSTALLED":
                             main.log.warn( "Intent " + intent[ 'id' ] +
@@ -820,14 +818,14 @@
             if correct:
                 break
             else:
-                time.sleep(1)
+                time.sleep( 1 )
         if not intentStop:
             intentStop = time.time()
         global gossipTime
         gossipTime = intentStop - intentStart
         main.log.info( "It took about " + str( gossipTime ) +
                         " seconds for all intents to appear in each node" )
-        gossipPeriod = int( main.params['timers']['gossip'] )
+        gossipPeriod = int( main.params[ 'timers' ][ 'gossip' ] )
         maxGossipTime = gossipPeriod * len( main.activeNodes )
         utilities.assert_greater_equals(
                 expect=maxGossipTime, actual=gossipTime,
@@ -889,7 +887,7 @@
                     # FIXME: this should only be after we start the app
                     topics.append( "org.onosproject.election" )
                     main.log.debug( topics )
-                    ONOStopics = [ j['topic'] for j in parsedLeaders ]
+                    ONOStopics = [ j[ 'topic' ] for j in parsedLeaders ]
                     for topic in topics:
                         if topic not in ONOStopics:
                             main.log.error( "Error: " + topic +
@@ -903,14 +901,14 @@
             # Check all nodes
             if missing:
                 for i in main.activeNodes:
-                    node = main.CLIs[i]
-                    response = node.leaders( jsonFormat=False)
+                    node = main.CLIs[ i ]
+                    response = node.leaders( jsonFormat=False )
                     main.log.warn( str( node.name ) + " leaders output: \n" +
                                    str( response ) )
 
             partitions = onosCli.partitions()
             try:
-                if partitions :
+                if partitions:
                     parsedPartitions = json.loads( partitions )
                     main.log.warn( json.dumps( parsedPartitions,
                                                sort_keys=True,
@@ -925,7 +923,7 @@
                 main.log.error( repr( partitions ) )
             pendingMap = onosCli.pendingMap()
             try:
-                if pendingMap :
+                if pendingMap:
                     parsedPending = json.loads( pendingMap )
                     main.log.warn( json.dumps( parsedPending,
                                                sort_keys=True,
@@ -954,7 +952,7 @@
                                 "functionality and check the state of " +\
                                 "the intent"
 
-        onosCli = main.CLIs[ main.activeNodes[0] ]
+        onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
         main.step( "Check Intent state" )
         installedCheck = False
         loopCount = 0
@@ -1040,7 +1038,7 @@
                 # FIXME: topics.append( "org.onosproject.election" )
                 # Print leaders output
                 main.log.debug( topics )
-                ONOStopics = [ j['topic'] for j in parsedLeaders ]
+                ONOStopics = [ j[ 'topic' ] for j in parsedLeaders ]
                 for topic in topics:
                     if topic not in ONOStopics:
                         main.log.error( "Error: " + topic +
@@ -1057,8 +1055,8 @@
         # Check all nodes
         if topicCheck:
             for i in main.activeNodes:
-                node = main.CLIs[i]
-                response = node.leaders( jsonFormat=False)
+                node = main.CLIs[ i ]
+                response = node.leaders( jsonFormat=False )
                 main.log.warn( str( node.name ) + " leaders output: \n" +
                                str( response ) )
 
@@ -1068,7 +1066,7 @@
         # Print partitions
         partitions = onosCli.partitions()
         try:
-            if partitions :
+            if partitions:
                 parsedPartitions = json.loads( partitions )
                 main.log.warn( json.dumps( parsedPartitions,
                                            sort_keys=True,
@@ -1084,7 +1082,7 @@
         # Print Pending Map
         pendingMap = onosCli.pendingMap()
         try:
-            if pendingMap :
+            if pendingMap:
                 parsedPending = json.loads( pendingMap )
                 main.log.warn( json.dumps( parsedPending,
                                            sort_keys=True,
@@ -1138,7 +1136,7 @@
                     # FIXME: this should only be after we start the app
                     topics.append( "org.onosproject.election" )
                     main.log.debug( topics )
-                    ONOStopics = [ j['topic'] for j in parsedLeaders ]
+                    ONOStopics = [ j[ 'topic' ] for j in parsedLeaders ]
                     for topic in topics:
                         if topic not in ONOStopics:
                             main.log.error( "Error: " + topic +
@@ -1151,14 +1149,14 @@
                 main.log.error( repr( leaders ) )
             if missing:
                 for i in main.activeNodes:
-                    node = main.CLIs[i]
-                    response = node.leaders( jsonFormat=False)
+                    node = main.CLIs[ i ]
+                    response = node.leaders( jsonFormat=False )
                     main.log.warn( str( node.name ) + " leaders output: \n" +
                                    str( response ) )
 
             partitions = onosCli.partitions()
             try:
-                if partitions :
+                if partitions:
                     parsedPartitions = json.loads( partitions )
                     main.log.warn( json.dumps( parsedPartitions,
                                                sort_keys=True,
@@ -1173,7 +1171,7 @@
                 main.log.error( repr( partitions ) )
             pendingMap = onosCli.pendingMap()
             try:
-                if pendingMap :
+                if pendingMap:
                     parsedPending = json.loads( pendingMap )
                     main.log.warn( json.dumps( parsedPending,
                                                sort_keys=True,
@@ -1186,8 +1184,8 @@
                 main.log.exception( "Error parsing pending map" )
                 main.log.error( repr( pendingMap ) )
         # Print flowrules
-        node = main.activeNodes[0]
-        main.log.debug( main.CLIs[node].flows( jsonFormat=False ) )
+        node = main.activeNodes[ 0 ]
+        main.log.debug( main.CLIs[ node ].flows( jsonFormat=False ) )
         main.step( "Wait a minute then ping again" )
         # the wait is above
         PingResult = main.TRUE
@@ -1245,7 +1243,7 @@
         rolesNotNull = main.TRUE
         threads = []
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].rolesNotNull,
+            t = main.Thread( target=main.CLIs[ i ].rolesNotNull,
                              name="rolesNotNull-" + str( i ),
                              args=[] )
             threads.append( t )
@@ -1267,7 +1265,7 @@
         rolesResults = True
         threads = []
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].roles,
+            t = main.Thread( target=main.CLIs[ i ].roles,
                              name="roles-" + str( i ),
                              args=[] )
             threads.append( t )
@@ -1278,11 +1276,11 @@
             ONOSMastership.append( t.result )
 
         for i in range( len( ONOSMastership ) ):
-            node = str( main.activeNodes[i] + 1 )
-            if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
+            node = str( main.activeNodes[ i ] + 1 )
+            if not ONOSMastership[ i ] or "Error" in ONOSMastership[ i ]:
                 main.log.error( "Error in getting ONOS" + node + " roles" )
                 main.log.warn( "ONOS" + node + " mastership response: " +
-                               repr( ONOSMastership[i] ) )
+                               repr( ONOSMastership[ i ] ) )
                 rolesResults = False
         utilities.assert_equals(
             expect=True,
@@ -1291,7 +1289,7 @@
             onfail="Error in reading roles from ONOS" )
 
         main.step( "Check for consistency in roles from each controller" )
-        if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
+        if all( [ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
             main.log.info(
                 "Switch roles are consistent across all ONOS nodes" )
         else:
@@ -1304,7 +1302,7 @@
 
         if rolesResults and not consistentMastership:
             for i in range( len( main.activeNodes ) ):
-                node = str( main.activeNodes[i] + 1 )
+                node = str( main.activeNodes[ i ] + 1 )
                 try:
                     main.log.warn(
                         "ONOS" + node + " roles: ",
@@ -1328,7 +1326,7 @@
         intentsResults = True
         threads = []
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].intents,
+            t = main.Thread( target=main.CLIs[ i ].intents,
                              name="intents-" + str( i ),
                              args=[],
                              kwargs={ 'jsonFormat': True } )
@@ -1340,7 +1338,7 @@
             ONOSIntents.append( t.result )
 
         for i in range( len( ONOSIntents ) ):
-            node = str( main.activeNodes[i] + 1 )
+            node = str( main.activeNodes[ i ] + 1 )
             if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
                 main.log.error( "Error in getting ONOS" + node + " intents" )
                 main.log.warn( "ONOS" + node + " intents response: " +
@@ -1353,7 +1351,7 @@
             onfail="Error in reading intents from ONOS" )
 
         main.step( "Check for consistency in Intents from each controller" )
-        if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
+        if all( [ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
             main.log.info( "Intents are consistent across all ONOS " +
                              "nodes" )
         else:
@@ -1401,17 +1399,17 @@
 
         if intentsResults and not consistentIntents:
             # print the json objects
-            n = str( main.activeNodes[-1] + 1 )
+            n = str( main.activeNodes[ -1 ] + 1 )
             main.log.debug( "ONOS" + n + " intents: " )
             main.log.debug( json.dumps( json.loads( ONOSIntents[ -1 ] ),
                                         sort_keys=True,
                                         indent=4,
                                         separators=( ',', ': ' ) ) )
             for i in range( len( ONOSIntents ) ):
-                node = str( main.activeNodes[i] + 1 )
+                node = str( main.activeNodes[ i ] + 1 )
                 if ONOSIntents[ i ] != ONOSIntents[ -1 ]:
                     main.log.debug( "ONOS" + node + " intents: " )
-                    main.log.debug( json.dumps( json.loads( ONOSIntents[i] ),
+                    main.log.debug( json.dumps( json.loads( ONOSIntents[ i ] ),
                                                 sort_keys=True,
                                                 indent=4,
                                                 separators=( ',', ': ' ) ) )
@@ -1432,7 +1430,7 @@
         flowsResults = True
         threads = []
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].flows,
+            t = main.Thread( target=main.CLIs[ i ].flows,
                              name="flows-" + str( i ),
                              args=[],
                              kwargs={ 'jsonFormat': True } )
@@ -1440,14 +1438,14 @@
             t.start()
 
         # NOTE: Flows command can take some time to run
-        time.sleep(30)
+        time.sleep( 30 )
         for t in threads:
             t.join()
             result = t.result
             ONOSFlows.append( result )
 
         for i in range( len( ONOSFlows ) ):
-            num = str( main.activeNodes[i] + 1 )
+            num = str( main.activeNodes[ i ] + 1 )
             if not ONOSFlows[ i ] or "Error" in ONOSFlows[ i ]:
                 main.log.error( "Error in getting ONOS" + num + " flows" )
                 main.log.warn( "ONOS" + num + " flows response: " +
@@ -1484,11 +1482,11 @@
 
         if flowsResults and not consistentFlows:
             for i in range( len( ONOSFlows ) ):
-                node = str( main.activeNodes[i] + 1 )
+                node = str( main.activeNodes[ i ] + 1 )
                 try:
                     main.log.warn(
                         "ONOS" + node + " flows: " +
-                        json.dumps( json.loads( ONOSFlows[i] ), sort_keys=True,
+                        json.dumps( json.loads( ONOSFlows[ i ] ), sort_keys=True,
                                     indent=4, separators=( ',', ': ' ) ) )
                 except ( ValueError, TypeError ):
                     main.log.warn( "ONOS" + node + " flows: " +
@@ -1553,9 +1551,9 @@
         devices = []
         threads = []
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].devices,
+            t = main.Thread( target=main.CLIs[ i ].devices,
                              name="devices-" + str( i ),
-                             args=[ ] )
+                             args=[] )
             threads.append( t )
             t.start()
 
@@ -1565,9 +1563,9 @@
         hosts = []
         threads = []
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].hosts,
+            t = main.Thread( target=main.CLIs[ i ].hosts,
                              name="hosts-" + str( i ),
-                             args=[ ] )
+                             args=[] )
             threads.append( t )
             t.start()
 
@@ -1585,9 +1583,9 @@
         ports = []
         threads = []
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].ports,
+            t = main.Thread( target=main.CLIs[ i ].ports,
                              name="ports-" + str( i ),
-                             args=[ ] )
+                             args=[] )
             threads.append( t )
             t.start()
 
@@ -1597,9 +1595,9 @@
         links = []
         threads = []
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].links,
+            t = main.Thread( target=main.CLIs[ i ].links,
                              name="links-" + str( i ),
-                             args=[ ] )
+                             args=[] )
             threads.append( t )
             t.start()
 
@@ -1609,9 +1607,9 @@
         clusters = []
         threads = []
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].clusters,
+            t = main.Thread( target=main.CLIs[ i ].clusters,
                              name="clusters-" + str( i ),
-                             args=[ ] )
+                             args=[] )
             threads.append( t )
             t.start()
 
@@ -1624,7 +1622,7 @@
         main.step( "Host view is consistent across ONOS nodes" )
         consistentHostsResult = main.TRUE
         for controller in range( len( hosts ) ):
-            controllerStr = str( main.activeNodes[controller] + 1 )
+            controllerStr = str( main.activeNodes[ controller ] + 1 )
             if hosts[ controller ] and "Error" not in hosts[ controller ]:
                 if hosts[ controller ] == hosts[ 0 ]:
                     continue
@@ -1651,10 +1649,10 @@
         main.step( "Each host has an IP address" )
         ipResult = main.TRUE
         for controller in range( 0, len( hosts ) ):
-            controllerStr = str( main.activeNodes[controller] + 1 )
+            controllerStr = str( main.activeNodes[ controller ] + 1 )
             if hosts[ controller ]:
                 for host in hosts[ controller ]:
-                    if not host.get( 'ipAddresses', [ ] ):
+                    if not host.get( 'ipAddresses', [] ):
                         main.log.error( "Error with host ips on controller" +
                                         controllerStr + ": " + str( host ) )
                         ipResult = main.FALSE
@@ -1668,7 +1666,7 @@
         main.step( "Cluster view is consistent across ONOS nodes" )
         consistentClustersResult = main.TRUE
         for controller in range( len( clusters ) ):
-            controllerStr = str( main.activeNodes[controller] + 1 )
+            controllerStr = str( main.activeNodes[ controller ] + 1 )
             if "Error" not in clusters[ controller ]:
                 if clusters[ controller ] == clusters[ 0 ]:
                     continue
@@ -1717,14 +1715,14 @@
         mnLinks = main.Mininet1.getLinks()
         mnHosts = main.Mininet1.getHosts()
         for controller in main.activeNodes:
-            controllerStr = str( main.activeNodes[controller] + 1 )
+            controllerStr = str( main.activeNodes[ controller ] + 1 )
             if devices[ controller ] and ports[ controller ] and\
-                "Error" not in devices[ controller ] and\
-                "Error" not in ports[ controller ]:
-                    currentDevicesResult = main.Mininet1.compareSwitches(
-                            mnSwitches,
-                            json.loads( devices[ controller ] ),
-                            json.loads( ports[ controller ] ) )
+                    "Error" not in devices[ controller ] and\
+                    "Error" not in ports[ controller ]:
+                currentDevicesResult = main.Mininet1.compareSwitches(
+                        mnSwitches,
+                        json.loads( devices[ controller ] ),
+                        json.loads( ports[ controller ] ) )
             else:
                 currentDevicesResult = main.FALSE
             utilities.assert_equals( expect=main.TRUE,
@@ -1801,7 +1799,7 @@
             main.log.debug( "Checking logs for errors on " + node.name + ":" )
             main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
 
-        main.log.debug( main.CLIs[0].roles( jsonFormat=False ) )
+        main.log.debug( main.CLIs[ 0 ].roles( jsonFormat=False ) )
 
         n = len( main.nodes )  # Number of nodes
         p = ( ( n + 1 ) / 2 ) + 1  # Number of partitions
@@ -1815,10 +1813,10 @@
         main.log.info( "Nodes to be partitioned: " + str( nodeList ) )
         partitionResults = main.TRUE
         for i in range( 0, n ):
-            this = main.nodes[i]
+            this = main.nodes[ i ]
             if i not in main.partition:
                 for j in main.partition:
-                    foe = main.nodes[j]
+                    foe = main.nodes[ j ]
                     main.log.warn( "Setting IP Tables rule from {} to {}. ".format( this.ip_address, foe.ip_address ) )
                     #CMD HERE
                     cmdStr = "sudo iptables -A {} -d {} -s {} -j DROP".format( "INPUT", this.ip_address, foe.ip_address )
@@ -1828,7 +1826,7 @@
             else:
                 for j in range( 0, n ):
                     if j not in main.partition:
-                        foe = main.nodes[j]
+                        foe = main.nodes[ j ]
                         main.log.warn( "Setting IP Tables rule from {} to {}. ".format( this.ip_address, foe.ip_address ) )
                         #CMD HERE
                         cmdStr = "sudo iptables -A {} -d {} -s {} -j DROP".format( "INPUT", this.ip_address, foe.ip_address )
@@ -1883,7 +1881,7 @@
         main.step( "Checking ONOS nodes" )
         nodeResults = utilities.retry( main.HA.nodesCheck,
                                        False,
-                                       args=[main.activeNodes],
+                                       args=[ main.activeNodes ],
                                        sleep=15,
                                        attempts=5 )
 
@@ -1893,7 +1891,7 @@
 
         if not nodeResults:
             for i in main.activeNodes:
-                cli = main.CLIs[i]
+                cli = main.CLIs[ i ]
                 main.log.debug( "{} components not ACTIVE: \n{}".format(
                     cli.name,
                     cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
@@ -1923,9 +1921,9 @@
         rolesNotNull = main.TRUE
         threads = []
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].rolesNotNull,
+            t = main.Thread( target=main.CLIs[ i ].rolesNotNull,
                              name="rolesNotNull-" + str( i ),
-                             args=[ ] )
+                             args=[] )
             threads.append( t )
             t.start()
 
@@ -1945,7 +1943,7 @@
         rolesResults = True
         threads = []
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].roles,
+            t = main.Thread( target=main.CLIs[ i ].roles,
                              name="roles-" + str( i ),
                              args=[] )
             threads.append( t )
@@ -1956,11 +1954,11 @@
             ONOSMastership.append( t.result )
 
         for i in range( len( ONOSMastership ) ):
-            node = str( main.activeNodes[i] + 1 )
-            if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
+            node = str( main.activeNodes[ i ] + 1 )
+            if not ONOSMastership[ i ] or "Error" in ONOSMastership[ i ]:
                 main.log.error( "Error in getting ONOS" + node + " roles" )
                 main.log.warn( "ONOS" + node + " mastership response: " +
-                               repr( ONOSMastership[i] ) )
+                               repr( ONOSMastership[ i ] ) )
                 rolesResults = False
         utilities.assert_equals(
             expect=True,
@@ -1969,7 +1967,7 @@
             onfail="Error in reading roles from ONOS" )
 
         main.step( "Check for consistency in roles from each controller" )
-        if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
+        if all( [ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
             main.log.info(
                 "Switch roles are consistent across all ONOS nodes" )
         else:
@@ -1982,7 +1980,7 @@
 
         if rolesResults and not consistentMastership:
             for i in range( len( ONOSMastership ) ):
-                node = str( main.activeNodes[i] + 1 )
+                node = str( main.activeNodes[ i ] + 1 )
                 main.log.warn( "ONOS" + node + " roles: ",
                                json.dumps( json.loads( ONOSMastership[ i ] ),
                                            sort_keys=True,
@@ -1998,7 +1996,7 @@
         intentsResults = True
         threads = []
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].intents,
+            t = main.Thread( target=main.CLIs[ i ].intents,
                              name="intents-" + str( i ),
                              args=[],
                              kwargs={ 'jsonFormat': True } )
@@ -2009,8 +2007,8 @@
             t.join()
             ONOSIntents.append( t.result )
 
-        for i in range( len( ONOSIntents) ):
-            node = str( main.activeNodes[i] + 1 )
+        for i in range( len( ONOSIntents ) ):
+            node = str( main.activeNodes[ i ] + 1 )
             if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
                 main.log.error( "Error in getting ONOS" + node + " intents" )
                 main.log.warn( "ONOS" + node + " intents response: " +
@@ -2023,7 +2021,7 @@
             onfail="Error in reading intents from ONOS" )
 
         main.step( "Check for consistency in Intents from each controller" )
-        if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
+        if all( [ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
             main.log.info( "Intents are consistent across all ONOS " +
                              "nodes" )
         else:
@@ -2072,12 +2070,12 @@
                 main.log.exception( "Error in parsing intents" )
                 main.log.error( repr( node ) )
             intentStates.append( nodeStates )
-            out = [ (i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
+            out = [ ( i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
             main.log.info( dict( out ) )
 
         if intentsResults and not consistentIntents:
             for i in range( len( main.activeNodes ) ):
-                node = str( main.activeNodes[i] + 1 )
+                node = str( main.activeNodes[ i ] + 1 )
                 main.log.warn( "ONOS" + node + " intents: " )
                 main.log.warn( json.dumps(
                     json.loads( ONOSIntents[ i ] ),
@@ -2116,7 +2114,7 @@
                             main.log.debug( json.dumps( intent ) )
                 except ( ValueError, TypeError ):
                     main.log.exception( "Exception printing intents" )
-                    main.log.debug( repr( ONOSIntents[0] ) )
+                    main.log.debug( repr( ONOSIntents[ 0 ] ) )
                     main.log.debug( repr( intentState ) )
             if sameIntents == main.FALSE:
                 try:
@@ -2130,7 +2128,7 @@
                                                 separators=( ',', ': ' ) ) )
                 except ( ValueError, TypeError ):
                     main.log.exception( "Exception printing intents" )
-                    main.log.debug( repr( ONOSIntents[0] ) )
+                    main.log.debug( repr( ONOSIntents[ 0 ] ) )
                     main.log.debug( repr( intentState ) )
             utilities.assert_equals(
                 expect=main.TRUE,
@@ -2145,7 +2143,7 @@
         for i in range( 28 ):
             main.log.info( "Checking flow table on s" + str( i + 1 ) )
             tmpFlows = main.Mininet1.getFlowTable( "s" + str( i + 1 ), version="1.3", debug=False )
-            curSwitch = main.Mininet1.flowTableComp( flows[i], tmpFlows )
+            curSwitch = main.Mininet1.flowTableComp( flows[ i ], tmpFlows )
             FlowTables = FlowTables and curSwitch
             if curSwitch == main.FALSE:
                 main.log.warn( "Differences in flow table for switch: s{}".format( i + 1 ) )
@@ -2156,7 +2154,7 @@
             onfail="Changes were found in the flow tables" )
 
         main.Mininet2.pingLongKill()
-        '''
+        """
         main.step( "Check the continuous pings to ensure that no packets " +
                    "were dropped during component failure" )
         main.Mininet2.pingKill( main.params[ 'TESTONUSER' ],
@@ -2182,19 +2180,18 @@
             actual=LossInPings,
             onpass="No Loss of connectivity",
             onfail="Loss of dataplane connectivity detected" )
-        '''
-
+        """
         main.step( "Leadership Election is still functional" )
         # Test of LeadershipElection
         leaderList = []
 
         partitioned = []
         for i in main.partition:
-            partitioned.append( main.nodes[i].ip_address )
+            partitioned.append( main.nodes[ i ].ip_address )
         leaderResult = main.TRUE
 
         for i in main.activeNodes:
-            cli = main.CLIs[i]
+            cli = main.CLIs[ i ]
             leaderN = cli.electionTestLeader()
             leaderList.append( leaderN )
             if leaderN == main.FALSE:
@@ -2258,8 +2255,8 @@
             for i in main.activeNodes:
                 t = main.Thread( target=utilities.retry,
                                  name="devices-" + str( i ),
-                                 args=[ main.CLIs[i].devices, [ None ] ],
-                                 kwargs= { 'sleep': 5, 'attempts': 5,
+                                 args=[ main.CLIs[ i ].devices, [ None ] ],
+                                 kwargs={ 'sleep': 5, 'attempts': 5,
-                                           'randomTime': True } )
+                                          'randomTime': True } )
                 threads.append( t )
                 t.start()
@@ -2273,8 +2270,8 @@
             for i in main.activeNodes:
                 t = main.Thread( target=utilities.retry,
                                  name="hosts-" + str( i ),
-                                 args=[ main.CLIs[i].hosts, [ None ] ],
-                                 kwargs= { 'sleep': 5, 'attempts': 5,
+                                 args=[ main.CLIs[ i ].hosts, [ None ] ],
+                                 kwargs={ 'sleep': 5, 'attempts': 5,
-                                           'randomTime': True } )
+                                          'randomTime': True } )
                 threads.append( t )
                 t.start()
@@ -2288,7 +2285,7 @@
                     main.log.error( repr( t.result ) )
                     hosts.append( None )
             for controller in range( 0, len( hosts ) ):
-                controllerStr = str( main.activeNodes[controller] + 1 )
+                controllerStr = str( main.activeNodes[ controller ] + 1 )
                 if hosts[ controller ]:
                     for host in hosts[ controller ]:
                         if host is None or host.get( 'ipAddresses', [] ) == []:
@@ -2301,8 +2298,8 @@
             for i in main.activeNodes:
                 t = main.Thread( target=utilities.retry,
                                  name="ports-" + str( i ),
-                                 args=[ main.CLIs[i].ports, [ None ] ],
-                                 kwargs= { 'sleep': 5, 'attempts': 5,
+                                 args=[ main.CLIs[ i ].ports, [ None ] ],
+                                 kwargs={ 'sleep': 5, 'attempts': 5,
-                                           'randomTime': True } )
+                                          'randomTime': True } )
                 threads.append( t )
                 t.start()
@@ -2315,8 +2312,8 @@
             for i in main.activeNodes:
                 t = main.Thread( target=utilities.retry,
                                  name="links-" + str( i ),
-                                 args=[ main.CLIs[i].links, [ None ] ],
-                                 kwargs= { 'sleep': 5, 'attempts': 5,
+                                 args=[ main.CLIs[ i ].links, [ None ] ],
+                                 kwargs={ 'sleep': 5, 'attempts': 5,
-                                           'randomTime': True } )
+                                          'randomTime': True } )
                 threads.append( t )
                 t.start()
@@ -2329,8 +2326,8 @@
             for i in main.activeNodes:
                 t = main.Thread( target=utilities.retry,
                                  name="clusters-" + str( i ),
-                                 args=[ main.CLIs[i].clusters, [ None ] ],
-                                 kwargs= { 'sleep': 5, 'attempts': 5,
+                                 args=[ main.CLIs[ i ].clusters, [ None ] ],
+                                 kwargs={ 'sleep': 5, 'attempts': 5,
-                                           'randomTime': True } )
+                                          'randomTime': True } )
                 threads.append( t )
                 t.start()
@@ -2349,18 +2346,18 @@
                all( e is None for e in ports ) and\
                all( e is None for e in links ) and\
                all( e is None for e in clusters ):
-                   topoFailMsg = "Could not get topology from ONOS"
-                   main.log.error( topoFailMsg )
-                   continue  # Try again, No use trying to compare
+                topoFailMsg = "Could not get topology from ONOS"
+                main.log.error( topoFailMsg )
+                continue  # Try again, No use trying to compare
 
             mnSwitches = main.Mininet1.getSwitches()
             mnLinks = main.Mininet1.getLinks()
             mnHosts = main.Mininet1.getHosts()
             for controller in range( len( main.activeNodes ) ):
-                controllerStr = str( main.activeNodes[controller] + 1 )
+                controllerStr = str( main.activeNodes[ controller ] + 1 )
                 if devices[ controller ] and ports[ controller ] and\
-                    "Error" not in devices[ controller ] and\
-                    "Error" not in ports[ controller ]:
+                        "Error" not in devices[ controller ] and\
+                        "Error" not in ports[ controller ]:
 
                     try:
                         currentDevicesResult = main.Mininet1.compareSwitches(
@@ -2413,29 +2410,29 @@
                 mappings = {}
                 for i in range( 1, 29 ):  # hosts 1 through 28
                     # set up correct variables:
-                    macId = "00:" * 5 + hex( i ).split( "0x" )[1].upper().zfill(2)
+                    macId = "00:" * 5 + hex( i ).split( "0x" )[ 1 ].upper().zfill( 2 )
                     if i == 1:
-                        deviceId = "1000".zfill(16)
+                        deviceId = "1000".zfill( 16 )
                     elif i == 2:
-                        deviceId = "2000".zfill(16)
+                        deviceId = "2000".zfill( 16 )
                     elif i == 3:
-                        deviceId = "3000".zfill(16)
+                        deviceId = "3000".zfill( 16 )
                     elif i == 4:
-                        deviceId = "3004".zfill(16)
+                        deviceId = "3004".zfill( 16 )
                     elif i == 5:
-                        deviceId = "5000".zfill(16)
+                        deviceId = "5000".zfill( 16 )
                     elif i == 6:
-                        deviceId = "6000".zfill(16)
+                        deviceId = "6000".zfill( 16 )
                     elif i == 7:
-                        deviceId = "6007".zfill(16)
+                        deviceId = "6007".zfill( 16 )
                     elif i >= 8 and i <= 17:
                         dpid = '3' + str( i ).zfill( 3 )
-                        deviceId = dpid.zfill(16)
+                        deviceId = dpid.zfill( 16 )
                     elif i >= 18 and i <= 27:
                         dpid = '6' + str( i ).zfill( 3 )
-                        deviceId = dpid.zfill(16)
+                        deviceId = dpid.zfill( 16 )
                     elif i == 28:
-                        deviceId = "2800".zfill(16)
+                        deviceId = "2800".zfill( 16 )
                     mappings[ macId ] = deviceId
                 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
                     if hosts[ controller ] == []:
@@ -2455,7 +2452,7 @@
                                 assert location, "location field could not be found for this host object"
 
                                 # Trim the protocol identifier off deviceId
-                                device = str( location.get( 'elementId' ) ).split(':')[1]
+                                device = str( location.get( 'elementId' ) ).split( ':' )[ 1 ]
                                 assert device, "elementId field could not be found for this host location object"
 
                                 port = location.get( 'port' )
@@ -2466,7 +2463,7 @@
                                     if str( port ) != "1":
                                         main.log.error( "The attachment port is incorrect for " +
                                                         "host " + str( mac ) +
-                                                        ". Expected: 1 Actual: " + str( port) )
+                                                        ". Expected: 1 Actual: " + str( port ) )
                                         hostAttachment = False
                                     if device != mappings[ str( mac ) ]:
                                         main.log.error( "The attachment device is incorrect for " +
@@ -2508,7 +2505,7 @@
         main.step( "Hosts view is consistent across all ONOS nodes" )
         consistentHostsResult = main.TRUE
         for controller in range( len( hosts ) ):
-            controllerStr = str( main.activeNodes[controller] + 1 )
+            controllerStr = str( main.activeNodes[ controller ] + 1 )
             if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
                 if hosts[ controller ] == hosts[ 0 ]:
                     continue
@@ -2550,7 +2547,7 @@
         main.step( "Clusters view is consistent across all ONOS nodes" )
         consistentClustersResult = main.TRUE
         for controller in range( len( clusters ) ):
-            controllerStr = str( main.activeNodes[controller] + 1 )
+            controllerStr = str( main.activeNodes[ controller ] + 1 )
             if "Error" not in clusters[ controller ]:
                 if clusters[ controller ] == clusters[ 0 ]:
                     continue
@@ -2580,7 +2577,7 @@
             numClusters = len( json.loads( clusters[ 0 ] ) )
         except ( ValueError, TypeError ):
             main.log.exception( "Error parsing clusters[0]: " +
-                                repr( clusters[0] ) )
+                                repr( clusters[ 0 ] ) )
             numClusters = "ERROR"
         clusterResults = main.FALSE
         if numClusters == 1:
@@ -2630,7 +2627,7 @@
         main.step( "Checking ONOS nodes" )
         nodeResults = utilities.retry( main.HA.nodesCheck,
                                        False,
-                                       args=[main.activeNodes],
+                                       args=[ main.activeNodes ],
                                        attempts=5 )
 
         utilities.assert_equals( expect=True, actual=nodeResults,
@@ -2639,8 +2636,8 @@
         if not nodeResults:
             for i in main.activeNodes:
                 main.log.debug( "{} components not ACTIVE: \n{}".format(
-                    main.CLIs[i].name,
-                    main.CLIs[i].sendline( "scr:list | grep -v ACTIVE" ) ) )
+                    main.CLIs[ i ].name,
+                    main.CLIs[ i ].sendline( "scr:list | grep -v ACTIVE" ) ) )
 
         if not topoResult:
             main.cleanup()
@@ -2717,7 +2714,7 @@
         switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
 
         description = "Killing a switch to ensure it is discovered correctly"
-        onosCli = main.CLIs[ main.activeNodes[0] ]
+        onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
         main.case( description )
         switch = main.params[ 'kill' ][ 'switch' ]
         switchDPID = main.params[ 'kill' ][ 'dpid' ]
@@ -2762,7 +2759,7 @@
         switch = main.params[ 'kill' ][ 'switch' ]
         switchDPID = main.params[ 'kill' ][ 'dpid' ]
         links = main.params[ 'kill' ][ 'links' ].split()
-        onosCli = main.CLIs[ main.activeNodes[0] ]
+        onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
         description = "Adding a switch to ensure it is discovered correctly"
         main.case( description )
 
@@ -2849,14 +2846,14 @@
             main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
 
         try:
-            timerLog = open( main.logdir + "/Timers.csv", 'w')
+            timerLog = open( main.logdir + "/Timers.csv", 'w' )
             # Overwrite with empty line and close
             labels = "Gossip Intents"
             data = str( gossipTime )
             timerLog.write( labels + "\n" + data )
             timerLog.close()
-        except NameError, e:
-            main.log.exception(e)
+        except NameError as e:
+            main.log.exception( e )
 
     def CASE14( self, main ):
         """
@@ -2868,9 +2865,9 @@
         assert main.CLIs, "main.CLIs not defined"
         assert main.nodes, "main.nodes not defined"
 
-        main.case("Start Leadership Election app")
+        main.case( "Start Leadership Election app" )
         main.step( "Install leadership election app" )
-        onosCli = main.CLIs[ main.activeNodes[0] ]
+        onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
         appResult = onosCli.activateApp( "org.onosproject.election" )
         utilities.assert_equals(
             expect=main.TRUE,
@@ -2880,9 +2877,9 @@
 
         main.step( "Run for election on each node" )
         for i in main.activeNodes:
-            main.CLIs[i].electionTestRun()
-        time.sleep(5)
-        activeCLIs = [ main.CLIs[i] for i in main.activeNodes ]
+            main.CLIs[ i ].electionTestRun()
+        time.sleep( 5 )
+        activeCLIs = [ main.CLIs[ i ] for i in main.activeNodes ]
         sameResult, leaders = main.HA.consistentLeaderboards( activeCLIs )
         utilities.assert_equals(
             expect=True,
@@ -2892,7 +2889,7 @@
 
         if sameResult:
             leader = leaders[ 0 ][ 0 ]
-            if main.nodes[main.activeNodes[0]].ip_address in leader:
+            if main.nodes[ main.activeNodes[ 0 ] ].ip_address in leader:
                 correctLeader = True
             else:
                 correctLeader = False
@@ -2942,7 +2939,7 @@
         electionResult = main.TRUE
 
         for i in main.activeNodes:  # run test election on each node
-            if main.CLIs[i].electionTestRun() == main.FALSE:
+            if main.CLIs[ i ].electionTestRun() == main.FALSE:
                 electionResult = main.FALSE
         utilities.assert_equals(
             expect=main.TRUE,
@@ -2957,7 +2954,7 @@
 
         main.step( "Check that each node shows the same leader and candidates" )
         failMessage = "Nodes have different leaderboards"
-        activeCLIs = [ main.CLIs[i] for i in main.activeNodes ]
+        activeCLIs = [ main.CLIs[ i ] for i in main.activeNodes ]
         sameResult, oldLeaders = main.HA.consistentLeaderboards( activeCLIs )
         if sameResult:
             oldLeader = oldLeaders[ 0 ][ 0 ]
@@ -3007,7 +3004,7 @@
         if newLeader == oldLeader:
             newLeaderResult = False
             main.log.error( "All nodes still see old leader: " + str( oldLeader ) +
-                " as the current leader" )
+                            " as the current leader" )
         utilities.assert_equals(
             expect=True,
             actual=newLeaderResult,
@@ -3024,7 +3021,7 @@
             else:
                 main.log.info( "Expected no leader, got: " + str( newLeader ) )
                 correctCandidateResult = main.FALSE
-        elif len( oldLeaders[0] ) >= 3:
+        elif len( oldLeaders[ 0 ] ) >= 3:
             if newLeader == oldLeaders[ 0 ][ 2 ]:
                 # correct leader was elected
                 correctCandidateResult = main.TRUE
@@ -3064,10 +3061,10 @@
         positionResult, reRunLeaders = main.HA.consistentLeaderboards( activeCLIs )
 
         # Check that the re-elected node is last on the candidate List
-        if not reRunLeaders[0]:
+        if not reRunLeaders[ 0 ]:
             positionResult = main.FALSE
         elif oldLeader != reRunLeaders[ 0 ][ -1 ]:
-            main.log.error( "Old Leader ({}) not in the proper position: {} ".format( str( oldLeader),
+            main.log.error( "Old Leader ({}) not in the proper position: {} ".format( str( oldLeader ),
                                                                                       str( reRunLeaders[ 0 ] ) ) )
             positionResult = main.FALSE
         utilities.assert_equals(
@@ -3091,15 +3088,15 @@
         # Variables for the distributed primitives tests
         main.pCounterName = "TestON-Partitions"
         main.pCounterValue = 0
-        main.onosSet = set([])
+        main.onosSet = set( [] )
         main.onosSetName = "TestON-set"
 
         description = "Install Primitives app"
         main.case( description )
         main.step( "Install Primitives app" )
         appName = "org.onosproject.distributedprimitives"
-        node = main.activeNodes[0]
-        appResults = main.CLIs[node].activateApp( appName )
+        node = main.activeNodes[ 0 ]
+        appResults = main.CLIs[ node ].activateApp( appName )
         utilities.assert_equals( expect=main.TRUE,
                                  actual=appResults,
                                  onpass="Primitives app activated",
diff --git a/TestON/tests/HA/HAkillNodes/HAkillNodes.py b/TestON/tests/HA/HAkillNodes/HAkillNodes.py
index 560cb71..e3feb50 100644
--- a/TestON/tests/HA/HAkillNodes/HAkillNodes.py
+++ b/TestON/tests/HA/HAkillNodes/HAkillNodes.py
@@ -23,8 +23,6 @@
 CASE16: Install Distributed Primitives app
 CASE17: Check for basic functionality with distributed primitives
 """
-
-
 class HAkillNodes:
 
     def __init__( self ):
@@ -149,7 +147,7 @@
                             filePath + topoName,
                             main.Mininet1.home,
                             direction="to" )
-        mnResult = main.Mininet1.startNet( )
+        mnResult = main.Mininet1.startNet()
         utilities.assert_equals( expect=main.TRUE, actual=mnResult,
                                  onpass="Mininet Started",
                                  onfail="Error starting Mininet" )
@@ -201,7 +199,7 @@
                   'seamless="seamless"></iframe>\n'
         graphs += ']]></ac:plain-text-body>\n'
         graphs += '</ac:structured-macro>\n'
-        main.log.wiki(graphs)
+        main.log.wiki( graphs )
 
         main.step( "Creating ONOS package" )
         # copy gen-partions file to ONOS
@@ -268,9 +266,9 @@
         cliResults = main.TRUE
         threads = []
         for i in range( main.numCtrls ):
-            t = main.Thread( target=main.CLIs[i].startOnosCli,
+            t = main.Thread( target=main.CLIs[ i ].startOnosCli,
                              name="startOnosCli-" + str( i ),
-                             args=[main.nodes[i].ip_address] )
+                             args=[ main.nodes[ i ].ip_address ] )
             threads.append( t )
             t.start()
 
@@ -300,7 +298,7 @@
         main.step( "Checking ONOS nodes" )
         nodeResults = utilities.retry( main.HA.nodesCheck,
                                        False,
-                                       args=[main.activeNodes],
+                                       args=[ main.activeNodes ],
                                        attempts=5 )
 
         utilities.assert_equals( expect=True, actual=nodeResults,
@@ -309,7 +307,7 @@
 
         if not nodeResults:
             for i in main.activeNodes:
-                cli = main.CLIs[i]
+                cli = main.CLIs[ i ]
                 main.log.debug( "{} components not ACTIVE: \n{}".format(
                     cli.name,
                     cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
@@ -321,7 +319,7 @@
         # get data from the params
         apps = main.params.get( 'apps' )
         if apps:
-            apps = apps.split(',')
+            apps = apps.split( ',' )
             main.log.warn( apps )
             activateResult = True
             for app in apps:
@@ -348,8 +346,8 @@
             main.log.debug( config )
             checkResult = main.TRUE
             for component in config:
-                for setting in config[component]:
-                    value = config[component][setting]
+                for setting in config[ component ]:
+                    value = config[ component ][ setting ]
                     check = main.CLIs[ 0 ].setCfg( component, setting, value )
                     main.log.info( "Value was changed? {}".format( main.TRUE == check ) )
                     checkResult = check and checkResult
@@ -364,7 +362,7 @@
         appCheck = main.TRUE
         threads = []
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].appToIDCheck,
+            t = main.Thread( target=main.CLIs[ i ].appToIDCheck,
                              name="appToIDCheck-" + str( i ),
                              args=[] )
             threads.append( t )
@@ -374,9 +372,9 @@
             t.join()
             appCheck = appCheck and t.result
         if appCheck != main.TRUE:
-            node = main.activeNodes[0]
-            main.log.warn( main.CLIs[node].apps() )
-            main.log.warn( main.CLIs[node].appIDs() )
+            node = main.activeNodes[ 0 ]
+            main.log.warn( main.CLIs[ node ].apps() )
+            main.log.warn( main.CLIs[ node ].appIDs() )
         utilities.assert_equals( expect=main.TRUE, actual=appCheck,
                                  onpass="App Ids seem to be correct",
                                  onfail="Something is wrong with app Ids" )
@@ -461,9 +459,9 @@
         # Manually assign mastership to the controller we want
         roleCall = main.TRUE
 
-        ipList = [ ]
+        ipList = []
         deviceList = []
-        onosCli = main.CLIs[ main.activeNodes[0] ]
+        onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
         try:
             # Assign mastership to specific controllers. This assignment was
             # determined for a 7 node cluser, but will work with any sized
@@ -537,8 +535,8 @@
         #       atomic and is actually a multi step process
         time.sleep( 5 )
         for i in range( len( ipList ) ):
-            ip = ipList[i]
-            deviceId = deviceList[i]
+            ip = ipList[ i ]
+            deviceId = deviceList[ i ]
             # Check assignment
             master = onosCli.getRole( deviceId ).get( 'master' )
             if ip in master:
@@ -586,7 +584,7 @@
 
         # install onos-app-fwd
         main.step( "Install reactive forwarding app" )
-        onosCli = main.CLIs[ main.activeNodes[0] ]
+        onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
         installResults = onosCli.activateApp( "org.onosproject.fwd" )
         utilities.assert_equals( expect=main.TRUE, actual=installResults,
                                  onpass="Install fwd successful",
@@ -596,7 +594,7 @@
         appCheck = main.TRUE
         threads = []
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].appToIDCheck,
+            t = main.Thread( target=main.CLIs[ i ].appToIDCheck,
                              name="appToIDCheck-" + str( i ),
                              args=[] )
             threads.append( t )
@@ -621,13 +619,13 @@
         pingResult = main.Mininet1.pingall()
         time2 = time.time()
         if not pingResult:
-            main.log.warn("First pingall failed. Trying again...")
+            main.log.warn( "First pingall failed. Trying again..." )
             pingResult = main.Mininet1.pingall()
             passMsg += " on the second try"
         utilities.assert_equals(
             expect=main.TRUE,
             actual=pingResult,
-            onpass= passMsg,
+            onpass=passMsg,
             onfail="Reactive Pingall failed, " +
                    "one or more ping pairs failed" )
         main.log.info( "Time for pingall: %2f seconds" %
@@ -636,8 +634,8 @@
         time.sleep( 11 )
         # uninstall onos-app-fwd
         main.step( "Uninstall reactive forwarding app" )
-        node = main.activeNodes[0]
-        uninstallResult = main.CLIs[node].deactivateApp( "org.onosproject.fwd" )
+        node = main.activeNodes[ 0 ]
+        uninstallResult = main.CLIs[ node ].deactivateApp( "org.onosproject.fwd" )
         utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
                                  onpass="Uninstall fwd successful",
                                  onfail="Uninstall fwd failed" )
@@ -646,7 +644,7 @@
         threads = []
         appCheck2 = main.TRUE
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].appToIDCheck,
+            t = main.Thread( target=main.CLIs[ i ].appToIDCheck,
                              name="appToIDCheck-" + str( i ),
                              args=[] )
             threads.append( t )
@@ -656,9 +654,9 @@
             t.join()
             appCheck2 = appCheck2 and t.result
         if appCheck2 != main.TRUE:
-            node = main.activeNodes[0]
-            main.log.warn( main.CLIs[node].apps() )
-            main.log.warn( main.CLIs[node].appIDs() )
+            node = main.activeNodes[ 0 ]
+            main.log.warn( main.CLIs[ node ].apps() )
+            main.log.warn( main.CLIs[ node ].appIDs() )
         utilities.assert_equals( expect=main.TRUE, actual=appCheck2,
                                  onpass="App Ids seem to be correct",
                                  onfail="Something is wrong with app Ids" )
@@ -686,8 +684,8 @@
                 host2Id = host2Dict.get( 'id', None )
             if host1Id and host2Id:
                 nodeNum = ( i % len( main.activeNodes ) )
-                node = main.activeNodes[nodeNum]
-                tmpId = main.CLIs[node].addHostIntent( host1Id, host2Id )
+                node = main.activeNodes[ nodeNum ]
+                tmpId = main.CLIs[ node ].addHostIntent( host1Id, host2Id )
                 if tmpId:
                     main.log.info( "Added intent with id: " + tmpId )
                     intentIds.append( tmpId )
@@ -697,8 +695,8 @@
             else:
                 main.log.error( "Error, getHost() failed for h" + str( i ) +
                                 " and/or h" + str( i + 10 ) )
-                node = main.activeNodes[0]
-                hosts = main.CLIs[node].hosts()
+                node = main.activeNodes[ 0 ]
+                hosts = main.CLIs[ node ].hosts()
                 main.log.warn( "Hosts output: " )
                 try:
                     main.log.warn( json.dumps( json.loads( hosts ),
@@ -766,7 +764,7 @@
                 for i in range( 14 ):
                     topics.append( "work-partition-" + str( i ) )
                 main.log.debug( topics )
-                ONOStopics = [ j['topic'] for j in parsedLeaders ]
+                ONOStopics = [ j[ 'topic' ] for j in parsedLeaders ]
                 for topic in topics:
                     if topic not in ONOStopics:
                         main.log.error( "Error: " + topic +
@@ -780,13 +778,13 @@
         # Check all nodes
         if missing:
             for i in main.activeNodes:
-                response = main.CLIs[i].leaders( jsonFormat=False)
-                main.log.warn( str( main.CLIs[i].name ) + " leaders output: \n" +
+                response = main.CLIs[ i ].leaders( jsonFormat=False )
+                main.log.warn( str( main.CLIs[ i ].name ) + " leaders output: \n" +
                                str( response ) )
 
         partitions = onosCli.partitions()
         try:
-            if partitions :
+            if partitions:
                 parsedPartitions = json.loads( partitions )
                 main.log.warn( json.dumps( parsedPartitions,
                                            sort_keys=True,
@@ -801,7 +799,7 @@
             main.log.error( repr( partitions ) )
         pendingMap = onosCli.pendingMap()
         try:
-            if pendingMap :
+            if pendingMap:
                 parsedPending = json.loads( pendingMap )
                 main.log.warn( json.dumps( parsedPending,
                                            sort_keys=True,
@@ -820,21 +818,21 @@
             main.log.error( "Error in pushing host intents to ONOS" )
 
         main.step( "Intent Anti-Entropy dispersion" )
-        for j in range(100):
+        for j in range( 100 ):
             correct = True
             main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
             for i in main.activeNodes:
                 onosIds = []
-                ids = main.CLIs[i].getAllIntentsId()
+                ids = main.CLIs[ i ].getAllIntentsId()
                 onosIds.append( ids )
-                main.log.debug( "Intents in " + main.CLIs[i].name + ": " +
+                main.log.debug( "Intents in " + main.CLIs[ i ].name + ": " +
                                 str( sorted( onosIds ) ) )
                 if sorted( ids ) != sorted( intentIds ):
                     main.log.warn( "Set of intent IDs doesn't match" )
                     correct = False
                     break
                 else:
-                    intents = json.loads( main.CLIs[i].intents() )
+                    intents = json.loads( main.CLIs[ i ].intents() )
                     for intent in intents:
                         if intent[ 'state' ] != "INSTALLED":
                             main.log.warn( "Intent " + intent[ 'id' ] +
@@ -844,14 +842,14 @@
             if correct:
                 break
             else:
-                time.sleep(1)
+                time.sleep( 1 )
         if not intentStop:
             intentStop = time.time()
         global gossipTime
         gossipTime = intentStop - intentStart
         main.log.info( "It took about " + str( gossipTime ) +
                         " seconds for all intents to appear in each node" )
-        gossipPeriod = int( main.params['timers']['gossip'] )
+        gossipPeriod = int( main.params[ 'timers' ][ 'gossip' ] )
         maxGossipTime = gossipPeriod * len( main.activeNodes )
         utilities.assert_greater_equals(
                 expect=maxGossipTime, actual=gossipTime,
@@ -913,7 +911,7 @@
                     # FIXME: this should only be after we start the app
                     topics.append( "org.onosproject.election" )
                     main.log.debug( topics )
-                    ONOStopics = [ j['topic'] for j in parsedLeaders ]
+                    ONOStopics = [ j[ 'topic' ] for j in parsedLeaders ]
                     for topic in topics:
                         if topic not in ONOStopics:
                             main.log.error( "Error: " + topic +
@@ -927,14 +925,14 @@
             # Check all nodes
             if missing:
                 for i in main.activeNodes:
-                    node = main.CLIs[i]
-                    response = node.leaders( jsonFormat=False)
+                    node = main.CLIs[ i ]
+                    response = node.leaders( jsonFormat=False )
                     main.log.warn( str( node.name ) + " leaders output: \n" +
                                    str( response ) )
 
             partitions = onosCli.partitions()
             try:
-                if partitions :
+                if partitions:
                     parsedPartitions = json.loads( partitions )
                     main.log.warn( json.dumps( parsedPartitions,
                                                sort_keys=True,
@@ -949,7 +947,7 @@
                 main.log.error( repr( partitions ) )
             pendingMap = onosCli.pendingMap()
             try:
-                if pendingMap :
+                if pendingMap:
                     parsedPending = json.loads( pendingMap )
                     main.log.warn( json.dumps( parsedPending,
                                                sort_keys=True,
@@ -978,7 +976,7 @@
                                 "functionality and check the state of " +\
                                 "the intent"
 
-        onosCli = main.CLIs[ main.activeNodes[0] ]
+        onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
         main.step( "Check Intent state" )
         installedCheck = False
         loopCount = 0
@@ -1064,7 +1062,7 @@
                 # FIXME: topics.append( "org.onosproject.election" )
                 # Print leaders output
                 main.log.debug( topics )
-                ONOStopics = [ j['topic'] for j in parsedLeaders ]
+                ONOStopics = [ j[ 'topic' ] for j in parsedLeaders ]
                 for topic in topics:
                     if topic not in ONOStopics:
                         main.log.error( "Error: " + topic +
@@ -1081,8 +1079,8 @@
         # Check all nodes
         if topicCheck:
             for i in main.activeNodes:
-                node = main.CLIs[i]
-                response = node.leaders( jsonFormat=False)
+                node = main.CLIs[ i ]
+                response = node.leaders( jsonFormat=False )
                 main.log.warn( str( node.name ) + " leaders output: \n" +
                                str( response ) )
 
@@ -1092,7 +1090,7 @@
         # Print partitions
         partitions = onosCli.partitions()
         try:
-            if partitions :
+            if partitions:
                 parsedPartitions = json.loads( partitions )
                 main.log.warn( json.dumps( parsedPartitions,
                                            sort_keys=True,
@@ -1108,7 +1106,7 @@
         # Print Pending Map
         pendingMap = onosCli.pendingMap()
         try:
-            if pendingMap :
+            if pendingMap:
                 parsedPending = json.loads( pendingMap )
                 main.log.warn( json.dumps( parsedPending,
                                            sort_keys=True,
@@ -1162,7 +1160,7 @@
                     # FIXME: this should only be after we start the app
                     topics.append( "org.onosproject.election" )
                     main.log.debug( topics )
-                    ONOStopics = [ j['topic'] for j in parsedLeaders ]
+                    ONOStopics = [ j[ 'topic' ] for j in parsedLeaders ]
                     for topic in topics:
                         if topic not in ONOStopics:
                             main.log.error( "Error: " + topic +
@@ -1175,14 +1173,14 @@
                 main.log.error( repr( leaders ) )
             if missing:
                 for i in main.activeNodes:
-                    node = main.CLIs[i]
-                    response = node.leaders( jsonFormat=False)
+                    node = main.CLIs[ i ]
+                    response = node.leaders( jsonFormat=False )
                     main.log.warn( str( node.name ) + " leaders output: \n" +
                                    str( response ) )
 
             partitions = onosCli.partitions()
             try:
-                if partitions :
+                if partitions:
                     parsedPartitions = json.loads( partitions )
                     main.log.warn( json.dumps( parsedPartitions,
                                                sort_keys=True,
@@ -1197,7 +1195,7 @@
                 main.log.error( repr( partitions ) )
             pendingMap = onosCli.pendingMap()
             try:
-                if pendingMap :
+                if pendingMap:
                     parsedPending = json.loads( pendingMap )
                     main.log.warn( json.dumps( parsedPending,
                                                sort_keys=True,
@@ -1268,7 +1266,7 @@
         rolesNotNull = main.TRUE
         threads = []
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].rolesNotNull,
+            t = main.Thread( target=main.CLIs[ i ].rolesNotNull,
                              name="rolesNotNull-" + str( i ),
                              args=[] )
             threads.append( t )
@@ -1290,7 +1288,7 @@
         rolesResults = True
         threads = []
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].roles,
+            t = main.Thread( target=main.CLIs[ i ].roles,
                              name="roles-" + str( i ),
                              args=[] )
             threads.append( t )
@@ -1301,11 +1299,11 @@
             ONOSMastership.append( t.result )
 
         for i in range( len( ONOSMastership ) ):
-            node = str( main.activeNodes[i] + 1 )
-            if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
+            node = str( main.activeNodes[ i ] + 1 )
+            if not ONOSMastership[ i ] or "Error" in ONOSMastership[ i ]:
                 main.log.error( "Error in getting ONOS" + node + " roles" )
                 main.log.warn( "ONOS" + node + " mastership response: " +
-                               repr( ONOSMastership[i] ) )
+                               repr( ONOSMastership[ i ] ) )
                 rolesResults = False
         utilities.assert_equals(
             expect=True,
@@ -1314,7 +1312,7 @@
             onfail="Error in reading roles from ONOS" )
 
         main.step( "Check for consistency in roles from each controller" )
-        if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
+        if all( [ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
             main.log.info(
                 "Switch roles are consistent across all ONOS nodes" )
         else:
@@ -1327,7 +1325,7 @@
 
         if rolesResults and not consistentMastership:
             for i in range( len( main.activeNodes ) ):
-                node = str( main.activeNodes[i] + 1 )
+                node = str( main.activeNodes[ i ] + 1 )
                 try:
                     main.log.warn(
                         "ONOS" + node + " roles: ",
@@ -1351,7 +1349,7 @@
         intentsResults = True
         threads = []
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].intents,
+            t = main.Thread( target=main.CLIs[ i ].intents,
                              name="intents-" + str( i ),
                              args=[],
                              kwargs={ 'jsonFormat': True } )
@@ -1363,7 +1361,7 @@
             ONOSIntents.append( t.result )
 
         for i in range( len( ONOSIntents ) ):
-            node = str( main.activeNodes[i] + 1 )
+            node = str( main.activeNodes[ i ] + 1 )
             if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
                 main.log.error( "Error in getting ONOS" + node + " intents" )
                 main.log.warn( "ONOS" + node + " intents response: " +
@@ -1376,7 +1374,7 @@
             onfail="Error in reading intents from ONOS" )
 
         main.step( "Check for consistency in Intents from each controller" )
-        if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
+        if all( [ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
             main.log.info( "Intents are consistent across all ONOS " +
                              "nodes" )
         else:
@@ -1424,17 +1422,17 @@
 
         if intentsResults and not consistentIntents:
             # print the json objects
-            n = str( main.activeNodes[-1] + 1 )
+            n = str( main.activeNodes[ -1 ] + 1 )
             main.log.debug( "ONOS" + n + " intents: " )
             main.log.debug( json.dumps( json.loads( ONOSIntents[ -1 ] ),
                                         sort_keys=True,
                                         indent=4,
                                         separators=( ',', ': ' ) ) )
             for i in range( len( ONOSIntents ) ):
-                node = str( main.activeNodes[i] + 1 )
+                node = str( main.activeNodes[ i ] + 1 )
                 if ONOSIntents[ i ] != ONOSIntents[ -1 ]:
                     main.log.debug( "ONOS" + node + " intents: " )
-                    main.log.debug( json.dumps( json.loads( ONOSIntents[i] ),
+                    main.log.debug( json.dumps( json.loads( ONOSIntents[ i ] ),
                                                 sort_keys=True,
                                                 indent=4,
                                                 separators=( ',', ': ' ) ) )
@@ -1455,7 +1453,7 @@
         flowsResults = True
         threads = []
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].flows,
+            t = main.Thread( target=main.CLIs[ i ].flows,
                              name="flows-" + str( i ),
                              args=[],
                              kwargs={ 'jsonFormat': True } )
@@ -1463,14 +1461,14 @@
             t.start()
 
         # NOTE: Flows command can take some time to run
-        time.sleep(30)
+        time.sleep( 30 )
         for t in threads:
             t.join()
             result = t.result
             ONOSFlows.append( result )
 
         for i in range( len( ONOSFlows ) ):
-            num = str( main.activeNodes[i] + 1 )
+            num = str( main.activeNodes[ i ] + 1 )
             if not ONOSFlows[ i ] or "Error" in ONOSFlows[ i ]:
                 main.log.error( "Error in getting ONOS" + num + " flows" )
                 main.log.warn( "ONOS" + num + " flows response: " +
@@ -1507,11 +1505,11 @@
 
         if flowsResults and not consistentFlows:
             for i in range( len( ONOSFlows ) ):
-                node = str( main.activeNodes[i] + 1 )
+                node = str( main.activeNodes[ i ] + 1 )
                 try:
                     main.log.warn(
                         "ONOS" + node + " flows: " +
-                        json.dumps( json.loads( ONOSFlows[i] ), sort_keys=True,
+                        json.dumps( json.loads( ONOSFlows[ i ] ), sort_keys=True,
                                     indent=4, separators=( ',', ': ' ) ) )
                 except ( ValueError, TypeError ):
                     main.log.warn( "ONOS" + node + " flows: " +
@@ -1576,9 +1574,9 @@
         devices = []
         threads = []
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].devices,
+            t = main.Thread( target=main.CLIs[ i ].devices,
                              name="devices-" + str( i ),
-                             args=[ ] )
+                             args=[] )
             threads.append( t )
             t.start()
 
@@ -1588,9 +1586,9 @@
         hosts = []
         threads = []
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].hosts,
+            t = main.Thread( target=main.CLIs[ i ].hosts,
                              name="hosts-" + str( i ),
-                             args=[ ] )
+                             args=[] )
             threads.append( t )
             t.start()
 
@@ -1608,9 +1606,9 @@
         ports = []
         threads = []
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].ports,
+            t = main.Thread( target=main.CLIs[ i ].ports,
                              name="ports-" + str( i ),
-                             args=[ ] )
+                             args=[] )
             threads.append( t )
             t.start()
 
@@ -1620,9 +1618,9 @@
         links = []
         threads = []
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].links,
+            t = main.Thread( target=main.CLIs[ i ].links,
                              name="links-" + str( i ),
-                             args=[ ] )
+                             args=[] )
             threads.append( t )
             t.start()
 
@@ -1632,9 +1630,9 @@
         clusters = []
         threads = []
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].clusters,
+            t = main.Thread( target=main.CLIs[ i ].clusters,
                              name="clusters-" + str( i ),
-                             args=[ ] )
+                             args=[] )
             threads.append( t )
             t.start()
 
@@ -1647,7 +1645,7 @@
         main.step( "Host view is consistent across ONOS nodes" )
         consistentHostsResult = main.TRUE
         for controller in range( len( hosts ) ):
-            controllerStr = str( main.activeNodes[controller] + 1 )
+            controllerStr = str( main.activeNodes[ controller ] + 1 )
             if hosts[ controller ] and "Error" not in hosts[ controller ]:
                 if hosts[ controller ] == hosts[ 0 ]:
                     continue
@@ -1674,10 +1672,10 @@
         main.step( "Each host has an IP address" )
         ipResult = main.TRUE
         for controller in range( 0, len( hosts ) ):
-            controllerStr = str( main.activeNodes[controller] + 1 )
+            controllerStr = str( main.activeNodes[ controller ] + 1 )
             if hosts[ controller ]:
                 for host in hosts[ controller ]:
-                    if not host.get( 'ipAddresses', [ ] ):
+                    if not host.get( 'ipAddresses', [] ):
                         main.log.error( "Error with host ips on controller" +
                                         controllerStr + ": " + str( host ) )
                         ipResult = main.FALSE
@@ -1691,7 +1689,7 @@
         main.step( "Cluster view is consistent across ONOS nodes" )
         consistentClustersResult = main.TRUE
         for controller in range( len( clusters ) ):
-            controllerStr = str( main.activeNodes[controller] + 1 )
+            controllerStr = str( main.activeNodes[ controller ] + 1 )
             if "Error" not in clusters[ controller ]:
                 if clusters[ controller ] == clusters[ 0 ]:
                     continue
@@ -1740,14 +1738,14 @@
         mnLinks = main.Mininet1.getLinks()
         mnHosts = main.Mininet1.getHosts()
         for controller in main.activeNodes:
-            controllerStr = str( main.activeNodes[controller] + 1 )
+            controllerStr = str( main.activeNodes[ controller ] + 1 )
             if devices[ controller ] and ports[ controller ] and\
-                "Error" not in devices[ controller ] and\
-                "Error" not in ports[ controller ]:
-                    currentDevicesResult = main.Mininet1.compareSwitches(
-                            mnSwitches,
-                            json.loads( devices[ controller ] ),
-                            json.loads( ports[ controller ] ) )
+                    "Error" not in devices[ controller ] and\
+                    "Error" not in ports[ controller ]:
+                currentDevicesResult = main.Mininet1.compareSwitches(
+                        mnSwitches,
+                        json.loads( devices[ controller ] ),
+                        json.loads( ports[ controller ] ) )
             else:
                 currentDevicesResult = main.FALSE
             utilities.assert_equals( expect=main.TRUE,
@@ -1834,7 +1832,7 @@
         killResults = main.TRUE
         for i in main.kill:
             killResults = killResults and\
-                          main.ONOSbench.onosKill( main.nodes[i].ip_address )
+                          main.ONOSbench.onosKill( main.nodes[ i ].ip_address )
             main.activeNodes.remove( i )
         utilities.assert_equals( expect=main.TRUE, actual=killResults,
                                  onpass="ONOS nodes killed successfully",
@@ -1843,7 +1841,7 @@
         main.step( "Checking ONOS nodes" )
         nodeResults = utilities.retry( main.HA.nodesCheck,
                                        False,
-                                       args=[main.activeNodes],
+                                       args=[ main.activeNodes ],
                                        sleep=15,
                                        attempts=5 )
 
@@ -1853,7 +1851,7 @@
 
         if not nodeResults:
             for i in main.activeNodes:
-                cli = main.CLIs[i]
+                cli = main.CLIs[ i ]
                 main.log.debug( "{} components not ACTIVE: \n{}".format(
                     cli.name,
                     cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
@@ -1879,7 +1877,7 @@
         restartTime = time.time()
         for i in main.kill:
             startResults = startResults and\
-                           main.ONOSbench.onosStart( main.nodes[i].ip_address )
+                           main.ONOSbench.onosStart( main.nodes[ i ].ip_address )
         utilities.assert_equals( expect=main.TRUE, actual=startResults,
                                  onpass="ONOS nodes started successfully",
                                  onfail="ONOS nodes NOT successfully started" )
@@ -1891,7 +1889,7 @@
             onosIsupResult = main.TRUE
             for i in main.kill:
                 onosIsupResult = onosIsupResult and\
-                                 main.ONOSbench.isup( main.nodes[i].ip_address )
+                                 main.ONOSbench.isup( main.nodes[ i ].ip_address )
             count = count + 1
         utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
                                  onpass="ONOS restarted successfully",
@@ -1901,7 +1899,7 @@
         cliResults = main.TRUE
         for i in main.kill:
             cliResults = cliResults and\
-                         main.CLIs[i].startOnosCli( main.nodes[i].ip_address )
+                         main.CLIs[ i ].startOnosCli( main.nodes[ i ].ip_address )
             main.activeNodes.append( i )
         utilities.assert_equals( expect=main.TRUE, actual=cliResults,
                                  onpass="ONOS cli restarted",
@@ -1923,7 +1921,7 @@
         main.step( "Checking ONOS nodes" )
         nodeResults = utilities.retry( main.HA.nodesCheck,
                                        False,
-                                       args=[main.activeNodes],
+                                       args=[ main.activeNodes ],
                                        sleep=15,
                                        attempts=5 )
 
@@ -1933,7 +1931,7 @@
 
         if not nodeResults:
             for i in main.activeNodes:
-                cli = main.CLIs[i]
+                cli = main.CLIs[ i ]
                 main.log.debug( "{} components not ACTIVE: \n{}".format(
                     cli.name,
                     cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
@@ -1941,16 +1939,16 @@
             main.cleanup()
             main.exit()
 
-        node = main.activeNodes[0]
-        main.log.debug( main.CLIs[node].nodes( jsonFormat=False ) )
-        main.log.debug( main.CLIs[node].leaders( jsonFormat=False ) )
-        main.log.debug( main.CLIs[node].partitions( jsonFormat=False ) )
+        node = main.activeNodes[ 0 ]
+        main.log.debug( main.CLIs[ node ].nodes( jsonFormat=False ) )
+        main.log.debug( main.CLIs[ node ].leaders( jsonFormat=False ) )
+        main.log.debug( main.CLIs[ node ].partitions( jsonFormat=False ) )
 
         main.step( "Rerun for election on the node(s) that were killed" )
         runResults = main.TRUE
         for i in main.kill:
             runResults = runResults and\
-                         main.CLIs[i].electionTestRun()
+                         main.CLIs[ i ].electionTestRun()
         utilities.assert_equals( expect=main.TRUE, actual=runResults,
                                  onpass="ONOS nodes reran for election topic",
                                  onfail="Errror rerunning for election" )
@@ -1977,9 +1975,9 @@
         rolesNotNull = main.TRUE
         threads = []
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].rolesNotNull,
+            t = main.Thread( target=main.CLIs[ i ].rolesNotNull,
                              name="rolesNotNull-" + str( i ),
-                             args=[ ] )
+                             args=[] )
             threads.append( t )
             t.start()
 
@@ -1999,7 +1997,7 @@
         rolesResults = True
         threads = []
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].roles,
+            t = main.Thread( target=main.CLIs[ i ].roles,
                              name="roles-" + str( i ),
                              args=[] )
             threads.append( t )
@@ -2010,11 +2008,11 @@
             ONOSMastership.append( t.result )
 
         for i in range( len( ONOSMastership ) ):
-            node = str( main.activeNodes[i] + 1 )
-            if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
+            node = str( main.activeNodes[ i ] + 1 )
+            if not ONOSMastership[ i ] or "Error" in ONOSMastership[ i ]:
                 main.log.error( "Error in getting ONOS" + node + " roles" )
                 main.log.warn( "ONOS" + node + " mastership response: " +
-                               repr( ONOSMastership[i] ) )
+                               repr( ONOSMastership[ i ] ) )
                 rolesResults = False
         utilities.assert_equals(
             expect=True,
@@ -2023,7 +2021,7 @@
             onfail="Error in reading roles from ONOS" )
 
         main.step( "Check for consistency in roles from each controller" )
-        if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
+        if all( [ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
             main.log.info(
                 "Switch roles are consistent across all ONOS nodes" )
         else:
@@ -2036,7 +2034,7 @@
 
         if rolesResults and not consistentMastership:
             for i in range( len( ONOSMastership ) ):
-                node = str( main.activeNodes[i] + 1 )
+                node = str( main.activeNodes[ i ] + 1 )
                 main.log.warn( "ONOS" + node + " roles: ",
                                json.dumps( json.loads( ONOSMastership[ i ] ),
                                            sort_keys=True,
@@ -2054,7 +2052,7 @@
         intentsResults = True
         threads = []
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].intents,
+            t = main.Thread( target=main.CLIs[ i ].intents,
                              name="intents-" + str( i ),
                              args=[],
                              kwargs={ 'jsonFormat': True } )
@@ -2065,8 +2063,8 @@
             t.join()
             ONOSIntents.append( t.result )
 
-        for i in range( len( ONOSIntents) ):
-            node = str( main.activeNodes[i] + 1 )
+        for i in range( len( ONOSIntents ) ):
+            node = str( main.activeNodes[ i ] + 1 )
             if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
                 main.log.error( "Error in getting ONOS" + node + " intents" )
                 main.log.warn( "ONOS" + node + " intents response: " +
@@ -2079,7 +2077,7 @@
             onfail="Error in reading intents from ONOS" )
 
         main.step( "Check for consistency in Intents from each controller" )
-        if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
+        if all( [ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
             main.log.info( "Intents are consistent across all ONOS " +
                              "nodes" )
         else:
@@ -2128,12 +2126,12 @@
                 main.log.exception( "Error in parsing intents" )
                 main.log.error( repr( node ) )
             intentStates.append( nodeStates )
-            out = [ (i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
+            out = [ ( i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
             main.log.info( dict( out ) )
 
         if intentsResults and not consistentIntents:
             for i in range( len( main.activeNodes ) ):
-                node = str( main.activeNodes[i] + 1 )
+                node = str( main.activeNodes[ i ] + 1 )
                 main.log.warn( "ONOS" + node + " intents: " )
                 main.log.warn( json.dumps(
                     json.loads( ONOSIntents[ i ] ),
@@ -2172,7 +2170,7 @@
                             main.log.debug( json.dumps( intent ) )
                 except ( ValueError, TypeError ):
                     main.log.exception( "Exception printing intents" )
-                    main.log.debug( repr( ONOSIntents[0] ) )
+                    main.log.debug( repr( ONOSIntents[ 0 ] ) )
                     main.log.debug( repr( intentState ) )
             if sameIntents == main.FALSE:
                 try:
@@ -2186,7 +2184,7 @@
                                                 separators=( ',', ': ' ) ) )
                 except ( ValueError, TypeError ):
                     main.log.exception( "Exception printing intents" )
-                    main.log.debug( repr( ONOSIntents[0] ) )
+                    main.log.debug( repr( ONOSIntents[ 0 ] ) )
                     main.log.debug( repr( intentState ) )
             utilities.assert_equals(
                 expect=main.TRUE,
@@ -2201,7 +2199,7 @@
         for i in range( 28 ):
             main.log.info( "Checking flow table on s" + str( i + 1 ) )
             tmpFlows = main.Mininet1.getFlowTable( "s" + str( i + 1 ), version="1.3", debug=False )
-            curSwitch = main.Mininet1.flowTableComp( flows[i], tmpFlows )
+            curSwitch = main.Mininet1.flowTableComp( flows[ i ], tmpFlows )
             FlowTables = FlowTables and curSwitch
             if curSwitch == main.FALSE:
                 main.log.warn( "Differences in flow table for switch: s{}".format( i + 1 ) )
@@ -2212,7 +2210,7 @@
             onfail="Changes were found in the flow tables" )
 
         main.Mininet2.pingLongKill()
-        '''
+        """
         main.step( "Check the continuous pings to ensure that no packets " +
                    "were dropped during component failure" )
         main.Mininet2.pingKill( main.params[ 'TESTONUSER' ],
@@ -2238,19 +2236,18 @@
             actual=LossInPings,
             onpass="No Loss of connectivity",
             onfail="Loss of dataplane connectivity detected" )
-        '''
-
+        """
         main.step( "Leadership Election is still functional" )
         # Test of LeadershipElection
         leaderList = []
 
         restarted = []
         for i in main.kill:
-            restarted.append( main.nodes[i].ip_address )
+            restarted.append( main.nodes[ i ].ip_address )
         leaderResult = main.TRUE
 
         for i in main.activeNodes:
-            cli = main.CLIs[i]
+            cli = main.CLIs[ i ]
             leaderN = cli.electionTestLeader()
             leaderList.append( leaderN )
             if leaderN == main.FALSE:
@@ -2314,8 +2311,8 @@
             for i in main.activeNodes:
                 t = main.Thread( target=utilities.retry,
                                  name="devices-" + str( i ),
-                                 args=[ main.CLIs[i].devices, [ None ] ],
-                                 kwargs= { 'sleep': 5, 'attempts': 5,
+                                 args=[ main.CLIs[ i ].devices, [ None ] ],
+                                 kwargs={ 'sleep': 5, 'attempts': 5,
                                            'randomTime': True } )
                 threads.append( t )
                 t.start()
@@ -2329,8 +2326,8 @@
             for i in main.activeNodes:
                 t = main.Thread( target=utilities.retry,
                                  name="hosts-" + str( i ),
-                                 args=[ main.CLIs[i].hosts, [ None ] ],
-                                 kwargs= { 'sleep': 5, 'attempts': 5,
+                                 args=[ main.CLIs[ i ].hosts, [ None ] ],
+                                 kwargs={ 'sleep': 5, 'attempts': 5,
                                            'randomTime': True } )
                 threads.append( t )
                 t.start()
@@ -2344,7 +2341,7 @@
                     main.log.error( repr( t.result ) )
                     hosts.append( None )
             for controller in range( 0, len( hosts ) ):
-                controllerStr = str( main.activeNodes[controller] + 1 )
+                controllerStr = str( main.activeNodes[ controller ] + 1 )
                 if hosts[ controller ]:
                     for host in hosts[ controller ]:
                         if host is None or host.get( 'ipAddresses', [] ) == []:
@@ -2357,8 +2354,8 @@
             for i in main.activeNodes:
                 t = main.Thread( target=utilities.retry,
                                  name="ports-" + str( i ),
-                                 args=[ main.CLIs[i].ports, [ None ] ],
-                                 kwargs= { 'sleep': 5, 'attempts': 5,
+                                 args=[ main.CLIs[ i ].ports, [ None ] ],
+                                 kwargs={ 'sleep': 5, 'attempts': 5,
                                            'randomTime': True } )
                 threads.append( t )
                 t.start()
@@ -2371,8 +2368,8 @@
             for i in main.activeNodes:
                 t = main.Thread( target=utilities.retry,
                                  name="links-" + str( i ),
-                                 args=[ main.CLIs[i].links, [ None ] ],
-                                 kwargs= { 'sleep': 5, 'attempts': 5,
+                                 args=[ main.CLIs[ i ].links, [ None ] ],
+                                 kwargs={ 'sleep': 5, 'attempts': 5,
                                            'randomTime': True } )
                 threads.append( t )
                 t.start()
@@ -2385,8 +2382,8 @@
             for i in main.activeNodes:
                 t = main.Thread( target=utilities.retry,
                                  name="clusters-" + str( i ),
-                                 args=[ main.CLIs[i].clusters, [ None ] ],
-                                 kwargs= { 'sleep': 5, 'attempts': 5,
+                                 args=[ main.CLIs[ i ].clusters, [ None ] ],
+                                 kwargs={ 'sleep': 5, 'attempts': 5,
                                            'randomTime': True } )
                 threads.append( t )
                 t.start()
@@ -2405,18 +2402,18 @@
                all( e is None for e in ports ) and\
                all( e is None for e in links ) and\
                all( e is None for e in clusters ):
-                   topoFailMsg = "Could not get topology from ONOS"
-                   main.log.error( topoFailMsg )
-                   continue  # Try again, No use trying to compare
+                topoFailMsg = "Could not get topology from ONOS"
+                main.log.error( topoFailMsg )
+                continue  # Try again, No use trying to compare
 
             mnSwitches = main.Mininet1.getSwitches()
             mnLinks = main.Mininet1.getLinks()
             mnHosts = main.Mininet1.getHosts()
             for controller in range( len( main.activeNodes ) ):
-                controllerStr = str( main.activeNodes[controller] + 1 )
+                controllerStr = str( main.activeNodes[ controller ] + 1 )
                 if devices[ controller ] and ports[ controller ] and\
-                    "Error" not in devices[ controller ] and\
-                    "Error" not in ports[ controller ]:
+                        "Error" not in devices[ controller ] and\
+                        "Error" not in ports[ controller ]:
 
                     try:
                         currentDevicesResult = main.Mininet1.compareSwitches(
@@ -2469,29 +2466,29 @@
                 mappings = {}
                 for i in range( 1, 29 ):  # hosts 1 through 28
                     # set up correct variables:
-                    macId = "00:" * 5 + hex( i ).split( "0x" )[1].upper().zfill(2)
+                    macId = "00:" * 5 + hex( i ).split( "0x" )[ 1 ].upper().zfill( 2 )
                     if i == 1:
-                        deviceId = "1000".zfill(16)
+                        deviceId = "1000".zfill( 16 )
                     elif i == 2:
-                        deviceId = "2000".zfill(16)
+                        deviceId = "2000".zfill( 16 )
                     elif i == 3:
-                        deviceId = "3000".zfill(16)
+                        deviceId = "3000".zfill( 16 )
                     elif i == 4:
-                        deviceId = "3004".zfill(16)
+                        deviceId = "3004".zfill( 16 )
                     elif i == 5:
-                        deviceId = "5000".zfill(16)
+                        deviceId = "5000".zfill( 16 )
                     elif i == 6:
-                        deviceId = "6000".zfill(16)
+                        deviceId = "6000".zfill( 16 )
                     elif i == 7:
-                        deviceId = "6007".zfill(16)
+                        deviceId = "6007".zfill( 16 )
                     elif i >= 8 and i <= 17:
                         dpid = '3' + str( i ).zfill( 3 )
-                        deviceId = dpid.zfill(16)
+                        deviceId = dpid.zfill( 16 )
                     elif i >= 18 and i <= 27:
                         dpid = '6' + str( i ).zfill( 3 )
-                        deviceId = dpid.zfill(16)
+                        deviceId = dpid.zfill( 16 )
                     elif i == 28:
-                        deviceId = "2800".zfill(16)
+                        deviceId = "2800".zfill( 16 )
                     mappings[ macId ] = deviceId
                 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
                     if hosts[ controller ] == []:
@@ -2511,7 +2508,7 @@
                                 assert location, "location field could not be found for this host object"
 
                                 # Trim the protocol identifier off deviceId
-                                device = str( location.get( 'elementId' ) ).split(':')[1]
+                                device = str( location.get( 'elementId' ) ).split( ':' )[ 1 ]
                                 assert device, "elementId field could not be found for this host location object"
 
                                 port = location.get( 'port' )
@@ -2522,7 +2519,7 @@
                                     if str( port ) != "1":
                                         main.log.error( "The attachment port is incorrect for " +
                                                         "host " + str( mac ) +
-                                                        ". Expected: 1 Actual: " + str( port) )
+                                                        ". Expected: 1 Actual: " + str( port ) )
                                         hostAttachment = False
                                     if device != mappings[ str( mac ) ]:
                                         main.log.error( "The attachment device is incorrect for " +
@@ -2564,7 +2561,7 @@
         main.step( "Hosts view is consistent across all ONOS nodes" )
         consistentHostsResult = main.TRUE
         for controller in range( len( hosts ) ):
-            controllerStr = str( main.activeNodes[controller] + 1 )
+            controllerStr = str( main.activeNodes[ controller ] + 1 )
             if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
                 if hosts[ controller ] == hosts[ 0 ]:
                     continue
@@ -2606,7 +2603,7 @@
         main.step( "Clusters view is consistent across all ONOS nodes" )
         consistentClustersResult = main.TRUE
         for controller in range( len( clusters ) ):
-            controllerStr = str( main.activeNodes[controller] + 1 )
+            controllerStr = str( main.activeNodes[ controller ] + 1 )
             if "Error" not in clusters[ controller ]:
                 if clusters[ controller ] == clusters[ 0 ]:
                     continue
@@ -2636,7 +2633,7 @@
             numClusters = len( json.loads( clusters[ 0 ] ) )
         except ( ValueError, TypeError ):
             main.log.exception( "Error parsing clusters[0]: " +
-                                repr( clusters[0] ) )
+                                repr( clusters[ 0 ] ) )
             numClusters = "ERROR"
         clusterResults = main.FALSE
         if numClusters == 1:
@@ -2686,7 +2683,7 @@
         main.step( "Checking ONOS nodes" )
         nodeResults = utilities.retry( main.HA.nodesCheck,
                                        False,
-                                       args=[main.activeNodes],
+                                       args=[ main.activeNodes ],
                                        attempts=5 )
 
         utilities.assert_equals( expect=True, actual=nodeResults,
@@ -2695,8 +2692,8 @@
         if not nodeResults:
             for i in main.activeNodes:
                 main.log.debug( "{} components not ACTIVE: \n{}".format(
-                    main.CLIs[i].name,
-                    main.CLIs[i].sendline( "scr:list | grep -v ACTIVE" ) ) )
+                    main.CLIs[ i ].name,
+                    main.CLIs[ i ].sendline( "scr:list | grep -v ACTIVE" ) ) )
 
         if not topoResult:
             main.cleanup()
@@ -2773,7 +2770,7 @@
         switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
 
         description = "Killing a switch to ensure it is discovered correctly"
-        onosCli = main.CLIs[ main.activeNodes[0] ]
+        onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
         main.case( description )
         switch = main.params[ 'kill' ][ 'switch' ]
         switchDPID = main.params[ 'kill' ][ 'dpid' ]
@@ -2818,7 +2815,7 @@
         switch = main.params[ 'kill' ][ 'switch' ]
         switchDPID = main.params[ 'kill' ][ 'dpid' ]
         links = main.params[ 'kill' ][ 'links' ].split()
-        onosCli = main.CLIs[ main.activeNodes[0] ]
+        onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
         description = "Adding a switch to ensure it is discovered correctly"
         main.case( description )
 
@@ -2905,14 +2902,14 @@
             main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
 
         try:
-            timerLog = open( main.logdir + "/Timers.csv", 'w')
+            timerLog = open( main.logdir + "/Timers.csv", 'w' )
             # Overwrite with empty line and close
             labels = "Gossip Intents, Restart"
             data = str( gossipTime ) + ", " + str( main.restartTime )
             timerLog.write( labels + "\n" + data )
             timerLog.close()
-        except NameError, e:
-            main.log.exception(e)
+        except NameError as e:
+            main.log.exception( e )
 
     def CASE14( self, main ):
         """
@@ -2924,9 +2921,9 @@
         assert main.CLIs, "main.CLIs not defined"
         assert main.nodes, "main.nodes not defined"
 
-        main.case("Start Leadership Election app")
+        main.case( "Start Leadership Election app" )
         main.step( "Install leadership election app" )
-        onosCli = main.CLIs[ main.activeNodes[0] ]
+        onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
         appResult = onosCli.activateApp( "org.onosproject.election" )
         utilities.assert_equals(
             expect=main.TRUE,
@@ -2936,9 +2933,9 @@
 
         main.step( "Run for election on each node" )
         for i in main.activeNodes:
-            main.CLIs[i].electionTestRun()
-        time.sleep(5)
-        activeCLIs = [ main.CLIs[i] for i in main.activeNodes ]
+            main.CLIs[ i ].electionTestRun()
+        time.sleep( 5 )
+        activeCLIs = [ main.CLIs[ i ] for i in main.activeNodes ]
         sameResult, leaders = main.HA.consistentLeaderboards( activeCLIs )
         utilities.assert_equals(
             expect=True,
@@ -2948,7 +2945,7 @@
 
         if sameResult:
             leader = leaders[ 0 ][ 0 ]
-            if main.nodes[main.activeNodes[0]].ip_address in leader:
+            if main.nodes[ main.activeNodes[ 0 ] ].ip_address in leader:
                 correctLeader = True
             else:
                 correctLeader = False
@@ -2998,7 +2995,7 @@
         electionResult = main.TRUE
 
         for i in main.activeNodes:  # run test election on each node
-            if main.CLIs[i].electionTestRun() == main.FALSE:
+            if main.CLIs[ i ].electionTestRun() == main.FALSE:
                 electionResult = main.FALSE
         utilities.assert_equals(
             expect=main.TRUE,
@@ -3013,7 +3010,7 @@
 
         main.step( "Check that each node shows the same leader and candidates" )
         failMessage = "Nodes have different leaderboards"
-        activeCLIs = [ main.CLIs[i] for i in main.activeNodes ]
+        activeCLIs = [ main.CLIs[ i ] for i in main.activeNodes ]
         sameResult, oldLeaders = main.HA.consistentLeaderboards( activeCLIs )
         if sameResult:
             oldLeader = oldLeaders[ 0 ][ 0 ]
@@ -3063,7 +3060,7 @@
         if newLeader == oldLeader:
             newLeaderResult = False
             main.log.error( "All nodes still see old leader: " + str( oldLeader ) +
-                " as the current leader" )
+                            " as the current leader" )
         utilities.assert_equals(
             expect=True,
             actual=newLeaderResult,
@@ -3080,7 +3077,7 @@
             else:
                 main.log.info( "Expected no leader, got: " + str( newLeader ) )
                 correctCandidateResult = main.FALSE
-        elif len( oldLeaders[0] ) >= 3:
+        elif len( oldLeaders[ 0 ] ) >= 3:
             if newLeader == oldLeaders[ 0 ][ 2 ]:
                 # correct leader was elected
                 correctCandidateResult = main.TRUE
@@ -3120,10 +3117,10 @@
         positionResult, reRunLeaders = main.HA.consistentLeaderboards( activeCLIs )
 
         # Check that the re-elected node is last on the candidate List
-        if not reRunLeaders[0]:
+        if not reRunLeaders[ 0 ]:
             positionResult = main.FALSE
         elif oldLeader != reRunLeaders[ 0 ][ -1 ]:
-            main.log.error( "Old Leader ({}) not in the proper position: {} ".format( str( oldLeader),
+            main.log.error( "Old Leader ({}) not in the proper position: {} ".format( str( oldLeader ),
                                                                                       str( reRunLeaders[ 0 ] ) ) )
             positionResult = main.FALSE
         utilities.assert_equals(
@@ -3147,15 +3144,15 @@
         # Variables for the distributed primitives tests
         main.pCounterName = "TestON-Partitions"
         main.pCounterValue = 0
-        main.onosSet = set([])
+        main.onosSet = set( [] )
         main.onosSetName = "TestON-set"
 
         description = "Install Primitives app"
         main.case( description )
         main.step( "Install Primitives app" )
         appName = "org.onosproject.distributedprimitives"
-        node = main.activeNodes[0]
-        appResults = main.CLIs[node].activateApp( appName )
+        node = main.activeNodes[ 0 ]
+        appResults = main.CLIs[ node ].activateApp( appName )
         utilities.assert_equals( expect=main.TRUE,
                                  actual=appResults,
                                  onpass="Primitives app activated",
diff --git a/TestON/tests/HA/HAsanity/HAsanity.py b/TestON/tests/HA/HAsanity/HAsanity.py
index 2b2ea08..8f640a1 100644
--- a/TestON/tests/HA/HAsanity/HAsanity.py
+++ b/TestON/tests/HA/HAsanity/HAsanity.py
@@ -23,8 +23,6 @@
 CASE16: Install Distributed Primitives app
 CASE17: Check for basic functionality with distributed primitives
 """
-
-
 class HAsanity:
 
     def __init__( self ):
@@ -148,7 +146,7 @@
                             filePath + topoName,
                             main.Mininet1.home,
                             direction="to" )
-        mnResult = main.Mininet1.startNet( )
+        mnResult = main.Mininet1.startNet()
         utilities.assert_equals( expect=main.TRUE, actual=mnResult,
                                  onpass="Mininet Started",
                                  onfail="Error starting Mininet" )
@@ -191,7 +189,7 @@
                   'seamless="seamless"></iframe>\n'
         graphs += ']]></ac:plain-text-body>\n'
         graphs += '</ac:structured-macro>\n'
-        main.log.wiki(graphs)
+        main.log.wiki( graphs )
 
         main.step( "Creating ONOS package" )
         packageResult = main.ONOSbench.buckBuild()
@@ -235,9 +233,9 @@
         cliResults = main.TRUE
         threads = []
         for i in range( main.numCtrls ):
-            t = main.Thread( target=main.CLIs[i].startOnosCli,
+            t = main.Thread( target=main.CLIs[ i ].startOnosCli,
                              name="startOnosCli-" + str( i ),
-                             args=[main.nodes[i].ip_address] )
+                             args=[ main.nodes[ i ].ip_address ] )
             threads.append( t )
             t.start()
 
@@ -262,7 +260,7 @@
         main.step( "Checking ONOS nodes" )
         nodeResults = utilities.retry( main.HA.nodesCheck,
                                        False,
-                                       args=[main.activeNodes],
+                                       args=[ main.activeNodes ],
                                        attempts=5 )
 
         utilities.assert_equals( expect=True, actual=nodeResults,
@@ -271,7 +269,7 @@
 
         if not nodeResults:
             for i in main.activeNodes:
-                cli = main.CLIs[i]
+                cli = main.CLIs[ i ]
                 main.log.debug( "{} components not ACTIVE: \n{}".format(
                     cli.name,
                     cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
@@ -283,7 +281,7 @@
         # get data from the params
         apps = main.params.get( 'apps' )
         if apps:
-            apps = apps.split(',')
+            apps = apps.split( ',' )
             main.log.warn( apps )
             activateResult = True
             for app in apps:
@@ -310,8 +308,8 @@
             main.log.debug( config )
             checkResult = main.TRUE
             for component in config:
-                for setting in config[component]:
-                    value = config[component][setting]
+                for setting in config[ component ]:
+                    value = config[ component ][ setting ]
                     check = main.CLIs[ 0 ].setCfg( component, setting, value )
                     main.log.info( "Value was changed? {}".format( main.TRUE == check ) )
                     checkResult = check and checkResult
@@ -326,7 +324,7 @@
         appCheck = main.TRUE
         threads = []
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].appToIDCheck,
+            t = main.Thread( target=main.CLIs[ i ].appToIDCheck,
                              name="appToIDCheck-" + str( i ),
                              args=[] )
             threads.append( t )
@@ -336,9 +334,9 @@
             t.join()
             appCheck = appCheck and t.result
         if appCheck != main.TRUE:
-            node = main.activeNodes[0]
-            main.log.warn( main.CLIs[node].apps() )
-            main.log.warn( main.CLIs[node].appIDs() )
+            node = main.activeNodes[ 0 ]
+            main.log.warn( main.CLIs[ node ].apps() )
+            main.log.warn( main.CLIs[ node ].appIDs() )
         utilities.assert_equals( expect=main.TRUE, actual=appCheck,
                                  onpass="App Ids seem to be correct",
                                  onfail="Something is wrong with app Ids" )
@@ -423,9 +421,9 @@
         # Manually assign mastership to the controller we want
         roleCall = main.TRUE
 
-        ipList = [ ]
+        ipList = []
         deviceList = []
-        onosCli = main.CLIs[ main.activeNodes[0] ]
+        onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
         try:
             # Assign mastership to specific controllers. This assignment was
             # determined for a 7 node cluser, but will work with any sized
@@ -499,8 +497,8 @@
         #       atomic and is actually a multi step process
         time.sleep( 5 )
         for i in range( len( ipList ) ):
-            ip = ipList[i]
-            deviceId = deviceList[i]
+            ip = ipList[ i ]
+            deviceId = deviceList[ i ]
             # Check assignment
             master = onosCli.getRole( deviceId ).get( 'master' )
             if ip in master:
@@ -548,7 +546,7 @@
 
         # install onos-app-fwd
         main.step( "Install reactive forwarding app" )
-        onosCli = main.CLIs[ main.activeNodes[0] ]
+        onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
         installResults = onosCli.activateApp( "org.onosproject.fwd" )
         utilities.assert_equals( expect=main.TRUE, actual=installResults,
                                  onpass="Install fwd successful",
@@ -558,7 +556,7 @@
         appCheck = main.TRUE
         threads = []
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].appToIDCheck,
+            t = main.Thread( target=main.CLIs[ i ].appToIDCheck,
                              name="appToIDCheck-" + str( i ),
                              args=[] )
             threads.append( t )
@@ -583,13 +581,13 @@
         pingResult = main.Mininet1.pingall()
         time2 = time.time()
         if not pingResult:
-            main.log.warn("First pingall failed. Trying again...")
+            main.log.warn( "First pingall failed. Trying again..." )
             pingResult = main.Mininet1.pingall()
             passMsg += " on the second try"
         utilities.assert_equals(
             expect=main.TRUE,
             actual=pingResult,
-            onpass= passMsg,
+            onpass=passMsg,
             onfail="Reactive Pingall failed, " +
                    "one or more ping pairs failed" )
         main.log.info( "Time for pingall: %2f seconds" %
@@ -601,8 +599,8 @@
         time.sleep( 11 )
         # uninstall onos-app-fwd
         main.step( "Uninstall reactive forwarding app" )
-        node = main.activeNodes[0]
-        uninstallResult = main.CLIs[node].deactivateApp( "org.onosproject.fwd" )
+        node = main.activeNodes[ 0 ]
+        uninstallResult = main.CLIs[ node ].deactivateApp( "org.onosproject.fwd" )
         utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
                                  onpass="Uninstall fwd successful",
                                  onfail="Uninstall fwd failed" )
@@ -611,7 +609,7 @@
         threads = []
         appCheck2 = main.TRUE
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].appToIDCheck,
+            t = main.Thread( target=main.CLIs[ i ].appToIDCheck,
                              name="appToIDCheck-" + str( i ),
                              args=[] )
             threads.append( t )
@@ -621,9 +619,9 @@
             t.join()
             appCheck2 = appCheck2 and t.result
         if appCheck2 != main.TRUE:
-            node = main.activeNodes[0]
-            main.log.warn( main.CLIs[node].apps() )
-            main.log.warn( main.CLIs[node].appIDs() )
+            node = main.activeNodes[ 0 ]
+            main.log.warn( main.CLIs[ node ].apps() )
+            main.log.warn( main.CLIs[ node ].appIDs() )
         utilities.assert_equals( expect=main.TRUE, actual=appCheck2,
                                  onpass="App Ids seem to be correct",
                                  onfail="Something is wrong with app Ids" )
@@ -651,8 +649,8 @@
                 host2Id = host2Dict.get( 'id', None )
             if host1Id and host2Id:
                 nodeNum = ( i % len( main.activeNodes ) )
-                node = main.activeNodes[nodeNum]
-                tmpId = main.CLIs[node].addHostIntent( host1Id, host2Id )
+                node = main.activeNodes[ nodeNum ]
+                tmpId = main.CLIs[ node ].addHostIntent( host1Id, host2Id )
                 if tmpId:
                     main.log.info( "Added intent with id: " + tmpId )
                     intentIds.append( tmpId )
@@ -662,8 +660,8 @@
             else:
                 main.log.error( "Error, getHost() failed for h" + str( i ) +
                                 " and/or h" + str( i + 10 ) )
-                node = main.activeNodes[0]
-                hosts = main.CLIs[node].hosts()
+                node = main.activeNodes[ 0 ]
+                hosts = main.CLIs[ node ].hosts()
                 main.log.warn( "Hosts output: " )
                 try:
                     main.log.warn( json.dumps( json.loads( hosts ),
@@ -731,7 +729,7 @@
                 for i in range( 14 ):
                     topics.append( "work-partition-" + str( i ) )
                 main.log.debug( topics )
-                ONOStopics = [ j['topic'] for j in parsedLeaders ]
+                ONOStopics = [ j[ 'topic' ] for j in parsedLeaders ]
                 for topic in topics:
                     if topic not in ONOStopics:
                         main.log.error( "Error: " + topic +
@@ -745,13 +743,13 @@
         # Check all nodes
         if missing:
             for i in main.activeNodes:
-                response = main.CLIs[i].leaders( jsonFormat=False)
-                main.log.warn( str( main.CLIs[i].name ) + " leaders output: \n" +
+                response = main.CLIs[ i ].leaders( jsonFormat=False )
+                main.log.warn( str( main.CLIs[ i ].name ) + " leaders output: \n" +
                                str( response ) )
 
         partitions = onosCli.partitions()
         try:
-            if partitions :
+            if partitions:
                 parsedPartitions = json.loads( partitions )
                 main.log.warn( json.dumps( parsedPartitions,
                                            sort_keys=True,
@@ -766,7 +764,7 @@
             main.log.error( repr( partitions ) )
         pendingMap = onosCli.pendingMap()
         try:
-            if pendingMap :
+            if pendingMap:
                 parsedPending = json.loads( pendingMap )
                 main.log.warn( json.dumps( parsedPending,
                                            sort_keys=True,
@@ -785,21 +783,21 @@
             main.log.error( "Error in pushing host intents to ONOS" )
 
         main.step( "Intent Anti-Entropy dispersion" )
-        for j in range(100):
+        for j in range( 100 ):
             correct = True
             main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
             for i in main.activeNodes:
                 onosIds = []
-                ids = main.CLIs[i].getAllIntentsId()
+                ids = main.CLIs[ i ].getAllIntentsId()
                 onosIds.append( ids )
-                main.log.debug( "Intents in " + main.CLIs[i].name + ": " +
+                main.log.debug( "Intents in " + main.CLIs[ i ].name + ": " +
                                 str( sorted( onosIds ) ) )
                 if sorted( ids ) != sorted( intentIds ):
                     main.log.warn( "Set of intent IDs doesn't match" )
                     correct = False
                     break
                 else:
-                    intents = json.loads( main.CLIs[i].intents() )
+                    intents = json.loads( main.CLIs[ i ].intents() )
                     for intent in intents:
                         if intent[ 'state' ] != "INSTALLED":
                             main.log.warn( "Intent " + intent[ 'id' ] +
@@ -809,14 +807,14 @@
             if correct:
                 break
             else:
-                time.sleep(1)
+                time.sleep( 1 )
         if not intentStop:
             intentStop = time.time()
         global gossipTime
         gossipTime = intentStop - intentStart
         main.log.info( "It took about " + str( gossipTime ) +
                         " seconds for all intents to appear in each node" )
-        gossipPeriod = int( main.params['timers']['gossip'] )
+        gossipPeriod = int( main.params[ 'timers' ][ 'gossip' ] )
         maxGossipTime = gossipPeriod * len( main.activeNodes )
         utilities.assert_greater_equals(
                 expect=maxGossipTime, actual=gossipTime,
@@ -878,7 +876,7 @@
                     # FIXME: this should only be after we start the app
                     topics.append( "org.onosproject.election" )
                     main.log.debug( topics )
-                    ONOStopics = [ j['topic'] for j in parsedLeaders ]
+                    ONOStopics = [ j[ 'topic' ] for j in parsedLeaders ]
                     for topic in topics:
                         if topic not in ONOStopics:
                             main.log.error( "Error: " + topic +
@@ -892,14 +890,14 @@
             # Check all nodes
             if missing:
                 for i in main.activeNodes:
-                    node = main.CLIs[i]
-                    response = node.leaders( jsonFormat=False)
+                    node = main.CLIs[ i ]
+                    response = node.leaders( jsonFormat=False )
                     main.log.warn( str( node.name ) + " leaders output: \n" +
                                    str( response ) )
 
             partitions = onosCli.partitions()
             try:
-                if partitions :
+                if partitions:
                     parsedPartitions = json.loads( partitions )
                     main.log.warn( json.dumps( parsedPartitions,
                                                sort_keys=True,
@@ -914,7 +912,7 @@
                 main.log.error( repr( partitions ) )
             pendingMap = onosCli.pendingMap()
             try:
-                if pendingMap :
+                if pendingMap:
                     parsedPending = json.loads( pendingMap )
                     main.log.warn( json.dumps( parsedPending,
                                                sort_keys=True,
@@ -943,7 +941,7 @@
                                 "functionality and check the state of " +\
                                 "the intent"
 
-        onosCli = main.CLIs[ main.activeNodes[0] ]
+        onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
         main.step( "Check Intent state" )
         installedCheck = False
         loopCount = 0
@@ -1029,7 +1027,7 @@
                 # FIXME: topics.append( "org.onosproject.election" )
                 # Print leaders output
                 main.log.debug( topics )
-                ONOStopics = [ j['topic'] for j in parsedLeaders ]
+                ONOStopics = [ j[ 'topic' ] for j in parsedLeaders ]
                 for topic in topics:
                     if topic not in ONOStopics:
                         main.log.error( "Error: " + topic +
@@ -1046,8 +1044,8 @@
         # Check all nodes
         if topicCheck:
             for i in main.activeNodes:
-                node = main.CLIs[i]
-                response = node.leaders( jsonFormat=False)
+                node = main.CLIs[ i ]
+                response = node.leaders( jsonFormat=False )
                 main.log.warn( str( node.name ) + " leaders output: \n" +
                                str( response ) )
 
@@ -1057,7 +1055,7 @@
         # Print partitions
         partitions = onosCli.partitions()
         try:
-            if partitions :
+            if partitions:
                 parsedPartitions = json.loads( partitions )
                 main.log.warn( json.dumps( parsedPartitions,
                                            sort_keys=True,
@@ -1073,7 +1071,7 @@
         # Print Pending Map
         pendingMap = onosCli.pendingMap()
         try:
-            if pendingMap :
+            if pendingMap:
                 parsedPending = json.loads( pendingMap )
                 main.log.warn( json.dumps( parsedPending,
                                            sort_keys=True,
@@ -1127,7 +1125,7 @@
                     # FIXME: this should only be after we start the app
                     topics.append( "org.onosproject.election" )
                     main.log.debug( topics )
-                    ONOStopics = [ j['topic'] for j in parsedLeaders ]
+                    ONOStopics = [ j[ 'topic' ] for j in parsedLeaders ]
                     for topic in topics:
                         if topic not in ONOStopics:
                             main.log.error( "Error: " + topic +
@@ -1140,14 +1138,14 @@
                 main.log.error( repr( leaders ) )
             if missing:
                 for i in main.activeNodes:
-                    node = main.CLIs[i]
-                    response = node.leaders( jsonFormat=False)
+                    node = main.CLIs[ i ]
+                    response = node.leaders( jsonFormat=False )
                     main.log.warn( str( node.name ) + " leaders output: \n" +
                                    str( response ) )
 
             partitions = onosCli.partitions()
             try:
-                if partitions :
+                if partitions:
                     parsedPartitions = json.loads( partitions )
                     main.log.warn( json.dumps( parsedPartitions,
                                                sort_keys=True,
@@ -1162,7 +1160,7 @@
                 main.log.error( repr( partitions ) )
             pendingMap = onosCli.pendingMap()
             try:
-                if pendingMap :
+                if pendingMap:
                     parsedPending = json.loads( pendingMap )
                     main.log.warn( json.dumps( parsedPending,
                                                sort_keys=True,
@@ -1175,8 +1173,8 @@
                 main.log.exception( "Error parsing pending map" )
                 main.log.error( repr( pendingMap ) )
         # Print flowrules
-        node = main.activeNodes[0]
-        main.log.debug( main.CLIs[node].flows( jsonFormat=False ) )
+        node = main.activeNodes[ 0 ]
+        main.log.debug( main.CLIs[ node ].flows( jsonFormat=False ) )
         main.step( "Wait a minute then ping again" )
         # the wait is above
         PingResult = main.TRUE
@@ -1234,7 +1232,7 @@
         rolesNotNull = main.TRUE
         threads = []
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].rolesNotNull,
+            t = main.Thread( target=main.CLIs[ i ].rolesNotNull,
                              name="rolesNotNull-" + str( i ),
                              args=[] )
             threads.append( t )
@@ -1256,7 +1254,7 @@
         rolesResults = True
         threads = []
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].roles,
+            t = main.Thread( target=main.CLIs[ i ].roles,
                              name="roles-" + str( i ),
                              args=[] )
             threads.append( t )
@@ -1267,11 +1265,11 @@
             ONOSMastership.append( t.result )
 
         for i in range( len( ONOSMastership ) ):
-            node = str( main.activeNodes[i] + 1 )
-            if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
+            node = str( main.activeNodes[ i ] + 1 )
+            if not ONOSMastership[ i ] or "Error" in ONOSMastership[ i ]:
                 main.log.error( "Error in getting ONOS" + node + " roles" )
                 main.log.warn( "ONOS" + node + " mastership response: " +
-                               repr( ONOSMastership[i] ) )
+                               repr( ONOSMastership[ i ] ) )
                 rolesResults = False
         utilities.assert_equals(
             expect=True,
@@ -1280,7 +1278,7 @@
             onfail="Error in reading roles from ONOS" )
 
         main.step( "Check for consistency in roles from each controller" )
-        if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
+        if all( [ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
             main.log.info(
                 "Switch roles are consistent across all ONOS nodes" )
         else:
@@ -1293,7 +1291,7 @@
 
         if rolesResults and not consistentMastership:
             for i in range( len( main.activeNodes ) ):
-                node = str( main.activeNodes[i] + 1 )
+                node = str( main.activeNodes[ i ] + 1 )
                 try:
                     main.log.warn(
                         "ONOS" + node + " roles: ",
@@ -1317,7 +1315,7 @@
         intentsResults = True
         threads = []
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].intents,
+            t = main.Thread( target=main.CLIs[ i ].intents,
                              name="intents-" + str( i ),
                              args=[],
                              kwargs={ 'jsonFormat': True } )
@@ -1329,7 +1327,7 @@
             ONOSIntents.append( t.result )
 
         for i in range( len( ONOSIntents ) ):
-            node = str( main.activeNodes[i] + 1 )
+            node = str( main.activeNodes[ i ] + 1 )
             if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
                 main.log.error( "Error in getting ONOS" + node + " intents" )
                 main.log.warn( "ONOS" + node + " intents response: " +
@@ -1342,7 +1340,7 @@
             onfail="Error in reading intents from ONOS" )
 
         main.step( "Check for consistency in Intents from each controller" )
-        if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
+        if all( [ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
             main.log.info( "Intents are consistent across all ONOS " +
                              "nodes" )
         else:
@@ -1390,17 +1388,17 @@
 
         if intentsResults and not consistentIntents:
             # print the json objects
-            n = str( main.activeNodes[-1] + 1 )
+            n = str( main.activeNodes[ -1 ] + 1 )
             main.log.debug( "ONOS" + n + " intents: " )
             main.log.debug( json.dumps( json.loads( ONOSIntents[ -1 ] ),
                                         sort_keys=True,
                                         indent=4,
                                         separators=( ',', ': ' ) ) )
             for i in range( len( ONOSIntents ) ):
-                node = str( main.activeNodes[i] + 1 )
+                node = str( main.activeNodes[ i ] + 1 )
                 if ONOSIntents[ i ] != ONOSIntents[ -1 ]:
                     main.log.debug( "ONOS" + node + " intents: " )
-                    main.log.debug( json.dumps( json.loads( ONOSIntents[i] ),
+                    main.log.debug( json.dumps( json.loads( ONOSIntents[ i ] ),
                                                 sort_keys=True,
                                                 indent=4,
                                                 separators=( ',', ': ' ) ) )
@@ -1421,7 +1419,7 @@
         flowsResults = True
         threads = []
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].flows,
+            t = main.Thread( target=main.CLIs[ i ].flows,
                              name="flows-" + str( i ),
                              args=[],
                              kwargs={ 'jsonFormat': True } )
@@ -1429,14 +1427,14 @@
             t.start()
 
         # NOTE: Flows command can take some time to run
-        time.sleep(30)
+        time.sleep( 30 )
         for t in threads:
             t.join()
             result = t.result
             ONOSFlows.append( result )
 
         for i in range( len( ONOSFlows ) ):
-            num = str( main.activeNodes[i] + 1 )
+            num = str( main.activeNodes[ i ] + 1 )
             if not ONOSFlows[ i ] or "Error" in ONOSFlows[ i ]:
                 main.log.error( "Error in getting ONOS" + num + " flows" )
                 main.log.warn( "ONOS" + num + " flows response: " +
@@ -1473,11 +1471,11 @@
 
         if flowsResults and not consistentFlows:
             for i in range( len( ONOSFlows ) ):
-                node = str( main.activeNodes[i] + 1 )
+                node = str( main.activeNodes[ i ] + 1 )
                 try:
                     main.log.warn(
                         "ONOS" + node + " flows: " +
-                        json.dumps( json.loads( ONOSFlows[i] ), sort_keys=True,
+                        json.dumps( json.loads( ONOSFlows[ i ] ), sort_keys=True,
                                     indent=4, separators=( ',', ': ' ) ) )
                 except ( ValueError, TypeError ):
                     main.log.warn( "ONOS" + node + " flows: " +
@@ -1542,9 +1540,9 @@
         devices = []
         threads = []
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].devices,
+            t = main.Thread( target=main.CLIs[ i ].devices,
                              name="devices-" + str( i ),
-                             args=[ ] )
+                             args=[] )
             threads.append( t )
             t.start()
 
@@ -1554,9 +1552,9 @@
         hosts = []
         threads = []
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].hosts,
+            t = main.Thread( target=main.CLIs[ i ].hosts,
                              name="hosts-" + str( i ),
-                             args=[ ] )
+                             args=[] )
             threads.append( t )
             t.start()
 
@@ -1574,9 +1572,9 @@
         ports = []
         threads = []
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].ports,
+            t = main.Thread( target=main.CLIs[ i ].ports,
                              name="ports-" + str( i ),
-                             args=[ ] )
+                             args=[] )
             threads.append( t )
             t.start()
 
@@ -1586,9 +1584,9 @@
         links = []
         threads = []
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].links,
+            t = main.Thread( target=main.CLIs[ i ].links,
                              name="links-" + str( i ),
-                             args=[ ] )
+                             args=[] )
             threads.append( t )
             t.start()
 
@@ -1598,9 +1596,9 @@
         clusters = []
         threads = []
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].clusters,
+            t = main.Thread( target=main.CLIs[ i ].clusters,
                              name="clusters-" + str( i ),
-                             args=[ ] )
+                             args=[] )
             threads.append( t )
             t.start()
 
@@ -1613,7 +1611,7 @@
         main.step( "Host view is consistent across ONOS nodes" )
         consistentHostsResult = main.TRUE
         for controller in range( len( hosts ) ):
-            controllerStr = str( main.activeNodes[controller] + 1 )
+            controllerStr = str( main.activeNodes[ controller ] + 1 )
             if hosts[ controller ] and "Error" not in hosts[ controller ]:
                 if hosts[ controller ] == hosts[ 0 ]:
                     continue
@@ -1640,10 +1638,10 @@
         main.step( "Each host has an IP address" )
         ipResult = main.TRUE
         for controller in range( 0, len( hosts ) ):
-            controllerStr = str( main.activeNodes[controller] + 1 )
+            controllerStr = str( main.activeNodes[ controller ] + 1 )
             if hosts[ controller ]:
                 for host in hosts[ controller ]:
-                    if not host.get( 'ipAddresses', [ ] ):
+                    if not host.get( 'ipAddresses', [] ):
                         main.log.error( "Error with host ips on controller" +
                                         controllerStr + ": " + str( host ) )
                         ipResult = main.FALSE
@@ -1657,7 +1655,7 @@
         main.step( "Cluster view is consistent across ONOS nodes" )
         consistentClustersResult = main.TRUE
         for controller in range( len( clusters ) ):
-            controllerStr = str( main.activeNodes[controller] + 1 )
+            controllerStr = str( main.activeNodes[ controller ] + 1 )
             if "Error" not in clusters[ controller ]:
                 if clusters[ controller ] == clusters[ 0 ]:
                     continue
@@ -1706,14 +1704,14 @@
         mnLinks = main.Mininet1.getLinks()
         mnHosts = main.Mininet1.getHosts()
         for controller in main.activeNodes:
-            controllerStr = str( main.activeNodes[controller] + 1 )
+            controllerStr = str( main.activeNodes[ controller ] + 1 )
             if devices[ controller ] and ports[ controller ] and\
-                "Error" not in devices[ controller ] and\
-                "Error" not in ports[ controller ]:
-                    currentDevicesResult = main.Mininet1.compareSwitches(
-                            mnSwitches,
-                            json.loads( devices[ controller ] ),
-                            json.loads( ports[ controller ] ) )
+                    "Error" not in devices[ controller ] and\
+                    "Error" not in ports[ controller ]:
+                currentDevicesResult = main.Mininet1.compareSwitches(
+                        mnSwitches,
+                        json.loads( devices[ controller ] ),
+                        json.loads( ports[ controller ] ) )
             else:
                 currentDevicesResult = main.FALSE
             utilities.assert_equals( expect=main.TRUE,
@@ -1808,9 +1806,9 @@
         rolesNotNull = main.TRUE
         threads = []
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].rolesNotNull,
+            t = main.Thread( target=main.CLIs[ i ].rolesNotNull,
                              name="rolesNotNull-" + str( i ),
-                             args=[ ] )
+                             args=[] )
             threads.append( t )
             t.start()
 
@@ -1830,7 +1828,7 @@
         rolesResults = True
         threads = []
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].roles,
+            t = main.Thread( target=main.CLIs[ i ].roles,
                              name="roles-" + str( i ),
                              args=[] )
             threads.append( t )
@@ -1841,11 +1839,11 @@
             ONOSMastership.append( t.result )
 
         for i in range( len( ONOSMastership ) ):
-            node = str( main.activeNodes[i] + 1 )
-            if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
+            node = str( main.activeNodes[ i ] + 1 )
+            if not ONOSMastership[ i ] or "Error" in ONOSMastership[ i ]:
                 main.log.error( "Error in getting ONOS" + node + " roles" )
                 main.log.warn( "ONOS" + node + " mastership response: " +
-                               repr( ONOSMastership[i] ) )
+                               repr( ONOSMastership[ i ] ) )
                 rolesResults = False
         utilities.assert_equals(
             expect=True,
@@ -1854,7 +1852,7 @@
             onfail="Error in reading roles from ONOS" )
 
         main.step( "Check for consistency in roles from each controller" )
-        if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
+        if all( [ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
             main.log.info(
                 "Switch roles are consistent across all ONOS nodes" )
         else:
@@ -1867,7 +1865,7 @@
 
         if rolesResults and not consistentMastership:
             for i in range( len( ONOSMastership ) ):
-                node = str( main.activeNodes[i] + 1 )
+                node = str( main.activeNodes[ i ] + 1 )
                 main.log.warn( "ONOS" + node + " roles: ",
                                json.dumps( json.loads( ONOSMastership[ i ] ),
                                            sort_keys=True,
@@ -1877,12 +1875,12 @@
         description2 = "Compare switch roles from before failure"
         main.step( description2 )
         try:
-            currentJson = json.loads( ONOSMastership[0] )
+            currentJson = json.loads( ONOSMastership[ 0 ] )
             oldJson = json.loads( mastershipState )
         except ( ValueError, TypeError ):
             main.log.exception( "Something is wrong with parsing " +
                                 "ONOSMastership[0] or mastershipState" )
-            main.log.error( "ONOSMastership[0]: " + repr( ONOSMastership[0] ) )
+            main.log.error( "ONOSMastership[0]: " + repr( ONOSMastership[ 0 ] ) )
             main.log.error( "mastershipState" + repr( mastershipState ) )
             main.cleanup()
             main.exit()
@@ -1913,7 +1911,7 @@
         intentsResults = True
         threads = []
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].intents,
+            t = main.Thread( target=main.CLIs[ i ].intents,
                              name="intents-" + str( i ),
                              args=[],
                              kwargs={ 'jsonFormat': True } )
@@ -1924,8 +1922,8 @@
             t.join()
             ONOSIntents.append( t.result )
 
-        for i in range( len( ONOSIntents) ):
-            node = str( main.activeNodes[i] + 1 )
+        for i in range( len( ONOSIntents ) ):
+            node = str( main.activeNodes[ i ] + 1 )
             if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
                 main.log.error( "Error in getting ONOS" + node + " intents" )
                 main.log.warn( "ONOS" + node + " intents response: " +
@@ -1938,7 +1936,7 @@
             onfail="Error in reading intents from ONOS" )
 
         main.step( "Check for consistency in Intents from each controller" )
-        if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
+        if all( [ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
             main.log.info( "Intents are consistent across all ONOS " +
                              "nodes" )
         else:
@@ -1987,12 +1985,12 @@
                 main.log.exception( "Error in parsing intents" )
                 main.log.error( repr( node ) )
             intentStates.append( nodeStates )
-            out = [ (i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
+            out = [ ( i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
             main.log.info( dict( out ) )
 
         if intentsResults and not consistentIntents:
             for i in range( len( main.activeNodes ) ):
-                node = str( main.activeNodes[i] + 1 )
+                node = str( main.activeNodes[ i ] + 1 )
                 main.log.warn( "ONOS" + node + " intents: " )
                 main.log.warn( json.dumps(
                     json.loads( ONOSIntents[ i ] ),
@@ -2031,7 +2029,7 @@
                             main.log.debug( json.dumps( intent ) )
                 except ( ValueError, TypeError ):
                     main.log.exception( "Exception printing intents" )
-                    main.log.debug( repr( ONOSIntents[0] ) )
+                    main.log.debug( repr( ONOSIntents[ 0 ] ) )
                     main.log.debug( repr( intentState ) )
             if sameIntents == main.FALSE:
                 try:
@@ -2045,7 +2043,7 @@
                                                 separators=( ',', ': ' ) ) )
                 except ( ValueError, TypeError ):
                     main.log.exception( "Exception printing intents" )
-                    main.log.debug( repr( ONOSIntents[0] ) )
+                    main.log.debug( repr( ONOSIntents[ 0 ] ) )
                     main.log.debug( repr( intentState ) )
             utilities.assert_equals(
                 expect=main.TRUE,
@@ -2060,7 +2058,7 @@
         for i in range( 28 ):
             main.log.info( "Checking flow table on s" + str( i + 1 ) )
             tmpFlows = main.Mininet1.getFlowTable( "s" + str( i + 1 ), version="1.3", debug=False )
-            curSwitch = main.Mininet1.flowTableComp( flows[i], tmpFlows )
+            curSwitch = main.Mininet1.flowTableComp( flows[ i ], tmpFlows )
             FlowTables = FlowTables and curSwitch
             if curSwitch == main.FALSE:
                 main.log.warn( "Differences in flow table for switch: s{}".format( i + 1 ) )
@@ -2071,7 +2069,7 @@
             onfail="Changes were found in the flow tables" )
 
         main.Mininet2.pingLongKill()
-        '''
+        """
         main.step( "Check the continuous pings to ensure that no packets " +
                    "were dropped during component failure" )
         main.Mininet2.pingKill( main.params[ 'TESTONUSER' ],
@@ -2097,8 +2095,7 @@
             actual=LossInPings,
             onpass="No Loss of connectivity",
             onfail="Loss of dataplane connectivity detected" )
-        '''
-
+        """
         main.step( "Leadership Election is still functional" )
         # Test of LeadershipElection
         leaderList = []
@@ -2109,7 +2106,7 @@
         leaderResult = main.TRUE
 
         for i in main.activeNodes:
-            cli = main.CLIs[i]
+            cli = main.CLIs[ i ]
             leaderN = cli.electionTestLeader()
             leaderList.append( leaderN )
             # verify leader is ONOS1
@@ -2174,8 +2171,8 @@
             for i in main.activeNodes:
                 t = main.Thread( target=utilities.retry,
                                  name="devices-" + str( i ),
-                                 args=[ main.CLIs[i].devices, [ None ] ],
-                                 kwargs= { 'sleep': 5, 'attempts': 5,
+                                 args=[ main.CLIs[ i ].devices, [ None ] ],
+                                 kwargs={ 'sleep': 5, 'attempts': 5,
                                            'randomTime': True } )
                 threads.append( t )
                 t.start()
@@ -2189,8 +2186,8 @@
             for i in main.activeNodes:
                 t = main.Thread( target=utilities.retry,
                                  name="hosts-" + str( i ),
-                                 args=[ main.CLIs[i].hosts, [ None ] ],
-                                 kwargs= { 'sleep': 5, 'attempts': 5,
+                                 args=[ main.CLIs[ i ].hosts, [ None ] ],
+                                 kwargs={ 'sleep': 5, 'attempts': 5,
                                            'randomTime': True } )
                 threads.append( t )
                 t.start()
@@ -2204,7 +2201,7 @@
                     main.log.error( repr( t.result ) )
                     hosts.append( None )
             for controller in range( 0, len( hosts ) ):
-                controllerStr = str( main.activeNodes[controller] + 1 )
+                controllerStr = str( main.activeNodes[ controller ] + 1 )
                 if hosts[ controller ]:
                     for host in hosts[ controller ]:
                         if host is None or host.get( 'ipAddresses', [] ) == []:
@@ -2217,8 +2214,8 @@
             for i in main.activeNodes:
                 t = main.Thread( target=utilities.retry,
                                  name="ports-" + str( i ),
-                                 args=[ main.CLIs[i].ports, [ None ] ],
-                                 kwargs= { 'sleep': 5, 'attempts': 5,
+                                 args=[ main.CLIs[ i ].ports, [ None ] ],
+                                 kwargs={ 'sleep': 5, 'attempts': 5,
                                            'randomTime': True } )
                 threads.append( t )
                 t.start()
@@ -2231,8 +2228,8 @@
             for i in main.activeNodes:
                 t = main.Thread( target=utilities.retry,
                                  name="links-" + str( i ),
-                                 args=[ main.CLIs[i].links, [ None ] ],
-                                 kwargs= { 'sleep': 5, 'attempts': 5,
+                                 args=[ main.CLIs[ i ].links, [ None ] ],
+                                 kwargs={ 'sleep': 5, 'attempts': 5,
                                            'randomTime': True } )
                 threads.append( t )
                 t.start()
@@ -2245,8 +2242,8 @@
             for i in main.activeNodes:
                 t = main.Thread( target=utilities.retry,
                                  name="clusters-" + str( i ),
-                                 args=[ main.CLIs[i].clusters, [ None ] ],
-                                 kwargs= { 'sleep': 5, 'attempts': 5,
+                                 args=[ main.CLIs[ i ].clusters, [ None ] ],
+                                 kwargs={ 'sleep': 5, 'attempts': 5,
                                            'randomTime': True } )
                 threads.append( t )
                 t.start()
@@ -2265,18 +2262,18 @@
                all( e is None for e in ports ) and\
                all( e is None for e in links ) and\
                all( e is None for e in clusters ):
-                   topoFailMsg = "Could not get topology from ONOS"
-                   main.log.error( topoFailMsg )
-                   continue  # Try again, No use trying to compare
+                topoFailMsg = "Could not get topology from ONOS"
+                main.log.error( topoFailMsg )
+                continue  # Try again, No use trying to compare
 
             mnSwitches = main.Mininet1.getSwitches()
             mnLinks = main.Mininet1.getLinks()
             mnHosts = main.Mininet1.getHosts()
             for controller in range( len( main.activeNodes ) ):
-                controllerStr = str( main.activeNodes[controller] + 1 )
+                controllerStr = str( main.activeNodes[ controller ] + 1 )
                 if devices[ controller ] and ports[ controller ] and\
-                    "Error" not in devices[ controller ] and\
-                    "Error" not in ports[ controller ]:
+                        "Error" not in devices[ controller ] and\
+                        "Error" not in ports[ controller ]:
 
                     try:
                         currentDevicesResult = main.Mininet1.compareSwitches(
@@ -2329,29 +2326,29 @@
                 mappings = {}
                 for i in range( 1, 29 ):  # hosts 1 through 28
                     # set up correct variables:
-                    macId = "00:" * 5 + hex( i ).split( "0x" )[1].upper().zfill(2)
+                    macId = "00:" * 5 + hex( i ).split( "0x" )[ 1 ].upper().zfill( 2 )
                     if i == 1:
-                        deviceId = "1000".zfill(16)
+                        deviceId = "1000".zfill( 16 )
                     elif i == 2:
-                        deviceId = "2000".zfill(16)
+                        deviceId = "2000".zfill( 16 )
                     elif i == 3:
-                        deviceId = "3000".zfill(16)
+                        deviceId = "3000".zfill( 16 )
                     elif i == 4:
-                        deviceId = "3004".zfill(16)
+                        deviceId = "3004".zfill( 16 )
                     elif i == 5:
-                        deviceId = "5000".zfill(16)
+                        deviceId = "5000".zfill( 16 )
                     elif i == 6:
-                        deviceId = "6000".zfill(16)
+                        deviceId = "6000".zfill( 16 )
                     elif i == 7:
-                        deviceId = "6007".zfill(16)
+                        deviceId = "6007".zfill( 16 )
                     elif i >= 8 and i <= 17:
                         dpid = '3' + str( i ).zfill( 3 )
-                        deviceId = dpid.zfill(16)
+                        deviceId = dpid.zfill( 16 )
                     elif i >= 18 and i <= 27:
                         dpid = '6' + str( i ).zfill( 3 )
-                        deviceId = dpid.zfill(16)
+                        deviceId = dpid.zfill( 16 )
                     elif i == 28:
-                        deviceId = "2800".zfill(16)
+                        deviceId = "2800".zfill( 16 )
                     mappings[ macId ] = deviceId
                 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
                     if hosts[ controller ] == []:
@@ -2371,7 +2368,7 @@
                                 assert location, "location field could not be found for this host object"
 
                                 # Trim the protocol identifier off deviceId
-                                device = str( location.get( 'elementId' ) ).split(':')[1]
+                                device = str( location.get( 'elementId' ) ).split( ':' )[ 1 ]
                                 assert device, "elementId field could not be found for this host location object"
 
                                 port = location.get( 'port' )
@@ -2382,7 +2379,7 @@
                                     if str( port ) != "1":
                                         main.log.error( "The attachment port is incorrect for " +
                                                         "host " + str( mac ) +
-                                                        ". Expected: 1 Actual: " + str( port) )
+                                                        ". Expected: 1 Actual: " + str( port ) )
                                         hostAttachment = False
                                     if device != mappings[ str( mac ) ]:
                                         main.log.error( "The attachment device is incorrect for " +
@@ -2424,7 +2421,7 @@
         main.step( "Hosts view is consistent across all ONOS nodes" )
         consistentHostsResult = main.TRUE
         for controller in range( len( hosts ) ):
-            controllerStr = str( main.activeNodes[controller] + 1 )
+            controllerStr = str( main.activeNodes[ controller ] + 1 )
             if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
                 if hosts[ controller ] == hosts[ 0 ]:
                     continue
@@ -2466,7 +2463,7 @@
         main.step( "Clusters view is consistent across all ONOS nodes" )
         consistentClustersResult = main.TRUE
         for controller in range( len( clusters ) ):
-            controllerStr = str( main.activeNodes[controller] + 1 )
+            controllerStr = str( main.activeNodes[ controller ] + 1 )
             if "Error" not in clusters[ controller ]:
                 if clusters[ controller ] == clusters[ 0 ]:
                     continue
@@ -2496,7 +2493,7 @@
             numClusters = len( json.loads( clusters[ 0 ] ) )
         except ( ValueError, TypeError ):
             main.log.exception( "Error parsing clusters[0]: " +
-                                repr( clusters[0] ) )
+                                repr( clusters[ 0 ] ) )
             numClusters = "ERROR"
         clusterResults = main.FALSE
         if numClusters == 1:
@@ -2546,7 +2543,7 @@
         main.step( "Checking ONOS nodes" )
         nodeResults = utilities.retry( main.HA.nodesCheck,
                                        False,
-                                       args=[main.activeNodes],
+                                       args=[ main.activeNodes ],
                                        attempts=5 )
 
         utilities.assert_equals( expect=True, actual=nodeResults,
@@ -2555,8 +2552,8 @@
         if not nodeResults:
             for i in main.activeNodes:
                 main.log.debug( "{} components not ACTIVE: \n{}".format(
-                    main.CLIs[i].name,
-                    main.CLIs[i].sendline( "scr:list | grep -v ACTIVE" ) ) )
+                    main.CLIs[ i ].name,
+                    main.CLIs[ i ].sendline( "scr:list | grep -v ACTIVE" ) ) )
 
         if not topoResult:
             main.cleanup()
@@ -2633,7 +2630,7 @@
         switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
 
         description = "Killing a switch to ensure it is discovered correctly"
-        onosCli = main.CLIs[ main.activeNodes[0] ]
+        onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
         main.case( description )
         switch = main.params[ 'kill' ][ 'switch' ]
         switchDPID = main.params[ 'kill' ][ 'dpid' ]
@@ -2678,7 +2675,7 @@
         switch = main.params[ 'kill' ][ 'switch' ]
         switchDPID = main.params[ 'kill' ][ 'dpid' ]
         links = main.params[ 'kill' ][ 'links' ].split()
-        onosCli = main.CLIs[ main.activeNodes[0] ]
+        onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
         description = "Adding a switch to ensure it is discovered correctly"
         main.case( description )
 
@@ -2765,14 +2762,14 @@
             main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
 
         try:
-            timerLog = open( main.logdir + "/Timers.csv", 'w')
+            timerLog = open( main.logdir + "/Timers.csv", 'w' )
             # Overwrite with empty line and close
             labels = "Gossip Intents"
             data = str( gossipTime )
             timerLog.write( labels + "\n" + data )
             timerLog.close()
-        except NameError, e:
-            main.log.exception(e)
+        except NameError as e:
+            main.log.exception( e )
 
     def CASE14( self, main ):
         """
@@ -2784,9 +2781,9 @@
         assert main.CLIs, "main.CLIs not defined"
         assert main.nodes, "main.nodes not defined"
 
-        main.case("Start Leadership Election app")
+        main.case( "Start Leadership Election app" )
         main.step( "Install leadership election app" )
-        onosCli = main.CLIs[ main.activeNodes[0] ]
+        onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
         appResult = onosCli.activateApp( "org.onosproject.election" )
         utilities.assert_equals(
             expect=main.TRUE,
@@ -2796,9 +2793,9 @@
 
         main.step( "Run for election on each node" )
         for i in main.activeNodes:
-            main.CLIs[i].electionTestRun()
-        time.sleep(5)
-        activeCLIs = [ main.CLIs[i] for i in main.activeNodes ]
+            main.CLIs[ i ].electionTestRun()
+        time.sleep( 5 )
+        activeCLIs = [ main.CLIs[ i ] for i in main.activeNodes ]
         sameResult, leaders = main.HA.consistentLeaderboards( activeCLIs )
         utilities.assert_equals(
             expect=True,
@@ -2808,7 +2805,7 @@
 
         if sameResult:
             leader = leaders[ 0 ][ 0 ]
-            if main.nodes[main.activeNodes[0]].ip_address in leader:
+            if main.nodes[ main.activeNodes[ 0 ] ].ip_address in leader:
                 correctLeader = True
             else:
                 correctLeader = False
@@ -2858,7 +2855,7 @@
         electionResult = main.TRUE
 
         for i in main.activeNodes:  # run test election on each node
-            if main.CLIs[i].electionTestRun() == main.FALSE:
+            if main.CLIs[ i ].electionTestRun() == main.FALSE:
                 electionResult = main.FALSE
         utilities.assert_equals(
             expect=main.TRUE,
@@ -2873,7 +2870,7 @@
 
         main.step( "Check that each node shows the same leader and candidates" )
         failMessage = "Nodes have different leaderboards"
-        activeCLIs = [ main.CLIs[i] for i in main.activeNodes ]
+        activeCLIs = [ main.CLIs[ i ] for i in main.activeNodes ]
         sameResult, oldLeaders = main.HA.consistentLeaderboards( activeCLIs )
         if sameResult:
             oldLeader = oldLeaders[ 0 ][ 0 ]
@@ -2923,7 +2920,7 @@
         if newLeader == oldLeader:
             newLeaderResult = False
             main.log.error( "All nodes still see old leader: " + str( oldLeader ) +
-                " as the current leader" )
+                            " as the current leader" )
         utilities.assert_equals(
             expect=True,
             actual=newLeaderResult,
@@ -2940,7 +2937,7 @@
             else:
                 main.log.info( "Expected no leader, got: " + str( newLeader ) )
                 correctCandidateResult = main.FALSE
-        elif len( oldLeaders[0] ) >= 3:
+        elif len( oldLeaders[ 0 ] ) >= 3:
             if newLeader == oldLeaders[ 0 ][ 2 ]:
                 # correct leader was elected
                 correctCandidateResult = main.TRUE
@@ -2980,10 +2977,10 @@
         positionResult, reRunLeaders = main.HA.consistentLeaderboards( activeCLIs )
 
         # Check that the re-elected node is last on the candidate List
-        if not reRunLeaders[0]:
+        if not reRunLeaders[ 0 ]:
             positionResult = main.FALSE
         elif oldLeader != reRunLeaders[ 0 ][ -1 ]:
-            main.log.error( "Old Leader ({}) not in the proper position: {} ".format( str( oldLeader),
+            main.log.error( "Old Leader ({}) not in the proper position: {} ".format( str( oldLeader ),
                                                                                       str( reRunLeaders[ 0 ] ) ) )
             positionResult = main.FALSE
         utilities.assert_equals(
@@ -3007,15 +3004,15 @@
         # Variables for the distributed primitives tests
         main.pCounterName = "TestON-Partitions"
         main.pCounterValue = 0
-        main.onosSet = set([])
+        main.onosSet = set( [] )
         main.onosSetName = "TestON-set"
 
         description = "Install Primitives app"
         main.case( description )
         main.step( "Install Primitives app" )
         appName = "org.onosproject.distributedprimitives"
-        node = main.activeNodes[0]
-        appResults = main.CLIs[node].activateApp( appName )
+        node = main.activeNodes[ 0 ]
+        appResults = main.CLIs[ node ].activateApp( appName )
         utilities.assert_equals( expect=main.TRUE,
                                  actual=appResults,
                                  onpass="Primitives app activated",
diff --git a/TestON/tests/HA/HAscaling/HAscaling.py b/TestON/tests/HA/HAscaling/HAscaling.py
index c58ad41..276bcdf 100644
--- a/TestON/tests/HA/HAscaling/HAscaling.py
+++ b/TestON/tests/HA/HAscaling/HAscaling.py
@@ -22,8 +22,6 @@
 CASE16: Install Distributed Primitives app
 CASE17: Check for basic functionality with distributed primitives
 """
-
-
 class HAscaling:
 
     def __init__( self ):
@@ -130,7 +128,7 @@
             killResults = killResults and killed
 
         main.step( "Setup server for cluster metadata file" )
-        port = main.params['server']['port']
+        port = main.params[ 'server' ][ 'port' ]
         rootDir = os.path.dirname( main.testFile ) + "/dependencies"
         main.log.debug( "Root dir: {}".format( rootDir ) )
         status = main.Server.start( main.ONOSbench,
@@ -142,16 +140,16 @@
                                  onfail="Failled to start SimpleHTTPServer" )
 
         main.step( "Generate initial metadata file" )
-        main.scaling = main.params['scaling'].split( "," )
+        main.scaling = main.params[ 'scaling' ].split( "," )
         main.log.debug( main.scaling )
-        scale = main.scaling.pop(0)
-        main.log.debug( scale)
+        scale = main.scaling.pop( 0 )
+        main.log.debug( scale )
         if "e" in scale:
             equal = True
         else:
             equal = False
-        main.log.debug( equal)
-        main.numCtrls = int( re.search( "\d+", scale ).group(0) )
+        main.log.debug( equal )
+        main.numCtrls = int( re.search( "\d+", scale ).group( 0 ) )
         genResult = main.Server.generateFile( main.numCtrls, equal=equal )
         utilities.assert_equals( expect=main.TRUE, actual=genResult,
                                  onpass="New cluster metadata file generated",
@@ -169,7 +167,7 @@
                             filePath + topoName,
                             main.Mininet1.home,
                             direction="to" )
-        mnResult = main.Mininet1.startNet( )
+        mnResult = main.Mininet1.startNet()
         utilities.assert_equals( expect=main.TRUE, actual=mnResult,
                                  onpass="Mininet Started",
                                  onfail="Error starting Mininet" )
@@ -212,7 +210,7 @@
                   'seamless="seamless"></iframe>\n'
         graphs += ']]></ac:plain-text-body>\n'
         graphs += '</ac:structured-macro>\n'
-        main.log.wiki(graphs)
+        main.log.wiki( graphs )
 
         main.step( "Copying backup config files" )
         path = "~/onos/tools/package/bin/onos-service"
@@ -227,7 +225,7 @@
                                  onfail="Copy backup config file failed" )
         # we need to modify the onos-service file to use remote metadata file
         # url for cluster metadata file
-        iface = main.params['server'].get( 'interface' )
+        iface = main.params[ 'server' ].get( 'interface' )
         ip = main.ONOSbench.getIpAddr( iface=iface )
         metaFile = "cluster.json"
         javaArgs = r"-Donos.cluster.metadata.uri=http:\/\/{}:{}\/{}".format( ip, port, metaFile )
@@ -256,7 +254,7 @@
         main.step( "Installing ONOS package" )
         onosInstallResult = main.TRUE
         for i in range( main.ONOSbench.maxNodes ):
-            node = main.nodes[i]
+            node = main.nodes[ i ]
             options = "-f"
             if i >= main.numCtrls:
                 options = "-nf"  # Don't start more than the current scale
@@ -276,7 +274,7 @@
         main.step( "Set up ONOS secure SSH" )
         secureSshResult = main.TRUE
         for i in range( main.numCtrls ):
-            node = main.nodes[i]
+            node = main.nodes[ i ]
             secureSshResult = secureSshResult and main.ONOSbench.onosSecureSSH( node=node.ip_address )
         utilities.assert_equals( expect=main.TRUE, actual=secureSshResult,
                                  onpass="Test step PASS",
@@ -286,7 +284,7 @@
         for i in range( 2 ):
             onosIsupResult = main.TRUE
             for i in range( main.numCtrls ):
-                node = main.nodes[i]
+                node = main.nodes[ i ]
                 started = main.ONOSbench.isup( node.ip_address )
                 if not started:
                     main.log.error( node.name + " hasn't started" )
@@ -301,9 +299,9 @@
         cliResults = main.TRUE
         threads = []
         for i in range( main.numCtrls ):
-            t = main.Thread( target=main.CLIs[i].startOnosCli,
+            t = main.Thread( target=main.CLIs[ i ].startOnosCli,
                              name="startOnosCli-" + str( i ),
-                             args=[main.nodes[i].ip_address] )
+                             args=[ main.nodes[ i ].ip_address ] )
             threads.append( t )
             t.start()
 
@@ -328,7 +326,7 @@
         main.step( "Checking ONOS nodes" )
         nodeResults = utilities.retry( main.HA.nodesCheck,
                                        False,
-                                       args=[main.activeNodes],
+                                       args=[ main.activeNodes ],
                                        attempts=5 )
         utilities.assert_equals( expect=True, actual=nodeResults,
                                  onpass="Nodes check successful",
@@ -336,7 +334,7 @@
 
         if not nodeResults:
             for i in main.activeNodes:
-                cli = main.CLIs[i]
+                cli = main.CLIs[ i ]
                 main.log.debug( "{} components not ACTIVE: \n{}".format(
                     cli.name,
                     cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
@@ -348,7 +346,7 @@
         # get data from the params
         apps = main.params.get( 'apps' )
         if apps:
-            apps = apps.split(',')
+            apps = apps.split( ',' )
             main.log.warn( apps )
             activateResult = True
             for app in apps:
@@ -375,8 +373,8 @@
             main.log.debug( config )
             checkResult = main.TRUE
             for component in config:
-                for setting in config[component]:
-                    value = config[component][setting]
+                for setting in config[ component ]:
+                    value = config[ component ][ setting ]
                     check = main.CLIs[ 0 ].setCfg( component, setting, value )
                     main.log.info( "Value was changed? {}".format( main.TRUE == check ) )
                     checkResult = check and checkResult
@@ -391,7 +389,7 @@
         appCheck = main.TRUE
         threads = []
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].appToIDCheck,
+            t = main.Thread( target=main.CLIs[ i ].appToIDCheck,
                              name="appToIDCheck-" + str( i ),
                              args=[] )
             threads.append( t )
@@ -401,9 +399,9 @@
             t.join()
             appCheck = appCheck and t.result
         if appCheck != main.TRUE:
-            node = main.activeNodes[0]
-            main.log.warn( main.CLIs[node].apps() )
-            main.log.warn( main.CLIs[node].appIDs() )
+            node = main.activeNodes[ 0 ]
+            main.log.warn( main.CLIs[ node ].apps() )
+            main.log.warn( main.CLIs[ node ].appIDs() )
         utilities.assert_equals( expect=main.TRUE, actual=appCheck,
                                  onpass="App Ids seem to be correct",
                                  onfail="Something is wrong with app Ids" )
@@ -474,9 +472,9 @@
         # Manually assign mastership to the controller we want
         roleCall = main.TRUE
 
-        ipList = [ ]
+        ipList = []
         deviceList = []
-        onosCli = main.CLIs[ main.activeNodes[0] ]
+        onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
         try:
             # Assign mastership to specific controllers. This assignment was
             # determined for a 7 node cluser, but will work with any sized
@@ -550,8 +548,8 @@
         #       atomic and is actually a multi step process
         time.sleep( 5 )
         for i in range( len( ipList ) ):
-            ip = ipList[i]
-            deviceId = deviceList[i]
+            ip = ipList[ i ]
+            deviceId = deviceList[ i ]
             # Check assignment
             master = onosCli.getRole( deviceId ).get( 'master' )
             if ip in master:
@@ -601,7 +599,7 @@
 
         # install onos-app-fwd
         main.step( "Install reactive forwarding app" )
-        onosCli = main.CLIs[ main.activeNodes[0] ]
+        onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
         installResults = onosCli.activateApp( "org.onosproject.fwd" )
         utilities.assert_equals( expect=main.TRUE, actual=installResults,
                                  onpass="Install fwd successful",
@@ -611,7 +609,7 @@
         appCheck = main.TRUE
         threads = []
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].appToIDCheck,
+            t = main.Thread( target=main.CLIs[ i ].appToIDCheck,
                              name="appToIDCheck-" + str( i ),
                              args=[] )
             threads.append( t )
@@ -636,13 +634,13 @@
         pingResult = main.Mininet1.pingall()
         time2 = time.time()
         if not pingResult:
-            main.log.warn("First pingall failed. Trying again...")
+            main.log.warn( "First pingall failed. Trying again..." )
             pingResult = main.Mininet1.pingall()
             passMsg += " on the second try"
         utilities.assert_equals(
             expect=main.TRUE,
             actual=pingResult,
-            onpass= passMsg,
+            onpass=passMsg,
             onfail="Reactive Pingall failed, " +
                    "one or more ping pairs failed" )
         main.log.info( "Time for pingall: %2f seconds" %
@@ -651,8 +649,8 @@
         time.sleep( 11 )
         # uninstall onos-app-fwd
         main.step( "Uninstall reactive forwarding app" )
-        node = main.activeNodes[0]
-        uninstallResult = main.CLIs[node].deactivateApp( "org.onosproject.fwd" )
+        node = main.activeNodes[ 0 ]
+        uninstallResult = main.CLIs[ node ].deactivateApp( "org.onosproject.fwd" )
         utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
                                  onpass="Uninstall fwd successful",
                                  onfail="Uninstall fwd failed" )
@@ -661,7 +659,7 @@
         threads = []
         appCheck2 = main.TRUE
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].appToIDCheck,
+            t = main.Thread( target=main.CLIs[ i ].appToIDCheck,
                              name="appToIDCheck-" + str( i ),
                              args=[] )
             threads.append( t )
@@ -671,9 +669,9 @@
             t.join()
             appCheck2 = appCheck2 and t.result
         if appCheck2 != main.TRUE:
-            node = main.activeNodes[0]
-            main.log.warn( main.CLIs[node].apps() )
-            main.log.warn( main.CLIs[node].appIDs() )
+            node = main.activeNodes[ 0 ]
+            main.log.warn( main.CLIs[ node ].apps() )
+            main.log.warn( main.CLIs[ node ].appIDs() )
         utilities.assert_equals( expect=main.TRUE, actual=appCheck2,
                                  onpass="App Ids seem to be correct",
                                  onfail="Something is wrong with app Ids" )
@@ -701,8 +699,8 @@
                 host2Id = host2Dict.get( 'id', None )
             if host1Id and host2Id:
                 nodeNum = ( i % len( main.activeNodes ) )
-                node = main.activeNodes[nodeNum]
-                tmpId = main.CLIs[node].addHostIntent( host1Id, host2Id )
+                node = main.activeNodes[ nodeNum ]
+                tmpId = main.CLIs[ node ].addHostIntent( host1Id, host2Id )
                 if tmpId:
                     main.log.info( "Added intent with id: " + tmpId )
                     intentIds.append( tmpId )
@@ -712,8 +710,8 @@
             else:
                 main.log.error( "Error, getHost() failed for h" + str( i ) +
                                 " and/or h" + str( i + 10 ) )
-                node = main.activeNodes[0]
-                hosts = main.CLIs[node].hosts()
+                node = main.activeNodes[ 0 ]
+                hosts = main.CLIs[ node ].hosts()
                 main.log.warn( "Hosts output: " )
                 try:
                     main.log.warn( json.dumps( json.loads( hosts ),
@@ -781,7 +779,7 @@
                 for i in range( 14 ):
                     topics.append( "work-partition-" + str( i ) )
                 main.log.debug( topics )
-                ONOStopics = [ j['topic'] for j in parsedLeaders ]
+                ONOStopics = [ j[ 'topic' ] for j in parsedLeaders ]
                 for topic in topics:
                     if topic not in ONOStopics:
                         main.log.error( "Error: " + topic +
@@ -795,13 +793,13 @@
         # Check all nodes
         if missing:
             for i in main.activeNodes:
-                response = main.CLIs[i].leaders( jsonFormat=False)
-                main.log.warn( str( main.CLIs[i].name ) + " leaders output: \n" +
+                response = main.CLIs[ i ].leaders( jsonFormat=False )
+                main.log.warn( str( main.CLIs[ i ].name ) + " leaders output: \n" +
                                str( response ) )
 
         partitions = onosCli.partitions()
         try:
-            if partitions :
+            if partitions:
                 parsedPartitions = json.loads( partitions )
                 main.log.warn( json.dumps( parsedPartitions,
                                            sort_keys=True,
@@ -816,7 +814,7 @@
             main.log.error( repr( partitions ) )
         pendingMap = onosCli.pendingMap()
         try:
-            if pendingMap :
+            if pendingMap:
                 parsedPending = json.loads( pendingMap )
                 main.log.warn( json.dumps( parsedPending,
                                            sort_keys=True,
@@ -835,21 +833,21 @@
             main.log.error( "Error in pushing host intents to ONOS" )
 
         main.step( "Intent Anti-Entropy dispersion" )
-        for j in range(100):
+        for j in range( 100 ):
             correct = True
             main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
             for i in main.activeNodes:
                 onosIds = []
-                ids = main.CLIs[i].getAllIntentsId()
+                ids = main.CLIs[ i ].getAllIntentsId()
                 onosIds.append( ids )
-                main.log.debug( "Intents in " + main.CLIs[i].name + ": " +
+                main.log.debug( "Intents in " + main.CLIs[ i ].name + ": " +
                                 str( sorted( onosIds ) ) )
                 if sorted( ids ) != sorted( intentIds ):
                     main.log.warn( "Set of intent IDs doesn't match" )
                     correct = False
                     break
                 else:
-                    intents = json.loads( main.CLIs[i].intents() )
+                    intents = json.loads( main.CLIs[ i ].intents() )
                     for intent in intents:
                         if intent[ 'state' ] != "INSTALLED":
                             main.log.warn( "Intent " + intent[ 'id' ] +
@@ -859,7 +857,7 @@
             if correct:
                 break
             else:
-                time.sleep(1)
+                time.sleep( 1 )
         if not intentStop:
             intentStop = time.time()
         global gossipTime
@@ -877,7 +875,7 @@
                 append = True
             else:
                 count += 1
-        gossipPeriod = int( main.params['timers']['gossip'] )
+        gossipPeriod = int( main.params[ 'timers' ][ 'gossip' ] )
         maxGossipTime = gossipPeriod * len( main.activeNodes )
         utilities.assert_greater_equals(
                 expect=maxGossipTime, actual=gossipTime,
@@ -939,7 +937,7 @@
                     # FIXME: this should only be after we start the app
                     topics.append( "org.onosproject.election" )
                     main.log.debug( topics )
-                    ONOStopics = [ j['topic'] for j in parsedLeaders ]
+                    ONOStopics = [ j[ 'topic' ] for j in parsedLeaders ]
                     for topic in topics:
                         if topic not in ONOStopics:
                             main.log.error( "Error: " + topic +
@@ -953,14 +951,14 @@
             # Check all nodes
             if missing:
                 for i in main.activeNodes:
-                    node = main.CLIs[i]
-                    response = node.leaders( jsonFormat=False)
+                    node = main.CLIs[ i ]
+                    response = node.leaders( jsonFormat=False )
                     main.log.warn( str( node.name ) + " leaders output: \n" +
                                    str( response ) )
 
             partitions = onosCli.partitions()
             try:
-                if partitions :
+                if partitions:
                     parsedPartitions = json.loads( partitions )
                     main.log.warn( json.dumps( parsedPartitions,
                                                sort_keys=True,
@@ -975,7 +973,7 @@
                 main.log.error( repr( partitions ) )
             pendingMap = onosCli.pendingMap()
             try:
-                if pendingMap :
+                if pendingMap:
                     parsedPending = json.loads( pendingMap )
                     main.log.warn( json.dumps( parsedPending,
                                                sort_keys=True,
@@ -1004,7 +1002,7 @@
                                 "functionality and check the state of " +\
                                 "the intent"
 
-        onosCli = main.CLIs[ main.activeNodes[0] ]
+        onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
         main.step( "Check Intent state" )
         installedCheck = False
         loopCount = 0
@@ -1090,7 +1088,7 @@
                 # FIXME: topics.append( "org.onosproject.election" )
                 # Print leaders output
                 main.log.debug( topics )
-                ONOStopics = [ j['topic'] for j in parsedLeaders ]
+                ONOStopics = [ j[ 'topic' ] for j in parsedLeaders ]
                 for topic in topics:
                     if topic not in ONOStopics:
                         main.log.error( "Error: " + topic +
@@ -1107,8 +1105,8 @@
         # Check all nodes
         if topicCheck:
             for i in main.activeNodes:
-                node = main.CLIs[i]
-                response = node.leaders( jsonFormat=False)
+                node = main.CLIs[ i ]
+                response = node.leaders( jsonFormat=False )
                 main.log.warn( str( node.name ) + " leaders output: \n" +
                                str( response ) )
 
@@ -1118,7 +1116,7 @@
         # Print partitions
         partitions = onosCli.partitions()
         try:
-            if partitions :
+            if partitions:
                 parsedPartitions = json.loads( partitions )
                 main.log.warn( json.dumps( parsedPartitions,
                                            sort_keys=True,
@@ -1134,7 +1132,7 @@
         # Print Pending Map
         pendingMap = onosCli.pendingMap()
         try:
-            if pendingMap :
+            if pendingMap:
                 parsedPending = json.loads( pendingMap )
                 main.log.warn( json.dumps( parsedPending,
                                            sort_keys=True,
@@ -1188,7 +1186,7 @@
                     # FIXME: this should only be after we start the app
                     topics.append( "org.onosproject.election" )
                     main.log.debug( topics )
-                    ONOStopics = [ j['topic'] for j in parsedLeaders ]
+                    ONOStopics = [ j[ 'topic' ] for j in parsedLeaders ]
                     for topic in topics:
                         if topic not in ONOStopics:
                             main.log.error( "Error: " + topic +
@@ -1201,14 +1199,14 @@
                 main.log.error( repr( leaders ) )
             if missing:
                 for i in main.activeNodes:
-                    node = main.CLIs[i]
-                    response = node.leaders( jsonFormat=False)
+                    node = main.CLIs[ i ]
+                    response = node.leaders( jsonFormat=False )
                     main.log.warn( str( node.name ) + " leaders output: \n" +
                                    str( response ) )
 
             partitions = onosCli.partitions()
             try:
-                if partitions :
+                if partitions:
                     parsedPartitions = json.loads( partitions )
                     main.log.warn( json.dumps( parsedPartitions,
                                                sort_keys=True,
@@ -1223,7 +1221,7 @@
                 main.log.error( repr( partitions ) )
             pendingMap = onosCli.pendingMap()
             try:
-                if pendingMap :
+                if pendingMap:
                     parsedPending = json.loads( pendingMap )
                     main.log.warn( json.dumps( parsedPending,
                                                sort_keys=True,
@@ -1236,8 +1234,8 @@
                 main.log.exception( "Error parsing pending map" )
                 main.log.error( repr( pendingMap ) )
         # Print flowrules
-        node = main.activeNodes[0]
-        main.log.debug( main.CLIs[node].flows( jsonFormat=False ) )
+        node = main.activeNodes[ 0 ]
+        main.log.debug( main.CLIs[ node ].flows( jsonFormat=False ) )
         main.step( "Wait a minute then ping again" )
         # the wait is above
         PingResult = main.TRUE
@@ -1295,7 +1293,7 @@
         rolesNotNull = main.TRUE
         threads = []
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].rolesNotNull,
+            t = main.Thread( target=main.CLIs[ i ].rolesNotNull,
                              name="rolesNotNull-" + str( i ),
                              args=[] )
             threads.append( t )
@@ -1316,7 +1314,7 @@
         rolesResults = True
         threads = []
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].roles,
+            t = main.Thread( target=main.CLIs[ i ].roles,
                              name="roles-" + str( i ),
                              args=[] )
             threads.append( t )
@@ -1327,11 +1325,11 @@
             ONOSMastership.append( t.result )
 
         for i in range( len( ONOSMastership ) ):
-            node = str( main.activeNodes[i] + 1 )
-            if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
+            node = str( main.activeNodes[ i ] + 1 )
+            if not ONOSMastership[ i ] or "Error" in ONOSMastership[ i ]:
                 main.log.error( "Error in getting ONOS" + node + " roles" )
                 main.log.warn( "ONOS" + node + " mastership response: " +
-                               repr( ONOSMastership[i] ) )
+                               repr( ONOSMastership[ i ] ) )
                 rolesResults = False
         utilities.assert_equals(
             expect=True,
@@ -1340,7 +1338,7 @@
             onfail="Error in reading roles from ONOS" )
 
         main.step( "Check for consistency in roles from each controller" )
-        if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
+        if all( [ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
             main.log.info(
                 "Switch roles are consistent across all ONOS nodes" )
         else:
@@ -1353,7 +1351,7 @@
 
         if rolesResults and not consistentMastership:
             for i in range( len( main.activeNodes ) ):
-                node = str( main.activeNodes[i] + 1 )
+                node = str( main.activeNodes[ i ] + 1 )
                 try:
                     main.log.warn(
                         "ONOS" + node + " roles: ",
@@ -1375,7 +1373,7 @@
         intentsResults = True  # Could we read Intents from ONOS?
         threads = []
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].intents,
+            t = main.Thread( target=main.CLIs[ i ].intents,
                              name="intents-" + str( i ),
                              args=[],
                              kwargs={ 'jsonFormat': True } )
@@ -1387,7 +1385,7 @@
             ONOSIntents.append( t.result )
 
         for i in range( len( ONOSIntents ) ):
-            node = str( main.activeNodes[i] + 1 )
+            node = str( main.activeNodes[ i ] + 1 )
             if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
                 main.log.error( "Error in getting ONOS" + node + " intents" )
                 main.log.warn( "ONOS" + node + " intents response: " +
@@ -1400,7 +1398,7 @@
             onfail="Error in reading intents from ONOS" )
 
         main.step( "Check for consistency in Intents from each controller" )
-        if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
+        if all( [ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
             main.log.info( "Intents are consistent across all ONOS " +
                              "nodes" )
         else:
@@ -1448,17 +1446,17 @@
 
         if intentsResults and not consistentIntents:
             # print the json objects
-            n = str( main.activeNodes[-1] + 1 )
+            n = str( main.activeNodes[ -1 ] + 1 )
             main.log.debug( "ONOS" + n + " intents: " )
             main.log.debug( json.dumps( json.loads( ONOSIntents[ -1 ] ),
                                         sort_keys=True,
                                         indent=4,
                                         separators=( ',', ': ' ) ) )
             for i in range( len( ONOSIntents ) ):
-                node = str( main.activeNodes[i] + 1 )
+                node = str( main.activeNodes[ i ] + 1 )
                 if ONOSIntents[ i ] != ONOSIntents[ -1 ]:
                     main.log.debug( "ONOS" + node + " intents: " )
-                    main.log.debug( json.dumps( json.loads( ONOSIntents[i] ),
+                    main.log.debug( json.dumps( json.loads( ONOSIntents[ i ] ),
                                                 sort_keys=True,
                                                 indent=4,
                                                 separators=( ',', ': ' ) ) )
@@ -1478,7 +1476,7 @@
         flowsResults = True
         threads = []
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].flows,
+            t = main.Thread( target=main.CLIs[ i ].flows,
                              name="flows-" + str( i ),
                              args=[],
                              kwargs={ 'jsonFormat': True } )
@@ -1486,14 +1484,14 @@
             t.start()
 
         # NOTE: Flows command can take some time to run
-        time.sleep(30)
+        time.sleep( 30 )
         for t in threads:
             t.join()
             result = t.result
             ONOSFlows.append( result )
 
         for i in range( len( ONOSFlows ) ):
-            num = str( main.activeNodes[i] + 1 )
+            num = str( main.activeNodes[ i ] + 1 )
             if not ONOSFlows[ i ] or "Error" in ONOSFlows[ i ]:
                 main.log.error( "Error in getting ONOS" + num + " flows" )
                 main.log.warn( "ONOS" + num + " flows response: " +
@@ -1530,11 +1528,11 @@
 
         if flowsResults and not consistentFlows:
             for i in range( len( ONOSFlows ) ):
-                node = str( main.activeNodes[i] + 1 )
+                node = str( main.activeNodes[ i ] + 1 )
                 try:
                     main.log.warn(
                         "ONOS" + node + " flows: " +
-                        json.dumps( json.loads( ONOSFlows[i] ), sort_keys=True,
+                        json.dumps( json.loads( ONOSFlows[ i ] ), sort_keys=True,
                                     indent=4, separators=( ',', ': ' ) ) )
                 except ( ValueError, TypeError ):
                     main.log.warn( "ONOS" + node + " flows: " +
@@ -1599,9 +1597,9 @@
         devices = []
         threads = []
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].devices,
+            t = main.Thread( target=main.CLIs[ i ].devices,
                              name="devices-" + str( i ),
-                             args=[ ] )
+                             args=[] )
             threads.append( t )
             t.start()
 
@@ -1611,9 +1609,9 @@
         hosts = []
         threads = []
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].hosts,
+            t = main.Thread( target=main.CLIs[ i ].hosts,
                              name="hosts-" + str( i ),
-                             args=[ ] )
+                             args=[] )
             threads.append( t )
             t.start()
 
@@ -1631,9 +1629,9 @@
         ports = []
         threads = []
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].ports,
+            t = main.Thread( target=main.CLIs[ i ].ports,
                              name="ports-" + str( i ),
-                             args=[ ] )
+                             args=[] )
             threads.append( t )
             t.start()
 
@@ -1643,9 +1641,9 @@
         links = []
         threads = []
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].links,
+            t = main.Thread( target=main.CLIs[ i ].links,
                              name="links-" + str( i ),
-                             args=[ ] )
+                             args=[] )
             threads.append( t )
             t.start()
 
@@ -1655,9 +1653,9 @@
         clusters = []
         threads = []
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].clusters,
+            t = main.Thread( target=main.CLIs[ i ].clusters,
                              name="clusters-" + str( i ),
-                             args=[ ] )
+                             args=[] )
             threads.append( t )
             t.start()
 
@@ -1670,7 +1668,7 @@
         main.step( "Host view is consistent across ONOS nodes" )
         consistentHostsResult = main.TRUE
         for controller in range( len( hosts ) ):
-            controllerStr = str( main.activeNodes[controller] + 1 )
+            controllerStr = str( main.activeNodes[ controller ] + 1 )
             if hosts[ controller ] and "Error" not in hosts[ controller ]:
                 if hosts[ controller ] == hosts[ 0 ]:
                     continue
@@ -1697,10 +1695,10 @@
         main.step( "Each host has an IP address" )
         ipResult = main.TRUE
         for controller in range( 0, len( hosts ) ):
-            controllerStr = str( main.activeNodes[controller] + 1 )
+            controllerStr = str( main.activeNodes[ controller ] + 1 )
             if hosts[ controller ]:
                 for host in hosts[ controller ]:
-                    if not host.get( 'ipAddresses', [ ] ):
+                    if not host.get( 'ipAddresses', [] ):
                         main.log.error( "Error with host ips on controller" +
                                         controllerStr + ": " + str( host ) )
                         ipResult = main.FALSE
@@ -1714,7 +1712,7 @@
         main.step( "Cluster view is consistent across ONOS nodes" )
         consistentClustersResult = main.TRUE
         for controller in range( len( clusters ) ):
-            controllerStr = str( main.activeNodes[controller] + 1 )
+            controllerStr = str( main.activeNodes[ controller ] + 1 )
             if "Error" not in clusters[ controller ]:
                 if clusters[ controller ] == clusters[ 0 ]:
                     continue
@@ -1760,14 +1758,14 @@
         mnLinks = main.Mininet1.getLinks()
         mnHosts = main.Mininet1.getHosts()
         for controller in main.activeNodes:
-            controllerStr = str( main.activeNodes[controller] + 1 )
+            controllerStr = str( main.activeNodes[ controller ] + 1 )
             if devices[ controller ] and ports[ controller ] and\
-                "Error" not in devices[ controller ] and\
-                "Error" not in ports[ controller ]:
-                    currentDevicesResult = main.Mininet1.compareSwitches(
-                            mnSwitches,
-                            json.loads( devices[ controller ] ),
-                            json.loads( ports[ controller ] ) )
+                    "Error" not in devices[ controller ] and\
+                    "Error" not in ports[ controller ]:
+                currentDevicesResult = main.Mininet1.compareSwitches(
+                        mnSwitches,
+                        json.loads( devices[ controller ] ),
+                        json.loads( ports[ controller ] ) )
             else:
                 currentDevicesResult = main.FALSE
             utilities.assert_equals( expect=main.TRUE,
@@ -1855,7 +1853,7 @@
 
         main.step( "Checking ONOS Logs for errors" )
         for i in main.activeNodes:
-            node = main.nodes[i]
+            node = main.nodes[ i ]
             main.log.debug( "Checking logs for errors on " + node.name + ":" )
             main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
 
@@ -1864,15 +1862,14 @@
         modify cluster.json file appropriately
         install/deactivate node as needed
         """
-
         try:
             prevNodes = main.activeNodes
-            scale = main.scaling.pop(0)
+            scale = main.scaling.pop( 0 )
             if "e" in scale:
                 equal = True
             else:
                 equal = False
-            main.numCtrls = int( re.search( "\d+", scale ).group(0) )
+            main.numCtrls = int( re.search( r"\d+", scale ).group( 0 ) )
             main.log.info( "Scaling to {} nodes".format( main.numCtrls ) )
             genResult = main.Server.generateFile( main.numCtrls, equal=equal )
             utilities.assert_equals( expect=main.TRUE, actual=genResult,
@@ -1889,7 +1886,7 @@
         main.step( "Start new nodes" )  # OR stop old nodes?
         started = main.TRUE
         for i in newNodes:
-            started = main.ONOSbench.onosStart( main.nodes[i].ip_address ) and main.TRUE
+            started = main.ONOSbench.onosStart( main.nodes[ i ].ip_address ) and main.TRUE
         utilities.assert_equals( expect=main.TRUE, actual=started,
                                  onpass="ONOS started",
                                  onfail="ONOS start NOT successful" )
@@ -1898,7 +1895,7 @@
         for i in range( 2 ):
             onosIsupResult = main.TRUE
             for i in main.activeNodes:
-                node = main.nodes[i]
+                node = main.nodes[ i ]
                 main.ONOSbench.onosSecureSSH( node=node.ip_address )
                 started = main.ONOSbench.isup( node.ip_address )
                 if not started:
@@ -1914,9 +1911,9 @@
         cliResults = main.TRUE
         threads = []
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].startOnosCli,
+            t = main.Thread( target=main.CLIs[ i ].startOnosCli,
                              name="startOnosCli-" + str( i ),
-                             args=[main.nodes[i].ip_address] )
+                             args=[ main.nodes[ i ].ip_address ] )
             threads.append( t )
             t.start()
 
@@ -1930,7 +1927,7 @@
         main.step( "Checking ONOS nodes" )
         nodeResults = utilities.retry( main.HA.nodesCheck,
                                        False,
-                                       args=[main.activeNodes],
+                                       args=[ main.activeNodes ],
                                        attempts=5 )
         utilities.assert_equals( expect=True, actual=nodeResults,
                                  onpass="Nodes check successful",
@@ -1939,7 +1936,7 @@
         for i in range( 10 ):
             ready = True
             for i in main.activeNodes:
-                cli = main.CLIs[i]
+                cli = main.CLIs[ i ]
                 output = cli.summary()
                 if not output:
                     ready = False
@@ -1956,7 +1953,7 @@
         # Rerun for election on new nodes
         runResults = main.TRUE
         for i in main.activeNodes:
-            cli = main.CLIs[i]
+            cli = main.CLIs[ i ]
             run = cli.electionTestRun()
             if run != main.TRUE:
                 main.log.error( "Error running for election on " + cli.name )
@@ -1968,11 +1965,11 @@
         # TODO: Make this configurable
         time.sleep( 60 )
         for node in main.activeNodes:
-            main.log.warn( "\n****************** {} **************".format( main.nodes[node].ip_address ) )
-            main.log.debug( main.CLIs[node].nodes( jsonFormat=False ) )
-            main.log.debug( main.CLIs[node].leaders( jsonFormat=False ) )
-            main.log.debug( main.CLIs[node].partitions( jsonFormat=False ) )
-            main.log.debug( main.CLIs[node].apps( jsonFormat=False ) )
+            main.log.warn( "\n****************** {} **************".format( main.nodes[ node ].ip_address ) )
+            main.log.debug( main.CLIs[ node ].nodes( jsonFormat=False ) )
+            main.log.debug( main.CLIs[ node ].leaders( jsonFormat=False ) )
+            main.log.debug( main.CLIs[ node ].partitions( jsonFormat=False ) )
+            main.log.debug( main.CLIs[ node ].apps( jsonFormat=False ) )
 
     def CASE7( self, main ):
         """
@@ -1991,9 +1988,9 @@
         rolesNotNull = main.TRUE
         threads = []
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].rolesNotNull,
+            t = main.Thread( target=main.CLIs[ i ].rolesNotNull,
                              name="rolesNotNull-" + str( i ),
-                             args=[ ] )
+                             args=[] )
             threads.append( t )
             t.start()
 
@@ -2012,7 +2009,7 @@
         rolesResults = True
         threads = []
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].roles,
+            t = main.Thread( target=main.CLIs[ i ].roles,
                              name="roles-" + str( i ),
                              args=[] )
             threads.append( t )
@@ -2023,11 +2020,11 @@
             ONOSMastership.append( t.result )
 
         for i in range( len( ONOSMastership ) ):
-            node = str( main.activeNodes[i] + 1 )
-            if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
+            node = str( main.activeNodes[ i ] + 1 )
+            if not ONOSMastership[ i ] or "Error" in ONOSMastership[ i ]:
                 main.log.error( "Error in getting ONOS" + node + " roles" )
                 main.log.warn( "ONOS" + node + " mastership response: " +
-                               repr( ONOSMastership[i] ) )
+                               repr( ONOSMastership[ i ] ) )
                 rolesResults = False
         utilities.assert_equals(
             expect=True,
@@ -2036,7 +2033,7 @@
             onfail="Error in reading roles from ONOS" )
 
         main.step( "Check for consistency in roles from each controller" )
-        if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
+        if all( [ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
             main.log.info(
                 "Switch roles are consistent across all ONOS nodes" )
         else:
@@ -2049,7 +2046,7 @@
 
         if rolesResults and not consistentMastership:
             for i in range( len( ONOSMastership ) ):
-                node = str( main.activeNodes[i] + 1 )
+                node = str( main.activeNodes[ i ] + 1 )
                 main.log.warn( "ONOS" + node + " roles: ",
                                json.dumps( json.loads( ONOSMastership[ i ] ),
                                            sort_keys=True,
@@ -2065,7 +2062,7 @@
         intentsResults = True
         threads = []
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].intents,
+            t = main.Thread( target=main.CLIs[ i ].intents,
                              name="intents-" + str( i ),
                              args=[],
                              kwargs={ 'jsonFormat': True } )
@@ -2076,8 +2073,8 @@
             t.join()
             ONOSIntents.append( t.result )
 
-        for i in range( len( ONOSIntents) ):
-            node = str( main.activeNodes[i] + 1 )
+        for i in range( len( ONOSIntents ) ):
+            node = str( main.activeNodes[ i ] + 1 )
             if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
                 main.log.error( "Error in getting ONOS" + node + " intents" )
                 main.log.warn( "ONOS" + node + " intents response: " +
@@ -2090,7 +2087,7 @@
             onfail="Error in reading intents from ONOS" )
 
         main.step( "Check for consistency in Intents from each controller" )
-        if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
+        if all( [ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
             main.log.info( "Intents are consistent across all ONOS " +
                              "nodes" )
         else:
@@ -2139,12 +2136,12 @@
                 main.log.exception( "Error in parsing intents" )
                 main.log.error( repr( node ) )
             intentStates.append( nodeStates )
-            out = [ (i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
+            out = [ ( i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
             main.log.info( dict( out ) )
 
         if intentsResults and not consistentIntents:
             for i in range( len( main.activeNodes ) ):
-                node = str( main.activeNodes[i] + 1 )
+                node = str( main.activeNodes[ i ] + 1 )
                 main.log.warn( "ONOS" + node + " intents: " )
                 main.log.warn( json.dumps(
                     json.loads( ONOSIntents[ i ] ),
@@ -2181,7 +2178,7 @@
                             main.log.debug( json.dumps( intent ) )
                 except ( ValueError, TypeError ):
                     main.log.exception( "Exception printing intents" )
-                    main.log.debug( repr( ONOSIntents[0] ) )
+                    main.log.debug( repr( ONOSIntents[ 0 ] ) )
                     main.log.debug( repr( intentState ) )
             if sameIntents == main.FALSE:
                 try:
@@ -2195,7 +2192,7 @@
                                                 separators=( ',', ': ' ) ) )
                 except ( ValueError, TypeError ):
                     main.log.exception( "Exception printing intents" )
-                    main.log.debug( repr( ONOSIntents[0] ) )
+                    main.log.debug( repr( ONOSIntents[ 0 ] ) )
                     main.log.debug( repr( intentState ) )
             utilities.assert_equals(
                 expect=main.TRUE,
@@ -2210,7 +2207,7 @@
         for i in range( 28 ):
             main.log.info( "Checking flow table on s" + str( i + 1 ) )
             tmpFlows = main.Mininet1.getFlowTable( "s" + str( i + 1 ), version="1.3", debug=False )
-            curSwitch = main.Mininet1.flowTableComp( flows[i], tmpFlows )
+            curSwitch = main.Mininet1.flowTableComp( flows[ i ], tmpFlows )
             FlowTables = FlowTables and curSwitch
             if curSwitch == main.FALSE:
                 main.log.warn( "Differences in flow table for switch: s{}".format( i + 1 ) )
@@ -2221,7 +2218,7 @@
             onfail="Changes were found in the flow tables" )
 
         main.Mininet2.pingLongKill()
-        '''
+        """
         # main.step( "Check the continuous pings to ensure that no packets " +
         #            "were dropped during component failure" )
         main.Mininet2.pingKill( main.params[ 'TESTONUSER' ],
@@ -2251,15 +2248,14 @@
         # NOTE: Since intents are not persisted with IntnentStore,
         #       we expect loss in dataplane connectivity
         LossInPings = main.FALSE
-        '''
-
+        """
         main.step( "Leadership Election is still functional" )
         # Test of LeadershipElection
         leaderList = []
         leaderResult = main.TRUE
 
         for i in main.activeNodes:
-            cli = main.CLIs[i]
+            cli = main.CLIs[ i ]
             leaderN = cli.electionTestLeader()
             leaderList.append( leaderN )
             if leaderN == main.FALSE:
@@ -2317,8 +2313,8 @@
             for i in main.activeNodes:
                 t = main.Thread( target=utilities.retry,
                                  name="devices-" + str( i ),
-                                 args=[ main.CLIs[i].devices, [ None ] ],
-                                 kwargs= { 'sleep': 5, 'attempts': 5,
-                                           'randomTime': True } )
+                                 args=[ main.CLIs[ i ].devices, [ None ] ],
+                                 kwargs={ 'sleep': 5, 'attempts': 5,
+                                          'randomTime': True } )
                 threads.append( t )
                 t.start()
@@ -2332,8 +2328,8 @@
             for i in main.activeNodes:
                 t = main.Thread( target=utilities.retry,
                                  name="hosts-" + str( i ),
-                                 args=[ main.CLIs[i].hosts, [ None ] ],
-                                 kwargs= { 'sleep': 5, 'attempts': 5,
-                                           'randomTime': True } )
+                                 args=[ main.CLIs[ i ].hosts, [ None ] ],
+                                 kwargs={ 'sleep': 5, 'attempts': 5,
+                                          'randomTime': True } )
                 threads.append( t )
                 t.start()
@@ -2347,7 +2343,7 @@
                     main.log.error( repr( t.result ) )
                     hosts.append( None )
             for controller in range( 0, len( hosts ) ):
-                controllerStr = str( main.activeNodes[controller] + 1 )
+                controllerStr = str( main.activeNodes[ controller ] + 1 )
                 if hosts[ controller ]:
                     for host in hosts[ controller ]:
                         if host is None or host.get( 'ipAddresses', [] ) == []:
@@ -2360,8 +2356,8 @@
             for i in main.activeNodes:
                 t = main.Thread( target=utilities.retry,
                                  name="ports-" + str( i ),
-                                 args=[ main.CLIs[i].ports, [ None ] ],
-                                 kwargs= { 'sleep': 5, 'attempts': 5,
-                                           'randomTime': True } )
+                                 args=[ main.CLIs[ i ].ports, [ None ] ],
+                                 kwargs={ 'sleep': 5, 'attempts': 5,
+                                          'randomTime': True } )
                 threads.append( t )
                 t.start()
@@ -2374,8 +2370,8 @@
             for i in main.activeNodes:
                 t = main.Thread( target=utilities.retry,
                                  name="links-" + str( i ),
-                                 args=[ main.CLIs[i].links, [ None ] ],
-                                 kwargs= { 'sleep': 5, 'attempts': 5,
-                                           'randomTime': True } )
+                                 args=[ main.CLIs[ i ].links, [ None ] ],
+                                 kwargs={ 'sleep': 5, 'attempts': 5,
+                                          'randomTime': True } )
                 threads.append( t )
                 t.start()
@@ -2388,8 +2384,8 @@
             for i in main.activeNodes:
                 t = main.Thread( target=utilities.retry,
                                  name="clusters-" + str( i ),
-                                 args=[ main.CLIs[i].clusters, [ None ] ],
-                                 kwargs= { 'sleep': 5, 'attempts': 5,
-                                           'randomTime': True } )
+                                 args=[ main.CLIs[ i ].clusters, [ None ] ],
+                                 kwargs={ 'sleep': 5, 'attempts': 5,
+                                          'randomTime': True } )
                 threads.append( t )
                 t.start()
@@ -2408,18 +2404,18 @@
                all( e is None for e in ports ) and\
                all( e is None for e in links ) and\
                all( e is None for e in clusters ):
-                   topoFailMsg = "Could not get topology from ONOS"
-                   main.log.error( topoFailMsg )
-                   continue  # Try again, No use trying to compare
+                topoFailMsg = "Could not get topology from ONOS"
+                main.log.error( topoFailMsg )
+                continue  # Try again, No use trying to compare
 
             mnSwitches = main.Mininet1.getSwitches()
             mnLinks = main.Mininet1.getLinks()
             mnHosts = main.Mininet1.getHosts()
             for controller in range( len( main.activeNodes ) ):
-                controllerStr = str( main.activeNodes[controller] + 1 )
+                controllerStr = str( main.activeNodes[ controller ] + 1 )
                 if devices[ controller ] and ports[ controller ] and\
-                    "Error" not in devices[ controller ] and\
-                    "Error" not in ports[ controller ]:
+                        "Error" not in devices[ controller ] and\
+                        "Error" not in ports[ controller ]:
 
                     try:
                         currentDevicesResult = main.Mininet1.compareSwitches(
@@ -2472,29 +2468,29 @@
                 mappings = {}
                 for i in range( 1, 29 ):  # hosts 1 through 28
                     # set up correct variables:
-                    macId = "00:" * 5 + hex( i ).split( "0x" )[1].upper().zfill(2)
+                    macId = "00:" * 5 + hex( i ).split( "0x" )[ 1 ].upper().zfill( 2 )
                     if i == 1:
-                        deviceId = "1000".zfill(16)
+                        deviceId = "1000".zfill( 16 )
                     elif i == 2:
-                        deviceId = "2000".zfill(16)
+                        deviceId = "2000".zfill( 16 )
                     elif i == 3:
-                        deviceId = "3000".zfill(16)
+                        deviceId = "3000".zfill( 16 )
                     elif i == 4:
-                        deviceId = "3004".zfill(16)
+                        deviceId = "3004".zfill( 16 )
                     elif i == 5:
-                        deviceId = "5000".zfill(16)
+                        deviceId = "5000".zfill( 16 )
                     elif i == 6:
-                        deviceId = "6000".zfill(16)
+                        deviceId = "6000".zfill( 16 )
                     elif i == 7:
-                        deviceId = "6007".zfill(16)
+                        deviceId = "6007".zfill( 16 )
                     elif i >= 8 and i <= 17:
                         dpid = '3' + str( i ).zfill( 3 )
-                        deviceId = dpid.zfill(16)
+                        deviceId = dpid.zfill( 16 )
                     elif i >= 18 and i <= 27:
                         dpid = '6' + str( i ).zfill( 3 )
-                        deviceId = dpid.zfill(16)
+                        deviceId = dpid.zfill( 16 )
                     elif i == 28:
-                        deviceId = "2800".zfill(16)
+                        deviceId = "2800".zfill( 16 )
                     mappings[ macId ] = deviceId
                 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
                     if hosts[ controller ] == []:
@@ -2514,7 +2510,7 @@
                                 assert location, "location field could not be found for this host object"
 
                                 # Trim the protocol identifier off deviceId
-                                device = str( location.get( 'elementId' ) ).split(':')[1]
+                                device = str( location.get( 'elementId' ) ).split( ':' )[ 1 ]
                                 assert device, "elementId field could not be found for this host location object"
 
                                 port = location.get( 'port' )
@@ -2525,7 +2521,7 @@
                                     if str( port ) != "1":
                                         main.log.error( "The attachment port is incorrect for " +
                                                         "host " + str( mac ) +
-                                                        ". Expected: 1 Actual: " + str( port) )
+                                                        ". Expected: 1 Actual: " + str( port ) )
                                         hostAttachment = False
                                     if device != mappings[ str( mac ) ]:
                                         main.log.error( "The attachment device is incorrect for " +
@@ -2569,7 +2565,7 @@
         main.step( "Hosts view is consistent across all ONOS nodes" )
         consistentHostsResult = main.TRUE
         for controller in range( len( hosts ) ):
-            controllerStr = str( main.activeNodes[controller] + 1 )
+            controllerStr = str( main.activeNodes[ controller ] + 1 )
             if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
                 if hosts[ controller ] == hosts[ 0 ]:
                     continue
@@ -2611,7 +2607,7 @@
         main.step( "Clusters view is consistent across all ONOS nodes" )
         consistentClustersResult = main.TRUE
         for controller in range( len( clusters ) ):
-            controllerStr = str( main.activeNodes[controller] + 1 )
+            controllerStr = str( main.activeNodes[ controller ] + 1 )
             if "Error" not in clusters[ controller ]:
                 if clusters[ controller ] == clusters[ 0 ]:
                     continue
@@ -2641,7 +2637,7 @@
             numClusters = len( json.loads( clusters[ 0 ] ) )
         except ( ValueError, TypeError ):
             main.log.exception( "Error parsing clusters[0]: " +
-                                repr( clusters[0] ) )
+                                repr( clusters[ 0 ] ) )
             numClusters = "ERROR"
         clusterResults = main.FALSE
         if numClusters == 1:
@@ -2691,7 +2687,7 @@
         main.step( "Checking ONOS nodes" )
         nodeResults = utilities.retry( main.HA.nodesCheck,
                                        False,
-                                       args=[main.activeNodes],
+                                       args=[ main.activeNodes ],
                                        attempts=5 )
         utilities.assert_equals( expect=True, actual=nodeResults,
                                  onpass="Nodes check successful",
@@ -2699,8 +2695,8 @@
         if not nodeResults:
             for i in main.activeNodes:
                 main.log.debug( "{} components not ACTIVE: \n{}".format(
-                    main.CLIs[i].name,
-                    main.CLIs[i].sendline( "scr:list | grep -v ACTIVE" ) ) )
+                    main.CLIs[ i ].name,
+                    main.CLIs[ i ].sendline( "scr:list | grep -v ACTIVE" ) ) )
 
         if not topoResult:
             main.cleanup()
@@ -2777,7 +2773,7 @@
         switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
 
         description = "Killing a switch to ensure it is discovered correctly"
-        onosCli = main.CLIs[ main.activeNodes[0] ]
+        onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
         main.case( description )
         switch = main.params[ 'kill' ][ 'switch' ]
         switchDPID = main.params[ 'kill' ][ 'dpid' ]
@@ -2815,7 +2811,7 @@
         switch = main.params[ 'kill' ][ 'switch' ]
         switchDPID = main.params[ 'kill' ][ 'dpid' ]
         links = main.params[ 'kill' ][ 'links' ].split()
-        onosCli = main.CLIs[ main.activeNodes[0] ]
+        onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
         description = "Adding a switch to ensure it is discovered correctly"
         main.case( description )
 
@@ -2893,15 +2889,15 @@
             main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
 
         try:
-            timerLog = open( main.logdir + "/Timers.csv", 'w')
+            timerLog = open( main.logdir + "/Timers.csv", 'w' )
             main.log.error( ", ".join( labels ) + "\n" + ", ".join( data ) )
             timerLog.write( ", ".join( labels ) + "\n" + ", ".join( data ) )
             timerLog.close()
-        except NameError, e:
-            main.log.exception(e)
+        except NameError as e:
+            main.log.exception( e )
 
         main.step( "Stopping webserver" )
-        status = main.Server.stop( )
+        status = main.Server.stop()
         utilities.assert_equals( expect=main.TRUE, actual=status,
                                  onpass="Stop Server",
                                  onfail="Failled to stop SimpleHTTPServer" )
@@ -2918,9 +2914,9 @@
         assert main.CLIs, "main.CLIs not defined"
         assert main.nodes, "main.nodes not defined"
 
-        main.case("Start Leadership Election app")
+        main.case( "Start Leadership Election app" )
         main.step( "Install leadership election app" )
-        onosCli = main.CLIs[ main.activeNodes[0] ]
+        onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
         appResult = onosCli.activateApp( "org.onosproject.election" )
         utilities.assert_equals(
             expect=main.TRUE,
@@ -2930,9 +2926,9 @@
 
         main.step( "Run for election on each node" )
         for i in main.activeNodes:
-            main.CLIs[i].electionTestRun()
-        time.sleep(5)
-        activeCLIs = [ main.CLIs[i] for i in main.activeNodes ]
+            main.CLIs[ i ].electionTestRun()
+        time.sleep( 5 )
+        activeCLIs = [ main.CLIs[ i ] for i in main.activeNodes ]
         sameResult, leaders = main.HA.consistentLeaderboards( activeCLIs )
         utilities.assert_equals(
             expect=True,
@@ -2942,7 +2938,7 @@
 
         if sameResult:
             leader = leaders[ 0 ][ 0 ]
-            if main.nodes[ main.activeNodes[0] ].ip_address in leader:
+            if main.nodes[ main.activeNodes[ 0 ] ].ip_address in leader:
                 correctLeader = True
             else:
                 correctLeader = False
@@ -2992,7 +2988,7 @@
         electionResult = main.TRUE
 
         for i in main.activeNodes:  # run test election on each node
-            if main.CLIs[i].electionTestRun() == main.FALSE:
+            if main.CLIs[ i ].electionTestRun() == main.FALSE:
                 electionResult = main.FALSE
         utilities.assert_equals(
             expect=main.TRUE,
@@ -3007,7 +3003,7 @@
 
         main.step( "Check that each node shows the same leader and candidates" )
         failMessage = "Nodes have different leaderboards"
-        activeCLIs = [ main.CLIs[i] for i in main.activeNodes ]
+        activeCLIs = [ main.CLIs[ i ] for i in main.activeNodes ]
         sameResult, oldLeaders = main.HA.consistentLeaderboards( activeCLIs )
         if sameResult:
             oldLeader = oldLeaders[ 0 ][ 0 ]
@@ -3057,7 +3053,7 @@
         if newLeader == oldLeader:
             newLeaderResult = False
             main.log.error( "All nodes still see old leader: " + str( oldLeader ) +
-                    " as the current leader" )
+                            " as the current leader" )
         utilities.assert_equals(
             expect=True,
             actual=newLeaderResult,
@@ -3074,7 +3070,7 @@
             else:
                 main.log.info( "Expected no leader, got: " + str( newLeader ) )
                 correctCandidateResult = main.FALSE
-        elif len( oldLeaders[0] ) >= 3:
+        elif len( oldLeaders[ 0 ] ) >= 3:
             if newLeader == oldLeaders[ 0 ][ 2 ]:
                 # correct leader was elected
                 correctCandidateResult = main.TRUE
@@ -3114,10 +3110,10 @@
         positionResult, reRunLeaders = main.HA.consistentLeaderboards( activeCLIs )
 
         # Check that the re-elected node is last on the candidate List
-        if not reRunLeaders[0]:
+        if not reRunLeaders[ 0 ]:
             positionResult = main.FALSE
         elif oldLeader != reRunLeaders[ 0 ][ -1 ]:
-            main.log.error( "Old Leader ({}) not in the proper position: {} ".format( str( oldLeader),
+            main.log.error( "Old Leader ({}) not in the proper position: {} ".format( str( oldLeader ),
                                                                                       str( reRunLeaders[ 0 ] ) ) )
             positionResult = main.FALSE
         utilities.assert_equals(
@@ -3141,15 +3137,15 @@
         # Variables for the distributed primitives tests
         main.pCounterName = "TestON-Partitions"
         main.pCounterValue = 0
-        main.onosSet = set([])
+        main.onosSet = set( [] )
         main.onosSetName = "TestON-set"
 
         description = "Install Primitives app"
         main.case( description )
         main.step( "Install Primitives app" )
         appName = "org.onosproject.distributedprimitives"
-        node = main.activeNodes[0]
-        appResults = main.CLIs[node].activateApp( appName )
+        node = main.activeNodes[ 0 ]
+        appResults = main.CLIs[ node ].activateApp( appName )
         utilities.assert_equals( expect=main.TRUE,
                                  actual=appResults,
                                  onpass="Primitives app activated",
diff --git a/TestON/tests/HA/HAsingleInstanceRestart/HAsingleInstanceRestart.py b/TestON/tests/HA/HAsingleInstanceRestart/HAsingleInstanceRestart.py
index 4f15a60..605ad65 100644
--- a/TestON/tests/HA/HAsingleInstanceRestart/HAsingleInstanceRestart.py
+++ b/TestON/tests/HA/HAsingleInstanceRestart/HAsingleInstanceRestart.py
@@ -22,8 +22,6 @@
 CASE16: Install Distributed Primitives app
 CASE17: Check for basic functionality with distributed primitives
 """
-
-
 class HAsingleInstanceRestart:
 
     def __init__( self ):
@@ -124,7 +122,7 @@
                             filePath + topoName,
                             main.Mininet1.home,
                             direction="to" )
-        mnResult = main.Mininet1.startNet( )
+        mnResult = main.Mininet1.startNet()
         utilities.assert_equals( expect=main.TRUE, actual=mnResult,
                                  onpass="Mininet Started",
                                  onfail="Error starting Mininet" )
@@ -167,7 +165,7 @@
                   'seamless="seamless"></iframe>\n'
         graphs += ']]></ac:plain-text-body>\n'
         graphs += '</ac:structured-macro>\n'
-        main.log.wiki(graphs)
+        main.log.wiki( graphs )
 
         main.CLIs = []
         main.nodes = []
@@ -224,9 +222,9 @@
         cliResults = main.TRUE
         threads = []
         for i in range( main.numCtrls ):
-            t = main.Thread( target=main.CLIs[i].startOnosCli,
+            t = main.Thread( target=main.CLIs[ i ].startOnosCli,
                              name="startOnosCli-" + str( i ),
-                             args=[main.nodes[i].ip_address] )
+                             args=[ main.nodes[ i ].ip_address ] )
             threads.append( t )
             t.start()
 
@@ -251,7 +249,7 @@
         main.step( "Checking ONOS nodes" )
         nodeResults = utilities.retry( main.HA.nodesCheck,
                                        False,
-                                       args=[main.activeNodes],
+                                       args=[ main.activeNodes ],
                                        attempts=5 )
 
         utilities.assert_equals( expect=True, actual=nodeResults,
@@ -260,7 +258,7 @@
 
         if not nodeResults:
             for i in main.activeNodes:
-                cli = main.CLIs[i]
+                cli = main.CLIs[ i ]
                 main.log.debug( "{} components not ACTIVE: \n{}".format(
                     cli.name,
                     cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
@@ -272,7 +270,7 @@
         # get data from the params
         apps = main.params.get( 'apps' )
         if apps:
-            apps = apps.split(',')
+            apps = apps.split( ',' )
             main.log.warn( apps )
             activateResult = True
             for app in apps:
@@ -299,8 +297,8 @@
             main.log.debug( config )
             checkResult = main.TRUE
             for component in config:
-                for setting in config[component]:
-                    value = config[component][setting]
+                for setting in config[ component ]:
+                    value = config[ component ][ setting ]
                     check = main.CLIs[ 0 ].setCfg( component, setting, value )
                     main.log.info( "Value was changed? {}".format( main.TRUE == check ) )
                     checkResult = check and checkResult
@@ -315,7 +313,7 @@
         appCheck = main.TRUE
         threads = []
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].appToIDCheck,
+            t = main.Thread( target=main.CLIs[ i ].appToIDCheck,
                              name="appToIDCheck-" + str( i ),
                              args=[] )
             threads.append( t )
@@ -325,9 +323,9 @@
             t.join()
             appCheck = appCheck and t.result
         if appCheck != main.TRUE:
-            node = main.activeNodes[0]
-            main.log.warn( main.CLIs[node].apps() )
-            main.log.warn( main.CLIs[node].appIDs() )
+            node = main.activeNodes[ 0 ]
+            main.log.warn( main.CLIs[ node ].apps() )
+            main.log.warn( main.CLIs[ node ].appIDs() )
         utilities.assert_equals( expect=main.TRUE, actual=appCheck,
                                  onpass="App Ids seem to be correct",
                                  onfail="Something is wrong with app Ids" )
@@ -395,9 +393,9 @@
         # Manually assign mastership to the controller we want
         roleCall = main.TRUE
 
-        ipList = [ ]
+        ipList = []
         deviceList = []
-        onosCli = main.CLIs[ main.activeNodes[0] ]
+        onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
         try:
             # Assign mastership to specific controllers. This assignment was
             # determined for a 7 node cluster, but will work with any sized
@@ -471,8 +469,8 @@
         #       atomic and is actually a multi step process
         time.sleep( 5 )
         for i in range( len( ipList ) ):
-            ip = ipList[i]
-            deviceId = deviceList[i]
+            ip = ipList[ i ]
+            deviceId = deviceList[ i ]
             # Check assignment
             master = onosCli.getRole( deviceId ).get( 'master' )
             if ip in master:
@@ -510,7 +508,7 @@
 
         # install onos-app-fwd
         main.step( "Install reactive forwarding app" )
-        onosCli = main.CLIs[ main.activeNodes[0] ]
+        onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
         installResults = onosCli.activateApp( "org.onosproject.fwd" )
         utilities.assert_equals( expect=main.TRUE, actual=installResults,
                                  onpass="Install fwd successful",
@@ -520,7 +518,7 @@
         appCheck = main.TRUE
         threads = []
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].appToIDCheck,
+            t = main.Thread( target=main.CLIs[ i ].appToIDCheck,
                              name="appToIDCheck-" + str( i ),
                              args=[] )
             threads.append( t )
@@ -545,13 +543,13 @@
         pingResult = main.Mininet1.pingall()
         time2 = time.time()
         if not pingResult:
-            main.log.warn("First pingall failed. Trying again...")
+            main.log.warn( "First pingall failed. Trying again..." )
             pingResult = main.Mininet1.pingall()
             passMsg += " on the second try"
         utilities.assert_equals(
             expect=main.TRUE,
             actual=pingResult,
-            onpass= passMsg,
+            onpass=passMsg,
             onfail="Reactive Pingall failed, " +
                    "one or more ping pairs failed" )
         main.log.info( "Time for pingall: %2f seconds" %
@@ -560,8 +558,8 @@
         time.sleep( 11 )
         # uninstall onos-app-fwd
         main.step( "Uninstall reactive forwarding app" )
-        node = main.activeNodes[0]
-        uninstallResult = main.CLIs[node].deactivateApp( "org.onosproject.fwd" )
+        node = main.activeNodes[ 0 ]
+        uninstallResult = main.CLIs[ node ].deactivateApp( "org.onosproject.fwd" )
         utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
                                  onpass="Uninstall fwd successful",
                                  onfail="Uninstall fwd failed" )
@@ -570,7 +568,7 @@
         threads = []
         appCheck2 = main.TRUE
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].appToIDCheck,
+            t = main.Thread( target=main.CLIs[ i ].appToIDCheck,
                              name="appToIDCheck-" + str( i ),
                              args=[] )
             threads.append( t )
@@ -580,9 +578,9 @@
             t.join()
             appCheck2 = appCheck2 and t.result
         if appCheck2 != main.TRUE:
-            node = main.activeNodes[0]
-            main.log.warn( main.CLIs[node].apps() )
-            main.log.warn( main.CLIs[node].appIDs() )
+            node = main.activeNodes[ 0 ]
+            main.log.warn( main.CLIs[ node ].apps() )
+            main.log.warn( main.CLIs[ node ].appIDs() )
         utilities.assert_equals( expect=main.TRUE, actual=appCheck2,
                                  onpass="App Ids seem to be correct",
                                  onfail="Something is wrong with app Ids" )
@@ -610,8 +608,8 @@
                 host2Id = host2Dict.get( 'id', None )
             if host1Id and host2Id:
                 nodeNum = ( i % len( main.activeNodes ) )
-                node = main.activeNodes[nodeNum]
-                tmpId = main.CLIs[node].addHostIntent( host1Id, host2Id )
+                node = main.activeNodes[ nodeNum ]
+                tmpId = main.CLIs[ node ].addHostIntent( host1Id, host2Id )
                 if tmpId:
                     main.log.info( "Added intent with id: " + tmpId )
                     intentIds.append( tmpId )
@@ -621,8 +619,8 @@
             else:
                 main.log.error( "Error, getHost() failed for h" + str( i ) +
                                 " and/or h" + str( i + 10 ) )
-                node = main.activeNodes[0]
-                hosts = main.CLIs[node].hosts()
+                node = main.activeNodes[ 0 ]
+                hosts = main.CLIs[ node ].hosts()
                 main.log.warn( "Hosts output: " )
                 try:
                     main.log.warn( json.dumps( json.loads( hosts ),
@@ -690,7 +688,7 @@
                 for i in range( 14 ):
                     topics.append( "work-partition-" + str( i ) )
                 main.log.debug( topics )
-                ONOStopics = [ j['topic'] for j in parsedLeaders ]
+                ONOStopics = [ j[ 'topic' ] for j in parsedLeaders ]
                 for topic in topics:
                     if topic not in ONOStopics:
                         main.log.error( "Error: " + topic +
@@ -704,13 +702,13 @@
         # Check all nodes
         if missing:
             for i in main.activeNodes:
-                response = main.CLIs[i].leaders( jsonFormat=False)
-                main.log.warn( str( main.CLIs[i].name ) + " leaders output: \n" +
+                response = main.CLIs[ i ].leaders( jsonFormat=False )
+                main.log.warn( str( main.CLIs[ i ].name ) + " leaders output: \n" +
                                str( response ) )
 
         partitions = onosCli.partitions()
         try:
-            if partitions :
+            if partitions:
                 parsedPartitions = json.loads( partitions )
                 main.log.warn( json.dumps( parsedPartitions,
                                            sort_keys=True,
@@ -725,7 +723,7 @@
             main.log.error( repr( partitions ) )
         pendingMap = onosCli.pendingMap()
         try:
-            if pendingMap :
+            if pendingMap:
                 parsedPending = json.loads( pendingMap )
                 main.log.warn( json.dumps( parsedPending,
                                            sort_keys=True,
@@ -744,21 +742,21 @@
             main.log.error( "Error in pushing host intents to ONOS" )
 
         main.step( "Intent Anti-Entropy dispersion" )
-        for j in range(100):
+        for j in range( 100 ):
             correct = True
             main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
             for i in main.activeNodes:
                 onosIds = []
-                ids = main.CLIs[i].getAllIntentsId()
+                ids = main.CLIs[ i ].getAllIntentsId()
                 onosIds.append( ids )
-                main.log.debug( "Intents in " + main.CLIs[i].name + ": " +
+                main.log.debug( "Intents in " + main.CLIs[ i ].name + ": " +
                                 str( sorted( onosIds ) ) )
                 if sorted( ids ) != sorted( intentIds ):
                     main.log.warn( "Set of intent IDs doesn't match" )
                     correct = False
                     break
                 else:
-                    intents = json.loads( main.CLIs[i].intents() )
+                    intents = json.loads( main.CLIs[ i ].intents() )
                     for intent in intents:
                         if intent[ 'state' ] != "INSTALLED":
                             main.log.warn( "Intent " + intent[ 'id' ] +
@@ -768,14 +766,14 @@
             if correct:
                 break
             else:
-                time.sleep(1)
+                time.sleep( 1 )
         if not intentStop:
             intentStop = time.time()
         global gossipTime
         gossipTime = intentStop - intentStart
         main.log.info( "It took about " + str( gossipTime ) +
                         " seconds for all intents to appear in each node" )
-        gossipPeriod = int( main.params['timers']['gossip'] )
+        gossipPeriod = int( main.params[ 'timers' ][ 'gossip' ] )
         maxGossipTime = gossipPeriod * len( main.activeNodes )
         utilities.assert_greater_equals(
                 expect=maxGossipTime, actual=gossipTime,
@@ -837,7 +835,7 @@
                     # FIXME: this should only be after we start the app
                     topics.append( "org.onosproject.election" )
                     main.log.debug( topics )
-                    ONOStopics = [ j['topic'] for j in parsedLeaders ]
+                    ONOStopics = [ j[ 'topic' ] for j in parsedLeaders ]
                     for topic in topics:
                         if topic not in ONOStopics:
                             main.log.error( "Error: " + topic +
@@ -851,14 +849,14 @@
             # Check all nodes
             if missing:
                 for i in main.activeNodes:
-                    node = main.CLIs[i]
-                    response = node.leaders( jsonFormat=False)
+                    node = main.CLIs[ i ]
+                    response = node.leaders( jsonFormat=False )
                     main.log.warn( str( node.name ) + " leaders output: \n" +
                                    str( response ) )
 
             partitions = onosCli.partitions()
             try:
-                if partitions :
+                if partitions:
                     parsedPartitions = json.loads( partitions )
                     main.log.warn( json.dumps( parsedPartitions,
                                                sort_keys=True,
@@ -873,7 +871,7 @@
                 main.log.error( repr( partitions ) )
             pendingMap = onosCli.pendingMap()
             try:
-                if pendingMap :
+                if pendingMap:
                     parsedPending = json.loads( pendingMap )
                     main.log.warn( json.dumps( parsedPending,
                                                sort_keys=True,
@@ -900,7 +898,7 @@
                                 "functionality and check the state of " +\
                                 "the intent"
 
-        onosCli = main.CLIs[ main.activeNodes[0] ]
+        onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
         main.step( "Check Intent state" )
         installedCheck = True
         # Print the intent states
@@ -980,7 +978,7 @@
                 # FIXME: topics.append( "org.onosproject.election" )
                 # Print leaders output
                 main.log.debug( topics )
-                ONOStopics = [ j['topic'] for j in parsedLeaders ]
+                ONOStopics = [ j[ 'topic' ] for j in parsedLeaders ]
                 for topic in topics:
                     if topic not in ONOStopics:
                         main.log.error( "Error: " + topic +
@@ -997,8 +995,8 @@
         # Check all nodes
         if topicCheck:
             for i in main.activeNodes:
-                node = main.CLIs[i]
-                response = node.leaders( jsonFormat=False)
+                node = main.CLIs[ i ]
+                response = node.leaders( jsonFormat=False )
                 main.log.warn( str( node.name ) + " leaders output: \n" +
                                str( response ) )
 
@@ -1008,7 +1006,7 @@
         # Print partitions
         partitions = onosCli.partitions()
         try:
-            if partitions :
+            if partitions:
                 parsedPartitions = json.loads( partitions )
                 main.log.warn( json.dumps( parsedPartitions,
                                            sort_keys=True,
@@ -1024,7 +1022,7 @@
         # Print Pending Map
         pendingMap = onosCli.pendingMap()
         try:
-            if pendingMap :
+            if pendingMap:
                 parsedPending = json.loads( pendingMap )
                 main.log.warn( json.dumps( parsedPending,
                                            sort_keys=True,
@@ -1078,7 +1076,7 @@
                     # FIXME: this should only be after we start the app
                     topics.append( "org.onosproject.election" )
                     main.log.debug( topics )
-                    ONOStopics = [ j['topic'] for j in parsedLeaders ]
+                    ONOStopics = [ j[ 'topic' ] for j in parsedLeaders ]
                     for topic in topics:
                         if topic not in ONOStopics:
                             main.log.error( "Error: " + topic +
@@ -1091,14 +1089,14 @@
                 main.log.error( repr( leaders ) )
             if missing:
                 for i in main.activeNodes:
-                    node = main.CLIs[i]
-                    response = node.leaders( jsonFormat=False)
+                    node = main.CLIs[ i ]
+                    response = node.leaders( jsonFormat=False )
                     main.log.warn( str( node.name ) + " leaders output: \n" +
                                    str( response ) )
 
             partitions = onosCli.partitions()
             try:
-                if partitions :
+                if partitions:
                     parsedPartitions = json.loads( partitions )
                     main.log.warn( json.dumps( parsedPartitions,
                                                sort_keys=True,
@@ -1113,7 +1111,7 @@
                 main.log.error( repr( partitions ) )
             pendingMap = onosCli.pendingMap()
             try:
-                if pendingMap :
+                if pendingMap:
                     parsedPending = json.loads( pendingMap )
                     main.log.warn( json.dumps( parsedPending,
                                                sort_keys=True,
@@ -1126,8 +1124,8 @@
                 main.log.exception( "Error parsing pending map" )
                 main.log.error( repr( pendingMap ) )
         # Print flowrules
-        node = main.activeNodes[0]
-        main.log.debug( main.CLIs[node].flows( jsonFormat=False ) )
+        node = main.activeNodes[ 0 ]
+        main.log.debug( main.CLIs[ node ].flows( jsonFormat=False ) )
         main.step( "Wait a minute then ping again" )
         # the wait is above
         PingResult = main.TRUE
@@ -1182,7 +1180,7 @@
         rolesNotNull = main.TRUE
         threads = []
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].rolesNotNull,
+            t = main.Thread( target=main.CLIs[ i ].rolesNotNull,
                              name="rolesNotNull-" + str( i ),
                              args=[] )
             threads.append( t )
@@ -1259,10 +1257,10 @@
         main.step( "Each host has an IP address" )
         ipResult = main.TRUE
         for controller in range( 0, len( hosts ) ):
-            controllerStr = str( main.activeNodes[controller] + 1 )
+            controllerStr = str( main.activeNodes[ controller ] + 1 )
             if hosts[ controller ]:
                 for host in hosts[ controller ]:
-                    if not host.get( 'ipAddresses', [ ] ):
+                    if not host.get( 'ipAddresses', [] ):
                         main.log.error( "Error with host ips on controller" +
                                         controllerStr + ": " + str( host ) )
                         ipResult = main.FALSE
@@ -1297,14 +1295,14 @@
         mnLinks = main.Mininet1.getLinks()
         mnHosts = main.Mininet1.getHosts()
         for controller in main.activeNodes:
-            controllerStr = str( main.activeNodes[controller] + 1 )
+            controllerStr = str( main.activeNodes[ controller ] + 1 )
             if devices[ controller ] and ports[ controller ] and\
-                "Error" not in devices[ controller ] and\
-                "Error" not in ports[ controller ]:
-                    currentDevicesResult = main.Mininet1.compareSwitches(
-                            mnSwitches,
-                            json.loads( devices[ controller ] ),
-                            json.loads( ports[ controller ] ) )
+                    "Error" not in devices[ controller ] and\
+                    "Error" not in ports[ controller ]:
+                currentDevicesResult = main.Mininet1.compareSwitches(
+                        mnSwitches,
+                        json.loads( devices[ controller ] ),
+                        json.loads( ports[ controller ] ) )
             else:
                 currentDevicesResult = main.FALSE
             utilities.assert_equals( expect=main.TRUE,
@@ -1390,7 +1388,7 @@
             main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
 
         main.step( "Killing ONOS processes" )
-        killResult = main.ONOSbench.onosKill( main.nodes[0].ip_address )
+        killResult = main.ONOSbench.onosKill( main.nodes[ 0 ].ip_address )
         start = time.time()
         utilities.assert_equals( expect=main.TRUE, actual=killResult,
                                  onpass="ONOS Killed",
@@ -1399,7 +1397,7 @@
         main.step( "Checking if ONOS is up yet" )
         count = 0
         while count < 10:
-            onos1Isup = main.ONOSbench.isup( main.nodes[0].ip_address )
+            onos1Isup = main.ONOSbench.isup( main.nodes[ 0 ].ip_address )
             if onos1Isup == main.TRUE:
                 elapsed = time.time() - start
                 break
@@ -1410,7 +1408,7 @@
                                  onfail="ONOS failed to start" )
 
         main.step( "Starting ONOS CLI sessions" )
-        cliResults = main.ONOScli1.startOnosCli( main.nodes[0].ip_address )
+        cliResults = main.ONOScli1.startOnosCli( main.nodes[ 0 ].ip_address )
         utilities.assert_equals( expect=main.TRUE, actual=cliResults,
                                  onpass="ONOS cli startup successful",
                                  onfail="ONOS cli startup failed" )
@@ -1440,9 +1438,9 @@
         rolesNotNull = main.TRUE
         threads = []
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].rolesNotNull,
+            t = main.Thread( target=main.CLIs[ i ].rolesNotNull,
                              name="rolesNotNull-" + str( i ),
-                             args=[ ] )
+                             args=[] )
             threads.append( t )
             t.start()
 
@@ -1520,7 +1518,7 @@
             for intent in json.loads( node ):
                 nodeStates.append( intent[ 'state' ] )
             intentStates.append( nodeStates )
-            out = [ (i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
+            out = [ ( i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
             main.log.info( dict( out ) )
 
         # NOTE: Store has no durability, so intents are lost across system
@@ -1553,7 +1551,7 @@
                             main.log.debug( json.dumps( intent ) )
                 except ( ValueError, TypeError ):
                     main.log.exception( "Exception printing intents" )
-                    main.log.debug( repr( ONOSIntents[0] ) )
+                    main.log.debug( repr( ONOSIntents[ 0 ] ) )
                     main.log.debug( repr( intentState ) )
             if sameIntents == main.FALSE:
                 try:
@@ -1567,7 +1565,7 @@
                                                 separators=( ',', ': ' ) ) )
                 except ( ValueError, TypeError ):
                     main.log.exception( "Exception printing intents" )
-                    main.log.debug( repr( ONOSIntents[0] ) )
+                    main.log.debug( repr( ONOSIntents[ 0 ] ) )
                     main.log.debug( repr( intentState ) )
             utilities.assert_equals(
                 expect=main.TRUE,
@@ -1582,7 +1580,7 @@
         for i in range( 28 ):
             main.log.info( "Checking flow table on s" + str( i + 1 ) )
             tmpFlows = main.Mininet1.getFlowTable( "s" + str( i + 1 ), version="1.3", debug=False )
-            curSwitch = main.Mininet1.flowTableComp( flows[i], tmpFlows )
+            curSwitch = main.Mininet1.flowTableComp( flows[ i ], tmpFlows )
             FlowTables = FlowTables and curSwitch
             if curSwitch == main.FALSE:
                 main.log.warn( "Differences in flow table for switch: s{}".format( i + 1 ) )
@@ -1681,8 +1679,8 @@
             for controller in range( main.numCtrls ):
                 controllerStr = str( controller + 1 )
                 if devices[ controller ] and ports[ controller ] and\
-                    "Error" not in devices[ controller ] and\
-                    "Error" not in ports[ controller ]:
+                        "Error" not in devices[ controller ] and\
+                        "Error" not in ports[ controller ]:
 
                     try:
                         currentDevicesResult = main.Mininet1.compareSwitches(
@@ -1734,29 +1732,29 @@
                 mappings = {}
                 for i in range( 1, 29 ):  # hosts 1 through 28
                     # set up correct variables:
-                    macId = "00:" * 5 + hex( i ).split( "0x" )[1].upper().zfill(2)
+                    macId = "00:" * 5 + hex( i ).split( "0x" )[ 1 ].upper().zfill( 2 )
                     if i == 1:
-                        deviceId = "1000".zfill(16)
+                        deviceId = "1000".zfill( 16 )
                     elif i == 2:
-                        deviceId = "2000".zfill(16)
+                        deviceId = "2000".zfill( 16 )
                     elif i == 3:
-                        deviceId = "3000".zfill(16)
+                        deviceId = "3000".zfill( 16 )
                     elif i == 4:
-                        deviceId = "3004".zfill(16)
+                        deviceId = "3004".zfill( 16 )
                     elif i == 5:
-                        deviceId = "5000".zfill(16)
+                        deviceId = "5000".zfill( 16 )
                     elif i == 6:
-                        deviceId = "6000".zfill(16)
+                        deviceId = "6000".zfill( 16 )
                     elif i == 7:
-                        deviceId = "6007".zfill(16)
+                        deviceId = "6007".zfill( 16 )
                     elif i >= 8 and i <= 17:
                         dpid = '3' + str( i ).zfill( 3 )
-                        deviceId = dpid.zfill(16)
+                        deviceId = dpid.zfill( 16 )
                     elif i >= 18 and i <= 27:
                         dpid = '6' + str( i ).zfill( 3 )
-                        deviceId = dpid.zfill(16)
+                        deviceId = dpid.zfill( 16 )
                     elif i == 28:
-                        deviceId = "2800".zfill(16)
+                        deviceId = "2800".zfill( 16 )
                     mappings[ macId ] = deviceId
                 if hosts[ controller ] or "Error" not in hosts[ controller ]:
                     if hosts[ controller ] == []:
@@ -1776,7 +1774,7 @@
                                 assert location, "location field could not be found for this host object"
 
                                 # Trim the protocol identifier off deviceId
-                                device = str( location.get( 'elementId' ) ).split(':')[1]
+                                device = str( location.get( 'elementId' ) ).split( ':' )[ 1 ]
                                 assert device, "elementId field could not be found for this host location object"
 
                                 port = location.get( 'port' )
@@ -1787,7 +1785,7 @@
                                     if str( port ) != "1":
                                         main.log.error( "The attachment port is incorrect for " +
                                                         "host " + str( mac ) +
-                                                        ". Expected: 1 Actual: " + str( port) )
+                                                        ". Expected: 1 Actual: " + str( port ) )
                                         hostAttachment = False
                                     if device != mappings[ str( mac ) ]:
                                         main.log.error( "The attachment device is incorrect for " +
@@ -1821,7 +1819,7 @@
                 numClusters = len( json.loads( clusters[ 0 ] ) )
             except ( ValueError, TypeError ):
                 main.log.exception( "Error parsing clusters[0]: " +
-                                    repr( clusters[0] ) )
+                                    repr( clusters[ 0 ] ) )
                 numClusters = "ERROR"
                 clusterResults = main.FALSE
             if numClusters == 1:
@@ -1850,7 +1848,7 @@
         main.step( "Checking ONOS nodes" )
         nodeResults = utilities.retry( main.HA.nodesCheck,
                                        False,
-                                       args=[main.activeNodes],
+                                       args=[ main.activeNodes ],
                                        attempts=5 )
 
         utilities.assert_equals( expect=True, actual=nodeResults,
@@ -1859,8 +1857,8 @@
         if not nodeResults:
             for i in main.activeNodes:
                 main.log.debug( "{} components not ACTIVE: \n{}".format(
-                    main.CLIs[i].name,
-                    main.CLIs[i].sendline( "scr:list | grep -v ACTIVE" ) ) )
+                    main.CLIs[ i ].name,
+                    main.CLIs[ i ].sendline( "scr:list | grep -v ACTIVE" ) ) )
 
         if not topoResult:
             main.cleanup()
@@ -1931,7 +1929,7 @@
         switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
 
         description = "Killing a switch to ensure it is discovered correctly"
-        onosCli = main.CLIs[ main.activeNodes[0] ]
+        onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
         main.case( description )
         switch = main.params[ 'kill' ][ 'switch' ]
         switchDPID = main.params[ 'kill' ][ 'dpid' ]
@@ -1967,7 +1965,7 @@
         switch = main.params[ 'kill' ][ 'switch' ]
         switchDPID = main.params[ 'kill' ][ 'dpid' ]
         links = main.params[ 'kill' ][ 'links' ].split()
-        onosCli = main.CLIs[ main.activeNodes[0] ]
+        onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
         description = "Adding a switch to ensure it is discovered correctly"
         main.case( description )
 
@@ -2051,14 +2049,14 @@
             main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
 
         try:
-            timerLog = open( main.logdir + "/Timers.csv", 'w')
+            timerLog = open( main.logdir + "/Timers.csv", 'w' )
             # Overwrite with empty line and close
             labels = "Gossip Intents, Restart"
             data = str( gossipTime ) + ", " + str( main.restartTime )
             timerLog.write( labels + "\n" + data )
             timerLog.close()
-        except NameError, e:
-            main.log.exception(e)
+        except NameError as e:
+            main.log.exception( e )
 
     def CASE14( self, main ):
         """
@@ -2068,9 +2066,9 @@
         assert main, "main not defined"
         assert utilities.assert_equals, "utilities.assert_equals not defined"
 
-        main.case("Start Leadership Election app")
+        main.case( "Start Leadership Election app" )
         main.step( "Install leadership election app" )
-        onosCli = main.CLIs[ main.activeNodes[0] ]
+        onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
         appResult = onosCli.activateApp( "org.onosproject.election" )
         utilities.assert_equals(
             expect=main.TRUE,
@@ -2080,9 +2078,9 @@
 
         main.step( "Run for election on each node" )
         for i in main.activeNodes:
-            main.CLIs[i].electionTestRun()
-        time.sleep(5)
-        activeCLIs = [ main.CLIs[i] for i in main.activeNodes ]
+            main.CLIs[ i ].electionTestRun()
+        time.sleep( 5 )
+        activeCLIs = [ main.CLIs[ i ] for i in main.activeNodes ]
         sameResult, leaders = main.HA.consistentLeaderboards( activeCLIs )
         utilities.assert_equals(
             expect=True,
@@ -2092,7 +2090,7 @@
 
         if sameResult:
             leader = leaders[ 0 ][ 0 ]
-            if main.nodes[main.activeNodes[0]].ip_address in leader:
+            if main.nodes[ main.activeNodes[ 0 ] ].ip_address in leader:
                 correctLeader = True
             else:
                 correctLeader = False
@@ -2142,7 +2140,7 @@
         electionResult = main.TRUE
 
         for i in main.activeNodes:  # run test election on each node
-            if main.CLIs[i].electionTestRun() == main.FALSE:
+            if main.CLIs[ i ].electionTestRun() == main.FALSE:
                 electionResult = main.FALSE
         utilities.assert_equals(
             expect=main.TRUE,
@@ -2157,7 +2155,7 @@
 
         main.step( "Check that each node shows the same leader and candidates" )
         failMessage = "Nodes have different leaderboards"
-        activeCLIs = [ main.CLIs[i] for i in main.activeNodes ]
+        activeCLIs = [ main.CLIs[ i ] for i in main.activeNodes ]
         sameResult, oldLeaders = main.HA.consistentLeaderboards( activeCLIs )
         if sameResult:
             oldLeader = oldLeaders[ 0 ][ 0 ]
@@ -2207,7 +2205,7 @@
         if newLeader == oldLeader:
             newLeaderResult = False
             main.log.error( "All nodes still see old leader: " + str( oldLeader ) +
-                " as the current leader" )
+                            " as the current leader" )
         utilities.assert_equals(
             expect=True,
             actual=newLeaderResult,
@@ -2224,7 +2222,7 @@
             else:
                 main.log.info( "Expected no leader, got: " + str( newLeader ) )
                 correctCandidateResult = main.FALSE
-        elif len( oldLeaders[0] ) >= 3:
+        elif len( oldLeaders[ 0 ] ) >= 3:
             if newLeader == oldLeaders[ 0 ][ 2 ]:
                 # correct leader was elected
                 correctCandidateResult = main.TRUE
@@ -2264,10 +2262,10 @@
         positionResult, reRunLeaders = main.HA.consistentLeaderboards( activeCLIs )
 
         # Check that the re-elected node is last on the candidate List
-        if not reRunLeaders[0]:
+        if not reRunLeaders[ 0 ]:
             positionResult = main.FALSE
         elif oldLeader != reRunLeaders[ 0 ][ -1 ]:
-            main.log.error( "Old Leader ({}) not in the proper position: {} ".format( str( oldLeader),
+            main.log.error( "Old Leader ({}) not in the proper position: {} ".format( str( oldLeader ),
                                                                                       str( reRunLeaders[ 0 ] ) ) )
             positionResult = main.FALSE
         utilities.assert_equals(
@@ -2291,15 +2289,15 @@
         # Variables for the distributed primitives tests
         main.pCounterName = "TestON-Partitions"
         main.pCounterValue = 0
-        main.onosSet = set([])
+        main.onosSet = set( [] )
         main.onosSetName = "TestON-set"
 
         description = "Install Primitives app"
         main.case( description )
         main.step( "Install Primitives app" )
         appName = "org.onosproject.distributedprimitives"
-        node = main.activeNodes[0]
-        appResults = main.CLIs[node].activateApp( appName )
+        node = main.activeNodes[ 0 ]
+        appResults = main.CLIs[ node ].activateApp( appName )
         utilities.assert_equals( expect=main.TRUE,
                                  actual=appResults,
                                  onpass="Primitives app activated",
diff --git a/TestON/tests/HA/HAstopNodes/HAstopNodes.py b/TestON/tests/HA/HAstopNodes/HAstopNodes.py
index 6c32b16..0ec3b5d 100644
--- a/TestON/tests/HA/HAstopNodes/HAstopNodes.py
+++ b/TestON/tests/HA/HAstopNodes/HAstopNodes.py
@@ -23,8 +23,6 @@
 CASE16: Install Distributed Primitives app
 CASE17: Check for basic functionality with distributed primitives
 """
-
-
 class HAstopNodes:
 
     def __init__( self ):
@@ -149,7 +147,7 @@
                             filePath + topoName,
                             main.Mininet1.home,
                             direction="to" )
-        mnResult = main.Mininet1.startNet( )
+        mnResult = main.Mininet1.startNet()
         utilities.assert_equals( expect=main.TRUE, actual=mnResult,
                                  onpass="Mininet Started",
                                  onfail="Error starting Mininet" )
@@ -192,7 +190,7 @@
                   'seamless="seamless"></iframe>\n'
         graphs += ']]></ac:plain-text-body>\n'
         graphs += '</ac:structured-macro>\n'
-        main.log.wiki(graphs)
+        main.log.wiki( graphs )
 
         main.step( "Creating ONOS package" )
         # copy gen-partions file to ONOS
@@ -259,9 +257,9 @@
         cliResults = main.TRUE
         threads = []
         for i in range( main.numCtrls ):
-            t = main.Thread( target=main.CLIs[i].startOnosCli,
+            t = main.Thread( target=main.CLIs[ i ].startOnosCli,
                              name="startOnosCli-" + str( i ),
-                             args=[main.nodes[i].ip_address] )
+                             args=[ main.nodes[ i ].ip_address ] )
             threads.append( t )
             t.start()
 
@@ -286,7 +284,7 @@
         main.step( "Checking ONOS nodes" )
         nodeResults = utilities.retry( main.HA.nodesCheck,
                                        False,
-                                       args=[main.activeNodes],
+                                       args=[ main.activeNodes ],
                                        attempts=5 )
 
         utilities.assert_equals( expect=True, actual=nodeResults,
@@ -295,7 +293,7 @@
 
         if not nodeResults:
             for i in main.activeNodes:
-                cli = main.CLIs[i]
+                cli = main.CLIs[ i ]
                 main.log.debug( "{} components not ACTIVE: \n{}".format(
                     cli.name,
                     cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
@@ -307,7 +305,7 @@
         # get data from the params
         apps = main.params.get( 'apps' )
         if apps:
-            apps = apps.split(',')
+            apps = apps.split( ',' )
             main.log.warn( apps )
             activateResult = True
             for app in apps:
@@ -334,8 +332,8 @@
             main.log.debug( config )
             checkResult = main.TRUE
             for component in config:
-                for setting in config[component]:
-                    value = config[component][setting]
+                for setting in config[ component ]:
+                    value = config[ component ][ setting ]
                     check = main.CLIs[ 0 ].setCfg( component, setting, value )
                     main.log.info( "Value was changed? {}".format( main.TRUE == check ) )
                     checkResult = check and checkResult
@@ -350,7 +348,7 @@
         appCheck = main.TRUE
         threads = []
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].appToIDCheck,
+            t = main.Thread( target=main.CLIs[ i ].appToIDCheck,
                              name="appToIDCheck-" + str( i ),
                              args=[] )
             threads.append( t )
@@ -360,9 +358,9 @@
             t.join()
             appCheck = appCheck and t.result
         if appCheck != main.TRUE:
-            node = main.activeNodes[0]
-            main.log.warn( main.CLIs[node].apps() )
-            main.log.warn( main.CLIs[node].appIDs() )
+            node = main.activeNodes[ 0 ]
+            main.log.warn( main.CLIs[ node ].apps() )
+            main.log.warn( main.CLIs[ node ].appIDs() )
         utilities.assert_equals( expect=main.TRUE, actual=appCheck,
                                  onpass="App Ids seem to be correct",
                                  onfail="Something is wrong with app Ids" )
@@ -447,9 +445,9 @@
         # Manually assign mastership to the controller we want
         roleCall = main.TRUE
 
-        ipList = [ ]
+        ipList = []
         deviceList = []
-        onosCli = main.CLIs[ main.activeNodes[0] ]
+        onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
         try:
             # Assign mastership to specific controllers. This assignment was
             # determined for a 7 node cluster, but will work with any sized
@@ -523,8 +521,8 @@
         #       atomic and is actually a multi step process
         time.sleep( 5 )
         for i in range( len( ipList ) ):
-            ip = ipList[i]
-            deviceId = deviceList[i]
+            ip = ipList[ i ]
+            deviceId = deviceList[ i ]
             # Check assignment
             master = onosCli.getRole( deviceId ).get( 'master' )
             if ip in master:
@@ -562,7 +560,7 @@
 
         # install onos-app-fwd
         main.step( "Install reactive forwarding app" )
-        onosCli = main.CLIs[ main.activeNodes[0] ]
+        onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
         installResults = onosCli.activateApp( "org.onosproject.fwd" )
         utilities.assert_equals( expect=main.TRUE, actual=installResults,
                                  onpass="Install fwd successful",
@@ -572,7 +570,7 @@
         appCheck = main.TRUE
         threads = []
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].appToIDCheck,
+            t = main.Thread( target=main.CLIs[ i ].appToIDCheck,
                              name="appToIDCheck-" + str( i ),
                              args=[] )
             threads.append( t )
@@ -597,13 +595,13 @@
         pingResult = main.Mininet1.pingall()
         time2 = time.time()
         if not pingResult:
-            main.log.warn("First pingall failed. Trying again...")
+            main.log.warn( "First pingall failed. Trying again..." )
             pingResult = main.Mininet1.pingall()
             passMsg += " on the second try"
         utilities.assert_equals(
             expect=main.TRUE,
             actual=pingResult,
-            onpass= passMsg,
+            onpass=passMsg,
             onfail="Reactive Pingall failed, " +
                    "one or more ping pairs failed" )
         main.log.info( "Time for pingall: %2f seconds" %
@@ -612,8 +610,8 @@
         time.sleep( 11 )
         # uninstall onos-app-fwd
         main.step( "Uninstall reactive forwarding app" )
-        node = main.activeNodes[0]
-        uninstallResult = main.CLIs[node].deactivateApp( "org.onosproject.fwd" )
+        node = main.activeNodes[ 0 ]
+        uninstallResult = main.CLIs[ node ].deactivateApp( "org.onosproject.fwd" )
         utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
                                  onpass="Uninstall fwd successful",
                                  onfail="Uninstall fwd failed" )
@@ -622,7 +620,7 @@
         threads = []
         appCheck2 = main.TRUE
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].appToIDCheck,
+            t = main.Thread( target=main.CLIs[ i ].appToIDCheck,
                              name="appToIDCheck-" + str( i ),
                              args=[] )
             threads.append( t )
@@ -632,9 +630,9 @@
             t.join()
             appCheck2 = appCheck2 and t.result
         if appCheck2 != main.TRUE:
-            node = main.activeNodes[0]
-            main.log.warn( main.CLIs[node].apps() )
-            main.log.warn( main.CLIs[node].appIDs() )
+            node = main.activeNodes[ 0 ]
+            main.log.warn( main.CLIs[ node ].apps() )
+            main.log.warn( main.CLIs[ node ].appIDs() )
         utilities.assert_equals( expect=main.TRUE, actual=appCheck2,
                                  onpass="App Ids seem to be correct",
                                  onfail="Something is wrong with app Ids" )
@@ -662,8 +660,8 @@
                 host2Id = host2Dict.get( 'id', None )
             if host1Id and host2Id:
                 nodeNum = ( i % len( main.activeNodes ) )
-                node = main.activeNodes[nodeNum]
-                tmpId = main.CLIs[node].addHostIntent( host1Id, host2Id )
+                node = main.activeNodes[ nodeNum ]
+                tmpId = main.CLIs[ node ].addHostIntent( host1Id, host2Id )
                 if tmpId:
                     main.log.info( "Added intent with id: " + tmpId )
                     intentIds.append( tmpId )
@@ -673,8 +671,8 @@
             else:
                 main.log.error( "Error, getHost() failed for h" + str( i ) +
                                 " and/or h" + str( i + 10 ) )
-                node = main.activeNodes[0]
-                hosts = main.CLIs[node].hosts()
+                node = main.activeNodes[ 0 ]
+                hosts = main.CLIs[ node ].hosts()
                 main.log.warn( "Hosts output: " )
                 try:
                     main.log.warn( json.dumps( json.loads( hosts ),
@@ -742,7 +740,7 @@
                 for i in range( 14 ):
                     topics.append( "work-partition-" + str( i ) )
                 main.log.debug( topics )
-                ONOStopics = [ j['topic'] for j in parsedLeaders ]
+                ONOStopics = [ j[ 'topic' ] for j in parsedLeaders ]
                 for topic in topics:
                     if topic not in ONOStopics:
                         main.log.error( "Error: " + topic +
@@ -756,13 +754,13 @@
         # Check all nodes
         if missing:
             for i in main.activeNodes:
-                response = main.CLIs[i].leaders( jsonFormat=False)
-                main.log.warn( str( main.CLIs[i].name ) + " leaders output: \n" +
+                response = main.CLIs[ i ].leaders( jsonFormat=False )
+                main.log.warn( str( main.CLIs[ i ].name ) + " leaders output: \n" +
                                str( response ) )
 
         partitions = onosCli.partitions()
         try:
-            if partitions :
+            if partitions:
                 parsedPartitions = json.loads( partitions )
                 main.log.warn( json.dumps( parsedPartitions,
                                            sort_keys=True,
@@ -777,7 +775,7 @@
             main.log.error( repr( partitions ) )
         pendingMap = onosCli.pendingMap()
         try:
-            if pendingMap :
+            if pendingMap:
                 parsedPending = json.loads( pendingMap )
                 main.log.warn( json.dumps( parsedPending,
                                            sort_keys=True,
@@ -796,21 +794,21 @@
             main.log.error( "Error in pushing host intents to ONOS" )
 
         main.step( "Intent Anti-Entropy dispersion" )
-        for j in range(100):
+        for j in range( 100 ):
             correct = True
             main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
             for i in main.activeNodes:
                 onosIds = []
-                ids = main.CLIs[i].getAllIntentsId()
+                ids = main.CLIs[ i ].getAllIntentsId()
                 onosIds.append( ids )
-                main.log.debug( "Intents in " + main.CLIs[i].name + ": " +
+                main.log.debug( "Intents in " + main.CLIs[ i ].name + ": " +
                                 str( sorted( onosIds ) ) )
                 if sorted( ids ) != sorted( intentIds ):
                     main.log.warn( "Set of intent IDs doesn't match" )
                     correct = False
                     break
                 else:
-                    intents = json.loads( main.CLIs[i].intents() )
+                    intents = json.loads( main.CLIs[ i ].intents() )
                     for intent in intents:
                         if intent[ 'state' ] != "INSTALLED":
                             main.log.warn( "Intent " + intent[ 'id' ] +
@@ -820,14 +818,14 @@
             if correct:
                 break
             else:
-                time.sleep(1)
+                time.sleep( 1 )
         if not intentStop:
             intentStop = time.time()
         global gossipTime
         gossipTime = intentStop - intentStart
         main.log.info( "It took about " + str( gossipTime ) +
                         " seconds for all intents to appear in each node" )
-        gossipPeriod = int( main.params['timers']['gossip'] )
+        gossipPeriod = int( main.params[ 'timers' ][ 'gossip' ] )
         maxGossipTime = gossipPeriod * len( main.activeNodes )
         utilities.assert_greater_equals(
                 expect=maxGossipTime, actual=gossipTime,
@@ -889,7 +887,7 @@
                     # FIXME: this should only be after we start the app
                     topics.append( "org.onosproject.election" )
                     main.log.debug( topics )
-                    ONOStopics = [ j['topic'] for j in parsedLeaders ]
+                    ONOStopics = [ j[ 'topic' ] for j in parsedLeaders ]
                     for topic in topics:
                         if topic not in ONOStopics:
                             main.log.error( "Error: " + topic +
@@ -903,14 +901,14 @@
             # Check all nodes
             if missing:
                 for i in main.activeNodes:
-                    node = main.CLIs[i]
-                    response = node.leaders( jsonFormat=False)
+                    node = main.CLIs[ i ]
+                    response = node.leaders( jsonFormat=False )
                     main.log.warn( str( node.name ) + " leaders output: \n" +
                                    str( response ) )
 
             partitions = onosCli.partitions()
             try:
-                if partitions :
+                if partitions:
                     parsedPartitions = json.loads( partitions )
                     main.log.warn( json.dumps( parsedPartitions,
                                                sort_keys=True,
@@ -925,7 +923,7 @@
                 main.log.error( repr( partitions ) )
             pendingMap = onosCli.pendingMap()
             try:
-                if pendingMap :
+                if pendingMap:
                     parsedPending = json.loads( pendingMap )
                     main.log.warn( json.dumps( parsedPending,
                                                sort_keys=True,
@@ -954,7 +952,7 @@
                                 "functionality and check the state of " +\
                                 "the intent"
 
-        onosCli = main.CLIs[ main.activeNodes[0] ]
+        onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
         main.step( "Check Intent state" )
         installedCheck = False
         loopCount = 0
@@ -1040,7 +1038,7 @@
                 # FIXME: topics.append( "org.onosproject.election" )
                 # Print leaders output
                 main.log.debug( topics )
-                ONOStopics = [ j['topic'] for j in parsedLeaders ]
+                ONOStopics = [ j[ 'topic' ] for j in parsedLeaders ]
                 for topic in topics:
                     if topic not in ONOStopics:
                         main.log.error( "Error: " + topic +
@@ -1057,8 +1055,8 @@
         # Check all nodes
         if topicCheck:
             for i in main.activeNodes:
-                node = main.CLIs[i]
-                response = node.leaders( jsonFormat=False)
+                node = main.CLIs[ i ]
+                response = node.leaders( jsonFormat=False )
                 main.log.warn( str( node.name ) + " leaders output: \n" +
                                str( response ) )
 
@@ -1068,7 +1066,7 @@
         # Print partitions
         partitions = onosCli.partitions()
         try:
-            if partitions :
+            if partitions:
                 parsedPartitions = json.loads( partitions )
                 main.log.warn( json.dumps( parsedPartitions,
                                            sort_keys=True,
@@ -1084,7 +1082,7 @@
         # Print Pending Map
         pendingMap = onosCli.pendingMap()
         try:
-            if pendingMap :
+            if pendingMap:
                 parsedPending = json.loads( pendingMap )
                 main.log.warn( json.dumps( parsedPending,
                                            sort_keys=True,
@@ -1138,7 +1136,7 @@
                     # FIXME: this should only be after we start the app
                     topics.append( "org.onosproject.election" )
                     main.log.debug( topics )
-                    ONOStopics = [ j['topic'] for j in parsedLeaders ]
+                    ONOStopics = [ j[ 'topic' ] for j in parsedLeaders ]
                     for topic in topics:
                         if topic not in ONOStopics:
                             main.log.error( "Error: " + topic +
@@ -1151,14 +1149,14 @@
                 main.log.error( repr( leaders ) )
             if missing:
                 for i in main.activeNodes:
-                    node = main.CLIs[i]
-                    response = node.leaders( jsonFormat=False)
+                    node = main.CLIs[ i ]
+                    response = node.leaders( jsonFormat=False )
                     main.log.warn( str( node.name ) + " leaders output: \n" +
                                    str( response ) )
 
             partitions = onosCli.partitions()
             try:
-                if partitions :
+                if partitions:
                     parsedPartitions = json.loads( partitions )
                     main.log.warn( json.dumps( parsedPartitions,
                                                sort_keys=True,
@@ -1173,7 +1171,7 @@
                 main.log.error( repr( partitions ) )
             pendingMap = onosCli.pendingMap()
             try:
-                if pendingMap :
+                if pendingMap:
                     parsedPending = json.loads( pendingMap )
                     main.log.warn( json.dumps( parsedPending,
                                                sort_keys=True,
@@ -1244,7 +1242,7 @@
         rolesNotNull = main.TRUE
         threads = []
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].rolesNotNull,
+            t = main.Thread( target=main.CLIs[ i ].rolesNotNull,
                              name="rolesNotNull-" + str( i ),
                              args=[] )
             threads.append( t )
@@ -1266,7 +1264,7 @@
         rolesResults = True
         threads = []
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].roles,
+            t = main.Thread( target=main.CLIs[ i ].roles,
                              name="roles-" + str( i ),
                              args=[] )
             threads.append( t )
@@ -1277,11 +1275,11 @@
             ONOSMastership.append( t.result )
 
         for i in range( len( ONOSMastership ) ):
-            node = str( main.activeNodes[i] + 1 )
-            if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
+            node = str( main.activeNodes[ i ] + 1 )
+            if not ONOSMastership[ i ] or "Error" in ONOSMastership[ i ]:
                 main.log.error( "Error in getting ONOS" + node + " roles" )
                 main.log.warn( "ONOS" + node + " mastership response: " +
-                               repr( ONOSMastership[i] ) )
+                               repr( ONOSMastership[ i ] ) )
                 rolesResults = False
         utilities.assert_equals(
             expect=True,
@@ -1290,7 +1288,7 @@
             onfail="Error in reading roles from ONOS" )
 
         main.step( "Check for consistency in roles from each controller" )
-        if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
+        if all( [ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
             main.log.info(
                 "Switch roles are consistent across all ONOS nodes" )
         else:
@@ -1303,7 +1301,7 @@
 
         if rolesResults and not consistentMastership:
             for i in range( len( main.activeNodes ) ):
-                node = str( main.activeNodes[i] + 1 )
+                node = str( main.activeNodes[ i ] + 1 )
                 try:
                     main.log.warn(
                         "ONOS" + node + " roles: ",
@@ -1327,7 +1325,7 @@
         intentsResults = True
         threads = []
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].intents,
+            t = main.Thread( target=main.CLIs[ i ].intents,
                              name="intents-" + str( i ),
                              args=[],
                              kwargs={ 'jsonFormat': True } )
@@ -1339,7 +1337,7 @@
             ONOSIntents.append( t.result )
 
         for i in range( len( ONOSIntents ) ):
-            node = str( main.activeNodes[i] + 1 )
+            node = str( main.activeNodes[ i ] + 1 )
             if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
                 main.log.error( "Error in getting ONOS" + node + " intents" )
                 main.log.warn( "ONOS" + node + " intents response: " +
@@ -1352,7 +1350,7 @@
             onfail="Error in reading intents from ONOS" )
 
         main.step( "Check for consistency in Intents from each controller" )
-        if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
+        if all( [ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
             main.log.info( "Intents are consistent across all ONOS " +
                              "nodes" )
         else:
@@ -1400,17 +1398,17 @@
 
         if intentsResults and not consistentIntents:
             # print the json objects
-            n = str( main.activeNodes[-1] + 1 )
+            n = str( main.activeNodes[ -1 ] + 1 )
             main.log.debug( "ONOS" + n + " intents: " )
             main.log.debug( json.dumps( json.loads( ONOSIntents[ -1 ] ),
                                         sort_keys=True,
                                         indent=4,
                                         separators=( ',', ': ' ) ) )
             for i in range( len( ONOSIntents ) ):
-                node = str( main.activeNodes[i] + 1 )
+                node = str( main.activeNodes[ i ] + 1 )
                 if ONOSIntents[ i ] != ONOSIntents[ -1 ]:
                     main.log.debug( "ONOS" + node + " intents: " )
-                    main.log.debug( json.dumps( json.loads( ONOSIntents[i] ),
+                    main.log.debug( json.dumps( json.loads( ONOSIntents[ i ] ),
                                                 sort_keys=True,
                                                 indent=4,
                                                 separators=( ',', ': ' ) ) )
@@ -1431,7 +1429,7 @@
         flowsResults = True
         threads = []
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].flows,
+            t = main.Thread( target=main.CLIs[ i ].flows,
                              name="flows-" + str( i ),
                              args=[],
                              kwargs={ 'jsonFormat': True } )
@@ -1439,14 +1437,14 @@
             t.start()
 
         # NOTE: Flows command can take some time to run
-        time.sleep(30)
+        time.sleep( 30 )
         for t in threads:
             t.join()
             result = t.result
             ONOSFlows.append( result )
 
         for i in range( len( ONOSFlows ) ):
-            num = str( main.activeNodes[i] + 1 )
+            num = str( main.activeNodes[ i ] + 1 )
             if not ONOSFlows[ i ] or "Error" in ONOSFlows[ i ]:
                 main.log.error( "Error in getting ONOS" + num + " flows" )
                 main.log.warn( "ONOS" + num + " flows response: " +
@@ -1483,11 +1481,11 @@
 
         if flowsResults and not consistentFlows:
             for i in range( len( ONOSFlows ) ):
-                node = str( main.activeNodes[i] + 1 )
+                node = str( main.activeNodes[ i ] + 1 )
                 try:
                     main.log.warn(
                         "ONOS" + node + " flows: " +
-                        json.dumps( json.loads( ONOSFlows[i] ), sort_keys=True,
+                        json.dumps( json.loads( ONOSFlows[ i ] ), sort_keys=True,
                                     indent=4, separators=( ',', ': ' ) ) )
                 except ( ValueError, TypeError ):
                     main.log.warn( "ONOS" + node + " flows: " +
@@ -1552,9 +1550,9 @@
         devices = []
         threads = []
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].devices,
+            t = main.Thread( target=main.CLIs[ i ].devices,
                              name="devices-" + str( i ),
-                             args=[ ] )
+                             args=[] )
             threads.append( t )
             t.start()
 
@@ -1564,9 +1562,9 @@
         hosts = []
         threads = []
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].hosts,
+            t = main.Thread( target=main.CLIs[ i ].hosts,
                              name="hosts-" + str( i ),
-                             args=[ ] )
+                             args=[] )
             threads.append( t )
             t.start()
 
@@ -1584,9 +1582,9 @@
         ports = []
         threads = []
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].ports,
+            t = main.Thread( target=main.CLIs[ i ].ports,
                              name="ports-" + str( i ),
-                             args=[ ] )
+                             args=[] )
             threads.append( t )
             t.start()
 
@@ -1596,9 +1594,9 @@
         links = []
         threads = []
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].links,
+            t = main.Thread( target=main.CLIs[ i ].links,
                              name="links-" + str( i ),
-                             args=[ ] )
+                             args=[] )
             threads.append( t )
             t.start()
 
@@ -1608,9 +1606,9 @@
         clusters = []
         threads = []
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].clusters,
+            t = main.Thread( target=main.CLIs[ i ].clusters,
                              name="clusters-" + str( i ),
-                             args=[ ] )
+                             args=[] )
             threads.append( t )
             t.start()
 
@@ -1623,7 +1621,7 @@
         main.step( "Host view is consistent across ONOS nodes" )
         consistentHostsResult = main.TRUE
         for controller in range( len( hosts ) ):
-            controllerStr = str( main.activeNodes[controller] + 1 )
+            controllerStr = str( main.activeNodes[ controller ] + 1 )
             if hosts[ controller ] and "Error" not in hosts[ controller ]:
                 if hosts[ controller ] == hosts[ 0 ]:
                     continue
@@ -1650,10 +1648,10 @@
         main.step( "Each host has an IP address" )
         ipResult = main.TRUE
         for controller in range( 0, len( hosts ) ):
-            controllerStr = str( main.activeNodes[controller] + 1 )
+            controllerStr = str( main.activeNodes[ controller ] + 1 )
             if hosts[ controller ]:
                 for host in hosts[ controller ]:
-                    if not host.get( 'ipAddresses', [ ] ):
+                    if not host.get( 'ipAddresses', [] ):
                         main.log.error( "Error with host ips on controller" +
                                         controllerStr + ": " + str( host ) )
                         ipResult = main.FALSE
@@ -1667,7 +1665,7 @@
         main.step( "Cluster view is consistent across ONOS nodes" )
         consistentClustersResult = main.TRUE
         for controller in range( len( clusters ) ):
-            controllerStr = str( main.activeNodes[controller] + 1 )
+            controllerStr = str( main.activeNodes[ controller ] + 1 )
             if "Error" not in clusters[ controller ]:
                 if clusters[ controller ] == clusters[ 0 ]:
                     continue
@@ -1716,14 +1714,14 @@
         mnLinks = main.Mininet1.getLinks()
         mnHosts = main.Mininet1.getHosts()
         for controller in main.activeNodes:
-            controllerStr = str( main.activeNodes[controller] + 1 )
+            controllerStr = str( main.activeNodes[ controller ] + 1 )
             if devices[ controller ] and ports[ controller ] and\
-                "Error" not in devices[ controller ] and\
-                "Error" not in ports[ controller ]:
-                    currentDevicesResult = main.Mininet1.compareSwitches(
-                            mnSwitches,
-                            json.loads( devices[ controller ] ),
-                            json.loads( ports[ controller ] ) )
+                    "Error" not in devices[ controller ] and\
+                    "Error" not in ports[ controller ]:
+                currentDevicesResult = main.Mininet1.compareSwitches(
+                        mnSwitches,
+                        json.loads( devices[ controller ] ),
+                        json.loads( ports[ controller ] ) )
             else:
                 currentDevicesResult = main.FALSE
             utilities.assert_equals( expect=main.TRUE,
@@ -1810,7 +1808,7 @@
         killResults = main.TRUE
         for i in main.kill:
             killResults = killResults and\
-                          main.ONOSbench.onosStop( main.nodes[i].ip_address )
+                          main.ONOSbench.onosStop( main.nodes[ i ].ip_address )
             main.activeNodes.remove( i )
         utilities.assert_equals( expect=main.TRUE, actual=killResults,
                                  onpass="ONOS nodes stopped successfully",
@@ -1819,7 +1817,7 @@
         main.step( "Checking ONOS nodes" )
         nodeResults = utilities.retry( main.HA.nodesCheck,
                                        False,
-                                       args=[main.activeNodes],
+                                       args=[ main.activeNodes ],
                                        sleep=15,
                                        attempts=5 )
 
@@ -1829,7 +1827,7 @@
 
         if not nodeResults:
             for i in main.activeNodes:
-                cli = main.CLIs[i]
+                cli = main.CLIs[ i ]
                 main.log.debug( "{} components not ACTIVE: \n{}".format(
                     cli.name,
                     cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
@@ -1855,7 +1853,7 @@
         restartTime = time.time()
         for i in main.kill:
             startResults = startResults and\
-                           main.ONOSbench.onosStart( main.nodes[i].ip_address )
+                           main.ONOSbench.onosStart( main.nodes[ i ].ip_address )
         utilities.assert_equals( expect=main.TRUE, actual=startResults,
                                  onpass="ONOS nodes started successfully",
                                  onfail="ONOS nodes NOT successfully started" )
@@ -1867,7 +1865,7 @@
             onosIsupResult = main.TRUE
             for i in main.kill:
                 onosIsupResult = onosIsupResult and\
-                                 main.ONOSbench.isup( main.nodes[i].ip_address )
+                                 main.ONOSbench.isup( main.nodes[ i ].ip_address )
             count = count + 1
         utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
                                  onpass="ONOS restarted successfully",
@@ -1877,7 +1875,7 @@
         cliResults = main.TRUE
         for i in main.kill:
             cliResults = cliResults and\
-                         main.CLIs[i].startOnosCli( main.nodes[i].ip_address )
+                         main.CLIs[ i ].startOnosCli( main.nodes[ i ].ip_address )
             main.activeNodes.append( i )
         utilities.assert_equals( expect=main.TRUE, actual=cliResults,
                                  onpass="ONOS cli restarted",
@@ -1899,7 +1897,7 @@
         main.step( "Checking ONOS nodes" )
         nodeResults = utilities.retry( main.HA.nodesCheck,
                                        False,
-                                       args=[main.activeNodes],
+                                       args=[ main.activeNodes ],
                                        sleep=15,
                                        attempts=5 )
 
@@ -1909,23 +1907,23 @@
 
         if not nodeResults:
             for i in main.activeNodes:
-                cli = main.CLIs[i]
+                cli = main.CLIs[ i ]
                 main.log.debug( "{} components not ACTIVE: \n{}".format(
                     cli.name,
                     cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
             main.log.error( "Failed to start ONOS, stopping test" )
             main.cleanup()
             main.exit()
-        node = main.activeNodes[0]
-        main.log.debug( main.CLIs[node].nodes( jsonFormat=False ) )
-        main.log.debug( main.CLIs[node].leaders( jsonFormat=False ) )
-        main.log.debug( main.CLIs[node].partitions( jsonFormat=False ) )
+        node = main.activeNodes[ 0 ]
+        main.log.debug( main.CLIs[ node ].nodes( jsonFormat=False ) )
+        main.log.debug( main.CLIs[ node ].leaders( jsonFormat=False ) )
+        main.log.debug( main.CLIs[ node ].partitions( jsonFormat=False ) )
 
         main.step( "Rerun for election on the node(s) that were killed" )
         runResults = main.TRUE
         for i in main.kill:
             runResults = runResults and\
-                         main.CLIs[i].electionTestRun()
+                         main.CLIs[ i ].electionTestRun()
         utilities.assert_equals( expect=main.TRUE, actual=runResults,
                                  onpass="ONOS nodes reran for election topic",
                                  onfail="Errror rerunning for election" )
@@ -1952,9 +1950,9 @@
         rolesNotNull = main.TRUE
         threads = []
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].rolesNotNull,
+            t = main.Thread( target=main.CLIs[ i ].rolesNotNull,
                              name="rolesNotNull-" + str( i ),
-                             args=[ ] )
+                             args=[] )
             threads.append( t )
             t.start()
 
@@ -1974,7 +1972,7 @@
         rolesResults = True
         threads = []
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].roles,
+            t = main.Thread( target=main.CLIs[ i ].roles,
                              name="roles-" + str( i ),
                              args=[] )
             threads.append( t )
@@ -1985,11 +1983,11 @@
             ONOSMastership.append( t.result )
 
         for i in range( len( ONOSMastership ) ):
-            node = str( main.activeNodes[i] + 1 )
-            if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
+            node = str( main.activeNodes[ i ] + 1 )
+            if not ONOSMastership[ i ] or "Error" in ONOSMastership[ i ]:
                 main.log.error( "Error in getting ONOS" + node + " roles" )
                 main.log.warn( "ONOS" + node + " mastership response: " +
-                               repr( ONOSMastership[i] ) )
+                               repr( ONOSMastership[ i ] ) )
                 rolesResults = False
         utilities.assert_equals(
             expect=True,
@@ -1998,7 +1996,7 @@
             onfail="Error in reading roles from ONOS" )
 
         main.step( "Check for consistency in roles from each controller" )
-        if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
+        if all( [ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
             main.log.info(
                 "Switch roles are consistent across all ONOS nodes" )
         else:
@@ -2011,7 +2009,7 @@
 
         if rolesResults and not consistentMastership:
             for i in range( len( ONOSMastership ) ):
-                node = str( main.activeNodes[i] + 1 )
+                node = str( main.activeNodes[ i ] + 1 )
                 main.log.warn( "ONOS" + node + " roles: ",
                                json.dumps( json.loads( ONOSMastership[ i ] ),
                                            sort_keys=True,
@@ -2027,7 +2025,7 @@
         intentsResults = True
         threads = []
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].intents,
+            t = main.Thread( target=main.CLIs[ i ].intents,
                              name="intents-" + str( i ),
                              args=[],
                              kwargs={ 'jsonFormat': True } )
@@ -2038,8 +2036,8 @@
             t.join()
             ONOSIntents.append( t.result )
 
-        for i in range( len( ONOSIntents) ):
-            node = str( main.activeNodes[i] + 1 )
+        for i in range( len( ONOSIntents ) ):
+            node = str( main.activeNodes[ i ] + 1 )
             if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
                 main.log.error( "Error in getting ONOS" + node + " intents" )
                 main.log.warn( "ONOS" + node + " intents response: " +
@@ -2052,7 +2050,7 @@
             onfail="Error in reading intents from ONOS" )
 
         main.step( "Check for consistency in Intents from each controller" )
-        if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
+        if all( [ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
             main.log.info( "Intents are consistent across all ONOS " +
                              "nodes" )
         else:
@@ -2101,12 +2099,12 @@
                 main.log.exception( "Error in parsing intents" )
                 main.log.error( repr( node ) )
             intentStates.append( nodeStates )
-            out = [ (i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
+            out = [ ( i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
             main.log.info( dict( out ) )
 
         if intentsResults and not consistentIntents:
             for i in range( len( main.activeNodes ) ):
-                node = str( main.activeNodes[i] + 1 )
+                node = str( main.activeNodes[ i ] + 1 )
                 main.log.warn( "ONOS" + node + " intents: " )
                 main.log.warn( json.dumps(
                     json.loads( ONOSIntents[ i ] ),
@@ -2145,7 +2143,7 @@
                             main.log.debug( json.dumps( intent ) )
                 except ( ValueError, TypeError ):
                     main.log.exception( "Exception printing intents" )
-                    main.log.debug( repr( ONOSIntents[0] ) )
+                    main.log.debug( repr( ONOSIntents[ 0 ] ) )
                     main.log.debug( repr( intentState ) )
             if sameIntents == main.FALSE:
                 try:
@@ -2159,7 +2157,7 @@
                                                 separators=( ',', ': ' ) ) )
                 except ( ValueError, TypeError ):
                     main.log.exception( "Exception printing intents" )
-                    main.log.debug( repr( ONOSIntents[0] ) )
+                    main.log.debug( repr( ONOSIntents[ 0 ] ) )
                     main.log.debug( repr( intentState ) )
             utilities.assert_equals(
                 expect=main.TRUE,
@@ -2174,7 +2172,7 @@
         for i in range( 28 ):
             main.log.info( "Checking flow table on s" + str( i + 1 ) )
             tmpFlows = main.Mininet1.getFlowTable( "s" + str( i + 1 ), version="1.3", debug=False )
-            curSwitch = main.Mininet1.flowTableComp( flows[i], tmpFlows )
+            curSwitch = main.Mininet1.flowTableComp( flows[ i ], tmpFlows )
             FlowTables = FlowTables and curSwitch
             if curSwitch == main.FALSE:
                 main.log.warn( "Differences in flow table for switch: s{}".format( i + 1 ) )
@@ -2185,7 +2183,7 @@
             onfail="Changes were found in the flow tables" )
 
         main.Mininet2.pingLongKill()
-        '''
+        """
         main.step( "Check the continuous pings to ensure that no packets " +
                    "were dropped during component failure" )
         main.Mininet2.pingKill( main.params[ 'TESTONUSER' ],
@@ -2211,19 +2209,18 @@
             actual=LossInPings,
             onpass="No Loss of connectivity",
             onfail="Loss of dataplane connectivity detected" )
-        '''
-
+        """
         main.step( "Leadership Election is still functional" )
         # Test of LeadershipElection
         leaderList = []
 
         restarted = []
         for i in main.kill:
-            restarted.append( main.nodes[i].ip_address )
+            restarted.append( main.nodes[ i ].ip_address )
         leaderResult = main.TRUE
 
         for i in main.activeNodes:
-            cli = main.CLIs[i]
+            cli = main.CLIs[ i ]
             leaderN = cli.electionTestLeader()
             leaderList.append( leaderN )
             if leaderN == main.FALSE:
@@ -2287,8 +2284,8 @@
             for i in main.activeNodes:
                 t = main.Thread( target=utilities.retry,
                                  name="devices-" + str( i ),
-                                 args=[ main.CLIs[i].devices, [ None ] ],
-                                 kwargs= { 'sleep': 5, 'attempts': 5,
+                                 args=[ main.CLIs[ i ].devices, [ None ] ],
+                                 kwargs={ 'sleep': 5, 'attempts': 5,
                                            'randomTime': True } )
                 threads.append( t )
                 t.start()
@@ -2302,8 +2299,8 @@
             for i in main.activeNodes:
                 t = main.Thread( target=utilities.retry,
                                  name="hosts-" + str( i ),
-                                 args=[ main.CLIs[i].hosts, [ None ] ],
-                                 kwargs= { 'sleep': 5, 'attempts': 5,
+                                 args=[ main.CLIs[ i ].hosts, [ None ] ],
+                                 kwargs={ 'sleep': 5, 'attempts': 5,
                                            'randomTime': True } )
                 threads.append( t )
                 t.start()
@@ -2317,7 +2314,7 @@
                     main.log.error( repr( t.result ) )
                     hosts.append( None )
             for controller in range( 0, len( hosts ) ):
-                controllerStr = str( main.activeNodes[controller] + 1 )
+                controllerStr = str( main.activeNodes[ controller ] + 1 )
                 if hosts[ controller ]:
                     for host in hosts[ controller ]:
                         if host is None or host.get( 'ipAddresses', [] ) == []:
@@ -2330,8 +2327,8 @@
             for i in main.activeNodes:
                 t = main.Thread( target=utilities.retry,
                                  name="ports-" + str( i ),
-                                 args=[ main.CLIs[i].ports, [ None ] ],
-                                 kwargs= { 'sleep': 5, 'attempts': 5,
+                                 args=[ main.CLIs[ i ].ports, [ None ] ],
+                                 kwargs={ 'sleep': 5, 'attempts': 5,
                                            'randomTime': True } )
                 threads.append( t )
                 t.start()
@@ -2344,8 +2341,8 @@
             for i in main.activeNodes:
                 t = main.Thread( target=utilities.retry,
                                  name="links-" + str( i ),
-                                 args=[ main.CLIs[i].links, [ None ] ],
-                                 kwargs= { 'sleep': 5, 'attempts': 5,
+                                 args=[ main.CLIs[ i ].links, [ None ] ],
+                                 kwargs={ 'sleep': 5, 'attempts': 5,
                                            'randomTime': True } )
                 threads.append( t )
                 t.start()
@@ -2358,8 +2355,8 @@
             for i in main.activeNodes:
                 t = main.Thread( target=utilities.retry,
                                  name="clusters-" + str( i ),
-                                 args=[ main.CLIs[i].clusters, [ None ] ],
-                                 kwargs= { 'sleep': 5, 'attempts': 5,
+                                 args=[ main.CLIs[ i ].clusters, [ None ] ],
+                                 kwargs={ 'sleep': 5, 'attempts': 5,
                                            'randomTime': True } )
                 threads.append( t )
                 t.start()
@@ -2378,18 +2375,18 @@
                all( e is None for e in ports ) and\
                all( e is None for e in links ) and\
                all( e is None for e in clusters ):
-                   topoFailMsg = "Could not get topology from ONOS"
-                   main.log.error( topoFailMsg )
-                   continue  # Try again, No use trying to compare
+                topoFailMsg = "Could not get topology from ONOS"
+                main.log.error( topoFailMsg )
+                continue  # Try again, No use trying to compare
 
             mnSwitches = main.Mininet1.getSwitches()
             mnLinks = main.Mininet1.getLinks()
             mnHosts = main.Mininet1.getHosts()
             for controller in range( len( main.activeNodes ) ):
-                controllerStr = str( main.activeNodes[controller] + 1 )
+                controllerStr = str( main.activeNodes[ controller ] + 1 )
                 if devices[ controller ] and ports[ controller ] and\
-                    "Error" not in devices[ controller ] and\
-                    "Error" not in ports[ controller ]:
+                        "Error" not in devices[ controller ] and\
+                        "Error" not in ports[ controller ]:
 
                     try:
                         currentDevicesResult = main.Mininet1.compareSwitches(
@@ -2442,29 +2439,29 @@
                 mappings = {}
                 for i in range( 1, 29 ):  # hosts 1 through 28
                     # set up correct variables:
-                    macId = "00:" * 5 + hex( i ).split( "0x" )[1].upper().zfill(2)
+                    macId = "00:" * 5 + hex( i ).split( "0x" )[ 1 ].upper().zfill( 2 )
                     if i == 1:
-                        deviceId = "1000".zfill(16)
+                        deviceId = "1000".zfill( 16 )
                     elif i == 2:
-                        deviceId = "2000".zfill(16)
+                        deviceId = "2000".zfill( 16 )
                     elif i == 3:
-                        deviceId = "3000".zfill(16)
+                        deviceId = "3000".zfill( 16 )
                     elif i == 4:
-                        deviceId = "3004".zfill(16)
+                        deviceId = "3004".zfill( 16 )
                     elif i == 5:
-                        deviceId = "5000".zfill(16)
+                        deviceId = "5000".zfill( 16 )
                     elif i == 6:
-                        deviceId = "6000".zfill(16)
+                        deviceId = "6000".zfill( 16 )
                     elif i == 7:
-                        deviceId = "6007".zfill(16)
+                        deviceId = "6007".zfill( 16 )
                     elif i >= 8 and i <= 17:
                         dpid = '3' + str( i ).zfill( 3 )
-                        deviceId = dpid.zfill(16)
+                        deviceId = dpid.zfill( 16 )
                     elif i >= 18 and i <= 27:
                         dpid = '6' + str( i ).zfill( 3 )
-                        deviceId = dpid.zfill(16)
+                        deviceId = dpid.zfill( 16 )
                     elif i == 28:
-                        deviceId = "2800".zfill(16)
+                        deviceId = "2800".zfill( 16 )
                     mappings[ macId ] = deviceId
                 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
                     if hosts[ controller ] == []:
@@ -2484,7 +2481,7 @@
                                 assert location, "location field could not be found for this host object"
 
                                 # Trim the protocol identifier off deviceId
-                                device = str( location.get( 'elementId' ) ).split(':')[1]
+                                device = str( location.get( 'elementId' ) ).split( ':' )[ 1 ]
                                 assert device, "elementId field could not be found for this host location object"
 
                                 port = location.get( 'port' )
@@ -2495,7 +2492,7 @@
                                     if str( port ) != "1":
                                         main.log.error( "The attachment port is incorrect for " +
                                                         "host " + str( mac ) +
-                                                        ". Expected: 1 Actual: " + str( port) )
+                                                        ". Expected: 1 Actual: " + str( port ) )
                                         hostAttachment = False
                                     if device != mappings[ str( mac ) ]:
                                         main.log.error( "The attachment device is incorrect for " +
@@ -2537,7 +2534,7 @@
         main.step( "Hosts view is consistent across all ONOS nodes" )
         consistentHostsResult = main.TRUE
         for controller in range( len( hosts ) ):
-            controllerStr = str( main.activeNodes[controller] + 1 )
+            controllerStr = str( main.activeNodes[ controller ] + 1 )
             if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
                 if hosts[ controller ] == hosts[ 0 ]:
                     continue
@@ -2579,7 +2576,7 @@
         main.step( "Clusters view is consistent across all ONOS nodes" )
         consistentClustersResult = main.TRUE
         for controller in range( len( clusters ) ):
-            controllerStr = str( main.activeNodes[controller] + 1 )
+            controllerStr = str( main.activeNodes[ controller ] + 1 )
             if "Error" not in clusters[ controller ]:
                 if clusters[ controller ] == clusters[ 0 ]:
                     continue
@@ -2609,7 +2606,7 @@
             numClusters = len( json.loads( clusters[ 0 ] ) )
         except ( ValueError, TypeError ):
             main.log.exception( "Error parsing clusters[0]: " +
-                                repr( clusters[0] ) )
+                                repr( clusters[ 0 ] ) )
             numClusters = "ERROR"
         clusterResults = main.FALSE
         if numClusters == 1:
@@ -2659,7 +2656,7 @@
         main.step( "Checking ONOS nodes" )
         nodeResults = utilities.retry( main.HA.nodesCheck,
                                        False,
-                                       args=[main.activeNodes],
+                                       args=[ main.activeNodes ],
                                        attempts=5 )
 
         utilities.assert_equals( expect=True, actual=nodeResults,
@@ -2668,8 +2665,8 @@
         if not nodeResults:
             for i in main.activeNodes:
                 main.log.debug( "{} components not ACTIVE: \n{}".format(
-                    main.CLIs[i].name,
-                    main.CLIs[i].sendline( "scr:list | grep -v ACTIVE" ) ) )
+                    main.CLIs[ i ].name,
+                    main.CLIs[ i ].sendline( "scr:list | grep -v ACTIVE" ) ) )
 
         if not topoResult:
             main.cleanup()
@@ -2746,7 +2743,7 @@
         switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
 
         description = "Killing a switch to ensure it is discovered correctly"
-        onosCli = main.CLIs[ main.activeNodes[0] ]
+        onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
         main.case( description )
         switch = main.params[ 'kill' ][ 'switch' ]
         switchDPID = main.params[ 'kill' ][ 'dpid' ]
@@ -2791,7 +2788,7 @@
         switch = main.params[ 'kill' ][ 'switch' ]
         switchDPID = main.params[ 'kill' ][ 'dpid' ]
         links = main.params[ 'kill' ][ 'links' ].split()
-        onosCli = main.CLIs[ main.activeNodes[0] ]
+        onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
         description = "Adding a switch to ensure it is discovered correctly"
         main.case( description )
 
@@ -2878,14 +2875,14 @@
             main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
 
         try:
-            timerLog = open( main.logdir + "/Timers.csv", 'w')
+            timerLog = open( main.logdir + "/Timers.csv", 'w' )
             # Overwrite with empty line and close
             labels = "Gossip Intents, Restart"
             data = str( gossipTime ) + ", " + str( main.restartTime )
             timerLog.write( labels + "\n" + data )
             timerLog.close()
-        except NameError, e:
-            main.log.exception(e)
+        except NameError as e:
+            main.log.exception( e )
 
     def CASE14( self, main ):
         """
@@ -2897,9 +2894,9 @@
         assert main.CLIs, "main.CLIs not defined"
         assert main.nodes, "main.nodes not defined"
 
-        main.case("Start Leadership Election app")
+        main.case( "Start Leadership Election app" )
         main.step( "Install leadership election app" )
-        onosCli = main.CLIs[ main.activeNodes[0] ]
+        onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
         appResult = onosCli.activateApp( "org.onosproject.election" )
         utilities.assert_equals(
             expect=main.TRUE,
@@ -2909,9 +2906,9 @@
 
         main.step( "Run for election on each node" )
         for i in main.activeNodes:
-            main.CLIs[i].electionTestRun()
-        time.sleep(5)
-        activeCLIs = [ main.CLIs[i] for i in main.activeNodes ]
+            main.CLIs[ i ].electionTestRun()
+        time.sleep( 5 )
+        activeCLIs = [ main.CLIs[ i ] for i in main.activeNodes ]
         sameResult, leaders = main.HA.consistentLeaderboards( activeCLIs )
         utilities.assert_equals(
             expect=True,
@@ -2921,7 +2918,7 @@
 
         if sameResult:
             leader = leaders[ 0 ][ 0 ]
-            if main.nodes[main.activeNodes[0]].ip_address in leader:
+            if main.nodes[ main.activeNodes[ 0 ] ].ip_address in leader:
                 correctLeader = True
             else:
                 correctLeader = False
@@ -2971,7 +2968,7 @@
         electionResult = main.TRUE
 
         for i in main.activeNodes:  # run test election on each node
-            if main.CLIs[i].electionTestRun() == main.FALSE:
+            if main.CLIs[ i ].electionTestRun() == main.FALSE:
                 electionResult = main.FALSE
         utilities.assert_equals(
             expect=main.TRUE,
@@ -2986,7 +2983,7 @@
 
         main.step( "Check that each node shows the same leader and candidates" )
         failMessage = "Nodes have different leaderboards"
-        activeCLIs = [ main.CLIs[i] for i in main.activeNodes ]
+        activeCLIs = [ main.CLIs[ i ] for i in main.activeNodes ]
         sameResult, oldLeaders = main.HA.consistentLeaderboards( activeCLIs )
         if sameResult:
             oldLeader = oldLeaders[ 0 ][ 0 ]
@@ -3036,7 +3033,7 @@
         if newLeader == oldLeader:
             newLeaderResult = False
             main.log.error( "All nodes still see old leader: " + str( oldLeader ) +
-                " as the current leader" )
+                            " as the current leader" )
         utilities.assert_equals(
             expect=True,
             actual=newLeaderResult,
@@ -3053,7 +3050,7 @@
             else:
                 main.log.info( "Expected no leader, got: " + str( newLeader ) )
                 correctCandidateResult = main.FALSE
-        elif len( oldLeaders[0] ) >= 3:
+        elif len( oldLeaders[ 0 ] ) >= 3:
             if newLeader == oldLeaders[ 0 ][ 2 ]:
                 # correct leader was elected
                 correctCandidateResult = main.TRUE
@@ -3093,10 +3090,10 @@
         positionResult, reRunLeaders = main.HA.consistentLeaderboards( activeCLIs )
 
         # Check that the re-elected node is last on the candidate List
-        if not reRunLeaders[0]:
+        if not reRunLeaders[ 0 ]:
             positionResult = main.FALSE
         elif oldLeader != reRunLeaders[ 0 ][ -1 ]:
-            main.log.error( "Old Leader ({}) not in the proper position: {} ".format( str( oldLeader),
+            main.log.error( "Old Leader ({}) not in the proper position: {} ".format( str( oldLeader ),
                                                                                       str( reRunLeaders[ 0 ] ) ) )
             positionResult = main.FALSE
         utilities.assert_equals(
@@ -3120,15 +3117,15 @@
         # Variables for the distributed primitives tests
         main.pCounterName = "TestON-Partitions"
         main.pCounterValue = 0
-        main.onosSet = set([])
+        main.onosSet = set( [] )
         main.onosSetName = "TestON-set"
 
         description = "Install Primitives app"
         main.case( description )
         main.step( "Install Primitives app" )
         appName = "org.onosproject.distributedprimitives"
-        node = main.activeNodes[0]
-        appResults = main.CLIs[node].activateApp( appName )
+        node = main.activeNodes[ 0 ]
+        appResults = main.CLIs[ node ].activateApp( appName )
         utilities.assert_equals( expect=main.TRUE,
                                  actual=appResults,
                                  onpass="Primitives app activated",
diff --git a/TestON/tests/HA/HAswapNodes/HAswapNodes.py b/TestON/tests/HA/HAswapNodes/HAswapNodes.py
index 8f2b58c..2bfa272 100644
--- a/TestON/tests/HA/HAswapNodes/HAswapNodes.py
+++ b/TestON/tests/HA/HAswapNodes/HAswapNodes.py
@@ -22,8 +22,6 @@
 CASE16: Install Distributed Primitives app
 CASE17: Check for basic functionality with distributed primitives
 """
-
-
 class HAswapNodes:
 
     def __init__( self ):
@@ -130,7 +128,7 @@
             killResults = killResults and killed
 
         main.step( "Setup server for cluster metadata file" )
-        port = main.params['server']['port']
+        port = main.params[ 'server' ][ 'port' ]
         rootDir = os.path.dirname( main.testFile ) + "/dependencies"
         main.log.debug( "Root dir: {}".format( rootDir ) )
         status = main.Server.start( main.ONOSbench,
@@ -163,7 +161,7 @@
                             filePath + topoName,
                             main.Mininet1.home,
                             direction="to" )
-        mnResult = main.Mininet1.startNet( )
+        mnResult = main.Mininet1.startNet()
         utilities.assert_equals( expect=main.TRUE, actual=mnResult,
                                  onpass="Mininet Started",
                                  onfail="Error starting Mininet" )
@@ -206,7 +204,7 @@
                   'seamless="seamless"></iframe>\n'
         graphs += ']]></ac:plain-text-body>\n'
         graphs += '</ac:structured-macro>\n'
-        main.log.wiki(graphs)
+        main.log.wiki( graphs )
 
         main.step( "Copying backup config files" )
         path = "~/onos/tools/package/bin/onos-service"
@@ -221,7 +219,7 @@
                                  onfail="Copy backup config file failed" )
         # we need to modify the onos-service file to use remote metadata file
         # url for cluster metadata file
-        iface = main.params['server'].get( 'interface' )
+        iface = main.params[ 'server' ].get( 'interface' )
         ip = main.ONOSbench.getIpAddr( iface=iface )
         metaFile = "cluster.json"
         javaArgs = r"-Donos.cluster.metadata.uri=http:\/\/{}:{}\/{}".format( ip, port, metaFile )
@@ -250,7 +248,7 @@
         main.step( "Installing ONOS package" )
         onosInstallResult = main.TRUE
         for i in range( main.ONOSbench.maxNodes ):
-            node = main.nodes[i]
+            node = main.nodes[ i ]
             options = "-f"
             if i >= main.numCtrls:
                 options = "-nf"  # Don't start more than the current scale
@@ -270,7 +268,7 @@
         main.step( "Set up ONOS secure SSH" )
         secureSshResult = main.TRUE
         for i in range( main.numCtrls ):
-            node = main.nodes[i]
+            node = main.nodes[ i ]
             secureSshResult = secureSshResult and main.ONOSbench.onosSecureSSH( node=node.ip_address )
         utilities.assert_equals( expect=main.TRUE, actual=secureSshResult,
                                  onpass="Test step PASS",
@@ -280,7 +278,7 @@
         for i in range( 2 ):
             onosIsupResult = main.TRUE
             for i in range( main.numCtrls ):
-                node = main.nodes[i]
+                node = main.nodes[ i ]
                 started = main.ONOSbench.isup( node.ip_address )
                 if not started:
                     main.log.error( node.name + " hasn't started" )
@@ -295,9 +293,9 @@
         cliResults = main.TRUE
         threads = []
         for i in range( main.numCtrls ):
-            t = main.Thread( target=main.CLIs[i].startOnosCli,
+            t = main.Thread( target=main.CLIs[ i ].startOnosCli,
                              name="startOnosCli-" + str( i ),
-                             args=[main.nodes[i].ip_address] )
+                             args=[ main.nodes[ i ].ip_address ] )
             threads.append( t )
             t.start()
 
@@ -322,7 +320,7 @@
         main.step( "Checking ONOS nodes" )
         nodeResults = utilities.retry( main.HA.nodesCheck,
                                        False,
-                                       args=[main.activeNodes],
+                                       args=[ main.activeNodes ],
                                        attempts=5 )
         utilities.assert_equals( expect=True, actual=nodeResults,
                                  onpass="Nodes check successful",
@@ -330,7 +328,7 @@
 
         if not nodeResults:
             for i in main.activeNodes:
-                cli = main.CLIs[i]
+                cli = main.CLIs[ i ]
                 main.log.debug( "{} components not ACTIVE: \n{}".format(
                     cli.name,
                     cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
@@ -342,7 +340,7 @@
         # get data from the params
         apps = main.params.get( 'apps' )
         if apps:
-            apps = apps.split(',')
+            apps = apps.split( ',' )
             main.log.warn( apps )
             activateResult = True
             for app in apps:
@@ -369,8 +367,8 @@
             main.log.debug( config )
             checkResult = main.TRUE
             for component in config:
-                for setting in config[component]:
-                    value = config[component][setting]
+                for setting in config[ component ]:
+                    value = config[ component ][ setting ]
                     check = main.CLIs[ 0 ].setCfg( component, setting, value )
                     main.log.info( "Value was changed? {}".format( main.TRUE == check ) )
                     checkResult = check and checkResult
@@ -385,7 +383,7 @@
         appCheck = main.TRUE
         threads = []
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].appToIDCheck,
+            t = main.Thread( target=main.CLIs[ i ].appToIDCheck,
                              name="appToIDCheck-" + str( i ),
                              args=[] )
             threads.append( t )
@@ -395,9 +393,9 @@
             t.join()
             appCheck = appCheck and t.result
         if appCheck != main.TRUE:
-            node = main.activeNodes[0]
-            main.log.warn( main.CLIs[node].apps() )
-            main.log.warn( main.CLIs[node].appIDs() )
+            node = main.activeNodes[ 0 ]
+            main.log.warn( main.CLIs[ node ].apps() )
+            main.log.warn( main.CLIs[ node ].appIDs() )
         utilities.assert_equals( expect=main.TRUE, actual=appCheck,
                                  onpass="App Ids seem to be correct",
                                  onfail="Something is wrong with app Ids" )
@@ -468,9 +466,9 @@
         # Manually assign mastership to the controller we want
         roleCall = main.TRUE
 
-        ipList = [ ]
+        ipList = []
         deviceList = []
-        onosCli = main.CLIs[ main.activeNodes[0] ]
+        onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
         try:
             # Assign mastership to specific controllers. This assignment was
-            # determined for a 7 node cluser, but will work with any sized
+            # determined for a 7 node cluster, but will work with any sized
@@ -544,8 +542,8 @@
         #       atomic and is actually a multi step process
         time.sleep( 5 )
         for i in range( len( ipList ) ):
-            ip = ipList[i]
-            deviceId = deviceList[i]
+            ip = ipList[ i ]
+            deviceId = deviceList[ i ]
             # Check assignment
             master = onosCli.getRole( deviceId ).get( 'master' )
             if ip in master:
@@ -595,7 +593,7 @@
 
         # install onos-app-fwd
         main.step( "Install reactive forwarding app" )
-        onosCli = main.CLIs[ main.activeNodes[0] ]
+        onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
         installResults = onosCli.activateApp( "org.onosproject.fwd" )
         utilities.assert_equals( expect=main.TRUE, actual=installResults,
                                  onpass="Install fwd successful",
@@ -605,7 +603,7 @@
         appCheck = main.TRUE
         threads = []
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].appToIDCheck,
+            t = main.Thread( target=main.CLIs[ i ].appToIDCheck,
                              name="appToIDCheck-" + str( i ),
                              args=[] )
             threads.append( t )
@@ -630,13 +628,13 @@
         pingResult = main.Mininet1.pingall()
         time2 = time.time()
         if not pingResult:
-            main.log.warn("First pingall failed. Trying again...")
+            main.log.warn( "First pingall failed. Trying again..." )
             pingResult = main.Mininet1.pingall()
             passMsg += " on the second try"
         utilities.assert_equals(
             expect=main.TRUE,
             actual=pingResult,
-            onpass= passMsg,
+            onpass=passMsg,
             onfail="Reactive Pingall failed, " +
                    "one or more ping pairs failed" )
         main.log.info( "Time for pingall: %2f seconds" %
@@ -645,8 +643,8 @@
         time.sleep( 11 )
         # uninstall onos-app-fwd
         main.step( "Uninstall reactive forwarding app" )
-        node = main.activeNodes[0]
-        uninstallResult = main.CLIs[node].deactivateApp( "org.onosproject.fwd" )
+        node = main.activeNodes[ 0 ]
+        uninstallResult = main.CLIs[ node ].deactivateApp( "org.onosproject.fwd" )
         utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
                                  onpass="Uninstall fwd successful",
                                  onfail="Uninstall fwd failed" )
@@ -655,7 +653,7 @@
         threads = []
         appCheck2 = main.TRUE
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].appToIDCheck,
+            t = main.Thread( target=main.CLIs[ i ].appToIDCheck,
                              name="appToIDCheck-" + str( i ),
                              args=[] )
             threads.append( t )
@@ -665,9 +663,9 @@
             t.join()
             appCheck2 = appCheck2 and t.result
         if appCheck2 != main.TRUE:
-            node = main.activeNodes[0]
-            main.log.warn( main.CLIs[node].apps() )
-            main.log.warn( main.CLIs[node].appIDs() )
+            node = main.activeNodes[ 0 ]
+            main.log.warn( main.CLIs[ node ].apps() )
+            main.log.warn( main.CLIs[ node ].appIDs() )
         utilities.assert_equals( expect=main.TRUE, actual=appCheck2,
                                  onpass="App Ids seem to be correct",
                                  onfail="Something is wrong with app Ids" )
@@ -695,8 +693,8 @@
                 host2Id = host2Dict.get( 'id', None )
             if host1Id and host2Id:
                 nodeNum = ( i % len( main.activeNodes ) )
-                node = main.activeNodes[nodeNum]
-                tmpId = main.CLIs[node].addHostIntent( host1Id, host2Id )
+                node = main.activeNodes[ nodeNum ]
+                tmpId = main.CLIs[ node ].addHostIntent( host1Id, host2Id )
                 if tmpId:
                     main.log.info( "Added intent with id: " + tmpId )
                     intentIds.append( tmpId )
@@ -706,8 +704,8 @@
             else:
                 main.log.error( "Error, getHost() failed for h" + str( i ) +
                                 " and/or h" + str( i + 10 ) )
-                node = main.activeNodes[0]
-                hosts = main.CLIs[node].hosts()
+                node = main.activeNodes[ 0 ]
+                hosts = main.CLIs[ node ].hosts()
                 main.log.warn( "Hosts output: " )
                 try:
                     main.log.warn( json.dumps( json.loads( hosts ),
@@ -775,7 +773,7 @@
                 for i in range( 14 ):
                     topics.append( "work-partition-" + str( i ) )
                 main.log.debug( topics )
-                ONOStopics = [ j['topic'] for j in parsedLeaders ]
+                ONOStopics = [ j[ 'topic' ] for j in parsedLeaders ]
                 for topic in topics:
                     if topic not in ONOStopics:
                         main.log.error( "Error: " + topic +
@@ -789,13 +787,13 @@
         # Check all nodes
         if missing:
             for i in main.activeNodes:
-                response = main.CLIs[i].leaders( jsonFormat=False)
-                main.log.warn( str( main.CLIs[i].name ) + " leaders output: \n" +
+                response = main.CLIs[ i ].leaders( jsonFormat=False )
+                main.log.warn( str( main.CLIs[ i ].name ) + " leaders output: \n" +
                                str( response ) )
 
         partitions = onosCli.partitions()
         try:
-            if partitions :
+            if partitions:
                 parsedPartitions = json.loads( partitions )
                 main.log.warn( json.dumps( parsedPartitions,
                                            sort_keys=True,
@@ -810,7 +808,7 @@
             main.log.error( repr( partitions ) )
         pendingMap = onosCli.pendingMap()
         try:
-            if pendingMap :
+            if pendingMap:
                 parsedPending = json.loads( pendingMap )
                 main.log.warn( json.dumps( parsedPending,
                                            sort_keys=True,
@@ -829,21 +827,21 @@
             main.log.error( "Error in pushing host intents to ONOS" )
 
         main.step( "Intent Anti-Entropy dispersion" )
-        for j in range(100):
+        for j in range( 100 ):
             correct = True
             main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
             for i in main.activeNodes:
                 onosIds = []
-                ids = main.CLIs[i].getAllIntentsId()
+                ids = main.CLIs[ i ].getAllIntentsId()
                 onosIds.append( ids )
-                main.log.debug( "Intents in " + main.CLIs[i].name + ": " +
+                main.log.debug( "Intents in " + main.CLIs[ i ].name + ": " +
                                 str( sorted( onosIds ) ) )
                 if sorted( ids ) != sorted( intentIds ):
                     main.log.warn( "Set of intent IDs doesn't match" )
                     correct = False
                     break
                 else:
-                    intents = json.loads( main.CLIs[i].intents() )
+                    intents = json.loads( main.CLIs[ i ].intents() )
                     for intent in intents:
                         if intent[ 'state' ] != "INSTALLED":
                             main.log.warn( "Intent " + intent[ 'id' ] +
@@ -853,7 +851,7 @@
             if correct:
                 break
             else:
-                time.sleep(1)
+                time.sleep( 1 )
         if not intentStop:
             intentStop = time.time()
         global gossipTime
@@ -871,7 +869,7 @@
                 append = True
             else:
                 count += 1
-        gossipPeriod = int( main.params['timers']['gossip'] )
+        gossipPeriod = int( main.params[ 'timers' ][ 'gossip' ] )
         maxGossipTime = gossipPeriod * len( main.activeNodes )
         utilities.assert_greater_equals(
                 expect=maxGossipTime, actual=gossipTime,
@@ -933,7 +931,7 @@
                     # FIXME: this should only be after we start the app
                     topics.append( "org.onosproject.election" )
                     main.log.debug( topics )
-                    ONOStopics = [ j['topic'] for j in parsedLeaders ]
+                    ONOStopics = [ j[ 'topic' ] for j in parsedLeaders ]
                     for topic in topics:
                         if topic not in ONOStopics:
                             main.log.error( "Error: " + topic +
@@ -947,14 +945,14 @@
             # Check all nodes
             if missing:
                 for i in main.activeNodes:
-                    node = main.CLIs[i]
-                    response = node.leaders( jsonFormat=False)
+                    node = main.CLIs[ i ]
+                    response = node.leaders( jsonFormat=False )
                     main.log.warn( str( node.name ) + " leaders output: \n" +
                                    str( response ) )
 
             partitions = onosCli.partitions()
             try:
-                if partitions :
+                if partitions:
                     parsedPartitions = json.loads( partitions )
                     main.log.warn( json.dumps( parsedPartitions,
                                                sort_keys=True,
@@ -969,7 +967,7 @@
                 main.log.error( repr( partitions ) )
             pendingMap = onosCli.pendingMap()
             try:
-                if pendingMap :
+                if pendingMap:
                     parsedPending = json.loads( pendingMap )
                     main.log.warn( json.dumps( parsedPending,
                                                sort_keys=True,
@@ -998,7 +996,7 @@
                                 "functionality and check the state of " +\
                                 "the intent"
 
-        onosCli = main.CLIs[ main.activeNodes[0] ]
+        onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
         main.step( "Check Intent state" )
         installedCheck = False
         loopCount = 0
@@ -1084,7 +1082,7 @@
                 # FIXME: topics.append( "org.onosproject.election" )
                 # Print leaders output
                 main.log.debug( topics )
-                ONOStopics = [ j['topic'] for j in parsedLeaders ]
+                ONOStopics = [ j[ 'topic' ] for j in parsedLeaders ]
                 for topic in topics:
                     if topic not in ONOStopics:
                         main.log.error( "Error: " + topic +
@@ -1101,8 +1099,8 @@
         # Check all nodes
         if topicCheck:
             for i in main.activeNodes:
-                node = main.CLIs[i]
-                response = node.leaders( jsonFormat=False)
+                node = main.CLIs[ i ]
+                response = node.leaders( jsonFormat=False )
                 main.log.warn( str( node.name ) + " leaders output: \n" +
                                str( response ) )
 
@@ -1112,7 +1110,7 @@
         # Print partitions
         partitions = onosCli.partitions()
         try:
-            if partitions :
+            if partitions:
                 parsedPartitions = json.loads( partitions )
                 main.log.warn( json.dumps( parsedPartitions,
                                            sort_keys=True,
@@ -1128,7 +1126,7 @@
         # Print Pending Map
         pendingMap = onosCli.pendingMap()
         try:
-            if pendingMap :
+            if pendingMap:
                 parsedPending = json.loads( pendingMap )
                 main.log.warn( json.dumps( parsedPending,
                                            sort_keys=True,
@@ -1182,7 +1180,7 @@
                     # FIXME: this should only be after we start the app
                     topics.append( "org.onosproject.election" )
                     main.log.debug( topics )
-                    ONOStopics = [ j['topic'] for j in parsedLeaders ]
+                    ONOStopics = [ j[ 'topic' ] for j in parsedLeaders ]
                     for topic in topics:
                         if topic not in ONOStopics:
                             main.log.error( "Error: " + topic +
@@ -1195,14 +1193,14 @@
                 main.log.error( repr( leaders ) )
             if missing:
                 for i in main.activeNodes:
-                    node = main.CLIs[i]
-                    response = node.leaders( jsonFormat=False)
+                    node = main.CLIs[ i ]
+                    response = node.leaders( jsonFormat=False )
                     main.log.warn( str( node.name ) + " leaders output: \n" +
                                    str( response ) )
 
             partitions = onosCli.partitions()
             try:
-                if partitions :
+                if partitions:
                     parsedPartitions = json.loads( partitions )
                     main.log.warn( json.dumps( parsedPartitions,
                                                sort_keys=True,
@@ -1217,7 +1215,7 @@
                 main.log.error( repr( partitions ) )
             pendingMap = onosCli.pendingMap()
             try:
-                if pendingMap :
+                if pendingMap:
                     parsedPending = json.loads( pendingMap )
                     main.log.warn( json.dumps( parsedPending,
                                                sort_keys=True,
@@ -1230,8 +1228,8 @@
                 main.log.exception( "Error parsing pending map" )
                 main.log.error( repr( pendingMap ) )
         # Print flowrules
-        node = main.activeNodes[0]
-        main.log.debug( main.CLIs[node].flows( jsonFormat=False ) )
+        node = main.activeNodes[ 0 ]
+        main.log.debug( main.CLIs[ node ].flows( jsonFormat=False ) )
         main.step( "Wait a minute then ping again" )
         # the wait is above
         PingResult = main.TRUE
@@ -1289,7 +1287,7 @@
         rolesNotNull = main.TRUE
         threads = []
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].rolesNotNull,
+            t = main.Thread( target=main.CLIs[ i ].rolesNotNull,
                              name="rolesNotNull-" + str( i ),
                              args=[] )
             threads.append( t )
@@ -1310,7 +1308,7 @@
         rolesResults = True
         threads = []
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].roles,
+            t = main.Thread( target=main.CLIs[ i ].roles,
                              name="roles-" + str( i ),
                              args=[] )
             threads.append( t )
@@ -1321,11 +1319,11 @@
             ONOSMastership.append( t.result )
 
         for i in range( len( ONOSMastership ) ):
-            node = str( main.activeNodes[i] + 1 )
-            if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
+            node = str( main.activeNodes[ i ] + 1 )
+            if not ONOSMastership[ i ] or "Error" in ONOSMastership[ i ]:
                 main.log.error( "Error in getting ONOS" + node + " roles" )
                 main.log.warn( "ONOS" + node + " mastership response: " +
-                               repr( ONOSMastership[i] ) )
+                               repr( ONOSMastership[ i ] ) )
                 rolesResults = False
         utilities.assert_equals(
             expect=True,
@@ -1334,7 +1332,7 @@
             onfail="Error in reading roles from ONOS" )
 
         main.step( "Check for consistency in roles from each controller" )
-        if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
+        if all( [ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
             main.log.info(
                 "Switch roles are consistent across all ONOS nodes" )
         else:
@@ -1347,7 +1345,7 @@
 
         if rolesResults and not consistentMastership:
             for i in range( len( main.activeNodes ) ):
-                node = str( main.activeNodes[i] + 1 )
+                node = str( main.activeNodes[ i ] + 1 )
                 try:
                     main.log.warn(
                         "ONOS" + node + " roles: ",
@@ -1369,7 +1367,7 @@
         intentsResults = True  # Could we read Intents from ONOS?
         threads = []
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].intents,
+            t = main.Thread( target=main.CLIs[ i ].intents,
                              name="intents-" + str( i ),
                              args=[],
                              kwargs={ 'jsonFormat': True } )
@@ -1381,7 +1379,7 @@
             ONOSIntents.append( t.result )
 
         for i in range( len( ONOSIntents ) ):
-            node = str( main.activeNodes[i] + 1 )
+            node = str( main.activeNodes[ i ] + 1 )
             if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
                 main.log.error( "Error in getting ONOS" + node + " intents" )
                 main.log.warn( "ONOS" + node + " intents response: " +
@@ -1394,7 +1392,7 @@
             onfail="Error in reading intents from ONOS" )
 
         main.step( "Check for consistency in Intents from each controller" )
-        if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
+        if all( [ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
             main.log.info( "Intents are consistent across all ONOS " +
                              "nodes" )
         else:
@@ -1442,17 +1440,17 @@
 
         if intentsResults and not consistentIntents:
             # print the json objects
-            n = str( main.activeNodes[-1] + 1 )
+            n = str( main.activeNodes[ -1 ] + 1 )
             main.log.debug( "ONOS" + n + " intents: " )
             main.log.debug( json.dumps( json.loads( ONOSIntents[ -1 ] ),
                                         sort_keys=True,
                                         indent=4,
                                         separators=( ',', ': ' ) ) )
             for i in range( len( ONOSIntents ) ):
-                node = str( main.activeNodes[i] + 1 )
+                node = str( main.activeNodes[ i ] + 1 )
                 if ONOSIntents[ i ] != ONOSIntents[ -1 ]:
                     main.log.debug( "ONOS" + node + " intents: " )
-                    main.log.debug( json.dumps( json.loads( ONOSIntents[i] ),
+                    main.log.debug( json.dumps( json.loads( ONOSIntents[ i ] ),
                                                 sort_keys=True,
                                                 indent=4,
                                                 separators=( ',', ': ' ) ) )
@@ -1472,7 +1470,7 @@
         flowsResults = True
         threads = []
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].flows,
+            t = main.Thread( target=main.CLIs[ i ].flows,
                              name="flows-" + str( i ),
                              args=[],
                              kwargs={ 'jsonFormat': True } )
@@ -1480,14 +1478,14 @@
             t.start()
 
         # NOTE: Flows command can take some time to run
-        time.sleep(30)
+        time.sleep( 30 )
         for t in threads:
             t.join()
             result = t.result
             ONOSFlows.append( result )
 
         for i in range( len( ONOSFlows ) ):
-            num = str( main.activeNodes[i] + 1 )
+            num = str( main.activeNodes[ i ] + 1 )
             if not ONOSFlows[ i ] or "Error" in ONOSFlows[ i ]:
                 main.log.error( "Error in getting ONOS" + num + " flows" )
                 main.log.warn( "ONOS" + num + " flows response: " +
@@ -1524,11 +1522,11 @@
 
         if flowsResults and not consistentFlows:
             for i in range( len( ONOSFlows ) ):
-                node = str( main.activeNodes[i] + 1 )
+                node = str( main.activeNodes[ i ] + 1 )
                 try:
                     main.log.warn(
                         "ONOS" + node + " flows: " +
-                        json.dumps( json.loads( ONOSFlows[i] ), sort_keys=True,
+                        json.dumps( json.loads( ONOSFlows[ i ] ), sort_keys=True,
                                     indent=4, separators=( ',', ': ' ) ) )
                 except ( ValueError, TypeError ):
                     main.log.warn( "ONOS" + node + " flows: " +
@@ -1593,9 +1591,9 @@
         devices = []
         threads = []
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].devices,
+            t = main.Thread( target=main.CLIs[ i ].devices,
                              name="devices-" + str( i ),
-                             args=[ ] )
+                             args=[] )
             threads.append( t )
             t.start()
 
@@ -1605,9 +1603,9 @@
         hosts = []
         threads = []
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].hosts,
+            t = main.Thread( target=main.CLIs[ i ].hosts,
                              name="hosts-" + str( i ),
-                             args=[ ] )
+                             args=[] )
             threads.append( t )
             t.start()
 
@@ -1625,9 +1623,9 @@
         ports = []
         threads = []
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].ports,
+            t = main.Thread( target=main.CLIs[ i ].ports,
                              name="ports-" + str( i ),
-                             args=[ ] )
+                             args=[] )
             threads.append( t )
             t.start()
 
@@ -1637,9 +1635,9 @@
         links = []
         threads = []
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].links,
+            t = main.Thread( target=main.CLIs[ i ].links,
                              name="links-" + str( i ),
-                             args=[ ] )
+                             args=[] )
             threads.append( t )
             t.start()
 
@@ -1649,9 +1647,9 @@
         clusters = []
         threads = []
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].clusters,
+            t = main.Thread( target=main.CLIs[ i ].clusters,
                              name="clusters-" + str( i ),
-                             args=[ ] )
+                             args=[] )
             threads.append( t )
             t.start()
 
@@ -1664,7 +1662,7 @@
         main.step( "Host view is consistent across ONOS nodes" )
         consistentHostsResult = main.TRUE
         for controller in range( len( hosts ) ):
-            controllerStr = str( main.activeNodes[controller] + 1 )
+            controllerStr = str( main.activeNodes[ controller ] + 1 )
             if hosts[ controller ] and "Error" not in hosts[ controller ]:
                 if hosts[ controller ] == hosts[ 0 ]:
                     continue
@@ -1691,10 +1689,10 @@
         main.step( "Each host has an IP address" )
         ipResult = main.TRUE
         for controller in range( 0, len( hosts ) ):
-            controllerStr = str( main.activeNodes[controller] + 1 )
+            controllerStr = str( main.activeNodes[ controller ] + 1 )
             if hosts[ controller ]:
                 for host in hosts[ controller ]:
-                    if not host.get( 'ipAddresses', [ ] ):
+                    if not host.get( 'ipAddresses', [] ):
                         main.log.error( "Error with host ips on controller" +
                                         controllerStr + ": " + str( host ) )
                         ipResult = main.FALSE
@@ -1708,7 +1706,7 @@
         main.step( "Cluster view is consistent across ONOS nodes" )
         consistentClustersResult = main.TRUE
         for controller in range( len( clusters ) ):
-            controllerStr = str( main.activeNodes[controller] + 1 )
+            controllerStr = str( main.activeNodes[ controller ] + 1 )
             if "Error" not in clusters[ controller ]:
                 if clusters[ controller ] == clusters[ 0 ]:
                     continue
@@ -1754,14 +1752,14 @@
         mnLinks = main.Mininet1.getLinks()
         mnHosts = main.Mininet1.getHosts()
         for controller in main.activeNodes:
-            controllerStr = str( main.activeNodes[controller] + 1 )
+            controllerStr = str( main.activeNodes[ controller ] + 1 )
             if devices[ controller ] and ports[ controller ] and\
-                "Error" not in devices[ controller ] and\
-                "Error" not in ports[ controller ]:
-                    currentDevicesResult = main.Mininet1.compareSwitches(
-                            mnSwitches,
-                            json.loads( devices[ controller ] ),
-                            json.loads( ports[ controller ] ) )
+                    "Error" not in devices[ controller ] and\
+                    "Error" not in ports[ controller ]:
+                currentDevicesResult = main.Mininet1.compareSwitches(
+                        mnSwitches,
+                        json.loads( devices[ controller ] ),
+                        json.loads( ports[ controller ] ) )
             else:
                 currentDevicesResult = main.FALSE
             utilities.assert_equals( expect=main.TRUE,
@@ -1849,13 +1847,13 @@
 
         main.step( "Checking ONOS Logs for errors" )
         for i in main.activeNodes:
-            node = main.nodes[i]
+            node = main.nodes[ i ]
             main.log.debug( "Checking logs for errors on " + node.name + ":" )
             main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
 
         main.step( "Generate new metadata file" )
-        old = [ main.activeNodes[1],  main.activeNodes[-2] ]
-        new = range( main.ONOSbench.maxNodes )[-2:]
+        old = [ main.activeNodes[ 1 ], main.activeNodes[ -2 ] ]
+        new = range( main.ONOSbench.maxNodes )[ -2: ]
         assert len( old ) == len( new ), "Lengths of nodes to swap don't match"
         handle = main.ONOSbench.handle
         for x, y in zip( old, new ):
@@ -1877,7 +1875,7 @@
         main.step( "Start new nodes" )  # OR stop old nodes?
         started = main.TRUE
         for i in new:
-            started = main.ONOSbench.onosStart( main.nodes[i].ip_address ) and main.TRUE
+            started = main.ONOSbench.onosStart( main.nodes[ i ].ip_address ) and main.TRUE
         utilities.assert_equals( expect=main.TRUE, actual=started,
                                  onpass="ONOS started",
                                  onfail="ONOS start NOT successful" )
@@ -1886,7 +1884,7 @@
         for i in range( 2 ):
             onosIsupResult = main.TRUE
             for i in main.activeNodes:
-                node = main.nodes[i]
+                node = main.nodes[ i ]
                 main.ONOSbench.onosSecureSSH( node=node.ip_address )
                 started = main.ONOSbench.isup( node.ip_address )
                 if not started:
@@ -1902,9 +1900,9 @@
         cliResults = main.TRUE
         threads = []
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].startOnosCli,
+            t = main.Thread( target=main.CLIs[ i ].startOnosCli,
                              name="startOnosCli-" + str( i ),
-                             args=[main.nodes[i].ip_address] )
+                             args=[ main.nodes[ i ].ip_address ] )
             threads.append( t )
             t.start()
 
@@ -1918,7 +1916,7 @@
         main.step( "Checking ONOS nodes" )
         nodeResults = utilities.retry( main.HA.nodesCheck,
                                        False,
-                                       args=[main.activeNodes],
+                                       args=[ main.activeNodes ],
                                        attempts=5 )
         utilities.assert_equals( expect=True, actual=nodeResults,
                                  onpass="Nodes check successful",
@@ -1927,7 +1925,7 @@
         for i in range( 10 ):
             ready = True
             for i in main.activeNodes:
-                cli = main.CLIs[i]
+                cli = main.CLIs[ i ]
                 output = cli.summary()
                 if not output:
                     ready = False
@@ -1944,7 +1942,7 @@
         # Rerun for election on new nodes
         runResults = main.TRUE
         for i in main.activeNodes:
-            cli = main.CLIs[i]
+            cli = main.CLIs[ i ]
             run = cli.electionTestRun()
             if run != main.TRUE:
                 main.log.error( "Error running for election on " + cli.name )
@@ -1954,11 +1952,11 @@
                                  onfail="Failed to rerun for election" )
 
         for node in main.activeNodes:
-            main.log.warn( "\n****************** {} **************".format( main.nodes[node].ip_address ) )
-            main.log.debug( main.CLIs[node].nodes( jsonFormat=False ) )
-            main.log.debug( main.CLIs[node].leaders( jsonFormat=False ) )
-            main.log.debug( main.CLIs[node].partitions( jsonFormat=False ) )
-            main.log.debug( main.CLIs[node].apps( jsonFormat=False ) )
+            main.log.warn( "\n****************** {} **************".format( main.nodes[ node ].ip_address ) )
+            main.log.debug( main.CLIs[ node ].nodes( jsonFormat=False ) )
+            main.log.debug( main.CLIs[ node ].leaders( jsonFormat=False ) )
+            main.log.debug( main.CLIs[ node ].partitions( jsonFormat=False ) )
+            main.log.debug( main.CLIs[ node ].apps( jsonFormat=False ) )
 
         main.step( "Reapplying cell variable to environment" )
         cellName = main.params[ 'ENV' ][ 'cellName' ]
@@ -1984,9 +1982,9 @@
         rolesNotNull = main.TRUE
         threads = []
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].rolesNotNull,
+            t = main.Thread( target=main.CLIs[ i ].rolesNotNull,
                              name="rolesNotNull-" + str( i ),
-                             args=[ ] )
+                             args=[] )
             threads.append( t )
             t.start()
 
@@ -2005,7 +2003,7 @@
         rolesResults = True
         threads = []
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].roles,
+            t = main.Thread( target=main.CLIs[ i ].roles,
                              name="roles-" + str( i ),
                              args=[] )
             threads.append( t )
@@ -2016,11 +2014,11 @@
             ONOSMastership.append( t.result )
 
         for i in range( len( ONOSMastership ) ):
-            node = str( main.activeNodes[i] + 1 )
-            if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
+            node = str( main.activeNodes[ i ] + 1 )
+            if not ONOSMastership[ i ] or "Error" in ONOSMastership[ i ]:
                 main.log.error( "Error in getting ONOS" + node + " roles" )
                 main.log.warn( "ONOS" + node + " mastership response: " +
-                               repr( ONOSMastership[i] ) )
+                               repr( ONOSMastership[ i ] ) )
                 rolesResults = False
         utilities.assert_equals(
             expect=True,
@@ -2029,7 +2027,7 @@
             onfail="Error in reading roles from ONOS" )
 
         main.step( "Check for consistency in roles from each controller" )
-        if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
+        if all( [ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
             main.log.info(
                 "Switch roles are consistent across all ONOS nodes" )
         else:
@@ -2042,7 +2040,7 @@
 
         if rolesResults and not consistentMastership:
             for i in range( len( ONOSMastership ) ):
-                node = str( main.activeNodes[i] + 1 )
+                node = str( main.activeNodes[ i ] + 1 )
                 main.log.warn( "ONOS" + node + " roles: ",
                                json.dumps( json.loads( ONOSMastership[ i ] ),
                                            sort_keys=True,
@@ -2058,7 +2056,7 @@
         intentsResults = True
         threads = []
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].intents,
+            t = main.Thread( target=main.CLIs[ i ].intents,
                              name="intents-" + str( i ),
                              args=[],
                              kwargs={ 'jsonFormat': True } )
@@ -2069,8 +2067,8 @@
             t.join()
             ONOSIntents.append( t.result )
 
-        for i in range( len( ONOSIntents) ):
-            node = str( main.activeNodes[i] + 1 )
+        for i in range( len( ONOSIntents ) ):
+            node = str( main.activeNodes[ i ] + 1 )
             if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
                 main.log.error( "Error in getting ONOS" + node + " intents" )
                 main.log.warn( "ONOS" + node + " intents response: " +
@@ -2083,7 +2081,7 @@
             onfail="Error in reading intents from ONOS" )
 
         main.step( "Check for consistency in Intents from each controller" )
-        if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
+        if all( [ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
             main.log.info( "Intents are consistent across all ONOS " +
                              "nodes" )
         else:
@@ -2132,12 +2130,12 @@
                 main.log.exception( "Error in parsing intents" )
                 main.log.error( repr( node ) )
             intentStates.append( nodeStates )
-            out = [ (i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
+            out = [ ( i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
             main.log.info( dict( out ) )
 
         if intentsResults and not consistentIntents:
             for i in range( len( main.activeNodes ) ):
-                node = str( main.activeNodes[i] + 1 )
+                node = str( main.activeNodes[ i ] + 1 )
                 main.log.warn( "ONOS" + node + " intents: " )
                 main.log.warn( json.dumps(
                     json.loads( ONOSIntents[ i ] ),
@@ -2174,7 +2172,7 @@
                             main.log.debug( json.dumps( intent ) )
                 except ( ValueError, TypeError ):
                     main.log.exception( "Exception printing intents" )
-                    main.log.debug( repr( ONOSIntents[0] ) )
+                    main.log.debug( repr( ONOSIntents[ 0 ] ) )
                     main.log.debug( repr( intentState ) )
             if sameIntents == main.FALSE:
                 try:
@@ -2188,7 +2186,7 @@
                                                 separators=( ',', ': ' ) ) )
                 except ( ValueError, TypeError ):
                     main.log.exception( "Exception printing intents" )
-                    main.log.debug( repr( ONOSIntents[0] ) )
+                    main.log.debug( repr( ONOSIntents[ 0 ] ) )
                     main.log.debug( repr( intentState ) )
             utilities.assert_equals(
                 expect=main.TRUE,
@@ -2203,7 +2201,7 @@
         for i in range( 28 ):
             main.log.info( "Checking flow table on s" + str( i + 1 ) )
             tmpFlows = main.Mininet1.getFlowTable( "s" + str( i + 1 ), version="1.3", debug=False )
-            curSwitch = main.Mininet1.flowTableComp( flows[i], tmpFlows )
+            curSwitch = main.Mininet1.flowTableComp( flows[ i ], tmpFlows )
             FlowTables = FlowTables and curSwitch
             if curSwitch == main.FALSE:
                 main.log.warn( "Differences in flow table for switch: s{}".format( i + 1 ) )
@@ -2214,7 +2212,7 @@
             onfail="Changes were found in the flow tables" )
 
         main.Mininet2.pingLongKill()
-        '''
+        """
         # main.step( "Check the continuous pings to ensure that no packets " +
         #            "were dropped during component failure" )
         main.Mininet2.pingKill( main.params[ 'TESTONUSER' ],
@@ -2244,15 +2242,14 @@
         # NOTE: Since intents are not persisted with IntentStore,
         #       we expect loss in dataplane connectivity
         LossInPings = main.FALSE
-        '''
-
+        """
         main.step( "Leadership Election is still functional" )
         # Test of LeadershipElection
         leaderList = []
         leaderResult = main.TRUE
 
         for i in main.activeNodes:
-            cli = main.CLIs[i]
+            cli = main.CLIs[ i ]
             leaderN = cli.electionTestLeader()
             leaderList.append( leaderN )
             if leaderN == main.FALSE:
@@ -2310,8 +2307,8 @@
             for i in main.activeNodes:
                 t = main.Thread( target=utilities.retry,
                                  name="devices-" + str( i ),
-                                 args=[ main.CLIs[i].devices, [ None ] ],
-                                 kwargs= { 'sleep': 5, 'attempts': 5,
+                                 args=[ main.CLIs[ i ].devices, [ None ] ],
+                                 kwargs={ 'sleep': 5, 'attempts': 5,
                                            'randomTime': True } )
                 threads.append( t )
                 t.start()
@@ -2325,8 +2322,8 @@
             for i in main.activeNodes:
                 t = main.Thread( target=utilities.retry,
                                  name="hosts-" + str( i ),
-                                 args=[ main.CLIs[i].hosts, [ None ] ],
-                                 kwargs= { 'sleep': 5, 'attempts': 5,
+                                 args=[ main.CLIs[ i ].hosts, [ None ] ],
+                                 kwargs={ 'sleep': 5, 'attempts': 5,
                                            'randomTime': True } )
                 threads.append( t )
                 t.start()
@@ -2340,7 +2337,7 @@
                     main.log.error( repr( t.result ) )
                     hosts.append( None )
             for controller in range( 0, len( hosts ) ):
-                controllerStr = str( main.activeNodes[controller] + 1 )
+                controllerStr = str( main.activeNodes[ controller ] + 1 )
                 if hosts[ controller ]:
                     for host in hosts[ controller ]:
                         if host is None or host.get( 'ipAddresses', [] ) == []:
@@ -2353,8 +2350,8 @@
             for i in main.activeNodes:
                 t = main.Thread( target=utilities.retry,
                                  name="ports-" + str( i ),
-                                 args=[ main.CLIs[i].ports, [ None ] ],
-                                 kwargs= { 'sleep': 5, 'attempts': 5,
+                                 args=[ main.CLIs[ i ].ports, [ None ] ],
+                                 kwargs={ 'sleep': 5, 'attempts': 5,
                                            'randomTime': True } )
                 threads.append( t )
                 t.start()
@@ -2367,8 +2364,8 @@
             for i in main.activeNodes:
                 t = main.Thread( target=utilities.retry,
                                  name="links-" + str( i ),
-                                 args=[ main.CLIs[i].links, [ None ] ],
-                                 kwargs= { 'sleep': 5, 'attempts': 5,
+                                 args=[ main.CLIs[ i ].links, [ None ] ],
+                                 kwargs={ 'sleep': 5, 'attempts': 5,
                                            'randomTime': True } )
                 threads.append( t )
                 t.start()
@@ -2381,8 +2378,8 @@
             for i in main.activeNodes:
                 t = main.Thread( target=utilities.retry,
                                  name="clusters-" + str( i ),
-                                 args=[ main.CLIs[i].clusters, [ None ] ],
-                                 kwargs= { 'sleep': 5, 'attempts': 5,
+                                 args=[ main.CLIs[ i ].clusters, [ None ] ],
+                                 kwargs={ 'sleep': 5, 'attempts': 5,
                                            'randomTime': True } )
                 threads.append( t )
                 t.start()
@@ -2401,18 +2398,18 @@
                all( e is None for e in ports ) and\
                all( e is None for e in links ) and\
                all( e is None for e in clusters ):
-                   topoFailMsg = "Could not get topology from ONOS"
-                   main.log.error( topoFailMsg )
-                   continue  # Try again, No use trying to compare
+                topoFailMsg = "Could not get topology from ONOS"
+                main.log.error( topoFailMsg )
+                continue  # Try again, no use trying to compare
 
             mnSwitches = main.Mininet1.getSwitches()
             mnLinks = main.Mininet1.getLinks()
             mnHosts = main.Mininet1.getHosts()
             for controller in range( len( main.activeNodes ) ):
-                controllerStr = str( main.activeNodes[controller] + 1 )
+                controllerStr = str( main.activeNodes[ controller ] + 1 )
                 if devices[ controller ] and ports[ controller ] and\
-                    "Error" not in devices[ controller ] and\
-                    "Error" not in ports[ controller ]:
+                        "Error" not in devices[ controller ] and\
+                        "Error" not in ports[ controller ]:
 
                     try:
                         currentDevicesResult = main.Mininet1.compareSwitches(
@@ -2465,29 +2462,29 @@
                 mappings = {}
                 for i in range( 1, 29 ):  # hosts 1 through 28
                     # set up correct variables:
-                    macId = "00:" * 5 + hex( i ).split( "0x" )[1].upper().zfill(2)
+                    macId = "00:" * 5 + hex( i ).split( "0x" )[ 1 ].upper().zfill( 2 )
                     if i == 1:
-                        deviceId = "1000".zfill(16)
+                        deviceId = "1000".zfill( 16 )
                     elif i == 2:
-                        deviceId = "2000".zfill(16)
+                        deviceId = "2000".zfill( 16 )
                     elif i == 3:
-                        deviceId = "3000".zfill(16)
+                        deviceId = "3000".zfill( 16 )
                     elif i == 4:
-                        deviceId = "3004".zfill(16)
+                        deviceId = "3004".zfill( 16 )
                     elif i == 5:
-                        deviceId = "5000".zfill(16)
+                        deviceId = "5000".zfill( 16 )
                     elif i == 6:
-                        deviceId = "6000".zfill(16)
+                        deviceId = "6000".zfill( 16 )
                     elif i == 7:
-                        deviceId = "6007".zfill(16)
+                        deviceId = "6007".zfill( 16 )
                     elif i >= 8 and i <= 17:
                         dpid = '3' + str( i ).zfill( 3 )
-                        deviceId = dpid.zfill(16)
+                        deviceId = dpid.zfill( 16 )
                     elif i >= 18 and i <= 27:
                         dpid = '6' + str( i ).zfill( 3 )
-                        deviceId = dpid.zfill(16)
+                        deviceId = dpid.zfill( 16 )
                     elif i == 28:
-                        deviceId = "2800".zfill(16)
+                        deviceId = "2800".zfill( 16 )
                     mappings[ macId ] = deviceId
                 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
                     if hosts[ controller ] == []:
@@ -2507,7 +2504,7 @@
                                 assert location, "location field could not be found for this host object"
 
                                 # Trim the protocol identifier off deviceId
-                                device = str( location.get( 'elementId' ) ).split(':')[1]
+                                device = str( location.get( 'elementId' ) ).split( ':' )[ 1 ]
                                 assert device, "elementId field could not be found for this host location object"
 
                                 port = location.get( 'port' )
@@ -2518,7 +2515,7 @@
                                     if str( port ) != "1":
                                         main.log.error( "The attachment port is incorrect for " +
                                                         "host " + str( mac ) +
-                                                        ". Expected: 1 Actual: " + str( port) )
+                                                        ". Expected: 1 Actual: " + str( port ) )
                                         hostAttachment = False
                                     if device != mappings[ str( mac ) ]:
                                         main.log.error( "The attachment device is incorrect for " +
@@ -2562,7 +2559,7 @@
         main.step( "Hosts view is consistent across all ONOS nodes" )
         consistentHostsResult = main.TRUE
         for controller in range( len( hosts ) ):
-            controllerStr = str( main.activeNodes[controller] + 1 )
+            controllerStr = str( main.activeNodes[ controller ] + 1 )
             if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
                 if hosts[ controller ] == hosts[ 0 ]:
                     continue
@@ -2604,7 +2601,7 @@
         main.step( "Clusters view is consistent across all ONOS nodes" )
         consistentClustersResult = main.TRUE
         for controller in range( len( clusters ) ):
-            controllerStr = str( main.activeNodes[controller] + 1 )
+            controllerStr = str( main.activeNodes[ controller ] + 1 )
             if "Error" not in clusters[ controller ]:
                 if clusters[ controller ] == clusters[ 0 ]:
                     continue
@@ -2630,14 +2627,13 @@
             for x in links:
                 main.log.warn( "{}: {}".format( len( x ), x ) )
 
-
         main.step( "There is only one SCC" )
         # there should always only be one cluster
         try:
             numClusters = len( json.loads( clusters[ 0 ] ) )
         except ( ValueError, TypeError ):
             main.log.exception( "Error parsing clusters[0]: " +
-                                repr( clusters[0] ) )
+                                repr( clusters[ 0 ] ) )
             numClusters = "ERROR"
         clusterResults = main.FALSE
         if numClusters == 1:
@@ -2687,7 +2683,7 @@
         main.step( "Checking ONOS nodes" )
         nodeResults = utilities.retry( main.HA.nodesCheck,
                                        False,
-                                       args=[main.activeNodes],
+                                       args=[ main.activeNodes ],
                                        attempts=5 )
         utilities.assert_equals( expect=True, actual=nodeResults,
                                  onpass="Nodes check successful",
@@ -2695,8 +2691,8 @@
         if not nodeResults:
             for i in main.activeNodes:
                 main.log.debug( "{} components not ACTIVE: \n{}".format(
-                    main.CLIs[i].name,
-                    main.CLIs[i].sendline( "scr:list | grep -v ACTIVE" ) ) )
+                    main.CLIs[ i ].name,
+                    main.CLIs[ i ].sendline( "scr:list | grep -v ACTIVE" ) ) )
 
         if not topoResult:
             main.cleanup()
@@ -2773,7 +2769,7 @@
         switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
 
         description = "Killing a switch to ensure it is discovered correctly"
-        onosCli = main.CLIs[ main.activeNodes[0] ]
+        onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
         main.case( description )
         switch = main.params[ 'kill' ][ 'switch' ]
         switchDPID = main.params[ 'kill' ][ 'dpid' ]
@@ -2811,7 +2807,7 @@
         switch = main.params[ 'kill' ][ 'switch' ]
         switchDPID = main.params[ 'kill' ][ 'dpid' ]
         links = main.params[ 'kill' ][ 'links' ].split()
-        onosCli = main.CLIs[ main.activeNodes[0] ]
+        onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
         description = "Adding a switch to ensure it is discovered correctly"
         main.case( description )
 
@@ -2889,15 +2885,15 @@
             main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
 
         try:
-            timerLog = open( main.logdir + "/Timers.csv", 'w')
+            timerLog = open( main.logdir + "/Timers.csv", 'w' )
             main.log.error( ", ".join( labels ) + "\n" + ", ".join( data ) )
             timerLog.write( ", ".join( labels ) + "\n" + ", ".join( data ) )
             timerLog.close()
-        except NameError, e:
-            main.log.exception(e)
+        except NameError as e:
+            main.log.exception( e )
 
         main.step( "Stopping webserver" )
-        status = main.Server.stop( )
+        status = main.Server.stop()
         utilities.assert_equals( expect=main.TRUE, actual=status,
                                  onpass="Stop Server",
                                  onfail="Failled to stop SimpleHTTPServer" )
@@ -2914,9 +2910,9 @@
         assert main.CLIs, "main.CLIs not defined"
         assert main.nodes, "main.nodes not defined"
 
-        main.case("Start Leadership Election app")
+        main.case( "Start Leadership Election app" )
         main.step( "Install leadership election app" )
-        onosCli = main.CLIs[ main.activeNodes[0] ]
+        onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
         appResult = onosCli.activateApp( "org.onosproject.election" )
         utilities.assert_equals(
             expect=main.TRUE,
@@ -2926,9 +2922,9 @@
 
         main.step( "Run for election on each node" )
         for i in main.activeNodes:
-            main.CLIs[i].electionTestRun()
-        time.sleep(5)
-        activeCLIs = [ main.CLIs[i] for i in main.activeNodes ]
+            main.CLIs[ i ].electionTestRun()
+        time.sleep( 5 )
+        activeCLIs = [ main.CLIs[ i ] for i in main.activeNodes ]
         sameResult, leaders = main.HA.consistentLeaderboards( activeCLIs )
         utilities.assert_equals(
             expect=True,
@@ -2938,7 +2934,7 @@
 
         if sameResult:
             leader = leaders[ 0 ][ 0 ]
-            if main.nodes[ main.activeNodes[0] ].ip_address in leader:
+            if main.nodes[ main.activeNodes[ 0 ] ].ip_address in leader:
                 correctLeader = True
             else:
                 correctLeader = False
@@ -2988,7 +2984,7 @@
         electionResult = main.TRUE
 
         for i in main.activeNodes:  # run test election on each node
-            if main.CLIs[i].electionTestRun() == main.FALSE:
+            if main.CLIs[ i ].electionTestRun() == main.FALSE:
                 electionResult = main.FALSE
         utilities.assert_equals(
             expect=main.TRUE,
@@ -3003,7 +2999,7 @@
 
         main.step( "Check that each node shows the same leader and candidates" )
         failMessage = "Nodes have different leaderboards"
-        activeCLIs = [ main.CLIs[i] for i in main.activeNodes ]
+        activeCLIs = [ main.CLIs[ i ] for i in main.activeNodes ]
         sameResult, oldLeaders = main.HA.consistentLeaderboards( activeCLIs )
         if sameResult:
             oldLeader = oldLeaders[ 0 ][ 0 ]
@@ -3053,7 +3049,7 @@
         if newLeader == oldLeader:
             newLeaderResult = False
             main.log.error( "All nodes still see old leader: " + str( oldLeader ) +
-                " as the current leader" )
+                            " as the current leader" )
         utilities.assert_equals(
             expect=True,
             actual=newLeaderResult,
@@ -3070,7 +3066,7 @@
             else:
                 main.log.info( "Expected no leader, got: " + str( newLeader ) )
                 correctCandidateResult = main.FALSE
-        elif len( oldLeaders[0] ) >= 3:
+        elif len( oldLeaders[ 0 ] ) >= 3:
             if newLeader == oldLeaders[ 0 ][ 2 ]:
                 # correct leader was elected
                 correctCandidateResult = main.TRUE
@@ -3110,10 +3106,10 @@
         positionResult, reRunLeaders = main.HA.consistentLeaderboards( activeCLIs )
 
         # Check that the re-elected node is last on the candidate List
-        if not reRunLeaders[0]:
+        if not reRunLeaders[ 0 ]:
             positionResult = main.FALSE
         elif oldLeader != reRunLeaders[ 0 ][ -1 ]:
-            main.log.error( "Old Leader ({}) not in the proper position: {} ".format( str( oldLeader),
+            main.log.error( "Old Leader ({}) not in the proper position: {} ".format( str( oldLeader ),
                                                                                       str( reRunLeaders[ 0 ] ) ) )
             positionResult = main.FALSE
         utilities.assert_equals(
@@ -3137,15 +3133,15 @@
         # Variables for the distributed primitives tests
         main.pCounterName = "TestON-Partitions"
         main.pCounterValue = 0
-        main.onosSet = set([])
+        main.onosSet = set( [] )
         main.onosSetName = "TestON-set"
 
         description = "Install Primitives app"
         main.case( description )
         main.step( "Install Primitives app" )
         appName = "org.onosproject.distributedprimitives"
-        node = main.activeNodes[0]
-        appResults = main.CLIs[node].activateApp( appName )
+        node = main.activeNodes[ 0 ]
+        appResults = main.CLIs[ node ].activateApp( appName )
         utilities.assert_equals( expect=main.TRUE,
                                  actual=appResults,
                                  onpass="Primitives app activated",
diff --git a/TestON/tests/HA/dependencies/HA.py b/TestON/tests/HA/dependencies/HA.py
index 7df3e51..ac0e891 100644
--- a/TestON/tests/HA/dependencies/HA.py
+++ b/TestON/tests/HA/dependencies/HA.py
@@ -1,6 +1,7 @@
 import json
 import time
 
+
 class HA():
 
     def __init__( self ):
@@ -10,7 +11,7 @@
         """
         Checks that TestON counters are consistent across all nodes.
 
-        Returns the tuple (onosCounters, consistent)
+        Returns the tuple ( onosCounters, consistent )
         - onosCounters is the parsed json output of the counters command on
           all nodes
         - consistent is main.TRUE if all "TestON" counters are consitent across
@@ -23,8 +24,8 @@
             for i in main.activeNodes:
                 t = main.Thread( target=utilities.retry,
                                  name="counters-" + str( i ),
-                                 args=[ main.CLIs[i].counters, [ None ] ],
-                                 kwargs= { 'sleep': 5, 'attempts': 5,
+                                 args=[ main.CLIs[ i ].counters, [ None ] ],
+                                 kwargs={ 'sleep': 5, 'attempts': 5,
                                            'randomTime': True } )
                 threads.append( t )
                 t.start()
@@ -34,10 +35,10 @@
             onosCounters = []
             for i in range( len( main.activeNodes ) ):
                 try:
-                    onosCounters.append( json.loads( onosCountersRaw[i] ) )
+                    onosCounters.append( json.loads( onosCountersRaw[ i ] ) )
                 except ( ValueError, TypeError ):
                     main.log.error( "Could not parse counters response from ONOS" +
-                                    str( main.activeNodes[i] + 1 ) )
+                                    str( main.activeNodes[ i ] + 1 ) )
                     main.log.warn( repr( onosCountersRaw[ i ] ) )
                     onosCounters.append( [] )
 
@@ -45,20 +46,20 @@
             # make a list of all the "TestON-*" counters in ONOS
             # looks like a dict whose keys are the names of the ONOS nodes and
             # values are a list of the counters. I.E.
-            # { "ONOS1": [ { "name":"TestON-Partitions","value":56} ]
+            # { "ONOS1": [ { "name":"TestON-Partitions","value":56 } ]
             # }
             # NOTE: There is an assumption that all nodes are active
             #        based on the above for loops
             for controller in enumerate( onosCounters ):
-                for key, value in controller[1].iteritems():
+                for key, value in controller[ 1 ].iteritems():
                     if 'TestON' in key:
-                        node = 'ONOS' + str( controller[0] + 1 )
+                        node = 'ONOS' + str( controller[ 0 ] + 1 )
                         try:
-                            testCounters[node].append( { key: value } )
+                            testCounters[ node ].append( { key: value } )
                         except KeyError:
-                            testCounters[node] = [ { key: value } ]
+                            testCounters[ node ] = [ { key: value } ]
             # compare the counters on each node
-            firstV = testCounters.values()[0]
+            firstV = testCounters.values()[ 0 ]
             tmp = [ v == firstV for k, v in testCounters.iteritems() ]
             if all( tmp ):
                 consistent = main.TRUE
@@ -83,12 +84,12 @@
             onosCounters, consistent = self.consistentCheck()
             # Check for correct values
             for i in range( len( main.activeNodes ) ):
-                current = onosCounters[i]
+                current = onosCounters[ i ]
                 onosValue = None
                 try:
                     onosValue = current.get( counterName )
                 except AttributeError:
-                    node = str( main.activeNodes[i] + 1 )
+                    node = str( main.activeNodes[ i ] + 1 )
                     main.log.exception( "ONOS" + node + " counters result " +
                                         "is not as expected" )
                     correctResults = main.FALSE
@@ -116,13 +117,13 @@
             for cli in nodes:
                 leaderList.append( cli.specificLeaderCandidate( TOPIC ) )
             # Compare leaderboards
-            result = all( i == leaderList[0] for i in leaderList ) and\
+            result = all( i == leaderList[ 0 ] for i in leaderList ) and\
                      leaderList is not None
             main.log.debug( leaderList )
             main.log.warn( result )
             if result:
                 return ( result, leaderList )
-            time.sleep(5)  # TODO: paramerterize
+            time.sleep( 5 )  # TODO: parameterize
         main.log.error( "Inconsistent leaderboards:" + str( leaderList ) )
         return ( result, leaderList )
 
@@ -131,25 +132,24 @@
         results = True
         threads = []
         for i in nodes:
-            t = main.Thread( target=main.CLIs[i].nodes,
+            t = main.Thread( target=main.CLIs[ i ].nodes,
                              name="nodes-" + str( i ),
-                             args=[ ] )
+                             args=[] )
             threads.append( t )
             t.start()
 
         for t in threads:
             t.join()
             nodesOutput.append( t.result )
-        ips = [ main.nodes[node].ip_address for node in nodes ]
-        ips.sort()
+        ips = sorted( [ main.nodes[ node ].ip_address for node in nodes ] )
         for i in nodesOutput:
             try:
                 current = json.loads( i )
                 activeIps = []
                 currentResult = False
                 for node in current:
-                    if node['state'] == 'READY':
-                        activeIps.append( node['ip'] )
+                    if node[ 'state' ] == 'READY':
+                        activeIps.append( node[ 'ip' ] )
                 activeIps.sort()
                 if ips == activeIps:
                     currentResult = True
@@ -165,7 +165,7 @@
         threads = []
         completedValues = []
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].workQueueTotalCompleted,
+            t = main.Thread( target=main.CLIs[ i ].workQueueTotalCompleted,
                              name="WorkQueueCompleted-" + str( i ),
                              args=[ workQueueName ] )
             threads.append( t )
@@ -185,7 +185,7 @@
         threads = []
         inProgressValues = []
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].workQueueTotalInProgress,
+            t = main.Thread( target=main.CLIs[ i ].workQueueTotalInProgress,
                              name="WorkQueueInProgress-" + str( i ),
                              args=[ workQueueName ] )
             threads.append( t )
@@ -205,7 +205,7 @@
         threads = []
         pendingValues = []
         for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[i].workQueueTotalPending,
+            t = main.Thread( target=main.CLIs[ i ].workQueueTotalPending,
                              name="WorkQueuePending-" + str( i ),
                              args=[ workQueueName ] )
             threads.append( t )
@@ -245,7 +245,7 @@
                 main.onosSet
             except NameError:
                 main.log.error( "main.onosSet not defined, setting to empty Set" )
-                main.onosSet = set([])
+                main.onosSet = set( [] )
             # Variables for the distributed primitives tests. These are local only
             addValue = "a"
             addAllValue = "a b c d e f"
@@ -269,7 +269,7 @@
             threads = []
             addedPValues = []
             for i in main.activeNodes:
-                t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
+                t = main.Thread( target=main.CLIs[ i ].counterTestAddAndGet,
                                  name="counterAddAndGet-" + str( i ),
                                  args=[ main.pCounterName ] )
                 main.pCounterValue += 1
@@ -299,7 +299,7 @@
             threads = []
             addedPValues = []
             for i in main.activeNodes:
-                t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
+                t = main.Thread( target=main.CLIs[ i ].counterTestGetAndAdd,
                                  name="counterGetAndAdd-" + str( i ),
                                  args=[ main.pCounterName ] )
                 addedPValues.append( main.pCounterValue )
@@ -336,7 +336,7 @@
             threads = []
             addedPValues = []
             for i in main.activeNodes:
-                t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
+                t = main.Thread( target=main.CLIs[ i ].counterTestAddAndGet,
                                  name="counterIncrement-" + str( i ),
                                  args=[ main.pCounterName ],
                                  kwargs={ "delta": -8 } )
@@ -367,7 +367,7 @@
             threads = []
             addedPValues = []
             for i in main.activeNodes:
-                t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
+                t = main.Thread( target=main.CLIs[ i ].counterTestAddAndGet,
                                  name="counterIncrement-" + str( i ),
                                  args=[ main.pCounterName ],
                                  kwargs={ "delta": 5 } )
@@ -398,7 +398,7 @@
             threads = []
             addedPValues = []
             for i in main.activeNodes:
-                t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
+                t = main.Thread( target=main.CLIs[ i ].counterTestGetAndAdd,
                                  name="counterIncrement-" + str( i ),
                                  args=[ main.pCounterName ],
                                  kwargs={ "delta": 5 } )
@@ -437,7 +437,7 @@
             getResponses = []
             threads = []
             for i in main.activeNodes:
-                t = main.Thread( target=main.CLIs[i].setTestGet,
+                t = main.Thread( target=main.CLIs[ i ].setTestGet,
                                  name="setTestGet-" + str( i ),
                                  args=[ main.onosSetName ] )
                 threads.append( t )
@@ -448,8 +448,8 @@
 
             getResults = main.TRUE
             for i in range( len( main.activeNodes ) ):
-                node = str( main.activeNodes[i] + 1 )
-                if isinstance( getResponses[ i ], list):
+                node = str( main.activeNodes[ i ] + 1 )
+                if isinstance( getResponses[ i ], list ):
                     current = set( getResponses[ i ] )
                     if len( current ) == len( getResponses[ i ] ):
                         # no repeats
@@ -479,7 +479,7 @@
             sizeResponses = []
             threads = []
             for i in main.activeNodes:
-                t = main.Thread( target=main.CLIs[i].setTestSize,
+                t = main.Thread( target=main.CLIs[ i ].setTestSize,
                                  name="setTestSize-" + str( i ),
                                  args=[ main.onosSetName ] )
                 threads.append( t )
@@ -490,7 +490,7 @@
 
             sizeResults = main.TRUE
             for i in range( len( main.activeNodes ) ):
-                node = str( main.activeNodes[i] + 1 )
+                node = str( main.activeNodes[ i ] + 1 )
                 if size != sizeResponses[ i ]:
                     sizeResults = main.FALSE
                     main.log.error( "ONOS" + node +
@@ -507,7 +507,7 @@
             addResponses = []
             threads = []
             for i in main.activeNodes:
-                t = main.Thread( target=main.CLIs[i].setTestAdd,
+                t = main.Thread( target=main.CLIs[ i ].setTestAdd,
                                  name="setTestAdd-" + str( i ),
                                  args=[ main.onosSetName, addValue ] )
                 threads.append( t )
@@ -541,7 +541,7 @@
             getResponses = []
             threads = []
             for i in main.activeNodes:
-                t = main.Thread( target=main.CLIs[i].setTestGet,
+                t = main.Thread( target=main.CLIs[ i ].setTestGet,
                                  name="setTestGet-" + str( i ),
                                  args=[ main.onosSetName ] )
                 threads.append( t )
@@ -551,8 +551,8 @@
                 getResponses.append( t.result )
             getResults = main.TRUE
             for i in range( len( main.activeNodes ) ):
-                node = str( main.activeNodes[i] + 1 )
-                if isinstance( getResponses[ i ], list):
+                node = str( main.activeNodes[ i ] + 1 )
+                if isinstance( getResponses[ i ], list ):
                     current = set( getResponses[ i ] )
                     if len( current ) == len( getResponses[ i ] ):
                         # no repeats
@@ -574,7 +574,7 @@
             sizeResponses = []
             threads = []
             for i in main.activeNodes:
-                t = main.Thread( target=main.CLIs[i].setTestSize,
+                t = main.Thread( target=main.CLIs[ i ].setTestSize,
                                  name="setTestSize-" + str( i ),
                                  args=[ main.onosSetName ] )
                 threads.append( t )
@@ -584,7 +584,7 @@
                 sizeResponses.append( t.result )
             sizeResults = main.TRUE
             for i in range( len( main.activeNodes ) ):
-                node = str( main.activeNodes[i] + 1 )
+                node = str( main.activeNodes[ i ] + 1 )
                 if size != sizeResponses[ i ]:
                     sizeResults = main.FALSE
                     main.log.error( "ONOS" + node +
@@ -602,7 +602,7 @@
             addResponses = []
             threads = []
             for i in main.activeNodes:
-                t = main.Thread( target=main.CLIs[i].setTestAdd,
+                t = main.Thread( target=main.CLIs[ i ].setTestAdd,
                                  name="setTestAddAll-" + str( i ),
                                  args=[ main.onosSetName, addAllValue ] )
                 threads.append( t )
@@ -636,7 +636,7 @@
             getResponses = []
             threads = []
             for i in main.activeNodes:
-                t = main.Thread( target=main.CLIs[i].setTestGet,
+                t = main.Thread( target=main.CLIs[ i ].setTestGet,
                                  name="setTestGet-" + str( i ),
                                  args=[ main.onosSetName ] )
                 threads.append( t )
@@ -646,8 +646,8 @@
                 getResponses.append( t.result )
             getResults = main.TRUE
             for i in range( len( main.activeNodes ) ):
-                node = str( main.activeNodes[i] + 1 )
-                if isinstance( getResponses[ i ], list):
+                node = str( main.activeNodes[ i ] + 1 )
+                if isinstance( getResponses[ i ], list ):
                     current = set( getResponses[ i ] )
                     if len( current ) == len( getResponses[ i ] ):
                         # no repeats
@@ -671,7 +671,7 @@
             sizeResponses = []
             threads = []
             for i in main.activeNodes:
-                t = main.Thread( target=main.CLIs[i].setTestSize,
+                t = main.Thread( target=main.CLIs[ i ].setTestSize,
                                  name="setTestSize-" + str( i ),
                                  args=[ main.onosSetName ] )
                 threads.append( t )
@@ -681,7 +681,7 @@
                 sizeResponses.append( t.result )
             sizeResults = main.TRUE
             for i in range( len( main.activeNodes ) ):
-                node = str( main.activeNodes[i] + 1 )
+                node = str( main.activeNodes[ i ] + 1 )
                 if size != sizeResponses[ i ]:
                     sizeResults = main.FALSE
                     main.log.error( "ONOS" + node +
@@ -698,7 +698,7 @@
             containsResponses = []
             threads = []
             for i in main.activeNodes:
-                t = main.Thread( target=main.CLIs[i].setTestGet,
+                t = main.Thread( target=main.CLIs[ i ].setTestGet,
                                  name="setContains-" + str( i ),
                                  args=[ main.onosSetName ],
                                  kwargs={ "values": addValue } )
@@ -725,7 +725,7 @@
             containsAllResponses = []
             threads = []
             for i in main.activeNodes:
-                t = main.Thread( target=main.CLIs[i].setTestGet,
+                t = main.Thread( target=main.CLIs[ i ].setTestGet,
                                  name="setContainsAll-" + str( i ),
                                  args=[ main.onosSetName ],
                                  kwargs={ "values": addAllValue } )
@@ -753,7 +753,7 @@
             removeResponses = []
             threads = []
             for i in main.activeNodes:
-                t = main.Thread( target=main.CLIs[i].setTestRemove,
+                t = main.Thread( target=main.CLIs[ i ].setTestRemove,
                                  name="setTestRemove-" + str( i ),
                                  args=[ main.onosSetName, addValue ] )
                 threads.append( t )
@@ -787,7 +787,7 @@
             getResponses = []
             threads = []
             for i in main.activeNodes:
-                t = main.Thread( target=main.CLIs[i].setTestGet,
+                t = main.Thread( target=main.CLIs[ i ].setTestGet,
                                  name="setTestGet-" + str( i ),
                                  args=[ main.onosSetName ] )
                 threads.append( t )
@@ -797,8 +797,8 @@
                 getResponses.append( t.result )
             getResults = main.TRUE
             for i in range( len( main.activeNodes ) ):
-                node = str( main.activeNodes[i] + 1 )
-                if isinstance( getResponses[ i ], list):
+                node = str( main.activeNodes[ i ] + 1 )
+                if isinstance( getResponses[ i ], list ):
                     current = set( getResponses[ i ] )
                     if len( current ) == len( getResponses[ i ] ):
                         # no repeats
@@ -822,7 +822,7 @@
             sizeResponses = []
             threads = []
             for i in main.activeNodes:
-                t = main.Thread( target=main.CLIs[i].setTestSize,
+                t = main.Thread( target=main.CLIs[ i ].setTestSize,
                                  name="setTestSize-" + str( i ),
                                  args=[ main.onosSetName ] )
                 threads.append( t )
@@ -832,7 +832,7 @@
                 sizeResponses.append( t.result )
             sizeResults = main.TRUE
             for i in range( len( main.activeNodes ) ):
-                node = str( main.activeNodes[i] + 1 )
+                node = str( main.activeNodes[ i ] + 1 )
                 if size != sizeResponses[ i ]:
                     sizeResults = main.FALSE
                     main.log.error( "ONOS" + node +
@@ -851,7 +851,7 @@
             threads = []
             try:
                 for i in main.activeNodes:
-                    t = main.Thread( target=main.CLIs[i].setTestRemove,
+                    t = main.Thread( target=main.CLIs[ i ].setTestRemove,
                                      name="setTestRemoveAll-" + str( i ),
                                      args=[ main.onosSetName, addAllValue ] )
                     threads.append( t )
@@ -859,8 +859,8 @@
                 for t in threads:
                     t.join()
                     removeAllResponses.append( t.result )
-            except Exception, e:
-                main.log.exception(e)
+            except Exception as e:
+                main.log.exception( e )
 
             # main.TRUE = successfully changed the set
             # main.FALSE = action resulted in no change in set
@@ -887,7 +887,7 @@
             getResponses = []
             threads = []
             for i in main.activeNodes:
-                t = main.Thread( target=main.CLIs[i].setTestGet,
+                t = main.Thread( target=main.CLIs[ i ].setTestGet,
                                  name="setTestGet-" + str( i ),
                                  args=[ main.onosSetName ] )
                 threads.append( t )
@@ -897,8 +897,8 @@
                 getResponses.append( t.result )
             getResults = main.TRUE
             for i in range( len( main.activeNodes ) ):
-                node = str( main.activeNodes[i] + 1 )
-                if isinstance( getResponses[ i ], list):
+                node = str( main.activeNodes[ i ] + 1 )
+                if isinstance( getResponses[ i ], list ):
                     current = set( getResponses[ i ] )
                     if len( current ) == len( getResponses[ i ] ):
                         # no repeats
@@ -922,7 +922,7 @@
             sizeResponses = []
             threads = []
             for i in main.activeNodes:
-                t = main.Thread( target=main.CLIs[i].setTestSize,
+                t = main.Thread( target=main.CLIs[ i ].setTestSize,
                                  name="setTestSize-" + str( i ),
                                  args=[ main.onosSetName ] )
                 threads.append( t )
@@ -932,7 +932,7 @@
                 sizeResponses.append( t.result )
             sizeResults = main.TRUE
             for i in range( len( main.activeNodes ) ):
-                node = str( main.activeNodes[i] + 1 )
+                node = str( main.activeNodes[ i ] + 1 )
                 if size != sizeResponses[ i ]:
                     sizeResults = main.FALSE
                     main.log.error( "ONOS" + node +
@@ -950,7 +950,7 @@
             addResponses = []
             threads = []
             for i in main.activeNodes:
-                t = main.Thread( target=main.CLIs[i].setTestAdd,
+                t = main.Thread( target=main.CLIs[ i ].setTestAdd,
                                  name="setTestAddAll-" + str( i ),
                                  args=[ main.onosSetName, addAllValue ] )
                 threads.append( t )
@@ -984,7 +984,7 @@
             getResponses = []
             threads = []
             for i in main.activeNodes:
-                t = main.Thread( target=main.CLIs[i].setTestGet,
+                t = main.Thread( target=main.CLIs[ i ].setTestGet,
                                  name="setTestGet-" + str( i ),
                                  args=[ main.onosSetName ] )
                 threads.append( t )
@@ -994,8 +994,8 @@
                 getResponses.append( t.result )
             getResults = main.TRUE
             for i in range( len( main.activeNodes ) ):
-                node = str( main.activeNodes[i] + 1 )
-                if isinstance( getResponses[ i ], list):
+                node = str( main.activeNodes[ i ] + 1 )
+                if isinstance( getResponses[ i ], list ):
                     current = set( getResponses[ i ] )
                     if len( current ) == len( getResponses[ i ] ):
                         # no repeats
@@ -1019,7 +1019,7 @@
             sizeResponses = []
             threads = []
             for i in main.activeNodes:
-                t = main.Thread( target=main.CLIs[i].setTestSize,
+                t = main.Thread( target=main.CLIs[ i ].setTestSize,
                                  name="setTestSize-" + str( i ),
                                  args=[ main.onosSetName ] )
                 threads.append( t )
@@ -1029,7 +1029,7 @@
                 sizeResponses.append( t.result )
             sizeResults = main.TRUE
             for i in range( len( main.activeNodes ) ):
-                node = str( main.activeNodes[i] + 1 )
+                node = str( main.activeNodes[ i ] + 1 )
                 if size != sizeResponses[ i ]:
                     sizeResults = main.FALSE
                     main.log.error( "ONOS" + node +
@@ -1047,9 +1047,9 @@
             clearResponses = []
             threads = []
             for i in main.activeNodes:
-                t = main.Thread( target=main.CLIs[i].setTestRemove,
+                t = main.Thread( target=main.CLIs[ i ].setTestRemove,
                                  name="setTestClear-" + str( i ),
-                                 args=[ main.onosSetName, " "],  # Values doesn't matter
+                                 args=[ main.onosSetName, " " ],  # Value doesn't matter
                                  kwargs={ "clear": True } )
                 threads.append( t )
                 t.start()
@@ -1082,7 +1082,7 @@
             getResponses = []
             threads = []
             for i in main.activeNodes:
-                t = main.Thread( target=main.CLIs[i].setTestGet,
+                t = main.Thread( target=main.CLIs[ i ].setTestGet,
                                  name="setTestGet-" + str( i ),
                                  args=[ main.onosSetName ] )
                 threads.append( t )
@@ -1092,8 +1092,8 @@
                 getResponses.append( t.result )
             getResults = main.TRUE
             for i in range( len( main.activeNodes ) ):
-                node = str( main.activeNodes[i] + 1 )
-                if isinstance( getResponses[ i ], list):
+                node = str( main.activeNodes[ i ] + 1 )
+                if isinstance( getResponses[ i ], list ):
                     current = set( getResponses[ i ] )
                     if len( current ) == len( getResponses[ i ] ):
                         # no repeats
@@ -1117,7 +1117,7 @@
             sizeResponses = []
             threads = []
             for i in main.activeNodes:
-                t = main.Thread( target=main.CLIs[i].setTestSize,
+                t = main.Thread( target=main.CLIs[ i ].setTestSize,
                                  name="setTestSize-" + str( i ),
                                  args=[ main.onosSetName ] )
                 threads.append( t )
@@ -1127,7 +1127,7 @@
                 sizeResponses.append( t.result )
             sizeResults = main.TRUE
             for i in range( len( main.activeNodes ) ):
-                node = str( main.activeNodes[i] + 1 )
+                node = str( main.activeNodes[ i ] + 1 )
                 if size != sizeResponses[ i ]:
                     sizeResults = main.FALSE
                     main.log.error( "ONOS" + node +
@@ -1145,7 +1145,7 @@
             addResponses = []
             threads = []
             for i in main.activeNodes:
-                t = main.Thread( target=main.CLIs[i].setTestAdd,
+                t = main.Thread( target=main.CLIs[ i ].setTestAdd,
                                  name="setTestAddAll-" + str( i ),
                                  args=[ main.onosSetName, addAllValue ] )
                 threads.append( t )
@@ -1179,7 +1179,7 @@
             getResponses = []
             threads = []
             for i in main.activeNodes:
-                t = main.Thread( target=main.CLIs[i].setTestGet,
+                t = main.Thread( target=main.CLIs[ i ].setTestGet,
                                  name="setTestGet-" + str( i ),
                                  args=[ main.onosSetName ] )
                 threads.append( t )
@@ -1189,8 +1189,8 @@
                 getResponses.append( t.result )
             getResults = main.TRUE
             for i in range( len( main.activeNodes ) ):
-                node = str( main.activeNodes[i] + 1 )
-                if isinstance( getResponses[ i ], list):
+                node = str( main.activeNodes[ i ] + 1 )
+                if isinstance( getResponses[ i ], list ):
                     current = set( getResponses[ i ] )
                     if len( current ) == len( getResponses[ i ] ):
                         # no repeats
@@ -1214,7 +1214,7 @@
             sizeResponses = []
             threads = []
             for i in main.activeNodes:
-                t = main.Thread( target=main.CLIs[i].setTestSize,
+                t = main.Thread( target=main.CLIs[ i ].setTestSize,
                                  name="setTestSize-" + str( i ),
                                  args=[ main.onosSetName ] )
                 threads.append( t )
@@ -1224,7 +1224,7 @@
                 sizeResponses.append( t.result )
             sizeResults = main.TRUE
             for i in range( len( main.activeNodes ) ):
-                node = str( main.activeNodes[i] + 1 )
+                node = str( main.activeNodes[ i ] + 1 )
                 if size != sizeResponses[ i ]:
                     sizeResults = main.FALSE
                     main.log.error( "ONOS" + node +
@@ -1242,7 +1242,7 @@
             retainResponses = []
             threads = []
             for i in main.activeNodes:
-                t = main.Thread( target=main.CLIs[i].setTestRemove,
+                t = main.Thread( target=main.CLIs[ i ].setTestRemove,
                                  name="setTestRetain-" + str( i ),
                                  args=[ main.onosSetName, retainValue ],
                                  kwargs={ "retain": True } )
@@ -1277,7 +1277,7 @@
             getResponses = []
             threads = []
             for i in main.activeNodes:
-                t = main.Thread( target=main.CLIs[i].setTestGet,
+                t = main.Thread( target=main.CLIs[ i ].setTestGet,
                                  name="setTestGet-" + str( i ),
                                  args=[ main.onosSetName ] )
                 threads.append( t )
@@ -1287,8 +1287,8 @@
                 getResponses.append( t.result )
             getResults = main.TRUE
             for i in range( len( main.activeNodes ) ):
-                node = str( main.activeNodes[i] + 1 )
-                if isinstance( getResponses[ i ], list):
+                node = str( main.activeNodes[ i ] + 1 )
+                if isinstance( getResponses[ i ], list ):
                     current = set( getResponses[ i ] )
                     if len( current ) == len( getResponses[ i ] ):
                         # no repeats
@@ -1312,7 +1312,7 @@
             sizeResponses = []
             threads = []
             for i in main.activeNodes:
-                t = main.Thread( target=main.CLIs[i].setTestSize,
+                t = main.Thread( target=main.CLIs[ i ].setTestSize,
                                  name="setTestSize-" + str( i ),
                                  args=[ main.onosSetName ] )
                 threads.append( t )
@@ -1322,7 +1322,7 @@
                 sizeResponses.append( t.result )
             sizeResults = main.TRUE
             for i in range( len( main.activeNodes ) ):
-                node = str( main.activeNodes[i] + 1 )
+                node = str( main.activeNodes[ i ] + 1 )
                 if size != sizeResponses[ i ]:
                     sizeResults = main.FALSE
                     main.log.error( "ONOS" + node + " expected a size of " +
@@ -1339,8 +1339,8 @@
             tMapValue = "Testing"
             numKeys = 100
             putResult = True
-            node = main.activeNodes[0]
-            putResponses = main.CLIs[node].transactionalMapPut( numKeys, tMapValue )
+            node = main.activeNodes[ 0 ]
+            putResponses = main.CLIs[ node ].transactionalMapPut( numKeys, tMapValue )
             if putResponses and len( putResponses ) == 100:
                 for i in putResponses:
                     if putResponses[ i ][ 'value' ] != tMapValue:
@@ -1364,7 +1364,7 @@
                 threads = []
                 valueCheck = True
                 for i in main.activeNodes:
-                    t = main.Thread( target=main.CLIs[i].transactionalMapGet,
+                    t = main.Thread( target=main.CLIs[ i ].transactionalMapGet,
                                      name="TMap-get-" + str( i ),
                                      args=[ "Key" + str( n ) ] )
                     threads.append( t )
@@ -1376,7 +1376,7 @@
                     if node != tMapValue:
                         valueCheck = False
                 if not valueCheck:
-                    main.log.warn( "Values for key 'Key" + str(n) + "' do not match:" )
+                    main.log.warn( "Values for key 'Key" + str( n ) + "' do not match:" )
                     main.log.warn( getResponses )
                 getCheck = getCheck and valueCheck
             utilities.assert_equals( expect=True,
@@ -1389,7 +1389,7 @@
             threads = []
             getValues = []
             for i in main.activeNodes:
-                t = main.Thread( target=main.CLIs[i].valueTestGet,
+                t = main.Thread( target=main.CLIs[ i ].valueTestGet,
                                  name="ValueGet-" + str( i ),
                                  args=[ valueName ] )
                 threads.append( t )
@@ -1418,7 +1418,7 @@
             threads = []
             setValues = []
             for i in main.activeNodes:
-                t = main.Thread( target=main.CLIs[i].valueTestSet,
+                t = main.Thread( target=main.CLIs[ i ].valueTestSet,
                                  name="ValueSet-" + str( i ),
                                  args=[ valueName, valueValue ] )
                 threads.append( t )
@@ -1443,7 +1443,7 @@
             threads = []
             getValues = []
             for i in main.activeNodes:
-                t = main.Thread( target=main.CLIs[i].valueTestGet,
+                t = main.Thread( target=main.CLIs[ i ].valueTestGet,
                                  name="ValueGet-" + str( i ),
                                  args=[ valueName ] )
                 threads.append( t )
@@ -1470,8 +1470,8 @@
             main.step( "Atomic Value compareAndSet()" )
             oldValue = valueValue
             valueValue = "bar"
-            i = main.activeNodes[0]
-            CASValue = main.CLIs[i].valueTestCompareAndSet( valueName, oldValue, valueValue )
+            i = main.activeNodes[ 0 ]
+            CASValue = main.CLIs[ i ].valueTestCompareAndSet( valueName, oldValue, valueValue )
             main.log.debug( CASValue )
             utilities.assert_equals( expect=main.TRUE,
                                      actual=CASValue,
@@ -1483,7 +1483,7 @@
             threads = []
             getValues = []
             for i in main.activeNodes:
-                t = main.Thread( target=main.CLIs[i].valueTestGet,
+                t = main.Thread( target=main.CLIs[ i ].valueTestGet,
                                  name="ValueGet-" + str( i ),
                                  args=[ valueName ] )
                 threads.append( t )
@@ -1510,8 +1510,8 @@
             main.step( "Atomic Value getAndSet()" )
             oldValue = valueValue
             valueValue = "baz"
-            i = main.activeNodes[0]
-            GASValue = main.CLIs[i].valueTestGetAndSet( valueName, valueValue )
+            i = main.activeNodes[ 0 ]
+            GASValue = main.CLIs[ i ].valueTestGetAndSet( valueName, valueValue )
             main.log.debug( GASValue )
             expected = oldValue if oldValue is not None else "null"
             utilities.assert_equals( expect=expected,
@@ -1525,7 +1525,7 @@
             threads = []
             getValues = []
             for i in main.activeNodes:
-                t = main.Thread( target=main.CLIs[i].valueTestGet,
+                t = main.Thread( target=main.CLIs[ i ].valueTestGet,
                                  name="ValueGet-" + str( i ),
                                  args=[ valueName ] )
                 threads.append( t )
@@ -1552,8 +1552,8 @@
-            main.step( "Atomic Value destory()" )
+            main.step( "Atomic Value destroy()" )
             valueValue = None
             threads = []
-            i = main.activeNodes[0]
-            destroyResult = main.CLIs[i].valueTestDestroy( valueName )
+            i = main.activeNodes[ 0 ]
+            destroyResult = main.CLIs[ i ].valueTestDestroy( valueName )
             main.log.debug( destroyResult )
             # Check the results
             utilities.assert_equals( expect=main.TRUE,
@@ -1565,7 +1565,7 @@
             threads = []
             getValues = []
             for i in main.activeNodes:
-                t = main.Thread( target=main.CLIs[i].valueTestGet,
+                t = main.Thread( target=main.CLIs[ i ].valueTestGet,
                                  name="ValueGet-" + str( i ),
                                  args=[ valueName ] )
                 threads.append( t )
@@ -1592,8 +1592,8 @@
             # WORK QUEUES
             main.step( "Work Queue add()" )
             threads = []
-            i = main.activeNodes[0]
-            addResult = main.CLIs[i].workQueueAdd( workQueueName, 'foo' )
+            i = main.activeNodes[ 0 ]
+            addResult = main.CLIs[ i ].workQueueAdd( workQueueName, 'foo' )
             workQueuePending += 1
             main.log.debug( addResult )
             # Check the results
@@ -1614,8 +1614,8 @@
 
             main.step( "Work Queue addMultiple()" )
             threads = []
-            i = main.activeNodes[0]
-            addMultipleResult = main.CLIs[i].workQueueAddMultiple( workQueueName, 'bar', 'baz' )
+            i = main.activeNodes[ 0 ]
+            addMultipleResult = main.CLIs[ i ].workQueueAddMultiple( workQueueName, 'bar', 'baz' )
             workQueuePending += 2
             main.log.debug( addMultipleResult )
             # Check the results
@@ -1636,9 +1636,9 @@
 
             main.step( "Work Queue takeAndComplete() 1" )
             threads = []
-            i = main.activeNodes[0]
+            i = main.activeNodes[ 0 ]
             number = 1
-            take1Result = main.CLIs[i].workQueueTakeAndComplete( workQueueName, number )
+            take1Result = main.CLIs[ i ].workQueueTakeAndComplete( workQueueName, number )
             workQueuePending -= number
             workQueueCompleted += number
             main.log.debug( take1Result )
@@ -1660,9 +1660,9 @@
 
             main.step( "Work Queue takeAndComplete() 2" )
             threads = []
-            i = main.activeNodes[0]
+            i = main.activeNodes[ 0 ]
             number = 2
-            take2Result = main.CLIs[i].workQueueTakeAndComplete( workQueueName, number )
+            take2Result = main.CLIs[ i ].workQueueTakeAndComplete( workQueueName, number )
             workQueuePending -= number
             workQueueCompleted += number
             main.log.debug( take2Result )
@@ -1685,8 +1685,8 @@
             main.step( "Work Queue destroy()" )
             valueValue = None
             threads = []
-            i = main.activeNodes[0]
-            destroyResult = main.CLIs[i].workQueueDestroy( workQueueName )
+            i = main.activeNodes[ 0 ]
+            destroyResult = main.CLIs[ i ].workQueueDestroy( workQueueName )
             workQueueCompleted = 0
             workQueueInProgress = 0
             workQueuePending = 0