Add HA test for split-brain scenario

- Add option to cleanup script to reset firewall rules
- Minor fixes to drivers and other HA tests
- Add test-summary script

Change-Id: I758b60ada7a3675664456b4da056ad4a6d014b30
diff --git a/TestON/tests/HAclusterRestart/HAclusterRestart.py b/TestON/tests/HAclusterRestart/HAclusterRestart.py
index 7b19efc..4e7ac23 100644
--- a/TestON/tests/HAclusterRestart/HAclusterRestart.py
+++ b/TestON/tests/HAclusterRestart/HAclusterRestart.py
@@ -558,8 +558,8 @@
 
         main.step( "Add host intents via cli" )
         intentIds = []
-        # TODO:  move the host numbers to params
-        #        Maybe look at all the paths we ping?
+        # TODO: move the host numbers to params
+        #       Maybe look at all the paths we ping?
         intentAddResult = True
         hostResult = main.TRUE
         for i in range( 8, 18 ):
@@ -874,7 +874,7 @@
         assert utilities.assert_equals, "utilities.assert_equals not defined"
         assert main.CLIs, "main.CLIs not defined"
         assert main.nodes, "main.nodes not defined"
-        main.case( "Verify connectivity by sendind traffic across Intents" )
+        main.case( "Verify connectivity by sending traffic across Intents" )
         main.caseExplanation = "Ping across added host intents to check " +\
                                 "functionality and check the state of " +\
                                 "the intent"
@@ -1260,10 +1260,10 @@
             ONOSIntents.append( t.result )
 
         for i in range( main.numCtrls ):
+            node = str( i + 1 )
             if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
-                main.log.error( "Error in getting ONOS" + str( i + 1 ) +
-                                 " intents" )
-                main.log.warn( "ONOS" + str( i + 1 ) + " intents response: " +
+                main.log.error( "Error in getting ONOS" + node + " intents" )
+                main.log.warn( "ONOS" + node + " intents response: " +
                                repr( ONOSIntents[ i ] ) )
                 intentsResults = False
         utilities.assert_equals(
@@ -1609,6 +1609,7 @@
         except ( ValueError, TypeError ):
             main.log.exception( "Error parsing clusters[0]: " +
                                 repr( clusters[ 0 ] ) )
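+            # Sentinel so the check below fails cleanly instead of raising NameError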
+            numClusters = "ERROR"
         clusterResults = main.FALSE
         if numClusters == 1:
             clusterResults = main.TRUE
@@ -1770,6 +1771,20 @@
                                  onpass="ONOS cli started",
                                  onfail="ONOS clis did not restart" )
 
+        for i in range( 10 ):
+            ready = True
+            for cli in main.CLIs:
+                output = cli.summary()
+                if not output:
+                    ready = False
+            if ready:
+                break
+            time.sleep( 30 )
+        utilities.assert_equals( expect=True, actual=ready,
+                                 onpass="ONOS summary command succeeded",
+                                 onfail="ONOS summary command failed" )
+        if not ready:
+            main.cleanup()
+            main.exit()
+
         # Grab the time of restart so we can check how long the gossip
         # protocol has had time to work
         main.restartTime = time.time() - killTime
@@ -1872,48 +1887,14 @@
 
         if rolesResults and not consistentMastership:
             for i in range( main.numCtrls ):
-                main.log.warn(
-                    "ONOS" + str( i + 1 ) + " roles: ",
-                    json.dumps(
-                        json.loads( ONOSMastership[ i ] ),
-                        sort_keys=True,
-                        indent=4,
-                        separators=( ',', ': ' ) ) )
-        elif rolesResults and not consistentMastership:
+                main.log.warn( "ONOS" + str( i + 1 ) + " roles: ",
+                               json.dumps( json.loads( ONOSMastership[ i ] ),
+                                           sort_keys=True,
+                                           indent=4,
+                                           separators=( ',', ': ' ) ) )
+        elif rolesResults and consistentMastership:
             mastershipCheck = main.TRUE
 
-        '''
-        description2 = "Compare switch roles from before failure"
-        main.step( description2 )
-        try:
-            currentJson = json.loads( ONOSMastership[0] )
-            oldJson = json.loads( mastershipState )
-        except ( ValueError, TypeError ):
-            main.log.exception( "Something is wrong with parsing " +
-                                "ONOSMastership[0] or mastershipState" )
-            main.log.error( "ONOSMastership[0]: " + repr( ONOSMastership[0] ) )
-            main.log.error( "mastershipState" + repr( mastershipState ) )
-            main.cleanup()
-            main.exit()
-        mastershipCheck = main.TRUE
-        for i in range( 1, 29 ):
-            switchDPID = str(
-                main.Mininet1.getSwitchDPID( switch="s" + str( i ) ) )
-            current = [ switch[ 'master' ] for switch in currentJson
-                        if switchDPID in switch[ 'id' ] ]
-            old = [ switch[ 'master' ] for switch in oldJson
-                    if switchDPID in switch[ 'id' ] ]
-            if current == old:
-                mastershipCheck = mastershipCheck and main.TRUE
-            else:
-                main.log.warn( "Mastership of switch %s changed" % switchDPID )
-                mastershipCheck = main.FALSE
-        utilities.assert_equals(
-            expect=main.TRUE,
-            actual=mastershipCheck,
-            onpass="Mastership of Switches was not changed",
-            onfail="Mastership of some switches changed" )
-        '''
         # NOTE: we expect mastership to change on controller failure
 
         main.step( "Get the intents and compare across all nodes" )
@@ -2068,7 +2049,6 @@
             FlowTables = FlowTables and main.Mininet1.flowTableComp( flows[i], tmpFlows )
             if FlowTables == main.FALSE:
                 main.log.warn( "Differences in flow table for switch: s{}".format( i + 1 ) )
-
         utilities.assert_equals(
             expect=main.TRUE,
             actual=FlowTables,
@@ -2152,6 +2132,7 @@
         main.caseExplanation = "Compare topology objects between Mininet" +\
                                 " and ONOS"
         topoResult = main.FALSE
+        topoFailMsg = "ONOS topology doesn't match Mininet"
         elapsed = 0
         count = 0
         main.step( "Comparing ONOS topology to MN topology" )
@@ -2167,9 +2148,11 @@
             devices = []
             threads = []
             for i in range( main.numCtrls ):
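+                # Retry the CLI call: up to 5 attempts, ~5 seconds apart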
-                t = main.Thread( target=main.CLIs[i].devices,
+                t = main.Thread( target=utilities.retry,
                                  name="devices-" + str( i ),
-                                 args=[ ] )
+                                 args=[ main.CLIs[i].devices, [ None ] ],
+                                 kwargs={ 'sleep': 5, 'attempts': 5,
+                                          'randomTime': True } )
                 threads.append( t )
                 t.start()
 
@@ -2208,9 +2191,11 @@
             ports = []
             threads = []
             for i in range( main.numCtrls ):
-                t = main.Thread( target=main.CLIs[i].ports,
+                t = main.Thread( target=utilities.retry,
                                  name="ports-" + str( i ),
-                                 args=[ ] )
+                                 args=[ main.CLIs[i].ports, [ None ] ],
+                                 kwargs={ 'sleep': 5, 'attempts': 5,
+                                          'randomTime': True } )
                 threads.append( t )
                 t.start()
 
@@ -2220,9 +2205,11 @@
             links = []
             threads = []
             for i in range( main.numCtrls ):
-                t = main.Thread( target=main.CLIs[i].links,
+                t = main.Thread( target=utilities.retry,
                                  name="links-" + str( i ),
-                                 args=[ ] )
+                                 args=[ main.CLIs[i].links, [ None ] ],
+                                 kwargs={ 'sleep': 5, 'attempts': 5,
+                                          'randomTime': True } )
                 threads.append( t )
                 t.start()
 
@@ -2232,9 +2219,11 @@
             clusters = []
             threads = []
             for i in range( main.numCtrls ):
-                t = main.Thread( target=main.CLIs[i].clusters,
+                t = main.Thread( target=utilities.retry,
                                  name="clusters-" + str( i ),
-                                 args=[ ] )
+                                 args=[ main.CLIs[i].clusters, [ None ] ],
+                                 kwargs={ 'sleep': 5, 'attempts': 5,
+                                          'randomTime': True } )
                 threads.append( t )
                 t.start()
 
@@ -2247,6 +2236,15 @@
             print "Elapsed time: " + str( elapsed )
             print "CLI time: " + str( cliTime )
 
+            if all( e is None for e in devices ) and \
+               all( e is None for e in hosts ) and \
+               all( e is None for e in ports ) and \
+               all( e is None for e in links ) and \
+               all( e is None for e in clusters ):
+                topoFailMsg = "Could not get topology from ONOS"
+                main.log.error( topoFailMsg )
+                continue  # Try again, no use trying to compare
+
             mnSwitches = main.Mininet1.getSwitches()
             mnLinks = main.Mininet1.getLinks()
             mnHosts = main.Mininet1.getHosts()
@@ -2395,7 +2393,7 @@
         utilities.assert_equals( expect=True,
                                  actual=topoResult,
                                  onpass="ONOS topology matches Mininet",
-                                 onfail="ONOS topology don't match Mininet" )
+                                 onfail=topoFailMsg )
         # End of While loop to pull ONOS state
 
         # Compare json objects for hosts and dataplane clusters
@@ -2455,7 +2453,6 @@
                                      controllerStr +
                                      " is inconsistent with ONOS1" )
                     consistentClustersResult = main.FALSE
-
             else:
                 main.log.error( "Error in getting dataplane clusters " +
                                  "from ONOS" + controllerStr )
@@ -2725,7 +2722,7 @@
             # NOTE: must end in /
             for f in logFiles:
                 for node in main.nodes:
-                    dstName =  main.logdir + "/" + node.name + "-" + f
+                    dstName = main.logdir + "/" + node.name + "-" + f
                     main.ONOSbench.secureCopy( node.user_name, node.ip_address,
                                                logFolder + f, dstName )
             # std*.log's
@@ -2735,7 +2732,7 @@
             # NOTE: must end in /
             for f in logFiles:
                 for node in main.nodes:
-                    dstName =  main.logdir + "/" + node.name + "-" + f
+                    dstName = main.logdir + "/" + node.name + "-" + f
                     main.ONOSbench.secureCopy( node.user_name, node.ip_address,
                                                logFolder + f, dstName )
         else:
@@ -2802,7 +2799,7 @@
         sameLeader = main.TRUE
         if len( set( leaders ) ) != 1:
             sameLeader = main.FALSE
-            main.log.error( "Results of electionTestLeader is order of CLIs:" +
+            main.log.error( "Results of electionTestLeader in order of main.CLIs: " +
                             str( leaders ) )
         utilities.assert_equals(
             expect=main.TRUE,
@@ -2875,8 +2872,13 @@
         for cli in main.CLIs:
             node = cli.specificLeaderCandidate( 'org.onosproject.election' )
             oldAllCandidates.append( node )
-            oldLeaders.append( node[ 0 ] )
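+            # The CLI may return None on error; append None so the lists stay aligned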
+            if node:
+                oldLeaders.append( node[ 0 ] )
+            else:
+                oldLeaders.append( None )
         oldCandidates = oldAllCandidates[ 0 ]
+        if oldCandidates is None:
+            oldCandidates = [ None ]
 
         # Check that each node has the same leader. Defines oldLeader
         if len( set( oldLeaders ) ) != 1:
@@ -2889,13 +2891,14 @@
         # Check that each node's candidate list is the same
         candidateDiscrepancy = False  # Boolean of candidate mismatches
         for candidates in oldAllCandidates:
+            if candidates is None:
+                main.log.warn( "Error getting candidates" )
+                candidates = [ None ]
             if set( candidates ) != set( oldCandidates ):
                 sameResult = main.FALSE
                 candidateDiscrepancy = True
-
         if candidateDiscrepancy:
             failMessage += " and candidates"
-
         utilities.assert_equals(
             expect=main.TRUE,
             actual=sameResult,
@@ -2924,7 +2927,6 @@
             onfail="Node was not withdrawn from election" )
 
         main.step( "Check that a new node was elected leader" )
-
         # FIXME: use threads
         newLeaderResult = main.TRUE
         failMessage = "Nodes have different leaders"
@@ -2964,7 +2966,7 @@
         # Check that the new leader is not the older leader, which was withdrawn
         if newLeader == oldLeader:
             newLeaderResult = main.FALSE
-            main.log.error( "All nodes still see old leader: " + oldLeader +
+            main.log.error( "All nodes still see old leader: " + str( oldLeader ) +
                 " as the current leader" )
 
         utilities.assert_equals(
@@ -2974,7 +2976,7 @@
             onfail="Something went wrong with Leadership election" )
 
         main.step( "Check that that new leader was the candidate of old leader")
-        # candidates[ 2 ] should be come the top candidate after withdrawl
+        # candidates[ 2 ] should become the top candidate after withdrawal
         correctCandidateResult = main.TRUE
         if expectNoLeader:
             if newLeader == 'none':
@@ -2983,11 +2985,13 @@
             else:
                 main.log.info( "Expected no leader, got: " + str( newLeader ) )
                 correctCandidateResult = main.FALSE
-        elif newLeader != oldCandidates[ 2 ]:
+        elif len( oldCandidates ) < 3:
+            main.log.warn( "Could not determine who should be the correct leader" )
+            correctCandidateResult = main.FALSE
+        elif newLeader != oldCandidates[ 2 ]:
             correctCandidateResult = main.FALSE
-            main.log.error( "Candidate " + newLeader + " was elected. " +
-                oldCandidates[ 2 ] + " should have had priority." )
-
+            main.log.error( "Candidate {} was elected. {} should have had priority.".format(
+                                newLeader, oldCandidates[ 2 ] ) )
         utilities.assert_equals(
             expect=main.TRUE,
             actual=correctCandidateResult,
@@ -3048,7 +3052,7 @@
 
         # Check that the re-elected node is last on the candidate List
         if oldLeader != newCandidates[ -1 ]:
-            main.log.error( "Old Leader ("  + oldLeader + ") not in the proper position " +
+            main.log.error( "Old Leader (" + str( oldLeader ) + ") not in the proper position " +
                 str( newCandidates ) )
             positionResult = main.FALSE
 
@@ -3507,12 +3511,13 @@
 
         getResults = main.TRUE
         for i in range( main.numCtrls ):
+            node = str( i + 1 )
             if isinstance( getResponses[ i ], list):
                 current = set( getResponses[ i ] )
                 if len( current ) == len( getResponses[ i ] ):
                     # no repeats
                     if onosSet != current:
-                        main.log.error( "ONOS" + str( i + 1 ) +
+                        main.log.error( "ONOS" + node +
                                         " has incorrect view" +
                                         " of set " + onosSetName + ":\n" +
                                         str( getResponses[ i ] ) )
@@ -3521,7 +3526,7 @@
                         getResults = main.FALSE
                 else:
                     # error, set is not a set
-                    main.log.error( "ONOS" + str( i + 1 ) +
+                    main.log.error( "ONOS" + node +
                                     " has repeat elements in" +
                                     " set " + onosSetName + ":\n" +
                                     str( getResponses[ i ] ) )
@@ -3548,9 +3553,10 @@
 
         sizeResults = main.TRUE
         for i in range( main.numCtrls ):
+            node = str( i + 1 )
             if size != sizeResponses[ i ]:
                 sizeResults = main.FALSE
-                main.log.error( "ONOS" + str( i + 1 ) +
+                main.log.error( "ONOS" + node +
                                 " expected a size of " + str( size ) +
                                 " for set " + onosSetName +
                                 " but got " + str( sizeResponses[ i ] ) )
@@ -3608,13 +3614,13 @@
             getResponses.append( t.result )
         getResults = main.TRUE
         for i in range( main.numCtrls ):
+            node = str( i + 1 )
             if isinstance( getResponses[ i ], list):
                 current = set( getResponses[ i ] )
                 if len( current ) == len( getResponses[ i ] ):
                     # no repeats
                     if onosSet != current:
-                        main.log.error( "ONOS" + str( i + 1 ) +
-                                        " has incorrect view" +
+                        main.log.error( "ONOS" + node + " has incorrect view" +
                                         " of set " + onosSetName + ":\n" +
                                         str( getResponses[ i ] ) )
                         main.log.debug( "Expected: " + str( onosSet ) )
@@ -3622,8 +3628,7 @@
                         getResults = main.FALSE
                 else:
                     # error, set is not a set
-                    main.log.error( "ONOS" + str( i + 1 ) +
-                                    " has repeat elements in" +
+                    main.log.error( "ONOS" + node + " has repeat elements in" +
                                     " set " + onosSetName + ":\n" +
                                     str( getResponses[ i ] ) )
                     getResults = main.FALSE
@@ -3642,9 +3647,10 @@
             sizeResponses.append( t.result )
         sizeResults = main.TRUE
         for i in range( main.numCtrls ):
+            node = str( i + 1 )
             if size != sizeResponses[ i ]:
                 sizeResults = main.FALSE
-                main.log.error( "ONOS" + str( i + 1 ) +
+                main.log.error( "ONOS" + node +
                                 " expected a size of " + str( size ) +
                                 " for set " + onosSetName +
                                 " but got " + str( sizeResponses[ i ] ) )
@@ -3703,12 +3709,13 @@
             getResponses.append( t.result )
         getResults = main.TRUE
         for i in range( main.numCtrls ):
+            node = str( i + 1 )
             if isinstance( getResponses[ i ], list):
                 current = set( getResponses[ i ] )
                 if len( current ) == len( getResponses[ i ] ):
                     # no repeats
                     if onosSet != current:
-                        main.log.error( "ONOS" + str( i + 1 ) +
+                        main.log.error( "ONOS" + node +
                                         " has incorrect view" +
                                         " of set " + onosSetName + ":\n" +
                                         str( getResponses[ i ] ) )
@@ -3717,7 +3724,7 @@
                         getResults = main.FALSE
                 else:
                     # error, set is not a set
-                    main.log.error( "ONOS" + str( i + 1 ) +
+                    main.log.error( "ONOS" + node +
                                     " has repeat elements in" +
                                     " set " + onosSetName + ":\n" +
                                     str( getResponses[ i ] ) )
@@ -3737,9 +3744,10 @@
             sizeResponses.append( t.result )
         sizeResults = main.TRUE
         for i in range( main.numCtrls ):
+            node = str( i + 1 )
             if size != sizeResponses[ i ]:
                 sizeResults = main.FALSE
-                main.log.error( "ONOS" + str( i + 1 ) +
+                main.log.error( "ONOS" + node +
                                 " expected a size of " + str( size ) +
                                 " for set " + onosSetName +
                                 " but got " + str( sizeResponses[ i ] ) )
@@ -3852,12 +3860,13 @@
             getResponses.append( t.result )
         getResults = main.TRUE
         for i in range( main.numCtrls ):
+            node = str( i + 1 )
             if isinstance( getResponses[ i ], list):
                 current = set( getResponses[ i ] )
                 if len( current ) == len( getResponses[ i ] ):
                     # no repeats
                     if onosSet != current:
-                        main.log.error( "ONOS" + str( i + 1 ) +
+                        main.log.error( "ONOS" + node +
                                         " has incorrect view" +
                                         " of set " + onosSetName + ":\n" +
                                         str( getResponses[ i ] ) )
@@ -3866,7 +3875,7 @@
                         getResults = main.FALSE
                 else:
                     # error, set is not a set
-                    main.log.error( "ONOS" + str( i + 1 ) +
+                    main.log.error( "ONOS" + node +
                                     " has repeat elements in" +
                                     " set " + onosSetName + ":\n" +
                                     str( getResponses[ i ] ) )
@@ -3886,9 +3895,10 @@
             sizeResponses.append( t.result )
         sizeResults = main.TRUE
         for i in range( main.numCtrls ):
+            node = str( i + 1 )
             if size != sizeResponses[ i ]:
                 sizeResults = main.FALSE
-                main.log.error( "ONOS" + str( i + 1 ) +
+                main.log.error( "ONOS" + node +
                                 " expected a size of " + str( size ) +
                                 " for set " + onosSetName +
                                 " but got " + str( sizeResponses[ i ] ) )
@@ -3950,12 +3960,13 @@
             getResponses.append( t.result )
         getResults = main.TRUE
         for i in range( main.numCtrls ):
+            node = str( i + 1 )
             if isinstance( getResponses[ i ], list):
                 current = set( getResponses[ i ] )
                 if len( current ) == len( getResponses[ i ] ):
                     # no repeats
                     if onosSet != current:
-                        main.log.error( "ONOS" + str( i + 1 ) +
+                        main.log.error( "ONOS" + node +
                                         " has incorrect view" +
                                         " of set " + onosSetName + ":\n" +
                                         str( getResponses[ i ] ) )
@@ -3964,7 +3975,7 @@
                         getResults = main.FALSE
                 else:
                     # error, set is not a set
-                    main.log.error( "ONOS" + str( i + 1 ) +
+                    main.log.error( "ONOS" + node +
                                     " has repeat elements in" +
                                     " set " + onosSetName + ":\n" +
                                     str( getResponses[ i ] ) )
@@ -3984,9 +3995,10 @@
             sizeResponses.append( t.result )
         sizeResults = main.TRUE
         for i in range( main.numCtrls ):
+            node = str( i + 1 )
             if size != sizeResponses[ i ]:
                 sizeResults = main.FALSE
-                main.log.error( "ONOS" + str( i + 1 ) +
+                main.log.error( "ONOS" + node +
                                 " expected a size of " + str( size ) +
                                 " for set " + onosSetName +
                                 " but got " + str( sizeResponses[ i ] ) )
@@ -4045,12 +4057,13 @@
             getResponses.append( t.result )
         getResults = main.TRUE
         for i in range( main.numCtrls ):
+            node = str( i + 1 )
             if isinstance( getResponses[ i ], list):
                 current = set( getResponses[ i ] )
                 if len( current ) == len( getResponses[ i ] ):
                     # no repeats
                     if onosSet != current:
-                        main.log.error( "ONOS" + str( i + 1 ) +
+                        main.log.error( "ONOS" + node +
                                         " has incorrect view" +
                                         " of set " + onosSetName + ":\n" +
                                         str( getResponses[ i ] ) )
@@ -4059,7 +4072,7 @@
                         getResults = main.FALSE
                 else:
                     # error, set is not a set
-                    main.log.error( "ONOS" + str( i + 1 ) +
+                    main.log.error( "ONOS" + node +
                                     " has repeat elements in" +
                                     " set " + onosSetName + ":\n" +
                                     str( getResponses[ i ] ) )
@@ -4079,9 +4092,10 @@
             sizeResponses.append( t.result )
         sizeResults = main.TRUE
         for i in range( main.numCtrls ):
+            node = str( i + 1 )
             if size != sizeResponses[ i ]:
                 sizeResults = main.FALSE
-                main.log.error( "ONOS" + str( i + 1 ) +
+                main.log.error( "ONOS" + node +
                                 " expected a size of " + str( size ) +
                                 " for set " + onosSetName +
                                 " but got " + str( sizeResponses[ i ] ) )
@@ -4141,12 +4155,13 @@
             getResponses.append( t.result )
         getResults = main.TRUE
         for i in range( main.numCtrls ):
+            node = str( i + 1 )
             if isinstance( getResponses[ i ], list):
                 current = set( getResponses[ i ] )
                 if len( current ) == len( getResponses[ i ] ):
                     # no repeats
                     if onosSet != current:
-                        main.log.error( "ONOS" + str( i + 1 ) +
+                        main.log.error( "ONOS" + node +
                                         " has incorrect view" +
                                         " of set " + onosSetName + ":\n" +
                                         str( getResponses[ i ] ) )
@@ -4155,7 +4170,7 @@
                         getResults = main.FALSE
                 else:
                     # error, set is not a set
-                    main.log.error( "ONOS" + str( i + 1 ) +
+                    main.log.error( "ONOS" + node +
                                     " has repeat elements in" +
                                     " set " + onosSetName + ":\n" +
                                     str( getResponses[ i ] ) )
@@ -4175,9 +4190,10 @@
             sizeResponses.append( t.result )
         sizeResults = main.TRUE
         for i in range( main.numCtrls ):
+            node = str( i + 1 )
             if size != sizeResponses[ i ]:
                 sizeResults = main.FALSE
-                main.log.error( "ONOS" + str( i + 1 ) +
+                main.log.error( "ONOS" + node +
                                 " expected a size of " + str( size ) +
                                 " for set " + onosSetName +
                                 " but got " + str( sizeResponses[ i ] ) )
@@ -4236,12 +4252,13 @@
             getResponses.append( t.result )
         getResults = main.TRUE
         for i in range( main.numCtrls ):
+            node = str( i + 1 )
             if isinstance( getResponses[ i ], list):
                 current = set( getResponses[ i ] )
                 if len( current ) == len( getResponses[ i ] ):
                     # no repeats
                     if onosSet != current:
-                        main.log.error( "ONOS" + str( i + 1 ) +
+                        main.log.error( "ONOS" + node +
                                         " has incorrect view" +
                                         " of set " + onosSetName + ":\n" +
                                         str( getResponses[ i ] ) )
@@ -4250,7 +4267,7 @@
                         getResults = main.FALSE
                 else:
                     # error, set is not a set
-                    main.log.error( "ONOS" + str( i + 1 ) +
+                    main.log.error( "ONOS" + node +
                                     " has repeat elements in" +
                                     " set " + onosSetName + ":\n" +
                                     str( getResponses[ i ] ) )
@@ -4270,9 +4287,10 @@
             sizeResponses.append( t.result )
         sizeResults = main.TRUE
         for i in range( main.numCtrls ):
+            node = str( i + 1 )
             if size != sizeResponses[ i ]:
                 sizeResults = main.FALSE
-                main.log.error( "ONOS" + str( i + 1 ) +
+                main.log.error( "ONOS" + node +
                                 " expected a size of " + str( size ) +
                                 " for set " + onosSetName +
                                 " but got " + str( sizeResponses[ i ] ) )
@@ -4332,12 +4350,13 @@
             getResponses.append( t.result )
         getResults = main.TRUE
         for i in range( main.numCtrls ):
+            node = str( i + 1 )
             if isinstance( getResponses[ i ], list):
                 current = set( getResponses[ i ] )
                 if len( current ) == len( getResponses[ i ] ):
                     # no repeats
                     if onosSet != current:
-                        main.log.error( "ONOS" + str( i + 1 ) +
+                        main.log.error( "ONOS" + node +
                                         " has incorrect view" +
                                         " of set " + onosSetName + ":\n" +
                                         str( getResponses[ i ] ) )
@@ -4346,7 +4365,7 @@
                         getResults = main.FALSE
                 else:
                     # error, set is not a set
-                    main.log.error( "ONOS" + str( i + 1 ) +
+                    main.log.error( "ONOS" + node +
                                     " has repeat elements in" +
                                     " set " + onosSetName + ":\n" +
                                     str( getResponses[ i ] ) )
@@ -4366,10 +4385,10 @@
             sizeResponses.append( t.result )
         sizeResults = main.TRUE
         for i in range( main.numCtrls ):
+            node = str( i + 1 )
             if size != sizeResponses[ i ]:
                 sizeResults = main.FALSE
-                main.log.error( "ONOS" + str( i + 1 ) +
-                                " expected a size of " +
+                main.log.error( "ONOS" + node + " expected a size of " +
                                 str( size ) + " for set " + onosSetName +
                                 " but got " + str( sizeResponses[ i ] ) )
         retainResults = retainResults and getResults and sizeResults
@@ -4384,7 +4403,7 @@
         numKeys = 100
         putResult = True
         putResponses = main.CLIs[ 0 ].transactionalMapPut( numKeys, tMapValue )
-        if len( putResponses ) == 100:
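+        # transactionalMapPut may return None on error, so check it before len()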
+        if putResponses and len( putResponses ) == 100:
             for i in putResponses:
                 if putResponses[ i ][ 'value' ] != tMapValue:
                     putResult = False
@@ -4406,7 +4425,7 @@
             for i in range( main.numCtrls ):
                 t = main.Thread( target=main.CLIs[i].transactionalMapGet,
                                  name="TMap-get-" + str( i ),
-                                 args=[ "Key" + str ( n ) ] )
+                                 args=[ "Key" + str( n ) ] )
                 threads.append( t )
                 t.start()
             for t in threads:
@@ -4451,7 +4470,7 @@
             for i in range( main.numCtrls ):
                 t = main.Thread( target=main.CLIs[i].transactionalMapGet,
                                  name="TMap-get-" + str( i ),
-                                 args=[ "Key" + str ( n ) ],
+                                 args=[ "Key" + str( n ) ],
                                  kwargs={ "inMemory": True } )
                 threads.append( t )
                 t.start()
diff --git a/TestON/tests/HAfullNetPartition/HAfullNetPartition.params b/TestON/tests/HAfullNetPartition/HAfullNetPartition.params
new file mode 100644
index 0000000..23e7ae8
--- /dev/null
+++ b/TestON/tests/HAfullNetPartition/HAfullNetPartition.params
@@ -0,0 +1,86 @@
+<PARAMS>
+    #List of test cases:
+    #CASE1: Compile ONOS and push it to the test machines
+    #CASE2: Assign devices to controllers
+    #CASE21: Assign mastership to controllers
+    #CASE3: Assign intents
+    #CASE4: Ping across added host intents
+    #CASE5: Reading state of ONOS
+    #CASE61: The failure-inducing case.
+    #CASE62: The failure-recovery case.
+    #CASE7: Check state after control plane failure
+    #CASE8: Compare topo
+    #CASE9: Link s3-s28 down
+    #CASE10: Link s3-s28 up
+    #CASE11: Switch down
+    #CASE12: Switch up
+    #CASE13: Clean up
+    #CASE14: start election app on all onos nodes
+    #CASE15: Check that Leadership Election is still functional
+    #CASE16: Install Distributed Primitives app
+    #CASE17: Check for basic functionality with distributed primitives
+    <testcases>1,[2,8,3,4,5,14,16,17]*1,[61,8,7,4,15,17,62],8,7,4,15,17,9,8,4,10,8,4,11,8,4,12,8,4,13</testcases>
+    <imports>
+        <path> /home/admin/OnosSystemTest/TestON/tests/HAfullNetPartition/dependencies/ </path>
+    </imports>
+    <ENV>
+        <cellName>HA</cellName>
+        <appString>drivers,openflow,proxyarp,mobility</appString>
+    </ENV>
+    <Git> False </Git>
+    <branch> master </branch>
+    <num_controllers> 7 </num_controllers>
+    <tcpdump> False </tcpdump>
+
+    <CTRL>
+        <port1>6653</port1>
+        <port2>6653</port2>
+        <port3>6653</port3>
+        <port4>6653</port4>
+        <port5>6653</port5>
+        <port6>6653</port6>
+        <port7>6653</port7>
+    </CTRL>
+    <BACKUP>
+        <ENABLED> False </ENABLED>
+        <TESTONUSER>admin</TESTONUSER>
+        <TESTONIP>10.128.30.9</TESTONIP>
+    </BACKUP>
+    <PING>
+        <source1>h8</source1>
+        <source2>h9</source2>
+        <source3>h10</source3>
+        <source4>h11</source4>
+        <source5>h12</source5>
+        <source6>h13</source6>
+        <source7>h14</source7>
+        <source8>h15</source8>
+        <source9>h16</source9>
+        <source10>h17</source10>
+        <target1>10.0.0.18</target1>
+        <target2>10.0.0.19</target2>
+        <target3>10.0.0.20</target3>
+        <target4>10.0.0.21</target4>
+        <target5>10.0.0.22</target5>
+        <target6>10.0.0.23</target6>
+        <target7>10.0.0.24</target7>
+        <target8>10.0.0.25</target8>
+        <target9>10.0.0.26</target9>
+        <target10>10.0.0.27</target10>
+    </PING>
+    <timers>
+        <LinkDiscovery>12</LinkDiscovery>
+        <SwitchDiscovery>12</SwitchDiscovery>
+        <gossip>5</gossip>
+    </timers>
+    <kill>
+        <switch> s5 </switch>
+        <dpid> 0000000000005000 </dpid>
+        <links> h5 s2 s1 s6 </links>
+    </kill>
+    <MNtcpdump>
+        <intf>eth0</intf>
+        <port> </port>
+        <folder>~/packet_captures/</folder>
+    </MNtcpdump>
+</PARAMS>
diff --git a/TestON/tests/HAfullNetPartition/HAfullNetPartition.py b/TestON/tests/HAfullNetPartition/HAfullNetPartition.py
new file mode 100644
index 0000000..925a1c6
--- /dev/null
+++ b/TestON/tests/HAfullNetPartition/HAfullNetPartition.py
@@ -0,0 +1,4504 @@
+"""
+Description: This test is to determine if ONOS can handle
+             a full network partition
+
+List of test cases:
+CASE1: Compile ONOS and push it to the test machines
+CASE2: Assign devices to controllers
+CASE21: Assign mastership to controllers
+CASE3: Assign intents
+CASE4: Ping across added host intents
+CASE5: Reading state of ONOS
+CASE61: The failure-inducing case.
+CASE62: The failure-recovery case.
+CASE7: Check state after control plane failure
+CASE8: Compare topo
+CASE9: Link s3-s28 down
+CASE10: Link s3-s28 up
+CASE11: Switch down
+CASE12: Switch up
+CASE13: Clean up
+CASE14: start election app on all onos nodes
+CASE15: Check that Leadership Election is still functional
+CASE16: Install Distributed Primitives app
+CASE17: Check for basic functionality with distributed primitives
+"""
+
+
+class HAfullNetPartition:
+
+    def __init__( self ):
+        self.default = ''
+
+    def CASE1( self, main ):
+        """
+        CASE1 is to compile ONOS and push it to the test machines
+
+        Startup sequence:
+        cell <name>
+        onos-verify-cell
+        NOTE: temporary - onos-remove-raft-logs
+        onos-uninstall
+        start mininet
+        git pull
+        mvn clean install
+        onos-package
+        onos-install -f
+        onos-wait-for-start
+        start cli sessions
+        start tcpdump
+        """
+        import imp
+        import pexpect
+        import time
+        main.log.info( "ONOS HA test: Partition ONOS nodes into two sub-clusters - " +
+                       "initialization" )
+        main.case( "Setting up test environment" )
+        main.caseExplanation = "Set up the test environment including " +\
+                                "installing ONOS, starting Mininet and ONOS " +\
+                                "cli sessions."
+
+        # load some variables from the params file
+        PULLCODE = False
+        if main.params[ 'Git' ] == 'True':
+            PULLCODE = True
+        gitBranch = main.params[ 'branch' ]
+        cellName = main.params[ 'ENV' ][ 'cellName' ]
+
+        main.numCtrls = int( main.params[ 'num_controllers' ] )
+        if main.ONOSbench.maxNodes:
+            if main.ONOSbench.maxNodes < main.numCtrls:
+                main.numCtrls = int( main.ONOSbench.maxNodes )
+        # set global variables
+        global ONOS1Port
+        global ONOS2Port
+        global ONOS3Port
+        global ONOS4Port
+        global ONOS5Port
+        global ONOS6Port
+        global ONOS7Port
+
+        # FIXME: just get controller port from params?
+        # TODO: do we really need all these?
+        ONOS1Port = main.params[ 'CTRL' ][ 'port1' ]
+        ONOS2Port = main.params[ 'CTRL' ][ 'port2' ]
+        ONOS3Port = main.params[ 'CTRL' ][ 'port3' ]
+        ONOS4Port = main.params[ 'CTRL' ][ 'port4' ]
+        ONOS5Port = main.params[ 'CTRL' ][ 'port5' ]
+        ONOS6Port = main.params[ 'CTRL' ][ 'port6' ]
+        ONOS7Port = main.params[ 'CTRL' ][ 'port7' ]
+
+        try:
+            fileName = "Counters"
+            # TODO: Maybe make a library folder somewhere?
+            path = main.params[ 'imports' ][ 'path' ]
+            main.Counters = imp.load_source( fileName,
+                                             path + fileName + ".py" )
+        except Exception as e:
+            main.log.exception( e )
+            main.cleanup()
+            main.exit()
+
+        main.CLIs = []
+        main.nodes = []
+        ipList = []
+        for i in range( 1, main.numCtrls + 1 ):
+            try:
+                main.CLIs.append( getattr( main, 'ONOScli' + str( i ) ) )
+                main.nodes.append( getattr( main, 'ONOS' + str( i ) ) )
+                ipList.append( main.nodes[ -1 ].ip_address )
+            except AttributeError:
+                break
+
+        main.step( "Create cell file" )
+        cellAppString = main.params[ 'ENV' ][ 'appString' ]
+        main.ONOSbench.createCellFile( main.ONOSbench.ip_address, cellName,
+                                       main.Mininet1.ip_address,
+                                       cellAppString, ipList )
+        main.step( "Applying cell variable to environment" )
+        cellResult = main.ONOSbench.setCell( cellName )
+        verifyResult = main.ONOSbench.verifyCell()
+
+        # FIXME: this is a short-term fix
+        main.log.info( "Removing raft logs" )
+        main.ONOSbench.onosRemoveRaftLogs()
+
+        main.log.info( "Uninstalling ONOS" )
+        for node in main.nodes:
+            main.ONOSbench.onosUninstall( node.ip_address )
+
+        # Make sure ONOS is DEAD
+        main.log.info( "Killing any ONOS processes" )
+        killResults = main.TRUE
+        for node in main.nodes:
+            killed = main.ONOSbench.onosKill( node.ip_address )
+            killResults = killResults and killed
+
+        cleanInstallResult = main.TRUE
+        gitPullResult = main.TRUE
+
+        main.step( "Starting Mininet" )
+        # scp topo file to mininet
+        # TODO: move to params?
+        topoName = "obelisk.py"
+        filePath = main.ONOSbench.home + "/tools/test/topos/"
+        main.ONOSbench.scp( main.Mininet1,
+                            filePath + topoName,
+                            main.Mininet1.home,
+                            direction="to" )
+        mnResult = main.Mininet1.startNet()
+        utilities.assert_equals( expect=main.TRUE, actual=mnResult,
+                                 onpass="Mininet Started",
+                                 onfail="Error starting Mininet" )
+
+        main.step( "Git checkout and pull " + gitBranch )
+        if PULLCODE:
+            main.ONOSbench.gitCheckout( gitBranch )
+            gitPullResult = main.ONOSbench.gitPull()
+            # values of 1 or 3 are good
+            utilities.assert_lesser( expect=0, actual=gitPullResult,
+                                     onpass="Git pull successful",
+                                     onfail="Git pull failed" )
+        main.ONOSbench.getVersion( report=True )
+
+        main.step( "Using mvn clean install" )
+        cleanInstallResult = main.TRUE
+        if PULLCODE and gitPullResult == main.TRUE:
+            cleanInstallResult = main.ONOSbench.cleanInstall()
+        else:
+            main.log.warn( "Did not pull new code so skipping mvn " +
+                           "clean install" )
+        utilities.assert_equals( expect=main.TRUE,
+                                 actual=cleanInstallResult,
+                                 onpass="MCI successful",
+                                 onfail="MCI failed" )
+        # GRAPHS
+        # NOTE: important params here:
+        #       job = name of Jenkins job
+        #       Plot Name = Plot-HA, only can be used if multiple plots
+        #       index = The number of the graph under plot name
+        job = "HAfullNetPartition"
+        plotName = "Plot-HA"
+        index = "0"
+        graphs = '<ac:structured-macro ac:name="html">\n'
+        graphs += '<ac:plain-text-body><![CDATA[\n'
+        graphs += '<iframe src="https://onos-jenkins.onlab.us/job/' + job +\
+                  '/plot/' + plotName + '/getPlot?index=' + index +\
+                  '&width=500&height=300" ' +\
+                  'noborder="0" width="500" height="300" scrolling="yes" ' +\
+                  'seamless="seamless"></iframe>\n'
+        graphs += ']]></ac:plain-text-body>\n'
+        graphs += '</ac:structured-macro>\n'
+        main.log.wiki( graphs )
+
+        main.step( "Creating ONOS package" )
+        # copy gen-partions file to ONOS
+        # NOTE: this assumes TestON and ONOS are on the same machine
+        srcFile = main.testDir + "/" + main.TEST + "/dependencies/onos-gen-partitions"
+        dstDir = main.ONOSbench.home + "/tools/test/bin/onos-gen-partitions"
+        cpResult = main.ONOSbench.secureCopy( main.ONOSbench.user_name,
+                                              main.ONOSbench.ip_address,
+                                              srcFile,
+                                              dstDir,
+                                              pwd=main.ONOSbench.pwd,
+                                              direction="from" )
+        packageResult = main.ONOSbench.onosPackage()
+        utilities.assert_equals( expect=main.TRUE, actual=packageResult,
+                                 onpass="ONOS package successful",
+                                 onfail="ONOS package failed" )
+
+        main.step( "Installing ONOS package" )
+        onosInstallResult = main.TRUE
+        for node in main.nodes:
+            tmpResult = main.ONOSbench.onosInstall( options="-f",
+                                                    node=node.ip_address )
+            onosInstallResult = onosInstallResult and tmpResult
+        utilities.assert_equals( expect=main.TRUE, actual=onosInstallResult,
+                                 onpass="ONOS install successful",
+                                 onfail="ONOS install failed" )
+        # clean up gen-partitions file
+        try:
+            main.ONOSbench.handle.sendline( "cd " + main.ONOSbench.home )
+            main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
+            main.ONOSbench.handle.sendline( "git checkout -- tools/test/bin/onos-gen-partitions" )
+            main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
+            main.log.info( " Cleaning custom gen partitions file, response was: \n" +
+                           str( main.ONOSbench.handle.before ) )
+        except ( pexpect.TIMEOUT, pexpect.EOF ):
+            main.log.exception( "ONOSbench: pexpect exception found:" +
+                                main.ONOSbench.handle.before )
+            main.cleanup()
+            main.exit()
+
+        main.step( "Checking if ONOS is up yet" )
+        for i in range( 2 ):
+            onosIsupResult = main.TRUE
+            for node in main.nodes:
+                started = main.ONOSbench.isup( node.ip_address )
+                if not started:
+                    main.log.error( node.name + " hasn't started" )
+                onosIsupResult = onosIsupResult and started
+            if onosIsupResult == main.TRUE:
+                break
+        utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
+                                 onpass="ONOS startup successful",
+                                 onfail="ONOS startup failed" )
+
+        main.log.step( "Starting ONOS CLI sessions" )
+        cliResults = main.TRUE
+        threads = []
+        for i in range( main.numCtrls ):
+            t = main.Thread( target=main.CLIs[i].startOnosCli,
+                             name="startOnosCli-" + str( i ),
+                             args=[main.nodes[i].ip_address] )
+            threads.append( t )
+            t.start()
+
+        for t in threads:
+            t.join()
+            cliResults = cliResults and t.result
+        utilities.assert_equals( expect=main.TRUE, actual=cliResults,
+                                 onpass="ONOS cli startup successful",
+                                 onfail="ONOS cli startup failed" )
+
+        # Create a list of active nodes for use when some nodes are stopped
+        main.activeNodes = [ i for i in range( 0, len( main.CLIs ) ) ]
+
+        if main.params[ 'tcpdump' ].lower() == "true":
+            main.step( "Start Packet Capture MN" )
+            main.Mininet2.startTcpdump(
+                str( main.params[ 'MNtcpdump' ][ 'folder' ] ) + str( main.TEST )
+                + "-MN.pcap",
+                intf=main.params[ 'MNtcpdump' ][ 'intf' ],
+                port=main.params[ 'MNtcpdump' ][ 'port' ] )
+
+        main.step( "App Ids check" )
+        time.sleep( 60 )
+        appCheck = main.TRUE
+        threads = []
+        for i in main.activeNodes:
+            t = main.Thread( target=main.CLIs[i].appToIDCheck,
+                             name="appToIDCheck-" + str( i ),
+                             args=[] )
+            threads.append( t )
+            t.start()
+
+        for t in threads:
+            t.join()
+            appCheck = appCheck and t.result
+        if appCheck != main.TRUE:
+            node = main.activeNodes[0]
+            main.log.warn( main.CLIs[node].apps() )
+            main.log.warn( main.CLIs[node].appIDs() )
+        utilities.assert_equals( expect=main.TRUE, actual=appCheck,
+                                 onpass="App Ids seem to be correct",
+                                 onfail="Something is wrong with app Ids" )
+
+        if cliResults == main.FALSE:
+            main.log.error( "Failed to start ONOS, stopping test" )
+            main.cleanup()
+            main.exit()
+
+    def CASE2( self, main ):
+        """
+        Assign devices to controllers
+        """
+        import re
+        assert main.numCtrls, "main.numCtrls not defined"
+        assert main, "main not defined"
+        assert utilities.assert_equals, "utilities.assert_equals not defined"
+        assert main.CLIs, "main.CLIs not defined"
+        assert main.nodes, "main.nodes not defined"
+        assert ONOS1Port, "ONOS1Port not defined"
+        assert ONOS2Port, "ONOS2Port not defined"
+        assert ONOS3Port, "ONOS3Port not defined"
+        assert ONOS4Port, "ONOS4Port not defined"
+        assert ONOS5Port, "ONOS5Port not defined"
+        assert ONOS6Port, "ONOS6Port not defined"
+        assert ONOS7Port, "ONOS7Port not defined"
+
+        main.case( "Assigning devices to controllers" )
+        main.caseExplanation = "Assign switches to ONOS using 'ovs-vsctl' " +\
+                                "and check that an ONOS node becomes the " +\
+                                "master of the device."
+        main.step( "Assign switches to controllers" )
+
+        ipList = []
+        for i in range( main.numCtrls ):
+            ipList.append( main.nodes[ i ].ip_address )
+        swList = []
+        for i in range( 1, 29 ):
+            swList.append( "s" + str( i ) )
+        main.Mininet1.assignSwController( sw=swList, ip=ipList )
+
+        mastershipCheck = main.TRUE
+        for i in range( 1, 29 ):
+            response = main.Mininet1.getSwController( "s" + str( i ) )
+            try:
+                main.log.info( str( response ) )
+            except Exception:
+                main.log.info( repr( response ) )
+            for node in main.nodes:
+                if re.search( "tcp:" + node.ip_address, response ):
+                    mastershipCheck = mastershipCheck and main.TRUE
+                else:
+                    main.log.error( "Error, node " + node.ip_address + " is " +
+                                    "not in the list of controllers s" +
+                                    str( i ) + " is connecting to." )
+                    mastershipCheck = main.FALSE
+        utilities.assert_equals(
+            expect=main.TRUE,
+            actual=mastershipCheck,
+            onpass="Switch mastership assigned correctly",
+            onfail="Switches not assigned correctly to controllers" )
+
+    def CASE21( self, main ):
+        """
+        Assign mastership to controllers
+        """
+        import time
+        assert main.numCtrls, "main.numCtrls not defined"
+        assert main, "main not defined"
+        assert utilities.assert_equals, "utilities.assert_equals not defined"
+        assert main.CLIs, "main.CLIs not defined"
+        assert main.nodes, "main.nodes not defined"
+        assert ONOS1Port, "ONOS1Port not defined"
+        assert ONOS2Port, "ONOS2Port not defined"
+        assert ONOS3Port, "ONOS3Port not defined"
+        assert ONOS4Port, "ONOS4Port not defined"
+        assert ONOS5Port, "ONOS5Port not defined"
+        assert ONOS6Port, "ONOS6Port not defined"
+        assert ONOS7Port, "ONOS7Port not defined"
+
+        main.case( "Assigning Controller roles for switches" )
+        main.caseExplanation = "Check that ONOS is connected to each " +\
+                                "device. Then manually assign" +\
+                                " mastership to specific ONOS nodes using" +\
+                                " 'device-role'"
+        main.step( "Assign mastership of switches to specific controllers" )
+        # Manually assign mastership to the controller we want
+        roleCall = main.TRUE
+
+        ipList = [ ]
+        deviceList = []
+        onosCli = main.CLIs[ main.activeNodes[0] ]
+        try:
+            # Assign mastership to specific controllers. This assignment was
+            # determined for a 7-node cluster, but will work with a cluster
+            # of any size
+            for i in range( 1, 29 ):  # switches 1 through 28
+                # set up correct variables:
+                if i == 1:
+                    c = 0
+                    ip = main.nodes[ c ].ip_address  # ONOS1
+                    deviceId = onosCli.getDevice( "1000" ).get( 'id' )
+                elif i == 2:
+                    c = 1 % main.numCtrls
+                    ip = main.nodes[ c ].ip_address  # ONOS2
+                    deviceId = onosCli.getDevice( "2000" ).get( 'id' )
+                elif i == 3:
+                    c = 1 % main.numCtrls
+                    ip = main.nodes[ c ].ip_address  # ONOS2
+                    deviceId = onosCli.getDevice( "3000" ).get( 'id' )
+                elif i == 4:
+                    c = 3 % main.numCtrls
+                    ip = main.nodes[ c ].ip_address  # ONOS4
+                    deviceId = onosCli.getDevice( "3004" ).get( 'id' )
+                elif i == 5:
+                    c = 2 % main.numCtrls
+                    ip = main.nodes[ c ].ip_address  # ONOS3
+                    deviceId = onosCli.getDevice( "5000" ).get( 'id' )
+                elif i == 6:
+                    c = 2 % main.numCtrls
+                    ip = main.nodes[ c ].ip_address  # ONOS3
+                    deviceId = onosCli.getDevice( "6000" ).get( 'id' )
+                elif i == 7:
+                    c = 5 % main.numCtrls
+                    ip = main.nodes[ c ].ip_address  # ONOS6
+                    deviceId = onosCli.getDevice( "6007" ).get( 'id' )
+                elif i >= 8 and i <= 17:
+                    c = 4 % main.numCtrls
+                    ip = main.nodes[ c ].ip_address  # ONOS5
+                    dpid = '3' + str( i ).zfill( 3 )
+                    deviceId = onosCli.getDevice( dpid ).get( 'id' )
+                elif i >= 18 and i <= 27:
+                    c = 6 % main.numCtrls
+                    ip = main.nodes[ c ].ip_address  # ONOS7
+                    dpid = '6' + str( i ).zfill( 3 )
+                    deviceId = onosCli.getDevice( dpid ).get( 'id' )
+                elif i == 28:
+                    c = 0
+                    ip = main.nodes[ c ].ip_address  # ONOS1
+                    deviceId = onosCli.getDevice( "2800" ).get( 'id' )
+                else:
+                    main.log.error( "You didn't write an else statement for " +
+                                    "switch s" + str( i ) )
+                    roleCall = main.FALSE
+                # Assign switch
+                assert deviceId, "No device id for s" + str( i ) + " in ONOS"
+                # TODO: make this controller dynamic
+                roleCall = roleCall and onosCli.deviceRole( deviceId, ip )
+                ipList.append( ip )
+                deviceList.append( deviceId )
+        except ( AttributeError, AssertionError ):
+            main.log.exception( "Something is wrong with ONOS device view" )
+            main.log.info( onosCli.devices() )
+        utilities.assert_equals(
+            expect=main.TRUE,
+            actual=roleCall,
+            onpass="Re-assigned switch mastership to designated controller",
+            onfail="Something wrong with deviceRole calls" )
+
+        main.step( "Check mastership was correctly assigned" )
+        roleCheck = main.TRUE
+        # NOTE: This is due to the fact that device mastership change is not
+        #       atomic and is actually a multi-step process
+        time.sleep( 5 )
+        for i in range( len( ipList ) ):
+            ip = ipList[i]
+            deviceId = deviceList[i]
+            # Check assignment
+            master = onosCli.getRole( deviceId ).get( 'master' )
+            if ip in master:
+                roleCheck = roleCheck and main.TRUE
+            else:
+                roleCheck = roleCheck and main.FALSE
+                main.log.error( "Error, controller " + ip + " is not" +
+                                " the master of device " +
+                                str( deviceId ) + ". Master is " +
+                                repr( master ) + "." )
+        utilities.assert_equals(
+            expect=main.TRUE,
+            actual=roleCheck,
+            onpass="Switches were successfully reassigned to designated " +
+                   "controller",
+            onfail="Switches were not successfully reassigned" )
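+        # NOTE: The fixed 5 second sleep above is a heuristic; polling would
+        #       tolerate slower mastership convergence. A sketch, assuming
+        #       the same onosCli handle ( hypothetical, not used here ):
+        #
+        #           for attempt in range( 10 ):
+        #               masters = [ onosCli.getRole( d ).get( 'master' )
+        #                           for d in deviceList ]
+        #               if all( ip in m for ip, m in zip( ipList, masters ) ):
+        #                   break
+        #               time.sleep( 1 )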
+
+    def CASE3( self, main ):
+        """
+        Assign intents
+        """
+        import time
+        import json
+        assert main.numCtrls, "main.numCtrls not defined"
+        assert main, "main not defined"
+        assert utilities.assert_equals, "utilities.assert_equals not defined"
+        assert main.CLIs, "main.CLIs not defined"
+        assert main.nodes, "main.nodes not defined"
+        main.case( "Adding host Intents" )
+        main.caseExplanation = "Discover hosts by using pingall then " +\
+                                "assign predetermined host-to-host intents." +\
+                                " After installation, check that the intent" +\
+                                " is distributed to all nodes and the state" +\
+                                " is INSTALLED"
+
+        # install onos-app-fwd
+        main.step( "Install reactive forwarding app" )
+        onosCli = main.CLIs[ main.activeNodes[0] ]
+        installResults = onosCli.activateApp( "org.onosproject.fwd" )
+        utilities.assert_equals( expect=main.TRUE, actual=installResults,
+                                 onpass="Install fwd successful",
+                                 onfail="Install fwd failed" )
+
+        main.step( "Check app ids" )
+        appCheck = main.TRUE
+        threads = []
+        for i in main.activeNodes:
+            t = main.Thread( target=main.CLIs[i].appToIDCheck,
+                             name="appToIDCheck-" + str( i ),
+                             args=[] )
+            threads.append( t )
+            t.start()
+
+        for t in threads:
+            t.join()
+            appCheck = appCheck and t.result
+        if appCheck != main.TRUE:
+            main.log.warn( onosCli.apps() )
+            main.log.warn( onosCli.appIDs() )
+        utilities.assert_equals( expect=main.TRUE, actual=appCheck,
+                                 onpass="App Ids seem to be correct",
+                                 onfail="Something is wrong with app Ids" )
+
+        main.step( "Discovering hosts ( via pingall for now )" )
+        # FIXME: Once we have a host discovery mechanism, use that instead
+        # REACTIVE FWD test
+        pingResult = main.FALSE
+        passMsg = "Reactive Pingall test passed"
+        time1 = time.time()
+        pingResult = main.Mininet1.pingall()
+        time2 = time.time()
+        if not pingResult:
+            main.log.warn( "First pingall failed. Trying again..." )
+            pingResult = main.Mininet1.pingall()
+            passMsg += " on the second try"
+        utilities.assert_equals(
+            expect=main.TRUE,
+            actual=pingResult,
+            onpass=passMsg,
+            onfail="Reactive Pingall failed, " +
+                   "one or more ping pairs failed" )
+        main.log.info( "Time for pingall: %.2f seconds" %
+                       ( time2 - time1 ) )
+        # Give the reactive fwd flows time to idle out before adding intents
+        time.sleep( 11 )
+        # uninstall onos-app-fwd
+        main.step( "Uninstall reactive forwarding app" )
+        node = main.activeNodes[0]
+        uninstallResult = main.CLIs[node].deactivateApp( "org.onosproject.fwd" )
+        utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
+                                 onpass="Uninstall fwd successful",
+                                 onfail="Uninstall fwd failed" )
+
+        main.step( "Check app ids" )
+        threads = []
+        appCheck2 = main.TRUE
+        for i in main.activeNodes:
+            t = main.Thread( target=main.CLIs[i].appToIDCheck,
+                             name="appToIDCheck-" + str( i ),
+                             args=[] )
+            threads.append( t )
+            t.start()
+
+        for t in threads:
+            t.join()
+            appCheck2 = appCheck2 and t.result
+        if appCheck2 != main.TRUE:
+            node = main.activeNodes[0]
+            main.log.warn( main.CLIs[node].apps() )
+            main.log.warn( main.CLIs[node].appIDs() )
+        utilities.assert_equals( expect=main.TRUE, actual=appCheck2,
+                                 onpass="App Ids seem to be correct",
+                                 onfail="Something is wrong with app Ids" )
+
+        main.step( "Add host intents via cli" )
+        intentIds = []
+        # TODO: move the host numbers to params
+        #       Maybe look at all the paths we ping?
+        intentAddResult = True
+        hostResult = main.TRUE
+        for i in range( 8, 18 ):
+            main.log.info( "Adding host intent between h" + str( i ) +
+                           " and h" + str( i + 10 ) )
+            host1 = "00:00:00:00:00:" + \
+                str( hex( i )[ 2: ] ).zfill( 2 ).upper()
+            host2 = "00:00:00:00:00:" + \
+                str( hex( i + 10 )[ 2: ] ).zfill( 2 ).upper()
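+            # e.g. i = 10 gives "00:00:00:00:00:0A"; the hex()/zfill()
+            # construction is equivalent to "00:00:00:00:00:%02X" % i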
+            # NOTE: getHost can return None
+            host1Dict = onosCli.getHost( host1 )
+            host2Dict = onosCli.getHost( host2 )
+            host1Id = None
+            host2Id = None
+            if host1Dict and host2Dict:
+                host1Id = host1Dict.get( 'id', None )
+                host2Id = host2Dict.get( 'id', None )
+            if host1Id and host2Id:
+                nodeNum = ( i % len( main.activeNodes ) )
+                node = main.activeNodes[nodeNum]
+                tmpId = main.CLIs[node].addHostIntent( host1Id, host2Id )
+                if tmpId:
+                    main.log.info( "Added intent with id: " + tmpId )
+                    intentIds.append( tmpId )
+                else:
+                    main.log.error( "addHostIntent returned: " +
+                                     repr( tmpId ) )
+            else:
+                main.log.error( "Error, getHost() failed for h" + str( i ) +
+                                " and/or h" + str( i + 10 ) )
+                node = main.activeNodes[0]
+                hosts = main.CLIs[node].hosts()
+                main.log.warn( "Hosts output: " )
+                try:
+                    main.log.warn( json.dumps( json.loads( hosts ),
+                                               sort_keys=True,
+                                               indent=4,
+                                               separators=( ',', ': ' ) ) )
+                except ( ValueError, TypeError ):
+                    main.log.warn( repr( hosts ) )
+                hostResult = main.FALSE
+        utilities.assert_equals( expect=main.TRUE, actual=hostResult,
+                                 onpass="Found a host id for each host",
+                                 onfail="Error looking up host ids" )
+
+        intentStart = time.time()
+        onosIds = onosCli.getAllIntentsId()
+        main.log.info( "Submitted intents: " + str( intentIds ) )
+        main.log.info( "Intents in ONOS: " + str( onosIds ) )
+        for intent in intentIds:
+            if intent in onosIds:
+                pass  # intent submitted is in onos
+            else:
+                intentAddResult = False
+        if intentAddResult:
+            intentStop = time.time()
+        else:
+            intentStop = None
+        # Print the intent states
+        intents = onosCli.intents()
+        intentStates = []
+        installedCheck = True
+        main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
+        count = 0
+        try:
+            for intent in json.loads( intents ):
+                state = intent.get( 'state', None )
+                if "INSTALLED" not in state:
+                    installedCheck = False
+                intentId = intent.get( 'id', None )
+                intentStates.append( ( intentId, state ) )
+        except ( ValueError, TypeError ):
+            main.log.exception( "Error parsing intents" )
+        # add submitted intents not in the store
+        tmplist = [ i for i, s in intentStates ]
+        missingIntents = False
+        for i in intentIds:
+            if i not in tmplist:
+                intentStates.append( ( i, " - " ) )
+                missingIntents = True
+        intentStates.sort()
+        for i, s in intentStates:
+            count += 1
+            main.log.info( "%-6s%-15s%-15s" %
+                           ( str( count ), str( i ), str( s ) ) )
+        leaders = onosCli.leaders()
+        try:
+            missing = False
+            if leaders:
+                parsedLeaders = json.loads( leaders )
+                main.log.warn( json.dumps( parsedLeaders,
+                                           sort_keys=True,
+                                           indent=4,
+                                           separators=( ',', ': ' ) ) )
+                # check for all intent partitions
+                topics = []
+                for i in range( 14 ):
+                    topics.append( "intent-partition-" + str( i ) )
+                main.log.debug( topics )
+                ONOStopics = [ j['topic'] for j in parsedLeaders ]
+                for topic in topics:
+                    if topic not in ONOStopics:
+                        main.log.error( "Error: " + topic +
+                                        " not in leaders" )
+                        missing = True
+            else:
+                main.log.error( "leaders() returned None" )
+        except ( ValueError, TypeError ):
+            main.log.exception( "Error parsing leaders" )
+            main.log.error( repr( leaders ) )
+        # Check all nodes
+        if missing:
+            for i in main.activeNodes:
+                response = main.CLIs[i].leaders( jsonFormat=False )
+                main.log.warn( str( main.CLIs[i].name ) + " leaders output: \n" +
+                               str( response ) )
+
+        partitions = onosCli.partitions()
+        try:
+            if partitions:
+                parsedPartitions = json.loads( partitions )
+                main.log.warn( json.dumps( parsedPartitions,
+                                           sort_keys=True,
+                                           indent=4,
+                                           separators=( ',', ': ' ) ) )
+                # TODO check for a leader in all partitions
+                # TODO check for consistency among nodes
+            else:
+                main.log.error( "partitions() returned None" )
+        except ( ValueError, TypeError ):
+            main.log.exception( "Error parsing partitions" )
+            main.log.error( repr( partitions ) )
+        pendingMap = onosCli.pendingMap()
+        try:
+            if pendingMap:
+                parsedPending = json.loads( pendingMap )
+                main.log.warn( json.dumps( parsedPending,
+                                           sort_keys=True,
+                                           indent=4,
+                                           separators=( ',', ': ' ) ) )
+                # TODO check something here?
+            else:
+                main.log.error( "pendingMap() returned None" )
+        except ( ValueError, TypeError ):
+            main.log.exception( "Error parsing pending map" )
+            main.log.error( repr( pendingMap ) )
+
+        intentAddResult = bool( intentAddResult and not missingIntents and
+                                installedCheck )
+        if not intentAddResult:
+            main.log.error( "Error in pushing host intents to ONOS" )
+
+        main.step( "Intent Anti-Entropy dispersion" )
+        for j in range( 100 ):
+            correct = True
+            main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
+            for i in main.activeNodes:
+                ids = main.CLIs[i].getAllIntentsId()
+                main.log.debug( "Intents in " + main.CLIs[i].name + ": " +
+                                str( sorted( ids ) ) )
+                if sorted( ids ) != sorted( intentIds ):
+                    main.log.warn( "Set of intent IDs doesn't match" )
+                    correct = False
+                    break
+                else:
+                    intents = json.loads( main.CLIs[i].intents() )
+                    for intent in intents:
+                        if intent[ 'state' ] != "INSTALLED":
+                            main.log.warn( "Intent " + intent[ 'id' ] +
+                                           " is " + intent[ 'state' ] )
+                            correct = False
+                            break
+            if correct:
+                break
+            else:
+                time.sleep( 1 )
+        if not intentStop:
+            intentStop = time.time()
+        global gossipTime
+        gossipTime = intentStop - intentStart
+        main.log.info( "It took about " + str( gossipTime ) +
+                        " seconds for all intents to appear in each node" )
+        gossipPeriod = int( main.params['timers']['gossip'] )
+        maxGossipTime = gossipPeriod * len( main.activeNodes )
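+        # e.g. a 5 second gossip timer with 7 active nodes allows
+        # 5 * 7 = 35 seconds for intents to reach every node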
+        utilities.assert_greater_equals(
+                expect=maxGossipTime, actual=gossipTime,
+                onpass="ECM anti-entropy for intents worked within " +
+                       "expected time",
+                onfail="Intent ECM anti-entropy took too long. " +
+                       "Expected time:{}, Actual time:{}".format( maxGossipTime,
+                                                                  gossipTime ) )
+        if gossipTime <= maxGossipTime:
+            intentAddResult = True
+
+        if not intentAddResult or "key" in pendingMap:
+            installedCheck = True
+            main.log.info( "Sleeping 60 seconds to see if intents are found" )
+            time.sleep( 60 )
+            onosIds = onosCli.getAllIntentsId()
+            main.log.info( "Submitted intents: " + str( intentIds ) )
+            main.log.info( "Intents in ONOS: " + str( onosIds ) )
+            # Print the intent states
+            intents = onosCli.intents()
+            intentStates = []
+            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
+            count = 0
+            try:
+                for intent in json.loads( intents ):
+                    # Iter through intents of a node
+                    state = intent.get( 'state', None )
+                    if "INSTALLED" not in state:
+                        installedCheck = False
+                    intentId = intent.get( 'id', None )
+                    intentStates.append( ( intentId, state ) )
+            except ( ValueError, TypeError ):
+                main.log.exception( "Error parsing intents" )
+            # add submitted intents not in the store
+            tmplist = [ i for i, s in intentStates ]
+            for i in intentIds:
+                if i not in tmplist:
+                    intentStates.append( ( i, " - " ) )
+            intentStates.sort()
+            for i, s in intentStates:
+                count += 1
+                main.log.info( "%-6s%-15s%-15s" %
+                               ( str( count ), str( i ), str( s ) ) )
+            leaders = onosCli.leaders()
+            try:
+                missing = False
+                if leaders:
+                    parsedLeaders = json.loads( leaders )
+                    main.log.warn( json.dumps( parsedLeaders,
+                                               sort_keys=True,
+                                               indent=4,
+                                               separators=( ',', ': ' ) ) )
+                    # check for all intent partitions
+                    # check for election
+                    topics = []
+                    for i in range( 14 ):
+                        topics.append( "intent-partition-" + str( i ) )
+                    # FIXME: this should only be after we start the app
+                    topics.append( "org.onosproject.election" )
+                    main.log.debug( topics )
+                    ONOStopics = [ j['topic'] for j in parsedLeaders ]
+                    for topic in topics:
+                        if topic not in ONOStopics:
+                            main.log.error( "Error: " + topic +
+                                            " not in leaders" )
+                            missing = True
+                else:
+                    main.log.error( "leaders() returned None" )
+            except ( ValueError, TypeError ):
+                main.log.exception( "Error parsing leaders" )
+                main.log.error( repr( leaders ) )
+            # Check all nodes
+            if missing:
+                for i in main.activeNodes:
+                    node = main.CLIs[i]
+                    response = node.leaders( jsonFormat=False )
+                    main.log.warn( str( node.name ) + " leaders output: \n" +
+                                   str( response ) )
+
+            partitions = onosCli.partitions()
+            try:
+                if partitions:
+                    parsedPartitions = json.loads( partitions )
+                    main.log.warn( json.dumps( parsedPartitions,
+                                               sort_keys=True,
+                                               indent=4,
+                                               separators=( ',', ': ' ) ) )
+                    # TODO check for a leader in all partitions
+                    # TODO check for consistency among nodes
+                else:
+                    main.log.error( "partitions() returned None" )
+            except ( ValueError, TypeError ):
+                main.log.exception( "Error parsing partitions" )
+                main.log.error( repr( partitions ) )
+            pendingMap = onosCli.pendingMap()
+            try:
+                if pendingMap:
+                    parsedPending = json.loads( pendingMap )
+                    main.log.warn( json.dumps( parsedPending,
+                                               sort_keys=True,
+                                               indent=4,
+                                               separators=( ',', ': ' ) ) )
+                    # TODO check something here?
+                else:
+                    main.log.error( "pendingMap() returned None" )
+            except ( ValueError, TypeError ):
+                main.log.exception( "Error parsing pending map" )
+                main.log.error( repr( pendingMap ) )
+
+    def CASE4( self, main ):
+        """
+        Ping across added host intents
+        """
+        import json
+        import time
+        assert main.numCtrls, "main.numCtrls not defined"
+        assert main, "main not defined"
+        assert utilities.assert_equals, "utilities.assert_equals not defined"
+        assert main.CLIs, "main.CLIs not defined"
+        assert main.nodes, "main.nodes not defined"
+        main.case( "Verify connectivity by sending traffic across Intents" )
+        main.caseExplanation = "Ping across added host intents to check " +\
+                                "functionality and check the state of " +\
+                                "the intent"
+        main.step( "Ping across added host intents" )
+        onosCli = main.CLIs[ main.activeNodes[0] ]
+        PingResult = main.TRUE
+        for i in range( 8, 18 ):
+            ping = main.Mininet1.pingHost( src="h" + str( i ),
+                                           target="h" + str( i + 10 ) )
+            PingResult = PingResult and ping
+            if ping == main.FALSE:
+                main.log.warn( "Ping failed between h" + str( i ) +
+                               " and h" + str( i + 10 ) )
+            elif ping == main.TRUE:
+                main.log.info( "Ping test passed!" )
+                # Don't set PingResult or you'd override failures
+        if PingResult == main.FALSE:
+            main.log.error(
+                "Intents have not been installed correctly, pings failed." )
+            # TODO: pretty print
+            main.log.warn( "ONOS1 intents: " )
+            try:
+                tmpIntents = onosCli.intents()
+                main.log.warn( json.dumps( json.loads( tmpIntents ),
+                                           sort_keys=True,
+                                           indent=4,
+                                           separators=( ',', ': ' ) ) )
+            except ( ValueError, TypeError ):
+                main.log.warn( repr( tmpIntents ) )
+        utilities.assert_equals(
+            expect=main.TRUE,
+            actual=PingResult,
+            onpass="Intents have been installed correctly and pings work",
+            onfail="Intents have not been installed correctly, pings failed." )
+
+        main.step( "Check Intent state" )
+        installedCheck = False
+        loopCount = 0
+        while not installedCheck and loopCount < 40:
+            installedCheck = True
+            # Print the intent states
+            intents = onosCli.intents()
+            intentStates = []
+            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
+            count = 0
+            # Iter through intents of a node
+            try:
+                for intent in json.loads( intents ):
+                    state = intent.get( 'state', None )
+                    if "INSTALLED" not in state:
+                        installedCheck = False
+                    intentId = intent.get( 'id', None )
+                    intentStates.append( ( intentId, state ) )
+            except ( ValueError, TypeError ):
+                main.log.exception( "Error parsing intents." )
+            # Print states
+            intentStates.sort()
+            for i, s in intentStates:
+                count += 1
+                main.log.info( "%-6s%-15s%-15s" %
+                               ( str( count ), str( i ), str( s ) ) )
+            if not installedCheck:
+                time.sleep( 1 )
+                loopCount += 1
+        utilities.assert_equals( expect=True, actual=installedCheck,
+                                 onpass="Intents are all INSTALLED",
+                                 onfail="Intents are not all in " +
+                                        "INSTALLED state" )
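+        # The loop above polls roughly once per second, giving intents up to
+        # about 40 seconds to settle into the INSTALLED state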
+
+        main.step( "Check leadership of topics" )
+        leaders = onosCli.leaders()
+        topicCheck = main.TRUE
+        try:
+            if leaders:
+                parsedLeaders = json.loads( leaders )
+                main.log.warn( json.dumps( parsedLeaders,
+                                           sort_keys=True,
+                                           indent=4,
+                                           separators=( ',', ': ' ) ) )
+                # check for all intent partitions
+                # check for election
+                # TODO: Look at Devices as topics now that it uses this system
+                topics = []
+                for i in range( 14 ):
+                    topics.append( "intent-partition-" + str( i ) )
+                # FIXME: this should only be after we start the app
+                # FIXME: topics.append( "org.onosproject.election" )
+                # Print leaders output
+                main.log.debug( topics )
+                ONOStopics = [ j['topic'] for j in parsedLeaders ]
+                for topic in topics:
+                    if topic not in ONOStopics:
+                        main.log.error( "Error: " + topic +
+                                        " not in leaders" )
+                        topicCheck = main.FALSE
+            else:
+                main.log.error( "leaders() returned None" )
+                topicCheck = main.FALSE
+        except ( ValueError, TypeError ):
+            topicCheck = main.FALSE
+            main.log.exception( "Error parsing leaders" )
+            main.log.error( repr( leaders ) )
+            # TODO: Check for a leader of these topics
+        # Check all nodes
+        if not topicCheck:
+            for i in main.activeNodes:
+                node = main.CLIs[i]
+                response = node.leaders( jsonFormat=False )
+                main.log.warn( str( node.name ) + " leaders output: \n" +
+                               str( response ) )
+
+        utilities.assert_equals( expect=main.TRUE, actual=topicCheck,
+                                 onpass="Intent partitions are in leaders",
+                                 onfail="Some topics were lost" )
+        # Print partitions
+        partitions = onosCli.partitions()
+        try:
+            if partitions:
+                parsedPartitions = json.loads( partitions )
+                main.log.warn( json.dumps( parsedPartitions,
+                                           sort_keys=True,
+                                           indent=4,
+                                           separators=( ',', ': ' ) ) )
+                # TODO check for a leader in all partitions
+                # TODO check for consistency among nodes
+            else:
+                main.log.error( "partitions() returned None" )
+        except ( ValueError, TypeError ):
+            main.log.exception( "Error parsing partitions" )
+            main.log.error( repr( partitions ) )
+        # Print Pending Map
+        pendingMap = onosCli.pendingMap()
+        try:
+            if pendingMap:
+                parsedPending = json.loads( pendingMap )
+                main.log.warn( json.dumps( parsedPending,
+                                           sort_keys=True,
+                                           indent=4,
+                                           separators=( ',', ': ' ) ) )
+                # TODO check something here?
+            else:
+                main.log.error( "pendingMap() returned None" )
+        except ( ValueError, TypeError ):
+            main.log.exception( "Error parsing pending map" )
+            main.log.error( repr( pendingMap ) )
+
+        if not installedCheck:
+            main.log.info( "Waiting 60 seconds to see if the state of " +
+                           "intents change" )
+            time.sleep( 60 )
+            # Print the intent states
+            intents = onosCli.intents()
+            intentStates = []
+            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
+            count = 0
+            # Iter through intents of a node
+            try:
+                for intent in json.loads( intents ):
+                    state = intent.get( 'state', None )
+                    if "INSTALLED" not in state:
+                        installedCheck = False
+                    intentId = intent.get( 'id', None )
+                    intentStates.append( ( intentId, state ) )
+            except ( ValueError, TypeError ):
+                main.log.exception( "Error parsing intents." )
+            intentStates.sort()
+            for i, s in intentStates:
+                count += 1
+                main.log.info( "%-6s%-15s%-15s" %
+                               ( str( count ), str( i ), str( s ) ) )
+            leaders = onosCli.leaders()
+            try:
+                missing = False
+                if leaders:
+                    parsedLeaders = json.loads( leaders )
+                    main.log.warn( json.dumps( parsedLeaders,
+                                               sort_keys=True,
+                                               indent=4,
+                                               separators=( ',', ': ' ) ) )
+                    # check for all intent partitions
+                    # check for election
+                    topics = []
+                    for i in range( 14 ):
+                        topics.append( "intent-partition-" + str( i ) )
+                    # FIXME: this should only be after we start the app
+                    topics.append( "org.onosproject.election" )
+                    main.log.debug( topics )
+                    ONOStopics = [ j['topic'] for j in parsedLeaders ]
+                    for topic in topics:
+                        if topic not in ONOStopics:
+                            main.log.error( "Error: " + topic +
+                                            " not in leaders" )
+                            missing = True
+                else:
+                    main.log.error( "leaders() returned None" )
+            except ( ValueError, TypeError ):
+                main.log.exception( "Error parsing leaders" )
+                main.log.error( repr( leaders ) )
+            if missing:
+                for i in main.activeNodes:
+                    node = main.CLIs[i]
+                    response = node.leaders( jsonFormat=False )
+                    main.log.warn( str( node.name ) + " leaders output: \n" +
+                                   str( response ) )
+
+            partitions = onosCli.partitions()
+            try:
+                if partitions:
+                    parsedPartitions = json.loads( partitions )
+                    main.log.warn( json.dumps( parsedPartitions,
+                                               sort_keys=True,
+                                               indent=4,
+                                               separators=( ',', ': ' ) ) )
+                    # TODO check for a leader in all partitions
+                    # TODO check for consistency among nodes
+                else:
+                    main.log.error( "partitions() returned None" )
+            except ( ValueError, TypeError ):
+                main.log.exception( "Error parsing partitions" )
+                main.log.error( repr( partitions ) )
+            pendingMap = onosCli.pendingMap()
+            try:
+                if pendingMap:
+                    parsedPending = json.loads( pendingMap )
+                    main.log.warn( json.dumps( parsedPending,
+                                               sort_keys=True,
+                                               indent=4,
+                                               separators=( ',', ': ' ) ) )
+                    # TODO check something here?
+                else:
+                    main.log.error( "pendingMap() returned None" )
+            except ( ValueError, TypeError ):
+                main.log.exception( "Error parsing pending map" )
+                main.log.error( repr( pendingMap ) )
+        # Print flowrules
+        node = main.activeNodes[0]
+        main.log.debug( main.CLIs[node].flows( jsonFormat=False ) )
+        main.step( "Wait a minute then ping again" )
+        # the wait is above
+        PingResult = main.TRUE
+        for i in range( 8, 18 ):
+            ping = main.Mininet1.pingHost( src="h" + str( i ),
+                                           target="h" + str( i + 10 ) )
+            PingResult = PingResult and ping
+            if ping == main.FALSE:
+                main.log.warn( "Ping failed between h" + str( i ) +
+                               " and h" + str( i + 10 ) )
+            elif ping == main.TRUE:
+                main.log.info( "Ping test passed!" )
+                # Don't set PingResult or you'd override failures
+        if PingResult == main.FALSE:
+            main.log.error(
+                "Intents have not been installed correctly, pings failed." )
+            # TODO: pretty print
+            main.log.warn( "ONOS1 intents: " )
+            try:
+                tmpIntents = onosCli.intents()
+                main.log.warn( json.dumps( json.loads( tmpIntents ),
+                                           sort_keys=True,
+                                           indent=4,
+                                           separators=( ',', ': ' ) ) )
+            except ( ValueError, TypeError ):
+                main.log.warn( repr( tmpIntents ) )
+        utilities.assert_equals(
+            expect=main.TRUE,
+            actual=PingResult,
+            onpass="Intents have been installed correctly and pings work",
+            onfail="Intents have not been installed correctly, pings failed." )
+
+    def CASE5( self, main ):
+        """
+        Reading state of ONOS
+        """
+        import json
+        import time
+        assert main.numCtrls, "main.numCtrls not defined"
+        assert main, "main not defined"
+        assert utilities.assert_equals, "utilities.assert_equals not defined"
+        assert main.CLIs, "main.CLIs not defined"
+        assert main.nodes, "main.nodes not defined"
+
+        main.case( "Setting up and gathering data for current state" )
+        # The general idea for this test case is to pull the state of
+        # ( intents,flows, topology,... ) from each ONOS node
+        # We can then compare them with each other and also with past states
+
+        main.step( "Check that each switch has a master" )
+        global mastershipState
+        mastershipState = '[]'
+
+        # Assert that each device has a master
+        rolesNotNull = main.TRUE
+        threads = []
+        for i in main.activeNodes:
+            t = main.Thread( target=main.CLIs[i].rolesNotNull,
+                             name="rolesNotNull-" + str( i ),
+                             args=[] )
+            threads.append( t )
+            t.start()
+
+        for t in threads:
+            t.join()
+            rolesNotNull = rolesNotNull and t.result
+        utilities.assert_equals(
+            expect=main.TRUE,
+            actual=rolesNotNull,
+            onpass="Each device has a master",
+            onfail="Some devices don't have a master assigned" )
+
+        main.step( "Get the Mastership of each switch from each controller" )
+        ONOSMastership = []
+        mastershipCheck = main.FALSE
+        consistentMastership = True
+        rolesResults = True
+        threads = []
+        for i in main.activeNodes:
+            t = main.Thread( target=main.CLIs[i].roles,
+                             name="roles-" + str( i ),
+                             args=[] )
+            threads.append( t )
+            t.start()
+
+        for t in threads:
+            t.join()
+            ONOSMastership.append( t.result )
+
+        for i in range( len( ONOSMastership ) ):
+            node = str( main.activeNodes[i] + 1 )
+            if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
+                main.log.error( "Error in getting ONOS" + node + " roles" )
+                main.log.warn( "ONOS" + node + " mastership response: " +
+                               repr( ONOSMastership[i] ) )
+                rolesResults = False
+        utilities.assert_equals(
+            expect=True,
+            actual=rolesResults,
+            onpass="No error in reading roles output",
+            onfail="Error in reading roles from ONOS" )
+
+        main.step( "Check for consistency in roles from each controller" )
+        if all( [ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
+            main.log.info(
+                "Switch roles are consistent across all ONOS nodes" )
+        else:
+            consistentMastership = False
+        utilities.assert_equals(
+            expect=True,
+            actual=consistentMastership,
+            onpass="Switch roles are consistent across all ONOS nodes",
+            onfail="ONOS nodes have different views of switch roles" )
+
+        if rolesResults and not consistentMastership:
+            for i in range( len( main.activeNodes ) ):
+                node = str( main.activeNodes[i] + 1 )
+                try:
+                    main.log.warn(
+                        "ONOS" + node + " roles: ",
+                        json.dumps(
+                            json.loads( ONOSMastership[ i ] ),
+                            sort_keys=True,
+                            indent=4,
+                            separators=( ',', ': ' ) ) )
+                except ( ValueError, TypeError ):
+                    main.log.warn( repr( ONOSMastership[ i ] ) )
+        elif rolesResults and consistentMastership:
+            mastershipCheck = main.TRUE
+            mastershipState = ONOSMastership[ 0 ]
+
+        main.step( "Get the intents from each controller" )
+        global intentState
+        intentState = []
+        ONOSIntents = []
+        intentCheck = main.FALSE
+        consistentIntents = True
+        intentsResults = True
+        threads = []
+        for i in main.activeNodes:
+            t = main.Thread( target=main.CLIs[i].intents,
+                             name="intents-" + str( i ),
+                             args=[],
+                             kwargs={ 'jsonFormat': True } )
+            threads.append( t )
+            t.start()
+
+        for t in threads:
+            t.join()
+            ONOSIntents.append( t.result )
+
+        for i in range( len( ONOSIntents ) ):
+            node = str( main.activeNodes[i] + 1 )
+            if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
+                main.log.error( "Error in getting ONOS" + node + " intents" )
+                main.log.warn( "ONOS" + node + " intents response: " +
+                               repr( ONOSIntents[ i ] ) )
+                intentsResults = False
+        utilities.assert_equals(
+            expect=True,
+            actual=intentsResults,
+            onpass="No error in reading intents output",
+            onfail="Error in reading intents from ONOS" )
+
+        main.step( "Check for consistency in Intents from each controller" )
+        if all( [ sorted( i ) == sorted( ONOSIntents[ 0 ] )
+                  for i in ONOSIntents ] ):
+            main.log.info( "Intents are consistent across all ONOS nodes" )
+        else:
+            consistentIntents = False
+            main.log.error( "Intents not consistent" )
+        utilities.assert_equals(
+            expect=True,
+            actual=consistentIntents,
+            onpass="Intents are consistent across all ONOS nodes",
+            onfail="ONOS nodes have different views of intents" )
+
+        if intentsResults:
+            # Try to make it easy to figure out what is happening
+            #
+            # Intent      ONOS1      ONOS2    ...
+            #  0x01     INSTALLED  INSTALLING
+            #  ...        ...         ...
+            #  ...        ...         ...
+            title = "   Id"
+            for n in main.activeNodes:
+                title += " " * 10 + "ONOS" + str( n + 1 )
+            main.log.warn( title )
+            # get all intent keys in the cluster
+            keys = []
+            for nodeStr in ONOSIntents:
+                node = json.loads( nodeStr )
+                for intent in node:
+                    keys.append( intent.get( 'id' ) )
+            keys = set( keys )
+            for key in keys:
+                row = "%-13s" % key
+                for nodeStr in ONOSIntents:
+                    node = json.loads( nodeStr )
+                    for intent in node:
+                        if intent.get( 'id', "Error" ) == key:
+                            row += "%-15s" % intent.get( 'state' )
+                main.log.warn( row )
+            # End table view
+
+        if intentsResults and not consistentIntents:
+            # print the json objects
+            n = str( main.activeNodes[-1] + 1 )
+            main.log.debug( "ONOS" + n + " intents: " )
+            main.log.debug( json.dumps( json.loads( ONOSIntents[ -1 ] ),
+                                        sort_keys=True,
+                                        indent=4,
+                                        separators=( ',', ': ' ) ) )
+            for i in range( len( ONOSIntents ) ):
+                node = str( main.activeNodes[i] + 1 )
+                if ONOSIntents[ i ] != ONOSIntents[ -1 ]:
+                    main.log.debug( "ONOS" + node + " intents: " )
+                    main.log.debug( json.dumps( json.loads( ONOSIntents[i] ),
+                                                sort_keys=True,
+                                                indent=4,
+                                                separators=( ',', ': ' ) ) )
+                else:
+                    main.log.debug( "ONOS" + node + " intents match ONOS" +
+                                    n + " intents" )
+        elif intentsResults and consistentIntents:
+            intentCheck = main.TRUE
+            intentState = ONOSIntents[ 0 ]
+
+        main.step( "Get the flows from each controller" )
+        global flowState
+        flowState = []
+        ONOSFlows = []
+        ONOSFlowsJson = []
+        flowCheck = main.FALSE
+        consistentFlows = True
+        flowsResults = True
+        threads = []
+        for i in main.activeNodes:
+            t = main.Thread( target=main.CLIs[i].flows,
+                             name="flows-" + str( i ),
+                             args=[],
+                             kwargs={ 'jsonFormat': True } )
+            threads.append( t )
+            t.start()
+
+        # NOTE: Flows command can take some time to run
+        time.sleep( 30 )
+        for t in threads:
+            t.join()
+            result = t.result
+            ONOSFlows.append( result )
+
+        for i in range( len( ONOSFlows ) ):
+            num = str( main.activeNodes[i] + 1 )
+            if not ONOSFlows[ i ] or "Error" in ONOSFlows[ i ]:
+                main.log.error( "Error in getting ONOS" + num + " flows" )
+                main.log.warn( "ONOS" + num + " flows response: " +
+                               repr( ONOSFlows[ i ] ) )
+                flowsResults = False
+                ONOSFlowsJson.append( None )
+            else:
+                try:
+                    ONOSFlowsJson.append( json.loads( ONOSFlows[ i ] ) )
+                except ( ValueError, TypeError ):
+                    # FIXME: change this to log.error?
+                    main.log.exception( "Error in parsing ONOS" + num +
+                                        " response as json." )
+                    main.log.error( repr( ONOSFlows[ i ] ) )
+                    ONOSFlowsJson.append( None )
+                    flowsResults = False
+        utilities.assert_equals(
+            expect=True,
+            actual=flowsResults,
+            onpass="No error in reading flows output",
+            onfail="Error in reading flows from ONOS" )
+
+        main.step( "Check for consistency in Flows from each controller" )
+        # Guard against nodes whose flows output failed to parse ( None )
+        if None in ONOSFlowsJson:
+            consistentFlows = False
+        elif all( len( i ) == len( ONOSFlowsJson[ 0 ] )
+                  for i in ONOSFlowsJson ):
+            main.log.info( "Flow count is consistent across all ONOS nodes" )
+        else:
+            consistentFlows = False
+        utilities.assert_equals(
+            expect=True,
+            actual=consistentFlows,
+            onpass="The flow count is consistent across all ONOS nodes",
+            onfail="ONOS nodes have different flow counts" )
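+        # NOTE: Only flow *counts* are compared here; the flow rules
+        #       themselves are not diffed across nodes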
+
+        if flowsResults and not consistentFlows:
+            for i in range( len( ONOSFlows ) ):
+                node = str( main.activeNodes[i] + 1 )
+                try:
+                    main.log.warn(
+                        "ONOS" + node + " flows: " +
+                        json.dumps( json.loads( ONOSFlows[i] ), sort_keys=True,
+                                    indent=4, separators=( ',', ': ' ) ) )
+                except ( ValueError, TypeError ):
+                    main.log.warn( "ONOS" + node + " flows: " +
+                                   repr( ONOSFlows[ i ] ) )
+        elif flowsResults and consistentFlows:
+            flowCheck = main.TRUE
+            flowState = ONOSFlows[ 0 ]
+
+        main.step( "Get the OF Table entries" )
+        global flows
+        flows = []
+        for i in range( 1, 29 ):
+            flows.append( main.Mininet1.getFlowTable( "s" + str( i ), version="1.3", debug=False ) )
+        if flowCheck == main.FALSE:
+            for table in flows:
+                main.log.warn( table )
+        # TODO: Compare switch flow tables with ONOS flow tables
+
+        main.step( "Start continuous pings" )
+        main.Mininet2.pingLong(
+            src=main.params[ 'PING' ][ 'source1' ],
+            target=main.params[ 'PING' ][ 'target1' ],
+            pingTime=500 )
+        main.Mininet2.pingLong(
+            src=main.params[ 'PING' ][ 'source2' ],
+            target=main.params[ 'PING' ][ 'target2' ],
+            pingTime=500 )
+        main.Mininet2.pingLong(
+            src=main.params[ 'PING' ][ 'source3' ],
+            target=main.params[ 'PING' ][ 'target3' ],
+            pingTime=500 )
+        main.Mininet2.pingLong(
+            src=main.params[ 'PING' ][ 'source4' ],
+            target=main.params[ 'PING' ][ 'target4' ],
+            pingTime=500 )
+        main.Mininet2.pingLong(
+            src=main.params[ 'PING' ][ 'source5' ],
+            target=main.params[ 'PING' ][ 'target5' ],
+            pingTime=500 )
+        main.Mininet2.pingLong(
+            src=main.params[ 'PING' ][ 'source6' ],
+            target=main.params[ 'PING' ][ 'target6' ],
+            pingTime=500 )
+        main.Mininet2.pingLong(
+            src=main.params[ 'PING' ][ 'source7' ],
+            target=main.params[ 'PING' ][ 'target7' ],
+            pingTime=500 )
+        main.Mininet2.pingLong(
+            src=main.params[ 'PING' ][ 'source8' ],
+            target=main.params[ 'PING' ][ 'target8' ],
+            pingTime=500 )
+        main.Mininet2.pingLong(
+            src=main.params[ 'PING' ][ 'source9' ],
+            target=main.params[ 'PING' ][ 'target9' ],
+            pingTime=500 )
+        main.Mininet2.pingLong(
+            src=main.params[ 'PING' ][ 'source10' ],
+            target=main.params[ 'PING' ][ 'target10' ],
+            pingTime=500 )
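+        # The background pings ( pingTime=500 ) keep dataplane traffic
+        # flowing across the topology while the failure and recovery cases
+        # below run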
+
+        main.step( "Collecting topology information from ONOS" )
+        devices = []
+        threads = []
+        for i in main.activeNodes:
+            t = main.Thread( target=main.CLIs[i].devices,
+                             name="devices-" + str( i ),
+                             args=[ ] )
+            threads.append( t )
+            t.start()
+
+        for t in threads:
+            t.join()
+            devices.append( t.result )
+        hosts = []
+        threads = []
+        for i in main.activeNodes:
+            t = main.Thread( target=main.CLIs[i].hosts,
+                             name="hosts-" + str( i ),
+                             args=[ ] )
+            threads.append( t )
+            t.start()
+
+        for t in threads:
+            t.join()
+            try:
+                hosts.append( json.loads( t.result ) )
+            except ( ValueError, TypeError ):
+                # FIXME: better handling of this, print which node
+                #        Maybe use thread name?
+                main.log.exception( "Error parsing json output of hosts" )
+                main.log.warn( repr( t.result ) )
+                hosts.append( None )
+
+        ports = []
+        threads = []
+        for i in main.activeNodes:
+            t = main.Thread( target=main.CLIs[i].ports,
+                             name="ports-" + str( i ),
+                             args=[ ] )
+            threads.append( t )
+            t.start()
+
+        for t in threads:
+            t.join()
+            ports.append( t.result )
+        links = []
+        threads = []
+        for i in main.activeNodes:
+            t = main.Thread( target=main.CLIs[i].links,
+                             name="links-" + str( i ),
+                             args=[ ] )
+            threads.append( t )
+            t.start()
+
+        for t in threads:
+            t.join()
+            links.append( t.result )
+        clusters = []
+        threads = []
+        for i in main.activeNodes:
+            t = main.Thread( target=main.CLIs[i].clusters,
+                             name="clusters-" + str( i ),
+                             args=[ ] )
+            threads.append( t )
+            t.start()
+
+        for t in threads:
+            t.join()
+            clusters.append( t.result )
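+        # NOTE: The five threaded collection blocks above share one shape;
+        #       a helper could collapse them. A sketch ( hypothetical, not
+        #       used by this test ):
+        #
+        #           def collectFromAll( fnName ):
+        #               ts = [ main.Thread(
+        #                          target=getattr( main.CLIs[i], fnName ),
+        #                          name=fnName + "-" + str( i ), args=[] )
+        #                      for i in main.activeNodes ]
+        #               for t in ts:
+        #                   t.start()
+        #               for t in ts:
+        #                   t.join()
+        #               return [ t.result for t in ts ]
+        #
+        #           devices = collectFromAll( "devices" )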
+        # Compare json objects for hosts and dataplane clusters
+
+        # hosts
+        main.step( "Host view is consistent across ONOS nodes" )
+        consistentHostsResult = main.TRUE
+        for controller in range( len( hosts ) ):
+            controllerStr = str( main.activeNodes[controller] + 1 )
+            if hosts[ controller ] and "Error" not in hosts[ controller ]:
+                if hosts[ controller ] == hosts[ 0 ]:
+                    continue
+                else:  # hosts not consistent
+                    main.log.error( "hosts from ONOS" +
+                                     controllerStr +
+                                     " are inconsistent with ONOS1" )
+                    main.log.warn( repr( hosts[ controller ] ) )
+                    consistentHostsResult = main.FALSE
+
+            else:
+                main.log.error( "Error in getting ONOS hosts from ONOS" +
+                                 controllerStr )
+                consistentHostsResult = main.FALSE
+                main.log.warn( "ONOS" + controllerStr +
+                               " hosts response: " +
+                               repr( hosts[ controller ] ) )
+        utilities.assert_equals(
+            expect=main.TRUE,
+            actual=consistentHostsResult,
+            onpass="Hosts view is consistent across all ONOS nodes",
+            onfail="ONOS nodes have different views of hosts" )
+
+        main.step( "Each host has an IP address" )
+        ipResult = main.TRUE
+        for controller in range( 0, len( hosts ) ):
+            controllerStr = str( main.activeNodes[controller] + 1 )
+            if hosts[ controller ]:
+                for host in hosts[ controller ]:
+                    if not host.get( 'ipAddresses', [ ] ):
+                        main.log.error( "Error with host ips on controller" +
+                                        controllerStr + ": " + str( host ) )
+                        ipResult = main.FALSE
+        utilities.assert_equals(
+            expect=main.TRUE,
+            actual=ipResult,
+            onpass="Every host has an IP address",
+            onfail="At least one host is missing an IP address" )
+
+        # Strongly connected clusters of devices
+        main.step( "Cluster view is consistent across ONOS nodes" )
+        consistentClustersResult = main.TRUE
+        for controller in range( len( clusters ) ):
+            controllerStr = str( main.activeNodes[controller] + 1 )
+            if "Error" not in clusters[ controller ]:
+                if clusters[ controller ] == clusters[ 0 ]:
+                    continue
+                else:  # clusters not consistent
+                    main.log.error( "clusters from ONOS" + controllerStr +
+                                     " are inconsistent with ONOS1" )
+                    consistentClustersResult = main.FALSE
+
+            else:
+                main.log.error( "Error in getting dataplane clusters " +
+                                 "from ONOS" + controllerStr )
+                consistentClustersResult = main.FALSE
+                main.log.warn( "ONOS" + controllerStr +
+                               " clusters response: " +
+                               repr( clusters[ controller ] ) )
+        utilities.assert_equals(
+            expect=main.TRUE,
+            actual=consistentClustersResult,
+            onpass="Clusters view is consistent across all ONOS nodes",
+            onfail="ONOS nodes have different views of clusters" )
+        # there should always only be one cluster
+        main.step( "Cluster view correct across ONOS nodes" )
+        try:
+            numClusters = len( json.loads( clusters[ 0 ] ) )
+        except ( ValueError, TypeError ):
+            main.log.exception( "Error parsing clusters[0]: " +
+                                repr( clusters[ 0 ] ) )
+            numClusters = "ERROR"
+        clusterResults = main.FALSE
+        if numClusters == 1:
+            clusterResults = main.TRUE
+        utilities.assert_equals(
+            expect=1,
+            actual=numClusters,
+            onpass="ONOS shows 1 SCC",
+            onfail="ONOS shows " + str( numClusters ) + " SCCs" )
+
+        main.step( "Comparing ONOS topology to MN" )
+        devicesResults = main.TRUE
+        linksResults = main.TRUE
+        hostsResults = main.TRUE
+        mnSwitches = main.Mininet1.getSwitches()
+        mnLinks = main.Mininet1.getLinks()
+        mnHosts = main.Mininet1.getHosts()
+        for controller in range( len( main.activeNodes ) ):
+            controllerStr = str( main.activeNodes[controller] + 1 )
+            if devices[ controller ] and ports[ controller ] and\
+                "Error" not in devices[ controller ] and\
+                "Error" not in ports[ controller ]:
+                    currentDevicesResult = main.Mininet1.compareSwitches(
+                            mnSwitches,
+                            json.loads( devices[ controller ] ),
+                            json.loads( ports[ controller ] ) )
+            else:
+                currentDevicesResult = main.FALSE
+            utilities.assert_equals( expect=main.TRUE,
+                                     actual=currentDevicesResult,
+                                     onpass="ONOS" + controllerStr +
+                                     " Switches view is correct",
+                                     onfail="ONOS" + controllerStr +
+                                     " Switches view is incorrect" )
+            if links[ controller ] and "Error" not in links[ controller ]:
+                currentLinksResult = main.Mininet1.compareLinks(
+                        mnSwitches, mnLinks,
+                        json.loads( links[ controller ] ) )
+            else:
+                currentLinksResult = main.FALSE
+            utilities.assert_equals( expect=main.TRUE,
+                                     actual=currentLinksResult,
+                                     onpass="ONOS" + controllerStr +
+                                     " links view is correct",
+                                     onfail="ONOS" + controllerStr +
+                                     " links view is incorrect" )
+
+            if hosts[ controller ] and "Error" not in hosts[ controller ]:
+                currentHostsResult = main.Mininet1.compareHosts(
+                        mnHosts,
+                        hosts[ controller ] )
+            else:
+                currentHostsResult = main.FALSE
+            utilities.assert_equals( expect=main.TRUE,
+                                     actual=currentHostsResult,
+                                     onpass="ONOS" + controllerStr +
+                                     " hosts exist in Mininet",
+                                     onfail="ONOS" + controllerStr +
+                                     " hosts don't match Mininet" )
+
+            devicesResults = devicesResults and currentDevicesResult
+            linksResults = linksResults and currentLinksResult
+            hostsResults = hostsResults and currentHostsResult
+
+        main.step( "Device information is correct" )
+        utilities.assert_equals(
+            expect=main.TRUE,
+            actual=devicesResults,
+            onpass="Device information is correct",
+            onfail="Device information is incorrect" )
+
+        main.step( "Links are correct" )
+        utilities.assert_equals(
+            expect=main.TRUE,
+            actual=linksResults,
+            onpass="Links are correct",
+            onfail="Links are incorrect" )
+
+        main.step( "Hosts are correct" )
+        utilities.assert_equals(
+            expect=main.TRUE,
+            actual=hostsResults,
+            onpass="Hosts are correct",
+            onfail="Hosts are incorrect" )
+
+    def CASE61( self, main ):
+        """
+        The Failure case.
+        """
+        import time
+        assert main.numCtrls, "main.numCtrls not defined"
+        assert main, "main not defined"
+        assert utilities.assert_equals, "utilities.assert_equals not defined"
+        assert main.CLIs, "main.CLIs not defined"
+        assert main.nodes, "main.nodes not defined"
+        main.case( "Partition ONOS nodes into two distinct partitions" )
+
+        main.step( "Checking ONOS Logs for errors" )
+        for node in main.nodes:
+            main.log.debug( "Checking logs for errors on " + node.name + ":" )
+            main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
+
+        n = len( main.nodes )  # Number of nodes
+        p = ( ( n + 1 ) / 2 ) + 1  # Used to pick the second node to partition
+        main.partition = [ 0 ]  # ONOS nodes to partition, listed by index in main.nodes
+        if n > 3:
+            main.partition.append( p - 1 )
+            # NOTE: This only works for cluster sizes of 3, 5, or 7.
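+        # Worked examples of the selection above ( Python 2 integer
+        # division ):
+        #   n=3 -> p=3, partition indices [ 0 ]      ( 1 of 3 nodes isolated )
+        #   n=5 -> p=4, partition indices [ 0, 3 ]   ( 2 of 5 nodes isolated )
+        #   n=7 -> p=5, partition indices [ 0, 4 ]   ( 2 of 7 nodes isolated )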
+
+        main.step( "Partitioning ONOS nodes" )
+        nodeList = [ str( i + 1 ) for i in main.partition ]
+        main.log.info( "Nodes to be partitioned: " + str( nodeList ) )
+        partitionResults = main.TRUE
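+        # The iptables rules below render as, e.g.
+        # ( addresses are illustrative ):
+        #   sudo iptables -A INPUT -d 10.128.30.11 -s 10.128.30.12 -j DROP
+        # i.e. the node at -d drops inbound traffic from the node at -s;
+        # rules are added in both directions to fully sever the partitions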
+        for i in range( 0, n ):
+            this = main.nodes[i]
+            if i not in main.partition:
+                for j in main.partition:
+                    foe = main.nodes[j]
+                    main.log.warn( "Setting IP Tables rule from {} to {}. ".format( this.ip_address, foe.ip_address ) )
+                    #CMD HERE
+                    cmdStr = "sudo iptables -A {} -d {} -s {} -j DROP".format( "INPUT", this.ip_address, foe.ip_address )
+                    this.handle.sendline( cmdStr )
+                    this.handle.expect( "\$" )
+                    main.log.debug( this.handle.before )
+            else:
+                for j in range( 0, n ):
+                    if j not in main.partition:
+                        foe = main.nodes[j]
+                        main.log.warn( "Setting IP Tables rule from {} to {}. ".format( this.ip_address, foe.ip_address ) )
+                        #CMD HERE
+                        cmdStr = "sudo iptables -A {} -d {} -s {} -j DROP".format( "INPUT", this.ip_address, foe.ip_address )
+                        this.handle.sendline( cmdStr )
+                        this.handle.expect( "\$" )
+                        main.log.debug( this.handle.before )
+                main.activeNodes.remove( i )
+        # NOTE: When dynamic clustering is finished, we need to start checking
+        #       that main.partition nodes still work when partitioned
+        utilities.assert_equals( expect=main.TRUE, actual=partitionResults,
+                                 onpass="Firewall rules set successfully",
+                                 onfail="Error setting firewall rules" )
+
+        main.log.step( "Sleeping 60 seconds" )
+        time.sleep( 60 )
+
+    def CASE62( self, main ):
+        """
+        Healing Partition
+        """
+        import time
+        assert main.numCtrls, "main.numCtrls not defined"
+        assert main, "main not defined"
+        assert utilities.assert_equals, "utilities.assert_equals not defined"
+        assert main.CLIs, "main.CLIs not defined"
+        assert main.nodes, "main.nodes not defined"
+        assert main.partition, "main.partition not defined"
+        main.case( "Healing Partition" )
+
+        main.step( "Deleteing firewall rules" )
+        healResults = main.TRUE
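+        # "iptables -F" flushes all rules from every chain of the filter
+        # table, removing the DROP rules installed in CASE61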
+        for node in main.nodes:
+            cmdStr = "sudo iptables -F"
+            node.handle.sendline( cmdStr )
+            node.handle.expect( "\$" )
+            main.log.debug( node.handle.before )
+        utilities.assert_equals( expect=main.TRUE, actual=healResults,
+                                 onpass="Firewall rules removed",
+                                 onfail="Error removing firewall rules" )
+
+        for node in main.partition:
+            main.activeNodes.append( node )
+        main.activeNodes.sort()
+        try:
+            assert list( set( main.activeNodes ) ) == main.activeNodes,\
+                   "List of active nodes has duplicates, this likely indicates something was run out of order"
+        except AssertionError:
+            main.log.exception( "" )
+            main.cleanup()
+            main.exit()
+
+    def CASE7( self, main ):
+        """
+        Check state after ONOS failure
+        """
+        import json
+        assert main.numCtrls, "main.numCtrls not defined"
+        assert main, "main not defined"
+        assert utilities.assert_equals, "utilities.assert_equals not defined"
+        assert main.CLIs, "main.CLIs not defined"
+        assert main.nodes, "main.nodes not defined"
+        try:
+            main.partition
+        except AttributeError:
+            main.partition = []
+
+        main.case( "Running ONOS Constant State Tests" )
+
+        main.step( "Check that each switch has a master" )
+        # Assert that each device has a master
+        rolesNotNull = main.TRUE
+        threads = []
+        for i in main.activeNodes:
+            t = main.Thread( target=main.CLIs[i].rolesNotNull,
+                             name="rolesNotNull-" + str( i ),
+                             args=[ ] )
+            threads.append( t )
+            t.start()
+
+        for t in threads:
+            t.join()
+            rolesNotNull = rolesNotNull and t.result
+        utilities.assert_equals(
+            expect=main.TRUE,
+            actual=rolesNotNull,
+            onpass="Each device has a master",
+            onfail="Some devices don't have a master assigned" )
+
+        main.step( "Read device roles from ONOS" )
+        ONOSMastership = []
+        mastershipCheck = main.FALSE
+        consistentMastership = True
+        rolesResults = True
+        threads = []
+        for i in main.activeNodes:
+            t = main.Thread( target=main.CLIs[i].roles,
+                             name="roles-" + str( i ),
+                             args=[] )
+            threads.append( t )
+            t.start()
+
+        for t in threads:
+            t.join()
+            ONOSMastership.append( t.result )
+
+        for i in range( len( ONOSMastership ) ):
+            node = str( main.activeNodes[i] + 1 )
+            if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
+                main.log.error( "Error in getting ONOS" + node + " roles" )
+                main.log.warn( "ONOS" + node + " mastership response: " +
+                               repr( ONOSMastership[i] ) )
+                rolesResults = False
+        utilities.assert_equals(
+            expect=True,
+            actual=rolesResults,
+            onpass="No error in reading roles output",
+            onfail="Error in reading roles from ONOS" )
+
+        main.step( "Check for consistency in roles from each controller" )
+        if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
+            main.log.info(
+                "Switch roles are consistent across all ONOS nodes" )
+        else:
+            consistentMastership = False
+        utilities.assert_equals(
+            expect=True,
+            actual=consistentMastership,
+            onpass="Switch roles are consistent across all ONOS nodes",
+            onfail="ONOS nodes have different views of switch roles" )
+
+        if rolesResults and not consistentMastership:
+            for i in range( len( ONOSMastership ) ):
+                node = str( main.activeNodes[i] + 1 )
+                main.log.warn( "ONOS" + node + " roles: ",
+                               json.dumps( json.loads( ONOSMastership[ i ] ),
+                                           sort_keys=True,
+                                           indent=4,
+                                           separators=( ',', ': ' ) ) )
+
+        # NOTE: we expect mastership to change on controller failure
+
+        main.step( "Get the intents and compare across all nodes" )
+        ONOSIntents = []
+        intentCheck = main.FALSE
+        consistentIntents = True
+        intentsResults = True
+        threads = []
+        for i in main.activeNodes:
+            t = main.Thread( target=main.CLIs[i].intents,
+                             name="intents-" + str( i ),
+                             args=[],
+                             kwargs={ 'jsonFormat': True } )
+            threads.append( t )
+            t.start()
+
+        for t in threads:
+            t.join()
+            ONOSIntents.append( t.result )
+
+        for i in range( len( ONOSIntents ) ):
+            node = str( main.activeNodes[i] + 1 )
+            if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
+                main.log.error( "Error in getting ONOS" + node + " intents" )
+                main.log.warn( "ONOS" + node + " intents response: " +
+                               repr( ONOSIntents[ i ] ) )
+                intentsResults = False
+        utilities.assert_equals(
+            expect=True,
+            actual=intentsResults,
+            onpass="No error in reading intents output",
+            onfail="Error in reading intents from ONOS" )
+
+        main.step( "Check for consistency in Intents from each controller" )
+        if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
+            main.log.info( "Intents are consistent across all ONOS " +
+                             "nodes" )
+        else:
+            consistentIntents = False
+
+        # Try to make it easy to figure out what is happening
+        #
+        # Intent      ONOS1      ONOS2    ...
+        #  0x01     INSTALLED  INSTALLING
+        #  ...        ...         ...
+        #  ...        ...         ...
+        title = "   ID"
+        for n in main.activeNodes:
+            title += " " * 10 + "ONOS" + str( n + 1 )
+        main.log.warn( title )
+        # get all intent keys in the cluster
+        keys = []
+        for nodeStr in ONOSIntents:
+            node = json.loads( nodeStr )
+            for intent in node:
+                keys.append( intent.get( 'id' ) )
+        keys = set( keys )
+        for key in keys:
+            row = "%-13s" % key
+            for nodeStr in ONOSIntents:
+                node = json.loads( nodeStr )
+                for intent in node:
+                    if intent.get( 'id' ) == key:
+                        row += "%-15s" % intent.get( 'state' )
+            main.log.warn( row )
+        # End table view
+
+        utilities.assert_equals(
+            expect=True,
+            actual=consistentIntents,
+            onpass="Intents are consistent across all ONOS nodes",
+            onfail="ONOS nodes have different views of intents" )
+        intentStates = []
+        for node in ONOSIntents:  # Iter through ONOS nodes
+            nodeStates = []
+            # Iter through intents of a node
+            try:
+                for intent in json.loads( node ):
+                    nodeStates.append( intent[ 'state' ] )
+            except ( ValueError, TypeError ):
+                main.log.exception( "Error in parsing intents" )
+                main.log.error( repr( node ) )
+            intentStates.append( nodeStates )
+            out = [ (i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
+            main.log.info( dict( out ) )
+
+        if intentsResults and not consistentIntents:
+            for i in range( len( main.activeNodes ) ):
+                node = str( main.activeNodes[i] + 1 )
+                main.log.warn( "ONOS" + node + " intents: " )
+                main.log.warn( json.dumps(
+                    json.loads( ONOSIntents[ i ] ),
+                    sort_keys=True,
+                    indent=4,
+                    separators=( ',', ': ' ) ) )
+        elif intentsResults and consistentIntents:
+            intentCheck = main.TRUE
+
+        # NOTE: Store has no durability, so intents are lost across system
+        #       restarts
+        main.step( "Compare current intents with intents before the failure" )
+        # NOTE: this requires case 5 to pass for intentState to be set.
+        #       maybe we should stop the test if that fails?
+        sameIntents = main.FALSE
+        try:
+            intentState
+        except NameError:
+            main.log.warn( "No previous intent state was saved" )
+        else:
+            if intentState and intentState == ONOSIntents[ 0 ]:
+                sameIntents = main.TRUE
+                main.log.info( "Intents are consistent with before failure" )
+            # TODO: possibly the states have changed? we may need to figure out
+            #       what the acceptable states are
+            elif len( intentState ) == len( ONOSIntents[ 0 ] ):
+                sameIntents = main.TRUE
+                try:
+                    before = json.loads( intentState )
+                    after = json.loads( ONOSIntents[ 0 ] )
+                    for intent in before:
+                        if intent not in after:
+                            sameIntents = main.FALSE
+                            main.log.debug( "Intent is not currently in ONOS " +
+                                            "(at least in the same form):" )
+                            main.log.debug( json.dumps( intent ) )
+                except ( ValueError, TypeError ):
+                    main.log.exception( "Exception printing intents" )
+                    main.log.debug( repr( ONOSIntents[0] ) )
+                    main.log.debug( repr( intentState ) )
+            if sameIntents == main.FALSE:
+                try:
+                    main.log.debug( "ONOS intents before: " )
+                    main.log.debug( json.dumps( json.loads( intentState ),
+                                                sort_keys=True, indent=4,
+                                                separators=( ',', ': ' ) ) )
+                    main.log.debug( "Current ONOS intents: " )
+                    main.log.debug( json.dumps( json.loads( ONOSIntents[ 0 ] ),
+                                                sort_keys=True, indent=4,
+                                                separators=( ',', ': ' ) ) )
+                except ( ValueError, TypeError ):
+                    main.log.exception( "Exception printing intents" )
+                    main.log.debug( repr( ONOSIntents[0] ) )
+                    main.log.debug( repr( intentState ) )
+            utilities.assert_equals(
+                expect=main.TRUE,
+                actual=sameIntents,
+                onpass="Intents are consistent with before failure",
+                onfail="The Intents changed during failure" )
+        intentCheck = intentCheck and sameIntents
+
+        main.step( "Get the OF Table entries and compare to before " +
+                   "component failure" )
+        FlowTables = main.TRUE
+        for i in range( 28 ):
+            main.log.info( "Checking flow table on s" + str( i + 1 ) )
+            tmpFlows = main.Mininet1.getFlowTable( "s" + str( i + 1 ), version="1.3", debug=False )
+            FlowTables = FlowTables and main.Mininet1.flowTableComp( flows[i], tmpFlows )
+            if FlowTables == main.FALSE:
+                main.log.warn( "Differences in flow table for switch: s{}".format( i + 1 ) )
+        utilities.assert_equals(
+            expect=main.TRUE,
+            actual=FlowTables,
+            onpass="No changes were found in the flow tables",
+            onfail="Changes were found in the flow tables" )
+
+        main.Mininet2.pingLongKill()
+        '''
+        main.step( "Check the continuous pings to ensure that no packets " +
+                   "were dropped during component failure" )
+        main.Mininet2.pingKill( main.params[ 'TESTONUSER' ],
+                                main.params[ 'TESTONIP' ] )
+        LossInPings = main.FALSE
+        # NOTE: checkForLoss returns main.FALSE with 0% packet loss
+        for i in range( 8, 18 ):
+            main.log.info(
+                "Checking for a loss in pings along flow from s" +
+                str( i ) )
+            LossInPings = main.Mininet2.checkForLoss(
+                "/tmp/ping.h" +
+                str( i ) ) or LossInPings
+        if LossInPings == main.TRUE:
+            main.log.info( "Loss in ping detected" )
+        elif LossInPings == main.ERROR:
+            main.log.info( "There are multiple mininet process running" )
+        elif LossInPings == main.FALSE:
+            main.log.info( "No Loss in the pings" )
+            main.log.info( "No loss of dataplane connectivity" )
+        utilities.assert_equals(
+            expect=main.FALSE,
+            actual=LossInPings,
+            onpass="No Loss of connectivity",
+            onfail="Loss of dataplane connectivity detected" )
+        '''
+
+        main.step( "Leadership Election is still functional" )
+        # Test of LeadershipElection
+        leaderList = []
+
+        partitioned = []
+        for i in main.partition:
+            partitioned.append( main.nodes[i].ip_address )
+        leaderResult = main.TRUE
+
+        for i in main.activeNodes:
+            cli = main.CLIs[i]
+            leaderN = cli.electionTestLeader()
+            leaderList.append( leaderN )
+            if leaderN == main.FALSE:
+                # error in response
+                main.log.error( "Something is wrong with " +
+                                 "electionTestLeader function, check the" +
+                                 " error logs" )
+                leaderResult = main.FALSE
+            elif leaderN is None:
+                main.log.error( cli.name +
+                                 " shows no leader for the election-app was" +
+                                 " elected after the old one died" )
+                leaderResult = main.FALSE
+            elif leaderN in partitioned:
+                main.log.error( cli.name + " shows " + str( leaderN ) +
+                                 " as leader for the election-app, but it " +
+                                 "was partitioned" )
+                leaderResult = main.FALSE
+        if len( set( leaderList ) ) != 1:
+            leaderResult = main.FALSE
+            main.log.error(
+                "Inconsistent view of leader for the election test app: " +
+                str( leaderList ) )
+        utilities.assert_equals(
+            expect=main.TRUE,
+            actual=leaderResult,
+            onpass="Leadership election passed",
+            onfail="Something went wrong with Leadership election" )
+
+    def CASE8( self, main ):
+        """
+        Compare topo
+        """
+        import json
+        import time
+        assert main.numCtrls, "main.numCtrls not defined"
+        assert main, "main not defined"
+        assert utilities.assert_equals, "utilities.assert_equals not defined"
+        assert main.CLIs, "main.CLIs not defined"
+        assert main.nodes, "main.nodes not defined"
+
+        main.case( "Compare ONOS Topology view to Mininet topology" )
+        main.caseExplanation = "Compare topology objects between Mininet" +\
+                                " and ONOS"
+        topoResult = main.FALSE
+        topoFailMsg = "ONOS topology don't match Mininet"
+        elapsed = 0
+        count = 0
+        main.step( "Comparing ONOS topology to MN topology" )
+        startTime = time.time()
+        # Give time for Gossip to work; keep polling until the topologies
+        # match, allowing at least 60 seconds and at least 3 attempts
+        while topoResult == main.FALSE and ( elapsed < 60 or count < 3 ):
+            devicesResults = main.TRUE
+            linksResults = main.TRUE
+            hostsResults = main.TRUE
+            hostAttachmentResults = True
+            count += 1
+            cliStart = time.time()
+            devices = []
+            threads = []
+            for i in main.activeNodes:
+                t = main.Thread( target=utilities.retry,
+                                 name="devices-" + str( i ),
+                                 args=[ main.CLIs[i].devices, [ None ] ],
+                                 kwargs={ 'sleep': 5, 'attempts': 5,
+                                           'randomTime': True } )
+                threads.append( t )
+                t.start()
+
+            for t in threads:
+                t.join()
+                devices.append( t.result )
+            hosts = []
+            ipResult = main.TRUE
+            threads = []
+            for i in main.activeNodes:
+                t = main.Thread( target=utilities.retry,
+                                 name="hosts-" + str( i ),
+                                 args=[ main.CLIs[i].hosts, [ None ] ],
+                                 kwargs={ 'sleep': 5, 'attempts': 5,
+                                           'randomTime': True } )
+                threads.append( t )
+                t.start()
+
+            for t in threads:
+                t.join()
+                try:
+                    hosts.append( json.loads( t.result ) )
+                except ( ValueError, TypeError ):
+                    main.log.exception( "Error parsing hosts results" )
+                    main.log.error( repr( t.result ) )
+                    hosts.append( None )
+            for controller in range( 0, len( hosts ) ):
+                controllerStr = str( main.activeNodes[controller] + 1 )
+                if hosts[ controller ]:
+                    for host in hosts[ controller ]:
+                        if host is None or host.get( 'ipAddresses', [] ) == []:
+                            main.log.error(
+                                "Error with host ipAddresses on controller" +
+                                controllerStr + ": " + str( host ) )
+                            ipResult = main.FALSE
+            ports = []
+            threads = []
+            for i in main.activeNodes:
+                t = main.Thread( target=utilities.retry,
+                                 name="ports-" + str( i ),
+                                 args=[ main.CLIs[i].ports, [ None ] ],
+                                 kwargs={ 'sleep': 5, 'attempts': 5,
+                                           'randomTime': True } )
+                threads.append( t )
+                t.start()
+
+            for t in threads:
+                t.join()
+                ports.append( t.result )
+            links = []
+            threads = []
+            for i in main.activeNodes:
+                t = main.Thread( target=utilities.retry,
+                                 name="links-" + str( i ),
+                                 args=[ main.CLIs[i].links, [ None ] ],
+                                 kwargs={ 'sleep': 5, 'attempts': 5,
+                                           'randomTime': True } )
+                threads.append( t )
+                t.start()
+
+            for t in threads:
+                t.join()
+                links.append( t.result )
+            clusters = []
+            threads = []
+            for i in main.activeNodes:
+                t = main.Thread( target=utilities.retry,
+                                 name="clusters-" + str( i ),
+                                 args=[ main.CLIs[i].clusters, [ None ] ],
+                                 kwargs={ 'sleep': 5, 'attempts': 5,
+                                           'randomTime': True } )
+                threads.append( t )
+                t.start()
+
+            for t in threads:
+                t.join()
+                clusters.append( t.result )
+
+            elapsed = time.time() - startTime
+            cliTime = time.time() - cliStart
+            print "Elapsed time: " + str( elapsed )
+            print "CLI time: " + str( cliTime )
+
+            if all( e is None for e in devices ) and\
+               all( e is None for e in hosts ) and\
+               all( e is None for e in ports ) and\
+               all( e is None for e in links ) and\
+               all( e is None for e in clusters ):
+                   topoFailMsg = "Could not get topology from ONOS"
+                   main.log.error( topoFailMsg )
+                   continue  # Try again, No use trying to compare
+
+            mnSwitches = main.Mininet1.getSwitches()
+            mnLinks = main.Mininet1.getLinks()
+            mnHosts = main.Mininet1.getHosts()
+            for controller in range( len( main.activeNodes ) ):
+                controllerStr = str( main.activeNodes[controller] + 1 )
+                if devices[ controller ] and ports[ controller ] and\
+                    "Error" not in devices[ controller ] and\
+                    "Error" not in ports[ controller ]:
+
+                    try:
+                        currentDevicesResult = main.Mininet1.compareSwitches(
+                                mnSwitches,
+                                json.loads( devices[ controller ] ),
+                                json.loads( ports[ controller ] ) )
+                    except ( TypeError, ValueError ):
+                        main.log.exception( "Object not as expected; devices={!r}\nports={!r}".format(
+                            devices[ controller ], ports[ controller ] ) )
+                        currentDevicesResult = main.FALSE
+                else:
+                    currentDevicesResult = main.FALSE
+                utilities.assert_equals( expect=main.TRUE,
+                                         actual=currentDevicesResult,
+                                         onpass="ONOS" + controllerStr +
+                                         " Switches view is correct",
+                                         onfail="ONOS" + controllerStr +
+                                         " Switches view is incorrect" )
+
+                if links[ controller ] and "Error" not in links[ controller ]:
+                    currentLinksResult = main.Mininet1.compareLinks(
+                            mnSwitches, mnLinks,
+                            json.loads( links[ controller ] ) )
+                else:
+                    currentLinksResult = main.FALSE
+                utilities.assert_equals( expect=main.TRUE,
+                                         actual=currentLinksResult,
+                                         onpass="ONOS" + controllerStr +
+                                         " links view is correct",
+                                         onfail="ONOS" + controllerStr +
+                                         " links view is incorrect" )
+                if hosts[ controller ] and "Error" not in hosts[ controller ]:
+                    currentHostsResult = main.Mininet1.compareHosts(
+                            mnHosts,
+                            hosts[ controller ] )
+                elif hosts[ controller ] == []:
+                    currentHostsResult = main.TRUE
+                else:
+                    currentHostsResult = main.FALSE
+                utilities.assert_equals( expect=main.TRUE,
+                                         actual=currentHostsResult,
+                                         onpass="ONOS" + controllerStr +
+                                         " hosts exist in Mininet",
+                                         onfail="ONOS" + controllerStr +
+                                         " hosts don't match Mininet" )
+                # CHECKING HOST ATTACHMENT POINTS
+                hostAttachment = True
+                zeroHosts = False
+                # FIXME: topo-HA/obelisk specific mappings:
+                # key is mac and value is dpid
+                mappings = {}
+                for i in range( 1, 29 ):  # hosts 1 through 28
+                    # set up correct variables:
+                    macId = "00:" * 5 + hex( i ).split( "0x" )[1].upper().zfill(2)
+                    if i == 1:
+                        deviceId = "1000".zfill(16)
+                    elif i == 2:
+                        deviceId = "2000".zfill(16)
+                    elif i == 3:
+                        deviceId = "3000".zfill(16)
+                    elif i == 4:
+                        deviceId = "3004".zfill(16)
+                    elif i == 5:
+                        deviceId = "5000".zfill(16)
+                    elif i == 6:
+                        deviceId = "6000".zfill(16)
+                    elif i == 7:
+                        deviceId = "6007".zfill(16)
+                    elif i >= 8 and i <= 17:
+                        dpid = '3' + str( i ).zfill( 3 )
+                        deviceId = dpid.zfill(16)
+                    elif i >= 18 and i <= 27:
+                        dpid = '6' + str( i ).zfill( 3 )
+                        deviceId = dpid.zfill(16)
+                    elif i == 28:
+                        deviceId = "2800".zfill(16)
+                    mappings[ macId ] = deviceId
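+                # e.g. for host 8 the rules above yield the mapping:
+                #   mac "00:00:00:00:00:08" -> device "0000000000003008"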
+                if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
+                    if hosts[ controller ] == []:
+                        main.log.warn( "There are no hosts discovered" )
+                        zeroHosts = True
+                    else:
+                        for host in hosts[ controller ]:
+                            mac = None
+                            location = None
+                            device = None
+                            port = None
+                            try:
+                                mac = host.get( 'mac' )
+                                assert mac, "mac field could not be found for this host object"
+
+                                location = host.get( 'location' )
+                                assert location, "location field could not be found for this host object"
+
+                                # Trim the protocol identifier off deviceId
+                                device = str( location.get( 'elementId' ) ).split(':')[1]
+                                assert device, "elementId field could not be found for this host location object"
+
+                                port = location.get( 'port' )
+                                assert port, "port field could not be found for this host location object"
+
+                                # Now check if this matches where they should be
+                                if mac and device and port:
+                                    if str( port ) != "1":
+                                        main.log.error( "The attachment port is incorrect for " +
+                                                        "host " + str( mac ) +
+                                                        ". Expected: 1 Actual: " + str( port) )
+                                        hostAttachment = False
+                                    if device != mappings[ str( mac ) ]:
+                                        main.log.error( "The attachment device is incorrect for " +
+                                                        "host " + str( mac ) +
+                                                        ". Expected: " + mappings[ str( mac ) ] +
+                                                        " Actual: " + device )
+                                        hostAttachment = False
+                                else:
+                                    hostAttachment = False
+                            except AssertionError:
+                                main.log.exception( "Json object not as expected" )
+                                main.log.error( repr( host ) )
+                                hostAttachment = False
+                else:
+                    main.log.error( "No hosts json output or \"Error\"" +
+                                    " in output. hosts = " +
+                                    repr( hosts[ controller ] ) )
+                if zeroHosts is True:
+                    # If no hosts were discovered, the attachment points
+                    # could not actually be verified
+                    hostAttachment = False
+
+                # END CHECKING HOST ATTACHMENT POINTS
+                devicesResults = devicesResults and currentDevicesResult
+                linksResults = linksResults and currentLinksResult
+                hostsResults = hostsResults and currentHostsResult
+                hostAttachmentResults = hostAttachmentResults and\
+                                        hostAttachment
+            topoResult = devicesResults and linksResults and\
+                         hostsResults and hostAttachmentResults
+        utilities.assert_equals( expect=True,
+                                 actual=topoResult,
+                                 onpass="ONOS topology matches Mininet",
+                                 onfail=topoFailMsg )
+        # End of While loop to pull ONOS state
+
+        # Compare json objects for hosts and dataplane clusters
+
+        # hosts
+        main.step( "Hosts view is consistent across all ONOS nodes" )
+        consistentHostsResult = main.TRUE
+        for controller in range( len( hosts ) ):
+            controllerStr = str( main.activeNodes[controller] + 1 )
+            if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
+                if hosts[ controller ] == hosts[ 0 ]:
+                    continue
+                else:  # hosts not consistent
+                    main.log.error( "hosts from ONOS" + controllerStr +
+                                     " is inconsistent with ONOS1" )
+                    main.log.warn( repr( hosts[ controller ] ) )
+                    consistentHostsResult = main.FALSE
+
+            else:
+                main.log.error( "Error in getting ONOS hosts from ONOS" +
+                                 controllerStr )
+                consistentHostsResult = main.FALSE
+                main.log.warn( "ONOS" + controllerStr +
+                               " hosts response: " +
+                               repr( hosts[ controller ] ) )
+        utilities.assert_equals(
+            expect=main.TRUE,
+            actual=consistentHostsResult,
+            onpass="Hosts view is consistent across all ONOS nodes",
+            onfail="ONOS nodes have different views of hosts" )
+
+        main.step( "Hosts information is correct" )
+        hostsResults = hostsResults and ipResult
+        utilities.assert_equals(
+            expect=main.TRUE,
+            actual=hostsResults,
+            onpass="Host information is correct",
+            onfail="Host information is incorrect" )
+
+        main.step( "Host attachment points to the network" )
+        utilities.assert_equals(
+            expect=True,
+            actual=hostAttachmentResults,
+            onpass="Hosts are correctly attached to the network",
+            onfail="ONOS did not correctly attach hosts to the network" )
+
+        # Strongly connected clusters of devices
+        main.step( "Clusters view is consistent across all ONOS nodes" )
+        consistentClustersResult = main.TRUE
+        for controller in range( len( clusters ) ):
+            controllerStr = str( main.activeNodes[controller] + 1 )
+            if "Error" not in clusters[ controller ]:
+                if clusters[ controller ] == clusters[ 0 ]:
+                    continue
+                else:  # clusters not consistent
+                    main.log.error( "clusters from ONOS" +
+                                     controllerStr +
+                                     " is inconsistent with ONOS1" )
+                    consistentClustersResult = main.FALSE
+
+            else:
+                main.log.error( "Error in getting dataplane clusters " +
+                                 "from ONOS" + controllerStr )
+                consistentClustersResult = main.FALSE
+                main.log.warn( "ONOS" + controllerStr +
+                               " clusters response: " +
+                               repr( clusters[ controller ] ) )
+        utilities.assert_equals(
+            expect=main.TRUE,
+            actual=consistentClustersResult,
+            onpass="Clusters view is consistent across all ONOS nodes",
+            onfail="ONOS nodes have different views of clusters" )
+
+        main.step( "There is only one SCC" )
+        # there should always only be one cluster
+        try:
+            numClusters = len( json.loads( clusters[ 0 ] ) )
+        except ( ValueError, TypeError ):
+            main.log.exception( "Error parsing clusters[0]: " +
+                                repr( clusters[0] ) )
+            numClusters = "ERROR"
+        clusterResults = main.FALSE
+        if numClusters == 1:
+            clusterResults = main.TRUE
+        utilities.assert_equals(
+            expect=1,
+            actual=numClusters,
+            onpass="ONOS shows 1 SCC",
+            onfail="ONOS shows " + str( numClusters ) + " SCCs" )
+
+        topoResult = ( devicesResults and linksResults
+                       and hostsResults and consistentHostsResult
+                       and consistentClustersResult and clusterResults
+                       and ipResult and hostAttachmentResults )
+
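+        # Additionally require that the topology converged within the
+        # first two polling attempts of the loop above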
+        topoResult = topoResult and int( count <= 2 )
+        note = "note it takes about " + str( int( cliTime ) ) + \
+            " seconds for the test to make all the cli calls to fetch " +\
+            "the topology from each ONOS instance"
+        main.log.info(
+            "Very crass estimate for topology discovery/convergence( " +
+            str( note ) + " ): " + str( elapsed ) + " seconds, " +
+            str( count ) + " tries" )
+
+        main.step( "Device information is correct" )
+        utilities.assert_equals(
+            expect=main.TRUE,
+            actual=devicesResults,
+            onpass="Device information is correct",
+            onfail="Device information is incorrect" )
+
+        main.step( "Links are correct" )
+        utilities.assert_equals(
+            expect=main.TRUE,
+            actual=linksResults,
+            onpass="Link are correct",
+            onfail="Links are incorrect" )
+
+        # FIXME: move this to an ONOS state case
+        main.step( "Checking ONOS nodes" )
+        nodesOutput = []
+        nodeResults = main.TRUE
+        threads = []
+        for i in main.activeNodes:
+            t = main.Thread( target=main.CLIs[i].nodes,
+                             name="nodes-" + str( i ),
+                             args=[ ] )
+            threads.append( t )
+            t.start()
+
+        for t in threads:
+            t.join()
+            nodesOutput.append( t.result )
+        ips = [ main.nodes[node].ip_address for node in main.activeNodes ]
+        ips.sort()
+        for i in nodesOutput:
+            try:
+                current = json.loads( i )
+                activeIps = []
+                currentResult = main.FALSE
+                for node in current:
+                    if node['state'] == 'ACTIVE':
+                        activeIps.append( node['ip'] )
+                activeIps.sort()
+                if ips == activeIps:
+                    currentResult = main.TRUE
+            except ( ValueError, TypeError ):
+                main.log.error( "Error parsing nodes output" )
+                main.log.warn( repr( i ) )
+                currentResult = main.FALSE
+            nodeResults = nodeResults and currentResult
+        utilities.assert_equals( expect=main.TRUE, actual=nodeResults,
+                                 onpass="Nodes check successful",
+                                 onfail="Nodes check NOT successful" )
+
+    def CASE9( self, main ):
+        """
+        Link s3-s28 down
+        """
+        import time
+        assert main.numCtrls, "main.numCtrls not defined"
+        assert main, "main not defined"
+        assert utilities.assert_equals, "utilities.assert_equals not defined"
+        assert main.CLIs, "main.CLIs not defined"
+        assert main.nodes, "main.nodes not defined"
+        # NOTE: You should probably run a topology check after this
+
+        linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
+
+        description = "Turn off a link to ensure that Link Discovery " +\
+                      "is working properly"
+        main.case( description )
+
+        main.step( "Kill Link between s3 and s28" )
+        LinkDown = main.Mininet1.link( END1="s3", END2="s28", OPTION="down" )
+        main.log.info( "Waiting " + str( linkSleep ) +
+                       " seconds for link down to be discovered" )
+        time.sleep( linkSleep )
+        utilities.assert_equals( expect=main.TRUE, actual=LinkDown,
+                                 onpass="Link down successful",
+                                 onfail="Failed to bring link down" )
+        # TODO do some sort of check here
+
+    def CASE10( self, main ):
+        """
+        Link s3-s28 up
+        """
+        import time
+        assert main.numCtrls, "main.numCtrls not defined"
+        assert main, "main not defined"
+        assert utilities.assert_equals, "utilities.assert_equals not defined"
+        assert main.CLIs, "main.CLIs not defined"
+        assert main.nodes, "main.nodes not defined"
+        # NOTE: You should probably run a topology check after this
+
+        linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
+
+        description = "Restore a link to ensure that Link Discovery is " + \
+                      "working properly"
+        main.case( description )
+
+        main.step( "Bring link between s3 and s28 back up" )
+        LinkUp = main.Mininet1.link( END1="s3", END2="s28", OPTION="up" )
+        main.log.info( "Waiting " + str( linkSleep ) +
+                       " seconds for link up to be discovered" )
+        time.sleep( linkSleep )
+        utilities.assert_equals( expect=main.TRUE, actual=LinkUp,
+                                 onpass="Link up successful",
+                                 onfail="Failed to bring link up" )
+        # TODO do some sort of check here
+
+    def CASE11( self, main ):
+        """
+        Switch Down
+        """
+        # NOTE: You should probably run a topology check after this
+        import time
+        assert main.numCtrls, "main.numCtrls not defined"
+        assert main, "main not defined"
+        assert utilities.assert_equals, "utilities.assert_equals not defined"
+        assert main.CLIs, "main.CLIs not defined"
+        assert main.nodes, "main.nodes not defined"
+
+        switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
+
+        description = "Killing a switch to ensure it is discovered correctly"
+        onosCli = main.CLIs[ main.activeNodes[0] ]
+        main.case( description )
+        switch = main.params[ 'kill' ][ 'switch' ]
+        switchDPID = main.params[ 'kill' ][ 'dpid' ]
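+        # NOTE: switch, dpid, and links are read from the <kill> element of
+        #       the .params file, e.g. ( values here are only illustrative ):
+        #       <kill><switch>s5</switch><dpid>0000000000005000</dpid>
+        #             <links>s4 s6</links></kill>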
+
+        # TODO: Make this switch parameterizable
+        main.step( "Kill " + switch )
+        main.log.info( "Deleting " + switch )
+        main.Mininet1.delSwitch( switch )
+        main.log.info( "Waiting " + str( switchSleep ) +
+                       " seconds for switch down to be discovered" )
+        time.sleep( switchSleep )
+        device = onosCli.getDevice( dpid=switchDPID )
+        # Peek at the deleted switch
+        main.log.warn( str( device ) )
+        result = main.FALSE
+        if device and device[ 'available' ] is False:
+            result = main.TRUE
+        utilities.assert_equals( expect=main.TRUE, actual=result,
+                                 onpass="Kill switch successful",
+                                 onfail="Failed to kill switch?" )
+
+    def CASE12( self, main ):
+        """
+        Switch Up
+        """
+        # NOTE: You should probably run a topology check after this
+        import time
+        assert main.numCtrls, "main.numCtrls not defined"
+        assert main, "main not defined"
+        assert utilities.assert_equals, "utilities.assert_equals not defined"
+        assert main.CLIs, "main.CLIs not defined"
+        assert main.nodes, "main.nodes not defined"
+        assert ONOS1Port, "ONOS1Port not defined"
+        assert ONOS2Port, "ONOS2Port not defined"
+        assert ONOS3Port, "ONOS3Port not defined"
+        assert ONOS4Port, "ONOS4Port not defined"
+        assert ONOS5Port, "ONOS5Port not defined"
+        assert ONOS6Port, "ONOS6Port not defined"
+        assert ONOS7Port, "ONOS7Port not defined"
+
+        switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
+        switch = main.params[ 'kill' ][ 'switch' ]
+        switchDPID = main.params[ 'kill' ][ 'dpid' ]
+        links = main.params[ 'kill' ][ 'links' ].split()
+        onosCli = main.CLIs[ main.activeNodes[0] ]
+        description = "Adding a switch to ensure it is discovered correctly"
+        main.case( description )
+
+        main.step( "Add back " + switch )
+        main.Mininet1.addSwitch( switch, dpid=switchDPID )
+        for peer in links:
+            main.Mininet1.addLink( switch, peer )
+        ipList = [ node.ip_address for node in main.nodes ]
+        main.Mininet1.assignSwController( sw=switch, ip=ipList )
+        main.log.info( "Waiting " + str( switchSleep ) +
+                       " seconds for switch up to be discovered" )
+        time.sleep( switchSleep )
+        device = onosCli.getDevice( dpid=switchDPID )
+        # Peek at the added switch
+        main.log.warn( str( device ) )
+        result = main.FALSE
+        if device and device[ 'available' ]:
+            result = main.TRUE
+        utilities.assert_equals( expect=main.TRUE, actual=result,
+                                 onpass="add switch successful",
+                                 onfail="Failed to add switch?" )
+
+    def CASE13( self, main ):
+        """
+        Clean up
+        """
+        import os
+        import time
+        assert main.numCtrls, "main.numCtrls not defined"
+        assert main, "main not defined"
+        assert utilities.assert_equals, "utilities.assert_equals not defined"
+        assert main.CLIs, "main.CLIs not defined"
+        assert main.nodes, "main.nodes not defined"
+
+        # printing colors to terminal
+        colors = { 'cyan': '\033[96m', 'purple': '\033[95m',
+                   'blue': '\033[94m', 'green': '\033[92m',
+                   'yellow': '\033[93m', 'red': '\033[91m', 'end': '\033[0m' }
+        main.case( "Test Cleanup" )
+        main.step( "Killing tcpdumps" )
+        main.Mininet2.stopTcpdump()
+
+        testname = main.TEST
+        if main.params[ 'BACKUP' ][ 'ENABLED' ] == "True":
+            main.step( "Copying MN pcap and ONOS log files to test station" )
+            teststationUser = main.params[ 'BACKUP' ][ 'TESTONUSER' ]
+            teststationIP = main.params[ 'BACKUP' ][ 'TESTONIP' ]
+            # NOTE: MN Pcap file is being saved to logdir.
+            #       We scp this file as MN and TestON aren't necessarily the same vm
+
+            # FIXME: To be replaced with a Jenkins post script
+            # TODO: Load these from params
+            # NOTE: must end in /
+            logFolder = "/opt/onos/log/"
+            logFiles = [ "karaf.log", "karaf.log.1" ]
+            for f in logFiles:
+                for node in main.nodes:
+                    dstName = main.logdir + "/" + node.name + "-" + f
+                    main.ONOSbench.secureCopy( node.user_name, node.ip_address,
+                                               logFolder + f, dstName )
+            # std*.log's
+            # NOTE: must end in /
+            logFolder = "/opt/onos/var/"
+            logFiles = [ "stderr.log", "stdout.log" ]
+            for f in logFiles:
+                for node in main.nodes:
+                    dstName = main.logdir + "/" + node.name + "-" + f
+                    main.ONOSbench.secureCopy( node.user_name, node.ip_address,
+                                               logFolder + f, dstName )
+        else:
+            main.log.debug( "skipping saving log files" )
+
+        main.step( "Stopping Mininet" )
+        mnResult = main.Mininet1.stopNet()
+        utilities.assert_equals( expect=main.TRUE, actual=mnResult,
+                                 onpass="Mininet stopped",
+                                 onfail="MN cleanup NOT successful" )
+
+        main.step( "Checking ONOS Logs for errors" )
+        for node in main.nodes:
+            main.log.debug( "Checking logs for errors on " + node.name + ":" )
+            main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
+
+        try:
+            timerLog = open( main.logdir + "/Timers.csv", 'w')
+            # Overwrite with empty line and close
+            labels = "Gossip Intents"
+            data = str( gossipTime )
+            timerLog.write( labels + "\n" + data )
+            timerLog.close()
+        except NameError, e:
+            main.log.exception( e )
+
+    def CASE14( self, main ):
+        """
+        start election app on all onos nodes
+        """
+        assert main.numCtrls, "main.numCtrls not defined"
+        assert main, "main not defined"
+        assert utilities.assert_equals, "utilities.assert_equals not defined"
+        assert main.CLIs, "main.CLIs not defined"
+        assert main.nodes, "main.nodes not defined"
+
+        main.case("Start Leadership Election app")
+        main.step( "Install leadership election app" )
+        onosCli = main.CLIs[ main.activeNodes[0] ]
+        appResult = onosCli.activateApp( "org.onosproject.election" )
+        utilities.assert_equals(
+            expect=main.TRUE,
+            actual=appResult,
+            onpass="Election app installed",
+            onfail="Something went wrong with installing Leadership election" )
+
+        main.step( "Run for election on each node" )
+        leaderResult = main.TRUE
+        leaders = []
+        for i in main.activeNodes:
+            main.CLIs[i].electionTestRun()
+        for i in main.activeNodes:
+            cli = main.CLIs[i]
+            leader = cli.electionTestLeader()
+            if leader is None or leader == main.FALSE:
+                main.log.error( cli.name + ": Leader for the election app " +
+                                 "should be an ONOS node, instead got '" +
+                                 str( leader ) + "'" )
+                leaderResult = main.FALSE
+            leaders.append( leader )
+        utilities.assert_equals(
+            expect=main.TRUE,
+            actual=leaderResult,
+            onpass="Successfully ran for leadership",
+            onfail="Failed to run for leadership" )
+
+        main.step( "Check that each node shows the same leader" )
+        sameLeader = main.TRUE
+        if len( set( leaders ) ) != 1:
+            sameLeader = main.FALSE
+            main.log.error( "Results of electionTestLeader is order of main.CLIs:" +
+                            str( leaders ) )
+        utilities.assert_equals(
+            expect=main.TRUE,
+            actual=sameLeader,
+            onpass="Leadership is consistent for the election topic",
+            onfail="Nodes have different leaders" )
+
+    def CASE15( self, main ):
+        """
+        Check that Leadership Election is still functional
+            15.1 Run election on each node
+            15.2 Check that each node has the same leaders and candidates
+            15.3 Find current leader and withdraw
+            15.4 Check that a new node was elected leader
+            15.5 Check that the new leader was a candidate of the old leader
+            15.6 Run for election on old leader
+            15.7 Check that oldLeader is a candidate, and leader if only 1 node
+            15.8 Make sure that the old leader was added to the candidate list
+
+            old and new variable prefixes refer to data from before vs after
+                withdrawal and later before withdrawal vs after re-election
+        """
+        import time
+        assert main.numCtrls, "main.numCtrls not defined"
+        assert main, "main not defined"
+        assert utilities.assert_equals, "utilities.assert_equals not defined"
+        assert main.CLIs, "main.CLIs not defined"
+        assert main.nodes, "main.nodes not defined"
+
+        description = "Check that Leadership Election is still functional"
+        main.case( description )
+        # NOTE: Need to re-run since being a candidate is not persistent
+        # TODO: add check for "Command not found:" in the driver, this
+        #       means the election test app isn't loaded
+
+        oldLeaders = []  # leaders by node before withdrawal from candidates
+        newLeaders = []  # leaders by node after withdrawal from candidates
+        oldAllCandidates = []  # list of lists of each node's candidates before
+        newAllCandidates = []  # list of lists of each node's candidates after
+        oldCandidates = []  # list of candidates from node 0 before withdrawal
+        newCandidates = []  # list of candidates from node 0 after withdrawal
+        oldLeader = ''  # the old leader from oldLeaders, None if not same
+        newLeader = ''  # the new leader from newLeaders, None if not same
+        oldLeaderCLI = None  # the CLI of the old leader used for re-electing
+        expectNoLeader = False  # True when no leader is expected, i.e. a single-node cluster
+        if main.numCtrls == 1:
+            expectNoLeader = True
+
+        main.step( "Run for election on each node" )
+        electionResult = main.TRUE
+
+        for i in main.activeNodes:  # run test election on each node
+            if main.CLIs[i].electionTestRun() == main.FALSE:
+                electionResult = main.FALSE
+        utilities.assert_equals(
+            expect=main.TRUE,
+            actual=electionResult,
+            onpass="All nodes successfully ran for leadership",
+            onfail="At least one node failed to run for leadership" )
+
+        if electionResult == main.FALSE:
+            main.log.error(
+                "Skipping Test Case because Election Test App isn't loaded" )
+            main.skipCase()
+
+        main.step( "Check that each node shows the same leader and candidates" )
+        sameResult = main.TRUE
+        failMessage = "Nodes have different leaders"
+        for i in main.activeNodes:
+            cli = main.CLIs[i]
+            node = cli.specificLeaderCandidate( 'org.onosproject.election' )
+            oldAllCandidates.append( node )
+            if node:
+                oldLeaders.append( node[ 0 ] )
+            else:
+                oldLeaders.append( None )
+        oldCandidates = oldAllCandidates[ 0 ]
+        if oldCandidates is None:
+            oldCandidates = [ None ]
+
+        # Check that each node has the same leader. Defines oldLeader
+        if len( set( oldLeaders ) ) != 1:
+            sameResult = main.FALSE
+            main.log.error( "More than one leader present:" + str( oldLeaders ) )
+            # FIXME: for split brain, we will likely have 2. WHat should we do here?
+            oldLeader = None
+        else:
+            oldLeader = oldLeaders[ 0 ]
+
+        # Check that each node's candidate list is the same
+        candidateDiscrepancy = False  # Boolean of candidate mismatches
+        for candidates in oldAllCandidates:
+            if candidates is None:
+                main.log.warn( "Error getting candidates" )
+                candidates = [ None ]
+            if set( candidates ) != set( oldCandidates ):
+                sameResult = main.FALSE
+                candidateDiscrepancy = True
+        if candidateDiscrepancy:
+            failMessage += " and candidates"
+        utilities.assert_equals(
+            expect=main.TRUE,
+            actual=sameResult,
+            onpass="Leadership is consistent for the election topic",
+            onfail=failMessage )
+
+        main.step( "Find current leader and withdraw" )
+        withdrawResult = main.TRUE
+        # do some sanity checking on leader before using it
+        if oldLeader is None:
+            main.log.error( "Leadership isn't consistent." )
+            withdrawResult = main.FALSE
+        # Get the CLI of the oldLeader
+        for i in main.activeNodes:
+            if oldLeader == main.nodes[ i ].ip_address:
+                oldLeaderCLI = main.CLIs[ i ]
+                break
+        else:  # FOR/ELSE statement
+            main.log.error( "Leader election, could not find current leader" )
+        if oldLeader:
+            withdrawResult = oldLeaderCLI.electionTestWithdraw()
+        utilities.assert_equals(
+            expect=main.TRUE,
+            actual=withdrawResult,
+            onpass="Node was withdrawn from election",
+            onfail="Node was not withdrawn from election" )
+
+        main.step( "Check that a new node was elected leader" )
+        # FIXME: use threads
+        newLeaderResult = main.TRUE
+        failMessage = "Nodes have different leaders"
+
+        # Get new leaders and candidates
+        for i in main.activeNodes:
+            cli = main.CLIs[i]
+            node = cli.specificLeaderCandidate( 'org.onosproject.election' )
+            # elections might not have finished yet
+            if node[ 0 ] == 'none' and not expectNoLeader:
+                main.log.info( "Node has no leader, waiting 5 seconds to be " +
+                    "sure elections are complete." )
+                time.sleep(5)
+                node = cli.specificLeaderCandidate( 'org.onosproject.election' )
+                # election still isn't done or there is a problem
+                if node[ 0 ] == 'none':
+                    main.log.error( "No leader was elected on at least 1 node" )
+                    newLeaderResult = main.FALSE
+            newAllCandidates.append( node )
+            newLeaders.append( node[ 0 ] )
+        newCandidates = newAllCandidates[ 0 ]
+
+        # Check that each node has the same leader. Defines newLeader
+        if len( set( newLeaders ) ) != 1:
+            newLeaderResult = main.FALSE
+            main.log.error( "Nodes have different leaders: " +
+                            str( newLeaders ) )
+            newLeader = None
+        else:
+            newLeader = newLeaders[ 0 ]
+
+        # Check that each node's candidate list is the same
+        for candidates in newAllCandidates:
+            if set( candidates ) != set( newCandidates ):
+                newLeaderResult = main.FALSE
+                main.log.error( "Discrepancy in candidate lists detected" )
+
+        # Check that the new leader is not the older leader, which was withdrawn
+        if newLeader == oldLeader:
+            newLeaderResult = main.FALSE
+            main.log.error( "All nodes still see old leader: " + str( oldLeader ) +
+                " as the current leader" )
+
+        utilities.assert_equals(
+            expect=main.TRUE,
+            actual=newLeaderResult,
+            onpass="Leadership election passed",
+            onfail="Something went wrong with Leadership election" )
+
+        main.step( "Check that that new leader was the candidate of old leader")
+        # candidates[ 2 ] should become the top candidate after withdrawl
+        correctCandidateResult = main.TRUE
+        if expectNoLeader:
+            if newLeader == 'none':
+                main.log.info( "No leader expected. None found. Pass" )
+                correctCandidateResult = main.TRUE
+            else:
+                main.log.info( "Expected no leader, got: " + str( newLeader ) )
+                correctCandidateResult = main.FALSE
+        elif len( oldCandidates ) >= 3 and newLeader != oldCandidates[ 2 ]:
+            correctCandidateResult = main.FALSE
+            main.log.error( "Candidate {} was elected. {} should have had priority.".format(
+                                newLeader, oldCandidates[ 2 ] ) )
+        else:
+            main.log.warn( "Could not determine who should be the correct leader" )
+            correctCandidateResult = main.FALSE
+        utilities.assert_equals(
+            expect=main.TRUE,
+            actual=correctCandidateResult,
+            onpass="Correct Candidate Elected",
+            onfail="Incorrect Candidate Elected" )
+
+        main.step( "Run for election on old leader( just so everyone " +
+                   "is in the hat )" )
+        if oldLeaderCLI is not None:
+            runResult = oldLeaderCLI.electionTestRun()
+        else:
+            main.log.error( "No old leader to re-elect" )
+            runResult = main.FALSE
+        utilities.assert_equals(
+            expect=main.TRUE,
+            actual=runResult,
+            onpass="App re-ran for election",
+            onfail="App failed to run for election" )
+        main.step(
+            "Check that oldLeader is a candidate, and leader if only 1 node" )
+        # verify leader didn't just change
+        positionResult = main.TRUE
+        # Get new leaders and candidates, wait if oldLeader is not a candidate yet
+
+        # Reset and reuse the new candidate and leaders lists
+        newAllCandidates = []
+        newCandidates = []
+        newLeaders = []
+        for i in main.activeNodes:
+            cli = main.CLIs[i]
+            node = cli.specificLeaderCandidate( 'org.onosproject.election' )
+            if oldLeader not in node:  # election might not have finished yet
+                main.log.info( "Old Leader not elected, waiting 5 seconds to " +
+                    "be sure elections are complete" )
+                time.sleep(5)
+                node = cli.specificLeaderCandidate( 'org.onosproject.election' )
+            if oldLeader not in node:  # election still isn't done, errors
+                main.log.error(
+                    "Old leader was not elected on at least one node" )
+                positionResult = main.FALSE
+            newAllCandidates.append( node )
+            newLeaders.append( node[ 0 ] )
+        newCandidates = newAllCandidates[ 0 ]
+
+        # Check that each node has the same leader. Defines newLeader
+        if len( set( newLeaders ) ) != 1:
+            positionResult = main.FALSE
+            main.log.error( "Nodes have different leaders: " +
+                            str( newLeaders ) )
+            newLeader = None
+        else:
+            newLeader = newLeaders[ 0 ]
+
+        # Check that each node's candidate list is the same
+        for candidates in newAllCandidates:
+            if set( candidates ) != set( newCandidates ):
+                positionResult = main.FALSE
+                main.log.error( "Discrepancy in candidate lists detected" )
+
+        # Check that the re-elected node is last on the candidate List
+        if oldLeader != newCandidates[ -1 ]:
+            main.log.error( "Old Leader (" + str( oldLeader ) + ") not in the proper position " +
+                str( newCandidates ) )
+            positionResult = main.FALSE
+
+        utilities.assert_equals(
+            expect=main.TRUE,
+            actual=positionResult,
+            onpass="Old leader successfully re-ran for election",
+            onfail="Something went wrong with Leadership election after " +
+                   "the old leader re-ran for election" )
+
+    def CASE16( self, main ):
+        """
+        Install Distributed Primitives app
+        """
+        import time
+        assert main.numCtrls, "main.numCtrls not defined"
+        assert main, "main not defined"
+        assert utilities.assert_equals, "utilities.assert_equals not defined"
+        assert main.CLIs, "main.CLIs not defined"
+        assert main.nodes, "main.nodes not defined"
+
+        # Variables for the distributed primitives tests
+        global pCounterName
+        global iCounterName
+        global pCounterValue
+        global iCounterValue
+        global onosSet
+        global onosSetName
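+        # NOTE: these globals carry the expected primitive state into the
+        #       later distributed-primitives cases ( e.g. CASE17 )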
+        pCounterName = "TestON-Partitions"
+        iCounterName = "TestON-inMemory"
+        pCounterValue = 0
+        iCounterValue = 0
+        onosSet = set([])
+        onosSetName = "TestON-set"
+
+        description = "Install Primitives app"
+        main.case( description )
+        main.step( "Install Primitives app" )
+        appName = "org.onosproject.distributedprimitives"
+        node = main.activeNodes[0]
+        appResults = main.CLIs[node].activateApp( appName )
+        utilities.assert_equals( expect=main.TRUE,
+                                 actual=appResults,
+                                 onpass="Primitives app activated",
+                                 onfail="Primitives app not activated" )
+        time.sleep( 5 )  # To allow all nodes to activate
+
+    def CASE17( self, main ):
+        """
+        Check for basic functionality with distributed primitives
+        """
+        # Make sure variables are defined/set
+        assert main.numCtrls, "main.numCtrls not defined"
+        assert main, "main not defined"
+        assert utilities.assert_equals, "utilities.assert_equals not defined"
+        assert main.CLIs, "main.CLIs not defined"
+        assert main.nodes, "main.nodes not defined"
+        assert pCounterName, "pCounterName not defined"
+        assert iCounterName, "iCounterName not defined"
+        assert onosSetName, "onosSetName not defined"
+        # NOTE: assert fails if value is 0/None/Empty/False
+        try:
+            pCounterValue
+        except NameError:
+            main.log.error( "pCounterValue not defined, setting to 0" )
+            pCounterValue = 0
+        try:
+            iCounterValue
+        except NameError:
+            main.log.error( "iCounterValue not defined, setting to 0" )
+            iCounterValue = 0
+        try:
+            onosSet
+        except NameError:
+            main.log.error( "onosSet not defined, setting to empty Set" )
+            onosSet = set([])
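+        # NOTE: pCounterValue and iCounterValue are assigned below, which
+        #       makes them local to this case; the UnboundLocalError raised
+        #       on first read is a subclass of NameError, so the handlers
+        #       above still catch it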
+        # Variables for the distributed primitives tests. These are local only
+        addValue = "a"
+        addAllValue = "a b c d e f"
+        retainValue = "c d e f"
+
+        description = "Check for basic functionality with distributed " +\
+                      "primitives"
+        main.case( description )
+        main.caseExplanation = "Test the methods of the distributed " +\
+                                "primitives (counters and sets) throught the cli"
+        # DISTRIBUTED ATOMIC COUNTERS
+        # Partitioned counters
+        main.step( "Increment then get a default counter on each node" )
+        pCounters = []
+        threads = []
+        addedPValues = []
+        for i in main.activeNodes:
+            t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
+                             name="counterAddAndGet-" + str( i ),
+                             args=[ pCounterName ] )
+            pCounterValue += 1
+            addedPValues.append( pCounterValue )
+            threads.append( t )
+            t.start()
+
+        for t in threads:
+            t.join()
+            pCounters.append( t.result )
+        # Check that counter incremented numController times
+        pCounterResults = True
+        for i in addedPValues:
+            tmpResult = i in pCounters
+            pCounterResults = pCounterResults and tmpResult
+            if not tmpResult:
+                main.log.error( str( i ) + " is not in partitioned "
+                                "counter incremented results" )
+        utilities.assert_equals( expect=True,
+                                 actual=pCounterResults,
+                                 onpass="Default counter incremented",
+                                 onfail="Error incrementing default" +
+                                        " counter" )
+
+        main.step( "Get then Increment a default counter on each node" )
+        pCounters = []
+        threads = []
+        addedPValues = []
+        for i in main.activeNodes:
+            t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
+                             name="counterGetAndAdd-" + str( i ),
+                             args=[ pCounterName ] )
+            addedPValues.append( pCounterValue )
+            pCounterValue += 1
+            threads.append( t )
+            t.start()
+
+        for t in threads:
+            t.join()
+            pCounters.append( t.result )
+        # Check that counter incremented numController times
+        pCounterResults = True
+        for i in addedPValues:
+            tmpResult = i in pCounters
+            pCounterResults = pCounterResults and tmpResult
+            if not tmpResult:
+                main.log.error( str( i ) + " is not in partitioned "
+                                "counter incremented results" )
+        utilities.assert_equals( expect=True,
+                                 actual=pCounterResults,
+                                 onpass="Default counter incremented",
+                                 onfail="Error incrementing default" +
+                                        " counter" )
+
+        main.step( "Counters we added have the correct values" )
+        incrementCheck = main.Counters.counterCheck( pCounterName, pCounterValue )
+        utilities.assert_equals( expect=main.TRUE,
+                                 actual=incrementCheck,
+                                 onpass="Added counters are correct",
+                                 onfail="Added counters are incorrect" )
+
+        main.step( "Add -8 to then get a default counter on each node" )
+        pCounters = []
+        threads = []
+        addedPValues = []
+        for i in main.activeNodes:
+            t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
+                             name="counterIncrement-" + str( i ),
+                             args=[ pCounterName ],
+                             kwargs={ "delta": -8 } )
+            pCounterValue += -8
+            addedPValues.append( pCounterValue )
+            threads.append( t )
+            t.start()
+
+        for t in threads:
+            t.join()
+            pCounters.append( t.result )
+        # Check that counter incremented numController times
+        pCounterResults = True
+        for i in addedPValues:
+            tmpResult = i in pCounters
+            pCounterResults = pCounterResults and tmpResult
+            if not tmpResult:
+                main.log.error( str( i ) + " is not in partitioned "
+                                "counter incremented results" )
+        utilities.assert_equals( expect=True,
+                                 actual=pCounterResults,
+                                 onpass="Default counter incremented",
+                                 onfail="Error incrementing default" +
+                                        " counter" )
+
+        main.step( "Add 5 to then get a default counter on each node" )
+        pCounters = []
+        threads = []
+        addedPValues = []
+        for i in main.activeNodes:
+            t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
+                             name="counterIncrement-" + str( i ),
+                             args=[ pCounterName ],
+                             kwargs={ "delta": 5 } )
+            pCounterValue += 5
+            addedPValues.append( pCounterValue )
+            threads.append( t )
+            t.start()
+
+        for t in threads:
+            t.join()
+            pCounters.append( t.result )
+        # Check that counter incremented numController times
+        pCounterResults = True
+        for i in addedPValues:
+            tmpResult = i in pCounters
+            pCounterResults = pCounterResults and tmpResult
+            if not tmpResult:
+                main.log.error( str( i ) + " is not in partitioned "
+                                "counter incremented results" )
+        utilities.assert_equals( expect=True,
+                                 actual=pCounterResults,
+                                 onpass="Default counter incremented",
+                                 onfail="Error incrementing default" +
+                                        " counter" )
+
+        main.step( "Get then add 5 to a default counter on each node" )
+        pCounters = []
+        threads = []
+        addedPValues = []
+        for i in main.activeNodes:
+            t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
+                             name="counterIncrement-" + str( i ),
+                             args=[ pCounterName ],
+                             kwargs={ "delta": 5 } )
+            addedPValues.append( pCounterValue )
+            pCounterValue += 5
+            threads.append( t )
+            t.start()
+
+        for t in threads:
+            t.join()
+            pCounters.append( t.result )
+        # Check that counter incremented numController times
+        pCounterResults = True
+        for i in addedPValues:
+            tmpResult = i in pCounters
+            pCounterResults = pCounterResults and tmpResult
+            if not tmpResult:
+                main.log.error( str( i ) + " is not in partitioned "
+                                "counter incremented results" )
+        utilities.assert_equals( expect=True,
+                                 actual=pCounterResults,
+                                 onpass="Default counter incremented",
+                                 onfail="Error incrementing default" +
+                                        " counter" )
+
+        main.step( "Counters we added have the correct values" )
+        incrementCheck = main.Counters.counterCheck( pCounterName, pCounterValue )
+        utilities.assert_equals( expect=main.TRUE,
+                                 actual=incrementCheck,
+                                 onpass="Added counters are correct",
+                                 onfail="Added counters are incorrect" )
+
+        # In-Memory counters
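+        # Same add/get pattern as the partitioned counters above, but run
+        # against the in-memory counter via the inMemory=True kwarg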
+        main.step( "Increment and get an in-memory counter on each node" )
+        iCounters = []
+        addedIValues = []
+        threads = []
+        for i in main.activeNodes:
+            t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
+                             name="icounterIncrement-" + str( i ),
+                             args=[ iCounterName ],
+                             kwargs={ "inMemory": True } )
+            iCounterValue += 1
+            addedIValues.append( iCounterValue )
+            threads.append( t )
+            t.start()
+
+        for t in threads:
+            t.join()
+            iCounters.append( t.result )
+        # Check that counter incremented numController times
+        iCounterResults = True
+        for i in addedIValues:
+            tmpResult = i in iCounters
+            iCounterResults = iCounterResults and tmpResult
+            if not tmpResult:
+                main.log.error( str( i ) + " is not in the in-memory "
+                                "counter incremented results" )
+        utilities.assert_equals( expect=True,
+                                 actual=iCounterResults,
+                                 onpass="In-memory counter incremented",
+                                 onfail="Error incrementing in-memory" +
+                                        " counter" )
+
+        main.step( "Get then Increment a in-memory counter on each node" )
+        iCounters = []
+        threads = []
+        addedIValues = []
+        for i in main.activeNodes:
+            t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
+                             name="counterGetAndAdd-" + str( i ),
+                             args=[ iCounterName ],
+                             kwargs={ "inMemory": True } )
+            addedIValues.append( iCounterValue )
+            iCounterValue += 1
+            threads.append( t )
+            t.start()
+
+        for t in threads:
+            t.join()
+            iCounters.append( t.result )
+        # Check that counter incremented numController times
+        iCounterResults = True
+        for i in addedIValues:
+            tmpResult = i in iCounters
+            iCounterResults = iCounterResults and tmpResult
+            if not tmpResult:
+                main.log.error( str( i ) + " is not in in-memory "
+                                "counter incremented results" )
+        utilities.assert_equals( expect=True,
+                                 actual=iCounterResults,
+                                 onpass="In-memory counter incremented",
+                                 onfail="Error incrementing in-memory" +
+                                        " counter" )
+
+        main.step( "Counters we added have the correct values" )
+        incrementCheck = main.Counters.counterCheck( iCounterName, iCounterValue )
+        utilities.assert_equals( expect=main.TRUE,
+                                 actual=incrementCheck,
+                                 onpass="Added counters are correct",
+                                 onfail="Added counters are incorrect" )
+
+        main.step( "Add -8 to then get a in-memory counter on each node" )
+        iCounters = []
+        threads = []
+        addedIValues = []
+        for i in main.activeNodes:
+            t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
+                             name="counterIncrement-" + str( i ),
+                             args=[ iCounterName ],
+                             kwargs={ "delta": -8, "inMemory": True  } )
+            iCounterValue += -8
+            addedIValues.append( iCounterValue )
+            threads.append( t )
+            t.start()
+
+        for t in threads:
+            t.join()
+            iCounters.append( t.result )
+        # Check that counter incremented numController times
+        iCounterResults = True
+        for i in addedIValues:
+            tmpResult = i in iCounters
+            iCounterResults = iCounterResults and tmpResult
+            if not tmpResult:
+                main.log.error( str( i ) + " is not in in-memory "
+                                "counter incremented results" )
+        utilities.assert_equals( expect=True,
+                                 actual=iCounterResults,
+                                 onpass="In-memory counter incremented",
+                                 onfail="Error incrementing in-memory" +
+                                        " counter" )
+
+        main.step( "Add 5 to then get a in-memory counter on each node" )
+        iCounters = []
+        threads = []
+        addedIValues = []
+        for i in main.activeNodes:
+            t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
+                             name="counterIncrement-" + str( i ),
+                             args=[ iCounterName ],
+                             kwargs={ "delta": 5, "inMemory": True  } )
+            iCounterValue += 5
+            addedIValues.append( iCounterValue )
+            threads.append( t )
+            t.start()
+
+        for t in threads:
+            t.join()
+            iCounters.append( t.result )
+        # Check that counter incremented numController times
+        iCounterResults = True
+        for i in addedIValues:
+            tmpResult = i in iCounters
+            iCounterResults = iCounterResults and tmpResult
+            if not tmpResult:
+                main.log.error( str( i ) + " is not in in-memory "
+                                "counter incremented results" )
+        utilities.assert_equals( expect=True,
+                                 actual=iCounterResults,
+                                 onpass="In-memory counter incremented",
+                                 onfail="Error incrementing in-memory" +
+                                        " counter" )
+
+        main.step( "Get then add 5 to a in-memory counter on each node" )
+        iCounters = []
+        threads = []
+        addedIValues = []
+        for i in main.activeNodes:
+            t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
+                             name="counterIncrement-" + str( i ),
+                             args=[ iCounterName ],
+                             kwargs={ "delta": 5, "inMemory": True  } )
+            addedIValues.append( iCounterValue )
+            iCounterValue += 5
+            threads.append( t )
+            t.start()
+
+        for t in threads:
+            t.join()
+            iCounters.append( t.result )
+        # Check that counter incremented numController times
+        iCounterResults = True
+        for i in addedIValues:
+            tmpResult = i in iCounters
+            iCounterResults = iCounterResults and tmpResult
+            if not tmpResult:
+                main.log.error( str( i ) + " is not in in-memory "
+                                "counter incremented results" )
+        utilities.assert_equals( expect=True,
+                                 actual=iCounterResults,
+                                 onpass="In-memory counter incremented",
+                                 onfail="Error incrementing in-memory" +
+                                        " counter" )
+
+        main.step( "Counters we added have the correct values" )
+        incrementCheck = main.Counters.counterCheck( iCounterName, iCounterValue )
+        utilities.assert_equals( expect=main.TRUE,
+                                 actual=incrementCheck,
+                                 onpass="Added counters are correct",
+                                 onfail="Added counters are incorrect" )
+
+        main.step( "Check counters are consistant across nodes" )
+        onosCounters, consistentCounterResults = main.Counters.consistentCheck()
+        utilities.assert_equals( expect=main.TRUE,
+                                 actual=consistentCounterResults,
+                                 onpass="ONOS counters are consistent " +
+                                        "across nodes",
+                                 onfail="ONOS Counters are inconsistent " +
+                                        "across nodes" )
+
+        main.step( "Counters we added have the correct values" )
+        incrementCheck = main.Counters.counterCheck( pCounterName, pCounterValue )
+        incrementCheck = incrementCheck and \
+                         main.Counters.counterCheck( iCounterName, iCounterValue )
+        utilities.assert_equals( expect=main.TRUE,
+                                 actual=incrementCheck,
+                                 onpass="Added counters are correct",
+                                 onfail="Added counters are incorrect" )
+        # DISTRIBUTED SETS
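+        # The local Python set ( onosSet ) is mutated in lock step with the
+        # distributed set and serves as the expected model when comparing
+        # each node's view and size after every operation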
+        main.step( "Distributed Set get" )
+        size = len( onosSet )
+        getResponses = []
+        threads = []
+        for i in main.activeNodes:
+            t = main.Thread( target=main.CLIs[i].setTestGet,
+                             name="setTestGet-" + str( i ),
+                             args=[ onosSetName ] )
+            threads.append( t )
+            t.start()
+        for t in threads:
+            t.join()
+            getResponses.append( t.result )
+
+        getResults = main.TRUE
+        for i in range( len( main.activeNodes ) ):
+            node = str( main.activeNodes[i] + 1 )
+            if isinstance( getResponses[ i ], list):
+                current = set( getResponses[ i ] )
+                if len( current ) == len( getResponses[ i ] ):
+                    # no repeats
+                    if onosSet != current:
+                        main.log.error( "ONOS" + node +
+                                        " has incorrect view" +
+                                        " of set " + onosSetName + ":\n" +
+                                        str( getResponses[ i ] ) )
+                        main.log.debug( "Expected: " + str( onosSet ) )
+                        main.log.debug( "Actual: " + str( current ) )
+                        getResults = main.FALSE
+                else:
+                    # error, set is not a set
+                    main.log.error( "ONOS" + node +
+                                    " has repeat elements in" +
+                                    " set " + onosSetName + ":\n" +
+                                    str( getResponses[ i ] ) )
+                    getResults = main.FALSE
+            elif getResponses[ i ] == main.ERROR:
+                getResults = main.FALSE
+        utilities.assert_equals( expect=main.TRUE,
+                                 actual=getResults,
+                                 onpass="Set elements are correct",
+                                 onfail="Set elements are incorrect" )
+
+        main.step( "Distributed Set size" )
+        sizeResponses = []
+        threads = []
+        for i in main.activeNodes:
+            t = main.Thread( target=main.CLIs[i].setTestSize,
+                             name="setTestSize-" + str( i ),
+                             args=[ onosSetName ] )
+            threads.append( t )
+            t.start()
+        for t in threads:
+            t.join()
+            sizeResponses.append( t.result )
+
+        sizeResults = main.TRUE
+        for i in range( len( main.activeNodes ) ):
+            node = str( main.activeNodes[i] + 1 )
+            if size != sizeResponses[ i ]:
+                sizeResults = main.FALSE
+                main.log.error( "ONOS" + node +
+                                " expected a size of " + str( size ) +
+                                " for set " + onosSetName +
+                                " but got " + str( sizeResponses[ i ] ) )
+        utilities.assert_equals( expect=main.TRUE,
+                                 actual=sizeResults,
+                                 onpass="Set sizes are correct",
+                                 onfail="Set sizes are incorrect" )
+
+        main.step( "Distributed Set add()" )
+        onosSet.add( addValue )
+        addResponses = []
+        threads = []
+        for i in main.activeNodes:
+            t = main.Thread( target=main.CLIs[i].setTestAdd,
+                             name="setTestAdd-" + str( i ),
+                             args=[ onosSetName, addValue ] )
+            threads.append( t )
+            t.start()
+        for t in threads:
+            t.join()
+            addResponses.append( t.result )
+
+        # main.TRUE = successfully changed the set
+        # main.FALSE = action resulted in no change in set
+        # main.ERROR - Some error in executing the function
+        addResults = main.TRUE
+        for i in range( len( main.activeNodes ) ):
+            if addResponses[ i ] == main.TRUE:
+                # All is well
+                pass
+            elif addResponses[ i ] == main.FALSE:
+                # Already in set, probably fine
+                pass
+            elif addResponses[ i ] == main.ERROR:
+                # Error in execution
+                addResults = main.FALSE
+            else:
+                # unexpected result
+                addResults = main.FALSE
+        if addResults != main.TRUE:
+            main.log.error( "Error executing set add" )
+
+        # Check if set is still correct
+        size = len( onosSet )
+        getResponses = []
+        threads = []
+        for i in main.activeNodes:
+            t = main.Thread( target=main.CLIs[i].setTestGet,
+                             name="setTestGet-" + str( i ),
+                             args=[ onosSetName ] )
+            threads.append( t )
+            t.start()
+        for t in threads:
+            t.join()
+            getResponses.append( t.result )
+        getResults = main.TRUE
+        for i in range( len( main.activeNodes ) ):
+            node = str( main.activeNodes[i] + 1 )
+            if isinstance( getResponses[ i ], list):
+                current = set( getResponses[ i ] )
+                if len( current ) == len( getResponses[ i ] ):
+                    # no repeats
+                    if onosSet != current:
+                        main.log.error( "ONOS" + node + " has incorrect view" +
+                                        " of set " + onosSetName + ":\n" +
+                                        str( getResponses[ i ] ) )
+                        main.log.debug( "Expected: " + str( onosSet ) )
+                        main.log.debug( "Actual: " + str( current ) )
+                        getResults = main.FALSE
+                else:
+                    # error, set is not a set
+                    main.log.error( "ONOS" + node + " has repeat elements in" +
+                                    " set " + onosSetName + ":\n" +
+                                    str( getResponses[ i ] ) )
+                    getResults = main.FALSE
+            elif getResponses[ i ] == main.ERROR:
+                getResults = main.FALSE
+        sizeResponses = []
+        threads = []
+        for i in main.activeNodes:
+            t = main.Thread( target=main.CLIs[i].setTestSize,
+                             name="setTestSize-" + str( i ),
+                             args=[ onosSetName ] )
+            threads.append( t )
+            t.start()
+        for t in threads:
+            t.join()
+            sizeResponses.append( t.result )
+        sizeResults = main.TRUE
+        for i in range( len( main.activeNodes ) ):
+            node = str( main.activeNodes[i] + 1 )
+            if size != sizeResponses[ i ]:
+                sizeResults = main.FALSE
+                main.log.error( "ONOS" + node +
+                                " expected a size of " + str( size ) +
+                                " for set " + onosSetName +
+                                " but got " + str( sizeResponses[ i ] ) )
+        addResults = addResults and getResults and sizeResults
+        utilities.assert_equals( expect=main.TRUE,
+                                 actual=addResults,
+                                 onpass="Set add correct",
+                                 onfail="Set add was incorrect" )
+
+        main.step( "Distributed Set addAll()" )
+        onosSet.update( addAllValue.split() )
+        addResponses = []
+        threads = []
+        for i in main.activeNodes:
+            t = main.Thread( target=main.CLIs[i].setTestAdd,
+                             name="setTestAddAll-" + str( i ),
+                             args=[ onosSetName, addAllValue ] )
+            threads.append( t )
+            t.start()
+        for t in threads:
+            t.join()
+            addResponses.append( t.result )
+
+        # main.TRUE = successfully changed the set
+        # main.FALSE = action resulted in no change in set
+        # main.ERROR - Some error in executing the function
+        addAllResults = main.TRUE
+        for i in range( len( main.activeNodes ) ):
+            if addResponses[ i ] == main.TRUE:
+                # All is well
+                pass
+            elif addResponses[ i ] == main.FALSE:
+                # Already in set, probably fine
+                pass
+            elif addResponses[ i ] == main.ERROR:
+                # Error in execution
+                addAllResults = main.FALSE
+            else:
+                # unexpected result
+                addAllResults = main.FALSE
+        if addAllResults != main.TRUE:
+            main.log.error( "Error executing set addAll" )
+
+        # Check if set is still correct
+        size = len( onosSet )
+        getResponses = []
+        threads = []
+        for i in main.activeNodes:
+            t = main.Thread( target=main.CLIs[i].setTestGet,
+                             name="setTestGet-" + str( i ),
+                             args=[ onosSetName ] )
+            threads.append( t )
+            t.start()
+        for t in threads:
+            t.join()
+            getResponses.append( t.result )
+        getResults = main.TRUE
+        for i in range( len( main.activeNodes ) ):
+            node = str( main.activeNodes[i] + 1 )
+            if isinstance( getResponses[ i ], list):
+                current = set( getResponses[ i ] )
+                if len( current ) == len( getResponses[ i ] ):
+                    # no repeats
+                    if onosSet != current:
+                        main.log.error( "ONOS" + node +
+                                        " has incorrect view" +
+                                        " of set " + onosSetName + ":\n" +
+                                        str( getResponses[ i ] ) )
+                        main.log.debug( "Expected: " + str( onosSet ) )
+                        main.log.debug( "Actual: " + str( current ) )
+                        getResults = main.FALSE
+                else:
+                    # error, set is not a set
+                    main.log.error( "ONOS" + node +
+                                    " has repeat elements in" +
+                                    " set " + onosSetName + ":\n" +
+                                    str( getResponses[ i ] ) )
+                    getResults = main.FALSE
+            elif getResponses[ i ] == main.ERROR:
+                getResults = main.FALSE
+        sizeResponses = []
+        threads = []
+        for i in main.activeNodes:
+            t = main.Thread( target=main.CLIs[i].setTestSize,
+                             name="setTestSize-" + str( i ),
+                             args=[ onosSetName ] )
+            threads.append( t )
+            t.start()
+        for t in threads:
+            t.join()
+            sizeResponses.append( t.result )
+        sizeResults = main.TRUE
+        for i in range( len( main.activeNodes ) ):
+            node = str( main.activeNodes[i] + 1 )
+            if size != sizeResponses[ i ]:
+                sizeResults = main.FALSE
+                main.log.error( "ONOS" + node +
+                                " expected a size of " + str( size ) +
+                                " for set " + onosSetName +
+                                " but got " + str( sizeResponses[ i ] ) )
+        addAllResults = addAllResults and getResults and sizeResults
+        utilities.assert_equals( expect=main.TRUE,
+                                 actual=addAllResults,
+                                 onpass="Set addAll correct",
+                                 onfail="Set addAll was incorrect" )
+
+        main.step( "Distributed Set contains()" )
+        containsResponses = []
+        threads = []
+        for i in main.activeNodes:
+            t = main.Thread( target=main.CLIs[i].setTestGet,
+                             name="setContains-" + str( i ),
+                             args=[ onosSetName ],
+                             kwargs={ "values": addValue } )
+            threads.append( t )
+            t.start()
+        for t in threads:
+            t.join()
+            # NOTE: t.result is a tuple; index 1 is the contains boolean
+            containsResponses.append( t.result )
+
+        containsResults = main.TRUE
+        for i in range( len( main.activeNodes ) ):
+            if containsResponses[ i ] == main.ERROR:
+                containsResults = main.FALSE
+            else:
+                containsResults = containsResults and\
+                                  containsResponses[ i ][ 1 ]
+        utilities.assert_equals( expect=main.TRUE,
+                                 actual=containsResults,
+                                 onpass="Set contains is functional",
+                                 onfail="Set contains failed" )
+
+        main.step( "Distributed Set containsAll()" )
+        containsAllResponses = []
+        threads = []
+        for i in main.activeNodes:
+            t = main.Thread( target=main.CLIs[i].setTestGet,
+                             name="setContainsAll-" + str( i ),
+                             args=[ onosSetName ],
+                             kwargs={ "values": addAllValue } )
+            threads.append( t )
+            t.start()
+        for t in threads:
+            t.join()
+            # NOTE: t.result is a tuple; index 1 is the containsAll boolean
+            containsAllResponses.append( t.result )
+
+        containsAllResults = main.TRUE
+        for i in range( len( main.activeNodes ) ):
+            if containsAllResponses[ i ] == main.ERROR:
+                containsAllResults = main.FALSE
+            else:
+                containsAllResults = containsAllResults and\
+                                     containsAllResponses[ i ][ 1 ]
+        utilities.assert_equals( expect=main.TRUE,
+                                 actual=containsAllResults,
+                                 onpass="Set containsAll is functional",
+                                 onfail="Set containsAll failed" )
+
+        main.step( "Distributed Set remove()" )
+        onosSet.remove( addValue )
+        removeResponses = []
+        threads = []
+        for i in main.activeNodes:
+            t = main.Thread( target=main.CLIs[i].setTestRemove,
+                             name="setTestRemove-" + str( i ),
+                             args=[ onosSetName, addValue ] )
+            threads.append( t )
+            t.start()
+        for t in threads:
+            t.join()
+            removeResponses.append( t.result )
+
+        # main.TRUE = successfully changed the set
+        # main.FALSE = action resulted in no change in set
+        # main.ERROR - Some error in executing the function
+        removeResults = main.TRUE
+        for i in range( len( main.activeNodes ) ):
+            if removeResponses[ i ] == main.TRUE:
+                # All is well
+                pass
+            elif removeResponses[ i ] == main.FALSE:
+                # not in set, probably fine
+                pass
+            elif removeResponses[ i ] == main.ERROR:
+                # Error in execution
+                removeResults = main.FALSE
+            else:
+                # unexpected result
+                removeResults = main.FALSE
+        if removeResults != main.TRUE:
+            main.log.error( "Error executing set remove" )
+
+        # Check if set is still correct
+        size = len( onosSet )
+        getResponses = []
+        threads = []
+        for i in main.activeNodes:
+            t = main.Thread( target=main.CLIs[i].setTestGet,
+                             name="setTestGet-" + str( i ),
+                             args=[ onosSetName ] )
+            threads.append( t )
+            t.start()
+        for t in threads:
+            t.join()
+            getResponses.append( t.result )
+        getResults = main.TRUE
+        for i in range( len( main.activeNodes ) ):
+            node = str( main.activeNodes[i] + 1 )
+            if isinstance( getResponses[ i ], list):
+                current = set( getResponses[ i ] )
+                if len( current ) == len( getResponses[ i ] ):
+                    # no repeats
+                    if onosSet != current:
+                        main.log.error( "ONOS" + node +
+                                        " has incorrect view" +
+                                        " of set " + onosSetName + ":\n" +
+                                        str( getResponses[ i ] ) )
+                        main.log.debug( "Expected: " + str( onosSet ) )
+                        main.log.debug( "Actual: " + str( current ) )
+                        getResults = main.FALSE
+                else:
+                    # error, set is not a set
+                    main.log.error( "ONOS" + node +
+                                    " has repeat elements in" +
+                                    " set " + onosSetName + ":\n" +
+                                    str( getResponses[ i ] ) )
+                    getResults = main.FALSE
+            elif getResponses[ i ] == main.ERROR:
+                getResults = main.FALSE
+        sizeResponses = []
+        threads = []
+        for i in main.activeNodes:
+            t = main.Thread( target=main.CLIs[i].setTestSize,
+                             name="setTestSize-" + str( i ),
+                             args=[ onosSetName ] )
+            threads.append( t )
+            t.start()
+        for t in threads:
+            t.join()
+            sizeResponses.append( t.result )
+        sizeResults = main.TRUE
+        for i in range( len( main.activeNodes ) ):
+            node = str( main.activeNodes[i] + 1 )
+            if size != sizeResponses[ i ]:
+                sizeResults = main.FALSE
+                main.log.error( "ONOS" + node +
+                                " expected a size of " + str( size ) +
+                                " for set " + onosSetName +
+                                " but got " + str( sizeResponses[ i ] ) )
+        removeResults = removeResults and getResults and sizeResults
+        utilities.assert_equals( expect=main.TRUE,
+                                 actual=removeResults,
+                                 onpass="Set remove correct",
+                                 onfail="Set remove was incorrect" )
+
+        main.step( "Distributed Set removeAll()" )
+        onosSet.difference_update( addAllValue.split() )
+        removeAllResponses = []
+        threads = []
+        try:
+            for i in main.activeNodes:
+                t = main.Thread( target=main.CLIs[i].setTestRemove,
+                                 name="setTestRemoveAll-" + str( i ),
+                                 args=[ onosSetName, addAllValue ] )
+                threads.append( t )
+                t.start()
+            for t in threads:
+                t.join()
+                removeAllResponses.append( t.result )
+        except Exception as e:
+            main.log.exception( e )
+
+        # main.TRUE = successfully changed the set
+        # main.FALSE = action resulted in no change in set
+        # main.ERROR - Some error in executing the function
+        removeAllResults = main.TRUE
+        for i in range( len( main.activeNodes ) ):
+            if removeAllResponses[ i ] == main.TRUE:
+                # All is well
+                pass
+            elif removeAllResponses[ i ] == main.FALSE:
+                # not in set, probably fine
+                pass
+            elif removeAllResponses[ i ] == main.ERROR:
+                # Error in execution
+                removeAllResults = main.FALSE
+            else:
+                # unexpected result
+                removeAllResults = main.FALSE
+        if removeAllResults != main.TRUE:
+            main.log.error( "Error executing set removeAll" )
+
+        # Check if set is still correct
+        size = len( onosSet )
+        getResponses = []
+        threads = []
+        for i in main.activeNodes:
+            t = main.Thread( target=main.CLIs[i].setTestGet,
+                             name="setTestGet-" + str( i ),
+                             args=[ onosSetName ] )
+            threads.append( t )
+            t.start()
+        for t in threads:
+            t.join()
+            getResponses.append( t.result )
+        getResults = main.TRUE
+        for i in range( len( main.activeNodes ) ):
+            node = str( main.activeNodes[i] + 1 )
+            if isinstance( getResponses[ i ], list):
+                current = set( getResponses[ i ] )
+                if len( current ) == len( getResponses[ i ] ):
+                    # no repeats
+                    if onosSet != current:
+                        main.log.error( "ONOS" + node +
+                                        " has incorrect view" +
+                                        " of set " + onosSetName + ":\n" +
+                                        str( getResponses[ i ] ) )
+                        main.log.debug( "Expected: " + str( onosSet ) )
+                        main.log.debug( "Actual: " + str( current ) )
+                        getResults = main.FALSE
+                else:
+                    # error, set is not a set
+                    main.log.error( "ONOS" + node +
+                                    " has repeat elements in" +
+                                    " set " + onosSetName + ":\n" +
+                                    str( getResponses[ i ] ) )
+                    getResults = main.FALSE
+            elif getResponses[ i ] == main.ERROR:
+                getResults = main.FALSE
+        sizeResponses = []
+        threads = []
+        for i in main.activeNodes:
+            t = main.Thread( target=main.CLIs[i].setTestSize,
+                             name="setTestSize-" + str( i ),
+                             args=[ onosSetName ] )
+            threads.append( t )
+            t.start()
+        for t in threads:
+            t.join()
+            sizeResponses.append( t.result )
+        sizeResults = main.TRUE
+        for i in range( len( main.activeNodes ) ):
+            node = str( main.activeNodes[i] + 1 )
+            if size != sizeResponses[ i ]:
+                sizeResults = main.FALSE
+                main.log.error( "ONOS" + node +
+                                " expected a size of " + str( size ) +
+                                " for set " + onosSetName +
+                                " but got " + str( sizeResponses[ i ] ) )
+        removeAllResults = removeAllResults and getResults and sizeResults
+        utilities.assert_equals( expect=main.TRUE,
+                                 actual=removeAllResults,
+                                 onpass="Set removeAll correct",
+                                 onfail="Set removeAll was incorrect" )
+
+        main.step( "Distributed Set addAll()" )
+        onosSet.update( addAllValue.split() )
+        addResponses = []
+        threads = []
+        for i in main.activeNodes:
+            t = main.Thread( target=main.CLIs[i].setTestAdd,
+                             name="setTestAddAll-" + str( i ),
+                             args=[ onosSetName, addAllValue ] )
+            threads.append( t )
+            t.start()
+        for t in threads:
+            t.join()
+            addResponses.append( t.result )
+
+        # main.TRUE = successfully changed the set
+        # main.FALSE = action resulted in no change in set
+        # main.ERROR - Some error in executing the function
+        addAllResults = main.TRUE
+        for i in range( len( main.activeNodes ) ):
+            if addResponses[ i ] == main.TRUE:
+                # All is well
+                pass
+            elif addResponses[ i ] == main.FALSE:
+                # Already in set, probably fine
+                pass
+            elif addResponses[ i ] == main.ERROR:
+                # Error in execution
+                addAllResults = main.FALSE
+            else:
+                # unexpected result
+                addAllResults = main.FALSE
+        if addAllResults != main.TRUE:
+            main.log.error( "Error executing set addAll" )
+
+        # Check if set is still correct
+        size = len( onosSet )
+        getResponses = []
+        threads = []
+        for i in main.activeNodes:
+            t = main.Thread( target=main.CLIs[i].setTestGet,
+                             name="setTestGet-" + str( i ),
+                             args=[ onosSetName ] )
+            threads.append( t )
+            t.start()
+        for t in threads:
+            t.join()
+            getResponses.append( t.result )
+        getResults = main.TRUE
+        for i in range( len( main.activeNodes ) ):
+            node = str( main.activeNodes[i] + 1 )
+            if isinstance( getResponses[ i ], list):
+                current = set( getResponses[ i ] )
+                if len( current ) == len( getResponses[ i ] ):
+                    # no repeats
+                    if onosSet != current:
+                        main.log.error( "ONOS" + node +
+                                        " has incorrect view" +
+                                        " of set " + onosSetName + ":\n" +
+                                        str( getResponses[ i ] ) )
+                        main.log.debug( "Expected: " + str( onosSet ) )
+                        main.log.debug( "Actual: " + str( current ) )
+                        getResults = main.FALSE
+                else:
+                    # error, set is not a set
+                    main.log.error( "ONOS" + node +
+                                    " has repeat elements in" +
+                                    " set " + onosSetName + ":\n" +
+                                    str( getResponses[ i ] ) )
+                    getResults = main.FALSE
+            elif getResponses[ i ] == main.ERROR:
+                getResults = main.FALSE
+        sizeResponses = []
+        threads = []
+        for i in main.activeNodes:
+            t = main.Thread( target=main.CLIs[i].setTestSize,
+                             name="setTestSize-" + str( i ),
+                             args=[ onosSetName ] )
+            threads.append( t )
+            t.start()
+        for t in threads:
+            t.join()
+            sizeResponses.append( t.result )
+        sizeResults = main.TRUE
+        for i in range( len( main.activeNodes ) ):
+            node = str( main.activeNodes[i] + 1 )
+            if size != sizeResponses[ i ]:
+                sizeResults = main.FALSE
+                main.log.error( "ONOS" + node +
+                                " expected a size of " + str( size ) +
+                                " for set " + onosSetName +
+                                " but got " + str( sizeResponses[ i ] ) )
+        addAllResults = addAllResults and getResults and sizeResults
+        utilities.assert_equals( expect=main.TRUE,
+                                 actual=addAllResults,
+                                 onpass="Set addAll correct",
+                                 onfail="Set addAll was incorrect" )
+
+        main.step( "Distributed Set clear()" )
+        onosSet.clear()
+        clearResponses = []
+        threads = []
+        for i in main.activeNodes:
+            t = main.Thread( target=main.CLIs[i].setTestRemove,
+                             name="setTestClear-" + str( i ),
+                             args=[ onosSetName, " " ],  # value doesn't matter when clearing
+                             kwargs={ "clear": True } )
+            threads.append( t )
+            t.start()
+        for t in threads:
+            t.join()
+            clearResponses.append( t.result )
+
+        # main.TRUE = successfully changed the set
+        # main.FALSE = action resulted in no change in set
+        # main.ERROR - Some error in executing the function
+        clearResults = main.TRUE
+        for i in range( len( main.activeNodes ) ):
+            if clearResponses[ i ] == main.TRUE:
+                # All is well
+                pass
+            elif clearResponses[ i ] == main.FALSE:
+                # Nothing set, probably fine
+                pass
+            elif clearResponses[ i ] == main.ERROR:
+                # Error in execution
+                clearResults = main.FALSE
+            else:
+                # unexpected result
+                clearResults = main.FALSE
+        if clearResults != main.TRUE:
+            main.log.error( "Error executing set clear" )
+
+        # Check if set is still correct
+        size = len( onosSet )
+        getResponses = []
+        threads = []
+        for i in main.activeNodes:
+            t = main.Thread( target=main.CLIs[i].setTestGet,
+                             name="setTestGet-" + str( i ),
+                             args=[ onosSetName ] )
+            threads.append( t )
+            t.start()
+        for t in threads:
+            t.join()
+            getResponses.append( t.result )
+        getResults = main.TRUE
+        for i in range( len( main.activeNodes ) ):
+            node = str( main.activeNodes[i] + 1 )
+            if isinstance( getResponses[ i ], list):
+                current = set( getResponses[ i ] )
+                if len( current ) == len( getResponses[ i ] ):
+                    # no repeats
+                    if onosSet != current:
+                        main.log.error( "ONOS" + node +
+                                        " has incorrect view" +
+                                        " of set " + onosSetName + ":\n" +
+                                        str( getResponses[ i ] ) )
+                        main.log.debug( "Expected: " + str( onosSet ) )
+                        main.log.debug( "Actual: " + str( current ) )
+                        getResults = main.FALSE
+                else:
+                    # error, set is not a set
+                    main.log.error( "ONOS" + node +
+                                    " has repeat elements in" +
+                                    " set " + onosSetName + ":\n" +
+                                    str( getResponses[ i ] ) )
+                    getResults = main.FALSE
+            elif getResponses[ i ] == main.ERROR:
+                getResults = main.FALSE
+        sizeResponses = []
+        threads = []
+        for i in main.activeNodes:
+            t = main.Thread( target=main.CLIs[i].setTestSize,
+                             name="setTestSize-" + str( i ),
+                             args=[ onosSetName ] )
+            threads.append( t )
+            t.start()
+        for t in threads:
+            t.join()
+            sizeResponses.append( t.result )
+        sizeResults = main.TRUE
+        for i in range( len( main.activeNodes ) ):
+            node = str( main.activeNodes[i] + 1 )
+            if size != sizeResponses[ i ]:
+                sizeResults = main.FALSE
+                main.log.error( "ONOS" + node +
+                                " expected a size of " + str( size ) +
+                                " for set " + onosSetName +
+                                " but got " + str( sizeResponses[ i ] ) )
+        clearResults = clearResults and getResults and sizeResults
+        utilities.assert_equals( expect=main.TRUE,
+                                 actual=clearResults,
+                                 onpass="Set clear correct",
+                                 onfail="Set clear was incorrect" )
+
+        main.step( "Distributed Set addAll()" )
+        onosSet.update( addAllValue.split() )
+        addResponses = []
+        threads = []
+        for i in main.activeNodes:
+            t = main.Thread( target=main.CLIs[i].setTestAdd,
+                             name="setTestAddAll-" + str( i ),
+                             args=[ onosSetName, addAllValue ] )
+            threads.append( t )
+            t.start()
+        for t in threads:
+            t.join()
+            addResponses.append( t.result )
+
+        # main.TRUE = successfully changed the set
+        # main.FALSE = action resulted in no change in set
+        # main.ERROR = some error in executing the function
+        addAllResults = main.TRUE
+        for i in range( len( main.activeNodes ) ):
+            if addResponses[ i ] == main.TRUE:
+                # All is well
+                pass
+            elif addResponses[ i ] == main.FALSE:
+                # Already in set, probably fine
+                pass
+            elif addResponses[ i ] == main.ERROR:
+                # Error in execution
+                addAllResults = main.FALSE
+            else:
+                # unexpected result
+                addAllResults = main.FALSE
+        if addAllResults != main.TRUE:
+            main.log.error( "Error executing set addAll" )
+
+        # Check if set is still correct
+        size = len( onosSet )
+        getResponses = []
+        threads = []
+        for i in main.activeNodes:
+            t = main.Thread( target=main.CLIs[i].setTestGet,
+                             name="setTestGet-" + str( i ),
+                             args=[ onosSetName ] )
+            threads.append( t )
+            t.start()
+        for t in threads:
+            t.join()
+            getResponses.append( t.result )
+        getResults = main.TRUE
+        for i in range( len( main.activeNodes ) ):
+            node = str( main.activeNodes[i] + 1 )
+            if isinstance( getResponses[ i ], list):
+                current = set( getResponses[ i ] )
+                if len( current ) == len( getResponses[ i ] ):
+                    # no repeats
+                    if onosSet != current:
+                        main.log.error( "ONOS" + node +
+                                        " has incorrect view" +
+                                        " of set " + onosSetName + ":\n" +
+                                        str( getResponses[ i ] ) )
+                        main.log.debug( "Expected: " + str( onosSet ) )
+                        main.log.debug( "Actual: " + str( current ) )
+                        getResults = main.FALSE
+                else:
+                    # error, set is not a set
+                    main.log.error( "ONOS" + node +
+                                    " has repeat elements in" +
+                                    " set " + onosSetName + ":\n" +
+                                    str( getResponses[ i ] ) )
+                    getResults = main.FALSE
+            elif getResponses[ i ] == main.ERROR:
+                getResults = main.FALSE
+        sizeResponses = []
+        threads = []
+        for i in main.activeNodes:
+            t = main.Thread( target=main.CLIs[i].setTestSize,
+                             name="setTestSize-" + str( i ),
+                             args=[ onosSetName ] )
+            threads.append( t )
+            t.start()
+        for t in threads:
+            t.join()
+            sizeResponses.append( t.result )
+        sizeResults = main.TRUE
+        for i in range( len( main.activeNodes ) ):
+            node = str( main.activeNodes[i] + 1 )
+            if size != sizeResponses[ i ]:
+                sizeResults = main.FALSE
+                main.log.error( "ONOS" + node +
+                                " expected a size of " + str( size ) +
+                                " for set " + onosSetName +
+                                " but got " + str( sizeResponses[ i ] ) )
+        addAllResults = addAllResults and getResults and sizeResults
+        utilities.assert_equals( expect=main.TRUE,
+                                 actual=addAllResults,
+                                 onpass="Set addAll correct",
+                                 onfail="Set addAll was incorrect" )
+
+        main.step( "Distributed Set retain()" )
+        onosSet.intersection_update( retainValue.split() )
+        retainResponses = []
+        threads = []
+        for i in main.activeNodes:
+            t = main.Thread( target=main.CLIs[i].setTestRemove,
+                             name="setTestRetain-" + str( i ),
+                             args=[ onosSetName, retainValue ],
+                             kwargs={ "retain": True } )
+            threads.append( t )
+            t.start()
+        for t in threads:
+            t.join()
+            retainResponses.append( t.result )
+
+        # main.TRUE = successfully changed the set
+        # main.FALSE = action resulted in no change in set
+        # main.ERROR = some error in executing the function
+        retainResults = main.TRUE
+        for i in range( len( main.activeNodes ) ):
+            if retainResponses[ i ] == main.TRUE:
+                # All is well
+                pass
+            elif retainResponses[ i ] == main.FALSE:
+                # Nothing removed, probably fine
+                pass
+            elif retainResponses[ i ] == main.ERROR:
+                # Error in execution
+                retainResults = main.FALSE
+            else:
+                # unexpected result
+                retainResults = main.FALSE
+        if retainResults != main.TRUE:
+            main.log.error( "Error executing set retain" )
+
+        # Check if set is still correct
+        size = len( onosSet )
+        getResponses = []
+        threads = []
+        for i in main.activeNodes:
+            t = main.Thread( target=main.CLIs[i].setTestGet,
+                             name="setTestGet-" + str( i ),
+                             args=[ onosSetName ] )
+            threads.append( t )
+            t.start()
+        for t in threads:
+            t.join()
+            getResponses.append( t.result )
+        getResults = main.TRUE
+        for i in range( len( main.activeNodes ) ):
+            node = str( main.activeNodes[i] + 1 )
+            if isinstance( getResponses[ i ], list):
+                current = set( getResponses[ i ] )
+                if len( current ) == len( getResponses[ i ] ):
+                    # no repeats
+                    if onosSet != current:
+                        main.log.error( "ONOS" + node +
+                                        " has incorrect view" +
+                                        " of set " + onosSetName + ":\n" +
+                                        str( getResponses[ i ] ) )
+                        main.log.debug( "Expected: " + str( onosSet ) )
+                        main.log.debug( "Actual: " + str( current ) )
+                        getResults = main.FALSE
+                else:
+                    # error, set is not a set
+                    main.log.error( "ONOS" + node +
+                                    " has repeat elements in" +
+                                    " set " + onosSetName + ":\n" +
+                                    str( getResponses[ i ] ) )
+                    getResults = main.FALSE
+            elif getResponses[ i ] == main.ERROR:
+                getResults = main.FALSE
+        sizeResponses = []
+        threads = []
+        for i in main.activeNodes:
+            t = main.Thread( target=main.CLIs[i].setTestSize,
+                             name="setTestSize-" + str( i ),
+                             args=[ onosSetName ] )
+            threads.append( t )
+            t.start()
+        for t in threads:
+            t.join()
+            sizeResponses.append( t.result )
+        sizeResults = main.TRUE
+        for i in range( len( main.activeNodes ) ):
+            node = str( main.activeNodes[i] + 1 )
+            if size != sizeResponses[ i ]:
+                sizeResults = main.FALSE
+                main.log.error( "ONOS" + node + " expected a size of " +
+                                str( size ) + " for set " + onosSetName +
+                                " but got " + str( sizeResponses[ i ] ) )
+        retainResults = retainResults and getResults and sizeResults
+        utilities.assert_equals( expect=main.TRUE,
+                                 actual=retainResults,
+                                 onpass="Set retain correct",
+                                 onfail="Set retain was incorrect" )
+
+        # Transactional maps
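+        # A transactional map commits a batch of updates as one transaction.
+        # One node writes Key1..Key100 with the same value, then the get step
+        # below reads every key back from every active node.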
+        main.step( "Partitioned Transactional maps put" )
+        tMapValue = "Testing"
+        numKeys = 100
+        putResult = True
+        node = main.activeNodes[0]
+        putResponses = main.CLIs[node].transactionalMapPut( numKeys, tMapValue )
+        if putResponses and len( putResponses ) == 100:
+            for i in putResponses:
+                if putResponses[ i ][ 'value' ] != tMapValue:
+                    putResult = False
+        else:
+            putResult = False
+        if not putResult:
+            main.log.debug( "Put response values: " + str( putResponses ) )
+        utilities.assert_equals( expect=True,
+                                 actual=putResult,
+                                 onpass="Partitioned Transactional Map put successful",
+                                 onfail="Partitioned Transactional Map put values are incorrect" )
+
+        main.step( "Partitioned Transactional maps get" )
+        getCheck = True
+        for n in range( 1, numKeys + 1 ):
+            getResponses = []
+            threads = []
+            valueCheck = True
+            for i in main.activeNodes:
+                t = main.Thread( target=main.CLIs[i].transactionalMapGet,
+                                 name="TMap-get-" + str( i ),
+                                 args=[ "Key" + str( n ) ] )
+                threads.append( t )
+                t.start()
+            for t in threads:
+                t.join()
+                getResponses.append( t.result )
+            for node in getResponses:
+                if node != tMapValue:
+                    valueCheck = False
+            if not valueCheck:
+                main.log.warn( "Values for key 'Key" + str( n ) + "' do not match:" )
+                main.log.warn( getResponses )
+            getCheck = getCheck and valueCheck
+        utilities.assert_equals( expect=True,
+                                 actual=getCheck,
+                                 onpass="Partitioned Transactional Map get values were correct",
+                                 onfail="Partitioned Transactional Map values incorrect" )
+
+        main.step( "In-memory Transactional maps put" )
+        tMapValue = "Testing"
+        numKeys = 100
+        putResult = True
+        node = main.activeNodes[0]
+        putResponses = main.CLIs[node].transactionalMapPut( numKeys, tMapValue, inMemory=True )
+        if putResponses and len( putResponses ) == 100:
+            for i in putResponses:
+                if putResponses[ i ][ 'value' ] != tMapValue:
+                    putResult = False
+        else:
+            putResult = False
+        if not putResult:
+            main.log.debug( "Put response values: " + str( putResponses ) )
+        utilities.assert_equals( expect=True,
+                                 actual=putResult,
+                                 onpass="In-Memory Transactional Map put successful",
+                                 onfail="In-Memory Transactional Map put values are incorrect" )
+
+        main.step( "In-Memory Transactional maps get" )
+        getCheck = True
+        for n in range( 1, numKeys + 1 ):
+            getResponses = []
+            threads = []
+            valueCheck = True
+            for i in main.activeNodes:
+                t = main.Thread( target=main.CLIs[i].transactionalMapGet,
+                                 name="TMap-get-" + str( i ),
+                                 args=[ "Key" + str( n ) ],
+                                 kwargs={ "inMemory": True } )
+                threads.append( t )
+                t.start()
+            for t in threads:
+                t.join()
+                getResponses.append( t.result )
+            for node in getResponses:
+                if node != tMapValue:
+                    valueCheck = False
+            if not valueCheck:
+                main.log.warn( "Values for key 'Key" + str( n ) + "' do not match:" )
+                main.log.warn( getResponses )
+            getCheck = getCheck and valueCheck
+        utilities.assert_equals( expect=True,
+                                 actual=getCheck,
+                                 onpass="In-Memory Transactional Map get values were correct",
+                                 onfail="In-Memory Transactional Map values incorrect" )
diff --git a/TestON/tests/HAfullNetPartition/HAfullNetPartition.topo b/TestON/tests/HAfullNetPartition/HAfullNetPartition.topo
new file mode 100644
index 0000000..5ba79fc
--- /dev/null
+++ b/TestON/tests/HAfullNetPartition/HAfullNetPartition.topo
@@ -0,0 +1,171 @@
+<TOPOLOGY>
+    <COMPONENT>
+
+        <ONOSbench>
+            <host>localhost</host>
+            <user>admin</user>
+            <password>onos_test</password>
+            <type>OnosDriver</type>
+            <connect_order>1</connect_order>
+            <COMPONENTS>
+            </COMPONENTS>
+        </ONOSbench>
+
+        <ONOScli1>
+            <host>localhost</host>
+            <user>admin</user>
+            <password>onos_test</password>
+            <type>OnosCliDriver</type>
+            <connect_order>2</connect_order>
+            <COMPONENTS> </COMPONENTS>
+        </ONOScli1>
+
+        <ONOScli2>
+            <host>localhost</host>
+            <user>admin</user>
+            <password>onos_test</password>
+            <type>OnosCliDriver</type>
+            <connect_order>3</connect_order>
+            <COMPONENTS> </COMPONENTS>
+        </ONOScli2>
+
+        <ONOScli3>
+            <host>localhost</host>
+            <user>admin</user>
+            <password>onos_test</password>
+            <type>OnosCliDriver</type>
+            <connect_order>4</connect_order>
+            <COMPONENTS> </COMPONENTS>
+        </ONOScli3>
+
+
+        <ONOScli4>
+            <host>localhost</host>
+            <user>admin</user>
+            <password>onos_test</password>
+            <type>OnosCliDriver</type>
+            <connect_order>5</connect_order>
+            <COMPONENTS> </COMPONENTS>
+        </ONOScli4>
+
+
+        <ONOScli5>
+            <host>localhost</host>
+            <user>admin</user>
+            <password>onos_test</password>
+            <type>OnosCliDriver</type>
+            <connect_order>6</connect_order>
+            <COMPONENTS> </COMPONENTS>
+        </ONOScli5>
+
+
+        <ONOScli6>
+            <host>localhost</host>
+            <user>admin</user>
+            <password>onos_test</password>
+            <type>OnosCliDriver</type>
+            <connect_order>7</connect_order>
+            <COMPONENTS> </COMPONENTS>
+        </ONOScli6>
+
+
+        <ONOScli7>
+            <host>localhost</host>
+            <user>admin</user>
+            <password>onos_test</password>
+            <type>OnosCliDriver</type>
+            <connect_order>8</connect_order>
+            <COMPONENTS> </COMPONENTS>
+        </ONOScli7>
+
+        <ONOS1>
+            <host>OC1</host>
+            <user>sdn</user>
+            <password>rocks</password>
+            <type>OnosDriver</type>
+            <connect_order>9</connect_order>
+            <COMPONENTS> </COMPONENTS>
+        </ONOS1>
+
+        <ONOS2>
+            <host>OC2</host>
+            <user>sdn</user>
+            <password>rocks</password>
+            <type>OnosDriver</type>
+            <connect_order>10</connect_order>
+            <COMPONENTS> </COMPONENTS>
+        </ONOS2>
+
+        <ONOS3>
+            <host>OC3</host>
+            <user>sdn</user>
+            <password>rocks</password>
+            <type>OnosDriver</type>
+            <connect_order>11</connect_order>
+            <COMPONENTS> </COMPONENTS>
+        </ONOS3>
+
+        <ONOS4>
+            <host>OC4</host>
+            <user>sdn</user>
+            <password>rocks</password>
+            <type>OnosDriver</type>
+            <connect_order>12</connect_order>
+            <COMPONENTS> </COMPONENTS>
+        </ONOS4>
+
+        <ONOS5>
+            <host>OC5</host>
+            <user>sdn</user>
+            <password>rocks</password>
+            <type>OnosDriver</type>
+            <connect_order>13</connect_order>
+            <COMPONENTS> </COMPONENTS>
+        </ONOS5>
+
+        <ONOS6>
+            <host>OC6</host>
+            <user>sdn</user>
+            <password>rocks</password>
+            <type>OnosDriver</type>
+            <connect_order>14</connect_order>
+            <COMPONENTS> </COMPONENTS>
+        </ONOS6>
+
+        <ONOS7>
+            <host>OC7</host>
+            <user>sdn</user>
+            <password>rocks</password>
+            <type>OnosDriver</type>
+            <connect_order>15</connect_order>
+            <COMPONENTS> </COMPONENTS>
+        </ONOS7>
+
+        <Mininet1>
+            <host>OCN</host>
+            <user>admin</user>
+            <password>onos_test</password>
+            <type>MininetCliDriver</type>
+            <connect_order>16</connect_order>
+            <COMPONENTS>
+                # Specify the options for Mininet
+                <arg1> --custom ~/mininet/custom/obelisk.py </arg1>
+                <arg2> --topo obelisk </arg2>
+                <arg3> --switch ovs,protocols=OpenFlow13 </arg3>
+                <controller> none </controller>
+                <home>~/mininet/custom/</home>
+            </COMPONENTS>
+        </Mininet1>
+
+        <Mininet2>
+            <host>OCN</host>
+            <user>admin</user>
+            <password>onos_test</password>
+            <type>RemoteMininetDriver</type>
+            <connect_order>17</connect_order>
+            <COMPONENTS>
+            </COMPONENTS>
+        </Mininet2>
+
+    </COMPONENT>
+</TOPOLOGY>
diff --git a/TestON/tests/HAfullNetPartition/README b/TestON/tests/HAfullNetPartition/README
new file mode 100644
index 0000000..415af4c
--- /dev/null
+++ b/TestON/tests/HAfullNetPartition/README
@@ -0,0 +1,27 @@
+This test is designed to verify that an ONOS cluster behaves correctly when
+some ONOS nodes are partitioned. Currently, we partition the nodes so that
+each raft partition loses a member, but we make sure that a majority of
+nodes remains available in each partition. The nodes are partitioned using
+iptables firewall rules.
+
+As written, the test only supports an ONOS cluster of 3, 5, or 7 nodes.
+This is because the test doesn't apply to a single-node cluster, ONOS clusters
+should be deployed in odd numbers, and the raft partition generation and node
+partitioning scheme used doesn't give the same properties for clusters of more
+than 7 nodes; namely, each partition would no longer have exactly one node
+unavailable. For example, with seven nodes the bundled onos-gen-partitions
+script generates the raft partitions [1,2,3], [3,4,5], [5,6,7], and [7,1,2].
+
+The general structure of the test:
+- Startup
+- Assign switches
+- Verify ONOS state and functionality
+    - Device mastership
+    - Intents
+    - Leadership election
+    - Distributed Primitives
+- Partition some ONOS nodes
+- Verify ONOS state and functionality
+- Heal partition
+- Verify ONOS state and functionality
+- Dataplane failures
+    - link down and up
+    - switch down and up
diff --git a/TestON/tests/HAfullNetPartition/__init__.py b/TestON/tests/HAfullNetPartition/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/TestON/tests/HAfullNetPartition/__init__.py
diff --git a/TestON/tests/HAfullNetPartition/dependencies/Counters.py b/TestON/tests/HAfullNetPartition/dependencies/Counters.py
new file mode 100644
index 0000000..265ba13
--- /dev/null
+++ b/TestON/tests/HAfullNetPartition/dependencies/Counters.py
@@ -0,0 +1,112 @@
+def __init__( self ):
+    self.default = ''
+
+def consistentCheck():
+    """
+    Checks that TestON counters are consistent across all nodes.
+
+    Returns the tuple (onosCounters, consistent)
+    - onosCounters is the parsed json output of the counters command on all nodes
+    - consistent is main.TRUE if all "TestON" counters are consistent across
+        all nodes, otherwise main.FALSE
+    """
+    import json
+    try:
+        correctResults = main.TRUE
+        # Get onos counters results
+        onosCountersRaw = []
+        threads = []
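+        # Query each active node in parallel, retrying the counters call up
+        # to 5 times with a randomized sleep in case a node is slow to answer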
+        for i in main.activeNodes:
+            t = main.Thread( target=utilities.retry,
+                             name="counters-" + str( i ),
+                             args=[ main.CLIs[i].counters, [ None ] ],
+                             kwargs= { 'sleep': 5, 'attempts': 5,
+                                       'randomTime': True } )
+            threads.append( t )
+            t.start()
+        for t in threads:
+            t.join()
+            onosCountersRaw.append( t.result )
+        onosCounters = []
+        for i in range( len( main.activeNodes ) ):
+            try:
+                onosCounters.append( json.loads( onosCountersRaw[i] ) )
+            except ( ValueError, TypeError ):
+                main.log.error( "Could not parse counters response from ONOS" +
+                                str( main.activeNodes[i] + 1 ) )
+                main.log.warn( repr( onosCountersRaw[ i ] ) )
+                onosCounters.append( [] )
+                #return ( onosCounters, main.FALSE )
+
+        testCounters = {}
+        # make a list of all the "TestON-*" counters in ONOS
+        # looks like a dict whose keys are the names of the ONOS nodes and
+        # whose values are lists of the counters, e.g.
+        # { "ONOS1": [ {"name":"TestON-inMemory","value":56},
+        #              {"name":"TestON-Partitions","value":56} ]
+        # }
+        # NOTE: There is an assumption that all nodes are active
+        #        based on the above for loops
+        for controllerIndex, controller in enumerate( onosCounters ):
+            for dbType in controller:
+                for dbName, items in dbType.iteritems():
+                    for item in items:
+                        if 'TestON' in item['name']:
+                            node = 'ONOS' + str( main.activeNodes[ controllerIndex ] + 1 )
+                            try:
+                                testCounters[node].append( item )
+                            except KeyError:
+                                testCounters[node] = [ item ]
+        # compare the counters on each node
+        firstV = testCounters.values()[0]
+        tmp = [ v == firstV for k, v in testCounters.iteritems() ]
+        if all( tmp ):
+            consistent = main.TRUE
+        else:
+            consistent = main.FALSE
+            main.log.error( "ONOS nodes have different values for counters:\n" +
+                            testCounters )
+        return ( onosCounters, consistent )
+    except Exception:
+        main.log.exception( "" )
+        main.cleanup()
+        main.exit()
+
+def counterCheck( counterName, counterValue ):
+    """
+    Checks that TestON counters are consistent across all nodes and that
+    specified counter is in ONOS with the given value
+    """
+    import json
+    try:
+        correctResults = main.TRUE
+        # Get onos counters results and consistentCheck
+        onosCounters, consistent = main.Counters.consistentCheck()
+        # Check for correct values
+        for i in range( len( main.activeNodes ) ):
+            current = onosCounters[i]
+            onosValue = None
+            try:
+                for database in current:
+                    database = database.values()[0]
+                    for counter in database:
+                        if counter.get( 'name' ) == counterName:
+                            onosValue = counter.get( 'value' )
+                            break
+            except AttributeError, e:
+                node = str( main.activeNodes[i] + 1 )
+                main.log.error( "ONOS" + node + " counters result " +
+                                "is not as expected" )
+                correctResults = main.FALSE
+            if onosValue == counterValue:
+                main.log.info( counterName + " counter value is correct" )
+            else:
+                main.log.error( counterName + " counter value is incorrect," +
+                                " expected value: " + str( counterValue ) +
+                                " current value: " + str( onosValue ) )
+                correctResults = main.FALSE
+        return consistent and correctResults
+    except Exception:
+        main.log.exception( "" )
+        main.cleanup()
+        main.exit()
diff --git a/TestON/tests/HAfullNetPartition/dependencies/__init__.py b/TestON/tests/HAfullNetPartition/dependencies/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/TestON/tests/HAfullNetPartition/dependencies/__init__.py
diff --git a/TestON/tests/HAfullNetPartition/dependencies/obelisk.py b/TestON/tests/HAfullNetPartition/dependencies/obelisk.py
new file mode 100755
index 0000000..d613806
--- /dev/null
+++ b/TestON/tests/HAfullNetPartition/dependencies/obelisk.py
@@ -0,0 +1,69 @@
+#!/usr/bin/env python
+
+from mininet.topo import Topo
+from mininet.net import Mininet
+from mininet.node import RemoteController
+from mininet.cli import CLI
+from mininet.log import setLogLevel
+
+class ObeliskTopo( Topo ):
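+    """28-switch test topology: a six-switch core (s1-s3, s5, s6, s28),
+    two aggregation switches (s4 and s7) each fanning out to ten edge
+    switches, with one host attached to every switch."""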
+    def __init__( self ):
+        Topo.__init__( self )
+        topSwitch = self.addSwitch('s1',dpid='1000'.zfill(16))
+        leftTopSwitch = self.addSwitch('s2',dpid='2000'.zfill(16))
+        rightTopSwitch = self.addSwitch('s5',dpid='5000'.zfill(16))
+        leftBotSwitch = self.addSwitch('s3',dpid='3000'.zfill(16))
+        rightBotSwitch = self.addSwitch('s6',dpid='6000'.zfill(16))
+        midBotSwitch = self.addSwitch('s28',dpid='2800'.zfill(16))
+
+        topHost = self.addHost( 'h1' )
+        leftTopHost = self.addHost('h2')
+        rightTopHost = self.addHost('h5')
+        leftBotHost = self.addHost('h3')
+        rightBotHost = self.addHost('h6')
+        midBotHost = self.addHost('h28')
+        self.addLink(topSwitch,topHost)
+        self.addLink(leftTopSwitch,leftTopHost)
+        self.addLink(rightTopSwitch,rightTopHost)
+        self.addLink(leftBotSwitch,leftBotHost)
+        self.addLink(rightBotSwitch,rightBotHost)
+        self.addLink(midBotSwitch,midBotHost)
+        self.addLink(leftTopSwitch,rightTopSwitch)
+        self.addLink(topSwitch,leftTopSwitch)
+        self.addLink(topSwitch,rightTopSwitch)
+        self.addLink(leftTopSwitch,leftBotSwitch)
+        self.addLink(rightTopSwitch,rightBotSwitch)
+        self.addLink(leftBotSwitch,midBotSwitch)
+        self.addLink(midBotSwitch,rightBotSwitch)
+
+        agg1Switch = self.addSwitch('s4',dpid = '3004'.zfill(16))
+        agg2Switch = self.addSwitch('s7',dpid = '6007'.zfill(16))
+        agg1Host = self.addHost('h4')
+        agg2Host = self.addHost('h7')
+        self.addLink(agg1Switch,agg1Host)
+        self.addLink(agg2Switch,agg2Host)
+        self.addLink(agg1Switch, leftBotSwitch)
+        self.addLink(agg2Switch, rightBotSwitch)
+
+        for i in range(10):
+            num = str(i+8)
+            switch = self.addSwitch('s'+num,dpid = ('30'+num.zfill(2)).zfill(16))
+            host = self.addHost('h'+num)
+            self.addLink(switch, host)
+            self.addLink(switch, agg1Switch)
+
+        for i in range(10):
+            num = str(i+18)
+            switch = self.addSwitch('s'+num,dpid = ('60'+num.zfill(2)).zfill(16))
+            host = self.addHost('h'+num)
+            self.addLink(switch, host)
+            self.addLink(switch, agg2Switch)
+
+topos = { 'obelisk': (lambda: ObeliskTopo() ) }
+
+def run():
+    topo = ObeliskTopo()
+    net = Mininet( topo=topo, controller=RemoteController, autoSetMacs=True )
+    net.start()
+    CLI( net )
+    net.stop()
+
+if __name__ == '__main__':
+    setLogLevel( 'info' )
+    run()
diff --git a/TestON/tests/HAfullNetPartition/dependencies/onos-gen-partitions b/TestON/tests/HAfullNetPartition/dependencies/onos-gen-partitions
new file mode 100755
index 0000000..06c5ec9
--- /dev/null
+++ b/TestON/tests/HAfullNetPartition/dependencies/onos-gen-partitions
@@ -0,0 +1,83 @@
+#!/usr/bin/env python
+"""
+  Generate the partitions json file from the $OC* environment variables
+
+  Usage: onos-gen-partitions [output file]
+  If output file is not provided, the json is written to stdout.
+"""
+
+from os import environ
+from collections import deque, OrderedDict
+import re
+import json
+import sys
+import hashlib
+
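+# Natural-order sort key so that, e.g., OC10 sorts after OC9 rather than OC1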
+convert = lambda text: int(text) if text.isdigit() else text.lower()
+alphanum_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key)]
+
+def get_OC_vars():
+  vars = []
+  for var in environ:
+    if re.match(r"OC[0-9]+", var):
+      vars.append(var)
+  return sorted(vars, key=alphanum_key)
+
+def get_nodes(vars, port=9876):
+  node = lambda k: { 'id': k, 'ip': k, 'port': port }
+  return [ node(environ[v]) for v in vars ]
+
+def generate_base_partition(nodes):
+  return {
+            'id': 0,
+            'members': nodes
+         }
+
+def generate_extended_partitions(nodes, k):
+  l = deque(nodes)
+  perms = []
+  for i in range(1, len(nodes)+1):
+    part = {
+             'id': i,
+             'members': list(l)[:k]
+           }
+    perms.append(part)
+    l.rotate(-1)
+  return perms
+
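+# HA variant: overlapping partitions of size k built by rotating the node
+# deque two places per partition. With seven nodes and k=3 this yields
+# [1,2,3], [3,4,5], [5,6,7], [7,1,2], so a clean split of the cluster keeps
+# a quorum of each partition's members on one side.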
+def generate_extended_partitions_HA(nodes, k):
+  l = deque(nodes)
+  perms = []
+  for i in range(1, (len(nodes) + 1) / 2 + 1):
+    part = {
+             'id': i,
+             'members': list(l)[:k]
+           }
+    perms.append(part)
+    l.rotate(-2)
+  return perms
+
+if __name__ == '__main__':
+  vars = get_OC_vars()
+  nodes = get_nodes(vars)
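+  # Partition 0 spans every node; the extended partitions are the rotated
+  # size-3 slices used as the data partitions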
+  base_partition = generate_base_partition([v.get('id') for v in nodes])
+  extended_partitions = generate_extended_partitions_HA([v.get('id') for v in nodes], 3)
+  partitions = []
+  partitions.append(base_partition)
+  partitions.extend(extended_partitions)
+  name = 0
+  for node in nodes:
+    name = name ^ hash(node['ip'])
+  data = {
+           'name': name,
+           'nodes': nodes,
+           'partitions': partitions
+         }
+  output = json.dumps(data, indent=4)
+
+  if len(sys.argv) == 2:
+    filename = sys.argv[1]
+    with open(filename, 'w') as f:
+      f.write(output)
+  else:
+    print output
diff --git a/TestON/tests/HAkillNodes/HAkillNodes.py b/TestON/tests/HAkillNodes/HAkillNodes.py
index c1f979a..3461278 100644
--- a/TestON/tests/HAkillNodes/HAkillNodes.py
+++ b/TestON/tests/HAkillNodes/HAkillNodes.py
@@ -49,15 +49,14 @@
         start tcpdump
         """
         import imp
-        import time
         import pexpect
-        main.log.info( "ONOS HA test: Restart minority of ONOS nodes - " +
+        import time
+        main.log.info( "ONOS HA test: Restart a minority of ONOS nodes - " +
                          "initialization" )
         main.case( "Setting up test environment" )
         main.caseExplanation = "Setup the test environment including " +\
                                 "installing ONOS, starting Mininet and ONOS" +\
                                 "cli sessions."
-        # TODO: save all the timers and output them for plotting
 
         # load some variables from the params file
         PULLCODE = False
@@ -1633,6 +1632,7 @@
         except ( ValueError, TypeError ):
             main.log.exception( "Error parsing clusters[0]: " +
                                 repr( clusters[ 0 ] ) )
+            numClusters = "ERROR"
         clusterResults = main.FALSE
         if numClusters == 1:
             clusterResults = main.TRUE
@@ -1654,11 +1654,10 @@
             if devices[ controller ] and ports[ controller ] and\
                 "Error" not in devices[ controller ] and\
                 "Error" not in ports[ controller ]:
-
-                currentDevicesResult = main.Mininet1.compareSwitches(
-                        mnSwitches,
-                        json.loads( devices[ controller ] ),
-                        json.loads( ports[ controller ] ) )
+                    currentDevicesResult = main.Mininet1.compareSwitches(
+                            mnSwitches,
+                            json.loads( devices[ controller ] ),
+                            json.loads( ports[ controller ] ) )
             else:
                 currentDevicesResult = main.FALSE
             utilities.assert_equals( expect=main.TRUE,
@@ -2149,6 +2148,7 @@
         main.caseExplanation = "Compare topology objects between Mininet" +\
                                 " and ONOS"
         topoResult = main.FALSE
+        topoFailMsg = "ONOS topology don't match Mininet"
         elapsed = 0
         count = 0
         main.step( "Comparing ONOS topology to MN topology" )
@@ -2164,9 +2164,11 @@
             devices = []
             threads = []
             for i in main.activeNodes:
-                t = main.Thread( target=main.CLIs[i].devices,
+                t = main.Thread( target=utilities.retry,
                                  name="devices-" + str( i ),
-                                 args=[ ] )
+                                 args=[ main.CLIs[i].devices, [ None ] ],
+                                 kwargs= { 'sleep': 5, 'attempts': 5,
+                                           'randomTime': True } )
                 threads.append( t )
                 t.start()
 
@@ -2205,9 +2207,11 @@
             ports = []
             threads = []
             for i in main.activeNodes:
-                t = main.Thread( target=main.CLIs[i].ports,
+                t = main.Thread( target=utilities.retry,
                                  name="ports-" + str( i ),
-                                 args=[ ] )
+                                 args=[ main.CLIs[i].ports, [ None ] ],
+                                 kwargs= { 'sleep': 5, 'attempts': 5,
+                                           'randomTime': True } )
                 threads.append( t )
                 t.start()
 
@@ -2217,9 +2221,11 @@
             links = []
             threads = []
             for i in main.activeNodes:
-                t = main.Thread( target=main.CLIs[i].links,
+                t = main.Thread( target=utilities.retry,
                                  name="links-" + str( i ),
-                                 args=[ ] )
+                                 args=[ main.CLIs[i].links, [ None ] ],
+                                 kwargs= { 'sleep': 5, 'attempts': 5,
+                                           'randomTime': True } )
                 threads.append( t )
                 t.start()
 
@@ -2229,9 +2235,11 @@
             clusters = []
             threads = []
             for i in main.activeNodes:
-                t = main.Thread( target=main.CLIs[i].clusters,
+                t = main.Thread( target=utilities.retry,
                                  name="clusters-" + str( i ),
-                                 args=[ ] )
+                                 args=[ main.CLIs[i].clusters, [ None ] ],
+                                 kwargs= { 'sleep': 5, 'attempts': 5,
+                                           'randomTime': True } )
                 threads.append( t )
                 t.start()
 
@@ -2244,6 +2252,15 @@
             print "Elapsed time: " + str( elapsed )
             print "CLI time: " + str( cliTime )
 
+            if all( e is None for e in devices ) and\
+               all( e is None for e in hosts ) and\
+               all( e is None for e in ports ) and\
+               all( e is None for e in links ) and\
+               all( e is None for e in clusters ):
+                topoFailMsg = "Could not get topology from ONOS"
+                main.log.error( topoFailMsg )
+                continue  # Try again; no use trying to compare
+
             mnSwitches = main.Mininet1.getSwitches()
             mnLinks = main.Mininet1.getLinks()
             mnHosts = main.Mininet1.getHosts()
@@ -2389,7 +2406,7 @@
         utilities.assert_equals( expect=True,
                                  actual=topoResult,
                                  onpass="ONOS topology matches Mininet",
-                                 onfail="ONOS topology don't match Mininet" )
+                                 onfail=topoFailMsg )
         # End of While loop to pull ONOS state
 
         # Compare json objects for hosts and dataplane clusters
@@ -2873,8 +2890,13 @@
             cli = main.CLIs[i]
             node = cli.specificLeaderCandidate( 'org.onosproject.election' )
             oldAllCandidates.append( node )
-            oldLeaders.append( node[ 0 ] )
+            if node:
+                oldLeaders.append( node[ 0 ] )
+            else:
+                oldLeaders.append( None )
         oldCandidates = oldAllCandidates[ 0 ]
+        if oldCandidates is None:
+            oldCandidates = [ None ]
 
         # Check that each node has the same leader. Defines oldLeader
         if len( set( oldLeaders ) ) != 1:
@@ -2887,13 +2909,14 @@
         # Check that each node's candidate list is the same
         candidateDiscrepancy = False  # Boolean of candidate mismatches
         for candidates in oldAllCandidates:
+            if candidates is None:
+                main.log.warn( "Error getting candidates" )
+                candidates = [ None ]
             if set( candidates ) != set( oldCandidates ):
                 sameResult = main.FALSE
                 candidateDiscrepancy = True
-
         if candidateDiscrepancy:
             failMessage += " and candidates"
-            
         utilities.assert_equals(
             expect=main.TRUE,
             actual=sameResult,
@@ -2922,7 +2945,6 @@
             onfail="Node was not withdrawn from election" )
 
         main.step( "Check that a new node was elected leader" )
-
         # FIXME: use threads
         newLeaderResult = main.TRUE
         failMessage = "Nodes have different leaders"
@@ -2963,7 +2985,7 @@
         # Check that the new leader is not the older leader, which was withdrawn
         if newLeader == oldLeader:
             newLeaderResult = main.FALSE
-            main.log.error( "All nodes still see old leader: " + oldLeader +
+            main.log.error( "All nodes still see old leader: " + str( oldLeader ) +
                 " as the current leader" )
 
         utilities.assert_equals(
@@ -2973,7 +2995,7 @@
             onfail="Something went wrong with Leadership election" )
 
         main.step( "Check that that new leader was the candidate of old leader")
-        # candidates[ 2 ] should be come the top candidate after withdrawl
+        # candidates[ 2 ] should become the top candidate after withdrawal
         correctCandidateResult = main.TRUE
         if expectNoLeader:
             if newLeader == 'none':
@@ -2982,11 +3004,13 @@
             else:
                 main.log.info( "Expected no leader, got: " + str( newLeader ) )
                 correctCandidateResult = main.FALSE
-        elif newLeader != oldCandidates[ 2 ]:
+        elif len( oldCandidates ) < 3:
+            main.log.warn( "Could not determine who should be the correct leader" )
+            correctCandidateResult = main.FALSE
+        elif newLeader != oldCandidates[ 2 ]:
             correctCandidateResult = main.FALSE
-            main.log.error( "Candidate " + newLeader + " was elected. " +
-                oldCandidates[ 2 ] + " should have had priority." )
-
+            main.log.error( "Candidate {} was elected. {} should have had priority.".format(
+                                newLeader, oldCandidates[ 2 ] ) )
         utilities.assert_equals(
             expect=main.TRUE,
             actual=correctCandidateResult,
@@ -3048,7 +3072,7 @@
 
         # Check that the re-elected node is last on the candidate List
         if oldLeader != newCandidates[ -1 ]:
-            main.log.error( "Old Leader (" + oldLeader + ") not in the proper position " +
+            main.log.error( "Old Leader (" + str( oldLeader ) + ") not in the proper position " +
                 str( newCandidates ) )
             positionResult = main.FALSE
 
@@ -4401,7 +4425,7 @@
         putResult = True
         node = main.activeNodes[0]
         putResponses = main.CLIs[node].transactionalMapPut( numKeys, tMapValue )
-        if len( putResponses ) == 100:
+        if putResponses and len( putResponses ) == 100:
             for i in putResponses:
                 if putResponses[ i ][ 'value' ] != tMapValue:
                     putResult = False
diff --git a/TestON/tests/HAsanity/HAsanity.py b/TestON/tests/HAsanity/HAsanity.py
index f58073f..b45bffc 100644
--- a/TestON/tests/HAsanity/HAsanity.py
+++ b/TestON/tests/HAsanity/HAsanity.py
@@ -542,8 +542,8 @@
 
         main.step( "Add host intents via cli" )
         intentIds = []
-        # TODO:  move the host numbers to params
-        #        Maybe look at all the paths we ping?
+        # TODO: move the host numbers to params
+        #       Maybe look at all the paths we ping?
         intentAddResult = True
         hostResult = main.TRUE
         for i in range( 8, 18 ):
@@ -1397,7 +1397,6 @@
         if flowCheck == main.FALSE:
             for table in flows:
                 main.log.warn( table )
-
         # TODO: Compare switch flow tables with ONOS flow tables
 
         main.step( "Start continuous pings" )
@@ -1589,6 +1588,7 @@
         except ( ValueError, TypeError ):
             main.log.exception( "Error parsing clusters[0]: " +
                                 repr( clusters[ 0 ] ) )
+            numClusters = "ERROR"
         clusterResults = main.FALSE
         if numClusters == 1:
             clusterResults = main.TRUE
@@ -2757,7 +2757,6 @@
         for cli in main.CLIs:  # run test election on each node
             if cli.electionTestRun() == main.FALSE:
                 electionResult = main.FALSE
-
         utilities.assert_equals(
             expect=main.TRUE,
             actual=electionResult,
@@ -2864,7 +2863,7 @@
         # Check that the new leader is not the older leader, which was withdrawn
         if newLeader == oldLeader:
             newLeaderResult = main.FALSE
-            main.log.error( "All nodes still see old leader: " + oldLeader +
+            main.log.error( "All nodes still see old leader: " + str( oldLeader ) +
                 " as the current leader" )
 
         utilities.assert_equals(
@@ -2885,8 +2884,8 @@
                 correctCandidateResult = main.FALSE
         elif newLeader != oldCandidates[ 2 ]:
             correctCandidateResult = main.FALSE
-            main.log.error( "Candidate " + newLeader + " was elected. " +
-                oldCandidates[ 2 ] + " should have had priority." )
+            main.log.error( "Candidate {} was elected. {} should have had priority.".format(
+                                newLeader, oldCandidates[ 2 ] ) )
 
         utilities.assert_equals(
             expect=main.TRUE,
@@ -2948,7 +2947,7 @@
 
         # Check that the re-elected node is last on the candidate List
         if oldLeader != newCandidates[ -1 ]:
-            main.log.error( "Old Leader ("  + oldLeader + ") not in the proper position " +
+            main.log.error( "Old Leader (" + str( oldLeader ) + ") not in the proper position " +
                 str( newCandidates ) )
             positionResult = main.FALSE
 
diff --git a/TestON/tests/HAsingleInstanceRestart/HAsingleInstanceRestart.py b/TestON/tests/HAsingleInstanceRestart/HAsingleInstanceRestart.py
index 1587563..3ab5072 100644
--- a/TestON/tests/HAsingleInstanceRestart/HAsingleInstanceRestart.py
+++ b/TestON/tests/HAsingleInstanceRestart/HAsingleInstanceRestart.py
@@ -1088,6 +1088,7 @@
         except ( ValueError, TypeError ):
             main.log.exception( "Error parsing clusters[0]: " +
                                 repr( clusters[ 0 ] ) )
+            numClusters = "ERROR"
         clusterResults = main.FALSE
         if numClusters == 1:
             clusterResults = main.TRUE
@@ -1109,11 +1110,10 @@
             if devices[ controller ] and ports[ controller ] and\
                 "Error" not in devices[ controller ] and\
                 "Error" not in ports[ controller ]:
-
-                currentDevicesResult = main.Mininet1.compareSwitches(
-                        mnSwitches,
-                        json.loads( devices[ controller ] ),
-                        json.loads( ports[ controller ] ) )
+                    currentDevicesResult = main.Mininet1.compareSwitches(
+                            mnSwitches,
+                            json.loads( devices[ controller ] ),
+                            json.loads( ports[ controller ] ) )
             else:
                 currentDevicesResult = main.FALSE
             utilities.assert_equals( expect=main.TRUE,
@@ -1243,6 +1243,7 @@
         assert main, "main not defined"
         assert utilities.assert_equals, "utilities.assert_equals not defined"
         main.case( "Running ONOS Constant State Tests" )
+
         main.step( "Check that each switch has a master" )
         # Assert that each device has a master
         rolesNotNull = main.ONOScli1.rolesNotNull()
diff --git a/TestON/tests/HAstopNodes/HAstopNodes.py b/TestON/tests/HAstopNodes/HAstopNodes.py
index edcc4d1..a98c62a 100644
--- a/TestON/tests/HAstopNodes/HAstopNodes.py
+++ b/TestON/tests/HAstopNodes/HAstopNodes.py
@@ -57,7 +57,6 @@
         main.caseExplanation = "Setup the test environment including " +\
                                 "installing ONOS, starting Mininet and ONOS" +\
                                 "cli sessions."
-        # TODO: save all the timers and output them for plotting
 
         # load some variables from the params file
         PULLCODE = False
@@ -1622,6 +1621,7 @@
         except ( ValueError, TypeError ):
             main.log.exception( "Error parsing clusters[0]: " +
                                 repr( clusters[ 0 ] ) )
+            numClusters = "ERROR"
         clusterResults = main.FALSE
         if numClusters == 1:
             clusterResults = main.TRUE
@@ -1643,11 +1643,10 @@
             if devices[ controller ] and ports[ controller ] and\
                 "Error" not in devices[ controller ] and\
                 "Error" not in ports[ controller ]:
-
-                currentDevicesResult = main.Mininet1.compareSwitches(
-                        mnSwitches,
-                        json.loads( devices[ controller ] ),
-                        json.loads( ports[ controller ] ) )
+                    currentDevicesResult = main.Mininet1.compareSwitches(
+                            mnSwitches,
+                            json.loads( devices[ controller ] ),
+                            json.loads( ports[ controller ] ) )
             else:
                 currentDevicesResult = main.FALSE
             utilities.assert_equals( expect=main.TRUE,
@@ -2138,6 +2137,7 @@
         main.caseExplanation = "Compare topology objects between Mininet" +\
                                 " and ONOS"
         topoResult = main.FALSE
+        topoFailMsg = "ONOS topology don't match Mininet"
         elapsed = 0
         count = 0
         main.step( "Comparing ONOS topology to MN topology" )
@@ -2153,9 +2153,11 @@
             devices = []
             threads = []
             for i in main.activeNodes:
-                t = main.Thread( target=main.CLIs[i].devices,
+                t = main.Thread( target=utilities.retry,
                                  name="devices-" + str( i ),
-                                 args=[ ] )
+                                 args=[ main.CLIs[i].devices, [ None ] ],
+                                 kwargs= { 'sleep': 5, 'attempts': 5,
+                                           'randomTime': True } )
                 threads.append( t )
                 t.start()
 
@@ -2194,9 +2196,11 @@
             ports = []
             threads = []
             for i in main.activeNodes:
-                t = main.Thread( target=main.CLIs[i].ports,
+                t = main.Thread( target=utilities.retry,
                                  name="ports-" + str( i ),
-                                 args=[ ] )
+                                 args=[ main.CLIs[i].ports, [ None ] ],
+                                 kwargs= { 'sleep': 5, 'attempts': 5,
+                                           'randomTime': True } )
                 threads.append( t )
                 t.start()
 
@@ -2206,9 +2210,11 @@
             links = []
             threads = []
             for i in main.activeNodes:
-                t = main.Thread( target=main.CLIs[i].links,
+                t = main.Thread( target=utilities.retry,
                                  name="links-" + str( i ),
-                                 args=[ ] )
+                                 args=[ main.CLIs[i].links, [ None ] ],
+                                 kwargs= { 'sleep': 5, 'attempts': 5,
+                                           'randomTime': True } )
                 threads.append( t )
                 t.start()
 
@@ -2218,9 +2224,11 @@
             clusters = []
             threads = []
             for i in main.activeNodes:
-                t = main.Thread( target=main.CLIs[i].clusters,
+                t = main.Thread( target=utilities.retry,
                                  name="clusters-" + str( i ),
-                                 args=[ ] )
+                                 args=[ main.CLIs[i].clusters, [ None ] ],
+                                 kwargs= { 'sleep': 5, 'attempts': 5,
+                                           'randomTime': True } )
                 threads.append( t )
                 t.start()
 
@@ -2233,6 +2241,15 @@
             print "Elapsed time: " + str( elapsed )
             print "CLI time: " + str( cliTime )
 
+            if all( e is None for e in devices ) and\
+               all( e is None for e in hosts ) and\
+               all( e is None for e in ports ) and\
+               all( e is None for e in links ) and\
+               all( e is None for e in clusters ):
+                topoFailMsg = "Could not get topology from ONOS"
+                main.log.error( topoFailMsg )
+                continue  # Try again; no use trying to compare
+
             mnSwitches = main.Mininet1.getSwitches()
             mnLinks = main.Mininet1.getLinks()
             mnHosts = main.Mininet1.getHosts()
@@ -2378,7 +2395,7 @@
         utilities.assert_equals( expect=True,
                                  actual=topoResult,
                                  onpass="ONOS topology matches Mininet",
-                                 onfail="ONOS topology don't match Mininet" )
+                                 onfail=topoFailMsg )
         # End of While loop to pull ONOS state
 
         # Compare json objects for hosts and dataplane clusters
@@ -2862,8 +2879,13 @@
             cli = main.CLIs[i]
             node = cli.specificLeaderCandidate( 'org.onosproject.election' )
             oldAllCandidates.append( node )
-            oldLeaders.append( node[ 0 ] )
+            if node:
+                oldLeaders.append( node[ 0 ] )
+            else:
+                oldLeaders.append( None )
         oldCandidates = oldAllCandidates[ 0 ]
+        if oldCandidates is None:
+            oldCandidates = [ None ]
 
         # Check that each node has the same leader. Defines oldLeader
         if len( set( oldLeaders ) ) != 1:
@@ -2876,13 +2898,14 @@
         # Check that each node's candidate list is the same
         candidateDiscrepancy = False  # Boolean of candidate mismatches
         for candidates in oldAllCandidates:
+            if candidates is None:
+                main.log.warn( "Error getting candidates" )
+                candidates = [ None ]
             if set( candidates ) != set( oldCandidates ):
                 sameResult = main.FALSE
                 candidateDiscrepancy = True
-
         if candidateDiscrepancy:
             failMessage += " and candidates"
-            
         utilities.assert_equals(
             expect=main.TRUE,
             actual=sameResult,
@@ -2911,7 +2934,6 @@
             onfail="Node was not withdrawn from election" )
 
         main.step( "Check that a new node was elected leader" )
-
         # FIXME: use threads
         newLeaderResult = main.TRUE
         failMessage = "Nodes have different leaders"
@@ -2952,7 +2974,7 @@
         # Check that the new leader is not the older leader, which was withdrawn
         if newLeader == oldLeader:
             newLeaderResult = main.FALSE
-            main.log.error( "All nodes still see old leader: " + oldLeader +
+            main.log.error( "All nodes still see old leader: " + str( oldLeader ) +
                 " as the current leader" )
 
         utilities.assert_equals(
@@ -2962,7 +2984,7 @@
             onfail="Something went wrong with Leadership election" )
 
         main.step( "Check that that new leader was the candidate of old leader")
-        # candidates[ 2 ] should be come the top candidate after withdrawl
+        # candidates[ 2 ] should become the top candidate after withdrawal
         correctCandidateResult = main.TRUE
         if expectNoLeader:
             if newLeader == 'none':
@@ -2971,11 +2993,13 @@
             else:
                 main.log.info( "Expected no leader, got: " + str( newLeader ) )
                 correctCandidateResult = main.FALSE
-        elif newLeader != oldCandidates[ 2 ]:
+        elif len( oldCandidates ) < 3:
+            main.log.warn( "Could not determine who should be the correct leader" )
+            correctCandidateResult = main.FALSE
+        elif newLeader != oldCandidates[ 2 ]:
             correctCandidateResult = main.FALSE
-            main.log.error( "Candidate " + newLeader + " was elected. " +
-                oldCandidates[ 2 ] + " should have had priority." )
-
+            main.log.error( "Candidate {} was elected. {} should have had priority.".format(
+                                newLeader, oldCandidates[ 2 ] ) )
         utilities.assert_equals(
             expect=main.TRUE,
             actual=correctCandidateResult,
@@ -3037,7 +3061,7 @@
 
         # Check that the re-elected node is last on the candidate List
         if oldLeader != newCandidates[ -1 ]:
-            main.log.error( "Old Leader (" + oldLeader + ") not in the proper position " +
+            main.log.error( "Old Leader (" + str( oldLeader ) + ") not in the proper position " +
                 str( newCandidates ) )
             positionResult = main.FALSE
 
@@ -4390,7 +4414,7 @@
         putResult = True
         node = main.activeNodes[0]
         putResponses = main.CLIs[node].transactionalMapPut( numKeys, tMapValue )
-        if len( putResponses ) == 100:
+        if putResponses and len( putResponses ) == 100:
             for i in putResponses:
                 if putResponses[ i ][ 'value' ] != tMapValue:
                     putResult = False