Merge pull request #74 from opennetworkinglab/devl/HA_refactor

Refactor HA Tests
diff --git a/TestON/dependencies/Jenkins_getresult_HA.py b/TestON/dependencies/Jenkins_getresult_HA.py
index 859612d..1ac3a4c 100755
--- a/TestON/dependencies/Jenkins_getresult_HA.py
+++ b/TestON/dependencies/Jenkins_getresult_HA.py
@@ -101,11 +101,12 @@
     f.close()
     #https://wiki.onosproject.org/display/OST/Test+Results+-+HA#Test+Results+-+HA
     #Example anchor on new wiki:        #TestResults-HA-TestHATestSanity
-    page_name = "Master+-+HA"
+    page_name = "Master-HA"
     if "ONOS-HA-Maint" in job:
-        page_name = "1.0+-+HA"
+        # NOTE: if the page name starts with a number, the wiki prepends 'id-' to anchor links
+        page_name = "id-1.0-HA"
 
-    header += "<li><a href=\'#" + str(page_name) + str(test) + "\'> " + str(test) + " - Results: " + str(passes) + " Passed, " + str(fails) + " Failed</a></li>"
+    header += "<li><a href=\'#" + str(page_name) + "-" + str(test) + "\'> " + str(test) + " - Results: " + str(passes) + " Passed, " + str(fails) + " Failed</a></li>"
 
     #*********************
     #include any other phrase specific to case you would like to include in wiki here
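
For reference, a standalone sketch of the anchor string this hunk now produces (the test name and result counts below are illustrative, not taken from a real run):

    # Illustrative values only
    page_name = "Master-HA"          # or "id-1.0-HA" on the maintenance branch
    test = "HATestSanity"            # hypothetical test name
    passes, fails = 7, 0             # hypothetical results
    header = ( "<li><a href='#" + page_name + "-" + test + "'> " + test +
               " - Results: " + str( passes ) + " Passed, " + str( fails ) +
               " Failed</a></li>" )
    # -> <li><a href='#Master-HA-HATestSanity'> HATestSanity - Results: 7 Passed, 0 Failed</a></li>
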
diff --git a/TestON/drivers/common/cli/emulator/mininetclidriver.py b/TestON/drivers/common/cli/emulator/mininetclidriver.py
index 52b1130..57fa94d 100644
--- a/TestON/drivers/common/cli/emulator/mininetclidriver.py
+++ b/TestON/drivers/common/cli/emulator/mininetclidriver.py
@@ -102,7 +102,8 @@
         """
         Starts Mininet accepts a topology(.py) file and/or an optional
         arguement ,to start the mininet, as a parameter.
-        Returns true if the mininet starts successfully
+        Returns main.TRUE if the mininet starts successfully and
+                main.FALSE otherwise
         """
         if self.handle:
             main.log.info(
@@ -1275,9 +1276,13 @@
 
     def stopNet( self ):
         """
-        Stops mininet. returns true if the mininet succesfully stops.
+        Stops mininet.
+        Returns main.TRUE if the mininet successfully stops and
+                main.FALSE if the pexpect handle does not exist.
+
+        Will clean up and exit the test if mininet fails to stop
         """
-        
+
         main.log.info( self.name + ": Disconnecting mininet..." )
         response = ''
         if self.handle:
diff --git a/TestON/drivers/common/cli/onosdriver.py b/TestON/drivers/common/cli/onosdriver.py
index b4493f8..4719150 100644
--- a/TestON/drivers/common/cli/onosdriver.py
+++ b/TestON/drivers/common/cli/onosdriver.py
@@ -47,7 +47,7 @@
                     break
             if self.home == None or self.home == "":
                 self.home = "~/ONOS"
-            
+
             self.name = self.options[ 'name' ]
             self.handle = super( OnosDriver, self ).connect(
                 user_name=self.user_name,
@@ -1487,3 +1487,102 @@
             main.log.info( self.name + " ::::::" )
             main.cleanup()
             main.exit()
+
+    def setIpTables( self, ip, port='', action='add', packet_type='tcp',
+                     direction='INPUT', rule='DROP' ):
+        '''
+        Description:
+            add or remove iptables rule to DROP (default) packets from
+            specific IP and PORT
+        Usage:
+        * specify action ('add' or 'remove')
+          when removing, pass the same arguments you used to add the rule;
+          that exact rule will be deleted.
+        * specify the ip to block
+        * specify the destination port to block (defaults to all ports)
+        * optional packet type to block (default tcp)
+        * optional iptables rule (default DROP)
+        * optional direction to block (default 'INPUT')
+        Returns:
+            main.TRUE on success or
+            main.FALSE if given invalid input or
+            main.ERROR if there is an error in response from iptables
+        WARNING:
+        * This function uses root privilege iptables command which may result
+          in unwanted network errors. USE WITH CAUTION
+        '''
+        import time
+
+        # NOTE*********
+        #   The strict input checking in this driver function is intentional,
+        #   to discourage any misuse or error of iptables, which can cause
+        #   severe network errors
+        # *************
+
+        # NOTE: Sleep needed to give the rule time to be added and
+        #       registered on the instance. If you call this function
+        #       multiple times, this sleep prevents errors.
+        #       DO NOT REMOVE
+        time.sleep( 5 )
+        try:
+            # input validation
+            action_type = action.lower()
+            rule = rule.upper()
+            direction = direction.upper()
+            if action_type != 'add' and action_type != 'remove':
+                main.log.error( "Invalid action type. Use 'add' or "
+                                "'remove' table rule" )
+                return main.FALSE
+            if rule != 'DROP' and rule != 'ACCEPT' and rule != 'LOG':
+                # NOTE Currently only supports rules DROP, ACCEPT, and LOG
+                main.log.error( "Invalid rule. Valid rules are 'DROP' or "
+                                "'ACCEPT' or 'LOG' only." )
+                return main.FALSE
+            if direction != 'INPUT' and direction != 'OUTPUT':
+                # NOTE Currently only supports directions INPUT and OUTPUT
+                main.log.error( "Invalid direction. Valid directions are"
+                                " 'INPUT' or 'OUTPUT' only." )
+                return main.FALSE
+            if action_type == 'add':
+                # -A is the 'append' action of iptables
+                actionFlag = '-A'
+            elif action_type == 'remove':
+                # -D is the 'delete' rule of iptables
+                actionFlag = '-D'
+            self.handle.sendline( "" )
+            self.handle.expect( "\$" )
+            cmd = "sudo iptables " + actionFlag + " " +\
+                  direction +\
+                  " -p " + str( packet_type ) +\
+                  " -s " + str( ip )
+            if port:
+                cmd += " --dport " + str( port )
+            cmd += " -j " + str( rule )
+
+            self.handle.sendline( cmd )
+            self.handle.expect( "\$" )
+            main.log.warn( self.handle.before )
+
+            info_string = "On " + str( self.name )
+            info_string += " " + str( action_type )
+            info_string += " iptable rule [ "
+            info_string += " IP: " + str( ip )
+            info_string += " Port: " + str( port )
+            info_string += " Rule: " + str( rule )
+            info_string += " Direction: " + str( direction ) + " ]"
+            main.log.info( info_string )
+            return main.TRUE
+        except pexpect.TIMEOUT:
+            main.log.exception( self.name + ": Timeout exception in "
+                                "setIpTables function" )
+            return main.ERROR
+        except pexpect.EOF:
+            main.log.error( self.name + ": EOF exception found" )
+            main.log.error( self.name + ":    " + self.handle.before )
+            main.cleanup()
+            main.exit()
+        except:
+            main.log.exception( "Unknown error:")
+            main.cleanup()
+            main.exit()
+
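
A minimal usage sketch for the new setIpTables helper, assuming main.ONOSbench is the OnosDriver component TestON creates for the test; the IP and port below are placeholders:

    # Hypothetical values; substitute the real ONOS node IP and port
    blockedIp = "10.128.30.11"
    onosPort = "9876"
    # Drop incoming TCP traffic from that node to simulate a partition
    added = main.ONOSbench.setIpTables( ip=blockedIp, port=onosPort,
                                        action='add', packet_type='tcp',
                                        direction='INPUT', rule='DROP' )
    # ... run the checks that exercise the failure ...
    # Remove the rule with the same arguments to restore connectivity
    removed = main.ONOSbench.setIpTables( ip=blockedIp, port=onosPort,
                                          action='remove', packet_type='tcp',
                                          direction='INPUT', rule='DROP' )
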
diff --git a/TestON/drivers/common/clidriver.py b/TestON/drivers/common/clidriver.py
index 1b8998b..6d7fb2a 100644
--- a/TestON/drivers/common/clidriver.py
+++ b/TestON/drivers/common/clidriver.py
@@ -81,7 +81,7 @@
         while i == 5:
             i = self.handle.expect( [
 				    ssh_newkey,
-                                    'password:|\$',
+                                    'password:',
                                     pexpect.EOF,
                                     pexpect.TIMEOUT,
                                     refused,
diff --git a/TestON/tests/HATestClusterRestart/HATestClusterRestart.params b/TestON/tests/HATestClusterRestart/HATestClusterRestart.params
index 6f2002d..123fd0f 100644
--- a/TestON/tests/HATestClusterRestart/HATestClusterRestart.params
+++ b/TestON/tests/HATestClusterRestart/HATestClusterRestart.params
@@ -1,7 +1,7 @@
 <PARAMS>
     <testcases>1,2,8,3,4,5,14,[6],8,3,7,4,15,9,8,4,10,8,4,11,8,4,12,8,4,13</testcases>
     <ENV>
-    <cellName>HA</cellName>
+        <cellName>HA</cellName>
     </ENV>
     <Git>False</Git>
     <branch> master </branch>
diff --git a/TestON/tests/HATestClusterRestart/HATestClusterRestart.py b/TestON/tests/HATestClusterRestart/HATestClusterRestart.py
index ff38cfe..e9beb0c 100644
--- a/TestON/tests/HATestClusterRestart/HATestClusterRestart.py
+++ b/TestON/tests/HATestClusterRestart/HATestClusterRestart.py
@@ -150,24 +150,38 @@
             onos1Isup = main.ONOSbench.isup( ONOS1Ip )
             if not onos1Isup:
                 main.log.report( "ONOS1 didn't start!" )
+                main.ONOSbench.onosStop( ONOS1Ip )
+                main.ONOSbench.onosStart( ONOS1Ip )
             onos2Isup = main.ONOSbench.isup( ONOS2Ip )
             if not onos2Isup:
                 main.log.report( "ONOS2 didn't start!" )
+                main.ONOSbench.onosStop( ONOS2Ip )
+                main.ONOSbench.onosStart( ONOS2Ip )
             onos3Isup = main.ONOSbench.isup( ONOS3Ip )
             if not onos3Isup:
                 main.log.report( "ONOS3 didn't start!" )
+                main.ONOSbench.onosStop( ONOS3Ip )
+                main.ONOSbench.onosStart( ONOS3Ip )
             onos4Isup = main.ONOSbench.isup( ONOS4Ip )
             if not onos4Isup:
                 main.log.report( "ONOS4 didn't start!" )
+                main.ONOSbench.onosStop( ONOS4Ip )
+                main.ONOSbench.onosStart( ONOS4Ip )
             onos5Isup = main.ONOSbench.isup( ONOS5Ip )
             if not onos5Isup:
                 main.log.report( "ONOS5 didn't start!" )
+                main.ONOSbench.onosStop( ONOS5Ip )
+                main.ONOSbench.onosStart( ONOS5Ip )
             onos6Isup = main.ONOSbench.isup( ONOS6Ip )
             if not onos6Isup:
                 main.log.report( "ONOS6 didn't start!" )
+                main.ONOSbench.onosStop( ONOS6Ip )
+                main.ONOSbench.onosStart( ONOS6Ip )
             onos7Isup = main.ONOSbench.isup( ONOS7Ip )
             if not onos7Isup:
                 main.log.report( "ONOS7 didn't start!" )
+                main.ONOSbench.onosStop( ONOS7Ip )
+                main.ONOSbench.onosStart( ONOS7Ip )
             onosIsupResult = onos1Isup and onos2Isup and onos3Isup\
                 and onos4Isup and onos5Isup and onos6Isup and onos7Isup
             if onosIsupResult == main.TRUE:
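
The seven per-node checks above all follow the same pattern; a hedged sketch of the equivalent loop, assuming the node IPs are gathered into a list and that isup/onosStop/onosStart behave as used above:

    # Sketch only; the test currently spells this out per node
    nodeIps = [ ONOS1Ip, ONOS2Ip, ONOS3Ip, ONOS4Ip,
                ONOS5Ip, ONOS6Ip, ONOS7Ip ]
    onosIsupResult = main.TRUE
    for i, ip in enumerate( nodeIps, start=1 ):
        isup = main.ONOSbench.isup( ip )
        if not isup:
            main.log.report( "ONOS" + str( i ) + " didn't start!" )
            # Try a stop/start cycle before giving up on the node
            main.ONOSbench.onosStop( ip )
            main.ONOSbench.onosStart( ip )
        onosIsupResult = onosIsupResult and isup
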
@@ -378,9 +392,9 @@
             onfail="Switches were not successfully reassigned" )
         mastershipCheck = mastershipCheck and roleCall and roleCheck
         utilities.assert_equals( expect=main.TRUE, actual=mastershipCheck,
-                                onpass="Switch mastership correctly assigned",
-                                onfail="Error in ( re )assigning switch" +
-                                " mastership" )
+                                 onpass="Switch mastership correctly assigned",
+                                 onfail="Error in (re)assigning switch" +
+                                 " mastership" )
 
     def CASE3( self, main ):
         """
@@ -448,6 +462,7 @@
                 host1Id = host1Dict.get( 'id', None )
                 host2Id = host2Dict.get( 'id', None )
             if host1Id and host2Id:
+
                 tmpResult = main.ONOScli1.addHostIntent(
                     host1Id,
                     host2Id )
@@ -493,7 +508,7 @@
                                " and h" + str( i + 10 ) )
             elif ping == main.TRUE:
                 main.log.info( "Ping test passed!" )
-                PingResult = main.TRUE
+                # Don't set PingResult here; it would override earlier failures
         if PingResult == main.FALSE:
             main.log.report(
                 "Intents have not been installed correctly, pings failed." )
@@ -1498,6 +1513,7 @@
         count = 0
         main.step( "Collecting topology information from ONOS" )
         startTime = time.time()
+        # Give time for Gossip to work
         while topoResult == main.FALSE and elapsed < 60:
             count = count + 1
             if count > 1:
diff --git a/TestON/tests/HATestClusterRestart/HATestClusterRestart.topo b/TestON/tests/HATestClusterRestart/HATestClusterRestart.topo
index 4d4156c..9305025 100644
--- a/TestON/tests/HATestClusterRestart/HATestClusterRestart.topo
+++ b/TestON/tests/HATestClusterRestart/HATestClusterRestart.topo
@@ -151,7 +151,7 @@
                 <arg1> --custom ~/mininet/custom/topo-HA.py </arg1>
                 <arg2> --topo mytopo </arg2>
                 <arg3> </arg3>
-                <controller> remote </controller>
+                <controller> none </controller>
             </COMPONENTS>
         </Mininet1>
 
@@ -162,11 +162,7 @@
             <type>RemoteMininetDriver</type>
             <connect_order>17</connect_order>
             <COMPONENTS>
-                # Specify the Option for mininet
-                <arg1> --custom ~/mininet/custom/topo-HA.py </arg1>
-                <arg2> --topo mytopo --arp</arg2>
-                <controller> remote </controller>
-             </COMPONENTS>
+            </COMPONENTS>
         </Mininet2>
 
     </COMPONENT>
diff --git a/TestON/tests/HATestMinorityRestart/HATestMinorityRestart.params b/TestON/tests/HATestMinorityRestart/HATestMinorityRestart.params
index 7d79860..595b78c 100644
--- a/TestON/tests/HATestMinorityRestart/HATestMinorityRestart.params
+++ b/TestON/tests/HATestMinorityRestart/HATestMinorityRestart.params
@@ -1,7 +1,7 @@
 <PARAMS>
     <testcases>1,2,8,3,4,5,14,[6],8,7,4,15,9,8,4,10,8,4,11,8,4,12,8,4,13</testcases>
     <ENV>
-    <cellName>HA</cellName>
+        <cellName>HA</cellName>
     </ENV>
     <Git>False</Git>
     <branch> master </branch>
diff --git a/TestON/tests/HATestMinorityRestart/HATestMinorityRestart.py b/TestON/tests/HATestMinorityRestart/HATestMinorityRestart.py
index 3d9adc7..de26e67 100644
--- a/TestON/tests/HATestMinorityRestart/HATestMinorityRestart.py
+++ b/TestON/tests/HATestMinorityRestart/HATestMinorityRestart.py
@@ -150,24 +150,38 @@
             onos1Isup = main.ONOSbench.isup( ONOS1Ip )
             if not onos1Isup:
                 main.log.report( "ONOS1 didn't start!" )
+                main.ONOSbench.onosStop( ONOS1Ip )
+                main.ONOSbench.onosStart( ONOS1Ip )
             onos2Isup = main.ONOSbench.isup( ONOS2Ip )
             if not onos2Isup:
                 main.log.report( "ONOS2 didn't start!" )
+                main.ONOSbench.onosStop( ONOS2Ip )
+                main.ONOSbench.onosStart( ONOS2Ip )
             onos3Isup = main.ONOSbench.isup( ONOS3Ip )
             if not onos3Isup:
                 main.log.report( "ONOS3 didn't start!" )
+                main.ONOSbench.onosStop( ONOS3Ip )
+                main.ONOSbench.onosStart( ONOS3Ip )
             onos4Isup = main.ONOSbench.isup( ONOS4Ip )
             if not onos4Isup:
                 main.log.report( "ONOS4 didn't start!" )
+                main.ONOSbench.onosStop( ONOS4Ip )
+                main.ONOSbench.onosStart( ONOS4Ip )
             onos5Isup = main.ONOSbench.isup( ONOS5Ip )
             if not onos5Isup:
                 main.log.report( "ONOS5 didn't start!" )
+                main.ONOSbench.onosStop( ONOS5Ip )
+                main.ONOSbench.onosStart( ONOS5Ip )
             onos6Isup = main.ONOSbench.isup( ONOS6Ip )
             if not onos6Isup:
                 main.log.report( "ONOS6 didn't start!" )
+                main.ONOSbench.onosStop( ONOS6Ip )
+                main.ONOSbench.onosStart( ONOS6Ip )
             onos7Isup = main.ONOSbench.isup( ONOS7Ip )
             if not onos7Isup:
                 main.log.report( "ONOS7 didn't start!" )
+                main.ONOSbench.onosStop( ONOS7Ip )
+                main.ONOSbench.onosStart( ONOS7Ip )
             onosIsupResult = onos1Isup and onos2Isup and onos3Isup\
                 and onos4Isup and onos5Isup and onos6Isup and onos7Isup
             if onosIsupResult == main.TRUE:
@@ -378,9 +392,9 @@
             onfail="Switches were not successfully reassigned" )
         mastershipCheck = mastershipCheck and roleCall and roleCheck
         utilities.assert_equals( expect=main.TRUE, actual=mastershipCheck,
-                                onpass="Switch mastership correctly assigned",
-                                onfail="Error in ( re )assigning switch" +
-                                " mastership" )
+                                 onpass="Switch mastership correctly assigned",
+                                 onfail="Error in (re)assigning switch" +
+                                 " mastership" )
 
     def CASE3( self, main ):
         """
@@ -484,15 +498,15 @@
         main.case( description )
         PingResult = main.TRUE
         for i in range( 8, 18 ):
-            ping = main.Mininet1.pingHost(
-                src="h" + str( i ), target="h" + str( i + 10 ) )
+            ping = main.Mininet1.pingHost( src="h" + str( i ),
+                                           target="h" + str( i + 10 ) )
             PingResult = PingResult and ping
             if ping == main.FALSE:
                 main.log.warn( "Ping failed between h" + str( i ) +
                                " and h" + str( i + 10 ) )
             elif ping == main.TRUE:
                 main.log.info( "Ping test passed!" )
-                PingResult = main.TRUE
+                # Don't set PingResult here; it would override earlier failures
         if PingResult == main.FALSE:
             main.log.report(
                 "Intents have not been installed correctly, pings failed." )
@@ -1109,8 +1123,9 @@
         cliResult3 = main.ONOScli3.startOnosCli( ONOS3Ip )
         cliResults = cliResult1 and cliResult2 and cliResult3
 
-        main.log.info( "Install leadership election app on restarted node" )
-
+        # Grab the time of restart so we can check how long the gossip
+        # protocol has had time to work
+        main.restartTime = time.time()
         caseResults = main.TRUE and onosIsupResult and cliResults
         utilities.assert_equals( expect=main.TRUE, actual=caseResults,
                                 onpass="ONOS restart successful",
@@ -1149,8 +1164,6 @@
         ONOS5Mastership = main.ONOScli5.roles()
         ONOS6Mastership = main.ONOScli6.roles()
         ONOS7Mastership = main.ONOScli7.roles()
-        # print json.dumps( json.loads( ONOS1Mastership ), sort_keys=True,
-        # indent=4, separators=( ',', ': ' ) )
         if "Error" in ONOS1Mastership or not ONOS1Mastership\
                 or "Error" in ONOS2Mastership or not ONOS2Mastership\
                 or "Error" in ONOS3Mastership or not ONOS3Mastership\
@@ -1243,133 +1256,147 @@
         # NOTE: we expect mastership to change on controller failure
         mastershipCheck = consistentMastership
 
-        main.step( "Get the intents and compare across all nodes" )
-        ONOS1Intents = main.ONOScli1.intents( jsonFormat=True )
-        ONOS2Intents = main.ONOScli2.intents( jsonFormat=True )
-        ONOS3Intents = main.ONOScli3.intents( jsonFormat=True )
-        ONOS4Intents = main.ONOScli4.intents( jsonFormat=True )
-        ONOS5Intents = main.ONOScli5.intents( jsonFormat=True )
-        ONOS6Intents = main.ONOScli6.intents( jsonFormat=True )
-        ONOS7Intents = main.ONOScli7.intents( jsonFormat=True )
-        intentCheck = main.FALSE
-        if "Error" in ONOS1Intents or not ONOS1Intents\
-                or "Error" in ONOS2Intents or not ONOS2Intents\
-                or "Error" in ONOS3Intents or not ONOS3Intents\
-                or "Error" in ONOS4Intents or not ONOS4Intents\
-                or "Error" in ONOS5Intents or not ONOS5Intents\
-                or "Error" in ONOS6Intents or not ONOS6Intents\
-                or "Error" in ONOS7Intents or not ONOS7Intents:
-            main.log.report( "Error in getting ONOS intents" )
-            main.log.warn( "ONOS1 intents response: " + repr( ONOS1Intents ) )
-            main.log.warn( "ONOS2 intents response: " + repr( ONOS2Intents ) )
-            main.log.warn( "ONOS3 intents response: " + repr( ONOS3Intents ) )
-            main.log.warn( "ONOS4 intents response: " + repr( ONOS4Intents ) )
-            main.log.warn( "ONOS5 intents response: " + repr( ONOS5Intents ) )
-            main.log.warn( "ONOS6 intents response: " + repr( ONOS6Intents ) )
-            main.log.warn( "ONOS7 intents response: " + repr( ONOS7Intents ) )
-        elif ONOS1Intents == ONOS2Intents\
-                and ONOS1Intents == ONOS3Intents\
-                and ONOS1Intents == ONOS4Intents\
-                and ONOS1Intents == ONOS5Intents\
-                and ONOS1Intents == ONOS6Intents\
-                and ONOS1Intents == ONOS7Intents:
-            intentCheck = main.TRUE
-            main.log.report( "Intents are consistent across all ONOS nodes" )
-        else:
-            main.log.warn( "ONOS1 intents: " )
-            print json.dumps( json.loads( ONOS1Intents ), sort_keys=True,
-                              indent=4, separators=( ',', ': ' ) )
-            main.log.warn( "ONOS2 intents: " )
-            print json.dumps( json.loads( ONOS2Intents ), sort_keys=True,
-                              indent=4, separators=( ',', ': ' ) )
-            main.log.warn( "ONOS3 intents: " )
-            print json.dumps( json.loads( ONOS3Intents ), sort_keys=True,
-                              indent=4, separators=( ',', ': ' ) )
-            main.log.warn( "ONOS4 intents: " )
-            print json.dumps( json.loads( ONOS4Intents ), sort_keys=True,
-                              indent=4, separators=( ',', ': ' ) )
-            main.log.warn( "ONOS5 intents: " )
-            print json.dumps( json.loads( ONOS5Intents ), sort_keys=True,
-                              indent=4, separators=( ',', ': ' ) )
-            main.log.warn( "ONOS6 intents: " )
-            print json.dumps( json.loads( ONOS6Intents ), sort_keys=True,
-                              indent=4, separators=( ',', ': ' ) )
-            main.log.warn( "ONOS7 intents: " )
-            print json.dumps( json.loads( ONOS7Intents ), sort_keys=True,
-                              indent=4, separators=( ',', ': ' ) )
-        utilities.assert_equals(
-            expect=main.TRUE,
-            actual=intentCheck,
-            onpass="Intents are consistent across all ONOS nodes",
-            onfail="ONOS nodes have different views of intents" )
-        # Print the intent states
-        intents = []
-        intents.append( ONOS1Intents )
-        intents.append( ONOS2Intents )
-        intents.append( ONOS3Intents )
-        intents.append( ONOS4Intents )
-        intents.append( ONOS5Intents )
-        intents.append( ONOS6Intents )
-        intents.append( ONOS7Intents )
-        intentStates = []
-        for node in intents:  # Iter through ONOS nodes
-            nodeStates = []
-            for intent in json.loads( node ):  # Iter through intents of a node
-                nodeStates.append( intent[ 'state' ] )
-            intentStates.append( nodeStates )
-            out = [ (i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
-            main.log.info( dict( out ) )
-
-
-        # NOTE: Hazelcast has no durability, so intents are lost across system
-        # restarts
-        main.step( "Compare current intents with intents before the failure" )
-        # NOTE: this requires case 5 to pass for intentState to be set.
-        #      maybe we should stop the test if that fails?
-        sameIntents = main.TRUE
-        if intentState and intentState == ONOS1Intents:
-            sameIntents = main.TRUE
-            main.log.report( "Intents are consistent with before failure" )
-        # TODO: possibly the states have changed? we may need to figure out
-        # what the aceptable states are
-        else:
-            try:
+        while True:
+            whileTime = time.time() - main.restartTime
+            # Gossip store
+            main.step( "Get the intents and compare across all nodes" )
+            ONOS1Intents = main.ONOScli1.intents( jsonFormat=True )
+            ONOS2Intents = main.ONOScli2.intents( jsonFormat=True )
+            ONOS3Intents = main.ONOScli3.intents( jsonFormat=True )
+            ONOS4Intents = main.ONOScli4.intents( jsonFormat=True )
+            ONOS5Intents = main.ONOScli5.intents( jsonFormat=True )
+            ONOS6Intents = main.ONOScli6.intents( jsonFormat=True )
+            ONOS7Intents = main.ONOScli7.intents( jsonFormat=True )
+            intentCheck = main.FALSE
+            if "Error" in ONOS1Intents or not ONOS1Intents\
+                    or "Error" in ONOS2Intents or not ONOS2Intents\
+                    or "Error" in ONOS3Intents or not ONOS3Intents\
+                    or "Error" in ONOS4Intents or not ONOS4Intents\
+                    or "Error" in ONOS5Intents or not ONOS5Intents\
+                    or "Error" in ONOS6Intents or not ONOS6Intents\
+                    or "Error" in ONOS7Intents or not ONOS7Intents:
+                main.log.report( "Error in getting ONOS intents" )
+                main.log.warn( "ONOS1 intents response: " +
+                               repr( ONOS1Intents ) )
+                main.log.warn( "ONOS2 intents response: " +
+                               repr( ONOS2Intents ) )
+                main.log.warn( "ONOS3 intents response: " +
+                               repr( ONOS3Intents ) )
+                main.log.warn( "ONOS4 intents response: " +
+                               repr( ONOS4Intents ) )
+                main.log.warn( "ONOS5 intents response: " +
+                               repr( ONOS5Intents ) )
+                main.log.warn( "ONOS6 intents response: " +
+                               repr( ONOS6Intents ) )
+                main.log.warn( "ONOS7 intents response: " +
+                               repr( ONOS7Intents ) )
+            elif ONOS1Intents == ONOS2Intents\
+                    and ONOS1Intents == ONOS3Intents\
+                    and ONOS1Intents == ONOS4Intents\
+                    and ONOS1Intents == ONOS5Intents\
+                    and ONOS1Intents == ONOS6Intents\
+                    and ONOS1Intents == ONOS7Intents:
+                intentCheck = main.TRUE
+                main.log.report( "Intents are consistent across all" +
+                                 " ONOS nodes" )
+            else:
                 main.log.warn( "ONOS1 intents: " )
-                print json.dumps( json.loads( ONOS1Intents ),
-                                  sort_keys=True, indent=4,
-                                  separators=( ',', ': ' ) )
-            except:
-                pass
-            sameIntents = main.FALSE
-        utilities.assert_equals(
-            expect=main.TRUE,
-            actual=sameIntents,
-            onpass="Intents are consistent with before failure",
-            onfail="The Intents changed during failure" )
-        intentCheck = intentCheck and sameIntents
+                print json.dumps( json.loads( ONOS1Intents ), sort_keys=True,
+                                  indent=4, separators=( ',', ': ' ) )
+                main.log.warn( "ONOS2 intents: " )
+                print json.dumps( json.loads( ONOS2Intents ), sort_keys=True,
+                                  indent=4, separators=( ',', ': ' ) )
+                main.log.warn( "ONOS3 intents: " )
+                print json.dumps( json.loads( ONOS3Intents ), sort_keys=True,
+                                  indent=4, separators=( ',', ': ' ) )
+                main.log.warn( "ONOS4 intents: " )
+                print json.dumps( json.loads( ONOS4Intents ), sort_keys=True,
+                                  indent=4, separators=( ',', ': ' ) )
+                main.log.warn( "ONOS5 intents: " )
+                print json.dumps( json.loads( ONOS5Intents ), sort_keys=True,
+                                  indent=4, separators=( ',', ': ' ) )
+                main.log.warn( "ONOS6 intents: " )
+                print json.dumps( json.loads( ONOS6Intents ), sort_keys=True,
+                                  indent=4, separators=( ',', ': ' ) )
+                main.log.warn( "ONOS7 intents: " )
+                print json.dumps( json.loads( ONOS7Intents ), sort_keys=True,
+                                  indent=4, separators=( ',', ': ' ) )
+            utilities.assert_equals(
+                expect=main.TRUE,
+                actual=intentCheck,
+                onpass="Intents are consistent across all ONOS nodes",
+                onfail="ONOS nodes have different views of intents" )
+            # Print the intent states
+            intents = []
+            intents.append( ONOS1Intents )
+            intents.append( ONOS2Intents )
+            intents.append( ONOS3Intents )
+            intents.append( ONOS4Intents )
+            intents.append( ONOS5Intents )
+            intents.append( ONOS6Intents )
+            intents.append( ONOS7Intents )
+            intentStates = []
+            for node in intents:  # Iter through ONOS nodes
+                nodeStates = []
+                # Iter through intents of a node
+                for intent in json.loads( node ):
+                    nodeStates.append( intent[ 'state' ] )
+                intentStates.append( nodeStates )
+                out = [ (i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
+                main.log.info( dict( out ) )
 
-        main.step( "Get the OF Table entries and compare to before " +
-                   "component failure" )
-        FlowTables = main.TRUE
-        flows2 = []
-        for i in range( 28 ):
-            main.log.info( "Checking flow table on s" + str( i + 1 ) )
-            tmpFlows = main.Mininet2.getFlowTable( 1.3, "s" + str( i + 1 ) )
-            flows2.append( tmpFlows )
-            tempResult = main.Mininet2.flowComp(
-                flow1=flows[ i ],
-                flow2=tmpFlows )
-            FlowTables = FlowTables and tempResult
-            if FlowTables == main.FALSE:
-                main.log.info( "Differences in flow table for switch: s" +
-                               str( i + 1 ) )
-        if FlowTables == main.TRUE:
-            main.log.report( "No changes were found in the flow tables" )
-        utilities.assert_equals(
-            expect=main.TRUE,
-            actual=FlowTables,
-            onpass="No changes were found in the flow tables",
-            onfail="Changes were found in the flow tables" )
+
+            # NOTE: Store has no durability, so intents are lost across system
+            # restarts
+            main.step( "Compare current intents with intents before the failure" )
+            # NOTE: this requires case 5 to pass for intentState to be set.
+            #      maybe we should stop the test if that fails?
+            sameIntents = main.TRUE
+            if intentState and intentState == ONOS1Intents:
+                sameIntents = main.TRUE
+                main.log.report( "Intents are consistent with before failure" )
+            # TODO: possibly the states have changed? we may need to figure out
+            # what the acceptable states are
+            else:
+                try:
+                    main.log.warn( "ONOS1 intents: " )
+                    print json.dumps( json.loads( ONOS1Intents ),
+                                      sort_keys=True, indent=4,
+                                      separators=( ',', ': ' ) )
+                except:
+                    pass
+                sameIntents = main.FALSE
+            utilities.assert_equals(
+                expect=main.TRUE,
+                actual=sameIntents,
+                onpass="Intents are consistent with before failure",
+                onfail="The Intents changed during failure" )
+            intentCheck = intentCheck and sameIntents
+
+            main.step( "Get the OF Table entries and compare to before " +
+                       "component failure" )
+            FlowTables = main.TRUE
+            flows2 = []
+            for i in range( 28 ):
+                main.log.info( "Checking flow table on s" + str( i + 1 ) )
+                tmpFlows = main.Mininet2.getFlowTable( 1.3, "s" + str( i + 1 ) )
+                flows2.append( tmpFlows )
+                tempResult = main.Mininet2.flowComp(
+                    flow1=flows[ i ],
+                    flow2=tmpFlows )
+                FlowTables = FlowTables and tempResult
+                if FlowTables == main.FALSE:
+                    main.log.info( "Differences in flow table for switch: s" +
+                                   str( i + 1 ) )
+            if FlowTables == main.TRUE:
+                main.log.report( "No changes were found in the flow tables" )
+            utilities.assert_equals(
+                expect=main.TRUE,
+                actual=FlowTables,
+                onpass="No changes were found in the flow tables",
+                onfail="Changes were found in the flow tables" )
+            if topoResult == main.TRUE or ( whileTime > 10 ):
+                break
 
         main.step( "Check the continuous pings to ensure that no packets " +
                    "were dropped during component failure" )
@@ -1494,6 +1521,7 @@
         count = 0
         main.step( "Collecting topology information from ONOS" )
         startTime = time.time()
+        # Give time for Gossip to work
         while topoResult == main.FALSE and elapsed < 60:
             count = count + 1
             if count > 1:
diff --git a/TestON/tests/HATestMinorityRestart/HATestMinorityRestart.topo b/TestON/tests/HATestMinorityRestart/HATestMinorityRestart.topo
index 4d4156c..9305025 100644
--- a/TestON/tests/HATestMinorityRestart/HATestMinorityRestart.topo
+++ b/TestON/tests/HATestMinorityRestart/HATestMinorityRestart.topo
@@ -151,7 +151,7 @@
                 <arg1> --custom ~/mininet/custom/topo-HA.py </arg1>
                 <arg2> --topo mytopo </arg2>
                 <arg3> </arg3>
-                <controller> remote </controller>
+                <controller> none </controller>
             </COMPONENTS>
         </Mininet1>
 
@@ -162,11 +162,7 @@
             <type>RemoteMininetDriver</type>
             <connect_order>17</connect_order>
             <COMPONENTS>
-                # Specify the Option for mininet
-                <arg1> --custom ~/mininet/custom/topo-HA.py </arg1>
-                <arg2> --topo mytopo --arp</arg2>
-                <controller> remote </controller>
-             </COMPONENTS>
+            </COMPONENTS>
         </Mininet2>
 
     </COMPONENT>
diff --git a/TestON/tests/HATestSanity/HATestSanity.py b/TestON/tests/HATestSanity/HATestSanity.py
index 58f9954..590ff56 100644
--- a/TestON/tests/HATestSanity/HATestSanity.py
+++ b/TestON/tests/HATestSanity/HATestSanity.py
@@ -150,24 +150,38 @@
             onos1Isup = main.ONOSbench.isup( ONOS1Ip )
             if not onos1Isup:
                 main.log.report( "ONOS1 didn't start!" )
+                main.ONOSbench.onosStop( ONOS1Ip )
+                main.ONOSbench.onosStart( ONOS1Ip )
             onos2Isup = main.ONOSbench.isup( ONOS2Ip )
             if not onos2Isup:
                 main.log.report( "ONOS2 didn't start!" )
+                main.ONOSbench.onosStop( ONOS2Ip )
+                main.ONOSbench.onosStart( ONOS2Ip )
             onos3Isup = main.ONOSbench.isup( ONOS3Ip )
             if not onos3Isup:
                 main.log.report( "ONOS3 didn't start!" )
+                main.ONOSbench.onosStop( ONOS3Ip )
+                main.ONOSbench.onosStart( ONOS3Ip )
             onos4Isup = main.ONOSbench.isup( ONOS4Ip )
             if not onos4Isup:
                 main.log.report( "ONOS4 didn't start!" )
+                main.ONOSbench.onosStop( ONOS4Ip )
+                main.ONOSbench.onosStart( ONOS4Ip )
             onos5Isup = main.ONOSbench.isup( ONOS5Ip )
             if not onos5Isup:
                 main.log.report( "ONOS5 didn't start!" )
+                main.ONOSbench.onosStop( ONOS5Ip )
+                main.ONOSbench.onosStart( ONOS5Ip )
             onos6Isup = main.ONOSbench.isup( ONOS6Ip )
             if not onos6Isup:
                 main.log.report( "ONOS6 didn't start!" )
+                main.ONOSbench.onosStop( ONOS6Ip )
+                main.ONOSbench.onosStart( ONOS6Ip )
             onos7Isup = main.ONOSbench.isup( ONOS7Ip )
             if not onos7Isup:
                 main.log.report( "ONOS7 didn't start!" )
+                main.ONOSbench.onosStop( ONOS7Ip )
+                main.ONOSbench.onosStart( ONOS7Ip )
             onosIsupResult = onos1Isup and onos2Isup and onos3Isup\
                 and onos4Isup and onos5Isup and onos6Isup and onos7Isup
             if onosIsupResult == main.TRUE:
@@ -446,6 +460,7 @@
                 host1Id = host1Dict.get( 'id', None )
                 host2Id = host2Dict.get( 'id', None )
             if host1Id and host2Id:
+                # TODO: distribute the intents across onos nodes
                 tmpResult = main.ONOScli1.addHostIntent(
                     host1Id,
                     host2Id )
@@ -491,7 +506,7 @@
                                " and h" + str( i + 10 ) )
             elif ping == main.TRUE:
                 main.log.info( "Ping test passed!" )
-                PingResult = main.TRUE
+                # Don't set PingResult here; it would override earlier failures
         if PingResult == main.FALSE:
             main.log.report(
                 "Intents have not been installed correctly, pings failed." )
@@ -553,8 +568,6 @@
         ONOS5Mastership = main.ONOScli5.roles()
         ONOS6Mastership = main.ONOScli6.roles()
         ONOS7Mastership = main.ONOScli7.roles()
-        # print json.dumps( json.loads( ONOS1Mastership ), sort_keys=True,
-        # indent=4, separators=( ',', ': ' ) )
         if "Error" in ONOS1Mastership or not ONOS1Mastership\
                 or "Error" in ONOS2Mastership or not ONOS2Mastership\
                 or "Error" in ONOS3Mastership or not ONOS3Mastership\
@@ -1126,8 +1139,6 @@
         ONOS5Mastership = main.ONOScli5.roles()
         ONOS6Mastership = main.ONOScli6.roles()
         ONOS7Mastership = main.ONOScli7.roles()
-        # print json.dumps( json.loads( ONOS1Mastership ), sort_keys=True,
-        # indent=4, separators=( ',', ': ' ) )
         if "Error" in ONOS1Mastership or not ONOS1Mastership\
                 or "Error" in ONOS2Mastership or not ONOS2Mastership\
                 or "Error" in ONOS3Mastership or not ONOS3Mastership\
@@ -1466,6 +1477,7 @@
         count = 0
         main.step( "Collecting topology information from ONOS" )
         startTime = time.time()
+        # Give time for Gossip to work
         while topoResult == main.FALSE and elapsed < 60:
             count = count + 1
             if count > 1:
@@ -1526,140 +1538,134 @@
             cliTime = time.time() - cliStart
             print "CLI time: " + str( cliTime )
 
-            try: 
-                for controller in range( numControllers ):
-                    controllerStr = str( controller + 1 )
-                    if devices[ controller ] or "Error" not in devices[
-                            controller ]:
-                        currentDevicesResult = main.Mininet1.compareSwitches(
-                            MNTopo,
-                            json.loads(
-                                devices[ controller ] ) )
-                    else:
-                        currentDevicesResult = main.FALSE
-                    utilities.assert_equals( expect=main.TRUE,
-                                            actual=currentDevicesResult,
-                                            onpass="ONOS" + controllerStr +
-                                            " Switches view is correct",
-                                            onfail="ONOS" + controllerStr +
-                                            " Switches view is incorrect" )
+            for controller in range( numControllers ):
+                controllerStr = str( controller + 1 )
+                if devices[ controller ] or "Error" not in devices[
+                        controller ]:
+                    currentDevicesResult = main.Mininet1.compareSwitches(
+                        MNTopo,
+                        json.loads(
+                            devices[ controller ] ) )
+                else:
+                    currentDevicesResult = main.FALSE
+                utilities.assert_equals( expect=main.TRUE,
+                                        actual=currentDevicesResult,
+                                        onpass="ONOS" + controllerStr +
+                                        " Switches view is correct",
+                                        onfail="ONOS" + controllerStr +
+                                        " Switches view is incorrect" )
 
-                    if ports[ controller ] or "Error" not in ports[ controller ]:
-                        currentPortsResult = main.Mininet1.comparePorts(
-                            MNTopo,
-                            json.loads(
-                                ports[ controller ] ) )
-                    else:
-                        currentPortsResult = main.FALSE
-                    utilities.assert_equals( expect=main.TRUE,
-                                            actual=currentPortsResult,
-                                            onpass="ONOS" + controllerStr +
-                                            " ports view is correct",
-                                            onfail="ONOS" + controllerStr +
-                                            " ports view is incorrect" )
+                if ports[ controller ] or "Error" not in ports[ controller ]:
+                    currentPortsResult = main.Mininet1.comparePorts(
+                        MNTopo,
+                        json.loads(
+                            ports[ controller ] ) )
+                else:
+                    currentPortsResult = main.FALSE
+                utilities.assert_equals( expect=main.TRUE,
+                                        actual=currentPortsResult,
+                                        onpass="ONOS" + controllerStr +
+                                        " ports view is correct",
+                                        onfail="ONOS" + controllerStr +
+                                        " ports view is incorrect" )
 
-                    if links[ controller ] or "Error" not in links[ controller ]:
-                        currentLinksResult = main.Mininet1.compareLinks(
-                            MNTopo,
-                            json.loads(
-                                links[ controller ] ) )
-                    else:
-                        currentLinksResult = main.FALSE
-                    utilities.assert_equals( expect=main.TRUE,
-                                            actual=currentLinksResult,
-                                            onpass="ONOS" + controllerStr +
-                                            " links view is correct",
-                                            onfail="ONOS" + controllerStr +
-                                            " links view is incorrect" )
-                devicesResults = devicesResults and currentDevicesResult
-                portsResults = portsResults and currentPortsResult
-                linksResults = linksResults and currentLinksResult
+                if links[ controller ] or "Error" not in links[ controller ]:
+                    currentLinksResult = main.Mininet1.compareLinks(
+                        MNTopo,
+                        json.loads(
+                            links[ controller ] ) )
+                else:
+                    currentLinksResult = main.FALSE
+                utilities.assert_equals( expect=main.TRUE,
+                                        actual=currentLinksResult,
+                                        onpass="ONOS" + controllerStr +
+                                        " links view is correct",
+                                        onfail="ONOS" + controllerStr +
+                                        " links view is incorrect" )
+            devicesResults = devicesResults and currentDevicesResult
+            portsResults = portsResults and currentPortsResult
+            linksResults = linksResults and currentLinksResult
 
-                # Compare json objects for hosts and dataplane clusters
+            # Compare json objects for hosts and dataplane clusters
 
-                # hosts
-                consistentHostsResult = main.TRUE
-                for controller in range( len( hosts ) ):
-                    controllerStr = str( controller + 1 )
-                    if "Error" not in hosts[ controller ]:
-                        if hosts[ controller ] == hosts[ 0 ]:
-                            continue
-                        else:  # hosts not consistent
-                            main.log.report( "hosts from ONOS" + controllerStr +
-                                             " is inconsistent with ONOS1" )
-                            main.log.warn( repr( hosts[ controller ] ) )
-                            consistentHostsResult = main.FALSE
-
-                    else:
-                        main.log.report( "Error in getting ONOS hosts from ONOS" +
-                                         controllerStr )
+            # hosts
+            consistentHostsResult = main.TRUE
+            for controller in range( len( hosts ) ):
+                controllerStr = str( controller + 1 )
+                if "Error" not in hosts[ controller ]:
+                    if hosts[ controller ] == hosts[ 0 ]:
+                        continue
+                    else:  # hosts not consistent
+                        main.log.report( "hosts from ONOS" + controllerStr +
+                                         " is inconsistent with ONOS1" )
+                        main.log.warn( repr( hosts[ controller ] ) )
                         consistentHostsResult = main.FALSE
-                        main.log.warn( "ONOS" + controllerStr +
-                                       " hosts response: " +
-                                       repr( hosts[ controller ] ) )
-                utilities.assert_equals(
-                    expect=main.TRUE,
-                    actual=consistentHostsResult,
-                    onpass="Hosts view is consistent across all ONOS nodes",
-                    onfail="ONOS nodes have different views of hosts" )
 
-                # Strongly connected clusters of devices
-                consistentClustersResult = main.TRUE
-                for controller in range( len( clusters ) ):
-                    controllerStr = str( controller + 1 )
-                    if "Error" not in clusters[ controller ]:
-                        if clusters[ controller ] == clusters[ 0 ]:
-                            continue
-                        else:  # clusters not consistent
-                            main.log.report( "clusters from ONOS" +
-                                             controllerStr +
-                                             " is inconsistent with ONOS1" )
-                            consistentClustersResult = main.FALSE
+                else:
+                    main.log.report( "Error in getting ONOS hosts from ONOS" +
+                                     controllerStr )
+                    consistentHostsResult = main.FALSE
+                    main.log.warn( "ONOS" + controllerStr +
+                                   " hosts response: " +
+                                   repr( hosts[ controller ] ) )
+            utilities.assert_equals(
+                expect=main.TRUE,
+                actual=consistentHostsResult,
+                onpass="Hosts view is consistent across all ONOS nodes",
+                onfail="ONOS nodes have different views of hosts" )
 
-                    else:
-                        main.log.report( "Error in getting dataplane clusters " +
-                                         "from ONOS" + controllerStr )
+            # Strongly connected clusters of devices
+            consistentClustersResult = main.TRUE
+            for controller in range( len( clusters ) ):
+                controllerStr = str( controller + 1 )
+                if "Error" not in clusters[ controller ]:
+                    if clusters[ controller ] == clusters[ 0 ]:
+                        continue
+                    else:  # clusters not consistent
+                        main.log.report( "clusters from ONOS" +
+                                         controllerStr +
+                                         " is inconsistent with ONOS1" )
                         consistentClustersResult = main.FALSE
-                        main.log.warn( "ONOS" + controllerStr +
-                                       " clusters response: " +
-                                       repr( clusters[ controller ] ) )
-                utilities.assert_equals(
-                    expect=main.TRUE,
-                    actual=consistentClustersResult,
-                    onpass="Clusters view is consistent across all ONOS nodes",
-                    onfail="ONOS nodes have different views of clusters" )
-                # there should always only be one cluster
-                numClusters = len( json.loads( clusters[ 0 ] ) )
-                utilities.assert_equals(
-                    expect=1,
-                    actual=numClusters,
-                    onpass="ONOS shows 1 SCC",
-                    onfail="ONOS shows " +
-                    str( numClusters ) +
-                    " SCCs" )
 
-                topoResult = ( devicesResults and portsResults and linksResults
-                               and consistentHostsResult
-                               and consistentClustersResult )
+                else:
+                    main.log.report( "Error in getting dataplane clusters " +
+                                     "from ONOS" + controllerStr )
+                    consistentClustersResult = main.FALSE
+                    main.log.warn( "ONOS" + controllerStr +
+                                   " clusters response: " +
+                                   repr( clusters[ controller ] ) )
+            utilities.assert_equals(
+                expect=main.TRUE,
+                actual=consistentClustersResult,
+                onpass="Clusters view is consistent across all ONOS nodes",
+                onfail="ONOS nodes have different views of clusters" )
+            # there should always only be one cluster
+            numClusters = len( json.loads( clusters[ 0 ] ) )
+            utilities.assert_equals(
+                expect=1,
+                actual=numClusters,
+                onpass="ONOS shows 1 SCC",
+                onfail="ONOS shows " +
+                str( numClusters ) +
+                " SCCs" )
 
-                topoResult = topoResult and int( count <= 2 )
-                note = "note it takes about " + str( int( cliTime ) ) + \
-                    " seconds for the test to make all the cli calls to fetch " +\
-                    "the topology from each ONOS instance"
-                main.log.info(
-                    "Very crass estimate for topology discovery/convergence( " +
-                    str( note ) + " ): " + str( elapsed ) + " seconds, " +
-                    str( count ) + " tries" )
-                utilities.assert_equals( expect=main.TRUE, actual=topoResult,
-                                        onpass="Topology Check Test successful",
-                                        onfail="Topology Check Test NOT successful" )
-                if topoResult == main.TRUE:
-                    main.log.report( "ONOS topology view matches Mininet topology" )
-            except:
-                main.log.info( self.name + " ::::::" )
-                main.log.error( traceback.print_exc() )
-                main.log.info( self.name + " ::::::" )
+            topoResult = ( devicesResults and portsResults and linksResults
+                           and consistentHostsResult
+                           and consistentClustersResult )
 
+        topoResult = topoResult and int( count <= 2 )
+        note = "note it takes about " + str( int( cliTime ) ) + \
+            " seconds for the test to make all the cli calls to fetch " +\
+            "the topology from each ONOS instance"
+        main.log.info(
+            "Very crass estimate for topology discovery/convergence( " +
+            str( note ) + " ): " + str( elapsed ) + " seconds, " +
+            str( count ) + " tries" )
+        utilities.assert_equals( expect=main.TRUE, actual=topoResult,
+                                onpass="Topology Check Test successful",
+                                onfail="Topology Check Test NOT successful" )
+        if topoResult == main.TRUE:
+            main.log.report( "ONOS topology view matches Mininet topology" )
 
     def CASE9( self, main ):
         """
diff --git a/TestON/tests/HATestSanity/HATestSanity.topo b/TestON/tests/HATestSanity/HATestSanity.topo
index c24d546..9305025 100644
--- a/TestON/tests/HATestSanity/HATestSanity.topo
+++ b/TestON/tests/HATestSanity/HATestSanity.topo
@@ -151,7 +151,7 @@
                 <arg1> --custom ~/mininet/custom/topo-HA.py </arg1>
                 <arg2> --topo mytopo </arg2>
                 <arg3> </arg3>
-                <controller> remote </controller>
+                <controller> none </controller>
             </COMPONENTS>
         </Mininet1>
 
@@ -162,7 +162,7 @@
             <type>RemoteMininetDriver</type>
             <connect_order>17</connect_order>
             <COMPONENTS>
-             </COMPONENTS>
+            </COMPONENTS>
         </Mininet2>
 
     </COMPONENT>
diff --git a/TestON/tests/HATestSingleInstanceRestart/HATestSingleInstanceRestart.params b/TestON/tests/HATestSingleInstanceRestart/HATestSingleInstanceRestart.params
index 7f5cf4e..80de267 100644
--- a/TestON/tests/HATestSingleInstanceRestart/HATestSingleInstanceRestart.params
+++ b/TestON/tests/HATestSingleInstanceRestart/HATestSingleInstanceRestart.params
@@ -1,7 +1,7 @@
 <PARAMS>
     <testcases>1,2,8,3,4,5,14,[6],8,3,7,4,15,9,8,4,10,8,4,11,8,4,12,8,4,13</testcases>
     <ENV>
-    <cellName>HA</cellName>
+        <cellName>HA</cellName>
     </ENV>
     <Git>False</Git>
     <branch> master </branch>
diff --git a/TestON/tests/HATestSingleInstanceRestart/HATestSingleInstanceRestart.py b/TestON/tests/HATestSingleInstanceRestart/HATestSingleInstanceRestart.py
index 4b9b287..44ce741 100644
--- a/TestON/tests/HATestSingleInstanceRestart/HATestSingleInstanceRestart.py
+++ b/TestON/tests/HATestSingleInstanceRestart/HATestSingleInstanceRestart.py
@@ -293,7 +293,7 @@
                                " and h" + str( i + 10 ) )
             elif ping == main.TRUE:
                 main.log.info( "Ping test passed!" )
-                PingResult = main.TRUE
+                # Don't set PingResult here; it would override earlier failures
         if PingResult == main.FALSE:
             main.log.report(
                 "Intents have not been installed correctly, pings failed." )
diff --git a/TestON/tests/HATestSingleInstanceRestart/HATestSingleInstanceRestart.topo b/TestON/tests/HATestSingleInstanceRestart/HATestSingleInstanceRestart.topo
index 4d4156c..9305025 100644
--- a/TestON/tests/HATestSingleInstanceRestart/HATestSingleInstanceRestart.topo
+++ b/TestON/tests/HATestSingleInstanceRestart/HATestSingleInstanceRestart.topo
@@ -151,7 +151,7 @@
                 <arg1> --custom ~/mininet/custom/topo-HA.py </arg1>
                 <arg2> --topo mytopo </arg2>
                 <arg3> </arg3>
-                <controller> remote </controller>
+                <controller> none </controller>
             </COMPONENTS>
         </Mininet1>
 
@@ -162,11 +162,7 @@
             <type>RemoteMininetDriver</type>
             <connect_order>17</connect_order>
             <COMPONENTS>
-                # Specify the Option for mininet
-                <arg1> --custom ~/mininet/custom/topo-HA.py </arg1>
-                <arg2> --topo mytopo --arp</arg2>
-                <controller> remote </controller>
-             </COMPONENTS>
+            </COMPONENTS>
         </Mininet2>
 
     </COMPONENT>