Merge "Implement moveHost and SRRouting test CASE651"
diff --git a/TestON/drivers/common/cli/emulator/mininetclidriver.py b/TestON/drivers/common/cli/emulator/mininetclidriver.py
index bc51188..0cea933 100644
--- a/TestON/drivers/common/cli/emulator/mininetclidriver.py
+++ b/TestON/drivers/common/cli/emulator/mininetclidriver.py
@@ -473,7 +473,7 @@
                     # Current host pings all other hosts specified
                     pingCmd = str( host ) + cmd + str( temp )
                     self.handle.sendline( pingCmd )
-                    self.handle.expect( "mininet>", timeout=wait + 1 )
+                    self.handle.expect( "mininet>", timeout=wait + 5 )
                     response = self.handle.before
                     if re.search( ',\s0\%\spacket\sloss', response ):
                         pingResponse += str( " h" + str( temp[ 1: ] ) )
@@ -536,7 +536,7 @@
                     while failedPings <= acceptableFailed:
                         main.log.debug( "Pinging from " + str( host ) + " to " + str( temp ) )
                         self.handle.sendline( pingCmd )
-                        self.handle.expect( "mininet>", timeout=wait + 1 )
+                        self.handle.expect( "mininet>", timeout=wait + 5 )
                         response = self.handle.before
                         if re.search( ',\s0\%\spacket\sloss', response ):
                             pingResponse += " " + str( temp )
@@ -606,9 +606,9 @@
                     discoveryResult = main.FALSE
                 if cmd:
                     self.handle.sendline( "{} ip neigh flush all".format( host ) )
-                    self.handle.expect( "mininet>", timeout=wait + 1 )
+                    self.handle.expect( "mininet>" )
                     self.handle.sendline( cmd )
-                    self.handle.expect( "mininet>", timeout=wait + 1 )
+                    self.handle.expect( "mininet>", timeout=wait + 5 )
             return discoveryResult
         except pexpect.TIMEOUT:
             main.log.exception( self.name + ": TIMEOUT exception" )
@@ -655,7 +655,7 @@
                     while failedPings <= acceptableFailed:
                         main.log.debug( "Pinging from " + str( host ) + " to " + str( temp ) )
                         self.handle.sendline( pingCmd )
-                        self.handle.expect( "mininet>", timeout=wait + 1 )
+                        self.handle.expect( "mininet>", timeout=wait + 5 )
                         response = self.handle.before
                         if re.search( ',\s0\%\spacket\sloss', response ):
                             pingResponse += " " + str( temp )
@@ -707,7 +707,7 @@
             main.log.info( "Sending: " + command )
             self.handle.sendline( command )
             i = self.handle.expect( [ command, pexpect.TIMEOUT ],
-                                    timeout=wait + 1 )
+                                    timeout=wait + 5 )
             if i == 1:
                 main.log.error(
                     self.name +
@@ -752,7 +752,7 @@
             main.log.info( "Sending: " + command )
             self.handle.sendline( command )
             i = self.handle.expect( [ command, pexpect.TIMEOUT ],
-                                    timeout=wait + 1 )
+                                    timeout=wait + 5 )
             if i == 1:
                 main.log.error(
                     self.name +
@@ -806,7 +806,7 @@
                 i = self.handle.expect( [ self.hostPrompt,
                                           '\*\*\* Unknown command: ' + pingCmd,
                                           pexpect.TIMEOUT ],
-                                        timeout=wait + 1 )
+                                        timeout=wait + 5 )
                 # For some reason we need to send something
                 # Otherwise ping results won't be read by handle
                 self.handle.sendline( "" )
diff --git a/TestON/drivers/common/cli/emulator/mininethostdriver.py b/TestON/drivers/common/cli/emulator/mininethostdriver.py
index 408f918..360fd9a 100644
--- a/TestON/drivers/common/cli/emulator/mininethostdriver.py
+++ b/TestON/drivers/common/cli/emulator/mininethostdriver.py
@@ -145,7 +145,7 @@
             main.log.info( self.name + ": Sending: " + command )
             self.handle.sendline( command )
             i = self.handle.expect( [ self.hostPrompt, pexpect.TIMEOUT ],
-                                    timeout=wait + 1 )
+                                    timeout=wait + 5 )
             if i == 1:
                 main.log.error(
                     self.name +
@@ -179,7 +179,7 @@
             main.log.info( self.name + ": Sending: " + command )
             self.handle.sendline( command )
             i = self.handle.expect( [ self.hostPrompt, pexpect.TIMEOUT ],
-                                    timeout=wait + 1 )
+                                    timeout=wait + 5 )
             if i == 1:
                 main.log.error(
                     self.name +
diff --git a/TestON/drivers/common/cli/hostdriver.py b/TestON/drivers/common/cli/hostdriver.py
index 238721e..4156902 100644
--- a/TestON/drivers/common/cli/hostdriver.py
+++ b/TestON/drivers/common/cli/hostdriver.py
@@ -145,7 +145,7 @@
             main.log.info( self.name + ": Sending: " + command )
             self.handle.sendline( command )
             i = self.handle.expect( [ self.prompt, pexpect.TIMEOUT ],
-                                    timeout=wait + 1 )
+                                    timeout=wait + 5 )
             if i == 1:
                 main.log.error(
                     self.name +
@@ -179,7 +179,7 @@
             main.log.info( self.name + ": Sending: " + command )
             self.handle.sendline( command )
             i = self.handle.expect( [ self.prompt, pexpect.TIMEOUT ],
-                                    timeout=wait + 1 )
+                                    timeout=wait + 5 )
             if i == 1:
                 main.log.error(
                     self.name +
diff --git a/TestON/tests/USECASE/SegmentRouting/SRMulticast/SRMulticast.params b/TestON/tests/USECASE/SegmentRouting/SRMulticast/SRMulticast.params
index c9f4f55..4c7946e 100644
--- a/TestON/tests/USECASE/SegmentRouting/SRMulticast/SRMulticast.params
+++ b/TestON/tests/USECASE/SegmentRouting/SRMulticast/SRMulticast.params
@@ -43,6 +43,10 @@
         <mcastSleep>5</mcastSleep>
     </timers>
 
+    <RETRY>
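+        <!-- Number of retries when verifying host locations after rediscovery -->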
+        <hostDiscovery>10</hostDiscovery>
+    </RETRY>
+
     <SCAPY>
         <HOSTNAMES>h3v4,h4v4,h8v4,h10v4,h1v6,h3v6</HOSTNAMES>
     </SCAPY>
diff --git a/TestON/tests/USECASE/SegmentRouting/SRMulticast/SRMulticast.py b/TestON/tests/USECASE/SegmentRouting/SRMulticast/SRMulticast.py
index 866b273..8a8ba08 100644
--- a/TestON/tests/USECASE/SegmentRouting/SRMulticast/SRMulticast.py
+++ b/TestON/tests/USECASE/SegmentRouting/SRMulticast/SRMulticast.py
@@ -215,7 +215,7 @@
         main.mcastRoutes = { "ipv4": { "src": [ 0 ], "dst": [ 0, 1, 2 ] }, "ipv6": { "src": [ 0 ], "dst": [ 0 ] } }
         setupTest( main, test_idx=103, onosNodes=3 )
         verifyMcastRoutes( main )
-        verifyLinkDown( main, [ "spine103", "spine101" ], 4 )
+        verifyLinkDown( main, [ "spine103", "spine101" ], 2 )
         verifyMcastRemoval( main )
         lib.cleanup( main, copyKarafLog=False )
 
diff --git a/TestON/tests/USECASE/SegmentRouting/SRMulticast/dependencies/SRMulticastTest.py b/TestON/tests/USECASE/SegmentRouting/SRMulticast/dependencies/SRMulticastTest.py
index b0907d7..8d69443 100644
--- a/TestON/tests/USECASE/SegmentRouting/SRMulticast/dependencies/SRMulticastTest.py
+++ b/TestON/tests/USECASE/SegmentRouting/SRMulticast/dependencies/SRMulticastTest.py
@@ -146,7 +146,7 @@
     if hostsToDiscover:
         main.Network.discoverHosts( hostList=hostsToDiscover )
     if hostLocations:
-        lib.verifyHostLocations( main, hostLocations, retry=5 )
+        lib.verifyHostLocations( main, hostLocations, retry=int( main.params[ "RETRY" ][ "hostDiscovery" ] ) )
     for routeName in expectList.keys():
         lib.verifyMulticastTraffic( main, routeName, True )
 
@@ -156,17 +156,19 @@
     Reenable the port and verify traffic
     """
     from tests.USECASE.SegmentRouting.dependencies.Testcaselib import Testcaselib as lib
+    # Disable the port(s)
     main.step( "Disable port {}/{}".format( dpid, port ) )
     main.Cluster.active( 0 ).CLI.portstate( dpid=dpid, port=port, state="disable" )
     time.sleep( 10 )
     for routeName in expectList.keys():
         lib.verifyMulticastTraffic( main, routeName, expectList[ routeName ] )
-    # Restore the link(s)
+    # Reenable the port(s)
+    main.step( "Enable port {}/{}".format( dpid, port ) )
     main.Cluster.active( 0 ).CLI.portstate( dpid=dpid, port=port, state="enable" )
     if hostsToDiscover:
         main.Network.discoverHosts( hostList=hostsToDiscover )
     if hostLocations:
-        lib.verifyHostLocations( main, hostLocations, retry=5 )
+        lib.verifyHostLocations( main, hostLocations, retry=int( main.params[ "RETRY" ][ "hostDiscovery" ] ) )
     for routeName in expectList.keys():
         lib.verifyMulticastTraffic( main, routeName, True )
 
@@ -184,7 +186,7 @@
     # Recover the switch(es)
     lib.recoverSwitch( main, switchName, int( main.params[ "TOPO" ][ "switchNum" ] ), int( main.params[ "TOPO" ][ "linkNum" ] ), True if hostsToDiscover else False, hostsToDiscover )
     if hostLocations:
-        lib.verifyHostLocations( main, hostLocations, retry=5 )
+        lib.verifyHostLocations( main, hostLocations, retry=int( main.params[ "RETRY" ][ "hostDiscovery" ] ) )
     for routeName in expectList.keys():
         lib.verifyMulticastTraffic( main, routeName, True )
 
diff --git a/TestON/tests/USECASE/SegmentRouting/SRRouting/SRRouting.py b/TestON/tests/USECASE/SegmentRouting/SRRouting/SRRouting.py
index f2b3b8a..dafbacd 100644
--- a/TestON/tests/USECASE/SegmentRouting/SRRouting/SRRouting.py
+++ b/TestON/tests/USECASE/SegmentRouting/SRRouting/SRRouting.py
@@ -1,5 +1,6 @@
 
 class SRRouting:
+
     def __init__( self ):
         self.default = ''
 
@@ -468,6 +469,64 @@
         verifyOnosFailure( main, internal=False )
         lib.cleanup( main, copyKarafLog=False, removeHostComponent=True )
 
+    def CASE601( self, main ):
+        """
+        Bring down all switches
+        Verify Topology
+        Bring up all switches
+        Verify
+
+        Repeat 3 times
+        """
+        import time
+        from tests.USECASE.SegmentRouting.SRRouting.dependencies.SRRoutingTest import *
+        from tests.USECASE.SegmentRouting.dependencies.Testcaselib import Testcaselib as lib
+        main.case( "Bring down all switches then recover" )
+        setupTest( main, test_idx=601, external=False )
+        main.Cluster.next().CLI.balanceMasters()
+        time.sleep( float( main.params[ 'timers' ][ 'balanceMasterSleep' ] ) )
+        main.Network.discoverHosts( hostList=main.internalIpv4Hosts + main.internalIpv6Hosts )
+        totalSwitches = int( main.params[ 'TOPO' ][ 'switchNum' ] )
+        totalLinks = int( main.params[ 'TOPO' ][ 'linkNum' ] )
+        switchList = [ 'spine101', 'spine102', 'spine103', 'spine104',
+                       'leaf1', 'leaf2', 'leaf3', 'leaf4', 'leaf5', 'leaf6' ]
+        verify( main, disconnected=False, external=False )
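+        # Repeat the full stop/start cycle to confirm recovery is repeatable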
+        for i in range( 1, 4 ):
+            main.log.info( "Beginning iteration {} of stopping then starting all switches".format( i ) )
+            main.log.debug( main.Cluster.next().summary() )
+            # Bring down all switches
+            main.step( "Stopping switches - iteration " + str( i ) )
+            switchStop = main.TRUE
+            for switch in switchList:
+                switchStop = switchStop and main.Network.switch( SW=switch, OPTION="stop" )
+            utilities.assert_equals( expect=main.TRUE, actual=switchStop,
+                                     onpass="All switches stopped",
+                                     onfail="Failed to stop all switches" )
+
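+            # Allow time for ONOS to detect that all switches have disconnected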
+            time.sleep( 60 )
+            lib.verifyTopology( main, 0, 0, main.Cluster.numCtrls )
+            # Bring up all switches
+            main.log.debug( main.Cluster.next().summary() )
+            main.step( "Starting switches - iteration " + str( i ) )
+            switchStart = main.TRUE
+            for switch in switchList:
+                switchStart = switchStart and main.Network.switch( SW=switch, OPTION="start" )
+            utilities.assert_equals( expect=main.TRUE, actual=switchStart,
+                                     onpass="All switches started",
+                                     onfail="Failed to start all switches" )
+
+            main.Network.discoverHosts( hostList=main.internalIpv4Hosts + main.internalIpv6Hosts )
+            lib.verifyTopology( main, totalSwitches, totalLinks, main.Cluster.numCtrls )
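+            # Log cluster summaries while waiting for the network to settle before pinging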
+            main.log.debug( main.Cluster.next().summary() )
+            time.sleep( 60 )
+            main.log.debug( main.Cluster.next().summary() )
+            time.sleep( 60 )
+            main.log.debug( main.Cluster.next().summary() )
+            verifyPing( main )
+            verify( main, disconnected=False, external=False )
+        verify( main, disconnected=False, external=False )
+        lib.cleanup( main, copyKarafLog=False, removeHostComponent=True )
+
     def CASE603( self, main ):
         """"
         Drop HAGG-1 device and test connectivity.
@@ -476,7 +535,6 @@
 
         Repeat the same with HAGG-2 and DAAS-2.
         """
-        import time
         from tests.USECASE.SegmentRouting.SRRouting.dependencies.SRRoutingTest import *
         from tests.USECASE.SegmentRouting.dependencies.Testcaselib import Testcaselib as lib
         main.case( "Drop hagg spine switch along with dass leaf switch." )
@@ -485,30 +543,48 @@
         main.disconnectedIpv6Hosts = []
 
         verify( main )
-        lib.killSwitch( main, "spine103", int( main.params[ "TOPO" ]["switchNum" ] ) - 1, int( main.params[ "TOPO" ][ "linkNum" ] ) - 6 )
+        lib.killSwitch( main, "spine103",
+                        int( main.params[ "TOPO" ]["switchNum" ] ) - 1,
+                        int( main.params[ "TOPO" ][ "linkNum" ] ) - 6 )
         verify( main )
-        lib.killSwitch( main, "leaf6", int( main.params[ "TOPO" ]["switchNum" ] ) - 2, int( main.params[ "TOPO" ][ "linkNum" ] ) - 8 )
+        lib.killSwitch( main, "leaf6",
+                        int( main.params[ "TOPO" ]["switchNum" ] ) - 2,
+                        int( main.params[ "TOPO" ][ "linkNum" ] ) - 8 )
         main.disconnectedIpv4Hosts = [ 'h12v4', 'h13v4']
         main.disconnectedIpv6Hosts = [ 'h12v6', 'h13v6']
         verify( main )
-        lib.recoverSwitch( main, "leaf6", int( main.params[ "TOPO" ]["switchNum" ] ) - 1, int( main.params[ "TOPO" ][ "linkNum" ] ) - 6, rediscoverHosts=True)
+        lib.recoverSwitch( main, "leaf6",
+                           int( main.params[ "TOPO" ]["switchNum" ] ) - 1,
+                           int( main.params[ "TOPO" ][ "linkNum" ] ) - 6,
+                           rediscoverHosts=True)
         main.disconnectedIpv4Hosts = []
         main.disconnectedIpv6Hosts = []
         verify( main )
-        lib.recoverSwitch( main, "spine103", int( main.params[ "TOPO" ][ "switchNum" ] ), int( main.params[ "TOPO" ][ "linkNum" ] ))
+        lib.recoverSwitch( main, "spine103",
+                           int( main.params[ "TOPO" ][ "switchNum" ] ),
+                           int( main.params[ "TOPO" ][ "linkNum" ] ) )
         verify( main )
 
-        lib.killSwitch( main, "spine104", int( main.params[ "TOPO" ]["switchNum" ] ) - 1, int( main.params[ "TOPO" ][ "linkNum" ] ) - 6 )
+        lib.killSwitch( main, "spine104",
+                        int( main.params[ "TOPO" ]["switchNum" ] ) - 1,
+                        int( main.params[ "TOPO" ][ "linkNum" ] ) - 6 )
         verify( main )
-        lib.killSwitch( main, "leaf1", int( main.params[ "TOPO" ]["switchNum" ] ) - 2, int( main.params[ "TOPO" ][ "linkNum" ] ) - 8 )
+        lib.killSwitch( main, "leaf1",
+                        int( main.params[ "TOPO" ]["switchNum" ] ) - 2,
+                        int( main.params[ "TOPO" ][ "linkNum" ] ) - 8 )
         main.disconnectedIpv4Hosts = [ 'h1v4', 'h2v4']
         main.disconnectedIpv6Hosts = [ 'h1v6', 'h2v6']
         verify( main )
-        lib.recoverSwitch( main, "leaf1", int( main.params[ "TOPO" ]["switchNum" ] ) - 1, int( main.params[ "TOPO" ][ "linkNum" ] ) - 6, rediscoverHosts=True)
+        lib.recoverSwitch( main, "leaf1",
+                           int( main.params[ "TOPO" ]["switchNum" ] ) - 1,
+                           int( main.params[ "TOPO" ][ "linkNum" ] ) - 6,
+                           rediscoverHosts=True )
         main.disconnectedIpv4Hosts = []
         main.disconnectedIpv6Hosts = []
         verify( main )
-        lib.recoverSwitch( main, "spine104", int( main.params[ "TOPO" ][ "switchNum" ] ), int( main.params[ "TOPO" ][ "linkNum" ] ))
+        lib.recoverSwitch( main, "spine104",
+                           int( main.params[ "TOPO" ][ "switchNum" ] ),
+                           int( main.params[ "TOPO" ][ "linkNum" ] ) )
         verify( main )
 
         lib.cleanup( main, copyKarafLog=False, removeHostComponent=True )
@@ -520,7 +596,6 @@
         Drop HAGG-2 device and test connectivity.
         Bring up HAGG-2 device and test connectivity
         """
-        import time
         from tests.USECASE.SegmentRouting.SRRouting.dependencies.SRRoutingTest import *
         from tests.USECASE.SegmentRouting.dependencies.Testcaselib import Testcaselib as lib
         main.case( "Drop hagg spine switches." )
@@ -528,13 +603,21 @@
         main.disconnectedIpv4Hosts = []
         main.disconnectedIpv6Hosts = []
         verify( main )
-        lib.killSwitch( main, "spine103", int( main.params[ "TOPO" ]["switchNum" ] ) - 1, int( main.params[ "TOPO" ][ "linkNum" ] ) - 6 )
+        lib.killSwitch( main, "spine103",
+                        int( main.params[ "TOPO" ]["switchNum" ] ) - 1,
+                        int( main.params[ "TOPO" ][ "linkNum" ] ) - 6 )
         verify( main )
-        lib.recoverSwitch( main, "spine103", int( main.params[ "TOPO" ][ "switchNum" ] ), int( main.params[ "TOPO" ][ "linkNum" ] ))
+        lib.recoverSwitch( main, "spine103",
+                           int( main.params[ "TOPO" ][ "switchNum" ] ),
+                           int( main.params[ "TOPO" ][ "linkNum" ] ) )
         verify( main )
-        lib.killSwitch( main, "spine104", int( main.params[ "TOPO" ]["switchNum" ] ) - 1, int( main.params[ "TOPO" ][ "linkNum" ] ) - 6 )
+        lib.killSwitch( main, "spine104",
+                        int( main.params[ "TOPO" ]["switchNum" ] ) - 1,
+                        int( main.params[ "TOPO" ][ "linkNum" ] ) - 6 )
         verify( main )
-        lib.recoverSwitch( main, "spine104", int( main.params[ "TOPO" ][ "switchNum" ] ), int( main.params[ "TOPO" ][ "linkNum" ] ))
+        lib.recoverSwitch( main, "spine104",
+                           int( main.params[ "TOPO" ][ "switchNum" ] ),
+                           int( main.params[ "TOPO" ][ "linkNum" ] ) )
         verify( main )
         lib.cleanup( main, copyKarafLog=False, removeHostComponent=True )
 
@@ -553,18 +636,24 @@
         main.disconnectedIpv4Hosts = []
         main.disconnectedIpv6Hosts = []
         verify( main )
-        lib.killSwitch( main, "spine103", int( main.params[ "TOPO" ][ "switchNum" ] ) - 1, int( main.params[ "TOPO" ][ "linkNum" ] ) - 6 )
+        lib.killSwitch( main, "spine103",
+                        int( main.params[ "TOPO" ][ "switchNum" ] ) - 1,
+                        int( main.params[ "TOPO" ][ "linkNum" ] ) - 6 )
         verify( main )
-        lib.killSwitch( main, [ "leaf2", "leaf3", "leaf4", "leaf5" ], int( main.params[ "TOPO" ][ "switchNum" ] ) - 5,
+        lib.killSwitch( main, [ "leaf2", "leaf3", "leaf4", "leaf5" ],
+                        int( main.params[ "TOPO" ][ "switchNum" ] ) - 5,
                         int( main.params[ "TOPO" ][ "linkNum" ] ) - 42 )
         main.disconnectedIpv4Hosts = [ "h3v4", "h4v4", "h5v4", "h6v4", "h7v4", "h8v4", "h9v4", "h10v4", "h11v4" ]
         main.disconnectedIpv6Hosts = [ "h3v6", "h4v6", "h5v6", "h6v6", "h7v6", "h8v6", "h9v6", "h10v6", "h11v6" ]
         main.disconnectedExternalIpv4Hosts = [ "rh1v4", "rh2v4", "rh5v4" ]
         main.disconnectedExternalIpv6Hosts = [ "rh1v6", "rh11v6", "rh5v6", "rh2v6", "rh22v6" ]
         verify( main, disconnected=True )
-        lib.recoverSwitch( main, "spine103", int( main.params[ "TOPO" ][ "switchNum" ] ) - 4, int( main.params[ "TOPO" ][ "linkNum" ] ) - 36 )
+        lib.recoverSwitch( main, "spine103",
+                           int( main.params[ "TOPO" ][ "switchNum" ] ) - 4,
+                           int( main.params[ "TOPO" ][ "linkNum" ] ) - 36 )
         verify( main, disconnected=True )
-        lib.recoverSwitch( main, [ "leaf2", "leaf3", "leaf4", "leaf5" ], int( main.params[ "TOPO" ][ "switchNum" ] ),
+        lib.recoverSwitch( main, [ "leaf2", "leaf3", "leaf4", "leaf5" ],
+                           int( main.params[ "TOPO" ][ "switchNum" ] ),
                            int( main.params[ "TOPO" ][ "linkNum" ] ) )
         main.disconnectedIpv4Hosts = []
         main.disconnectedIpv6Hosts = []
@@ -572,18 +661,24 @@
         main.disconnectedExternalIpv6Hosts = [ ]
         verify( main )
 
-        lib.killSwitch( main, "spine104", int( main.params[ "TOPO" ][ "switchNum" ] ) - 1, int( main.params[ "TOPO" ][ "linkNum" ] ) - 6 )
+        lib.killSwitch( main, "spine104",
+                        int( main.params[ "TOPO" ][ "switchNum" ] ) - 1,
+                        int( main.params[ "TOPO" ][ "linkNum" ] ) - 6 )
         verify( main )
-        lib.killSwitch( main, [ "leaf2", "leaf3", "leaf4", "leaf5" ], int( main.params[ "TOPO" ][ "switchNum" ] ) - 5,
+        lib.killSwitch( main, [ "leaf2", "leaf3", "leaf4", "leaf5" ],
+                        int( main.params[ "TOPO" ][ "switchNum" ] ) - 5,
                         int( main.params[ "TOPO" ][ "linkNum" ] ) - 42 )
         main.disconnectedIpv4Hosts = [ "h3v4", "h4v4", "h5v4", "h6v4", "h7v4", "h8v4", "h9v4", "h10v4", "h11v4" ]
         main.disconnectedIpv6Hosts = [ "h3v6", "h4v6", "h5v6", "h6v6", "h7v6", "h8v6", "h9v6", "h10v6", "h11v6" ]
         main.disconnectedExternalIpv4Hosts = [ "rh1v4", "rh2v4", "rh5v4" ]
         main.disconnectedExternalIpv6Hosts = [ "rh1v6", "rh11v6", "rh5v6", "rh2v6", "rh22v6" ]
         verify( main, disconnected=True )
-        lib.recoverSwitch( main, "spine104", int( main.params[ "TOPO" ][ "switchNum" ] ) - 4, int( main.params[ "TOPO" ][ "linkNum" ] ) - 36 )
+        lib.recoverSwitch( main, "spine104",
+                           int( main.params[ "TOPO" ][ "switchNum" ] ) - 4,
+                           int( main.params[ "TOPO" ][ "linkNum" ] ) - 36 )
         verify( main, disconnected=True )
-        lib.recoverSwitch( main, [ "leaf2", "leaf3", "leaf4", "leaf5" ], int( main.params[ "TOPO" ][ "switchNum" ] ),
+        lib.recoverSwitch( main, [ "leaf2", "leaf3", "leaf4", "leaf5" ],
+                           int( main.params[ "TOPO" ][ "switchNum" ] ),
                            int( main.params[ "TOPO" ][ "linkNum" ] ) )
         main.disconnectedIpv4Hosts = []
         main.disconnectedIpv6Hosts = []
@@ -600,7 +695,6 @@
         Bring up the paired leaf and test connectivity
         Repeat above with SPINE-2 and a different paired leaf
         """
-        import time
         from tests.USECASE.SegmentRouting.SRRouting.dependencies.SRRoutingTest import *
         from tests.USECASE.SegmentRouting.dependencies.Testcaselib import Testcaselib as lib
         main.case( "Drop spine and paired leaf" )
@@ -646,7 +740,6 @@
         check that buckets in select groups change accordingly
         Bring up links again and check that buckets in select groups change accordingly
         """
-        import time
         from tests.USECASE.SegmentRouting.SRRouting.dependencies.SRRoutingTest import *
         from tests.USECASE.SegmentRouting.dependencies.Testcaselib import Testcaselib as lib
         main.case( "Take down one of double links towards the spine" )
@@ -719,7 +812,6 @@
         """
         Take down all uplinks from a paired leaf switch
         """
-        import time
         from tests.USECASE.SegmentRouting.SRRouting.dependencies.SRRoutingTest import *
         from tests.USECASE.SegmentRouting.dependencies.Testcaselib import Testcaselib as lib
         from core import utilities
@@ -756,7 +848,6 @@
         Drop a device
         Bring that same instance up again and observe that this specific instance sees that the device is down.
         """
-        import time
         from tests.USECASE.SegmentRouting.SRRouting.dependencies.SRRoutingTest import *
         from tests.USECASE.SegmentRouting.dependencies.Testcaselib import Testcaselib as lib
         from core import utilities
@@ -773,6 +864,186 @@
                                  onfail="ONOS instance {} doesn't see correct device numbers".format( onosToKill ) )
         lib.cleanup( main, copyKarafLog=False, removeHostComponent=True )
 
+    def CASE640( self, main ):
+        """
+        A controller instance and a switch go down at the same time, then both are brought back up together
+
+        A. Instance goes down and SPINE-1 goes down
+            - All connectivity should be there
+            - Bring them up together
+            - All connectivity should be there
+        B. Instance goes down and HAGG-1 goes down
+            - All connectivity should be there
+            - Bring them up together
+            - All connectivity should be there
+        C. Instance goes down and a paired leaf switch goes down
+            - Single homed hosts in this leaf should lose connectivity all others should be ok
+            - Bring them up together
+            - Test connectivity
+        """
+        import time
+        from tests.USECASE.SegmentRouting.SRRouting.dependencies.SRRoutingTest import *
+        from tests.USECASE.SegmentRouting.dependencies.Testcaselib import Testcaselib as lib
+        main.case( "Drop an ONOS instance and switch(es) at the same time" )
+        caseDict = { 'A': { 'switches': "spine101",
+                            'disconnectedV4': [],
+                            'disconnectedV6': [],
+                            'expectedSwitches': 9,
+                            'expectedLinks': 30 },
+                     'B': { 'switches': "spine103",
+                            'disconnectedV4': [],
+                            'disconnectedV6': [],
+                            'expectedSwitches': 9,
+                            'expectedLinks': 42 },
+                     'C': { 'switches': "leaf2",
+                            'disconnectedV4': [ "h3v4" ],
+                            'disconnectedV6': [ "h3v6" ],
+                            'expectedSwitches': 9,
+                            'expectedLinks': 38 } }
+        totalSwitches = int( main.params[ 'TOPO' ][ 'switchNum' ] )
+        totalLinks = int( main.params[ 'TOPO' ][ 'linkNum' ] )
+        nodeIndex = 0
+        cases = sorted( caseDict.keys() )
+        for case in cases:
+            switches = caseDict[ case ][ 'switches' ]
+            expectedSwitches = caseDict[ case ][ 'expectedSwitches' ]
+            expectedLinks = caseDict[ case ][ 'expectedLinks' ]
+            main.step( "\n640{}: Drop ONOS{} and switch(es) {} at the same time".format( case,
+                                                                                         nodeIndex + 1,
+                                                                                         switches ) )
+            setupTest( main, test_idx=640 )
+            main.Cluster.next().CLI.balanceMasters()
+            time.sleep( float( main.params[ 'timers' ][ 'balanceMasterSleep' ] ) )
+            main.Network.discoverHosts( hostList=main.internalIpv4Hosts + main.internalIpv6Hosts )
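+            # ONOS instance to be killed in this iteration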
+            instance = main.Cluster.controllers[ nodeIndex ]
+            verify( main, disconnected=False, external=False )
+
+            # Simultaneous failures
+            main.step( "Kill ONOS{}: {}".format( nodeIndex + 1, instance.ipAddress ) )
+            killResult = main.ONOSbench.onosDie( instance.ipAddress )
+            utilities.assert_equals( expect=main.TRUE, actual=killResult,
+                                     onpass="ONOS node killed",
+                                     onfail="Failed to kill ONOS node" )
+            instance.active = False
+            main.Cluster.reset()
+            # TODO: Remove sleeps from the concurrent events
+            lib.killSwitch( main, switches, expectedSwitches, expectedLinks )
+            main.disconnectedIpv4Hosts = caseDict[ case ][ 'disconnectedV4' ]
+            main.disconnectedIpv6Hosts = caseDict[ case ][ 'disconnectedV6' ]
+
+            # Verify functionality
+            main.log.debug( main.Cluster.next().summary() )
+            main.Network.discoverHosts( hostList=main.internalIpv4Hosts + main.internalIpv6Hosts )
+            main.log.debug( main.Cluster.next().summary() )
+            lib.verifyTopology( main, expectedSwitches, expectedLinks, main.Cluster.numCtrls - 1 )
+            lib.verifyNodes( main )
+            verify( main, external=False )
+
+            # Bring everything back up
+            lib.recoverSwitch( main, switches, totalSwitches, totalLinks, rediscoverHosts=True )
+            main.disconnectedIpv4Hosts = []
+            main.disconnectedIpv6Hosts = []
+            lib.recoverOnos( main, [ nodeIndex ], expectedSwitches, expectedLinks, main.Cluster.numCtrls )
+
+            # Verify functionality
+            lib.verifyNodes( main )
+            verify( main, disconnected=False, external=False )
+            lib.cleanup( main, copyKarafLog=False, removeHostComponent=True )
+            nodeIndex = ( nodeIndex + 1 ) % main.Cluster.numCtrls
+
+    def CASE641( self, main ):
+        """
+        A controller instance goes down while a switch comes back up at the same time
+
+        A. Take down SPINE-1
+            - Test connectivity
+            - Bring up SPINE-1 and drop an instance at the same time
+            - Test connectivity
+            - Bring up instance one
+            - Test connectivity
+        B. Take down HAGG-1
+            - Test connectivity
+            - Bring up HAGG-1 and drop an instance at the same time
+            - Test connectivity
+            - Bring up instance one
+            - Test connectivity
+        C. Take down a paired leaf switch
+            - Test connectivity ( single homed hosts on this leaf will lose it )
+            - Bring up paired leaf switch and drop a controller instance at the same time
+            - Test connectivity
+            - Bring up the instance
+            - Test connectivity
+        """
+        import time
+        from tests.USECASE.SegmentRouting.SRRouting.dependencies.SRRoutingTest import *
+        from tests.USECASE.SegmentRouting.dependencies.Testcaselib import Testcaselib as lib
+        main.case( "Drop an ONOS instance and recover switch(es) at the same time" )
+        caseDict = { 'A': { 'switches': "spine101",
+                            'disconnectedV4': [],
+                            'disconnectedV6': [],
+                            'expectedSwitches': 9,
+                            'expectedLinks': 30 },
+                     'B': { 'switches': "spine103",
+                            'disconnectedV4': [],
+                            'disconnectedV6': [],
+                            'expectedSwitches': 9,
+                            'expectedLinks': 42 },
+                     'C': { 'switches': "leaf2",
+                            'disconnectedV4': [ "h3v4" ],
+                            'disconnectedV6': [ "h3v6" ],
+                            'expectedSwitches': 9,
+                            'expectedLinks': 38 } }
+        totalSwitches = int( main.params[ 'TOPO' ][ 'switchNum' ] )
+        totalLinks = int( main.params[ 'TOPO' ][ 'linkNum' ] )
+        nodeIndex = 0
+        cases = sorted( caseDict.keys() )
+        for case in cases:
+            switches = caseDict[ case ][ 'switches' ]
+            expectedSwitches = caseDict[ case ][ 'expectedSwitches' ]
+            expectedLinks = caseDict[ case ][ 'expectedLinks' ]
+            main.step( "\n641{}: Drop ONOS{} and recover switch(es) {} at the same time".format( case,
+                                                                                                 nodeIndex + 1,
+                                                                                                 switches ) )
+            setupTest( main, test_idx=641 )
+            main.Cluster.next().CLI.balanceMasters()
+            time.sleep( float( main.params[ 'timers' ][ 'balanceMasterSleep' ] ) )
+            main.Network.discoverHosts( hostList=main.internalIpv4Hosts + main.internalIpv6Hosts )
+            instance = main.Cluster.controllers[ nodeIndex ]
+            verify( main, disconnected=False, external=False )
+            # Drop the switch to setup scenario
+            lib.killSwitch( main, switches, expectedSwitches, expectedLinks )
+            main.disconnectedIpv4Hosts = caseDict[ case ][ 'disconnectedV4' ]
+            main.disconnectedIpv6Hosts = caseDict[ case ][ 'disconnectedV6' ]
+            verify( main, external=False )
+
+            # Simultaneous node failure and switch recovery
+            main.step( "Kill ONOS{}: {}".format( nodeIndex + 1, instance.ipAddress ) )
+            killResult = main.ONOSbench.onosDie( instance.ipAddress )
+            utilities.assert_equals( expect=main.TRUE, actual=killResult,
+                                     onpass="ONOS node killed",
+                                     onfail="Failed to kill ONOS node" )
+            instance.active = False
+            main.Cluster.reset()
+            # TODO: Remove sleeps from the concurrent events
+            lib.recoverSwitch( main, switches, totalSwitches, totalLinks, rediscoverHosts=True )
+            main.disconnectedIpv4Hosts = []
+            main.disconnectedIpv6Hosts = []
+
+            # Verify functionality
+            main.log.debug( main.Cluster.next().summary() )
+            lib.verifyTopology( main, totalSwitches, totalLinks, main.Cluster.numCtrls - 1 )
+            lib.verifyNodes( main )
+            verify( main, disconnected=False, external=False )
+
+            # Bring everything back up and verify functionality
+            lib.recoverOnos( main, [ nodeIndex ], totalSwitches, totalLinks, main.Cluster.numCtrls )
+            lib.verifyNodes( main )
+            verify( main, external=False )
+            lib.cleanup( main, copyKarafLog=False, removeHostComponent=True )
+            nodeIndex = ( nodeIndex + 1 ) % main.Cluster.numCtrls
+
     def CASE642( self, main ):
         """
         Drop one link from each double link
diff --git a/TestON/tests/USECASE/SegmentRouting/SRRouting/dependencies/SRRoutingTest.py b/TestON/tests/USECASE/SegmentRouting/SRRouting/dependencies/SRRoutingTest.py
index f4b5481..90e8129 100644
--- a/TestON/tests/USECASE/SegmentRouting/SRRouting/dependencies/SRRoutingTest.py
+++ b/TestON/tests/USECASE/SegmentRouting/SRRouting/dependencies/SRRoutingTest.py
@@ -19,7 +19,8 @@
     along with TestON.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-def setupTest( main, test_idx, onosNodes, ipv4=True, ipv6=True, external=True, static=False, countFlowsGroups=False ):
+def setupTest( main, test_idx, onosNodes=-1, ipv4=True, ipv6=True,
+               external=True, static=False, countFlowsGroups=False ):
     """
     SRRouting test setup
     """
@@ -31,6 +32,8 @@
     if not hasattr( main, 'apps' ):
         init = True
         lib.initTest( main )
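+    # A negative onosNodes value means use the current cluster size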
+    if onosNodes < 0:
+        onosNodes = main.Cluster.numCtrls
     # Skip onos packaging if the cluster size stays the same
     if not init and onosNodes == main.Cluster.numCtrls:
         skipPackage = True
@@ -162,7 +165,8 @@
     if external:
         verifyPingExternal( main, ipv4, ipv6, disconnected )
 
-def verifyLinkFailure( main, ipv4=True, ipv6=True, disconnected=False, internal=True, external=True, countFlowsGroups=False ):
+def verifyLinkFailure( main, ipv4=True, ipv6=True, disconnected=False,
+                       internal=True, external=True, countFlowsGroups=False ):
     """
     Kill and recover all links to spine101 and 102 sequencially and run verifications
     """
@@ -186,7 +190,8 @@
     lib.restoreLinkBatch( main, linksToRemove, 48, 10 )
     verify( main, ipv4, ipv6, disconnected, internal, external, countFlowsGroups )
 
-def verifySwitchFailure( main, ipv4=True, ipv6=True, disconnected=False, internal=True, external=True, countFlowsGroups=False ):
+def verifySwitchFailure( main, ipv4=True, ipv6=True, disconnected=False,
+                         internal=True, external=True, countFlowsGroups=False ):
     """
     Kill and recover spine101 and 102 sequencially and run verifications
     """
@@ -197,7 +202,8 @@
         lib.recoverSwitch( main, switchToKill, 10, 48 )
         verify( main, ipv4, ipv6, disconnected, internal, external, countFlowsGroups )
 
-def verifyOnosFailure( main, ipv4=True, ipv6=True, disconnected=False, internal=True, external=True, countFlowsGroups=False ):
+def verifyOnosFailure( main, ipv4=True, ipv6=True, disconnected=False,
+                       internal=True, external=True, countFlowsGroups=False ):
     """
     Kill and recover onos nodes sequencially and run verifications
     """
@@ -208,16 +214,17 @@
     numCtrls = len( main.Cluster.runningNodes )
     links = len( json.loads( main.Cluster.next().links() ) )
     switches = len( json.loads( main.Cluster.next().devices() ) )
+    mastershipSleep = float( main.params[ 'timers' ][ 'balanceMasterSleep' ] )
     for ctrl in xrange( numCtrls ):
         # Kill node
         lib.killOnos( main, [ ctrl ], switches, links, ( numCtrls - 1 ) )
         main.Cluster.active(0).CLI.balanceMasters()
-        time.sleep( float( main.params[ 'timers' ][ 'balanceMasterSleep' ] ) )
+        time.sleep( mastershipSleep )
         verify( main, ipv4, ipv6, disconnected, internal, external, countFlowsGroups )
         # Recover node
         lib.recoverOnos( main, [ ctrl ], switches, links, numCtrls )
         main.Cluster.active(0).CLI.balanceMasters()
-        time.sleep( float( main.params[ 'timers' ][ 'balanceMasterSleep' ] ) )
+        time.sleep( mastershipSleep )
         verify( main, ipv4, ipv6, disconnected, internal, external, countFlowsGroups )
 
 def verify( main, ipv4=True, ipv6=True, disconnected=True, internal=True, external=True, countFlowsGroups=False ):
@@ -230,6 +237,6 @@
     lib.verifyNetworkHostIp( main )
     # check flows / groups numbers
     if countFlowsGroups:
-        run.checkFlowsGroupsFromFile( main )
+        lib.checkFlowsGroupsFromFile( main )
     # ping hosts
     verifyPing( main, ipv4, ipv6, disconnected, internal, external )
diff --git a/TestON/tests/USECASE/SegmentRouting/dependencies/Testcaselib.py b/TestON/tests/USECASE/SegmentRouting/dependencies/Testcaselib.py
index 01aa8f7..ce6d20c 100644
--- a/TestON/tests/USECASE/SegmentRouting/dependencies/Testcaselib.py
+++ b/TestON/tests/USECASE/SegmentRouting/dependencies/Testcaselib.py
@@ -19,7 +19,6 @@
     along with TestON.  If not, see <http://www.gnu.org/licenses/>.
 """
 import os
-import imp
 import time
 import json
 import urllib
@@ -52,7 +51,6 @@
         main.testSetUp.envSetupDescription( False )
         stepResult = main.FALSE
         try:
-            main.step( "Constructing test variables" )
             # Test variables
             main.cellName = main.params[ 'ENV' ][ 'cellName' ]
             main.apps = main.params[ 'ENV' ][ 'cellApps' ]
@@ -427,7 +425,8 @@
         return
 
     @staticmethod
-    def pingAll( main, tag="", dumpflows=True, acceptableFailed=0, basedOnIp=False, sleep=10, retryAttempts=1, skipOnFail=False ):
+    def pingAll( main, tag="", dumpflows=True, acceptableFailed=0, basedOnIp=False,
+                 sleep=10, retryAttempts=1, skipOnFail=False ):
         '''
         Verify connectivity between hosts according to the ping chart
         acceptableFailed: max number of acceptable failed pings.
@@ -507,81 +506,92 @@
                                         tag + "_GroupsOn" )
 
     @staticmethod
-    def killLink( main, end1, end2, switches, links ):
+    def killLink( main, end1, end2, switches, links, sleep=None ):
         """
         end1,end2: identify the switches, ex.: 'leaf1', 'spine1'
         switches, links: number of expected switches and links after linkDown, ex.: '4', '6'
         Kill a link and verify ONOS can see the proper link change
         """
-        main.linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
+        if sleep is None:
+            sleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
+        else:
+            sleep = float( sleep )
         main.step( "Kill link between %s and %s" % ( end1, end2 ) )
-        LinkDown = main.Network.link( END1=end1, END2=end2, OPTION="down" )
-        LinkDown = main.Network.link( END2=end1, END1=end2, OPTION="down" )
+        linkDown = main.Network.link( END1=end1, END2=end2, OPTION="down" )
+        linkDown = linkDown and main.Network.link( END2=end1, END1=end2, OPTION="down" )
+        # TODO: Can remove this, since in the retry we will wait anyways if topology is incorrect
         main.log.info(
-                "Waiting %s seconds for link down to be discovered" % main.linkSleep )
-        time.sleep( main.linkSleep )
+                "Waiting %s seconds for link down to be discovered" % sleep )
+        time.sleep( sleep )
         topology = utilities.retry( main.Cluster.active( 0 ).CLI.checkStatus,
                                     main.FALSE,
                                     kwargs={ 'numoswitch': switches,
                                              'numolink': links },
                                     attempts=10,
-                                    sleep=main.linkSleep )
-        result = topology & LinkDown
+                                    sleep=sleep )
+        result = topology and linkDown
         utilities.assert_equals( expect=main.TRUE, actual=result,
                                  onpass="Link down successful",
                                  onfail="Failed to turn off link?" )
 
     @staticmethod
-    def killLinkBatch( main, links, linksAfter, switches ):
+    def killLinkBatch( main, links, linksAfter, switches, sleep=None ):
         """
         links = list of links (src, dst) to bring down.
         """
 
         main.step("Killing a batch of links {0}".format(links))
+        if sleep is None:
+            sleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
+        else:
+            sleep = float( sleep )
 
         for end1, end2 in links:
             main.Network.link( END1=end1, END2=end2, OPTION="down")
             main.Network.link( END1=end2, END2=end1, OPTION="down")
 
-        main.linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
+        # TODO: Can remove this, since in the retry we will wait anyways if topology is incorrect
         main.log.info(
-                "Waiting %s seconds for links down to be discovered" % main.linkSleep )
-        time.sleep( main.linkSleep )
+                "Waiting %s seconds for links down to be discovered" % sleep )
+        time.sleep( sleep )
 
         topology = utilities.retry( main.Cluster.active( 0 ).CLI.checkStatus,
                                     main.FALSE,
                                     kwargs={ 'numoswitch': switches,
                                              'numolink': linksAfter },
                                     attempts=10,
-                                    sleep=main.linkSleep )
+                                    sleep=sleep )
 
         utilities.assert_equals( expect=main.TRUE, actual=topology,
                                  onpass="Link batch down successful",
                                  onfail="Link batch down failed" )
 
     @staticmethod
-    def restoreLinkBatch( main, links, linksAfter, switches ):
+    def restoreLinkBatch( main, links, linksAfter, switches, sleep=None ):
         """
         links = list of link (src, dst) to bring up again.
         """
 
         main.step("Restoring a batch of links {0}".format(links))
+        if sleep is None:
+            sleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
+        else:
+            sleep = float( sleep )
 
         for end1, end2 in links:
             main.Network.link( END1=end1, END2=end2, OPTION="up")
             main.Network.link( END1=end2, END2=end1, OPTION="up")
 
-        main.linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
         main.log.info(
-                "Waiting %s seconds for links up to be discovered" % main.linkSleep )
-        time.sleep( main.linkSleep )
+                "Waiting %s seconds for links up to be discovered" % sleep )
+        time.sleep( sleep )
 
         topology = utilities.retry( main.Cluster.active( 0 ).CLI.checkStatus,
                                     main.FALSE,
                                     kwargs={ 'numoswitch': switches,
                                              'numolink': linksAfter },
                                     attempts=10,
-                                    sleep=main.linkSleep )
+                                    sleep=sleep )
 
         utilities.assert_equals( expect=main.TRUE, actual=topology,
                                  onpass="Link batch up successful",
@@ -635,7 +645,7 @@
 
     @staticmethod
     def restoreLink( main, end1, end2, switches, links,
-                     portUp=False, dpid1='', dpid2='', port1='', port2='' ):
+                     portUp=False, dpid1='', dpid2='', port1='', port2='', sleep=None ):
         """
         Params:
             end1,end2: identify the end switches, ex.: 'leaf1', 'spine1'
@@ -646,23 +656,28 @@
         Kill a link and verify ONOS can see the proper link change
         """
         main.step( "Restore link between %s and %s" % ( end1, end2 ) )
+        if sleep is None:
+            sleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
+        else:
+            sleep = float( sleep )
         result = False
         count = 0
         while True:
             count += 1
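+            # Grab a controller handle for the portstate and checkStatus calls below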
+            ctrl = main.Cluster.next()
             main.Network.link( END1=end1, END2=end2, OPTION="up" )
             main.Network.link( END2=end1, END1=end2, OPTION="up" )
             main.log.info(
-                    "Waiting %s seconds for link up to be discovered" % main.linkSleep )
-            time.sleep( main.linkSleep )
+                    "Waiting %s seconds for link up to be discovered" % sleep )
+            time.sleep( sleep )
 
             if portUp:
                 ctrl.CLI.portstate( dpid=dpid1, port=port1, state='Enable' )
                 ctrl.CLI.portstate( dpid=dpid2, port=port2, state='Enable' )
-                time.sleep( main.linkSleep )
+                time.sleep( sleep )
 
-            result = main.Cluster.active( 0 ).CLI.checkStatus( numoswitch=switches,
-                                                               numolink=links )
+            result = ctrl.CLI.checkStatus( numoswitch=switches,
+                                           numolink=links )
             if count > 5 or result:
                 break
         utilities.assert_equals( expect=main.TRUE, actual=result,
@@ -670,58 +685,67 @@
                                  onfail="Failed to bring link up" )
 
     @staticmethod
-    def killSwitch( main, switch, switches, links ):
+    def killSwitch( main, switch, switches, links, sleep=None ):
         """
         Params: switches, links: number of expected switches and links after SwitchDown, ex.: '4', '6'
         Completely kill a switch and verify ONOS can see the proper change
         """
-        main.switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
+        if sleep is None:
+            sleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
+        else:
+            sleep = float( sleep )
         switch = switch if isinstance( switch, list ) else [ switch ]
         main.step( "Kill " + str( switch ) )
         for s in switch:
             main.log.info( "Stopping " + s )
             main.Network.switch( SW=s, OPTION="stop" )
         # todo make this repeatable
+
+        # TODO: Can remove this, since in the retry we will wait anyways if topology is incorrect
         main.log.info( "Waiting %s seconds for switch down to be discovered" % (
-            main.switchSleep ) )
-        time.sleep( main.switchSleep )
+            sleep ) )
+        time.sleep( sleep )
         topology = utilities.retry( main.Cluster.active( 0 ).CLI.checkStatus,
                                     main.FALSE,
                                     kwargs={ 'numoswitch': switches,
                                              'numolink': links },
                                     attempts=10,
-                                    sleep=main.switchSleep )
+                                    sleep=sleep )
         utilities.assert_equals( expect=main.TRUE, actual=topology,
                                  onpass="Kill switch successful",
                                  onfail="Failed to kill switch?" )
 
     @staticmethod
-    def recoverSwitch( main, switch, switches, links, rediscoverHosts=False, hostsToDiscover=[] ):
+    def recoverSwitch( main, switch, switches, links, rediscoverHosts=False, hostsToDiscover=[], sleep=None ):
         """
         Params: switches, links: number of expected switches and links after SwitchUp, ex.: '4', '6'
         Recover a switch and verify ONOS can see the proper change
         """
-        # todo make this repeatable
+        if sleep is None:
+            sleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
+        else:
+            sleep = float( sleep )
+        # TODO make this repeatable
         switch = switch if isinstance( switch, list ) else [ switch ]
         main.step( "Recovering " + str( switch ) )
         for s in switch:
             main.log.info( "Starting " + s )
             main.Network.switch( SW=s, OPTION="start" )
         main.log.info( "Waiting %s seconds for switch up to be discovered" % (
-            main.switchSleep ) )
-        time.sleep( main.switchSleep )
+            sleep ) )
+        time.sleep( sleep )
         if rediscoverHosts:
             main.Network.discoverHosts( hostList=hostsToDiscover )
             main.log.info( "Waiting %s seconds for hosts to get re-discovered" % (
-                           main.switchSleep ) )
-            time.sleep( main.switchSleep )
+                           sleep ) )
+            time.sleep( sleep )
 
         topology = utilities.retry( main.Cluster.active( 0 ).CLI.checkStatus,
                                     main.FALSE,
                                     kwargs={ 'numoswitch': switches,
                                              'numolink': links },
                                     attempts=10,
-                                    sleep=main.switchSleep )
+                                    sleep=sleep )
         utilities.assert_equals( expect=main.TRUE, actual=topology,
                                  onpass="Switch recovery successful",
                                  onfail="Failed to recover switch?" )
@@ -768,14 +792,62 @@
             main.ONOSbench.onosStop( ctrl.ipAddress )
 
     @staticmethod
-    def killOnos( main, nodes, switches, links, expNodes ):
+    def verifyNodes( main ):
+        """
+        Verifies that each active node in the cluster has an accurate view
+        of the other nodes and their status
+        """
+        nodeResults = utilities.retry( main.Cluster.nodesCheck,
+                                       False,
+                                       attempts=10,
+                                       sleep=10 )
+        utilities.assert_equals( expect=True, actual=nodeResults,
+                                 onpass="Nodes check successful",
+                                 onfail="Nodes check NOT successful" )
+
+        if not nodeResults:
+            for ctrl in main.Cluster.runningNodes:
+                main.log.debug( "{} components not ACTIVE: \n{}".format(
+                    ctrl.name,
+                    ctrl.CLI.sendline( "scr:list | grep -v ACTIVE" ) ) )
+            main.log.error( "Failed to kill ONOS, stopping test" )
+            main.cleanAndExit()
+
+    @staticmethod
+    def verifyTopology( main, switches, links, expNodes ):
+        """
+        Verifies that the ONOS cluster has an accurate view of the topology
+
+        Params:
+        switches, links, expNodes: number of expected switches, links, and nodes at this point in the test ex.: '4', '6', '2'
+        """
+        main.step( "Check number of topology elements" )
+        topology = utilities.retry( main.Cluster.active( 0 ).CLI.checkStatus,
+                                    main.FALSE,
+                                    kwargs={ 'numoswitch': switches,
+                                             'numolink': links,
+                                             'numoctrl': expNodes },
+                                    attempts=10,
+                                    sleep=12 )
+        utilities.assert_equals( expect=main.TRUE, actual=topology,
+                                 onpass="Number of topology elements are correct",
+                                 onfail="Unexpected number of links, switches, and/or controllers" )
+
+    @staticmethod
+    def killOnos( main, nodes, switches, links, expNodes, sleep=None ):
         """
         Params: nodes, integer array with position of the ONOS nodes in the CLIs array
         switches, links, nodes: number of expected switches, links and nodes after KillOnos, ex.: '4', '6'
         Completely Kill an ONOS instance and verify the ONOS cluster can see the proper change
         """
+        # TODO: We have enough information in the Cluster instance to remove expNodes from here and verifyTopology
         main.step( "Killing ONOS instances with index(es): {}".format( nodes ) )
-        main.onosSleep = float( main.params[ 'timers' ][ 'OnosDiscovery' ] )
+        if sleep is None:
+            sleep = float( main.params[ 'timers' ][ 'OnosDiscovery' ] )
+        else:
+            sleep = float( sleep )
 
         for i in nodes:
             killResult = main.ONOSbench.onosDie( main.Cluster.runningNodes[ i ].ipAddress )
@@ -783,48 +855,27 @@
                                      onpass="ONOS instance Killed",
                                      onfail="Error killing ONOS instance" )
             main.Cluster.runningNodes[ i ].active = False
-        time.sleep( main.onosSleep )
+        main.Cluster.reset()
+        time.sleep( sleep )
 
         if len( nodes ) < main.Cluster.numCtrls:
-
-            nodeResults = utilities.retry( main.Cluster.nodesCheck,
-                                           False,
-                                           attempts=10,
-                                           sleep=10 )
-            utilities.assert_equals( expect=True, actual=nodeResults,
-                                     onpass="Nodes check successful",
-                                     onfail="Nodes check NOT successful" )
-
-            if not nodeResults:
-                for i in nodes:
-                    ctrl = main.Cluster.runningNodes[ i ]
-                    main.log.debug( "{} components not ACTIVE: \n{}".format(
-                        ctrl.name,
-                        ctrl.CLI.sendline( "scr:list | grep -v ACTIVE" ) ) )
-                main.log.error( "Failed to kill ONOS, stopping test" )
-                main.cleanAndExit()
-
-            topology = utilities.retry( main.Cluster.active( 0 ).CLI.checkStatus,
-                                        main.FALSE,
-                                        kwargs={ 'numoswitch': switches,
-                                                 'numolink': links,
-                                                 'numoctrl': expNodes },
-                                        attempts=10,
-                                        sleep=12 )
-            utilities.assert_equals( expect=main.TRUE, actual=topology,
-                                     onpass="ONOS Instance down successful",
-                                     onfail="Failed to turn off ONOS Instance" )
+            Testcaselib.verifyNodes( main )
+            Testcaselib.verifyTopology( main, switches, links, expNodes )
 
     @staticmethod
-    def recoverOnos( main, nodes, switches, links, expNodes ):
+    def recoverOnos( main, nodes, switches, links, expNodes, sleep=None ):
         """
         Params: nodes, integer array with position of the ONOS nodes in the CLIs array
         switches, links, nodes: number of expected switches, links and nodes after recoverOnos, ex.: '4', '6'
         Recover an ONOS instance and verify the ONOS cluster can see the proper change
         """
         main.step( "Recovering ONOS instances with index(es): {}".format( nodes ) )
+        if sleep is None:
+            sleep = float( main.params[ 'timers' ][ 'OnosDiscovery' ] )
+        else:
+            sleep = float( sleep )
         [ main.ONOSbench.onosStart( main.Cluster.runningNodes[ i ].ipAddress ) for i in nodes ]
-        time.sleep( main.onosSleep )
+        time.sleep( sleep )
         for i in nodes:
             isUp = main.ONOSbench.isup( main.Cluster.runningNodes[ i ].ipAddress )
             utilities.assert_equals( expect=main.TRUE, actual=isUp,
@@ -843,34 +894,11 @@
                                      onpass="ONOS CLI is ready",
                                      onfail="ONOS CLI is not ready" )
 
+        main.Cluster.reset()
         main.step( "Checking ONOS nodes" )
-        nodeResults = utilities.retry( main.Cluster.nodesCheck,
-                                       False,
-                                       attempts=5,
-                                       sleep=10 )
-        utilities.assert_equals( expect=True, actual=nodeResults,
-                                 onpass="Nodes check successful",
-                                 onfail="Nodes check NOT successful" )
+        Testcaselib.verifyNodes( main )
+        Testcaselib.verifyTopology( main, switches, links, expNodes )
 
-        if not nodeResults:
-            for i in nodes:
-                ctrl = main.Cluster.runningNodes[ i ]
-                main.log.debug( "{} components not ACTIVE: \n{}".format(
-                    ctrl.name,
-                    ctrl.CLI.sendline( "scr:list | grep -v ACTIVE" ) ) )
-            main.log.error( "Failed to start ONOS, stopping test" )
-            main.cleanAndExit()
-
-        topology = utilities.retry( main.Cluster.active( 0 ).CLI.checkStatus,
-                                    main.FALSE,
-                                    kwargs={ 'numoswitch': switches,
-                                             'numolink': links,
-                                             'numoctrl': expNodes },
-                                    attempts=10,
-                                    sleep=12 )
-        utilities.assert_equals( expect=main.TRUE, actual=topology,
-                                 onpass="ONOS Instance down successful",
-                                 onfail="Failed to turn off ONOS Instance" )
         ready = utilities.retry( main.Cluster.active( 0 ).CLI.summary,
                                  main.FALSE,
                                  attempts=10,
diff --git a/TestON/tests/dependencies/ONOSSetup.py b/TestON/tests/dependencies/ONOSSetup.py
index ee339be..66eced1 100644
--- a/TestON/tests/dependencies/ONOSSetup.py
+++ b/TestON/tests/dependencies/ONOSSetup.py
@@ -35,7 +35,7 @@
             main.case( "Constructing test variables and building ONOS package" )
             main.caseExplanation = "For loading from params file, and pull" + \
                                    " and build the latest ONOS package"
-        main.step("Constructing test variables")
+        main.step( "Constructing test variables" )
         try:
             from tests.dependencies.Cluster import Cluster
         except ImportError: