Merge "Use t3-troubleshoot-simple in SR tests for dual-homed hosts"
diff --git a/TestON/drivers/common/cli/onosclidriver.py b/TestON/drivers/common/cli/onosclidriver.py
index 83374b6..dee2be5 100755
--- a/TestON/drivers/common/cli/onosclidriver.py
+++ b/TestON/drivers/common/cli/onosclidriver.py
@@ -2505,7 +2505,7 @@
             else:
                 main.cleanAndExit()
 
-    def flows( self, state="", jsonFormat=True, timeout=60, noExit=False, noCore=False ):
+    def flows( self, state="any", jsonFormat=True, timeout=60, noExit=False, noCore=False, device="" ):
         """
         Optional:
             * jsonFormat: enable output formatting in json
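+            * device: optional deviceId to narrow the output to a single device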
@@ -2516,10 +2516,11 @@
         try:
             cmdStr = "flows"
             if jsonFormat:
-                cmdStr += " -j "
+                cmdStr += " -j"
             if noCore:
-                cmdStr += " -n "
-            cmdStr += state
+                cmdStr += " -n"
+            cmdStr += " " + state
+            cmdStr += " " + device
             handle = self.sendline( cmdStr, timeout=timeout, noExit=noExit )
             assert handle is not None, "Error in sendline"
             assert "Command not found:" not in handle, handle
@@ -2942,6 +2943,32 @@
         main.log.debug( "found {} groups".format( count ) )
         return count if ((count > expectedGroupCount) if (comparison == 0) else (count == expectedGroupCount)) else main.FALSE
 
+    def getGroups( self, deviceId, groupType="any" ):
+        """
+        Retrieve groups from a specific device.
+        deviceId: Id of the device from which we retrieve groups
+        groupType: Type of group
+        """
+        try:
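+            # e.g. groupType="select", deviceId="of:0000000000000002" produces
+            # "groups -t select any of:0000000000000002"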
+            groupCmd = "groups -t {0} any {1}".format( groupType, deviceId )
+            handle = self.sendline( groupCmd )
+            assert handle is not None, "Error in sendline"
+            assert "Command not found:" not in handle, handle
+            return handle
+        except AssertionError:
+            main.log.exception( "" )
+            return None
+        except TypeError:
+            main.log.exception( self.name + ": Object not as expected" )
+            return None
+        except pexpect.EOF:
+            main.log.error( self.name + ": EOF exception found" )
+            main.log.error( self.name + ":    " + self.handle.before )
+            main.cleanAndExit()
+        except Exception:
+            main.log.exception( self.name + ": Uncaught exception!" )
+            main.cleanAndExit()
+
     def checkFlowAddedCount( self, deviceId, expectedFlowCount=0, core=False, comparison=0):
         """
         Description:
diff --git a/TestON/tests/USECASE/SegmentRouting/SRRouting/SRRouting.params b/TestON/tests/USECASE/SegmentRouting/SRRouting/SRRouting.params
index 52a05fd..0658e5f 100644
--- a/TestON/tests/USECASE/SegmentRouting/SRRouting/SRRouting.params
+++ b/TestON/tests/USECASE/SegmentRouting/SRRouting/SRRouting.params
@@ -1,5 +1,5 @@
 <PARAMS>
-    <testcases>101,102,103,104,105,106,107,108,109,201,202,203,204,205,206,207,208,209,301,302,303,304,305,306,307,308,309</testcases>
+    <testcases>101,102,103,104,105,106,107,108,109,201,202,203,204,205,206,207,208,209,301,302,303,304,305,306,307,308,309,601,602,603,604,605,606,620,621,622,630,640,641,642,643,651</testcases>
 
     <GRAPH>
         <nodeCluster>Fabric</nodeCluster>
diff --git a/TestON/tests/USECASE/SegmentRouting/SRRouting/SRRouting.py b/TestON/tests/USECASE/SegmentRouting/SRRouting/SRRouting.py
index dafbacd..53f52bb 100644
--- a/TestON/tests/USECASE/SegmentRouting/SRRouting/SRRouting.py
+++ b/TestON/tests/USECASE/SegmentRouting/SRRouting/SRRouting.py
@@ -527,6 +527,59 @@
         verify( main, disconnected=False, external=False )
         lib.cleanup( main, copyKarafLog=False, removeHostComponent=True )
 
+    def CASE602( self, main ):
+        """"
+        Take down a leaf switch that is paired and has a dual homed host
+        Restore the leaf switch
+        Repeat for various dual homed hosts and paired switches
+        """
+        from tests.USECASE.SegmentRouting.SRRouting.dependencies.SRRoutingTest import *
+        from tests.USECASE.SegmentRouting.dependencies.Testcaselib import Testcaselib as lib
+        main.case( "Drop a leaf switch that is paired and has a dual-homed host." )
+        setupTest( main, test_idx=602, onosNodes=3 )
+        verify( main, disconnected=False )
+        # Kill leaf-2
+        lib.killSwitch( main, "leaf2", 9, 38 )
+        hostLocations = { "h4v6": "of:0000000000000003/6",
+                          "h5v6": "of:0000000000000003/7",
+                          "h4v4": "of:0000000000000003/10",
+                          "h5v4": "of:0000000000000003/11" }
+        lib.verifyHostLocations( main, hostLocations )
+        main.disconnectedIpv4Hosts = [ "h3v4" ]
+        main.disconnectedIpv6Hosts = [ "h3v6" ]
+        verify( main )
+        # Recover leaf-2
+        lib.recoverSwitch( main, "leaf2", 10, 48, rediscoverHosts=True)
+        hostLocations = { "h4v6": [ "of:0000000000000002/7", "of:0000000000000003/6" ],
+                          "h5v6": [ "of:0000000000000002/8", "of:0000000000000003/7" ],
+                          "h4v4": [ "of:0000000000000002/10", "of:0000000000000003/10" ],
+                          "h5v4": [ "of:0000000000000002/11", "of:0000000000000003/11" ] }
+        lib.verifyHostLocations( main, hostLocations )
+        main.disconnectedIpv4Hosts = []
+        main.disconnectedIpv6Hosts = []
+        verify( main, disconnected=False )
+        # Kill leaf-4
+        lib.killSwitch( main, "leaf4", 9, 38 )
+        hostLocations = { "h9v6": "of:0000000000000005/6",
+                          "h10v6": "of:0000000000000005/7",
+                          "h9v4": "of:0000000000000005/9",
+                          "h10v4": "of:0000000000000005/10" }
+        lib.verifyHostLocations( main, hostLocations )
+        main.disconnectedIpv4Hosts = [ "h8v4" ]
+        main.disconnectedIpv6Hosts = [ "h8v6" ]
+        verify( main )
+        # Recover leaf-4
+        lib.recoverSwitch( main, "leaf4", 10, 48, rediscoverHosts=True)
+        hostLocations = { "h9v6": [ "of:0000000000000004/7", "of:0000000000000005/6" ],
+                          "h10v6": [ "of:0000000000000004/8", "of:0000000000000005/7" ],
+                          "h9v4": [ "of:0000000000000004/10", "of:0000000000000005/9" ],
+                          "h10v4": [ "of:0000000000000004/11", "of:0000000000000005/10" ] }
+        lib.verifyHostLocations( main, hostLocations )
+        main.disconnectedIpv4Hosts = []
+        main.disconnectedIpv6Hosts = []
+        verify( main, disconnected=False )
+        lib.cleanup( main, copyKarafLog=False, removeHostComponent=True )
+
     def CASE603( self, main ):
         """"
         Drop HAGG-1 device and test connectivity.
@@ -745,15 +798,61 @@
         main.case( "Take down one of double links towards the spine" )
         setupTest( main, test_idx=620, onosNodes=3 )
         verify( main, disconnected=False )
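+        # Expected number of buckets in the select group of each route prefix,
+        # per leaf switch, with all double links towards the spines up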
+        groupBuckets = { "of:0000000000000002": { "10.1.0.0/24": 4, "10.1.10.0/24": 4,
+                                                  "10.2.0.0/24": 1, "10.2.30.0/24": 1, "10.2.20.0/24": 1,
+                                                  "10.2.10.0/24": 4, "10.2.40.0/24": 4,
+                                                  "10.3.0.0/24": 4, "10.3.10.0/24": 8, "10.3.30.0/24": 8,
+                                                  "10.3.20.0/24": 4, "10.5.10.0/24": 4, "10.5.20.0/24": 4 },
+                         "of:0000000000000003": { "10.1.0.0/24": 4, "10.1.10.0/24": 4,
+                                                  "10.2.0.0/24": 4, "10.2.30.0/24": 1, "10.2.20.0/24": 1,
+                                                  "10.2.10.0/24": 1, "10.2.40.0/24": 1,
+                                                  "10.3.0.0/24": 4, "10.3.10.0/24": 8, "10.3.30.0/24": 8,
+                                                  "10.3.20.0/24": 4, "10.5.10.0/24": 4, "10.5.20.0/24": 4 },
+                         "of:0000000000000004": { "10.1.0.0/24": 4, "10.1.10.0/24": 4,
+                                                  "10.2.0.0/24": 4, "10.2.30.0/24": 8, "10.2.20.0/24": 8,
+                                                  "10.2.10.0/24": 4, "10.2.40.0/24": 4,
+                                                  "10.3.0.0/24": 1, "10.3.10.0/24": 1, "10.3.30.0/24": 1,
+                                                  "10.3.20.0/24": 4, "10.5.10.0/24": 4, "10.5.20.0/24": 4 },
+                         "of:0000000000000005": { "10.1.0.0/24": 4, "10.1.10.0/24": 4,
+                                                  "10.2.0.0/24": 4, "10.2.30.0/24": 8, "10.2.20.0/24": 8,
+                                                  "10.2.10.0/24": 4, "10.2.40.0/24": 4,
+                                                  "10.3.0.0/24": 4, "10.3.10.0/24": 1, "10.3.30.0/24": 1,
+                                                  "10.3.20.0/24": 1, "10.5.10.0/24": 4, "10.5.20.0/24": 4 } }
+        for switch, subnets in groupBuckets.items():
+            lib.checkGroupsForBuckets( main, switch, subnets )
+        # Take down one link of each double link towards the spines
         portsToDisable = [ [ "of:0000000000000002", 1 ], [ "of:0000000000000002", 3 ],
                            [ "of:0000000000000003", 1 ], [ "of:0000000000000003", 3 ],
                            [ "of:0000000000000004", 1 ], [ "of:0000000000000004", 3 ],
                            [ "of:0000000000000005", 1 ], [ "of:0000000000000005", 3 ] ]
         lib.disablePortBatch( main, portsToDisable, 10, 32 )
-        # TODO: check buckets in groups
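+        # With one link of each double link disabled, the expected bucket counts
+        # for remote prefixes are halved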
+        groupBucketsNew = { "of:0000000000000002": { "10.1.0.0/24": 2, "10.1.10.0/24": 2,
+                                                     "10.2.0.0/24": 1, "10.2.30.0/24": 1, "10.2.20.0/24": 1,
+                                                     "10.2.10.0/24": 2, "10.2.40.0/24": 2,
+                                                     "10.3.0.0/24": 2, "10.3.10.0/24": 4, "10.3.30.0/24": 4,
+                                                     "10.3.20.0/24": 2, "10.5.10.0/24": 2, "10.5.20.0/24": 2 },
+                            "of:0000000000000003": { "10.1.0.0/24": 2, "10.1.10.0/24": 2,
+                                                     "10.2.0.0/24": 2, "10.2.30.0/24": 1, "10.2.20.0/24": 1,
+                                                     "10.2.10.0/24": 1, "10.2.40.0/24": 1,
+                                                     "10.3.0.0/24": 2, "10.3.10.0/24": 4, "10.3.30.0/24": 4,
+                                                     "10.3.20.0/24": 2, "10.5.10.0/24": 2, "10.5.20.0/24": 2 },
+                            "of:0000000000000004": { "10.1.0.0/24": 2, "10.1.10.0/24": 2,
+                                                     "10.2.0.0/24": 2, "10.2.30.0/24": 4, "10.2.20.0/24": 4,
+                                                     "10.2.10.0/24": 2, "10.2.40.0/24": 2,
+                                                     "10.3.0.0/24": 1, "10.3.10.0/24": 1, "10.3.30.0/24": 1,
+                                                     "10.3.20.0/24": 2, "10.5.10.0/24": 2, "10.5.20.0/24": 2 },
+                            "of:0000000000000005": { "10.1.0.0/24": 2, "10.1.10.0/24": 2,
+                                                     "10.2.0.0/24": 2, "10.2.30.0/24": 4, "10.2.20.0/24": 4,
+                                                     "10.2.10.0/24": 2, "10.2.40.0/24": 2,
+                                                     "10.3.0.0/24": 2, "10.3.10.0/24": 1, "10.3.30.0/24": 1,
+                                                     "10.3.20.0/24": 1, "10.5.10.0/24": 2, "10.5.20.0/24": 2 } }
+        for switch, subnets in groupBucketsNew.items():
+            lib.checkGroupsForBuckets( main, switch, subnets )
         verify( main, disconnected=False )
+        # Bring up the links
         lib.enablePortBatch( main, portsToDisable, 10, 48 )
-        # TODO: check buckets in groups
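+        # Once the links are re-enabled, the original bucket counts should be restored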
+        for switch, subnets in groupBuckets.items():
+            lib.checkGroupsForBuckets( main, switch, subnets )
         verify( main, disconnected=False )
         lib.cleanup( main, copyKarafLog=False, removeHostComponent=True )
 
@@ -818,7 +917,6 @@
         main.case( "Take down all uplinks from a paired leaf switch" )
         setupTest( main, test_idx=622, onosNodes=3 )
         verify( main, disconnected=False )
-        ctrl = main.Cluster.active( 0 )
         hostLocations = { "h4v6": [ "of:0000000000000002/7", "of:0000000000000003/6" ],
                           "h5v6": [ "of:0000000000000002/8", "of:0000000000000003/7" ],
                           "h4v4": [ "of:0000000000000002/10", "of:0000000000000003/10" ],
@@ -827,6 +925,8 @@
         linksToRemove = [ ["spine101", "leaf2"], ["spine102", "leaf2"] ]
         lib.killLinkBatch( main, linksToRemove, 40, 10 )
         # TODO: more verifications are required
+        main.disconnectedIpv4Hosts = [ "h3v4" ]
+        main.disconnectedIpv6Hosts = [ "h3v6" ]
         verify( main )
         hostLocations = { "h4v6": "of:0000000000000003/6",
                           "h5v6": "of:0000000000000003/7",
@@ -834,7 +934,9 @@
                           "h5v4": "of:0000000000000003/11" }
         lib.verifyHostLocations( main, hostLocations )
         lib.restoreLinkBatch( main, linksToRemove, 48, 10 )
-        verify( main )
+        main.disconnectedIpv4Hosts = []
+        main.disconnectedIpv6Hosts = []
+        verify( main, disconnected=False )
         hostLocations = { "h4v6": [ "of:0000000000000002/7", "of:0000000000000003/6" ],
                           "h5v6": [ "of:0000000000000002/8", "of:0000000000000003/7" ],
                           "h4v4": [ "of:0000000000000002/10", "of:0000000000000003/10" ],
diff --git a/TestON/tests/USECASE/SegmentRouting/dependencies/Testcaselib.py b/TestON/tests/USECASE/SegmentRouting/dependencies/Testcaselib.py
index 7fc7164..c3c5dbc 100644
--- a/TestON/tests/USECASE/SegmentRouting/dependencies/Testcaselib.py
+++ b/TestON/tests/USECASE/SegmentRouting/dependencies/Testcaselib.py
@@ -22,6 +22,7 @@
 import time
 import json
 import urllib
+import re
 from core import utilities
 
 
@@ -309,6 +310,35 @@
                                  onfail="route-add command failed")
 
     @staticmethod
+    def checkGroupsForBuckets( main, deviceId, subnetDict, routingTable=30 ):
+        """
+        Check that the number of buckets in the select group for each subnet on
+        device deviceId matches the expected value. subnetDict is a dictionary
+        with entries of the form "10.0.1.0/24": 5.
+        """
+        main.step( "Checking that the number of group buckets for subnets on device {0} is as expected.".format( deviceId ) )
+        groups = main.Cluster.active( 0 ).CLI.getGroups( deviceId, groupType="select" )
+        flows = main.Cluster.active( 0 ).CLI.flows( jsonFormat=False, device=deviceId )
+
+        result = main.TRUE
+        for subnet, numberInSelect in subnetDict.iteritems():
+            for flow in flows.splitlines():
+                if "tableId={0}".format( routingTable ) in flow and subnet in flow:
+                    # Extract the group id that this flow entry points to, e.g. 0x70000041
+                    # from a flow entry containing "deferred=[GROUP:0x70000041], transition=TABLE:60,"
+                    groupId = re.search( r".*GROUP:(0x.*)], transition.*", flow ).groups()[0]
+                    count = 0
+                    for group in groups.splitlines():
+                        if 'id={0}'.format( groupId ) in group:
+                            count += 1
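+                    # The group id appears on the group header line and on each bucket
+                    # line, so count - 1 gives the number of buckets in the select group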
+                    if count - 1 != numberInSelect:
+                        result = main.FALSE
+                        main.log.warn( "Mismatch in number of buckets of select group, found {0}, expected {1} for subnet {2} on device {3}".format( count - 1, numberInSelect, subnet, deviceId ) )
+        utilities.assert_equals( expect=main.TRUE, actual=result,
+                                 onpass="All bucket numbers are as expected",
+                                 onfail="Some bucket numbers are not as expected" )
+
+    @staticmethod
     def checkFlows( main, minFlowCount, tag="", dumpflows=True, sleep=10 ):
         main.step(
                 "Check whether the flow count is bigger than %s" % minFlowCount )