Add stratum bmv2 for SROnosFailure and SRLinkFailure

- Add support for running stratum bmv2 switches in these tests
- Add docker support to the SROnosFailure functions
- Add xconnect translation to cfgtranslator
- Rework switch role mapping in cfgtranslator (see the sketch below)

Change-Id: Ic0b386107a7813810de9bf7c34bdf2d869eafa84
(cherry picked from commit d2c84e79dd2257cba150dcde5c089b9d0bf81140)
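
The switch-role rework replaces the hard-coded ':leaf' prefix in cfgtranslator.py with a regex-based roleMap keyed on the switch number. A minimal standalone sketch of that mapping, reusing the default roleMap added below (the helper name and the sample device ids are illustrative, not part of the test code):

    import re

    # Same shape as the roleMap default added to ofdpaToBmv2(): a regex over the
    # switch number (leading zeros stripped) mapped to the role prefix to use.
    ROLE_MAP = { r'0*[1-9]([0-9]){2}': 'spine', r'0{15}[1-9]': 'leaf' }

    def toBmv2Id( ofId, switchPrefix='bmv2', roleMap=ROLE_MAP ):
        """Translate an OpenFlow device id into a stratum bmv2 device id."""
        searchObj = re.search( r"of:0*(?P<swNum>[1-9][0-9]*)", ofId )
        if not searchObj:
            return ofId
        role = 'leaf'  # same fallback the translator uses
        for roleRE, roleValue in roleMap.items():
            if re.search( roleRE, searchObj.group( 'swNum' ) ):
                role = roleValue
                break
        return 'device:%s:%s%s' % ( switchPrefix, role, searchObj.group( 'swNum' ) )

    # e.g. toBmv2Id( 'of:0000000000000101' ) -> 'device:bmv2:spine101'
    #      toBmv2Id( 'of:0000000000000002' ) -> 'device:bmv2:leaf2'
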
diff --git a/TestON/tests/USECASE/SegmentRouting/SRBridging/dependencies/SRBridgingTest.py b/TestON/tests/USECASE/SegmentRouting/SRBridging/dependencies/SRBridgingTest.py
index c3d93be..56d44ec 100644
--- a/TestON/tests/USECASE/SegmentRouting/SRBridging/dependencies/SRBridgingTest.py
+++ b/TestON/tests/USECASE/SegmentRouting/SRBridging/dependencies/SRBridgingTest.py
@@ -47,7 +47,7 @@
             if not hasattr( main, 'apps' ):
                 init = True
                 run.initTest( main )
-            # Skip onos packaging if the clusrer size stays the same
+            # Skip onos packaging if the cluster size stays the same
             if not init and onosNodes == main.Cluster.numCtrls:
                 skipPackage = True
 
diff --git a/TestON/tests/USECASE/SegmentRouting/SRLinkFailure/SRLinkFailure.params.stratum b/TestON/tests/USECASE/SegmentRouting/SRLinkFailure/SRLinkFailure.params.stratum
new file mode 100644
index 0000000..07cfeb9
--- /dev/null
+++ b/TestON/tests/USECASE/SegmentRouting/SRLinkFailure/SRLinkFailure.params.stratum
@@ -0,0 +1,102 @@
+<PARAMS>
+    <testcases>1,2,4,5</testcases>
+
+    <GRAPH>
+        <nodeCluster>Fabric</nodeCluster>
+        <builds>20</builds>
+        <jobName>SRLinkFailure-stratum</jobName>
+    </GRAPH>
+
+    <SCALE>
+        <size>3</size>
+        <max>3</max>
+    </SCALE>
+
+    <DEPENDENCY>
+        <useCommonConf>True</useCommonConf>
+        <useCommonTopo>True</useCommonTopo>
+        <useBmv2>True</useBmv2>
+        <bmv2SwitchType>stratum</bmv2SwitchType>
+        <switchPrefix>bmv2</switchPrefix>
+        <stratumRoot>~/stratum</stratumRoot>
+        <topology>cord_fabric.py</topology>
+        <lib>routinglib.py,trellislib.py,stratum.py</lib>
+        <conf>bgpdbgp1.conf,bgpdbgp2.conf,bgpdr1.conf,bgpdr2.conf,dhcpd6.conf,dhcpd.conf,zebradbgp1.conf,zebradbgp2.conf</conf>
+    </DEPENDENCY>
+
+    <MN_DOCKER>
+        <args>--privileged --net host --rm -v topo:/topo -v ~/mininet/custom:/home/root/mininet/custom -v /var/run/openvswitch/:/var/run/openvswitch/ -v /tmp/mn-stratum:/tmp -v /tmp/mn_conf/:/home/root/config --hostname mn-stratum -v /etc/network/interfaces:/etc/network/interfaces -it -d</args>
+        <name>trellis_mininet</name>
+        <home>/home/root/</home>
+    </MN_DOCKER>
+
+    <CLUSTER>
+        # Params for onos docker
+        <dockerSkipBuild>True</dockerSkipBuild>
+        <dockerBuildCmd>make ONOS_VERSION=master DOCKER_TAG=TestON-master onos-build trellis-control-build trellis-t3-build tost-build</dockerBuildCmd> # If using another cmd like make
+        <dockerBuildTimeout>1200</dockerBuildTimeout>
+        <dockerFilePath>~/tost-onos</dockerFilePath>
+        <dockerImageTag>tost:TestON-master</dockerImageTag>
+        <dockerOptions>-d --rm --network host -v ~/.ssh/authorized_keys:/root/.ssh/authorized_keys -v /tmp/cluster.json:/root/onos/config/cluster.json </dockerOptions> # We start the container detached, so the docker component can connect to cli instead of logs
+        <atomixImageTag>atomix/atomix:3.1.5</atomixImageTag>
+        <atomixOptions>-d --rm --network host -v ~/.ssh/authorized_keys:/root/.ssh/authorized_keys -v /tmp/atomix.json:/opt/atomix/conf/atomix.json </atomixOptions>
+    </CLUSTER>
+
+    <ENV>
+        <cellName>productionCell</cellName>
+        <cellApps>drivers,hostprovider,netcfghostprovider,lldpprovider,drivers.bmv2,pipelines.fabric,segmentrouting,t3 </cellApps>
+    </ENV>
+
+    <ONOS_Configuration>
+        <org.onosproject.grpc.ctl.GrpcChannelControllerImpl>
+            <enableMessageLog>true</enableMessageLog>
+        </org.onosproject.grpc.ctl.GrpcChannelControllerImpl>
+    </ONOS_Configuration>
+
+    <ONOS_Logging>
+        <org.onosproject.events>TRACE</org.onosproject.events>
+        <org.onosproject.segmentrouting>DEBUG</org.onosproject.segmentrouting>
+        <org.onosproject.driver>DEBUG</org.onosproject.driver>
+        <org.onosproject.net.flowobjective.impl>DEBUG</org.onosproject.net.flowobjective.impl>
+        <org.onosproject.routeservice.impl>DEBUG</org.onosproject.routeservice.impl>
+        <org.onosproject.routeservice.store>DEBUG</org.onosproject.routeservice.store>
+        <org.onosproject.routing.fpm>DEBUG</org.onosproject.routing.fpm>
+        <org.onosproject.fpm>DEBUG</org.onosproject.fpm>
+        <org.onosproject.mcast>DEBUG</org.onosproject.mcast>
+        <org.onosproject.p4runtime>DEBUG</org.onosproject.p4runtime>
+        <org.onosproject.protocols.p4runtime>DEBUG</org.onosproject.protocols.p4runtime>
+        <org.onosproject.drivers.p4runtime>DEBUG</org.onosproject.drivers.p4runtime>
+        <org.onosproject.protocols.grpc>DEBUG</org.onosproject.protocols.grpc>
+        <org.onosproject.protocols.gnmi>DEBUG</org.onosproject.protocols.gnmi>
+        <org.onosproject.protocols.gnoi>DEBUG</org.onosproject.protocols.gnoi>
+        <org.onosproject.drivers.gnoi>DEBUG</org.onosproject.drivers.gnoi>
+        <org.onosproject.drivers.gnmi>DEBUG</org.onosproject.drivers.gnmi>
+        <org.onosproject.drivers.stratum>DEBUG</org.onosproject.drivers.stratum>
+        <org.onosproject.bmv2>DEBUG</org.onosproject.bmv2>
+    </ONOS_Logging>
+
+    <GIT>
+        <pull>False</pull>
+        <branch>master</branch>
+    </GIT>
+
+    <CTRL>
+        <port>6653</port>
+    </CTRL>
+
+    <timers>
+        <LinkDiscovery>12</LinkDiscovery>
+        <SwitchDiscovery>12</SwitchDiscovery>
+    </timers>
+
+    <kill>
+        <switch> spine101 </switch>
+        <dpid> 000000000101 </dpid>
+        <links> leaf1 leaf2 </links>
+    </kill>
+
+    <SLEEP>
+        <startup>10</startup>
+    </SLEEP>
+
+</PARAMS>
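
The <DEPENDENCY> block above is what the reworked dependency functions read through main.params; switchPrefix and jsonFileSuffix are looked up directly in the code below, while useBmv2 and bmv2SwitchType are assumed to be mapped onto main.useBmv2 and main.switchType during initTest. A small illustrative helper showing those lookups (not part of TestON):

    def readDependencyParams( params ):
        """Mirror how the failure-test functions read the params file above."""
        dep = params.get( 'DEPENDENCY', {} )
        return {
            'useBmv2': dep.get( 'useBmv2' ) == 'True',                 # 'True' above
            'switchPrefix': dep.get( 'switchPrefix', 'bmv2' ),         # 'bmv2' above
            'bmv2SwitchType': dep.get( 'bmv2SwitchType', 'stratum' ),  # 'stratum' above
            'jsonFileSuffix': params.get( 'jsonFileSuffix', '' ),      # unset here -> ''
        }
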
diff --git a/TestON/tests/USECASE/SegmentRouting/SRLinkFailure/dependencies/SRLinkFailFuncs.py b/TestON/tests/USECASE/SegmentRouting/SRLinkFailure/dependencies/SRLinkFailFuncs.py
index 4f07fe5..0a0ef5a 100644
--- a/TestON/tests/USECASE/SegmentRouting/SRLinkFailure/dependencies/SRLinkFailFuncs.py
+++ b/TestON/tests/USECASE/SegmentRouting/SRLinkFailure/dependencies/SRLinkFailFuncs.py
@@ -20,6 +20,7 @@
 """
 
 from tests.USECASE.SegmentRouting.dependencies.Testcaselib import Testcaselib as run
+import tests.USECASE.SegmentRouting.dependencies.cfgtranslator as translator
 
 class SRLinkFailFuncs():
 
@@ -29,6 +30,7 @@
         self.topo[ '0x1' ] = ( 0, 1, '--leaf=1 --spine=0', 'single switch' )
         self.topo[ '2x2' ] = ( 2, 2, '', '2x2 Leaf-spine' )
         self.topo[ '4x4' ] = ( 4, 4, '--leaf=4 --spine=4', '4x4 Leaf-spine' )
+        main.switchType = "ovs"
         self.switchOne = 'spine101'
         self.switchTwo = 'leaf2'
         self.dpidOne = 'of:0000000000000101'
@@ -38,18 +40,46 @@
 
     def runTest( self, main, caseNum, numNodes, Topo, minFlow ):
         try:
-            if not hasattr( main, 'apps' ):
-                run.initTest( main )
-
             description = "Bridging and Routing Link Failure test with " + self.topo[ Topo ][ 3 ] + " and {} Onos".format( numNodes )
             main.case( description )
-
+            if not hasattr( main, 'apps' ):
+                run.initTest( main )
             main.cfgName = Topo
             main.Cluster.setRunningNode( numNodes )
             run.installOnos( main )
-            run.loadJson( main )
+            suf = main.params.get( 'jsonFileSuffix', '' )
+            xconnectFile = "%s%s-xconnects.json%s" % ( main.configPath + main.forJson,
+                    main.cfgName, suf )
+            if main.useBmv2:
+                switchPrefix = main.params[ 'DEPENDENCY' ].get( 'switchPrefix', "bmv2" )
+                # Translate configuration file from OVS-OFDPA to BMv2 driver
+                translator.bmv2ToOfdpa( main ) # Try to clean up if switching between switch types
+                translator.ofdpaToBmv2( main, switchPrefix=switchPrefix )
+                # translate xconnects
+                translator.bmv2ToOfdpa( main, cfgFile=xconnectFile )
+                translator.ofdpaToBmv2( main, cfgFile=xconnectFile, switchPrefix=switchPrefix )
+            else:
+                translator.bmv2ToOfdpa( main )
+                translator.bmv2ToOfdpa( main, cfgFile=xconnectFile )
+            if suf:
+                run.loadJson( main, suffix=suf )
+            else:
+                run.loadJson( main )
             run.loadChart( main )
-            run.startMininet( main, 'cord_fabric.py', args=self.topo[ Topo ][ 2 ] )
+            if hasattr( main, 'Mininet1' ):
+                run.mnDockerSetup( main )  # optionally create and setup docker image
+
+                # Run the test with Mininet
+                mininet_args = self.topo[ Topo ][ 2 ]
+                if main.useBmv2:
+                    mininet_args += ' --switch %s' % main.switchType
+                    main.log.info( "Using %s switch" % main.switchType )
+
+                run.startMininet( main, 'cord_fabric.py', args=mininet_args )
+            else:
+                # Run the test with physical devices
+                # TODO: connect TestON to the physical network
+                pass
             # xconnects need to be loaded after topology
             run.loadXconnects( main )
             # pre-configured routing and bridging test
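
The Mininet handling above only appends a --switch flag when BMv2 is in use, taking the value from main.switchType. A short self-contained sketch of the same logic (helper name and sample values are illustrative):

    def mininetArgsFor( topoArgs, useBmv2, switchType ):
        """Build the cord_fabric.py argument string the way runTest() does above."""
        args = topoArgs
        if useBmv2:
            args += ' --switch %s' % switchType
        return args

    # e.g. mininetArgsFor( '--leaf=4 --spine=4', True, 'stratum' )
    #      -> '--leaf=4 --spine=4 --switch stratum'
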
diff --git a/TestON/tests/USECASE/SegmentRouting/SROnosFailure/dependencies/SROnosFailureFuncs.py b/TestON/tests/USECASE/SegmentRouting/SROnosFailure/dependencies/SROnosFailureFuncs.py
index 679baf0..a5ff3ac 100644
--- a/TestON/tests/USECASE/SegmentRouting/SROnosFailure/dependencies/SROnosFailureFuncs.py
+++ b/TestON/tests/USECASE/SegmentRouting/SROnosFailure/dependencies/SROnosFailureFuncs.py
@@ -20,6 +20,7 @@
 """
 
 from tests.USECASE.SegmentRouting.dependencies.Testcaselib import Testcaselib as run
+import tests.USECASE.SegmentRouting.dependencies.cfgtranslator as translator
 
 class SROnosFailureFuncs():
 
@@ -29,6 +30,7 @@
         self.topo[ '0x1' ] = ( 0, 1, '--leaf=1 --spine=0', 'single switch' )
         self.topo[ '2x2' ] = ( 2, 2, '', '2x2 Leaf-spine' )
         self.topo[ '4x4' ] = ( 4, 4, '--leaf=4 --spine=4', '4x4 Leaf-spine' )
+        main.switchType = "ovs"
 
     def runTest( self, main, caseNum, numNodes, Topo, minFlow, killList=[ 0 ] ):
         try:
@@ -39,7 +41,24 @@
             main.cfgName = Topo
             main.Cluster.setRunningNode( numNodes )
             run.installOnos( main )
-            run.loadJson( main )
+            suf = main.params.get( 'jsonFileSuffix', '' )
+            xconnectFile = "%s%s-xconnects.json%s" % ( main.configPath + main.forJson,
+                    main.cfgName, suf )
+            if main.useBmv2:
+                switchPrefix = main.params[ 'DEPENDENCY' ].get( 'switchPrefix', "bmv2" )
+                # Translate configuration file from OVS-OFDPA to BMv2 driver
+                translator.bmv2ToOfdpa( main ) # Try to clean up if switching between switch types
+                translator.ofdpaToBmv2( main, switchPrefix=switchPrefix )
+                # translate xconnects
+                translator.bmv2ToOfdpa( main, cfgFile=xconnectFile )
+                translator.ofdpaToBmv2( main, cfgFile=xconnectFile, switchPrefix=switchPrefix )
+            else:
+                translator.bmv2ToOfdpa( main )
+                translator.bmv2ToOfdpa( main, cfgFile=xconnectFile )
+            if suf:
+                run.loadJson( main, suffix=suf )
+            else:
+                run.loadJson( main )
             run.loadChart( main )
             if hasattr( main, 'Mininet1' ):
                 run.mnDockerSetup( main )  # optionally create and setup docker image
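
Both failure suites now build the xconnect netcfg file name from the topology name plus the optional jsonFileSuffix. A minimal sketch of that path logic (the directory value and the '.stratum' suffix are hypothetical; the real prefix comes from main.configPath + main.forJson):

    def xconnectFileFor( jsonDir, cfgName, suffix='' ):
        """Path of the xconnects file, built the same way as in runTest() above."""
        return "%s%s-xconnects.json%s" % ( jsonDir, cfgName, suffix )

    # e.g. xconnectFileFor( 'dependencies/json/', '2x2' )
    #      -> 'dependencies/json/2x2-xconnects.json'
    # With a hypothetical <jsonFileSuffix>.stratum</jsonFileSuffix> set:
    # xconnectFileFor( 'dependencies/json/', '2x2', '.stratum' )
    #      -> 'dependencies/json/2x2-xconnects.json.stratum'
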
diff --git a/TestON/tests/USECASE/SegmentRouting/dependencies/Testcaselib.py b/TestON/tests/USECASE/SegmentRouting/dependencies/Testcaselib.py
index 68072a7..e20e31a 100644
--- a/TestON/tests/USECASE/SegmentRouting/dependencies/Testcaselib.py
+++ b/TestON/tests/USECASE/SegmentRouting/dependencies/Testcaselib.py
@@ -720,20 +720,23 @@
         main.step( "Kill link between %s and %s" % ( end1, end2 ) )
         linkDown = main.Network.link( END1=end1, END2=end2, OPTION="down" )
         linkDown = linkDown and main.Network.link( END2=end1, END1=end2, OPTION="down" )
+        utilities.assert_equals( expect=main.TRUE, actual=linkDown,
+                                 onpass="Link down successful",
+                                 onfail="Failed to turn off link?" )
         # TODO: Can remove this, since in the retry we will wait anyways if topology is incorrect
         main.log.info(
                 "Waiting %s seconds for link down to be discovered" % sleep )
         time.sleep( sleep )
+        main.step( "Checking topology after link down" )
         topology = utilities.retry( main.Cluster.active( 0 ).CLI.checkStatus,
                                     main.FALSE,
                                     kwargs={ 'numoswitch': switches,
                                              'numolink': links },
                                     attempts=10,
                                     sleep=sleep )
-        result = topology and linkDown
-        utilities.assert_equals( expect=main.TRUE, actual=result,
-                                 onpass="Link down successful",
-                                 onfail="Failed to turn off link?" )
+        utilities.assert_equals( expect=main.TRUE, actual=topology,
+                                 onpass="Topology after link down is correct",
+                                 onfail="Topology after link down is incorrect" )
 
     @staticmethod
     def killLinkBatch( main, links, linksAfter, switches, sleep=None ):
@@ -1092,12 +1095,18 @@
         else:
             sleep = float( sleep )
 
+        stepResult = main.TRUE
         for i in nodes:
-            killResult = main.ONOSbench.onosDie( main.Cluster.runningNodes[ i ].ipAddress )
-            utilities.assert_equals( expect=main.TRUE, actual=killResult,
-                                     onpass="ONOS instance Killed",
-                                     onfail="Error killing ONOS instance" )
+            node = main.Cluster.runningNodes[ i ]
+            if node.inDocker:
+                killResult = node.server.dockerStop( node.name )
+            else:
+                killResult = main.ONOSbench.onosDie( node.ipAddress )
+            stepResult = stepResult and killResult
             main.Cluster.runningNodes[ i ].active = False
+        utilities.assert_equals( expect=main.TRUE, actual=stepResult,
+                                 onpass="ONOS instance Killed",
+                                 onfail="Error killing ONOS instance" )
         main.Cluster.reset()
         main.log.debug( "sleeping %i seconds" % ( sleep ) )
         time.sleep( sleep )
@@ -1118,11 +1127,22 @@
             sleep = float( main.params[ 'timers' ][ 'OnosDiscovery' ] )
         else:
             sleep = float( sleep )
-        [ main.ONOSbench.onosStart( main.Cluster.runningNodes[ i ].ipAddress ) for i in nodes ]
+        for i in nodes:
+            node = main.Cluster.runningNodes[ i ]
+            if node.inDocker:
+                main.Cluster.startONOSDockerNode( i )
+            else:
+                main.ONOSbench.onosStart( node.ipAddress )
         main.log.debug( "sleeping %i seconds" % ( sleep ) )
         time.sleep( sleep )
         for i in nodes:
-            isUp = main.ONOSbench.isup( main.Cluster.runningNodes[ i ].ipAddress )
+            node = main.Cluster.runningNodes[ i ]
+            if node.inDocker:
+                isUp = node.CLI.dockerExec( node.name, dockerPrompt=node.dockerPrompt )
+                isUp = isUp and node.CLI.prepareForCLI()
+                isUp = isUp and node.CLI.onosSecureSSH( userName=node.karafUser, userPWD=node.karafPass )
+            else:
+                isUp = main.ONOSbench.isup( node.ipAddress )
             utilities.assert_equals( expect=main.TRUE, actual=isUp,
                                      onpass="ONOS service is ready",
                                      onfail="ONOS service did not start properly" )
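
The Testcaselib changes make the ONOS kill/recover paths container-aware: docker-based nodes go through the docker component, everything else still uses ONOSbench. A condensed sketch of the stop-side branching, where node and onosBench stand in for a main.Cluster.runningNodes entry and main.ONOSbench:

    def stopOnosNode( node, onosBench ):
        """Stop one ONOS instance, using docker when the node runs in a container."""
        if node.inDocker:
            # Containerized ONOS: stop the container on its host.
            return node.server.dockerStop( node.name )
        # Packaged ONOS: kill the process over SSH.
        return onosBench.onosDie( node.ipAddress )
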
diff --git a/TestON/tests/USECASE/SegmentRouting/dependencies/cfgtranslator.py b/TestON/tests/USECASE/SegmentRouting/dependencies/cfgtranslator.py
index 5e95efe..b978b73 100644
--- a/TestON/tests/USECASE/SegmentRouting/dependencies/cfgtranslator.py
+++ b/TestON/tests/USECASE/SegmentRouting/dependencies/cfgtranslator.py
@@ -27,8 +27,8 @@
 DHCP_APP_ID = ONOS_GROUP_ID + '.' + DHCP_APP
 
 # Translate configuration JSON file from BMv2 driver to OFDPA-OVS driver.
-def bmv2ToOfdpa( main, cfgFile="" ):
-    didRE = r"device:(?P<swType>bmv2|tofino):(?P<swRole>leaf|spine)(?P<swNum>[1-9][0-9]*)(/(?P<portNum>[0-9]+))?"
+def bmv2ToOfdpa( main, cfgFile="", rolesRE=r'spine|leaf' ):
+    didRE = r"device:(?P<swType>bmv2|tofino):(?P<swRole>" + rolesRE + ")(?P<swNum>[1-9][0-9]*)(/(?P<portNum>[0-9]+))?"
     if not cfgFile:
         cfgFile = "%s%s.json" % ( main.configPath + main.forJson,
                                   main.cfgName )
@@ -84,12 +84,21 @@
                         netcfg[ 'apps' ][ DHCP_APP_ID ][ 'default' ][ i ][ 'dhcpServerConnectPoint' ] = \
                             'of:' + searchObj.group( 'swNum' ).zfill(16) + '/' + searchObj.group( 'portNum' )
 
+    if 'xconnects' in netcfg.keys():
+        new_xconnects = []
+        for xconnect in netcfg[ 'xconnects' ]:
+            searchObj = re.search( didRE, xconnect.get( "deviceId" ) )
+            if searchObj:
+                new_device = 'of:' + searchObj.group( 'swNum' ).zfill( 16 )
+                xconnect[ 'deviceId' ] = new_device
+            new_xconnects.append( xconnect )
+        netcfg[ 'xconnects' ] = new_xconnects
+
     with open( cfgFile, 'w' ) as cfg:
         cfg.write( json.dumps( netcfg, indent=4, separators=( ',', ':' ), sort_keys=True ) )
 
 # Translate configuration JSON file from OFDPA-OVS driver to BMv2 driver.
-def ofdpaToBmv2( main, switchPrefix="bmv2", cfgFile="" ):
-    didRE= r"device:(?P<swType>bmv2|tofino):(?P<swRole>leaf|spine)(?P<swNum>[1-9][0-9]*)(/(?P<portNum>[0-9]+))?"
+def ofdpaToBmv2( main, switchPrefix="bmv2", cfgFile="", roleMap={r'0*[1-9]([0-9]){2}': 'spine', r'0{15}[1-9]': "leaf"} ):
     didRE = r"of:0*(?P<swNum>[1-9][0-9]*)(/(?P<portNum>[0-9]+))?"
     if not cfgFile:
         cfgFile = "%s%s.json" % ( main.configPath + main.forJson,
@@ -101,7 +110,14 @@
         for port in netcfg[ 'ports' ].keys():
             searchObj = re.search( didRE, port )
             if searchObj:
-                new_port = 'device:' + switchPrefix + ':leaf' + searchObj.group( 'swNum' ) + '/' + searchObj.group( 'portNum' )
+                # Pick the role whose roleMap regex matches the switch number; default to leaf
+                role = "leaf"
+                for roleRE, roleValue in roleMap.items():
+                    roleMatch = re.search( roleRE, searchObj.group( 'swNum' ) )
+                    if roleMatch:
+                        role = roleValue
+                        break
+                new_port = 'device:' + switchPrefix + ':' + role + searchObj.group( 'swNum' ) + '/' + searchObj.group( 'portNum' )
                 netcfg[ 'ports' ][ new_port ] = netcfg[ 'ports' ].pop( port )
 
     if 'hosts' in netcfg.keys():
@@ -111,7 +127,14 @@
                 for location in hostCfg[ 'basic' ][ 'locations' ]:
                     searchObj = re.search( didRE, location )
                     if searchObj:
-                        new_locations.append( 'device:' + switchPrefix + ':leaf' + searchObj.group( 'swNum' ) + '/' + searchObj.group( 'portNum' ) )
+                        # Pick the role whose roleMap regex matches the switch number; default to leaf
+                        role = "leaf"
+                        for roleRE, roleValue in roleMap.items():
+                            roleMatch = re.search( roleRE, searchObj.group( 'swNum' ) )
+                            if roleMatch:
+                                role = roleValue
+                                break
+                        new_locations.append( 'device:' + switchPrefix + ':' + role + searchObj.group( 'swNum' ) + '/' + searchObj.group( 'portNum' ) )
                     else:
                         new_locations.append( location )
                 netcfg[ 'hosts' ][ host ][ 'basic' ][ 'locations' ] = new_locations
@@ -119,7 +142,14 @@
                 location = hostCfg[ 'basic' ][ 'locations' ]
                 searchObj = re.search( didRE, location )
                 if searchObj:
-                    new_location = 'device:' + switchPrefix + ':leaf' + searchObj.group( 'swNum' ) + '/' + searchObj.group( 'portNum' )
+                    # Pick the role whose roleMap regex matches the switch number; default to leaf
+                    role = "leaf"
+                    for roleRE, roleValue in roleMap.items():
+                        roleMatch = re.search( roleRE, searchObj.group( 'swNum' ) )
+                        if roleMatch:
+                            role = roleValue
+                            break
+                    new_location = 'device:' + switchPrefix + ':' + role + searchObj.group( 'swNum' ) + '/' + searchObj.group( 'portNum' )
                     netcfg[ 'hosts' ][ host ][ 'basic' ][ 'locations' ] = new_location
 
     if 'devices' in netcfg.keys():
@@ -127,6 +157,7 @@
             searchObj = re.search( didRE, device )
             new_device = device
             if searchObj:
+                # TODO: use isEdgeRouter here or roleMap? Maybe use it to populate roleMap.
                 isLeaf = netcfg[ 'devices' ][ device ][ SR_APP ][ 'isEdgeRouter' ]
                 if isLeaf is True:
                     new_device = 'device:' + switchPrefix + ':leaf' + searchObj.group( 'swNum' )
@@ -137,7 +168,14 @@
                 searchObj = re.search( didRE,
                                        netcfg[ 'devices' ][ new_device ][ SR_APP ][ 'pairDeviceId' ])
                 if searchObj:
-                    netcfg[ 'devices' ][ new_device ][ SR_APP ][ 'pairDeviceId' ] = 'device:' + switchPrefix + ':leaf' + \
+                    # Pick the role whose roleMap regex matches the switch number; default to leaf
+                    role = "leaf"
+                    for roleRE, roleValue in roleMap.items():
+                        roleMatch = re.search( roleRE, searchObj.group( 'swNum' ) )
+                        if roleMatch:
+                            role = roleValue
+                            break
+                    netcfg[ 'devices' ][ new_device ][ SR_APP ][ 'pairDeviceId' ] = 'device:' + switchPrefix + ':' + role + \
                                                                                     searchObj.group( 'swNum' )
             if 'basic' in netcfg[ 'devices' ][ new_device ].keys():
                 if 'driver' in netcfg[ 'devices' ][ new_device ][ 'basic' ].keys():
@@ -150,8 +188,32 @@
                     searchObj = re.search( didRE,
                                            dhcpcfg[ 'dhcpServerConnectPoint' ] )
                     if searchObj:
+                        # Pick the role whose roleMap regex matches the switch number; default to leaf
+                        role = "leaf"
+                        for roleRE, roleValue in roleMap.items():
+                            roleMatch = re.search( roleRE, searchObj.group( 'swNum' ) )
+                            if roleMatch:
+                                role = roleValue
+                                break
                         netcfg[ 'apps' ][ DHCP_APP_ID ][ 'default' ][ i ][ 'dhcpServerConnectPoint' ] = \
-                            'device:' + switchPrefix + ':leaf' + searchObj.group( 'swNum' ) + '/' + searchObj.group( 'portNum' )
+                            'device:' + switchPrefix + ':' + role + searchObj.group( 'swNum' ) + '/' + searchObj.group( 'portNum' )
+
+    if 'xconnects' in netcfg.keys():
+        new_xconnects = []
+        for xconnect in netcfg[ 'xconnects' ]:
+            searchObj = re.search( didRE, xconnect.get( "deviceId" ) )
+            if searchObj:
+                # Pick the role whose roleMap regex matches the switch number; default to leaf
+                role = "leaf"
+                for roleRE, roleValue in roleMap.items():
+                    roleMatch = re.search( roleRE, searchObj.group( 'swNum' ) )
+                    if roleMatch:
+                        role = roleValue
+                        break
+                new_device = 'device:' + switchPrefix + ':' + role + searchObj.group( 'swNum' )
+                xconnect[ 'deviceId' ] = new_device
+            new_xconnects.append( xconnect )
+        netcfg[ 'xconnects' ] = new_xconnects
 
     with open( cfgFile, 'w' ) as cfg:
         cfg.write( json.dumps( netcfg, indent=4, separators=( ',', ':' ), sort_keys=True ) )
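
The new xconnects handling rewrites only the deviceId of each entry and leaves the rest untouched. A hedged before/after example for a single entry (field names other than deviceId follow a typical Trellis xconnect config and are illustrative, not copied from a test file):

    # Hypothetical entry from <cfgName>-xconnects.json in OFDPA form:
    xconnect = { "name": "vlan-100", "deviceId": "of:0000000000000002",
                 "vlanId": 100, "endpoints": [ 1, 2 ] }

    # After ofdpaToBmv2( main, switchPrefix="bmv2" ) with the default roleMap,
    # only deviceId is rewritten:
    #   { "name": "vlan-100", "deviceId": "device:bmv2:leaf2",
    #     "vlanId": 100, "endpoints": [ 1, 2 ] }
    # bmv2ToOfdpa() applies the reverse mapping, zero-padding the switch number
    # back to a 16-digit OpenFlow id: "of:0000000000000002".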