Allow use of ONOS docker containers for existing tests

- Allow Cluster to pull/build the ONOS docker image
- Connect clidriver to the CLI running in docker
- Some changes to components for better debuggability
- To use, set the useDocker and diff_clihost tags in the cluster
  component to True, then define the CLUSTER parameters in the params
  file (see the example below)
- Update all SR Stratum tests to use the tost docker image
- NOTE: Since the tost-onos image doesn't have openflow installed, we
  are currently only using docker for the bmv2 and tofino switches
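
Example (values taken from the SR Stratum params/topo files added in
this change; adjust image tags and docker options for your setup):

    # .topo file, ONOScell <COMPONENTS> section
    <useDocker>True</useDocker>
    <diff_clihost>True</diff_clihost>
    <docker_prompt>~/onos#</docker_prompt>

    # .params file
    <CLUSTER>
        <dockerImageTag>tost:TestON-master</dockerImageTag>
        <dockerOptions>-d --rm --network host -v ~/.ssh/authorized_keys:/root/.ssh/authorized_keys -v /tmp/cluster.json:/root/onos/config/cluster.json </dockerOptions>
        <atomixImageTag>atomix/atomix:3.1.5</atomixImageTag>
        <atomixOptions>-d --rm --network host -v ~/.ssh/authorized_keys:/root/.ssh/authorized_keys -v /tmp/atomix.json:/opt/atomix/conf/atomix.json </atomixOptions>
    </CLUSTER>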

Change-Id: If900b0bdbf9a41b8885c692ccba18a3b1bc580cc
diff --git a/TestON/tests/FUNC/FUNCformCluster/FUNCformCluster.py b/TestON/tests/FUNC/FUNCformCluster/FUNCformCluster.py
index ed7bf61..85dcfc2 100644
--- a/TestON/tests/FUNC/FUNCformCluster/FUNCformCluster.py
+++ b/TestON/tests/FUNC/FUNCformCluster/FUNCformCluster.py
@@ -147,7 +147,7 @@
         main.step( "Executing onos-form-cluster" )
         formClusterResult = main.ONOSbench.formCluster( main.Cluster.getIps( True, True ) )
         utilities.assert_equals( expect=main.TRUE,
-                                 actual=result,
+                                 actual=formClusterResult,
                                  onpass="Successfully formed clusters to ONOS",
                                  onfail="Failed to form clusters to ONOS" )
         onosServiceResult = main.testSetUp.checkOnosService( main.Cluster )
diff --git a/TestON/tests/USECASE/SegmentRouting/SRBridging/SRBridging.params.stratum b/TestON/tests/USECASE/SegmentRouting/SRBridging/SRBridging.params.stratum
index 9219001..9b1a5c9 100644
--- a/TestON/tests/USECASE/SegmentRouting/SRBridging/SRBridging.params.stratum
+++ b/TestON/tests/USECASE/SegmentRouting/SRBridging/SRBridging.params.stratum
@@ -23,8 +23,6 @@
         <topology>trellis_fabric.py</topology>
         <lib>routinglib.py,trellislib.py,stratum.py</lib>
         <conf>bgpdbgp1.conf,bgpdbgp2.conf,bgpdr1.conf,bgpdr2.conf,dhcpd6.conf,dhcpd.conf,zebradbgp1.conf,zebradbgp2.conf</conf>
-        <trellisOar>/home/sdn/segmentrouting-oar-3.0.0-SNAPSHOT.oar</trellisOar>
-        <t3Oar>/home/sdn/t3-app-3.0.0-SNAPSHOT.oar</t3Oar>
     </DEPENDENCY>
 
     <MN_DOCKER>
@@ -33,9 +31,21 @@
         <home>/home/root/</home>
     </MN_DOCKER>
 
+    <CLUSTER>
+        # Params for onos docker
+        <dockerSkipBuild>True</dockerSkipBuild>
+        <dockerBuildCmd>make ONOS_BRANCH=master DOCKER_TAG=TestON-master onos-build trellis-control-build trellis-t3-build tost-build</dockerBuildCmd> # Build command to use instead of docker build, e.g. make
+        <dockerBuildTimeout>1200</dockerBuildTimeout>
+        <dockerFilePath>~/tost-onos</dockerFilePath>
+        <dockerImageTag>tost:TestON-master</dockerImageTag>
+        <dockerOptions>-d --rm --network host -v ~/.ssh/authorized_keys:/root/.ssh/authorized_keys -v /tmp/cluster.json:/root/onos/config/cluster.json </dockerOptions> # We start the container detached, so the docker component can connect to cli instead of logs
+        <atomixImageTag>atomix/atomix:3.1.5</atomixImageTag>
+        <atomixOptions>-d --rm --network host -v ~/.ssh/authorized_keys:/root/.ssh/authorized_keys -v /tmp/atomix.json:/opt/atomix/conf/atomix.json </atomixOptions>
+    </CLUSTER>
+
     <ENV>
         <cellName>productionCell</cellName>
-        <cellApps>drivers,openflow,fpm,netcfghostprovider,drivers.bmv2,pipelines.fabric</cellApps>
+        <cellApps>drivers,fpm,netcfghostprovider,drivers.bmv2,pipelines.fabric,segmentrouting,t3</cellApps>
     </ENV>
 
     <ONOS_Configuration>
diff --git a/TestON/tests/USECASE/SegmentRouting/SRBridging/SRBridging.topo b/TestON/tests/USECASE/SegmentRouting/SRBridging/SRBridging.topo
index 82c7b3a..78792d0 100644
--- a/TestON/tests/USECASE/SegmentRouting/SRBridging/SRBridging.topo
+++ b/TestON/tests/USECASE/SegmentRouting/SRBridging/SRBridging.topo
@@ -8,6 +8,8 @@
             <connect_order>1</connect_order>
             <home></home>   # defines where onos home is on the build machine. Defaults to "~/onos/" if empty.
             <COMPONENTS>
+                <useDocker></useDocker>  # Whether to use docker for ONOS nodes
+                <docker_prompt>~/onos#</docker_prompt>
                 <cluster_name></cluster_name>  # Used as a prefix for cluster components. Defaults to 'ONOS'
                 <diff_clihost></diff_clihost> # if it has different host other than localhost for CLI. True or empty. OC# will be used if True.
                 <karaf_username></karaf_username>
diff --git a/TestON/tests/USECASE/SegmentRouting/SRBridging/SRBridging.topo.0x1.physical b/TestON/tests/USECASE/SegmentRouting/SRBridging/SRBridging.topo.0x1.physical
index d103eae..d6828b3 100644
--- a/TestON/tests/USECASE/SegmentRouting/SRBridging/SRBridging.topo.0x1.physical
+++ b/TestON/tests/USECASE/SegmentRouting/SRBridging/SRBridging.topo.0x1.physical
@@ -8,12 +8,14 @@
             <connect_order>1</connect_order>
             <home></home>   # defines where onos home is on the build machine. Defaults to "~/onos/" if empty.
             <COMPONENTS>
+                <useDocker>True</useDocker>  # Whether to use docker for ONOS nodes
+                <docker_prompt>~/onos#</docker_prompt>
                 <cluster_name></cluster_name>  # Used as a prefix for cluster components. Defaults to 'ONOS'
-                <diff_clihost></diff_clihost> # if it has different host other than localhost for CLI. True or empty. OC# will be used if True.
+                <diff_clihost>True</diff_clihost> # if it has different host other than localhost for CLI. True or empty. OC# will be used if True.
                 <karaf_username></karaf_username>
                 <karaf_password></karaf_password>
-                <web_user></web_user>
-                <web_pass></web_pass>
+                <web_user>sdn</web_user>
+                <web_pass>rocks</web_pass>
                 <rest_port></rest_port>
                 <prompt></prompt>  # TODO: we technically need a few of these, one per component
                 <onos_home></onos_home>  # defines where onos home is on the target cell machine. Defaults to entry in "home" if empty.
diff --git a/TestON/tests/USECASE/SegmentRouting/SRBridging/SRBridging.topo.docker b/TestON/tests/USECASE/SegmentRouting/SRBridging/SRBridging.topo.docker
new file mode 100644
index 0000000..a6f21d7
--- /dev/null
+++ b/TestON/tests/USECASE/SegmentRouting/SRBridging/SRBridging.topo.docker
@@ -0,0 +1,39 @@
+<TOPOLOGY>
+    <COMPONENT>
+        <ONOScell>
+            <host>localhost</host>  # ONOS "bench" machine
+            <user>sdn</user>
+            <password>rocks</password>
+            <type>OnosClusterDriver</type>
+            <connect_order>1</connect_order>
+            <home></home>   # defines where onos home is on the build machine. Defaults to "~/onos/" if empty.
+            <COMPONENTS>
+                <useDocker>True</useDocker>  # Whether to use docker for ONOS nodes
+                <docker_prompt>~/onos#</docker_prompt>
+                <cluster_name></cluster_name>  # Used as a prefix for cluster components. Defaults to 'ONOS'
+                <diff_clihost>True</diff_clihost> # if it has different host other than localhost for CLI. True or empty. OC# will be used if True.
+                <karaf_username></karaf_username>
+                <karaf_password></karaf_password>
+                <web_user>sdn</web_user>
+                <web_pass>rocks</web_pass>
+                <rest_port></rest_port>
+                <prompt></prompt>  # TODO: we technically need a few of these, one per component
+                <onos_home></onos_home>  # defines where onos home is on the target cell machine. Defaults to entry in "home" if empty.
+                <nodes>3</nodes>  # number of nodes in the cluster
+            </COMPONENTS>
+        </ONOScell>
+
+        <Mininet1>
+            <host>OCN</host>
+            <user>sdn</user>
+            <password>rocks</password>
+            <type>MininetCliDriver</type>
+            <connect_order>2</connect_order>
+            <COMPONENTS>
+                <home>~/mininet/</home>
+                <prompt></prompt>
+            </COMPONENTS>
+        </Mininet1>
+
+    </COMPONENT>
+</TOPOLOGY>
diff --git a/TestON/tests/USECASE/SegmentRouting/SRBridging/SRBridging.topo.physical b/TestON/tests/USECASE/SegmentRouting/SRBridging/SRBridging.topo.physical
index 905d24b..37e1830 100644
--- a/TestON/tests/USECASE/SegmentRouting/SRBridging/SRBridging.topo.physical
+++ b/TestON/tests/USECASE/SegmentRouting/SRBridging/SRBridging.topo.physical
@@ -8,6 +8,8 @@
             <connect_order>1</connect_order>
             <home></home>   # defines where onos home is on the build machine. Defaults to "~/onos/" if empty.
             <COMPONENTS>
+                <useDocker></useDocker>  # Whether to use docker for ONOS nodes
+                <docker_prompt>~/onos#</docker_prompt>
                 <cluster_name></cluster_name>  # Used as a prefix for cluster components. Defaults to 'ONOS'
                 <diff_clihost></diff_clihost> # if it has different host other than localhost for CLI. True or empty. OC# will be used if True.
                 <karaf_username></karaf_username>
diff --git a/TestON/tests/USECASE/SegmentRouting/SRClusterRestart/SRClusterRestart.topo.docker b/TestON/tests/USECASE/SegmentRouting/SRClusterRestart/SRClusterRestart.topo.docker
new file mode 100644
index 0000000..a6f21d7
--- /dev/null
+++ b/TestON/tests/USECASE/SegmentRouting/SRClusterRestart/SRClusterRestart.topo.docker
@@ -0,0 +1,39 @@
+<TOPOLOGY>
+    <COMPONENT>
+        <ONOScell>
+            <host>localhost</host>  # ONOS "bench" machine
+            <user>sdn</user>
+            <password>rocks</password>
+            <type>OnosClusterDriver</type>
+            <connect_order>1</connect_order>
+            <home></home>   # defines where onos home is on the build machine. Defaults to "~/onos/" if empty.
+            <COMPONENTS>
+                <useDocker>True</useDocker>  # Whether to use docker for ONOS nodes
+                <docker_prompt>~/onos#</docker_prompt>
+                <cluster_name></cluster_name>  # Used as a prefix for cluster components. Defaults to 'ONOS'
+                <diff_clihost>True</diff_clihost> # if it has different host other than localhost for CLI. True or empty. OC# will be used if True.
+                <karaf_username></karaf_username>
+                <karaf_password></karaf_password>
+                <web_user>sdn</web_user>
+                <web_pass>rocks</web_pass>
+                <rest_port></rest_port>
+                <prompt></prompt>  # TODO: we technically need a few of these, one per component
+                <onos_home></onos_home>  # defines where onos home is on the target cell machine. Defaults to entry in "home" if empty.
+                <nodes>3</nodes>  # number of nodes in the cluster
+            </COMPONENTS>
+        </ONOScell>
+
+        <Mininet1>
+            <host>OCN</host>
+            <user>sdn</user>
+            <password>rocks</password>
+            <type>MininetCliDriver</type>
+            <connect_order>2</connect_order>
+            <COMPONENTS>
+                <home>~/mininet/</home>
+                <prompt></prompt>
+            </COMPONENTS>
+        </Mininet1>
+
+    </COMPONENT>
+</TOPOLOGY>
diff --git a/TestON/tests/USECASE/SegmentRouting/SRDhcprelay/SRDhcprelay.params.stratum b/TestON/tests/USECASE/SegmentRouting/SRDhcprelay/SRDhcprelay.params.stratum
index 6f12598..e7e1564 100644
--- a/TestON/tests/USECASE/SegmentRouting/SRDhcprelay/SRDhcprelay.params.stratum
+++ b/TestON/tests/USECASE/SegmentRouting/SRDhcprelay/SRDhcprelay.params.stratum
@@ -17,12 +17,11 @@
         <useCommonTopo>True</useCommonTopo>
         <useBmv2>True</useBmv2>
         <bmv2SwitchType>stratum</bmv2SwitchType>
+        <switchPrefix>bmv2</switchPrefix>
         <stratumRoot>~/stratum</stratumRoot>
         <topology>trellis_fabric.py</topology>
         <lib>routinglib.py,trellislib.py,stratum.py</lib>
         <conf>dhcpd.conf,dhcpd6.conf,bgpdr1.conf,bgpdr2.conf,bgpdbgp1.conf,zebradbgp1.conf,bgpdbgp2.conf,zebradbgp2.conf</conf>
-        <trellisOar>/home/sdn/segmentrouting-oar-3.0.0-SNAPSHOT.oar</trellisOar>
-        <t3Oar>/home/sdn/t3-app-3.0.0-SNAPSHOT.oar</t3Oar>
     </DEPENDENCY>
 
     <MN_DOCKER>
@@ -31,9 +30,21 @@
         <home>/home/root/</home>
     </MN_DOCKER>
 
+    <CLUSTER>
+        # Params for onos docker
+        <dockerSkipBuild>True</dockerSkipBuild>
+        <dockerBuildCmd>make ONOS_BRANCH=master DOCKER_TAG=TestON-master onos-build trellis-control-build trellis-t3-build tost-build</dockerBuildCmd> # Build command to use instead of docker build, e.g. make
+        <dockerBuildTimeout>1200</dockerBuildTimeout>
+        <dockerFilePath>~/tost-onos</dockerFilePath>
+        <dockerImageTag>tost:TestON-master</dockerImageTag>
+        <dockerOptions>-d --rm --network host -v ~/.ssh/authorized_keys:/root/.ssh/authorized_keys -v /tmp/cluster.json:/root/onos/config/cluster.json </dockerOptions> # We start the container detached, so the docker component can connect to cli instead of logs
+        <atomixImageTag>atomix/atomix:3.1.5</atomixImageTag>
+        <atomixOptions>-d --rm --network host -v ~/.ssh/authorized_keys:/root/.ssh/authorized_keys -v /tmp/atomix.json:/opt/atomix/conf/atomix.json </atomixOptions>
+    </CLUSTER>
+
     <ENV>
         <cellName>productionCell</cellName>
-        <cellApps>drivers,openflow,fpm,dhcprelay,netcfghostprovider,routeradvertisement,drivers.bmv2,pipelines.fabric</cellApps>
+        <cellApps>drivers,fpm,dhcprelay,netcfghostprovider,routeradvertisement,drivers.bmv2,pipelines.fabric</cellApps>
     </ENV>
 
     <GIT>
diff --git a/TestON/tests/USECASE/SegmentRouting/SRDhcprelay/SRDhcprelay.topo.docker b/TestON/tests/USECASE/SegmentRouting/SRDhcprelay/SRDhcprelay.topo.docker
new file mode 100644
index 0000000..a6f21d7
--- /dev/null
+++ b/TestON/tests/USECASE/SegmentRouting/SRDhcprelay/SRDhcprelay.topo.docker
@@ -0,0 +1,39 @@
+<TOPOLOGY>
+    <COMPONENT>
+        <ONOScell>
+            <host>localhost</host>  # ONOS "bench" machine
+            <user>sdn</user>
+            <password>rocks</password>
+            <type>OnosClusterDriver</type>
+            <connect_order>1</connect_order>
+            <home></home>   # defines where onos home is on the build machine. Defaults to "~/onos/" if empty.
+            <COMPONENTS>
+                <useDocker>True</useDocker>  # Whether to use docker for ONOS nodes
+                <docker_prompt>~/onos#</docker_prompt>
+                <cluster_name></cluster_name>  # Used as a prefix for cluster components. Defaults to 'ONOS'
+                <diff_clihost>True</diff_clihost> # if it has different host other than localhost for CLI. True or empty. OC# will be used if True.
+                <karaf_username></karaf_username>
+                <karaf_password></karaf_password>
+                <web_user>sdn</web_user>
+                <web_pass>rocks</web_pass>
+                <rest_port></rest_port>
+                <prompt></prompt>  # TODO: we technically need a few of these, one per component
+                <onos_home></onos_home>  # defines where onos home is on the target cell machine. Defaults to entry in "home" if empty.
+                <nodes>3</nodes>  # number of nodes in the cluster
+            </COMPONENTS>
+        </ONOScell>
+
+        <Mininet1>
+            <host>OCN</host>
+            <user>sdn</user>
+            <password>rocks</password>
+            <type>MininetCliDriver</type>
+            <connect_order>2</connect_order>
+            <COMPONENTS>
+                <home>~/mininet/</home>
+                <prompt></prompt>
+            </COMPONENTS>
+        </Mininet1>
+
+    </COMPONENT>
+</TOPOLOGY>
diff --git a/TestON/tests/USECASE/SegmentRouting/SRDynamic/SRDynamic.topo.docker b/TestON/tests/USECASE/SegmentRouting/SRDynamic/SRDynamic.topo.docker
new file mode 100644
index 0000000..a6f21d7
--- /dev/null
+++ b/TestON/tests/USECASE/SegmentRouting/SRDynamic/SRDynamic.topo.docker
@@ -0,0 +1,39 @@
+<TOPOLOGY>
+    <COMPONENT>
+        <ONOScell>
+            <host>localhost</host>  # ONOS "bench" machine
+            <user>sdn</user>
+            <password>rocks</password>
+            <type>OnosClusterDriver</type>
+            <connect_order>1</connect_order>
+            <home></home>   # defines where onos home is on the build machine. Defaults to "~/onos/" if empty.
+            <COMPONENTS>
+                <useDocker>True</useDocker>  # Whether to use docker for ONOS nodes
+                <docker_prompt>~/onos#</docker_prompt>
+                <cluster_name></cluster_name>  # Used as a prefix for cluster components. Defaults to 'ONOS'
+                <diff_clihost>True</diff_clihost> # if it has different host other than localhost for CLI. True or empty. OC# will be used if True.
+                <karaf_username></karaf_username>
+                <karaf_password></karaf_password>
+                <web_user>sdn</web_user>
+                <web_pass>rocks</web_pass>
+                <rest_port></rest_port>
+                <prompt></prompt>  # TODO: we technically need a few of these, one per component
+                <onos_home></onos_home>  # defines where onos home is on the target cell machine. Defaults to entry in "home" if empty.
+                <nodes>3</nodes>  # number of nodes in the cluster
+            </COMPONENTS>
+        </ONOScell>
+
+        <Mininet1>
+            <host>OCN</host>
+            <user>sdn</user>
+            <password>rocks</password>
+            <type>MininetCliDriver</type>
+            <connect_order>2</connect_order>
+            <COMPONENTS>
+                <home>~/mininet/</home>
+                <prompt></prompt>
+            </COMPONENTS>
+        </Mininet1>
+
+    </COMPONENT>
+</TOPOLOGY>
diff --git a/TestON/tests/USECASE/SegmentRouting/SRDynamicConf/SRDynamicConf.topo.docker b/TestON/tests/USECASE/SegmentRouting/SRDynamicConf/SRDynamicConf.topo.docker
new file mode 100644
index 0000000..a6f21d7
--- /dev/null
+++ b/TestON/tests/USECASE/SegmentRouting/SRDynamicConf/SRDynamicConf.topo.docker
@@ -0,0 +1,39 @@
+<TOPOLOGY>
+    <COMPONENT>
+        <ONOScell>
+            <host>localhost</host>  # ONOS "bench" machine
+            <user>sdn</user>
+            <password>rocks</password>
+            <type>OnosClusterDriver</type>
+            <connect_order>1</connect_order>
+            <home></home>   # defines where onos home is on the build machine. Defaults to "~/onos/" if empty.
+            <COMPONENTS>
+                <useDocker>True</useDocker>  # Whether to use docker for ONOS nodes
+                <docker_prompt>~/onos#</docker_prompt>
+                <cluster_name></cluster_name>  # Used as a prefix for cluster components. Defaults to 'ONOS'
+                <diff_clihost>True</diff_clihost> # if it has different host other than localhost for CLI. True or empty. OC# will be used if True.
+                <karaf_username></karaf_username>
+                <karaf_password></karaf_password>
+                <web_user>sdn</web_user>
+                <web_pass>rocks</web_pass>
+                <rest_port></rest_port>
+                <prompt></prompt>  # TODO: we technically need a few of these, one per component
+                <onos_home></onos_home>  # defines where onos home is on the target cell machine. Defaults to entry in "home" if empty.
+                <nodes>3</nodes>  # number of nodes in the cluster
+            </COMPONENTS>
+        </ONOScell>
+
+        <Mininet1>
+            <host>OCN</host>
+            <user>sdn</user>
+            <password>rocks</password>
+            <type>MininetCliDriver</type>
+            <connect_order>2</connect_order>
+            <COMPONENTS>
+                <home>~/mininet/</home>
+                <prompt></prompt>
+            </COMPONENTS>
+        </Mininet1>
+
+    </COMPONENT>
+</TOPOLOGY>
diff --git a/TestON/tests/USECASE/SegmentRouting/SRHighAvailability/SRHighAvailability.topo.docker b/TestON/tests/USECASE/SegmentRouting/SRHighAvailability/SRHighAvailability.topo.docker
new file mode 100644
index 0000000..a6f21d7
--- /dev/null
+++ b/TestON/tests/USECASE/SegmentRouting/SRHighAvailability/SRHighAvailability.topo.docker
@@ -0,0 +1,39 @@
+<TOPOLOGY>
+    <COMPONENT>
+        <ONOScell>
+            <host>localhost</host>  # ONOS "bench" machine
+            <user>sdn</user>
+            <password>rocks</password>
+            <type>OnosClusterDriver</type>
+            <connect_order>1</connect_order>
+            <home></home>   # defines where onos home is on the build machine. Defaults to "~/onos/" if empty.
+            <COMPONENTS>
+                <useDocker>True</useDocker>  # Whether to use docker for ONOS nodes
+                <docker_prompt>~/onos#</docker_prompt>
+                <cluster_name></cluster_name>  # Used as a prefix for cluster components. Defaults to 'ONOS'
+                <diff_clihost>True</diff_clihost> # if it has different host other than localhost for CLI. True or empty. OC# will be used if True.
+                <karaf_username></karaf_username>
+                <karaf_password></karaf_password>
+                <web_user>sdn</web_user>
+                <web_pass>rocks</web_pass>
+                <rest_port></rest_port>
+                <prompt></prompt>  # TODO: we technically need a few of these, one per component
+                <onos_home></onos_home>  # defines where onos home is on the target cell machine. Defaults to entry in "home" if empty.
+                <nodes>3</nodes>  # number of nodes in the cluster
+            </COMPONENTS>
+        </ONOScell>
+
+        <Mininet1>
+            <host>OCN</host>
+            <user>sdn</user>
+            <password>rocks</password>
+            <type>MininetCliDriver</type>
+            <connect_order>2</connect_order>
+            <COMPONENTS>
+                <home>~/mininet/</home>
+                <prompt></prompt>
+            </COMPONENTS>
+        </Mininet1>
+
+    </COMPONENT>
+</TOPOLOGY>
diff --git a/TestON/tests/USECASE/SegmentRouting/SRLinkFailure/SRLinkFailure.topo.docker b/TestON/tests/USECASE/SegmentRouting/SRLinkFailure/SRLinkFailure.topo.docker
new file mode 100644
index 0000000..a6f21d7
--- /dev/null
+++ b/TestON/tests/USECASE/SegmentRouting/SRLinkFailure/SRLinkFailure.topo.docker
@@ -0,0 +1,39 @@
+<TOPOLOGY>
+    <COMPONENT>
+        <ONOScell>
+            <host>localhost</host>  # ONOS "bench" machine
+            <user>sdn</user>
+            <password>rocks</password>
+            <type>OnosClusterDriver</type>
+            <connect_order>1</connect_order>
+            <home></home>   # defines where onos home is on the build machine. Defaults to "~/onos/" if empty.
+            <COMPONENTS>
+                <useDocker>True</useDocker>  # Whether to use docker for ONOS nodes
+                <docker_prompt>~/onos#</docker_prompt>
+                <cluster_name></cluster_name>  # Used as a prefix for cluster components. Defaults to 'ONOS'
+                <diff_clihost>True</diff_clihost> # if it has different host other than localhost for CLI. True or empty. OC# will be used if True.
+                <karaf_username></karaf_username>
+                <karaf_password></karaf_password>
+                <web_user>sdn</web_user>
+                <web_pass>rocks</web_pass>
+                <rest_port></rest_port>
+                <prompt></prompt>  # TODO: we technically need a few of these, one per component
+                <onos_home></onos_home>  # defines where onos home is on the target cell machine. Defaults to entry in "home" if empty.
+                <nodes>3</nodes>  # number of nodes in the cluster
+            </COMPONENTS>
+        </ONOScell>
+
+        <Mininet1>
+            <host>OCN</host>
+            <user>sdn</user>
+            <password>rocks</password>
+            <type>MininetCliDriver</type>
+            <connect_order>2</connect_order>
+            <COMPONENTS>
+                <home>~/mininet/</home>
+                <prompt></prompt>
+            </COMPONENTS>
+        </Mininet1>
+
+    </COMPONENT>
+</TOPOLOGY>
diff --git a/TestON/tests/USECASE/SegmentRouting/SRMulticast/SRMulticast.topo.docker b/TestON/tests/USECASE/SegmentRouting/SRMulticast/SRMulticast.topo.docker
new file mode 100644
index 0000000..a6f21d7
--- /dev/null
+++ b/TestON/tests/USECASE/SegmentRouting/SRMulticast/SRMulticast.topo.docker
@@ -0,0 +1,39 @@
+<TOPOLOGY>
+    <COMPONENT>
+        <ONOScell>
+            <host>localhost</host>  # ONOS "bench" machine
+            <user>sdn</user>
+            <password>rocks</password>
+            <type>OnosClusterDriver</type>
+            <connect_order>1</connect_order>
+            <home></home>   # defines where onos home is on the build machine. Defaults to "~/onos/" if empty.
+            <COMPONENTS>
+                <useDocker>True</useDocker>  # Whether to use docker for ONOS nodes
+                <docker_prompt>~/onos#</docker_prompt>
+                <cluster_name></cluster_name>  # Used as a prefix for cluster components. Defaults to 'ONOS'
+                <diff_clihost>True</diff_clihost> # if it has different host other than localhost for CLI. True or empty. OC# will be used if True.
+                <karaf_username></karaf_username>
+                <karaf_password></karaf_password>
+                <web_user>sdn</web_user>
+                <web_pass>rocks</web_pass>
+                <rest_port></rest_port>
+                <prompt></prompt>  # TODO: we technically need a few of these, one per component
+                <onos_home></onos_home>  # defines where onos home is on the target cell machine. Defaults to entry in "home" if empty.
+                <nodes>3</nodes>  # number of nodes in the cluster
+            </COMPONENTS>
+        </ONOScell>
+
+        <Mininet1>
+            <host>OCN</host>
+            <user>sdn</user>
+            <password>rocks</password>
+            <type>MininetCliDriver</type>
+            <connect_order>2</connect_order>
+            <COMPONENTS>
+                <home>~/mininet/</home>
+                <prompt></prompt>
+            </COMPONENTS>
+        </Mininet1>
+
+    </COMPONENT>
+</TOPOLOGY>
diff --git a/TestON/tests/USECASE/SegmentRouting/SROnosFailure/SROnosFailure.topo.docker b/TestON/tests/USECASE/SegmentRouting/SROnosFailure/SROnosFailure.topo.docker
new file mode 100644
index 0000000..a6f21d7
--- /dev/null
+++ b/TestON/tests/USECASE/SegmentRouting/SROnosFailure/SROnosFailure.topo.docker
@@ -0,0 +1,39 @@
+<TOPOLOGY>
+    <COMPONENT>
+        <ONOScell>
+            <host>localhost</host>  # ONOS "bench" machine
+            <user>sdn</user>
+            <password>rocks</password>
+            <type>OnosClusterDriver</type>
+            <connect_order>1</connect_order>
+            <home></home>   # defines where onos home is on the build machine. Defaults to "~/onos/" if empty.
+            <COMPONENTS>
+                <useDocker>True</useDocker>  # Whether to use docker for ONOS nodes
+                <docker_prompt>~/onos#</docker_prompt>
+                <cluster_name></cluster_name>  # Used as a prefix for cluster components. Defaults to 'ONOS'
+                <diff_clihost>True</diff_clihost> # if it has different host other than localhost for CLI. True or empty. OC# will be used if True.
+                <karaf_username></karaf_username>
+                <karaf_password></karaf_password>
+                <web_user>sdn</web_user>
+                <web_pass>rocks</web_pass>
+                <rest_port></rest_port>
+                <prompt></prompt>  # TODO: we technically need a few of these, one per component
+                <onos_home></onos_home>  # defines where onos home is on the target cell machine. Defaults to entry in "home" if empty.
+                <nodes>3</nodes>  # number of nodes in the cluster
+            </COMPONENTS>
+        </ONOScell>
+
+        <Mininet1>
+            <host>OCN</host>
+            <user>sdn</user>
+            <password>rocks</password>
+            <type>MininetCliDriver</type>
+            <connect_order>2</connect_order>
+            <COMPONENTS>
+                <home>~/mininet/</home>
+                <prompt></prompt>
+            </COMPONENTS>
+        </Mininet1>
+
+    </COMPONENT>
+</TOPOLOGY>
diff --git a/TestON/tests/USECASE/SegmentRouting/SRRouting/SRRouting.params.stratum b/TestON/tests/USECASE/SegmentRouting/SRRouting/SRRouting.params.stratum
index 38edc6f..835baff 100644
--- a/TestON/tests/USECASE/SegmentRouting/SRRouting/SRRouting.params.stratum
+++ b/TestON/tests/USECASE/SegmentRouting/SRRouting/SRRouting.params.stratum
@@ -17,12 +17,11 @@
         <useCommonTopo>True</useCommonTopo>
         <useBmv2>True</useBmv2>
         <bmv2SwitchType>stratum</bmv2SwitchType>
+        <switchPrefix>bmv2</switchPrefix>
         <stratumRoot>~/stratum</stratumRoot>
         <topology>hagg_fabric.py</topology>
         <lib>routinglib.py,trellislib.py,trellis_fabric.py,stratum.py</lib>
         <conf>bgpdbgp1.conf,bgpdbgp2.conf,bgpdr1.conf,bgpdr2.conf,dhcpd6.conf,dhcpd.conf,zebradbgp1.conf,zebradbgp2.conf</conf>
-        <trellisOar>/home/sdn/segmentrouting-oar-3.0.0-SNAPSHOT.oar</trellisOar>
-        <t3Oar>/home/sdn/t3-app-3.0.0-SNAPSHOT.oar</t3Oar>
     </DEPENDENCY>
 
     <MN_DOCKER>
@@ -31,9 +30,21 @@
         <home>/home/root/</home>
     </MN_DOCKER>
 
+    <CLUSTER>
+        # Params for onos docker
+        <dockerSkipBuild>True</dockerSkipBuild>
+        <dockerBuildCmd>make ONOS_BRANCH=master DOCKER_TAG=TestON-master onos-build trellis-control-build trellis-t3-build tost-build</dockerBuildCmd> # Build command to use instead of docker build, e.g. make
+        <dockerBuildTimeout>1200</dockerBuildTimeout>
+        <dockerFilePath>~/tost-onos</dockerFilePath>
+        <dockerImageTag>tost:TestON-master</dockerImageTag>
+        <dockerOptions>-d --rm --network host -v ~/.ssh/authorized_keys:/root/.ssh/authorized_keys -v /tmp/cluster.json:/root/onos/config/cluster.json </dockerOptions> # We start the container detached, so the docker component can connect to cli instead of logs
+        <atomixImageTag>atomix/atomix:3.1.5</atomixImageTag>
+        <atomixOptions>-d --rm --network host -v ~/.ssh/authorized_keys:/root/.ssh/authorized_keys -v /tmp/atomix.json:/opt/atomix/conf/atomix.json </atomixOptions>
+    </CLUSTER>
+
     <ENV>
         <cellName>productionCell</cellName>
-        <cellApps>drivers,openflow,fpm,dhcprelay,netcfghostprovider,routeradvertisement,hostprobingprovider,drivers.bmv2,pipelines.fabric</cellApps>
+        <cellApps>drivers,fpm,dhcprelay,hostprovider,netcfghostprovider,lldpprovider,routeradvertisement,hostprobingprovider,drivers.bmv2,pipelines.fabric,segmentrouting,t3</cellApps>
     </ENV>
 
     <GIT>
diff --git a/TestON/tests/USECASE/SegmentRouting/SRRouting/SRRouting.topo.docker b/TestON/tests/USECASE/SegmentRouting/SRRouting/SRRouting.topo.docker
new file mode 100644
index 0000000..a6f21d7
--- /dev/null
+++ b/TestON/tests/USECASE/SegmentRouting/SRRouting/SRRouting.topo.docker
@@ -0,0 +1,39 @@
+<TOPOLOGY>
+    <COMPONENT>
+        <ONOScell>
+            <host>localhost</host>  # ONOS "bench" machine
+            <user>sdn</user>
+            <password>rocks</password>
+            <type>OnosClusterDriver</type>
+            <connect_order>1</connect_order>
+            <home></home>   # defines where onos home is on the build machine. Defaults to "~/onos/" if empty.
+            <COMPONENTS>
+                <useDocker>True</useDocker>  # Whether to use docker for ONOS nodes
+                <docker_prompt>~/onos#</docker_prompt>
+                <cluster_name></cluster_name>  # Used as a prefix for cluster components. Defaults to 'ONOS'
+                <diff_clihost>True</diff_clihost> # if it has different host other than localhost for CLI. True or empty. OC# will be used if True.
+                <karaf_username></karaf_username>
+                <karaf_password></karaf_password>
+                <web_user>sdn</web_user>
+                <web_pass>rocks</web_pass>
+                <rest_port></rest_port>
+                <prompt></prompt>  # TODO: we technically need a few of these, one per component
+                <onos_home></onos_home>  # defines where onos home is on the target cell machine. Defaults to entry in "home" if empty.
+                <nodes>3</nodes>  # number of nodes in the cluster
+            </COMPONENTS>
+        </ONOScell>
+
+        <Mininet1>
+            <host>OCN</host>
+            <user>sdn</user>
+            <password>rocks</password>
+            <type>MininetCliDriver</type>
+            <connect_order>2</connect_order>
+            <COMPONENTS>
+                <home>~/mininet/</home>
+                <prompt></prompt>
+            </COMPONENTS>
+        </Mininet1>
+
+    </COMPONENT>
+</TOPOLOGY>
diff --git a/TestON/tests/USECASE/SegmentRouting/SRSanity/SRSanity.topo.docker b/TestON/tests/USECASE/SegmentRouting/SRSanity/SRSanity.topo.docker
new file mode 100644
index 0000000..a6f21d7
--- /dev/null
+++ b/TestON/tests/USECASE/SegmentRouting/SRSanity/SRSanity.topo.docker
@@ -0,0 +1,39 @@
+<TOPOLOGY>
+    <COMPONENT>
+        <ONOScell>
+            <host>localhost</host>  # ONOS "bench" machine
+            <user>sdn</user>
+            <password>rocks</password>
+            <type>OnosClusterDriver</type>
+            <connect_order>1</connect_order>
+            <home></home>   # defines where onos home is on the build machine. Defaults to "~/onos/" if empty.
+            <COMPONENTS>
+                <useDocker>True</useDocker>  # Whether to use docker for ONOS nodes
+                <docker_prompt>~/onos#</docker_prompt>
+                <cluster_name></cluster_name>  # Used as a prefix for cluster components. Defaults to 'ONOS'
+                <diff_clihost>True</diff_clihost> # if it has different host other than localhost for CLI. True or empty. OC# will be used if True.
+                <karaf_username></karaf_username>
+                <karaf_password></karaf_password>
+                <web_user>sdn</web_user>
+                <web_pass>rocks</web_pass>
+                <rest_port></rest_port>
+                <prompt></prompt>  # TODO: we technically need a few of these, one per component
+                <onos_home></onos_home>  # defines where onos home is on the target cell machine. Defaults to entry in "home" if empty.
+                <nodes>3</nodes>  # number of nodes in the cluster
+            </COMPONENTS>
+        </ONOScell>
+
+        <Mininet1>
+            <host>OCN</host>
+            <user>sdn</user>
+            <password>rocks</password>
+            <type>MininetCliDriver</type>
+            <connect_order>2</connect_order>
+            <COMPONENTS>
+                <home>~/mininet/</home>
+                <prompt></prompt>
+            </COMPONENTS>
+        </Mininet1>
+
+    </COMPONENT>
+</TOPOLOGY>
diff --git a/TestON/tests/USECASE/SegmentRouting/SRSwitchFailure/SRSwitchFailure.topo.docker b/TestON/tests/USECASE/SegmentRouting/SRSwitchFailure/SRSwitchFailure.topo.docker
new file mode 100644
index 0000000..a6f21d7
--- /dev/null
+++ b/TestON/tests/USECASE/SegmentRouting/SRSwitchFailure/SRSwitchFailure.topo.docker
@@ -0,0 +1,39 @@
+<TOPOLOGY>
+    <COMPONENT>
+        <ONOScell>
+            <host>localhost</host>  # ONOS "bench" machine
+            <user>sdn</user>
+            <password>rocks</password>
+            <type>OnosClusterDriver</type>
+            <connect_order>1</connect_order>
+            <home></home>   # defines where onos home is on the build machine. Defaults to "~/onos/" if empty.
+            <COMPONENTS>
+                <useDocker>True</useDocker>  # Whether to use docker for ONOS nodes
+                <docker_prompt>~/onos#</docker_prompt>
+                <cluster_name></cluster_name>  # Used as a prefix for cluster components. Defaults to 'ONOS'
+                <diff_clihost>True</diff_clihost> # if it has different host other than localhost for CLI. True or empty. OC# will be used if True.
+                <karaf_username></karaf_username>
+                <karaf_password></karaf_password>
+                <web_user>sdn</web_user>
+                <web_pass>rocks</web_pass>
+                <rest_port></rest_port>
+                <prompt></prompt>  # TODO: we technically need a few of these, one per component
+                <onos_home></onos_home>  # defines where onos home is on the target cell machine. Defaults to entry in "home" if empty.
+                <nodes>3</nodes>  # number of nodes in the cluster
+            </COMPONENTS>
+        </ONOScell>
+
+        <Mininet1>
+            <host>OCN</host>
+            <user>sdn</user>
+            <password>rocks</password>
+            <type>MininetCliDriver</type>
+            <connect_order>2</connect_order>
+            <COMPONENTS>
+                <home>~/mininet/</home>
+                <prompt></prompt>
+            </COMPONENTS>
+        </Mininet1>
+
+    </COMPONENT>
+</TOPOLOGY>
diff --git a/TestON/tests/USECASE/SegmentRouting/dependencies/Dockerfile b/TestON/tests/USECASE/SegmentRouting/dependencies/Dockerfile
index 4b714c5..bed2f54 100644
--- a/TestON/tests/USECASE/SegmentRouting/dependencies/Dockerfile
+++ b/TestON/tests/USECASE/SegmentRouting/dependencies/Dockerfile
@@ -5,7 +5,7 @@
 RUN ln -s /root/* .
 RUN chmod 777 $HOME
 
-RUN install_packages python-pip openvswitch-switch vim quagga isc-dhcp-server isc-dhcp-client iptables vlan
+RUN install_packages python-pip openvswitch-switch vim quagga isc-dhcp-server isc-dhcp-client iptables vlan vzctl
 RUN pip install ipaddress
 
 RUN ln -s $HOME /var/run/quagga
diff --git a/TestON/tests/USECASE/SegmentRouting/dependencies/Testcaselib.py b/TestON/tests/USECASE/SegmentRouting/dependencies/Testcaselib.py
index d4a7b7e..8fa0b96 100644
--- a/TestON/tests/USECASE/SegmentRouting/dependencies/Testcaselib.py
+++ b/TestON/tests/USECASE/SegmentRouting/dependencies/Testcaselib.py
@@ -86,7 +86,7 @@
             main.stratumRoot = main.params[ 'DEPENDENCY'][ 'stratumRoot'] if 'stratumRoot' in main.params[ 'DEPENDENCY' ] else None
             main.scale = ( main.params[ 'SCALE' ][ 'size' ] ).split( "," )
             main.maxNodes = int( main.params[ 'SCALE' ][ 'max' ] )
-            main.trellisOar = main.params[ 'DEPENDENCY' ][ 'trellisOar' ]
+            main.trellisOar = main.params[ 'DEPENDENCY' ][ 'trellisOar' ] if 'trellisOar' in main.params[ 'DEPENDENCY' ] else None
             main.t3Oar = main.params[ 'DEPENDENCY' ][ 't3Oar' ] if 't3Oar' in main.params[ 'DEPENDENCY' ] else None
 
             stepResult = main.testSetUp.envSetup( False )
@@ -141,7 +141,9 @@
             main.cleanAndExit()
 
         # Install segmentrouting and t3 app
-        appInstallResult = main.ONOSbench.onosAppInstall( main.Cluster.runningNodes[0].ipAddress, main.trellisOar)
+        appInstallResult = main.TRUE
+        if main.trellisOar:
+            appInstallResult = appInstallResult and main.ONOSbench.onosAppInstall( main.Cluster.runningNodes[0].ipAddress, main.trellisOar)
         if main.t3Oar:
             appInstallResult = appInstallResult and main.ONOSbench.onosAppInstall( main.Cluster.runningNodes[0].ipAddress, main.t3Oar)
         utilities.assert_equals( expect=main.TRUE, actual=appInstallResult,
@@ -235,18 +237,6 @@
             if 'MN_DOCKER' in main.params and main.params['MN_DOCKER']['args']:
                 destDir = "/tmp/mn_conf/"
                 # Try to ensure the destination exists
-                main.log.info( "Create folder for network config files" )
-                handle = main.Mininet1.handle
-                handle.sendline( "mkdir -p %s" % destDir )
-                handle.expect( [ main.Mininet1.prompt, main.Mininet1.dockerPrompt ] )
-                main.log.debug( handle.before + handle.after )
-                # Make sure permissions are correct
-                handle.sendline( "sudo chown %s:%s %s" % ( main.Mininet1.user_name, main.Mininet1.user_name, destDir ) )
-                handle.expect( [ main.Mininet1.prompt, main.Mininet1.dockerPrompt ] )
-                main.log.debug( handle.before + handle.after )
-                handle.sendline( "sudo chmod -R a+rwx %s" % ( destDir ) )
-                handle.expect( [ main.Mininet1.prompt, main.Mininet1.dockerPrompt ] )
-                main.log.debug( handle.before + handle.after )
             for conf in main.topologyConf.split(","):
                 # Update zebra configurations with correct ONOS instance IP
                 if conf in [ "zebradbgp1.conf", "zebradbgp2.conf" ]:
@@ -272,7 +262,7 @@
                                  onfail="Failed to copy topo files" )
         if main.stratumRoot:
             main.Mininet1.handle.sendline( "export STRATUM_ROOT=" + str( main.stratumRoot ) )
-            main.Mininet1.handle.expect( main.Mininet1.prompt )
+            main.Mininet1.handle.expect( main.Mininet1.Prompt() )
         main.step( "Starting Mininet Topology" )
         arg = "--onos-ip=%s %s" % (",".join([ctrl.ipAddress for ctrl in main.Cluster.runningNodes]), args)
         main.topology = topology
@@ -287,9 +277,11 @@
         if not topoResult:
             main.cleanAndExit()
         if main.useBmv2:
+            main.step( "Configure switches in ONOS" )
             # Upload the net-cfg file created for each switch
             filename = "onos-netcfg.json"
             switchPrefix = main.params[ 'DEPENDENCY' ].get( 'switchPrefix', "bmv2" )
+            switchNetCfg = main.TRUE
             for switch in main.Mininet1.getSwitches( switchRegex=r"(StratumBmv2Switch)|(Bmv2Switch)" ).keys():
                 path = "/tmp/mn-stratum/%s/" % switch
                 dstPath = "/tmp/"
@@ -299,6 +291,7 @@
                                      "%s%s" % ( dstPath, dstFileName ),
                                      "from" )
                 main.ONOSbench1.handle.sendline( "sudo sed -i 's/localhost/%s/g' %s%s" % ( main.Mininet1.ip_address, dstPath, dstFileName ) )
+                main.ONOSbench1.handle.expect( main.ONOSbench1.prompt )
                 # Configure managementAddress
                 main.ONOSbench1.handle.sendline( "sudo sed -i 's/localhost/%s/g' %s%s" % ( main.Mininet1.ip_address, dstPath, dstFileName ) )
                 main.ONOSbench1.handle.expect( main.ONOSbench1.prompt )
@@ -311,7 +304,19 @@
                 main.ONOSbench1.handle.sendline( "sudo sed -i '/\"basic\"/a\        \"name\": \"%s\",' %s%s" % ( switch, dstPath, dstFileName ) )
                 main.ONOSbench1.handle.expect( main.ONOSbench1.prompt )
                 main.log.debug( main.ONOSbench1.handle.before + main.ONOSbench1.handle.after )
-                main.ONOSbench1.onosNetCfg( main.ONOSserver1.ip_address, dstPath, dstFileName )
+                node = main.Cluster.active(0)
+                switchNetCfg = switchNetCfg and node.onosNetCfg( node.server.ip_address,
+                                                                 dstPath,
+                                                                 dstFileName,
+                                                                 user=node.REST.user_name,
+                                                                 password=node.REST.pwd )
+            # Stop test if we fail to push switch netcfg
+            utilities.assert_equals( expect=main.TRUE,
+                                     actual=switchNetCfg,
+                                     onpass="Successfully pushed switch netcfg",
+                                     onfail="Failed to configure switches in onos" )
+            if not switchNetCfg:
+                main.cleanAndExit()
         # Make sure hosts make some noise
         Testcaselib.discoverHosts( main )
 
@@ -1594,56 +1599,36 @@
 
             main.log.info( "Creating Mininet Docker" )
             handle = main.Mininet1.handle
-            main.Mininet1.dockerPrompt = '#'
             # build docker image
-            buildOutput = ""
-            try:
-                handle.sendline( " docker build -t trellis_mininet %s/../dependencies/" % main.testDir )
-                handle.expect( "Successfully built", timeout=600 )
-                buildOutput = handle.before + str( handle.after )
-                handle.expect( main.Mininet1.prompt )
-                buildOutput += handle.before
-                main.log.debug( buildOutput )
-            except pexpect.TIMEOUT as e:
-                main.log.error( e )
-                buildOutput += handle.before
-                main.log.debug( buildOutput )
+            dockerFilePath = "%s/../dependencies/" % main.testDir
+            dockerName = "trellis_mininet"
+            # TODO: assert on these docker calls
+            main.Mininet1.dockerBuild( dockerFilePath, dockerName )
 
             confDir = "/tmp/mn_conf/"
             # Try to ensure the destination exists
             main.log.info( "Create folder for network config files" )
             handle.sendline( "mkdir -p %s" % confDir )
-            handle.expect( main.Mininet1.prompt )
+            handle.expect( main.Mininet1.Prompt() )
             main.log.debug( handle.before + handle.after )
             # Make sure permissions are correct
             handle.sendline( "sudo chown %s:%s %s" % ( main.Mininet1.user_name, main.Mininet1.user_name, confDir ) )
-            handle.expect( main.Mininet1.prompt )
+            handle.expect( main.Mininet1.Prompt() )
             handle.sendline( "sudo chmod -R a+rwx %s" % ( confDir ) )
-            handle.expect( main.Mininet1.prompt )
+            handle.expect( main.Mininet1.Prompt() )
             main.log.debug( handle.before + handle.after )
             # Stop any leftover container
-            handle.sendline( "docker stop trellis_mininet" )
-            handle.expect( main.Mininet1.bashPrompt )
-            main.log.debug( handle.before )
+            main.Mininet1.dockerStop( dockerName )
             # Start docker container
-            handle.sendline( "docker run --name trellis_mininet %s %s" % ( main.params[ 'MN_DOCKER' ][ 'args' ], main.params[ 'MN_DOCKER' ][ 'name' ] ) )
-            i = handle.expect( [ main.Mininet1.bashPrompt, "Error response from daemon: Conflict. The container name" ] )
-            output = handle.before + handle.after
-            main.log.debug( repr(output) )
-            if i == 1:
+            runResponse = main.Mininet1.dockerRun( main.params[ 'MN_DOCKER' ][ 'name' ],
+                                                   dockerName,
+                                                   main.params[ 'MN_DOCKER' ][ 'args' ] )
+            if runResponse == main.FALSE:
                 main.log.error( "Docker container already running, aborting test" )
                 main.cleanup()
                 main.exit()
 
-            handle.sendline( "docker attach trellis_mininet" )
-            handle.expect( main.Mininet1.dockerPrompt )
-            main.log.debug( handle.before + handle.after )
-            handle.sendline( "sysctl -w net.ipv4.ip_forward=0" )
-            handle.sendline( "sysctl -w net.ipv4.conf.all.forwarding=0" )
-            handle.expect( main.Mininet1.dockerPrompt )
-            main.log.debug( handle.before + handle.after )
-            # We should be good to go
-            main.Mininet1.prompt = main.Mininet1.dockerPrompt
+            main.Mininet1.dockerAttach( dockerName, dockerPrompt='~#' )
             main.Mininet1.sudoRequired = False
 
             # Fow when we create component handles
@@ -1659,19 +1644,12 @@
 
         if hasattr( main, 'Mininet1' ):
             if 'MN_DOCKER' in main.params and main.params['MN_DOCKER']['args']:
-                main.log.info( "Deleting Mininet Docker" )
+                main.log.info( "Exiting from Mininet Docker" )
 
                 # Detach from container
                 handle = main.Mininet1.handle
                 try:
-                    handle.sendline( "exit" )  # ctrl-p ctrk-q  to detach from container
-                    main.log.debug( "sleeping %i seconds" % ( 5 ) )
-                    time.sleep(5)
-                    handle.expect( main.Mininet1.dockerPrompt )
-                    main.log.debug( handle.before + handle.after )
-                    main.Mininet1.prompt = main.Mininet1.bashPrompt
-                    handle.expect( main.Mininet1.prompt )
-                    main.log.debug( handle.before + handle.after )
+                    main.Mininet1.dockerDisconnect()
                     main.Mininet1.sudoRequired = True
                 except Exception as e:
                     main.log.error( e )
diff --git a/TestON/tests/dependencies/Cluster.py b/TestON/tests/dependencies/Cluster.py
index 9ed9256..ae08de9 100644
--- a/TestON/tests/dependencies/Cluster.py
+++ b/TestON/tests/dependencies/Cluster.py
@@ -37,7 +37,7 @@
             atomixNodes.append( "{%s:%s}" % ( node.name, node.ipAddress ) )
         return "%s[%s; Atomix Nodes:%s]" % ( self.name, ", ".join( controllers ), ", ".join( atomixNodes ) )
 
-    def __init__( self, ctrlList=[], name="Cluster" ):
+    def __init__( self, ctrlList=[], name="Cluster", useDocker=False ):
         """
             controllers : All the nodes
             runningNodes : Node that are specifically running from the test.
@@ -53,6 +53,16 @@
         self.name = str( name )
         self.atomixNodes = ctrlList
         self.iterator = iter( self.active() )
+        self.useDocker = useDocker
+        clusterParams = main.params.get( "CLUSTER", {} )
+        self.dockerSkipBuild = clusterParams.get( "dockerSkipBuild", False )
+        self.dockerBuildCmd = clusterParams.get( "dockerBuildCmd", None )
+        self.dockerBuildTimeout = int( clusterParams.get( "dockerBuildTimeout", 600 ) )
+        self.dockerFilePath = clusterParams.get( "dockerFilePath", None )
+        self.dockerImageTag = clusterParams.get( "dockerImageTag", None )
+        self.dockerOptions = clusterParams.get( "dockerOptions", "" )
+        self.atomixImageTag = clusterParams.get( "atomixImageTag", None )
+        self.atomixOptions = clusterParams.get( "atomixOptions", "" )
 
     def fromNode( self, ctrlList ):
         """
@@ -389,6 +399,88 @@
             ctrlList[ i ].active = False
         return result
 
+    def dockerStop( self, killMax, atomix=True ):
+        """
+        Description:
+            Stop the onos docker containers. It will either stop the
+            currently running nodes or the max number of nodes.
+        Required:
+            * killMax - The boolean that will decide whether to stop
+            only the running nodes ( False ) or the max number of nodes ( True ).
+        Returns:
+            Returns main.TRUE if the containers were stopped successfully.
+        """
+        getFrom = "all" if killMax else "running"
+        result = main.TRUE
+        stopResult = self.command( "dockerStop",
+                                   args=[ "name" ],
+                                   specificDriver=4,
+                                   getFrom=getFrom,
+                                   funcFromCtrl=True )
+        ctrlList = self.fromNode( getFrom )
+        for i in range( len( stopResult ) ):
+            result = result and stopResult[ i ]
+            ctrlList[ i ].active = False
+        atomixResult = main.TRUE
+        if atomix:
+            atomixResult = self.stopAtomixDocker( killMax )
+        return result and atomixResult
+
+    def dockerBuild( self, pull=True ):
+        """
+        Description:
+            Build the ONOS docker image
+        Optional:
+            * pull - Try to pull the latest image before building
+        Returns:
+            Returns main.TRUE if the image was successfully pulled or built.
+        """
+        getFrom = "all"
+        result = main.TRUE
+        atomixResult = []
+        buildResult = []
+        if self.atomixImageTag:
+            atomixResult = self.command( "dockerPull",
+                                         args=[ self.atomixImageTag ],
+                                         specificDriver=4,
+                                         getFrom=getFrom,
+                                         funcFromCtrl=False )
+        if not self.dockerImageTag:
+            main.log.error( "No image given, exiting test" )
+            return main.FALSE
+        if pull and self.dockerImageTag:
+            buildResult = self.command( "dockerPull",
+                                        args=[ self.dockerImageTag ],
+                                        specificDriver=4,
+                                        getFrom=getFrom,
+                                        funcFromCtrl=False )
+            for i in range( len( buildResult ) ):
+                result = result and buildResult[ i ]
+        if self.dockerSkipBuild:
+            return main.TRUE
+        if not result and self.dockerBuildCmd:
+            buildResult = self.command( "makeDocker",
+                                        args=[ self.dockerFilePath, self.dockerBuildCmd ],
+                                        kwargs={ "timeout": self.dockerBuildTimeout,
+                                                 "prompt": "Successfully tagged %s" % self.dockerImageTag },
+                                        specificDriver=4,
+                                        getFrom=getFrom,
+                                        funcFromCtrl=False )
+
+        elif not result:
+            buildResult = self.command( "dockerBuild",
+                                        args=[ self.dockerFilePath, self.dockerImageTag ],
+                                        kwargs={ "timeout": self.dockerBuildTimeout,
+                                                 "pull": pull },
+                                        specificDriver=4,
+                                        getFrom=getFrom,
+                                        funcFromCtrl=False )
+        for i in range( len( atomixResult ) ):
+            result = result and atomixResult[ i ]
+        for i in range( len( buildResult ) ):
+            result = result and buildResult[ i ]
+        return result
+
     def ssh( self ):
         """
         Description:
@@ -399,9 +491,16 @@
             the onos.
         """
         result = main.TRUE
+        if self.useDocker:
+            driver = 2
+            kwargs = { "userName": "karafUser",
+                       "userPWD": "karafPass" }
+        else:
+            driver = 1
+            kwargs = { "node": "ipAddress" }
         sshResult = self.command( "onosSecureSSH",
-                                   kwargs={ "node": "ipAddress" },
-                                   specificDriver=1,
+                                   kwargs=kwargs,
+                                   specificDriver=driver,
                                    getFrom="running",
                                    funcFromCtrl=True )
         for sshR in sshResult:
@@ -417,6 +516,9 @@
             Returns main.TRUE if it successfully installed
         """
         result = main.TRUE
+        if self.useDocker:
+            # We will do this as part of startDocker
+            return result
         threads = []
         i = 0
         for ctrl in self.atomixNodes:
@@ -472,6 +574,124 @@
                 result = result and t.result
         return result
 
+    def startONOSDocker( self, installMax=True, installParallel=True ):
+        """
+        Description:
+            Installing onos via docker containers.
+        Required:
+            * installMax - True for installing max number of nodes
+            False for installing current running nodes only.
+        Returns:
+            Returns main.TRUE if it successfully installed
+        """
+        result = main.TRUE
+        threads = []
+        for ctrl in self.controllers if installMax else self.runningNodes:
+            if installParallel:
+                t = main.Thread( target=ctrl.server.dockerRun,
+                                 name="onos-run-docker-" + ctrl.name,
+                                 args=[ self.dockerImageTag, ctrl.name ],
+                                 kwargs={ "options" : self.dockerOptions } )
+                threads.append( t )
+                t.start()
+            else:
+                result = result and \
+                            ctrl.server.dockerRun( self.dockerImageTag,
+                                                   ctrl.name,
+                                                   options=self.dockerOptions )
+        if installParallel:
+            for t in threads:
+                t.join()
+                result = result and t.result
+        return result
+
+    def startAtomixDocker( self, installParallel=True ):
+        """
+        Description:
+            Installing atomix via docker containers.
+        Required:
+            * installParallel - True for installing atomix in parallel.
+        Returns:
+            Returns main.TRUE if it successfully installed
+        """
+        result = main.TRUE
+        threads = []
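+        # Start one Atomix container per Atomix node; containers are named
+        # "atomix-<node name>" to distinguish them from the ONOS containers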
+        for ctrl in self.atomixNodes:
+            if installParallel:
+                t = main.Thread( target=ctrl.server.dockerRun,
+                                 name="atomix-run-docker-" + ctrl.name,
+                                 args=[ self.atomixImageTag, "atomix-" + ctrl.name ],
+                                 kwargs={ "options" : main.params['CLUSTER']['atomixOptions'],
+                                          "imageArgs": " --config /opt/atomix/conf/atomix.json --ignore-resources"} )
+                threads.append( t )
+                t.start()
+            else:
+                result = result and \
+                            ctrl.server.dockerRun( self.atomixImageTag,
+                                                   "atomix-" + ctrl.name,
+                                                   options=main.params['CLUSTER']['atomixOptions'] )
+        if installParallel:
+            for t in threads:
+                t.join()
+                result = result and t.result
+        return result
+
+    def stopAtomixDocker( self, killMax=True, installParallel=True ):
+        """
+        Description:
+            Stopping all Atomix containers
+        Required:
+            * killMax - True for stopping the Atomix containers on all controllers,
+            False for stopping only the ones on the configured Atomix nodes.
+            * installParallel - True for stopping the containers in parallel.
+        Returns:
+            Returns main.TRUE if the containers were successfully stopped
+        """
+        result = main.TRUE
+        threads = []
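+        # Stop the "atomix-<node name>" container on every controller ( killMax )
+        # or only on the configured Atomix nodes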
+        for ctrl in self.controllers if killMax else self.atomixNodes:
+            if installParallel:
+                t = main.Thread( target=ctrl.server.dockerStop,
+                                 name="atomix-stop-docker-" + ctrl.name,
+                                 args=[ "atomix-" + ctrl.name ] )
+                threads.append( t )
+                t.start()
+            else:
+                result = result and \
+                            ctrl.server.dockerStop( "atomix-" + ctrl.name )
+        if installParallel:
+            for t in threads:
+                t.join()
+                result = result and t.result
+        return result
+
+    def genPartitions( self, path="/tmp/cluster.json" ):
+        """
+        Description:
+            Generate the Atomix and ONOS cluster configs and copy them to each server
+        Required:
+            * path - destination path of the cluster config file on each ONOS server.
+        Returns:
+            Returns main.TRUE if the configs were successfully generated and copied
+        """
+        result = main.TRUE
+        # Generate an Atomix config for each Atomix node and copy it to that node's server
+        for ctrl in self.atomixNodes:
+            localAtomixFile = ctrl.ip_address + "-atomix.json"
+            result = result and main.ONOSbench.generateAtomixConfig( ctrl.server.ip_address, path=localAtomixFile )
+            result = result and main.ONOSbench.scp( ctrl.server,
+                                                    localAtomixFile,
+                                                    "/tmp/atomix.json",
+                                                    direction="to" )
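+        # Generate an ONOS cluster config for each controller and copy it to that node's server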
+        for ctrl in self.controllers:
+            localOnosFile = ctrl.ip_address + "-cluster.json"
+            result = result and main.ONOSbench.generateOnosConfig( ctrl.server.ip_address, path=localOnosFile )
+            result = result and main.ONOSbench.scp( ctrl.server,
+                                                    localOnosFile,
+                                                    path,
+                                                    direction="to" )
+        return result
+
     def startCLIs( self ):
         """
         Description:
@@ -522,6 +742,16 @@
                 main.log.warn( repr( i ) )
                 currentResult = False
             results = results and currentResult
+        # Check to make sure all bundles are started
+        bundleOutput = self.command( "sendline", args=[ "bundle:list" ] )
+        for i in bundleOutput:
+            if "START LEVEL 100" in i:
+                currentResult = True
+            else:
+                currentResult = False
+                main.log.warn( "Node's bundles not fully started" )
+                main.log.debug( i )
+            results = results and currentResult
         return results
 
     def appsCheck( self, apps ):
@@ -548,6 +778,50 @@
                     main.log.warn( "{}: {} is in {} state".format( ctrl.name, app, states[ i ] ) )
         return results
 
+    def attachToONOSDocker( self ):
+        """
+        Description:
+            Connect to the ONOS docker containers using the onosCli driver
+        Required:
+        Returns:
+            Returns main.TRUE if it successfully connected.
+        """
+        getFrom = "running"
+        result = main.TRUE
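+        # Run dockerExec through each running node's CLI driver so the driver's
+        # shell session is attached inside that node's ONOS container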
+        execResults = self.command( "dockerExec",
+                                    args=[ "name" ],
+                                    kwargs={ "dockerPrompt": "dockerPrompt" },
+                                    specificDriver=2,
+                                    getFrom=getFrom,
+                                    funcFromCtrl=True )
+        ctrlList = self.fromNode( getFrom )
+        for i in range( len( execResults ) ):
+            result = result and execResults[ i ]
+            ctrlList[ i ].active = True
+        return result
+
+    def prepareForCLI( self ):
+        """
+        Description:
+            Prepare the docker containers for connecting to the ONOS CLI
+        Required:
+        Returns:
+            Returns main.TRUE if the containers were successfully prepared.
+        """
+        getFrom = "running"
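+        # Mark each running node's CLI driver as attached to a container, then
+        # run the driver's prepareForCLI routine on every running node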
+        for ctrl in self.getRunningNodes():
+            ctrl.CLI.inDocker = True
+        result = main.TRUE
+        execResults = self.command( "prepareForCLI",
+                                    specificDriver=2,
+                                    getFrom=getFrom,
+                                    funcFromCtrl=True )
+        ctrlList = self.fromNode( getFrom )
+        for i in range( len( execResults ) ):
+            result = result and execResults[ i ]
+            ctrlList[ i ].active = True
+        return result
+
     def printResult( self, results, activeList, logLevel="debug" ):
         """
         Description:
@@ -623,6 +897,7 @@
                 1 - from bench
                 2 - from cli
                 3 - from rest
+                4 - from server
             * contentCheck - If this is True, it will check if the result has some
             contents.
             * getFrom - from which nodes
@@ -637,24 +912,34 @@
             Returns resultContent of the result if contentCheck
         """
         threads = []
-        drivers = [ None, "Bench", "CLI", "REST" ]
+        drivers = [ None, "Bench", "CLI", "REST", "server" ]
         results = []
         for ctrl in self.fromNode( getFrom ):
+            funcArgs = []
+            funcKwargs = {}
             try:
-                funcArgs = []
-                funcKwargs = {}
                 f = getattr( ( ctrl if not specificDriver else
                                getattr( ctrl, drivers[ specificDriver ] ) ), function )
-                if funcFromCtrl:
-                    if args:
-                        for i in range( len( args ) ):
-                            funcArgs.append( getattr( ctrl, args[ i ] ) )
-                    if kwargs:
-                        for k in kwargs:
-                            funcKwargs.update( { k: getattr( ctrl, kwargs[ k ] ) } )
             except AttributeError:
                 main.log.error( "Function " + function + " not found. Exiting the Test." )
                 main.cleanAndExit()
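+            # Resolve argument and keyword-argument names into attributes of the
+            # controller object, exiting the test if an expected attribute is missing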
+            if funcFromCtrl:
+                if args:
+                    try:
+                        for i in range( len( args ) ):
+                            funcArgs.append( getattr( ctrl, args[ i ] ) )
+                    except AttributeError:
+                        main.log.error( "Argument " + str( args[ i ] ) + " for " + str( f ) + " not found. Exiting the Test." )
+                        main.cleanAndExit()
+                if kwargs:
+                    try:
+                        for k in kwargs:
+                            funcKwargs.update( { k: getattr( ctrl, kwargs[ k ] ) } )
+                    except AttributeError as e:
+                        main.log.exception("")
+                        main.log.error( "Keyword Argument " + str( k ) + " for " + str( f ) + " not found. Exiting the Test." )
+                        main.log.debug( "Passed kwargs: %s; dir(ctrl): %s" % ( repr( kwargs ), dir( ctrl ) ) )
+                        main.cleanAndExit()
             t = main.Thread( target=f,
                              name=function + "-" + ctrl.name,
                              args=funcArgs if funcFromCtrl else args,
diff --git a/TestON/tests/dependencies/ONOSSetup.py b/TestON/tests/dependencies/ONOSSetup.py
index fabfe9a..020af9d 100644
--- a/TestON/tests/dependencies/ONOSSetup.py
+++ b/TestON/tests/dependencies/ONOSSetup.py
@@ -44,7 +44,7 @@
         try:
             main.Cluster
         except ( NameError, AttributeError ):
-            main.Cluster = Cluster( main.ONOScell.nodes )
+            main.Cluster = Cluster( main.ONOScell.nodes, useDocker=main.ONOScell.useDocker )
         main.ONOSbench = main.Cluster.controllers[ 0 ].Bench
         main.testOnDirectory = re.sub( "(/tests)$", "", main.testsRoot )
 
@@ -100,20 +100,20 @@
         try:
             main.Cluster
         except ( NameError, AttributeError ):
-            main.Cluster = Cluster( main.ONOScell.nodes )
+            main.Cluster = Cluster( main.ONOScell.nodes, useDocker=main.ONOScell.useDocker )
 
         main.cellData = {}  # For creating cell file
 
         return main.TRUE
 
-    def envSetupException( self, e ):
+    def envSetupException( self, error ):
         """
         Description:
             handles the exception that might occur from the environment setup.
         Required:
-            * includeGitPull - exceeption code e.
+            * error - the exception raised during environment setup.
         """
-        main.log.exception( e )
+        main.log.exception( error )
         main.cleanAndExit()
 
     def envSetupConclusion( self, stepResult ):
@@ -219,6 +219,21 @@
         main.log.info( "Safety check, killing all ONOS processes" )
         return cluster.killOnos( killRemoveMax, stopOnos )
 
+    def killingAllOnosDocker( self, cluster, killRemoveMax ):
+        """
+        Description:
+            Stopping the ONOS docker containers. It will either stop the
+            currently running nodes or the max number of nodes.
+        Required:
+            * cluster - the cluster driver that will be used.
+            * killRemoveMax - The boolean that will decide either to kill
+            only running nodes ( False ) or max number of nodes ( True ).
+        Returns:
+            Returns main.TRUE if the containers were successfully stopped.
+        """
+        main.log.info( "Safety check, stopping all ONOS docker containers" )
+        return cluster.dockerStop( killRemoveMax )
+
     def createApplyCell( self, cluster, newCell, cellName, cellApps,
                          mininetIp, useSSH, onosIps, installMax=False,
                          atomixClusterSize=None ):
@@ -243,6 +258,8 @@
         """
         if atomixClusterSize is None:
             atomixClusterSize = len( cluster.runningNodes )
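+        # An Atomix cluster size of 1 is expanded to one Atomix node per defined controller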
+        if atomixClusterSize == 1:
+            atomixClusterSize = len( cluster.controllers )
         atomixClusterSize = int( atomixClusterSize )
         cluster.setAtomixNodes( atomixClusterSize )
         atomixIps = [ node.ipAddress for node in cluster.atomixNodes ]
@@ -316,6 +333,25 @@
             main.cleanAndExit()
         return packageResult
 
+    def buildDocker( self, cluster ):
+        """
+        Description:
+            Build the ONOS docker image
+        Required:
+            * cluster - the cluster driver that will be used.
+        Returns:
+            Returns main.TRUE if it successfully built.
+        """
+        main.step( "Building ONOS Docker image" )
+        buildResult = cluster.dockerBuild()
+        utilities.assert_equals( expect=main.TRUE,
+                                 actual=buildResult,
+                                 onpass="Successfully created ONOS docker",
+                                 onfail="Failed to create ONOS docker" )
+        if not buildResult:
+            main.cleanAndExit()
+        return buildResult
+
     def installAtomix( self, cluster, parallel=True ):
         """
         Description:
@@ -361,6 +397,42 @@
             main.cleanAndExit()
         return onosInstallResult
 
+    def startDocker( self, cluster, installMax, parallel=True ):
+        """
+        Description:
+            Start onos docker containers and verify the result
+        Required:
+            * cluster - the cluster driver that will be used.
+            * installMax - True for starting containers on the max number of nodes,
+            False for starting them on the currently running nodes only.
+            * parallel - True for starting the containers in parallel.
+        Returns:
+            Returns main.TRUE if the containers were successfully started
+        """
+        main.step( "Create Cluster Config" )
+        configResult = cluster.genPartitions()
+        utilities.assert_equals( expect=main.TRUE,
+                                 actual=configResult,
+                                 onpass="Successfully created cluster config",
+                                 onfail="Failed to create cluster config" )
+
+        # install atomix docker containers
+        main.step( "Installing Atomix via docker containers" )
+        atomixInstallResult = cluster.startAtomixDocker( parallel )
+        utilities.assert_equals( expect=main.TRUE,
+                                 actual=atomixInstallResult,
+                                 onpass="Successfully started Atomix containers",
+                                 onfail="Failed to start Atomix containers" )
+
+        main.step( "Installing ONOS via docker containers" )
+        onosInstallResult = cluster.startONOSDocker( installMax, parallel )
+        utilities.assert_equals( expect=main.TRUE,
+                                 actual=onosInstallResult,
+                                 onpass="Successfully started ONOS containers",
+                                 onfail="Failed to start ONOS containers" )
+        if not ( onosInstallResult and atomixInstallResult ):
+            main.cleanAndExit()
+        return onosInstallResult and atomixInstallResult
+
     def setupSsh( self, cluster ):
         """
         Description:
@@ -565,6 +637,7 @@
         if restartCluster:
             atomixKillResult = self.killingAllAtomix( cluster, killRemoveMax, stopAtomix )
             onosKillResult = self.killingAllOnos( cluster, killRemoveMax, stopOnos )
+            dockerKillResult = self.killingAllOnosDocker( cluster, killRemoveMax )
             killResult = atomixKillResult and onosKillResult
         else:
             killResult = main.TRUE
@@ -577,11 +650,24 @@
 
             packageResult = main.TRUE
             if not skipPack:
-                packageResult = self.buildOnos(cluster)
+                if cluster.useDocker:
+                    packageResult = self.buildDocker( cluster )
+                else:
+                    packageResult = self.buildOnos( cluster )
 
-            atomixInstallResult = self.installAtomix( cluster, installParallel )
-            onosInstallResult = self.installOnos( cluster, installMax, installParallel )
-            installResult = atomixInstallResult and onosInstallResult
+            if cluster.useDocker:
+                installResult = self.startDocker( cluster, installMax, installParallel )
+            else:
+                atomixInstallResult = self.installAtomix( cluster, installParallel )
+                onosInstallResult = self.installOnos( cluster, installMax, installParallel )
+                installResult = atomixInstallResult and onosInstallResult
+
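+            # When using docker, attach the CLI drivers to the ONOS containers and
+            # prepare them before any CLI commands are run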
+            preCLIResult = main.TRUE
+            if cluster.useDocker:
+                attachResult = cluster.attachToONOSDocker()
+                prepareResult = cluster.prepareForCLI()
+
+                preCLIResult = preCLIResult and attachResult and prepareResult
 
             self.processList( extraClean, cleanArgs )
             secureSshResult = self.setupSsh( cluster )
@@ -590,8 +676,11 @@
             uninstallResult = main.TRUE
             installResult = main.TRUE
             secureSshResult = main.TRUE
+            preCLIResult = main.TRUE
 
-        onosServiceResult = self.checkOnosService( cluster )
+        onosServiceResult = main.TRUE
+        if not cluster.useDocker:
+            onosServiceResult = self.checkOnosService( cluster )
 
         onosCliResult = main.TRUE
         if startOnosCli:
@@ -604,6 +693,11 @@
             if apps:
                 apps = apps.split( ',' )
                 apps = [ appPrefix + app for app in apps ]
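+                # When using docker, activate each expected app through an active
+                # node's CLI before checking the app states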
+                if cluster.useDocker:
+                    node = main.Cluster.active( 0 )
+                    for app in apps:
+                        node.activateApp( app )
+
                 onosAppsResult = self.checkOnosApps( cluster, apps )
             else:
                 main.log.warn( "No apps were specified to be checked after startup" )
@@ -616,4 +710,4 @@
 
         return killResult and cellResult and packageResult and uninstallResult and \
                installResult and secureSshResult and onosServiceResult and onosCliResult and \
-               onosNodesResult and onosAppsResult
+               onosNodesResult and onosAppsResult and preCLIResult
diff --git a/TestON/tests/dependencies/utils.py b/TestON/tests/dependencies/utils.py
index 6537afc..3cf849a 100644
--- a/TestON/tests/dependencies/utils.py
+++ b/TestON/tests/dependencies/utils.py
@@ -74,10 +74,20 @@
         scpResult = main.TRUE
         copyResult = main.TRUE
         for ctrl in main.Cluster.runningNodes:
-            scpResult = scpResult and main.ONOSbench.scp( ctrl,
-                                                          "/opt/onos/log/karaf.log",
-                                                          "/tmp/karaf.log",
-                                                          direction="from" )
+            if ctrl.inDocker:
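+                # Copy karaf.log out of the container to its server first, then
+                # scp it from the server to the TestON machine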
+                scpResult = scpResult and ctrl.server.dockerCp( ctrl.name,
+                                                                "/opt/onos/log/karaf.log",
+                                                                "/tmp/karaf.log",
+                                                                direction="from" )
+                scpResult = scpResult and main.ONOSbench.scp( ctrl.server,
+                                                              "/tmp/karaf.log",
+                                                              "/tmp/karaf.log",
+                                                              direction="from" )
+            else:
+                scpResult = scpResult and main.ONOSbench.scp( ctrl,
+                                                              "/opt/onos/log/karaf.log",
+                                                              "/tmp/karaf.log",
+                                                              direction="from" )
             copyResult = copyResult and main.ONOSbench.cpLogsToDir( "/tmp/karaf.log", main.logdir,
                                                                     copyFileName=( copyFileName + "_karaf.log." +
                                                                                    ctrl.name + "_" ) if before else