Update HA tests to use the Cluster driver
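
Convert the HA suites ( HAclusterRestart, HAcontinuousStopNodes,
HAfullNetPartition, HAkillNodes, HAsanity, HAscaling ) to the
OnosClusterDriver: the ONOSbench and per-node ONOScli / ONOS components
in the .topo files are replaced by a single ONOScell component, and the
test cases use main.Cluster and the shared TestON setup helpers instead
of main.nodes, main.CLIs and main.activeNodes.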

Change-Id: I8a3a57e19637ff210548e57d41178e6f194cf694
diff --git a/TestON/tests/HA/HAclusterRestart/HAclusterRestart.py b/TestON/tests/HA/HAclusterRestart/HAclusterRestart.py
index 14ffc7c..eb7cf8a 100644
--- a/TestON/tests/HA/HAclusterRestart/HAclusterRestart.py
+++ b/TestON/tests/HA/HAclusterRestart/HAclusterRestart.py
@@ -88,18 +88,13 @@
             # load some variables from the params file
             cellName = main.params[ 'ENV' ][ 'cellName' ]
             main.apps = main.params[ 'ENV' ][ 'appString' ]
-            main.numCtrls = int( main.params[ 'num_controllers' ] )
-            if main.ONOSbench.maxNodes and \
-                            main.ONOSbench.maxNodes < main.numCtrls:
-                main.numCtrls = int( main.ONOSbench.maxNodes )
-            main.maxNodes = main.numCtrls
-            stepResult = main.testSetUp.envSetup( hasNode=True )
+            stepResult = main.testSetUp.envSetup()
         except Exception as e:
             main.testSetUp.envSetupException( e )
         main.testSetUp.evnSetupConclusion( stepResult )
         main.HA.generateGraph( "HAclusterRestart" )
 
-        main.testSetUp.ONOSSetUp( main.Mininet1, cellName=cellName, removeLog=True,
+        main.testSetUp.ONOSSetUp( main.Mininet1, main.Cluster, cellName=cellName, removeLog=True,
                                   extraApply=main.HA.startingMininet )
 
         main.HA.initialSetUp()
@@ -138,11 +133,8 @@
         The Failure case.
         """
         import time
-        assert main.numCtrls, "main.numCtrls not defined"
         assert main, "main not defined"
         assert utilities.assert_equals, "utilities.assert_equals not defined"
-        assert main.CLIs, "main.CLIs not defined"
-        assert main.nodes, "main.nodes not defined"
         try:
             main.HAlabels
         except ( NameError, AttributeError ):
@@ -163,61 +155,21 @@
         main.case( "Restart entire ONOS cluster" )
 
         main.step( "Checking ONOS Logs for errors" )
-        for node in main.nodes:
-            main.log.debug( "Checking logs for errors on " + node.name + ":" )
-            main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
+        for ctrl in main.Cluster.active():
+            main.log.debug( "Checking logs for errors on " + ctrl.name + ":" )
+            main.log.warn( main.ONOSbench.checkLogs( ctrl.ipAddress ) )
 
-        main.step( "Killing ONOS nodes" )
-        killResults = main.TRUE
-        killTime = time.time()
-        for node in main.nodes:
-            killed = main.ONOSbench.onosKill( node.ip_address )
-            killResults = killResults and killed
-        utilities.assert_equals( expect=main.TRUE, actual=killResults,
-                                 onpass="ONOS nodes killed",
-                                 onfail="ONOS kill unsuccessful" )
+        main.testSetUp.killingAllOnos( main.Cluster, True, False )
 
-        main.step( "Checking if ONOS is up yet" )
-        for i in range( 2 ):
-            onosIsupResult = main.TRUE
-            for node in main.nodes:
-                started = main.ONOSbench.isup( node.ip_address )
-                if not started:
-                    main.log.error( node.name + " didn't start!" )
-                onosIsupResult = onosIsupResult and started
-            if onosIsupResult == main.TRUE:
-                break
-        utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
-                                 onpass="ONOS restarted",
-                                 onfail="ONOS restart NOT successful" )
+        main.testSetUp.checkOnosService( main.Cluster )
 
-        main.step( "Starting ONOS CLI sessions" )
-        cliResults = main.TRUE
-        threads = []
-        for i in range( main.numCtrls ):
-            t = main.Thread( target=main.CLIs[ i ].startOnosCli,
-                             name="startOnosCli-" + str( i ),
-                             args=[ main.nodes[ i ].ip_address ] )
-            threads.append( t )
-            t.start()
+        main.testSetUp.startOnosClis( main.Cluster )
 
-        for t in threads:
-            t.join()
-            cliResults = cliResults and t.result
-        utilities.assert_equals( expect=main.TRUE, actual=cliResults,
-                                 onpass="ONOS cli started",
-                                 onfail="ONOS clis did not restart" )
-
-        for i in range( 10 ):
-            ready = True
-            for i in main.activeNodes:
-                cli = main.CLIs[ i ]
-                output = cli.summary()
-                if not output:
-                    ready = False
-            if ready:
-                break
-            time.sleep( 30 )
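+        # Poll the ONOS summary command on the active nodes through the Cluster
+        # driver, retrying up to 10 times with a 30 second sleep between attempts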
+        ready = utilities.retry( main.Cluster.command,
+                                 False,
+                                 kwargs={ "function":"summary", "contentCheck":True },
+                                 sleep=30,
+                                 attempts=10 )
         utilities.assert_equals( expect=True, actual=ready,
                                  onpass="ONOS summary command succeded",
                                  onfail="ONOS summary command failed" )
@@ -233,23 +185,15 @@
         main.HAdata.append( str( main.restartTime ) )
 
         # Rerun for election on restarted nodes
-        runResults = main.TRUE
-        for i in main.activeNodes:
-            cli = main.CLIs[ i ]
-            run = cli.electionTestRun()
-            if run != main.TRUE:
-                main.log.error( "Error running for election on " + cli.name )
-            runResults = runResults and run
-        utilities.assert_equals( expect=main.TRUE, actual=runResults,
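+        # returnBool=True collapses the per-node election results into a single boolean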
+        runResults = main.Cluster.command( "electionTestRun", returnBool=True )
+        utilities.assert_equals( expect=True, actual=runResults,
                                  onpass="Reran for election",
                                  onfail="Failed to rerun for election" )
 
         # TODO: Make this configurable
         time.sleep( 60 )
-        node = main.activeNodes[ 0 ]
-        main.log.debug( main.CLIs[ node ].nodes( jsonFormat=False ) )
-        main.log.debug( main.CLIs[ node ].leaders( jsonFormat=False ) )
-        main.log.debug( main.CLIs[ node ].partitions( jsonFormat=False ) )
+
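+        # Run the shared nodes / leaders / partitions checks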
+        main.HA.commonChecks()
 
     def CASE7( self, main ):
         """
@@ -257,16 +201,16 @@
         """
         # NOTE: Store has no durability, so intents are lost across system
         #       restarts
-        main.HA.checkStateAfterONOS( main, afterWhich=0, isRestart=True )
+        main.HA.checkStateAfterEvent( main, afterWhich=0, isRestart=True )
 
         main.step( "Leadership Election is still functional" )
         # Test of LeadershipElection
         leaderList = []
         leaderResult = main.TRUE
 
-        for i in main.activeNodes:
-            cli = main.CLIs[ i ]
-            leaderN = cli.electionTestLeader()
+        for ctrl in main.Cluster.active():
+            leaderN = ctrl.CLI.electionTestLeader()
             leaderList.append( leaderN )
             if leaderN == main.FALSE:
                 # error in response
diff --git a/TestON/tests/HA/HAclusterRestart/HAclusterRestart.topo b/TestON/tests/HA/HAclusterRestart/HAclusterRestart.topo
index 7c18a98..4bf4bd4 100644
--- a/TestON/tests/HA/HAclusterRestart/HAclusterRestart.topo
+++ b/TestON/tests/HA/HAclusterRestart/HAclusterRestart.topo
@@ -1,183 +1,32 @@
 <TOPOLOGY>
     <COMPONENT>
 
-        <ONOSbench>
-            <host>localhost</host>
+        <ONOScell>
+            <host>localhost</host>  # ONOS "bench" machine
             <user>sdn</user>
             <password>rocks</password>
-            <type>OnosDriver</type>
+            <type>OnosClusterDriver</type>
             <connect_order>1</connect_order>
             <COMPONENTS>
-                <prompt></prompt>
-            </COMPONENTS>
-        </ONOSbench>
-
-        <ONOScli1>
-            <host>localhost</host>
-            <user>sdn</user>
-            <password>rocks</password>
-            <type>OnosCliDriver</type>
-            <connect_order>2</connect_order>
-            <COMPONENTS>
+                <cluster_name></cluster_name>  # Used as a prefix for cluster components. Defaults to 'ONOS'
+                <diff_clihost></diff_clihost>  # Set to True if the CLI host differs from localhost; OC# will be used if True. Leave empty otherwise.
                 <karaf_username></karaf_username>
                 <karaf_password></karaf_password>
-                <prompt></prompt>
+                <web_user></web_user>
+                <web_pass></web_pass>
+                <rest_port></rest_port>
+                <prompt></prompt>  # TODO: we technically need a few of these, one per component
+                <onos_home></onos_home>  # defines where onos home is
+                <nodes> 7 </nodes>  # number of nodes in the cluster
             </COMPONENTS>
-        </ONOScli1>
-
-        <ONOScli2>
-            <host>localhost</host>
-            <user>sdn</user>
-            <password>rocks</password>
-            <type>OnosCliDriver</type>
-            <connect_order>3</connect_order>
-            <COMPONENTS>
-                <prompt></prompt>
-            </COMPONENTS>
-        </ONOScli2>
-
-        <ONOScli3>
-            <host>localhost</host>
-            <user>sdn</user>
-            <password>rocks</password>
-            <type>OnosCliDriver</type>
-            <connect_order>4</connect_order>
-            <COMPONENTS>
-                <prompt></prompt>
-            </COMPONENTS>
-        </ONOScli3>
-
-
-        <ONOScli4>
-            <host>localhost</host>
-            <user>sdn</user>
-            <password>rocks</password>
-            <type>OnosCliDriver</type>
-            <connect_order>5</connect_order>
-            <COMPONENTS>
-                <prompt></prompt>
-            </COMPONENTS>
-        </ONOScli4>
-
-
-        <ONOScli5>
-            <host>localhost</host>
-            <user>sdn</user>
-            <password>rocks</password>
-            <type>OnosCliDriver</type>
-            <connect_order>6</connect_order>
-            <COMPONENTS>
-                <prompt></prompt>
-            </COMPONENTS>
-        </ONOScli5>
-
-
-        <ONOScli6>
-            <host>localhost</host>
-            <user>sdn</user>
-            <password>rocks</password>
-            <type>OnosCliDriver</type>
-            <connect_order>7</connect_order>
-            <COMPONENTS>
-                <prompt></prompt>
-            </COMPONENTS>
-        </ONOScli6>
-
-
-        <ONOScli7>
-            <host>localhost</host>
-            <user>sdn</user>
-            <password>rocks</password>
-            <type>OnosCliDriver</type>
-            <connect_order>8</connect_order>
-            <COMPONENTS>
-                <prompt></prompt>
-            </COMPONENTS>
-        </ONOScli7>
-
-        <ONOS1>
-            <host>OC1</host>
-            <user>sdn</user>
-            <password>rocks</password>
-            <type>OnosDriver</type>
-            <connect_order>9</connect_order>
-            <COMPONENTS>
-                <prompt></prompt>
-            </COMPONENTS>
-        </ONOS1>
-
-        <ONOS2>
-            <host>OC2</host>
-            <user>sdn</user>
-            <password>rocks</password>
-            <type>OnosDriver</type>
-            <connect_order>10</connect_order>
-            <COMPONENTS>
-                <prompt></prompt>
-            </COMPONENTS>
-        </ONOS2>
-
-        <ONOS3>
-            <host>OC3</host>
-            <user>sdn</user>
-            <password>rocks</password>
-            <type>OnosDriver</type>
-            <connect_order>11</connect_order>
-            <COMPONENTS>
-                <prompt></prompt>
-            </COMPONENTS>
-        </ONOS3>
-
-        <ONOS4>
-            <host>OC4</host>
-            <user>sdn</user>
-            <password>rocks</password>
-            <type>OnosDriver</type>
-            <connect_order>12</connect_order>
-            <COMPONENTS>
-                <prompt></prompt>
-            </COMPONENTS>
-        </ONOS4>
-
-        <ONOS5>
-            <host>OC5</host>
-            <user>sdn</user>
-            <password>rocks</password>
-            <type>OnosDriver</type>
-            <connect_order>13</connect_order>
-            <COMPONENTS>
-                <prompt></prompt>
-            </COMPONENTS>
-        </ONOS5>
-
-        <ONOS6>
-            <host>OC6</host>
-            <user>sdn</user>
-            <password>rocks</password>
-            <type>OnosDriver</type>
-            <connect_order>14</connect_order>
-            <COMPONENTS>
-                <prompt></prompt>
-            </COMPONENTS>
-        </ONOS6>
-
-        <ONOS7>
-            <host>OC7</host>
-            <user>sdn</user>
-            <password>rocks</password>
-            <type>OnosDriver</type>
-            <connect_order>15</connect_order>
-            <COMPONENTS>
-                <prompt></prompt>
-            </COMPONENTS>
-        </ONOS7>
+        </ONOScell>
 
         <Mininet1>
             <host>OCN</host>
             <user>sdn</user>
             <password>rocks</password>
             <type>MininetCliDriver</type>
-            <connect_order>16</connect_order>
+            <connect_order>2</connect_order>
             <COMPONENTS>
                 #Specify the Option for mininet
                 <arg1> --custom ~/mininet/custom/obelisk.py </arg1>
@@ -194,7 +43,7 @@
             <user>sdn</user>
             <password>rocks</password>
             <type>RemoteMininetDriver</type>
-            <connect_order>17</connect_order>
+            <connect_order>3</connect_order>
             <COMPONENTS>
                 <prompt></prompt>
             </COMPONENTS>
diff --git a/TestON/tests/HA/HAcontinuousStopNodes/HAcontinuousStopNodes.py b/TestON/tests/HA/HAcontinuousStopNodes/HAcontinuousStopNodes.py
index 92aae09..1f9f32d 100644
--- a/TestON/tests/HA/HAcontinuousStopNodes/HAcontinuousStopNodes.py
+++ b/TestON/tests/HA/HAcontinuousStopNodes/HAcontinuousStopNodes.py
@@ -91,20 +91,16 @@
             cellName = main.params[ 'ENV' ][ 'cellName' ]
             main.apps = main.params[ 'ENV' ][ 'appString' ]
             main.numCtrls = int( main.params[ 'num_controllers' ] )
-            if main.ONOSbench.maxNodes and\
-                        main.ONOSbench.maxNodes < main.numCtrls:
-                main.numCtrls = int( main.ONOSbench.maxNodes )
-            main.maxNodes = main.numCtrls
-            stepResult = main.testSetUp.envSetup( hasNode=True )
+            stepResult = main.testSetUp.envSetup()
         except Exception as e:
             main.testSetUp.envSetupException( e )
         main.testSetUp.evnSetupConclusion( stepResult )
         main.HA.generateGraph( "HAcontinuousStopNodes" )
 
-        main.testSetUp.ONOSSetUp( main.Mininet1, cellName=cellName, removeLog=True,
-                                 extraApply=main.HA.customizeOnosGenPartitions,
-                                 extraClean=main.HA.cleanUpGenPartition )
-
+        main.testSetUp.ONOSSetUp( main.Mininet1, main.Cluster, cellName=cellName, removeLog=True,
+                                  extraApply=[ main.HA.startingMininet,
+                                               main.HA.customizeOnosGenPartitions ],
+                                  extraClean=main.HA.cleanUpGenPartition )
         main.HA.initialSetUp()
 
 
@@ -142,11 +138,8 @@
         """
         The Failure case.
         """
-        assert main.numCtrls, "main.numCtrls not defined"
         assert main, "main not defined"
         assert utilities.assert_equals, "utilities.assert_equals not defined"
-        assert main.CLIs, "main.CLIs not defined"
-        assert main.nodes, "main.nodes not defined"
         try:
             assert main.nodeIndex is not None, "main.nodeIndex not defined"
             assert main.killCount is not None, "main.killCount not defined"
@@ -158,23 +151,23 @@
         main.case( "Stopping ONOS nodes - iteration " + str( main.killCount ) )
 
         main.step( "Checking ONOS Logs for errors" )
-        for node in main.nodes:
-            main.log.debug( "Checking logs for errors on " + node.name + ":" )
-            main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
+        for ctrl in main.Cluster.runningNodes:
+            main.log.debug( "Checking logs for errors on " + ctrl.name + ":" )
+            main.log.warn( main.ONOSbench.checkLogs( ctrl.ipAddress ) )
 
         # NOTE: For now only kill one node. If we move to killing more, we need to
         #       make sure we don't lose any partitions
-        n = len( main.nodes )  # Number of nodes
+        n = len( main.Cluster.runningNodes )  # Number of nodes
         main.nodeIndex = ( main.nodeIndex + 1 ) % n
-        main.kill = [ main.nodeIndex ]  # ONOS node to kill, listed by index in main.nodes
+        main.kill = [ main.Cluster.runningNodes[ main.nodeIndex ] ]  # ONOS nodes to kill, as controller objects from main.Cluster.runningNodes
 
         # TODO: Be able to configure bringing up old node vs. a new/fresh installation
         main.step( "Stopping " + str( len( main.kill ) ) + " ONOS nodes" )
         killResults = main.TRUE
-        for i in main.kill:
+        for ctrl in main.kill:
             killResults = killResults and\
-                          main.ONOSbench.onosStop( main.nodes[ i ].ip_address )
-            main.activeNodes.remove( i )
+                          main.ONOSbench.onosStop( ctrl.ipAddress )
+            ctrl.active = False
         utilities.assert_equals( expect=main.TRUE, actual=killResults,
                                  onpass="ONOS nodes stopped successfully",
                                  onfail="ONOS nodes NOT successfully stopped" )
@@ -182,7 +175,7 @@
         main.step( "Checking ONOS nodes" )
         nodeResults = utilities.retry( main.HA.nodesCheck,
                                        False,
-                                       args=[ main.activeNodes ],
+                                       args=[ main.Cluster.active() ],
                                        sleep=15,
                                        attempts=5 )
 
@@ -191,11 +184,10 @@
                                  onfail="Nodes check NOT successful" )
 
         if not nodeResults:
-            for i in main.activeNodes:
-                cli = main.CLIs[ i ]
+            for ctrl in main.Cluster.active():
                 main.log.debug( "{} components not ACTIVE: \n{}".format(
-                    cli.name,
-                    cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
+                    ctrl.name,
+                    ctrl.CLI.sendline( "scr:list | grep -v ACTIVE" ) ) )
             main.log.error( "Failed to start ONOS, stopping test" )
             main.cleanup()
             main.exit()
@@ -217,20 +209,19 @@
         except AttributeError:
             main.kill = []
 
-        main.HA.checkStateAfterONOS( main, afterWhich=0 )
+        main.HA.checkStateAfterEvent( main, afterWhich=0 )
 
         main.step( "Leadership Election is still functional" )
         # Test of LeadershipElection
         leaderList = []
 
         restarted = []
-        for i in main.kill:
-            restarted.append( main.nodes[ i ].ip_address )
+        for ctrl in main.kill:
+            restarted.append( ctrl.ipAddress )
         leaderResult = main.TRUE
 
-        for i in main.activeNodes:
-            cli = main.CLIs[ i ]
-            leaderN = cli.electionTestLeader()
+        for ctrl in main.Cluster.active():
+            leaderN = ctrl.CLI.electionTestLeader()
             leaderList.append( leaderN )
             if leaderN == main.FALSE:
                 # error in response
@@ -239,12 +230,12 @@
                                  " error logs" )
                 leaderResult = main.FALSE
             elif leaderN is None:
-                main.log.error( cli.name +
+                main.log.error( ctrl.name +
                                  " shows no leader for the election-app was" +
                                  " elected after the old one died" )
                 leaderResult = main.FALSE
             elif leaderN in restarted:
-                main.log.error( cli.name + " shows " + str( leaderN ) +
+                main.log.error( ctrl.name + " shows " + str( leaderN ) +
                                  " as leader for the election-app, but it " +
                                  "was restarted" )
                 leaderResult = main.FALSE
diff --git a/TestON/tests/HA/HAcontinuousStopNodes/HAcontinuousStopNodes.topo b/TestON/tests/HA/HAcontinuousStopNodes/HAcontinuousStopNodes.topo
index 7c18a98..5d64681 100644
--- a/TestON/tests/HA/HAcontinuousStopNodes/HAcontinuousStopNodes.topo
+++ b/TestON/tests/HA/HAcontinuousStopNodes/HAcontinuousStopNodes.topo
@@ -1,176 +1,25 @@
 <TOPOLOGY>
     <COMPONENT>
 
-        <ONOSbench>
-            <host>localhost</host>
+        <ONOScell>
+            <host>localhost</host>  # ONOS "bench" machine
             <user>sdn</user>
             <password>rocks</password>
-            <type>OnosDriver</type>
+            <type>OnosClusterDriver</type>
             <connect_order>1</connect_order>
             <COMPONENTS>
-                <prompt></prompt>
-            </COMPONENTS>
-        </ONOSbench>
-
-        <ONOScli1>
-            <host>localhost</host>
-            <user>sdn</user>
-            <password>rocks</password>
-            <type>OnosCliDriver</type>
-            <connect_order>2</connect_order>
-            <COMPONENTS>
+                <cluster_name></cluster_name>  # Used as a prefix for cluster components. Defaults to 'ONOS'
+                <diff_clihost></diff_clihost>  # Set to True if the CLI host differs from localhost; OC# will be used if True. Leave empty otherwise.
                 <karaf_username></karaf_username>
                 <karaf_password></karaf_password>
-                <prompt></prompt>
+                <web_user></web_user>
+                <web_pass></web_pass>
+                <rest_port></rest_port>
+                <prompt></prompt>  # TODO: we technically need a few of these, one per component
+                <onos_home></onos_home>  # defines where onos home is
+                <nodes> 7 </nodes>  # number of nodes in the cluster
             </COMPONENTS>
-        </ONOScli1>
-
-        <ONOScli2>
-            <host>localhost</host>
-            <user>sdn</user>
-            <password>rocks</password>
-            <type>OnosCliDriver</type>
-            <connect_order>3</connect_order>
-            <COMPONENTS>
-                <prompt></prompt>
-            </COMPONENTS>
-        </ONOScli2>
-
-        <ONOScli3>
-            <host>localhost</host>
-            <user>sdn</user>
-            <password>rocks</password>
-            <type>OnosCliDriver</type>
-            <connect_order>4</connect_order>
-            <COMPONENTS>
-                <prompt></prompt>
-            </COMPONENTS>
-        </ONOScli3>
-
-
-        <ONOScli4>
-            <host>localhost</host>
-            <user>sdn</user>
-            <password>rocks</password>
-            <type>OnosCliDriver</type>
-            <connect_order>5</connect_order>
-            <COMPONENTS>
-                <prompt></prompt>
-            </COMPONENTS>
-        </ONOScli4>
-
-
-        <ONOScli5>
-            <host>localhost</host>
-            <user>sdn</user>
-            <password>rocks</password>
-            <type>OnosCliDriver</type>
-            <connect_order>6</connect_order>
-            <COMPONENTS>
-                <prompt></prompt>
-            </COMPONENTS>
-        </ONOScli5>
-
-
-        <ONOScli6>
-            <host>localhost</host>
-            <user>sdn</user>
-            <password>rocks</password>
-            <type>OnosCliDriver</type>
-            <connect_order>7</connect_order>
-            <COMPONENTS>
-                <prompt></prompt>
-            </COMPONENTS>
-        </ONOScli6>
-
-
-        <ONOScli7>
-            <host>localhost</host>
-            <user>sdn</user>
-            <password>rocks</password>
-            <type>OnosCliDriver</type>
-            <connect_order>8</connect_order>
-            <COMPONENTS>
-                <prompt></prompt>
-            </COMPONENTS>
-        </ONOScli7>
-
-        <ONOS1>
-            <host>OC1</host>
-            <user>sdn</user>
-            <password>rocks</password>
-            <type>OnosDriver</type>
-            <connect_order>9</connect_order>
-            <COMPONENTS>
-                <prompt></prompt>
-            </COMPONENTS>
-        </ONOS1>
-
-        <ONOS2>
-            <host>OC2</host>
-            <user>sdn</user>
-            <password>rocks</password>
-            <type>OnosDriver</type>
-            <connect_order>10</connect_order>
-            <COMPONENTS>
-                <prompt></prompt>
-            </COMPONENTS>
-        </ONOS2>
-
-        <ONOS3>
-            <host>OC3</host>
-            <user>sdn</user>
-            <password>rocks</password>
-            <type>OnosDriver</type>
-            <connect_order>11</connect_order>
-            <COMPONENTS>
-                <prompt></prompt>
-            </COMPONENTS>
-        </ONOS3>
-
-        <ONOS4>
-            <host>OC4</host>
-            <user>sdn</user>
-            <password>rocks</password>
-            <type>OnosDriver</type>
-            <connect_order>12</connect_order>
-            <COMPONENTS>
-                <prompt></prompt>
-            </COMPONENTS>
-        </ONOS4>
-
-        <ONOS5>
-            <host>OC5</host>
-            <user>sdn</user>
-            <password>rocks</password>
-            <type>OnosDriver</type>
-            <connect_order>13</connect_order>
-            <COMPONENTS>
-                <prompt></prompt>
-            </COMPONENTS>
-        </ONOS5>
-
-        <ONOS6>
-            <host>OC6</host>
-            <user>sdn</user>
-            <password>rocks</password>
-            <type>OnosDriver</type>
-            <connect_order>14</connect_order>
-            <COMPONENTS>
-                <prompt></prompt>
-            </COMPONENTS>
-        </ONOS6>
-
-        <ONOS7>
-            <host>OC7</host>
-            <user>sdn</user>
-            <password>rocks</password>
-            <type>OnosDriver</type>
-            <connect_order>15</connect_order>
-            <COMPONENTS>
-                <prompt></prompt>
-            </COMPONENTS>
-        </ONOS7>
+        </ONOScell>
 
         <Mininet1>
             <host>OCN</host>
diff --git a/TestON/tests/HA/HAfullNetPartition/HAfullNetPartition.py b/TestON/tests/HA/HAfullNetPartition/HAfullNetPartition.py
index 7e10a09..475759f 100644
--- a/TestON/tests/HA/HAfullNetPartition/HAfullNetPartition.py
+++ b/TestON/tests/HA/HAfullNetPartition/HAfullNetPartition.py
@@ -90,21 +90,16 @@
             # load some variables from the params file
             cellName = main.params[ 'ENV' ][ 'cellName' ]
             main.apps = main.params[ 'ENV' ][ 'appString' ]
-            main.numCtrls = int( main.params[ 'num_controllers' ] )
-            if main.ONOSbench.maxNodes and\
-                        main.ONOSbench.maxNodes < main.numCtrls:
-                main.numCtrls = int( main.ONOSbench.maxNodes )
-            main.maxNodes = main.numCtrls
-            stepResult = main.testSetUp.envSetup( hasNode=True )
+            stepResult = main.testSetUp.envSetup()
         except Exception as e:
             main.testSetUp.envSetupException( e )
         main.testSetUp.evnSetupConclusion( stepResult )
         main.HA.generateGraph( "HAfullNetPartition" )
 
-        main.testSetUp.ONOSSetUp( main.Mininet1, cellName=cellName, removeLog=True,
-                                 extraApply=main.HA.customizeOnosGenPartitions,
-                                 extraClean=main.HA.cleanUpGenPartition )
-
+        main.testSetUp.ONOSSetUp( main.Mininet1, main.Cluster, cellName=cellName, removeLog=True,
+                                  extraApply=[ main.HA.startingMininet,
+                                               main.HA.customizeOnosGenPartitions ],
+                                  extraClean=main.HA.cleanUpGenPartition )
         main.HA.initialSetUp()
 
 
@@ -143,21 +138,18 @@
         The Failure case.
         """
         import math
+        import pexpect
-        assert main.numCtrls, "main.numCtrls not defined"
         assert main, "main not defined"
         assert utilities.assert_equals, "utilities.assert_equals not defined"
-        assert main.CLIs, "main.CLIs not defined"
-        assert main.nodes, "main.nodes not defined"
         main.case( "Partition ONOS nodes into two distinct partitions" )
 
         main.step( "Checking ONOS Logs for errors" )
-        for node in main.nodes:
-            main.log.debug( "Checking logs for errors on " + node.name + ":" )
-            main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
+        for ctrl in main.Cluster.runningNodes:
+            main.log.debug( "Checking logs for errors on " + ctrl.name + ":" )
+            main.log.warn( main.ONOSbench.checkLogs( ctrl.ipAddress ) )
 
-        main.log.debug( main.CLIs[ 0 ].roles( jsonFormat=False ) )
+        main.log.debug( main.Cluster.next().CLI.roles( jsonFormat=False ) )
 
-        n = len( main.nodes )  # Number of nodes
+        n = len( main.Cluster.runningNodes )  # Number of nodes
         p = ( ( n + 1 ) / 2 ) + 1  # Number of partitions
         main.partition = [ 0 ]  # ONOS node to partition, listed by index in main.nodes
         if n > 3:
@@ -169,27 +161,49 @@
         main.log.info( "Nodes to be partitioned: " + str( nodeList ) )
         partitionResults = main.TRUE
         for i in range( 0, n ):
-            this = main.nodes[ i ]
+            iCtrl = main.Cluster.runningNodes[ i ]
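+            # Open an SSH session to this node from the bench; the iptables rules below are applied over this handle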
+            this = iCtrl.Bench.sshToNode( iCtrl.ipAddress )
             if i not in main.partition:
                 for j in main.partition:
-                    foe = main.nodes[ j ]
-                    main.log.warn( "Setting IP Tables rule from {} to {}. ".format( this.ip_address, foe.ip_address ) )
+                    foe = main.Cluster.runningNodes[ j ]
+                    main.log.warn( "Setting IP Tables rule from {} to {}. ".format( iCtrl.ipAddress, foe.ipAddress ) )
                     #CMD HERE
-                    cmdStr = "sudo iptables -A {} -d {} -s {} -j DROP".format( "INPUT", this.ip_address, foe.ip_address )
-                    this.handle.sendline( cmdStr )
-                    this.handle.expect( "\$" )
-                    main.log.debug( this.handle.before )
+                    try:
+                        cmdStr = "sudo iptables -A {} -d {} -s {} -j DROP".format( "INPUT", iCtrl.ipAddress, foe.ipAddress )
+                        this.sendline( cmdStr )
+                        this.expect( "\$" )
+                        main.log.debug( this.before )
+                    except pexpect.EOF:
+                        main.log.error( iCtrl.name + ": EOF exception found" )
+                        main.log.error( iCtrl.name + ":    " + this.before )
+                        main.cleanup()
+                        main.exit()
+                    except Exception:
+                        main.log.exception( iCtrl.name + ": Uncaught exception!" )
+                        main.cleanup()
+                        main.exit()
             else:
                 for j in range( 0, n ):
                     if j not in main.partition:
-                        foe = main.nodes[ j ]
-                        main.log.warn( "Setting IP Tables rule from {} to {}. ".format( this.ip_address, foe.ip_address ) )
+                        foe = main.Cluster.runningNodes[ j ]
+                        main.log.warn( "Setting IP Tables rule from {} to {}. ".format( iCtrl.ipAddress, foe.ipAddress ) )
                         #CMD HERE
-                        cmdStr = "sudo iptables -A {} -d {} -s {} -j DROP".format( "INPUT", this.ip_address, foe.ip_address )
-                        this.handle.sendline( cmdStr )
-                        this.handle.expect( "\$" )
-                        main.log.debug( this.handle.before )
-                main.activeNodes.remove( i )
+                        cmdStr = "sudo iptables -A {} -d {} -s {} -j DROP".format( "INPUT", iCtrl.ipAddress, foe.ipAddress )
+                        try:
+                            this.sendline( cmdStr )
+                            this.expect( "\$" )
+                            main.log.debug( this.before )
+                        except pexpect.EOF:
+                            main.log.error( iCtrl.name + ": EOF exception found" )
+                            main.log.error( iCtrl.name + ":    " + this.before )
+                            main.cleanup()
+                            main.exit()
+                        except Exception:
+                            main.log.exception( iCtrl.name + ": Uncaught exception!" )
+                            main.cleanup()
+                            main.exit()
+                main.Cluster.runningNodes[ i ].active = False
+            iCtrl.Bench.exitFromSsh( this, iCtrl.ipAddress )
         # NOTE: When dynamic clustering is finished, we need to start checking
         #       main.partion nodes still work when partitioned
         utilities.assert_equals( expect=main.TRUE, actual=partitionResults,
@@ -204,28 +218,30 @@
         Healing Partition
         """
         import time
-        assert main.numCtrls, "main.numCtrls not defined"
         assert main, "main not defined"
         assert utilities.assert_equals, "utilities.assert_equals not defined"
-        assert main.CLIs, "main.CLIs not defined"
-        assert main.nodes, "main.nodes not defined"
         assert main.partition, "main.partition not defined"
         main.case( "Healing Partition" )
 
         main.step( "Deleteing firewall rules" )
         healResults = main.TRUE
-        for node in main.nodes:
+        for ctrl in main.Cluster.runningNodes:
             cmdStr = "sudo iptables -F"
-            node.handle.sendline( cmdStr )
-            node.handle.expect( "\$" )
-            main.log.debug( node.handle.before )
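+            # Flush the node's firewall rules over a temporary SSH session opened from the bench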
+            handle = ctrl.Bench.sshToNode( ctrl.ipAddress )
+            handle.sendline( cmdStr )
+            handle.expect( "\$" )
+            main.log.debug( handle.before )
+            ctrl.Bench.exitFromSsh( handle, ctrl.ipAddress )
         utilities.assert_equals( expect=main.TRUE, actual=healResults,
                                  onpass="Firewall rules removed",
                                  onfail="Error removing firewall rules" )
 
         for node in main.partition:
-            main.activeNodes.append( node )
-        main.activeNodes.sort()
+            main.Cluster.runningNodes[ node ].active = True
+
+        '''
+        # NOTE: Not sure if this can be removed
+         main.activeNodes.sort()
         try:
             assert list( set( main.activeNodes ) ) == main.activeNodes,\
                    "List of active nodes has duplicates, this likely indicates something was run out of order"
@@ -233,11 +249,12 @@
             main.log.exception( "" )
             main.cleanup()
             main.exit()
+        '''
 
         main.step( "Checking ONOS nodes" )
         nodeResults = utilities.retry( main.HA.nodesCheck,
                                        False,
-                                       args=[ main.activeNodes ],
+                                       args=[ main.Cluster.active() ],
                                        sleep=15,
                                        attempts=5 )
 
@@ -246,11 +263,10 @@
                                  onfail="Nodes check NOT successful" )
 
         if not nodeResults:
-            for i in main.activeNodes:
-                cli = main.CLIs[ i ]
+            for ctrl in main.Cluster.active():
                 main.log.debug( "{} components not ACTIVE: \n{}".format(
-                    cli.name,
-                    cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
+                    ctrl.name,
+                    ctrl.CLI.sendline( "scr:list | grep -v ACTIVE" ) ) )
             main.log.error( "Failed to start ONOS, stopping test" )
             main.cleanup()
             main.exit()
@@ -260,7 +276,7 @@
         Check state after ONOS failure
         """
 
-        main.HA.checkStateAfterONOS( main, afterWhich=0 )
+        main.HA.checkStateAfterEvent( main, afterWhich=0 )
 
         main.step( "Leadership Election is still functional" )
         # Test of LeadershipElection
@@ -268,12 +284,11 @@
 
         partitioned = []
         for i in main.partition:
-            partitioned.append( main.nodes[ i ].ip_address )
+            partitioned.append( main.Cluster.runningNodes[ i ].ipAddress )
         leaderResult = main.TRUE
 
-        for i in main.activeNodes:
-            cli = main.CLIs[ i ]
-            leaderN = cli.electionTestLeader()
+        for ctrl in main.Cluster.active():
+            leaderN = ctrl.CLI.electionTestLeader()
             leaderList.append( leaderN )
             if leaderN == main.FALSE:
                 # error in response
@@ -282,12 +297,12 @@
                                  " error logs" )
                 leaderResult = main.FALSE
             elif leaderN is None:
-                main.log.error( cli.name +
+                main.log.error( ctrl.name +
                                  " shows no leader for the election-app was" +
                                  " elected after the old one died" )
                 leaderResult = main.FALSE
             elif leaderN in partitioned:
-                main.log.error( cli.name + " shows " + str( leaderN ) +
+                main.log.error( ctrl.name + " shows " + str( leaderN ) +
                                  " as leader for the election-app, but it " +
                                  "was partitioned" )
                 leaderResult = main.FALSE
diff --git a/TestON/tests/HA/HAfullNetPartition/HAfullNetPartition.topo b/TestON/tests/HA/HAfullNetPartition/HAfullNetPartition.topo
index 7c18a98..4bf4bd4 100644
--- a/TestON/tests/HA/HAfullNetPartition/HAfullNetPartition.topo
+++ b/TestON/tests/HA/HAfullNetPartition/HAfullNetPartition.topo
@@ -1,183 +1,32 @@
 <TOPOLOGY>
     <COMPONENT>
 
-        <ONOSbench>
-            <host>localhost</host>
+        <ONOScell>
+            <host>localhost</host>  # ONOS "bench" machine
             <user>sdn</user>
             <password>rocks</password>
-            <type>OnosDriver</type>
+            <type>OnosClusterDriver</type>
             <connect_order>1</connect_order>
             <COMPONENTS>
-                <prompt></prompt>
-            </COMPONENTS>
-        </ONOSbench>
-
-        <ONOScli1>
-            <host>localhost</host>
-            <user>sdn</user>
-            <password>rocks</password>
-            <type>OnosCliDriver</type>
-            <connect_order>2</connect_order>
-            <COMPONENTS>
+                <cluster_name></cluster_name>  # Used as a prefix for cluster components. Defaults to 'ONOS'
+                <diff_clihost></diff_clihost>  # Set to True if the CLI host differs from localhost; OC# will be used if True. Leave empty otherwise.
                 <karaf_username></karaf_username>
                 <karaf_password></karaf_password>
-                <prompt></prompt>
+                <web_user></web_user>
+                <web_pass></web_pass>
+                <rest_port></rest_port>
+                <prompt></prompt>  # TODO: we technically need a few of these, one per component
+                <onos_home></onos_home>  # defines where onos home is
+                <nodes> 7 </nodes>  # number of nodes in the cluster
             </COMPONENTS>
-        </ONOScli1>
-
-        <ONOScli2>
-            <host>localhost</host>
-            <user>sdn</user>
-            <password>rocks</password>
-            <type>OnosCliDriver</type>
-            <connect_order>3</connect_order>
-            <COMPONENTS>
-                <prompt></prompt>
-            </COMPONENTS>
-        </ONOScli2>
-
-        <ONOScli3>
-            <host>localhost</host>
-            <user>sdn</user>
-            <password>rocks</password>
-            <type>OnosCliDriver</type>
-            <connect_order>4</connect_order>
-            <COMPONENTS>
-                <prompt></prompt>
-            </COMPONENTS>
-        </ONOScli3>
-
-
-        <ONOScli4>
-            <host>localhost</host>
-            <user>sdn</user>
-            <password>rocks</password>
-            <type>OnosCliDriver</type>
-            <connect_order>5</connect_order>
-            <COMPONENTS>
-                <prompt></prompt>
-            </COMPONENTS>
-        </ONOScli4>
-
-
-        <ONOScli5>
-            <host>localhost</host>
-            <user>sdn</user>
-            <password>rocks</password>
-            <type>OnosCliDriver</type>
-            <connect_order>6</connect_order>
-            <COMPONENTS>
-                <prompt></prompt>
-            </COMPONENTS>
-        </ONOScli5>
-
-
-        <ONOScli6>
-            <host>localhost</host>
-            <user>sdn</user>
-            <password>rocks</password>
-            <type>OnosCliDriver</type>
-            <connect_order>7</connect_order>
-            <COMPONENTS>
-                <prompt></prompt>
-            </COMPONENTS>
-        </ONOScli6>
-
-
-        <ONOScli7>
-            <host>localhost</host>
-            <user>sdn</user>
-            <password>rocks</password>
-            <type>OnosCliDriver</type>
-            <connect_order>8</connect_order>
-            <COMPONENTS>
-                <prompt></prompt>
-            </COMPONENTS>
-        </ONOScli7>
-
-        <ONOS1>
-            <host>OC1</host>
-            <user>sdn</user>
-            <password>rocks</password>
-            <type>OnosDriver</type>
-            <connect_order>9</connect_order>
-            <COMPONENTS>
-                <prompt></prompt>
-            </COMPONENTS>
-        </ONOS1>
-
-        <ONOS2>
-            <host>OC2</host>
-            <user>sdn</user>
-            <password>rocks</password>
-            <type>OnosDriver</type>
-            <connect_order>10</connect_order>
-            <COMPONENTS>
-                <prompt></prompt>
-            </COMPONENTS>
-        </ONOS2>
-
-        <ONOS3>
-            <host>OC3</host>
-            <user>sdn</user>
-            <password>rocks</password>
-            <type>OnosDriver</type>
-            <connect_order>11</connect_order>
-            <COMPONENTS>
-                <prompt></prompt>
-            </COMPONENTS>
-        </ONOS3>
-
-        <ONOS4>
-            <host>OC4</host>
-            <user>sdn</user>
-            <password>rocks</password>
-            <type>OnosDriver</type>
-            <connect_order>12</connect_order>
-            <COMPONENTS>
-                <prompt></prompt>
-            </COMPONENTS>
-        </ONOS4>
-
-        <ONOS5>
-            <host>OC5</host>
-            <user>sdn</user>
-            <password>rocks</password>
-            <type>OnosDriver</type>
-            <connect_order>13</connect_order>
-            <COMPONENTS>
-                <prompt></prompt>
-            </COMPONENTS>
-        </ONOS5>
-
-        <ONOS6>
-            <host>OC6</host>
-            <user>sdn</user>
-            <password>rocks</password>
-            <type>OnosDriver</type>
-            <connect_order>14</connect_order>
-            <COMPONENTS>
-                <prompt></prompt>
-            </COMPONENTS>
-        </ONOS6>
-
-        <ONOS7>
-            <host>OC7</host>
-            <user>sdn</user>
-            <password>rocks</password>
-            <type>OnosDriver</type>
-            <connect_order>15</connect_order>
-            <COMPONENTS>
-                <prompt></prompt>
-            </COMPONENTS>
-        </ONOS7>
+        </ONOScell>
 
         <Mininet1>
             <host>OCN</host>
             <user>sdn</user>
             <password>rocks</password>
             <type>MininetCliDriver</type>
-            <connect_order>16</connect_order>
+            <connect_order>2</connect_order>
             <COMPONENTS>
                 #Specify the Option for mininet
                 <arg1> --custom ~/mininet/custom/obelisk.py </arg1>
@@ -194,7 +43,7 @@
             <user>sdn</user>
             <password>rocks</password>
             <type>RemoteMininetDriver</type>
-            <connect_order>17</connect_order>
+            <connect_order>3</connect_order>
             <COMPONENTS>
                 <prompt></prompt>
             </COMPONENTS>
diff --git a/TestON/tests/HA/HAkillNodes/HAkillNodes.py b/TestON/tests/HA/HAkillNodes/HAkillNodes.py
index b2ae8c5..e81dda9 100644
--- a/TestON/tests/HA/HAkillNodes/HAkillNodes.py
+++ b/TestON/tests/HA/HAkillNodes/HAkillNodes.py
@@ -80,28 +80,22 @@
             main.exit()
         main.testSetUp.envSetupDescription()
         try:
-            from dependencies.Cluster import Cluster
             from tests.HA.dependencies.HA import HA
             main.HA = HA()
-            main.Cluster = Cluster( main.ONOScell.nodes )
             cellName = main.params[ 'ENV' ][ 'cellName' ]
             main.apps = main.params[ 'ENV' ][ 'appString' ]
-            stepResult = main.testSetUp.envSetup( main.Cluster, hasNode=True )
+            stepResult = main.testSetUp.envSetup()
         except Exception as e:
             main.testSetUp.envSetupException( e )
         main.testSetUp.evnSetupConclusion( stepResult )
         main.HA.generateGraph( "HAkillNodes" )
 
         main.step( "Make sure ONOS service doesn't automatically respawn" )
-        handle = main.Cluster.controllers[0].Bench.handle
-        handle.sendline( "sed -i -e 's/^respawn$/#respawn/g' tools/package/init/onos.conf" )
-        handle.expect( "\$" )  # $ from the command
-        handle.sendline( "sed -i -e 's/^Restart=always/Restart=no/g' tools/package/init/onos.service" )
-        handle.expect( "\$" )  # $ from the command
-        handle.expect( "\$" )  # $ from the prompt
+        main.ONOSbench.preventAutoRespawn()
 
         main.testSetUp.ONOSSetUp( main.Mininet1, main.Cluster, cellName=cellName, removeLog=True,
-                                 extraApply=main.HA.customizeOnosGenPartitions,
+                                 extraApply=[ main.HA.startingMininet,
+                                              main.HA.customizeOnosGenPartitions ],
                                  extraClean=main.HA.cleanUpGenPartition )
 
         main.HA.initialSetUp()
@@ -140,7 +134,6 @@
         """
         The Failure case.
         """
-        assert main.numCtrls, "main.numCtrls not defined"
         assert main, "main not defined"
         assert utilities.assert_equals, "utilities.assert_equals not defined"
         main.case( "Kill minority of ONOS nodes" )
@@ -150,11 +143,11 @@
             main.log.debug( "Checking logs for errors on " + ctrl.name + ":" )
             main.log.warn( ctrl.checkLogs( ctrl.ipAddress ) )
 
-        n = len( main.Cluster.controllers )  # Number of nodes
+        n = len( main.Cluster.runningNodes )  # Number of nodes
         p = ( ( n + 1 ) / 2 ) + 1  # Number of partitions
-        main.kill = [ main.Cluster.controllers[ 0 ] ]  # ONOS node to kill, listed by index in main.nodes
+        main.kill = [ main.Cluster.runningNodes[ 0 ] ]  # ONOS nodes to kill, as controller objects from main.Cluster.runningNodes
         if n > 3:
-            main.kill.append( main.Cluster.controllers[ p - 1 ] )
+            main.kill.append( main.Cluster.runningNodes[ p - 1 ] )
             # NOTE: This only works for cluster sizes of 3,5, or 7.
 
         main.step( "Killing nodes: " + str( main.kill ) )
@@ -202,7 +195,7 @@
         except AttributeError:
             main.kill = []
 
-        main.HA.checkStateAfterONOS( main, afterWhich=0 )
+        main.HA.checkStateAfterEvent( main, afterWhich=0 )
         main.step( "Leadership Election is still functional" )
         # Test of LeadershipElection
         leaderList = []
diff --git a/TestON/tests/HA/HAkillNodes/HAkillNodes.topo b/TestON/tests/HA/HAkillNodes/HAkillNodes.topo
index f3b9278..4bf4bd4 100644
--- a/TestON/tests/HA/HAkillNodes/HAkillNodes.topo
+++ b/TestON/tests/HA/HAkillNodes/HAkillNodes.topo
@@ -1,17 +1,6 @@
 <TOPOLOGY>
     <COMPONENT>
 
-        <ONOSbench>
-            <host>localhost</host>
-            <user>sdn</user>
-            <password>rocks</password>
-            <type>OnosDriver</type>
-            <connect_order>1</connect_order>
-            <COMPONENTS>
-                <prompt></prompt>
-            </COMPONENTS>
-        </ONOSbench>
-
         <ONOScell>
             <host>localhost</host>  # ONOS "bench" machine
             <user>sdn</user>
@@ -20,6 +9,7 @@
             <connect_order>1</connect_order>
             <COMPONENTS>
                 <cluster_name></cluster_name>  # Used as a prefix for cluster components. Defaults to 'ONOS'
+                <diff_clihost></diff_clihost>  # Set to True if the CLI host differs from localhost; OC# will be used if True. Leave empty otherwise.
                 <karaf_username></karaf_username>
                 <karaf_password></karaf_password>
                 <web_user></web_user>
@@ -36,7 +26,7 @@
             <user>sdn</user>
             <password>rocks</password>
             <type>MininetCliDriver</type>
-            <connect_order>16</connect_order>
+            <connect_order>2</connect_order>
             <COMPONENTS>
                 #Specify the Option for mininet
                 <arg1> --custom ~/mininet/custom/obelisk.py </arg1>
@@ -53,7 +43,7 @@
             <user>sdn</user>
             <password>rocks</password>
             <type>RemoteMininetDriver</type>
-            <connect_order>17</connect_order>
+            <connect_order>3</connect_order>
             <COMPONENTS>
                 <prompt></prompt>
             </COMPONENTS>
diff --git a/TestON/tests/HA/HAsanity/HAsanity.py b/TestON/tests/HA/HAsanity/HAsanity.py
index c70d921..8332a2b 100644
--- a/TestON/tests/HA/HAsanity/HAsanity.py
+++ b/TestON/tests/HA/HAsanity/HAsanity.py
@@ -79,13 +79,11 @@
             main.exit()
         main.testSetUp.envSetupDescription()
         try:
-            from dependencies.Cluster import Cluster
             from tests.HA.dependencies.HA import HA
             main.HA = HA()
-            main.Cluster = Cluster( main.ONOScell.nodes )
             cellName = main.params[ 'ENV' ][ 'cellName' ]
             main.apps = main.params[ 'ENV' ][ 'appString' ]
-            stepResult = main.testSetUp.envSetup( main.Cluster, hasNode=True )
+            stepResult = main.testSetUp.envSetup()
         except Exception as e:
             main.testSetUp.envSetupException( e )
         main.testSetUp.evnSetupConclusion( stepResult )
@@ -131,7 +129,6 @@
         The Failure case. Since this is the Sanity test, we do nothing.
         """
         import time
-        assert main.numCtrls, "main.numCtrls not defined"
         assert main, "main not defined"
         assert utilities.assert_equals, "utilities.assert_equals not defined"
         main.case( "Wait 60 seconds instead of inducing a failure" )
@@ -146,7 +143,7 @@
         """
         Check state after ONOS failure
         """
-        main.HA.checkStateAfterONOS( main, afterWhich=0, compareSwitch=True )
+        main.HA.checkStateAfterEvent( main, afterWhich=0, compareSwitch=True )
 
         main.step( "Leadership Election is still functional" )
         # Test of LeadershipElection
diff --git a/TestON/tests/HA/HAsanity/HAsanity.topo b/TestON/tests/HA/HAsanity/HAsanity.topo
index f3b9278..4bf4bd4 100644
--- a/TestON/tests/HA/HAsanity/HAsanity.topo
+++ b/TestON/tests/HA/HAsanity/HAsanity.topo
@@ -1,17 +1,6 @@
 <TOPOLOGY>
     <COMPONENT>
 
-        <ONOSbench>
-            <host>localhost</host>
-            <user>sdn</user>
-            <password>rocks</password>
-            <type>OnosDriver</type>
-            <connect_order>1</connect_order>
-            <COMPONENTS>
-                <prompt></prompt>
-            </COMPONENTS>
-        </ONOSbench>
-
         <ONOScell>
             <host>localhost</host>  # ONOS "bench" machine
             <user>sdn</user>
@@ -20,6 +9,7 @@
             <connect_order>1</connect_order>
             <COMPONENTS>
                 <cluster_name></cluster_name>  # Used as a prefix for cluster components. Defaults to 'ONOS'
+                <diff_clihost></diff_clihost>  # Set to True if the CLI host differs from localhost; OC# will be used if True. Leave empty otherwise.
                 <karaf_username></karaf_username>
                 <karaf_password></karaf_password>
                 <web_user></web_user>
@@ -36,7 +26,7 @@
             <user>sdn</user>
             <password>rocks</password>
             <type>MininetCliDriver</type>
-            <connect_order>16</connect_order>
+            <connect_order>2</connect_order>
             <COMPONENTS>
                 #Specify the Option for mininet
                 <arg1> --custom ~/mininet/custom/obelisk.py </arg1>
@@ -53,7 +43,7 @@
             <user>sdn</user>
             <password>rocks</password>
             <type>RemoteMininetDriver</type>
-            <connect_order>17</connect_order>
+            <connect_order>3</connect_order>
             <COMPONENTS>
                 <prompt></prompt>
             </COMPONENTS>
diff --git a/TestON/tests/HA/HAscaling/HAscaling.py b/TestON/tests/HA/HAscaling/HAscaling.py
index f35fa57..6a0abd4 100644
--- a/TestON/tests/HA/HAscaling/HAscaling.py
+++ b/TestON/tests/HA/HAscaling/HAscaling.py
@@ -92,20 +92,17 @@
             # load some variables from the params file
             cellName = main.params[ 'ENV' ][ 'cellName' ]
             main.apps = main.params[ 'ENV' ][ 'appString' ]
-            main.numCtrls = int( main.params[ 'num_controllers' ] )
-            if main.ONOSbench.maxNodes and\
-                        main.ONOSbench.maxNodes < main.numCtrls:
-                main.numCtrls = int( main.ONOSbench.maxNodes )
-            main.maxNodes = main.numCtrls
-            stepResult = main.testSetUp.envSetup( hasNode=True )
+            stepResult = main.testSetUp.envSetup()
         except Exception as e:
             main.testSetUp.envSetupException( e )
         main.testSetUp.evnSetupConclusion( stepResult )
         main.HA.generateGraph( "HAscaling", index=1 )
 
-        main.testSetUp.ONOSSetUp( main.Mininet1, cellName=cellName, removeLog=True,
-                                 extraApply=main.HA.customizeOnosService,
-                                 arg=main.HA.scalingMetadata,
+        main.testSetUp.ONOSSetUp( main.Mininet1, main.Cluster, cellName=cellName, removeLog=True,
+                                 extraApply=[ main.HA.setServerForCluster,
+                                              main.HA.scalingMetadata,
+                                              main.HA.startingMininet,
+                                              main.HA.copyingBackupConfig ],
                                  extraClean=main.HA.cleanUpOnosService,
                                  installMax=True )
 
@@ -147,11 +144,8 @@
         """
         import time
         import re
-        assert main.numCtrls, "main.numCtrls not defined"
         assert main, "main not defined"
         assert utilities.assert_equals, "utilities.assert_equals not defined"
-        assert main.CLIs, "main.CLIs not defined"
-        assert main.nodes, "main.nodes not defined"
         try:
             main.HAlabels
         except ( NameError, AttributeError ):
@@ -166,10 +160,9 @@
         main.case( "Scale the number of nodes in the ONOS cluster" )
 
         main.step( "Checking ONOS Logs for errors" )
-        for i in main.activeNodes:
-            node = main.nodes[ i ]
-            main.log.debug( "Checking logs for errors on " + node.name + ":" )
-            main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
+        for ctrl in main.Cluster.active():
+            main.log.debug( "Checking logs for errors on " + ctrl.name + ":" )
+            main.log.warn( main.ONOSbench.checkLogs( ctrl.ipAddress ) )
 
         """
         pop # of nodes from a list, might look like 1,3b,3,5b,5,7b,7,7b,5,5b,3...
@@ -177,15 +170,15 @@
         install/deactivate node as needed
         """
         try:
-            prevNodes = main.activeNodes
+            prevNodes = main.Cluster.active()
             scale = main.scaling.pop( 0 )
             if "e" in scale:
                 equal = True
             else:
                 equal = False
-            main.numCtrls = int( re.search( "\d+", scale ).group( 0 ) )
-            main.log.info( "Scaling to {} nodes".format( main.numCtrls ) )
-            genResult = main.Server.generateFile( main.numCtrls, equal=equal )
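+            # Parse the node count from the scale token (e.g. "5b" -> 5), record it as
+            # the running cluster size, and regenerate the cluster metadata file to match.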
+            main.Cluster.setRunningNode( int( re.search( "\d+", scale ).group( 0 ) ) )
+            main.log.info( "Scaling to {} nodes".format( main.Cluster.numCtrls ) )
+            genResult = main.Server.generateFile( main.Cluster.numCtrls, equal=equal )
             utilities.assert_equals( expect=main.TRUE, actual=genResult,
                                      onpass="New cluster metadata file generated",
                                      onfail="Failled to generate new metadata file" )
@@ -194,54 +187,27 @@
             main.cleanup()
             main.exit()
 
-        main.activeNodes = [ i for i in range( 0, main.numCtrls ) ]
-        newNodes = [ x for x in main.activeNodes if x not in prevNodes ]
-
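+        # Positions 0 through numCtrls - 1 should now be running; any position not in the
+        # previous running set is a newly added node that still needs to be started.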
+        activeNodes = [ i for i in range( 0, main.Cluster.numCtrls ) ]
+        newNodes = [ x for x in activeNodes if x not in prevNodes ]
+        main.Cluster.resetActive()
         main.step( "Start new nodes" )  # OR stop old nodes?
         started = main.TRUE
         for i in newNodes:
-            started = main.ONOSbench.onosStart( main.nodes[ i ].ip_address ) and main.TRUE
+            started = main.ONOSbench.onosStart( main.Cluster.runningNodes[ i ].ipAddress ) and main.TRUE
         utilities.assert_equals( expect=main.TRUE, actual=started,
                                  onpass="ONOS started",
                                  onfail="ONOS start NOT successful" )
 
-        main.step( "Checking if ONOS is up yet" )
-        for i in range( 2 ):
-            onosIsupResult = main.TRUE
-            for i in main.activeNodes:
-                node = main.nodes[ i ]
-                main.ONOSbench.onosSecureSSH( node=node.ip_address )
-                started = main.ONOSbench.isup( node.ip_address )
-                if not started:
-                    main.log.error( node.name + " didn't start!" )
-                onosIsupResult = onosIsupResult and started
-            if onosIsupResult == main.TRUE:
-                break
-        utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
-                                 onpass="ONOS started",
-                                 onfail="ONOS start NOT successful" )
+        main.testSetUp.setupSsh( main.Cluster )
 
-        main.step( "Starting ONOS CLI sessions" )
-        cliResults = main.TRUE
-        threads = []
-        for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[ i ].startOnosCli,
-                             name="startOnosCli-" + str( i ),
-                             args=[ main.nodes[ i ].ip_address ] )
-            threads.append( t )
-            t.start()
+        main.testSetUp.checkOnosService( main.Cluster )
 
-        for t in threads:
-            t.join()
-            cliResults = cliResults and t.result
-        utilities.assert_equals( expect=main.TRUE, actual=cliResults,
-                                 onpass="ONOS cli started",
-                                 onfail="ONOS clis did not start" )
+        main.Cluster.startCLIs()
 
         main.step( "Checking ONOS nodes" )
         nodeResults = utilities.retry( main.HA.nodesCheck,
                                        False,
-                                       args=[ main.activeNodes ],
+                                       args=[ main.Cluster.active() ],
                                        attempts=5 )
         utilities.assert_equals( expect=True, actual=nodeResults,
                                  onpass="Nodes check successful",
@@ -249,9 +215,8 @@
 
         for i in range( 10 ):
             ready = True
-            for i in main.activeNodes:
-                cli = main.CLIs[ i ]
-                output = cli.summary()
+            for ctrl in main.Cluster.active():
+                output = ctrl.CLI.summary()
                 if not output:
                     ready = False
             if ready:
@@ -266,11 +231,10 @@
 
         # Rerun for election on new nodes
         runResults = main.TRUE
-        for i in main.activeNodes:
-            cli = main.CLIs[ i ]
-            run = cli.electionTestRun()
+        for ctrl in main.Cluster.active():
+            run = ctrl.CLI.electionTestRun()
             if run != main.TRUE:
-                main.log.error( "Error running for election on " + cli.name )
+                main.log.error( "Error running for election on " + ctrl.name )
             runResults = runResults and run
         utilities.assert_equals( expect=main.TRUE, actual=runResults,
                                  onpass="Reran for election",
@@ -278,28 +242,22 @@
 
         # TODO: Make this configurable
         time.sleep( 60 )
-        for node in main.activeNodes:
-            main.log.warn( "\n****************** {} **************".format( main.nodes[ node ].ip_address ) )
-            main.log.debug( main.CLIs[ node ].nodes( jsonFormat=False ) )
-            main.log.debug( main.CLIs[ node ].leaders( jsonFormat=False ) )
-            main.log.debug( main.CLIs[ node ].partitions( jsonFormat=False ) )
-            main.log.debug( main.CLIs[ node ].apps( jsonFormat=False ) )
+        main.HA.commonChecks()
 
     def CASE7( self, main ):
         """
         Check state after ONOS scaling
         """
 
-        main.HA.checkStateAfterONOS( main, afterWhich=1 )
+        main.HA.checkStateAfterEvent( main, afterWhich=1 )
 
         main.step( "Leadership Election is still functional" )
         # Test of LeadershipElection
         leaderList = []
         leaderResult = main.TRUE
 
-        for i in main.activeNodes:
-            cli = main.CLIs[ i ]
-            leaderN = cli.electionTestLeader()
+        for ctrl in main.Cluster.active():
+            leaderN = ctrl.CLI.electionTestLeader()
             leaderList.append( leaderN )
             if leaderN == main.FALSE:
                 # error in response
diff --git a/TestON/tests/HA/HAscaling/HAscaling.topo b/TestON/tests/HA/HAscaling/HAscaling.topo
index 7c18a98..4bf4bd4 100644
--- a/TestON/tests/HA/HAscaling/HAscaling.topo
+++ b/TestON/tests/HA/HAscaling/HAscaling.topo
@@ -1,183 +1,32 @@
 <TOPOLOGY>
     <COMPONENT>
 
-        <ONOSbench>
-            <host>localhost</host>
+        <ONOScell>
+            <host>localhost</host>  # ONOS "bench" machine
             <user>sdn</user>
             <password>rocks</password>
-            <type>OnosDriver</type>
+            <type>OnosClusterDriver</type>
             <connect_order>1</connect_order>
             <COMPONENTS>
-                <prompt></prompt>
-            </COMPONENTS>
-        </ONOSbench>
-
-        <ONOScli1>
-            <host>localhost</host>
-            <user>sdn</user>
-            <password>rocks</password>
-            <type>OnosCliDriver</type>
-            <connect_order>2</connect_order>
-            <COMPONENTS>
+                <cluster_name></cluster_name>  # Used as a prefix for cluster components. Defaults to 'ONOS'
+                <diff_clihost></diff_clihost>  # set to True if the CLI runs on a host other than localhost (True or empty); OC# will be used if True.
                 <karaf_username></karaf_username>
                 <karaf_password></karaf_password>
-                <prompt></prompt>
+                <web_user></web_user>
+                <web_pass></web_pass>
+                <rest_port></rest_port>
+                <prompt></prompt>  # TODO: we technically need a few of these, one per component
+                <onos_home></onos_home>  # defines where onos home is
+                <nodes> 7 </nodes>  # number of nodes in the cluster
             </COMPONENTS>
-        </ONOScli1>
-
-        <ONOScli2>
-            <host>localhost</host>
-            <user>sdn</user>
-            <password>rocks</password>
-            <type>OnosCliDriver</type>
-            <connect_order>3</connect_order>
-            <COMPONENTS>
-                <prompt></prompt>
-            </COMPONENTS>
-        </ONOScli2>
-
-        <ONOScli3>
-            <host>localhost</host>
-            <user>sdn</user>
-            <password>rocks</password>
-            <type>OnosCliDriver</type>
-            <connect_order>4</connect_order>
-            <COMPONENTS>
-                <prompt></prompt>
-            </COMPONENTS>
-        </ONOScli3>
-
-
-        <ONOScli4>
-            <host>localhost</host>
-            <user>sdn</user>
-            <password>rocks</password>
-            <type>OnosCliDriver</type>
-            <connect_order>5</connect_order>
-            <COMPONENTS>
-                <prompt></prompt>
-            </COMPONENTS>
-        </ONOScli4>
-
-
-        <ONOScli5>
-            <host>localhost</host>
-            <user>sdn</user>
-            <password>rocks</password>
-            <type>OnosCliDriver</type>
-            <connect_order>6</connect_order>
-            <COMPONENTS>
-                <prompt></prompt>
-            </COMPONENTS>
-        </ONOScli5>
-
-
-        <ONOScli6>
-            <host>localhost</host>
-            <user>sdn</user>
-            <password>rocks</password>
-            <type>OnosCliDriver</type>
-            <connect_order>7</connect_order>
-            <COMPONENTS>
-                <prompt></prompt>
-            </COMPONENTS>
-        </ONOScli6>
-
-
-        <ONOScli7>
-            <host>localhost</host>
-            <user>sdn</user>
-            <password>rocks</password>
-            <type>OnosCliDriver</type>
-            <connect_order>8</connect_order>
-            <COMPONENTS>
-                <prompt></prompt>
-            </COMPONENTS>
-        </ONOScli7>
-
-        <ONOS1>
-            <host>OC1</host>
-            <user>sdn</user>
-            <password>rocks</password>
-            <type>OnosDriver</type>
-            <connect_order>9</connect_order>
-            <COMPONENTS>
-                <prompt></prompt>
-            </COMPONENTS>
-        </ONOS1>
-
-        <ONOS2>
-            <host>OC2</host>
-            <user>sdn</user>
-            <password>rocks</password>
-            <type>OnosDriver</type>
-            <connect_order>10</connect_order>
-            <COMPONENTS>
-                <prompt></prompt>
-            </COMPONENTS>
-        </ONOS2>
-
-        <ONOS3>
-            <host>OC3</host>
-            <user>sdn</user>
-            <password>rocks</password>
-            <type>OnosDriver</type>
-            <connect_order>11</connect_order>
-            <COMPONENTS>
-                <prompt></prompt>
-            </COMPONENTS>
-        </ONOS3>
-
-        <ONOS4>
-            <host>OC4</host>
-            <user>sdn</user>
-            <password>rocks</password>
-            <type>OnosDriver</type>
-            <connect_order>12</connect_order>
-            <COMPONENTS>
-                <prompt></prompt>
-            </COMPONENTS>
-        </ONOS4>
-
-        <ONOS5>
-            <host>OC5</host>
-            <user>sdn</user>
-            <password>rocks</password>
-            <type>OnosDriver</type>
-            <connect_order>13</connect_order>
-            <COMPONENTS>
-                <prompt></prompt>
-            </COMPONENTS>
-        </ONOS5>
-
-        <ONOS6>
-            <host>OC6</host>
-            <user>sdn</user>
-            <password>rocks</password>
-            <type>OnosDriver</type>
-            <connect_order>14</connect_order>
-            <COMPONENTS>
-                <prompt></prompt>
-            </COMPONENTS>
-        </ONOS6>
-
-        <ONOS7>
-            <host>OC7</host>
-            <user>sdn</user>
-            <password>rocks</password>
-            <type>OnosDriver</type>
-            <connect_order>15</connect_order>
-            <COMPONENTS>
-                <prompt></prompt>
-            </COMPONENTS>
-        </ONOS7>
+        </ONOScell>
 
         <Mininet1>
             <host>OCN</host>
             <user>sdn</user>
             <password>rocks</password>
             <type>MininetCliDriver</type>
-            <connect_order>16</connect_order>
+            <connect_order>2</connect_order>
             <COMPONENTS>
                 #Specify the Option for mininet
                 <arg1> --custom ~/mininet/custom/obelisk.py </arg1>
@@ -194,7 +43,7 @@
             <user>sdn</user>
             <password>rocks</password>
             <type>RemoteMininetDriver</type>
-            <connect_order>17</connect_order>
+            <connect_order>3</connect_order>
             <COMPONENTS>
                 <prompt></prompt>
             </COMPONENTS>
diff --git a/TestON/tests/HA/HAsingleInstanceRestart/HAsingleInstanceRestart.py b/TestON/tests/HA/HAsingleInstanceRestart/HAsingleInstanceRestart.py
index dbc004e..f8d5a49 100644
--- a/TestON/tests/HA/HAsingleInstanceRestart/HAsingleInstanceRestart.py
+++ b/TestON/tests/HA/HAsingleInstanceRestart/HAsingleInstanceRestart.py
@@ -76,146 +76,39 @@
                                 "installing ONOS, starting Mininet and ONOS" +\
                                 "cli sessions."
 
-        # load some variables from the params file
-        PULLCODE = False
-        if main.params[ 'GIT' ][ 'pull' ] == 'True':
-            PULLCODE = True
-        gitBranch = main.params[ 'GIT' ][ 'branch' ]
-        cellName = main.params[ 'ENV' ][ 'cellName' ]
-
-        main.numCtrls = int( main.params[ 'num_controllers' ] )
-        if main.ONOSbench.maxNodes:
-            if main.ONOSbench.maxNodes < main.numCtrls:
-                main.numCtrls = int( main.ONOSbench.maxNodes )
+        # set global variables
         # These are for csv plotting in jenkins
         main.HAlabels = []
         main.HAdata = []
         try:
+            from tests.dependencies.ONOSSetup import ONOSSetup
+            main.testSetUp = ONOSSetup()
+        except ImportError:
+            main.log.error( "ONOSSetup not found. exiting the test" )
+            main.exit()
+        main.testSetUp.envSetupDescription()
+        try:
             from tests.HA.dependencies.HA import HA
             main.HA = HA()
-        except ImportError as e:
-            main.log.exception( e )
-            main.cleanup()
-            main.exit()
-
-        main.CLIs = []
-        main.nodes = []
-        ipList = []
-        for i in range( 1, int( main.ONOSbench.maxNodes ) + 1 ):
-            try:
-                main.CLIs.append( getattr( main, 'ONOScli' + str( i ) ) )
-                main.nodes.append( getattr( main, 'ONOS' + str( i ) ) )
-                ipList.append( main.nodes[ -1 ].ip_address )
-            except AttributeError:
-                break
-
-        main.step( "Create cell file" )
-        cellAppString = main.params[ 'ENV' ][ 'appString' ]
-        main.ONOSbench.createCellFile( main.ONOSbench.ip_address, cellName,
-                                       main.Mininet1.ip_address,
-                                       cellAppString, ipList, main.ONOScli1.karafUser )
-        main.step( "Applying cell variable to environment" )
-        cellResult = main.ONOSbench.setCell( cellName )
-        verifyResult = main.ONOSbench.verifyCell()
-
-        # FIXME:this is short term fix
-        main.log.info( "Removing raft logs" )
-        main.ONOSbench.onosRemoveRaftLogs()
-
-        main.log.info( "Uninstalling ONOS" )
-        for node in main.nodes:
-            main.ONOSbench.onosUninstall( node.ip_address )
-
-        # Make sure ONOS is DEAD
-        main.log.info( "Killing any ONOS processes" )
-        killResults = main.TRUE
-        for node in main.nodes:
-            killed = main.ONOSbench.onosKill( node.ip_address )
-            killResults = killResults and killed
-
-        gitPullResult = main.TRUE
-
-        main.HA.startingMininet()
-
-        main.step( "Git checkout and pull " + gitBranch )
-        if PULLCODE:
-            main.ONOSbench.gitCheckout( gitBranch )
-            gitPullResult = main.ONOSbench.gitPull()
-            # values of 1 or 3 are good
-            utilities.assert_lesser( expect=0, actual=gitPullResult,
-                                      onpass="Git pull successful",
-                                      onfail="Git pull failed" )
-        main.ONOSbench.getVersion( report=True )
-
+            # load some variables from the params file
+            cellName = main.params[ 'ENV' ][ 'cellName' ]
+            main.apps = main.params[ 'ENV' ][ 'appString' ]
+            main.numCtrls = int( main.params[ 'num_controllers' ] )
+            stepResult = main.testSetUp.envSetup()
+        except Exception as e:
+            main.testSetUp.envSetupException( e )
+        main.testSetUp.evnSetupConclusion( stepResult )
         main.HA.generateGraph( "HAsingleInstanceRestart" )
-
-        main.CLIs = []
-        main.nodes = []
-        ipList = []
-        for i in range( 1, main.numCtrls + 1 ):
-            main.CLIs.append( getattr( main, 'ONOScli' + str( i ) ) )
-            main.nodes.append( getattr( main, 'ONOS' + str( i ) ) )
-            ipList.append( main.nodes[ -1 ].ip_address )
-
-        main.ONOSbench.createCellFile( main.ONOSbench.ip_address, "SingleHA",
-                                       main.Mininet1.ip_address,
-                                       cellAppString, ipList[ 0 ], main.ONOScli1.karafUser )
-        cellResult = main.ONOSbench.setCell( "SingleHA" )
-        verifyResult = main.ONOSbench.verifyCell()
-        main.step( "Creating ONOS package" )
-        packageResult = main.ONOSbench.buckBuild()
-        utilities.assert_equals( expect=main.TRUE, actual=packageResult,
-                                 onpass="ONOS package successful",
-                                 onfail="ONOS package failed" )
-
-        main.step( "Installing ONOS package" )
-        onosInstallResult = main.TRUE
-        for node in main.nodes:
-            tmpResult = main.ONOSbench.onosInstall( options="-f",
-                                                    node=node.ip_address )
-            onosInstallResult = onosInstallResult and tmpResult
-        utilities.assert_equals( expect=main.TRUE, actual=onosInstallResult,
-                                 onpass="ONOS install successful",
-                                 onfail="ONOS install failed" )
-
-        main.step( "Set up ONOS secure SSH" )
-        secureSshResult = main.TRUE
-        for node in main.nodes:
-            secureSshResult = secureSshResult and main.ONOSbench.onosSecureSSH( node=node.ip_address )
-        utilities.assert_equals( expect=main.TRUE, actual=secureSshResult,
-                                 onpass="Test step PASS",
-                                 onfail="Test step FAIL" )
-
-        main.step( "Checking if ONOS is up yet" )
-        for i in range( 2 ):
-            onosIsupResult = main.TRUE
-            for node in main.nodes:
-                started = main.ONOSbench.isup( node.ip_address )
-                if not started:
-                    main.log.error( node.name + " hasn't started" )
-                onosIsupResult = onosIsupResult and started
-            if onosIsupResult == main.TRUE:
-                break
-        utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
-                                 onpass="ONOS startup successful",
-                                 onfail="ONOS startup failed" )
-
-        main.step( "Starting ONOS CLI sessions" )
-        cliResults = main.TRUE
-        threads = []
-        for i in range( main.numCtrls ):
-            t = main.Thread( target=main.CLIs[ i ].startOnosCli,
-                             name="startOnosCli-" + str( i ),
-                             args=[ main.nodes[ i ].ip_address ] )
-            threads.append( t )
-            t.start()
-
-        for t in threads:
-            t.join()
-            cliResults = cliResults and t.result
-        utilities.assert_equals( expect=main.TRUE, actual=cliResults,
-                                 onpass="ONOS cli startup successful",
-                                 onfail="ONOS cli startup failed" )
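+        # Run the shared setup with a "SingleHA" cell: createApplyCell is applied twice,
+        # first with the full list of node IPs and then with only the first node's IP,
+        # which is the single instance this test actually uses.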
+        main.Cluster.setRunningNode( int( main.params[ 'num_controllers' ] ) )
+        ip = main.Cluster.getIps( allNode=True )
+        main.testSetUp.ONOSSetUp( main.Mininet1, main.Cluster, cellName="SingleHA", removeLog=True,
+                                  extraApply=[ main.testSetUp.createApplyCell,
+                                               main.HA.startingMininet,
+                                               main.testSetUp.createApplyCell ],
+                                  arg=[ [ main.Cluster, True, cellName, main.Mininet1, True, ip ],
+                                        None,
+                                        [ main.Cluster, True, "SingleHA", main.Mininet1,
+                                        True, main.Cluster.runningNodes[ 0 ].ipAddress ] ] )
 
         main.HA.initialSetUp()
 
@@ -248,7 +141,6 @@
         Reading state of ONOS
         """
         import json
-        assert main.numCtrls, "main.numCtrls not defined"
         assert main, "main not defined"
         assert utilities.assert_equals, "utilities.assert_equals not defined"
 
@@ -262,45 +154,19 @@
         mastershipState = '[]'
 
         # Assert that each device has a master
-        rolesNotNull = main.TRUE
-        threads = []
-        for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[ i ].rolesNotNull,
-                             name="rolesNotNull-" + str( i ),
-                             args=[] )
-            threads.append( t )
-            t.start()
-
-        for t in threads:
-            t.join()
-            rolesNotNull = rolesNotNull and t.result
-        utilities.assert_equals(
-            expect=main.TRUE,
-            actual=rolesNotNull,
-            onpass="Each device has a master",
-            onfail="Some devices don't have a master assigned" )
+        main.HA.checkRoleNotNull()
 
         main.step( "Get the Mastership of each switch" )
-        ONOS1Mastership = main.ONOScli1.roles()
-        # TODO: Make this a meaningful check
-        if "Error" in ONOS1Mastership or not ONOS1Mastership:
-            main.log.error( "Error in getting ONOS roles" )
-            main.log.warn(
-                "ONOS1 mastership response: " +
-                repr( ONOS1Mastership ) )
-            consistentMastership = main.FALSE
-        else:
-            mastershipState = ONOS1Mastership
-            consistentMastership = main.TRUE
+        ONOSMastership, rolesResult, consistentMastership = main.HA.checkTheRole()
+        mastershipState = ONOSMastership[ 0 ]  # saved for comparison after the failure in CASE7
 
         main.step( "Get the intents from each controller" )
         global intentState
         intentState = []
-        ONOS1Intents = main.ONOScli1.intents( jsonFormat=True )
+        ONOSIntents = main.Cluster.runningNodes[ 0 ].CLI.intents( jsonFormat=True )
         intentCheck = main.FALSE
-        if "Error" in ONOS1Intents or not ONOS1Intents:
+        if "Error" in ONOSIntents or not ONOSIntents:
             main.log.error( "Error in getting ONOS intents" )
-            main.log.warn( "ONOS1 intents response: " + repr( ONOS1Intents ) )
+            main.log.warn( "ONOS1 intents response: " + repr( ONOSIntents ) )
         else:
             intentCheck = main.TRUE
 
@@ -308,13 +174,13 @@
         global flowState
         flowState = []
         flowCheck = main.FALSE
-        ONOS1Flows = main.ONOScli1.flows( jsonFormat=True )
-        if "Error" in ONOS1Flows or not ONOS1Flows:
+        ONOSFlows = main.Cluster.runningNodes[ 0 ].CLI.flows( jsonFormat=True )
+        if "Error" in ONOSFlows or not ONOSFlows:
             main.log.error( "Error in getting ONOS flows" )
-            main.log.warn( "ONOS1 flows repsponse: " + ONOS1Flows )
+            main.log.warn( "ONOS1 flows repsponse: " + ONOSFlows )
         else:
             # TODO: Do a better check, maybe compare flows on switches?
-            flowState = ONOS1Flows
+            flowState = ONOSFlows
             flowCheck = main.TRUE
 
         main.step( "Get the OF Table entries" )
@@ -329,20 +195,20 @@
 
         main.step( "Collecting topology information from ONOS" )
         devices = []
-        devices.append( main.ONOScli1.devices() )
+        devices.append( main.Cluster.runningNodes[ 0 ].CLI.devices() )
         hosts = []
-        hosts.append( json.loads( main.ONOScli1.hosts() ) )
+        hosts.append( json.loads( main.Cluster.runningNodes[ 0 ].CLI.hosts() ) )
         ports = []
-        ports.append( main.ONOScli1.ports() )
+        ports.append( main.Cluster.runningNodes[ 0 ].CLI.ports() )
         links = []
-        links.append( main.ONOScli1.links() )
+        links.append( main.Cluster.runningNodes[ 0 ].CLI.links() )
         clusters = []
-        clusters.append( main.ONOScli1.clusters() )
+        clusters.append( main.Cluster.runningNodes[ 0 ].CLI.clusters() )
 
         main.step( "Each host has an IP address" )
         ipResult = main.TRUE
         for controller in range( 0, len( hosts ) ):
-            controllerStr = str( main.activeNodes[ controller ] + 1 )
+            controllerStr = str( main.Cluster.active( controller ) )
             if hosts[ controller ]:
                 for host in hosts[ controller ]:
                     if not host.get( 'ipAddresses', [] ):
@@ -379,8 +245,8 @@
         mnSwitches = main.Mininet1.getSwitches()
         mnLinks = main.Mininet1.getLinks()
         mnHosts = main.Mininet1.getHosts()
-        for controller in main.activeNodes:
-            controllerStr = str( main.activeNodes[ controller ] + 1 )
+        for controller in main.Cluster.getRunningPos():
+            controllerStr = str( main.Cluster.active( controller ) )
             if devices[ controller ] and ports[ controller ] and\
                     "Error" not in devices[ controller ] and\
                     "Error" not in ports[ controller ]:
@@ -452,7 +318,6 @@
         The Failure case.
         """
         import time
-        assert main.numCtrls, "main.numCtrls not defined"
         assert main, "main not defined"
         assert utilities.assert_equals, "utilities.assert_equals not defined"
 
@@ -468,12 +333,12 @@
                                 "sessions once onos is up."
 
         main.step( "Checking ONOS Logs for errors" )
-        for node in main.nodes:
-            main.log.debug( "Checking logs for errors on " + node.name + ":" )
-            main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
-
+        for ctrl in main.Cluster.active():
+            main.log.debug( "Checking logs for errors on " + ctrl.name + ":" )
+            main.log.warn( main.ONOSbench.checkLogs( ctrl.ipAddress ) )
+
+        ctrl = main.Cluster.runningNodes[ 0 ]
         main.step( "Killing ONOS processes" )
-        killResult = main.ONOSbench.onosKill( main.nodes[ 0 ].ip_address )
+        killResult = main.ONOSbench.onosKill( ctrl.ipAddress )
         start = time.time()
         utilities.assert_equals( expect=main.TRUE, actual=killResult,
                                  onpass="ONOS Killed",
@@ -482,7 +347,7 @@
         main.step( "Checking if ONOS is up yet" )
         count = 0
         while count < 10:
-            onos1Isup = main.ONOSbench.isup( main.nodes[ 0 ].ip_address )
+            onos1Isup = main.ONOSbench.isup( ctrl.ipAddress )
             if onos1Isup == main.TRUE:
                 elapsed = time.time() - start
                 break
@@ -493,7 +358,7 @@
                                  onfail="ONOS failed to start" )
 
         main.step( "Starting ONOS CLI sessions" )
-        cliResults = main.ONOScli1.startOnosCli( main.nodes[ 0 ].ip_address )
+        cliResults = ctrl.CLI.startOnosCli( ctrl.ipAddress )
         utilities.assert_equals( expect=main.TRUE, actual=cliResults,
                                  onpass="ONOS cli startup successful",
                                  onfail="ONOS cli startup failed" )
@@ -506,58 +371,27 @@
             main.restartTime = -1
         time.sleep( 5 )
         # rerun on election apps
-        main.ONOScli1.electionTestRun()
+        ctrl.CLI.electionTestRun()
 
     def CASE7( self, main ):
         """
         Check state after ONOS failure
         """
         import json
-        assert main.numCtrls, "main.numCtrls not defined"
         assert main, "main not defined"
         assert utilities.assert_equals, "utilities.assert_equals not defined"
         main.case( "Running ONOS Constant State Tests" )
 
-        main.step( "Check that each switch has a master" )
         # Assert that each device has a master
-        rolesNotNull = main.TRUE
-        threads = []
-        for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[ i ].rolesNotNull,
-                             name="rolesNotNull-" + str( i ),
-                             args=[] )
-            threads.append( t )
-            t.start()
-
-        for t in threads:
-            t.join()
-            rolesNotNull = rolesNotNull and t.result
-        utilities.assert_equals(
-            expect=main.TRUE,
-            actual=rolesNotNull,
-            onpass="Each device has a master",
-            onfail="Some devices don't have a master assigned" )
+        main.HA.checkRoleNotNull()
 
         main.step( "Check if switch roles are consistent across all nodes" )
-        ONOS1Mastership = main.ONOScli1.roles()
-        # FIXME: Refactor this whole case for single instance
-        if "Error" in ONOS1Mastership or not ONOS1Mastership:
-            main.log.error( "Error in getting ONOS mastership" )
-            main.log.warn( "ONOS1 mastership response: " +
-                           repr( ONOS1Mastership ) )
-            consistentMastership = main.FALSE
-        else:
-            consistentMastership = main.TRUE
-        utilities.assert_equals(
-            expect=main.TRUE,
-            actual=consistentMastership,
-            onpass="Switch roles are consistent across all ONOS nodes",
-            onfail="ONOS nodes have different views of switch roles" )
-
+        ONOSMastership, rolesResult, consistentMastership = main.HA.checkTheRole()
+        ONOSMastership = ONOSMastership[ 0 ]
         description2 = "Compare switch roles from before failure"
         main.step( description2 )
 
-        currentJson = json.loads( ONOS1Mastership )
+        currentJson = json.loads( ONOSMastership )
         oldJson = json.loads( mastershipState )
         mastershipCheck = main.TRUE
         for i in range( 1, 29 ):
@@ -581,11 +415,11 @@
         mastershipCheck = mastershipCheck and consistentMastership
 
         main.step( "Get the intents and compare across all nodes" )
-        ONOS1Intents = main.ONOScli1.intents( jsonFormat=True )
+        ONOSIntents = main.Cluster.runningNodes[ 0 ].CLI.intents( jsonFormat=True )
         intentCheck = main.FALSE
-        if "Error" in ONOS1Intents or not ONOS1Intents:
+        if "Error" in ONOSIntents or not ONOSIntents:
             main.log.error( "Error in getting ONOS intents" )
-            main.log.warn( "ONOS1 intents response: " + repr( ONOS1Intents ) )
+            main.log.warn( "ONOS1 intents response: " + repr( ONOSIntents ) )
         else:
             intentCheck = main.TRUE
         utilities.assert_equals(
@@ -595,7 +429,7 @@
             onfail="ONOS nodes have different views of intents" )
         # Print the intent states
         intents = []
-        intents.append( ONOS1Intents )
+        intents.append( ONOSIntents )
         intentStates = []
         for node in intents:  # Iter through ONOS nodes
             nodeStates = []
@@ -678,12 +512,11 @@
         main.step( "Leadership Election is still functional" )
         # Test of LeadershipElection
 
-        leader = main.nodes[ main.activeNodes[ 0 ] ].ip_address
+        leader = main.Cluster.runningNodes[ 0 ].ipAddress
         leaderResult = main.TRUE
-        for controller in range( 1, main.numCtrls + 1 ):
+        for ctrl in main.Cluster.active():
             # loop through ONOScli handlers
-            node = getattr( main, ( 'ONOScli' + str( controller ) ) )
-            leaderN = node.electionTestLeader()
+            leaderN = ctrl.CLI.electionTestLeader()
             # verify leader is ONOS1
             # NOTE even though we restarted ONOS, it is the only one so onos 1
             # must be leader
@@ -698,7 +531,7 @@
                 leaderResult = main.FALSE
             elif leader != leaderN:
                 leaderResult = main.FALSE
-                main.log.error( "ONOS" + str( controller ) + " sees " +
+                main.log.error( ctrl.name + " sees " +
                                  str( leaderN ) +
                                  " as the leader of the election app. " +
                                  "Leader should be " + str( leader ) )
@@ -714,7 +547,6 @@
         """
         import json
         import time
-        assert main.numCtrls, "main.numCtrls not defined"
         assert main, "main not defined"
         assert utilities.assert_equals, "utilities.assert_equals not defined"
 
@@ -726,6 +558,7 @@
         count = 0
         main.step( "Comparing ONOS topology to MN topology" )
         startTime = time.time()
+        ctrl = main.Cluster.active( 0 )
         # Give time for Gossip to work
         while topoResult == main.FALSE and ( elapsed < 60 or count < 3 ):
             devicesResults = main.TRUE
@@ -735,9 +568,9 @@
             count += 1
             cliStart = time.time()
             devices = []
-            devices.append( main.ONOScli1.devices() )
+            devices.append( ctrl.CLI.devices() )
             hosts = []
-            hosts.append( json.loads( main.ONOScli1.hosts() ) )
+            hosts.append( json.loads( ctrl.CLI.hosts() ) )
             ipResult = main.TRUE
             for controller in range( 0, len( hosts ) ):
                 controllerStr = str( controller + 1 )
@@ -748,11 +581,11 @@
                             controllerStr + ": " + str( host ) )
                         ipResult = main.FALSE
             ports = []
-            ports.append( main.ONOScli1.ports() )
+            ports.append( ctrl.CLI.ports() )
             links = []
-            links.append( main.ONOScli1.links() )
+            links.append( ctrl.CLI.links() )
             clusters = []
-            clusters.append( main.ONOScli1.clusters() )
+            clusters.append( ctrl.CLI.clusters() )
 
             elapsed = time.time() - startTime
             cliTime = time.time() - cliStart
@@ -761,8 +594,8 @@
             mnSwitches = main.Mininet1.getSwitches()
             mnLinks = main.Mininet1.getLinks()
             mnHosts = main.Mininet1.getHosts()
-            for controller in range( main.numCtrls ):
-                controllerStr = str( controller + 1 )
+            for controller in main.Cluster.getRunningPos():
+                controllerStr = str( controller )
                 if devices[ controller ] and ports[ controller ] and\
                         "Error" not in devices[ controller ] and\
                         "Error" not in ports[ controller ]:
@@ -933,17 +766,17 @@
         main.step( "Checking ONOS nodes" )
         nodeResults = utilities.retry( main.HA.nodesCheck,
                                        False,
-                                       args=[ main.activeNodes ],
+                                       args=[ main.Cluster.active() ],
                                        attempts=5 )
 
         utilities.assert_equals( expect=True, actual=nodeResults,
                                  onpass="Nodes check successful",
                                  onfail="Nodes check NOT successful" )
         if not nodeResults:
-            for i in main.activeNodes:
+            for ctrl in main.Cluster.active():
                 main.log.debug( "{} components not ACTIVE: \n{}".format(
-                    main.CLIs[ i ].name,
-                    main.CLIs[ i ].sendline( "scr:list | grep -v ACTIVE" ) ) )
+                    ctrl.name,
+                    ctrl.CLI.sendline( "scr:list | grep -v ACTIVE" ) ) )
 
         if not topoResult:
             main.cleanup()
diff --git a/TestON/tests/HA/HAsingleInstanceRestart/HAsingleInstanceRestart.topo b/TestON/tests/HA/HAsingleInstanceRestart/HAsingleInstanceRestart.topo
index 7c18a98..4bf4bd4 100644
--- a/TestON/tests/HA/HAsingleInstanceRestart/HAsingleInstanceRestart.topo
+++ b/TestON/tests/HA/HAsingleInstanceRestart/HAsingleInstanceRestart.topo
@@ -1,183 +1,32 @@
 <TOPOLOGY>
     <COMPONENT>
 
-        <ONOSbench>
-            <host>localhost</host>
+        <ONOScell>
+            <host>localhost</host>  # ONOS "bench" machine
             <user>sdn</user>
             <password>rocks</password>
-            <type>OnosDriver</type>
+            <type>OnosClusterDriver</type>
             <connect_order>1</connect_order>
             <COMPONENTS>
-                <prompt></prompt>
-            </COMPONENTS>
-        </ONOSbench>
-
-        <ONOScli1>
-            <host>localhost</host>
-            <user>sdn</user>
-            <password>rocks</password>
-            <type>OnosCliDriver</type>
-            <connect_order>2</connect_order>
-            <COMPONENTS>
+                <cluster_name></cluster_name>  # Used as a prefix for cluster components. Defaults to 'ONOS'
+                <diff_clihost></diff_clihost>  # set to True if the CLI runs on a host other than localhost (True or empty); OC# will be used if True.
                 <karaf_username></karaf_username>
                 <karaf_password></karaf_password>
-                <prompt></prompt>
+                <web_user></web_user>
+                <web_pass></web_pass>
+                <rest_port></rest_port>
+                <prompt></prompt>  # TODO: we technically need a few of these, one per component
+                <onos_home></onos_home>  # defines where onos home is
+                <nodes> 7 </nodes>  # number of nodes in the cluster
             </COMPONENTS>
-        </ONOScli1>
-
-        <ONOScli2>
-            <host>localhost</host>
-            <user>sdn</user>
-            <password>rocks</password>
-            <type>OnosCliDriver</type>
-            <connect_order>3</connect_order>
-            <COMPONENTS>
-                <prompt></prompt>
-            </COMPONENTS>
-        </ONOScli2>
-
-        <ONOScli3>
-            <host>localhost</host>
-            <user>sdn</user>
-            <password>rocks</password>
-            <type>OnosCliDriver</type>
-            <connect_order>4</connect_order>
-            <COMPONENTS>
-                <prompt></prompt>
-            </COMPONENTS>
-        </ONOScli3>
-
-
-        <ONOScli4>
-            <host>localhost</host>
-            <user>sdn</user>
-            <password>rocks</password>
-            <type>OnosCliDriver</type>
-            <connect_order>5</connect_order>
-            <COMPONENTS>
-                <prompt></prompt>
-            </COMPONENTS>
-        </ONOScli4>
-
-
-        <ONOScli5>
-            <host>localhost</host>
-            <user>sdn</user>
-            <password>rocks</password>
-            <type>OnosCliDriver</type>
-            <connect_order>6</connect_order>
-            <COMPONENTS>
-                <prompt></prompt>
-            </COMPONENTS>
-        </ONOScli5>
-
-
-        <ONOScli6>
-            <host>localhost</host>
-            <user>sdn</user>
-            <password>rocks</password>
-            <type>OnosCliDriver</type>
-            <connect_order>7</connect_order>
-            <COMPONENTS>
-                <prompt></prompt>
-            </COMPONENTS>
-        </ONOScli6>
-
-
-        <ONOScli7>
-            <host>localhost</host>
-            <user>sdn</user>
-            <password>rocks</password>
-            <type>OnosCliDriver</type>
-            <connect_order>8</connect_order>
-            <COMPONENTS>
-                <prompt></prompt>
-            </COMPONENTS>
-        </ONOScli7>
-
-        <ONOS1>
-            <host>OC1</host>
-            <user>sdn</user>
-            <password>rocks</password>
-            <type>OnosDriver</type>
-            <connect_order>9</connect_order>
-            <COMPONENTS>
-                <prompt></prompt>
-            </COMPONENTS>
-        </ONOS1>
-
-        <ONOS2>
-            <host>OC2</host>
-            <user>sdn</user>
-            <password>rocks</password>
-            <type>OnosDriver</type>
-            <connect_order>10</connect_order>
-            <COMPONENTS>
-                <prompt></prompt>
-            </COMPONENTS>
-        </ONOS2>
-
-        <ONOS3>
-            <host>OC3</host>
-            <user>sdn</user>
-            <password>rocks</password>
-            <type>OnosDriver</type>
-            <connect_order>11</connect_order>
-            <COMPONENTS>
-                <prompt></prompt>
-            </COMPONENTS>
-        </ONOS3>
-
-        <ONOS4>
-            <host>OC4</host>
-            <user>sdn</user>
-            <password>rocks</password>
-            <type>OnosDriver</type>
-            <connect_order>12</connect_order>
-            <COMPONENTS>
-                <prompt></prompt>
-            </COMPONENTS>
-        </ONOS4>
-
-        <ONOS5>
-            <host>OC5</host>
-            <user>sdn</user>
-            <password>rocks</password>
-            <type>OnosDriver</type>
-            <connect_order>13</connect_order>
-            <COMPONENTS>
-                <prompt></prompt>
-            </COMPONENTS>
-        </ONOS5>
-
-        <ONOS6>
-            <host>OC6</host>
-            <user>sdn</user>
-            <password>rocks</password>
-            <type>OnosDriver</type>
-            <connect_order>14</connect_order>
-            <COMPONENTS>
-                <prompt></prompt>
-            </COMPONENTS>
-        </ONOS6>
-
-        <ONOS7>
-            <host>OC7</host>
-            <user>sdn</user>
-            <password>rocks</password>
-            <type>OnosDriver</type>
-            <connect_order>15</connect_order>
-            <COMPONENTS>
-                <prompt></prompt>
-            </COMPONENTS>
-        </ONOS7>
+        </ONOScell>
 
         <Mininet1>
             <host>OCN</host>
             <user>sdn</user>
             <password>rocks</password>
             <type>MininetCliDriver</type>
-            <connect_order>16</connect_order>
+            <connect_order>2</connect_order>
             <COMPONENTS>
                 #Specify the Option for mininet
                 <arg1> --custom ~/mininet/custom/obelisk.py </arg1>
@@ -194,7 +43,7 @@
             <user>sdn</user>
             <password>rocks</password>
             <type>RemoteMininetDriver</type>
-            <connect_order>17</connect_order>
+            <connect_order>3</connect_order>
             <COMPONENTS>
                 <prompt></prompt>
             </COMPONENTS>
diff --git a/TestON/tests/HA/HAstopNodes/HAstopNodes.py b/TestON/tests/HA/HAstopNodes/HAstopNodes.py
index c17e11d..8e6bd6f 100644
--- a/TestON/tests/HA/HAstopNodes/HAstopNodes.py
+++ b/TestON/tests/HA/HAstopNodes/HAstopNodes.py
@@ -80,20 +80,19 @@
             main.exit()
         main.testSetUp.envSetupDescription()
         try:
-            from dependencies.Cluster import Cluster
             from tests.HA.dependencies.HA import HA
             main.HA = HA()
-            main.Cluster = Cluster( main.ONOScell.nodes )
             cellName = main.params[ 'ENV' ][ 'cellName' ]
             main.apps = main.params[ 'ENV' ][ 'appString' ]
-            stepResult = main.testSetUp.envSetup( main.Cluster, hasNode=True )
+            stepResult = main.testSetUp.envSetup()
         except Exception as e:
             main.testSetUp.envSetupException( e )
         main.testSetUp.evnSetupConclusion( stepResult )
         main.HA.generateGraph( "HAstopNodes" )
 
         main.testSetUp.ONOSSetUp( main.Mininet1, main.Cluster, cellName=cellName, removeLog=True,
-                                  extraApply=main.HA.customizeOnosGenPartitions,
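+                                  # startingMininet runs before the gen-partitions customization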
+                                  extraApply=[ main.HA.startingMininet,
+                                               main.HA.customizeOnosGenPartitions ],
                                   extraClean=main.HA.cleanUpGenPartition )
 
         main.HA.initialSetUp()
@@ -132,7 +131,6 @@
         """
         The Failure case.
         """
-        assert main.numCtrls, "main.numCtrls not defined"
         assert main, "main not defined"
         assert utilities.assert_equals, "utilities.assert_equals not defined"
         main.case( "Stop minority of ONOS nodes" )
@@ -142,11 +140,11 @@
             main.log.debug( "Checking logs for errors on " + ctrl.name + ":" )
             main.log.warn( ctrl.checkLogs( ctrl.ipAddress ) )
 
-        n = len( main.Cluster.controllers )  # Number of nodes
+        n = len( main.Cluster.runningNodes )  # Number of nodes
         p = ( ( n + 1 ) / 2 ) + 1  # Number of partitions
-        main.kill = [ main.Cluster.controllers[ 0 ] ]  # ONOS node to kill, listed by index in main.nodes
+        main.kill = [ main.Cluster.runningNodes[ 0 ] ]  # ONOS nodes to stop, held as Cluster controller objects
         if n > 3:
-            main.kill.append( main.Cluster.controllers[ p - 1 ] )
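+            # Assumption: stopping node p - 1 in addition to node 0 spreads the stopped
+            # minority across different partitions.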
+            main.kill.append( main.Cluster.runningNodes[ p - 1 ] )
             # NOTE: This only works for cluster sizes of 3,5, or 7.
 
         main.step( "Stopping nodes: " + str( main.kill ) )
@@ -194,7 +192,7 @@
         except AttributeError:
             main.kill = []
 
-        main.HA.checkStateAfterONOS( main, afterWhich=0 )
+        main.HA.checkStateAfterEvent( main, afterWhich=0 )
         main.step( "Leadership Election is still functional" )
         # Test of LeadershipElection
         leaderList = []
diff --git a/TestON/tests/HA/HAstopNodes/HAstopNodes.topo b/TestON/tests/HA/HAstopNodes/HAstopNodes.topo
index f3b9278..4bf4bd4 100644
--- a/TestON/tests/HA/HAstopNodes/HAstopNodes.topo
+++ b/TestON/tests/HA/HAstopNodes/HAstopNodes.topo
@@ -1,17 +1,6 @@
 <TOPOLOGY>
     <COMPONENT>
 
-        <ONOSbench>
-            <host>localhost</host>
-            <user>sdn</user>
-            <password>rocks</password>
-            <type>OnosDriver</type>
-            <connect_order>1</connect_order>
-            <COMPONENTS>
-                <prompt></prompt>
-            </COMPONENTS>
-        </ONOSbench>
-
         <ONOScell>
             <host>localhost</host>  # ONOS "bench" machine
             <user>sdn</user>
@@ -20,6 +9,7 @@
             <connect_order>1</connect_order>
             <COMPONENTS>
                 <cluster_name></cluster_name>  # Used as a prefix for cluster components. Defaults to 'ONOS'
+                <diff_clihost></diff_clihost>  # set to True if the CLI runs on a host other than localhost (True or empty); OC# will be used if True.
                 <karaf_username></karaf_username>
                 <karaf_password></karaf_password>
                 <web_user></web_user>
@@ -36,7 +26,7 @@
             <user>sdn</user>
             <password>rocks</password>
             <type>MininetCliDriver</type>
-            <connect_order>16</connect_order>
+            <connect_order>2</connect_order>
             <COMPONENTS>
                 #Specify the Option for mininet
                 <arg1> --custom ~/mininet/custom/obelisk.py </arg1>
@@ -53,7 +43,7 @@
             <user>sdn</user>
             <password>rocks</password>
             <type>RemoteMininetDriver</type>
-            <connect_order>17</connect_order>
+            <connect_order>3</connect_order>
             <COMPONENTS>
                 <prompt></prompt>
             </COMPONENTS>
diff --git a/TestON/tests/HA/HAswapNodes/HAswapNodes.py b/TestON/tests/HA/HAswapNodes/HAswapNodes.py
index c1e3006..923f67c 100644
--- a/TestON/tests/HA/HAswapNodes/HAswapNodes.py
+++ b/TestON/tests/HA/HAswapNodes/HAswapNodes.py
@@ -90,21 +90,18 @@
             # load some variables from the params file
             cellName = main.params[ 'ENV' ][ 'cellName' ]
             main.apps = main.params[ 'ENV' ][ 'appString' ]
-            main.numCtrls = int( main.params[ 'num_controllers' ] )
-            if main.ONOSbench.maxNodes and\
-                        main.ONOSbench.maxNodes < main.numCtrls:
-                main.numCtrls = int( main.ONOSbench.maxNodes )
-            main.maxNodes = main.numCtrls
-            stepResult = main.testSetUp.envSetup( hasNode=True )
+            stepResult = main.testSetUp.envSetup()
         except Exception as e:
             main.testSetUp.envSetupException( e )
         main.testSetUp.evnSetupConclusion( stepResult )
         main.HA.generateGraph( "HAswapNodes" )
 
 
-        main.testSetUp.ONOSSetUp( main.Mininet1, cellName=cellName, removeLog=True,
-                                 extraApply=main.HA.customizeOnosService,
-                                 arg=main.HA.swapNodeMetadata,
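+        # The extraApply callables below run in order during ONOS setup: set the server
+        # for the cluster, apply the swap-node metadata, start Mininet, and copy the
+        # backup configuration.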
+        main.testSetUp.ONOSSetUp( main.Mininet1, main.Cluster, cellName=cellName, removeLog=True,
+                                 extraApply=[ main.HA.setServerForCluster,
+                                              main.HA.swapNodeMetadata,
+                                              main.HA.startingMininet,
+                                              main.HA.copyingBackupConfig ],
                                  extraClean=main.HA.cleanUpOnosService,
                                  installMax=True )
         main.HA.initialSetUp()
@@ -145,11 +142,8 @@
         """
         import time
         import re
-        assert main.numCtrls, "main.numCtrls not defined"
         assert main, "main not defined"
         assert utilities.assert_equals, "utilities.assert_equals not defined"
-        assert main.CLIs, "main.CLIs not defined"
-        assert main.nodes, "main.nodes not defined"
         try:
             main.HAlabels
         except ( NameError, AttributeError ):
@@ -164,14 +158,15 @@
         main.case( "Swap some of the ONOS nodes" )
 
         main.step( "Checking ONOS Logs for errors" )
-        for i in main.activeNodes:
-            node = main.nodes[ i ]
-            main.log.debug( "Checking logs for errors on " + node.name + ":" )
-            main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
+        for ctrl in main.Cluster.active():
+            main.log.debug( "Checking logs for errors on " + ctrl.name + ":" )
+            main.log.warn( main.ONOSbench.checkLogs( ctrl.ipAddress ) )
 
+        activeNodes = main.Cluster.getRunningPos()
+        # TODO: this could be wrong; need to double check.
         main.step( "Generate new metadata file" )
-        old = [ main.activeNodes[ 1 ], main.activeNodes[ -2 ] ]
-        new = range( main.ONOSbench.maxNodes )[ -2: ]
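+        # Swap out the second and second-to-last running positions and swap in the two
+        # highest-numbered positions of the maximum-size cluster.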
+        old = [ activeNodes[ 1 ], activeNodes[ -2 ] ]
+        new = range( main.Cluster.maxCtrls )[ -2: ]
         assert len( old ) == len( new ), "Length of nodes to swap don't match"
         handle = main.ONOSbench.handle
         for x, y in zip( old, new ):
@@ -181,75 +176,45 @@
             handle.expect( "\$" )  # From the prompt
             ret += handle.before
             main.log.debug( ret )
-            main.activeNodes.remove( x )
-            main.activeNodes.append( y )
+            activeNodes.remove( x )
+            activeNodes.append( y )
 
-        genResult = main.Server.generateFile( main.numCtrls )
+        genResult = main.Server.generateFile( main.Cluster.numCtrls )
         utilities.assert_equals( expect=main.TRUE, actual=genResult,
                                  onpass="New cluster metadata file generated",
                                  onfail="Failled to generate new metadata file" )
         time.sleep( 5 )  # Give time for nodes to read new file
-
+        main.Cluster.resetActive()
+        # NOTE: done up to this point.
         main.step( "Start new nodes" )  # OR stop old nodes?
         started = main.TRUE
         for i in new:
-            started = main.ONOSbench.onosStart( main.nodes[ i ].ip_address ) and main.TRUE
+            started = main.ONOSbench.onosStart( main.Cluster.controllers[ i ].ipAddress ) and main.TRUE
         utilities.assert_equals( expect=main.TRUE, actual=started,
                                  onpass="ONOS started",
                                  onfail="ONOS start NOT successful" )
 
-        main.step( "Checking if ONOS is up yet" )
-        for i in range( 2 ):
-            onosIsupResult = main.TRUE
-            for i in main.activeNodes:
-                node = main.nodes[ i ]
-                main.ONOSbench.onosSecureSSH( node=node.ip_address )
-                started = main.ONOSbench.isup( node.ip_address )
-                if not started:
-                    main.log.error( node.name + " didn't start!" )
-                onosIsupResult = onosIsupResult and started
-            if onosIsupResult == main.TRUE:
-                break
-        utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
-                                 onpass="ONOS started",
-                                 onfail="ONOS start NOT successful" )
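+        # Record the post-swap positions as the running set before reconnecting SSH,
+        # checking the service, and restarting the CLIs.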
+        main.Cluster.setRunningNode( activeNodes )
 
-        main.step( "Starting ONOS CLI sessions" )
-        cliResults = main.TRUE
-        threads = []
-        for i in main.activeNodes:
-            t = main.Thread( target=main.CLIs[ i ].startOnosCli,
-                             name="startOnosCli-" + str( i ),
-                             args=[ main.nodes[ i ].ip_address ] )
-            threads.append( t )
-            t.start()
+        main.testSetUp.setupSsh( main.Cluster )
+        main.testSetUp.checkOnosService( main.Cluster )
 
-        for t in threads:
-            t.join()
-            cliResults = cliResults and t.result
-        utilities.assert_equals( expect=main.TRUE, actual=cliResults,
-                                 onpass="ONOS cli started",
-                                 onfail="ONOS clis did not start" )
+        main.testSetUp.startOnosClis( main.Cluster )
 
         main.step( "Checking ONOS nodes" )
         nodeResults = utilities.retry( main.HA.nodesCheck,
                                        False,
-                                       args=[ main.activeNodes ],
+                                       args=[ main.Cluster.active() ],
                                        attempts=5 )
         utilities.assert_equals( expect=True, actual=nodeResults,
                                  onpass="Nodes check successful",
                                  onfail="Nodes check NOT successful" )
 
-        for i in range( 10 ):
-            ready = True
-            for i in main.activeNodes:
-                cli = main.CLIs[ i ]
-                output = cli.summary()
-                if not output:
-                    ready = False
-            if ready:
-                break
-            time.sleep( 30 )
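+        # Poll "summary" across the cluster (up to 10 attempts, 30 seconds apart) until
+        # every node returns content.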
+        ready = utilities.retry( main.Cluster.command,
+                                 False,
+                                 kwargs={ "function": "summary", "contentCheck": True },
+                                 sleep=30,
+                                 attempts=10 )
         utilities.assert_equals( expect=True, actual=ready,
                                  onpass="ONOS summary command succeded",
                                  onfail="ONOS summary command failed" )
@@ -259,22 +224,19 @@
 
         # Rerun for election on new nodes
         runResults = main.TRUE
-        for i in main.activeNodes:
-            cli = main.CLIs[ i ]
-            run = cli.electionTestRun()
+        for ctrl in main.Cluster.active():
+            run = ctrl.CLI.electionTestRun()
             if run != main.TRUE:
-                main.log.error( "Error running for election on " + cli.name )
+                main.log.error( "Error running for election on " + ctrl.name )
             runResults = runResults and run
         utilities.assert_equals( expect=main.TRUE, actual=runResults,
                                  onpass="Reran for election",
                                  onfail="Failed to rerun for election" )
 
-        for node in main.activeNodes:
-            main.log.warn( "\n****************** {} **************".format( main.nodes[ node ].ip_address ) )
-            main.log.debug( main.CLIs[ node ].nodes( jsonFormat=False ) )
-            main.log.debug( main.CLIs[ node ].leaders( jsonFormat=False ) )
-            main.log.debug( main.CLIs[ node ].partitions( jsonFormat=False ) )
-            main.log.debug( main.CLIs[ node ].apps( jsonFormat=False ) )
+        main.HA.commonChecks()
+
+        """
+        # NOTE: Do we really want this? It would revert the changes made earlier in this test case.
 
         main.step( "Reapplying cell variable to environment" )
         cellName = main.params[ 'ENV' ][ 'cellName' ]
@@ -282,22 +244,23 @@
         utilities.assert_equals( expect=main.TRUE, actual=cellResult,
                                  onpass="Set cell successfull",
                                  onfail="Failled to set cell" )
+        """
+
 
     def CASE7( self, main ):
         """
         Check state after ONOS scaling
         """
 
-        main.HA.checkStateAfterONOS( main, afterWhich=1 )
+        main.HA.checkStateAfterEvent( main, afterWhich=1 )
 
         main.step( "Leadership Election is still functional" )
         # Test of LeadershipElection
         leaderList = []
         leaderResult = main.TRUE
 
-        for i in main.activeNodes:
-            cli = main.CLIs[ i ]
-            leaderN = cli.electionTestLeader()
+        for ctrl in main.Cluster.active():
+            leaderN = ctrl.CLI.electionTestLeader()
             leaderList.append( leaderN )
             if leaderN == main.FALSE:
                 # error in response
@@ -306,7 +269,7 @@
                                  " error logs" )
                 leaderResult = main.FALSE
             elif leaderN is None:
-                main.log.error( cli.name +
+                main.log.error( ctrl.name +
                                  " shows no leader for the election-app." )
                 leaderResult = main.FALSE
         if len( set( leaderList ) ) != 1:
diff --git a/TestON/tests/HA/HAswapNodes/HAswapNodes.topo b/TestON/tests/HA/HAswapNodes/HAswapNodes.topo
index 7c18a98..4bf4bd4 100644
--- a/TestON/tests/HA/HAswapNodes/HAswapNodes.topo
+++ b/TestON/tests/HA/HAswapNodes/HAswapNodes.topo
@@ -1,183 +1,32 @@
 <TOPOLOGY>
     <COMPONENT>
 
-        <ONOSbench>
-            <host>localhost</host>
+        <ONOScell>
+            <host>localhost</host>  # ONOS "bench" machine
             <user>sdn</user>
             <password>rocks</password>
-            <type>OnosDriver</type>
+            <type>OnosClusterDriver</type>
             <connect_order>1</connect_order>
             <COMPONENTS>
-                <prompt></prompt>
-            </COMPONENTS>
-        </ONOSbench>
-
-        <ONOScli1>
-            <host>localhost</host>
-            <user>sdn</user>
-            <password>rocks</password>
-            <type>OnosCliDriver</type>
-            <connect_order>2</connect_order>
-            <COMPONENTS>
+                <cluster_name></cluster_name>  # Used as a prefix for cluster components. Defaults to 'ONOS'
+                <diff_clihost></diff_clihost>  # set to True if the CLI uses a host other than localhost. True or empty; OC# will be used if True.
                 <karaf_username></karaf_username>
                 <karaf_password></karaf_password>
-                <prompt></prompt>
+                <web_user></web_user>
+                <web_pass></web_pass>
+                <rest_port></rest_port>
+                <prompt></prompt>  # TODO: we technically need a few of these, one per component
+                <onos_home></onos_home>  # path to the ONOS home directory
+                <nodes> 7 </nodes>  # number of nodes in the cluster
             </COMPONENTS>
-        </ONOScli1>
-
-        <ONOScli2>
-            <host>localhost</host>
-            <user>sdn</user>
-            <password>rocks</password>
-            <type>OnosCliDriver</type>
-            <connect_order>3</connect_order>
-            <COMPONENTS>
-                <prompt></prompt>
-            </COMPONENTS>
-        </ONOScli2>
-
-        <ONOScli3>
-            <host>localhost</host>
-            <user>sdn</user>
-            <password>rocks</password>
-            <type>OnosCliDriver</type>
-            <connect_order>4</connect_order>
-            <COMPONENTS>
-                <prompt></prompt>
-            </COMPONENTS>
-        </ONOScli3>
-
-
-        <ONOScli4>
-            <host>localhost</host>
-            <user>sdn</user>
-            <password>rocks</password>
-            <type>OnosCliDriver</type>
-            <connect_order>5</connect_order>
-            <COMPONENTS>
-                <prompt></prompt>
-            </COMPONENTS>
-        </ONOScli4>
-
-
-        <ONOScli5>
-            <host>localhost</host>
-            <user>sdn</user>
-            <password>rocks</password>
-            <type>OnosCliDriver</type>
-            <connect_order>6</connect_order>
-            <COMPONENTS>
-                <prompt></prompt>
-            </COMPONENTS>
-        </ONOScli5>
-
-
-        <ONOScli6>
-            <host>localhost</host>
-            <user>sdn</user>
-            <password>rocks</password>
-            <type>OnosCliDriver</type>
-            <connect_order>7</connect_order>
-            <COMPONENTS>
-                <prompt></prompt>
-            </COMPONENTS>
-        </ONOScli6>
-
-
-        <ONOScli7>
-            <host>localhost</host>
-            <user>sdn</user>
-            <password>rocks</password>
-            <type>OnosCliDriver</type>
-            <connect_order>8</connect_order>
-            <COMPONENTS>
-                <prompt></prompt>
-            </COMPONENTS>
-        </ONOScli7>
-
-        <ONOS1>
-            <host>OC1</host>
-            <user>sdn</user>
-            <password>rocks</password>
-            <type>OnosDriver</type>
-            <connect_order>9</connect_order>
-            <COMPONENTS>
-                <prompt></prompt>
-            </COMPONENTS>
-        </ONOS1>
-
-        <ONOS2>
-            <host>OC2</host>
-            <user>sdn</user>
-            <password>rocks</password>
-            <type>OnosDriver</type>
-            <connect_order>10</connect_order>
-            <COMPONENTS>
-                <prompt></prompt>
-            </COMPONENTS>
-        </ONOS2>
-
-        <ONOS3>
-            <host>OC3</host>
-            <user>sdn</user>
-            <password>rocks</password>
-            <type>OnosDriver</type>
-            <connect_order>11</connect_order>
-            <COMPONENTS>
-                <prompt></prompt>
-            </COMPONENTS>
-        </ONOS3>
-
-        <ONOS4>
-            <host>OC4</host>
-            <user>sdn</user>
-            <password>rocks</password>
-            <type>OnosDriver</type>
-            <connect_order>12</connect_order>
-            <COMPONENTS>
-                <prompt></prompt>
-            </COMPONENTS>
-        </ONOS4>
-
-        <ONOS5>
-            <host>OC5</host>
-            <user>sdn</user>
-            <password>rocks</password>
-            <type>OnosDriver</type>
-            <connect_order>13</connect_order>
-            <COMPONENTS>
-                <prompt></prompt>
-            </COMPONENTS>
-        </ONOS5>
-
-        <ONOS6>
-            <host>OC6</host>
-            <user>sdn</user>
-            <password>rocks</password>
-            <type>OnosDriver</type>
-            <connect_order>14</connect_order>
-            <COMPONENTS>
-                <prompt></prompt>
-            </COMPONENTS>
-        </ONOS6>
-
-        <ONOS7>
-            <host>OC7</host>
-            <user>sdn</user>
-            <password>rocks</password>
-            <type>OnosDriver</type>
-            <connect_order>15</connect_order>
-            <COMPONENTS>
-                <prompt></prompt>
-            </COMPONENTS>
-        </ONOS7>
+        </ONOScell>
 
         <Mininet1>
             <host>OCN</host>
             <user>sdn</user>
             <password>rocks</password>
             <type>MininetCliDriver</type>
-            <connect_order>16</connect_order>
+            <connect_order>2</connect_order>
             <COMPONENTS>
                 #Specify the Option for mininet
                 <arg1> --custom ~/mininet/custom/obelisk.py </arg1>
@@ -194,7 +43,7 @@
             <user>sdn</user>
             <password>rocks</password>
             <type>RemoteMininetDriver</type>
-            <connect_order>17</connect_order>
+            <connect_order>3</connect_order>
             <COMPONENTS>
                 <prompt></prompt>
             </COMPONENTS>
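A hypothetical sketch of how a single ONOScell component with a <nodes> count can stand in for the per-node ONOScli#/ONOS# entries it replaces: the cluster driver expands the count into individual node records at connect time. The names and fields below are illustrative only, not the driver's actual schema.

    def expandCell( clusterName="ONOS", nodes=7, hostPrefix="OC" ):
        # Expand one cell definition into per-node records, e.g. ONOS1/OC1 .. ONOS7/OC7.
        return [ { "name": "%s%d" % ( clusterName, i ),
                   "host": "%s%d" % ( hostPrefix, i ) }
                 for i in range( 1, nodes + 1 ) ]

    print( expandCell( nodes=7 ) )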
diff --git a/TestON/tests/HA/dependencies/HA.py b/TestON/tests/HA/dependencies/HA.py
index d7b64de..11bc039 100644
--- a/TestON/tests/HA/dependencies/HA.py
+++ b/TestON/tests/HA/dependencies/HA.py
@@ -29,7 +29,6 @@
         self.default = ''
 
     def customizeOnosGenPartitions( self ):
-        self.startingMininet()
         # copy gen-partions file to ONOS
         # NOTE: this assumes TestON and ONOS are on the same machine
         srcFile = main.testDir + "/HA/dependencies/onos-gen-partitions"
@@ -73,6 +72,7 @@
 
     def scalingMetadata( self ):
         import re
+        main.step( "Generate initial metadata file" )
         main.scaling = main.params[ 'scaling' ].split( "," )
         main.log.debug( main.scaling )
         scale = main.scaling.pop( 0 )
@@ -82,23 +82,24 @@
         else:
             equal = False
         main.log.debug( equal )
-        main.numCtrls = int( re.search( "\d+", scale ).group( 0 ) )
-        genResult = main.Server.generateFile( main.numCtrls, equal=equal )
+        main.Cluster.setRunningNode( int( re.search( "\d+", scale ).group( 0 ) ) )
+        genResult = main.Server.generateFile( main.Cluster.numCtrls, equal=equal )
         utilities.assert_equals( expect=main.TRUE, actual=genResult,
                                  onpass="New cluster metadata file generated",
                                  onfail="Failled to generate new metadata file" )
 
     def swapNodeMetadata( self ):
-        if main.numCtrls >= 5:
-            main.numCtrls -= 2
+        main.step( "Generate initial metadata file" )
+        if main.Cluster.numCtrls >= 5:
+            main.Cluster.setRunningNode( main.Cluster.numCtrls - 2 )
         else:
             main.log.error( "Not enough ONOS nodes to run this test. Requires 5 or more" )
-        genResult = main.Server.generateFile( main.numCtrls )
+        genResult = main.Server.generateFile( main.Cluster.numCtrls )
         utilities.assert_equals( expect=main.TRUE, actual=genResult,
                                  onpass="New cluster metadata file generated",
                                  onfail="Failled to generate new metadata file" )
 
-    def customizeOnosService( self, metadataMethod ):
+    def setServerForCluster( self ):
         import os
         main.step( "Setup server for cluster metadata file" )
         main.serverPort = main.params[ 'server' ][ 'port' ]
@@ -112,11 +113,7 @@
                                  onpass="Server started",
                                  onfail="Failled to start SimpleHTTPServer" )
 
-        main.step( "Generate initial metadata file" )
-        metadataMethod()
-
-        self.startingMininet()
-
+    def copyingBackupConfig( self ):
         main.step( "Copying backup config files" )
         main.onosServicepath = main.ONOSbench.home + "/tools/package/bin/onos-service"
         cp = main.ONOSbench.scp( main.ONOSbench,
@@ -185,7 +182,7 @@
                     onosCounters.append( json.loads( onosCountersRaw[ i ] ) )
                 except ( ValueError, TypeError ):
                     main.log.error( "Could not parse counters response from " +
-                                    str( main.Cluster.active()[ i ] ) )
+                                    str( main.Cluster.active( i ) ) )
                     main.log.warn( repr( onosCountersRaw[ i ] ) )
                     onosCounters.append( [] )
 
@@ -200,7 +197,7 @@
             for controller in enumerate( onosCounters ):
                 for key, value in controller[ 1 ].iteritems():
                     if 'TestON' in key:
-                        node = str( main.Cluster.active()[ controller[ 0 ] ] )
+                        node = str( main.Cluster.active( controller[ 0 ] ) )
                         try:
                             testCounters[ node ].append( { key: value } )
                         except KeyError:
@@ -236,7 +233,7 @@
                 try:
                     onosValue = current.get( counterName )
                 except AttributeError:
-                    node = str( main.Cluster.active()[ i ] )
+                    node = str( main.Cluster.active( i ) )
                     main.log.exception( node + " counters result " +
                                         "is not as expected" )
                     correctResults = main.FALSE
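The main.Cluster.active()[ i ] -> main.Cluster.active( i ) changes in this hunk rely on an accessor that returns the whole active list when called with no argument and a single node when given an index; a sketch of that idiom (not the OnosClusterDriver itself) is:

    class ClusterSketch( object ):
        def __init__( self, nodes ):
            self.runningNodes = nodes

        def active( self, index=None ):
            # No argument: list of active nodes. With an index: that one node.
            activeNodes = [ n for n in self.runningNodes if n.get( "active" ) ]
            return activeNodes if index is None else activeNodes[ index ]

    cluster = ClusterSketch( [ { "name": "ONOS1", "active": True },
                               { "name": "ONOS2", "active": True } ] )
    print( cluster.active() )     # whole list
    print( cluster.active( 0 ) )  # single node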
@@ -341,11 +338,10 @@
 
         if serviceClean:
             main.step( "Clean up ONOS service changes" )
-            ONOSbench = main.Cluster.contollers[0].Bench
-            ONOSbench.handle.sendline( "git checkout -- tools/package/init/onos.conf" )
-            ONOSbench.handle.expect( "\$" )
-            ONOSbench.handle.sendline( "git checkout -- tools/package/init/onos.service" )
-            ONOSbench.handle.expect( "\$" )
+            main.ONOSbench.handle.sendline( "git checkout -- tools/package/init/onos.conf" )
+            main.ONOSbench.handle.expect( "\$" )
+            main.ONOSbench.handle.sendline( "git checkout -- tools/package/init/onos.service" )
+            main.ONOSbench.handle.expect( "\$" )
 
         main.step( "Checking ONOS nodes" )
         nodeResults = utilities.retry( self.nodesCheck,
@@ -374,11 +370,11 @@
             main.log.debug( "Apps: " + str( apps ) )
             activateResult = True
             for app in apps:
-                main.Cluster.active()[0].app( app, "Activate" )
+                main.Cluster.active( 0 ).app( app, "Activate" )
             # TODO: check this worked
             time.sleep( 10 )  # wait for apps to activate
             for app in apps:
-                state = main.Cluster.active()[0].appStatus( app )
+                state = main.Cluster.active( 0 ).appStatus( app )
                 if state == "ACTIVE":
                     activateResult = activateResult and True
                 else:
@@ -506,7 +502,7 @@
         appResults = main.Cluster.command( "appToIDCheck" )
         appCheck = all( i == main.TRUE for i in appResults )
         if not appCheck:
-            ctrl = main.Cluster.active()[0]
+            ctrl = main.Cluster.active( 0 )
             main.log.debug( "%s apps: %s" % ( ctrl.name, ctrl.apps() ) )
             main.log.debug( "%s appIDs: %s" % ( ctrl.name, ctrl.appIDs() ) )
         return appCheck
@@ -548,7 +544,6 @@
         Assign devices to controllers
         """
         import re
-        assert main.numCtrls, "main.numCtrls not defined"
         assert main, "main not defined"
         assert utilities.assert_equals, "utilities.assert_equals not defined"
 
@@ -571,7 +566,7 @@
                 main.log.info( str( response ) )
             except Exception:
                 main.log.info( repr( response ) )
-            for ctrl in main.Cluster.controllers:
+            for ctrl in main.Cluster.runningNodes:
                 if re.search( "tcp:" + ctrl.ipAddress, response ):
                     mastershipCheck = mastershipCheck and main.TRUE
                 else:
@@ -591,7 +586,6 @@
         """
         import time
         import json
-        assert main.numCtrls, "main.numCtrls not defined"
         assert main, "main not defined"
         assert utilities.assert_equals, "utilities.assert_equals not defined"
         try:
@@ -686,7 +680,7 @@
                 host2Id = host2Dict.get( 'id', None )
             if host1Id and host2Id:
                 nodeNum = len( main.Cluster.active() )
-                ctrl = main.Cluster.active()[ i % nodeNum ]
+                ctrl = main.Cluster.active( i % nodeNum )
                 tmpId = ctrl.CLI.addHostIntent( host1Id, host2Id )
                 if tmpId:
                     main.log.info( "Added intent with id: " + tmpId )
@@ -802,7 +796,7 @@
             else:
                 count += 1
         gossipPeriod = int( main.params[ 'timers' ][ 'gossip' ] )
-        maxGossipTime = gossipPeriod * len( main.Cluster.controllers )
+        maxGossipTime = gossipPeriod * len( main.Cluster.runningNodes )
         utilities.assert_greater_equals(
                 expect=maxGossipTime, actual=gossipTime,
                 onpass="ECM anti-entropy for intents worked within " +
@@ -857,7 +851,6 @@
         """
         import json
         import time
-        assert main.numCtrls, "main.numCtrls not defined"
         assert main, "main not defined"
         assert utilities.assert_equals, "utilities.assert_equals not defined"
         main.case( "Verify connectivity by sending traffic across Intents" )
@@ -999,48 +992,23 @@
             onpass="Intents have been installed correctly and pings work",
             onfail="Intents have not been installed correctly, pings failed." )
 
-    def readingState( self, main ):
-        """
-        Reading state of ONOS
-        """
-        import json
-        import time
-        assert main.numCtrls, "main.numCtrls not defined"
-        assert main, "main not defined"
-        assert utilities.assert_equals, "utilities.assert_equals not defined"
-        try:
-            from tests.dependencies.topology import Topology
-        except ImportError:
-            main.log.error( "Topology not found exiting the test" )
-            main.exit()
-        try:
-            main.topoRelated
-        except ( NameError, AttributeError ):
-            main.topoRelated = Topology()
-        main.case( "Setting up and gathering data for current state" )
-        # The general idea for this test case is to pull the state of
-        # ( intents,flows, topology,... ) from each ONOS node
-        # We can then compare them with each other and also with past states
-
+    def checkRoleNotNull( self ):
         main.step( "Check that each switch has a master" )
-        global mastershipState
-        mastershipState = '[]'
-
         # Assert that each device has a master
-        rolesNotNull = all( [ i == main.TRUE for i in main.Cluster.command( "rolesNotNull" ) ] )
+        rolesNotNull = main.Cluster.command( "rolesNotNull", returnBool=True )
         utilities.assert_equals(
             expect=True,
             actual=rolesNotNull,
             onpass="Each device has a master",
             onfail="Some devices don't have a master assigned" )
 
-        main.step( "Get the Mastership of each switch from each controller" )
+    def checkTheRole( self ):
+        main.step( "Read device roles from ONOS" )
         ONOSMastership = main.Cluster.command( "roles" )
-        mastershipCheck = main.FALSE
         consistentMastership = True
         rolesResults = True
         for i in range( len( ONOSMastership ) ):
-            node = str( main.Cluster.active()[ i ] )
+            node = str( main.Cluster.active( i ) )
             if not ONOSMastership[ i ] or "Error" in ONOSMastership[ i ]:
                 main.log.error( "Error in getting " + node + " roles" )
                 main.log.warn( node + " mastership response: " +
@@ -1063,10 +1031,62 @@
             actual=consistentMastership,
             onpass="Switch roles are consistent across all ONOS nodes",
             onfail="ONOS nodes have different views of switch roles" )
+        return ONOSMastership, rolesResults, consistentMastership
+
+    def checkingIntents( self ):
+        main.step( "Get the intents from each controller" )
+        ONOSIntents = main.Cluster.command( "intents", specificDriver=2 )
+        intentsResults = True
+        for i in range( len( ONOSIntents ) ):
+            node = str( main.Cluster.active( i ) )
+            if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
+                main.log.error( "Error in getting " + node + " intents" )
+                main.log.warn( node + " intents response: " +
+                               repr( ONOSIntents[ i ] ) )
+                intentsResults = False
+        utilities.assert_equals(
+            expect=True,
+            actual=intentsResults,
+            onpass="No error in reading intents output",
+            onfail="Error in reading intents from ONOS" )
+        return ONOSIntents, intentsResults
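The per-node error scan in checkingIntents() (an empty response, or one containing "Error") is a pattern reused throughout this file; a compact illustrative version is:

    def responsesOk( nodeNames, responses ):
        # Flag any empty response or one containing the string "Error".
        ok = True
        for name, resp in zip( nodeNames, responses ):
            if not resp or "Error" in resp:
                print( "Error in response from %s: %r" % ( name, resp ) )
                ok = False
        return ok

    print( responsesOk( [ "ONOS1", "ONOS2" ], [ "[]", "Error: timeout" ] ) )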
+
+    def readingState( self, main ):
+        """
+        Reading state of ONOS
+        """
+        import json
+        import time
+        assert main, "main not defined"
+        assert utilities.assert_equals, "utilities.assert_equals not defined"
+        try:
+            from tests.dependencies.topology import Topology
+        except ImportError:
+            main.log.error( "Topology not found exiting the test" )
+            main.cleanup()
+            main.exit()
+        try:
+            main.topoRelated
+        except ( NameError, AttributeError ):
+            main.topoRelated = Topology()
+        main.case( "Setting up and gathering data for current state" )
+        # The general idea for this test case is to pull the state of
+        # ( intents,flows, topology,... ) from each ONOS node
+        # We can then compare them with each other and also with past states
+
+        global mastershipState
+        mastershipState = '[]'
+
+        self.checkRoleNotNull()
+
+        main.step( "Get the Mastership of each switch from each controller" )
+        mastershipCheck = main.FALSE
+
+        ONOSMastership, rolesResults, consistentMastership = self.checkTheRole()
 
         if rolesResults and not consistentMastership:
             for i in range( len( main.Cluster.active() ) ):
-                node = str( main.Cluster.active()[ i ] )
+                node = str( main.Cluster.active( i ) )
                 try:
                     main.log.warn(
                         node + " roles: ",
@@ -1081,25 +1101,13 @@
             mastershipCheck = main.TRUE
             mastershipState = ONOSMastership[ 0 ]
 
-        main.step( "Get the intents from each controller" )
         global intentState
         intentState = []
-        ONOSIntents = main.Cluster.command( "intents" )
+        ONOSIntents, intentsResults = self.checkingIntents()
         intentCheck = main.FALSE
         consistentIntents = True
-        intentsResults = True
-        for i in range( len( ONOSIntents ) ):
-            node = str( main.Cluster.active()[ i ] )
-            if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
-                main.log.error( "Error in getting " + node + " intents" )
-                main.log.warn( node + " intents response: " +
-                               repr( ONOSIntents[ i ] ) )
-                intentsResults = False
-        utilities.assert_equals(
-            expect=True,
-            actual=intentsResults,
-            onpass="No error in reading intents output",
-            onfail="Error in reading intents from ONOS" )
 
         main.step( "Check for consistency in Intents from each controller" )
         if all( [ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
@@ -1156,7 +1164,7 @@
                                         indent=4,
                                         separators=( ',', ': ' ) ) )
             for i in range( len( ONOSIntents ) ):
-                node = str( main.Cluster.active()[ i ] )
+                node = str( main.Cluster.active( i ) )
                 if ONOSIntents[ i ] != ONOSIntents[ -1 ]:
                     main.log.debug( node + " intents: " )
                     main.log.debug( json.dumps( json.loads( ONOSIntents[ i ] ),
@@ -1172,13 +1180,13 @@
         main.step( "Get the flows from each controller" )
         global flowState
         flowState = []
-        ONOSFlows = main.Cluster.command( "flows" ) # TODO: Possible arg: sleep = 30
+        ONOSFlows = main.Cluster.command( "flows", specificDriver=2 ) # TODO: Possible arg: sleep = 30
         ONOSFlowsJson = []
         flowCheck = main.FALSE
         consistentFlows = True
         flowsResults = True
         for i in range( len( ONOSFlows ) ):
-            node = str( main.Cluster.active()[ i ] )
+            node = str( main.Cluster.active( i ) )
             if not ONOSFlows[ i ] or "Error" in ONOSFlows[ i ]:
                 main.log.error( "Error in getting " + node + " flows" )
                 main.log.warn( node + " flows response: " +
@@ -1215,7 +1223,7 @@
 
         if flowsResults and not consistentFlows:
             for i in range( len( ONOSFlows ) ):
-                node = str( main.Cluster.active()[ i ] )
+                node = str( main.Cluster.active( i ) )
                 try:
                     main.log.warn(
                         node + " flows: " +
@@ -1281,18 +1289,18 @@
             pingTime=500 )
 
         main.step( "Collecting topology information from ONOS" )
-        devices = main.topoRelated.getAllDevices( main.Cluster.active(), False )
-        hosts = main.topoRelated.getAllHosts( main.Cluster.active(), False, inJson=True )
-        ports = main.topoRelated.getAllPorts( main.Cluster.active(), False )
-        links = main.topoRelated.getAllLinks( main.Cluster.active(), False )
-        clusters = main.topoRelated.getAllClusters( main.Cluster.active(), False )
+        devices = main.topoRelated.getAll( "devices" )
+        hosts = main.topoRelated.getAll( "hosts", inJson=True )
+        ports = main.topoRelated.getAll( "ports" )
+        links = main.topoRelated.getAll( "links" )
+        clusters = main.topoRelated.getAll( "clusters" )
         # Compare json objects for hosts and dataplane clusters
 
         # hosts
         main.step( "Host view is consistent across ONOS nodes" )
         consistentHostsResult = main.TRUE
         for controller in range( len( hosts ) ):
-            controllerStr = str( main.Cluster.active()[ controller ] )
+            controllerStr = str( main.Cluster.active( controller ) )
             if hosts[ controller ] and "Error" not in hosts[ controller ]:
                 if hosts[ controller ] == hosts[ 0 ]:
                     continue
@@ -1319,7 +1327,7 @@
         main.step( "Each host has an IP address" )
         ipResult = main.TRUE
         for controller in range( 0, len( hosts ) ):
-            controllerStr = str( main.Cluster.active()[ controller ] )
+            controllerStr = str( main.Cluster.active( controller ) )
             if hosts[ controller ]:
                 for host in hosts[ controller ]:
                     if not host.get( 'ipAddresses', [] ):
@@ -1336,7 +1344,7 @@
         main.step( "Cluster view is consistent across ONOS nodes" )
         consistentClustersResult = main.TRUE
         for controller in range( len( clusters ) ):
-            controllerStr = str( main.Cluster.active()[ controller ] )
+            controllerStr = str( main.Cluster.active( controller ) )
             if "Error" not in clusters[ controller ]:
                 if clusters[ controller ] == clusters[ 0 ]:
                     continue
@@ -1382,7 +1390,7 @@
         mnLinks = main.Mininet1.getLinks()
         mnHosts = main.Mininet1.getHosts()
         for controller in range( len( main.Cluster.active() ) ):
-            controllerStr = str( main.Cluster.active()[ controller ] )
+            controllerStr = str( main.Cluster.active( controller ) )
             currentDevicesResult = main.topoRelated.compareDevicePort(
                                                 main.Mininet1, controller,
                                                 mnSwitches, devices, ports )
@@ -1448,7 +1456,6 @@
         # TODO: Clean this up so it's not just a cut/paste from the test
         try:
             # Make sure variables are defined/set
-            assert main.numCtrls, "main.numCtrls not defined"
             assert utilities.assert_equals, "utilities.assert_equals not defined"
             assert main.pCounterName, "main.pCounterName not defined"
             assert main.onosSetName, "main.onosSetName not defined"
@@ -1611,7 +1618,7 @@
                                                  args=[ main.onosSetName ] )
             getResults = main.TRUE
             for i in range( len( main.Cluster.active() ) ):
-                node = main.Cluster.active()[ i ]
+                node = main.Cluster.active( i )
                 if isinstance( getResponses[ i ], list ):
                     current = set( getResponses[ i ] )
                     if len( current ) == len( getResponses[ i ] ):
@@ -1643,7 +1650,7 @@
                                                   args=[ main.onosSetName ] )
             sizeResults = main.TRUE
             for i in range( len( main.Cluster.active() ) ):
-                node = main.Cluster.active()[ i ]
+                node = main.Cluster.active( i )
                 if size != sizeResponses[ i ]:
                     sizeResults = main.FALSE
                     main.log.error( node +
@@ -1685,7 +1692,7 @@
                                                  args=[ main.onosSetName ] )
             getResults = main.TRUE
             for i in range( len( main.Cluster.active() ) ):
-                node = main.Cluster.active()[ i ]
+                node = main.Cluster.active( i )
                 if isinstance( getResponses[ i ], list ):
                     current = set( getResponses[ i ] )
                     if len( current ) == len( getResponses[ i ] ):
@@ -1709,7 +1716,7 @@
                                                   args=[ main.onosSetName ] )
             sizeResults = main.TRUE
             for i in range( len( main.Cluster.active() ) ):
-                node = main.Cluster.active()[ i ]
+                node = main.Cluster.active( i )
                 if size != sizeResponses[ i ]:
                     sizeResults = main.FALSE
                     main.log.error( node + " expected a size of " +
@@ -1751,7 +1758,7 @@
                                                  args=[ main.onosSetName ] )
             getResults = main.TRUE
             for i in range( len( main.Cluster.active() ) ):
-                node = main.Cluster.active()[ i ]
+                node = main.Cluster.active( i )
                 if isinstance( getResponses[ i ], list ):
                     current = set( getResponses[ i ] )
                     if len( current ) == len( getResponses[ i ] ):
@@ -1775,7 +1782,7 @@
                                                   args=[ main.onosSetName ] )
             sizeResults = main.TRUE
             for i in range( len( main.Cluster.active() ) ):
-                node = main.Cluster.active()[ i ]
+                node = main.Cluster.active( i )
                 if size != sizeResponses[ i ]:
                     sizeResults = main.FALSE
                     main.log.error( node + " expected a size of " + str( size ) +
@@ -1849,7 +1856,7 @@
                                                  args=[ main.onosSetName ] )
             getResults = main.TRUE
             for i in range( len( main.Cluster.active() ) ):
-                node = main.Cluster.active()[ i ]
+                node = main.Cluster.active( i )
                 if isinstance( getResponses[ i ], list ):
                     current = set( getResponses[ i ] )
                     if len( current ) == len( getResponses[ i ] ):
@@ -1873,7 +1880,7 @@
                                                   args=[ main.onosSetName ] )
             sizeResults = main.TRUE
             for i in range( len( main.Cluster.active() ) ):
-                node = main.Cluster.active()[ i ]
+                node = main.Cluster.active( i )
                 if size != sizeResponses[ i ]:
                     sizeResults = main.FALSE
                     main.log.error( node + " expected a size of " + str( size ) +
@@ -1915,7 +1922,7 @@
                                                  args=[ main.onosSetName ] )
             getResults = main.TRUE
             for i in range( len( main.Cluster.active() ) ):
-                node = main.Cluster.active()[ i ]
+                node = main.Cluster.active( i )
                 if isinstance( getResponses[ i ], list ):
                     current = set( getResponses[ i ] )
                     if len( current ) == len( getResponses[ i ] ):
@@ -1939,7 +1946,7 @@
                                                   args=[ main.onosSetName ] )
             sizeResults = main.TRUE
             for i in range( len( main.Cluster.active() ) ):
-                node = main.Cluster.active()[ i ]
+                node = main.Cluster.active( i )
                 if size != sizeResponses[ i ]:
                     sizeResults = main.FALSE
                     main.log.error( node + " expected a size of " + str( size ) +
@@ -1981,7 +1988,7 @@
                                                  args=[ main.onosSetName ] )
             getResults = main.TRUE
             for i in range( len( main.Cluster.active() ) ):
-                node = main.Cluster.active()[ i ]
+                node = main.Cluster.active( i )
                 if isinstance( getResponses[ i ], list ):
                     current = set( getResponses[ i ] )
                     if len( current ) == len( getResponses[ i ] ):
@@ -2005,7 +2012,7 @@
                                                   args=[ main.onosSetName ] )
             sizeResults = main.TRUE
             for i in range( len( main.Cluster.active() ) ):
-                node = main.Cluster.active()[ i ]
+                node = main.Cluster.active( i )
                 if size != sizeResponses[ i ]:
                     sizeResults = main.FALSE
                     main.log.error( node + " expected a size of " + str( size ) +
@@ -2048,7 +2055,7 @@
                                                  args=[ main.onosSetName ] )
             getResults = main.TRUE
             for i in range( len( main.Cluster.active() ) ):
-                node = main.Cluster.active()[ i ]
+                node = main.Cluster.active( i )
                 if isinstance( getResponses[ i ], list ):
                     current = set( getResponses[ i ] )
                     if len( current ) == len( getResponses[ i ] ):
@@ -2072,7 +2079,7 @@
                                                   args=[ main.onosSetName ] )
             sizeResults = main.TRUE
             for i in range( len( main.Cluster.active() ) ):
-                node = main.Cluster.active()[ i ]
+                node = main.Cluster.active( i )
                 if size != sizeResponses[ i ]:
                     sizeResults = main.FALSE
                     main.log.error( node + " expected a size of " + str( size ) +
@@ -2114,7 +2121,7 @@
                                                  args=[ main.onosSetName ] )
             getResults = main.TRUE
             for i in range( len( main.Cluster.active() ) ):
-                node = main.Cluster.active()[ i ]
+                node = main.Cluster.active( i )
                 if isinstance( getResponses[ i ], list ):
                     current = set( getResponses[ i ] )
                     if len( current ) == len( getResponses[ i ] ):
@@ -2138,7 +2145,7 @@
                                                   args=[ main.onosSetName ] )
             sizeResults = main.TRUE
             for i in range( len( main.Cluster.active() ) ):
-                node = main.Cluster.active()[ i ]
+                node = main.Cluster.active( i )
                 if size != sizeResponses[ i ]:
                     sizeResults = main.FALSE
                     main.log.error( node + " expected a size of " + str( size ) +
@@ -2181,7 +2188,7 @@
                                                  args=[ main.onosSetName ] )
             getResults = main.TRUE
             for i in range( len( main.Cluster.active() ) ):
-                node = main.Cluster.active()[ i ]
+                node = main.Cluster.active( i )
                 if isinstance( getResponses[ i ], list ):
                     current = set( getResponses[ i ] )
                     if len( current ) == len( getResponses[ i ] ):
@@ -2205,7 +2212,7 @@
                                                   args=[ main.onosSetName ] )
             sizeResults = main.TRUE
             for i in range( len( main.Cluster.active() ) ):
-                node = main.Cluster.active()[ i ]
+                node = main.Cluster.active( i )
                 if size != sizeResponses[ i ]:
                     sizeResults = main.FALSE
                     main.log.error( node + " expected a size of " +
@@ -2524,7 +2531,6 @@
         """
         import os
         import time
-        assert main.numCtrls, "main.numCtrls not defined"
         assert main, "main not defined"
         assert utilities.assert_equals, "utilities.assert_equals not defined"
 
@@ -2551,7 +2557,7 @@
             logFiles = [ "karaf.log", "karaf.log.1" ]
             # NOTE: must end in /
             for f in logFiles:
-                for ctrl in main.Cluster.controllers:
+                for ctrl in main.Cluster.runningNodes:
                     dstName = main.logdir + "/" + ctrl.name + "-" + f
                     main.ONOSbench.secureCopy( ctrl.user_name, ctrl.ipAddress,
                                                logFolder + f, dstName )
@@ -2561,7 +2567,7 @@
             logFiles = [ "stderr.log", "stdout.log" ]
             # NOTE: must end in /
             for f in logFiles:
-                for ctrl in main.Cluster.controllers:
+                for ctrl in main.Cluster.runningNodes:
                     dstName = main.logdir + "/" + ctrl.name + "-" + f
                     main.ONOSbench.secureCopy( ctrl.user_name, ctrl.ipAddress,
                                                logFolder + f, dstName )
@@ -2575,7 +2581,7 @@
                                  onfail="MN cleanup NOT successful" )
 
         main.step( "Checking ONOS Logs for errors" )
-        for ctrl in main.Cluster.controllers:
+        for ctrl in main.Cluster.runningNodes:
             main.log.debug( "Checking logs for errors on " + ctrl.name + ":" )
             main.log.warn( main.ONOSbench.checkLogs( ctrl.ipAddress ) )
 
@@ -2592,7 +2598,6 @@
         Assign mastership to controllers
         """
         import time
-        assert main.numCtrls, "main.numCtrls not defined"
         assert main, "main not defined"
         assert utilities.assert_equals, "utilities.assert_equals not defined"
 
@@ -2616,45 +2621,45 @@
                 # set up correct variables:
                 if i == 1:
                     c = 0
-                    ip = main.Cluster.active()[ c ].ip_address  # ONOS1
+                    ip = main.Cluster.active( c ).ip_address  # ONOS1
                     deviceId = onosCli.getDevice( "1000" ).get( 'id' )
                 elif i == 2:
-                    c = 1 % main.numCtrls
-                    ip = main.Cluster.active()[ c ].ip_address  # ONOS2
+                    c = 1 % main.Cluster.numCtrls
+                    ip = main.Cluster.active( c ).ip_address  # ONOS2
                     deviceId = onosCli.getDevice( "2000" ).get( 'id' )
                 elif i == 3:
-                    c = 1 % main.numCtrls
-                    ip = main.Cluster.active()[ c ].ip_address  # ONOS2
+                    c = 1 % main.Cluster.numCtrls
+                    ip = main.Cluster.active( c ).ip_address  # ONOS2
                     deviceId = onosCli.getDevice( "3000" ).get( 'id' )
                 elif i == 4:
-                    c = 3 % main.numCtrls
-                    ip = main.Cluster.active()[ c ].ip_address  # ONOS4
+                    c = 3 % main.Cluster.numCtrls
+                    ip = main.Cluster.active( c ).ip_address  # ONOS4
                     deviceId = onosCli.getDevice( "3004" ).get( 'id' )
                 elif i == 5:
-                    c = 2 % main.numCtrls
-                    ip = main.Cluster.active()[ c ].ip_address  # ONOS3
+                    c = 2 % main.Cluster.numCtrls
+                    ip = main.Cluster.active( c ).ip_address  # ONOS3
                     deviceId = onosCli.getDevice( "5000" ).get( 'id' )
                 elif i == 6:
-                    c = 2 % main.numCtrls
-                    ip = main.Cluster.active()[ c ].ip_address  # ONOS3
+                    c = 2 % main.Cluster.numCtrls
+                    ip = main.Cluster.active( c ).ip_address  # ONOS3
                     deviceId = onosCli.getDevice( "6000" ).get( 'id' )
                 elif i == 7:
-                    c = 5 % main.numCtrls
-                    ip = main.Cluster.active()[ c ].ip_address  # ONOS6
+                    c = 5 % main.Cluster.numCtrls
+                    ip = main.Cluster.active( c ).ip_address  # ONOS6
                     deviceId = onosCli.getDevice( "6007" ).get( 'id' )
                 elif i >= 8 and i <= 17:
-                    c = 4 % main.numCtrls
-                    ip = main.Cluster.active()[ c ].ip_address  # ONOS5
+                    c = 4 % main.Cluster.numCtrls
+                    ip = main.Cluster.active( c ).ip_address  # ONOS5
                     dpid = '3' + str( i ).zfill( 3 )
                     deviceId = onosCli.getDevice( dpid ).get( 'id' )
                 elif i >= 18 and i <= 27:
-                    c = 6 % main.numCtrls
-                    ip = main.Cluster.active()[ c ].ip_address  # ONOS7
+                    c = 6 % main.Cluster.numCtrls
+                    ip = main.Cluster.active( c ).ip_address  # ONOS7
                     dpid = '6' + str( i ).zfill( 3 )
                     deviceId = onosCli.getDevice( dpid ).get( 'id' )
                 elif i == 28:
                     c = 0
-                    ip = main.Cluster.active()[ c ].ip_address  # ONOS1
+                    ip = main.Cluster.active( c ).ip_address  # ONOS1
                     deviceId = onosCli.getDevice( "2800" ).get( 'id' )
                 else:
                     main.log.error( "You didn't write an else statement for " +
@@ -2705,7 +2710,6 @@
         The bring up stopped nodes
         """
         import time
-        assert main.numCtrls, "main.numCtrls not defined"
         assert main, "main not defined"
         assert utilities.assert_equals, "utilities.assert_equals not defined"
         assert main.kill, "main.kill not defined"
@@ -2779,8 +2783,20 @@
         utilities.assert_equals( expect=main.TRUE, actual=runResults,
                                  onpass="ONOS nodes reran for election topic",
                                  onfail="Errror rerunning for election" )
+
+    def tempCell( self, cellName, ipList ):
+        main.step( "Create cell file" )
+        cellAppString = main.params[ 'ENV' ][ 'appString' ]
 
-    def checkStateAfterONOS( self, main, afterWhich, compareSwitch=False, isRestart=False ):
+        main.ONOSbench.createCellFile( main.ONOSbench.ip_address, cellName,
+                                       main.Mininet1.ip_address,
+                                       cellAppString, ipList, main.ONOScli1.karafUser )
+        main.step( "Applying cell variable to environment" )
+        cellResult = main.ONOSbench.setCell( cellName )
+        verifyResult = main.ONOSbench.verifyCell()
+
+    def checkStateAfterEvent( self, main, afterWhich, compareSwitch=False, isRestart=False ):
         """
         afterWhich :
             0: failure
@@ -2790,55 +2806,21 @@
         Check state after ONOS failure/scaling
         """
         import json
-        assert main.numCtrls, "main.numCtrls not defined"
         assert main, "main not defined"
         assert utilities.assert_equals, "utilities.assert_equals not defined"
         main.case( "Running ONOS Constant State Tests" )
 
         OnosAfterWhich = [ "failure" , "scaliing" ]
 
-        main.step( "Check that each switch has a master" )
         # Assert that each device has a master
-        rolesNotNull = all( [ i == main.TRUE for i in main.Cluster.command( "rolesNotNull" ) ] )
-        utilities.assert_equals(
-            expect=True,
-            actual=rolesNotNull,
-            onpass="Each device has a master",
-            onfail="Some devices don't have a master assigned" )
+        self.checkRoleNotNull()
 
-        main.step( "Read device roles from ONOS" )
-        ONOSMastership = main.Cluster.command( "roles" )
+        ONOSMastership, rolesResults, consistentMastership = self.checkTheRole()
         mastershipCheck = main.FALSE
-        consistentMastership = True
-        rolesResults = True
-        for i in range( len( ONOSMastership ) ):
-            node = str( main.Cluster.active()[ i ] )
-            if not ONOSMastership[ i ] or "Error" in ONOSMastership[ i ]:
-                main.log.error( "Error in getting " + node + " roles" )
-                main.log.warn( node + " mastership response: " +
-                               repr( ONOSMastership[ i ] ) )
-                rolesResults = False
-        utilities.assert_equals(
-            expect=True,
-            actual=rolesResults,
-            onpass="No error in reading roles output",
-            onfail="Error in reading roles from ONOS" )
-
-        main.step( "Check for consistency in roles from each controller" )
-        if all( [ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
-            main.log.info(
-                "Switch roles are consistent across all ONOS nodes" )
-        else:
-            consistentMastership = False
-        utilities.assert_equals(
-            expect=True,
-            actual=consistentMastership,
-            onpass="Switch roles are consistent across all ONOS nodes",
-            onfail="ONOS nodes have different views of switch roles" )
 
         if rolesResults and not consistentMastership:
             for i in range( len( ONOSMastership ) ):
-                node = str( main.Cluster.active()[ i ] )
+                node = str( main.Cluster.active( i ) )
                 main.log.warn( node + " roles: ",
                                json.dumps( json.loads( ONOSMastership[ i ] ),
                                            sort_keys=True,
@@ -2878,23 +2860,9 @@
                 onfail="Mastership of some switches changed" )
 
         # NOTE: we expect mastership to change on controller failure/scaling down
-        main.step( "Get the intents and compare across all nodes" )
-        ONOSIntents = main.Cluster.command( "intents" )
+        ONOSIntents, intentsResults = self.checkingIntents()
         intentCheck = main.FALSE
         consistentIntents = True
-        intentsResults = True
-        for i in range( len( ONOSIntents ) ):
-            if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
-                ctrl = main.Cluster.active()[ i ]
-                main.log.error( "Error in getting " + ctrl.name + " intents" )
-                main.log.warn( ctrl.name + " intents response: " +
-                               repr( ONOSIntents[ i ] ) )
-                intentsResults = False
-        utilities.assert_equals(
-            expect=True,
-            actual=intentsResults,
-            onpass="No error in reading intents output",
-            onfail="Error in reading intents from ONOS" )
 
         main.step( "Check for consistency in Intents from each controller" )
         if all( [ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
@@ -3068,13 +3036,13 @@
         """
         import json
         import time
-        assert main.numCtrls, "main.numCtrls not defined"
         assert main, "main not defined"
         assert utilities.assert_equals, "utilities.assert_equals not defined"
         try:
             from tests.dependencies.topology import Topology
         except ImportError:
             main.log.error( "Topology not found exiting the test" )
+            main.cleanup()
             main.exit()
         try:
             main.topoRelated
@@ -3097,18 +3065,18 @@
             hostAttachmentResults = True
             count += 1
             cliStart = time.time()
-            devices = main.topoRelated.getAllDevices( main.Cluster.active(), True,
+            devices = main.topoRelated.getAll( "devices", True,
                                                       kwargs={ 'sleep': 5, 'attempts': 5,
                                                                'randomTime': True } )
             ipResult = main.TRUE
 
-            hosts = main.topoRelated.getAllHosts( main.Cluster.active(), True,
+            hosts = main.topoRelated.getAll( "hosts", True,
                                                   kwargs={ 'sleep': 5, 'attempts': 5,
                                                            'randomTime': True },
                                                   inJson=True )
 
             for controller in range( 0, len( hosts ) ):
-                controllerStr = str( main.Cluster.active()[ controller ] )
+                controllerStr = str( main.Cluster.active( controller ) )
                 if hosts[ controller ]:
                     for host in hosts[ controller ]:
                         if host is None or host.get( 'ipAddresses', [] ) == []:
@@ -3116,13 +3084,13 @@
                                 "Error with host ipAddresses on controller" +
                                 controllerStr + ": " + str( host ) )
                             ipResult = main.FALSE
-            ports = main.topoRelated.getAllPorts( main.Cluster.active() , True,
+            ports = main.topoRelated.getAll( "ports" , True,
                                                   kwargs={ 'sleep': 5, 'attempts': 5,
                                                            'randomTime': True } )
-            links = main.topoRelated.getAllLinks( main.Cluster.active(), True,
+            links = main.topoRelated.getAll( "links", True,
                                                   kwargs={ 'sleep': 5, 'attempts': 5,
                                                            'randomTime': True } )
-            clusters = main.topoRelated.getAllClusters( main.Cluster.active(), True,
+            clusters = main.topoRelated.getAll( "clusters", True,
                                                         kwargs={ 'sleep': 5, 'attempts': 5,
                                                                  'randomTime': True } )
 
@@ -3144,7 +3112,7 @@
             mnLinks = main.Mininet1.getLinks()
             mnHosts = main.Mininet1.getHosts()
             for controller in range( len( main.Cluster.active() ) ):
-                controllerStr = str( main.Cluster.active()[ controller ] )
+                controllerStr = str( main.Cluster.active( controller ) )
                 currentDevicesResult = main.topoRelated.compareDevicePort( main.Mininet1, controller,
                                                           mnSwitches,
                                                           devices, ports )
@@ -3284,7 +3252,7 @@
         main.step( "Hosts view is consistent across all ONOS nodes" )
         consistentHostsResult = main.TRUE
         for controller in range( len( hosts ) ):
-            controllerStr = str( main.Cluster.active()[ controller ] )
+            controllerStr = str( main.Cluster.active( controller ) )
             if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
                 if hosts[ controller ] == hosts[ 0 ]:
                     continue
@@ -3326,7 +3294,7 @@
         main.step( "Clusters view is consistent across all ONOS nodes" )
         consistentClustersResult = main.TRUE
         for controller in range( len( clusters ) ):
-            controllerStr = str( main.Cluster.active()[ controller ] )
+            controllerStr = str( main.Cluster.active( controller ) )
             if "Error" not in clusters[ controller ]:
                 if clusters[ controller ] == clusters[ 0 ]:
                     continue
@@ -3428,7 +3396,6 @@
         Link fromS-toS down
         """
         import time
-        assert main.numCtrls, "main.numCtrls not defined"
         assert main, "main not defined"
         assert utilities.assert_equals, "utilities.assert_equals not defined"
         # NOTE: You should probably run a topology check after this
@@ -3454,7 +3421,6 @@
         Link fromS-toS up
         """
         import time
-        assert main.numCtrls, "main.numCtrls not defined"
         assert main, "main not defined"
         assert utilities.assert_equals, "utilities.assert_equals not defined"
         # NOTE: You should probably run a topology check after this
@@ -3480,7 +3446,6 @@
         """
         # NOTE: You should probably run a topology check after this
         import time
-        assert main.numCtrls, "main.numCtrls not defined"
         assert main, "main not defined"
         assert utilities.assert_equals, "utilities.assert_equals not defined"
 
@@ -3515,7 +3480,6 @@
         """
         # NOTE: You should probably run a topology check after this
         import time
-        assert main.numCtrls, "main.numCtrls not defined"
         assert main, "main not defined"
         assert utilities.assert_equals, "utilities.assert_equals not defined"
 
@@ -3550,7 +3514,6 @@
         """
         start election app on all onos nodes
         """
-        assert main.numCtrls, "main.numCtrls not defined"
         assert main, "main not defined"
         assert utilities.assert_equals, "utilities.assert_equals not defined"
 
@@ -3605,7 +3568,6 @@
                 withdrawl and later before withdrawl vs after re-election
         """
         import time
-        assert main.numCtrls, "main.numCtrls not defined"
         assert main, "main not defined"
         assert utilities.assert_equals, "utilities.assert_equals not defined"
 
@@ -3619,11 +3581,11 @@
         newLeader = ''  # the new leader from newLeaders, None if not same
         oldLeaderCLI = None  # the CLI of the old leader used for re-electing
         expectNoLeader = False  # True when there is only one leader
-        if len( main.Cluster.controllers ) == 1:
+        if len( main.Cluster.runningNodes ) == 1:
             expectNoLeader = True
 
         main.step( "Run for election on each node" )
-        electionResult = all( [ i == main.TRUE for i in main.Cluster.command( "electionTestRun" ) ] )
+        electionResult = main.Cluster.command( "electionTestRun", returnBool=True )
         utilities.assert_equals(
             expect=True,
             actual=electionResult,
@@ -3762,7 +3724,6 @@
         Install Distributed Primitives app
         """
         import time
-        assert main.numCtrls, "main.numCtrls not defined"
         assert main, "main not defined"
         assert utilities.assert_equals, "utilities.assert_equals not defined"