Add HApowerFailure test

This requires at least one patch to ONOS for the `onos-power` script to
support non-default cell usernames and another patch to the ONOS warden
to allow multiple node failures.

Also included:
- Logging changes to help debug multithreaded sections of the test
- Add input validation in functions that don't directly call the CLI
- Remove some verbose logging
- Distribute some ONOS commands among the active nodes
- Refactor clearing of the ONOS CLI pexpect buffer before sending a
  command into its own function (see the sketch below)
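
A minimal sketch of the refactored buffer-clearing helper, assuming a
pexpect-based CLI handle like the one in onosclidriver.py (the helper
name, prompt attribute, and default timeout are illustrative, not
necessarily what was merged):

    def clearBuffer( self, debug=False, timeout=1 ):
        """
        Consume any stale output left in the pexpect buffer so the next
        sendline() does not parse the response of an earlier,
        interleaved command.
        """
        response = ''
        while True:
            try:
                # Keep matching prompts until nothing is pending
                self.handle.expect( self.karafPrompt, timeout=timeout )
                response += self.handle.before
            except pexpect.TIMEOUT:
                break  # Buffer is empty
        if debug and response:
            main.log.debug( self.name + ": cleared stale output:\n" + response )
        return response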

Change-Id: If1b868b399878209ab0394956f3b3918c0176909
diff --git a/TestON/tests/HA/HApowerFailure/HApowerFailure.params b/TestON/tests/HA/HApowerFailure/HApowerFailure.params
new file mode 100644
index 0000000..0a8fefe
--- /dev/null
+++ b/TestON/tests/HA/HApowerFailure/HApowerFailure.params
@@ -0,0 +1,110 @@
+<PARAMS>
+    #List of test cases:
+    #CASE1:   Compile ONOS and push it to the test machines
+    #CASE2:   Assign devices to controllers
+    #CASE21:  Assign mastership to controllers
+    #CASE102: Start Spine-Leaf Topology in Mininet
+    #CASE3:   Assign intents
+    #CASE4:   Ping across added host intents
+    #CASE104: Ping between all hosts
+    #CASE5:   Reading state of ONOS
+    #CASE61:  Simulate a power failure on a minority of ONOS nodes
+    #CASE62:  Power the nodes back on
+    #CASE7:   Check state after control plane failure
+    #CASE8:   Compare topo
+    #CASE9:   Link down
+    #CASE10:  Link up
+    #CASE11:  Switch down
+    #CASE12:  Switch up
+    #CASE13:  Clean up
+    #CASE14:  Start election app on all onos nodes
+    #CASE15:  Check that Leadership Election is still functional
+    #CASE16:  Install Distributed Primitives app
+    #CASE17:  Check for basic functionality with distributed primitives
+    <testcases>1,2,8,21,3,4,5,14,15,16,17,[61,17,17,8,7,4,15,17,62],8,7,4,15,17,9,8,4,10,8,4,11,8,4,12,8,4,13</testcases>
+
+    <GRAPH>
+        <nodeCluster>VM</nodeCluster>
+        <builds>20</builds>
+    </GRAPH>
+
+    <apps></apps>
+    <ONOS_Configuration>
+        <org.onosproject.net.intent.impl.compiler.IntentConfigurableRegistrator>
+            <useFlowObjectives>false</useFlowObjectives>
+        </org.onosproject.net.intent.impl.compiler.IntentConfigurableRegistrator>
+        <org.onosproject.store.flow.impl.ECFlowRuleStore>
+            <backupCount>3</backupCount>
+        </org.onosproject.store.flow.impl.ECFlowRuleStore>
+    </ONOS_Configuration>
+    <ONOS_Logging>
+        <org.onosproject.events>TRACE</org.onosproject.events>
+    </ONOS_Logging>
+    <ENV>
+        <cellName>HA</cellName>
+        <appString>events,drivers,openflow,proxyarp,mobility</appString>
+    </ENV>
+    <GIT>
+        <pull>False</pull>
+        <branch>master</branch>
+    </GIT>
+    <num_controllers> 7 </num_controllers>
+    <tcpdump> False </tcpdump>
+
+    <CTRL>
+        <port1>6653</port1>
+        <port2>6653</port2>
+        <port3>6653</port3>
+        <port4>6653</port4>
+        <port5>6653</port5>
+        <port6>6653</port6>
+        <port7>6653</port7>
+    </CTRL>
+    <BACKUP>
+        <ENABLED> False </ENABLED>
+        <TESTONUSER>sdn</TESTONUSER>
+        <TESTONIP>10.128.30.9</TESTONIP>
+    </BACKUP>
+    <PING>
+        <source1>h8</source1>
+        <source2>h9</source2>
+        <source3>h10</source3>
+        <source4>h11</source4>
+        <source5>h12</source5>
+        <source6>h13</source6>
+        <source7>h14</source7>
+        <source8>h15</source8>
+        <source9>h16</source9>
+        <source10>h17</source10>
+        <target1>10.0.0.18</target1>
+        <target2>10.0.0.19</target2>
+        <target3>10.0.0.20</target3>
+        <target4>10.0.0.21</target4>
+        <target5>10.0.0.22</target5>
+        <target6>10.0.0.23</target6>
+        <target7>10.0.0.24</target7>
+        <target8>10.0.0.25</target8>
+        <target9>10.0.0.26</target9>
+        <target10>10.0.0.27</target10>
+    </PING>
+    <timers>
+        <LinkDiscovery>12</LinkDiscovery>
+        <SwitchDiscovery>12</SwitchDiscovery>
+        <gossip>5</gossip>
+    </timers>
+    <kill>
+        <linkSrc> s28 </linkSrc>
+        <linkDst> s3 </linkDst>
+        <switch> s5 </switch>
+        <dpid> 0000000000005000 </dpid>
+        <links> h5 s2 s1 s6 </links>
+    </kill>
+    <MNtcpdump>
+        <intf>eth0</intf>
+        <port> </port>
+        <folder>~/packet_captures/</folder>
+    </MNtcpdump>
+    <cell>
+        <user>jon</user>
+    </cell>
+</PARAMS>
diff --git a/TestON/tests/HA/HApowerFailure/HApowerFailure.params.fabric b/TestON/tests/HA/HApowerFailure/HApowerFailure.params.fabric
new file mode 100644
index 0000000..210d216
--- /dev/null
+++ b/TestON/tests/HA/HApowerFailure/HApowerFailure.params.fabric
@@ -0,0 +1,99 @@
+<PARAMS>
+    #List of test cases:
+    #CASE1:   Compile ONOS and push it to the test machines
+    #CASE2:   Assign devices to controllers
+    #CASE21:  Assign mastership to controllers
+    #CASE102: Start Spine-Leaf Topology in Mininet
+    #CASE3:   Assign intents
+    #CASE4:   Ping across added host intents
+    #CASE104: Ping between all hosts
+    #CASE5:   Reading state of ONOS
+    #CASE61:  Simulate a power failure on a minority of ONOS nodes
+    #CASE62:  Power the nodes back on
+    #CASE7:   Check state after control plane failure
+    #CASE8:   Compare topo
+    #CASE9:   Link down
+    #CASE10:  Link up
+    #CASE11:  Switch down
+    #CASE12:  Switch up
+    #CASE13:  Clean up
+    #CASE14:  Start election app on all onos nodes
+    #CASE15:  Check that Leadership Election is still functional
+    #CASE16:  Install Distributed Primitives app
+    #CASE17:  Check for basic functionality with distributed primitives
+    <testcases>1,102,8,104,5,14,15,16,17,[61,8,7,104,15,17,62],8,7,8,104,15,17,9,8,104,10,8,104,[11,8,104,12,8,104]*0,13</testcases>
+
+    <GRAPH>
+        <nodeCluster>VM</nodeCluster>
+        <builds>20</builds>
+    </GRAPH>
+
+    <apps></apps>
+    <ONOS_Configuration>
+        <org.onosproject.store.flow.impl.ECFlowRuleStore>
+            <backupCount>3</backupCount>
+        </org.onosproject.store.flow.impl.ECFlowRuleStore>
+    </ONOS_Configuration>
+    <ONOS_Logging>
+        <org.onosproject.events>TRACE</org.onosproject.events>
+        <org.onosproject.segmentrouting>DEBUG</org.onosproject.segmentrouting>
+        <org.onosproject.driver.pipeline>DEBUG</org.onosproject.driver.pipeline>
+        <org.onosproject.store.group.impl>DEBUG</org.onosproject.store.group.impl>
+        <org.onosproject.net.flowobjective.impl>DEBUG</org.onosproject.net.flowobjective.impl>
+    </ONOS_Logging>
+    <ENV>
+        <cellName>HA</cellName>
+        <appString>events,drivers,openflow,segmentrouting,netcfghostprovider</appString>
+    </ENV>
+    <GIT>
+        <pull>False</pull>
+        <branch>master</branch>
+    </GIT>
+    <num_controllers> 7 </num_controllers>
+    <tcpdump> False </tcpdump>
+
+    <topology>
+        <files>
+            <topo>~/TestON/tests/USECASE/SegmentRouting/dependencies/fabric.py</topo>
+            <dep1>~/TestON/tests/USECASE/SegmentRouting/dependencies/trellislib.py</dep1>
+            <dep2>~/TestON/tests/USECASE/SegmentRouting/dependencies/routinglib.py</dep2>
+        </files>
+        <topoFile>fabric.py</topoFile>
+        <args> --dhcp=0 --routers=0 --ipv6=0 --ipv4=1 </args>
+        <configPath>/HA/dependencies/json/</configPath>
+        <configName>TRELLIS_CONFIG_ipv4=1_ipv6=0_dhcp=0_routers=0.json</configName>
+    </topology>
+    <CTRL>
+        <port1>6653</port1>
+        <port2>6653</port2>
+        <port3>6653</port3>
+        <port4>6653</port4>
+        <port5>6653</port5>
+        <port6>6653</port6>
+        <port7>6653</port7>
+    </CTRL>
+    <BACKUP>
+        <ENABLED> False </ENABLED>
+        <TESTONUSER>sdn</TESTONUSER>
+        <TESTONIP>10.128.30.9</TESTONIP>
+    </BACKUP>
+    <timers>
+        <NetCfg>5</NetCfg>
+        <SRSetup>60</SRSetup>
+        <LinkDiscovery>60</LinkDiscovery>
+        <SwitchDiscovery>60</SwitchDiscovery>
+        <gossip>5</gossip>
+    </timers>
+    <kill>
+        <linkSrc>spine102</linkSrc>
+        <linkDst>leaf1</linkDst>
+        <switch> spine101 </switch>
+        <dpid> 0000000000000101 </dpid>
+        <links> leaf1 leaf2 leaf2 leaf3 leaf3 leaf4 leaf4 leaf5 leaf5 </links>
+    </kill>
+    <MNtcpdump>
+        <intf>eth0</intf>
+        <port> </port>
+        <folder>~/packet_captures/</folder>
+    </MNtcpdump>
+</PARAMS>
diff --git a/TestON/tests/HA/HApowerFailure/HApowerFailure.params.intents b/TestON/tests/HA/HApowerFailure/HApowerFailure.params.intents
new file mode 100644
index 0000000..1ddb8c1
--- /dev/null
+++ b/TestON/tests/HA/HApowerFailure/HApowerFailure.params.intents
@@ -0,0 +1,108 @@
+<PARAMS>
+    #List of test cases:
+    #CASE1:   Compile ONOS and push it to the test machines
+    #CASE2:   Assign devices to controllers
+    #CASE21:  Assign mastership to controllers
+    #CASE102: Start Spine-Leaf Topology in Mininet
+    #CASE3:   Assign intents
+    #CASE4:   Ping across added host intents
+    #CASE104: Ping between all hosts
+    #CASE5:   Reading state of ONOS
+    #CASE61:  Simulate a power failure on a minority of ONOS nodes
+    #CASE62:  Power the nodes back on
+    #CASE7:   Check state after control plane failure
+    #CASE8:   Compare topo
+    #CASE9:   Link down
+    #CASE10:  Link up
+    #CASE11:  Switch down
+    #CASE12:  Switch up
+    #CASE13:  Clean up
+    #CASE14:  Start election app on all onos nodes
+    #CASE15:  Check that Leadership Election is still functional
+    #CASE16:  Install Distributed Primitives app
+    #CASE17:  Check for basic functionality with distributed primitives
+    <testcases>1,2,8,21,3,4,5,14,15,16,17,[61,8,7,4,15,17,62],8,7,4,15,17,9,8,4,10,8,4,11,8,4,12,8,4,13</testcases>
+
+    <GRAPH>
+        <nodeCluster>VM</nodeCluster>
+        <builds>20</builds>
+    </GRAPH>
+
+    <apps></apps>
+    <ONOS_Configuration>
+        <org.onosproject.net.intent.impl.compiler.IntentConfigurableRegistrator>
+            <useFlowObjectives>false</useFlowObjectives>
+            <defaultFlowObjectiveCompiler>org.onosproject.net.intent.impl.compiler.LinkCollectionIntentObjectiveCompiler</defaultFlowObjectiveCompiler>
+        </org.onosproject.net.intent.impl.compiler.IntentConfigurableRegistrator>
+        <org.onosproject.store.flow.impl.ECFlowRuleStore>
+            <backupCount>3</backupCount>
+        </org.onosproject.store.flow.impl.ECFlowRuleStore>
+    </ONOS_Configuration>
+    <ONOS_Logging>
+        <org.onosproject.events>TRACE</org.onosproject.events>
+    </ONOS_Logging>
+    <ENV>
+        <cellName>HA</cellName>
+        <appString>events,drivers,openflow,proxyarp,mobility</appString>
+    </ENV>
+    <GIT>
+        <pull>False</pull>
+        <branch>master</branch>
+    </GIT>
+    <num_controllers> 7 </num_controllers>
+    <tcpdump> False </tcpdump>
+
+    <CTRL>
+        <port1>6653</port1>
+        <port2>6653</port2>
+        <port3>6653</port3>
+        <port4>6653</port4>
+        <port5>6653</port5>
+        <port6>6653</port6>
+        <port7>6653</port7>
+    </CTRL>
+    <BACKUP>
+        <ENABLED> False </ENABLED>
+        <TESTONUSER>sdn</TESTONUSER>
+        <TESTONIP>10.128.30.9</TESTONIP>
+    </BACKUP>
+    <PING>
+        <source1>h8</source1>
+        <source2>h9</source2>
+        <source3>h10</source3>
+        <source4>h11</source4>
+        <source5>h12</source5>
+        <source6>h13</source6>
+        <source7>h14</source7>
+        <source8>h15</source8>
+        <source9>h16</source9>
+        <source10>h17</source10>
+        <target1>10.0.0.18</target1>
+        <target2>10.0.0.19</target2>
+        <target3>10.0.0.20</target3>
+        <target4>10.0.0.21</target4>
+        <target5>10.0.0.22</target5>
+        <target6>10.0.0.23</target6>
+        <target7>10.0.0.24</target7>
+        <target8>10.0.0.25</target8>
+        <target9>10.0.0.26</target9>
+        <target10>10.0.0.27</target10>
+    </PING>
+    <timers>
+        <LinkDiscovery>12</LinkDiscovery>
+        <SwitchDiscovery>12</SwitchDiscovery>
+        <gossip>5</gossip>
+    </timers>
+    <kill>
+        <linkSrc> s28 </linkSrc>
+        <linkDst> s3 </linkDst>
+        <switch> s5 </switch>
+        <dpid> 0000000000005000 </dpid>
+        <links> h5 s2 s1 s6 </links>
+    </kill>
+    <MNtcpdump>
+        <intf>eth0</intf>
+        <port> </port>
+        <folder>~/packet_captures/</folder>
+    </MNtcpdump>
+</PARAMS>
diff --git a/TestON/tests/HA/HApowerFailure/HApowerFailure.py b/TestON/tests/HA/HApowerFailure/HApowerFailure.py
new file mode 100644
index 0000000..dd40e9c
--- /dev/null
+++ b/TestON/tests/HA/HApowerFailure/HApowerFailure.py
@@ -0,0 +1,375 @@
+"""
+Copyright 2018 Open Networking Foundation ( ONF )
+
+Please refer questions to either the onos test mailing list at <onos-test@onosproject.org>,
+the System Testing Plans and Results wiki page at <https://wiki.onosproject.org/x/voMg>,
+or the System Testing Guide page at <https://wiki.onosproject.org/x/WYQg>
+
+    TestON is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 2 of the License, or
+    ( at your option ) any later version.
+
+    TestON is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with TestON.  If not, see <http://www.gnu.org/licenses/>.
+"""
+"""
+Description: This test is to determine if ONOS can handle
+    a minority of its nodes losing power and restarting
+
+List of test cases:
+CASE1: Compile ONOS and push it to the test machines
+CASE2: Assign devices to controllers
+CASE21: Assign mastership to controllers
+CASE3: Assign intents
+CASE4: Ping across added host intents
+CASE5: Reading state of ONOS
+CASE61: The failure-inducing case.
+CASE62: The failure-recovery case.
+CASE7: Check state after control plane failure
+CASE8: Compare topo
+CASE9: Link s3-s28 down
+CASE10: Link s3-s28 up
+CASE11: Switch down
+CASE12: Switch up
+CASE13: Clean up
+CASE14: start election app on all onos nodes
+CASE15: Check that Leadership Election is still functional
+CASE16: Install Distributed Primitives app
+CASE17: Check for basic functionality with distributed primitives
+"""
+class HApowerFailure:
+
+    def __init__( self ):
+        self.default = ''
+
+    def CASE1( self, main ):
+        """
+        CASE1 is to compile ONOS and push it to the test machines
+
+        Startup sequence:
+        cell <name>
+        onos-verify-cell
+        NOTE: temporary - onos-remove-raft-logs
+        onos-uninstall
+        start mininet
+        git pull
+        mvn clean install
+        onos-package
+        onos-install -f
+        onos-wait-for-start
+        start cli sessions
+        start tcpdump
+        """
+        main.log.info( "ONOS HA test: Simulate a power failure on a minority of ONOS nodes - " +
+                         "initialization" )
+        # These are for csv plotting in jenkins
+        main.HAlabels = []
+        main.HAdata = []
+        try:
+            from tests.dependencies.ONOSSetup import ONOSSetup
+            main.testSetUp = ONOSSetup()
+        except ImportError:
+            main.log.error( "ONOSSetup not found. exiting the test" )
+            main.cleanAndExit()
+        main.testSetUp.envSetupDescription()
+        try:
+            from tests.HA.dependencies.HA import HA
+            main.HA = HA()
+            cellName = main.params[ 'ENV' ][ 'cellName' ]
+            main.apps = main.params[ 'ENV' ][ 'appString' ]
+            stepResult = main.testSetUp.envSetup( includeCaseDesc=False )
+        except Exception as e:
+            main.testSetUp.envSetupException( e )
+        main.testSetUp.evnSetupConclusion( stepResult )
+
+        applyFuncs = [ main.HA.customizeOnosGenPartitions,
+                       main.HA.copyBackupConfig,
+                       main.ONOSbench.preventAutoRespawn ]
+        applyArgs = [ None, None, None ]
+        try:
+            if main.params[ 'topology' ][ 'topoFile' ]:
+                main.log.info( 'Skipping start of Mininet in this case, make sure you start it elsewhere' )
+            else:
+                applyFuncs.append( main.HA.startingMininet )
+                applyArgs.append( None )
+        except ( KeyError, IndexError ):
+            applyFuncs.append( main.HA.startingMininet )
+            applyArgs.append( None )
+
+        main.testSetUp.ONOSSetUp( main.Cluster, cellName=cellName, removeLog=True,
+                                  extraApply=applyFuncs,
+                                  applyArgs=applyArgs,
+                                  extraClean=main.HA.cleanUpGenPartition,
+                                  includeCaseDesc=False )
+        main.HA.initialSetUp( serviceClean=True )
+
+        main.step( 'Set logging levels' )
+        logging = True
+        try:
+            logs = main.params.get( 'ONOS_Logging', False )
+            if logs:
+                for namespace, level in logs.items():
+                    for ctrl in main.Cluster.active():
+                        ctrl.CLI.logSet( level, namespace )
+        except AttributeError:
+            logging = False
+        utilities.assert_equals( expect=True, actual=logging,
+                                 onpass="Set log levels",
+                                 onfail="Failed to set log levels" )
+
+    def CASE2( self, main ):
+        """
+        Assign devices to controllers
+        """
+        main.HA.assignDevices( main )
+
+    def CASE102( self, main ):
+        """
+        Set up Spine-Leaf fabric topology in Mininet
+        """
+        main.HA.startTopology( main )
+
+    def CASE21( self, main ):
+        """
+        Assign mastership to controllers
+        """
+        main.HA.assignMastership( main )
+
+    def CASE3( self, main ):
+        """
+        Assign intents
+        """
+        main.HA.assignIntents( main )
+
+    def CASE4( self, main ):
+        """
+        Ping across added host intents
+        """
+        main.HA.pingAcrossHostIntent( main )
+
+    def CASE104( self, main ):
+        """
+        Ping Hosts
+        """
+        main.case( "Check connectivity" )
+        main.step( "Ping between all hosts" )
+        pingResult = main.Mininet1.pingall()
+        utilities.assert_equals( expect=main.TRUE, actual=pingResult,
+                                 onpass="All Pings Passed",
+                                 onfail="Failed to ping between all hosts" )
+
+    def CASE5( self, main ):
+        """
+        Reading state of ONOS
+        """
+        main.HA.readingState( main )
+
+    def CASE61( self, main ):
+        """
+        The failure-inducing case.
+        """
+        assert main, "main not defined"
+        assert utilities.assert_equals, "utilities.assert_equals not defined"
+        main.case( "Simulate a power failure on a minority of ONOS nodes" )
+
+        main.step( "Checking ONOS Logs for errors" )
+        for ctrl in main.Cluster.active():
+            main.log.debug( "Checking logs for errors on " + ctrl.name + ":" )
+            main.log.warn( ctrl.checkLogs( ctrl.ipAddress ) )
+
+        main.kill = [ main.Cluster.runningNodes[ 0 ] ]  # ONOS nodes to kill, listed by index in main.Cluster.runningNodes
+        n = len( main.Cluster.runningNodes )  # Number of nodes
+        p = ( ( n + 1 ) / 2 ) + 1  # Number of partitions
+        if n > 3:
+            main.kill.append( main.Cluster.runningNodes[ p - 1 ] )
+            # NOTE: This only works for cluster sizes of 3, 5, or 7.
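+            # For example, with n = 7 running nodes, Python 2 integer division
+            # gives p = ( ( 7 + 1 ) / 2 ) + 1 = 5, so the nodes at indexes
+            # 0 and 4 are powered off.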
+
+        # NOTE: This is to fix an issue with wiki formatting
+        nodeNames = [ node.name for node in main.kill ]
+        # Set the env variables so we actually use the warden power ON/OFF functionality
+        # NOTE: Only works with warden
+        main.ONOSbench.setEnv( "HARD_POWER_OFF", "True" )
+        main.ONOSbench.setEnv( "ONOS_CELL", "borrow" )
+        main.step( "Killing nodes: " + str( nodeNames ) )
+        killResults = main.TRUE
+        userName = main.params[ 'cell' ][ 'user' ]
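+        # Power off each node in main.kill through the warden; this relies on
+        # the onos-power patch for non-default cell usernames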
+        for ctrl in main.kill:
+            killResults = killResults and\
+                          main.ONOSbench.onosPower( ctrl.ipAddress, "off", userName )
+            ctrl.active = False
+        main.Cluster.reset()
+        utilities.assert_equals( expect=main.TRUE, actual=killResults,
+                                 onpass="ONOS nodes killed successfully",
+                                 onfail="ONOS nodes NOT successfully killed" )
+
+        main.step( "Checking ONOS nodes" )
+        nodeResults = utilities.retry( main.Cluster.nodesCheck,
+                                       False,
+                                       sleep=15,
+                                       attempts=5 )
+
+        utilities.assert_equals( expect=True, actual=nodeResults,
+                                 onpass="Nodes check successful",
+                                 onfail="Nodes check NOT successful" )
+
+        if not nodeResults:
+            for ctrl in main.Cluster.active():
+                main.log.debug( "{} components not ACTIVE: \n{}".format(
+                    ctrl.name,
+                    ctrl.CLI.sendline( "scr:list | grep -v ACTIVE" ) ) )
+            main.log.error( "Failed to start ONOS, stopping test" )
+            main.cleanAndExit()
+
+        for i in range( 1, 100 ):
+            main.Cluster.next().summary()
+        for i in range( 1, 100 ):
+            main.Cluster.next().partitions()
+        for ctrl in main.Cluster.active():
+            main.log.warn( repr( ctrl ) )
+
+    def CASE62( self, main ):
+        """
+        The bring up stopped nodes
+        """
+        userName = main.params[ 'cell' ][ 'user' ]
+        # NOTE: The warden will actually power up in reverse alphabetical order of container
+        #       names in a cell, ignoring the IP given.
+        for ctrl in main.kill:
+            main.ONOSbench.onosPower( ctrl.ipAddress, "on", userName )
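+            # Reconnect the ssh sessions for this node's components now that
+            # the machine is powered back on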
+            for component in [ ctrl.CLI, ctrl.server ]:
+                component.connect()
+        main.HA.bringUpStoppedNodes( main )
+        for ctrl in main.Cluster.active():
+            main.log.warn( repr( ctrl ) )
+
+    def CASE7( self, main ):
+        """
+        Check state after ONOS failure
+        """
+        try:
+            main.kill
+        except AttributeError:
+            main.kill = []
+
+        main.HA.checkStateAfterEvent( main, afterWhich=0 )
+        main.step( "Leadership Election is still functional" )
+        # Test of LeadershipElection
+        leaderList = []
+
+        restarted = []
+        for ctrl in main.kill:
+            restarted.append( ctrl.ipAddress )
+        leaderResult = main.TRUE
+
+        for ctrl in main.Cluster.active():
+            leaderN = ctrl.electionTestLeader()
+            leaderList.append( leaderN )
+            if leaderN == main.FALSE:
+                # error in response
+                main.log.error( "Something is wrong with " +
+                                 "electionTestLeader function, check the" +
+                                 " error logs" )
+                leaderResult = main.FALSE
+            elif leaderN is None:
+                main.log.error( ctrl.name +
+                                 " shows no leader was elected for the" +
+                                 " election-app after the old one died" )
+                leaderResult = main.FALSE
+            elif leaderN in restarted:
+                main.log.error( ctrl.name + " shows " + str( leaderN ) +
+                                 " as leader for the election-app, but it " +
+                                 "was restarted" )
+                leaderResult = main.FALSE
+        if len( set( leaderList ) ) != 1:
+            leaderResult = main.FALSE
+            main.log.error(
+                "Inconsistent view of leader for the election test app" )
+            main.log.debug( leaderList )
+        utilities.assert_equals(
+            expect=main.TRUE,
+            actual=leaderResult,
+            onpass="Leadership election passed",
+            onfail="Something went wrong with Leadership election" )
+
+    def CASE8( self, main ):
+        """
+        Compare topo
+        """
+        main.HA.compareTopo( main )
+
+    def CASE9( self, main ):
+        """
+        Link down
+        """
+        src = main.params[ 'kill' ][ 'linkSrc' ]
+        dst = main.params[ 'kill' ][ 'linkDst' ]
+        main.HA.linkDown( main, src, dst )
+
+    def CASE10( self, main ):
+        """
+        Link up
+        """
+        src = main.params[ 'kill' ][ 'linkSrc' ]
+        dst = main.params[ 'kill' ][ 'linkDst' ]
+        main.HA.linkUp( main, src, dst )
+
+    def CASE11( self, main ):
+        """
+        Switch Down
+        """
+        # NOTE: You should probably run a topology check after this
+        main.HA.switchDown( main )
+
+    def CASE12( self, main ):
+        """
+        Switch Up
+        """
+        # NOTE: You should probably run a topology check after this
+        main.HA.switchUp( main )
+
+    def CASE13( self, main ):
+        """
+        Clean up
+        """
+        main.HA.cleanUp( main )
+
+    def CASE14( self, main ):
+        """
+        Start election app on all onos nodes
+        """
+        main.HA.startElectionApp( main )
+
+    def CASE15( self, main ):
+        """
+        Check that Leadership Election is still functional
+            15.1 Run election on each node
+            15.2 Check that each node has the same leaders and candidates
+            15.3 Find current leader and withdraw
+            15.4 Check that a new node was elected leader
+            15.5 Check that the new leader was the candidate of the old leader
+            15.6 Run for election on old leader
+            15.7 Check that oldLeader is a candidate, and leader if only 1 node
+            15.8 Make sure that the old leader was added to the candidate list
+
+            old and new variable prefixes refer to data from before vs after
+                withdrawal and later before withdrawal vs after re-election
+        """
+        main.HA.isElectionFunctional( main )
+
+    def CASE16( self, main ):
+        """
+        Install Distributed Primitives app
+        """
+        main.HA.installDistributedPrimitiveApp( main )
+
+    def CASE17( self, main ):
+        """
+        Check for basic functionality with distributed primitives
+        """
+        main.HA.checkDistPrimitivesFunc( main )
diff --git a/TestON/tests/HA/HApowerFailure/HApowerFailure.topo b/TestON/tests/HA/HApowerFailure/HApowerFailure.topo
new file mode 100644
index 0000000..4bf4bd4
--- /dev/null
+++ b/TestON/tests/HA/HApowerFailure/HApowerFailure.topo
@@ -0,0 +1,53 @@
+<TOPOLOGY>
+    <COMPONENT>
+
+        <ONOScell>
+            <host>localhost</host>  # ONOS "bench" machine
+            <user>sdn</user>
+            <password>rocks</password>
+            <type>OnosClusterDriver</type>
+            <connect_order>1</connect_order>
+            <COMPONENTS>
+                <cluster_name></cluster_name>  # Used as a prefix for cluster components. Defaults to 'ONOS'
+                <diff_clihost></diff_clihost>  # Set to True if the CLI host differs from localhost; True or empty. OC# will be used if True.
+                <karaf_username></karaf_username>
+                <karaf_password></karaf_password>
+                <web_user></web_user>
+                <web_pass></web_pass>
+                <rest_port></rest_port>
+                <prompt></prompt>  # TODO: we technically need a few of these, one per component
+                <onos_home></onos_home>  # defines where onos home is
+                <nodes> 7 </nodes>  # number of nodes in the cluster
+            </COMPONENTS>
+        </ONOScell>
+
+        <Mininet1>
+            <host>OCN</host>
+            <user>sdn</user>
+            <password>rocks</password>
+            <type>MininetCliDriver</type>
+            <connect_order>2</connect_order>
+            <COMPONENTS>
+                # Specify the options for Mininet
+                <arg1> --custom ~/mininet/custom/obelisk.py </arg1>
+                <arg2> --topo obelisk </arg2>
+                <arg3> --switch ovs,protocols=OpenFlow13 </arg3>
+                <controller> none </controller>
+                <home>~/mininet/custom/</home>
+                <prompt></prompt>
+            </COMPONENTS>
+        </Mininet1>
+
+        <Mininet2>
+            <host>OCN</host>
+            <user>sdn</user>
+            <password>rocks</password>
+            <type>RemoteMininetDriver</type>
+            <connect_order>3</connect_order>
+            <COMPONENTS>
+                <prompt></prompt>
+            </COMPONENTS>
+        </Mininet2>
+
+    </COMPONENT>
+</TOPOLOGY>
diff --git a/TestON/tests/HA/HApowerFailure/README b/TestON/tests/HA/HApowerFailure/README
new file mode 100644
index 0000000..069e5af
--- /dev/null
+++ b/TestON/tests/HA/HApowerFailure/README
@@ -0,0 +1,26 @@
+This test is designed to verify that an ONOS cluster behaves correctly when
+ONOS nodes die due to power failures. Currently, we will kill nodes so that
+each raft partition will lose a member, but we make sure that there is always
+a majority of nodes available in each partition.
+
+As written, the test only supports an ONOS cluster of 3, 5, or 7 nodes.
+This is because the test doesn't apply to a single node cluster, ONOS clusters
+should be deployed in odd numbers, and the partition generation and node
+killing scheme used doesn't give the same properties for clusters of more
+than 7 nodes. Namely, each partition won't have exactly one node killed.
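+
+As a concrete example, following the arithmetic in CASE61: with a 7 node
+cluster the test computes p = ( ( 7 + 1 ) / 2 ) + 1 = 5 (integer division)
+and powers off the nodes at indexes 0 and 4, leaving each partition with a
+majority of its members still running.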
+
+The general structure of the test:
+- Startup
+- Assign switches
+- Verify ONOS state and functionality
+    - Device mastership
+    - Intents
+    - Leadership election
+    - Distributed Primitives
+- Kill some ONOS nodes
+- Verify ONOS state and functionality
+- Restart ONOS nodes
+- Verify ONOS state and functionality
+- Dataplane failures
+    - link down and up
+    - switch down and up
diff --git a/TestON/tests/HA/HApowerFailure/__init__.py b/TestON/tests/HA/HApowerFailure/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/TestON/tests/HA/HApowerFailure/__init__.py
diff --git a/TestON/tests/HA/HApowerFailure/dependencies/__init__.py b/TestON/tests/HA/HApowerFailure/dependencies/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/TestON/tests/HA/HApowerFailure/dependencies/__init__.py
diff --git a/TestON/tests/HA/HAupgrade/HAupgrade.py b/TestON/tests/HA/HAupgrade/HAupgrade.py
index 8122044..3eb6c94 100644
--- a/TestON/tests/HA/HAupgrade/HAupgrade.py
+++ b/TestON/tests/HA/HAupgrade/HAupgrade.py
@@ -280,11 +280,17 @@
         main.case( "Commit upgrade" )
 
         main.step( "Send the command to commit the upgrade" )
+        for ctrl in main.Cluster.active():
+            status = ctrl.issu()
+            main.log.debug( status )
         ctrl = main.Cluster.next().CLI
         committed = ctrl.issuCommit()
         utilities.assert_equals( expect=main.TRUE, actual=committed,
                                  onpass="Upgrade has been committed",
                                  onfail="Error committing the upgrade" )
+        for ctrl in main.Cluster.active():
+            status = ctrl.issu()
+            main.log.debug( status )
 
         main.step( "Check the status of the upgrade" )
         ctrl = main.Cluster.next().CLI
@@ -300,7 +306,6 @@
                                  onpass="Nodes check successful",
                                  onfail="Nodes check NOT successful" )
 
-
     def CASE7( self, main ):
         """
         Check state after ONOS failure
diff --git a/TestON/tests/HA/dependencies/HA.py b/TestON/tests/HA/dependencies/HA.py
index 0e5da1d..8581248 100644
--- a/TestON/tests/HA/dependencies/HA.py
+++ b/TestON/tests/HA/dependencies/HA.py
@@ -49,7 +49,7 @@
             main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
             main.ONOSbench.handle.sendline( "git checkout -- tools/test/bin/onos-gen-partitions" )
             main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
-            main.log.info( " Cleaning custom gen partitions file, response was: \n" +
+            main.log.info( "Cleaning custom gen partitions file, response was: \n" +
                            str( main.ONOSbench.handle.before ) )
         except ( pexpect.TIMEOUT, pexpect.EOF ):
             main.log.exception( "ONOSbench: pexpect exception found:" +
@@ -134,12 +134,10 @@
         ip = main.ONOSbench.getIpAddr( iface=iface )
         metaFile = "cluster.json"
         javaArgs = r"-Donos.cluster.metadata.uri=http:\/\/{}:{}\/{}".format( ip, main.serverPort, metaFile )
-        main.log.warn( javaArgs )
         main.log.warn( repr( javaArgs ) )
         handle = main.ONOSbench.handle
         sed = r"sed -i 's/bash/bash\nexport JAVA_OPTS=${{JAVA_OPTS:-{}}}\n/' {}".format( javaArgs,
                                                                                          main.onosServicepath )
-        main.log.warn( sed )
         main.log.warn( repr( sed ) )
         handle.sendline( sed )
         handle.expect( metaFile )
@@ -188,7 +186,7 @@
                     main.log.error( "Could not parse counters response from " +
                                     str( main.Cluster.active( i ) ) )
                     main.log.warn( repr( onosCountersRaw[ i ] ) )
-                    onosCounters.append( [] )
+                    onosCounters.append( {} )
 
             testCounters = {}
             # make a list of all the "TestON-*" counters in ONOS
@@ -201,7 +199,7 @@
             for controller in enumerate( onosCounters ):
                 for key, value in controller[ 1 ].iteritems():
                     if 'TestON' in key:
-                        node = str( main.Cluster.active( controller[ 0 ] ) )
+                        node = main.Cluster.active( controller[ 0 ] )
                         try:
                             testCounters[ node ].append( { key: value } )
                         except KeyError:
@@ -213,7 +211,7 @@
                 consistent = main.TRUE
             else:
                 consistent = main.FALSE
-                main.log.error( "ONOS nodes have different values for counters:\n" +
+                main.log.error( "ONOS nodes have different values for counters: %s",
                                 testCounters )
             return ( onosCounters, consistent )
         except Exception:
@@ -231,19 +229,19 @@
             onosCounters, consistent = self.consistentCheck()
             # Check for correct values
             for i in range( len( main.Cluster.active() ) ):
+                node = str( main.Cluster.active( i ) )
                 current = onosCounters[ i ]
                 onosValue = None
                 try:
                     onosValue = current.get( counterName )
                 except AttributeError:
-                    node = str( main.Cluster.active( i ) )
                     main.log.exception( node + " counters result " +
                                         "is not as expected" )
                     correctResults = main.FALSE
                 if onosValue == counterValue:
-                    main.log.info( counterName + " counter value is correct" )
+                    main.log.info( "{}: {} counter value is correct".format( node, counterName ) )
                 else:
-                    main.log.error( counterName +
+                    main.log.error( node + ": " + counterName +
                                     " counter value is incorrect," +
                                     " expected value: " + str( counterValue ) +
                                     " current value: " + str( onosValue ) )
@@ -265,19 +263,12 @@
             # Compare leaderboards
             result = all( i == leaderList[ 0 ] for i in leaderList ) and\
                      leaderList is not None
-            main.log.debug( leaderList )
-            main.log.warn( result )
             if result:
                 return ( result, leaderList )
             time.sleep( 5 )  # TODO: paramerterize
         main.log.error( "Inconsistent leaderboards:" + str( leaderList ) )
         return ( result, leaderList )
 
-    def generateGraph( self, testName, plotName="Plot-HA", index=2 ):
-        # DEPRECATED: ONOSSetup.py now creates these graphs.
-
-        main.log.debug( "HA.generateGraph() is deprecated; ONOSSetup now creates these graphs." )
-
     def initialSetUp( self, serviceClean=False ):
         """
         rest of initialSetup
@@ -384,13 +375,11 @@
                                      sort_keys=True,
                                      indent=4,
                                      separators=( ',', ': ' ) )
-                main.log.debug( "Leaders: " + output )
                 # check for all intent partitions
                 topics = []
                 for i in range( 14 ):
                     topics.append( "work-partition-" + str( i ) )
                 topics += extraTopics
-                main.log.debug( topics )
                 ONOStopics = [ j[ 'topic' ] for j in parsedLeaders ]
                 for topic in topics:
                     if topic not in ONOStopics:
@@ -600,8 +589,7 @@
 
         # install onos-app-fwd
         main.step( "Install reactive forwarding app" )
-        onosCli = main.Cluster.next()
-        installResults = onosCli.CLI.activateApp( "org.onosproject.fwd" )
+        installResults = main.Cluster.next().CLI.activateApp( "org.onosproject.fwd" )
         utilities.assert_equals( expect=main.TRUE, actual=installResults,
                                  onpass="Install fwd successful",
                                  onfail="Install fwd failed" )
@@ -638,7 +626,7 @@
         time.sleep( 11 )
         # uninstall onos-app-fwd
         main.step( "Uninstall reactive forwarding app" )
-        uninstallResult = onosCli.CLI.deactivateApp( "org.onosproject.fwd" )
+        uninstallResult = main.Cluster.next().CLI.deactivateApp( "org.onosproject.fwd" )
         utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
                                  onpass="Uninstall fwd successful",
                                  onfail="Uninstall fwd failed" )
@@ -663,8 +651,8 @@
             host2 = "00:00:00:00:00:" + \
                 str( hex( i + 10 )[ 2: ] ).zfill( 2 ).upper()
             # NOTE: getHost can return None
-            host1Dict = onosCli.CLI.getHost( host1 )
-            host2Dict = onosCli.CLI.getHost( host2 )
+            host1Dict = main.Cluster.next().CLI.getHost( host1 )
+            host2Dict = main.Cluster.next().CLI.getHost( host2 )
             host1Id = None
             host2Id = None
             if host1Dict and host2Dict:
@@ -698,7 +686,7 @@
                                  onfail="Error looking up host ids" )
 
         intentStart = time.time()
-        onosIds = onosCli.getAllIntentsId()
+        onosIds = main.Cluster.next().getAllIntentsId()
         main.log.info( "Submitted intents: " + str( intentIds ) )
         main.log.info( "Intents in ONOS: " + str( onosIds ) )
         for intent in intentIds:
@@ -711,7 +699,7 @@
         else:
             intentStop = None
         # Print the intent states
-        intents = onosCli.CLI.intents()
+        intents = main.Cluster.next().CLI.intents()
         intentStates = []
         installedCheck = True
         main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
@@ -804,11 +792,11 @@
             installedCheck = True
             main.log.info( "Sleeping 60 seconds to see if intents are found" )
             time.sleep( 60 )
-            onosIds = onosCli.getAllIntentsId()
+            onosIds = main.Cluster.next().getAllIntentsId()
             main.log.info( "Submitted intents: " + str( intentIds ) )
             main.log.info( "Intents in ONOS: " + str( onosIds ) )
             # Print the intent states
-            intents = onosCli.CLI.intents()
+            intents = main.Cluster.next().CLI.intents()
             intentStates = []
             main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
             count = 0
@@ -950,8 +938,6 @@
                                ( str( count ), str( i ), str( s ) ) )
         self.commonChecks()
 
-        # Print flowrules
-        main.log.debug( onosCli.CLI.flows() )
         main.step( "Wait a minute then ping again" )
         # the wait is above
         PingResult = main.TRUE
@@ -1609,7 +1595,7 @@
                                                  args=[ main.onosSetName ] )
             getResults = main.TRUE
             for i in range( len( main.Cluster.active() ) ):
-                node = main.Cluster.active( i )
+                node = str( main.Cluster.active( i ) )
                 if isinstance( getResponses[ i ], list ):
                     current = set( getResponses[ i ] )
                     if len( current ) == len( getResponses[ i ] ):
@@ -1641,7 +1627,7 @@
                                                   args=[ main.onosSetName ] )
             sizeResults = main.TRUE
             for i in range( len( main.Cluster.active() ) ):
-                node = main.Cluster.active( i )
+                node = str( main.Cluster.active( i ) )
                 if size != sizeResponses[ i ]:
                     sizeResults = main.FALSE
                     main.log.error( node +
@@ -1683,7 +1669,7 @@
                                                  args=[ main.onosSetName ] )
             getResults = main.TRUE
             for i in range( len( main.Cluster.active() ) ):
-                node = main.Cluster.active( i )
+                node = str( main.Cluster.active( i ) )
                 if isinstance( getResponses[ i ], list ):
                     current = set( getResponses[ i ] )
                     if len( current ) == len( getResponses[ i ] ):
@@ -1707,7 +1693,7 @@
                                                   args=[ main.onosSetName ] )
             sizeResults = main.TRUE
             for i in range( len( main.Cluster.active() ) ):
-                node = main.Cluster.active( i )
+                node = str( main.Cluster.active( i ) )
                 if size != sizeResponses[ i ]:
                     sizeResults = main.FALSE
                     main.log.error( node + " expected a size of " +
@@ -1749,7 +1735,7 @@
                                                  args=[ main.onosSetName ] )
             getResults = main.TRUE
             for i in range( len( main.Cluster.active() ) ):
-                node = main.Cluster.active( i )
+                node = str( main.Cluster.active( i ) )
                 if isinstance( getResponses[ i ], list ):
                     current = set( getResponses[ i ] )
                     if len( current ) == len( getResponses[ i ] ):
@@ -1773,7 +1759,7 @@
                                                   args=[ main.onosSetName ] )
             sizeResults = main.TRUE
             for i in range( len( main.Cluster.active() ) ):
-                node = main.Cluster.active( i )
+                node = str( main.Cluster.active( i ) )
                 if size != sizeResponses[ i ]:
                     sizeResults = main.FALSE
                     main.log.error( node + " expected a size of " + str( size ) +
@@ -1847,7 +1833,7 @@
                                                  args=[ main.onosSetName ] )
             getResults = main.TRUE
             for i in range( len( main.Cluster.active() ) ):
-                node = main.Cluster.active( i )
+                node = str( main.Cluster.active( i ) )
                 if isinstance( getResponses[ i ], list ):
                     current = set( getResponses[ i ] )
                     if len( current ) == len( getResponses[ i ] ):
@@ -1871,7 +1857,7 @@
                                                   args=[ main.onosSetName ] )
             sizeResults = main.TRUE
             for i in range( len( main.Cluster.active() ) ):
-                node = main.Cluster.active( i )
+                node = str( main.Cluster.active( i ) )
                 if size != sizeResponses[ i ]:
                     sizeResults = main.FALSE
                     main.log.error( node + " expected a size of " + str( size ) +
@@ -1913,7 +1899,7 @@
                                                  args=[ main.onosSetName ] )
             getResults = main.TRUE
             for i in range( len( main.Cluster.active() ) ):
-                node = main.Cluster.active( i )
+                node = str( main.Cluster.active( i ) )
                 if isinstance( getResponses[ i ], list ):
                     current = set( getResponses[ i ] )
                     if len( current ) == len( getResponses[ i ] ):
@@ -1937,7 +1923,7 @@
                                                   args=[ main.onosSetName ] )
             sizeResults = main.TRUE
             for i in range( len( main.Cluster.active() ) ):
-                node = main.Cluster.active( i )
+                node = str( main.Cluster.active( i ) )
                 if size != sizeResponses[ i ]:
                     sizeResults = main.FALSE
                     main.log.error( node + " expected a size of " + str( size ) +
@@ -1979,7 +1965,7 @@
                                                  args=[ main.onosSetName ] )
             getResults = main.TRUE
             for i in range( len( main.Cluster.active() ) ):
-                node = main.Cluster.active( i )
+                node = str( main.Cluster.active( i ) )
                 if isinstance( getResponses[ i ], list ):
                     current = set( getResponses[ i ] )
                     if len( current ) == len( getResponses[ i ] ):
@@ -2003,7 +1989,7 @@
                                                   args=[ main.onosSetName ] )
             sizeResults = main.TRUE
             for i in range( len( main.Cluster.active() ) ):
-                node = main.Cluster.active( i )
+                node = str( main.Cluster.active( i ) )
                 if size != sizeResponses[ i ]:
                     sizeResults = main.FALSE
                     main.log.error( node + " expected a size of " + str( size ) +
@@ -2046,7 +2032,7 @@
                                                  args=[ main.onosSetName ] )
             getResults = main.TRUE
             for i in range( len( main.Cluster.active() ) ):
-                node = main.Cluster.active( i )
+                node = str( main.Cluster.active( i ) )
                 if isinstance( getResponses[ i ], list ):
                     current = set( getResponses[ i ] )
                     if len( current ) == len( getResponses[ i ] ):
@@ -2070,7 +2056,7 @@
                                                   args=[ main.onosSetName ] )
             sizeResults = main.TRUE
             for i in range( len( main.Cluster.active() ) ):
-                node = main.Cluster.active( i )
+                node = str( main.Cluster.active( i ) )
                 if size != sizeResponses[ i ]:
                     sizeResults = main.FALSE
                     main.log.error( node + " expected a size of " + str( size ) +
@@ -2112,7 +2098,7 @@
                                                  args=[ main.onosSetName ] )
             getResults = main.TRUE
             for i in range( len( main.Cluster.active() ) ):
-                node = main.Cluster.active( i )
+                node = str( main.Cluster.active( i ) )
                 if isinstance( getResponses[ i ], list ):
                     current = set( getResponses[ i ] )
                     if len( current ) == len( getResponses[ i ] ):
@@ -2136,7 +2122,7 @@
                                                   args=[ main.onosSetName ] )
             sizeResults = main.TRUE
             for i in range( len( main.Cluster.active() ) ):
-                node = main.Cluster.active( i )
+                node = str( main.Cluster.active( i ) )
                 if size != sizeResponses[ i ]:
                     sizeResults = main.FALSE
                     main.log.error( node + " expected a size of " + str( size ) +
@@ -2179,7 +2165,7 @@
                                                  args=[ main.onosSetName ] )
             getResults = main.TRUE
             for i in range( len( main.Cluster.active() ) ):
-                node = main.Cluster.active( i )
+                node = str( main.Cluster.active( i ) )
                 if isinstance( getResponses[ i ], list ):
                     current = set( getResponses[ i ] )
                     if len( current ) == len( getResponses[ i ] ):
@@ -2203,7 +2189,7 @@
                                                   args=[ main.onosSetName ] )
             sizeResults = main.TRUE
             for i in range( len( main.Cluster.active() ) ):
-                node = main.Cluster.active( i )
+                node = str( main.Cluster.active( i ) )
                 if size != sizeResponses[ i ]:
                     sizeResults = main.FALSE
                     main.log.error( node + " expected a size of " +
@@ -2248,7 +2234,7 @@
                     if node != tMapValue:
                         valueCheck = False
                 if not valueCheck:
-                    main.log.warn( "Values for key 'Key" + str(n) + "' do not match:" )
+                    main.log.warn( "Values for key 'Key" + str( n ) + "' do not match:" )
                     main.log.warn( getResponses )
                 getCheck = getCheck and valueCheck
             utilities.assert_equals( expect=True,
@@ -2611,7 +2597,6 @@
 
         ipList = []
         deviceList = []
-        onosCli = main.Cluster.next()
         try:
             # Assign mastership to specific controllers. This assignment was
             # determined for a 7 node cluser, but will work with any sized
@@ -2621,45 +2606,45 @@
                 if i == 1:
                     c = 0
                     ip = main.Cluster.active( c ).ip_address  # ONOS1
-                    deviceId = onosCli.getDevice( "1000" ).get( 'id' )
+                    deviceId = main.Cluster.next().getDevice( "1000" ).get( 'id' )
                 elif i == 2:
                     c = 1 % main.Cluster.numCtrls
                     ip = main.Cluster.active( c ).ip_address  # ONOS2
-                    deviceId = onosCli.getDevice( "2000" ).get( 'id' )
+                    deviceId = main.Cluster.next().getDevice( "2000" ).get( 'id' )
                 elif i == 3:
                     c = 1 % main.Cluster.numCtrls
                     ip = main.Cluster.active( c ).ip_address  # ONOS2
-                    deviceId = onosCli.getDevice( "3000" ).get( 'id' )
+                    deviceId = main.Cluster.next().getDevice( "3000" ).get( 'id' )
                 elif i == 4:
                     c = 3 % main.Cluster.numCtrls
                     ip = main.Cluster.active( c ).ip_address  # ONOS4
-                    deviceId = onosCli.getDevice( "3004" ).get( 'id' )
+                    deviceId = main.Cluster.next().getDevice( "3004" ).get( 'id' )
                 elif i == 5:
                     c = 2 % main.Cluster.numCtrls
                     ip = main.Cluster.active( c ).ip_address  # ONOS3
-                    deviceId = onosCli.getDevice( "5000" ).get( 'id' )
+                    deviceId = main.Cluster.next().getDevice( "5000" ).get( 'id' )
                 elif i == 6:
                     c = 2 % main.Cluster.numCtrls
                     ip = main.Cluster.active( c ).ip_address  # ONOS3
-                    deviceId = onosCli.getDevice( "6000" ).get( 'id' )
+                    deviceId = main.Cluster.next().getDevice( "6000" ).get( 'id' )
                 elif i == 7:
                     c = 5 % main.Cluster.numCtrls
                     ip = main.Cluster.active( c ).ip_address  # ONOS6
-                    deviceId = onosCli.getDevice( "6007" ).get( 'id' )
+                    deviceId = main.Cluster.next().getDevice( "6007" ).get( 'id' )
                 elif i >= 8 and i <= 17:
                     c = 4 % main.Cluster.numCtrls
                     ip = main.Cluster.active( c ).ip_address  # ONOS5
                     dpid = '3' + str( i ).zfill( 3 )
-                    deviceId = onosCli.getDevice( dpid ).get( 'id' )
+                    deviceId = main.Cluster.next().getDevice( dpid ).get( 'id' )
                 elif i >= 18 and i <= 27:
                     c = 6 % main.Cluster.numCtrls
                     ip = main.Cluster.active( c ).ip_address  # ONOS7
                     dpid = '6' + str( i ).zfill( 3 )
-                    deviceId = onosCli.getDevice( dpid ).get( 'id' )
+                    deviceId = main.Cluster.next().getDevice( dpid ).get( 'id' )
                 elif i == 28:
                     c = 0
                     ip = main.Cluster.active( c ).ip_address  # ONOS1
-                    deviceId = onosCli.getDevice( "2800" ).get( 'id' )
+                    deviceId = main.Cluster.next().getDevice( "2800" ).get( 'id' )
                 else:
                     main.log.error( "You didn't write an else statement for " +
                                     "switch s" + str( i ) )
@@ -2667,12 +2652,12 @@
                 # Assign switch
                 assert deviceId, "No device id for s" + str( i ) + " in ONOS"
                 # TODO: make this controller dynamic
-                roleCall = roleCall and onosCli.deviceRole( deviceId, ip )
+                roleCall = roleCall and main.Cluster.next().deviceRole( deviceId, ip )
                 ipList.append( ip )
                 deviceList.append( deviceId )
         except ( AttributeError, AssertionError ):
             main.log.exception( "Something is wrong with ONOS device view" )
-            main.log.info( onosCli.devices() )
+            main.log.info( main.Cluster.next().devices() )
         utilities.assert_equals(
             expect=main.TRUE,
             actual=roleCall,
@@ -2688,7 +2673,7 @@
             ip = ipList[ i ]
             deviceId = deviceList[ i ]
             # Check assignment
-            master = onosCli.getRole( deviceId ).get( 'master' )
+            master = main.Cluster.next().getRole( deviceId ).get( 'master' )
             if ip in master:
                 roleCheck = roleCheck and main.TRUE
             else:
@@ -3258,8 +3243,6 @@
 
                                     port = locations[0].get( 'port' )
                                     assert port, "port field could not be found for this host location object"
-                                    main.log.debug( "Host: {}\nmac: {}\n location(s): {}\ndevice: {}\n port: {}".format(
-                                        ctrl.pprint( host ), mac, ctrl.pprint( locations ), device, port ) )
 
                                     # Now check if this matches where they should be
                                     if mac and device and port:
@@ -3508,7 +3491,6 @@
         switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
 
         description = "Killing a switch to ensure it is discovered correctly"
-        onosCli = main.Cluster.next()
         main.case( description )
         switch = main.params[ 'kill' ][ 'switch' ]
         switchDPID = main.params[ 'kill' ][ 'dpid' ]
@@ -3520,7 +3502,7 @@
         main.log.info( "Waiting " + str( switchSleep ) +
                        " seconds for switch down to be discovered" )
         time.sleep( switchSleep )
-        device = onosCli.getDevice( dpid=switchDPID )
+        device = main.Cluster.next().getDevice( dpid=switchDPID )
         # Peek at the deleted switch
         main.log.warn( "Bringing down switch " + str( device ) )
         result = main.FALSE
@@ -3543,7 +3525,6 @@
         switch = main.params[ 'kill' ][ 'switch' ]
         switchDPID = main.params[ 'kill' ][ 'dpid' ]
         links = main.params[ 'kill' ][ 'links' ].split()
-        onosCli = main.Cluster.next()
         description = "Adding a switch to ensure it is discovered correctly"
         main.case( description )
 
@@ -3556,7 +3537,7 @@
         main.log.info( "Waiting " + str( switchSleep ) +
                        " seconds for switch up to be discovered" )
         time.sleep( switchSleep )
-        device = onosCli.getDevice( dpid=switchDPID )
+        device = main.Cluster.next().getDevice( dpid=switchDPID )
         # Peek at the deleted switch
         main.log.debug( "Added device: " + str( device ) )
         result = main.FALSE
@@ -3575,8 +3556,7 @@
 
         main.case( "Start Leadership Election app" )
         main.step( "Install leadership election app" )
-        onosCli = main.Cluster.next()
-        appResult = onosCli.CLI.activateApp( "org.onosproject.election" )
+        appResult = main.Cluster.next().CLI.activateApp( "org.onosproject.election" )
         utilities.assert_equals(
             expect=main.TRUE,
             actual=appResult,
@@ -3584,7 +3564,7 @@
             onfail="Something went wrong with installing Leadership election" )
 
         main.step( "Run for election on each node" )
-        onosCli.electionTestRun()
+        main.Cluster.next().electionTestRun()
         main.Cluster.command( "electionTestRun" )
         time.sleep( 5 )
         sameResult, leaders = main.HA.consistentLeaderboards( main.Cluster.active() )
@@ -3901,7 +3881,7 @@
                 if pushedHost != onosHost:
                     cfgResult = False
                     main.log.error( "Pushed Network configuration does not match what is in " +
-                                    "ONOS:\nPushed: {}\nONOS: {}".format( ctrl.pprint( pushedHost),
+                                    "ONOS:\nPushed: {}\nONOS: {}".format( ctrl.pprint( pushedHost ),
                                                                           ctrl.pprint( onosHost ) ) )
         utilities.assert_equals( expect=True,
                                  actual=cfgResult,