Update Cluster Driver
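
Migrate HAfullNetPartition to the new OnosClusterDriver: the per-node
ONOScli/ONOS components in the .topo file collapse into a single
ONOScell component, node bookkeeping moves from main.CLIs, main.nodes,
and main.activeNodes to main.Cluster, and the iptables partition rules
are now applied over per-node SSH sessions opened through the bench
driver.

For reference, a minimal sketch of the node-iteration pattern this
change introduces (names as used in this patch; the full API lives in
OnosClusterDriver):

    # every node the cluster was started with
    for ctrl in main.Cluster.runningNodes:
        main.log.debug( ctrl.name + " is at " + ctrl.ipAddress )
    # only the nodes currently marked active, e.g. outside a partition
    for ctrl in main.Cluster.active():
        main.log.debug( ctrl.CLI.roles( jsonFormat=False ) )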

Change-Id: I8a3a57e19637ff210548e57d41178e6f194cf694
diff --git a/TestON/tests/HA/HAfullNetPartition/HAfullNetPartition.py b/TestON/tests/HA/HAfullNetPartition/HAfullNetPartition.py
index 7e10a09..475759f 100644
--- a/TestON/tests/HA/HAfullNetPartition/HAfullNetPartition.py
+++ b/TestON/tests/HA/HAfullNetPartition/HAfullNetPartition.py
@@ -90,21 +90,16 @@
             # load some variables from the params file
             cellName = main.params[ 'ENV' ][ 'cellName' ]
             main.apps = main.params[ 'ENV' ][ 'appString' ]
-            main.numCtrls = int( main.params[ 'num_controllers' ] )
-            if main.ONOSbench.maxNodes and\
-                        main.ONOSbench.maxNodes < main.numCtrls:
-                main.numCtrls = int( main.ONOSbench.maxNodes )
-            main.maxNodes = main.numCtrls
-            stepResult = main.testSetUp.envSetup( hasNode=True )
+            stepResult = main.testSetUp.envSetup()
         except Exception as e:
             main.testSetUp.envSetupException( e )
         main.testSetUp.evnSetupConclusion( stepResult )
         main.HA.generateGraph( "HAfullNetPartition" )
 
-        main.testSetUp.ONOSSetUp( main.Mininet1, cellName=cellName, removeLog=True,
-                                 extraApply=main.HA.customizeOnosGenPartitions,
-                                 extraClean=main.HA.cleanUpGenPartition )
-
+        main.testSetUp.ONOSSetUp( main.Mininet1, main.Cluster, cellName=cellName, removeLog=True,
+                                  extraApply=[ main.HA.startingMininet,
+                                               main.HA.customizeOnosGenPartitions ],
+                                  extraClean=main.HA.cleanUpGenPartition )
         main.HA.initialSetUp()
 
 
@@ -143,21 +138,19 @@
         The Failure case.
         """
         import math
+        import pexpect
-        assert main.numCtrls, "main.numCtrls not defined"
         assert main, "main not defined"
         assert utilities.assert_equals, "utilities.assert_equals not defined"
-        assert main.CLIs, "main.CLIs not defined"
-        assert main.nodes, "main.nodes not defined"
         main.case( "Partition ONOS nodes into two distinct partitions" )
 
         main.step( "Checking ONOS Logs for errors" )
-        for node in main.nodes:
-            main.log.debug( "Checking logs for errors on " + node.name + ":" )
-            main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
+        for ctrl in main.Cluster.runningNodes:
+            main.log.debug( "Checking logs for errors on " + ctrl.name + ":" )
+            main.log.warn( main.ONOSbench.checkLogs( ctrl.ipAddress ) )
 
-        main.log.debug( main.CLIs[ 0 ].roles( jsonFormat=False ) )
+        main.log.debug( main.Cluster.next().CLI.roles( jsonFormat=False ) )
 
-        n = len( main.nodes )  # Number of nodes
+        n = len( main.Cluster.runningNodes )  # Number of nodes
         p = ( ( n + 1 ) / 2 ) + 1  # Number of partitions
-        main.partition = [ 0 ]  # ONOS node to partition, listed by index in main.nodes
+        main.partition = [ 0 ]  # ONOS nodes to partition, listed by index in main.Cluster.runningNodes
         if n > 3:
@@ -169,27 +162,50 @@
         main.log.info( "Nodes to be partitioned: " + str( nodeList ) )
         partitionResults = main.TRUE
         for i in range( 0, n ):
-            this = main.nodes[ i ]
+            iCtrl = main.Cluster.runningNodes[ i ]
+            this = iCtrl.Bench.sshToNode( iCtrl.ipAddress )
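+            # Open an SSH session from the bench to this node; the iptables rules are installed on the node itself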
             if i not in main.partition:
                 for j in main.partition:
-                    foe = main.nodes[ j ]
-                    main.log.warn( "Setting IP Tables rule from {} to {}. ".format( this.ip_address, foe.ip_address ) )
+                    foe = main.Cluster.runningNodes[ j ]
+                    main.log.warn( "Setting iptables rule to drop traffic from {} to {}".format( foe.ipAddress, iCtrl.ipAddress ) )
-                    #CMD HERE
+                    # Drop packets from the foe so this node can no longer hear it
-                    cmdStr = "sudo iptables -A {} -d {} -s {} -j DROP".format( "INPUT", this.ip_address, foe.ip_address )
-                    this.handle.sendline( cmdStr )
-                    this.handle.expect( "\$" )
-                    main.log.debug( this.handle.before )
+                    try:
+                        cmdStr = "sudo iptables -A {} -d {} -s {} -j DROP".format( "INPUT", iCtrl.ipAddress, foe.ipAddress )
+                        this.sendline( cmdStr )
+                        this.expect( "\$" )
+                        main.log.debug( this.before )
+                    except pexpect.EOF:
+                        main.log.error( iCtrl.name + ": EOF exception found" )
+                        main.log.error( iCtrl.name + ":    " + this.before )
+                        main.cleanup()
+                        main.exit()
+                    except Exception:
+                        main.log.exception( iCtrl.name + ": Uncaught exception!" )
+                        main.cleanup()
+                        main.exit()
             else:
                 for j in range( 0, n ):
                     if j not in main.partition:
-                        foe = main.nodes[ j ]
-                        main.log.warn( "Setting IP Tables rule from {} to {}. ".format( this.ip_address, foe.ip_address ) )
+                        foe = main.Cluster.runningNodes[ j ]
+                        main.log.warn( "Setting iptables rule to drop traffic from {} to {}".format( foe.ipAddress, iCtrl.ipAddress ) )
-                        #CMD HERE
+                        # Drop packets from the foe so this node can no longer hear it
-                        cmdStr = "sudo iptables -A {} -d {} -s {} -j DROP".format( "INPUT", this.ip_address, foe.ip_address )
-                        this.handle.sendline( cmdStr )
-                        this.handle.expect( "\$" )
-                        main.log.debug( this.handle.before )
-                main.activeNodes.remove( i )
+                        cmdStr = "sudo iptables -A {} -d {} -s {} -j DROP".format( "INPUT", iCtrl.ipAddress, foe.ipAddress )
+                        try:
+                            this.sendline( cmdStr )
+                            this.expect( "\$" )
+                            main.log.debug( this.before )
+                        except pexpect.EOF:
+                            main.log.error( iCtrl.name + ": EOF exception found" )
+                            main.log.error( iCtrl.name + ":    " + this.before )
+                            main.cleanup()
+                            main.exit()
+                        except Exception:
+                            main.log.exception( iCtrl.name + ": Uncaught exception!" )
+                            main.cleanup()
+                            main.exit()
+                main.Cluster.runningNodes[ i ].active = False
+            iCtrl.Bench.exitFromSsh( this, iCtrl.ipAddress )
         # NOTE: When dynamic clustering is finished, we need to start checking
-        #       main.partion nodes still work when partitioned
+        #       that main.partition nodes still work when partitioned
         utilities.assert_equals( expect=main.TRUE, actual=partitionResults,
@@ -204,28 +220,31 @@
         Healing Partition
         """
         import time
-        assert main.numCtrls, "main.numCtrls not defined"
         assert main, "main not defined"
         assert utilities.assert_equals, "utilities.assert_equals not defined"
-        assert main.CLIs, "main.CLIs not defined"
-        assert main.nodes, "main.nodes not defined"
         assert main.partition, "main.partition not defined"
         main.case( "Healing Partition" )
 
-        main.step( "Deleteing firewall rules" )
+        main.step( "Deleting firewall rules" )
         healResults = main.TRUE
-        for node in main.nodes:
+        for ctrl in main.Cluster.runningNodes:
             cmdStr = "sudo iptables -F"
-            node.handle.sendline( cmdStr )
-            node.handle.expect( "\$" )
-            main.log.debug( node.handle.before )
+            handle = ctrl.Bench.sshToNode( ctrl.ipAddress )
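+            # "sudo iptables -F" flushes all rules, removing the partition rules added earlier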
+            handle.sendline( cmdStr )
+            handle.expect( "\$" )
+            main.log.debug( handle.before )
+            ctrl.Bench.exitFromSsh( handle, ctrl.ipAddress )
         utilities.assert_equals( expect=main.TRUE, actual=healResults,
                                  onpass="Firewall rules removed",
                                  onfail="Error removing firewall rules" )
 
         for node in main.partition:
-            main.activeNodes.append( node )
-        main.activeNodes.sort()
+            main.Cluster.runningNodes[ node ].active = True
+
+        '''
+        # NOTE: Not sure if this can be removed
+        main.activeNodes.sort()
         try:
             assert list( set( main.activeNodes ) ) == main.activeNodes,\
                    "List of active nodes has duplicates, this likely indicates something was run out of order"
@@ -233,11 +252,12 @@
             main.log.exception( "" )
             main.cleanup()
             main.exit()
+        '''
 
         main.step( "Checking ONOS nodes" )
         nodeResults = utilities.retry( main.HA.nodesCheck,
                                        False,
-                                       args=[ main.activeNodes ],
+                                       args=[ main.Cluster.active() ],
                                        sleep=15,
                                        attempts=5 )
 
@@ -246,11 +266,10 @@
                                  onfail="Nodes check NOT successful" )
 
         if not nodeResults:
-            for i in main.activeNodes:
-                cli = main.CLIs[ i ]
+            for ctrl in main.Cluster.active():
                 main.log.debug( "{} components not ACTIVE: \n{}".format(
-                    cli.name,
-                    cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
+                    ctrl.name,
+                    ctrl.CLI.sendline( "scr:list | grep -v ACTIVE" ) ) )
             main.log.error( "Failed to start ONOS, stopping test" )
             main.cleanup()
             main.exit()
@@ -260,7 +279,7 @@
         Check state after ONOS failure
         """
 
-        main.HA.checkStateAfterONOS( main, afterWhich=0 )
+        main.HA.checkStateAfterEvent( main, afterWhich=0 )
 
         main.step( "Leadership Election is still functional" )
         # Test of LeadershipElection
@@ -268,12 +287,11 @@
 
         partitioned = []
         for i in main.partition:
-            partitioned.append( main.nodes[ i ].ip_address )
+            partitioned.append( main.Cluster.runningNodes[ i ].ipAddress )
         leaderResult = main.TRUE
 
-        for i in main.activeNodes:
-            cli = main.CLIs[ i ]
-            leaderN = cli.electionTestLeader()
+        for ctrl in main.Cluster.active():
+            leaderN = ctrl.CLI.electionTestLeader()
             leaderList.append( leaderN )
             if leaderN == main.FALSE:
                 # error in response
@@ -282,12 +300,12 @@
                                  " error logs" )
                 leaderResult = main.FALSE
             elif leaderN is None:
-                main.log.error( cli.name +
+                main.log.error( ctrl.name +
                                  " shows no leader for the election-app was" +
                                  " elected after the old one died" )
                 leaderResult = main.FALSE
             elif leaderN in partitioned:
-                main.log.error( cli.name + " shows " + str( leaderN ) +
+                main.log.error( ctrl.name + " shows " + str( leaderN ) +
                                  " as leader for the election-app, but it " +
                                  "was partitioned" )
                 leaderResult = main.FALSE
diff --git a/TestON/tests/HA/HAfullNetPartition/HAfullNetPartition.topo b/TestON/tests/HA/HAfullNetPartition/HAfullNetPartition.topo
index 7c18a98..4bf4bd4 100644
--- a/TestON/tests/HA/HAfullNetPartition/HAfullNetPartition.topo
+++ b/TestON/tests/HA/HAfullNetPartition/HAfullNetPartition.topo
@@ -1,183 +1,32 @@
 <TOPOLOGY>
     <COMPONENT>
 
-        <ONOSbench>
-            <host>localhost</host>
+        <ONOScell>
+            <host>localhost</host>  # ONOS "bench" machine
             <user>sdn</user>
             <password>rocks</password>
-            <type>OnosDriver</type>
+            <type>OnosClusterDriver</type>
             <connect_order>1</connect_order>
             <COMPONENTS>
-                <prompt></prompt>
-            </COMPONENTS>
-        </ONOSbench>
-
-        <ONOScli1>
-            <host>localhost</host>
-            <user>sdn</user>
-            <password>rocks</password>
-            <type>OnosCliDriver</type>
-            <connect_order>2</connect_order>
-            <COMPONENTS>
+                <cluster_name></cluster_name>  # Used as a prefix for cluster components. Defaults to 'ONOS'
+                <diff_clihost></diff_clihost>  # True if the CLI components use a different host than localhost; OC# will be used when True. Leave empty otherwise.
                 <karaf_username></karaf_username>
                 <karaf_password></karaf_password>
-                <prompt></prompt>
+                <web_user></web_user>
+                <web_pass></web_pass>
+                <rest_port></rest_port>
+                <prompt></prompt>  # TODO: we technically need a few of these, one per component
+                <onos_home></onos_home>  # defines where ONOS home is
+                <nodes> 7 </nodes>  # number of nodes in the cluster
             </COMPONENTS>
-        </ONOScli1>
-
-        <ONOScli2>
-            <host>localhost</host>
-            <user>sdn</user>
-            <password>rocks</password>
-            <type>OnosCliDriver</type>
-            <connect_order>3</connect_order>
-            <COMPONENTS>
-                <prompt></prompt>
-            </COMPONENTS>
-        </ONOScli2>
-
-        <ONOScli3>
-            <host>localhost</host>
-            <user>sdn</user>
-            <password>rocks</password>
-            <type>OnosCliDriver</type>
-            <connect_order>4</connect_order>
-            <COMPONENTS>
-                <prompt></prompt>
-            </COMPONENTS>
-        </ONOScli3>
-
-
-        <ONOScli4>
-            <host>localhost</host>
-            <user>sdn</user>
-            <password>rocks</password>
-            <type>OnosCliDriver</type>
-            <connect_order>5</connect_order>
-            <COMPONENTS>
-                <prompt></prompt>
-            </COMPONENTS>
-        </ONOScli4>
-
-
-        <ONOScli5>
-            <host>localhost</host>
-            <user>sdn</user>
-            <password>rocks</password>
-            <type>OnosCliDriver</type>
-            <connect_order>6</connect_order>
-            <COMPONENTS>
-                <prompt></prompt>
-            </COMPONENTS>
-        </ONOScli5>
-
-
-        <ONOScli6>
-            <host>localhost</host>
-            <user>sdn</user>
-            <password>rocks</password>
-            <type>OnosCliDriver</type>
-            <connect_order>7</connect_order>
-            <COMPONENTS>
-                <prompt></prompt>
-            </COMPONENTS>
-        </ONOScli6>
-
-
-        <ONOScli7>
-            <host>localhost</host>
-            <user>sdn</user>
-            <password>rocks</password>
-            <type>OnosCliDriver</type>
-            <connect_order>8</connect_order>
-            <COMPONENTS>
-                <prompt></prompt>
-            </COMPONENTS>
-        </ONOScli7>
-
-        <ONOS1>
-            <host>OC1</host>
-            <user>sdn</user>
-            <password>rocks</password>
-            <type>OnosDriver</type>
-            <connect_order>9</connect_order>
-            <COMPONENTS>
-                <prompt></prompt>
-            </COMPONENTS>
-        </ONOS1>
-
-        <ONOS2>
-            <host>OC2</host>
-            <user>sdn</user>
-            <password>rocks</password>
-            <type>OnosDriver</type>
-            <connect_order>10</connect_order>
-            <COMPONENTS>
-                <prompt></prompt>
-            </COMPONENTS>
-        </ONOS2>
-
-        <ONOS3>
-            <host>OC3</host>
-            <user>sdn</user>
-            <password>rocks</password>
-            <type>OnosDriver</type>
-            <connect_order>11</connect_order>
-            <COMPONENTS>
-                <prompt></prompt>
-            </COMPONENTS>
-        </ONOS3>
-
-        <ONOS4>
-            <host>OC4</host>
-            <user>sdn</user>
-            <password>rocks</password>
-            <type>OnosDriver</type>
-            <connect_order>12</connect_order>
-            <COMPONENTS>
-                <prompt></prompt>
-            </COMPONENTS>
-        </ONOS4>
-
-        <ONOS5>
-            <host>OC5</host>
-            <user>sdn</user>
-            <password>rocks</password>
-            <type>OnosDriver</type>
-            <connect_order>13</connect_order>
-            <COMPONENTS>
-                <prompt></prompt>
-            </COMPONENTS>
-        </ONOS5>
-
-        <ONOS6>
-            <host>OC6</host>
-            <user>sdn</user>
-            <password>rocks</password>
-            <type>OnosDriver</type>
-            <connect_order>14</connect_order>
-            <COMPONENTS>
-                <prompt></prompt>
-            </COMPONENTS>
-        </ONOS6>
-
-        <ONOS7>
-            <host>OC7</host>
-            <user>sdn</user>
-            <password>rocks</password>
-            <type>OnosDriver</type>
-            <connect_order>15</connect_order>
-            <COMPONENTS>
-                <prompt></prompt>
-            </COMPONENTS>
-        </ONOS7>
+        </ONOScell>
 
         <Mininet1>
             <host>OCN</host>
             <user>sdn</user>
             <password>rocks</password>
             <type>MininetCliDriver</type>
-            <connect_order>16</connect_order>
+            <connect_order>2</connect_order>
             <COMPONENTS>
                 #Specify the Option for mininet
                 <arg1> --custom ~/mininet/custom/obelisk.py </arg1>
@@ -194,7 +43,7 @@
             <user>sdn</user>
             <password>rocks</password>
             <type>RemoteMininetDriver</type>
-            <connect_order>17</connect_order>
+            <connect_order>3</connect_order>
             <COMPONENTS>
                 <prompt></prompt>
             </COMPONENTS>