Controller active flag fixes
- Detect when iterator needs to be reset
- Reset iterator after changing active flag
- Rename resetActive() to clearActive() to avoid confusion with reset()
Change-Id: Ia5fa8f0827918ac0c912a662de67144267ecadac
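
For context, a minimal sketch of the failure mode these fixes address,
assuming Cluster caches an iterator built from a snapshot of its active
controllers (the Ctrl stub below is hypothetical, named only to mirror
TestON's controller objects):

    class Ctrl( object ):  # hypothetical stand-in for a TestON controller
        def __init__( self, name ):
            self.name = name
            self.active = True

    ctrls = [ Ctrl( "ONOS1" ), Ctrl( "ONOS2" ) ]
    iterator = iter( [ c for c in ctrls if c.active ] )  # snapshot: both active
    ctrls[ 1 ].active = False  # flag flipped, but iterator is NOT rebuilt
    print( [ c.name for c in iterator ] )  # still yields ONOS2: stale snapshot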
diff --git a/TestON/tests/HA/HAfullNetPartition/HAfullNetPartition.py b/TestON/tests/HA/HAfullNetPartition/HAfullNetPartition.py
index f04e878..886c672 100644
--- a/TestON/tests/HA/HAfullNetPartition/HAfullNetPartition.py
+++ b/TestON/tests/HA/HAfullNetPartition/HAfullNetPartition.py
@@ -249,6 +249,7 @@
utilities.assert_equals( expect=main.TRUE, actual=partitionResults,
onpass="Firewall rules set successfully",
onfail="Error setting firewall rules" )
+ main.Cluster.reset()
main.step( "Sleeping 60 seconds" )
time.sleep( 60 )
@@ -277,6 +278,7 @@
for node in main.partition:
main.Cluster.runningNodes[ node ].active = True
+ main.Cluster.reset()
main.step( "Checking ONOS nodes" )
nodeResults = utilities.retry( main.Cluster.nodesCheck,
diff --git a/TestON/tests/HA/HAscaling/HAscaling.py b/TestON/tests/HA/HAscaling/HAscaling.py
index e46271d..4e18b30 100644
--- a/TestON/tests/HA/HAscaling/HAscaling.py
+++ b/TestON/tests/HA/HAscaling/HAscaling.py
@@ -226,7 +226,7 @@
activeNodes = range( 0, main.Cluster.numCtrls )
newNodes = [ x for x in activeNodes if x not in prevNodes ]
- main.Cluster.resetActive()
+ main.Cluster.clearActive()
main.step( "Start new nodes" ) # OR stop old nodes?
started = main.TRUE
for i in newNodes:
diff --git a/TestON/tests/HA/HAstopNodes/HAstopNodes.py b/TestON/tests/HA/HAstopNodes/HAstopNodes.py
index bbb8db7..62b8ee5 100644
--- a/TestON/tests/HA/HAstopNodes/HAstopNodes.py
+++ b/TestON/tests/HA/HAstopNodes/HAstopNodes.py
@@ -196,6 +196,7 @@
killResults = killResults and\
ctrl.onosStop( ctrl.ipAddress )
ctrl.active = False
+ main.Cluster.reset()
utilities.assert_equals( expect=main.TRUE, actual=killResults,
onpass="ONOS nodes stopped successfully",
onfail="ONOS nodes NOT successfully stopped" )
diff --git a/TestON/tests/HA/HAswapNodes/HAswapNodes.py b/TestON/tests/HA/HAswapNodes/HAswapNodes.py
index 9c2d180..bf1a589 100644
--- a/TestON/tests/HA/HAswapNodes/HAswapNodes.py
+++ b/TestON/tests/HA/HAswapNodes/HAswapNodes.py
@@ -222,7 +222,7 @@
onpass="New cluster metadata file generated",
onfail="Failled to generate new metadata file" )
time.sleep( 5 ) # Give time for nodes to read new file
- main.Cluster.resetActive()
+ main.Cluster.clearActive()
# Note : done up to this point.
main.step( "Start new nodes" ) # OR stop old nodes?
started = main.TRUE
diff --git a/TestON/tests/dependencies/Cluster.py b/TestON/tests/dependencies/Cluster.py
index 61c141c..a67bad5 100644
--- a/TestON/tests/dependencies/Cluster.py
+++ b/TestON/tests/dependencies/Cluster.py
@@ -72,12 +72,10 @@
return ips
- def resetActive( self ):
+ def clearActive( self ):
"""
Description:
- reset the activeness of the cluster to be false
- Required:
- Returns:
+ Sets the active flag of each cluster node to False
"""
for ctrl in self.controllers:
ctrl.active = False
@@ -126,14 +124,16 @@
def next( self ):
"""
- An iterator for the cluster's controllers that
+ An iterator for the cluster's active controllers that
resets when there are no more elements.
Returns the next controller in the cluster
"""
try:
- return self.iterator.next()
- except StopIteration:
+ node = self.iterator.next()
+ assert node.active
+ return node
+ except ( StopIteration, AssertionError ):
self.reset()
try:
return self.iterator.next()
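
The resulting iterator contract, as a self-contained sketch (it assumes
reset() rebuilds the iterator over only the active controllers, which the
assert in next() implies; the stub classes are illustrative, not TestON's
API, and TestON additionally wraps the fallback in its own try/except):

    class Ctrl( object ):  # hypothetical controller stub
        def __init__( self, name ):
            self.name = name
            self.active = True

    class Cluster( object ):
        def __init__( self, controllers ):
            self.controllers = controllers
            self.reset()

        def reset( self ):
            # Rebuild the iterator from the currently active controllers
            self.iterator = iter( [ c for c in self.controllers if c.active ] )

        def clearActive( self ):
            # Mark every node inactive; callers re-activate some, then reset()
            for ctrl in self.controllers:
                ctrl.active = False

        def next( self ):
            # Return the next active controller; rebuild the iterator when it
            # is exhausted or yields a node deactivated mid-cycle
            try:
                node = self.iterator.next()  # Python 2, as in TestON
                assert node.active
                return node
            except ( StopIteration, AssertionError ):
                self.reset()
                return self.iterator.next()

    cluster = Cluster( [ Ctrl( "ONOS1" ), Ctrl( "ONOS2" ), Ctrl( "ONOS3" ) ] )
    cluster.controllers[ 1 ].active = False  # e.g. a node stopped by a test
    cluster.reset()  # rebuild after changing the flag, per the fix above
    print( cluster.next().name )  # ONOS1
    print( cluster.next().name )  # ONOS3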