Refactor Network partitioning test
- Changed iptables rules to allow for external atomix cluster
- Add ip6tables rules as well
- Add ip6tables rule cleanup to cleanup script
Change-Id: Ie15c13ab5cca3937137f6a6bdaa7f407821f1576
diff --git a/TestON/bin/cleanup.sh b/TestON/bin/cleanup.sh
index b6a937b..23b288a 100755
--- a/TestON/bin/cleanup.sh
+++ b/TestON/bin/cleanup.sh
@@ -54,5 +54,8 @@
echo "Restoring iptables rules on ${i}"
ssh sdn@$i "sudo iptables -F"
ssh sdn@$i "sudo iptables-restore < /etc/iptables/rules.v4"
+ echo "Restoring ip6tables rules on ${i}"
+ ssh sdn@$i "sudo ip6tables -F"
+ ssh sdn@$i "sudo ip6tables-restore < /etc/iptables/rules.v6"
done
fi
diff --git a/TestON/tests/HA/HAfullNetPartition/HAfullNetPartition.py b/TestON/tests/HA/HAfullNetPartition/HAfullNetPartition.py
index 92815f6..417bc39 100644
--- a/TestON/tests/HA/HAfullNetPartition/HAfullNetPartition.py
+++ b/TestON/tests/HA/HAfullNetPartition/HAfullNetPartition.py
@@ -198,22 +198,33 @@
nodeList = [ str( i + 1 ) for i in main.partition ]
main.log.info( "Nodes to be partitioned: " + str( nodeList ) )
partitionResults = main.TRUE
+ standalonePort = "5679" # use params file and default to this
+ embeddedPort = "9876" # use params file and default to this
+ # We are blocking traffic from the embedded Atomix instance on nodes in main.partition
for i in range( 0, n ):
iCtrl = main.Cluster.runningNodes[ i ]
this = iCtrl.Bench.sshToNode( iCtrl.ipAddress )
- if i not in main.partition:
- for j in main.partition:
+ if i not in main.partition: # i is in the majority partition
+ for j in main.partition: # j is in the minority partition
foe = main.Cluster.runningNodes[ j ]
main.log.warn( "Setting IP Tables rule from {} to {}. ".format( iCtrl.ipAddress,
foe.ipAddress ) )
# CMD HERE
+ # On node i in majority partition: add rule to block from j in minority partition
+ cmdList = [ ]
+ cmdList.append( "sudo iptables -A {} -p tcp --sp {} -d {} -s {} -j DROP".format( "INPUT",
+ embeddedPort,
+ iCtrl.ipAddress,
+ foe.ipAddress ) )
+ cmdList.append( "sudo ip6tables -A {} -p tcp --sp {} -d {} -s {} -j DROP".format( "INPUT",
+ embeddedPort,
+ iCtrl.ipAddress,
+ foe.ipAddress ) )
try:
- cmdStr = "sudo iptables -A {} -d {} -s {} -j DROP".format( "INPUT",
- iCtrl.ipAddress,
- foe.ipAddress )
- this.sendline( cmdStr )
- this.expect( "\$" )
- main.log.debug( this.before )
+ for cmd in cmdList:
+ this.sendline( cmd )
+ this.expect( "\$" )
+ main.log.debug( this.before )
except pexpect.EOF:
main.log.error( iCtrl.name + ": EOF exception found" )
main.log.error( iCtrl.name + ": " + this.before )
@@ -224,17 +235,43 @@
else:
for j in range( 0, n ):
if j not in main.partition:
+ # On node i in minority partition: add rule to block from j in majority partition
foe = main.Cluster.runningNodes[ j ]
main.log.warn( "Setting IP Tables rule from {} to {}. ".format( iCtrl.ipAddress,
foe.ipAddress ) )
# CMD HERE
- cmdStr = "sudo iptables -A {} -d {} -s {} -j DROP".format( "INPUT",
- iCtrl.ipAddress,
- foe.ipAddress )
+ cmdList = [ ]
+ cmdList.append( "sudo iptables -A {} -p tcp --dp {} -d {} -s {} -j DROP".format( "INPUT",
+ embeddedPort,
+ iCtrl.ipAddress,
+ foe.ipAddress ) )
+ cmdList.append( "sudo ip6tables -A {} -p tcp --dp {} -d {} -s {} -j DROP".format( "INPUT",
+ embeddedPort,
+ iCtrl.ipAddress,
+ foe.ipAddress ) )
+ cmdList.append( "sudo iptables -A {} -p tcp --sp {} -s {} -j DROP".format( "OUTPUT",
+ embeddedPort,
+ iCtrl.ipAddress ) )
+ cmdList.append( "sudo ip6tables -A {} -p tcp --sp {} -s {} -j DROP".format( "OUTPUT",
+ embeddedPort,
+ iCtrl.ipAddress ) )
+ cmdList.append( "sudo iptables -A {} -p tcp --sp {} -s {} -m conntrack --ctstate ESTABLISHED -j DROP".format( "OUTPUT",
+ embeddedPort,
+ iCtrl.ipAddress ) )
+ cmdList.append( "sudo ip6tables -A {} -p tcp --sp {} -s {} -m conntrack --ctstate ESTABLISHED -j DROP".format( "OUTPUT",
+ embeddedPort,
+ iCtrl.ipAddress ) )
+ cmdList.append( "sudo iptables -A {} -p tcp --sp {} -s {} -m conntrack --ctstate ESTABLISHED -j DROP".format( "INPUT",
+ embeddedPort,
+ foe.ipAddress ) )
+ cmdList.append( "sudo ip6tables -A {} -p tcp --sp {} -s {} -m conntrack --ctstate ESTABLISHED -j DROP".format( "INPUT",
+ embeddedPort,
+ foe.ipAddress ) )
try:
- this.sendline( cmdStr )
- this.expect( "\$" )
- main.log.debug( this.before )
+ for cmd in cmdList:
+ this.sendline( cmd )
+ this.expect( "\$" )
+ main.log.debug( this.before )
except pexpect.EOF:
main.log.error( iCtrl.name + ": EOF exception found" )
main.log.error( iCtrl.name + ": " + this.before )
@@ -242,14 +279,43 @@
except Exception:
main.log.exception( iCtrl.name + ": Uncaught exception!" )
main.cleanAndExit()
+ # From embedded atomix to standalone atomix on same node, node i in minority partition
+ foe = main.Cluster.runningNodes[ i ]
+ main.log.warn( "Setting IP Tables rule from {} to {}. ".format( iCtrl.ipAddress,
+ foe.ipAddress ) )
+ # CMD HERE
+ cmdStr = "sudo iptables -A {} -p tcp --sp {} -d {} -s {} -j DROP".format( "INPUT",
+ embeddedPort,
+ iCtrl.ipAddress,
+ foe.ipAddress )
+ cmdStr2 = "sudo iptables -A {} -p tcp --dp {} -d {} -s {} -j DROP".format( "INPUT",
+ embeddedPort,
+ iCtrl.ipAddress,
+ foe.ipAddress )
+ cmdStr3 = "sudo iptables -A {} -p tcp --sp {} -j DROP".format( "OUTPUT",
+ embeddedPort )
+ try:
+ this.sendline( cmdStr )
+ this.expect( "\$" )
+ main.log.debug( this.before )
+ this.sendline( cmdStr2 )
+ this.expect( "\$" )
+ main.log.debug( this.before )
+ except pexpect.EOF:
+ main.log.error( iCtrl.name + ": EOF exception found" )
+ main.log.error( iCtrl.name + ": " + this.before )
+ main.cleanAndExit()
+ except Exception:
+ main.log.exception( iCtrl.name + ": Uncaught exception!" )
+ main.cleanAndExit()
main.Cluster.runningNodes[ i ].active = False
+ # end if/else
iCtrl.Bench.exitFromSsh( this, iCtrl.ipAddress )
- # NOTE: When dynamic clustering is finished, we need to start checking
- # main.partion nodes still work when partitioned
utilities.assert_equals( expect=main.TRUE, actual=partitionResults,
onpass="Firewall rules set successfully",
onfail="Error setting firewall rules" )
main.Cluster.reset()
+ main.log.debug( main.Cluster.active )
main.step( "Sleeping 60 seconds" )
time.sleep( 60 )
diff --git a/TestON/tests/HA/dependencies/HA.py b/TestON/tests/HA/dependencies/HA.py
index 4b61c1e..88d122d 100644
--- a/TestON/tests/HA/dependencies/HA.py
+++ b/TestON/tests/HA/dependencies/HA.py
@@ -1429,7 +1429,6 @@
main.caseExplanation = "Test the methods of the distributed " +\
"primitives (counters and sets) throught the cli"
# DISTRIBUTED ATOMIC COUNTERS
- # Partitioned counters
main.step( "Increment then get a default counter on each node" )
pCounters = main.Cluster.command( "counterTestAddAndGet",
args=[ main.pCounterName ] )
@@ -3631,7 +3630,12 @@
oldLeaderCLI = ctrl
break
else: # FOR/ELSE statement
- main.log.error( "Leader election, could not find current leader" )
+ main.log.error( "Leader election, could not find current leader amongst active nodes" )
+ for ctrl in main.Cluster.controllers:
+ if oldLeader == ctrl.ipAddress:
+ oldLeaderCLI = ctrl
+ main.log.warn( "Old leader was found as node " + str( ctrl.ipAddress ) )
+ # Should we skip the next if statement then? There should be a new leader elected?
if oldLeader:
withdrawResult = oldLeaderCLI.electionTestWithdraw()
utilities.assert_equals(