Update tests for Aether pods
- Update tests for the QA-Pod
- Add SRStaging test for connecting to the Staging pod
- Add functions for a Kubernetes-deployed cluster
- Connect to ONOS nodes through Kubernetes
- Add an option to connect to components through jump hosts
- Fixes for installing ONOS in custom locations
- Invoke python2 instead of python
- If an ssh agent is in use, also use it for pexpect ssh sessions,
  e.g., Jenkins-initiated tests (sketched below)
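
A minimal, illustrative sketch of the jump-host and ssh-agent handling
described above; the helper name is hypothetical and only standard
OpenSSH options and the pexpect API are assumed:

    import os
    import pexpect

    def spawn_ssh( user, host, jump_host=None ):
        """Open an interactive ssh session, optionally via a jump host."""
        cmd = "ssh -t "
        if jump_host:
            # -J is OpenSSH's ProxyJump option
            cmd += "-J %s " % jump_host
        if os.environ.get( "SSH_AUTH_SOCK" ):
            # An ssh agent is running (e.g. under Jenkins); forward it so
            # later hops can also authenticate with the agent's keys
            cmd += "-o ForwardAgent=yes "
        cmd += "%s@%s" % ( user, host )
        return pexpect.spawn( cmd, timeout=60 )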
Change-Id: I1fc345c8eab60a5b00c17e6ed677a63489a74a19
diff --git a/TestON/tests/USECASE/SegmentRouting/SRBridging/SRBridging.params b/TestON/tests/USECASE/SegmentRouting/SRBridging/SRBridging.params
index 2deaa95..7021d41 100644
--- a/TestON/tests/USECASE/SegmentRouting/SRBridging/SRBridging.params
+++ b/TestON/tests/USECASE/SegmentRouting/SRBridging/SRBridging.params
@@ -46,8 +46,6 @@
</ONOS_Logging>
<GIT>
- <pull>False</pull>
- <branch>master</branch>
</GIT>
<CTRL>
diff --git a/TestON/tests/USECASE/SegmentRouting/SRBridging/SRBridging.params.tofino b/TestON/tests/USECASE/SegmentRouting/SRBridging/SRBridging.params.tofino
index 88d669b..00666bc 100644
--- a/TestON/tests/USECASE/SegmentRouting/SRBridging/SRBridging.params.tofino
+++ b/TestON/tests/USECASE/SegmentRouting/SRBridging/SRBridging.params.tofino
@@ -2,7 +2,7 @@
<testcases>1</testcases>
<GRAPH>
- <nodeCluster>Fabric</nodeCluster>
+ <nodeCluster>QA-Pod</nodeCluster>
<builds>20</builds>
<jobName>SRBridging-tofino</jobName>
<branch>master</branch>
@@ -18,7 +18,7 @@
<useCommonTopo>True</useCommonTopo>
<useBmv2>True</useBmv2>
<bmv2SwitchType>stratum</bmv2SwitchType>
- <switchPrefix>tofino</switchPrefix>
+ <switchPrefix></switchPrefix>
<stratumRoot>~/stratum</stratumRoot>
<topology>trellis_fabric.py</topology>
<lib>routinglib.py,trellislib.py,stratum.py</lib>
@@ -28,27 +28,32 @@
<persistent_setup>True</persistent_setup>
- <MN_DOCKER>
- <args>--privileged --net host --rm -v topo:/topo -v ~/mininet/custom:/home/root/mininet/custom -v /var/run/openvswitch/:/var/run/openvswitch/ -v /tmp/mn-stratum:/tmp -v /tmp/mn_conf/:/home/root/config --hostname mn-stratum -v /etc/network/interfaces:/etc/network/interfaces -it -d</args>
- <name>trellis_mininet</name>
- <home>/home/root/</home>
- </MN_DOCKER>
- <CLUSTER>
- # Params for onos docker
- <dockerSkipBuild>True</dockerSkipBuild>
- <dockerBuildCmd>make ONOS_VERSION=onos-2.2 DOCKER_TAG=TestON-onos-2.2 onos-build trellis-control-build trellis-t3-build tost-build</dockerBuildCmd> # If using another cmd like make
- <dockerBuildTimeout>1200</dockerBuildTimeout>
- <dockerFilePath>~/tost-onos</dockerFilePath>
- <dockerImageTag>registry.aetherproject.org/tost/tost:master</dockerImageTag>
- <dockerOptions>-d --rm --network host -v ~/.ssh/authorized_keys:/root/.ssh/authorized_keys -v /tmp/cluster.json:/root/onos/config/cluster.json </dockerOptions> # We start the container detached, so the docker component can connect to cli instead of logs
- <atomixImageTag>atomix/atomix:3.1.5</atomixImageTag>
- <atomixOptions>-d --rm --network host -v ~/.ssh/authorized_keys:/root/.ssh/authorized_keys -v /tmp/atomix.json:/opt/atomix/conf/atomix.json </atomixOptions>
- </CLUSTER>
+ <kubernetes>
+ <appName>onos-tost-onos-classic</appName>
+ <namespace>tost</namespace>
+ </kubernetes>
+
+ <PERF>
+ <traffic_host>Host3</traffic_host>
+ <traffic_container>mlabbe/iperf</traffic_container>
+ <traffic_container_arguments>--net=host -v /proc/net/arp:/host/arp --rm</traffic_container_arguments>
+ <traffic_cmd_arguments> -u -b 20M -t 20</traffic_cmd_arguments>
+
+ <pcap_host>Host4</pcap_host>
+ <pcap_container>toendeavour/tshark</pcap_container>
+ <pcap_container_arguments>--cap-add=NET_RAW --cap-add=NET_ADMIN --net=host --rm -v ~/TestON/tshark/:/tshark</pcap_container_arguments>
+ <pcap_cmd_arguments>-t e -F pcap</pcap_cmd_arguments>
+ <pcap_cmd_arguments2>-t e -F pcap</pcap_cmd_arguments2>
+
+ </PERF>
+
+
+
<ENV>
<cellName>productionCell</cellName>
- <cellApps>drivers,fpm,lldpprovider,hostprovider,netcfghostprovider,drivers.bmv2,org.opencord.fabric-tofino,pipelines.fabric,org.stratumproject.fabric-tna,drivers.barefoot,segmentrouting,t3</cellApps>
+ <cellApps>drivers,fpm,lldpprovider,hostprovider,netcfghostprovider,drivers.bmv2,pipelines.fabric,org.stratumproject.fabric-tna,drivers.barefoot,segmentrouting,t3</cellApps>
</ENV>
<EXTERNAL_APPS>
@@ -79,7 +84,6 @@
<org.onosproject.drivers.gnoi>DEBUG</org.onosproject.drivers.gnoi>
<org.onosproject.drivers.gmni>DEBUG</org.onosproject.drivers.gmni>
<org.onosproject.drivers.barefoot>DEBUG</org.onosproject.drivers.barefoot>
- <org.opencord.fabric.tofino>DEBUG</org.opencord.fabric.tofino>
<org.onosproject.bmv2>DEBUG</org.onosproject.bmv2>
</ONOS_Logging>
diff --git a/TestON/tests/USECASE/SegmentRouting/SRBridging/SRBridging.topo.0x1.physical b/TestON/tests/USECASE/SegmentRouting/SRBridging/SRBridging.topo.0x1.physical
index d7a2bad..e67e354 100644
--- a/TestON/tests/USECASE/SegmentRouting/SRBridging/SRBridging.topo.0x1.physical
+++ b/TestON/tests/USECASE/SegmentRouting/SRBridging/SRBridging.topo.0x1.physical
@@ -8,12 +8,13 @@
<connect_order>1</connect_order>
<home></home> # defines where onos home is on the build machine. Defaults to "~/onos/" if empty.
<COMPONENTS>
+ <kubeConfig>~/.kube/qa-ace-menlo</kubeConfig> # If set, will attempt to use this file for setting up port-forwarding
<useDocker>True</useDocker> # Whether to use docker for ONOS nodes
- <docker_prompt>~/onos#</docker_prompt>
+ <docker_prompt>\$</docker_prompt>
<cluster_name></cluster_name> # Used as a prefix for cluster components. Defaults to 'ONOS'
<diff_clihost>True</diff_clihost> # if it has different host other than localhost for CLI. True or empty. OC# will be used if True.
- <karaf_username></karaf_username>
- <karaf_password></karaf_password>
+ <karaf_username>karaf</karaf_username>
+ <karaf_password>karaf</karaf_password>
<web_user>sdn</web_user>
<web_pass>rocks</web_pass>
<rest_port></rest_port>
@@ -30,13 +31,14 @@
<type>StratumOSSwitchDriver</type>
<connect_order>2</connect_order>
<COMPONENTS>
+ <prompt>#</prompt>
<shortName>leaf1</shortName>
<port1>1</port1>
<link1>Host1</link1>
<port2>2</port2>
<link2>Host2</link2>
- <onosConfigPath>~/TestON/tests/USECASE/SegmentRouting/SRBridging/dependencies/json/</onosConfigPath>
- <onosConfigFile>tofino-onos-netcfg.json</onosConfigFile>
+ <onosConfigPath></onosConfigPath>
+ <onosConfigFile></onosConfigFile>
</COMPONENTS>
</SwitchLeaf1>
@@ -49,14 +51,14 @@
<COMPONENTS>
<mac>3c:fd:fe:a8:ea:30</mac>
<inband>false</inband>
- <ip>10.0.2.1</ip>
+ <ip>192.168.102.2</ip>
<shortName>h1</shortName>
<port1>0</port1>
<link1>SwitchLeaf1</link1>
<interfaceName>ens6f0</interfaceName>
<routes>
<route1>
- <network>10.0.2.0</network>
+ <network>192.168.102.1</network>
<netmask>24</netmask>
<gw></gw>
<interface></interface>
@@ -74,14 +76,14 @@
<COMPONENTS>
<mac>3c:fd:fe:a8:ea:31</mac>
<inband>false</inband>
- <ip>10.0.2.2</ip>
+ <ip>192.168.102.11</ip>
<shortName>h2</shortName>
<port1>0</port1>
<link1>SwitchLeaf1</link1>
<interfaceName>ens6f1</interfaceName>
<routes>
<route1>
- <network>10.0.2.0</network>
+ <network>192.168.102.1</network>
<netmask>24</netmask>
<gw></gw>
<interface></interface>
diff --git a/TestON/tests/USECASE/SegmentRouting/SRBridging/dependencies/SRBridgingTest.py b/TestON/tests/USECASE/SegmentRouting/SRBridging/dependencies/SRBridgingTest.py
index f88c85a..bfba8df 100644
--- a/TestON/tests/USECASE/SegmentRouting/SRBridging/dependencies/SRBridgingTest.py
+++ b/TestON/tests/USECASE/SegmentRouting/SRBridging/dependencies/SRBridgingTest.py
@@ -65,18 +65,21 @@
main.Cluster.setRunningNode( onosNodes )
run.installOnos( main, skipPackage=skipPackage, cliSleep=5 )
if main.useBmv2:
- switchPrefix = main.params[ 'DEPENDENCY' ].get( 'switchPrefix', "bmv2" )
+ switchPrefix = main.params[ 'DEPENDENCY' ].get( 'switchPrefix', '' )
+ if switchPrefix is None:
+ switchPrefix = ''
# Translate configuration file from OVS-OFDPA to BMv2 driver
translator.bmv2ToOfdpa( main ) # Try to cleanup if switching between switch types
translator.ofdpaToBmv2( main, switchPrefix=switchPrefix )
else:
translator.bmv2ToOfdpa( main )
- suf = main.params.get( 'jsonFileSuffix', None)
- if suf:
- run.loadJson( main, suffix=suf )
- else:
- run.loadJson( main )
- run.loadChart( main )
+ if not main.persistentSetup:
+ suf = main.params.get( 'jsonFileSuffix', None)
+ if suf:
+ run.loadJson( main, suffix=suf )
+ else:
+ run.loadJson( main )
+ run.loadChart( main ) # stores hosts to ping and expected results
if hasattr( main, 'Mininet1' ):
run.mnDockerSetup( main ) # optionally create and setup docker image
@@ -94,15 +97,14 @@
else:
# Run the test with physical devices
- run.connectToPhysicalNetwork( main )
+ run.connectToPhysicalNetwork( main, hostDiscovery=False ) # We don't want to do host discovery in the pod
run.checkFlows( main, minFlowCount=self.topo[ topology ][ 5 if main.useBmv2 else 4 ] * self.topo[ topology ][ 1 ], sleep=5 )
if main.useBmv2:
- switchPrefix = main.params[ 'DEPENDENCY' ].get( 'switchPrefix' )
- if switchPrefix == "tofino":
- leaf_dpid = [ "device:tofino:leaf%d" % ( ls + 1 ) for ls in range( self.topo[ topology ][ 1 ]) ]
- else:
- leaf_dpid = [ "device:bmv2:leaf%d" % ( ls + 1 ) for ls in range( self.topo[ topology ][ 1 ]) ]
+ switchPrefix = main.params[ 'DEPENDENCY' ].get( 'switchPrefix', '' )
+ if switchPrefix:
+ switchPrefix += ':'
+ leaf_dpid = [ "device:%sleaf%d" % ( switchPrefix, ls + 1 ) for ls in range( self.topo[ topology ][ 1 ]) ]
else:
leaf_dpid = [ "of:%016d" % ( ls + 1 ) for ls in range( self.topo[ topology ][ 1 ] ) ]
for dpid in leaf_dpid:
diff --git a/TestON/tests/USECASE/SegmentRouting/SRClusterRestart/dependencies/SRClusterRestartFuncs.py b/TestON/tests/USECASE/SegmentRouting/SRClusterRestart/dependencies/SRClusterRestartFuncs.py
index cead0b4..af86ca8 100644
--- a/TestON/tests/USECASE/SegmentRouting/SRClusterRestart/dependencies/SRClusterRestartFuncs.py
+++ b/TestON/tests/USECASE/SegmentRouting/SRClusterRestart/dependencies/SRClusterRestartFuncs.py
@@ -40,7 +40,8 @@
main.cfgName = Topo
main.Cluster.setRunningNode( numNodes )
run.installOnos( main )
- run.loadJson( main )
+ if not main.persistentSetup:
+ run.loadJson( main )
run.loadChart( main )
if hasattr( main, 'Mininet1' ):
run.startMininet( main, 'cord_fabric.py', args=self.topo[ Topo ][ 2 ] )
diff --git a/TestON/tests/USECASE/SegmentRouting/SRDhcprelay/dependencies/SRDhcprelayTest.py b/TestON/tests/USECASE/SegmentRouting/SRDhcprelay/dependencies/SRDhcprelayTest.py
index d5f89a6..325af80 100644
--- a/TestON/tests/USECASE/SegmentRouting/SRDhcprelay/dependencies/SRDhcprelayTest.py
+++ b/TestON/tests/USECASE/SegmentRouting/SRDhcprelay/dependencies/SRDhcprelayTest.py
@@ -49,11 +49,14 @@
if main.useBmv2:
# Translate configuration file from OVS-OFDPA to BMv2 driver
translator.bmv2ToOfdpa( main ) # Try to cleanup if switching between switch types
- switchPrefix = main.params[ 'DEPENDENCY' ].get( 'switchPrefix', "bmv2" )
+ switchPrefix = main.params[ 'DEPENDENCY' ].get( 'switchPrefix', '' )
+ if switchPrefix is None:
+ switchPrefix = ''
translator.ofdpaToBmv2( main, switchPrefix=switchPrefix )
else:
translator.bmv2ToOfdpa( main )
- run.loadJson( main )
+ if not main.persistentSetup:
+ run.loadJson( main )
run.loadHost( main )
if hasattr( main, 'Mininet1' ):
run.mnDockerSetup( main )
diff --git a/TestON/tests/USECASE/SegmentRouting/SRDynamic/dependencies/SRDynamicFuncs.py b/TestON/tests/USECASE/SegmentRouting/SRDynamic/dependencies/SRDynamicFuncs.py
index 0ca4c91..1e2d0a3 100644
--- a/TestON/tests/USECASE/SegmentRouting/SRDynamic/dependencies/SRDynamicFuncs.py
+++ b/TestON/tests/USECASE/SegmentRouting/SRDynamic/dependencies/SRDynamicFuncs.py
@@ -47,7 +47,8 @@
main.cfgName = Topo
main.Cluster.setRunningNode( numNodes )
run.installOnos( main )
- run.loadJson( main )
+ if not main.persistentSetup:
+ run.loadJson( main )
run.loadChart( main )
run.startMininet( main, 'cord_fabric.py',
args=self.topo[ Topo ][ 2 ] )
diff --git a/TestON/tests/USECASE/SegmentRouting/SRDynamicConf/dependencies/SRDynamicConfTest.py b/TestON/tests/USECASE/SegmentRouting/SRDynamicConf/dependencies/SRDynamicConfTest.py
index 5c8d277..fb44e2e 100644
--- a/TestON/tests/USECASE/SegmentRouting/SRDynamicConf/dependencies/SRDynamicConfTest.py
+++ b/TestON/tests/USECASE/SegmentRouting/SRDynamicConf/dependencies/SRDynamicConfTest.py
@@ -75,11 +75,14 @@
if main.useBmv2:
# Translate configuration file from OVS-OFDPA to BMv2 driver
translator.bmv2ToOfdpa( main ) # Try to cleanup if switching between switch types
- switchPrefix = main.params[ 'DEPENDENCY' ].get( 'switchPrefix', "bmv2" )
+ switchPrefix = main.params[ 'DEPENDENCY' ].get( 'switchPrefix', '' )
+ if switchPrefix is None:
+ switchPrefix = ''
translator.ofdpaToBmv2( main, switchPrefix=switchPrefix )
else:
translator.bmv2ToOfdpa( main )
- run.loadJson( main )
+ if not main.persistentSetup:
+ run.loadJson( main )
run.loadChart( main )
# Provide topology-specific interface configuration
diff --git a/TestON/tests/USECASE/SegmentRouting/SRHighAvailability/dependencies/SRHAFuncs.py b/TestON/tests/USECASE/SegmentRouting/SRHighAvailability/dependencies/SRHAFuncs.py
index 561ebfc..5f71d28 100644
--- a/TestON/tests/USECASE/SegmentRouting/SRHighAvailability/dependencies/SRHAFuncs.py
+++ b/TestON/tests/USECASE/SegmentRouting/SRHighAvailability/dependencies/SRHAFuncs.py
@@ -44,11 +44,13 @@
main.case( description )
run.config( main, Topo )
run.installOnos( main )
- run.loadJson( main )
+ if not main.persistentSetup:
+ run.loadJson( main )
run.loadChart( main )
run.startMininet( main, 'cord_fabric.py', args=self.topo[ Topo ][ 2 ] )
- # xconnects need to be loaded after topology
- run.loadXconnects( main )
+ if not main.persistentSetup:
+ # xconnects need to be loaded after topology
+ run.loadXconnects( main )
# pre-configured routing and bridging test
run.checkFlows( main, minFlowCount=minFlow )
run.pingAll( main )
diff --git a/TestON/tests/USECASE/SegmentRouting/SRLinkFailure/dependencies/SRLinkFailFuncs.py b/TestON/tests/USECASE/SegmentRouting/SRLinkFailure/dependencies/SRLinkFailFuncs.py
index 4664c9e..ea55f04 100644
--- a/TestON/tests/USECASE/SegmentRouting/SRLinkFailure/dependencies/SRLinkFailFuncs.py
+++ b/TestON/tests/USECASE/SegmentRouting/SRLinkFailure/dependencies/SRLinkFailFuncs.py
@@ -61,10 +61,11 @@
else:
translator.bmv2ToOfdpa( main )
translator.bmv2ToOfdpa( main, cfgFile=xconnectFile )
- if suf:
- run.loadJson( main, suffix=suf )
- else:
- run.loadJson( main )
+ if not main.persistentSetup:
+ if suf:
+ run.loadJson( main, suffix=suf )
+ else:
+ run.loadJson( main )
run.loadChart( main )
if hasattr( main, 'Mininet1' ):
run.mnDockerSetup( main ) # optionally create and setup docker image
diff --git a/TestON/tests/USECASE/SegmentRouting/SRMulticast/dependencies/SRMulticastTest.py b/TestON/tests/USECASE/SegmentRouting/SRMulticast/dependencies/SRMulticastTest.py
index af9250c..4dfcd53 100644
--- a/TestON/tests/USECASE/SegmentRouting/SRMulticast/dependencies/SRMulticastTest.py
+++ b/TestON/tests/USECASE/SegmentRouting/SRMulticast/dependencies/SRMulticastTest.py
@@ -44,11 +44,14 @@
if main.useBmv2:
# Translate configuration file from OVS-OFDPA to BMv2 driver
translator.bmv2ToOfdpa( main ) # Try to cleanup if switching between switch types
- switchPrefix = main.params[ 'DEPENDENCY' ].get( 'switchPrefix', "bmv2" )
+ switchPrefix = main.params[ 'DEPENDENCY' ].get( 'switchPrefix', '' )
+ if switchPrefix is None:
+ switchPrefix = ''
translator.ofdpaToBmv2( main, switchPrefix=switchPrefix )
else:
translator.bmv2ToOfdpa( main )
- lib.loadJson( main )
+ if not main.persistentSetup:
+ lib.loadJson( main )
time.sleep( float( main.params[ "timers" ][ "loadNetcfgSleep" ] ) )
main.cfgName = "common" if hasattr( main, "Mininet1" ) else main.params[ "DEPENDENCY" ][ "confName" ]
lib.loadMulticastConfig( main )
diff --git a/TestON/tests/USECASE/SegmentRouting/SROnosFailure/dependencies/SROnosFailureFuncs.py b/TestON/tests/USECASE/SegmentRouting/SROnosFailure/dependencies/SROnosFailureFuncs.py
index 96bf84d..f374c47 100644
--- a/TestON/tests/USECASE/SegmentRouting/SROnosFailure/dependencies/SROnosFailureFuncs.py
+++ b/TestON/tests/USECASE/SegmentRouting/SROnosFailure/dependencies/SROnosFailureFuncs.py
@@ -55,10 +55,11 @@
else:
translator.bmv2ToOfdpa( main )
translator.bmv2ToOfdpa( main, cfgFile=xconnectFile )
- if suf:
- run.loadJson( main, suffix=suf )
- else:
- run.loadJson( main )
+ if not main.persistentSetup:
+ if suf:
+ run.loadJson( main, suffix=suf )
+ else:
+ run.loadJson( main )
run.loadChart( main )
if hasattr( main, 'Mininet1' ):
run.mnDockerSetup( main ) # optionally create and setup docker image
@@ -74,8 +75,9 @@
# Run the test with physical devices
# TODO: connect TestON to the physical network
pass
- # xconnects need to be loaded after topology
- run.loadXconnects( main )
+ if not main.persistentSetup:
+ # xconnects need to be loaded after topology
+ run.loadXconnects( main )
switches = self.topo[ Topo ][ 0 ] + self.topo[ Topo ][ 1 ]
links = ( self.topo[ Topo ][ 0 ] * self.topo[ Topo ][ 1 ] ) * 2
# pre-configured routing and bridging test
diff --git a/TestON/tests/USECASE/SegmentRouting/SRRouting/dependencies/SRRoutingTest.py b/TestON/tests/USECASE/SegmentRouting/SRRouting/dependencies/SRRoutingTest.py
index 9c1b8c1..1b544dc 100644
--- a/TestON/tests/USECASE/SegmentRouting/SRRouting/dependencies/SRRoutingTest.py
+++ b/TestON/tests/USECASE/SegmentRouting/SRRouting/dependencies/SRRoutingTest.py
@@ -66,11 +66,14 @@
if main.useBmv2:
# Translate configuration file from OVS-OFDPA to BMv2 driver
translator.bmv2ToOfdpa( main ) # Try to cleanup if switching between switch types
- switchPrefix = main.params[ 'DEPENDENCY' ].get( 'switchPrefix', "bmv2" )
+ switchPrefix = main.params[ 'DEPENDENCY' ].get( 'switchPrefix', '' )
+ if switchPrefix is None:
+ switchPrefix = ''
translator.ofdpaToBmv2( main, switchPrefix=switchPrefix )
else:
translator.bmv2ToOfdpa( main )
- lib.loadJson( main )
+ if not main.persistentSetup:
+ lib.loadJson( main )
main.log.debug( "sleeping %i seconds" % float( main.params[ 'timers' ][ 'loadNetcfgSleep' ] ) )
time.sleep( float( main.params[ 'timers' ][ 'loadNetcfgSleep' ] ) )
lib.loadHost( main )
diff --git a/TestON/tests/USECASE/SegmentRouting/SRSanity/dependencies/SRSanityFuncs.py b/TestON/tests/USECASE/SegmentRouting/SRSanity/dependencies/SRSanityFuncs.py
index 9115ff0..f079be4 100644
--- a/TestON/tests/USECASE/SegmentRouting/SRSanity/dependencies/SRSanityFuncs.py
+++ b/TestON/tests/USECASE/SegmentRouting/SRSanity/dependencies/SRSanityFuncs.py
@@ -41,11 +41,13 @@
main.cfgName = Topo
main.Cluster.setRunningNode( numNodes )
run.installOnos( main )
- run.loadJson( main )
+ if not main.persistentSetup:
+ run.loadJson( main )
run.loadChart( main )
run.startMininet( main, 'cord_fabric.py', args=self.topo[ Topo ][ 2 ] )
- # xconnects need to be loaded after topology
- run.loadXconnects( main )
+ if not main.persistentSetup:
+ # xconnects need to be loaded after topology
+ run.loadXconnects( main )
# pre-configured routing and bridging test
run.checkFlows( main, minFlowCount=minFlow )
run.pingAll( main )
diff --git a/TestON/tests/USECASE/SegmentRouting/SRStaging/SRStaging.params b/TestON/tests/USECASE/SegmentRouting/SRStaging/SRStaging.params
new file mode 100644
index 0000000..cb37498
--- /dev/null
+++ b/TestON/tests/USECASE/SegmentRouting/SRStaging/SRStaging.params
@@ -0,0 +1,80 @@
+<PARAMS>
+ <testcases>7</testcases>
+
+ <GRAPH>
+ <nodeCluster>staging</nodeCluster>
+ <builds>20</builds>
+ <jobName>SRStaging</jobName>
+ <branch>master</branch>
+ </GRAPH>
+
+ <SCALE>
+ <size>3</size>
+ <max>3</max>
+ </SCALE>
+
+ <DEPENDENCY>
+ <useCommonConf>False</useCommonConf>
+ <useCommonTopo>True</useCommonTopo>
+ <useBmv2>True</useBmv2>
+ <bmv2SwitchType>stratum</bmv2SwitchType>
+ <switchPrefix></switchPrefix>
+ <stratumRoot>~/stratum</stratumRoot>
+ <topology>trellis_fabric.py</topology>
+ <lib>routinglib.py,trellislib.py,stratum.py</lib>
+ </DEPENDENCY>
+
+ <jsonFileSuffix>.hw</jsonFileSuffix>
+
+ <persistent_setup>True</persistent_setup>
+
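+ # Kubernetes app name and namespace of the ONOS pods; the namespace is also used when collecting pod logs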
+ <kubernetes>
+ <appName>onos-tost-onos-classic</appName>
+ <namespace>tost</namespace>
+ </kubernetes>
+
+ <PERF>
+ <traffic_host>Host1 Host2 Host3</traffic_host>
+ <traffic_cmd_arguments> -u -b 20M -t 20</traffic_cmd_arguments>
+
+ <pcap_host>ng40vm</pcap_host>
+ <pcap_cmd_arguments>-t e -F pcap -s 100 </pcap_cmd_arguments>
+
+ </PERF>
+ <ONOS_Logging>
+ <org.onosproject.p4runtime.ctl.client>DEBUG</org.onosproject.p4runtime.ctl.client>
+ <org.onosproject.segmentrouting>DEBUG</org.onosproject.segmentrouting>
+ <org.onosproject.gnmi.ctl>TRACE</org.onosproject.gnmi.ctl>
+ </ONOS_Logging>
+
+
+ <ENV>
+ <cellName>productionCell</cellName>
+ <cellApps>drivers,fpm,lldpprovider,hostprovider,netcfghostprovider,drivers.bmv2,org.opencord.fabric-tofino,pipelines.fabric,org.stratumproject.fabric-tna,drivers.barefoot,segmentrouting,t3</cellApps>
+ </ENV>
+
+ <EXTERNAL_APPS>
+ </EXTERNAL_APPS>
+
+ <CTRL>
+ <port>6653</port>
+ </CTRL>
+
+ <timers>
+ <LinkDiscovery>12</LinkDiscovery>
+ <SwitchDiscovery>12</SwitchDiscovery>
+ </timers>
+
+ <SLEEP>
+ <startup>10</startup>
+ </SLEEP>
+
+ <TOPO>
+ <switchNum>4</switchNum>
+ <linkNum>16</linkNum>
+ </TOPO>
+
+ <ALARM>
+ <minPassPercent>100</minPassPercent>
+ </ALARM>
+</PARAMS>
diff --git a/TestON/tests/USECASE/SegmentRouting/SRStaging/SRStaging.py b/TestON/tests/USECASE/SegmentRouting/SRStaging/SRStaging.py
new file mode 100644
index 0000000..124fd50
--- /dev/null
+++ b/TestON/tests/USECASE/SegmentRouting/SRStaging/SRStaging.py
@@ -0,0 +1,91 @@
+class SRStaging:
+ def __init__( self ):
+ self.default = ''
+
+ def CASE1( self, main ):
+ main.case("Testing connections")
+ main.persistentSetup = True
+ def CASE7( self, main ):
+ """
+ Connects to the staging pod, sets up 3 ONOS instances on the
+ 2x2 leaf-spine topology, then measures traffic downtime while
+ bringing leaf-spine ports down and back up
+ """
+ try:
+ from tests.USECASE.SegmentRouting.SRStaging.dependencies.SRStagingTest import SRStagingTest
+ except ImportError:
+ main.log.error( "SRStagingTest not found. Exiting the test" )
+ main.cleanAndExit()
+ try:
+ main.funcs
+ except ( NameError, AttributeError ):
+ main.funcs = SRStagingTest()
+ # Load kubeconfig
+ # Setup ssh tunnel
+ # connect to ONOS CLI
+
+
+ main.funcs.setupTest( main,
+ test_idx=7,
+ topology='2x2staging',
+ onosNodes=3,
+ description="Developing tests on the staging pod" )
+ srcComponentNames = main.params[ 'PERF' ][ 'traffic_host' ].split()
+ srcComponentList = []
+ for name in srcComponentNames:
+ srcComponentList.append( getattr( main, name ) )
+ dstComponent = getattr( main, main.params[ 'PERF' ][ 'pcap_host' ] )
+
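+ # Maps each event's short description to the measured traffic gaps (in ms) per sender and receiver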
+ main.downtimeResults = {}
+
+
+ # TODO: MOVE TO CONFIG FILE
+ device = "device:leaf2"
+ port1 = "268"
+ port2 = "284"
+ port3 = "260"
+ port4 = "276"
+
+ descPrefix = "Upstream_Leaf_Spine_Portstate"
+ # TODO: Move most of this logic into linkDown/linkUp
+ ## First Link Down
+ shortDesc = descPrefix + "-Failure1"
+ longDesc = "%s Failure: Bring down %s/%s" % ( descPrefix, device, port1 )
+ main.funcs.linkDown( device, port1, srcComponentList, dstComponent, shortDesc, longDesc )
+ ## Second Link Down
+ shortDesc = descPrefix + "-Failure2"
+ longDesc = "%s Failure: Bring down %s/%s" % ( descPrefix, device, port2 )
+ main.funcs.linkDown( device, port2, srcComponentList, dstComponent, shortDesc, longDesc )
+ ## First Link Up
+ # TODO Check these are set correctly
+ shortDesc = descPrefix + "-Recovery1"
+ longDesc = "%s Recovery: Bring up %s/%s" % ( descPrefix, device, port1 )
+ main.funcs.linkUp( device, port1, srcComponentList, dstComponent, shortDesc, longDesc )
+ ## Second Link Up
+ shortDesc = descPrefix + "-Recovery2"
+ longDesc = "%s Recovery: Bring up %s/%s" % ( descPrefix, device, port2 )
+ main.funcs.linkUp( device, port2, srcComponentList, dstComponent, shortDesc, longDesc )
+ ## Third Link Down
+ shortDesc = descPrefix + "-Failure3"
+ longDesc = "%s Failure: Bring down %s/%s" % ( descPrefix, device, port3 )
+ main.funcs.linkDown( device, port3, srcComponentList, dstComponent, shortDesc, longDesc )
+ ## Fourth Link Down
+ shortDesc = descPrefix + "-Failure4"
+ longDesc = "%s Failure: Bring down %s/%s" % ( descPrefix, device, port4 )
+ main.funcs.linkDown( device, port4, srcComponentList, dstComponent, shortDesc, longDesc )
+ ## Third Link Up
+ shortDesc = descPrefix + "-Recovery3"
+ longDesc = "%s Recovery: Bring upn %s/%s" % ( descPrefix, device, port3 )
+ main.funcs.linkUp( device, port3, srcComponentList, dstComponent, shortDesc, longDesc )
+ ## Fourth Link Up
+ shortDesc = descPrefix + "-Recovery4"
+ longDesc = "%s Recovery: Bring up %s/%s" % ( descPrefix, device, port4 )
+ main.funcs.linkUp( device, port4, srcComponentList, dstComponent, shortDesc, longDesc )
+
+ main.log.warn( main.downtimeResults )
+ import json
+ main.log.warn( json.dumps( main.downtimeResults, indent=4, sort_keys=True ) )
+ main.funcs.cleanup( main )
diff --git a/TestON/tests/USECASE/SegmentRouting/SRStaging/SRStaging.topo b/TestON/tests/USECASE/SegmentRouting/SRStaging/SRStaging.topo
new file mode 100644
index 0000000..8c25811
--- /dev/null
+++ b/TestON/tests/USECASE/SegmentRouting/SRStaging/SRStaging.topo
@@ -0,0 +1,218 @@
+<TOPOLOGY>
+ <COMPONENT>
+ <ONOScell>
+ <host>localhost</host> # ONOS "bench" machine
+ <user>jenkins</user>
+ <password></password>
+ <type>OnosClusterDriver</type>
+ <connect_order>50</connect_order>
+ <jump_host></jump_host>
+ <home>~/Projects/onos/</home> # defines where onos home is on the build machine. Defaults to "~/onos/" if empty.
+ <COMPONENTS>
+ <kubeConfig>~/.kube/stg-ace-menlo</kubeConfig> # If set, will attempt to use this file for setting up port-forwarding
+ <useDocker>True</useDocker> # Whether to use docker for ONOS nodes
+ <docker_prompt>\$</docker_prompt>
+ <cluster_name></cluster_name> # Used as a prefix for cluster components. Defaults to 'ONOS'
+ <diff_clihost>True</diff_clihost> # if it has different host other than localhost for CLI. True or empty. OC# will be used if True.
+ <karaf_username>karaf</karaf_username>
+ <karaf_password>karaf</karaf_password>
+ <web_user>sdn</web_user>
+ <web_pass>rocks</web_pass>
+ <rest_port></rest_port>
+ <prompt></prompt> # TODO: we technically need a few of these, one per component
+ <onos_home>~/Projects/onos/</onos_home> # defines where onos home is on the target cell machine. Defaults to entry in "home" if empty.
+ <nodes> 3 </nodes> # number of nodes in the cluster
+ </COMPONENTS>
+ </ONOScell>
+
+ <SwitchLeaf1>
+ <host>10.32.4.132</host>
+ <user>root</user>
+ <password>onl</password>
+ <type>StratumOSSwitchDriver</type>
+ <connect_order>12</connect_order>
+ <jump_host>NetworkBench</jump_host>
+ <COMPONENTS>
+ <shortName>leaf1</shortName>
+ <port1></port1>
+ <link1></link1>
+ <port2></port2>
+ <link2></link2>
+ <onosConfigPath></onosConfigPath>
+ <onosConfigFile></onosConfigFile>
+ </COMPONENTS>
+ </SwitchLeaf1>
+
+ <SwitchLeaf2>
+ <host>10.32.4.136</host>
+ <user>root</user>
+ <password>onl</password>
+ <type>StratumOSSwitchDriver</type>
+ <connect_order>13</connect_order>
+ <jump_host>NetworkBench</jump_host>
+ <COMPONENTS>
+ <shortName>leaf2</shortName>
+ <port1></port1>
+ <link1></link1>
+ <port2></port2>
+ <link2></link2>
+ <onosConfigPath></onosConfigPath>
+ <onosConfigFile></onosConfigFile>
+ </COMPONENTS>
+ </SwitchLeaf2>
+
+ <SwitchSpine1>
+ <host>10.32.4.130</host>
+ <user>root</user>
+ <password>onl</password>
+ <type>StratumOSSwitchDriver</type>
+ <connect_order>14</connect_order>
+ <jump_host>NetworkBench</jump_host>
+ <COMPONENTS>
+ <shortName>spine1</shortName>
+ <port1></port1>
+ <link1></link1>
+ <port2></port2>
+ <link2></link2>
+ <onosConfigPath></onosConfigPath>
+ <onosConfigFile></onosConfigFile>
+ </COMPONENTS>
+ </SwitchSpine1>
+
+ <SwitchSpine2>
+ <host>10.32.4.134</host>
+ <user>root</user>
+ <password>onl</password>
+ <type>StratumOSSwitchDriver</type>
+ <connect_order>15</connect_order>
+ <jump_host>NetworkBench</jump_host>
+ <COMPONENTS>
+ <shortName>spine2</shortName>
+ <port1></port1>
+ <link1></link1>
+ <port2></port2>
+ <link2></link2>
+ <onosConfigPath></onosConfigPath>
+ <onosConfigFile></onosConfigFile>
+ </COMPONENTS>
+ </SwitchSpine2>
+
+ <Host1>
+ <host>10.32.4.138</host>
+ <user>jenkins</user>
+ <password></password>
+ <type>HostDriver</type>
+ <connect_order>6</connect_order>
+ <jump_host>NetworkBench</jump_host>
+ <COMPONENTS>
+ <mac></mac>
+ <inband>false</inband>
+ <ip>10.32.5.161</ip>
+ <shortName>h1</shortName>
+ <port1></port1>
+ <link1></link1>
+ <interfaceName>enp175s0f0</interfaceName>
+ <routes>
+ <route1>
+ <network></network>
+ <netmask></netmask>
+ <gw></gw>
+ <interface></interface>
+ </route1>
+ </routes>
+ <sudo_required>false</sudo_required>
+ </COMPONENTS>
+ </Host1>
+
+ <Host2>
+ <host>10.32.4.139</host>
+ <user>jenkins</user>
+ <password></password>
+ <type>HostDriver</type>
+ <connect_order>7</connect_order>
+ <jump_host>NetworkBench</jump_host>
+ <COMPONENTS>
+ <mac></mac>
+ <inband>false</inband>
+ <ip>10.32.5.162</ip>
+ <shortName>h2</shortName>
+ <port1></port1>
+ <link1></link1>
+ <interfaceName>enp175s0f0</interfaceName>
+ <routes>
+ <route1>
+ <network></network>
+ <netmask></netmask>
+ <gw></gw>
+ <interface></interface>
+ </route1>
+ </routes>
+ <sudo_required>false</sudo_required>
+ </COMPONENTS>
+ </Host2>
+
+ <Host3>
+ <host>10.32.4.140</host>
+ <user>jenkins</user>
+ <password></password>
+ <type>HostDriver</type>
+ <connect_order>8</connect_order>
+ <jump_host>NetworkBench</jump_host>
+ <COMPONENTS>
+ <mac></mac>
+ <inband>false</inband>
+ <ip>10.32.5.163</ip>
+ <shortName>h3</shortName>
+ <port1></port1>
+ <link1></link1>
+ <interfaceName>enp175s0f0</interfaceName>
+ <routes>
+ <route1>
+ <network></network>
+ <netmask></netmask>
+ <gw></gw>
+ <interface></interface>
+ </route1>
+ </routes>
+ <sudo_required>false</sudo_required>
+ </COMPONENTS>
+ </Host3>
+
+ <ng40vm>
+ <host>10.32.5.6</host>
+ <user>ng40</user>
+ <password>ng40</password>
+ <type>HostDriver</type>
+ <connect_order>8</connect_order>
+ <jump_host>NetworkBench</jump_host>
+ <COMPONENTS>
+ <mac></mac>
+ <inband>false</inband>
+ <ip>10.32.5.6</ip>
+ <shortName>ng40</shortName>
+ <port1></port1>
+ <link1></link1>
+ <interfaceName>ens8</interfaceName>
+ <routes>
+ <route1>
+ <network></network>
+ <netmask></netmask>
+ <gw></gw>
+ <interface></interface>
+ </route1>
+ </routes>
+ </COMPONENTS>
+ </ng40vm>
+
+ <NetworkBench>
+ <host>66.201.42.222</host>
+ <user>jenkins</user>
+ <password></password>
+ <type>NetworkDriver</type>
+ <connect_order>1</connect_order>
+ <COMPONENTS>
+ </COMPONENTS>
+ </NetworkBench>
+
+ </COMPONENT>
+</TOPOLOGY>
diff --git a/TestON/tests/USECASE/SegmentRouting/SRStaging/__init__.py b/TestON/tests/USECASE/SegmentRouting/SRStaging/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/TestON/tests/USECASE/SegmentRouting/SRStaging/__init__.py
diff --git a/TestON/tests/USECASE/SegmentRouting/SRStaging/dependencies/SRStagingTest.py b/TestON/tests/USECASE/SegmentRouting/SRStaging/dependencies/SRStagingTest.py
new file mode 100644
index 0000000..5dd43da
--- /dev/null
+++ b/TestON/tests/USECASE/SegmentRouting/SRStaging/dependencies/SRStagingTest.py
@@ -0,0 +1,393 @@
+"""
+Copyright 2017 Open Networking Foundation ( ONF )
+
+Please refer questions to either the onos test mailing list at <onos-test@onosproject.org>,
+the System Testing Plans and Results wiki page at <https://wiki.onosproject.org/x/voMg>,
+or the System Testing Guide page at <https://wiki.onosproject.org/x/WYQg>
+
+ TestON is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 2 of the License, or
+ ( at your option ) any later version.
+
+ TestON is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with TestON. If not, see <http://www.gnu.org/licenses/>.
+"""
+
+from tests.USECASE.SegmentRouting.dependencies.Testcaselib import Testcaselib as run
+import time
+import re
+import json
+import pexpect
+
+class SRStagingTest ():
+
+ def __init__( self ):
+ self.default = ''
+ self.topo = dict()
+ # TODO: Check minFlowCount of leaf for BMv2 switch
+ # (number of spine switch, number of leaf switch, dual-homed, description, minFlowCount - leaf (OvS), minFlowCount - leaf (BMv2))
+ self.topo[ '0x1' ] = ( 0, 1, False, 'single ToR', 28, 20 )
+ self.topo[ '0x2' ] = ( 0, 2, True, 'dual-homed ToR', 37, 37 )
+ self.topo[ '2x2' ] = ( 2, 2, False, '2x2 leaf-spine topology', 37, 32 )
+ self.topo[ '2x2staging' ] = ( 2, 2, True, '2x2 leaf-spine topology', 37, 32 )
+ # TODO: Implement 2x3 topology
+ # topo[ '2x3' ] = ( 2, 3, True, '2x3 leaf-spine topology with dual ToR and single ToR', 28 )
+ self.topo[ '2x4' ] = ( 2, 4, True, '2x4 dual-homed leaf-spine topology', 53, 53 )
+ self.switchNames = {}
+ self.switchNames[ '0x1' ] = [ "leaf1" ]
+ self.switchNames[ '2x2' ] = [ "leaf1", "leaf2", "spine101", "spine102" ]
+ main.switchType = "ovs"
+
+ def setupTest( self, main, test_idx, topology, onosNodes, description, vlan = [] ):
+ try:
+ skipPackage = False
+ init = False
+ if not hasattr( main, 'apps' ):
+ init = True
+ run.initTest( main )
+ # Skip onos packaging if the cluster size stays the same
+ if not init and onosNodes == main.Cluster.numCtrls:
+ skipPackage = True
+
+ main.case( '%s, with %s, %s switches and %d ONOS instance%s' %
+ ( description, self.topo[ topology ][ 3 ],
+ main.switchType,
+ onosNodes,
+ 's' if onosNodes > 1 else '' ) )
+
+ main.cfgName = 'CASE%01d%01d' % ( test_idx / 10, ( ( test_idx - 1 ) % 10 ) % 4 + 1 )
+ main.Cluster.setRunningNode( onosNodes )
+ # Set ONOS Log levels
+ # TODO: Check levels before and reset them after
+ run.installOnos( main, skipPackage=skipPackage, cliSleep=5 )
+
+ if hasattr( main, 'Mininet1' ):
+ run.mnDockerSetup( main ) # optionally create and setup docker image
+
+ # Run the test with Mininet
+ mininet_args = ' --spine=%d --leaf=%d' % ( self.topo[ topology ][ 0 ], self.topo[ topology ][ 1 ] )
+ if self.topo[ topology ][ 2 ]:
+ mininet_args += ' --dual-homed'
+ if len( vlan ) > 0 :
+ mininet_args += ' --vlan=%s' % ( ','.join( ['%d' % vlanId for vlanId in vlan ] ) )
+ if main.useBmv2:
+ mininet_args += ' --switch %s' % main.switchType
+ main.log.info( "Using %s switch" % main.switchType )
+
+ run.startMininet( main, 'trellis_fabric.py', args=mininet_args )
+
+ else:
+ # Run the test with physical devices
+ run.connectToPhysicalNetwork( main, hostDiscovery=False ) # We don't want to do host discovery in the pod
+ except Exception as e:
+ main.log.exception( "Error in setupTest" )
+ main.skipCase( result="FAIL", msg=e )
+
+ def startCapturing( self, main, srcList, dst, shortDesc=None, longDesc=None ):
+ """
+ Starts logging, traffic generation, traffic filters, etc. before a failure is induced
+ srcList: list of src components that send the traffic
+ dst: the dst component that receives the traffic
+ """
+ try:
+ # ping right before to make sure arp is cached and sudo is authenticated
+ for src in srcList:
+ src.handle.sendline( "sudo /bin/ping -c 1 %s" % dst.ip_address )
+ try:
+ i = src.handle.expect( [ "password", src.prompt ] )
+ if i == 0:
+ src.handle.sendline( src.pwd )
+ src.handle.expect( src.prompt )
+ except Exception:
+ main.log.error( "Unexpected response from ping" )
+ src.handle.send( '\x03' ) # ctrl-c
+ src.handle.expect( src.prompt )
+ main.log.warn( "%s: %s" % ( src.name, src.handle.before ) )
+ # TODO: Create new components for iperf and tshark?
+ # Also generate more streams with different udp ports or some other
+ # method of guaranteeing we kill a link with traffic
+ # Start traffic
+ # TODO: ASSERTS
+ main.pingStart = time.time()
+ dstIp = dst.interfaces[0]['ips'][0]
+ for src in srcList:
+ srcIp = src.interfaces[0]['ips'][0]
+ iperfArgs = "%s --bind %s -c %s" % ( main.params[ 'PERF' ][ 'traffic_cmd_arguments' ],
+ srcIp,
+ dstIp )
+ main.log.info( "Starting iperf" )
+ src.handle.sendline( "/usr/bin/iperf %s &> /dev/null &" % iperfArgs )
+ src.handle.expect( src.prompt )
+ # Check path of traffic, to use in failures
+ # TODO: Do we need to add udp port to filter?
+ # TODO: Dynamically find the interface to filter on
+ # Start packet capture
+ pcapFileReceiver = "%s/tshark/%s-%s-tsharkReceiver" % ( "~/TestON",
+ shortDesc if shortDesc else "tshark",
+ dst.name )
+ tsharkArgsReceiver = "%s -i %s -f 'udp && host %s' -w %s" % ( main.params[ 'PERF' ][ 'pcap_cmd_arguments' ],
+ dst.interfaces[0]['name'],
+ dstIp,
+ pcapFileReceiver )
+ commands = [ 'mkdir -p ~/TestON/tshark',
+ 'rm %s' % pcapFileReceiver,
+ 'touch %s' % pcapFileReceiver,
+ 'chmod o=rw %s' % pcapFileReceiver ]
+ for command in commands:
+ dst.handle.sendline( command )
+ dst.handle.expect( dst.prompt )
+ main.log.debug( "%s: %s" % (dst.name, dst.handle.before ) )
+ main.log.info( "Starting tshark on %s " % dst.name )
+ dst.handle.sendline( "sudo /usr/bin/tshark %s &> /dev/null &" % tsharkArgsReceiver )
+ dst.handle.expect( dst.prompt )
+
+ for src in srcList:
+ srcIp = src.interfaces[0]['ips'][0]
+ pcapFileSender = "%s/tshark/%s-%s-tsharkSender" % ( "~/TestON",
+ shortDesc if shortDesc else "tshark",
+ src.name )
+ tsharkArgsSender = "%s -i %s -f 'udp && host %s' -w %s" % ( main.params[ 'PERF' ][ 'pcap_cmd_arguments' ],
+ src.interfaces[0]['name'],
+ srcIp,
+ pcapFileSender )
+ # Prepare file with correct permissions
+ commands = [ 'mkdir -p ~/TestON/tshark',
+ 'rm %s' % pcapFileSender,
+ 'touch %s' % pcapFileSender,
+ 'chmod o=rw %s' % pcapFileSender ]
+ for command in commands:
+ src.handle.sendline( command )
+ src.handle.expect( src.prompt )
+ main.log.debug( "%s: %s" % (src.name, src.handle.before ) )
+
+ main.log.info( "Starting tshark on %s " % src.name )
+ for src in srcList:
+ src.handle.sendline( "sudo /usr/bin/tshark %s &> /dev/null &" % tsharkArgsSender )
+ src.handle.expect( src.prompt )
+ # Timestamp used for EVENT START
+ main.eventStart = time.time()
+ # LOG Event start in ONOS logs
+ for ctrl in main.Cluster.active():
+ ctrl.CLI.log( "'%s START'" % longDesc, level="INFO" )
+ except Exception as e:
+ main.log.exception( "Error in startCapturing" )
+ main.skipCase( result="FAIL", msg=e )
+
+ def stopCapturing( self, main, srcList, dst, shortDesc=None, longDesc=None ):
+ try:
+ pcapFileReceiver = "%s/tshark/%s-%s-tsharkReceiver" % ( "~/TestON",
+ shortDesc if shortDesc else "tshark",
+ dst.name )
+ # Timestamp used for EVENT STOP
+ main.eventStop = time.time()
+ # LOG Event stop in ONOS logs
+ for ctrl in main.Cluster.active():
+ ctrl.CLI.log( "'%s STOP'" % longDesc, level="INFO" )
+ # Stop packet capture
+ dst.handle.sendline( 'fg' ) # Bring process to front
+ dst.handle.send( '\x03' ) # send ctrl-c
+ dst.handle.expect( dst.prompt )
+ for src in srcList:
+ src.handle.sendline( 'fg' ) # Bring process to front
+ src.handle.send( '\x03' ) # send ctrl-c
+ src.handle.expect( src.prompt )
+ # Stop traffic
+ for src in srcList:
+ src.handle.sendline( 'fg' ) # Bring process to front
+ src.handle.send( '\x03' ) # send ctrl-c
+ src.handle.expect( src.prompt )
+ main.pingStop = time.time()
+ main.log.warn( "It took %s seconds since we started ping for us to stop pcap" % ( main.pingStop - main.pingStart ) )
+
+ main.downtimeResults[ shortDesc ] = {}
+ for src in srcList:
+ pcapFileSender = "%s/tshark/%s-%s-tsharkSender" % ( "~/TestON",
+ shortDesc if shortDesc else "tshark",
+ src.name )
+ main.downtimeResults[ shortDesc ].update( { src.name: self.analyzePcap( src, pcapFileSender, "'udp && ip.src == %s'" % src.interfaces[0]['ips'][0], debug=False) } )
+ main.downtimeResults[ shortDesc ].update( { "%s-%s" % ( src.name, dst.name ): self.analyzePcap( dst, pcapFileReceiver, "'udp && ip.src == %s'" % src.interfaces[0]['ips'][0], debug=False) } )
+ # Grab pcap
+ senderSCP = main.ONOSbench.scp( src, pcapFileSender, main.logdir, direction="from" )
+ # Grab logs
+ # Grab pcap
+ receiverSCP = main.ONOSbench.scp( dst, pcapFileReceiver, main.logdir, direction="from" )
+ # Grab Write logs on switches
+ # TODO: kubectl cp write-reqs.txt
+
+ except Exception as e:
+ main.log.exception( "Error in stopCapturing" )
+
+ def linkDown( self, device, port, srcComponentList, dstComponent, shortDesc, longDesc ):
+ """"
+ High level function that handles an event including monitoring
+ Arguments:
+ device - String of the device uri in ONOS
+ port - String of the port uri in ONOS
+ srcComponentList - List containing src components, used for sending traffic
+ dstComponent - Component used for receiving traffic
+ shortDesc - String, Short description, used in reporting and file prefixes
+ longDesc - String, Longer description, used in logging
+ """
+ import time
+ try:
+ main.step( "Start Capturing" )
+ main.funcs.startCapturing( main,
+ srcComponentList,
+ dstComponent,
+ shortDesc=shortDesc,
+ longDesc=longDesc )
+ main.step( "Port down" )
+ ctrl = main.Cluster.active( 0 ).CLI
+ portDown = ctrl.portstate( dpid=device, port=port, state="disable" )
+ portsJson = json.loads( ctrl.ports() )
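+ # Look up the port in the ONOS ports output to check its state after the change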
+ for d in portsJson:
+ if d['device']['id'] == device:
+ for p in d['ports']:
+ if "(%s)" % port in p['port']:
+ adminState = p['isEnabled']
+ main.log.debug( adminState )
+ #TODO ASSERTS
+ main.log.info( "Sleeping 10 seconds" )
+ time.sleep(10)
+ main.step( "Stop Capturing" )
+ main.funcs.stopCapturing( main,
+ srcComponentList,
+ dstComponent,
+ shortDesc=shortDesc,
+ longDesc=longDesc )
+ except Exception as e:
+ main.log.exception( "Error in linkDown" )
+
+ def linkUp( self, device, port, srcComponentList, dstComponent, shortDesc, longDesc ):
+ """"
+ High level function that handles an event including monitoring
+ Arguments:
+ device - String of the device uri in ONOS
+ port - String of the port uri in ONOS
+ srcComponentList - List containing src components, used for sending traffic
+ dstComponent - Component used for receiving traffic
+ shortDesc - String, Short description, used in reporting and file prefixes
+ longDesc - String, Longer description, used in logging
+ """
+ import time
+ try:
+ main.step( "Start Capturing" )
+ main.funcs.startCapturing( main,
+ srcComponentList,
+ dstComponent,
+ shortDesc=shortDesc,
+ longDesc=longDesc )
+ main.step( "Port Up" )
+ ctrl = main.Cluster.active( 0 ).CLI
+ portUp = ctrl.portstate( dpid=device, port=port, state="enable" )
+ portsJson = json.loads( ctrl.ports() )
+ for d in portsJson:
+ if d['device']['id'] == device:
+ for p in d['ports']:
+ if "(%s)" % port in p['port']:
+ adminState = p['isEnabled']
+ main.log.debug( adminState )
+ #TODO ASSERTS
+ main.log.info( "Sleeping 10 seconds" )
+ time.sleep(10)
+ main.step( "Stop Capturing" )
+ main.funcs.stopCapturing( main,
+ srcComponentList,
+ dstComponent,
+ shortDesc=shortDesc,
+ longDesc=longDesc )
+ except Exception as e:
+ main.log.exception( "Error in linkUp" )
+
+ def switchDown( self ):
+ try:
+ pass
+ except Exception as e:
+ main.log.exception( "Error in switchDown" )
+
+ def switchUp( self ):
+ try:
+ pass
+ except Exception as e:
+ main.log.exception( "Error in switchUp" )
+
+ def onosDown( self ):
+ try:
+ pass
+ except Exception as e:
+ main.log.exception( "Error in onosDown" )
+
+ def analyzePcap( self, component, filePath, packetFilter, debug=False ):
+ try:
+ try:
+ output = ""
+ component.handle.sendline( "" )
+ while True:
+ component.handle.expect( component.prompt, timeout=1 )
+ output += component.handle.before + str( component.handle.after )
+ except pexpect.TIMEOUT:
+ main.log.debug( "%s: %s" % ( component.name, output ) )
+ except Exception as e:
+ main.log.exception( "Error in onosDown" )
+ lineRE = r'^\s*\d+\s+([0-9.]+)'
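+ # lineRE matches tshark's frame number column and captures the frame.time_delta field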
+ tsharkOptions = "-t dd -r %s -Y %s -T fields -e frame.number -e frame.time_delta -e ip.src -e ip.dst -e udp" % ( filePath, packetFilter )
+ component.handle.sendline( "sudo /usr/bin/tshark %s" % tsharkOptions )
+ i = component.handle.expect( [ "appears to be damaged or corrupt.", "Malformed Packet", component.prompt, pexpect.TIMEOUT ], timeout=60 )
+ if i != 2:
+ main.log.error( "Error Reading pcap file" )
+ component.handle.send( '\x03' ) # CTRL-C to end process
+ component.handle.expect( component.prompt )
+ main.log.debug( component.handle.before )
+ return 0
+ output = component.handle.before
+ deltas = []
+ for line in output.splitlines():
+ # Search for a packet in each line
+ # If match, save the delta time of the packet
+ m = re.search( lineRE, line )
+ if m:
+ if debug:
+ main.log.debug( repr( line ) )
+ main.log.info( m.groups() )
+ deltas.append( float( m.group(1) ) * 1000 )
+ else:
+ main.log.warn( repr( line ) )
+ if not deltas:
+ main.log.error( "No Packets found" )
+ return 0
+ # Print largest timestamp gap
+ deltas.sort()
+ if debug:
+ main.log.debug( deltas[ -10: ] ) # largest 10
+ main.log.info( "%s: Detected downtime (longest gap between packets): %s ms" % ( component.name, deltas[ -1 ] ) )
+ return deltas[ -1 ]
+ except Exception as e:
+ main.log.exception( "Error in analyzePcap" )
+
+ def dbWrite( self, main, filename ):
+ try:
+ dbFileName = "%s/%s" % ( main.logdir, filename )
+ dbfile = open( dbFileName, "w+" )
+ header = []
+ row = []
+ for eventName, results in main.downtimeResults.iteritems():
+ for measurementName, value in results.iteritems():
+ header.append( "'%s-%s'" % ( eventName, measurementName ) )
+ row.append( "'%s'" % value )
+ dbfile.write( ",".join( header ) + "\n" + ",".join( row ) + "\n" )
+ dbfile.close()
+ except IOError:
+ main.log.warn( "Error opening " + dbFileName + " to write results." )
+
+ def cleanup( self, main ):
+ # TODO: Do things like restore log levels here
+ run.cleanup( main )
+ self.dbWrite( main, "SRStaging-dbfile.csv")
+
diff --git a/TestON/tests/USECASE/SegmentRouting/SRStaging/dependencies/__init__.py b/TestON/tests/USECASE/SegmentRouting/SRStaging/dependencies/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/TestON/tests/USECASE/SegmentRouting/SRStaging/dependencies/__init__.py
diff --git a/TestON/tests/USECASE/SegmentRouting/SRSwitchFailure/dependencies/SRSwitchFailureFuncs.py b/TestON/tests/USECASE/SegmentRouting/SRSwitchFailure/dependencies/SRSwitchFailureFuncs.py
index b25ace4..82a4616 100644
--- a/TestON/tests/USECASE/SegmentRouting/SRSwitchFailure/dependencies/SRSwitchFailureFuncs.py
+++ b/TestON/tests/USECASE/SegmentRouting/SRSwitchFailure/dependencies/SRSwitchFailureFuncs.py
@@ -55,10 +55,11 @@
else:
translator.bmv2ToOfdpa( main )
translator.bmv2ToOfdpa( main, cfgFile=xconnectFile )
- if suf:
- run.loadJson( main, suffix=suf )
- else:
- run.loadJson( main )
+ if not main.persistentSetup:
+ if suf:
+ run.loadJson( main, suffix=suf )
+ else:
+ run.loadJson( main )
run.loadChart( main )
if hasattr( main, 'Mininet1' ):
run.mnDockerSetup( main ) # optionally create and setup docker image
diff --git a/TestON/tests/USECASE/SegmentRouting/dependencies/Testcaselib.py b/TestON/tests/USECASE/SegmentRouting/dependencies/Testcaselib.py
index 699370c..5ed1784 100644
--- a/TestON/tests/USECASE/SegmentRouting/dependencies/Testcaselib.py
+++ b/TestON/tests/USECASE/SegmentRouting/dependencies/Testcaselib.py
@@ -365,7 +365,7 @@
network.discoverHosts()
@staticmethod
- def connectToPhysicalNetwork( main ):
+ def connectToPhysicalNetwork( main, hostDiscovery=True ):
main.step( "Connecting to physical netowrk" )
main.physicalNet = True
topoResult = main.NetworkBench.connectToNet()
@@ -414,7 +414,8 @@
actual=stepResult,
onpass="Successfully connected inband hosts",
onfail="Failed to connect inband hosts" )
- Testcaselib.discoverHosts( main )
+ if hostDiscovery:
+ Testcaselib.discoverHosts( main )
@staticmethod
def saveOnosDiagnostics( main ):
@@ -540,11 +541,13 @@
main.ONOSbench.dumpONOSCmd( main.Cluster.active( 0 ).ipAddress,
"flows",
main.logdir,
- tag + "_FlowsBefore" )
+ tag + "_FlowsBefore",
+ cliPort=main.Cluster.active(0).CLI.karafPort )
main.ONOSbench.dumpONOSCmd( main.Cluster.active( 0 ).ipAddress,
"groups",
main.logdir,
- tag + "_GroupsBefore" )
+ tag + "_GroupsBefore",
+ cliPort=main.Cluster.active(0).CLI.karafPort )
@staticmethod
def checkDevices( main, switches, tag="", sleep=10 ):
@@ -630,7 +633,7 @@
@staticmethod
def pingAll( main, tag="", dumpflows=True, acceptableFailed=0, basedOnIp=False,
- sleep=10, retryAttempts=1, skipOnFail=False ):
+ sleep=10, retryAttempts=1, skipOnFail=False, useScapy=True ):
'''
Verify connectivity between hosts according to the ping chart
acceptableFailed: max number of acceptable failed pings.
@@ -697,7 +700,7 @@
onpass="IPv6 connectivity successfully tested",
onfail="IPv6 connectivity failed" )
elif main.physicalNet:
- pa = main.Network.pingallHosts( hosts, ipv6=True, useScapy=True )
+ pa = main.Network.pingallHosts( hosts, ipv6=True, useScapy=useScapy )
utilities.assert_equals( expect=expect, actual=pa,
onpass="IP connectivity successfully tested",
onfail="IP connectivity failed" )
@@ -716,11 +719,13 @@
main.ONOSbench.dumpONOSCmd( main.Cluster.active( 0 ).ipAddress,
"flows",
main.logdir,
- tag + "_FlowsOn" )
+ tag + "_FlowsOn",
+ cliPort=main.Cluster.active(0).CLI.karafPort )
main.ONOSbench.dumpONOSCmd( main.Cluster.active( 0 ).ipAddress,
"groups",
main.logdir,
- tag + "_GroupsOn" )
+ tag + "_GroupsOn",
+ cliPort=main.Cluster.active(0).CLI.karafPort )
@staticmethod
def killLink( main, end1, end2, switches, links, sleep=None ):
@@ -1053,6 +1058,8 @@
if not main.persistentSetup:
for ctrl in main.Cluster.active():
main.ONOSbench.onosStop( ctrl.ipAddress )
+ else:
+ Testcaselib.resetOnosLogLevels( main )
Testcaselib.mnDockerTeardown( main )
@staticmethod
@@ -1758,6 +1765,10 @@
Read and Set onos log levels from the params file
"""
main.step( 'Set logging levels' )
+ # Save the original values in case we want to reset them later
+ ctrl = main.Cluster.active(0)
+ ctrl.CLI.logList()
+
logging = True
try:
logs = main.params.get( 'ONOS_Logging', False )
@@ -1770,3 +1781,32 @@
utilities.assert_equals( expect=True, actual=logging,
onpass="Set log levels",
onfail="Failed to set log levels" )
+
+ @staticmethod
+ def resetOnosLogLevels( main ):
+ """
+ Read and reset onos log levels to a previously read set of values
+ """
+ main.step( 'Reset logging levels' )
+ # Get the current values and compare them to the previously saved levels
+ ctrl = main.Cluster.active(0)
+ currentLevels = ctrl.CLI.logList( saveValues=False )
+ origLevels = ctrl.CLI.logLevels
+ toBeSet = {}
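+ # Collect loggers whose level changed; loggers that did not exist before are reset to the saved ROOT level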
+ for logger, level in currentLevels.iteritems():
+ if logger not in origLevels:
+ toBeSet[ logger ] = origLevels[ 'ROOT' ]
+ else:
+ oldLevel = origLevels[ logger ]
+ if level != oldLevel:
+ toBeSet[ logger ] = oldLevel
+ logging = True
+ try:
+ for logger, level in toBeSet.iteritems():
+ for ctrl in main.Cluster.active():
+ ctrl.CLI.logSet( level, logger )
+ except AttributeError:
+ logging = False
+ utilities.assert_equals( expect=True, actual=logging,
+ onpass="Reset log levels",
+ onfail="Failed to reset log levels" )
diff --git a/TestON/tests/USECASE/SegmentRouting/dependencies/cfgtranslator.py b/TestON/tests/USECASE/SegmentRouting/dependencies/cfgtranslator.py
index b978b73..6b538a9 100644
--- a/TestON/tests/USECASE/SegmentRouting/dependencies/cfgtranslator.py
+++ b/TestON/tests/USECASE/SegmentRouting/dependencies/cfgtranslator.py
@@ -100,6 +100,10 @@
# Translate configuration JSON file from OFDPA-OVS driver to BMv2 driver.
def ofdpaToBmv2( main, switchPrefix="bmv2", cfgFile="", roleMap={r'0*[1-9]([0-9]){2}': 'spine', r'0{15}[1-9]': "leaf"} ):
didRE = r"of:0*(?P<swNum>[1-9][0-9]*)(/(?P<portNum>[0-9]+))?"
+ if switchPrefix is None:
+ switchPrefix = ''
+ else:
+ switchPrefix += ':'
if not cfgFile:
cfgFile = "%s%s.json" % ( main.configPath + main.forJson,
main.cfgName )
@@ -117,7 +121,7 @@
if roleMatch:
role = roleValue
break
- new_port = 'device:' + switchPrefix + ':' + role + searchObj.group( 'swNum' ) + '/' + searchObj.group( 'portNum' )
+ new_port = 'device:' + switchPrefix + role + searchObj.group( 'swNum' ) + '/' + searchObj.group( 'portNum' )
netcfg[ 'ports' ][ new_port ] = netcfg[ 'ports' ].pop( port )
if 'hosts' in netcfg.keys():
@@ -134,7 +138,7 @@
if roleMatch:
role = roleValue
break
- new_locations.append( 'device:' + switchPrefix + ':' + role + searchObj.group( 'swNum' ) + '/' + searchObj.group( 'portNum' ) )
+ new_locations.append( 'device:' + switchPrefix + role + searchObj.group( 'swNum' ) + '/' + searchObj.group( 'portNum' ) )
else:
new_locations.append( location )
netcfg[ 'hosts' ][ host ][ 'basic' ][ 'locations' ] = new_locations
@@ -149,7 +153,7 @@
if roleMatch:
role = roleValue
break
- new_location = 'device:' + switchPrefix + ':' + role + searchObj.group( 'swNum' ) + '/' + searchObj.group( 'portNum' )
+ new_location = 'device:' + switchPrefix + role + searchObj.group( 'swNum' ) + '/' + searchObj.group( 'portNum' )
netcfg[ 'hosts' ][ host ][ 'basic' ][ 'locations' ] = new_location
if 'devices' in netcfg.keys():
@@ -160,9 +164,9 @@
#TODO This or roleMap? maybe use this to populate role Map?
isLeaf = netcfg[ 'devices' ][ device ][ SR_APP ][ 'isEdgeRouter' ]
if isLeaf is True:
- new_device = 'device:' + switchPrefix + ':leaf' + searchObj.group( 'swNum' )
+ new_device = 'device:' + switchPrefix + 'leaf' + searchObj.group( 'swNum' )
else:
- new_device = 'device:' + switchPrefix + ':spine' + searchObj.group( 'swNum' )
+ new_device = 'device:' + switchPrefix + 'spine' + searchObj.group( 'swNum' )
netcfg[ 'devices' ][ new_device ] = netcfg[ 'devices' ].pop( device )
if 'pairDeviceId' in netcfg[ 'devices' ][ new_device ][ SR_APP ].keys():
searchObj = re.search( didRE,
@@ -175,7 +179,7 @@
if roleMatch:
role = roleValue
break
- netcfg[ 'devices' ][ new_device ][ SR_APP ][ 'pairDeviceId' ] = 'device:' + switchPrefix + ':' + role + \
+ netcfg[ 'devices' ][ new_device ][ SR_APP ][ 'pairDeviceId' ] = 'device:' + switchPrefix + role + \
searchObj.group( 'swNum' )
if 'basic' in netcfg[ 'devices' ][ new_device ].keys():
if 'driver' in netcfg[ 'devices' ][ new_device ][ 'basic' ].keys():
@@ -196,7 +200,7 @@
role = roleValue
break
netcfg[ 'apps' ][ DHCP_APP_ID ][ 'default' ][ i ][ 'dhcpServerConnectPoint' ] = \
- 'device:' + switchPrefix + ':' + role + searchObj.group( 'swNum' ) + '/' + searchObj.group( 'portNum' )
+ 'device:' + switchPrefix + role + searchObj.group( 'swNum' ) + '/' + searchObj.group( 'portNum' )
if 'xconnects' in netcfg.keys():
new_xconnects = []
@@ -210,7 +214,7 @@
if roleMatch:
role = roleValue
break
- new_device = 'device:' + switchPrefix + ':' + role + searchObj.group( 'swNum' )
+ new_device = 'device:' + switchPrefix + role + searchObj.group( 'swNum' )
xconnect[ 'deviceId' ] = new_device
new_xconnects.append( xconnect )
netcfg[ 'xconnects' ] = new_xconnects
diff --git a/TestON/tests/dependencies/Cluster.py b/TestON/tests/dependencies/Cluster.py
index aa63714..2e79481 100644
--- a/TestON/tests/dependencies/Cluster.py
+++ b/TestON/tests/dependencies/Cluster.py
@@ -109,7 +109,7 @@
nodeList = self.runningNodes
for ctrl in nodeList:
- ips.append( ctrl.ipAddress )
+ ips.append( ctrl.ipAddress if ctrl.ipAddress != 'localhost' else ctrl.address )
return ips
@@ -310,18 +310,21 @@
Returns:
Returns main.TRUE if it successfully set and verify cell.
"""
+ result = main.TRUE
setCellResult = self.command( "setCell",
args=[ cellName ],
specificDriver=1,
getFrom="all" )
- benchCellResult = main.ONOSbench.setCell( cellName )
- verifyResult = self.command( "verifyCell",
- specificDriver=1,
- getFrom="all" )
- result = main.TRUE
for i in range( len( setCellResult ) ):
- result = result and setCellResult[ i ] and verifyResult[ i ]
+ result = result and setCellResult[ i ]
+ benchCellResult = main.ONOSbench.setCell( cellName )
result = result and benchCellResult
+ if not self.useDocker:
+ verifyResult = self.command( "verifyCell",
+ specificDriver=1,
+ getFrom="all" )
+ for i in range( len( verifyResult ) ):
+ result = result and verifyResult[ i ]
return result
def checkService( self ):
@@ -738,6 +741,7 @@
Returns True if it successfully checked
"""
results = True
+ self.command( "getAddress", specificDriver=2 )
nodesOutput = self.command( "nodes", specificDriver=2 )
ips = sorted( self.getIps( activeOnly=True ) )
for i in nodesOutput:
diff --git a/TestON/tests/dependencies/ONOSSetup.py b/TestON/tests/dependencies/ONOSSetup.py
index 03275a1..042eff5 100644
--- a/TestON/tests/dependencies/ONOSSetup.py
+++ b/TestON/tests/dependencies/ONOSSetup.py
@@ -695,6 +695,7 @@
elif main.persistentSetup:
for ctrl in cluster.getRunningNodes():
ctrl.inDocker = True
+ ctrl.CLI.inDocker = True
onosCliResult = main.TRUE
if startOnosCli:
diff --git a/TestON/tests/dependencies/topology.py b/TestON/tests/dependencies/topology.py
index 7cdd9ab..26c3309 100644
--- a/TestON/tests/dependencies/topology.py
+++ b/TestON/tests/dependencies/topology.py
@@ -305,7 +305,8 @@
main.log.debug( "t3 command: {}".format( cmd ) )
main.ONOSbench.dumpONOSCmd( main.Cluster.active( 0 ).ipAddress, cmd, main.logdir,
"t3-CASE{}-{}-{}-route{}-".format( main.CurrentTestCaseNumber, srcIp, dstIp, i ),
- timeout=10 )
+ timeout=10,
+ cliPort=main.Cluster.active(0).CLI.karafPort )
return main.FALSE if unexpectedPings else main.TRUE
def sendScapyPackets( self, sender, receiver, pktFilter, pkt, sIface=None, dIface=None, expect=True, acceptableFailed=0, collectT3=True, t3Command="" ):
@@ -337,7 +338,8 @@
main.log.debug( "Collecting t3 with source {} and destination {}".format( sender.name, receiver.name ) )
main.log.debug( "t3 command: {}".format( t3Command ) )
main.ONOSbench.dumpONOSCmd( main.Cluster.active( 0 ).ipAddress, t3Command, main.logdir,
- "t3-CASE{}-{}-{}-".format( main.CurrentTestCaseNumber, sender.name, receiver.name ) )
+ "t3-CASE{}-{}-{}-".format( main.CurrentTestCaseNumber, sender.name, receiver.name ),
+ cliPort=main.Cluster.active(0).CLI.karafPort )
return scapyResult
def sendScapyPacketsHelper( self, sender, receiver, pktFilter, pkt, sIface=None, dIface=None, expect=True ):
@@ -418,7 +420,8 @@
main.log.debug( "t3 command: {}".format( cmd ) )
main.ONOSbench.dumpONOSCmd( main.Cluster.active( 0 ).ipAddress, cmd, main.logdir,
"t3-CASE{}-{}-{}-route{}-".format( main.CurrentTestCaseNumber, srcIp, dstIp, i ),
- timeout=10 )
+ timeout=10,
+ cliPort=main.Cluster.active(0).CLI.karafPort )
return trafficResult
def pingAndCaptureHelper( self, srcHost, dstIp, dstHost, dstIntf, ipv6=False, expect=True ):
diff --git a/TestON/tests/dependencies/utils.py b/TestON/tests/dependencies/utils.py
index 3cf849a..de61ae2 100644
--- a/TestON/tests/dependencies/utils.py
+++ b/TestON/tests/dependencies/utils.py
@@ -73,8 +73,12 @@
stepResult = main.TRUE
scpResult = main.TRUE
copyResult = main.TRUE
+ isKube = False
for ctrl in main.Cluster.runningNodes:
- if ctrl.inDocker:
+ if ctrl.k8s:
+ isKube = True
+ continue
+ elif ctrl.inDocker:
scpResult = scpResult and ctrl.server.dockerCp( ctrl.name,
"/opt/onos/log/karaf.log",
"/tmp/karaf.log",
@@ -97,6 +101,28 @@
stepResult = main.TRUE and stepResult
else:
stepResult = main.FALSE and stepResult
+ if isKube:
+ # TODO: Look into using Stern, kail, or just use `kubectl logs <pod>`
+ # We also need to save the pod name to switch name mapping
+ main.ONOSbench.kubectlPodNodes( dstPath=main.logdir + "/podMapping.txt",
+ kubeconfig=ctrl.k8s.kubeConfig,
+ namespace=main.params[ 'kubernetes' ][ 'namespace' ] )
+ # TODO Get stratum write logs
+ # Save image for pods, based on "describe pods"
+ main.ONOSbench.kubectlDescribe( "pods",
+ main.logdir + "/describePods.txt",
+ kubeconfig=ctrl.k8s.kubeConfig,
+ namespace=main.params[ 'kubernetes' ][ 'namespace' ] )
+ # Get the pod logs
+ pods = main.ONOSbench.kubectlGetPodNames( kubeconfig=ctrl.k8s.kubeConfig,
+ namespace=main.params[ 'kubernetes' ][ 'namespace' ] )
+
+ for pod in pods:
+ path = "%s/%s.log" % ( main.logdir, pod )
+ stratumPods = main.ONOSbench.kubectlLogs( pod,
+ path,
+ kubeconfig=ctrl.k8s.kubeConfig,
+ namespace=main.params[ 'kubernetes' ][ 'namespace' ] )
utilities.assert_equals( expect=main.TRUE,
actual=stepResult,
onpass="Successfully copied remote ONOS logs",