[SDFAB-552] QoS test for Leaf pair with mobile traffic classification
Also adds a check to ensure we are actually generating congestion, based on live port stats sampled during the test rather than directly on the average TRex port stats.
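
For reference, a minimal standalone sketch of the new congestion check (an illustration only; it assumes the live stats returned by startAndWaitTraffic are shaped as a dict keyed by port, each entry holding sampled "tx_bps"/"rx_bps" lists plus a "duration" key, which is how verifyCongestion below consumes them):

    def congestion_detected(live_stats, multiplier=1.0):
        # Average each port's sampled TX/RX rates over the run, then sum across ports.
        ports = [v for k, v in live_stats.items() if k != "duration"]
        avg_tx = sum(sum(p["tx_bps"]) / len(p["tx_bps"]) for p in ports)
        avg_rx = sum(sum(p["rx_bps"]) / len(p["rx_bps"]) for p in ports)
        # The multiplier compensates for packet size changes due to encap/decap;
        # congestion means the offered load exceeds what was delivered.
        return avg_tx > avg_rx * multiplier

With CASE 2 offering 40G + 35G of best-effort traffic over the 40 Gbps pair link, this check is expected to hold.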
Change-Id: I4337fe8ad8f59e6873a73e4d105853b18d199921
diff --git a/TestON/tests/USECASE/SegmentRouting/QOS/QOS.params b/TestON/tests/USECASE/SegmentRouting/QOS/QOS.params
index ceb29b6..427917f 100644
--- a/TestON/tests/USECASE/SegmentRouting/QOS/QOS.params
+++ b/TestON/tests/USECASE/SegmentRouting/QOS/QOS.params
@@ -1,5 +1,5 @@
<PARAMS>
- <testcases>1</testcases>
+ <testcases>1,2</testcases>
<GRAPH>
<nodeCluster>pairedleaves</nodeCluster>
@@ -48,8 +48,61 @@
</UP4>
<TREX>
- <port_stats>0,2</port_stats> <!-- TRex port 0 = PDN, TRex port 2 = eNodeB -->
+ <port_stats>0,2,3</port_stats> <!-- TRex port 0 = PDN, TRex port 2 = eNodeB, TRex port 3 = second eNB -->
<flows>
+ <BE1_FROM_UE>
+ <name>Best Effort 1</name>
+ <l1_bps>40000000000</l1_bps>
+ <trex_port>2</trex_port>
+ <packet>
+ <pktlen>1400</pktlen>
+ <ip_src>10.240.0.1</ip_src>
+ <ip_dst>10.32.11.125</ip_dst>
+ <eth_src>40:A6:B7:22:AB:20</eth_src>
+ <eth_dst>00:00:0A:4C:1C:46</eth_dst>
+ <gtp_teid>100</gtp_teid>
+ <s1u_addr>10.32.11.126</s1u_addr>
+ <enb_addr>10.32.11.124</enb_addr>
+ </packet>
+ </BE1_FROM_UE>
+ <BE2_FROM_UE>
+ <name>Best Effort 2</name>
+ <l1_bps>35000000000</l1_bps>
+ <trex_port>3</trex_port>
+ <packet>
+ <pktlen>1400</pktlen>
+ <ip_src>10.240.0.1</ip_src>
+ <ip_dst>10.32.11.125</ip_dst>
+ <eth_src>40:A6:B7:22:AB:21</eth_src>
+ <eth_dst>00:00:0A:4C:1C:46</eth_dst>
+ <gtp_teid>100</gtp_teid>
+ <s1u_addr>10.32.11.126</s1u_addr>
+ <enb_addr>10.32.11.123</enb_addr>
+ </packet>
+ </BE2_FROM_UE>
+ <RT_FROM_UE>
+ <name>Real Time</name>
+ <l1_bps>22000000</l1_bps> <!-- Lower than the smaller of the two RT max bandwidths -->
+ <trex_port>3</trex_port>
+ <packet>
+ <pktlen>1400</pktlen>
+ <ip_src>10.240.0.2</ip_src>
+ <ip_dst>10.32.11.125</ip_dst>
+ <eth_src>40:A6:B7:22:AB:21</eth_src>
+ <eth_dst>00:00:0A:4C:1C:46</eth_dst>
+ <gtp_teid>200</gtp_teid>
+ <s1u_addr>10.32.11.126</s1u_addr>
+ <enb_addr>10.32.11.123</enb_addr>
+ </packet>
+ <latency_stats>true</latency_stats>
+ <flow_id>10</flow_id>
+ <delay>50000</delay>
+ <expected_max_dropped>0</expected_max_dropped>
+ <expected_max_latency>1500</expected_max_latency>
+ <!-- Verify the 90th percentile instead of 99.9th because of latency introduced by TRex SW Mode -->
+ <expected_90_percentile_latency>150</expected_90_percentile_latency>
+ </RT_FROM_UE>
+
<BE_FROM_PDN>
<name>Best Effort</name>
<l1_bps>2000000000</l1_bps>
diff --git a/TestON/tests/USECASE/SegmentRouting/QOS/QOS.py b/TestON/tests/USECASE/SegmentRouting/QOS/QOS.py
index 2a851c4..45414f1 100644
--- a/TestON/tests/USECASE/SegmentRouting/QOS/QOS.py
+++ b/TestON/tests/USECASE/SegmentRouting/QOS/QOS.py
@@ -4,87 +4,42 @@
self.default = ''
def CASE1(self, main):
- main.case("Leaf-Edge with Mobile Traffic Classification")
+ main.case("Leaf Edge with Mobile Traffic Classification")
# Leaf-Edge-Mobile
# Attach 2 UEs with different QFI
# Generate traffic with Trex for the two UEs
# --> no packet drop on RT flow, reasonable latency on RT flow
try:
- from tests.USECASE.SegmentRouting.dependencies.up4 import UP4, \
- N_FLOWS_PER_UE
- from tests.USECASE.SegmentRouting.dependencies.trex import Trex
- from tests.USECASE.SegmentRouting.dependencies.Testcaselib import \
- Testcaselib as run
- import json
+ from tests.USECASE.SegmentRouting.QOS.dependencies.QOSTest import \
+ QOSTest
except ImportError as e:
main.log.error("Import not found. Exiting the test")
main.log.error(e)
main.cleanAndExit()
- n_switches = int(main.params["TOPO"]["switchNum"])
-
- run.initTest(main)
- main.log.info(main.Cluster.numCtrls)
- main.Cluster.setRunningNode(3)
- run.installOnos(main, skipPackage=True, cliSleep=5)
-
- main.step("Start P4rt client and setup TRex")
- # Use the first available ONOS instance CLI
- onos_cli = main.Cluster.active(0).CLI
- initial_flow_count = onos_cli.checkFlowCount()
- up4 = UP4()
- trex = Trex()
- # Get the P4RT client connected to UP4 in the first available ONOS instance
- up4.setup(main.Cluster.active(0).p4rtUp4, no_host=True)
- trex.setup(main.TRexClient)
-
- main.step("Program PDRs and FARs via UP4")
- up4.attachUes()
-
- main.step("Verify PDRs and FARs in ONOS")
- up4.verifyUp4Flow(onos_cli)
-
- run.checkFlows(
+ test = QOSTest()
+ test.runTest(
main,
- minFlowCount=initial_flow_count + (
- len(up4.emulated_ues) * N_FLOWS_PER_UE * n_switches)
+ test_idx=1,
+ n_switches=int(main.params["TOPO"]["switchNum"])
)
- # Load traffic config for the current test case
- main.step("Load test JSON config")
- cfgFile = main.configPath + "/tests/" + "leaf_edge_mobile.json"
- with open(cfgFile) as cfg:
- testCfg = json.load(cfg)
-
- main.step("Send traffic with TRex")
- for flow in testCfg["flows"]:
- trex.createFlow(flow)
- trex.sendAndReceiveTraffic(testCfg["duration"])
-
- main.step("Log port and flow stats")
- trex.logPortStats()
- for flow in testCfg["flows"]:
- trex.logFlowStats(flow)
-
- # Assert Flow Stats
- for flow in testCfg["flows"]:
- if trex.isFlowStats(flow):
- main.step("{}: Assert RX Packets".format(flow))
- trex.assertRxPackets(flow)
- main.step("{}: Assert Dropped Packets".format(flow))
- trex.assertDroppedPacket(flow)
- main.step("{}: Assert 99.9 Percentile Latency".format(flow))
- trex.assert99_9PercentileLatency(flow)
-
- main.step("Remove PDRs and FARs via UP4")
- up4.detachUes()
-
- main.step("Verify removed PDRs and FARs from ONOS")
- up4.verifyNoUesFlow(onos_cli)
-
- run.checkFlows(main, minFlowCount=initial_flow_count)
-
- main.step("Teardown")
- trex.teardown()
- up4.teardown()
- run.saveOnosDiagsIfFailure(main)
- run.cleanup(main)
+ def CASE2(self, main):
+ main.case("Leaf Pair Link with Mobile Traffic Classification")
+ # Leaf Infra Mobile Traffic
+ # Attach 2 UEs with different QFI
+ # Generate traffic with TRex from UEs towards PDN, generating congestion
+ # on the 40Gbps pair link
+ # --> no packet drop on RT flow, reasonable latency on RT flow
+ try:
+ from tests.USECASE.SegmentRouting.QOS.dependencies.QOSTest import \
+ QOSTest
+ except ImportError as e:
+ main.log.error("Import not found. Exiting the test")
+ main.log.error(e)
+ main.cleanAndExit()
+ test = QOSTest()
+ test.runTest(
+ main,
+ test_idx=2,
+ n_switches=int(main.params["TOPO"]["switchNum"])
+ )
diff --git a/TestON/tests/USECASE/SegmentRouting/QOS/dependencies/QOSTest.py b/TestON/tests/USECASE/SegmentRouting/QOS/dependencies/QOSTest.py
new file mode 100644
index 0000000..fcb4b7c
--- /dev/null
+++ b/TestON/tests/USECASE/SegmentRouting/QOS/dependencies/QOSTest.py
@@ -0,0 +1,77 @@
+from tests.USECASE.SegmentRouting.dependencies.Testcaselib import \
+ Testcaselib as run
+from tests.USECASE.SegmentRouting.dependencies.trex import Trex
+from tests.USECASE.SegmentRouting.dependencies.up4 import UP4, N_FLOWS_PER_UE
+import json
+
+
+class QOSTest:
+
+ def runTest(self, main, test_idx, n_switches):
+ run.initTest(main)
+ main.log.info(main.Cluster.numCtrls)
+ main.Cluster.setRunningNode(3)
+ run.installOnos(main, skipPackage=True, cliSleep=5)
+
+ main.step("Start P4rt client and setup TRex")
+ # Use the first available ONOS instance CLI
+ onos_cli = main.Cluster.active(0).CLI
+ initial_flow_count = onos_cli.checkFlowCount()
+ up4 = UP4()
+ trex = Trex()
+ # Get the P4RT client connected to UP4 in the first available ONOS instance
+ up4.setup(main.Cluster.active(0).p4rtUp4)
+ trex.setup(main.TRexClient)
+
+ main.step("Program PDRs and FARs via UP4")
+ up4.attachUes()
+ up4.verifyUp4Flow(onos_cli)
+
+ run.checkFlows(
+ main,
+ minFlowCount=initial_flow_count + (
+ len(up4.emulated_ues) * N_FLOWS_PER_UE * n_switches)
+ )
+
+ # Load traffic config for the current test case
+ main.step("Load test JSON config")
+ cfgFile = main.configPath + "/tests/" + "CASE_%d.json" % test_idx
+ with open(cfgFile) as cfg:
+ testCfg = json.load(cfg)
+
+ main.step("Send traffic with TRex")
+ for flow in testCfg["flows"]:
+ trex.createFlow(flow)
+ results = trex.sendAndReceiveTraffic(testCfg["duration"])
+ trex.verifyCongestion(
+ results,
+ multiplier=float(testCfg.get("multiplier", "1"))
+ )
+
+ main.step("Log port and flow stats")
+ trex.logPortStats()
+ for flow in testCfg["flows"]:
+ trex.logFlowStats(flow)
+
+ # Assert Flow Stats
+ for flow in testCfg["flows"]:
+ if trex.isFlowStats(flow):
+ main.step("{}: Assert RX Packets".format(flow))
+ trex.assertRxPackets(flow)
+ main.step("{}: Assert Dropped Packets".format(flow))
+ trex.assertDroppedPacket(flow)
+ main.step("{}: Assert 90 Percentile Latency".format(flow))
+ trex.assert90PercentileLatency(flow)
+ main.step("{}: Assert 99.9 Percentile Latency".format(flow))
+ trex.assert99_9PercentileLatency(flow)
+
+ main.step("Remove PDRs and FARs via UP4")
+ up4.detachUes()
+ up4.verifyNoUesFlow(onos_cli)
+
+ run.checkFlows(main, minFlowCount=initial_flow_count)
+
+ main.step("Teardown")
+ trex.teardown()
+ up4.teardown()
+ run.cleanup(main)
diff --git a/TestON/tests/USECASE/SegmentRouting/QOS/dependencies/tests/CASE_1.json b/TestON/tests/USECASE/SegmentRouting/QOS/dependencies/tests/CASE_1.json
new file mode 100644
index 0000000..f2258f7
--- /dev/null
+++ b/TestON/tests/USECASE/SegmentRouting/QOS/dependencies/tests/CASE_1.json
@@ -0,0 +1,6 @@
+{
+ "flows": ["BE_FROM_PDN", "RT_FROM_PDN"],
+ "duration": 10,
+ "multiplier": 1.02571
+}
+
diff --git a/TestON/tests/USECASE/SegmentRouting/QOS/dependencies/tests/CASE_2.json b/TestON/tests/USECASE/SegmentRouting/QOS/dependencies/tests/CASE_2.json
new file mode 100644
index 0000000..6ac3bfd
--- /dev/null
+++ b/TestON/tests/USECASE/SegmentRouting/QOS/dependencies/tests/CASE_2.json
@@ -0,0 +1,5 @@
+{
+ "flows": ["BE1_FROM_UE", "BE2_FROM_UE", "RT_FROM_UE"],
+ "duration": 10,
+ "multiplier": 0.97428
+}
diff --git a/TestON/tests/USECASE/SegmentRouting/QOS/dependencies/tests/leaf_edge_mobile.json b/TestON/tests/USECASE/SegmentRouting/QOS/dependencies/tests/leaf_edge_mobile.json
deleted file mode 100644
index f32fa79..0000000
--- a/TestON/tests/USECASE/SegmentRouting/QOS/dependencies/tests/leaf_edge_mobile.json
+++ /dev/null
@@ -1,5 +0,0 @@
-{
- "flows": ["BE_FROM_PDN", "RT_FROM_PDN"],
- "duration": 10
-}
-
diff --git a/TestON/tests/USECASE/SegmentRouting/QOS/dependencies/trex_config.yaml b/TestON/tests/USECASE/SegmentRouting/QOS/dependencies/trex_config.yaml
index 00a1f85..5309605 100644
--- a/TestON/tests/USECASE/SegmentRouting/QOS/dependencies/trex_config.yaml
+++ b/TestON/tests/USECASE/SegmentRouting/QOS/dependencies/trex_config.yaml
@@ -1,13 +1,13 @@
-# TRex Port ID=0 --> PCI BUS: d8:00.0, Linux Intf: enp216s0f0 connected to leaf1/0 (PDN)
-# TRex Port ID=1 --> PCI BUS: d8:00.1, Linux Intf: enp216s0f1 not connected, but required by TRex to have an even number of interfaces
-# TRex Port ID=2 --> PCI BUS: 5e:00.0, Linux Intf: enp94s0f0 connected to leaf2/0 (eNodeB)
-# TRex Port ID=3 --> PCI BUS: 5e:00.1, Linux Intf: enp94s0f1 connected to leaf2/4
+# TRex Port ID=0 --> PCI BUS: d8:00.0, NUMA: 1 (CPU2), Linux Intf: enp216s0f0 connected to leaf1/0 (PDN)
+# TRex Port ID=1 --> PCI BUS: d8:00.1, NUMA: 1 (CPU2), Linux Intf: enp216s0f1 not connected, but required by TRex to have an even number of interfaces
+# TRex Port ID=2 --> PCI BUS: 5e:00.0, NUMA: 0 (CPU1), Linux Intf: enp94s0f0 connected to leaf2/0 (eNodeB)
+# TRex Port ID=3 --> PCI BUS: 5e:00.1, NUMA: 0 (CPU1), Linux Intf: enp94s0f1 connected to leaf2/4
- version: 2
port_limit: 4
interfaces: [ 'd8:00.0', 'd8:00.1', '5e:00.0', '5e:00.1']
port_bandwidth_gb: 40
- c: 16
+ c: 20
port_info:
- src_mac: 40:A6:B7:22:AB:40
dest_mac: 00:00:0A:4C:1C:46
diff --git a/TestON/tests/USECASE/SegmentRouting/QOSNonMobile/dependencies/QOSNonMobileTest.py b/TestON/tests/USECASE/SegmentRouting/QOSNonMobile/dependencies/QOSNonMobileTest.py
index 45e9d8c..d3b724f 100644
--- a/TestON/tests/USECASE/SegmentRouting/QOSNonMobile/dependencies/QOSNonMobileTest.py
+++ b/TestON/tests/USECASE/SegmentRouting/QOSNonMobile/dependencies/QOSNonMobileTest.py
@@ -98,7 +98,8 @@
main.step("Send traffic with TRex")
for flow in testCfg["flows"]:
trex.createFlow(flow)
- trex.sendAndReceiveTraffic(testCfg["duration"])
+ results = trex.sendAndReceiveTraffic(testCfg["duration"])
+ trex.verifyCongestion(results)
trex.logPortStats()
for flow in testCfg["flows"]:
diff --git a/TestON/tests/USECASE/SegmentRouting/dependencies/trex.py b/TestON/tests/USECASE/SegmentRouting/dependencies/trex.py
index d710d07..93a7c70 100644
--- a/TestON/tests/USECASE/SegmentRouting/dependencies/trex.py
+++ b/TestON/tests/USECASE/SegmentRouting/dependencies/trex.py
@@ -75,7 +75,7 @@
Connect the client, create the flows in trex (with packets created with
createFlow, send and receive the traffic, and disconnect the client.
:param duration: traffic duration
- :return:
+ :return: port statistics collected while running the test
"""
self.trex_client.connectTrexClient()
for flow_name, packet in self.packets.items():
@@ -88,8 +88,38 @@
delay=flow_config["delay"],
flow_id=flow_config["flow_id"],
flow_stats=flow_config["latency_stats"])
- self.trex_client.startAndWaitTraffic(duration=duration)
+ result = self.trex_client.startAndWaitTraffic(duration=duration,
+ ports=self.port_stats)
self.trex_client.disconnectTrexClient()
+ return result
+
+ def verifyCongestion(self, live_stats, multiplier=1):
+ """
+ Verify and assert that the test was able to generate congestion by
+ checking that average TX traffic is greater than average RX traffic from
+ stats collected during the test.
+
+ :param live_stats: Stats collected during tests
+ :param multiplier: Multiplier applied to RX traffic to account for packet size changes when traffic is encapsulated/decapsulated
+ :return:
+ """
+ avg_tx = sum(
+ [sum(v["tx_bps"]) / len(v["tx_bps"])
+ for (k, v) in live_stats.items() if k != "duration"]
+ )
+ avg_rx = sum(
+ [sum(v["rx_bps"]) / len(v["rx_bps"])
+ for (k, v) in live_stats.items() if k != "duration"]
+ )
+
+ utilities.assert_equals(
+ expect=True,
+ actual=avg_tx > avg_rx * multiplier,
+ onpass="Congestion created: AVG TX ({}) > AVG RX ({})".format(
+ avg_tx, avg_rx),
+ onfail="NO Congestion: AVG TX ({}) <= AVG RX ({})".format(
+ avg_tx, avg_rx)
+ )
def assertRxPackets(self, flow_name):
if not self.isFlowStats(flow_name):
@@ -137,6 +167,10 @@
def assert99_9PercentileLatency(self, flow_name):
if not self.isFlowStats(flow_name):
main.log.info("No flow stats for flow {}".format(flow_name))
+ return
+ if not "expected_99_9_percentile_latency" in self.traffic_flows[flow_name].keys():
+ main.log.info("No 99.9th percentile parameter for test")
+ return
expected_99_9_percentile_latency = int(
self.traffic_flows[flow_name].get(
"expected_99_9_percentile_latency", "0"))
@@ -149,6 +183,25 @@
onfail="Traffic Flow {}: 99.9th percentile latency is too high {}".format(
flow_name, latency_stats.percentile_99_9))
+ def assert90PercentileLatency(self, flow_name):
+ if not self.isFlowStats(flow_name):
+ main.log.info("No flow stats for flow {}".format(flow_name))
+ return
+ if not "expected_90_percentile_latency" in self.traffic_flows[flow_name].keys():
+ main.log.info("No 90th percentile parameter for test")
+ return
+ expected_90_percentile_latency = int(
+ self.traffic_flows[flow_name].get(
+ "expected_90_percentile_latency", "0"))
+ latency_stats = self.__getLatencyStats(flow_name)
+ utilities.assert_equals(
+ expect=True,
+ actual=latency_stats.percentile_90 <= expected_90_percentile_latency,
+ onpass="Traffic Flow {}: 90th percentile latency below threshold".format(
+ flow_name),
+ onfail="Traffic Flow {}: 90th percentile latency is too high {}".format(
+ flow_name, latency_stats.percentile_90))
+
def logPortStats(self):
main.log.debug(self.port_stats)
for port in self.port_stats: