[SDFAB-552] QoS test for Leaf pair with mobile traffic classification
Also adds a check to verify that we are actually generating congestion, based on live per-port stats sampled while traffic is running, rather than on the averaged TRex port stats.
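
The congestion check averages the per-interval bps samples collected for
each monitored port and asserts that total TX exceeds total RX, with an
optional multiplier to compensate for encap/decap. A rough sketch of the
condition (mirroring the new trex.verifyCongestion):

    avg_tx = sum(sum(v["tx_bps"]) / len(v["tx_bps"])
                 for k, v in live_stats.items() if k != "duration")
    avg_rx = sum(sum(v["rx_bps"]) / len(v["rx_bps"])
                 for k, v in live_stats.items() if k != "duration")
    congested = avg_tx > avg_rx * multiplier

The per-case multipliers correspond to the 36 B GTP-U overhead (20 B outer
IP + 8 B UDP + 8 B GTP) on the 1400 B test packets: 1436/1400 = 1.02571
and 1364/1400 = 0.97428.
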
Change-Id: I4337fe8ad8f59e6873a73e4d105853b18d199921
diff --git a/TestON/drivers/common/api/controller/trexclientdriver.py b/TestON/drivers/common/api/controller/trexclientdriver.py
index a3923c9..50c8c39 100644
--- a/TestON/drivers/common/api/controller/trexclientdriver.py
+++ b/TestON/drivers/common/api/controller/trexclientdriver.py
@@ -230,11 +230,13 @@
self.all_sender_port.add(trex_port)
return True
- def startAndWaitTraffic(self, duration=10):
+ def startAndWaitTraffic(self, duration=10, ports=[]):
"""
Start generating traffic and wait for the traffic to be sent
+
:param duration: Traffic generation duration
- :return:
+ :param ports: Port IDs to monitor while traffic is active
+ :return: port statistics collected while traffic is active
"""
if not self.trex_client:
main.log.error(
@@ -244,6 +246,7 @@
self.trex_client.start(list(self.all_sender_port), mult="1",
duration=duration)
main.log.info("Waiting until all traffic is sent..")
+ result = self.__monitor_port_stats(ports)
self.trex_client.wait_on_traffic(ports=list(self.all_sender_port),
rx_delay_ms=100)
main.log.info("...traffic sent!")
@@ -251,7 +254,7 @@
self.all_sender_port = set()
main.log.info("Getting stats")
self.stats = self.trex_client.get_stats()
- main.log.info("GOT stats")
+ return result
def getFlowStats(self, flow_id):
if self.stats is None:
@@ -319,7 +322,8 @@
trex_daemon_client.start_stateless(cfg=trex_config_file_on_server)
except ConnectionRefusedError:
main.log.error(
- "Unable to connect to server %s.\n" + "Did you start the Trex daemon?" % trex_address)
+ "Unable to connect to server %s.\n"
+ "Did you start the Trex daemon?" % trex_address)
return False
return True
@@ -354,6 +358,87 @@
M = 1000 * K
G = 1000 * M
+ def __monitor_port_stats(self, ports, time_interval=1):
+ """
+ Periodically log and collect port stats while traffic is active
+
+ :param ports: List of port IDs to monitor
+ :param time_interval: Seconds to wait between consecutive reads
+ :return: Statistics collected while traffic is active
+ """
+
+ results = {
+ port_id: {"rx_bps": [], "tx_bps": [], "rx_pps": [], "tx_pps": []}
+ for port_id in ports
+ }
+ results["duration"] = []
+
+ prev = {
+ port_id: {
+ "opackets": 0,
+ "ipackets": 0,
+ "obytes": 0,
+ "ibytes": 0,
+ "time": time.time(),
+ }
+ for port_id in ports
+ }
+
+ s_time = time.time()
+ while self.trex_client.is_traffic_active():
+ stats = self.trex_client.get_stats(ports=ports)
+ if not stats:
+ break
+
+ main.log.debug(
+ "\nTRAFFIC RUNNING {:.2f} SEC".format(time.time() - s_time))
+ main.log.debug(
+ "{:^4} | {:<10} | {:<10} | {:<10} | {:<10} |".format(
+ "Port", "RX bps", "TX bps", "RX pps", "TX pps"
+ )
+ )
+ main.log.debug(
+ "----------------------------------------------------------")
+
+ for port in ports:
+ opackets = stats[port]["opackets"]
+ ipackets = stats[port]["ipackets"]
+ obytes = stats[port]["obytes"]
+ ibytes = stats[port]["ibytes"]
+ time_diff = time.time() - prev[port]["time"]
+
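+ # Rates are computed from cumulative counter deltas over the sampling
+ # interval: bps = 8 * byte_delta / dt, pps = packet_delta / dt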
+ rx_bps = 8 * (ibytes - prev[port]["ibytes"]) / time_diff
+ tx_bps = 8 * (obytes - prev[port]["obytes"]) / time_diff
+ rx_pps = (ipackets - prev[port]["ipackets"]) / time_diff
+ tx_pps = (opackets - prev[port]["opackets"]) / time_diff
+
+ main.log.debug(
+ "{:^4} | {:<10} | {:<10} | {:<10} | {:<10} |".format(
+ port,
+ TrexClientDriver.__to_readable(rx_bps, "bps"),
+ TrexClientDriver.__to_readable(tx_bps, "bps"),
+ TrexClientDriver.__to_readable(rx_pps, "pps"),
+ TrexClientDriver.__to_readable(tx_pps, "pps"),
+ )
+ )
+
+ results[port]["rx_bps"].append(rx_bps)
+ results[port]["tx_bps"].append(tx_bps)
+ results[port]["rx_pps"].append(rx_pps)
+ results[port]["tx_pps"].append(tx_pps)
+
+ prev[port]["opackets"] = opackets
+ prev[port]["ipackets"] = ipackets
+ prev[port]["obytes"] = obytes
+ prev[port]["ibytes"] = ibytes
+ prev[port]["time"] = time.time()
+
+ results["duration"].append(time.time() - s_time)
+ time.sleep(time_interval)
+ main.log.debug("")
+
+ return results
+
@staticmethod
def __to_readable(src, unit="bps"):
"""
diff --git a/TestON/tests/USECASE/SegmentRouting/QOS/QOS.params b/TestON/tests/USECASE/SegmentRouting/QOS/QOS.params
index ceb29b6..427917f 100644
--- a/TestON/tests/USECASE/SegmentRouting/QOS/QOS.params
+++ b/TestON/tests/USECASE/SegmentRouting/QOS/QOS.params
@@ -1,5 +1,5 @@
<PARAMS>
- <testcases>1</testcases>
+ <testcases>1,2</testcases>
<GRAPH>
<nodeCluster>pairedleaves</nodeCluster>
@@ -48,8 +48,61 @@
</UP4>
<TREX>
- <port_stats>0,2</port_stats> <!-- TRex port 0 = PDN, TRex port 2 = eNodeB -->
+ <port_stats>0,2,3</port_stats> <!-- TRex port 0 = PDN, TRex port 2 = eNodeB, TRex port 3 = second eNB -->
<flows>
+ <BE1_FROM_UE>
+ <name>Best Effort 1</name>
+ <l1_bps>40000000000</l1_bps>
+ <trex_port>2</trex_port>
+ <packet>
+ <pktlen>1400</pktlen>
+ <ip_src>10.240.0.1</ip_src>
+ <ip_dst>10.32.11.125</ip_dst>
+ <eth_src>40:A6:B7:22:AB:20</eth_src>
+ <eth_dst>00:00:0A:4C:1C:46</eth_dst>
+ <gtp_teid>100</gtp_teid>
+ <s1u_addr>10.32.11.126</s1u_addr>
+ <enb_addr>10.32.11.124</enb_addr>
+ </packet>
+ </BE1_FROM_UE>
+ <BE2_FROM_UE>
+ <name>Best Effort 2</name>
+ <l1_bps>35000000000</l1_bps>
+ <trex_port>3</trex_port>
+ <packet>
+ <pktlen>1400</pktlen>
+ <ip_src>10.240.0.1</ip_src>
+ <ip_dst>10.32.11.125</ip_dst>
+ <eth_src>40:A6:B7:22:AB:21</eth_src>
+ <eth_dst>00:00:0A:4C:1C:46</eth_dst>
+ <gtp_teid>100</gtp_teid>
+ <s1u_addr>10.32.11.126</s1u_addr>
+ <enb_addr>10.32.11.123</enb_addr>
+ </packet>
+ </BE2_FROM_UE>
+ <RT_FROM_UE>
+ <name>Real Time</name>
+ <l1_bps>22000000</l1_bps> <!-- Smaller than the smaller RT max BW -->
+ <trex_port>3</trex_port>
+ <packet>
+ <pktlen>1400</pktlen>
+ <ip_src>10.240.0.2</ip_src>
+ <ip_dst>10.32.11.125</ip_dst>
+ <eth_src>40:A6:B7:22:AB:21</eth_src>
+ <eth_dst>00:00:0A:4C:1C:46</eth_dst>
+ <gtp_teid>200</gtp_teid>
+ <s1u_addr>10.32.11.126</s1u_addr>
+ <enb_addr>10.32.11.123</enb_addr>
+ </packet>
+ <latency_stats>true</latency_stats>
+ <flow_id>10</flow_id>
+ <delay>50000</delay>
+ <expected_max_dropped>0</expected_max_dropped>
+ <expected_max_latency>1500</expected_max_latency>
+ <!-- Verify the 90th percentile instead of 99.9th because of latency introduced by TRex SW Mode -->
+ <expected_90_percentile_latency>150</expected_90_percentile_latency>
+ </RT_FROM_UE>
+
<BE_FROM_PDN>
<name>Best Effort</name>
<l1_bps>2000000000</l1_bps>
diff --git a/TestON/tests/USECASE/SegmentRouting/QOS/QOS.py b/TestON/tests/USECASE/SegmentRouting/QOS/QOS.py
index 2a851c4..45414f1 100644
--- a/TestON/tests/USECASE/SegmentRouting/QOS/QOS.py
+++ b/TestON/tests/USECASE/SegmentRouting/QOS/QOS.py
@@ -4,87 +4,42 @@
self.default = ''
def CASE1(self, main):
- main.case("Leaf-Edge with Mobile Traffic Classification")
+ main.case("Leaf Edge with Mobile Traffic Classification")
# Leaf-Edge-Mobile
# Attach 2 UEs with different QFI
# Generate traffic with Trex for the two UEs
# --> no packet drop on RT flow, reasonable latency on RT flow
try:
- from tests.USECASE.SegmentRouting.dependencies.up4 import UP4, \
- N_FLOWS_PER_UE
- from tests.USECASE.SegmentRouting.dependencies.trex import Trex
- from tests.USECASE.SegmentRouting.dependencies.Testcaselib import \
- Testcaselib as run
- import json
+ from tests.USECASE.SegmentRouting.QOS.dependencies.QOSTest import \
+ QOSTest
except ImportError as e:
main.log.error("Import not found. Exiting the test")
main.log.error(e)
main.cleanAndExit()
- n_switches = int(main.params["TOPO"]["switchNum"])
-
- run.initTest(main)
- main.log.info(main.Cluster.numCtrls)
- main.Cluster.setRunningNode(3)
- run.installOnos(main, skipPackage=True, cliSleep=5)
-
- main.step("Start P4rt client and setup TRex")
- # Use the first available ONOS instance CLI
- onos_cli = main.Cluster.active(0).CLI
- initial_flow_count = onos_cli.checkFlowCount()
- up4 = UP4()
- trex = Trex()
- # Get the P4RT client connected to UP4 in the first available ONOS instance
- up4.setup(main.Cluster.active(0).p4rtUp4, no_host=True)
- trex.setup(main.TRexClient)
-
- main.step("Program PDRs and FARs via UP4")
- up4.attachUes()
-
- main.step("Verify PDRs and FARs in ONOS")
- up4.verifyUp4Flow(onos_cli)
-
- run.checkFlows(
+ test = QOSTest()
+ test.runTest(
main,
- minFlowCount=initial_flow_count + (
- len(up4.emulated_ues) * N_FLOWS_PER_UE * n_switches)
+ test_idx=1,
+ n_switches=int(main.params["TOPO"]["switchNum"])
)
- # Load traffic config for the current test case
- main.step("Load test JSON config")
- cfgFile = main.configPath + "/tests/" + "leaf_edge_mobile.json"
- with open(cfgFile) as cfg:
- testCfg = json.load(cfg)
-
- main.step("Send traffic with TRex")
- for flow in testCfg["flows"]:
- trex.createFlow(flow)
- trex.sendAndReceiveTraffic(testCfg["duration"])
-
- main.step("Log port and flow stats")
- trex.logPortStats()
- for flow in testCfg["flows"]:
- trex.logFlowStats(flow)
-
- # Assert Flow Stats
- for flow in testCfg["flows"]:
- if trex.isFlowStats(flow):
- main.step("{}: Assert RX Packets".format(flow))
- trex.assertRxPackets(flow)
- main.step("{}: Assert Dropped Packets".format(flow))
- trex.assertDroppedPacket(flow)
- main.step("{}: Assert 99.9 Percentile Latency".format(flow))
- trex.assert99_9PercentileLatency(flow)
-
- main.step("Remove PDRs and FARs via UP4")
- up4.detachUes()
-
- main.step("Verify removed PDRs and FARs from ONOS")
- up4.verifyNoUesFlow(onos_cli)
-
- run.checkFlows(main, minFlowCount=initial_flow_count)
-
- main.step("Teardown")
- trex.teardown()
- up4.teardown()
- run.saveOnosDiagsIfFailure(main)
- run.cleanup(main)
+ def CASE2(self, main):
+ main.case("Leaf Pair Link with Mobile Traffic Classification")
+ # Leaf Infra Mobile Traffic
+ # Attach 2 UEs with different QFI
+ # Generate traffic with TRex from UEs towards PDN, generating congestion
+ # on the 40Gbps pair link
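+ # (offered load: BE1 40 Gbps + BE2 35 Gbps + RT 22 Mbps vs the 40 Gbps link)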
+ # --> no packet drop on RT flow, reasonable latency on RT flow
+ try:
+ from tests.USECASE.SegmentRouting.QOS.dependencies.QOSTest import \
+ QOSTest
+ except ImportError as e:
+ main.log.error("Import not found. Exiting the test")
+ main.log.error(e)
+ main.cleanAndExit()
+ test = QOSTest()
+ test.runTest(
+ main,
+ test_idx=2,
+ n_switches=int(main.params["TOPO"]["switchNum"])
+ )
diff --git a/TestON/tests/USECASE/SegmentRouting/QOS/dependencies/QOSTest.py b/TestON/tests/USECASE/SegmentRouting/QOS/dependencies/QOSTest.py
new file mode 100644
index 0000000..fcb4b7c
--- /dev/null
+++ b/TestON/tests/USECASE/SegmentRouting/QOS/dependencies/QOSTest.py
@@ -0,0 +1,77 @@
+from tests.USECASE.SegmentRouting.dependencies.Testcaselib import \
+ Testcaselib as run
+from tests.USECASE.SegmentRouting.dependencies.trex import Trex
+from tests.USECASE.SegmentRouting.dependencies.up4 import UP4, N_FLOWS_PER_UE
+import json
+
+
+class QOSTest:
+
+ def runTest(self, main, test_idx, n_switches):
+ run.initTest(main)
+ main.log.info(main.Cluster.numCtrls)
+ main.Cluster.setRunningNode(3)
+ run.installOnos(main, skipPackage=True, cliSleep=5)
+
+ main.step("Start P4rt client and setup TRex")
+ # Use the first available ONOS instance CLI
+ onos_cli = main.Cluster.active(0).CLI
+ initial_flow_count = onos_cli.checkFlowCount()
+ up4 = UP4()
+ trex = Trex()
+ # Get the P4RT client connected to UP4 in the first available ONOS instance
+ up4.setup(main.Cluster.active(0).p4rtUp4)
+ trex.setup(main.TRexClient)
+
+ main.step("Program PDRs and FARs via UP4")
+ up4.attachUes()
+ up4.verifyUp4Flow(onos_cli)
+
+ run.checkFlows(
+ main,
+ minFlowCount=initial_flow_count + (
+ len(up4.emulated_ues) * N_FLOWS_PER_UE * n_switches)
+ )
+
+ # Load traffic config for the current test case
+ main.step("Load test JSON config")
+ cfgFile = main.configPath + "/tests/" + "CASE_%d.json" % test_idx
+ with open(cfgFile) as cfg:
+ testCfg = json.load(cfg)
+
+ main.step("Send traffic with TRex")
+ for flow in testCfg["flows"]:
+ trex.createFlow(flow)
+ results = trex.sendAndReceiveTraffic(testCfg["duration"])
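+ # Assert that the offered load actually created congestion; the optional
+ # "multiplier" from the test config compensates for GTP encap/decap
+ # changing frame sizes between TX and RX.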
+ trex.verifyCongestion(
+ results,
+ multiplier=float(testCfg.get("multiplier", "1"))
+ )
+
+ main.step("Log port and flow stats")
+ trex.logPortStats()
+ for flow in testCfg["flows"]:
+ trex.logFlowStats(flow)
+
+ # Assert Flow Stats
+ for flow in testCfg["flows"]:
+ if trex.isFlowStats(flow):
+ main.step("{}: Assert RX Packets".format(flow))
+ trex.assertRxPackets(flow)
+ main.step("{}: Assert Dropped Packets".format(flow))
+ trex.assertDroppedPacket(flow)
+ main.step("{}: Assert 90 Percentile Latency".format(flow))
+ trex.assert90PercentileLatency(flow)
+ main.step("{}: Assert 99.9 Percentile Latency".format(flow))
+ trex.assert99_9PercentileLatency(flow)
+
+ main.step("Remove PDRs and FARs via UP4")
+ up4.detachUes()
+ up4.verifyNoUesFlow(onos_cli)
+
+ run.checkFlows(main, minFlowCount=initial_flow_count)
+
+ main.step("Teardown")
+ trex.teardown()
+ up4.teardown()
+ run.cleanup(main)
diff --git a/TestON/tests/USECASE/SegmentRouting/QOS/dependencies/tests/CASE_1.json b/TestON/tests/USECASE/SegmentRouting/QOS/dependencies/tests/CASE_1.json
new file mode 100644
index 0000000..f2258f7
--- /dev/null
+++ b/TestON/tests/USECASE/SegmentRouting/QOS/dependencies/tests/CASE_1.json
@@ -0,0 +1,6 @@
+{
+ "flows": ["BE_FROM_PDN", "RT_FROM_PDN"],
+ "duration": 10,
+ "multiplier": 1.02571
+}
+
diff --git a/TestON/tests/USECASE/SegmentRouting/QOS/dependencies/tests/CASE_2.json b/TestON/tests/USECASE/SegmentRouting/QOS/dependencies/tests/CASE_2.json
new file mode 100644
index 0000000..6ac3bfd
--- /dev/null
+++ b/TestON/tests/USECASE/SegmentRouting/QOS/dependencies/tests/CASE_2.json
@@ -0,0 +1,5 @@
+{
+ "flows": ["BE1_FROM_UE", "BE2_FROM_UE", "RT_FROM_UE"],
+ "duration": 10,
+ "multiplier": 0.97428
+}
diff --git a/TestON/tests/USECASE/SegmentRouting/QOS/dependencies/tests/leaf_edge_mobile.json b/TestON/tests/USECASE/SegmentRouting/QOS/dependencies/tests/leaf_edge_mobile.json
deleted file mode 100644
index f32fa79..0000000
--- a/TestON/tests/USECASE/SegmentRouting/QOS/dependencies/tests/leaf_edge_mobile.json
+++ /dev/null
@@ -1,5 +0,0 @@
-{
- "flows": ["BE_FROM_PDN", "RT_FROM_PDN"],
- "duration": 10
-}
-
diff --git a/TestON/tests/USECASE/SegmentRouting/QOS/dependencies/trex_config.yaml b/TestON/tests/USECASE/SegmentRouting/QOS/dependencies/trex_config.yaml
index 00a1f85..5309605 100644
--- a/TestON/tests/USECASE/SegmentRouting/QOS/dependencies/trex_config.yaml
+++ b/TestON/tests/USECASE/SegmentRouting/QOS/dependencies/trex_config.yaml
@@ -1,13 +1,13 @@
-# TRex Port ID=0 --> PCI BUS: d8:00.0, Linux Intf: enp216s0f0 connected to leaf1/0 (PDN)
-# TRex Port ID=1 --> PCI BUS: d8:00.1, Linux Intf: enp216s0f1 not connected, but required by TRex to have an even number of interfaces
-# TRex Port ID=2 --> PCI BUS: 5e:00.0, Linux Intf: enp94s0f0 connected to leaf2/0 (eNodeB)
-# TRex Port ID=3 --> PCI BUS: 5e:00.1, Linux Intf: enp94s0f1 connected to leaf2/4
+# TRex Port ID=0 --> PCI BUS: d8:00.0, NUMA: 1 (CPU2), Linux Intf: enp216s0f0 connected to leaf1/0 (PDN)
+# TRex Port ID=1 --> PCI BUS: d8:00.1, NUMA: 1 (CPU2), Linux Intf: enp216s0f1 not connected, but required by TRex to have an even number of interfaces
+# TRex Port ID=2 --> PCI BUS: 5e:00.0, NUMA: 0 (CPU1), Linux Intf: enp94s0f0 connected to leaf2/0 (eNodeB)
+# TRex Port ID=3 --> PCI BUS: 5e:00.1, NUMA: 0 (CPU1), Linux Intf: enp94s0f1 connected to leaf2/4
- version: 2
port_limit: 4
interfaces: [ 'd8:00.0', 'd8:00.1', '5e:00.0', '5e:00.1']
port_bandwidth_gb: 40
- c: 16
+ c: 20
port_info:
- src_mac: 40:A6:B7:22:AB:40
dest_mac: 00:00:0A:4C:1C:46
diff --git a/TestON/tests/USECASE/SegmentRouting/QOSNonMobile/dependencies/QOSNonMobileTest.py b/TestON/tests/USECASE/SegmentRouting/QOSNonMobile/dependencies/QOSNonMobileTest.py
index 45e9d8c..d3b724f 100644
--- a/TestON/tests/USECASE/SegmentRouting/QOSNonMobile/dependencies/QOSNonMobileTest.py
+++ b/TestON/tests/USECASE/SegmentRouting/QOSNonMobile/dependencies/QOSNonMobileTest.py
@@ -98,7 +98,8 @@
main.step("Send traffic with TRex")
for flow in testCfg["flows"]:
trex.createFlow(flow)
- trex.sendAndReceiveTraffic(testCfg["duration"])
+ results = trex.sendAndReceiveTraffic(testCfg["duration"])
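+ # No multiplier passed: defaults to 1, since non-mobile traffic is not
+ # encapsulated and TX/RX byte counts are directly comparable.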
+ trex.verifyCongestion(results)
trex.logPortStats()
for flow in testCfg["flows"]:
diff --git a/TestON/tests/USECASE/SegmentRouting/dependencies/trex.py b/TestON/tests/USECASE/SegmentRouting/dependencies/trex.py
index d710d07..93a7c70 100644
--- a/TestON/tests/USECASE/SegmentRouting/dependencies/trex.py
+++ b/TestON/tests/USECASE/SegmentRouting/dependencies/trex.py
@@ -75,7 +75,7 @@
Connect the client, create the flows in trex (with packets created with
createFlow), send and receive the traffic, and disconnect the client.
:param duration: traffic duration
- :return:
+ :return: port statistics collected while running the test
"""
self.trex_client.connectTrexClient()
for flow_name, packet in self.packets.items():
@@ -88,8 +88,38 @@
delay=flow_config["delay"],
flow_id=flow_config["flow_id"],
flow_stats=flow_config["latency_stats"])
- self.trex_client.startAndWaitTraffic(duration=duration)
+ result = self.trex_client.startAndWaitTraffic(duration=duration,
+ ports=self.port_stats)
self.trex_client.disconnectTrexClient()
+ return result
+
+ def verifyCongestion(self, live_stats, multiplier=1):
+ """
+ Assert that the test generated congestion, by checking that the average
+ TX rate exceeds the average RX rate in the stats collected while traffic
+ was running.
+
+ :param live_stats: Stats collected while traffic was running
+ :param multiplier: Factor applied to the RX rate to compensate for
+ encap/decap changing frame sizes between TX and RX
+ :return:
+ """
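+ # Average each port's per-interval bps samples, then sum across ports.
+ # RX is scaled by the multiplier so encap/decap overhead does not mask
+ # (or fake) congestion.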
+ avg_tx = sum(
+ [sum(v["tx_bps"]) / len(v["tx_bps"])
+ for (k, v) in live_stats.items() if k != "duration"]
+ )
+ avg_rx = sum(
+ [sum(v["rx_bps"]) / len(v["rx_bps"])
+ for (k, v) in live_stats.items() if k != "duration"]
+ )
+
+ utilities.assert_equals(
+ expect=True,
+ actual=avg_tx > avg_rx * multiplier,
+ onpass="Congestion created: AVG TX ({}) > AVG RX ({})".format(
+ avg_tx, avg_rx),
+ onfail="NO Congestion: AVG TX ({}) <= AVG RX ({})".format(
+ avg_tx, avg_rx)
+ )
def assertRxPackets(self, flow_name):
if not self.isFlowStats(flow_name):
@@ -137,6 +167,10 @@
def assert99_9PercentileLatency(self, flow_name):
if not self.isFlowStats(flow_name):
main.log.info("No flow stats for flow {}".format(flow_name))
+ return
+ if "expected_99_9_percentile_latency" not in self.traffic_flows[flow_name]:
+ main.log.info("No expected 99.9th percentile latency for this flow")
+ return
expected_99_9_percentile_latency = int(
self.traffic_flows[flow_name].get(
"expected_99_9_percentile_latency", "0"))
@@ -149,6 +183,25 @@
onfail="Traffic Flow {}: 99.9th percentile latency is too high {}".format(
flow_name, latency_stats.percentile_99_9))
+ def assert90PercentileLatency(self, flow_name):
+ if not self.isFlowStats(flow_name):
+ main.log.info("No flow stats for flow {}".format(flow_name))
+ return
+ if "expected_90_percentile_latency" not in self.traffic_flows[flow_name]:
+ main.log.info("No expected 90th percentile latency for this flow")
+ return
+ expected_90_percentile_latency = int(
+ self.traffic_flows[flow_name].get(
+ "expected_90_percentile_latency", "0"))
+ latency_stats = self.__getLatencyStats(flow_name)
+ utilities.assert_equals(
+ expect=True,
+ actual=latency_stats.percentile_90 <= expected_90_percentile_latency,
+ onpass="Traffic Flow {}: 90th percentile latency below threshold".format(
+ flow_name),
+ onfail="Traffic Flow {}: 90th percentile latency is too high {}".format(
+ flow_name, latency_stats.percentile_90))
+
def logPortStats(self):
main.log.debug(self.port_stats)
for port in self.port_stats: