[SDFAB-988] QER rate limiting tests
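
Add tests verifying session-level and application-level QER rate
limiting (policing). The TRex client driver now tracks the configured
rate per port and checks that the target TX rate is reached and
sustained, the P4Runtime CLI driver gains helpers to build and modify
meter entries, and the UP4 library programs session and application
meters when a max_bps is configured for a UE or an application filter.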

Change-Id: I4c542a5c9a122c0595b36e5e96d2b093682cfc7c
diff --git a/TestON/drivers/common/api/controller/trexclientdriver.py b/TestON/drivers/common/api/controller/trexclientdriver.py
index de65f3a..bdbc303 100644
--- a/TestON/drivers/common/api/controller/trexclientdriver.py
+++ b/TestON/drivers/common/api/controller/trexclientdriver.py
@@ -8,6 +8,7 @@
 """
 import time
 import os
+import copy
 import sys
 import importlib
 import collections
@@ -92,6 +93,7 @@
         self.trex_client = None
         self.trex_daemon_client = None
         self.trex_library_python_path = None
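+        # Map each TRex port ID to the total L1 rate (bps) of the streams
+        # configured on that port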
+        self.gen_traffic_per_port = {}
         super(TrexClientDriver, self).__init__()
 
     def connect(self, **connectargs):
@@ -172,8 +174,7 @@
         # incoming packets if the destination mac is not the port mac address.
         self.trex_client.set_port_attr(self.trex_client.get_all_ports(),
                                        promiscuous=True)
-        # Reset the used sender ports
-        self.all_sender_port = set()
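+        # Reset the expected generated traffic per port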
+        self.gen_traffic_per_port = {}
         self.stats = None
         return True
 
@@ -227,7 +228,9 @@
                 percentage=percentage,
                 l1_bps=l1_bps)
         self.trex_client.add_streams(traffic_stream, ports=trex_port)
-        self.all_sender_port.add(trex_port)
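+        # Track the total L1 rate configured on this port so that
+        # __monitor_port_stats can verify the achieved TX rate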
+        gen_traffic = self.gen_traffic_per_port.get(trex_port, 0)
+        gen_traffic += l1_bps
+        self.gen_traffic_per_port[trex_port] = gen_traffic
         return True
 
     def startAndWaitTraffic(self, duration=10, ports=[]):
@@ -242,16 +245,18 @@
             main.log.error(
                 "Cannot start traffic, first connect the TRex client")
             return False
-        main.log.info("Start sending traffic for %d seconds" % duration)
-        self.trex_client.start(list(self.all_sender_port), mult="1",
+        # Reset stats from previous run
+        self.stats = None
+        main.step("Sending traffic for %d seconds" % duration)
+        self.trex_client.start(list(self.gen_traffic_per_port.keys()), mult="1",
                                duration=duration)
         main.log.info("Waiting until all traffic is sent..")
-        result = self.__monitor_port_stats(ports)
-        self.trex_client.wait_on_traffic(ports=list(self.all_sender_port),
+        result = self.__monitor_port_stats({p: self.gen_traffic_per_port.get(p, None) for p in ports})
+        self.trex_client.wait_on_traffic(ports=list(self.gen_traffic_per_port.keys()),
                                          rx_delay_ms=100)
         main.log.info("...traffic sent!")
         # Reset sender port so we can run other tests with the same TRex client
-        self.all_sender_port = set()
+        self.gen_traffic_per_port = {}
         main.log.info("Getting stats")
         self.stats = self.trex_client.get_stats()
         return result
@@ -357,43 +362,53 @@
     M = 1000 * K
     G = 1000 * M
 
-    def __monitor_port_stats(self, ports, time_interval=1):
+    def __monitor_port_stats(self, target_tx_per_port, num_samples=4,
+                             ramp_up_timeout=5, time_interval=1, min_tx_bps_margin=0.95):
         """
-        List some port stats continuously while traffic is active
+        List some port stats continuously while traffic is active and verify
+        that the generated traffic matches the expected rate
 
-        :param ports: List of ports ids to monitor
+        :param target_tx_per_port: Expected TX rate (bps) for each port to monitor
         :param time_interval: Interval between read
+        :param num_samples: Number of statistics samples to collect from each
+            monitored port
+        :param ramp_up_timeout: Seconds to wait for TRex to reach the target TX rate
+        :param min_tx_bps_margin: Minimum fraction of the target TX rate that a
+            sample must reach to be considered valid
         :return: Statistics read while traffic is active, or empty result if no
-                 ports provided.
+                 target_tx_per_port provided.
         """
+
+        ports = list(target_tx_per_port.keys())
+        local_gen_traffic_per_port = copy.deepcopy(target_tx_per_port)
         results = {
             port_id: {"rx_bps": [], "tx_bps": [], "rx_pps": [], "tx_pps": []}
             for port_id in ports
         }
         results["duration"] = []
 
-        if not ports:
+        if len(ports) == 0:
             return results
 
+        start_time = time.time()
         prev = {
             port_id: {
                 "opackets": 0,
                 "ipackets": 0,
                 "obytes": 0,
                 "ibytes": 0,
-                "time": time.time(),
+                "time": start_time,
             }
             for port_id in ports
         }
 
-        s_time = time.time()
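+        # Wait one interval before the first read so that the first sample
+        # covers a full measurement interval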
+        time.sleep(time_interval)
         while self.trex_client.is_traffic_active():
             stats = self.trex_client.get_stats(ports=ports)
+            sample_time = time.time()
+            elapsed = sample_time - start_time
             if not stats:
                 break
 
             main.log.debug(
-                "\nTRAFFIC RUNNING {:.2f} SEC".format(time.time() - s_time))
+                "\nTRAFFIC RUNNING {:.2f} SEC".format(elapsed))
             main.log.debug(
                 "{:^4} | {:<10} | {:<10} | {:<10} | {:<10} |".format(
                     "Port", "RX bps", "TX bps", "RX pps", "TX pps"
@@ -402,21 +417,21 @@
             main.log.debug(
                 "----------------------------------------------------------")
 
-            for port in ports:
-                opackets = stats[port]["opackets"]
-                ipackets = stats[port]["ipackets"]
-                obytes = stats[port]["obytes"]
-                ibytes = stats[port]["ibytes"]
-                time_diff = time.time() - prev[port]["time"]
+            # Iterate over a copy: ports are removed from the dict below once
+            # enough samples have been collected
+            for (tx_port, target_tx_rate) in list(local_gen_traffic_per_port.items()):
+                opackets = stats[tx_port]["opackets"]
+                ipackets = stats[tx_port]["ipackets"]
+                obytes = stats[tx_port]["obytes"]
+                ibytes = stats[tx_port]["ibytes"]
+                time_diff = sample_time - prev[tx_port]["time"]
 
-                rx_bps = 8 * (ibytes - prev[port]["ibytes"]) / time_diff
-                tx_bps = 8 * (obytes - prev[port]["obytes"]) / time_diff
-                rx_pps = ipackets - prev[port]["ipackets"] / time_diff
-                tx_pps = opackets - prev[port]["opackets"] / time_diff
+                rx_bps = 8 * (ibytes - prev[tx_port]["ibytes"]) / time_diff
+                tx_bps = 8 * (obytes - prev[tx_port]["obytes"]) / time_diff
+                rx_pps = (ipackets - prev[tx_port]["ipackets"]) / time_diff
+                tx_pps = (opackets - prev[tx_port]["opackets"]) / time_diff
 
                 main.log.debug(
                     "{:^4} | {:<10} | {:<10} | {:<10} | {:<10} |".format(
-                        port,
+                        tx_port,
                         TrexClientDriver.__to_readable(rx_bps, "bps"),
                         TrexClientDriver.__to_readable(tx_bps, "bps"),
                         TrexClientDriver.__to_readable(rx_pps, "pps"),
@@ -424,21 +439,56 @@
                     )
                 )
 
-                results["duration"].append(time.time() - s_time)
-                results[port]["rx_bps"].append(rx_bps)
-                results[port]["tx_bps"].append(tx_bps)
-                results[port]["rx_pps"].append(rx_pps)
-                results[port]["tx_pps"].append(tx_pps)
+                results["duration"].append(sample_time - start_time)
+                results[tx_port]["rx_bps"].append(rx_bps)
+                results[tx_port]["tx_bps"].append(tx_bps)
+                results[tx_port]["rx_pps"].append(rx_pps)
+                results[tx_port]["tx_pps"].append(tx_pps)
 
-                prev[port]["opackets"] = opackets
-                prev[port]["ipackets"] = ipackets
-                prev[port]["obytes"] = obytes
-                prev[port]["ibytes"] = ibytes
-                prev[port]["time"] = time.time()
+                prev[tx_port]["opackets"] = opackets
+                prev[tx_port]["ipackets"] = ipackets
+                prev[tx_port]["obytes"] = obytes
+                prev[tx_port]["ibytes"] = ibytes
+                prev[tx_port]["time"] = sample_time
+
+                if target_tx_rate is not None:
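+                    # If the port has not reached the minimum TX rate, fail
+                    # once the ramp-up timeout has expired; otherwise discard
+                    # the sample and keep waiting for TRex to ramp up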
+                    if tx_bps < (target_tx_rate * min_tx_bps_margin):
+                        if elapsed > ramp_up_timeout:
+                            self.trex_client.stop(ports=ports)
+                            utilities.assert_equal(
+                                expect=True, actual=False,
+                                onpass="Should never reach this",
+                                onfail="TX port ({}) did not reach or sustain min sending rate ({})".format(
+                                    tx_port, target_tx_rate)
+                            )
+                            return {}
+                        else:
+                            results[tx_port]["rx_bps"].pop()
+                            results[tx_port]["tx_bps"].pop()
+                            results[tx_port]["rx_pps"].pop()
+                            results[tx_port]["tx_pps"].pop()
+
+                    if len(results[tx_port]["tx_bps"]) == num_samples:
+                        # Stop monitoring ports for which we have enough samples
+                        del local_gen_traffic_per_port[tx_port]
+
+            if len(local_gen_traffic_per_port) == 0:
+                # Enough samples for all ports
+                utilities.assert_equal(
+                    expect=True, actual=True,
+                    onpass="Enough samples have been generated",
+                    onfail="Should never reach this"
+                )
+                return results
 
             time.sleep(time_interval)
             main.log.debug("")
 
+        utilities.assert_equal(
+            expect=True, actual=True,
+            onpass="Traffic sent correctly",
+            onfail="Should never reach this"
+        )
         return results
 
     @staticmethod
diff --git a/TestON/drivers/common/cli/p4runtimeclidriver.py b/TestON/drivers/common/cli/p4runtimeclidriver.py
index 9f371dc..b2b1756 100644
--- a/TestON/drivers/common/cli/p4runtimeclidriver.py
+++ b/TestON/drivers/common/cli/p4runtimeclidriver.py
@@ -234,6 +234,46 @@
             main.log.exception(self.name + ": Uncaught exception!")
             main.cleanAndExit()
 
+    def modifyMeterEntry(self, meterEntry=None, debug=True):
+        """
+        Modify a meter entry with either the given meter entry or use the saved
+        meter entry in the variable 'me'.
+
+        Example of a valid tableEntry string:
+        me = meter_entry["FabricIngress.upf.app_meter"]; me.cir = 1; me.cburst=1; me.pir=1; me.pburst=1; # nopep8
+
+        :param meterEntry: the string meter entry, if None it uses the meter
+            entry saved in the 'me' variable
+        :param debug: True to enable debug logging, False otherwise
+        :return: main.TRUE or main.FALSE on error
+        """
+        try:
+            main.log.debug(self.name + ": Pushing Meter Entry")
+            if debug:
+                self.handle.sendline("me")
+                self.handle.expect(self.p4rtShPrompt)
+            pushCmd = ""
+            if meterEntry:
+                pushCmd = meterEntry + ";"
+            pushCmd += "me.modify()"
+            response = self.__clearSendAndExpect(pushCmd)
+            if "Traceback" in response or "Error" in response or "INVALID_ARGUMENT" in response:
+                # TODO: catch other possible errors? (e.g., NameError)
+                main.log.error(
+                    self.name + ": Error in modifying meter entry: " + response)
+                return main.FALSE
+            return main.TRUE
+        except pexpect.TIMEOUT:
+            main.log.exception(self.name + ": Command timed out")
+            return main.FALSE
+        except pexpect.EOF:
+            main.log.exception(self.name + ": connection closed.")
+            main.cleanAndExit()
+        except Exception:
+            main.log.exception(self.name + ": Uncaught exception!")
+            main.cleanAndExit()
+
     def buildP4RtTableEntry(self, tableName, actionName, actionParams={},
                             matchFields={}, priority=0):
         """
@@ -250,7 +290,8 @@
         try:
             main.log.debug("%s: Building P4RT Table Entry "
                            "(table=%s, match=%s, priority=%s, action=%s, params=%s)" % (
-                self.name, tableName, matchFields, priority, actionName, actionParams))
+                               self.name, tableName, matchFields, priority,
+                               actionName, actionParams))
             cmd = 'te = table_entry["%s"](action="%s"); ' % (
                 tableName, actionName)
 
@@ -302,6 +343,48 @@
             main.log.exception(self.name + ": Uncaught exception!")
             main.cleanAndExit()
 
+    def buildP4RtMeterEntry(self, meterName, index, cir=None, cburst=None, pir=None,
+                            pburst=None):
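+        """
+        Build a P4Runtime meter entry and save it in the 'me' variable of the
+        P4Runtime shell. Band parameters left as None are not set; passing only
+        meterName and index builds an entry that clears the meter configuration.
+
+        Example of the command being built:
+        me = meter_entry["PreQosPipe.session_meter"]; me.index=10; me.cir=0; me.cburst=0; me.pir=25000000; me.pburst=100000; # nopep8
+
+        :param meterName: the name of the P4 meter
+        :param index: the index of the meter cell to configure
+        :param cir: committed information rate, not set if None
+        :param cburst: committed burst size, not set if None
+        :param pir: peak information rate, not set if None
+        :param pburst: peak burst size, not set if None
+        :return: main.TRUE or main.FALSE on error
+        """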
+        # TODO: improve error checking
+        try:
+            main.log.debug(
+                "%s: Building P4RT Meter Entry (meter=%s, index=%d, cir=%s, "
+                "cburst=%s, pir=%s, pburst=%s)" % (
+                    self.name, meterName, index, str(cir), str(cburst), str(pir), str(pburst)
+                )
+            )
+            cmd = 'me = meter_entry["%s"]; ' % meterName
+            cmd += 'me.index=%d; ' % index
+            if cir is not None:
+                cmd += 'me.cir=%d; ' % cir
+            if cburst is not None:
+                cmd += 'me.cburst=%d; ' % cburst
+            if pir is not None:
+                cmd += 'me.pir=%d; ' % pir
+            if pburst is not None:
+                cmd += 'me.pburst=%d; ' % pburst
+
+            response = self.__clearSendAndExpect(cmd)
+            if "meter" in response and "does not exist" in response:
+                main.log.error("Unknown meter: " + response)
+                return main.FALSE
+            if "UNIMPLEMENTED" in response:
+                main.log.error("Error in creating the meter entry: " + response)
+                return main.FALSE
+            if "Traceback" in response:
+                main.log.error("Error in creating the meter entry: " + response)
+                return main.FALSE
+            return main.TRUE
+        except pexpect.TIMEOUT:
+            main.log.exception(self.name + ": Command timed out")
+            return main.FALSE
+        except pexpect.EOF:
+            main.log.exception(self.name + ": connection closed.")
+            main.cleanAndExit()
+        except Exception:
+            main.log.exception(self.name + ": Uncaught exception!")
+            main.cleanAndExit()
+
     def readNumberTableEntries(self, tableName):
         """
         Read table entries and return the number of entries present in a table.
@@ -310,7 +393,8 @@
         :return: Number of entries,
         """
         try:
-            main.log.debug(self.name + ": Reading table entries from " + tableName)
+            main.log.debug(
+                self.name + ": Reading table entries from " + tableName)
             cmd = 'table_entry["%s"].read(lambda te: print(te))' % tableName
             response = self.__clearSendAndExpect(cmd, clearBufferAfter=True)
             # Every table entries starts with "table_id: [P4RT obj ID] ("[tableName]")"
diff --git a/TestON/tests/USECASE/SegmentRouting/Policing/Policing.params b/TestON/tests/USECASE/SegmentRouting/Policing/Policing.params
new file mode 100644
index 0000000..532e13e
--- /dev/null
+++ b/TestON/tests/USECASE/SegmentRouting/Policing/Policing.params
@@ -0,0 +1,186 @@
+<PARAMS>
+    <testcases>1,2</testcases>
+
+    <GRAPH>
+        <nodeCluster>pairedleaves</nodeCluster>
+        <builds>20</builds>
+        <jobName>QOS</jobName>
+        <branch>master</branch>
+    </GRAPH>
+
+    <persistent_setup>True</persistent_setup>
+
+    <kubernetes>
+        <appName>onos-classic</appName>
+        <namespace>tost</namespace>
+    </kubernetes>
+    <use_stern>True</use_stern>
+
+    <UP4>
+        <s1u_address>10.32.11.126</s1u_address>
+        <slice_id>1</slice_id>
+        <enodebs>
+            <enodeb_1>
+                <host>TRexClient</host>
+                <enb_address>10.32.11.124</enb_address>
+                <interface>pairbond</interface> <!-- unused in this test, traffic is generated by TRex -->
+                <ues>ue1,ue2</ues>
+            </enodeb_1>
+        </enodebs>
+        <ues>
+            <ue1>
+                <ue_address>10.240.0.1</ue_address>
+                <teid>100</teid>
+                <up_id>10</up_id>
+                <down_id>20</down_id>
+                <tc>3</tc>
+                <five_g>False</five_g>
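+                <!-- Maximum session bitrate in bps, used to program the session meter -->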
+                <max_bps>200000000</max_bps>
+            </ue1>
+            <ue2>
+                <ue_address>10.240.0.2</ue_address>
+                <teid>200</teid>
+                <up_id>30</up_id>
+                <down_id>40</down_id>
+                <tc>3</tc>
+                <five_g>False</five_g>
+                <max_bps>200000000</max_bps>
+            </ue2>
+        </ues>
+        <app_filters>
+            <allowPort>
+                <app_id>1</app_id>
+                <ip_prefix>10.32.11.125/32</ip_prefix>
+                <ip_proto>17</ip_proto>
+                <port_range>80..80</port_range>
+                <priority>20</priority>
+                <action>allow</action>
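+                <!-- Maximum application bitrate in bps, used to program the app meter -->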
+                <max_bps>100000000</max_bps>
+            </allowPort>
+            <allowAll>
+                <app_id>0</app_id>
+                <action>allow</action>
+            </allowAll>
+        </app_filters>
+    </UP4>
+
+    <TREX>
+        <port_stats>0,2,3</port_stats> <!-- TRex port 0 = PDN, TRex port 2 = eNodeB, TRex port 3 = second eNB -->
+        <flows>
+            <UE1>
+                <name>UE1: Conformant Session</name>
+                <l1_bps>200000000</l1_bps>
+                <trex_port>2</trex_port>
+                <packet>
+                    <pktlen>1400</pktlen>
+                    <ip_src>10.240.0.1</ip_src>
+                    <ip_dst>10.32.11.125</ip_dst>
+                    <udp_dport>81</udp_dport>
+                    <eth_src>40:A6:B7:22:AB:20</eth_src>
+                    <eth_dst>00:00:0A:4C:1C:46</eth_dst>
+                    <gtp_teid>100</gtp_teid>
+                    <s1u_addr>10.32.11.126</s1u_addr>
+                    <enb_addr>10.32.11.124</enb_addr>
+                </packet>
+                <latency_stats>true</latency_stats>
+                <flow_id>20</flow_id>
+                <expected_max_dropped>0</expected_max_dropped>
+                <expected_rx_bps>200000000</expected_rx_bps>
+            </UE1>
+            <UE2>
+                <name>UE2: Non-Conformant Session</name>
+                <l1_bps>300000000</l1_bps>
+                <trex_port>2</trex_port>
+                <packet>
+                    <pktlen>1400</pktlen>
+                    <ip_src>10.240.0.2</ip_src>
+                    <ip_dst>10.32.11.125</ip_dst>
+                    <udp_dport>81</udp_dport>
+                    <eth_src>40:A6:B7:22:AB:20</eth_src>
+                    <eth_dst>00:00:0A:4C:1C:46</eth_dst>
+                    <gtp_teid>200</gtp_teid>
+                    <s1u_addr>10.32.11.126</s1u_addr>
+                    <enb_addr>10.32.11.124</enb_addr>
+                </packet>
+                <latency_stats>true</latency_stats>
+                <flow_id>21</flow_id>
+                <expected_rx_bps>200000000</expected_rx_bps>
+            </UE2>
+            <APP1>
+                <name>APP1: Conformant Application</name>
+                <l1_bps>100000000</l1_bps>
+                <trex_port>2</trex_port>
+                <packet>
+                    <pktlen>1400</pktlen>
+                    <ip_src>10.240.0.1</ip_src>
+                    <ip_dst>10.32.11.125</ip_dst>
+                    <udp_dport>80</udp_dport>
+                    <eth_src>40:A6:B7:22:AB:20</eth_src>
+                    <eth_dst>00:00:0A:4C:1C:46</eth_dst>
+                    <gtp_teid>100</gtp_teid>
+                    <s1u_addr>10.32.11.126</s1u_addr>
+                    <enb_addr>10.32.11.124</enb_addr>
+                </packet>
+                <latency_stats>true</latency_stats>
+                <flow_id>30</flow_id>
+                <expected_max_dropped>0</expected_max_dropped>
+                <expected_rx_bps>100000000</expected_rx_bps>
+            </APP1>
+            <APP2>
+                <name>APP2: Non-Conformant Application</name>
+                <l1_bps>200000000</l1_bps>
+                <trex_port>2</trex_port>
+                <packet>
+                    <pktlen>1400</pktlen>
+                    <ip_src>10.240.0.2</ip_src>
+                    <ip_dst>10.32.11.125</ip_dst>
+                    <udp_dport>80</udp_dport>
+                    <eth_src>40:A6:B7:22:AB:20</eth_src>
+                    <eth_dst>00:00:0A:4C:1C:46</eth_dst>
+                    <gtp_teid>200</gtp_teid>
+                    <s1u_addr>10.32.11.126</s1u_addr>
+                    <enb_addr>10.32.11.124</enb_addr>
+                </packet>
+                <latency_stats>true</latency_stats>
+                <flow_id>31</flow_id>
+                <expected_rx_bps>100000000</expected_rx_bps>
+            </APP2>
+        </flows>
+    </TREX>
+
+    <TOPO>
+        <switchNum>2</switchNum>
+        <linkNum>2</linkNum>
+    </TOPO>
+
+    <ONOS_Logging>
+        <org.onosproject.segmentrouting>DEBUG</org.onosproject.segmentrouting>
+        <org.omecproject.up4>TRACE</org.omecproject.up4>
+    </ONOS_Logging>
+    <ONOS_Logging_Reset>
+        <org.onosproject.segmentrouting>DEBUG</org.onosproject.segmentrouting>
+        <org.omecproject.up4>INFO</org.omecproject.up4>
+    </ONOS_Logging_Reset>
+
+    <ENV>
+        <cellName>productionCell</cellName>
+        <cellApps>drivers,fpm,lldpprovider,hostprovider,netcfghostprovider,drivers.bmv2,org.opencord.fabric-tofino,pipelines.fabric,org.stratumproject.fabric-tna,drivers.barefoot,segmentrouting,up4</cellApps>
+    </ENV>
+
+    <DEPENDENCY>
+        <useCommonConf>False</useCommonConf>
+        <useCommonTopo>True</useCommonTopo>
+        <useBmv2>True</useBmv2>
+        <bmv2SwitchType>stratum</bmv2SwitchType>
+        <switchPrefix></switchPrefix>
+        <stratumRoot>~/stratum</stratumRoot>
+        <topology>trellis_fabric.py</topology>
+        <lib></lib>
+    </DEPENDENCY>
+
+    <SCALE>
+        <size>3</size>
+        <max>3</max>
+    </SCALE>
+
+</PARAMS>
diff --git a/TestON/tests/USECASE/SegmentRouting/Policing/Policing.py b/TestON/tests/USECASE/SegmentRouting/Policing/Policing.py
new file mode 100644
index 0000000..730d20e
--- /dev/null
+++ b/TestON/tests/USECASE/SegmentRouting/Policing/Policing.py
@@ -0,0 +1,34 @@
+class Policing:
+
+    def __init__(self):
+        self.default = ''
+
+    def CASE1(self, main):
+        main.case("Session level QER")
+        try:
+            from tests.USECASE.SegmentRouting.Policing.dependencies.PolicingTest import \
+                PolicingTest
+        except ImportError as e:
+            main.log.error("Import not found. Exiting the test")
+            main.log.error(e)
+            main.cleanAndExit()
+        test = PolicingTest()
+        test.runTest(
+            main,
+            test_idx=1
+        )
+
+    def CASE2(self, main):
+        main.case("Application level QER")
+        try:
+            from tests.USECASE.SegmentRouting.Policing.dependencies.PolicingTest import \
+                PolicingTest
+        except ImportError as e:
+            main.log.error("Import not found. Exiting the test")
+            main.log.error(e)
+            main.cleanAndExit()
+        test = PolicingTest()
+        test.runTest(
+            main,
+            test_idx=2
+        )
diff --git a/TestON/tests/USECASE/SegmentRouting/Policing/Policing.topo b/TestON/tests/USECASE/SegmentRouting/Policing/Policing.topo
new file mode 100644
index 0000000..7aee367
--- /dev/null
+++ b/TestON/tests/USECASE/SegmentRouting/Policing/Policing.topo
@@ -0,0 +1,56 @@
+<TOPOLOGY>
+    <COMPONENT>
+        <ONOScell>
+            <host>localhost</host>  # ONOS "bench" machine
+            <user>jenkins</user>
+            <password></password>
+            <type>OnosClusterDriver</type>
+            <connect_order>50</connect_order>
+            <jump_host></jump_host>
+            <home>~/onos</home>   # defines where onos home is on the build machine. Defaults to "~/onos/" if empty.
+            <COMPONENTS>
+                <kubeConfig>~/.kube/dev-pairedleaves-tucson</kubeConfig>  # If set, will attempt to use this file for setting up port-forwarding
+                <useDocker>True</useDocker>  # Whether to use docker for ONOS nodes
+                <docker_prompt>\$</docker_prompt>
+                <cluster_name></cluster_name>  # Used as a prefix for cluster components. Defaults to 'ONOS'
+                <diff_clihost>True</diff_clihost> # if it has different host other than localhost for CLI. True or empty. OC# will be used if True.
+                <karaf_username>karaf</karaf_username>
+                <karaf_password>karaf</karaf_password>
+                <web_user>karaf</web_user>
+                <web_pass>karaf</web_pass>
+                <karafPrompt_username>karaf</karafPrompt_username>
+                <rest_port></rest_port>
+                <prompt></prompt>  # TODO: we technically need a few of these, one per component
+                <onos_home>~/onos/</onos_home>  # defines where onos home is on the target cell machine. Defaults to entry in "home" if empty.
+                <nodes> 3 </nodes>  # number of nodes in the cluster
+                <up4_port>51001</up4_port> # Port where the UP4 P4Runtime server is listening
+            </COMPONENTS>
+        </ONOScell>
+
+        <!-- No need for any HostDriver components, traffic is being generated by TRex-->
+        <TRexClient>
+            <host>localhost</host>
+            <type>TrexClientDriver</type>
+            <connect_order>5</connect_order>
+            <COMPONENTS>
+                <trex_address>10.76.28.72</trex_address> <!-- Compute2 -->
+                <trex_config>trex_config.yaml</trex_config> <!-- relative path starting from ./dependencies-->
+                <force_restart>True</force_restart>
+                <software_mode>True</software_mode>
+                <trex_library_python_path>/home/jenkins/trex_python</trex_library_python_path>
+            </COMPONENTS>
+        </TRexClient>
+
+    <!-- This component is not used by the test logic, but it is required by the Testcaselib -->
+        <NetworkBench>
+            <host>10.76.28.66</host>
+            <user>jenkins</user>
+            <password></password>
+            <type>NetworkDriver</type>
+            <connect_order>1</connect_order>
+            <COMPONENTS>
+            </COMPONENTS>
+        </NetworkBench>
+
+    </COMPONENT>
+</TOPOLOGY>
diff --git a/TestON/tests/USECASE/SegmentRouting/Policing/__init__.py b/TestON/tests/USECASE/SegmentRouting/Policing/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/TestON/tests/USECASE/SegmentRouting/Policing/__init__.py
diff --git a/TestON/tests/USECASE/SegmentRouting/Policing/dependencies/PolicingTest.py b/TestON/tests/USECASE/SegmentRouting/Policing/dependencies/PolicingTest.py
new file mode 100644
index 0000000..0ef3465
--- /dev/null
+++ b/TestON/tests/USECASE/SegmentRouting/Policing/dependencies/PolicingTest.py
@@ -0,0 +1,59 @@
+from tests.USECASE.SegmentRouting.dependencies.Testcaselib import \
+    Testcaselib as run
+from tests.USECASE.SegmentRouting.dependencies.trex import Trex
+from tests.USECASE.SegmentRouting.dependencies.up4 import UP4
+import json
+
+
+class PolicingTest:
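+    """
+    QER rate limiting (policing) test: program UPF entities and meters via
+    UP4, generate conformant and non-conformant flows with TRex, and verify
+    that the received rate of each flow matches the expected maximum bitrate.
+    """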
+
+    def runTest(self, main, test_idx):
+        run.initTest(main)
+        main.log.info(main.Cluster.numCtrls)
+        main.Cluster.setRunningNode(3)
+        run.installOnos(main, skipPackage=True, cliSleep=5)
+
+        main.step("Start P4rt client and setup TRex")
+        # Use the first available ONOS instance CLI
+        onos_cli = main.Cluster.active(0).CLI
+        up4 = UP4()
+        trex = Trex()
+        # Get the P4RT client connected to UP4 in the first available ONOS instance
+        up4.setup(main.Cluster.active(0).p4rtUp4, no_host=True)
+        trex.setup(main.TRexClient)
+
+        try:
+            main.step("Program UPF entities via UP4")
+            up4.attachUes()
+            up4.verifyUp4Flow(onos_cli)
+
+            # Load traffic config for the current test case
+            main.step("Load test JSON config")
+            cfgFile = main.configPath + "/tests/" + "CASE_%d.json" % test_idx
+            with open(cfgFile) as cfg:
+                testCfg = json.load(cfg)
+
+            for flow in testCfg["flows"]:
+                trex.createFlow(flow)
+            results = trex.sendAndReceiveTraffic(testCfg["duration"])
+
+            main.step("Log port and flow stats")
+            trex.logPortStats()
+            for flow in testCfg["flows"]:
+                trex.logFlowStats(flow)
+
+            for flow in testCfg["flows"]:
+                if trex.isFlowStats(flow):
+                    main.step("{}: Assert RX Packets".format(flow))
+                    trex.assertRxPackets(flow)
+                    main.step("{}: Assert RX Rate".format(flow))
+                    # Assert that the received rate matches the expected rate for this flow
+                    trex.assertRxRate(flow, results["duration"][-1])
+        finally:
+            main.step("Remove UPF entities via UP4")
+            up4.detachUes()
+            up4.verifyNoUesFlow(onos_cli)
+
+            main.step("Teardown")
+            trex.teardown()
+            up4.teardown()
+            run.cleanup(main)
diff --git a/TestON/tests/USECASE/SegmentRouting/Policing/dependencies/__init__.py b/TestON/tests/USECASE/SegmentRouting/Policing/dependencies/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/TestON/tests/USECASE/SegmentRouting/Policing/dependencies/__init__.py
diff --git a/TestON/tests/USECASE/SegmentRouting/Policing/dependencies/tests/CASE_1.json b/TestON/tests/USECASE/SegmentRouting/Policing/dependencies/tests/CASE_1.json
new file mode 100644
index 0000000..c1bf2f9
--- /dev/null
+++ b/TestON/tests/USECASE/SegmentRouting/Policing/dependencies/tests/CASE_1.json
@@ -0,0 +1,4 @@
+{
+  "flows": ["UE1", "UE2"],
+  "duration": 10
+}
diff --git a/TestON/tests/USECASE/SegmentRouting/Policing/dependencies/tests/CASE_2.json b/TestON/tests/USECASE/SegmentRouting/Policing/dependencies/tests/CASE_2.json
new file mode 100644
index 0000000..483a7a5
--- /dev/null
+++ b/TestON/tests/USECASE/SegmentRouting/Policing/dependencies/tests/CASE_2.json
@@ -0,0 +1,4 @@
+{
+  "flows": ["APP1", "APP2"],
+  "duration": 10
+}
diff --git a/TestON/tests/USECASE/SegmentRouting/Policing/dependencies/trex_config.yaml b/TestON/tests/USECASE/SegmentRouting/Policing/dependencies/trex_config.yaml
new file mode 100644
index 0000000..5309605
--- /dev/null
+++ b/TestON/tests/USECASE/SegmentRouting/Policing/dependencies/trex_config.yaml
@@ -0,0 +1,19 @@
+# TRex Port ID=0 --> PCI BUS: d8:00.0, NUMA: 1 (CPU2), Linux Intf: enp216s0f0 connected to leaf1/0 (PDN)
+# TRex Port ID=1 --> PCI BUS: d8:00.1, NUMA: 1 (CPU2), Linux Intf: enp216s0f1 not connected, but required by TRex to have an even number of interfaces
+# TRex Port ID=2 --> PCI BUS: 5e:00.0, NUMA: 0 (CPU1), Linux Intf: enp94s0f0 connected to leaf2/0 (eNodeB)
+# TRex Port ID=3 --> PCI BUS: 5e:00.1, NUMA: 0 (CPU1), Linux Intf: enp94s0f1 connected to leaf2/4
+
+- version: 2
+  port_limit: 4
+  interfaces: [ 'd8:00.0', 'd8:00.1', '5e:00.0', '5e:00.1']
+  port_bandwidth_gb: 40
+  c: 20
+  port_info:
+    - src_mac: 40:A6:B7:22:AB:40
+      dest_mac: 00:00:0A:4C:1C:46
+    - src_mac: 40:A6:B7:22:AB:41
+      dest_mac: 00:00:0A:4C:1C:46
+    - src_mac: 40:A6:B7:22:AB:20
+      dest_mac: 00:00:0A:4C:1C:46
+    - src_mac: 40:A6:B7:22:AB:21
+      dest_mac: 00:00:0A:4C:1C:46
diff --git a/TestON/tests/USECASE/SegmentRouting/QOS/QOS.params b/TestON/tests/USECASE/SegmentRouting/QOS/QOS.params
index 9f24236..21210b0 100644
--- a/TestON/tests/USECASE/SegmentRouting/QOS/QOS.params
+++ b/TestON/tests/USECASE/SegmentRouting/QOS/QOS.params
@@ -59,7 +59,7 @@
         <flows>
             <BE1_FROM_UE>
                 <name>Best Effort 1</name>
-                <l1_bps>40000000000</l1_bps>
+                <l1_bps>25000000000</l1_bps>
                 <trex_port>2</trex_port>
                 <packet>
                     <pktlen>1400</pktlen>
@@ -74,7 +74,7 @@
             </BE1_FROM_UE>
             <BE2_FROM_UE>
                 <name>Best Effort 2</name>
-                <l1_bps>35000000000</l1_bps>
+                <l1_bps>25000000000</l1_bps>
                 <trex_port>3</trex_port>
                 <packet>
                     <pktlen>1400</pktlen>
diff --git a/TestON/tests/USECASE/SegmentRouting/QOS/dependencies/QOSTest.py b/TestON/tests/USECASE/SegmentRouting/QOS/dependencies/QOSTest.py
index 217f639..801e30a 100644
--- a/TestON/tests/USECASE/SegmentRouting/QOS/dependencies/QOSTest.py
+++ b/TestON/tests/USECASE/SegmentRouting/QOS/dependencies/QOSTest.py
@@ -22,47 +22,48 @@
         up4.setup(main.Cluster.active(0).p4rtUp4, no_host=True)
         trex.setup(main.TRexClient)
 
-        main.step("Program UPF entities via UP4")
-        up4.attachUes()
-        up4.verifyUp4Flow(onos_cli)
+        try:
+            main.step("Program UPF entities via UP4")
+            up4.attachUes()
+            up4.verifyUp4Flow(onos_cli)
 
-        # Load traffic config for the current test case
-        main.step("Load test JSON config")
-        cfgFile = main.configPath + "/tests/" + "CASE_%d.json" % test_idx
-        with open(cfgFile) as cfg:
-            testCfg = json.load(cfg)
+            # Load traffic config for the current test case
+            main.step("Load test JSON config")
+            cfgFile = main.configPath + "/tests/" + "CASE_%d.json" % test_idx
+            with open(cfgFile) as cfg:
+                testCfg = json.load(cfg)
 
-        main.step("Send traffic with TRex")
-        for flow in testCfg["flows"]:
-            trex.createFlow(flow)
-        results = trex.sendAndReceiveTraffic(testCfg["duration"])
-        trex.verifyCongestion(
-            results,
-            multiplier=float(testCfg.get("multiplier", "1"))
-        )
+            for flow in testCfg["flows"]:
+                trex.createFlow(flow)
+            results = trex.sendAndReceiveTraffic(testCfg["duration"])
+            main.step("Verify congestion")
+            trex.verifyCongestion(
+                results,
+                multiplier=float(testCfg.get("multiplier", "1"))
+            )
 
-        main.step("Log port and flow stats")
-        trex.logPortStats()
-        for flow in testCfg["flows"]:
-            trex.logFlowStats(flow)
+            main.step("Log port and flow stats")
+            trex.logPortStats()
+            for flow in testCfg["flows"]:
+                trex.logFlowStats(flow)
 
-        # Assert Flow Stats
-        for flow in testCfg["flows"]:
-            if trex.isFlowStats(flow):
-                main.step("{}: Assert RX Packets".format(flow))
-                trex.assertRxPackets(flow)
-                main.step("{}: Assert Dropped Packets".format(flow))
-                trex.assertDroppedPacket(flow)
-                main.step("{}: Assert 90 Percentile Latency".format(flow))
-                trex.assert90PercentileLatency(flow)
-                main.step("{}: Assert 99.9 Percentile Latency".format(flow))
-                trex.assert99_9PercentileLatency(flow)
+            # Assert Flow Stats
+            for flow in testCfg["flows"]:
+                if trex.isFlowStats(flow):
+                    main.step("{}: Assert RX Packets".format(flow))
+                    trex.assertRxPackets(flow)
+                    main.step("{}: Assert Dropped Packets".format(flow))
+                    trex.assertDroppedPacket(flow)
+                    main.step("{}: Assert 90 Percentile Latency".format(flow))
+                    trex.assert90PercentileLatency(flow)
+                    main.step("{}: Assert 99.9 Percentile Latency".format(flow))
+                    trex.assert99_9PercentileLatency(flow)
+        finally:
+            main.step("Remove UPF entities via UP4")
+            up4.detachUes()
+            up4.verifyNoUesFlow(onos_cli)
 
-        main.step("Remove UPF entities via UP4")
-        up4.detachUes()
-        up4.verifyNoUesFlow(onos_cli)
-
-        main.step("Teardown")
-        trex.teardown()
-        up4.teardown()
-        run.cleanup(main)
+            main.step("Teardown")
+            trex.teardown()
+            up4.teardown()
+            run.cleanup(main)
diff --git a/TestON/tests/USECASE/SegmentRouting/QOSNonMobile/QOSNonMobile.params b/TestON/tests/USECASE/SegmentRouting/QOSNonMobile/QOSNonMobile.params
index 752e337..dce6647 100644
--- a/TestON/tests/USECASE/SegmentRouting/QOSNonMobile/QOSNonMobile.params
+++ b/TestON/tests/USECASE/SegmentRouting/QOSNonMobile/QOSNonMobile.params
@@ -101,7 +101,7 @@
             </RT_TO_ENB>
             <BE_1_TO_PDN>
                 <name>Best Effort 1</name>
-                <l1_bps>40000000000</l1_bps>
+                <l1_bps>25000000000</l1_bps>
                 <trex_port>2</trex_port>
                 <packet>
                     <pktlen>1400</pktlen>
diff --git a/TestON/tests/USECASE/SegmentRouting/QOSNonMobile/dependencies/QOSNonMobileTest.py b/TestON/tests/USECASE/SegmentRouting/QOSNonMobile/dependencies/QOSNonMobileTest.py
index d53b3f8..456699e 100644
--- a/TestON/tests/USECASE/SegmentRouting/QOSNonMobile/dependencies/QOSNonMobileTest.py
+++ b/TestON/tests/USECASE/SegmentRouting/QOSNonMobile/dependencies/QOSNonMobileTest.py
@@ -7,28 +7,26 @@
 class QOSNonMobileTest:
 
     def runTest(self, main, test_idx, n_switches):
+        run.initTest(main)
+        main.log.info(main.Cluster.numCtrls)
+        main.Cluster.setRunningNode(3)
+        run.installOnos(main, skipPackage=True, cliSleep=5)
+
+        # Use the first available ONOS instance CLI
+        onos_rest = main.Cluster.active(0).REST
+        onos_cli = main.Cluster.active(0).CLI
+
+        trex = Trex()
+        trex.setup(main.TRexClient)
         try:
-            run.initTest(main)
-            main.log.info(main.Cluster.numCtrls)
-            main.Cluster.setRunningNode(3)
-            run.installOnos(main, skipPackage=True, cliSleep=5)
-
-            # Use the first available ONOS instance CLI
-            onos_rest = main.Cluster.active(0).REST
-            onos_cli = main.Cluster.active(0).CLI
-
             # Load traffic config for the current test case
             cfgFile = "%s/tests/CASE_%d.json" % (main.configPath, test_idx)
             with open(cfgFile) as cfg:
                 testCfg = json.load(cfg)
 
-            trex = Trex()
-            trex.setup(main.TRexClient)
-
             original_flows_number = onos_cli.checkFlowCount()
 
             main.step("Verify slices and traffic Classes")
-
             slices_onos = onos_rest.getSlices(debug=True)
 
             # Sanity check for the API, at least the default slice should be there.
@@ -95,10 +93,10 @@
                 minFlowCount=original_flows_number + (new_flows * n_switches)
             )
 
-            main.step("Send traffic with TRex")
             for flow in testCfg["flows"]:
                 trex.createFlow(flow)
             results = trex.sendAndReceiveTraffic(testCfg["duration"])
+            main.step("Verify congestion")
             trex.verifyCongestion(results)
 
             trex.logPortStats()
diff --git a/TestON/tests/USECASE/SegmentRouting/dependencies/scapy_helper.py b/TestON/tests/USECASE/SegmentRouting/dependencies/scapy_helper.py
index 80546f4..f9e049e 100644
--- a/TestON/tests/USECASE/SegmentRouting/dependencies/scapy_helper.py
+++ b/TestON/tests/USECASE/SegmentRouting/dependencies/scapy_helper.py
@@ -18,6 +18,8 @@
         eth_src=None,
         ip_src="192.168.0.1",
         ip_dst="192.168.0.2",
+        udp_sport=1234,
+        udp_dport=80,
         s1u_addr="100.0.0.1",
         enb_addr="192.168.101.1",
         ip_ttl=64,
@@ -30,7 +32,8 @@
     if ext_psc_type is not None:
         pktlen = pktlen - GTPU_OPTIONS_HDR_BYTES - GTPU_EXT_PSC_BYTES
     pkt = simple_udp_packet(eth_src=eth_src, eth_dst=eth_dst, ip_src=ip_src,
-                            ip_dst=ip_dst, pktlen=pktlen)
+                            ip_dst=ip_dst, udp_sport=udp_sport, udp_dport=udp_dport,
+                            pktlen=pktlen)
     gtp_pkt = pkt_add_gtp(
         pkt,
         out_ipv4_src=enb_addr,
diff --git a/TestON/tests/USECASE/SegmentRouting/dependencies/trex.py b/TestON/tests/USECASE/SegmentRouting/dependencies/trex.py
index 93a7c70..03a534a 100644
--- a/TestON/tests/USECASE/SegmentRouting/dependencies/trex.py
+++ b/TestON/tests/USECASE/SegmentRouting/dependencies/trex.py
@@ -121,48 +121,79 @@
                 avg_tx, avg_rx)
         )
 
+    def assertRxRate(self, flow_name, duration, delta=0.05):
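+        """
+        Assert that the measured RX rate of a flow is within delta of the
+        expected_rx_bps configured for that flow. Assumes expected_rx_bps is
+        set and non-zero in the flow configuration.
+
+        :param flow_name: the name of the flow to verify
+        :param duration: the duration of the traffic run in seconds, used to
+            compute the average RX rate from the RX byte counter
+        :param delta: maximum accepted relative deviation from the expected rate
+        """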
+        if not self.isFlowStats(flow_name):
+            main.log.info("No flow stats for flow {}".format(flow_name))
+            utilities.assert_equals(
+                expect=True,
+                actual=False,
+                onpass="",
+                onfail="No Flow stats for requested flow: {}".format(flow_name))
+            return
+        expected_rx_rate_bps = int(
+            self.traffic_flows[flow_name].get("expected_rx_bps", "0"))
+        flow_label = self.traffic_flows[flow_name].get("name", flow_name)
+        flow_id = self.traffic_flows[flow_name]["flow_id"]
+        flow_stats = self.trex_client.getFlowStats(flow_id)
+        actual_rx_rate_bps = (flow_stats.rx_bytes * 8) / duration
+        rates_within_delta = abs((actual_rx_rate_bps/expected_rx_rate_bps) - 1) <= delta
+        utilities.assert_equals(
+            expect=True,
+            actual=rates_within_delta,
+            onpass="Traffic Flow {}: Expected rate ({}) within delta ({}) to actual rate ({})".format(
+                flow_label, expected_rx_rate_bps, delta, actual_rx_rate_bps),
+            onfail="Traffic Flow {}: Expected rate ({}) outside delta ({}) to actual rate ({})".format(
+                flow_label, expected_rx_rate_bps, delta, actual_rx_rate_bps)
+        )
+
     def assertRxPackets(self, flow_name):
         if not self.isFlowStats(flow_name):
             main.log.info("No flow stats for flow {}".format(flow_name))
         expected_min_received = int(
             self.traffic_flows[flow_name].get("expected_min_received", "1"))
+        flow_label = self.traffic_flows[flow_name].get("name", flow_name)
         flow_id = self.traffic_flows[flow_name]["flow_id"]
         flow_stats = self.trex_client.getFlowStats(flow_id)
         utilities.assert_equals(
             expect=True,
             actual=flow_stats.rx_packets >= expected_min_received,
-            onpass="Traffic Flow {}: Received traffic".format(flow_name),
-            onfail="Traffic Flow {}: No traffic received".format(flow_name))
+            onpass="Traffic Flow {}: Received traffic".format(flow_label),
+            onfail="Traffic Flow {}: No traffic received".format(flow_label))
 
     def assertDroppedPacket(self, flow_name):
         if not self.isFlowStats(flow_name):
             main.log.info("No flow stats for flow {}".format(flow_name))
         expected_max_dropped = int(
             self.traffic_flows[flow_name].get("expected_max_dropped", "0"))
-        latency_stats = self.__getLatencyStats(flow_name)
+        flow_label = self.traffic_flows[flow_name].get("name", flow_name)
+        flow_id = self.traffic_flows[flow_name]["flow_id"]
+        flow_stats = self.trex_client.getFlowStats(flow_id)
+        actual_dropped = flow_stats.tx_packets - flow_stats.rx_packets
         utilities.assert_equals(
             expect=True,
-            actual=latency_stats.dropped <= expected_max_dropped,
-            onpass="Traffic Flow {}: {} packets dropped, below threshold ({})".format(
-                flow_name, latency_stats.dropped,
-                expected_max_dropped),
-            onfail="Traffic Flow {}: {} packets dropped, above threshold ({})".format(
-                flow_name, latency_stats.dropped,
-                expected_max_dropped))
+            actual=actual_dropped <= expected_max_dropped,
+            onpass="Traffic Flow {}: {} packets dropped, below threshold={}".format(
+                flow_label, actual_dropped, expected_max_dropped
+            ),
+            onfail="Traffic Flow {}: {} packets dropped, above threshold={}".format(
+                flow_label, actual_dropped, expected_max_dropped
+            )
+        )
 
     def assertMaxLatency(self, flow_name):
         if not self.isFlowStats(flow_name):
             main.log.info("No flow stats for flow {}".format(flow_name))
         expected_max_latency = int(
             self.traffic_flows[flow_name].get("expected_max_latency", "0"))
+        flow_label = self.traffic_flows[flow_name].get("name", flow_name)
         latency_stats = self.__getLatencyStats(flow_name)
         utilities.assert_equals(
             expect=True,
             actual=latency_stats.total_max <= expected_max_latency,
             onpass="Traffic Flow {}: Maximum latency below threshold".format(
-                flow_name),
+                flow_label),
             onfail="Traffic Flow {}: Maximum latency is too high {}".format(
-                flow_name, latency_stats.total_max))
+                flow_label, latency_stats.total_max))
 
     def assert99_9PercentileLatency(self, flow_name):
         if not self.isFlowStats(flow_name):
@@ -174,14 +205,15 @@
         expected_99_9_percentile_latency = int(
             self.traffic_flows[flow_name].get(
                 "expected_99_9_percentile_latency", "0"))
+        flow_label = self.traffic_flows[flow_name].get("name", flow_name)
         latency_stats = self.__getLatencyStats(flow_name)
         utilities.assert_equals(
             expect=True,
             actual=latency_stats.percentile_99_9 <= expected_99_9_percentile_latency,
             onpass="Traffic Flow {}: 99.9th percentile latency below threshold".format(
-                flow_name),
+                flow_label),
             onfail="Traffic Flow {}: 99.9th percentile latency is too high {}".format(
-                flow_name, latency_stats.percentile_99_9))
+                flow_label, latency_stats.percentile_99_9))
 
     def assert90PercentileLatency(self, flow_name):
         if not self.isFlowStats(flow_name):
@@ -193,14 +225,15 @@
         expected_90_percentile_latency = int(
             self.traffic_flows[flow_name].get(
                 "expected_90_percentile_latency", "0"))
+        flow_label = self.traffic_flows[flow_name].get("name", flow_name)
         latency_stats = self.__getLatencyStats(flow_name)
         utilities.assert_equals(
             expect=True,
             actual=latency_stats.percentile_90 <= expected_90_percentile_latency,
             onpass="Traffic Flow {}: 90th percentile latency below threshold".format(
-                flow_name),
+                flow_label),
             onfail="Traffic Flow {}: 90th percentile latency is too high {}".format(
-                flow_name, latency_stats.percentile_90))
+                flow_label, latency_stats.percentile_90))
 
     def logPortStats(self):
         main.log.debug(self.port_stats)
diff --git a/TestON/tests/USECASE/SegmentRouting/dependencies/up4.py b/TestON/tests/USECASE/SegmentRouting/dependencies/up4.py
index bb285d4..461fe15 100644
--- a/TestON/tests/USECASE/SegmentRouting/dependencies/up4.py
+++ b/TestON/tests/USECASE/SegmentRouting/dependencies/up4.py
@@ -59,6 +59,7 @@
                 <!-- TC 0 means BEST EFFORT -->
                 <tc>2</tc>
                 <five_g>False</five_g>
+                <max_bps>200000000</max_bps>
             </ue2>
         </ues>
         <switch_to_kill>Leaf2</switch_to_kill> # Component name of the switch to kill in CASE 5
@@ -137,9 +138,6 @@
                                            enableGtp=True)
             if self.pdn_host is not None:
                 self.pdn_host.startScapy(ifaceName=self.pdn_interface["name"])
-        # TODO: configure interfaces table. Currently, we rely on netcfg or
-        #  PFCP agent to push interface entries, but we should explicitly push
-        #  them here
 
     def startMockSmfPcap(self, smfComponent, pcapIface="eth0"):
         compName = "smf-pcap"
@@ -563,19 +561,29 @@
             app_id
         )
 
-    def upUeSessionOnosString(self, teid=None, teid_up=None, sess_meter_idx=DEFAULT_SESSION_METER_IDX, **kwargs):
+    def upUeSessionOnosString(self, teid=None, teid_up=None, up_id=None,
+                              sess_meter_idx=None, max_bps=None, **kwargs):
         if teid_up is None and teid is not None:
             teid_up = teid
+        if up_id is not None:
+            if max_bps is not None:
+                sess_meter_idx = up_id
+            else:
+                sess_meter_idx = DEFAULT_SESSION_METER_IDX
         if sess_meter_idx is None:
             sess_meter_idx = "\d+"
         return "UpfSessionUL\(Match\(tun_dst_addr={}, teid={}\) -> Action\(FWD, session_meter_idx={}\)\)".format(
             self.s1u_address, teid_up, sess_meter_idx)
 
     def downUeSessionOnosString(self, ue_address, down_id=None,
-                                tunn_peer_id=None, sess_meter_idx=DEFAULT_SESSION_METER_IDX,
+                                tunn_peer_id=None, sess_meter_idx=None, max_bps=None,
                                 **kwargs):
         if down_id is not None:
             tunn_peer_id = down_id
+            if max_bps is not None:
+                sess_meter_idx = down_id
+            else:
+                sess_meter_idx = DEFAULT_SESSION_METER_IDX
         if tunn_peer_id is None:
             tunn_peer_id = "\d+"
         if sess_meter_idx is None:
@@ -584,9 +592,13 @@
             ue_address, tunn_peer_id, sess_meter_idx)
 
     def upTerminationOnosString(self, ue_address, app_filter, up_id=None,
-                                ctr_id_up=None, tc=None, app_meter_idx=DEFAULT_APP_METER_IDX, **kwargs):
+                                ctr_id_up=None, tc=None, app_meter_idx=None, **kwargs):
         if up_id is not None:
             ctr_id_up = up_id
+            if "max_bps" in app_filter:
+                app_meter_idx = int(up_id) + int(app_filter["app_id"])
+            else:
+                app_meter_idx = DEFAULT_APP_METER_IDX
         if ctr_id_up is None:
             ctr_id_up = "\d+"
         if tc is None or int(tc) == 0:
@@ -605,7 +617,7 @@
 
     def downTerminationOnosString(self, ue_address, app_filter, teid=None,
                                   down_id=None, ctr_id_down=None, teid_down=None,
-                                  tc=None, app_meter_idx=DEFAULT_APP_METER_IDX,
+                                  tc=None, app_meter_idx=None,
                                   **kwargs):
         if down_id is not None:
             ctr_id_down = down_id
@@ -613,6 +625,10 @@
             ctr_id_down = "\d+"
         if teid_down is None and teid is not None:
             teid_down = int(teid) + 1
+            if "max_bps" in app_filter:
+                app_meter_idx = int(down_id) + int(app_filter["app_id"])
+            else:
+                app_meter_idx = DEFAULT_APP_METER_IDX
         if tc is None or int(tc) == 0:
             tc = "(?:0|null)"
         if app_meter_idx is None:
@@ -643,6 +659,11 @@
             ue["five_g"] = bool(strtobool(ue["five_g"]))
         if "tc" in ue and ue["tc"] == "":
             ue["tc"] = 0
+        if "max_bps" in ue:
+            if ue["max_bps"] == "" or ue["max_bps"] is None:
+                ue["max_bps"] = None
+            else:
+                ue["max_bps"] = int(ue["max_bps"])
         if smf:
             ue["up_id"] = None
             ue["down_id"] = None
@@ -695,28 +716,34 @@
                  teid_up=None, teid_down=None,
                  ctr_id_up=None, ctr_id_down=None,
                  tunn_peer_id=None,
-                 tc=None, five_g=False, **kwargs):
+                 tc=None, five_g=False, max_bps=None,
+                 sess_meter_idx_up=None, sess_meter_idx_down=None, **kwargs):
         self.__programUeRules(ue_name,
                               ue_address,
                               teid, up_id, down_id,
                               teid_up, teid_down,
                               ctr_id_up, ctr_id_down,
                               tunn_peer_id,
-                              tc, five_g, op="program")
+                              tc, five_g, max_bps,
+                              sess_meter_idx_up, sess_meter_idx_down,
+                              op="program")
 
     def detachUe(self, ue_name, ue_address,
                  teid=None, up_id=None, down_id=None,
                  teid_up=None, teid_down=None,
                  ctr_id_up=None, ctr_id_down=None,
                  tunn_peer_id=None,
-                 tc=None, five_g=False, **kwargs):
+                 tc=None, five_g=False, max_bps=None,
+                 sess_meter_idx_up=None, sess_meter_idx_down=None, **kwargs):
         self.__programUeRules(ue_name,
                               ue_address,
                               teid, up_id, down_id,
                               teid_up, teid_down,
                               ctr_id_up, ctr_id_down,
                               tunn_peer_id,
-                              tc, five_g, op="clear")
+                              tc, five_g, max_bps,
+                              sess_meter_idx_up, sess_meter_idx_down,
+                              op="clear")
 
     def __programAppFilter(self, app_id, slice_id, ip_prefix=None, ip_proto=None,
                            port_range=None, priority=0, op="program", **kwargs):
@@ -769,13 +796,21 @@
                          teid=None, up_id=None, down_id=None,
                          teid_up=None, teid_down=None, ctr_id_up=None,
                          ctr_id_down=None, tunn_peer_id=None,
-                         tc=0, five_g=False,
+                         tc=0, five_g=False, max_bps=None,
+                         sess_meter_idx_up=None, sess_meter_idx_down=None,
                          op="program"):
+        if max_bps is None:
+            sess_meter_idx_up = DEFAULT_SESSION_METER_IDX
+            sess_meter_idx_down = DEFAULT_SESSION_METER_IDX
         if up_id is not None:
             ctr_id_up = up_id
+            if max_bps is not None:
+                sess_meter_idx_up = int(up_id)
         if down_id is not None:
             tunn_peer_id = down_id
             ctr_id_down = down_id
+            if max_bps is not None:
+                sess_meter_idx_down = int(down_id)
         if teid is not None:
             teid_up = teid
             teid_down = int(teid) + 1
@@ -786,6 +821,25 @@
         enb_address = self.__getEnbAddress(ue_name)
 
         # ========================#
+        # Session Meters
+        # ========================#
+        if max_bps is not None:
+            if not self.__mod_meter(
+                    'PreQosPipe.session_meter',
+                    sess_meter_idx_up,
+                    max_bps,
+                    op
+            ):
+                return False
+            if not self.__mod_meter(
+                    'PreQosPipe.session_meter',
+                    sess_meter_idx_down,
+                    max_bps,
+                    op
+            ):
+                return False
+
+        # ========================#
         # UE Session Entries
         # ========================#
 
@@ -798,7 +852,7 @@
         matchFields['n3_address'] = str(self.s1u_address)
         matchFields['teid'] = str(teid_up)
         # Action params
-        actionParams["session_meter_idx"] = str(DEFAULT_SESSION_METER_IDX)
+        actionParams["session_meter_idx"] = str(sess_meter_idx_up)
         if five_g:
             # TODO: currently QFI match is unsupported in TNA
             main.log.warn("Matching on QFI is currently unsupported in TNA")
@@ -815,7 +869,7 @@
         matchFields['ue_address'] = str(ue_address)
         # Action params
         actionParams['tunnel_peer_id'] = str(tunn_peer_id)
-        actionParams["session_meter_idx"] = str(DEFAULT_SESSION_METER_IDX)
+        actionParams["session_meter_idx"] = str(sess_meter_idx_down)
         if not self.__add_entry(tableName, actionName, matchFields,
                                 actionParams, entries, op):
             return False
@@ -828,6 +882,17 @@
 
         # Uplink
         for f in self.app_filters.values():
+            if "max_bps" in f:
+                app_meter_idx_up = sess_meter_idx_up + int(f['app_id'])
+                if not self.__mod_meter(
+                        'PreQosPipe.app_meter',
+                        app_meter_idx_up,
+                        int(f["max_bps"]),
+                        op
+                ):
+                    return False
+            else:
+                app_meter_idx_up = DEFAULT_APP_METER_IDX
             tableName = 'PreQosPipe.terminations_uplink'
             matchFields = {}
             actionParams = {}
@@ -839,7 +904,7 @@
             # Action params
             if f['action'] == 'allow':
                 actionName = 'PreQosPipe.uplink_term_fwd'
-                actionParams['app_meter_idx'] = str(DEFAULT_APP_METER_IDX)
+                actionParams['app_meter_idx'] = str(app_meter_idx_up)
                 actionParams['tc'] = str(tc)
             else:
                 actionName = 'PreQosPipe.uplink_term_drop'
@@ -851,6 +916,17 @@
 
         # Downlink
         for f in self.app_filters.values():
+            if "max_bps" in f:
+                app_meter_idx_down = sess_meter_idx_down + int(f['app_id'])
+                if not self.__mod_meter(
+                        'PreQosPipe.app_meter',
+                        app_meter_idx_down,
+                        int(f["max_bps"]),
+                        op
+                ):
+                    return False
+            else:
+                app_meter_idx_down = DEFAULT_APP_METER_IDX
             tableName = 'PreQosPipe.terminations_downlink'
             matchFields = {}
             actionParams = {}
@@ -866,7 +942,7 @@
                 # 1-1 mapping between QFI and TC
                 actionParams['tc'] = str(tc)
                 actionParams['qfi'] = str(tc)
-                actionParams['app_meter_idx'] = str(DEFAULT_APP_METER_IDX)
+                actionParams['app_meter_idx'] = str(app_meter_idx_down)
             else:
                 actionName = 'PreQosPipe.downlink_term_drop'
             actionParams['ctr_idx'] = str(ctr_id_down)
@@ -916,6 +992,28 @@
         })
         return True
 
+    def __mod_meter(self, name, index, max_bps, op):
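+        # Program (or clear) the P4RT meter cell at 'index'. Only the peak
+        # band is configured: CIR/CBURST are left at 0 and PIR is max_bps
+        # converted from bits per second to bytes per second.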
+        cir = 0
+        cburst = 0
+        pir = max_bps // 8
+        # TRex/DPDK can generate bursts of 32 packets; with MTU=1500 that is
+        # 32 x 1500 B = 48 KB, so the burst size must be greater than 48 KB
+        pburst = 100000
+        if op == "program":
+            self.up4_client.buildP4RtMeterEntry(
+                meterName=name, index=index, cir=cir, cburst=cburst, pir=pir,
+                pburst=pburst
+            )
+        else:
+            # in case of "clear" don't specify bands to clear meters
+            self.up4_client.buildP4RtMeterEntry(meterName=name, index=index)
+        if self.up4_client.modifyMeterEntry(debug=True) == main.TRUE:
+            main.log.info("*** Meter modified.")
+        else:
+            main.log.error("Error during meter modification")
+            return False
+        return True
+
     def __clear_entries(self, entries):
         for i, entry in enumerate(entries):
             self.up4_client.buildP4RtTableEntry(**entry)