Merge "[SDFAB-543] Add instruction and scripts of Docker environment"
diff --git a/TestON/bin/cleanup.sh b/TestON/bin/cleanup.sh
index 4810002..571b0c9 100755
--- a/TestON/bin/cleanup.sh
+++ b/TestON/bin/cleanup.sh
@@ -25,10 +25,11 @@
 
 # TODO: Add help to this file, and some more options?
 #       Maybe kill/uninstall ONOS?
-sudo kill -9 `ps -ef | grep "./cli.py" | grep -v grep | awk '{print $2}'`
-sudo kill -9 `ps -ef | grep "bin/teston" | grep -v grep | awk '{print $2}'`
-sudo kill -9 `ps -ef | grep "ssh -X" | grep -v grep | awk '{print $2}'`
-sudo kill -9 `ps ax | grep '[p]ython -m SimpleHTTPServer 8000' | awk '{print $1}'`
+sudo kill -9 `ps -ef | grep "./cli.py" | grep -v grep | awk '{print $2}'` &> /dev/null
+sudo kill -9 `ps -ef | grep "bin/teston" | grep -v grep | awk '{print $2}'` &> /dev/null
+sudo kill -9 `ps -ef | grep "/usr/bin/ssh" | grep -v grep | awk '{print $2}'` &> /dev/null
+sudo kill -9 `ps ax | grep '[p]ython -m SimpleHTTPServer 8000' | awk '{print $1}'` &> /dev/null
+sudo kill -9 `ps ax | grep '[k]ubectl' | awk '{print $1}'` &> /dev/null
 
 export user=${ONOS_USER:-$USER}
 if [[ -z "${OCN}" ]]; then
diff --git a/TestON/drivers/common/api/controller/trexclientdriver.py b/TestON/drivers/common/api/controller/trexclientdriver.py
new file mode 100644
index 0000000..29c8a1a
--- /dev/null
+++ b/TestON/drivers/common/api/controller/trexclientdriver.py
@@ -0,0 +1,581 @@
+"""
+Copyright 2021 Open Networking Foundation (ONF)
+
+Please refer questions to either the onos test mailing list at <onos-test@onosproject.org>,
+the System Testing Plans and Results wiki page at <https://wiki.onosproject.org/x/voMg>,
+or the System Testing Guide page at <https://wiki.onosproject.org/x/WYQg>
+
+"""
+import time
+import os
+import collections
+import numpy as np
+
+from drivers.common.api.controllerdriver import Controller
+from trex.stl.api import STLClient, STLStreamDstMAC_PKT
+from trex_stf_lib.trex_client import CTRexClient
+from trex_stl_lib.api import STLFlowLatencyStats, STLPktBuilder, STLStream, \
+    STLTXCont
+
+from socket import error as ConnectionRefusedError
+from distutils.util import strtobool
+
+TREX_FILES_DIR = "/tmp/trex_files/"
+
+LatencyStats = collections.namedtuple(
+    "LatencyStats",
+    [
+        "pg_id",
+        "jitter",
+        "average",
+        "total_max",
+        "total_min",
+        "last_max",
+        "histogram",
+        "dropped",
+        "out_of_order",
+        "duplicate",
+        "seq_too_high",
+        "seq_too_low",
+        "percentile_50",
+        "percentile_75",
+        "percentile_90",
+        "percentile_99",
+        "percentile_99_9",
+        "percentile_99_99",
+        "percentile_99_999",
+    ],
+)
+
+PortStats = collections.namedtuple(
+    "PortStats",
+    [
+        "tx_packets",
+        "rx_packets",
+        "tx_bytes",
+        "rx_bytes",
+        "tx_errors",
+        "rx_errors",
+        "tx_bps",
+        "tx_pps",
+        "tx_bps_L1",
+        "tx_util",
+        "rx_bps",
+        "rx_pps",
+        "rx_bps_L1",
+        "rx_util",
+    ],
+)
+
+FlowStats = collections.namedtuple(
+    "FlowStats",
+    [
+        "pg_id",
+        "tx_packets",
+        "rx_packets",
+        "tx_bytes",
+        "rx_bytes",
+    ],
+)
+
+
+class TrexClientDriver(Controller):
+    """
+    Implements a Trex Client Driver
+    """
+
+    def __init__(self):
+        self.trex_address = "localhost"
+        self.trex_config = None  # Relative path in dependencies of the test using this driver
+        self.force_restart = True
+        self.software_mode = False
+        self.setup_successful = False
+        self.stats = None
+        self.trex_client = None
+        self.trex_daemon_client = None
+        super(TrexClientDriver, self).__init__()
+
+    def connect(self, **connectargs):
+        try:
+            for key in connectargs:
+                vars(self)[key] = connectargs[key]
+            for key in self.options:
+                if key == "trex_address":
+                    self.trex_address = self.options[key]
+                elif key == "trex_config":
+                    self.trex_config = self.options[key]
+                elif key == "force_restart":
+                    self.force_restart = bool(strtobool(self.options[key]))
+                elif key == "software_mode":
+                    self.software_mode = bool(strtobool(self.options[key]))
+            self.name = self.options["name"]
+        except Exception as inst:
+            main.log.error("Uncaught exception: " + str(inst))
+            main.cleanAndExit()
+        return super(TrexClientDriver, self).connect()
+
+    def disconnect(self):
+        """
+        Called when Test is complete
+        """
+        self.disconnectTrexClient()
+        self.stopTrexServer()
+        return main.TRUE
+
+    def setupTrex(self, pathToTrexConfig):
+        """
+        Setup TRex server passing the TRex configuration.
+        :return: True if setup successful, False otherwise
+        """
+        main.log.debug(self.name + ": Setting up TRex server")
+        if self.software_mode:
+            trex_args = "--software --no-hw-flow-stat"
+        else:
+            trex_args = None
+        self.trex_daemon_client = CTRexClient(self.trex_address,
+                                              trex_args=trex_args)
+        success = self.__set_up_trex_server(
+            self.trex_daemon_client, self.trex_address,
+            pathToTrexConfig + self.trex_config,
+            self.force_restart
+        )
+        if not success:
+            main.log.error("Failed to set up TRex daemon!")
+            return False
+        self.setup_successful = True
+        return True
+
+    def connectTrexClient(self):
+        if not self.setup_successful:
+            main.log.error("Cannot connect TRex Client, first setup TRex")
+            return False
+        main.log.info("Connecting TRex Client")
+        self.trex_client = STLClient(server=self.trex_address)
+        self.trex_client.connect()
+        self.trex_client.acquire()
+        self.trex_client.reset()  # Resets configs from all ports
+        self.trex_client.clear_stats()  # Clear status from all ports
+        # Put all ports to promiscuous mode, otherwise they will drop all
+        # incoming packets if the destination mac is not the port mac address.
+        self.trex_client.set_port_attr(self.trex_client.get_all_ports(),
+                                       promiscuous=True)
+        # Reset the used sender ports
+        self.all_sender_port = set()
+        self.stats = None
+        return True
+
+    def disconnectTrexClient(self):
+        # Teardown TREX Client
+        if self.trex_client is not None:
+            main.log.info("Tearing down STLClient...")
+            self.trex_client.stop()
+            self.trex_client.release()
+            self.trex_client.disconnect()
+            self.trex_client = None
+            # Do not reset stats
+
+    def stopTrexServer(self):
+        if self.trex_daemon_client is not None:
+            self.trex_daemon_client.stop_trex()
+            self.trex_daemon_client = None
+
+    def addStream(self, pkt, trex_port, l1_bps=None, percentage=None,
+                  delay=0, flow_id=None, flow_stats=False):
+        """
+        :param pkt: Scapy packet, TRex will send copy of this packet
+        :param trex_port: Port number to send packet from, must match a port in the TRex config file
+        :param l1_bps: L1 Throughput generated by TRex (mutually exclusive with percentage)
+        :param percentage: Percentage usage of the selected port bandwidth (mutually exclusive with l1_bps)
+        :param flow_id: Flow ID, required when saving latency statistics
+        :param flow_stats: True to measure flow statistics (latency and packet), False otherwise, might require software mode
+        :return: True if the stream is created, false otherwise
+        """
+        if (percentage is None and l1_bps is None) or (
+                percentage is not None and l1_bps is not None):
+            main.log.error(
+                "Either percentage or l1_bps must be provided when creating a stream")
+            return False
+        main.log.debug("Creating flow stream")
+        main.log.debug(
+            "port: %d, l1_bps: %s, percentage: %s, delay: %d, flow_id:%s, flow_stats: %s" % (
+                trex_port, str(l1_bps), str(percentage), delay, str(flow_id),
+                str(flow_stats)))
+        main.log.debug(pkt.summary())
+        if flow_stats:
+            traffic_stream = self.__create_latency_stats_stream(
+                pkt,
+                pg_id=flow_id,
+                isg=delay,
+                percentage=percentage,
+                l1_bps=l1_bps)
+        else:
+            traffic_stream = self.__create_background_stream(
+                pkt,
+                percentage=percentage,
+                l1_bps=l1_bps)
+        self.trex_client.add_streams(traffic_stream, ports=trex_port)
+        self.all_sender_port.add(trex_port)
+        return True
+
+    def startAndWaitTraffic(self, duration=10):
+        """
+        Start generating traffic and wait traffic to be send
+        :param duration: Traffic generation duration
+        :return:
+        """
+        if not self.trex_client:
+            main.log.error(
+                "Cannot start traffic, first connect the TRex client")
+            return False
+        main.log.info("Start sending traffic for %d seconds" % duration)
+        self.trex_client.start(list(self.all_sender_port), mult="1",
+                               duration=duration)
+        main.log.info("Waiting until all traffic is sent..")
+        self.trex_client.wait_on_traffic(ports=list(self.all_sender_port),
+                                         rx_delay_ms=100)
+        main.log.info("...traffic sent!")
+        # Reset sender port so we can run other tests with the same TRex client
+        self.all_sender_port = set()
+        main.log.info("Getting stats")
+        self.stats = self.trex_client.get_stats()
+        main.log.info("GOT stats")
+
+    def getFlowStats(self, flow_id):
+        if self.stats is None:
+            main.log.error("No stats saved!")
+            return None
+        return TrexClientDriver.__get_flow_stats(flow_id, self.stats)
+
+    def logFlowStats(self, flow_id):
+        main.log.info("Statistics for flow {}: {}".format(
+            flow_id,
+            TrexClientDriver.__get_readable_flow_stats(
+                self.getFlowStats(flow_id))))
+
+    def getLatencyStats(self, flow_id):
+        if self.stats is None:
+            main.log.error("No stats saved!")
+            return None
+        return TrexClientDriver.__get_latency_stats(flow_id, self.stats)
+
+    def logLatencyStats(self, flow_id):
+        main.log.info("Latency statistics for flow {}: {}".format(
+            flow_id,
+            TrexClientDriver.__get_readable_latency_stats(
+                self.getLatencyStats(flow_id))))
+
+    def getPortStats(self, port_id):
+        if self.stats is None:
+            main.log.error("No stats saved!")
+            return None
+        return TrexClientDriver.__get_port_stats(port_id, self.stats)
+
+    def logPortStats(self, port_id):
+        if self.stats is None:
+            main.log.error("No stats saved!")
+            return None
+        main.log.info("Statistics for port {}: {}".format(
+            port_id, TrexClientDriver.__get_readable_port_stats(
+                self.stats.get(port_id))))
+
+    # From ptf/test/common/ptf_runner.py
+    def __set_up_trex_server(self, trex_daemon_client, trex_address,
+                             trex_config,
+                             force_restart):
+        try:
+            main.log.info("Pushing Trex config %s to the server" % trex_config)
+            if not trex_daemon_client.push_files(trex_config):
+                main.log.error("Unable to push %s to Trex server" % trex_config)
+                return False
+
+            if force_restart:
+                main.log.info("Restarting TRex")
+                trex_daemon_client.kill_all_trexes()
+                time.sleep(1)
+
+            if not trex_daemon_client.is_idle():
+                main.log.info("The Trex server process is running")
+                main.log.warn(
+                    "A Trex server process is still running, "
+                    + "use --force-restart to kill it if necessary."
+                )
+                return False
+
+            trex_config_file_on_server = TREX_FILES_DIR + os.path.basename(
+                trex_config)
+            trex_daemon_client.start_stateless(cfg=trex_config_file_on_server)
+        except ConnectionRefusedError:
+            main.log.error(
+                "Unable to connect to server %s.\nDid you start the Trex daemon?" % trex_address)
+            return False
+
+        return True
+
+    def __create_latency_stats_stream(self, pkt, pg_id,
+                                      name=None,
+                                      l1_bps=None,
+                                      percentage=None,
+                                      isg=0):
+        assert (percentage is None and l1_bps is not None) or (
+                percentage is not None and l1_bps is None)
+        return STLStream(
+            name=name,
+            packet=STLPktBuilder(pkt=pkt),
+            mode=STLTXCont(bps_L1=l1_bps, percentage=percentage),
+            isg=isg,
+            flow_stats=STLFlowLatencyStats(pg_id=pg_id)
+        )
+
+    def __create_background_stream(self, pkt, name=None, percentage=None,
+                                   l1_bps=None):
+        assert (percentage is None and l1_bps is not None) or (
+                percentage is not None and l1_bps is None)
+        return STLStream(
+            name=name,
+            packet=STLPktBuilder(pkt=pkt),
+            mode=STLTXCont(bps_L1=l1_bps, percentage=percentage)
+        )
+
+    # Multiplier for data rates
+    K = 1000
+    M = 1000 * K
+    G = 1000 * M
+
+    @staticmethod
+    def __to_readable(src, unit="bps"):
+        """
+        Convert number to human readable string.
+        For example: 1,000,000 bps to 1Mbps. 1,000 bytes to 1KB
+
+        :parameters:
+            src : int
+                the original data
+            unit : str
+                the unit ('bps', 'pps', or 'bytes')
+        :returns:
+            A human readable string
+        """
+        if src < 1000:
+            return "{:.1f} {}".format(src, unit)
+        elif src < 1000000:
+            return "{:.1f} K{}".format(src / 1000, unit)
+        elif src < 1000000000:
+            return "{:.1f} M{}".format(src / 1000000, unit)
+        else:
+            return "{:.1f} G{}".format(src / 1000000000, unit)
+
+    @staticmethod
+    def __get_readable_port_stats(port_stats):
+        opackets = port_stats.get("opackets", 0)
+        ipackets = port_stats.get("ipackets", 0)
+        obytes = port_stats.get("obytes", 0)
+        ibytes = port_stats.get("ibytes", 0)
+        oerrors = port_stats.get("oerrors", 0)
+        ierrors = port_stats.get("ierrors", 0)
+        tx_bps = port_stats.get("tx_bps", 0)
+        tx_pps = port_stats.get("tx_pps", 0)
+        tx_bps_L1 = port_stats.get("tx_bps_L1", 0)
+        tx_util = port_stats.get("tx_util", 0)
+        rx_bps = port_stats.get("rx_bps", 0)
+        rx_pps = port_stats.get("rx_pps", 0)
+        rx_bps_L1 = port_stats.get("rx_bps_L1", 0)
+        rx_util = port_stats.get("rx_util", 0)
+        return """
+        Output packets: {}
+        Input packets: {}
+        Output bytes: {} ({})
+        Input bytes: {} ({})
+        Output errors: {}
+        Input errors: {}
+        TX bps: {} ({})
+        TX pps: {} ({})
+        L1 TX bps: {} ({})
+        TX util: {}
+        RX bps: {} ({})
+        RX pps: {} ({})
+        L1 RX bps: {} ({})
+        RX util: {}""".format(
+            opackets,
+            ipackets,
+            obytes,
+            TrexClientDriver.__to_readable(obytes, "Bytes"),
+            ibytes,
+            TrexClientDriver.__to_readable(ibytes, "Bytes"),
+            oerrors,
+            ierrors,
+            tx_bps,
+            TrexClientDriver.__to_readable(tx_bps),
+            tx_pps,
+            TrexClientDriver.__to_readable(tx_pps, "pps"),
+            tx_bps_L1,
+            TrexClientDriver.__to_readable(tx_bps_L1),
+            tx_util,
+            rx_bps,
+            TrexClientDriver.__to_readable(rx_bps),
+            rx_pps,
+            TrexClientDriver.__to_readable(rx_pps, "pps"),
+            rx_bps_L1,
+            TrexClientDriver.__to_readable(rx_bps_L1),
+            rx_util,
+        )
+
+    @staticmethod
+    def __get_port_stats(port, stats):
+        """
+        :param port: int
+        :param stats:
+        :return:
+        """
+        port_stats = stats.get(port)
+        return PortStats(
+            tx_packets=port_stats.get("opackets", 0),
+            rx_packets=port_stats.get("ipackets", 0),
+            tx_bytes=port_stats.get("obytes", 0),
+            rx_bytes=port_stats.get("ibytes", 0),
+            tx_errors=port_stats.get("oerrors", 0),
+            rx_errors=port_stats.get("ierrors", 0),
+            tx_bps=port_stats.get("tx_bps", 0),
+            tx_pps=port_stats.get("tx_pps", 0),
+            tx_bps_L1=port_stats.get("tx_bps_L1", 0),
+            tx_util=port_stats.get("tx_util", 0),
+            rx_bps=port_stats.get("rx_bps", 0),
+            rx_pps=port_stats.get("rx_pps", 0),
+            rx_bps_L1=port_stats.get("rx_bps_L1", 0),
+            rx_util=port_stats.get("rx_util", 0),
+        )
+
+    @staticmethod
+    def __get_latency_stats(pg_id, stats):
+        """
+        :param pg_id: int
+        :param stats:
+        :return:
+        """
+
+        lat_stats = stats["latency"].get(pg_id)
+        lat = lat_stats["latency"]
+        # Estimate latency percentiles from the histogram.
+        l = list(lat["histogram"].keys())
+        l.sort()
+        all_latencies = []
+        for sample in l:
+            range_start = sample
+            if range_start == 0:
+                range_end = 10
+            else:
+                range_end = range_start + pow(10, (len(str(range_start)) - 1))
+            val = lat["histogram"][sample]
+            # Assume whole the bucket experienced the range_end latency.
+            all_latencies += [range_end] * val
+        q = [50, 75, 90, 99, 99.9, 99.99, 99.999]
+        percentiles = np.percentile(all_latencies, q)
+
+        ret = LatencyStats(
+            pg_id=pg_id,
+            jitter=lat["jitter"],
+            average=lat["average"],
+            total_max=lat["total_max"],
+            total_min=lat["total_min"],
+            last_max=lat["last_max"],
+            histogram=lat["histogram"],
+            dropped=lat_stats["err_cntrs"]["dropped"],
+            out_of_order=lat_stats["err_cntrs"]["out_of_order"],
+            duplicate=lat_stats["err_cntrs"]["dup"],
+            seq_too_high=lat_stats["err_cntrs"]["seq_too_high"],
+            seq_too_low=lat_stats["err_cntrs"]["seq_too_low"],
+            percentile_50=percentiles[0],
+            percentile_75=percentiles[1],
+            percentile_90=percentiles[2],
+            percentile_99=percentiles[3],
+            percentile_99_9=percentiles[4],
+            percentile_99_99=percentiles[5],
+            percentile_99_999=percentiles[6],
+        )
+        return ret
+
+    @staticmethod
+    def __get_readable_latency_stats(stats):
+        """
+        :param stats: LatencyStats
+        :return:
+        """
+        histogram = ""
+        # need to listify in order to be able to sort them.
+        l = list(stats.histogram.keys())
+        l.sort()
+        for sample in l:
+            range_start = sample
+            if range_start == 0:
+                range_end = 10
+            else:
+                range_end = range_start + pow(10, (len(str(range_start)) - 1))
+            val = stats.histogram[sample]
+            histogram = (
+                    histogram
+                    + "\n        Packets with latency between {0:>5} us and {1:>5} us: {2:>10}".format(
+                range_start, range_end, val
+            )
+            )
+
+        return """
+        Latency info for pg_id {}
+        Dropped packets: {}
+        Out-of-order packets: {}
+        Sequence too high packets: {}
+        Sequence too low packets: {}
+        Maximum latency: {} us
+        Minimum latency: {} us
+        Maximum latency in last sampling period: {} us
+        Average latency: {} us
+        50th percentile latency: {} us
+        75th percentile latency: {} us
+        90th percentile latency: {} us
+        99th percentile latency: {} us
+        99.9th percentile latency: {} us
+        99.99th percentile latency: {} us
+        99.999th percentile latency: {} us
+        Jitter: {} us
+        Latency distribution histogram: {}
+        """.format(stats.pg_id, stats.dropped, stats.out_of_order,
+                   stats.seq_too_high, stats.seq_too_low, stats.total_max,
+                   stats.total_min, stats.last_max, stats.average,
+                   stats.percentile_50, stats.percentile_75,
+                   stats.percentile_90,
+                   stats.percentile_99, stats.percentile_99_9,
+                   stats.percentile_99_99,
+                   stats.percentile_99_999, stats.jitter, histogram)
+
+    @staticmethod
+    def __get_flow_stats(pg_id, stats):
+        """
+        :param pg_id: int
+        :param stats:
+        :return:
+        """
+        FlowStats = collections.namedtuple(
+            "FlowStats",
+            ["pg_id", "tx_packets", "rx_packets", "tx_bytes", "rx_bytes", ],
+        )
+        flow_stats = stats["flow_stats"].get(pg_id)
+        ret = FlowStats(
+            pg_id=pg_id,
+            tx_packets=flow_stats["tx_pkts"]["total"],
+            rx_packets=flow_stats["rx_pkts"]["total"],
+            tx_bytes=flow_stats["tx_bytes"]["total"],
+            rx_bytes=flow_stats["rx_bytes"]["total"],
+        )
+        return ret
+
+    @staticmethod
+    def __get_readable_flow_stats(stats):
+        """
+        :param stats: FlowStats
+        :return:
+        """
+        return """Flow info for pg_id {}
+        TX packets: {}
+        RX packets: {}
+        TX bytes: {}
+        RX bytes: {}""".format(stats.pg_id, stats.tx_packets,
+                               stats.rx_packets, stats.tx_bytes,
+                               stats.rx_bytes)
diff --git a/TestON/drivers/common/cli/onosclusterdriver.py b/TestON/drivers/common/cli/onosclusterdriver.py
index eebc5ad..8b25d22 100755
--- a/TestON/drivers/common/cli/onosclusterdriver.py
+++ b/TestON/drivers/common/cli/onosclusterdriver.py
@@ -54,7 +54,7 @@
         name. This method should return the (computed) attribute value
         or raise an AttributeError exception.
 
-        We will look into each of the node's component handles to try to find the attreibute, looking at REST first
+        We will look into each of the node's component handles to try to find the attribute, looking at REST first
         """
         if hasattr( self.REST, name ):
             main.log.debug( "%s: Using Rest driver's attribute for '%s'" % ( self.name, name ) )
@@ -65,10 +65,13 @@
         if hasattr( self.Bench, name ):
             main.log.debug( "%s: Using Bench driver's attribute for '%s'" % ( self.name, name ) )
             return getattr( self.Bench, name )
+        if hasattr( self.p4rtUp4, name ):
+            main.log.debug( "%s: Using UP4 driver's attribute for '%s'" % ( self.name, name ) )
+            return getattr( self.p4rtUp4, name )
         raise AttributeError( "Could not find the attribute %s in %r or it's component handles" % ( name, self ) )
 
     def __init__( self, name, ipAddress, CLI=None, REST=None, Bench=None, pos=None,
-                  userName=None, server=None, k8s=None, dockerPrompt=None ):
+                  userName=None, server=None, k8s=None, p4rtUp4=None, dockerPrompt=None ):
         # TODO: validate these arguments
         self.name = str( name )
         self.ipAddress = ipAddress
@@ -81,6 +84,7 @@
         self.user_name = userName
         self.server = server
         self.k8s = k8s
+        self.p4rtUp4 = p4rtUp4
         self.dockerPrompt = dockerPrompt
 
 class OnosClusterDriver( CLI ):
@@ -101,6 +105,7 @@
         self.nodeUser = None
         self.nodePass = None
         self.nodes = []
+        self.up4Port = None
         super( OnosClusterDriver, self ).__init__()
 
     def connect( self, **connectargs ):
@@ -145,6 +150,9 @@
                     self.maxNodes = self.options[ key ]
                 elif key == "kubeConfig":
                     self.kubeConfig = self.options[ key ]
+                elif key == "up4_port":
+                    # Defining up4_port triggers the creation of the P4RuntimeCliDriver component
+                    self.up4Port = self.options[ key ]
 
             self.home = self.checkOptions( self.home, "~/onos" )
             self.karafUser = self.checkOptions( self.karafUser, self.user_name )
@@ -160,6 +168,7 @@
             self.dockerPrompt = self.checkOptions( self.dockerPrompt, "~/onos#" )
             self.maxNodes = int( self.checkOptions( self.maxNodes, 100 ) )
             self.kubeConfig = self.checkOptions( self.kubeConfig, None )
+            self.up4Port = self.checkOptions(self.up4Port, None)
 
             self.name = self.options[ 'name' ]
 
@@ -243,14 +252,19 @@
                         # Setup port-forwarding and save the local port
                         guiPort = 8181
                         cliPort = 8101
+                        fwdPorts = [ guiPort, cliPort ]
+                        if self.up4Port:
+                            fwdPorts.append( int( self.up4Port ) )
                         portsList = ""
-                        for port in [ guiPort, cliPort ]:
+                        for port in fwdPorts:
                             localPort = port + index + 1
                             portsList += "%s:%s " % ( localPort, port )
                             if port == cliPort:
                                 node.CLI.karafPort = localPort
                             elif port == guiPort:
                                 node.REST.port = localPort
+                            elif self.up4Port and port == int( self.up4Port ):
+                                node.p4rtUp4.p4rtPort = localPort
                         main.log.info( "Setting up port forward for pod %s: [ %s ]" % ( self.podNames[ index ], portsList ) )
                         pf = kubectl.kubectlPortForward( self.podNames[ index ],
                                                          portsList,
@@ -466,6 +480,51 @@
             main.log.error( name + " component already exists!" )
             main.cleanAndExit()
 
+    def setP4rtCLIOptions( self, name, ipAddress ):
+        """
+        Parse the cluster options to create an UP4 component with the given name
+
+        Arguments:
+            name - The name of the P4RuntimeCLI component
+            ipAddress - The ip address of the ONOS instance
+        """
+        main.componentDictionary[name] = main.componentDictionary[self.name].copy()
+        main.componentDictionary[name]['type'] = "P4RuntimeCliDriver"
+        main.componentDictionary[name]['host'] = ipAddress
+        port = main.componentDictionary[name]['COMPONENTS'].get( "p4rt_port", "9559" )
+        main.componentDictionary[name]['p4rt_port'] = self.checkOptions( port, "9559" )
+        main.componentDictionary[name]['connect_order'] = str( int( main.componentDictionary[name]['connect_order'] ) + 1 )
+
+    def createP4rtCLIComponent( self, name, ipAddress ):
+        """
+        Creates a new P4Runtime CLI component. This will be connected to the node
+        ONOS is running on.
+
+        Arguments:
+            name - The string of the name of this component. The new component
+                   will be assigned to main.<name> .
+                   In addition, main.<name>.name = str( name )
+            ipAddress - The ip address of the server
+        """
+        try:
+            # look to see if this component already exists
+            getattr( main, name )
+        except AttributeError:
+            # namespace is clear, creating component
+            self.setP4rtCLIOptions( name, ipAddress )
+            return main.componentInit( name )
+        except pexpect.EOF:
+            main.log.error( self.name + ": EOF exception found" )
+            main.log.error( self.name + ":     " + self.handle.before )
+            main.cleanAndExit()
+        except Exception:
+            main.log.exception( self.name + ": Uncaught exception!" )
+            main.cleanAndExit()
+        else:
+            # namespace is not clear!
+            main.log.error( name + " component already exists!" )
+            main.cleanAndExit()
+
     def createComponents( self, prefix='', createServer=True ):
         """
         Creates a CLI and REST component for each nodes in the cluster
@@ -476,6 +535,7 @@
         benchPrefix = prefix + "bench"
         serverPrefix = prefix + "server"
         k8sPrefix = prefix + "k8s"
+        up4Prefix = prefix + "up4cl"
         for i in xrange( 1, self.maxNodes + 1 ):
             cliName = cliPrefix + str( i  )
             restName = restPrefix + str( i )
@@ -483,6 +543,8 @@
             serverName = serverPrefix + str( i )
             if self.kubeConfig:
                 k8sName = k8sPrefix + str( i )
+            if self.up4Port:
+                up4Name = up4Prefix + str( i )
 
             # Unfortunately this means we need to have a cell set beofre running TestON,
             # Even if it is just the entire possible cluster size
@@ -493,9 +555,10 @@
             bench = self.createBenchComponent( benchName )
             server = self.createServerComponent( serverName, ip ) if createServer else None
             k8s = self.createServerComponent( k8sName, ip ) if self.kubeConfig else None
+            p4rtUp4 = self.createP4rtCLIComponent( up4Name, ip ) if self.up4Port else None
             if self.kubeConfig:
                 k8s.kubeConfig = self.kubeConfig
                 k8s.podName = None
             self.nodes.append( Controller( prefix + str( i ), ip, cli, rest, bench, i - 1,
                                            self.user_name, server=server, k8s=k8s,
-                                           dockerPrompt=self.dockerPrompt ) )
+                                           p4rtUp4=p4rtUp4, dockerPrompt=self.dockerPrompt ) )
diff --git a/TestON/drivers/common/cli/p4runtimeclidriver.py b/TestON/drivers/common/cli/p4runtimeclidriver.py
new file mode 100644
index 0000000..6728cba
--- /dev/null
+++ b/TestON/drivers/common/cli/p4runtimeclidriver.py
@@ -0,0 +1,361 @@
+"""
+Copyright 2021 Open Networking Foundation (ONF)
+
+Please refer questions to either the onos test mailing list at <onos-test@onosproject.org>,
+the System Testing Plans and Results wiki page at <https://wiki.onosproject.org/x/voMg>,
+or the System Testing Guide page at <https://wiki.onosproject.org/x/WYQg>
+
+"""
+
+import pexpect
+import os
+from drivers.common.clidriver import CLI
+
+
+class P4RuntimeCliDriver(CLI):
+    """
+    Implements a P4Runtime Client CLI-based driver to control devices that use
+    the P4Runtime Protocol, using the P4Runtime shell CLI.
+
+    This driver requires that P4Runtime CLI is configured to not generate colored
+    text. To do so, add the following lines to a file in
+    ~/.ipython/profile_default/ipython_config.py:
+        c.InteractiveShell.color_info = False
+        c.InteractiveShell.colors = 'NoColor'
+        c.TerminalInteractiveShell.color_info = False
+        c.TerminalInteractiveShell.colors = 'NoColor'
+    """
+
+    def __init__(self):
+        """
+        Initialize client
+        """
+        super(P4RuntimeCliDriver, self).__init__()
+        self.name = None
+        self.handle = None
+        self.p4rtAddress = "localhost"
+        self.p4rtPort = "9559"  # Default P4RT server port
+        self.prompt = "\$"
+        self.p4rtShPrompt = ">>>"
+        self.p4rtDeviceId = "1"
+        self.p4rtElectionId = "0,100"  # (high,low)
+        self.p4rtConfig = None  # Can be used to pass a path to the P4Info and pipeline config
+
+    def connect(self, **connectargs):
+        """
+        Creates the ssh handle for the P4Runtime CLI
+        The ip_address would come from the topo file using the host tag, the
+        value can be an environment variable as well as a "localhost" to get
+        the ip address needed to ssh to the "bench"
+        """
+        try:
+            for key in connectargs:
+                vars(self)[key] = connectargs[key]
+            self.name = self.options.get("name", "")
+            self.p4rtAddress = self.options.get("p4rt_address", "localhost")
+            self.p4rtPort = self.options.get("p4rt_port", "9559")
+            self.p4rtShPrompt = self.options.get("p4rt_sh_prompt", ">>>")
+            self.p4rtDeviceId = self.options.get("p4rt_device_id", "1")
+            self.p4rtElectionId = self.options.get("p4rt_election_id", "0,100")
+            self.p4rtConfig = self.options.get("p4rt_config", None)
+            try:
+                if os.getenv(str(self.ip_address)) is not None:
+                    self.ip_address = os.getenv(str(self.ip_address))
+                else:
+                    main.log.info(self.name + ": ip set to " + self.ip_address)
+            except KeyError:
+                main.log.info(self.name + ": Invalid host name," +
+                              "defaulting to 'localhost' instead")
+                self.ip_address = 'localhost'
+            except Exception as inst:
+                main.log.error("Uncaught exception: " + str(inst))
+
+            self.handle = super(P4RuntimeCliDriver, self).connect(
+                user_name=self.user_name,
+                ip_address=self.ip_address,
+                port=None,
+                pwd=self.pwd)
+            if self.handle:
+                main.log.info("Connection successful to the host " +
+                              self.user_name +
+                              "@" +
+                              self.ip_address)
+                self.handle.sendline("")
+                self.handle.expect(self.prompt)
+                return main.TRUE
+            else:
+                main.log.error("Connection failed to " +
+                               self.user_name +
+                               "@" +
+                               self.ip_address)
+                return main.FALSE
+        except pexpect.EOF:
+            main.log.error(self.name + ": EOF exception found")
+            main.log.error(self.name + ":     " + self.handle.before)
+            main.cleanAndExit()
+        except Exception:
+            main.log.exception(self.name + ": Uncaught exception!")
+            main.cleanAndExit()
+
+    def startP4RtClient(self, pushConfig=False):
+        """
+        Start the P4Runtime shell CLI client
+
+        :param pushConfig: True if you want to push the pipeline config, False otherwise
+            requires the p4rt_config configuration parameter to be set
+        :return:
+        """
+        try:
+            main.log.debug(self.name + ": Starting P4Runtime Shell CLI")
+            grpcAddr = "%s:%s" % (self.p4rtAddress, self.p4rtPort)
+            startP4RtShLine = "python3 -m p4runtime_sh --grpc-addr " + grpcAddr + \
+                              " --device-id " + self.p4rtDeviceId + \
+                              " --election-id " + self.p4rtElectionId
+            if pushConfig:
+                if self.p4rtConfig:
+                    startP4RtShLine += " --config " + self.p4rtConfig
+                else:
+                    main.log.error(
+                        "You should provide a P4 Runtime config to push!")
+                    main.cleanAndExit()
+            response = self.__clearSendAndExpect(startP4RtShLine)
+            self.preDisconnect = self.stopP4RtClient
+        except pexpect.TIMEOUT:
+            main.log.exception(self.name + ": Command timed out")
+            return main.FALSE
+        except pexpect.EOF:
+            main.log.exception(self.name + ": connection closed.")
+            main.cleanAndExit()
+        except Exception:
+            main.log.exception(self.name + ": Uncaught exception!")
+            main.cleanAndExit()
+
+    def stopP4RtClient(self):
+        """
+        Exit the P4Runtime shell CLI
+        """
+        try:
+            main.log.debug(self.name + ": Stopping P4Runtime Shell CLI")
+            self.handle.sendline("exit")
+            self.handle.expect(self.prompt)
+            return main.TRUE
+        except pexpect.TIMEOUT:
+            main.log.exception(self.name + ": Command timed out")
+            return main.FALSE
+        except pexpect.EOF:
+            main.log.exception(self.name + ": connection closed.")
+            main.cleanAndExit()
+        except Exception:
+            main.log.exception(self.name + ": Uncaught exception!")
+            main.cleanAndExit()
+
+    def pushTableEntry(self, tableEntry=None, debug=True):
+        """
+        Push a table entry with either the given table entry or use the saved
+        table entry in the variable 'te'.
+
+        Example of a valid tableEntry string:
+        te = table_entry["FabricIngress.forwarding.routing_v4"](action="set_next_id_routing_v4"); te.action["next_id"] = "10"; te.match["ipv4_dst"] = "10.0.0.0" # nopep8
+
+        :param tableEntry: the string table entry, if None it uses the table
+            entry saved in the 'te' variable
+        :param debug: True to enable debug logging, False otherwise
+        :return: main.TRUE or main.FALSE on error
+        """
+        try:
+            main.log.debug(self.name + ": Pushing Table Entry")
+            if debug:
+                self.handle.sendline("te")
+                self.handle.expect(self.p4rtShPrompt)
+            pushCmd = ""
+            if tableEntry:
+                pushCmd = tableEntry + ";"
+            pushCmd += "te.insert()"
+            response = self.__clearSendAndExpect(pushCmd)
+            if "Traceback" in response or "Error" in response:
+                # TODO: other possible errors?
+                # NameError...
+                main.log.error(
+                    self.name + ": Error in pushing table entry: " + response)
+                return main.FALSE
+            return main.TRUE
+        except pexpect.TIMEOUT:
+            main.log.exception(self.name + ": Command timed out")
+            return main.FALSE
+        except pexpect.EOF:
+            main.log.exception(self.name + ": connection closed.")
+            main.cleanAndExit()
+        except Exception:
+            main.log.exception(self.name + ": Uncaught exception!")
+            main.cleanAndExit()
+
+    def deleteTableEntry(self, tableEntry=None, debug=True):
+        """
+        Deletes a table entry with either the given table entry or use the saved
+        table entry in the variable 'te'.
+
+        Example of a valid tableEntry string:
+        te = table_entry["FabricIngress.forwarding.routing_v4"](action="set_next_id_routing_v4"); te.action["next_id"] = "10"; te.match["ipv4_dst"] = "10.0.0.0" # nopep8
+
+        :param tableEntry: the string table entry, if None it uses the table
+            entry saved in the 'te' variable
+        :param debug: True to enable debug logging, False otherwise
+        :return: main.TRUE or main.FALSE on error
+        """
+        try:
+            main.log.debug(self.name + ": Deleting Table Entry")
+            if debug:
+                self.__clearSendAndExpect("te")
+            pushCmd = ""
+            if tableEntry:
+                pushCmd = tableEntry + ";"
+            pushCmd += "te.delete()"
+            response = self.__clearSendAndExpect(pushCmd)
+            main.log.debug(
+                self.name + ": Delete table entry response: {}".format(
+                    response))
+            if "Traceback" in response or "Error" in response:
+                # TODO: other possible errors?
+                # NameError...
+                main.log.error(
+                    self.name + ": Error in deleting table entry: " + response)
+                return main.FALSE
+            return main.TRUE
+        except pexpect.TIMEOUT:
+            main.log.exception(self.name + ": Command timed out")
+            return main.FALSE
+        except pexpect.EOF:
+            main.log.exception(self.name + ": connection closed.")
+            main.cleanAndExit()
+        except Exception:
+            main.log.exception(self.name + ": Uncaught exception!")
+            main.cleanAndExit()
+
+    def buildP4RtTableEntry(self, tableName, actionName, actionParams={},
+                            matchFields={}):
+        """
+        Build a Table Entry
+        :param tableName: The name of table
+        :param actionName: The name of the action
+        :param actionParams: A dictionary containing name and values for the action parameters
+        :param matchFields: A dictionary containing name and values for the match fields
+        :return: main.TRUE or main.FALSE on error
+        """
+        # TODO: improve error checking when creating the table entry, add
+        #  params, and match fields.
+        try:
+            main.log.debug(self.name + ": Building P4RT Table Entry")
+            cmd = 'te = table_entry["%s"](action="%s"); ' % (
+                tableName, actionName)
+
+            # Action Parameters
+            for name, value in actionParams.items():
+                cmd += 'te.action["%s"]="%s";' % (name, str(value))
+
+            # Match Fields
+            for name, value in matchFields.items():
+                cmd += 'te.match["%s"]="%s";' % (name, str(value))
+
+            response = self.__clearSendAndExpect(cmd)
+            if "Unknown action" in response:
+                main.log.error("Unknown action: " + response)
+                return main.FALSE
+            if "AttributeError" in response:
+                main.log.error("Wrong action: " + response)
+                return main.FALSE
+            if "Invalid value" in response:
+                main.log.error("Invalid action value: " + response)
+                return main.FALSE
+            if "Action parameter value must be a string" in response:
+                main.log.error(
+                    "Action parameter value must be a string: " + response)
+                return main.FALSE
+            if "table" in response and "does not exist" in response:
+                main.log.error("Unknown table: " + response)
+                return main.FALSE
+            if "not a valid match field name" in response:
+                main.log.error("Invalid match field name: " + response)
+                return main.FALSE
+            if "is not a valid" in response:
+                main.log.error("Invalid match field: " + response)
+                return main.FALSE
+            if "Traceback" in response:
+                main.log.error("Error in creating the table entry: " + response)
+                return main.FALSE
+            return main.TRUE
+        except pexpect.TIMEOUT:
+            main.log.exception(self.name + ": Command timed out")
+            return main.FALSE
+        except pexpect.EOF:
+            main.log.exception(self.name + ": connection closed.")
+            main.cleanAndExit()
+        except Exception:
+            main.log.exception(self.name + ": Uncaught exception!")
+            main.cleanAndExit()
+
+    def disconnect(self):
+        """
+        Called at the end of the test to stop the p4rt CLI component and
+        disconnect the handle.
+        """
+        response = main.TRUE
+        try:
+            if self.handle:
+                self.handle.sendline("")
+                i = self.handle.expect([self.p4rtShPrompt, pexpect.TIMEOUT],
+                                       timeout=2)
+                if i != 1:
+                    # If the p4rtShell is still connected make sure to
+                    # disconnect it before
+                    self.stopP4RtClient()
+                i = self.handle.expect([self.prompt, pexpect.TIMEOUT],
+                                       timeout=2)
+                if i == 1:
+                    main.log.warn(
+                        self.name + ": timeout when waiting for response")
+                    main.log.warn(
+                        self.name + ": response: " + str(self.handle.before))
+                self.handle.sendline("exit")
+                i = self.handle.expect(["closed", pexpect.TIMEOUT], timeout=2)
+                if i == 1:
+                    main.log.warn(
+                        self.name + ": timeout when waiting for response")
+                    main.log.warn(
+                        self.name + ": response: " + str(self.handle.before))
+            return main.TRUE
+        except TypeError:
+            main.log.exception(self.name + ": Object not as expected")
+            response = main.FALSE
+        except pexpect.EOF:
+            main.log.error(self.name + ": EOF exception found")
+            main.log.error(self.name + ":     " + self.handle.before)
+        except ValueError:
+            main.log.exception("Exception in disconnect of " + self.name)
+            response = main.TRUE
+        except Exception:
+            main.log.exception(self.name + ": Connection failed to the host")
+            response = main.FALSE
+        return response
+
+    def __clearSendAndExpect(self, cmd):
+        self.clearBuffer()
+        self.handle.sendline(cmd)
+        self.handle.expect(self.p4rtShPrompt)
+        return self.handle.before
+
+    def clearBuffer(self, debug=False):
+        """
+        Keep reading from buffer until it's empty
+        """
+        i = 0
+        response = ''
+        while True:
+            try:
+                i += 1
+                # clear buffer
+                if debug:
+                    main.log.warn("%s expect loop iteration" % i)
+                self.handle.expect(self.p4rtShPrompt, timeout=5)
+                response += self.cleanOutput(self.handle.before, debug)
+            except pexpect.TIMEOUT:
+                return response
diff --git a/TestON/tests/USECASE/SegmentRouting/SRStaging/dependencies/SRStagingTest.py b/TestON/tests/USECASE/SegmentRouting/SRStaging/dependencies/SRStagingTest.py
index 7b87198..a3ab6ba 100644
--- a/TestON/tests/USECASE/SegmentRouting/SRStaging/dependencies/SRStagingTest.py
+++ b/TestON/tests/USECASE/SegmentRouting/SRStaging/dependencies/SRStagingTest.py
@@ -1329,6 +1329,8 @@
                           print in alphabetical order
         """
         try:
+            if not main.downtimeResults:
+                return main.TRUE
             dbFileName = "%s/%s" % ( main.logdir, filename )
             with open( dbFileName, "w+" ) as dbfile:
                 header = []
diff --git a/TestON/tests/USECASE/SegmentRouting/UP4/README.md b/TestON/tests/USECASE/SegmentRouting/UP4/README.md
new file mode 100644
index 0000000..06e23b2
--- /dev/null
+++ b/TestON/tests/USECASE/SegmentRouting/UP4/README.md
@@ -0,0 +1,31 @@
+# UP4 System Tests
+
+Tests in this folder use UP4 ONOS APIs (P4Runtime) to simulate the attachment
+and detachment of UEs. Tests also verify upstream and downstream traffic by
+checking that GTP encapsulation and decapsulation is correctly performed by the
+UPF (implemented by the leaves switches). The testing topology a paired-leaves
+topology.
+
+# Requirements to run UP4 tests
+
+The UP4 test uses the P4RuntimeCliDriver. This driver requires
+the `p4runtime-shell` to be installed on the ONOS Bench machine. To install it
+run `python3 -m pip install p4runtime-sh==0.0.2` (requires Python>3.7).
+
+The driver also requires an ipython config file. The file should contain the
+following lines:
+
+```
+c.InteractiveShell.color_info = False
+c.InteractiveShell.colors = 'NoColor'
+c.TerminalInteractiveShell.color_info = False
+c.TerminalInteractiveShell.colors = 'NoColor'
+```
+
+and can be placed in `<IPYTHONDIR>/profile_default/ipython_config.py`. A
+different location for the ipython config folder can be set via the `IPYTHONDIR`
+environment variable; in that case the `ipython_config.py` file can be placed
+in `<IPYTHONDIR>/profile_default`
+(more info at https://ipython.readthedocs.io/en/stable/development/config.html).
+
+
diff --git a/TestON/tests/USECASE/SegmentRouting/UP4/UP4.params b/TestON/tests/USECASE/SegmentRouting/UP4/UP4.params
new file mode 100644
index 0000000..113d33d
--- /dev/null
+++ b/TestON/tests/USECASE/SegmentRouting/UP4/UP4.params
@@ -0,0 +1,87 @@
+<PARAMS>
+    <testcases>1</testcases>
+
+    <GRAPH>
+        <nodeCluster>pairedleaves</nodeCluster>
+        <builds>20</builds>
+        <jobName>UP4</jobName>
+        <branch>master</branch>
+    </GRAPH>
+
+    <persistent_setup>True</persistent_setup>
+
+    <kubernetes>
+        <appName>onos-classic</appName>
+        <namespace>tost</namespace>
+    </kubernetes>
+
+    <UP4>
+        <pdn_host>Compute1</pdn_host>
+        <enodeb_host>Compute3</enodeb_host>
+        <s1u_address>10.32.11.126</s1u_address>
+        <enb_address>10.32.11.194</enb_address>
+        <router_mac>00:00:0A:4C:1C:46</router_mac>
+        <ues>
+            <ue1>
+                <pfcp_session_id>100</pfcp_session_id>
+                <ue_address>10.240.0.1</ue_address>
+                <teid>100</teid>
+                <up_id>10</up_id>
+                <down_id>11</down_id>
+                <qfi></qfi>
+                <five_g>False</five_g>
+            </ue1>
+            <ue2>
+                <pfcp_session_id>100</pfcp_session_id>
+                <ue_address>10.240.0.2</ue_address>
+                <teid>200</teid>
+                <up_id>20</up_id>
+                <down_id>21</down_id>
+                <qfi></qfi>
+                <five_g>False</five_g>
+            </ue2>
+        </ues>
+    </UP4>
+
+    <TOPO>
+        <switchNum>2</switchNum>
+        <linkNum>2</linkNum>
+    </TOPO>
+
+    <ONOS_Logging>
+        <org.onosproject.p4runtime.ctl.client>DEBUG</org.onosproject.p4runtime.ctl.client>
+        <org.onosproject.p4runtime.ctl.client.writerequestimpl>TRACE</org.onosproject.p4runtime.ctl.client.writerequestimpl>
+        <org.onosproject.segmentrouting>DEBUG</org.onosproject.segmentrouting>
+        <org.onosproject.gnmi.ctl>TRACE</org.onosproject.gnmi.ctl>
+        <org.omecproject.up4>TRACE</org.omecproject.up4>
+    </ONOS_Logging>
+    <ONOS_Logging_Reset>
+        <org.onosproject.p4runtime.ctl.client>INFO</org.onosproject.p4runtime.ctl.client>
+        <org.onosproject.p4runtime.ctl.client.writerequestimpl>INFO</org.onosproject.p4runtime.ctl.client.writerequestimpl>
+        <org.onosproject.segmentrouting>DEBUG</org.onosproject.segmentrouting>
+        <org.onosproject.gnmi.ctl>INFO</org.onosproject.gnmi.ctl>
+        <org.omecproject.up4>INFO</org.omecproject.up4>
+    </ONOS_Logging_Reset>
+
+    <ENV>
+        <cellName>productionCell</cellName>
+        <cellApps>drivers,fpm,lldpprovider,hostprovider,netcfghostprovider,drivers.bmv2,org.opencord.fabric-tofino,pipelines.fabric,org.stratumproject.fabric-tna,drivers.barefoot,segmentrouting,t3,up4</cellApps>
+    </ENV>
+
+    <DEPENDENCY>
+        <useCommonConf>False</useCommonConf>
+        <useCommonTopo>True</useCommonTopo>
+        <useBmv2>True</useBmv2>
+        <bmv2SwitchType>stratum</bmv2SwitchType>
+        <switchPrefix></switchPrefix>
+        <stratumRoot>~/stratum</stratumRoot>
+        <topology>trellis_fabric.py</topology>
+        <lib></lib>
+    </DEPENDENCY>
+
+    <SCALE>
+        <size>3</size>
+        <max>3</max>
+    </SCALE>
+
+</PARAMS>
diff --git a/TestON/tests/USECASE/SegmentRouting/UP4/UP4.py b/TestON/tests/USECASE/SegmentRouting/UP4/UP4.py
new file mode 100644
index 0000000..a7f9804
--- /dev/null
+++ b/TestON/tests/USECASE/SegmentRouting/UP4/UP4.py
@@ -0,0 +1,203 @@
+class UP4:
+
+    def __init__(self):
+        self.default = ''
+
+    # TODO: add test case that checks entries are being inserted and deleted from ONOS correctly
+    def CASE1(self, main):
+        """
+        Attach UE
+        Generate traffic from UE to PDN
+        Verify traffic received from PDN
+        Generate traffic from PDN to UE
+        Verify traffic received from UE
+        Detach UE
+        """
+        UE_PORT = 400
+        PDN_PORT = 800
+        GPDU_PORT = 2152
+        try:
+            from tests.USECASE.SegmentRouting.dependencies.up4libcli import \
+                Up4LibCli
+            from tests.USECASE.SegmentRouting.dependencies.Testcaselib import \
+                Testcaselib as run
+            from distutils.util import strtobool
+        except ImportError as e:
+            main.log.error("Import not found. Exiting the test")
+            main.log.error(e)
+            main.cleanAndExit()
+
+        # TODO: Move to a setup script
+        run.initTest(main)
+        main.log.info(main.Cluster.numCtrls)
+        main.Cluster.setRunningNode(3)
+        run.installOnos(main, skipPackage=True, cliSleep=5)
+
+        # Get the P4RT client connected to UP4 in the first available ONOS instance
+        up4Client = main.Cluster.active(0).p4rtUp4
+
+        s1u_address = main.params["UP4"]["s1u_address"]
+        enb_address = main.params["UP4"]["enb_address"]
+        router_mac = main.params["UP4"]["router_mac"]
+
+        pdn_host = getattr(main, main.params["UP4"]["pdn_host"])
+        pdn_interface = pdn_host.interfaces[0]
+
+        enodeb_host = getattr(main, main.params["UP4"]["enodeb_host"])
+        enodeb_interface = enodeb_host.interfaces[0]
+
+        emulated_ues = main.params["UP4"]['ues']
+        n_ues = len(emulated_ues)
+
+        main.step("Start scapy and p4rt client")
+        pdn_host.startScapy(ifaceName=pdn_interface["name"])
+        enodeb_host.startScapy(ifaceName=enodeb_interface["name"],
+                               enableGtp=True)
+        up4Client.startP4RtClient()
+
+        # TODO: move to library in dependencies
+        main.step("Attach UEs")
+        for ue in emulated_ues.values():
+            # Sanitize values coming from the params file
+            if "five_g" in ue:
+                ue["five_g"] = bool(strtobool(ue["five_g"]))
+            if "qfi" in ue and ue["qfi"] == "":
+                ue["qfi"] = None
+            Up4LibCli.attachUe(up4Client, s1u_address=s1u_address,
+                               enb_address=enb_address,
+                               **ue)
+
+        # ----------------- Test Upstream traffic (enb->pdn)
+        main.step("Test upstream traffic")
+        # Scapy filter needs to start before sending traffic
+        pkt_filter_upstream = ""
+        for ue in emulated_ues.values():
+            if "ue_address" in ue:
+                if len(pkt_filter_upstream) != 0:
+                    pkt_filter_upstream += " or "
+                pkt_filter_upstream += "src host " + ue["ue_address"]
+        pkt_filter_upstream = "ip and udp dst port %s and (%s) and dst host %s" % \
+                              (PDN_PORT, pkt_filter_upstream,
+                               pdn_interface["ips"][0])
+        main.log.info("Start listening on %s intf %s" %
+                      (main.params["UP4"]["pdn_host"], pdn_interface["name"]))
+        main.log.debug("BPF Filter Upstream: \n %s" % pkt_filter_upstream)
+        pdn_host.startFilter(ifaceName=pdn_interface["name"],
+                             sniffCount=n_ues,
+                             pktFilter=pkt_filter_upstream)
+
+        main.log.info("Sending %d packets from eNodeB host" % len(emulated_ues))
+        for ue in emulated_ues.values():
+            enodeb_host.buildEther()
+            enodeb_host.buildIP(src=enb_address, dst=s1u_address)
+            enodeb_host.buildUDP(ipVersion=4, dport=GPDU_PORT)
+            # FIXME: With newer scapy TEID becomes teid (required for Scapy 2.4.5)
+            enodeb_host.buildGTP(gtp_type=0xFF, TEID=int(ue["teid"]))
+            enodeb_host.buildIP(overGtp=True, src=ue["ue_address"],
+                                dst=pdn_interface["ips"][0])
+            enodeb_host.buildUDP(ipVersion=4, overGtp=True, sport=UE_PORT,
+                                 dport=PDN_PORT)
+
+            enodeb_host.sendPacket(iface=enodeb_interface["name"])
+
+        finished = pdn_host.checkFilter()
+        packets = ""
+        if finished:
+            packets = pdn_host.readPackets(detailed=True)
+            for p in packets.splitlines():
+                main.log.debug(p)
+            # We care only of the last line from readPackets
+            packets = packets.splitlines()[-1]
+        else:
+            kill = pdn_host.killFilter()
+            main.log.debug(kill)
+
+        fail = False
+        if len(emulated_ues) != packets.count('Ether'):
+            fail = True
+            msg = "Failed to capture packets in PDN. "
+        else:
+            msg = "Correctly captured packet in PDN. "
+        # We expect exactly 1 packet per UE
+        pktsFiltered = [packets.count("src=" + ue["ue_address"])
+                        for ue in emulated_ues.values()]
+        if pktsFiltered.count(1) != len(pktsFiltered):
+            fail = True
+            msg += "More than one packet per UE in downstream. "
+        else:
+            msg += "One packet per UE in upstream. "
+
+        utilities.assert_equal(
+            expect=False, actual=fail, onpass=msg, onfail=msg)
+
+        # --------------- Test Downstream traffic (pdn->enb)
+        main.step("Test downstream traffic")
+        pkt_filter_downstream = "ip and udp src port %d and udp dst port %d and dst host %s and src host %s" % (
+            GPDU_PORT, GPDU_PORT, enb_address, s1u_address)
+        main.log.info("Start listening on %s intf %s" % (
+            main.params["UP4"]["enodeb_host"], enodeb_interface["name"]))
+        main.log.debug("BPF Filter Downstream: \n %s" % pkt_filter_downstream)
+        enodeb_host.startFilter(ifaceName=enodeb_interface["name"],
+                                sniffCount=len(emulated_ues),
+                                pktFilter=pkt_filter_downstream)
+
+        main.log.info("Sending %d packets from PDN host" % len(emulated_ues))
+        for ue in emulated_ues.values():
+            # From PDN we have to set dest MAC, otherwise scapy will do ARP
+            # request for the UE IP address.
+            pdn_host.buildEther(dst=router_mac)
+            pdn_host.buildIP(src=pdn_interface["ips"][0],
+                             dst=ue["ue_address"])
+            pdn_host.buildUDP(ipVersion=4, sport=PDN_PORT, dport=UE_PORT)
+            pdn_host.sendPacket(iface=pdn_interface["name"])
+
+        finished = enodeb_host.checkFilter()
+        packets = ""
+        if finished:
+            packets = enodeb_host.readPackets(detailed=True)
+            for p in packets.splitlines():
+                main.log.debug(p)
+            # We care only of the last line from readPackets
+            packets = packets.splitlines()[-1]
+        else:
+            kill = enodeb_host.killFilter()
+            main.log.debug(kill)
+
+        # The BPF filter might capture non-GTP packets because we can't filter
+        # GTP header in BPF. For this reason, check that the captured packets
+        # are from the expected tunnels.
+        # TODO: check inner UDP and IP fields as well
+        # FIXME: with newer scapy TEID becomes teid (required for Scapy 2.4.5)
+        pktsFiltered = [packets.count("TEID=" + hex(int(ue["teid"])) + "L ")
+                        for ue in emulated_ues.values()]
+
+        fail = False
+        if len(emulated_ues) != sum(pktsFiltered):
+            fail = True
+            msg = "Failed to capture packets in eNodeB. "
+        else:
+            msg = "Correctly captured packets in eNodeB. "
+        # We expect exactly 1 packet per UE
+        if pktsFiltered.count(1) != len(pktsFiltered):
+            fail = True
+            msg += "More than one packet per GTP TEID in downstream. "
+        else:
+            msg += "One packet per GTP TEID in downstream. "
+
+        utilities.assert_equal(
+            expect=False, actual=fail, onpass=msg, onfail=msg)
+
+        # Detach UEs
+        main.step("Detach UEs")
+        for ue in emulated_ues.values():
+            # No need to sanitize values, already sanitized during attachment
+            Up4LibCli.detachUe(up4Client, s1u_address=s1u_address,
+                               enb_address=enb_address,
+                               **ue)
+
+        # Teardown
+        main.step("Stop scapy and p4rt client")
+        enodeb_host.stopScapy()
+        pdn_host.stopScapy()
+        up4Client.stopP4RtClient()
+        run.cleanup(main)
diff --git a/TestON/tests/USECASE/SegmentRouting/UP4/UP4.topo b/TestON/tests/USECASE/SegmentRouting/UP4/UP4.topo
new file mode 100644
index 0000000..5dac34a
--- /dev/null
+++ b/TestON/tests/USECASE/SegmentRouting/UP4/UP4.topo
@@ -0,0 +1,100 @@
+<TOPOLOGY>
+    <COMPONENT>
+        <ONOScell>
+            <host>localhost</host>  # ONOS "bench" machine
+            <user>jenkins</user>
+            <password></password>
+            <type>OnosClusterDriver</type>
+            <connect_order>50</connect_order>
+            <jump_host></jump_host>
+            <home>~/onos</home>   # defines where onos home is on the build machine. Defaults to "~/onos/" if empty.
+            <COMPONENTS>
+                <kubeConfig>~/.kube/dev-pairedleaves-tucson</kubeConfig>  # If set, will attempt to use this file for setting up port-forwarding
+                <useDocker>True</useDocker>  # Whether to use docker for ONOS nodes
+                <docker_prompt>\$</docker_prompt>
+                <cluster_name></cluster_name>  # Used as a prefix for cluster components. Defaults to 'ONOS'
+                <diff_clihost>True</diff_clihost> # Set to True if the CLI uses a host other than localhost; True or empty. OC# will be used when True.
+                <karaf_username>karaf</karaf_username>
+                <karaf_password>karaf</karaf_password>
+                <web_user>karaf</web_user>
+                <web_pass>karaf</web_pass>
+                <karafPrompt_username>karaf</karafPrompt_username>
+                <rest_port></rest_port>
+                <prompt></prompt>  # TODO: we technically need a few of these, one per component
+                <onos_home>~/onos/</onos_home>  # defines where onos home is on the target cell machine. Defaults to entry in "home" if empty.
+                <nodes> 3 </nodes>  # number of nodes in the cluster
+                <up4_port>51001</up4_port> # Port where the UP4 P4Runtime server is listening
+            </COMPONENTS>
+        </ONOScell>
+
+        <Compute1>
+            <host>10.76.28.74</host>
+            <user>jenkins</user>
+            <password></password>
+            <type>HostDriver</type>
+            <connect_order>6</connect_order>
+            <jump_host></jump_host>
+            <COMPONENTS>
+                <mac></mac>
+                <inband>false</inband>
+                <dhcp>True</dhcp>
+                <ip>10.32.11.2</ip>
+                <shortName>h1</shortName>
+                <port1></port1>
+                <link1></link1>
+                <ifaceName>pairbond</ifaceName>
+                <scapy_path>/usr/bin/scapy</scapy_path>
+                <routes>
+                    <route1>
+                        <network></network>
+                        <netmask></netmask>
+                        <gw></gw>
+                        <interface></interface>
+                    </route1>
+                </routes>
+                <sudo_required>true</sudo_required>
+            </COMPONENTS>
+        </Compute1>
+
+        <Compute3>
+            <host>10.76.28.68</host>
+            <user>jenkins</user>
+            <password></password>
+            <type>HostDriver</type>
+            <connect_order>6</connect_order>
+            <jump_host></jump_host>
+            <COMPONENTS>
+                <mac></mac>
+                <inband>false</inband>
+                <dhcp>True</dhcp>
+                <ip>10.32.11.194</ip>
+                <shortName>h3</shortName>
+                <port1></port1>
+                <link1></link1>
+                <ifaceName>eno2</ifaceName>
+                <scapy_path>/usr/bin/scapy</scapy_path>
+                <routes>
+                    <route1>
+                        <network></network>
+                        <netmask></netmask>
+                        <gw></gw>
+                        <interface></interface>
+                    </route1>
+                </routes>
+                <sudo_required>true</sudo_required>
+            </COMPONENTS>
+        </Compute3>
+
+    <!--  This component is not used directly by the test, but Testcaselib requires it to be defined  -->
+        <NetworkBench>
+            <host>10.76.28.66</host>
+            <user>jenkins</user>
+            <password></password>
+            <type>NetworkDriver</type>
+            <connect_order>1</connect_order>
+            <COMPONENTS>
+            </COMPONENTS>
+        </NetworkBench>
+
+    </COMPONENT>
+</TOPOLOGY>
diff --git a/TestON/tests/USECASE/SegmentRouting/UP4/__init__.py b/TestON/tests/USECASE/SegmentRouting/UP4/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/TestON/tests/USECASE/SegmentRouting/UP4/__init__.py
diff --git a/TestON/tests/USECASE/SegmentRouting/dependencies/up4libcli.py b/TestON/tests/USECASE/SegmentRouting/dependencies/up4libcli.py
new file mode 100644
index 0000000..73dd39b
--- /dev/null
+++ b/TestON/tests/USECASE/SegmentRouting/dependencies/up4libcli.py
@@ -0,0 +1,213 @@
+#!/usr/bin/env python
+
+"""
+Copyright 2021 Open Networking Foundation (ONF)
+
+Please refer questions to either the onos test mailing list at <onos-test@onosproject.org>,
+the System Testing Plans and Results wiki page at <https://wiki.onosproject.org/x/voMg>,
+or the System Testing Guide page at <https://wiki.onosproject.org/x/WYQg>
+
+"""
+
+FALSE = '0'  # P4Runtime action/match values are passed as strings by the p4rt client
+TRUE = '1'
+DIR_UPLINK = '1'  # traffic-direction codes (not referenced in this file)
+DIR_DOWNLINK = '2'
+IFACE_ACCESS = '1'  # src_iface match value: access (eNodeB-facing) side
+IFACE_CORE = '2'  # src_iface match value: core (PDN-facing) side
+TUNNEL_SPORT = '2152'  # GTP-U UDP port (standard), used as the tunnel source port
+TUNNEL_TYPE_GPDU = '3'  # tunnel_type action value for a GTP-U G-PDU tunnel
+
+
+class Up4LibCli():
+    """
+    Helper library to attach and detach UEs via UP4 P4Runtime APIs. All methods are static; device access goes through the given p4rtCli driver.
+    """
+
+    @staticmethod
+    def attachUe(p4rtCli, s1u_address, enb_address, pfcp_session_id, ue_address,  # Install the UP4 PDR/FAR entries that attach one UE
+                 teid=None, up_id=None, down_id=None,  # shorthands expanded by __programUp4Rules
+                 teid_up=None, teid_down=None,
+                 pdr_id_up=None, far_id_up=None, ctr_id_up=None,
+                 pdr_id_down=None, far_id_down=None, ctr_id_down=None,
+                 qfi=None, five_g=False):  # qfi/five_g select the QoS action variants
+        Up4LibCli.__programUp4Rules(p4rtCli, s1u_address, enb_address,
+                                 pfcp_session_id,
+                                 ue_address,
+                                 teid, up_id, down_id,
+                                 teid_up, teid_down,
+                                 pdr_id_up, far_id_up, ctr_id_up,
+                                 pdr_id_down, far_id_down, ctr_id_down,
+                                 qfi, five_g, action="program")
+
+    @staticmethod
+    def detachUe(p4rtCli, s1u_address, enb_address, pfcp_session_id, ue_address,  # Remove the entries installed by attachUe (pass the same arguments)
+                 teid=None, up_id=None, down_id=None,
+                 teid_up=None, teid_down=None,
+                 pdr_id_up=None, far_id_up=None, ctr_id_up=None,
+                 pdr_id_down=None, far_id_down=None, ctr_id_down=None,
+                 qfi=None, five_g=False):
+        Up4LibCli.__programUp4Rules(p4rtCli, s1u_address, enb_address,
+                                 pfcp_session_id,
+                                 ue_address,
+                                 teid, up_id, down_id,
+                                 teid_up, teid_down,
+                                 pdr_id_up, far_id_up, ctr_id_up,
+                                 pdr_id_down, far_id_down, ctr_id_down,
+                                 qfi, five_g, action="clear")
+
+    @staticmethod
+    def __programUp4Rules(p4rtCli, s1u_address, enb_address, pfcp_session_id,  # Build the four table entries (uplink/downlink PDR and FAR) and push or clear them
+                          ue_address,
+                          teid=None, up_id=None, down_id=None,
+                          teid_up=None, teid_down=None,
+                          pdr_id_up=None, far_id_up=None, ctr_id_up=None,
+                          pdr_id_down=None, far_id_down=None, ctr_id_down=None,
+                          qfi=None, five_g=False, action="program"):  # NOTE(review): returns False on a failed insert but None otherwise — callers should not rely on the return value
+        if up_id is not None:  # up_id is a shorthand that sets all three uplink IDs
+            pdr_id_up = up_id
+            far_id_up = up_id
+            ctr_id_up = up_id
+        if down_id is not None:  # down_id likewise for the downlink IDs
+            pdr_id_down = down_id
+            far_id_down = down_id
+            ctr_id_down = down_id
+        if teid is not None:  # a single teid applies to both directions
+            teid_up = teid
+            teid_down = teid
+
+        entries = []  # entries built so far; used to roll back on failure or to clear on detach
+
+        # ========================#
+        # PDR Entries
+        # ========================#
+
+        # Uplink
+        tableName = 'PreQosPipe.pdrs'
+        actionName = ''
+        matchFields = {}
+        actionParams = {}
+        if qfi is None:
+            actionName = 'PreQosPipe.set_pdr_attributes'
+        else:
+            actionName = 'PreQosPipe.set_pdr_attributes_qos'
+            if five_g:
+                # TODO: currently QFI_MATCH is unsupported in TNA
+                matchFields['has_qfi'] = TRUE
+                matchFields["qfi"] = str(qfi)
+            actionParams['needs_qfi_push'] = FALSE  # QFI is never pushed on the uplink
+            actionParams['qfi'] = str(qfi)
+        # Match fields
+        matchFields['src_iface'] = IFACE_ACCESS
+        matchFields['ue_addr'] = str(ue_address)
+        matchFields['teid'] = str(teid_up)
+        matchFields['tunnel_ipv4_dst'] = str(s1u_address)
+        # Action params
+        actionParams['id'] = str(pdr_id_up)
+        actionParams['fseid'] = str(pfcp_session_id)
+        actionParams['ctr_id'] = str(ctr_id_up)
+        actionParams['far_id'] = str(far_id_up)
+        actionParams['needs_gtpu_decap'] = TRUE  # uplink packets arrive GTP-U encapsulated
+        if not Up4LibCli.__add_entry(p4rtCli, tableName, actionName, matchFields,
+                                  actionParams, entries, action):
+            return False
+
+        # Downlink
+        tableName = 'PreQosPipe.pdrs'
+        actionName = ''
+        matchFields = {}
+        actionParams = {}
+        if qfi is None:
+            actionName = 'PreQosPipe.set_pdr_attributes'
+        else:
+            actionName = 'PreQosPipe.set_pdr_attributes_qos'
+            # TODO: currently QFI_PUSH is unsupported in TNA
+            actionParams['needs_qfi_push'] = TRUE if five_g else FALSE
+            actionParams['qfi'] = str(qfi)
+        # Match fields
+        matchFields['src_iface'] = IFACE_CORE
+        matchFields['ue_addr'] = str(ue_address)
+        # Action params
+        actionParams['id'] = str(pdr_id_down)
+        actionParams['fseid'] = str(pfcp_session_id)
+        actionParams['ctr_id'] = str(ctr_id_down)
+        actionParams['far_id'] = str(far_id_down)
+        actionParams['needs_gtpu_decap'] = FALSE  # downlink packets from the PDN are not encapsulated yet
+        if not Up4LibCli.__add_entry(p4rtCli, tableName, actionName, matchFields,
+                                  actionParams, entries, action):
+            return False
+
+        # ========================#
+        # FAR Entries
+        # ========================#
+
+        # Uplink
+        tableName = 'PreQosPipe.load_far_attributes'
+        actionName = 'PreQosPipe.load_normal_far_attributes'  # uplink forwards without re-encapsulation
+        matchFields = {}
+        actionParams = {}
+
+        # Match fields
+        matchFields['far_id'] = str(far_id_up)
+        matchFields['session_id'] = str(pfcp_session_id)
+        # Action params
+        actionParams['needs_dropping'] = FALSE
+        actionParams['notify_cp'] = FALSE
+        if not Up4LibCli.__add_entry(p4rtCli, tableName, actionName, matchFields,
+                                  actionParams, entries, action):
+            return False
+
+        # Downlink
+        tableName = 'PreQosPipe.load_far_attributes'
+        actionName = 'PreQosPipe.load_tunnel_far_attributes'  # downlink re-encapsulates towards the eNodeB
+        matchFields = {}
+        actionParams = {}
+
+        # Match fields
+        matchFields['far_id'] = str(far_id_down)
+        matchFields['session_id'] = str(pfcp_session_id)
+        # Action params
+        actionParams['needs_dropping'] = FALSE
+        actionParams['notify_cp'] = FALSE
+        actionParams['needs_buffering'] = FALSE
+        actionParams['tunnel_type'] = TUNNEL_TYPE_GPDU
+        actionParams['src_addr'] = str(s1u_address)  # tunnel endpoints: S1-U address -> eNodeB address
+        actionParams['dst_addr'] = str(enb_address)
+        actionParams['teid'] = str(teid_down)
+        actionParams['sport'] = TUNNEL_SPORT
+        if not Up4LibCli.__add_entry(p4rtCli, tableName, actionName, matchFields,
+                                  actionParams, entries, action):
+            return False
+
+        if action == "program":
+            main.log.info("All entries added successfully.")  # NOTE(review): 'main' is the TestON global test object, injected at runtime
+        elif action == "clear":
+            Up4LibCli.__clear_entries(p4rtCli, entries)  # delete every entry assembled above
+
+    @staticmethod
+    def __add_entry(p4rtCli, tableName, actionName, matchFields, actionParams,  # Record one entry; when programming, also push it and roll back on failure
+                    entries, action):
+        if action == "program":
+            p4rtCli.buildP4RtTableEntry(tableName=tableName,
+                                        actionName=actionName,
+                                        actionParams=actionParams,
+                                        matchFields=matchFields)
+            if p4rtCli.pushTableEntry(debug=True) == main.TRUE:
+                main.log.info("*** Entry added.")
+            else:
+                main.log.error("Error during table insertion")
+                Up4LibCli.__clear_entries(p4rtCli, entries)  # undo the entries pushed before this failure
+                return False
+        entries.append({"tableName": tableName, "actionName": actionName,  # keys match buildP4RtTableEntry kwargs so __clear_entries can replay them
+                        "matchFields": matchFields,
+                        "actionParams": actionParams})
+        return True
+
+    @staticmethod
+    def __clear_entries(p4rtCli, entries):  # Delete every previously recorded entry from the UP4 pipeline
+        for i, entry in enumerate(entries):
+            p4rtCli.buildP4RtTableEntry(**entry)
+            if p4rtCli.deleteTableEntry(debug=True) == main.TRUE:
+                main.log.info("*** Entry %d of %d deleted." % (i + 1, len(entries)))
+            else:
+                main.log.error("Error during table delete")