Merge "changes in vlan-native after tests"
diff --git a/TestON/drivers/common/api/deepinsightapidriver.py b/TestON/drivers/common/api/deepinsightapidriver.py
new file mode 100644
index 0000000..af02256
--- /dev/null
+++ b/TestON/drivers/common/api/deepinsightapidriver.py
@@ -0,0 +1,93 @@
+from deepinsight.client import DeepInsightClient
+from drivers.common.apidriver import API
+
+class DeepInsightApiDriver( API ):
+ def __init__( self ):
+ self.name = None
+ self.serverUrl = None
+ self.accessToken = None
+ self.refreshToken = None
+ self.requestAuthHeader = None
+ self.verifySsl = False
+ self.client = None
+ super( DeepInsightApiDriver, self ).__init__()
+
+ def connect(
+ self,
+ **connectargs
+ ):
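+ # Copy connect arguments from the topology file onto this driver and build the
+ # DeepInsight client from the component options (server_url, username, password, verify_ssl)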
+ for key in connectargs:
+ vars(self)[key] = connectargs[key]
+ self.name = self.options["name"]
+ self.client = DeepInsightClient(
+ server_url = self.options["server_url"],
+ username = self.options["username"],
+ password = self.options["password"],
+ verify_ssl = self.options["verify_ssl"] == "True",
+ )
+ self.handle = super( DeepInsightApiDriver, self ).connect()
+ return self.handle
+
+ def disconnect( self, **connectargs ):
+ self.client.logout()
+
+ def getFlows(
+ self,
+ startTimeMs = None,
+ endTimeMs = None,
+ maxResults = 100,
+ srcIp = None,
+ dstIp = None,
+ srcPort = None,
+ dstPort = None,
+ ipProto = None,
+ ):
+ return self.client.get_flows(
+ startTimeMs,
+ endTimeMs,
+ maxResults,
+ srcIp,
+ dstIp,
+ srcPort,
+ dstPort,
+ ipProto,
+ )
+
+ def getSwitchPacketDrop(
+ self,
+ switchId,
+ egressPort = 0,
+ queueId = 0,
+ startTime = None,
+ endTime = None,
+ numBuckets = 100,
+ ):
+ return self.client.get_switch_packet_drop(
+ switchId,
+ egressPort,
+ queueId,
+ startTime,
+ endTime,
+ numBuckets,
+ )
+
+ def getSwitchAnomalies(
+ self, switchId, startTime = None, endTime = None
+ ):
+ return self.client.get_switch_anomalies(
+ switchId, startTime, endTime
+ )
+
+ def getSwitchLatencies(
+ self,
+ switchId,
+ startTime = None,
+ endTime = None,
+ granularity = 1000,
+ ):
+ return self.client.get_switch_latencies(
+ switchId,
+ startTime,
+ endTime,
+ granularity,
+ )
diff --git a/TestON/drivers/common/cli/hostdriver.py b/TestON/drivers/common/cli/hostdriver.py
index a4ab31c..5bfe274 100644
--- a/TestON/drivers/common/cli/hostdriver.py
+++ b/TestON/drivers/common/cli/hostdriver.py
@@ -248,7 +248,7 @@
main.log.error( self.name + ": " + self.handle.before )
return main.FALSE
- def addRouteToHost( self, route, gw, interface=None, sudoRequired=True, purgeOnDisconnect=True ):
+ def addRouteToHost( self, route, gw, interface=None, sudoRequired=True, purgeOnDisconnect=True, cmdPath='/sbin/ip' ):
"""
Adds a static route to the host
Arguments:
@@ -260,7 +260,11 @@
* purgeOnDisconnect - Boolean, remove this route before disconnecting from component
"""
try:
- cmd = "ip route add %s via %s" % ( route, gw )
+ if cmdPath:
+ cmd = cmdPath
+ else:
+ cmd = "ip"
+ cmd += " route add %s via %s" % ( route, gw )
if sudoRequired:
cmd = "sudo %s" % cmd
if interface:
@@ -300,7 +304,7 @@
main.log.error( self.name + ": " + self.handle.before )
return main.FALSE
- def deleteRoute( self, route, gw, interface=None, sudoRequired=True ):
+ def deleteRoute( self, route, gw, interface=None, sudoRequired=True, cmdPath='/sbin/ip' ):
"""
Deletes a static route from the host
Arguments:
@@ -311,7 +315,11 @@
* sudoRequired - Boolean, whether sudo is needed for this command, defaults to True
"""
try:
- cmd = "ip route del %s via %s" % ( route, gw )
+ if cmdPath:
+ cmd = cmdPath
+ else:
+ cmd = "ip"
+ cmd += " route del %s via %s" % ( route, gw )
if sudoRequired:
cmd = "sudo %s" % cmd
if interface:
diff --git a/TestON/tests/USECASE/SegmentRouting/QOS/QOS.params b/TestON/tests/USECASE/SegmentRouting/QOS/QOS.params
new file mode 100644
index 0000000..056b731
--- /dev/null
+++ b/TestON/tests/USECASE/SegmentRouting/QOS/QOS.params
@@ -0,0 +1,120 @@
+<PARAMS>
+ <testcases>1</testcases>
+
+ <GRAPH>
+ <nodeCluster>pairedleaves</nodeCluster>
+ <builds>20</builds>
+ <jobName>QOS</jobName>
+ <branch>master</branch>
+ </GRAPH>
+
+ <persistent_setup>True</persistent_setup>
+
+ <kubernetes>
+ <appName>onos-classic</appName>
+ <namespace>tost</namespace>
+ </kubernetes>
+
+ <UP4>
+ <s1u_address>10.32.11.126</s1u_address>
+ <enb_address>10.32.11.124</enb_address>
+ <ues>
+ <ue1>
+ <pfcp_session_id>100</pfcp_session_id>
+ <ue_address>10.240.0.1</ue_address>
+ <teid>100</teid>
+ <up_id>10</up_id>
+ <down_id>11</down_id>
+ <!-- <qfi>0</qfi>Best Effort -->
+ <five_g>False</five_g>
+ </ue1>
+ <ue2>
+ <pfcp_session_id>100</pfcp_session_id>
+ <ue_address>10.240.0.2</ue_address>
+ <teid>200</teid>
+ <up_id>20</up_id>
+ <down_id>21</down_id>
+ <qfi>2</qfi> <!-- Real Time -->
+ <five_g>False</five_g>
+ </ue2>
+ </ues>
+ </UP4>
+
+ <TREX>
+ <port_stats>0,2</port_stats> <!-- TRex port 0 = PDN, TRex port 2 = eNodeB -->
+ <flows>
+ <BE_FROM_PDN>
+ <name>Best Effort</name>
+ <l1_bps>1500000000</l1_bps>
+ <trex_port>0</trex_port>
+ <packet>
+ <pktlen>1400</pktlen>
+ <ip_src>10.32.11.125</ip_src>
+ <ip_dst>10.240.0.1</ip_dst>
+ <eth_src>3C:EC:EF:3E:0B:A1</eth_src>
+ <eth_dst>00:00:0A:4C:1C:46</eth_dst>
+ </packet>
+ </BE_FROM_PDN>
+ <RT_FROM_PDN>
+ <name>Real Time</name>
+ <l1_bps>40000000</l1_bps>
+ <trex_port>0</trex_port>
+ <packet>
+ <pktlen>1400</pktlen>
+ <ip_src>10.32.11.125</ip_src>
+ <ip_dst>10.240.0.2</ip_dst>
+ <eth_src>3C:EC:EF:3E:0B:A1</eth_src>
+ <eth_dst>00:00:0A:4C:1C:46</eth_dst>
+ </packet>
+ <latency_stats>true</latency_stats>
+ <flow_id>10</flow_id> <!-- Mandatory when latency_stats=true -->
+ <delay>50000</delay> <!-- wait 50 ms till start to let queues fill up -->
+ <expected_min_received>1</expected_min_received>
+ <expected_max_dropped>0</expected_max_dropped>
+ <expected_99_9_percentile_latency>100</expected_99_9_percentile_latency>
+ </RT_FROM_PDN>
+ </flows>
+ </TREX>
+
+ <TOPO>
+ <switchNum>2</switchNum>
+ <linkNum>2</linkNum>
+ </TOPO>
+
+ <ONOS_Logging>
+ <org.onosproject.p4runtime.ctl.client>DEBUG</org.onosproject.p4runtime.ctl.client>
+ <org.onosproject.p4runtime.ctl.client.writerequestimpl>TRACE</org.onosproject.p4runtime.ctl.client.writerequestimpl>
+ <org.onosproject.segmentrouting>DEBUG</org.onosproject.segmentrouting>
+ <org.onosproject.gnmi.ctl>TRACE</org.onosproject.gnmi.ctl>
+ <org.omecproject.up4>TRACE</org.omecproject.up4>
+ </ONOS_Logging>
+ <ONOS_Logging_Reset>
+ <org.onosproject.p4runtime.ctl.client>INFO</org.onosproject.p4runtime.ctl.client>
+ <org.onosproject.p4runtime.ctl.client.writerequestimpl>INFO</org.onosproject.p4runtime.ctl.client.writerequestimpl>
+ <org.onosproject.segmentrouting>DEBUG</org.onosproject.segmentrouting>
+ <org.onosproject.gnmi.ctl>INFO</org.onosproject.gnmi.ctl>
+ <org.omecproject.up4>INFO</org.omecproject.up4>
+ </ONOS_Logging_Reset>
+
+ <ENV>
+ <cellName>productionCell</cellName>
+ <cellApps>drivers,fpm,lldpprovider,hostprovider,netcfghostprovider,drivers.bmv2,org.opencord.fabric-tofino,pipelines.fabric,org.stratumproject.fabric-tna,drivers.barefoot,segmentrouting,t3,up4</cellApps>
+ </ENV>
+
+ <DEPENDENCY>
+ <useCommonConf>False</useCommonConf>
+ <useCommonTopo>True</useCommonTopo>
+ <useBmv2>True</useBmv2>
+ <bmv2SwitchType>stratum</bmv2SwitchType>
+ <switchPrefix></switchPrefix>
+ <stratumRoot>~/stratum</stratumRoot>
+ <topology>trellis_fabric.py</topology>
+ <lib></lib>
+ </DEPENDENCY>
+
+ <SCALE>
+ <size>3</size>
+ <max>3</max>
+ </SCALE>
+
+</PARAMS>
diff --git a/TestON/tests/USECASE/SegmentRouting/QOS/QOS.py b/TestON/tests/USECASE/SegmentRouting/QOS/QOS.py
new file mode 100644
index 0000000..df535c5
--- /dev/null
+++ b/TestON/tests/USECASE/SegmentRouting/QOS/QOS.py
@@ -0,0 +1,77 @@
+class QOS:
+
+ def __init__(self):
+ self.default = ''
+
+ def CASE1(self, main):
+ # Leaf-Edge-Mobile
+ # Attach 2 UEs with different QFI
+ # Generate traffic with Trex for the two UEs
+ # --> no packet drop on RT flow, reasonable latency on RT flow
+ try:
+ from tests.USECASE.SegmentRouting.dependencies.up4 import UP4
+ from tests.USECASE.SegmentRouting.dependencies.trex import Trex
+ from tests.USECASE.SegmentRouting.dependencies.Testcaselib import \
+ Testcaselib as run
+ import json
+ except ImportError as e:
+ main.log.error("Import not found. Exiting the test")
+ main.log.error(e)
+ main.cleanAndExit()
+
+ run.initTest(main)
+ main.log.info(main.Cluster.numCtrls)
+ main.Cluster.setRunningNode(3)
+ run.installOnos(main, skipPackage=True, cliSleep=5)
+
+ main.step("Start P4rt client and setup TRex")
+ # Use the first available ONOS instance CLI
+ onos_cli = main.Cluster.active(0).CLI
+ up4 = UP4()
+ trex = Trex()
+ # Get the P4RT client connected to UP4 in the first available ONOS instance
+ up4.setup(main.Cluster.active(0).p4rtUp4)
+ trex.setup(main.TRexClient)
+
+ main.step("Program PDRs and FARs via UP4")
+ up4.attachUes()
+
+ main.step("Verify PDRs and FARs in ONOS")
+ up4.verifyUp4Flow(onos_cli)
+
+ # Load traffic config for the current test case
+ main.step("Load test JSON config")
+ cfgFile = main.configPath + "/tests/" + "leaf_edge_mobile.json"
+ with open(cfgFile) as cfg:
+ testCfg = json.load(cfg)
+
+ main.step("Send traffic with TRex")
+ for flow in testCfg["flows"]:
+ trex.createFlow(flow)
+ trex.sendAndReceiveTraffic(testCfg["duration"])
+
+ main.step("Log port and flow stats")
+ trex.logPortStats()
+ for flow in testCfg["flows"]:
+ trex.logFlowStats(flow)
+
+ # Assert Flow Stats
+ for flow in testCfg["flows"]:
+ if trex.isFlowStats(flow):
+ main.step("{}: Assert RX Packets".format(flow))
+ trex.assertRxPackets(flow)
+ main.step("{}: Assert Dropped Packets".format(flow))
+ trex.assertDroppedPacket(flow)
+ main.step("{}: Assert 99.9 Percentile Latency".format(flow))
+ trex.assert99_9PercentileLatency(flow)
+
+ main.step("Remove PDRs and FARs via UP4")
+ up4.detachUes()
+
+ main.step("Verify removed PDRs and FARs from ONOS")
+ up4.verifyNoUesFlow(onos_cli)
+
+ main.step("Teardown")
+ trex.teardown()
+ up4.teardown()
+ run.cleanup(main)
diff --git a/TestON/tests/USECASE/SegmentRouting/QOS/QOS.topo b/TestON/tests/USECASE/SegmentRouting/QOS/QOS.topo
new file mode 100644
index 0000000..631d4e8
--- /dev/null
+++ b/TestON/tests/USECASE/SegmentRouting/QOS/QOS.topo
@@ -0,0 +1,55 @@
+<TOPOLOGY>
+ <COMPONENT>
+ <ONOScell>
+ <host>localhost</host> # ONOS "bench" machine
+ <user>jenkins</user>
+ <password></password>
+ <type>OnosClusterDriver</type>
+ <connect_order>50</connect_order>
+ <jump_host></jump_host>
+ <home>~/onos</home> # defines where onos home is on the build machine. Defaults to "~/onos/" if empty.
+ <COMPONENTS>
+ <kubeConfig>~/.kube/dev-pairedleaves-tucson</kubeConfig> # If set, will attempt to use this file for setting up port-forwarding
+ <useDocker>True</useDocker> # Whether to use docker for ONOS nodes
+ <docker_prompt>\$</docker_prompt>
+ <cluster_name></cluster_name> # Used as a prefix for cluster components. Defaults to 'ONOS'
+ <diff_clihost>True</diff_clihost> # if it has different host other than localhost for CLI. True or empty. OC# will be used if True.
+ <karaf_username>karaf</karaf_username>
+ <karaf_password>karaf</karaf_password>
+ <web_user>karaf</web_user>
+ <web_pass>karaf</web_pass>
+ <karafPrompt_username>karaf</karafPrompt_username>
+ <rest_port></rest_port>
+ <prompt></prompt> # TODO: we technically need a few of these, one per component
+ <onos_home>~/onos/</onos_home> # defines where onos home is on the target cell machine. Defaults to entry in "home" if empty.
+ <nodes> 3 </nodes> # number of nodes in the cluster
+ <up4_port>51001</up4_port> # Port where the UP4 P4Runtime server is listening
+ </COMPONENTS>
+ </ONOScell>
+
+ <!-- No need for any HostDriver components, traffic is generated by TRex -->
+ <TRexClient>
+ <host>localhost</host>
+ <type>TrexClientDriver</type>
+ <connect_order>5</connect_order>
+ <COMPONENTS>
+ <trex_address>10.76.28.72</trex_address> <!-- Compute2 -->
+ <trex_config>trex_config.yaml</trex_config> <!-- relative path starting from ./dependencies-->
+ <force_restart>True</force_restart>
+ <software_mode>True</software_mode>
+ </COMPONENTS>
+ </TRexClient>
+
+ <!-- This component is not used directly, but is required by the Testcaselib -->
+ <NetworkBench>
+ <host>10.76.28.66</host>
+ <user>jenkins</user>
+ <password></password>
+ <type>NetworkDriver</type>
+ <connect_order>1</connect_order>
+ <COMPONENTS>
+ </COMPONENTS>
+ </NetworkBench>
+
+ </COMPONENT>
+</TOPOLOGY>
diff --git a/TestON/tests/USECASE/SegmentRouting/QOS/README.md b/TestON/tests/USECASE/SegmentRouting/QOS/README.md
new file mode 100644
index 0000000..fdf9a1b
--- /dev/null
+++ b/TestON/tests/USECASE/SegmentRouting/QOS/README.md
@@ -0,0 +1,4 @@
+# Requirements (TODO)
+
+- ptf
+- trex
diff --git a/TestON/tests/USECASE/SegmentRouting/QOS/__init__.py b/TestON/tests/USECASE/SegmentRouting/QOS/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/TestON/tests/USECASE/SegmentRouting/QOS/__init__.py
diff --git a/TestON/tests/USECASE/SegmentRouting/QOS/dependencies/__init__.py b/TestON/tests/USECASE/SegmentRouting/QOS/dependencies/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/TestON/tests/USECASE/SegmentRouting/QOS/dependencies/__init__.py
diff --git a/TestON/tests/USECASE/SegmentRouting/QOS/dependencies/tests/leaf_edge_mobile.json b/TestON/tests/USECASE/SegmentRouting/QOS/dependencies/tests/leaf_edge_mobile.json
new file mode 100644
index 0000000..f32fa79
--- /dev/null
+++ b/TestON/tests/USECASE/SegmentRouting/QOS/dependencies/tests/leaf_edge_mobile.json
@@ -0,0 +1,5 @@
+{
+ "flows": ["BE_FROM_PDN", "RT_FROM_PDN"],
+ "duration": 10
+}
+
diff --git a/TestON/tests/USECASE/SegmentRouting/QOS/dependencies/trex_config.yaml b/TestON/tests/USECASE/SegmentRouting/QOS/dependencies/trex_config.yaml
new file mode 100644
index 0000000..00a1f85
--- /dev/null
+++ b/TestON/tests/USECASE/SegmentRouting/QOS/dependencies/trex_config.yaml
@@ -0,0 +1,19 @@
+# TRex Port ID=0 --> PCI BUS: d8:00.0, Linux Intf: enp216s0f0 connected to leaf1/0 (PDN)
+# TRex Port ID=1 --> PCI BUS: d8:00.1, Linux Intf: enp216s0f1 not connected, but required by TRex to have an even number of interfaces
+# TRex Port ID=2 --> PCI BUS: 5e:00.0, Linux Intf: enp94s0f0 connected to leaf2/0 (eNodeB)
+# TRex Port ID=3 --> PCI BUS: 5e:00.1, Linux Intf: enp94s0f1 connected to leaf2/4
+
+- version: 2
+ port_limit: 4
+ interfaces: [ 'd8:00.0', 'd8:00.1', '5e:00.0', '5e:00.1']
+ port_bandwidth_gb: 40
+ c: 16
+ port_info:
+ - src_mac: 40:A6:B7:22:AB:40
+ dest_mac: 00:00:0A:4C:1C:46
+ - src_mac: 40:A6:B7:22:AB:41
+ dest_mac: 00:00:0A:4C:1C:46
+ - src_mac: 40:A6:B7:22:AB:20
+ dest_mac: 00:00:0A:4C:1C:46
+ - src_mac: 40:A6:B7:22:AB:21
+ dest_mac: 00:00:0A:4C:1C:46
diff --git a/TestON/tests/USECASE/SegmentRouting/SRStaging/SRStaging.topo b/TestON/tests/USECASE/SegmentRouting/SRStaging/SRStaging.topo
index 2f4c8f2..b9d705e 100644
--- a/TestON/tests/USECASE/SegmentRouting/SRStaging/SRStaging.topo
+++ b/TestON/tests/USECASE/SegmentRouting/SRStaging/SRStaging.topo
@@ -183,7 +183,7 @@
</Host3>
<ng40vm>
- <host>192.168.122.102</host>
+ <host>10.92.1.95</host>
<user>ng40</user>
<password>ng40</password>
<type>HostDriver</type>
diff --git a/TestON/tests/USECASE/SegmentRouting/SRStaging/SRpairedLeaves/SRpairedLeaves.params b/TestON/tests/USECASE/SegmentRouting/SRStaging/SRpairedLeaves/SRpairedLeaves.params
index 2cb31b3..85f3fd6 100644
--- a/TestON/tests/USECASE/SegmentRouting/SRStaging/SRpairedLeaves/SRpairedLeaves.params
+++ b/TestON/tests/USECASE/SegmentRouting/SRStaging/SRpairedLeaves/SRpairedLeaves.params
@@ -1,5 +1,5 @@
<PARAMS>
- <testcases>1,2,101,102,103,104,201,202,203,204,205,206,207,208,301</testcases>
+ <testcases>1,2,101,102,103,104,301,302</testcases>
<GRAPH>
<nodeCluster>pairedleaves</nodeCluster>
diff --git a/TestON/tests/USECASE/SegmentRouting/SRStaging/SRpairedLeaves/SRpairedLeaves.py b/TestON/tests/USECASE/SegmentRouting/SRStaging/SRpairedLeaves/SRpairedLeaves.py
index 96618cf..5911832 100644
--- a/TestON/tests/USECASE/SegmentRouting/SRStaging/SRpairedLeaves/SRpairedLeaves.py
+++ b/TestON/tests/USECASE/SegmentRouting/SRStaging/SRpairedLeaves/SRpairedLeaves.py
@@ -48,6 +48,7 @@
"""
try:
from tests.USECASE.SegmentRouting.SRStaging.dependencies.SRStagingTest import SRStagingTest
+ from tests.USECASE.SegmentRouting.dependencies.Testcaselib import Testcaselib as run
import json
except ImportError:
main.log.error( "SRStagingTest not found. Exiting the test" )
@@ -63,6 +64,7 @@
topology='0x2',
onosNodes=3,
description="%s tests on the %s pod" % ( descPrefix, pod ) )
+ hosts = [ 'h1', 'h2', 'h3', 'mgmt' ]
run.pingAllFabricIntfs( main, hosts, dumpFlows=False )
main.funcs.cleanup( main )
@@ -670,3 +672,61 @@
# Cleanup
main.log.warn( json.dumps( main.downtimeResults, indent=4, sort_keys=True ) )
main.funcs.cleanup( main )
+
+ def CASE302( self, main ):
+ """
+ Send ping packets from one host to another host and check flows from DeepInsight.
+ """
+
+ try:
+ from tests.USECASE.SegmentRouting.SRStaging.dependencies.SRStagingTest import SRStagingTest
+ from tests.USECASE.SegmentRouting.dependencies.Testcaselib import Testcaselib as run
+ from core import utilities
+ import time
+ import socket
+ except ImportError as e:
+ main.log.exception( "SRStagingTest not found. Exiting the test" )
+ main.cleanAndExit()
+ try:
+ main.funcs
+ except ( NameError, AttributeError ):
+ main.funcs = SRStagingTest()
+
+ pod = main.params['GRAPH'].get( 'nodeCluster', "hardware" )
+ main.cfgName = 'CASE302'
+ main.funcs.setupTest( main,
+ topology='0x2',
+ onosNodes=3,
+ description="INT flow report tests on %s POD" % ( pod ) )
+ startTimeMs = ( time.time() - 5 ) * 1000
+ run.verifyPing( main, ['h1'], ['h2'] )
+ endTimeMs = ( time.time() + 5 ) * 1000
+ main.step( "Checking flow report from DeepInsight" )
+
+ def getFiveTupleCount(*args, **kwargs):
+ flows = main.DeepInsight.getFlows(
+ startTimeMs=startTimeMs,
+ endTimeMs=endTimeMs,
+ srcIp=main.h1.interfaces[0]['ips'][0],
+ dstIp=main.h2.interfaces[0]['ips'][0],
+ ipProto=socket.IPPROTO_ICMP
+ )
+ if "FiveTupleCount" in flows:
+ return flows["FiveTupleCount"]
+ else:
+ return 0
+
+ # Need to wait a few seconds until the DeepInsight database is updated.
+ fiveTupleCount = utilities.retry(
+ f=getFiveTupleCount,
+ retValue=0,
+ attempts=60,
+ )
+
+ utilities.assert_equals(
+ expect=1, actual=fiveTupleCount,
+ onpass="Got 1 flow report from DeepInsight as expected.",
+ onfail="Got %d flow reports from DeepInsight (expect 1)" % ( fiveTupleCount )
+ )
+
+ main.funcs.cleanup( main )
diff --git a/TestON/tests/USECASE/SegmentRouting/SRStaging/SRpairedLeaves/SRpairedLeaves.topo b/TestON/tests/USECASE/SegmentRouting/SRStaging/SRpairedLeaves/SRpairedLeaves.topo
index 40bf030..e529c6f 100644
--- a/TestON/tests/USECASE/SegmentRouting/SRStaging/SRpairedLeaves/SRpairedLeaves.topo
+++ b/TestON/tests/USECASE/SegmentRouting/SRStaging/SRpairedLeaves/SRpairedLeaves.topo
@@ -184,5 +184,19 @@
</COMPONENTS>
</NetworkBench>
+ <DeepInsight>
+ <host>10.76.28.74</host>
+ <user>jenkins</user>
+ <password></password>
+ <type>DeepInsightApiDriver</type>
+ <connect_order>1</connect_order>
+ <COMPONENTS>
+ <server_url>https://10.76.28.74:30000</server_url>
+ <username>diadmin</username>
+ <password>diadmin</password>
+ <verify_ssl>False</verify_ssl>
+ </COMPONENTS>
+ </DeepInsight>
+
</COMPONENT>
</TOPOLOGY>
diff --git a/TestON/tests/USECASE/SegmentRouting/SRStaging/dependencies/SRStagingTest.py b/TestON/tests/USECASE/SegmentRouting/SRStaging/dependencies/SRStagingTest.py
index a3ab6ba..f0f30a5 100644
--- a/TestON/tests/USECASE/SegmentRouting/SRStaging/dependencies/SRStagingTest.py
+++ b/TestON/tests/USECASE/SegmentRouting/SRStaging/dependencies/SRStagingTest.py
@@ -1353,8 +1353,9 @@
def cleanup( self, main, headerOrder=None ):
try:
- for component in main.trafficComponents:
- main.Network.removeComponent( component.name )
+ if hasattr( main, "trafficComponents" ):
+ for component in main.trafficComponents:
+ main.Network.removeComponent( component.name )
main.trafficComponents = []
except Exception:
main.log.exception( "Error cleaning up traffic components" )
diff --git a/TestON/tests/USECASE/SegmentRouting/UP4/UP4.params b/TestON/tests/USECASE/SegmentRouting/UP4/UP4.params
index 113d33d..98dc41e 100644
--- a/TestON/tests/USECASE/SegmentRouting/UP4/UP4.params
+++ b/TestON/tests/USECASE/SegmentRouting/UP4/UP4.params
@@ -1,5 +1,5 @@
<PARAMS>
- <testcases>1</testcases>
+ <testcases>1,2</testcases>
<GRAPH>
<nodeCluster>pairedleaves</nodeCluster>
diff --git a/TestON/tests/USECASE/SegmentRouting/UP4/UP4.py b/TestON/tests/USECASE/SegmentRouting/UP4/UP4.py
index a7f9804..14b845f 100644
--- a/TestON/tests/USECASE/SegmentRouting/UP4/UP4.py
+++ b/TestON/tests/USECASE/SegmentRouting/UP4/UP4.py
@@ -5,199 +5,248 @@
# TODO: add test case that checks entries are being inserted and deleted from ONOS correctly
def CASE1(self, main):
+ main.case("Fabric UPF traffic terminated in the fabric")
"""
- Attach UE
+ Program PDRs and FARs for UEs
+ Verify PDRs and FARs
Generate traffic from UE to PDN
Verify traffic received from PDN
Generate traffic from PDN to UE
Verify traffic received from UE
- Detach UE
+ Remove PDRs and FARs for UEs
+ Verify removed PDRs and FARs
"""
- UE_PORT = 400
- PDN_PORT = 800
- GPDU_PORT = 2152
try:
- from tests.USECASE.SegmentRouting.dependencies.up4libcli import \
- Up4LibCli
+ from tests.USECASE.SegmentRouting.dependencies.up4 import UP4
from tests.USECASE.SegmentRouting.dependencies.Testcaselib import \
Testcaselib as run
- from distutils.util import strtobool
except ImportError as e:
main.log.error("Import not found. Exiting the test")
main.log.error(e)
main.cleanAndExit()
- # TODO: Move to a setup script
run.initTest(main)
main.log.info(main.Cluster.numCtrls)
main.Cluster.setRunningNode(3)
run.installOnos(main, skipPackage=True, cliSleep=5)
- # Get the P4RT client connected to UP4 in the first available ONOS instance
- up4Client = main.Cluster.active(0).p4rtUp4
-
- s1u_address = main.params["UP4"]["s1u_address"]
- enb_address = main.params["UP4"]["enb_address"]
- router_mac = main.params["UP4"]["router_mac"]
-
- pdn_host = getattr(main, main.params["UP4"]["pdn_host"])
- pdn_interface = pdn_host.interfaces[0]
-
- enodeb_host = getattr(main, main.params["UP4"]["enodeb_host"])
- enodeb_interface = enodeb_host.interfaces[0]
-
- emulated_ues = main.params["UP4"]['ues']
- n_ues = len(emulated_ues)
-
main.step("Start scapy and p4rt client")
- pdn_host.startScapy(ifaceName=pdn_interface["name"])
- enodeb_host.startScapy(ifaceName=enodeb_interface["name"],
- enableGtp=True)
- up4Client.startP4RtClient()
+ # Use the first available ONOS instance CLI
+ onos_cli = main.Cluster.active(0).CLI
+ up4 = UP4()
+ # Get the P4RT client connected to UP4 in the first available ONOS instance
+ up4.setup(main.Cluster.active(0).p4rtUp4)
- # TODO: move to library in dependencies
- main.step("Attach UEs")
- for ue in emulated_ues.values():
- # Sanitize values coming from the params file
- if "five_g" in ue:
- ue["five_g"] = bool(strtobool(ue["five_g"]))
- if "qfi" in ue and ue["qfi"] == "":
- ue["qfi"] = None
- Up4LibCli.attachUe(up4Client, s1u_address=s1u_address,
- enb_address=enb_address,
- **ue)
+ main.step("Program PDRs and FARs via UP4")
+ up4.attachUes()
- # ----------------- Test Upstream traffic (enb->pdn)
+ main.step("Verify PDRs and FARs in ONOS")
+ up4.verifyUp4Flow(onos_cli)
+
+ # ------- Test Upstream traffic (enb->pdn)
main.step("Test upstream traffic")
- # Scapy filter needs to start before sending traffic
- pkt_filter_upstream = ""
- for ue in emulated_ues.values():
- if "ue_address" in ue:
- if len(pkt_filter_upstream) != 0:
- pkt_filter_upstream += " or "
- pkt_filter_upstream += "src host " + ue["ue_address"]
- pkt_filter_upstream = "ip and udp dst port %s and (%s) and dst host %s" % \
- (PDN_PORT, pkt_filter_upstream,
- pdn_interface["ips"][0])
- main.log.info("Start listening on %s intf %s" %
- (main.params["UP4"]["pdn_host"], pdn_interface["name"]))
- main.log.debug("BPF Filter Upstream: \n %s" % pkt_filter_upstream)
- pdn_host.startFilter(ifaceName=pdn_interface["name"],
- sniffCount=n_ues,
- pktFilter=pkt_filter_upstream)
+ up4.testUpstreamTraffic()
- main.log.info("Sending %d packets from eNodeB host" % len(emulated_ues))
- for ue in emulated_ues.values():
- enodeb_host.buildEther()
- enodeb_host.buildIP(src=enb_address, dst=s1u_address)
- enodeb_host.buildUDP(ipVersion=4, dport=GPDU_PORT)
- # FIXME: With newer scapy TEID becomes teid (required for Scapy 2.4.5)
- enodeb_host.buildGTP(gtp_type=0xFF, TEID=int(ue["teid"]))
- enodeb_host.buildIP(overGtp=True, src=ue["ue_address"],
- dst=pdn_interface["ips"][0])
- enodeb_host.buildUDP(ipVersion=4, overGtp=True, sport=UE_PORT,
- dport=PDN_PORT)
-
- enodeb_host.sendPacket(iface=enodeb_interface["name"])
-
- finished = pdn_host.checkFilter()
- packets = ""
- if finished:
- packets = pdn_host.readPackets(detailed=True)
- for p in packets.splitlines():
- main.log.debug(p)
- # We care only of the last line from readPackets
- packets = packets.splitlines()[-1]
- else:
- kill = pdn_host.killFilter()
- main.log.debug(kill)
-
- fail = False
- if len(emulated_ues) != packets.count('Ether'):
- fail = True
- msg = "Failed to capture packets in PDN. "
- else:
- msg = "Correctly captured packet in PDN. "
- # We expect exactly 1 packet per UE
- pktsFiltered = [packets.count("src=" + ue["ue_address"])
- for ue in emulated_ues.values()]
- if pktsFiltered.count(1) != len(pktsFiltered):
- fail = True
- msg += "More than one packet per UE in downstream. "
- else:
- msg += "One packet per UE in upstream. "
-
- utilities.assert_equal(
- expect=False, actual=fail, onpass=msg, onfail=msg)
-
- # --------------- Test Downstream traffic (pdn->enb)
+ # ------- Test Downstream traffic (pdn->enb)
main.step("Test downstream traffic")
- pkt_filter_downstream = "ip and udp src port %d and udp dst port %d and dst host %s and src host %s" % (
- GPDU_PORT, GPDU_PORT, enb_address, s1u_address)
- main.log.info("Start listening on %s intf %s" % (
- main.params["UP4"]["enodeb_host"], enodeb_interface["name"]))
- main.log.debug("BPF Filter Downstream: \n %s" % pkt_filter_downstream)
- enodeb_host.startFilter(ifaceName=enodeb_interface["name"],
- sniffCount=len(emulated_ues),
- pktFilter=pkt_filter_downstream)
+ up4.testDownstreamTraffic()
- main.log.info("Sending %d packets from PDN host" % len(emulated_ues))
- for ue in emulated_ues.values():
- # From PDN we have to set dest MAC, otherwise scapy will do ARP
- # request for the UE IP address.
- pdn_host.buildEther(dst=router_mac)
- pdn_host.buildIP(src=pdn_interface["ips"][0],
- dst=ue["ue_address"])
- pdn_host.buildUDP(ipVersion=4, sport=PDN_PORT, dport=UE_PORT)
- pdn_host.sendPacket(iface=pdn_interface["name"])
+ main.step("Remove PDRs and FARs via UP4")
+ up4.detachUes()
- finished = enodeb_host.checkFilter()
- packets = ""
- if finished:
- packets = enodeb_host.readPackets(detailed=True)
- for p in packets.splitlines():
- main.log.debug(p)
- # We care only of the last line from readPackets
- packets = packets.splitlines()[-1]
- else:
- kill = enodeb_host.killFilter()
- main.log.debug(kill)
+ main.step("Verify removed PDRs and FARs from ONOS")
+ up4.verifyNoUesFlow(onos_cli)
- # The BPF filter might capture non-GTP packets because we can't filter
- # GTP header in BPF. For this reason, check that the captured packets
- # are from the expected tunnels.
- # TODO: check inner UDP and IP fields as well
- # FIXME: with newer scapy TEID becomes teid (required for Scapy 2.4.5)
- pktsFiltered = [packets.count("TEID=" + hex(int(ue["teid"])) + "L ")
- for ue in emulated_ues.values()]
-
- fail = False
- if len(emulated_ues) != sum(pktsFiltered):
- fail = True
- msg = "Failed to capture packets in eNodeB. "
- else:
- msg = "Correctly captured packets in eNodeB. "
- # We expect exactly 1 packet per UE
- if pktsFiltered.count(1) != len(pktsFiltered):
- fail = True
- msg += "More than one packet per GTP TEID in downstream. "
- else:
- msg += "One packet per GTP TEID in downstream. "
-
- utilities.assert_equal(
- expect=False, actual=fail, onpass=msg, onfail=msg)
-
- # Detach UEs
- main.step("Detach UEs")
- for ue in emulated_ues.values():
- # No need to sanitize values, already sanitized during attachment
- Up4LibCli.detachUe(up4Client, s1u_address=s1u_address,
- enb_address=enb_address,
- **ue)
-
- # Teardown
main.step("Stop scapy and p4rt client")
- enodeb_host.stopScapy()
- pdn_host.stopScapy()
- up4Client.stopP4RtClient()
+ up4.teardown()
+ run.cleanup(main)
+
+ def CASE2(self, main):
+ main.case("BESS traffic routed")
+ """
+ Program PDRs and FARs for UEs managed via UP4
+ Verify PDRs and FARs
+ Verify Upstream Traffic: eNB -> Fabric -> BESS (encapped)
+ Verify Upstream Traffic: BESS -> Fabric -> PDN (not encapped)
+ Verify Downstream Traffic: PDN -> Fabric -> BESS (not encapped)
+ Verify Downstream Traffic: BESS -> Fabric -> eNB (encapped)
+ Remove PDRs and FARs for UEs managed via UP4
+ Verify removed PDRs and FARs
+ """
+ BESS_TEID = 300
+ BESS_UE_ADDR = "10.241.0.1"
+ GPDU_PORT = 2152
+ UE_PORT = 400
+ PDN_PORT = 800
+ try:
+ from tests.USECASE.SegmentRouting.dependencies.up4 import UP4
+ from tests.USECASE.SegmentRouting.dependencies.Testcaselib import \
+ Testcaselib as run
+ except ImportError as e:
+ main.log.error("Import not found. Exiting the test")
+ main.log.error(e)
+ main.cleanAndExit()
+
+ run.initTest(main)
+ main.log.info(main.Cluster.numCtrls)
+ main.Cluster.setRunningNode(3)
+ run.installOnos(main, skipPackage=True, cliSleep=5)
+
+ main.step("Start scapy and p4rt client + Scapy on BESS Host")
+ # Use the first available ONOS instance CLI
+ onos_cli = main.Cluster.active(0).CLI
+ up4 = UP4()
+ # Get the P4RT client connected to UP4 in the first available ONOS instance
+ up4.setup(main.Cluster.active(0).p4rtUp4)
+
+ # Setup the emulated BESS host and required parameters
+ bess_host = main.Compute2 # FIXME: Parametrize?
+ bess_interface = bess_host.interfaces[0]
+ bess_s1u_address = bess_interface["ips"][0]
+ bess_host.startScapy(ifaceName=bess_interface["name"], enableGtp=True)
+ enodeb_host = up4.enodeb_host
+ enodeb_interface = up4.enodeb_interface
+ pdn_host = up4.pdn_host
+ pdn_interface = up4.pdn_interface
+
+ main.step("Program PDRs and FARs for UEs via UP4")
+ up4.attachUes()
+
+ main.step("Verify PDRs and FARs in ONOS")
+ up4.verifyUp4Flow(onos_cli)
+
+ # ------------------- UPSTREAM -------------------
+ # ------- eNB -> fabric -> BESS (encapped)
+ main.step("Test upstream eNB -> fabric -> BESS")
+ # Start filter before sending packets, BESS should receive GTP encapped
+ # packets
+ pkt_filter_upstream = "ip and udp src port %d and udp dst port %d and src host %s and dst host %s" % (
+ GPDU_PORT, GPDU_PORT, up4.enb_address, bess_s1u_address)
+ main.log.info("Start listening on %s intf %s" % (
+ bess_host.name, bess_interface["name"]))
+ main.log.debug("BPF Filter BESS Upstream: \n %s" % pkt_filter_upstream)
+ bess_host.startFilter(ifaceName=bess_interface["name"],
+ sniffCount=1,
+ pktFilter=pkt_filter_upstream)
+ # Send GTP Packet
+ UP4.buildGtpPacket(enodeb_host,
+ src_ip_outer=up4.enb_address,
+ dst_ip_outer=bess_s1u_address,
+ src_ip_inner=BESS_UE_ADDR,
+ dst_ip_inner=pdn_interface["ips"][0],
+ src_udp_inner=UE_PORT,
+ dst_udp_inner=PDN_PORT,
+ teid=BESS_TEID)
+ enodeb_host.sendPacket()
+
+ packets = UP4.checkFilterAndGetPackets(bess_host)
+ # FIXME: with newer scapy TEID becomes teid (required for Scapy 2.4.5)
+ n_packets = packets.count("TEID=" + hex(BESS_TEID) + "L ")
+ tot_packets = packets.count('Ether')
+ utilities.assert_equal(expect=True,
+ actual=n_packets == 1 and tot_packets == 1,
+ onpass="BESS correctly received 1 GTP encapped packet",
+ onfail="ERROR: BESS received %d GTP encapped packets and filter captured %d packets" % (
+ n_packets, tot_packets))
+
+ # ------- BESS -> fabric -> PDN (not-encapped)
+ main.step("Test upstream BESS -> fabric -> PDN")
+ # Start filter before sending packets, PDN should receive non-GTP packet
+ pkt_filter_upstream = "ip and udp src port %d and udp dst port %d and src host %s and dst host %s" % (
+ UE_PORT, PDN_PORT, BESS_UE_ADDR, pdn_interface["ips"][0])
+ main.log.info("Start listening on %s intf %s" % (
+ pdn_host.name, pdn_interface["name"]))
+ main.log.debug("BPF Filter PDN Upstream: \n %s" % pkt_filter_upstream)
+ pdn_host.startFilter(ifaceName=pdn_interface["name"],
+ sniffCount=1,
+ pktFilter=pkt_filter_upstream)
+ # Send UDP Packet
+ UP4.buildUdpPacket(bess_host,
+ src_ip=BESS_UE_ADDR,
+ dst_ip=pdn_interface["ips"][0],
+ src_udp=UE_PORT,
+ dst_udp=PDN_PORT)
+ bess_host.sendPacket()
+
+ packets = UP4.checkFilterAndGetPackets(pdn_host)
+ tot_packets = packets.count('Ether')
+ utilities.assert_equal(expect=True,
+ actual=tot_packets == 1,
+ onpass="PDN correctly received 1 packet",
+ onfail="ERROR: PDN received %d packets" % (
+ tot_packets))
+ # ------------------------------------------------
+
+ # ------------------ DOWNSTREAM ------------------
+ # ------- PDN -> fabric -> BESS (not-encapped)
+ main.step("Test downstream PDN -> fabric -> BESS")
+ pkt_filter_downstream = "ip and udp src port %d and udp dst port %d and src host %s and dst host %s" % (
+ PDN_PORT, UE_PORT, pdn_interface["ips"][0], BESS_UE_ADDR)
+ main.log.info("Start listening on %s intf %s" % (
+ bess_host.name, bess_interface["name"]))
+ main.log.debug(
+ "BPF Filter BESS Downstream: \n %s" % pkt_filter_downstream)
+ bess_host.startFilter(ifaceName=bess_interface["name"],
+ sniffCount=1,
+ pktFilter=pkt_filter_downstream)
+ UP4.buildUdpPacket(pdn_host,
+ dst_eth=up4.router_mac,
+ src_ip=pdn_interface["ips"][0],
+ dst_ip=BESS_UE_ADDR,
+ src_udp=PDN_PORT,
+ dst_udp=UE_PORT)
+ pdn_host.sendPacket()
+
+ packets = UP4.checkFilterAndGetPackets(bess_host)
+
+ tot_packets = packets.count('Ether')
+ utilities.assert_equal(expect=True,
+ actual=tot_packets == 1,
+ onpass="BESS correctly received 1 packet",
+ onfail="ERROR: BESS received %d packets" % (
+ tot_packets))
+
+ # ------- BESS -> fabric -> eNB (encapped)
+ main.step("Test downstream BESS -> fabric -> eNB")
+ pkt_filter_downstream = "ip and udp src port %d and udp dst port %d and src host %s and dst host %s" % (
+ GPDU_PORT, GPDU_PORT, bess_s1u_address, up4.enb_address)
+ main.log.info("Start listening on %s intf %s" % (
+ enodeb_host.name, enodeb_interface["name"]))
+ main.log.debug(
+ "BPF Filter BESS Downstream: \n %s" % pkt_filter_downstream)
+ enodeb_host.startFilter(ifaceName=enodeb_interface["name"],
+ sniffCount=1,
+ pktFilter=pkt_filter_downstream)
+ # Build GTP packet from BESS host
+ UP4.buildGtpPacket(bess_host,
+ src_ip_outer=bess_s1u_address,
+ dst_ip_outer=up4.enb_address,
+ src_ip_inner=pdn_interface["ips"][0],
+ dst_ip_inner=BESS_UE_ADDR,
+ src_udp_inner=PDN_PORT,
+ dst_udp_inner=UE_PORT,
+ teid=BESS_TEID)
+ bess_host.sendPacket()
+
+ packets = UP4.checkFilterAndGetPackets(enodeb_host)
+
+ # FIXME: with newer scapy TEID becomes teid (required for Scapy 2.4.5)
+ n_packets = packets.count("TEID=" + hex(BESS_TEID) + "L ")
+ tot_packets = packets.count('Ether')
+ utilities.assert_equal(expect=True,
+ actual=n_packets == 1 and tot_packets == 1,
+ onpass="eNodeB correctly received 1 GTP encapped packet",
+ onfail="ERROR: eNodeb received %d GTP encapped packets and filter captured %d packets" % (
+ n_packets, tot_packets))
+ # ------------------------------------------------
+
+ main.step("Remove PDRs and FARs for UEs via UP4")
+ up4.detachUes()
+
+ main.step("Verify removed PDRs and FARs from ONOS")
+ up4.verifyNoUesFlow(onos_cli)
+
+ main.step("Stop scapy and p4rt client")
+ up4.teardown()
+ bess_host.stopScapy()
run.cleanup(main)
diff --git a/TestON/tests/USECASE/SegmentRouting/UP4/UP4.topo b/TestON/tests/USECASE/SegmentRouting/UP4/UP4.topo
index 5dac34a..8605cad 100644
--- a/TestON/tests/USECASE/SegmentRouting/UP4/UP4.topo
+++ b/TestON/tests/USECASE/SegmentRouting/UP4/UP4.topo
@@ -56,6 +56,35 @@
</COMPONENTS>
</Compute1>
+ <Compute2>
+ <host>10.76.28.72</host>
+ <user>jenkins</user>
+ <password></password>
+ <type>HostDriver</type>
+ <connect_order>7</connect_order>
+ <jump_host></jump_host>
+ <COMPONENTS>
+ <mac></mac>
+ <inband>false</inband>
+ <dhcp>True</dhcp>
+ <ip>10.32.11.3</ip>
+ <shortName>h2</shortName>
+ <port1></port1>
+ <link1></link1>
+ <ifaceName>pairbond</ifaceName>
+ <scapy_path>/usr/bin/scapy</scapy_path>
+ <routes>
+ <route1>
+ <network></network>
+ <netmask></netmask>
+ <gw></gw>
+ <interface></interface>
+ </route1>
+ </routes>
+ <sudo_required>true</sudo_required>
+ </COMPONENTS>
+ </Compute2>
+
<Compute3>
<host>10.76.28.68</host>
<user>jenkins</user>
diff --git a/TestON/tests/USECASE/SegmentRouting/dependencies/scapy_helper.py b/TestON/tests/USECASE/SegmentRouting/dependencies/scapy_helper.py
new file mode 100644
index 0000000..80546f4
--- /dev/null
+++ b/TestON/tests/USECASE/SegmentRouting/dependencies/scapy_helper.py
@@ -0,0 +1,88 @@
+from scapy.contrib.gtp import GTP_U_Header, GTPPDUSessionContainer
+from scapy.layers.inet import IP, UDP
+from scapy.layers.l2 import Ether
+import codecs
+
+UDP_GTP_PORT = 2152
+DEFAULT_GTP_TUNNEL_SPORT = 1234 # arbitrary, but different from 2152
+
+IP_HDR_BYTES = 20
+UDP_HDR_BYTES = 8
+GTPU_HDR_BYTES = 8
+GTPU_OPTIONS_HDR_BYTES = 4
+GTPU_EXT_PSC_BYTES = 4
+
+
+def simple_gtp_udp_packet(
+ eth_dst=None,
+ eth_src=None,
+ ip_src="192.168.0.1",
+ ip_dst="192.168.0.2",
+ s1u_addr="100.0.0.1",
+ enb_addr="192.168.101.1",
+ ip_ttl=64,
+ gtp_teid=0xFF, # dummy teid
+ pktlen=136,
+ ext_psc_type=None,
+ ext_psc_qfi=0,
+):
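+ # pktlen is the total length of the resulting GTP-U packet: shrink the inner UDP payload
+ # to make room for the outer IP/UDP/GTP-U headers (and the PSC extension, if requested)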
+ pktlen = pktlen - IP_HDR_BYTES - UDP_HDR_BYTES - GTPU_HDR_BYTES
+ if ext_psc_type is not None:
+ pktlen = pktlen - GTPU_OPTIONS_HDR_BYTES - GTPU_EXT_PSC_BYTES
+ pkt = simple_udp_packet(eth_src=eth_src, eth_dst=eth_dst, ip_src=ip_src,
+ ip_dst=ip_dst, pktlen=pktlen)
+ gtp_pkt = pkt_add_gtp(
+ pkt,
+ out_ipv4_src=enb_addr,
+ out_ipv4_dst=s1u_addr,
+ teid=gtp_teid,
+ ext_psc_type=ext_psc_type,
+ ext_psc_qfi=ext_psc_qfi,
+ )
+ gtp_pkt[Ether].src = eth_src
+ gtp_pkt[Ether].dst = eth_dst
+ gtp_pkt[IP].ttl = ip_ttl
+ return gtp_pkt
+
+
+def pkt_add_gtp(
+ pkt,
+ out_ipv4_src,
+ out_ipv4_dst,
+ teid,
+ sport=DEFAULT_GTP_TUNNEL_SPORT,
+ dport=UDP_GTP_PORT,
+ ext_psc_type=None,
+ ext_psc_qfi=None,
+):
+ gtp_pkt = (
+ Ether(src=pkt[Ether].src, dst=pkt[Ether].dst)
+ / IP(src=out_ipv4_src, dst=out_ipv4_dst, tos=0, id=0x1513, flags=0,
+ frag=0, )
+ / UDP(sport=sport, dport=dport, chksum=0)
+ / GTP_U_Header(gtp_type=255, teid=teid)
+ )
+ if ext_psc_type is not None:
+ # Add QoS Flow Identifier (QFI) as an extension header (required for 5G RAN)
+ gtp_pkt = gtp_pkt / GTPPDUSessionContainer(type=ext_psc_type,
+ QFI=ext_psc_qfi)
+ return gtp_pkt / pkt[Ether].payload
+
+
+# Simplified version of simple_udp_packet from https://github.com/p4lang/ptf/blob/master/src/ptf/testutils.py
+def simple_udp_packet(
+ pktlen=100,
+ eth_dst="00:01:02:03:04:05",
+ eth_src="00:06:07:08:09:0a",
+ ip_src="192.168.0.1",
+ ip_dst="192.168.0.2",
+ udp_sport=1234,
+ udp_dport=80,
+ udp_payload=None,
+):
+ pkt = Ether(src=eth_src, dst=eth_dst) / IP(src=ip_src, dst=ip_dst) / UDP(
+ sport=udp_sport, dport=udp_dport)
+ if udp_payload:
+ pkt = pkt / udp_payload
+ return pkt / codecs.decode(
+ "".join(["%02x" % (x % 256) for x in range(pktlen - len(pkt))]), "hex")
diff --git a/TestON/tests/USECASE/SegmentRouting/dependencies/trex.py b/TestON/tests/USECASE/SegmentRouting/dependencies/trex.py
new file mode 100644
index 0000000..9245f16
--- /dev/null
+++ b/TestON/tests/USECASE/SegmentRouting/dependencies/trex.py
@@ -0,0 +1,186 @@
+from distutils.util import strtobool
+from tests.USECASE.SegmentRouting.dependencies import scapy_helper
+
+
+class Trex:
+ """
+ Utility that manages interaction with the TRex server via a TrexClientDriver component
+ Example params:
+ <TREX>
+ <port_stats>0,1</port_stats>
+ <flows>
+ <RT_FROM_UE>
+ <name>Real Time</name>
+ <l1_bps>40000000</l1_bps>
+ <trex_port>0</trex_port>
+ <packet>
+ <pktlen>1400</pktlen>
+ <ip_src>10.240.0.2</ip_src>
+ <ip_dst>10.32.11.101</ip_dst>
+ <eth_src>3C:EC:EF:3E:0B:A0</eth_src>
+ <eth_dst>00:00:0A:4C:1C:46</eth_dst>
+ <gtp_teid>200</gtp_teid>
+ <s1u_addr>10.32.11.126</s1u_addr>
+ <enb_addr>10.32.11.100</enb_addr>
+ </packet>
+ <latency_stats>true</latency_stats>
+ <flow_id>10</flow_id> <!-- Mandatory when latency_stats=true -->
+ <delay>50000</delay> <!-- wait 50 ms till start to let queues fill up -->
+ <expected_min_received>1</expected_min_received>
+ <expected_max_dropped>0</expected_max_dropped>
+ <expected_max_latency>1500</expected_max_latency>
+ <expected_99_9_percentile_latency>100</expected_99_9_percentile_latency>
+ </RT_FROM_UE>
+ </flows>
+ </TREX>
+ """
+
+ def __init__(self):
+ self.trex_client = None
+ self.traffic_flows = {}
+ self.port_stats = []
+ self.packets = {} # Per-flow dictionary of packets
+
+ def setup(self, trex_client):
+ self.trex_client = trex_client
+ self.traffic_flows = main.params["TREX"]["flows"]
+ if "port_stats" in main.params["TREX"] and \
+ main.params["TREX"].get("port_stats") is not '':
+ self.port_stats = [int(p) for p in
+ main.params["TREX"].get("port_stats").split(",")]
+ self.trex_client.setupTrex(main.configPath)
+
+ def teardown(self):
+ self.trex_client.stopTrexServer()
+
+ def createFlow(self, flow_name):
+ if flow_name not in self.traffic_flows:
+ main.log.error("CFG flow not present in params")
+ return False
+ self.traffic_flows[flow_name]["packet"] = Trex.__sanitizePacketConfig(
+ self.traffic_flows[flow_name]["packet"])
+ if "gtp_teid" in self.traffic_flows[flow_name]["packet"]:
+ # packets must be GTP encapped
+ self.packets[flow_name] = scapy_helper.simple_gtp_udp_packet(
+ **self.traffic_flows[flow_name]["packet"])
+ else:
+ self.packets[flow_name] = scapy_helper.simple_udp_packet(
+ **self.traffic_flows[flow_name]["packet"])
+
+ def sendAndReceiveTraffic(self, duration):
+ """
+ Connect the client, create the flows in TRex (with packets created via
+ createFlow), send and receive the traffic, then disconnect the client.
+ :param duration: traffic duration
+ :return:
+ """
+ self.trex_client.connectTrexClient()
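+ # Create one TRex stream per flow built with createFlow; flows with latency_stats enabled are tracked per flow_id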
+ for flow_name, packet in self.packets.items():
+ flow_config = self.traffic_flows[flow_name]
+ Trex.__sanitizeFlowConfig(flow_config)
+ self.trex_client.addStream(pkt=packet,
+ trex_port=flow_config["trex_port"],
+ l1_bps=flow_config["l1_bps"],
+ percentage=flow_config["percentage"],
+ delay=flow_config["delay"],
+ flow_id=flow_config["flow_id"],
+ flow_stats=flow_config["latency_stats"])
+ self.trex_client.startAndWaitTraffic(duration=duration)
+ self.trex_client.disconnectTrexClient()
+
+ def assertRxPackets(self, flow_name):
+ if not self.isFlowStats(flow_name):
+ main.log.info("No flow stats for flow {}".format(flow_name))
+ expected_min_received = int(
+ self.traffic_flows[flow_name].get("expected_min_received", "1"))
+ flow_id = self.traffic_flows[flow_name]["flow_id"]
+ flow_stats = self.trex_client.getFlowStats(flow_id)
+ utilities.assert_equals(
+ expect=True,
+ actual=flow_stats.rx_packets >= expected_min_received,
+ onpass="Traffic Flow {}: Received traffic".format(flow_name),
+ onfail="Traffic Flow {}: No traffic received".format(flow_name))
+
+ def assertDroppedPacket(self, flow_name):
+ if not self.isFlowStats(flow_name):
+ main.log.info("No flow stats for flow {}".format(flow_name))
+ expected_max_dropped = int(
+ self.traffic_flows[flow_name].get("expected_max_dropped", "0"))
+ latency_stats = self.__getLatencyStats(flow_name)
+ utilities.assert_equals(
+ expect=True,
+ actual=latency_stats.dropped <= expected_max_dropped,
+ onpass="Traffic Flow {}: {} packets dropped, below threshold ({})".format(
+ flow_name, latency_stats.dropped,
+ expected_max_dropped),
+ onfail="Traffic Flow {}: {} packets dropped, above threshold ({})".format(
+ flow_name, latency_stats.dropped,
+ expected_max_dropped))
+
+ def assertMaxLatency(self, flow_name):
+ if not self.isFlowStats(flow_name):
+ main.log.info("No flow stats for flow {}".format(flow_name))
+ expected_max_latency = int(
+ self.traffic_flows[flow_name].get("expected_max_latency", "0"))
+ latency_stats = self.__getLatencyStats(flow_name)
+ utilities.assert_equals(
+ expect=True,
+ actual=latency_stats.total_max <= expected_max_latency,
+ onpass="Traffic Flow {}: Maximum latency below threshold".format(
+ flow_name),
+ onfail="Traffic Flow {}: Maximum latency is too high {}".format(
+ flow_name, latency_stats.total_max))
+
+ def assert99_9PercentileLatency(self, flow_name):
+ if not self.isFlowStats(flow_name):
+ main.log.info("No flow stats for flow {}".format(flow_name))
+ expected_99_9_percentile_latency = int(
+ self.traffic_flows[flow_name].get(
+ "expected_99_9_percentile_latency", "0"))
+ latency_stats = self.__getLatencyStats(flow_name)
+ utilities.assert_equals(
+ expect=True,
+ actual=latency_stats.percentile_99_9 <= expected_99_9_percentile_latency,
+ onpass="Traffic Flow {}: 99.9th percentile latency below threshold".format(
+ flow_name),
+ onfail="Traffic Flow {}: 99.9th percentile latency is too high {}".format(
+ flow_name, latency_stats.percentile_99_9))
+
+ def logPortStats(self):
+ main.log.debug(self.port_stats)
+ for port in self.port_stats:
+ self.trex_client.logPortStats(port)
+
+ def logFlowStats(self, flow_name):
+ if self.isFlowStats(flow_name):
+ flow_id = self.traffic_flows[flow_name]["flow_id"]
+ self.trex_client.logFlowStats(flow_id)
+ self.trex_client.logLatencyStats(flow_id)
+
+ def isFlowStats(self, flow_name):
+ return self.traffic_flows[flow_name]["latency_stats"]
+
+ def __getLatencyStats(self, flow_name):
+ flow_id = self.traffic_flows[flow_name]["flow_id"]
+ return self.trex_client.getLatencyStats(flow_id)
+
+ @staticmethod
+ def __sanitizePacketConfig(packet):
+ if "gtp_teid" in packet.keys():
+ packet["gtp_teid"] = int(packet["gtp_teid"])
+ if "pktlen" in packet.keys():
+ packet["pktlen"] = int(packet["pktlen"])
+ return packet
+
+ @staticmethod
+ def __sanitizeFlowConfig(flow_config):
+ flow_config["trex_port"] = int(flow_config["trex_port"])
+ flow_config["percentage"] = float(
+ flow_config["percentage"]) if "percentage" in flow_config else None
+ flow_config["l1_bps"] = float(
+ flow_config["l1_bps"]) if "l1_bps" in flow_config else None
+ flow_config["delay"] = int(flow_config.get("delay", 0))
+ flow_config["flow_id"] = int(
+ flow_config["flow_id"]) if "flow_id" in flow_config else None
+ flow_config["latency_stats"] = bool(
+ strtobool(flow_config.get("latency_stats", "False")))
diff --git a/TestON/tests/USECASE/SegmentRouting/dependencies/up4.py b/TestON/tests/USECASE/SegmentRouting/dependencies/up4.py
new file mode 100644
index 0000000..61f28d9
--- /dev/null
+++ b/TestON/tests/USECASE/SegmentRouting/dependencies/up4.py
@@ -0,0 +1,526 @@
+from distutils.util import strtobool
+
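+# String constants used when building UP4 table entries; values are passed to the P4Runtime client as strings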
+FALSE = '0'
+TRUE = '1'
+DIR_UPLINK = '1'
+DIR_DOWNLINK = '2'
+IFACE_ACCESS = '1'
+IFACE_CORE = '2'
+TUNNEL_SPORT = '2152'
+TUNNEL_TYPE_GPDU = '3'
+
+UE_PORT = 400
+PDN_PORT = 800
+GPDU_PORT = 2152
+
+
+class UP4:
+ """
+ Utility that manages interaction with UP4 via a P4RuntimeCliDriver available
+ in the cluster. Additionally, it can verify connectivity by crafting GTP packets
+ via Scapy with a HostDriver component, specified via <enodeb_host>, <pdn_host>,
+ and <router_mac> parameters.
+
+ Example params file:
+ <UP4>
+ <pdn_host>Compute1</pdn_host> # Needed to verify connectivity with scapy
+ <enodeb_host>Compute3</enodeb_host> # Needed to verify connectivity with scapy
+ <router_mac>00:00:0A:4C:1C:46</router_mac> # Needed to verify connectivity with scapy
+ <s1u_address>10.32.11.126</s1u_address>
+ <enb_address>10.32.11.100</enb_address>
+ <ues>
+ <ue2>
+ <pfcp_session_id>100</pfcp_session_id>
+ <ue_address>10.240.0.2</ue_address>
+ <teid>200</teid>
+ <up_id>20</up_id>
+ <down_id>21</down_id>
+ <qfi>2</qfi>
+ <five_g>False</five_g>
+ </ue2>
+ </ues>
+ </UP4>
+ """
+
+ def __init__(self):
+ self.s1u_address = None
+ self.enb_address = None
+ self.enodeb_host = None
+ self.enodeb_interface = None
+ self.pdn_host = None
+ self.pdn_interface = None
+ self.router_mac = None
+ self.emulated_ues = []
+ self.up4_client = None
+
+ def setup(self, p4rt_client):
+ self.s1u_address = main.params["UP4"]["s1u_address"]
+ self.enb_address = main.params["UP4"]["enb_address"]
+ self.emulated_ues = main.params["UP4"]['ues']
+ self.up4_client = p4rt_client
+
+ # Optional Parameters
+ if "enodeb_host" in main.params["UP4"]:
+ self.enodeb_host = getattr(main, main.params["UP4"]["enodeb_host"])
+ self.enodeb_interface = self.enodeb_host.interfaces[0]
+ if "pdn_host" in main.params["UP4"]:
+ self.pdn_host = getattr(main, main.params["UP4"]["pdn_host"])
+ self.pdn_interface = self.pdn_host.interfaces[0]
+ self.router_mac = main.params["UP4"].get("router_mac", None)
+
+ # Start components
+ self.up4_client.startP4RtClient()
+ if self.enodeb_host is not None:
+ self.enodeb_host.startScapy(ifaceName=self.enodeb_interface["name"],
+ enableGtp=True)
+ if self.pdn_host is not None:
+ self.pdn_host.startScapy(ifaceName=self.pdn_interface["name"])
+
+ def teardown(self):
+ self.up4_client.stopP4RtClient()
+ if self.enodeb_host is not None:
+ self.enodeb_host.stopScapy()
+ if self.pdn_host is not None:
+ self.pdn_host.stopScapy()
+
+ def attachUes(self):
+ for ue in self.emulated_ues.values():
+ ue = UP4.__sanitizeUeData(ue)
+ self.attachUe(**ue)
+
+ def detachUes(self):
+ for ue in self.emulated_ues.values():
+ ue = UP4.__sanitizeUeData(ue)
+ self.detachUe(**ue)
+
+ def testUpstreamTraffic(self):
+ if self.enodeb_host is None or self.pdn_host is None:
+ main.log.error(
+ "Need eNodeB and PDN host params to generate scapy traffic")
+ return
+ # Scapy filter needs to start before sending traffic
+ pkt_filter_upstream = ""
+ for ue in self.emulated_ues.values():
+ if "ue_address" in ue:
+ if len(pkt_filter_upstream) != 0:
+ pkt_filter_upstream += " or "
+ pkt_filter_upstream += "src host " + ue["ue_address"]
+ pkt_filter_upstream = "ip and udp dst port %s and (%s) and dst host %s" % \
+ (PDN_PORT, pkt_filter_upstream,
+ self.pdn_interface["ips"][0])
+ main.log.info("Start listening on %s intf %s" %
+ (self.pdn_host.name, self.pdn_interface["name"]))
+ main.log.debug("BPF Filter Upstream: \n %s" % pkt_filter_upstream)
+ self.pdn_host.startFilter(ifaceName=self.pdn_interface["name"],
+ sniffCount=len(self.emulated_ues),
+ pktFilter=pkt_filter_upstream)
+
+ main.log.info(
+ "Sending %d packets from eNodeB host" % len(self.emulated_ues))
+ for ue in self.emulated_ues.values():
+ UP4.buildGtpPacket(self.enodeb_host,
+ src_ip_outer=self.enb_address,
+ dst_ip_outer=self.s1u_address,
+ src_ip_inner=ue["ue_address"],
+ dst_ip_inner=self.pdn_interface["ips"][0],
+ src_udp_inner=UE_PORT,
+ dst_udp_inner=PDN_PORT,
+ teid=int(ue["teid"]))
+
+ self.enodeb_host.sendPacket(iface=self.enodeb_interface["name"])
+
+ packets = UP4.checkFilterAndGetPackets(self.pdn_host)
+ fail = False
+ if len(self.emulated_ues) != packets.count('Ether'):
+ fail = True
+ msg = "Failed to capture packets in PDN. "
+ else:
+ msg = "Correctly captured packet in PDN. "
+ # We expect exactly 1 packet per UE
+ pktsFiltered = [packets.count("src=" + ue["ue_address"])
+ for ue in self.emulated_ues.values()]
+ if pktsFiltered.count(1) != len(pktsFiltered):
+ fail = True
+ msg += "More than one packet per UE in downstream. "
+ else:
+ msg += "One packet per UE in upstream. "
+
+ utilities.assert_equal(
+ expect=False, actual=fail, onpass=msg, onfail=msg)
+
+ def testDownstreamTraffic(self):
+ if self.enodeb_host is None or self.pdn_host is None:
+ main.log.error(
+ "Need eNodeB and PDN host params to generate scapy traffic")
+ return
+ pkt_filter_downstream = "ip and udp src port %d and udp dst port %d and dst host %s and src host %s" % (
+ GPDU_PORT, GPDU_PORT, self.enb_address, self.s1u_address)
+ main.log.info("Start listening on %s intf %s" % (
+ self.enodeb_host.name, self.enodeb_interface["name"]))
+ main.log.debug("BPF Filter Downstream: \n %s" % pkt_filter_downstream)
+ self.enodeb_host.startFilter(ifaceName=self.enodeb_interface["name"],
+ sniffCount=len(self.emulated_ues),
+ pktFilter=pkt_filter_downstream)
+
+ main.log.info(
+ "Sending %d packets from PDN host" % len(self.emulated_ues))
+ for ue in self.emulated_ues.values():
+ # From PDN we have to set dest MAC, otherwise scapy will do ARP
+ # request for the UE IP address.
+ UP4.buildUdpPacket(self.pdn_host,
+ dst_eth=self.router_mac,
+ src_ip=self.pdn_interface["ips"][0],
+ dst_ip=ue["ue_address"],
+ src_udp=PDN_PORT,
+ dst_udp=UE_PORT)
+ self.pdn_host.sendPacket(iface=self.pdn_interface["name"])
+
+ packets = UP4.checkFilterAndGetPackets(self.enodeb_host)
+
+ # The BPF filter might capture non-GTP packets because we can't filter
+ # GTP header in BPF. For this reason, check that the captured packets
+ # are from the expected tunnels.
+ # TODO: check inner UDP and IP fields as well
+ # FIXME: with newer scapy TEID becomes teid (required for Scapy 2.4.5)
+ pktsFiltered = [packets.count("TEID=" + hex(int(ue["teid"])) + "L ")
+ for ue in self.emulated_ues.values()]
+
+ fail = False
+ if len(self.emulated_ues) != sum(pktsFiltered):
+ fail = True
+ msg = "Failed to capture packets in eNodeB. "
+ else:
+ msg = "Correctly captured packets in eNodeB. "
+ # We expect exactly 1 packet per UE
+ if pktsFiltered.count(1) != len(pktsFiltered):
+ fail = True
+ msg += "More than one packet per GTP TEID in downstream. "
+ else:
+ msg += "One packet per GTP TEID in downstream. "
+
+ utilities.assert_equal(
+ expect=False, actual=fail, onpass=msg, onfail=msg)
+
+ def verifyNoUesFlow(self, onosCli, retries=3):
+ """
+ Verify that no PDRs and FARs are installed in ONOS.
+
+ :param onosCli: An instance of a OnosCliDriver
+ :param retries: number of retries
+ :return:
+ """
+ retValue = utilities.retry(f=UP4.__verifyNoPdrsFarsOnos,
+ retValue=False,
+ args=[onosCli],
+ sleep=1,
+ attempts=retries)
+ utilities.assert_equal(expect=True,
+ actual=retValue,
+ onpass="No PDRs and FARs in ONOS",
+ onfail="Stale PDRs or FARs")
+
+ @staticmethod
+ def __verifyNoPdrsFarsOnos(onosCli):
+ """
+ Verify that no PDRs and FARs are installed in ONOS
+
+ :param onosCli: An instance of a OnosCliDriver
+ """
+ pdrs = onosCli.sendline(cmdStr="up4:read-pdrs", showResponse=True,
+ noExit=True, expectJson=False)
+ fars = onosCli.sendline(cmdStr="up4:read-fars", showResponse=True,
+ noExit=True, expectJson=False)
+ return pdrs == "" and fars == ""
+
+ def verifyUp4Flow(self, onosCli):
+ """
+ Verify PDRs and FARs installed via UP4 using the ONOS CLI.
+
+ :param onosCli: An instance of a OnosCliDriver
+ """
+ pdrs = onosCli.sendline(cmdStr="up4:read-pdrs", showResponse=True,
+ noExit=True, expectJson=False)
+ fars = onosCli.sendline(cmdStr="up4:read-fars", showResponse=True,
+ noExit=True, expectJson=False)
+ fail = False
+ failMsg = ""
+ for ue in self.emulated_ues.values():
+ if pdrs.count(self.upPdrOnosString(**ue)) != 1:
+ failMsg += self.upPdrOnosString(**ue) + "\n"
+ fail = True
+ if pdrs.count(self.downPdrOnosString(**ue)) != 1:
+ failMsg += self.downPdrOnosString(**ue) + "\n"
+ fail = True
+ if fars.count(self.upFarOnosString(**ue)) != 1:
+ failMsg += self.upFarOnosString(**ue) + "\n"
+ fail = True
+ if fars.count(self.downFarOnosString(**ue)) != 1:
+ failMsg += self.downFarOnosString(**ue) + "\n"
+ fail = True
+ utilities.assert_equal(expect=False, actual=fail,
+ onpass="Correct PDRs and FARs in ONOS",
+ onfail="Wrong PDRs and FARs in ONOS. Missing PDR/FAR:\n" + failMsg)
+
+ def upPdrOnosString(self, pfcp_session_id, teid=None, up_id=None,
+ teid_up=None, far_id_up=None, ctr_id_up=None, qfi=None,
+ **kwargs):
+ # TODO: consider that with five_g the output might be different
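+ # The returned string must match the textual output of the ONOS up4:read-pdrs command,
+ # since verifyUp4Flow counts exact occurrences of it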
+ if up_id is not None:
+ far_id_up = up_id
+ ctr_id_up = up_id
+ if teid is not None:
+ teid_up = teid
+ if qfi is not None:
+ return "PDR{{Match(Dst={}, TEID={}) -> LoadParams(SEID={}, FAR={}, CtrIdx={}, QFI={})}}".format(
+ self.s1u_address, hex(int(teid_up)), hex(int(pfcp_session_id)),
+ far_id_up,
+ ctr_id_up, qfi)
+ return "PDR{{Match(Dst={}, TEID={}) -> LoadParams(SEID={}, FAR={}, CtrIdx={})}}".format(
+ self.s1u_address, hex(int(teid_up)), hex(int(pfcp_session_id)),
+ far_id_up, ctr_id_up)
+
+ def downPdrOnosString(self, pfcp_session_id, ue_address, down_id=None,
+ far_id_down=None, ctr_id_down=None, **kwargs):
+ # TODO: consider that with five_g the output might be different
+ if down_id is not None:
+ far_id_down = down_id
+ ctr_id_down = down_id
+ return "PDR{{Match(Dst={}, !GTP) -> LoadParams(SEID={}, FAR={}, CtrIdx={})}}".format(
+ ue_address, hex(int(pfcp_session_id)), far_id_down, ctr_id_down)
+
+ def downFarOnosString(self, pfcp_session_id, teid=None, down_id=None,
+ teid_down=None, far_id_down=None, **kwargs):
+ if down_id is not None:
+ far_id_down = down_id
+ if teid is not None:
+ teid_down = teid
+ return "FAR{{Match(ID={}, SEID={}) -> Encap(Src={}, SPort={}, TEID={}, Dst={})}}".format(
+ far_id_down, hex(int(pfcp_session_id)), self.s1u_address, GPDU_PORT,
+ hex(int(teid_down)),
+ self.enb_address)
+
+ def upFarOnosString(self, pfcp_session_id, up_id=None, far_id_up=None,
+ **kwargs):
+ if up_id is not None:
+ far_id_up = up_id
+ return "FAR{{Match(ID={}, SEID={}) -> Forward()}}".format(
+ far_id_up, hex(int(pfcp_session_id)))
+
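For reference, a sketch of the strings these helpers render, which verifyUp4Flow expects to find exactly once in the up4:read-pdrs / up4:read-fars output. The UE values and the S1U address (10.32.11.126) below are hypothetical:

    ue = {"pfcp_session_id": 100, "up_id": 10, "teid": 200}   # hypothetical UE
    up4.upPdrOnosString(**ue)
    # -> "PDR{Match(Dst=10.32.11.126, TEID=0xc8) -> LoadParams(SEID=0x64, FAR=10, CtrIdx=10)}"
    up4.upFarOnosString(**ue)
    # -> "FAR{Match(ID=10, SEID=0x64) -> Forward()}"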
+ @staticmethod
+ def __sanitizeUeData(ue):
+ if "five_g" in ue and type(ue["five_g"]) != bool:
+ ue["five_g"] = bool(strtobool(ue["five_g"]))
+ if "qfi" in ue and ue["qfi"] == "":
+ ue["qfi"] = None
+ return ue
+
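A note on the coercion above: UE attributes parsed from .params files arrive as strings, and distutils' strtobool returns 0/1 rather than a bool, hence the bool() wrapper; an empty qfi string is normalised to None so the non-QoS PDR action is used. For example:

    from distutils.util import strtobool
    bool(strtobool("True"))    # True
    bool(strtobool("false"))   # False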
+ def attachUe(self, pfcp_session_id, ue_address,
+ teid=None, up_id=None, down_id=None,
+ teid_up=None, teid_down=None,
+ pdr_id_up=None, far_id_up=None, ctr_id_up=None,
+ pdr_id_down=None, far_id_down=None, ctr_id_down=None,
+ qfi=None, five_g=False):
+ self.__programUp4Rules(pfcp_session_id,
+ ue_address,
+ teid, up_id, down_id,
+ teid_up, teid_down,
+ pdr_id_up, far_id_up, ctr_id_up,
+ pdr_id_down, far_id_down, ctr_id_down,
+ qfi, five_g, action="program")
+
+ def detachUe(self, pfcp_session_id, ue_address,
+ teid=None, up_id=None, down_id=None,
+ teid_up=None, teid_down=None,
+ pdr_id_up=None, far_id_up=None, ctr_id_up=None,
+ pdr_id_down=None, far_id_down=None, ctr_id_down=None,
+ qfi=None, five_g=False):
+ self.__programUp4Rules(pfcp_session_id,
+ ue_address,
+ teid, up_id, down_id,
+ teid_up, teid_down,
+ pdr_id_up, far_id_up, ctr_id_up,
+ pdr_id_down, far_id_down, ctr_id_down,
+ qfi, five_g, action="clear")
+
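Typical use from a test case, as a sketch only; the session id, addresses and ids below are hypothetical and would normally come from the sanitized entries in self.emulated_ues:

    up4.attachUe(pfcp_session_id=100, ue_address="10.240.0.1",
                 teid=200, up_id=10, down_id=20)
    # ... run traffic checks and verifyUp4Flow() ...
    up4.detachUe(pfcp_session_id=100, ue_address="10.240.0.1",
                 teid=200, up_id=10, down_id=20)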
+ def __programUp4Rules(self, pfcp_session_id, ue_address,
+ teid=None, up_id=None, down_id=None,
+ teid_up=None, teid_down=None,
+ pdr_id_up=None, far_id_up=None, ctr_id_up=None,
+ pdr_id_down=None, far_id_down=None, ctr_id_down=None,
+ qfi=None, five_g=False, action="program"):
+ if up_id is not None:
+ pdr_id_up = up_id
+ far_id_up = up_id
+ ctr_id_up = up_id
+ if down_id is not None:
+ pdr_id_down = down_id
+ far_id_down = down_id
+ ctr_id_down = down_id
+ if teid is not None:
+ teid_up = teid
+ teid_down = teid
+
+ entries = []
+
+ # ========================#
+ # PDR Entries
+ # ========================#
+
+ # Uplink
+ tableName = 'PreQosPipe.pdrs'
+ actionName = ''
+ matchFields = {}
+ actionParams = {}
+ if qfi is None:
+ actionName = 'PreQosPipe.set_pdr_attributes'
+ else:
+ actionName = 'PreQosPipe.set_pdr_attributes_qos'
+ if five_g:
+ # TODO: currently QFI_MATCH is unsupported in TNA
+ matchFields['has_qfi'] = TRUE
+ matchFields["qfi"] = str(qfi)
+ actionParams['needs_qfi_push'] = FALSE
+ actionParams['qfi'] = str(qfi)
+ # Match fields
+ matchFields['src_iface'] = IFACE_ACCESS
+ matchFields['ue_addr'] = str(ue_address)
+ matchFields['teid'] = str(teid_up)
+ matchFields['tunnel_ipv4_dst'] = str(self.s1u_address)
+ # Action params
+ actionParams['id'] = str(pdr_id_up)
+ actionParams['fseid'] = str(pfcp_session_id)
+ actionParams['ctr_id'] = str(ctr_id_up)
+ actionParams['far_id'] = str(far_id_up)
+ actionParams['needs_gtpu_decap'] = TRUE
+ if not self.__add_entry(tableName, actionName, matchFields,
+ actionParams, entries, action):
+ return False
+
+ # Downlink
+ tableName = 'PreQosPipe.pdrs'
+ matchFields = {}
+ actionParams = {}
+ if qfi is None:
+ actionName = 'PreQosPipe.set_pdr_attributes'
+ else:
+ actionName = 'PreQosPipe.set_pdr_attributes_qos'
+ # TODO: currently QFI_PUSH is unsupported in TNA
+ actionParams['needs_qfi_push'] = TRUE if five_g else FALSE
+ actionParams['qfi'] = str(qfi)
+ # Match fields
+ matchFields['src_iface'] = IFACE_CORE
+ matchFields['ue_addr'] = str(ue_address)
+ # Action params
+ actionParams['id'] = str(pdr_id_down)
+ actionParams['fseid'] = str(pfcp_session_id)
+ actionParams['ctr_id'] = str(ctr_id_down)
+ actionParams['far_id'] = str(far_id_down)
+ actionParams['needs_gtpu_decap'] = FALSE
+ if not self.__add_entry(tableName, actionName, matchFields,
+ actionParams, entries, action):
+ return False
+
+ # ========================#
+ # FAR Entries
+ # ========================#
+
+ # Uplink
+ tableName = 'PreQosPipe.load_far_attributes'
+ actionName = 'PreQosPipe.load_normal_far_attributes'
+ matchFields = {}
+ actionParams = {}
+
+ # Match fields
+ matchFields['far_id'] = str(far_id_up)
+ matchFields['session_id'] = str(pfcp_session_id)
+ # Action params
+ actionParams['needs_dropping'] = FALSE
+ actionParams['notify_cp'] = FALSE
+ if not self.__add_entry(tableName, actionName, matchFields,
+ actionParams, entries, action):
+ return False
+
+ # Downlink
+ tableName = 'PreQosPipe.load_far_attributes'
+ actionName = 'PreQosPipe.load_tunnel_far_attributes'
+ matchFields = {}
+ actionParams = {}
+
+ # Match fields
+ matchFields['far_id'] = str(far_id_down)
+ matchFields['session_id'] = str(pfcp_session_id)
+ # Action params
+ actionParams['needs_dropping'] = FALSE
+ actionParams['notify_cp'] = FALSE
+ actionParams['needs_buffering'] = FALSE
+ actionParams['tunnel_type'] = TUNNEL_TYPE_GPDU
+ actionParams['src_addr'] = str(self.s1u_address)
+ actionParams['dst_addr'] = str(self.enb_address)
+ actionParams['teid'] = str(teid_down)
+ actionParams['sport'] = TUNNEL_SPORT
+ if not self.__add_entry(tableName, actionName, matchFields,
+ actionParams, entries, action):
+ return False
+ if action == "program":
+ main.log.info("All entries added successfully.")
+ elif action == "clear":
+ self.__clear_entries(entries)
+
+ def __add_entry(self, tableName, actionName, matchFields, actionParams,
+ entries, action):
+ if action == "program":
+ self.up4_client.buildP4RtTableEntry(
+ tableName=tableName, actionName=actionName,
+ actionParams=actionParams, matchFields=matchFields)
+ if self.up4_client.pushTableEntry(debug=True) == main.TRUE:
+ main.log.info("*** Entry added.")
+ else:
+ main.log.error("Error during table insertion")
+ self.__clear_entries(entries)
+ return False
+ entries.append({"tableName": tableName, "actionName": actionName,
+ "matchFields": matchFields,
+ "actionParams": actionParams})
+ return True
+
+ def __clear_entries(self, entries):
+ for i, entry in enumerate(entries):
+ self.up4_client.buildP4RtTableEntry(**entry)
+ if self.up4_client.deleteTableEntry(debug=True) == main.TRUE:
+ main.log.info(
+ "*** Entry %d of %d deleted." % (i + 1, len(entries)))
+ else:
+ main.log.error("Error during table delete")
+
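Each entry recorded by __add_entry is exactly the keyword set accepted by buildP4RtTableEntry, so __clear_entries can replay it verbatim when deleting. A sketch of one recorded uplink FAR entry, with hypothetical ids and assuming the module keeps the FALSE = '0' convention of the removed up4libcli.py:

    entry = {"tableName": "PreQosPipe.load_far_attributes",
             "actionName": "PreQosPipe.load_normal_far_attributes",
             "matchFields": {"far_id": "10", "session_id": "100"},
             "actionParams": {"needs_dropping": "0", "notify_cp": "0"}}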
+ @staticmethod
+ def buildGtpPacket(host, src_ip_outer, dst_ip_outer, src_ip_inner,
+ dst_ip_inner, src_udp_inner, dst_udp_inner, teid):
+ host.buildEther()
+ host.buildIP(src=src_ip_outer, dst=dst_ip_outer)
+ host.buildUDP(ipVersion=4, dport=GPDU_PORT)
+ # FIXME: with newer Scapy the 'TEID' argument becomes 'teid' (required for Scapy 2.4.5)
+ host.buildGTP(gtp_type=0xFF, TEID=teid)
+ host.buildIP(overGtp=True, src=src_ip_inner, dst=dst_ip_inner)
+ host.buildUDP(ipVersion=4, overGtp=True, sport=src_udp_inner,
+ dport=dst_udp_inner)
+
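The layering built by buildGtpPacket corresponds roughly to the following Scapy sketch; all addresses, ports and the TEID are hypothetical, and on Scapy >= 2.4.5 the GTP field is lowercase teid, per the FIXME above:

    from scapy.all import Ether, IP, UDP
    from scapy.contrib.gtp import GTP_U_Header

    pkt = (Ether() /
           IP(src="10.32.11.122", dst="10.32.11.126") /   # outer: eNB -> S1U
           UDP(dport=2152) /                              # GPDU_PORT
           GTP_U_Header(gtp_type=0xFF, teid=200) /
           IP(src="10.240.0.1", dst="10.32.11.125") /     # inner: UE -> app server
           UDP(sport=5000, dport=8000))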
+ @staticmethod
+ def buildUdpPacket(host, src_ip, dst_ip, src_udp, dst_udp, src_eth=None,
+ dst_eth=None):
+ host.buildEther(src=src_eth, dst=dst_eth)
+ host.buildIP(src=src_ip, dst=dst_ip)
+ host.buildUDP(ipVersion=4, sport=src_udp, dport=dst_udp)
+
+ @staticmethod
+ def checkFilterAndGetPackets(host):
+ finished = host.checkFilter()
+ if finished:
+ packets = host.readPackets(detailed=True)
+ for p in packets.splitlines():
+ main.log.debug(p)
+ # We only care about the last line from readPackets
+ return packets.splitlines()[-1]
+ else:
+ kill = host.killFilter()
+ main.log.debug(kill)
+ return ""
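Usage sketch: once a sniff filter has been started on the receiving host component, the helper waits for it via checkFilter and returns only the last summary line, or an empty string if the filter had to be killed:

    captured = UP4.checkFilterAndGetPackets(host)
    if captured == "":
        main.log.warn("Filter did not complete; no matching packet captured")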
diff --git a/TestON/tests/USECASE/SegmentRouting/dependencies/up4libcli.py b/TestON/tests/USECASE/SegmentRouting/dependencies/up4libcli.py
deleted file mode 100644
index 73dd39b..0000000
--- a/TestON/tests/USECASE/SegmentRouting/dependencies/up4libcli.py
+++ /dev/null
@@ -1,213 +0,0 @@
-#!/usr/bin/env python
-
-"""
-Copyright 2021 Open Networking Foundation (ONF)
-
-Please refer questions to either the onos test mailing list at <onos-test@onosproject.org>,
-the System Testing Plans and Results wiki page at <https://wiki.onosproject.org/x/voMg>,
-or the System Testing Guide page at <https://wiki.onosproject.org/x/WYQg>
-
-"""
-
-FALSE = '0'
-TRUE = '1'
-DIR_UPLINK = '1'
-DIR_DOWNLINK = '2'
-IFACE_ACCESS = '1'
-IFACE_CORE = '2'
-TUNNEL_SPORT = '2152'
-TUNNEL_TYPE_GPDU = '3'
-
-
-class Up4LibCli():
- """
- Helper library to attach and detach UEs via UP4 P4Runtime APIs.
- """
-
- @staticmethod
- def attachUe(p4rtCli, s1u_address, enb_address, pfcp_session_id, ue_address,
- teid=None, up_id=None, down_id=None,
- teid_up=None, teid_down=None,
- pdr_id_up=None, far_id_up=None, ctr_id_up=None,
- pdr_id_down=None, far_id_down=None, ctr_id_down=None,
- qfi=None, five_g=False):
- Up4LibCli.__programUp4Rules(p4rtCli, s1u_address, enb_address,
- pfcp_session_id,
- ue_address,
- teid, up_id, down_id,
- teid_up, teid_down,
- pdr_id_up, far_id_up, ctr_id_up,
- pdr_id_down, far_id_down, ctr_id_down,
- qfi, five_g, action="program")
-
- @staticmethod
- def detachUe(p4rtCli, s1u_address, enb_address, pfcp_session_id, ue_address,
- teid=None, up_id=None, down_id=None,
- teid_up=None, teid_down=None,
- pdr_id_up=None, far_id_up=None, ctr_id_up=None,
- pdr_id_down=None, far_id_down=None, ctr_id_down=None,
- qfi=None, five_g=False):
- Up4LibCli.__programUp4Rules(p4rtCli, s1u_address, enb_address,
- pfcp_session_id,
- ue_address,
- teid, up_id, down_id,
- teid_up, teid_down,
- pdr_id_up, far_id_up, ctr_id_up,
- pdr_id_down, far_id_down, ctr_id_down,
- qfi, five_g, action="clear")
-
- @staticmethod
- def __programUp4Rules(p4rtCli, s1u_address, enb_address, pfcp_session_id,
- ue_address,
- teid=None, up_id=None, down_id=None,
- teid_up=None, teid_down=None,
- pdr_id_up=None, far_id_up=None, ctr_id_up=None,
- pdr_id_down=None, far_id_down=None, ctr_id_down=None,
- qfi=None, five_g=False, action="program"):
- if up_id is not None:
- pdr_id_up = up_id
- far_id_up = up_id
- ctr_id_up = up_id
- if down_id is not None:
- pdr_id_down = down_id
- far_id_down = down_id
- ctr_id_down = down_id
- if teid is not None:
- teid_up = teid
- teid_down = teid
-
- entries = []
-
- # ========================#
- # PDR Entries
- # ========================#
-
- # Uplink
- tableName = 'PreQosPipe.pdrs'
- actionName = ''
- matchFields = {}
- actionParams = {}
- if qfi is None:
- actionName = 'PreQosPipe.set_pdr_attributes'
- else:
- actionName = 'PreQosPipe.set_pdr_attributes_qos'
- if five_g:
- # TODO: currently QFI_MATCH is unsupported in TNA
- matchFields['has_qfi'] = TRUE
- matchFields["qfi"] = str(qfi)
- actionParams['needs_qfi_push'] = FALSE
- actionParams['qfi'] = str(qfi)
- # Match fields
- matchFields['src_iface'] = IFACE_ACCESS
- matchFields['ue_addr'] = str(ue_address)
- matchFields['teid'] = str(teid_up)
- matchFields['tunnel_ipv4_dst'] = str(s1u_address)
- # Action params
- actionParams['id'] = str(pdr_id_up)
- actionParams['fseid'] = str(pfcp_session_id)
- actionParams['ctr_id'] = str(ctr_id_up)
- actionParams['far_id'] = str(far_id_up)
- actionParams['needs_gtpu_decap'] = TRUE
- if not Up4LibCli.__add_entry(p4rtCli, tableName, actionName, matchFields,
- actionParams, entries, action):
- return False
-
- # Downlink
- tableName = 'PreQosPipe.pdrs'
- actionName = ''
- matchFields = {}
- actionParams = {}
- if qfi is None:
- actionName = 'PreQosPipe.set_pdr_attributes'
- else:
- actionName = 'PreQosPipe.set_pdr_attributes_qos'
- # TODO: currently QFI_PUSH is unsupported in TNA
- actionParams['needs_qfi_push'] = TRUE if five_g else FALSE
- actionParams['qfi'] = str(qfi)
- # Match fields
- matchFields['src_iface'] = IFACE_CORE
- matchFields['ue_addr'] = str(ue_address)
- # Action params
- actionParams['id'] = str(pdr_id_down)
- actionParams['fseid'] = str(pfcp_session_id)
- actionParams['ctr_id'] = str(ctr_id_down)
- actionParams['far_id'] = str(far_id_down)
- actionParams['needs_gtpu_decap'] = FALSE
- if not Up4LibCli.__add_entry(p4rtCli, tableName, actionName, matchFields,
- actionParams, entries, action):
- return False
-
- # ========================#
- # FAR Entries
- # ========================#
-
- # Uplink
- tableName = 'PreQosPipe.load_far_attributes'
- actionName = 'PreQosPipe.load_normal_far_attributes'
- matchFields = {}
- actionParams = {}
-
- # Match fields
- matchFields['far_id'] = str(far_id_up)
- matchFields['session_id'] = str(pfcp_session_id)
- # Action params
- actionParams['needs_dropping'] = FALSE
- actionParams['notify_cp'] = FALSE
- if not Up4LibCli.__add_entry(p4rtCli, tableName, actionName, matchFields,
- actionParams, entries, action):
- return False
-
- # Downlink
- tableName = 'PreQosPipe.load_far_attributes'
- actionName = 'PreQosPipe.load_tunnel_far_attributes'
- matchFields = {}
- actionParams = {}
-
- # Match fields
- matchFields['far_id'] = str(far_id_down)
- matchFields['session_id'] = str(pfcp_session_id)
- # Action params
- actionParams['needs_dropping'] = FALSE
- actionParams['notify_cp'] = FALSE
- actionParams['needs_buffering'] = FALSE
- actionParams['tunnel_type'] = TUNNEL_TYPE_GPDU
- actionParams['src_addr'] = str(s1u_address)
- actionParams['dst_addr'] = str(enb_address)
- actionParams['teid'] = str(teid_down)
- actionParams['sport'] = TUNNEL_SPORT
- if not Up4LibCli.__add_entry(p4rtCli, tableName, actionName, matchFields,
- actionParams, entries, action):
- return False
-
- if action == "program":
- main.log.info("All entries added successfully.")
- elif action == "clear":
- Up4LibCli.__clear_entries(p4rtCli, entries)
-
- @staticmethod
- def __add_entry(p4rtCli, tableName, actionName, matchFields, actionParams,
- entries, action):
- if action == "program":
- p4rtCli.buildP4RtTableEntry(tableName=tableName,
- actionName=actionName,
- actionParams=actionParams,
- matchFields=matchFields)
- if p4rtCli.pushTableEntry(debug=True) == main.TRUE:
- main.log.info("*** Entry added.")
- else:
- main.log.error("Error during table insertion")
- Up4LibCli.__clear_entries(p4rtCli, entries)
- return False
- entries.append({"tableName": tableName, "actionName": actionName,
- "matchFields": matchFields,
- "actionParams": actionParams})
- return True
-
- @staticmethod
- def __clear_entries(p4rtCli, entries):
- for i, entry in enumerate(entries):
- p4rtCli.buildP4RtTableEntry(**entry)
- if p4rtCli.deleteTableEntry(debug=True) == main.TRUE:
- main.log.info("*** Entry %d of %d deleted." % (i + 1, len(entries)))
- else:
- main.log.error("Error during table delete")
diff --git a/TestON/tests/dependencies/topology.py b/TestON/tests/dependencies/topology.py
index 9d20cda..56a29a9 100644
--- a/TestON/tests/dependencies/topology.py
+++ b/TestON/tests/dependencies/topology.py
@@ -263,6 +263,15 @@
srcIpList[ src ] = main.Network.getIPAddress( src, proto='IPV6' if ipv6 else 'IPV4', iface=hostHandle.interfaces[0].get("name") )
unexpectedPings = []
for dst in dstList:
+ if not hasattr( main, dst ):
+ main.log.info( "Creating component for host {}".format( dst ) )
+ main.Network.createHostComponent( dst )
+ hostHandle = getattr( main, dst )
+ if hasattr( main, 'Mininet1' ):
+ main.log.info( "Starting CLI on host {}".format( dst ) )
+ hostHandle.startHostCli()
+ else:
+ hostHandle.connectInband()
hostHandle = getattr( main, dst )
dstIp = main.Network.getIPAddress( dst, proto='IPV6' if ipv6 else 'IPV4', iface=hostHandle.interfaces[0].get("name") )
# Start pings from src hosts in parallel