Merge "Fix for SCPF Jenkinsfile error"
diff --git a/TestON/drivers/common/cli/onosclidriver.py b/TestON/drivers/common/cli/onosclidriver.py
index c4ea4c8..18f0914 100755
--- a/TestON/drivers/common/cli/onosclidriver.py
+++ b/TestON/drivers/common/cli/onosclidriver.py
@@ -5101,23 +5101,25 @@
for i in range( 1, logNum ):
logPaths = logPath + str( i ) + " " + logPaths
cmd = "cat " + logPaths
+ if startLine:
+ # 100000000 is just an extremely large number to make sure this grep captures all lines after startLine
+ cmd = cmd + " | grep -A 100000000 \'" + startLine + "\'"
if mode == 'all':
cmd = cmd + " | grep \'" + searchTerm + "\'"
- if mode == 'last':
+ elif mode == 'last':
cmd = cmd + " | grep \'" + searchTerm + "\'" + " | tail -n 1"
- if mode == 'first':
- if startLine != '':
- # 100000000 is just a extreme large number to make sure this function can grep all the lines after startLine
- cmd = cmd + " | grep -A 100000000 \'" + startLine + "\' | grep \'" + searchTerm + "\'" + "| head -n 1"
- else:
- cmd = cmd + " | grep \'" + searchTerm + "\'" + " | head -n 1"
- if mode == 'num':
+ elif mode == 'first':
+ cmd = cmd + " | grep \'" + searchTerm + "\'" + " | head -n 1"
+ elif mode == 'num':
cmd = cmd + " | grep -c \'" + searchTerm + "\'"
num = self.sendline( cmd )
return num
- if mode == 'total':
+ elif mode == 'total':
totalLines = self.sendline( "cat /opt/onos/log/karaf.log | wc -l" )
return int(totalLines)
+ else:
+ main.log.error( self.name + ": unsupported mode " + str( mode ) )
+ return main.ERROR
before = self.sendline( cmd )
before = before.splitlines()
# make sure the returned list only contains the search term
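Note that with the reordering above, startLine now constrains every mode rather than only 'first', and an unrecognized mode returns main.ERROR instead of silently falling through. A minimal sketch of the shell pipeline the driver assembles, using invented log paths, anchor and search term rather than the driver's real inputs:

    # Hedged illustration only; paths, anchor line and search term are made up.
    logPaths = "/opt/onos/log/karaf.log.1 /opt/onos/log/karaf.log"
    startLine = "2017-03-01T10:15:00"     # anchor occurring earlier in the log
    searchTerm = "LINK_REMOVED"
    mode = "last"

    cmd = "cat " + logPaths
    if startLine:
        # keep only lines after the anchor; -A 100000000 is an effectively unbounded window
        cmd = cmd + " | grep -A 100000000 '" + startLine + "'"
    if mode == "last":
        cmd = cmd + " | grep '" + searchTerm + "' | tail -n 1"
    print( cmd )
    # cat /opt/onos/log/karaf.log.1 /opt/onos/log/karaf.log | grep -A 100000000 '2017-03-01T10:15:00' | grep 'LINK_REMOVED' | tail -n 1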
diff --git a/TestON/tests/FUNC/FUNCnetCfg/FUNCnetCfg.py b/TestON/tests/FUNC/FUNCnetCfg/FUNCnetCfg.py
index 77e45ea..6228f8a 100644
--- a/TestON/tests/FUNC/FUNCnetCfg/FUNCnetCfg.py
+++ b/TestON/tests/FUNC/FUNCnetCfg/FUNCnetCfg.py
@@ -404,6 +404,8 @@
to all nodes
"""
import time
+ import json
+ import os
main.case( "Add Network configurations to the cluster" )
main.caseExplanation = "Add Network Configurations for devices" +\
" not discovered yet. One device is allowed" +\
@@ -411,10 +413,16 @@
pprint = main.nodes[0].pprint
main.step( "Add Net Cfg for switch1" )
- s1Json = { "rackAddress": 1,
- "name": "Switch1",
- "owner": "Jimmy",
- "allowed": True }
+
+ try:
+ with open( os.path.dirname( main.testFile ) + '/dependencies/s1Json', 'r' ) as s1Jsondata:
+ s1Json = json.load( s1Jsondata )
+ except IOError:
+ main.log.exception( "s1Json File not found." )
+ main.cleanup()
+ main.exit()
+ main.log.info( "s1Json:" + str( s1Json ) )
+
main.s1Json = s1Json
setS1Allow = main.ONOSrest1.setNetCfg( s1Json,
subjectClass="devices",
@@ -444,10 +452,16 @@
onfail="Net Cfg for device s1 not correctly set" )
main.step( "Add Net Cfg for switch3" )
- s3Json = { "rackAddress": 3,
- "name": "Switch3",
- "owner": "Jane",
- "allowed": False }
+
+ try:
+ with open( os.path.dirname( main.testFile ) + '/dependencies/s3Json', 'r' ) as s3Jsondata:
+ s3Json = json.load( s3Jsondata )
+ except IOError:
+ main.log.exception( "s3Json File not found" )
+ main.cleanup()
+ main.exit()
+ main.log.info( "s3Json:" + str( s3Json) )
+
main.s3Json = s3Json
setS3Disallow = main.ONOSrest1.setNetCfg( s3Json,
subjectClass="devices",
@@ -546,10 +560,14 @@
pprint = main.nodes[0].pprint
main.step( "Add Net Cfg for switch2" )
- s2Json = { "rackAddress": 2,
- "name": "Switch2",
- "owner": "Jenny",
- "allowed": True }
+ try:
+ with open( os.path.dirname( main.testFile ) + '/dependencies/s2Json', 'r' ) as s2Jsondata:
+ s2Json = json.load( s2Jsondata )
+ except IOError:
+ main.log.exception( "s2Json File not found" )
+ main.cleanup()
+ main.exit()
+ main.log.info( "s2Json:" + str( s2Json ) )
main.s2Json = s2Json
setS2Allow = main.ONOSrest2.setNetCfg( s2Json,
subjectClass="devices",
@@ -576,10 +594,15 @@
onfail="Net Cfg for device s2 not correctly set" )
main.step( "Add Net Cfg for switch4" )
- s4Json = { "rackAddress": 4,
- "name": "Switch4",
- "owner": "John",
- "allowed": False }
+
+ try:
+ with open( os.path.dirname( main.testFile ) + '/dependencies/s4Json', 'r' ) as s4Jsondata:
+ s4Json = json.load( s4Jsondata )
+ except IOError:
+ main.log.exception( "s4Json File not found" )
+ main.cleanup()
+ main.exit()
+ main.log.info( "s4Json:" + str( s4Json ) )
main.s4Json = s4Json
setS4Disallow = main.ONOSrest3.setNetCfg( s4Json,
subjectClass="devices",
@@ -826,8 +849,24 @@
" of allowed devices" )
main.step( "Removing allowed status from Switches 5 and 6" )
- main.s5Json = { "allowed": False }
- main.s6Json = { "allowed": False }
+ try:
+ with open( os.path.dirname( main.testFile ) + '/dependencies/s5Json', 'r' ) as s5Jsondata:
+ main.s5Json = json.load( s5Jsondata )
+ except IOError:
+ main.log.exception( "s5Json File not found" )
+ main.cleanup()
+ main.exit()
+ main.log.info( "s5Json:" + str( main.s5Json ) )
+
+ try:
+ with open( os.path.dirname( main.testFile ) + '/dependencies/s6Json', 'r' ) as s6Jsondata:
+ main.s6Json = json.load( s6Jsondata )
+ except IOError:
+ main.log.exception( "s6Json File not found" )
+ main.cleanup()
+ main.exit()
+ main.log.info( "s6Json:" + str( main.s6Json ) )
+
s5Json = main.s5Json
setS1 = main.ONOSrest1.setNetCfg( s5Json,
subjectClass="devices",
diff --git a/TestON/tests/FUNC/FUNCnetCfg/dependencies/s1Json b/TestON/tests/FUNC/FUNCnetCfg/dependencies/s1Json
new file mode 100644
index 0000000..8576c24
--- /dev/null
+++ b/TestON/tests/FUNC/FUNCnetCfg/dependencies/s1Json
@@ -0,0 +1,6 @@
+{
+"rackAddress":1,
+"name":"Switch1",
+"owner":"Jimmy",
+"allowed":true
+}
diff --git a/TestON/tests/FUNC/FUNCnetCfg/dependencies/s2Json b/TestON/tests/FUNC/FUNCnetCfg/dependencies/s2Json
new file mode 100644
index 0000000..98108dc
--- /dev/null
+++ b/TestON/tests/FUNC/FUNCnetCfg/dependencies/s2Json
@@ -0,0 +1,6 @@
+{
+"rackAddress":2,
+"name":"Switch2",
+"owner":"Jenny",
+"allowed":true
+}
diff --git a/TestON/tests/FUNC/FUNCnetCfg/dependencies/s3Json b/TestON/tests/FUNC/FUNCnetCfg/dependencies/s3Json
new file mode 100644
index 0000000..a6ea22a
--- /dev/null
+++ b/TestON/tests/FUNC/FUNCnetCfg/dependencies/s3Json
@@ -0,0 +1,6 @@
+{
+"rackAddress":3,
+"name":"Switch3",
+"owner":"Jane",
+"allowed":false
+}
diff --git a/TestON/tests/FUNC/FUNCnetCfg/dependencies/s4Json b/TestON/tests/FUNC/FUNCnetCfg/dependencies/s4Json
new file mode 100644
index 0000000..169f84e
--- /dev/null
+++ b/TestON/tests/FUNC/FUNCnetCfg/dependencies/s4Json
@@ -0,0 +1,6 @@
+{
+"rackAddress":4,
+"name":"Switch4",
+"owner":"John",
+"allowed":false
+}
diff --git a/TestON/tests/FUNC/FUNCnetCfg/dependencies/s5Json b/TestON/tests/FUNC/FUNCnetCfg/dependencies/s5Json
new file mode 100644
index 0000000..fb92470
--- /dev/null
+++ b/TestON/tests/FUNC/FUNCnetCfg/dependencies/s5Json
@@ -0,0 +1,3 @@
+{
+"allowed":false
+}
diff --git a/TestON/tests/FUNC/FUNCnetCfg/dependencies/s6Json b/TestON/tests/FUNC/FUNCnetCfg/dependencies/s6Json
new file mode 100644
index 0000000..fb92470
--- /dev/null
+++ b/TestON/tests/FUNC/FUNCnetCfg/dependencies/s6Json
@@ -0,0 +1,3 @@
+{
+"allowed":false
+}
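The new dependency files use JSON literals (lowercase true/false, double-quoted keys) in place of the inline Python dicts they replace; json.load turns them back into the same dictionaries the REST driver expects:

    import json
    # Under Python 2, keys come back as unicode strings and booleans as True/False.
    print( json.loads( '{ "allowed": false }' ) )    # {u'allowed': False}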
diff --git a/TestON/tests/SCPF/SCPFintentRerouteLat/SCPFintentRerouteLat.params b/TestON/tests/SCPF/SCPFintentRerouteLat/SCPFintentRerouteLat.params
index c3c0ff6..64e0ab5 100644
--- a/TestON/tests/SCPF/SCPFintentRerouteLat/SCPFintentRerouteLat.params
+++ b/TestON/tests/SCPF/SCPFintentRerouteLat/SCPFintentRerouteLat.params
@@ -7,7 +7,7 @@
<ENV>
<cellName>intentRerouteCell</cellName>
- <cellApps>drivers,null,intentperf,metrics</cellApps>
+ <cellApps>drivers,null,metrics</cellApps>
</ENV>
<DEPENDENCY>
@@ -60,7 +60,7 @@
<startup>5</startup>
<setmaster>5</setmaster>
<install>10</install>
- <verify>5</verify>
+ <verify>10</verify>
# timeout for pexpect
<timeout>300</timeout>
</SLEEP>
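The <verify> sleep doubles from 5 to 10 seconds. Assuming it is read into main.verifySleep and used by the sanityCheck helper further down this patch, which sleeps once before every verification attempt, the change also stretches the worst-case time one verification loop can block. A rough sketch with a hypothetical attempt count (the attempts value is not part of this diff):

    verifySleep = 10      # <verify>, was 5 before this change
    verifyAttempts = 10   # hypothetical; configured elsewhere
    # the loop sleeps once per attempt, including the final failing one
    worstCaseWait = verifySleep * ( verifyAttempts + 1 )
    print( worstCaseWait )  # 110 seconds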
diff --git a/TestON/tests/SCPF/SCPFintentRerouteLat/SCPFintentRerouteLat.py b/TestON/tests/SCPF/SCPFintentRerouteLat/SCPFintentRerouteLat.py
index ac0a673..ae0af63 100644
--- a/TestON/tests/SCPF/SCPFintentRerouteLat/SCPFintentRerouteLat.py
+++ b/TestON/tests/SCPF/SCPFintentRerouteLat/SCPFintentRerouteLat.py
@@ -248,7 +248,8 @@
main.CLIs[0].setCfg("org.onosproject.provider.nil.NullProviders", "deviceCount", value=main.deviceCount)
main.CLIs[0].setCfg("org.onosproject.provider.nil.NullProviders", "topoShape", value="reroute")
main.CLIs[0].setCfg("org.onosproject.provider.nil.NullProviders", "enabled", value="true")
- main.CLIs[0].setCfg("org.onosproject.store.flow.impl.DistributedFlowRuleStore", "backupEnabled", value="false")
+ # "bakcupEnabled" should be true by default. Not sure if it's intended or not to disable it. Seems no impact to test results.
+ #main.CLIs[0].setCfg("org.onosproject.store.flow.impl.DistributedFlowRuleStore", "backupEnabled", value="false")
if main.flowObj:
main.CLIs[0].setCfg("org.onosproject.net.intent.impl.compiler.IntentConfigurableRegistrator",
"useFlowObjectives", value="true")
@@ -271,24 +272,21 @@
import json
# from scipy import stats
- ts = time.time()
print(main.intentsList)
for batchSize in main.intentsList:
main.batchSize = batchSize
main.log.report("Intent Batch size: " + str(batchSize) + "\n ")
- main.LatencyList = []
- main.LatencyListTopoToFirstInstalled = []
- main.LatencyListFirstInstalledToLastInstalled = []
+ firstLocalLatencies = []
+ lastLocalLatencies = []
+ firstGlobalLatencies = []
+ lastGlobalLatencies = []
+ main.startLine = {}
main.validRun = 0
main.invalidRun = 0
- # initial a variables to record the term of startLine in karaf logs of each node
- main.totalLines = []
- for i in range( main.numCtrls ):
- main.totalLines.append( '' )
while main.validRun <= main.warmUp + main.sampleSize and main.invalidRun <= main.maxInvalidRun:
if main.validRun >= main.warmUp:
main.log.info("================================================")
- main.log.info("Starting test iteration: {} ".format( main.validRun - main.warmUp))
+ main.log.info("Valid iteration: {} ".format( main.validRun - main.warmUp))
main.log.info("Total iteration: {}".format( main.validRun + main.invalidRun))
main.log.info("================================================")
else:
@@ -298,140 +296,112 @@
main.CLIs[0].pushTestIntents(main.ingress, main.egress, main.batchSize,
offset=1, options="-i", timeout=main.timeout)
- # check links and flows
- k = 0
- main.verify = main.FALSE
- linkCheck = 0
- flowsCheck = 0
- while k <= main.verifyAttempts:
- time.sleep(main.verifySleep)
- summary = json.loads(main.CLIs[0].summary(timeout=main.timeout))
- linkCheck = summary.get("links")
- flowsCheck = summary.get("flows")
- if linkCheck == main.deviceCount * 2 and flowsCheck == batchSize * (main.deviceCount - 1 ):
- main.log.info("links: {}, flows: {} ".format(linkCheck, flowsCheck))
- main.verify = main.TRUE
- break
- k += 1
+ # check links, flows and intents
+ main.intentRerouteLatFuncs.sanityCheck( main, main.deviceCount * 2, batchSize * (main.deviceCount - 1 ), main.batchSize )
if not main.verify:
- main.log.warn("Links or flows number not as expected")
- main.log.warn("links: {}, flows: {} ".format(linkCheck, flowsCheck))
- # bring back topology
- main.intentRerouteLatFuncs.bringBackTopology( main )
- if main.validRun >= main.warmUp:
- main.invalidRun += 1
- continue
- else:
- main.validRun += 1
- continue
- # Bring link down
+ main.log.warn( "Sanity check failed, skipping this iteration..." )
+ continue
+
+ # Insert one line in karaf.log before link down
+ for i in range( main.numCtrls ):
+ main.CLIs[ i ].log( "\'Scale: {}, Batch:{}, Iteration: {}\'".format( main.numCtrls, batchSize, main.validRun + main.invalidRun ) )
+
+ # bring link down
main.CLIs[0].link( main.end1[ 'port' ], main.end2[ 'port' ], "down",
timeout=main.timeout, showResponse=False)
- main.verify = main.FALSE
- k = 0
- while k <= main.verifyAttempts:
- time.sleep(main.verifySleep)
- summary = json.loads(main.CLIs[0].summary(timeout=main.timeout))
- linkCheck = summary.get("links")
- flowsCheck = summary.get("flows")
- if linkCheck == (main.deviceCount - 1) * 2 and flowsCheck == batchSize * main.deviceCount:
- main.log.info("links: {}, flows: {} ".format(linkCheck, flowsCheck))
- main.verify = main.TRUE
- break
- k += 1
+
+ # check links, flows and intents
+ main.intentRerouteLatFuncs.sanityCheck( main, (main.deviceCount - 1) * 2, batchSize * main.deviceCount, main.batchSize )
if not main.verify:
- main.log.warn("Links or flows number not as expected")
- main.log.warn("links: {}, flows: {} ".format(linkCheck, flowsCheck))
- # bring back topology
- main.intentRerouteLatFuncs.bringBackTopology( main )
- if main.validRun >= main.warmUp:
- main.invalidRun += 1
- continue
- else:
- main.validRun += 1
- continue
- # record the link romving time as the startLine
+ main.log.warn( "Sanity check failed, skipping this iteration..." )
+ continue
+
+ # Get timestamp of last LINK_REMOVED event as separator between iterations
+ skip = False
for i in range( main.numCtrls ):
- logNum = main.intentRerouteLatFuncs.checkLog( main, i )
- main.totalLines[i] = str(main.CLIs[ i ].getTimeStampFromLog( "last", "LINK_REMOVED", "time = ", " ", logNum=logNum ))
- main.log.info("Node " + str( i+1 ) + ": the start timestamp is " + main.totalLines[i] + " this iteration" )
- #Calculate values
- lastTopologyToFirstInstalled, firstInstalledToLastInstalled, totalTime = main.intentRerouteLatFuncs.getValues( main )
- if totalTime == -1:
+ logNum = main.intentRerouteLatFuncs.getLogNum( main, i )
+ timestamp = str( main.CLIs[ i ].getTimeStampFromLog( "last", "LINK_REMOVED", "time = ", " ", logNum=logNum ) )
+ if timestamp == main.ERROR:
+ # Try again in case the log number just increased
+ logNum = main.intentRerouteLatFuncs.getLogNum( main, i )
+ timestamp = str( main.CLIs[ i ].getTimeStampFromLog( "last", "LINK_REMOVED", "time = ", " ", logNum=logNum ) )
+ if timestamp == main.ERROR:
+ main.log.warn( "Cannot find the event we want in the log, skipping this iteration..." )
+ main.intentRerouteLatFuncs.bringBackTopology( main )
+ if main.validRun >= main.warmUp:
+ main.invalidRun += 1
+ else:
+ main.validRun += 1
+ skip = True
+ break
+ else:
+ main.startLine[ i ] = timestamp
+ main.log.info( "Timestamp of last LINK_REMOVED event on node {} is {}".format( i+1, main.startLine[ i ] ) )
+ if skip: continue
+
+ # calculate values
+ topologyTimestamps = main.intentRerouteLatFuncs.getTopologyTimestamps( main )
+ intentTimestamps = main.intentRerouteLatFuncs.getIntentTimestamps( main )
+ if intentTimestamps == main.ERROR or topologyTimestamps == main.ERROR:
+ main.log.info( "Got invalid timestamp, skipping this iteration..." )
+ main.intentRerouteLatFuncs.bringBackTopology( main )
if main.validRun >= main.warmUp:
main.invalidRun += 1
else:
main.validRun += 1
continue
else:
- main.log.info("Get valid latency")
+ main.log.info( "Got valid timestamps" )
+
+ firstLocalLatency, lastLocalLatency, firstGlobalLatency, lastGlobalLatency = main.intentRerouteLatFuncs.calculateLatency( main, topologyTimestamps, intentTimestamps )
+ if firstLocalLatency < 0:
+ main.log.info( "Got negative latency, skipping this iteration..." )
+ main.intentRerouteLatFuncs.bringBackTopology( main )
+ if main.validRun >= main.warmUp:
+ main.invalidRun += 1
+ else:
+ main.validRun += 1
+ continue
+ else:
+ main.log.info( "Got valid latencies" )
main.validRun += 1
- # Verify Summary after we bring up link, and withdrawn intents
+ firstLocalLatencies.append( firstLocalLatency )
+ lastLocalLatencies.append( lastLocalLatency )
+ firstGlobalLatencies.append( firstGlobalLatency )
+ lastGlobalLatencies.append( lastGlobalLatency )
+
+ # bring up link and withdraw intents
main.CLIs[0].link( main.end1[ 'port' ], main.end2[ 'port' ], "up",
timeout=main.timeout)
- k = 0
- main.verify = main.FALSE
- linkCheck = 0
- flowsCheck = 0
- while k <= main.verifyAttempts:
- time.sleep(main.verifySleep)
- main.CLIs[0].pushTestIntents(main.ingress, main.egress, batchSize,
- offset=1, options="-w", timeout=main.timeout)
- main.CLIs[0].purgeWithdrawnIntents()
- summary = json.loads(main.CLIs[0].summary())
- linkCheck = summary.get("links")
- flowsCheck = summary.get("flows")
- intentCheck = summary.get("intents")
- if linkCheck == main.deviceCount * 2 and flowsCheck == 0 and intentCheck == 0:
- main.log.info("links: {}, flows: {}, intents: {} ".format(linkCheck, flowsCheck, intentCheck))
- main.verify = main.TRUE
- break
- k += 1
+ main.CLIs[0].pushTestIntents(main.ingress, main.egress, batchSize,
+ offset=1, options="-w", timeout=main.timeout)
+ main.CLIs[0].purgeWithdrawnIntents()
+
+ # check links, flows and intents
+ main.intentRerouteLatFuncs.sanityCheck( main, main.deviceCount * 2, 0, 0 )
if not main.verify:
- main.log.error("links, flows or intents number not as expected")
- main.log.info("links: {}, flows: {}, intents: {} ".format(linkCheck, flowsCheck, intentCheck))
- # bring back topology
- main.intentRerouteLatFuncs.bringBackTopology( main )
continue
- #main.log.info("total negative results num: " + str( main.record ) )
- aveLatency = 0
- aveLatencyTopoToFirstInstalled = 0
- aveLatencyFirstInstalledToLastInstalled = 0
-
- stdLatency = 0
- stdLatencyTopoToFirstInstalled = 0
- stdLatencyFirstInstalledToLastInstalled = 0
-
- aveLatency = numpy.average( main.LatencyList )
- aveLatencyTopoToFirstInstalled = numpy.average( main.LatencyListTopoToFirstInstalled )
- aveLatencyFirstInstalledToLastInstalled = numpy.average( main.LatencyListFirstInstalledToLastInstalled )
-
- stdLatency = numpy.std( main.LatencyList )
- stdLatencyTopoToFirstInstalled = numpy.std( main.LatencyListTopoToFirstInstalled )
- stdLatencyFirstInstalledToLastInstalled = numpy.std( main.LatencyListFirstInstalledToLastInstalled )
+ aveLocalLatency = numpy.average( lastLocalLatencies )
+ aveGlobalLatency = numpy.average( lastGlobalLatencies )
+ stdLocalLatency = numpy.std( lastLocalLatencies )
+ stdGlobalLatency = numpy.std( lastGlobalLatencies )
main.log.report( "Scale: " + str( main.numCtrls ) + " \tIntent batch: " + str( batchSize ) )
- main.log.report( "Total Latency average:................" + str( aveLatency ) )
- main.log.report( "Latency standard deviation:..........." + str( stdLatency ) )
- main.log.report( "Last Topology to first installed Latency average:................." + str( aveLatencyTopoToFirstInstalled ) )
- main.log.report( "Last Topology to first installed Latency standard deviation:......" + str( stdLatencyTopoToFirstInstalled ) )
- main.log.report( "First installed to last installed Latency average:................" + str( aveLatencyFirstInstalledToLastInstalled ) )
- main.log.report( "First installed to last installed Latency standard deviation:....." + str( stdLatencyFirstInstalledToLastInstalled ) )
+ main.log.report( "Local latency average:................" + str( aveLocalLatency ) )
+ main.log.report( "Global latency average:................" + str( aveGlobalLatency ) )
+ main.log.report( "Local latency std:................" + str( stdLocalLatency ) )
+ main.log.report( "Global latency std:................" + str( stdGlobalLatency ) )
main.log.report( "________________________________________________________" )
- if not (numpy.isnan(aveLatency) or numpy.isnan(stdLatency)):
+ if not ( numpy.isnan( aveLocalLatency ) or numpy.isnan( aveGlobalLatency ) ):
# check if got NaN for result
- resultsDB = open(main.dbFileName, "a")
- resultsDB.write("'" + main.commit + "',")
- resultsDB.write(str(main.numCtrls) + ",")
- resultsDB.write(str(batchSize) + ",")
- resultsDB.write(str(aveLatency) + ",")
- resultsDB.write(str(stdLatency) + ",")
- resultsDB.write(str(aveLatencyTopoToFirstInstalled) + ",")
- resultsDB.write(str(stdLatencyTopoToFirstInstalled) + ",")
- resultsDB.write(str(aveLatencyFirstInstalledToLastInstalled) + ",")
- resultsDB.write(str(stdLatencyFirstInstalledToLastInstalled) + "\n")
+ resultsDB = open( main.dbFileName, "a" )
+ resultsDB.write( "'" + main.commit + "'," )
+ resultsDB.write( str( main.numCtrls ) + "," )
+ resultsDB.write( str( batchSize ) + "," )
+ resultsDB.write( str( aveLocalLatency ) + "," )
+ resultsDB.write( str( stdLocalLatency ) + "\n" )
resultsDB.close()
- del main.scale[0]
+ del main.scale[ 0 ]
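The row written to main.dbFileName shrinks from nine fields to five: commit, scale, batch size, average local latency, and local latency standard deviation. Anything downstream that parses this file (presumably the SCPF Jenkins pipeline this change targets) has to expect the shorter format. A sketch of one row with invented values:

    # Illustration only; commit string and numbers are made up.
    commit = "ONOS 1.10.0.rc2"
    numCtrls, batchSize = 5, 100
    aveLocalLatency, stdLocalLatency = 13.2, 1.7
    row = "'{}',{},{},{},{}\n".format( commit, numCtrls, batchSize, aveLocalLatency, stdLocalLatency )
    # 'ONOS 1.10.0.rc2',5,100,13.2,1.7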
diff --git a/TestON/tests/SCPF/SCPFintentRerouteLat/dependencies/intentRerouteLatFuncs.py b/TestON/tests/SCPF/SCPFintentRerouteLat/dependencies/intentRerouteLatFuncs.py
index 24cf32b..1f830ad 100644
--- a/TestON/tests/SCPF/SCPFintentRerouteLat/dependencies/intentRerouteLatFuncs.py
+++ b/TestON/tests/SCPF/SCPFintentRerouteLat/dependencies/intentRerouteLatFuncs.py
@@ -4,20 +4,40 @@
'''
import numpy
import time
+import json
def _init_( self ):
self.default = ''
-def checkLog( main, nodeId ):
- try:
- logNames = main.ONOSbench.listLog( main.onosIp[ nodeId ] )
- assert logNames is not None
- if len( logNames ) >= 2:
- return 2
- return 1
- except AssertionError:
- main.log.error("There is no karaf log")
- return -1
+def sanityCheck( main, linkNumExpected, flowNumExpected, intentNumExpected ):
+ '''
+ Sanity check on numbers of links, flows and intents in ONOS
+ '''
+ attempts = 0
+ main.verify = main.FALSE
+ linkNum = 0
+ flowNum = 0
+ intentNum = 0
+ while attempts <= main.verifyAttempts:
+ time.sleep(main.verifySleep)
+ summary = json.loads( main.CLIs[0].summary( timeout=main.timeout ) )
+ linkNum = summary.get("links")
+ flowNum = summary.get("flows")
+ intentNum = summary.get("intents")
+ if linkNum == linkNumExpected and flowNum == flowNumExpected and intentNum == intentNumExpected:
+ main.log.info("links: {}, flows: {}, intents: {}".format(linkNum, flowNum, intentNum))
+ main.verify = main.TRUE
+ break
+ attempts += 1
+ if not main.verify:
+ main.log.warn("Links or flows or intents number not as expected")
+ main.log.warn("links: {}, flows: {}, intents: {}".format(linkNum, flowNum, intentNum))
+ # bring back topology
+ bringBackTopology( main )
+ if main.validRun >= main.warmUp:
+ main.invalidRun += 1
+ else:
+ main.validRun += 1
def bringBackTopology( main ):
main.log.info( "Bring back topology " )
@@ -35,75 +55,86 @@
main.CLIs[ 0 ].deviceRole(main.end2[ 'name' ], main.ONOSip[ 0 ])
time.sleep( main.setMasterSleep )
-def getValues( main ):
+def getLogNum( main, nodeId ):
'''
- Calculated the wanted values for intentRerouteTest
-
- 1. Get the first "last topology timestamp" from karaf.log in different node
- 2. Get the first "first intent installed timestamp" from karaf log in different node
- 3. Get the last "last intent installed timestamp" from karaf log in different node
-
- Return:
- last_topology_to_first_installed: The time from the last topology to the first intent installed
- first_installed_to_last_installed: Time time from the first topology to the last intent installed
- totalTime: The time from the last topology to the last intent installed
-
+ Return the number of karaf log files
'''
- lastTopologyTimestamp = compareTimestamp( main, main.searchTerm[ "TopologyTime" ], "creationTime=", ",", 'last',func='min' )
- firstIntentInstalledTimestamp = compareTimestamp( main, main.searchTerm[ "InstallTime" ], "time = ", " ", 'first',func='min' )
- lastIntentInstalledTimestamp = compareTimestamp( main, main.searchTerm[ "InstallTime" ], "time = ", " ", 'last',func='max' )
-
- if lastTopologyTimestamp == -1 or firstIntentInstalledTimestamp == -1 or lastIntentInstalledTimestamp == -1:
- main.log.warn( "Can't get timestamp from karaf log! " )
- bringBackTopology( main )
- return -1, -1, -1
-
- #calculate values
- lastTopologyToFirstInstalled = firstIntentInstalledTimestamp - lastTopologyTimestamp
- if lastTopologyToFirstInstalled < 0:
- main.record = main.record + 1
-
- firstInstalledToLastInstalled = lastIntentInstalledTimestamp - firstIntentInstalledTimestamp
- totalTime = lastIntentInstalledTimestamp - lastTopologyTimestamp
-
- if main.validRun >= main.warmUp and main.verify:
- main.log.info( "Last topology time stamp: {0:f}".format( lastTopologyTimestamp ))
- main.log.info( "First installed time stamp: {0:f}".format( firstIntentInstalledTimestamp ))
- main.log.info( "Last installed time stamp: {0:f}".format( lastIntentInstalledTimestamp ))
- main.log.info( "Last topology to first installed latency:{0:f}".format( lastTopologyToFirstInstalled ))
- main.log.info( "First installed to last installed latency:{0:f}".format( firstInstalledToLastInstalled ))
- main.log.info( "Overall latency:{0:f}".format( totalTime ))
- main.LatencyList.append( totalTime )
- main.LatencyListTopoToFirstInstalled.append( lastTopologyToFirstInstalled )
- main.LatencyListFirstInstalledToLastInstalled.append( firstInstalledToLastInstalled )
- return lastTopologyToFirstInstalled, firstInstalledToLastInstalled, totalTime
-
-def compareTimestamp( main, compareTerm, splitTerm_before, splitTerm_after, mode, func='max' ):
- '''
- Compare all the timestamps of compareTerm from different node.
-
- func:
- max: Compare which one is the biggest and retun it
- min: Compare which one is the smallest and return it
-
- return:
- This function will return the biggest or smallest timestamps of the compareTerm.
-
- '''
- compareTermList = []
- for i in range( main.numCtrls ):
- timestamp = main.CLIs[ i ].getTimeStampFromLog( mode, compareTerm, splitTerm_before, splitTerm_after, startLine=main.totalLines[ i ], logNum=checkLog( main, i ) )
- compareTermList.append( timestamp )
- main.log.info("-----------------------------------------------")
- for i in range( main.numCtrls ):
- main.log.info( "ONOS Node {} {} {} time stamp: {}".format((i+1), mode, compareTerm, compareTermList[ i ]))
- x = min( compareTermList )
- main.log.info("-----------------------------------------------")
- if x == -1:
- main.log.warn( "Can't compare timestamps" )
+ try:
+ logNameList = main.ONOSbench.listLog( main.onosIp[ nodeId ] )
+ assert logNameList is not None
+ # FIXME: are two karaf logs enough to cover the events we want?
+ if len( logNameList ) >= 2:
+ return 2
+ return 1
+ except AssertionError:
+ main.log.error("There is no karaf log")
return -1
- else:
- if func == 'max':
- return max( compareTermList )
- if func == 'min':
- return min( compareTermList )
+
+def getTopologyTimestamps( main ):
+ '''
+ Get timestamps for the last topology events on all cluster nodes
+ '''
+ timestamps = []
+ for i in range( main.numCtrls ):
+ # Search for last topology event in karaf log
+ lines = main.CLIs[ i ].logSearch( mode='last', searchTerm=main.searchTerm[ "TopologyTime" ], startLine=main.startLine[ i ], logNum=getLogNum( main, i ) )
+ if lines is None or len( lines ) != 1:
+ main.log.error( "Error when trying to get topology event timestamp" )
+ return main.ERROR
+ try:
+ timestampField = lines[0].split( "creationTime=" )
+ timestamp = timestampField[ 1 ].split( "," )
+ timestamp = int( timestamp[ 0 ] )
+ timestamps.append( timestamp )
+ except ( IndexError, ValueError ):
+ main.log.error( "Error when trying to parse topology event timestamp" )
+ return main.ERROR
+ return timestamps
+
+def getIntentTimestamps( main ):
+ '''
+ Get timestamps for all intent keys on all cluster nodes
+ '''
+ timestamps = {}
+ for i in range( main.numCtrls ):
+ # Search for intent INSTALLED event in karaf log
+ lines = main.CLIs[ i ].logSearch( mode='all', searchTerm=main.searchTerm[ "InstallTime" ], startLine=main.startLine[ i ], logNum=getLogNum( main, i ) )
+ if lines is None or len( lines ) == 0:
+ main.log.error( "Error when trying to get intent key or timestamp" )
+ return main.ERROR
+ for line in lines:
+ try:
+ # Get intent key
+ keyField = line.split( "key=" )
+ key = keyField[ 1 ].split( "," )
+ key = key[ 0 ]
+ if key not in timestamps:
+ timestamps[ key ] = []
+ # Get timestamp
+ timestampField = line.split( "time = " )
+ timestamp = timestampField[ 1 ].split( " " )
+ timestamp = int( timestamp[ 0 ] )
+ timestamps[ key ].append( timestamp )
+ except ( IndexError, ValueError ):
+ main.log.error( "Error when trying to get intent key or timestamp" )
+ return main.ERROR
+ return timestamps
+
+def calculateLatency( main, topologyTimestamps, intentTimestamps ):
+ '''
+ Calculate reroute latency values using timestamps
+ '''
+ topologyTimestamp = numpy.min( topologyTimestamps )
+ firstInstalledLatency = {}
+ lastInstalledLatency = {}
+ for key in intentTimestamps.keys():
+ firstInstalledTimestamp = numpy.min( intentTimestamps[ key ] )
+ lastInstalledTimestamp = numpy.max( intentTimestamps[ key ] )
+ firstInstalledLatency[ key ] = firstInstalledTimestamp - topologyTimestamp
+ lastInstalledLatency[ key ] = lastInstalledTimestamp - topologyTimestamp
+ firstLocalLatency = numpy.min( firstInstalledLatency.values() )
+ lastLocalLatency = numpy.max( firstInstalledLatency.values() )
+ firstGlobalLatency = numpy.min( lastInstalledLatency.values() )
+ lastGlobalLatency = numpy.max( lastInstalledLatency.values() )
+ main.log.info( "firstLocalLatency: {}, lastLocalLatency: {}, firstGlobalLatency: {}, lastGlobalLatency: {}".format( firstLocalLatency, lastLocalLatency, firstGlobalLatency, lastGlobalLatency ) )
+ return firstLocalLatency, lastLocalLatency, firstGlobalLatency, lastGlobalLatency
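A worked illustration of what calculateLatency reports, using invented millisecond timestamps: the earliest topology event across nodes is the reference point, each intent key gets the min/max of its install times across nodes, and the four reported values are the min/max of those per-key latencies:

    # Invented numbers; real values come from the karaf logs.
    topologyTimestamps = [ 1000, 1003, 1005 ]             # last topology event seen on each node
    intentTimestamps = { "key1": [ 1040, 1055, 1060 ],    # install times for key1 on each node
                         "key2": [ 1052, 1049, 1070 ] }

    topo = min( topologyTimestamps )                                                       # 1000
    firstInstalled = dict( ( k, min( v ) - topo ) for k, v in intentTimestamps.items() )   # key1: 40, key2: 49
    lastInstalled = dict( ( k, max( v ) - topo ) for k, v in intentTimestamps.items() )    # key1: 60, key2: 70
    print( min( firstInstalled.values() ) )   # 40  firstLocalLatency
    print( max( firstInstalled.values() ) )   # 49  lastLocalLatency
    print( min( lastInstalled.values() ) )    # 60  firstGlobalLatency
    print( max( lastInstalled.values() ) )    # 70  lastGlobalLatency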