Merge "Add HA test for restoring from and offline backup"
diff --git a/TestON/JenkinsFile/FUNCJenkinsFile b/TestON/JenkinsFile/FUNCJenkinsFile
index 857ff54..4a820eb 100644
--- a/TestON/JenkinsFile/FUNCJenkinsFile
+++ b/TestON/JenkinsFile/FUNCJenkinsFile
@@ -1,192 +1,39 @@
#!groovy
-import groovy.time.*
+funcs = evaluate readTrusted( 'TestON/JenkinsFile/JenkinsCommonFuncs.groovy' )
+funcs.initialize( "FUNC" );
// This is a Jenkinsfile for a scripted pipeline for the FUNC tests
def prop = null
-node("TestStation-VMs"){
- prop = readProperties(file:'/var/jenkins/TestONOS.property')
-}
-// TODO: Exception handling around steps
+prop = funcs.getProperties()
FUNC = [
-"FUNCipv6Intent" : [wiki_link:prop["WikiPrefix"]+"-"+"FUNCipv6Intent", wiki_file:"FUNCipv6IntentWiki.txt", ],
-"FUNCoptical" : [wiki_link:prop["WikiPrefix"]+"-"+"FUNCoptical", wiki_file:"FUNCopticalWiki.txt"],
-"FUNCflow" : [wiki_link:prop["WikiPrefix"]+"-"+"FUNCflow", wiki_file:"FUNCflowWiki.txt"],
-"FUNCnetCfg": [wiki_link:prop["WikiPrefix"]+"-"+"FUNCnetCfg", wiki_file:"FUNCnetCfgWiki.txt"],
-"FUNCovsdbtest" : [wiki_link:prop["WikiPrefix"]+"-"+"FUNCovsdbtestWiki", wiki_file:"FUNCovsdbtestWiki.txt"],
-"FUNCnetconf" : [wiki_link:prop["WikiPrefix"]+"-"+"FUNCnetconf", wiki_file:"FUNCnetconfWiki.txt"],
-"FUNCgroup" : [wiki_link:prop["WikiPrefix"]+"-"+"FUNCgroup", wiki_file:"FUNCgroupWiki.txt"],
-"FUNCintent" : [wiki_link:prop["WikiPrefix"]+"-"+"FUNCintent", wiki_file:"FUNCintentWiki.txt"],
-"FUNCintentRest" : [wiki_link:prop["WikiPrefix"]+"-"+"FUNCintentRest", wiki_file:"FUNCintentRestWiki.txt"],
-"FUNCformCluster" : [wiki_link:prop["WikiPrefix"]+"-"+"FUNCformCluster", wiki_file:"FUNCformClusterWiki.txt"]
+"FUNCipv6Intent" : [ wiki_link:prop[ "WikiPrefix" ] + "-" + "FUNCipv6Intent", wiki_file:"FUNCipv6IntentWiki.txt" ],
+"FUNCoptical" : [ wiki_link:prop[ "WikiPrefix" ] + "-" + "FUNCoptical", wiki_file:"FUNCopticalWiki.txt" ],
+"FUNCflow" : [ wiki_link:prop[ "WikiPrefix" ] + "-" + "FUNCflow", wiki_file:"FUNCflowWiki.txt" ],
+"FUNCnetCfg": [ wiki_link:prop[ "WikiPrefix" ] + "-" + "FUNCnetCfg", wiki_file:"FUNCnetCfgWiki.txt" ],
+"FUNCovsdbtest" : [ wiki_link:prop[ "WikiPrefix" ] + "-" + "FUNCovsdbtestWiki", wiki_file:"FUNCovsdbtestWiki.txt" ],
+"FUNCnetconf" : [ wiki_link:prop[ "WikiPrefix" ] + "-" + "FUNCnetconf", wiki_file:"FUNCnetconfWiki.txt" ],
+"FUNCgroup" : [ wiki_link:prop[ "WikiPrefix" ] + "-" + "FUNCgroup", wiki_file:"FUNCgroupWiki.txt" ],
+"FUNCintent" : [ wiki_link:prop[ "WikiPrefix" ] + "-" + "FUNCintent", wiki_file:"FUNCintentWiki.txt" ],
+"FUNCintentRest" : [ wiki_link:prop[ "WikiPrefix" ] + "-" + "FUNCintentRest", wiki_file:"FUNCintentRestWiki.txt" ],
+"FUNCformCluster" : [ wiki_link:prop[ "WikiPrefix" ] + "-" + "FUNCformCluster", wiki_file:"FUNCformClusterWiki.txt" ]
]
-table_name = "executed_test_tests"
-result_name = "executed_test_results"
graph_generator_file = "~/OnosSystemTest/TestON/JenkinsFile/scripts/testCaseGraphGenerator.R"
-graph_saved_directory = "/var/jenkins/workspace/Pipeline_postjob_VM/"
-echo("Testcases:")
+graph_saved_directory = "/var/jenkins/workspace/postjob-VM/"
+echo( "Testcases:" )
def testsToRun = null
-testsToRun = prop["Tests"].tokenize("\n;, ")
+testsToRun = funcs.getTestsToRun( prop[ "Tests" ] )
+funcs.printTestToRun( testsToRun )
-for ( String test : testsToRun ) {
- println test
-}
def tests = [:]
for( String test : FUNC.keySet() ){
toBeRun = testsToRun.contains( test )
def stepName = ( toBeRun ? "" : "Not " ) + "Running $test"
- tests[stepName] = FUNCTest(test, toBeRun, prop)
+ tests[ stepName ] = funcs.runTest( test, toBeRun, prop, test, false, FUNC, graph_generator_file, graph_saved_directory )
}
-def now = new Date()
+start = funcs.getCurrentTime()
// run the tests
for ( test in tests.keySet() ){
- tests[test].call()
+ tests[ test ].call()
}
-try{
- if( prop["manualRun"] == "false" ){
- def end = new Date()
- TimeDuration duration = TimeCategory.minus( end, now )
- slackSend( color:"#5816EE", message: "FUNC tests ended at: " + end.toString() + "\nTime took : " + duration )
- }
-}
-catch(all){}
-// The testName should be the key from the FUNC
-def FUNCTest( testName, toBeRun, prop ) {
- return {
- catchError{
- stage(testName) {
- if ( toBeRun ){
- workSpace = "/var/jenkins/workspace/"+testName
- def fileContents = ""
- node("TestStation-VMs"){
- withEnv(['ONOSBranch='+prop["ONOSBranch"],
- 'ONOSJVMHeap='+prop["ONOSJVMHeap"],
- 'TestONBranch='+prop["TestONBranch"],
- 'ONOSTag='+prop["ONOSTag"],
- 'WikiPrefix='+prop["WikiPrefix"],
- 'WORKSPACE='+workSpace]){
- sh '''#!/bin/bash -l
- set -i # interactive
- set +e
- shopt -s expand_aliases # expand alias in non-interactive mode
- export PYTHONUNBUFFERED=1
-
- ifconfig
-
- echo "ONOS Branch is: $ONOSBranch"
- echo "TestON Branch is: $TestONBranch"
- echo "Test date: "
- date
-
- cd ~
- export PATH=$PATH:onos/tools/test/bin
-
- timeout 240 stc shutdown | head -100
- timeout 240 stc teardown | head -100
- timeout 240 stc shutdown | head -100
-
- cd ~/OnosSystemTest/TestON/bin
- git log |head
- ./cleanup.sh
- ''' + "./cli.py run " + testName + '''
- # cleanup config changes
- cd ~/onos/tools/package/config
- git clean -df'''
-
- // For the Wiki page
- sh '''#!/bin/bash -i
- set +e
- echo "ONOS Branch is: ${ONOSBranch}"
- echo "TestON Branch is: ${TestONBranch}"
-
- echo "Job name is: "''' + testName + '''
- echo "Workspace is: ${WORKSPACE}/"
-
- echo "Wiki page to post is: ${WikiPrefix}-"
-
- # remove any leftover files from previous tests
- sudo rm ${WORKSPACE}/*Wiki.txt
- sudo rm ${WORKSPACE}/*Summary.txt
- sudo rm ${WORKSPACE}/*Result.txt
- sudo rm ${WORKSPACE}/*.csv
-
- #copy files to workspace
- cd `ls -t ~/OnosSystemTest/TestON/logs/*/ | head -1 | sed 's/://'`
- sudo cp *.txt ${WORKSPACE}/
- sudo cp *.csv ${WORKSPACE}/
- cd ${WORKSPACE}/
- for i in *.csv
- do mv "$i" "$WikiPrefix"-"$i"
- done
- ls -al
- cd '''
- if( prop["manualRun"] == "false" || prop["postResult"] == "true" ){
- // Post Results
- withCredentials([
- string(credentialsId: 'db_pass', variable: 'pass'),
- string(credentialsId: 'db_user', variable: 'user'),
- string(credentialsId: 'db_host', variable: 'host'),
- string(credentialsId: 'db_port', variable: 'port')]) {
- def database_command = pass + "|psql --host=" + host + " --port=" + port + " --username=" + user + " --password --dbname onostest -c \"INSERT INTO " + table_name + " VALUES('\$DATE','" + result_name + "','" + testName + "',\$BUILD_NUMBER, '\$ONOSBranch', \$line);\""
-
- sh '''#!/bin/bash
- export DATE=\$(date +%F_%T)
- cd ~
- pwd
- sed 1d ''' + workSpace + "/" + prop["WikiPrefix"] + "-" + testName + '''.csv | while read line
- do
- echo \$line
- echo ''' + database_command + '''
-
- done
- Rscript ''' + graph_generator_file + " " + host + " " + port + " " + user + " " + pass + " " + testName + " " + prop["ONOSBranch"] + " 20 " + graph_saved_directory
-
- }
- }
- // Fetch Logs
- sh '''#!/bin/bash
- set +e
- cd ~/OnosSystemTest/TestON/logs
- echo "Job Name is: " + ''' + testName + '''
- TestONlogDir=$(ls -t | grep ${TEST_NAME}_ |head -1)
- echo "########################################################################################"
- echo "##### copying ONOS logs from all nodes to TestON/logs directory: ${TestONlogDir}"
- echo "########################################################################################"
- cd $TestONlogDir
- if [ $? -eq 1 ]
- then
- echo "Job name does not match any test suite name to move log!"
- else
- pwd
- for i in $OC{1..7}; do onos-fetch-logs $i || echo log does not exist; done
- fi
- cd'''
- fileContents = readFile workSpace+"/"+FUNC[testName]['wiki_file']
- }
- }
-
- if( prop["manualRun"] == "false" || prop["postResult"] == "true" ){
- def post = build job: "Pipeline_postjob_VM", propagate: false,
- parameters: [
- string(name: 'Wiki_Contents', value: fileContents),
- string(name: 'Wiki_Link', value: FUNC[testName]['wiki_link'])
- ]
- }
- node("TestStation-VMs"){
- resultContents = readFile workSpace + "/" + testName + "Result.txt"
- resultContents = resultContents.split("\n")
- if( resultContents[ 0 ] == "1" ){
- print "All passed"
- }else{
- print "Failed"
- if( prop["manualRun"] == "false" )
- slackSend(color:"FF0000", message: "[" + prop["ONOSBranch"] + "]" + testName + " : Failed!\n"
- + resultContents[ 1 ] + "\n"
- + "https://onos-jenkins.onlab.us/blue/organizations/jenkins/${env.JOB_NAME}/detail/${env.JOB_NAME}/${env.BUILD_NUMBER}/pipeline" )
- Failed
- }
- }
- }
- }
- }
- }
-}
\ No newline at end of file
+funcs.generateOverallGraph( prop, FUNC, graph_saved_directory )
+funcs.sendResultToSlack( start, prop[ "manualRun" ], prop[ "WikiPrefix" ] )
\ No newline at end of file
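
Both suite files now reduce to the same skeleton: readTrusted fetches the helper script from the pipeline's own SCM checkout (no workspace required), evaluate compiles it, and the trailing "return this;" in the helper hands back a script object whose defs are callable like methods. A minimal sketch of the loading pattern:

    #!groovy
    // 'funcs' is the script object produced by the trailing 'return this;'
    // in JenkinsCommonFuncs.groovy, so its defs can be invoked like methods.
    funcs = evaluate readTrusted( 'TestON/JenkinsFile/JenkinsCommonFuncs.groovy' )
    funcs.initialize( "FUNC" )        // "FUNC" maps to the VM test station
    def prop = funcs.getProperties()  // readProperties, run on that station
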
diff --git a/TestON/JenkinsFile/GeneralFuncs.groovy b/TestON/JenkinsFile/GeneralFuncs.groovy
new file mode 100644
index 0000000..70b0fe0
--- /dev/null
+++ b/TestON/JenkinsFile/GeneralFuncs.groovy
@@ -0,0 +1,18 @@
+#!groovy
+
+def database_command_create( pass, host, port, user ){
+ return pass + "|psql --host=" + host + " --port=" + port + " --username=" + user + " --password --dbname onostest -c "
+}
+def basicGraphPart( rFileName, host, port, user, pass, subject, branchName ){
+ return " Rscript " + rFileName + " " + host + " " + port + " " + user + " " + pass + " " + subject + " " + branchName
+}
+def initBasicVars(){
+ rScriptLocation = "~/OnosSystemTest/TestON/JenkinsFile/scripts/"
+}
+def getTestList( tests ){
+ list = ""
+ for( String test : tests.keySet() )
+ list += test + ","
+ return list[ 0..-2 ]
+}
+return this;
\ No newline at end of file
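
For reference, getTestList joins a map's keys with commas and relies on Groovy's negative range ( list[ 0..-2 ] ) to trim the trailing comma, while database_command_create builds only the psql prefix; JenkinsCommonFuncs prepends "echo " so the password value is piped into psql's --password prompt. A hedged usage sketch (the map literal is illustrative):

    general = evaluate readTrusted( 'TestON/JenkinsFile/GeneralFuncs.groovy' )
    // Groovy map literals are LinkedHashMaps, so key order is insertion order:
    assert general.getTestList( [ "HAsanity":[:], "HAscaling":[:] ] ) == "HAsanity,HAscaling"
    // Yields: <pass>|psql --host=<host> --port=<port> --username=<user> --password --dbname onostest -c
    def cmd = general.database_command_create( "<pass>", "<host>", "<port>", "<user>" )
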
diff --git a/TestON/JenkinsFile/HAJenkinsFile b/TestON/JenkinsFile/HAJenkinsFile
index 8493f3c..ea06e6a 100644
--- a/TestON/JenkinsFile/HAJenkinsFile
+++ b/TestON/JenkinsFile/HAJenkinsFile
@@ -1,193 +1,39 @@
#!groovy
-import groovy.time.*
+funcs = evaluate readTrusted( 'TestON/JenkinsFile/JenkinsCommonFuncs.groovy' )
+funcs.initialize( "HA" );
// This is a Jenkinsfile for a scripted pipeline for the HA tests
-
def prop = null
-node("TestStation-VMs"){
- prop = readProperties(file:'/var/jenkins/TestONOS.property')
-}
-// TODO: Exception handling around steps
+prop = funcs.getProperties()
HA = [
-"HAsanity" : [wiki_link:prop["WikiPrefix"]+"-"+"HA Sanity", wiki_file:"HAsanityWiki.txt"],
-"HAswapNodes" : [wiki_link:prop["WikiPrefix"]+"-"+"HA Swap Nodes", wiki_file:"HAswapNodesWiki.txt"],
-"HAscaling" : [wiki_link:prop["WikiPrefix"]+"-"+"HA Scaling", wiki_file:"HAscalingWiki.txt"],
-"HAclusterRestart" : [wiki_link:prop["WikiPrefix"]+"-"+"HA Cluster Restart", wiki_file:"HAclusterRestartWiki.txt"],
-"HAstopNodes" : [wiki_link:prop["WikiPrefix"]+"-"+"HA Stop Nodes", wiki_file:"HAstopNodes.txt"],
-"HAfullNetPartition" : [wiki_link:prop["WikiPrefix"]+"-"+"HA Full Network Partition", wiki_file:"HAfullNetPartitionWiki.txt"],
-"HAsingleInstanceRestart" : [wiki_link:prop["WikiPrefix"]+"-"+"HA Single Instance Restart", wiki_file:"HAsingleInstanceRestartWiki.txt"],
-"HAkillNodes" : [wiki_link:prop["WikiPrefix"]+"-"+"HA Kill Nodes", wiki_file:"HAkillNodesWiki.txt"] ]
+"HAsanity" : [ wiki_link:prop[ "WikiPrefix" ] + "-" + "HA Sanity", wiki_file:"HAsanityWiki.txt" ],
+"HAswapNodes" : [ wiki_link:prop[ "WikiPrefix" ] + "-" + "HA Swap Nodes", wiki_file:"HAswapNodesWiki.txt" ],
+"HAscaling" : [ wiki_link:prop[ "WikiPrefix" ] + "-" + "HA Scaling", wiki_file:"HAscalingWiki.txt" ],
+"HAclusterRestart" : [ wiki_link:prop[ "WikiPrefix" ] + "-" + "HA Cluster Restart", wiki_file:"HAclusterRestartWiki.txt" ],
+"HAstopNodes" : [ wiki_link:prop[ "WikiPrefix" ] + "-" + "HA Stop Nodes", wiki_file:"HAstopNodes.txt" ],
+"HAfullNetPartition" : [ wiki_link:prop[ "WikiPrefix" ] + "-" + "HA Full Network Partition", wiki_file:"HAfullNetPartitionWiki.txt" ],
+"HAsingleInstanceRestart" : [ wiki_link:prop[ "WikiPrefix" ] + "-" + "HA Single Instance Restart", wiki_file:"HAsingleInstanceRestartWiki.txt" ],
+"HAupgrade" : [ wiki_link:prop[ "WikiPrefix" ] + "-" + "HA Upgrade", wiki_file:"HAupgradeWiki.txt" ],
+"HAupgradeRollback" : [ wiki_link:prop[ "WikiPrefix" ] + "-" + "HA Upgrade Rollback", wiki_file:"HAupgradeRollbackWiki.txt" ] ]
-table_name = "executed_test_tests"
-result_name = "executed_test_results"
graph_generator_file = "~/OnosSystemTest/TestON/JenkinsFile/scripts/testCaseGraphGenerator.R"
-graph_saved_directory = "/var/jenkins/workspace/Pipeline_postjob_VM/"
+graph_saved_directory = "/var/jenkins/workspace/postjob-VM/"
-echo("Testcases:")
+echo( "Testcases:" )
def testsToRun = null
-testsToRun = prop["Tests"].tokenize("\n;, ")
-for ( String test : testsToRun ) {
- println test
-}
+testsToRun = funcs.getTestsToRun( prop[ "Tests" ] )
+funcs.printTestToRun( testsToRun )
def tests = [:]
for( String test : HA.keySet() ){
toBeRun = testsToRun.contains( test )
def stepName = ( toBeRun ? "" : "Not " ) + "Running $test"
- tests[stepName] = HATest(test, toBeRun, prop)
+ tests[ stepName ] = funcs.runTest( test, toBeRun, prop, test, false, HA, graph_generator_file, graph_saved_directory )
}
-def now = new Date()
+start = funcs.getCurrentTime()
// run the tests
for ( test in tests.keySet() ){
- tests[test].call()
+ tests[ test ].call()
}
-try{
- if( prop["manualRun"] == "false" ){
- def end = new Date()
- TimeDuration duration = TimeCategory.minus( end, now )
- slackSend( color:"#5816EE", message: "HA tests ended at: " + end.toString() + "\nTime took : " + duration )
- }
-}
-catch(all){}
-
-// The testName should be the key from the FUNC
-def HATest( testName, toBeRun, prop ) {
- return {
- catchError{
- stage(testName) {
- if ( toBeRun ){
- workSpace = "/var/jenkins/workspace/"+testName
- def fileContents = ""
- node("TestStation-VMs"){
- withEnv(['ONOSBranch='+prop["ONOSBranch"],
- 'ONOSJVMHeap='+prop["ONOSJVMHeap"],
- 'TestONBranch='+prop["TestONBranch"],
- 'ONOSTag='+prop["ONOSTag"],
- 'WikiPrefix='+prop["WikiPrefix"],
- 'WORKSPACE='+workSpace]){
- sh '''#!/bin/bash -l
- set -i # interactive
- set +e
- shopt -s expand_aliases # expand alias in non-interactive mode
- export PYTHONUNBUFFERED=1
-
- ifconfig
-
- echo "ONOS Branch is: ${ONOSBranch}"
- echo "TestON Branch is: ${TestONBranch}"
- echo "Test date: "
- date
-
- cd ~
- export PATH=$PATH:onos/tools/test/bin
-
- timeout 240 stc shutdown | head -100
- timeout 240 stc teardown | head -100
- timeout 240 stc shutdown | head -100
-
- cd ~/OnosSystemTest/TestON/bin
- git log |head
- ./cleanup.sh -f
- ''' + "./cli.py run " + testName+ '''
- ./cleanup.sh -f
- cd'''
-
- // For the Wiki page
- sh '''#!/bin/bash -i
- set +e
- echo "ONOS Branch is: ${ONOSBranch}"
- echo "TestON Branch is: ${TestONBranch}"
-
- echo "Job name is: "''' + testName + '''
- echo "Workspace is: ${WORKSPACE}/"
-
- echo "Wiki page to post is: ${WikiPrefix}-"
-
- # remove any leftover files from previous tests
- sudo rm ${WORKSPACE}/*Wiki.txt
- sudo rm ${WORKSPACE}/*Summary.txt
- sudo rm ${WORKSPACE}/*Result.txt
- sudo rm ${WORKSPACE}/*.csv
-
- #copy files to workspace
- cd `ls -t ~/OnosSystemTest/TestON/logs/*/ | head -1 | sed 's/://'`
- sudo cp *.txt ${WORKSPACE}/
- sudo cp *.csv ${WORKSPACE}/
- cd ${WORKSPACE}/
- for i in *.csv
- do mv "$i" "$WikiPrefix"-"$i"
- done
- ls -al
- cd '''
-
- if( prop["manualRun"] == "false" || prop["postResult"] == "true" ){
- // Post Results
- withCredentials([
- string(credentialsId: 'db_pass', variable: 'pass'),
- string(credentialsId: 'db_user', variable: 'user'),
- string(credentialsId: 'db_host', variable: 'host'),
- string(credentialsId: 'db_port', variable: 'port')]) {
- def database_command = pass + "|psql --host=" + host + " --port=" + port + " --username=" + user + " --password --dbname onostest -c \"INSERT INTO " + table_name + " VALUES('\$DATE','" + result_name + "','" + testName + "',\$BUILD_NUMBER, '\$ONOSBranch', \$line);\""
-
- sh '''#!/bin/bash
- export DATE=\$(date +%F_%T)
- cd ~
- pwd
- sed 1d ''' + workSpace + "/" + prop["WikiPrefix"] + "-" + testName + '''.csv | while read line
- do
- echo \$line
- echo ''' + database_command + '''
-
- done
- Rscript ''' + graph_generator_file + " " + host + " " + port + " " + user + " " + pass + " " + testName + " " + prop["ONOSBranch"] + " 20 " + graph_saved_directory
-
- }
- }
- // Fetch Logs
- sh '''#!/bin/bash
- set +e
- cd ~/OnosSystemTest/TestON/logs
- echo "Job Name is: " + ''' + testName + '''
- TestONlogDir=$(ls -t | grep ${TEST_NAME}_ |head -1)
- echo "########################################################################################"
- echo "##### copying ONOS logs from all nodes to TestON/logs directory: ${TestONlogDir}"
- echo "########################################################################################"
- cd $TestONlogDir
- if [ $? -eq 1 ]
- then
- echo "Job name does not match any test suite name to move log!"
- else
- pwd
- for i in $OC{1..7}; do onos-fetch-logs $i || echo log does not exist; done
- fi
- cd'''
- fileContents = readFile workSpace+"/"+HA[testName]['wiki_file']
- }
- }
-
- if( prop["manualRun"] == "false" || prop["postResult"] == "true" ){
- def post = build job: "Pipeline_postjob_VM", propagate: false,
- parameters: [
- string(name: 'Wiki_Contents', value: fileContents),
- string(name: 'Wiki_Link', value: HA[testName]['wiki_link'])
- ]
- }
- node("TestStation-VMs"){
- resultContents = readFile workSpace + "/" + testName + "Result.txt"
- resultContents = resultContents.split("\n")
- if( resultContents[ 0 ] == "1" ){
- print "All passed"
- }else{
- print "Failed"
- if( prop["manualRun"] == "false" )
- slackSend(color:"FF0000", message: "[" + prop["ONOSBranch"] + "]" + testName + " : Failed!\n"
- + resultContents[ 1 ] + "\n"
- + "https://onos-jenkins.onlab.us/blue/organizations/jenkins/${env.JOB_NAME}/detail/${env.JOB_NAME}/${env.BUILD_NUMBER}/pipeline" )
- Failed
- }
- }
- }
- }
- }
- }
-}
\ No newline at end of file
+funcs.generateOverallGraph( prop, HA, graph_saved_directory )
+funcs.sendResultToSlack( start, prop[ "manualRun" ], prop[ "WikiPrefix" ] )
\ No newline at end of file
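
Worth noting about the loop both suites share: funcs.runTest returns a closure, so the whole tests map is assembled before anything executes; a deselected test still contributes a visible "Not Running X" stage, and the catchError wrapper inside each closure keeps one failing test from aborting the rest of the run. A stripped-down sketch of that shape (stage body elided):

    def tests = [:]
    tests[ "Running HAsanity" ] = {    // a closure: nothing executes yet
        catchError{
            stage( "HAsanity" ){
                // test body runs only when the closure is called below
            }
        }
    }
    for ( test in tests.keySet() ){
        tests[ test ].call()           // stages execute sequentially here
    }
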
diff --git a/TestON/JenkinsFile/JenkinsCommonFuncs.groovy b/TestON/JenkinsFile/JenkinsCommonFuncs.groovy
new file mode 100644
index 0000000..5925a6f
--- /dev/null
+++ b/TestON/JenkinsFile/JenkinsCommonFuncs.groovy
@@ -0,0 +1,331 @@
+#!groovy
+import groovy.time.*
+generalFuncs = evaluate readTrusted( 'TestON/JenkinsFile/GeneralFuncs.groovy' )
+def initializeTrend( machine ){
+ table_name = "executed_test_tests"
+ result_name = "executed_test_results"
+ testMachine = "TestStation-" + machine + "s";
+ this.machine = machine
+ isSCPF = false
+ generalFuncs.initBasicVars();
+}
+def initialize( type, SCPFfuncs ){
+ init( type )
+ SCPFfunc = SCPFfuncs
+ isSCPF = true
+ machine = machineType[ type ]
+}
+def initialize( type ){
+ init( type )
+ SCPFfunc = null
+ table_name = "executed_test_tests"
+ result_name = "executed_test_results"
+ trend_generator_file = generalFuncs.rScriptLocation + "testCategoryTrend.R"
+ build_stats_generator_file = generalFuncs.rScriptLocation + "testCategoryBuildStats.R"
+ isSCPF = false
+}
+def init( type ){
+ machineType = [ "FUNC" : "VM",
+ "HA" : "VM",
+ "SR" : "VM",
+ "SCPF" : "BM",
+ "USECASE" : "BM" ]
+ testType = type;
+ testMachine = "TestStation-" + machineType[ type ] + "s";
+ generalFuncs.initBasicVars();
+}
+
+def printType(){
+ echo testType;
+ echo testMachine;
+}
+def getProperties(){
+ node( testMachine ){
+ return readProperties( file:'/var/jenkins/TestONOS.property' );
+ }
+}
+def getTestsToRun( testList ){
+    return testList.tokenize( "\n;, " )
+}
+def getCurrentTime(){
+ return new Date();
+}
+def getTotalTime( start, end ){
+ return TimeCategory.minus( end, start );
+}
+def printTestToRun( testList ){
+ for ( String test : testList ) {
+ println test;
+ }
+}
+def sendResultToSlack( start, isManualRun, branch ){
+ try{
+ if( isManualRun == "false" ){
+ end = getCurrentTime();
+      TimeDuration duration = TimeCategory.minus( end, start );
+      slackSend( color:"#5816EE",
+                 message: testType + "-" + branch + " tests ended at: " + end.toString() + "\nTime taken : " + duration )
+ }
+ }
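+    // Swallow any Slack/API error: a failed notification should never fail the build.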
+ catch( all ){}
+}
+def initAndRunTest( testName, testCategory ){
+ return '''#!/bin/bash -l
+ set -i # interactive
+ set +e
+ shopt -s expand_aliases # expand alias in non-interactive mode
+ export PYTHONUNBUFFERED=1
+ ifconfig
+ ''' + borrowCell( testName ) + '''
+ echo "ONOS Branch is: $ONOSBranch"
+ echo "TestON Branch is: $TestONBranch"
+ echo "Test date: "
+ date
+ cd ~
+ export PATH=$PATH:onos/tools/test/bin
+ timeout 240 stc shutdown | head -100
+ timeout 240 stc teardown | head -100
+ timeout 240 stc shutdown | head -100
+ cd ~/OnosSystemTest/TestON/bin
+ git log |head
+ ./cleanup.sh -f
+ ''' + "./cli.py run " + ( !isSCPF ? testName : testCategory[ testName ][ 'test' ] ) + '''
+ ./cleanup.sh -f
+ # cleanup config changes
+ cd ~/onos/tools/package/config
+ git clean -df'''
+}
+def copyLogs( testName ){
+ result = ""
+ if( testType == "SR" ){
+ result = '''
+ sudo rm /var/jenkins/workspace/SR-log-${WikiPrefix}/*
+ sudo cp *karaf.log.* /var/jenkins/workspace/SR-log-${WikiPrefix}/
+ sudo cp *Flows* /var/jenkins/workspace/SR-log-${WikiPrefix}/
+ sudo cp *Groups* /var/jenkins/workspace/SR-log-${WikiPrefix}/
+ '''
+ }
+ return result
+}
+def cleanAndCopyFiles( testName ){
+ return '''#!/bin/bash -i
+ set +e
+ echo "ONOS Branch is: ${ONOSBranch}"
+ echo "TestON Branch is: ${TestONBranch}"
+ echo "Job name is: "''' + testName + '''
+ echo "Workspace is: ${WORKSPACE}/"
+ echo "Wiki page to post is: ${WikiPrefix}-"
+ # remove any leftover files from previous tests
+ sudo rm ${WORKSPACE}/*Wiki.txt
+ sudo rm ${WORKSPACE}/*Summary.txt
+ sudo rm ${WORKSPACE}/*Result.txt
+ sudo rm ${WORKSPACE}/*.csv
+ #copy files to workspace
+ cd `ls -t ~/OnosSystemTest/TestON/logs/*/ | head -1 | sed 's/://'`
+ ''' + copyLogs( testName ) + '''
+ sudo cp *.txt ${WORKSPACE}/
+ sudo cp *.csv ${WORKSPACE}/
+ cd ${WORKSPACE}/
+ for i in *.csv
+ do mv "$i" "$WikiPrefix"-"$i"
+ done
+ ls -al
+ cd '''
+}
+def fetchLogs( testName ){
+ return '''#!/bin/bash
+ set +e
+ cd ~/OnosSystemTest/TestON/logs
+ echo "Job Name is: " + ''' + testName + '''
+ TestONlogDir=$(ls -t | grep ${TEST_NAME}_ |head -1)
+ echo "########################################################################################"
+ echo "##### copying ONOS logs from all nodes to TestON/logs directory: ${TestONlogDir}"
+ echo "########################################################################################"
+ cd $TestONlogDir
+ if [ $? -eq 1 ]
+ then
+ echo "Job name does not match any test suite name to move log!"
+ else
+ pwd
+ for i in $OC{1..7}; do onos-fetch-logs $i || echo log does not exist; done
+ fi
+ cd'''
+}
+def isPostingResult( manual, postresult ){
+ return manual == "false" || postresult == "true"
+}
+def postResult( prop, graphOnly ){
+ if( graphOnly || isPostingResult( prop[ "manualRun" ], prop[ "postResult" ] ) ){
+ def post = build job: "postjob-" + ( graphOnly ? machine : machineType[ testType ] ), propagate: false
+ }
+}
+def postLogs( testName, prefix ){
+ resultURL = ""
+ if( testType == "SR" ){
+ def post = build job: "SR-log-" + prefix, propagate: false
+ resultURL = post.getAbsoluteUrl()
+ }
+ return resultURL
+}
+def getSlackChannel(){
+ return "#" + ( testType == "SR" ? "sr-failures" : "jenkins-related" )
+}
+def analyzeResult( prop, workSpace, testName, otherTestName, resultURL, wikiLink, isSCPF ){
+ node( testMachine ){
+ resultContents = readFile workSpace + "/" + testName + "Result.txt"
+ resultContents = resultContents.split("\n")
+ if( resultContents[ 0 ] == "1" ){
+ print "All passed"
+ }else{
+ print "Failed"
+ if( prop[ "manualRun" ] == "false" ){
+ slackSend( channel:getSlackChannel(), color:"FF0000", message: "[" + prop[ "ONOSBranch" ] + "]"
+ + otherTestName + " : Failed!\n" + resultContents[ 1 ] + "\n"
+ + "[TestON log] : \n"
+ + "https://onos-jenkins.onlab.us/blue/organizations/jenkins/${env.JOB_NAME}/detail/${env.JOB_NAME}/${env.BUILD_NUMBER}/pipeline"
+ + ( isSCPF ? "" : ( "\n[Result on Wiki] : \n" + "https://wiki.onosproject.org/display/ONOS/" + wikiLink.replaceAll( "\\s","+" ) ) )
+ + ( resultURL != "" ? ( "\n[Karaf log] : \n" + resultURL + "artifact/" ) : "" ),
+ teamDomain: 'onosproject' )
+ }
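+            // Deliberately reference the undefined 'Failed' variable: the resulting
+            // MissingPropertyException propagates up to runTest's catchError and
+            // marks this stage as failed.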
+ Failed
+ }
+ }
+}
+def publishToConfluence( prop, wikiLink, file ){
+ if( isPostingResult( prop[ "manualRun" ], prop[ "postResult" ] ) ){
+ publishConfluence siteName: 'wiki.onosproject.org', pageName: wikiLink, spaceName: 'ONOS',
+ attachArchivedArtifacts: true,
+ editorList: [
+ confluenceWritePage( confluenceFile( file ) )
+ ]
+ }
+
+}
+def runTest( testName, toBeRun, prop, pureTestName, graphOnly, testCategory, graph_generator_file, graph_saved_directory ) {
+ return {
+ catchError{
+ stage( testName ) {
+ if ( toBeRun ){
+ workSpace = "/var/jenkins/workspace/" + testName
+ def fileContents = ""
+ node( testMachine ){
+ withEnv( [ 'ONOSBranch=' + prop[ "ONOSBranch" ],
+ 'ONOSJVMHeap=' + prop[ "ONOSJVMHeap" ],
+ 'TestONBranch=' + prop[ "TestONBranch" ],
+ 'ONOSTag=' + prop[ "ONOSTag" ],
+ 'WikiPrefix=' + prop[ "WikiPrefix" ],
+ 'WORKSPACE=' + workSpace ] ){
+ if( ! graphOnly ){
+ sh initAndRunTest( testName, testCategory )
+ // For the Wiki page
+ sh cleanAndCopyFiles( pureTestName )
+ }
+ databaseAndGraph( prop, testName, graphOnly, graph_generator_file, graph_saved_directory )
+ if( ! graphOnly ){
+ sh fetchLogs( pureTestName )
+ if( !isSCPF )
+ publishToConfluence( prop, testCategory[ testName ][ 'wiki_link' ], workSpace + "/" + testCategory[ testName ][ 'wiki_file' ] )
+ }
+ }
+
+
+ }
+ postResult( prop, graphOnly )
+ if( ! graphOnly ){
+ resultURL = postLogs( testName, prop[ "WikiPrefix" ] )
+ analyzeResult( prop, workSpace, pureTestName, testName, resultURL, isSCPF ? "" : testCategory[ testName ][ 'wiki_link' ], isSCPF )
+ }
+ }
+ }
+ }
+ }
+}
+def borrowCell( testName ){
+ result = ""
+ if( testType == "SR" ){
+ result = '''
+ cd
+ source ~/borrow.cell
+ '''
+ }
+ return result
+}
+def databaseAndGraph( prop, testName, graphOnly, graph_generator_file, graph_saved_directory ){
+ if( graphOnly || isPostingResult( prop[ "manualRun" ], prop[ "postResult" ] ) ){
+ // Post Results
+ withCredentials( [
+ string( credentialsId: 'db_pass', variable: 'pass' ),
+ string( credentialsId: 'db_user', variable: 'user' ),
+ string( credentialsId: 'db_host', variable: 'host' ),
+ string( credentialsId: 'db_port', variable: 'port' ) ] ) {
+ def database_command = generalFuncs.database_command_create( pass, host, port, user ) + ( !isSCPF ? sqlCommand( testName ) : SCPFfunc.sqlCommand( testName ) )
+ sh '''#!/bin/bash
+ export DATE=\$(date +%F_%T)
+ cd ~
+ pwd ''' + ( graphOnly ? "" : ( !isSCPF ? databasePart( prop[ "WikiPrefix" ], testName, database_command ) :
+ SCPFfunc.databasePart( testName, database_command ) ) ) + '''
+ ''' + ( !isSCPF ? graphGenerating( host, port, user, pass, testName, prop, graph_saved_directory, graph_generator_file ) : SCPFfunc.getGraphGeneratingCommand( host, port, user, pass, testName, prop ) )
+ }
+ }
+}
+def generateCategoryStatsGraph( manualRun, postresult, stat_file, pie_file, type, branch, testListPart, save_path, pieTestListPart ){
+
+ if( isPostingResult( manualRun, postresult ) ){
+ node( testMachine ){
+
+ withCredentials( [
+ string( credentialsId: 'db_pass', variable: 'pass' ),
+ string( credentialsId: 'db_user', variable: 'user' ),
+ string( credentialsId: 'db_host', variable: 'host' ),
+ string( credentialsId: 'db_port', variable: 'port' ) ] ) {
+ sh '''#!/bin/bash
+ ''' + generalFuncs.basicGraphPart( generalFuncs.rScriptLocation + stat_file, host, port, user, pass, type, branch ) + " \"" + testListPart + "\" latest " + save_path + '''
+ ''' + getOverallPieGraph( generalFuncs.rScriptLocation + pie_file, host, port, user, pass, branch, type, pieTestListPart, 'y', save_path ) + '''
+ ''' + getOverallPieGraph( generalFuncs.rScriptLocation + pie_file, host, port, user, pass, branch, type, pieTestListPart, 'n', save_path )
+ }
+ }
+ postResult( [], true )
+ }
+}
+def makeTestList( list, commaNeeded ){
+ return generalFuncs.getTestList( list ) + ( commaNeeded ? "," : "" )
+}
+def createStatsList( testCategory, list, semiNeeded ){
+ return testCategory + "-" + generalFuncs.getTestList( list ) + ( semiNeeded ? ";" : "" )
+}
+def generateOverallGraph( prop, testCategory, graph_saved_directory ){
+
+ if( isPostingResult( prop[ "manualRun" ], prop[ "postResult" ] ) ){
+ node( testMachine ){
+
+ withCredentials( [
+ string( credentialsId: 'db_pass', variable: 'pass' ),
+ string( credentialsId: 'db_user', variable: 'user' ),
+ string( credentialsId: 'db_host', variable: 'host' ),
+ string( credentialsId: 'db_port', variable: 'port' ) ] ) {
+ testList = generalFuncs.getTestList( testCategory )
+ sh '''#!/bin/bash
+ ''' + generalFuncs.basicGraphPart( trend_generator_file, host, port, user, pass, testType, prop[ "ONOSBranch" ] ) + " " + testList + " 20 " + graph_saved_directory
+ }
+ }
+ postResult( prop, false )
+ }
+}
+def getOverallPieGraph( file, host, port, user, pass, branch, type, testList, yOrN, path ){
+ return generalFuncs.basicGraphPart( file, host, port, user, pass, type, branch ) + " \"" + testList + "\" latest " + yOrN + " " + path
+}
+def sqlCommand( testName ){
+ return "\"INSERT INTO " + table_name + " VALUES('\$DATE','" + result_name + "','" + testName + "',\$BUILD_NUMBER, '\$ONOSBranch', \$line);\" "
+}
+def graphGenerating( host, port, user, pass, testName, prop, graph_saved_directory, graph_generator_file ){
+ return generalFuncs.basicGraphPart( graph_generator_file, host, port, user, pass, testName, prop[ "ONOSBranch" ] ) + " 20 " + graph_saved_directory
+}
+def databasePart( wikiPrefix, testName, database_command ){
+ return '''
+ sed 1d ''' + workSpace + "/" + wikiPrefix + "-" + testName + '''.csv | while read line
+ do
+ echo \$line
+ echo ''' + database_command + '''
+ done '''
+}
+return this;
\ No newline at end of file
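
Tracing databaseAndGraph for a non-SCPF run: sqlCommand supplies the quoted INSERT, generalFuncs.database_command_create supplies the psql prefix, and databasePart wraps both in a sed/while loop over the test's CSV (note that databasePart reads workSpace from the script binding set in runTest rather than taking it as a parameter). The generated bash, followed by the Rscript call from graphGenerating, comes out roughly as below; angle-bracketed values are placeholders:

    // Hedged expansion for testName "HAsanity" with WikiPrefix "master" (illustrative):
    def expanded = '''
    sed 1d /var/jenkins/workspace/HAsanity/master-HAsanity.csv | while read line
    do
        echo $line
        echo <pass>|psql --host=<host> --port=<port> --username=<user> --password --dbname onostest -c "INSERT INTO executed_test_tests VALUES('$DATE','executed_test_results','HAsanity',$BUILD_NUMBER, '$ONOSBranch', $line);"
    done
    '''
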
diff --git a/TestON/JenkinsFile/JenkinsfileTrigger b/TestON/JenkinsFile/JenkinsfileTrigger
index 6b1da7b..6106351 100644
--- a/TestON/JenkinsFile/JenkinsfileTrigger
+++ b/TestON/JenkinsFile/JenkinsfileTrigger
@@ -1,74 +1,80 @@
#!groovy
-// This is a Jenkinsfile for a scripted pipeline for the SCPF tests
-// Define sets of tests
-previous_version = "1.11"
-before_previous_version = "1.10"
+
+funcs = evaluate readTrusted( 'TestON/JenkinsFile/JenkinsCommonFuncs.groovy' )
+funcs.initializeTrend( "VM" );
+previous_version = "1.12"
+before_previous_version = "1.11"
AllTheTests=
[
"FUNC":[
- "FUNCipv6Intent" : ["basic":true, "extra_A":false, "extra_B":false, "new_Test":false, "day":""],
- "FUNCoptical" : ["basic":true, "extra_A":false, "extra_B":false, "new_Test":false, "day":""],
- "FUNCflow" : ["basic":true, "extra_A":false, "extra_B":false, "new_Test":false, "day":""],
- "FUNCnetCfg": ["basic":true, "extra_A":false, "extra_B":false, "new_Test":false, "day":""],
- "FUNCovsdbtest" : ["basic":true, "extra_A":false, "extra_B":false, "new_Test":false, "day":""],
- "FUNCnetconf" : ["basic":true, "extra_A":false, "extra_B":false, "new_Test":false, "day":""],
- "FUNCgroup" : ["basic":true, "extra_A":false, "extra_B":false, "new_Test":false, "day":""],
- "FUNCformCluster" :["basic":false, "extra_A":false, "extra_B":false, "new_Test":true, "day":""],
- "FUNCintent" : ["basic":false, "extra_A":true, "extra_B":false, "new_Test":false, "day":""],
- "FUNCintentRest" : ["basic":false, "extra_A":false, "extra_B":true, "new_Test":false, "day":""],
+ "FUNCipv6Intent" : [ "basic":true, "extra_A":false, "extra_B":false, "new_Test":false, "day":"" ],
+ "FUNCoptical" : [ "basic":true, "extra_A":false, "extra_B":false, "new_Test":false, "day":"" ],
+ "FUNCflow" : [ "basic":true, "extra_A":false, "extra_B":false, "new_Test":false, "day":"" ],
+ "FUNCnetCfg": [ "basic":true, "extra_A":false, "extra_B":false, "new_Test":false, "day":"" ],
+ "FUNCovsdbtest" : [ "basic":true, "extra_A":false, "extra_B":false, "new_Test":false, "day":"" ],
+ "FUNCnetconf" : [ "basic":true, "extra_A":false, "extra_B":false, "new_Test":false, "day":"" ],
+ "FUNCgroup" : [ "basic":true, "extra_A":false, "extra_B":false, "new_Test":false, "day":"" ],
+ "FUNCformCluster" :[ "basic":false, "extra_A":false, "extra_B":false, "new_Test":true, "day":"" ],
+ "FUNCintent" : [ "basic":false, "extra_A":true, "extra_B":false, "new_Test":false, "day":"" ],
+ "FUNCintentRest" : [ "basic":false, "extra_A":false, "extra_B":true, "new_Test":false, "day":"" ],
],
"HA":[
- "HAsanity" : ["basic":true, "extra_A":false, "extra_B":false, "day":""],
- "HAclusterRestart" : ["basic":true, "extra_A":false, "extra_B":false, "day":""],
- "HAsingleInstanceRestart" : ["basic":true, "extra_A":false, "extra_B":false, "day":""],
- "HAstopNodes" : ["basic":false, "extra_A":true, "extra_B":false, "day":""],
- "HAfullNetPartition" : ["basic":false, "extra_A":true, "extra_B":false, "day":""],
- "HAswapNodes" : ["basic":false, "extra_A":false, "extra_B":true, "day":""],
- "HAscaling" : ["basic":false, "extra_A":false, "extra_B":true, "day":""],
- "HAkillNodes" : ["basic":false, "extra_A":false, "extra_B":true, "day":""]
+ "HAsanity" : [ "basic":true, "extra_A":false, "extra_B":false, "new_Test":false, "day":"" ],
+ "HAclusterRestart" : [ "basic":true, "extra_A":false, "extra_B":false, "new_Test":false, "day":"" ],
+ "HAsingleInstanceRestart" : [ "basic":true, "extra_A":false, "extra_B":false, "new_Test":false, "day":"" ],
+ "HAupgrade" : [ "basic":false, "extra_A":false, "extra_B":false, "new_Test":true, "day":"" ],
+ "HAupgradeRollback" : [ "basic":false, "extra_A":false, "extra_B":false, "new_Test":true, "day":"" ],
+ "HAstopNodes" : [ "basic":false, "extra_A":true, "extra_B":false, "new_Test":false, "day":"" ],
+ "HAfullNetPartition" : [ "basic":false, "extra_A":true, "extra_B":false, "new_Test":false, "day":"" ],
+ "HAswapNodes" : [ "basic":false, "extra_A":false, "extra_B":true, "new_Test":false, "day":"" ],
+ "HAscaling" : [ "basic":false, "extra_A":false, "extra_B":true, "new_Test":false, "day":"" ],
+ "HAkillNodes" : [ "basic":false, "extra_A":false, "extra_B":true, "new_Test":false, "day":"" ]
],
"SCPF":[
- "SCPFswitchLat": ["basic":true, "extra_A":false, "extra_B":false, "extra_C":false, "extra_D":false, "new_Test":false, day:""],
- "SCPFcbench": ["basic":true, "extra_A":false, "extra_B":false, "extra_C":false, "extra_D":false, "new_Test":false, day:""],
- "SCPFportLat": ["basic":true, "extra_A":false, "extra_B":false, "extra_C":false, "extra_D":false, "new_Test":false, day:""],
- "SCPFflowTp1g": ["basic":true, "extra_A":false, "extra_B":false, "extra_C":false, "extra_D":false, "new_Test":false, day:""],
- "SCPFintentEventTp": ["basic":true, "extra_A":false, "extra_B":false, "extra_C":false, "extra_D":false, "new_Test":false, day:""],
- "SCPFhostLat": ["basic":false, "extra_A":true, "extra_B":false, "extra_C":false, "extra_D":false, "new_Test":false, day:""],
- // batch will be on extra_A after fixing from the ONOS side.
- "SCPFbatchFlowResp": ["basic":false, "extra_A":true, "extra_B":false, "extra_C":false, "extra_D":false, "new_Test":false, day:""],
- "SCPFintentRerouteLat": ["basic":false, "extra_A":true, "extra_B":false, "extra_C":false, "extra_D":false, "new_Test":false, day:""],
- "SCPFintentInstallWithdrawLat": ["basic":false, "extra_A":true, "extra_B":false, "extra_C":false, "extra_D":false, "new_Test":false, day:""],
- "SCPFflowTp1gWithFlowObj": ["basic":false, "extra_A":false, "extra_B":true, "extra_C":false, "extra_D":false, "new_Test":false, day:""],
- "SCPFintentEventTpWithFlowObj": ["basic":false, "extra_A":false, "extra_B":true, "extra_C":false, "extra_D":false, "new_Test":false, day:""],
- "SCPFintentRerouteLatWithFlowObj": ["basic":false, "extra_A":false, "extra_B":true, "extra_C":false, "extra_D":false, "new_Test":false, day:""],
- "SCPFscalingMaxIntentsWithFlowObj": ["basic":false, "extra_A":false, "extra_B":true, "extra_C":false, "extra_D":false, "new_Test":false, day:""],
- "SCPFintentInstallWithdrawLatWithFlowObj": ["basic":false, "extra_A":false, "extra_B":true, "extra_C":false, "extra_D":false, "new_Test":false, day:""],
- "SCPFscaleTopo": ["basic":false, "extra_A":false, "extra_B":false, "extra_C":true, "extra_D":false, "new_Test":false, day:""],
- "SCPFscalingMaxIntents": ["basic":false, "extra_A":false, "extra_B":false, "extra_C":false, "extra_D":true, "new_Test":false, day:""],
- "SCPFmastershipFailoverLat": ["basic":false, "extra_A":false, "extra_B":false, "extra_C":false, "extra_D":false, "new_Test":true, day:""]
+ "SCPFswitchLat": [ "basic":true, "extra_A":false, "extra_B":false, "extra_C":false, "extra_D":false, "new_Test":false, day:"" ],
+ "SCPFcbench": [ "basic":true, "extra_A":false, "extra_B":false, "extra_C":false, "extra_D":false, "new_Test":false, day:"" ],
+ "SCPFportLat": [ "basic":true, "extra_A":false, "extra_B":false, "extra_C":false, "extra_D":false, "new_Test":false, day:"" ],
+ "SCPFflowTp1g": [ "basic":true, "extra_A":false, "extra_B":false, "extra_C":false, "extra_D":false, "new_Test":false, day:"" ],
+ "SCPFintentEventTp": [ "basic":true, "extra_A":false, "extra_B":false, "extra_C":false, "extra_D":false, "new_Test":false, day:"" ],
+ "SCPFhostLat": [ "basic":false, "extra_A":true, "extra_B":false, "extra_C":false, "extra_D":false, "new_Test":false, day:"" ],
+ "SCPFbatchFlowResp": [ "basic":false, "extra_A":true, "extra_B":false, "extra_C":false, "extra_D":false, "new_Test":false, day:"" ],
+ "SCPFintentRerouteLat": [ "basic":false, "extra_A":true, "extra_B":false, "extra_C":false, "extra_D":false, "new_Test":false, day:"" ],
+ "SCPFintentInstallWithdrawLat": [ "basic":false, "extra_A":true, "extra_B":false, "extra_C":false, "extra_D":false, "new_Test":false, day:"" ],
+ "SCPFflowTp1gWithFlowObj": [ "basic":false, "extra_A":false, "extra_B":true, "extra_C":false, "extra_D":false, "new_Test":false, day:"" ],
+ "SCPFintentEventTpWithFlowObj": [ "basic":false, "extra_A":false, "extra_B":true, "extra_C":false, "extra_D":false, "new_Test":false, day:"" ],
+ "SCPFintentRerouteLatWithFlowObj": [ "basic":false, "extra_A":false, "extra_B":true, "extra_C":false, "extra_D":false, "new_Test":false, day:"" ],
+ "SCPFscalingMaxIntentsWithFlowObj": [ "basic":false, "extra_A":false, "extra_B":true, "extra_C":false, "extra_D":false, "new_Test":false, day:"" ],
+ "SCPFintentInstallWithdrawLatWithFlowObj": [ "basic":false, "extra_A":false, "extra_B":true, "extra_C":false, "extra_D":false, "new_Test":false, day:"" ],
+ "SCPFscaleTopo": [ "basic":false, "extra_A":false, "extra_B":false, "extra_C":true, "extra_D":false, "new_Test":false, day:"" ],
+ "SCPFscalingMaxIntents": [ "basic":false, "extra_A":false, "extra_B":false, "extra_C":false, "extra_D":true, "new_Test":false, day:"" ],
+ "SCPFmastershipFailoverLat": [ "basic":false, "extra_A":false, "extra_B":false, "extra_C":false, "extra_D":false, "new_Test":true, day:"" ]
],
"USECASE":[
- "FUNCvirNetNB" : ["basic":true, "extra_A":false, "extra_B":false, "new_Test":false, "day":""],
- "FUNCbgpls" : ["basic":true, "extra_A":false, "extra_B":false, "new_Test":false, "day":""],
- "VPLSBasic" : ["basic":true, "extra_A":false, "extra_B":false, "new_Test":false, "day":""],
- "USECASE_SdnipFunction": ["basic":true, "extra_A":false, "extra_B":false, "new_Test":false, "day":""],
- "USECASE_SdnipFunctionCluster": ["basic":true, "extra_A":false, "extra_B":false, "new_Test":false, "day":""],
- "PLATdockertest": ["basic":false, "extra_A":true, "extra_B":false, "new_Test":false, "day":""],
- "SRSanity": ["basic":false, "extra_A":false, "extra_B":true, "new_Test":false, "day":""],
- "SRSwitchFailure": ["basic":false, "extra_A":false, "extra_B":true, "new_Test":false, "day":""],
- "SRLinkFailure": ["basic":false, "extra_A":false, "extra_B":true, "new_Test":false, "day":""],
- "SROnosFailure": ["basic":false, "extra_A":false, "extra_B":true, "new_Test":false, "day":""],
- "SRClusterRestart": ["basic":false, "extra_A":false, "extra_B":true, "new_Test":false, "day":""],
- "SRDynamic": ["basic":false, "extra_A":false, "extra_B":true, "new_Test":false, "day":""],
- "SRHighAvailability": ["basic":false, "extra_A":false, "extra_B":true, "new_Test":false, "day":""],
- "VPLSfailsafe" : ["basic":false, "extra_A":false, "extra_B":false, "new_Test":true, "day":""]
+ "FUNCvirNetNB" : [ "basic":true, "extra_A":false, "extra_B":false, "new_Test":false, "day":"" ],
+ "FUNCbgpls" : [ "basic":true, "extra_A":false, "extra_B":false, "new_Test":false, "day":"" ],
+ "VPLSBasic" : [ "basic":true, "extra_A":false, "extra_B":false, "new_Test":false, "day":"" ],
+ "USECASE_SdnipFunction": [ "basic":true, "extra_A":false, "extra_B":false, "new_Test":false, "day":"" ],
+ "USECASE_SdnipFunctionCluster": [ "basic":true, "extra_A":false, "extra_B":false, "new_Test":false, "day":"" ],
+ "PLATdockertest": [ "basic":true, "extra_A":true, "extra_B":false, "new_Test":false, "day":"" ],
+ "SRSanity": [ "basic":false, "extra_A":false, "extra_B":false, "new_Test":false, "day":"" ],
+ "SRSwitchFailure": [ "basic":false, "extra_A":false, "extra_B":false, "new_Test":false, "day":"" ],
+ "SRLinkFailure": [ "basic":false, "extra_A":false, "extra_B":false, "new_Test":false, "day":"" ],
+ "SROnosFailure": [ "basic":false, "extra_A":false, "extra_B":false, "new_Test":false, "day":"" ],
+ "SRClusterRestart": [ "basic":false, "extra_A":false, "extra_B":false, "new_Test":false, "day":"" ],
+ "SRDynamic": [ "basic":false, "extra_A":false, "extra_B":false, "new_Test":false, "day":"" ],
+ "SRHighAvailability": [ "basic":false, "extra_A":false, "extra_B":false, "new_Test":false, "day":"" ],
+ "VPLSfailsafe" : [ "basic":true, "extra_A":false, "extra_B":false, "new_Test":false, "day":"" ]
+ ],
+ "SR":[
+ "SRBridging": [ "basic":true, "extra_A":false, "extra_B":false, "new_Test":false, "day":"" ]
]
]
testcases = [
- "FUNC" : [ tests : "" , nodeName : "VM"],
- "HA" : [ tests : "" , nodeName : "VM"],
- "SCPF" : [ tests : "" , nodeName : "BM"],
- "USECASE" : [ tests : "" , nodeName : "BM"]
+ "FUNC" : [ tests : "" , nodeName : "VM" ],
+ "HA" : [ tests : "" , nodeName : "VM" ],
+ "SR" : [ tests : "", nodeName : "VM" ],
+ "SCPF" : [ tests : "" , nodeName : "BM" ],
+ "USECASE" : [ tests : "" , nodeName : "BM" ]
]
Prefix_organizer = [
"FU" : "FUNC",
@@ -76,48 +82,55 @@
"PL" : "USECASE",
"SA" : "USECASE",
"SC" : "SCPF",
- "SR" : "USECASE",
+ "SR" : "SR",
"US" : "USECASE",
"VP" : "USECASE"
]
-onos_branch = "master"
+onos_b = "master"
test_branch = ""
onos_tag = params.ONOSTag
-isOldFlow = false
+isOldFlow = true
// Set tests based on day of week
def now = new Date()
-echo(now.toString())
-today = now[Calendar.DAY_OF_WEEK]
+echo( now.toString() )
+today = now[ Calendar.DAY_OF_WEEK ]
day = ""
SCPF_choices = ""
USECASE_choices = ""
FUNC_choices = ""
HA_choices = ""
+SR_choices = ""
+stat_graph_generator_file = "testCategoryBuildStats.R"
+pie_graph_generator_file = "testCategoryPiePassFail.R"
+graph_saved_directory = "/var/jenkins/workspace/postjob-VM/"
manually_run = params.manual_run
post_result = params.PostResult
if( !manually_run ){
- sendToSlack( '#03CD9F', ":sparkles::sparkles::sparkles::sparkles::sparkles::sparkles::sparkles::sparkles::sparkles::sparkles::sparkles::sparkles::sparkles::sparkles::sparkles::sparkles:\n"
- + "Starting tests on : " + now.toString()
- + "\n:sparkles::sparkles::sparkles::sparkles::sparkles::sparkles::sparkles::sparkles::sparkles::sparkles::sparkles::sparkles::sparkles::sparkles::sparkles::sparkles:" )
+ slackSend( color:'#03CD9F',
+ message:":sparkles::sparkles::sparkles::sparkles::sparkles::sparkles::sparkles::sparkles::sparkles::sparkles::sparkles::sparkles::sparkles::sparkles::sparkles::sparkles:\n"
+ + "Starting tests on : " + now.toString()
+ + "\n:sparkles::sparkles::sparkles::sparkles::sparkles::sparkles::sparkles::sparkles::sparkles::sparkles::sparkles::sparkles::sparkles::sparkles::sparkles::sparkles:" )
testDivider( today )
FUNC_choices = lastCommaRemover( FUNC_choices )
HA_choices = lastCommaRemover( HA_choices )
SCPF_choices = lastCommaRemover( SCPF_choices )
USECASE_choices = lastCommaRemover( USECASE_choices )
+ SR_choices = lastCommaRemover( SR_choices )
}
if ( manually_run ){
organize_tests( params.Tests )
- onos_branch = params.ONOSVersion
+ onos_b = params.ONOSVersion
isOldFlow = params.isOldFlow
println "Tests to be run manually : "
}else{
- testcases["SCPF"]["tests"] = SCPF_choices
- testcases["USECASE"]["tests"] = USECASE_choices
- testcases["FUNC"]["tests"] = FUNC_choices
- testcases["HA"]["tests"] = HA_choices
+ testcases[ "SCPF" ][ "tests" ] = SCPF_choices
+ testcases[ "USECASE" ][ "tests" ] = USECASE_choices
+ testcases[ "FUNC" ][ "tests" ] = FUNC_choices
+ testcases[ "HA" ][ "tests" ] = HA_choices
+ testcases[ "SR" ][ "tests" ] = SR_choices
println "Defaulting to " + day + " tests:"
}
print_tests( testcases )
@@ -127,17 +140,19 @@
]
for( String test in testcases.keySet() ){
println test
- if (testcases[test]["tests"] != ""){
- runTest[testcases[test]["nodeName"]][test] = trigger_pipeline( onos_branch, testcases[test]["tests"], testcases[test]["nodeName"], test, manually_run, onos_tag )
+ if ( testcases[ test ][ "tests" ] != "" ){
+ runTest[ testcases[ test ][ "nodeName" ] ][ test ] = trigger_pipeline( onos_b, testcases[ test ][ "tests" ], testcases[ test ][ "nodeName" ], test, manually_run, onos_tag )
}
}
def finalList = [:]
-finalList["VM"] = runTestSeq( runTest["VM"] )
-finalList["BM"] = runTestSeq( runTest["BM"] )
+finalList[ "VM" ] = runTestSeq( runTest[ "VM" ] )
+finalList[ "BM" ] = runTestSeq( runTest[ "BM" ] )
parallel finalList
+//finalList[ "BM" ].call()
+generateStatGraph()
def testDivider( today ){
- switch (today) {
+ switch ( today ) {
case Calendar.MONDAY:
monday( true )
tuesday( true, false )
@@ -145,7 +160,7 @@
thursday( true, false )
friday( true, false )
day = "Monday"
- sendToSlack( '#FFD988', "Tests to be run this weekdays : \n" + printDaysForTest() )
+ slackSend( color:'#FFD988', message:"Tests to be run this weekdays : \n" + printDaysForTest() )
break
case Calendar.TUESDAY:
tuesday( true, true )
@@ -158,23 +173,23 @@
case Calendar.THURSDAY:
thursday( true, true )
day = "Thursday"
- isOldFlow = true
+ isOldFlow = false
break
case Calendar.FRIDAY:
friday( true, true )
day = "Friday"
- isOldFlow = true
+ isOldFlow = false
break
case Calendar.SATURDAY:
saturday()
- onos_branch= previous_version
+            onos_b = previous_version
day = "Saturday"
break
case Calendar.SUNDAY:
sunday()
- onos_branch= before_previous_version
+            onos_b = before_previous_version
day = "Sunday"
- isOldFlow = true
+ isOldFlow = false
break
}
}
@@ -202,6 +217,8 @@
FUNC_choices += adder( "FUNC", "extra_A", true, "M", getResult )
HA_choices += adder( "HA", "basic", true, "M", getResult )
HA_choices += adder( "HA", "extra_A", true, "M", getResult )
+ //HA_choices += adder( "HA", "new_Test", true, "M", getResult )
+ SR_choices += adder( "SR", "basic", true, "M", getResult )
SCPF_choices += adder( "SCPF", "basic", true, "M", getResult )
SCPF_choices += adder( "SCPF", "extra_B", true, "M", getResult )
}
@@ -211,6 +228,8 @@
FUNC_choices += adder( "FUNC", "extra_B", getDay, "T", getResult )
HA_choices += adder( "HA", "basic", getDay, "T", getResult )
HA_choices += adder( "HA", "extra_B", getDay, "T", getResult )
+ HA_choices += adder( "HA", "new_Test", getDay, "T", getResult )
+ SR_choices += adder( "SR", "basic", getDay, "T", getResult )
SCPF_choices += adder( "SCPF", "basic", getDay, "T", getResult )
SCPF_choices += adder( "SCPF", "extra_C", getDay, "T", getResult )
USECASE_choices += adder( "USECASE", "basic", getDay, "T", getResult )
@@ -223,6 +242,8 @@
FUNC_choices += adder( "FUNC", "extra_A", getDay, "W", getResult )
HA_choices += adder( "HA", "basic", getDay, "W", getResult )
HA_choices += adder( "HA", "extra_A", getDay, "W", getResult )
+ //HA_choices += adder( "HA", "new_Test", getDay, "W", getResult )
+ SR_choices += adder( "SR", "basic", getDay, "W", getResult )
SCPF_choices += adder( "SCPF", "basic", getDay, "W", getResult )
SCPF_choices += adder( "SCPF", "extra_A", getDay, "W", getResult )
SCPF_choices += adder( "SCPF", "new_Test", getDay, "W", getResult )
@@ -233,6 +254,8 @@
FUNC_choices += adder( "FUNC", "extra_B", getDay, "Th", getResult )
HA_choices += adder( "HA", "basic", getDay, "Th", getResult )
HA_choices += adder( "HA", "extra_B", getDay, "Th", getResult )
+ HA_choices += adder( "HA", "new_Test", getDay, "Th", getResult )
+ SR_choices += adder( "SR", "basic", getDay, "Th", getResult )
SCPF_choices += adder( "SCPF", "basic", getDay, "Th", getResult )
SCPF_choices += adder( "SCPF", "extra_B", getDay, "Th", getResult )
}
@@ -242,6 +265,8 @@
FUNC_choices += adder( "FUNC", "extra_A", getDay, "F", getResult )
HA_choices += adder( "HA", "basic", getDay, "F", getResult )
HA_choices += adder( "HA", "extra_A", getDay, "F", getResult )
+ //HA_choices += adder( "HA", "new_Test", getDay, "F", getResult )
+ SR_choices += adder( "SR", "basic", getDay, "F", getResult )
SCPF_choices += adder( "SCPF", "basic", getDay, "F", getResult )
SCPF_choices += adder( "SCPF", "extra_A", getDay, "F", getResult )
SCPF_choices += adder( "SCPF", "extra_D", getDay, "F", getResult )
@@ -250,9 +275,12 @@
FUNC_choices += adder( "FUNC", "basic", false, "Sa", true )
FUNC_choices += adder( "FUNC", "extra_A", false, "Sa", true )
FUNC_choices += adder( "FUNC", "extra_B", false, "Sa", true )
+ FUNC_choices += adder( "FUNC", "new_Test", true, "Sa", true )
HA_choices += adder( "HA", "basic", false, "Sa", true )
HA_choices += adder( "HA", "extra_A", false, "Sa", true )
HA_choices += adder( "HA", "extra_B", false, "Sa", true )
+ HA_choices += adder( "HA", "new_Test", false, "Sa", true )
+ SR_choices += adder( "SR", "basic", false, "Sa", true )
SCPF_choices += adder( "SCPF", "basic", false, "Sa", true )
SCPF_choices += adder( "SCPF", "extra_A", false, "Sa", true )
SCPF_choices += adder( "SCPF", "extra_B", false, "Sa", true )
@@ -260,7 +288,6 @@
SCPF_choices += adder( "SCPF", "extra_D", false, "Sa", true )
SCPF_choices += adder( "SCPF", "new_Test", false, "Sa", true )
USECASE_choices += adder( "USECASE", "basic", false, "Sa", true )
- USECASE_choices += adder( "USECASE", "new_Test", false, "Sa", true )
}
def sunday(){
FUNC_choices += adder( "FUNC", "basic", false, "S", true )
@@ -269,6 +296,7 @@
HA_choices += adder( "HA", "basic", false, "S", true )
HA_choices += adder( "HA", "extra_A", false, "S", true )
HA_choices += adder( "HA", "extra_B", false, "S", true )
+ SR_choices += adder( "SR", "basic", false, "S", true )
SCPF_choices += adder( "SCPF", "basic", false, "S", true )
USECASE_choices += adder( "USECASE", "basic", false, "S", true )
}
@@ -290,52 +318,65 @@
def runTestSeq( testList ){
return{
for ( test in testList.keySet() ){
- testList[test].call()
+ testList[ test ].call()
}
}
}
def print_tests( tests ){
for( String test in tests.keySet() ){
- if( tests[test]["tests"] != "" ){
+ if( tests[ test ][ "tests" ] != "" ){
println test + ":"
- println tests[test]["tests"]
+ println tests[ test ][ "tests" ]
}
}
}
def organize_tests( tests ){
- testList = tests.tokenize("\n;, ")
+ testList = tests.tokenize( "\n;, " )
for( String test in testList )
testcases [ Prefix_organizer[ ( test == "FUNCbgpls" || test == "FUNCvirNetNB" ? "US" : ( test[ 0 ] + test[ 1 ] ) ) ] ][ "tests" ] += test + ","
}
+def borrow_mn( jobOn ){
+ result = ""
+ if( jobOn == "SR" ){
+ result = "~/cell_borrow.sh"
+ }
+ return result
+}
+def trigger( branch, tests, nodeName, jobOn, manuallyRun, onosTag ){
+ println jobOn + "-pipeline-" + manuallyRun ? "manually" : branch
+ wiki = branch
+ if ( branch != "master" ){
+ branch = "onos-" + branch
+ }
+ test_branch = "master"
+ node( "TestStation-" + nodeName + "s" ){
+ envSetup( branch, test_branch, onosTag, jobOn, manuallyRun )
+
+ exportEnvProperty( branch, test_branch, wiki, tests, post_result, manuallyRun, onosTag, isOldFlow )
+ }
+
+ jobToRun = jobOn + "-pipeline-" + ( manuallyRun ? "manually" : wiki )
+ build job: jobToRun, propagate: false
+}
def trigger_pipeline( branch, tests, nodeName, jobOn, manuallyRun, onosTag ){
// nodeName : "BM" or "VM"
// jobOn : "SCPF" or "USECASE" or "FUNC" or "HA"
return{
- if (branch == "master"){
- onos_branch = branch
- }else{
- onos_branch = "onos-" + branch
+ if( jobOn == "SR" ){
+ trigger( "1.11", "SRBridging", nodeName, jobOn, manuallyRun, onosTag )
+ trigger( "1.12", "SRBridging", nodeName, jobOn, manuallyRun, onosTag )
+ trigger( "master", "SRBridging", nodeName, jobOn, manuallyRun, onosTag )
+ returnCell( nodeName )
+ }else{
+ trigger( branch, tests, nodeName, jobOn, manuallyRun, onosTag )
}
- wiki = branch
- test_branch = onos_branch
- if (onos_branch == previous_version)
- test_branch = "master"
- println jobOn + "_Pipeline_" + manuallyRun ? "manually" : branch
- node("TestStation-" + nodeName + "s"){
- envSetup(onos_branch, test_branch, onosTag, jobOn, manuallyRun )
-
- exportEnvProperty( onos_branch, test_branch, wiki, tests, post_result, manuallyRun, onosTag, isOldFlow )
- }
-
- jobToRun = jobOn + "_Pipeline_" + ( manuallyRun ? "manually" : branch )
- build job: jobToRun, propagate: false
}
}
// export Environment properties.
def exportEnvProperty( onos_branch, test_branch, wiki, tests, postResult, manually_run, onosTag, isOldFlow ){
- stage("export Property"){
+ stage( "export Property" ){
sh '''
echo "ONOSBranch=''' + onos_branch +'''" > /var/jenkins/TestONOS.property
echo "TestONBranch=''' + test_branch +'''" >> /var/jenkins/TestONOS.property
@@ -350,24 +391,22 @@
'''
}
}
-def sendToSlack( color, message ){
- slackSend(color:color, message: message)
-}
// Initialize the environment Setup for the onos and OnosSystemTest
def envSetup( onos_branch, test_branch, onos_tag, jobOn, manuallyRun ){
- stage("envSetup") {
+ stage( "envSetup" ) {
sh '''#!/bin/bash -l
set +e
. ~/.bashrc
env
+ ''' + borrow_mn( jobOn ) + '''
''' + preSetup( onos_branch, test_branch, onos_tag, manuallyRun ) + '''
''' + oldFlowCheck( jobOn, onos_branch ) + '''
''' + postSetup( onos_branch, test_branch, onos_tag, manuallyRun )
}
}
-def tagCheck(onos_tag, onos_branch){
+def tagCheck( onos_tag, onos_branch ){
result = "git checkout "
- if (onos_tag == "" )
+ if ( onos_tag == "" )
result += onos_branch //create new local branch
else
result += onos_tag //checkout the tag
@@ -408,7 +447,7 @@
git fetch --all # update all caches from remotes
git reset --hard origin/''' + onos_branch + ''' # force local index to match remote branch
git clean -df # clean any local files
- ''' + tagCheck(onos_tag, onos_branch) + '''
+ ''' + tagCheck( onos_tag, onos_branch ) + '''
git branch
git log -1 --decorate
@@ -428,9 +467,9 @@
}
def oldFlowCheck( jobOn, onos_branch ){
result = ""
- if( isOldFlow && jobOn == "SCPF" && onos_branch== "master" )
- result = '''sed -i -e 's/@Component(immediate = true)/@Component(enabled = false)/g' ~/onos/core/store/dist/src/main/java/org/onosproject/store/flow/impl/DistributedFlowRuleStore.java
- sed -i -e 's/@Component(enabled = false)/@Component(immediate = true)/g' ~/onos/core/store/dist/src/main/java/org/onosproject/store/flow/impl/ECFlowRuleStore.java'''
+ if( jobOn == "SCPF" && ( onos_branch== "master" || onos_branch=="onos-1.12" ) )
+ result = '''sed -i -e 's/@Component(immediate = true)/@Component(enabled = false)/g' ~/onos/core/store/dist/src/main/java/org/onosproject/store/flow/impl/''' + ( isOldFlow ? "DistributedFlowRuleStore" : "ECFlowRuleStore" ) + '''.java
+ sed -i -e 's/@Component(enabled = false)/@Component(immediate = true)/g' ~/onos/core/store/dist/src/main/java/org/onosproject/store/flow/impl/''' + ( isOldFlow ? "ECFlowRuleStore" : "DistributedFlowRuleStore" ) + ".java"
return result
}
def postSetup( onos_branch, test_branch, onos_tag, isManual ){
@@ -450,4 +489,26 @@
git branch'''
}
return result
+}
+def returnCell( nodeName ){
+ node( "TestStation-" + nodeName + "s" ){
+ sh '''#!/bin/bash -l
+ set +e
+ . ~/.bashrc
+ env
+ ~/./return_cell.sh
+ '''
+ }
+}
+
+def generateStatGraph(){
+ if( !manually_run ){
+ testListPart = funcs.createStatsList( "FUNC", AllTheTests[ "FUNC" ], true ) +
+ funcs.createStatsList( "HA", AllTheTests[ "HA" ], true ) +
+ funcs.createStatsList( "USECASE", AllTheTests[ "USECASE" ], false )
+ pieTestList = funcs.makeTestList( AllTheTests[ "FUNC" ], true ) +
+ funcs.makeTestList( AllTheTests[ "HA" ], true ) +
+ funcs.makeTestList( AllTheTests[ "USECASE" ], false )
+ funcs.generateCategoryStatsGraph( "false", "true", stat_graph_generator_file, pie_graph_generator_file, "ALL", onos_b, testListPart, graph_saved_directory, pieTestList )
+ }
}
\ No newline at end of file
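
One trigger detail worth spelling out: organize_tests routes each manually supplied test name through Prefix_organizer by its first two characters, with an explicit carve-out for FUNCbgpls and FUNCvirNetNB, which live under USECASE despite their FUNC prefix. An illustrative re-statement of that lookup:

    // Same routing logic as organize_tests above; map entries are from this patch.
    def route = { String test ->
        Prefix_organizer[ ( test == "FUNCbgpls" || test == "FUNCvirNetNB" ) ? "US"
                                                                            : test[ 0 ] + test[ 1 ] ]
    }
    assert route( "SRBridging" ) == "SR"       // SR is its own category after this change
    assert route( "FUNCbgpls" )  == "USECASE"  // explicit exception to the prefix rule
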
diff --git a/TestON/JenkinsFile/PerformanceFuncs.groovy b/TestON/JenkinsFile/PerformanceFuncs.groovy
new file mode 100644
index 0000000..5761497
--- /dev/null
+++ b/TestON/JenkinsFile/PerformanceFuncs.groovy
@@ -0,0 +1,90 @@
+#!groovy
+//generalFuncs = evaluate readTrusted( 'TestON/JenkinsFile/GeneralFuncs.groovy' )
+def init(){
+ none = [ "" ]
+ batches = [ 1, 100, 1000 ]
+ neighbors = [ 'y', 'n' ]
+ times = [ 'y', 'n' ]
+ SCPF = [
+ SCPFcbench: [ flows:false, test:'SCPFcbench', table:'cbench_bm_tests', results:'cbench_bm_results', file:'CbenchDB', rFile:'SCPFcbench.R', extra:none, finalResult:1, graphTitle:[ 'Cbench Test' ], dbCols:'avg', dbWhere:'', y_axis:'Throughput (Responses/sec)' ],
+    SCPFhostLat: [ flows:false, test:'SCPFhostLat', table:'host_latency_tests', results:'host_latency_results', file:'HostAddLatency', rFile:'SCPFhostLat.R', extra:none, finalResult:1, graphTitle:[ 'Host Latency Test' ], dbCols:'avg', dbWhere:'AND scale=5', y_axis:'Latency (ms)' ],
+ SCPFportLat: [ flows:false, test:'SCPFportLat', table:'port_latency_details', results:'port_latency_results', file:'/tmp/portEventResultDb', rFile:'SCPFportLat.R', extra:none, finalResult:1, graphTitle:[ 'Port Latency Test - Port Up','Port Latency Test - Port Down' ], dbCols:[ 'up_ofp_to_dev_avg, up_dev_to_link_avg, up_link_to_graph_avg', 'down_ofp_to_dev_avg, down_dev_to_link_avg, down_link_to_graph_avg' ], dbWhere:'AND scale=5', y_axis:'Latency (ms)' ],
+ SCPFflowTp1g: [ flows:true, test:'SCPFflowTp1g', table:'flow_tp_tests', results:'flow_tp_results', file:'flowTP1gDB', rFile:'SCPFflowTp1g.R n', extra:neighbors, finalResult:1, graphTitle:[ 'Flow Throughput Test - neighbors=0', 'Flow Throughput Test - neighbors=4' ], dbCols:'avg', dbWhere:[ 'AND scale=5 AND neighbors=0 ','AND scale=5 AND NOT neighbors=0' ], y_axis:'Throughput (,000 Flows/sec)' ],
+ SCPFflowTp1gWithFlowObj: [ flows:true, test:'SCPFflowTp1g --params TEST/flowObj=True', table:'flow_tp_fobj_tests', results:'flow_tp_fobj_results', file:'flowTP1gDBFlowObj', rFile:'SCPFflowTp1g.R y', extra:neighbors, finalResult:0 ],
+ SCPFscaleTopo: [ flows:false, test:'SCPFscaleTopo', table:'scale_topo_latency_details', results:'scale_topo_latency_results', file:'/tmp/scaleTopoResultDb', rFile:'SCPFscaleTopo.R', extra:none, finalResult:1, graphTitle:[ 'Scale Topology Test' ], dbCols:[ 'first_connection_to_last_connection, last_connection_to_last_role_request, last_role_request_to_last_topology' ], dbWhere:'AND scale=20' , y_axis:'Latency (s)' ],
+ SCPFswitchLat: [ flows:false, test:'SCPFswitchLat', table:'switch_latency_details', results:'switch_latency_results', file:'/tmp/switchEventResultDb', rFile:'SCPFswitchLat.R', extra:none, finalResult:1, graphTitle:[ 'Switch Latency Test - Switch Up','Switch Latency Test - Switch Down' ], dbCols:[ 'tcp_to_feature_reply_avg,feature_reply_to_role_request_avg,role_request_to_role_reply_avg,role_reply_to_device_avg,up_device_to_graph_avg', 'fin_ack_to_ack_avg,ack_to_device_avg,down_device_to_graph_avg' ], dbWhere:'AND scale=5', y_axis:'Latency (ms)' ],
+ SCPFbatchFlowResp: [ flows:true, test:'SCPFbatchFlowResp', table:'batch_flow_tests', results:'batch_flow_results', file:'SCPFbatchFlowRespData', rFile:'SCPFbatchFlowResp.R', extra:none, finalResult:1, graphTitle:[ 'Batch Flow Test - Post', 'Batch Flow Test - Del' ], dbCols:[ 'elapsepost, posttoconfrm', 'elapsedel, deltoconfrm' ], dbWhere:'', y_axis:'Latency (ms)' ],
+ SCPFintentEventTp: [ flows:true, test:'SCPFintentEventTp', table:'intent_tp_tests', results:'intent_tp_results', file:'IntentEventTPDB', rFile:'SCPFintentEventTp.R n', extra:neighbors, finalResult:1, graphTitle:[ 'Intent Throughput Test - neighbors=0','Intent Throughput Test - neighbors=4' ], dbCols:'SUM( avg ) as avg', dbWhere:[ 'AND scale=5 AND neighbors=0 GROUP BY date','AND scale=5 AND NOT neighbors=0 GROUP BY date' ], y_axis:'Throughput (Ops/sec)' ],
+ SCPFintentRerouteLat: [ flows:true, test:'SCPFintentRerouteLat', table:'intent_reroute_latency_tests', results:'intent_reroute_latency_results', file:'IntentRerouteLatDB', rFile:'SCPFIntentInstallWithdrawRerouteLat.R n', extra:batches, finalResult:1, graphTitle:[ 'Intent Reroute Test' ], dbCols:'avg', dbWhere:'AND scale=5 AND batch_size=100', y_axis:'Latency (ms)' ],
+ SCPFscalingMaxIntents: [ flows:true, test:'SCPFscalingMaxIntents', table:'max_intents_tests', results:'max_intents_results', file:'ScalingMaxIntentDB', rFile:'SCPFscalingMaxIntents.R n', extra:none, finalResult:0 ],
+    SCPFintentEventTpWithFlowObj: [ flows:true, test:'SCPFintentEventTp --params TEST/flowObj=True', table:'intent_tp_fobj_tests', results:'intent_tp_fobj_results', file:'IntentEventTPflowObjDB', rFile:'SCPFintentEventTp.R y', extra:neighbors, finalResult:0 ],
+    SCPFintentInstallWithdrawLat: [ flows:true, test:'SCPFintentInstallWithdrawLat', table:'intent_latency_tests', results:'intent_latency_results', file:'IntentInstallWithdrawLatDB', rFile:'SCPFIntentInstallWithdrawRerouteLat.R n', extra:batches, finalResult:1, graphTitle:[ 'Intent Installation Test','Intent Withdrawal Test' ], dbCols:[ 'install_avg','withdraw_avg' ], dbWhere:'AND scale=5 AND batch_size=100', y_axis:'Latency (ms)' ],
+ SCPFintentRerouteLatWithFlowObj: [ flows:true, test:'SCPFintentRerouteLat --params TEST/flowObj=True', table:'intent_reroute_latency_fobj_tests', results:'intent_reroute_latency_fobj_results', file:'IntentRerouteLatDBWithFlowObj', rFile:'SCPFIntentInstallWithdrawRerouteLat.R y', extra:batches, finalResult:0 ],
+ SCPFscalingMaxIntentsWithFlowObj: [ flows:true, test:'SCPFscalingMaxIntents --params TEST/flowObj=True', table:'max_intents_fobj_tests', results:'max_intents_fobj_results', file:'ScalingMaxIntentDBWFO', rFile:'SCPFscalingMaxIntents.R y', extra:none, finalResult:0 ],
+ SCPFintentInstallWithdrawLatWithFlowObj: [ flows:true, test:'SCPFintentInstallWithdrawLat --params TEST/flowObj=True', table:'intent_latency_fobj_tests', results:'intent_latency_fobj_results', file:'IntentInstallWithdrawLatDBWFO', rFile:'SCPFIntentInstallWithdrawRerouteLat.R y', extra:batches, finalResult:0 ],
+ SCPFmastershipFailoverLat: [ flows:false, test:'SCPFmastershipFailoverLat', table:'mastership_failover_tests', results:'mastership_failover_results', file:'mastershipFailoverLatDB', rFile:'SCPFmastershipFailoverLat.R', extra:none, finalResult:1, graphTitle:[ 'Mastership Failover Test' ], dbCols:[ 'kill_deact_avg,deact_role_avg' ], dbWhere:'AND scale=5', y_axis:'Latency (ms)' ]
+ ]
+ graph_saved_directory = "/var/jenkins/workspace/postjob-BM/"
+}
+def getGraphCommand( rFileName, extras, host, port, user, pass, testName, branchName, isOldFlow ){
+ result = ""
+ for( extra in extras ){
+ result += generateGraph( rFileName, " " + extra, host, port, user, pass, testName, branchName, isOldFlow ) + ";"
+ }
+ return result
+}
+def generateGraph( rFileName, batch, host, port, user, pass, testName, branchName, isOldFlow ){
+
+ return generalFuncs.basicGraphPart( generalFuncs.rScriptLocation + rFileName, host, port, user, pass, testName, branchName ) +
+ " " + batch + " " + usingOldFlow( isOldFlow, testName ) + graph_saved_directory
+}
+def generateCombinedResultGraph( host, port, user, pass, testName, branchName, isOldFlow ){
+ result = ""
+
+ for ( int i=0; i< SCPF[ testName ][ 'graphTitle' ].size(); i++ ){
+ result += generalFuncs.basicGraphPart( generalFuncs.rScriptLocation + "SCPFLineGraph.R", host, port, user, pass, "\"" + SCPF[ testName ][ 'graphTitle' ][ i ] + "\"", branchName ) +
+ " " + 50 + " \"SELECT " + checkIfList( testName, 'dbCols', i ) + " FROM " + SCPF[ testName ][ 'table' ] + " WHERE branch=\'" + branchName + "\' " + sqlOldFlow( isOldFlow, testName ) +
+ checkIfList( testName, 'dbWhere', i ) + " ORDER BY date DESC LIMIT 50\" \"" + SCPF[ testName ][ 'y_axis' ] + "\" " + hasOldFlow( isOldFlow, testName ) + graph_saved_directory + ";"
+ }
+ return result
+}
+def checkIfList( testName, forWhich, pos ){
+ return SCPF[ testName ][ forWhich ].getClass().getName() != "java.lang.String" ? SCPF[ testName ][ forWhich ][ pos ] : SCPF[ testName ][ forWhich ]
+}
+def sqlOldFlow( isOldFlow, testName ){
+ return SCPF[ testName ][ 'flows' ] ? " AND " + ( isOldFlow == "true" ? "" : "NOT " ) + "is_old_flow " : ""
+}
+def oldFlowRuleCheck( isOldFlow, branch ){
+ this.isOldFlow = isOldFlow
+ if( isOldFlow == "false" ){
+ SCPF[ 'SCPFflowTp1g' ][ 'test' ] += " --params TEST/flows=" + ( branch == "onos-1.11" ? "4000" : "3500" )
+ }
+}
+def affectedByOldFlow( isOldFlow, testName ){
+ return SCPF[ testName ][ 'flows' ] ? "" + isOldFlow + ", " : ""
+}
+def usingOldFlow( isOldFlow, testName ){
+ return SCPF[ testName ][ 'flows' ] ? ( isOldFlow == "true" ? "y" : "n" ) + " " : ""
+}
+def hasOldFlow( isOldFlow, testName ){
+ return ( SCPF[ testName ][ 'flows' ] && isOldFlow == "true" ? "y" : "n" ) + " "
+}
+def sqlCommand( testName ){
+ if ( testName == "SCPFscaleTopo" || testName == "SCPFswitchLat" || testName == "SCPFportLat" )
+ return "\"INSERT INTO " + SCPF[ testName ][ 'table' ] + " VALUES( '\$DATE','" + SCPF[ testName ][ 'results' ] + "','\$BUILD_NUMBER', \$line, '\$ONOSBranch');\""
+ return "\"INSERT INTO " + SCPF[ testName ][ 'table' ] + " VALUES( '\$DATE','" + SCPF[ testName ][ 'results' ] + "','\$BUILD_NUMBER', '\$ONOSBranch', " + affectedByOldFlow( isOldFlow, testName ) + "\$line);\""
+}
+def databasePart( testName, database_command ){
+ return '''
+ cd /tmp
+ while read line
+ do
+ echo \$line
+ echo ''' + database_command + '''
+ done< ''' + SCPF[ testName ][ 'file' ]
+}
+def getGraphGeneratingCommand( host, port, user, pass, testName, prop ){
+ return getGraphCommand( SCPF[ testName ][ 'rFile' ], SCPF[ testName ][ 'extra' ], host, port, user, pass, testName, prop[ "ONOSBranch" ], isOldFlow ) + '''
+    ''' + ( SCPF[ testName ][ 'finalResult' ] ? generateCombinedResultGraph( host, port, user, pass, testName, prop[ "ONOSBranch" ], isOldFlow ) : "" )
+}
+return this;
\ No newline at end of file
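
A note on checkIfList() in the new PerformanceFuncs.groovy above: it lets each SCPF entry carry dbCols and dbWhere either as a single string shared by every graph or as a per-graph list. A short usage sketch, assuming init() has already populated the SCPF map (values taken from the SCPFswitchLat entry):

    // SCPFswitchLat stores dbCols as a two-element list (one per graph title)
    // and dbWhere as a plain String, so the position argument only matters
    // for the former.
    assert checkIfList( 'SCPFswitchLat', 'dbCols', 1 ) ==
           'fin_ack_to_ack_avg,ack_to_device_avg,down_device_to_graph_avg'
    assert checkIfList( 'SCPFswitchLat', 'dbWhere', 1 ) == 'AND scale=5'
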
diff --git a/TestON/JenkinsFile/SCPFJenkinsFile b/TestON/JenkinsFile/SCPFJenkinsFile
index 1ca7ab2..03913b6 100644
--- a/TestON/JenkinsFile/SCPFJenkinsFile
+++ b/TestON/JenkinsFile/SCPFJenkinsFile
@@ -1,228 +1,31 @@
#!groovy
-import groovy.time.*
+SCPFfuncs = evaluate readTrusted( 'TestON/JenkinsFile/PerformanceFuncs.groovy' )
+SCPFfuncs.init()
+funcs = evaluate readTrusted( 'TestON/JenkinsFile/JenkinsCommonFuncs.groovy' )
+funcs.initialize( "SCPF", SCPFfuncs );
// This is a Jenkinsfile for a scripted pipeline for the SCPF tests
-// properties([pipelineTriggers([cron('30 19 * * *')])])
-
-// TODO: Exception handling around steps
-
-none = [ "" ]
-batches = [1,100,1000]
-neighbors = ['y', 'n']
-times = [ 'y', 'n' ]
-SCPF = [
- SCPFcbench: [ flows:false, test:'SCPFcbench', table:'cbench_bm_tests', results:'cbench_bm_results', file:'CbenchDB', rFile:'SCPFcbench.R', extra:none, finalResult:1, graphTitle:['Cbench Test'], dbCols:'avg', dbWhere:'', y_axis:'Throughput (Responses/sec)'],
- SCPFhostLat: [ flows:false, test:'SCPFhostLat', table:'host_latency_tests', results:'host_latency_results', file:'HostAddLatency', rFile:'SCPFhostLat.R', extra:none,finalResult:1, graphTitle:['Host Latency Test'], dbCols:'avg', dbWhere:'AND scale=5', y_axis:'Latency (ms)'],
- SCPFportLat: [ flows:false, test:'SCPFportLat', table:'port_latency_details', results:'port_latency_results', file:'/tmp/portEventResultDb', rFile:'SCPFportLat.R', extra:none, finalResult:1, graphTitle:['Port Latency Test - Port Up','Port Latency Test - Port Down'], dbCols:[ 'up_ofp_to_dev_avg,up_dev_to_link_avg,up_link_to_graph_avg', 'down_ofp_to_dev_avg,down_dev_to_link_avg,down_link_to_graph_avg' ], dbWhere:'AND scale=5', y_axis:'Latency (ms)' ],
- SCPFflowTp1g: [ flows:true, test:'SCPFflowTp1g', table:'flow_tp_tests', results:'flow_tp_results', file:'flowTP1gDB', rFile:'SCPFflowTp1g.R n', extra:neighbors,finalResult:1, graphTitle:['Flow Throughput Test - neighbors=0','Flow Throughput Test - neighbors=4'], dbCols:'avg', dbWhere:[ 'AND scale=5 AND neighbors=0 ','AND scale=5 AND NOT neighbors=0' ], y_axis:'Throughput (,000 Flows/sec)' ],
- SCPFflowTp1gWithFlowObj: [ flows:true, test:'SCPFflowTp1g --params TEST/flowObj=True', table:'flow_tp_fobj_tests', results:'flow_tp_fobj_results', file:'flowTP1gDBFlowObj', rFile:'SCPFflowTp1g.R y', extra:neighbors, finalResult:0],
- SCPFscaleTopo: [ flows:false, test:'SCPFscaleTopo', table:'scale_topo_latency_details', results:'scale_topo_latency_results', file:'/tmp/scaleTopoResultDb', rFile:'SCPFscaleTopo.R', extra:none, finalResult:1, graphTitle:['Scale Topology Test'], dbCols:[ 'first_connection_to_last_connection, last_connection_to_last_role_request, last_role_request_to_last_topology' ], dbWhere:'AND scale=20' , y_axis:'Latency (s)'],
- SCPFswitchLat: [ flows:false, test:'SCPFswitchLat', table:'switch_latency_details', results:'switch_latency_results', file:'/tmp/switchEventResultDb', rFile:'SCPFswitchLat.R', extra:none, finalResult:1, graphTitle:['Switch Latency Test - Switch Up','Switch Latency Test - Switch Down'], dbCols:[ 'tcp_to_feature_reply_avg,feature_reply_to_role_request_avg,role_request_to_role_reply_avg,role_reply_to_device_avg,up_device_to_graph_avg', 'fin_ack_to_ack_avg,ack_to_device_avg,down_device_to_graph_avg' ], dbWhere:'AND scale=5', y_axis:'Latency (ms)' ],
- SCPFbatchFlowResp: [ flows:true, test:'SCPFbatchFlowResp', table:'batch_flow_tests', results:'batch_flow_results', file:'SCPFbatchFlowRespData', rFile:'SCPFbatchFlowResp.R', extra:none, finalResult:1, graphTitle:['Batch Flow Test - Post', 'Batch Flow Test - Del'], dbCols:[ 'elapsepost, posttoconfrm', 'elapsedel, deltoconfrm' ], dbWhere:'', y_axis:'Latency (ms)'],
- SCPFintentEventTp: [ flows:true, test:'SCPFintentEventTp', table:'intent_tp_tests', results:'intent_tp_results', file:'IntentEventTPDB', rFile:'SCPFintentEventTp.R n', extra:neighbors, finalResult:1, graphTitle:['Intent Throughput Test - neighbors=0','Intent Throughput Test - neighbors=4'], dbCols:'SUM( avg ) as avg', dbWhere:[ 'AND scale=5 AND neighbors=0 GROUP BY date','AND scale=5 AND NOT neighbors=0 GROUP BY date' ], y_axis:'Throughput (Ops/sec)'],
- SCPFintentRerouteLat: [ flows:true, test:'SCPFintentRerouteLat', table:'intent_reroute_latency_tests', results:'intent_reroute_latency_results', file:'IntentRerouteLatDB', rFile:'SCPFIntentInstallWithdrawRerouteLat.R n', extra:batches, finalResult:1, graphTitle:['Intent Reroute Test'], dbCols:'avg', dbWhere:'AND scale=5 AND batch_size=100', y_axis:'Latency (ms)'],
- SCPFscalingMaxIntents: [ flows:true, test:'SCPFscalingMaxIntents', table:'max_intents_tests', results:'max_intents_results', file:'ScalingMaxIntentDB', rFile:'SCPFscalingMaxIntents.R n', extra:times, finalResult:0],
- SCPFintentEventTpWithFlowObj: [ flows:true, test:'SCPFintentEventTp --params TEST/flowObj=True', table:'intent_tp_fobj_tests', results:'intent_tp_fobj_results', file:'IntentEventTPflowObjDB', rFile:'SCPFintentEventTp.R y', extra:neighbors,finalResult:0],
- SCPFintentInstallWithdrawLat: [ flows:true, test:'SCPFintentInstallWithdrawLat', table:'intent_latency_tests', results:'intent_latency_results', file:'IntentInstallWithdrawLatDB', rFile:'SCPFIntentInstallWithdrawRerouteLat.R n', extra:batches,finalResult:1, graphTitle:['Intent Installation Test','Intent Withdrawal Test'], dbCols:[ 'install_avg','withdraw_avg' ], dbWhere:'AND scale=5 AND batch_size=100', y_axis:'Latency (ms)'],
- SCPFintentRerouteLatWithFlowObj: [ flows:true, test:'SCPFintentRerouteLat --params TEST/flowObj=True', table:'intent_reroute_latency_fobj_tests', results:'intent_reroute_latency_fobj_results', file:'IntentRerouteLatDBWithFlowObj', rFile:'SCPFIntentInstallWithdrawRerouteLat.R y', extra:batches, finalResult:0],
- SCPFscalingMaxIntentsWithFlowObj: [ flows:true, test:'SCPFscalingMaxIntents --params TEST/flowObj=True', table:'max_intents_fobj_tests', results:'max_intents_fobj_results', file:'ScalingMaxIntentDBWFO', rFile:'SCPFscalingMaxIntents.R y', extra:times, finalResult:0],
- SCPFintentInstallWithdrawLatWithFlowObj: [ flows:true, test:'SCPFintentInstallWithdrawLat --params TEST/flowObj=True', table:'intent_latency_fobj_tests', results:'intent_latency_fobj_results', file:'IntentInstallWithdrawLatDBWFO', rFile:'SCPFIntentInstallWithdrawRerouteLat.R y', extra:batches, finalResult:0],
- SCPFmastershipFailoverLat: [ flows:false, test:'SCPFmastershipFailoverLat', table:'mastership_failover_tests', results:'mastership_failover_results', file:'mastershipFailoverLatDB', rFile:'SCPFmastershipFailoverLat.R', extra:none, finalResult:1, graphTitle:['Mastership Failover Test'], dbCols:[ 'kill_deact_avg,deact_role_avg' ], dbWhere:'AND scale=5', y_axis:'Latency (ms)' ]
-]
-
-echo("Testcases:")
-graph_generator_directory = "~/OnosSystemTest/TestON/JenkinsFile/scripts/"
-graph_saved_directory = "/var/jenkins/workspace/Pipeline_postjob_BM/"
-def testsToRun = null
def prop = null
-node("TestStation-BMs"){
- prop = readProperties(file:'/var/jenkins/TestONOS.property') // TODO set defaults
- testsToRun = prop["Tests"].tokenize("\n;, ")
- for ( String test : testsToRun ) {
- println test
- }
-}
+prop = funcs.getProperties()
+
+echo( "Testcases:" )
+def testsToRun = null
+testsToRun = funcs.getTestsToRun( prop[ "Tests" ] )
+funcs.printTestToRun( testsToRun )
+
isOldFlow = prop[ "isOldFlow" ]
-oldFlowRuleCheck( isOldFlow )
+SCPFfuncs.oldFlowRuleCheck( isOldFlow, prop[ "ONOSBranch" ] )
def tests = [:]
-for( String test : SCPF.keySet() ){
+for( String test : SCPFfuncs.SCPF.keySet() ){
toBeRun = testsToRun.contains( test )
def stepName = ( toBeRun ? "" : "Not " ) + "Running $test"
- tests[stepName] = SCPFTest(test, toBeRun, prop)
+
+ pureTestName = test.replaceAll( "WithFlowObj", "" )
+ tests[ stepName ] = funcs.runTest( test, toBeRun, prop, pureTestName, false, SCPFfuncs.SCPF, "", "" )
}
-def now = new Date()
+start = funcs.getCurrentTime()
// run the tests
for ( test in tests.keySet() ){
- tests[test].call()
+ tests[ test ].call()
}
-try{
- if( prop["manualRun"] == "false" ){
- def end = new Date()
- TimeDuration duration = TimeCategory.minus( end, now )
- slackSend( color:"#5816EE", message: "SCPF tests ended at: " + end.toString() + "\nTime took : " + duration )
- }
-}
-catch(all){}
-
-// The testName should be the key from the SCPF map
-def SCPFTest( testName, toBeRun, prop ) {
- return {
- catchError{
- stage(testName) {
- if ( toBeRun ){
- workSpace = "/var/jenkins/workspace/"+testName
- node("TestStation-BMs"){
- withEnv(['ONOSBranch='+prop["ONOSBranch"],
- 'ONOSJVMHeap='+prop["ONOSJVMHeap"],
- 'TestONBranch='+prop["TestONBranch"],
- 'ONOSTag='+prop["ONOSTag"],
- 'WikiPrefix='+prop["WikiPrefix"],
- 'WORKSPACE='+workSpace]){
- sh '''#!/bin/bash -l
- set -i # interactive
- set +e
- shopt -s expand_aliases # expand alias in non-interactive mode
- export PYTHONUNBUFFERED=1
-
- ifconfig
-
- echo "ONOS Branch is: $ONOSBranch"
- echo "TestON Branch is: $TestONBranch"
- echo "Test date: "
- date
-
- cd ~
- export PATH=$PATH:onos/tools/test/bin
-
- timeout 240 stc shutdown | head -100
- timeout 240 stc teardown | head -100
- timeout 240 stc shutdown | head -100
-
- cd ~/OnosSystemTest/TestON/bin
- git log |head
- ./cleanup.sh
- ''' + "./cli.py run " + SCPF[testName]['test']
-
- // For moving results
- sh '''#!/bin/bash -i
- set +e
- # remove any leftover files from previous tests
- sudo rm ${WORKSPACE}/*Result.txt
-
- #copy files to workspace
- cd `ls -t ~/OnosSystemTest/TestON/logs/*/ | head -1 | sed 's/://'`
- sudo cp *Result.txt ${WORKSPACE}/
- cd ${WORKSPACE}/
- ls -al
- cd '''
- // Post Results
- if( prop["manualRun"] == "false" || prop["postResult"] == "true" ){
- withCredentials([
- string(credentialsId: 'db_pass', variable: 'pass'),
- string(credentialsId: 'db_user', variable: 'user'),
- string(credentialsId: 'db_host', variable: 'host'),
- string(credentialsId: 'db_port', variable: 'port')]) {
- def database_command = pass + "|psql --host=" + host + " --port=" + port + " --username=" + user + " --password --dbname onostest -c \"INSERT INTO " + SCPF[testName]['table'] + " VALUES('\$DATE','" + SCPF[testName]['results'] + "','\$BUILD_NUMBER', '\$ONOSBranch', " + affectedByOldFlow( isOldFlow, testName ) + "\$line);\""
- if (testName == "SCPFscaleTopo" || testName == "SCPFswitchLat" || testName == "SCPFportLat") {
- database_command = pass + "|psql --host=" + host + " --port=" + port + " --username=" + user + " --password --dbname onostest -c \"INSERT INTO " + SCPF[testName]['table'] + " VALUES('\$DATE','" + SCPF[testName]['results'] + "','\$BUILD_NUMBER', \$line, '\$ONOSBranch');\""
- }
- sh '''#!/bin/bash
-
- export DATE=\$(date +%F_%T)
- cd ~
- pwd
- cd /tmp
- while read line
- do
-
- echo \$line
- echo ''' + database_command + '''
-
- done< ''' + SCPF[testName]['file'] + '''
- ''' + getGraphCommand( SCPF[testName]['rFile'], SCPF[testName]['extra'], host, port, user, pass, testName, prop["ONOSBranch"], isOldFlow ) + '''
- ''' + ( SCPF[testName]['finalResult'] ? generateCombinedResultGraph( host,port, user, pass, testName, prop["ONOSBranch"], , isOldFlow ) : "" )
- }
- }
- // Fetch Logs
- sh '''#!/bin/bash
- set +e
- cd ~/OnosSystemTest/TestON/logs
- echo "Job Name is: ${JOB_NAME}"
- TestONlogDir=$(ls -t | grep ${TEST_NAME}_ |head -1)
- echo "########################################################################################"
- echo "##### copying ONOS logs from all nodes to TestON/logs directory: ${TestONlogDir}"
- echo "########################################################################################"
- cd $TestONlogDir
- if [ $? -eq 1 ]
- then
- echo "Job name does not match any test suite name to move log!"
- else
- pwd
- for i in $OC{1..7}; do onos-fetch-logs $i || echo log does not exist; done
- fi'''
- }
- }
- if( prop["manualRun"] == "false" || prop["postResult"] == "true" ){
- def post = build job: "Pipeline_postjob_BM", propagate: false
- }
- node("TestStation-BMs"){
- resultContents = readFile workSpace + "/" + testName.replaceAll("WithFlowObj","") + "Result.txt"
- resultContents = resultContents.split("\n")
- if( resultContents[ 0 ] == "1" ){
- print "All passed"
- }else{
- print "Failed"
- if( prop["manualRun"] == "false" )
- slackSend(color:"FF0000", message: "[" + prop["ONOSBranch"] + "]" + testName + " : Failed!\n"
- + resultContents[ 1 ] + "\n"
- + "https://onos-jenkins.onlab.us/blue/organizations/jenkins/${env.JOB_NAME}/detail/${env.JOB_NAME}/${env.BUILD_NUMBER}/pipeline" )
- Failed
- }
- }
- }
- }
- }
- }
-}
-def getGraphCommand( rFileName, extras, host, port, user, pass, testName, branchName, isOldFlow ){
- result = ""
- for( extra in extras ){
- result += generateGraph( rFileName, " " + extra, host, port, user, pass, testName, branchName, isOldFlow ) + ";"
- }
- return result
-}
-def generateGraph( rFileName, batch, host, port, user, pass, testName, branchName, isOldFlow ){
- return "Rscript " + graph_generator_directory + rFileName + " " + host + " " + port + " " + user + " " + pass + " " +
- testName + " " + branchName + " " + batch + " " + usingOldFlow( isOldFlow, testName ) + graph_saved_directory
-}
-def generateCombinedResultGraph( host, port, user, pass, testName, branchName, isOldFlow ){
- result = ""
- for ( int i=0; i< SCPF[testName]['graphTitle'].size(); i++){
- result += "Rscript " + graph_generator_directory + "SCPFLineGraph.R " + host + " " + port + " " + user + " " + pass + " \"" + SCPF[testName]['graphTitle'][i] + "\" " +
- branchName + " " + 50 + " \"SELECT " + checkIfList( testName, 'dbCols', i ) + " FROM " + SCPF[testName]['table'] + " WHERE branch=\'" + branchName + "\' " + sqlOldFlow( isOldFlow, testName ) +
- checkIfList( testName, 'dbWhere', i ) + " ORDER BY date DESC LIMIT 50\" \"" + SCPF[testName]['y_axis'] + "\" " + hasOldFlow( isOldFlow, testName ) + graph_saved_directory + ";"
- }
- return result
-}
-def checkIfList( testName, forWhich, pos ){
- return SCPF[testName][forWhich].getClass().getName() != "java.lang.String" ? SCPF[testName][forWhich][pos] : SCPF[testName][forWhich]
-}
-def sqlOldFlow( isOldFlow, testName ){
- return SCPF[ testName ][ 'flows' ] ? " AND " + ( isOldFlow == "true" ? "" : "NOT " ) + "is_old_flow " : ""
-}
-def oldFlowRuleCheck( isOldFlow ){
- if( isOldFlow == "false" ){
- SCPF[ 'SCPFflowTp1g' ][ 'test' ] += " --params TEST/flows=6125"
- SCPF[ 'SCPFbatchFlowResp' ][ 'test' ] += " --params CASE1000/batchSize=100"
- SCPF[ 'SCPFintentEventTp' ][ 'test' ] += " --params TEST/numKeys=4000"
- }
-}
-def affectedByOldFlow( isOldFlow, testName ){
- return SCPF[ testName ][ 'flows' ] ? "" + isOldFlow + ", " : ""
-}
-def usingOldFlow( isOldFlow, testName ){
- return SCPF[ testName ][ 'flows' ] ? ( isOldFlow == "true" ? "y" : "n" ) + " " : ""
-}
-def hasOldFlow( isOldFlow, testName ){
- return ( SCPF[ testName ][ 'flows' ] && isOldFlow == "true" ? "y" : "n" ) + " "
-}
\ No newline at end of file
+funcs.sendResultToSlack( start, prop[ "manualRun" ], prop[ "WikiPrefix" ] )
\ No newline at end of file
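
One detail of the SCPFJenkinsFile rewrite worth spelling out: the *WithFlowObj variants share a TestON suite with their base test, so funcs.runTest() is handed a pureTestName with the suffix stripped, while the full key still selects the variant's own database settings from SCPFfuncs.SCPF. Sketch of the mapping:

    assert 'SCPFintentRerouteLatWithFlowObj'.replaceAll( 'WithFlowObj', '' ) == 'SCPFintentRerouteLat'
    assert 'SCPFswitchLat'.replaceAll( 'WithFlowObj', '' ) == 'SCPFswitchLat' // base tests pass through unchanged
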
diff --git a/TestON/JenkinsFile/SCPF_Graph_Generator b/TestON/JenkinsFile/SCPF_Graph_Generator
new file mode 100644
index 0000000..679ccd4
--- /dev/null
+++ b/TestON/JenkinsFile/SCPF_Graph_Generator
@@ -0,0 +1,28 @@
+#!groovy
+SCPFfuncs = evaluate readTrusted( 'TestON/JenkinsFile/PerformanceFuncs.groovy' )
+SCPFfuncs.init()
+funcs = evaluate readTrusted( 'TestON/JenkinsFile/JenkinsCommonFuncs.groovy' )
+funcs.initialize( "SCPF", SCPFfuncs );
+
+def prop = null
+prop = funcs.getProperties()
+
+def Tests = params.Test
+isOldFlow = params.isOldFlow
+prop[ "ONOSBranch" ] = params.ONOSbranch
+
+SCPFfuncs.oldFlowRuleCheck( isOldFlow, prop[ "ONOSBranch" ] )
+
+def testsToRun = null
+testsToRun = funcs.getTestsToRun( Tests )
+
+def tests = [:]
+for( String test : testsToRun ){
+ println test
+ pureTestName = test.replaceAll( "WithFlowObj", "" )
+ tests[ test ] = funcs.runTest( test, true, prop, pureTestName, true, [], "", "" )
+}
+
+for ( test in tests.keySet() ){
+ tests[ test ].call()
+}
\ No newline at end of file
diff --git a/TestON/JenkinsFile/Trend_Graph_Generator b/TestON/JenkinsFile/Trend_Graph_Generator
new file mode 100644
index 0000000..a9795f8
--- /dev/null
+++ b/TestON/JenkinsFile/Trend_Graph_Generator
@@ -0,0 +1,26 @@
+#!groovy
+funcs = evaluate readTrusted( 'TestON/JenkinsFile/JenkinsCommonFuncs.groovy' )
+nodeCluster = params.NodeCluster
+
+graph_generator_file = "~/OnosSystemTest/TestON/JenkinsFile/scripts/testCaseGraphGenerator.R"
+graph_saved_directory = "/var/jenkins/workspace/postjob-" + nodeCluster + "/"
+
+funcs.initializeTrend( nodeCluster );
+def prop = null
+prop = funcs.getProperties()
+
+def Tests = params.Test
+prop[ "ONOSBranch" ] = params.ONOSbranch
+
+def testsToRun = null
+testsToRun = funcs.getTestsToRun( Tests )
+
+def tests = [:]
+for( String test : testsToRun ){
+ println test
+ tests[ test ] = funcs.runTest( test, true, prop, test, true, [], graph_generator_file, graph_saved_directory )
+}
+
+for ( test in tests.keySet() ){
+ tests[ test ].call()
+}
\ No newline at end of file
diff --git a/TestON/JenkinsFile/USECASEJenkinsFile b/TestON/JenkinsFile/USECASEJenkinsFile
index bfae6c1..3806fc3 100644
--- a/TestON/JenkinsFile/USECASEJenkinsFile
+++ b/TestON/JenkinsFile/USECASEJenkinsFile
@@ -1,202 +1,44 @@
#!groovy
-import groovy.time.*
-// This is a Jenkinsfile for a scripted pipeline for the USECASETest tests
-
-// TODO: Exception handling around steps
-
+funcs = evaluate readTrusted( 'TestON/JenkinsFile/JenkinsCommonFuncs.groovy' )
+funcs.initialize( "USECASE" );
+// This is a Jenkinsfile for a scripted pipeline for the USECASE tests
def prop = null
-node("TestStation-BMs"){
- prop = readProperties(file:'/var/jenkins/TestONOS.property')
-}
+prop = funcs.getProperties()
USECASE = [
- "FUNCvirNetNB" : [wiki_link:prop["WikiPrefix"]+"-"+"FUNCvirNetNB", wiki_file:"FUNCvirNetNBWiki.txt"],
- "FUNCbgpls" : [wiki_link:prop["WikiPrefix"]+"-"+"FUNCbgpls", wiki_file:"FUNCbgplsWiki.txt"],
- "VPLSBasic" : [wiki_link:prop["WikiPrefix"]+"-"+"VPLSBasic", wiki_file:"VPLSBasicWiki.txt"],
- "VPLSfailsafe" : [wiki_link:prop["WikiPrefix"]+"-"+"VPLSfailsafe", wiki_file:"VPLSfailsafeWiki.txt"],
- "PLATdockertest": [wiki_link:"Docker Images sanity test", wiki_file:"PLATdockertestTableWiki.txt"],
- "SRSanity": [wiki_link:prop["WikiPrefix"]+"-"+"SR Sanity", wiki_file:"SRSanityWiki.txt"],
- "SRSwitchFailure": [wiki_link:prop["WikiPrefix"]+"-"+"SR Switch Failure", wiki_file:"SRSwitchFailureWiki.txt"],
- "SRLinkFailure": [wiki_link:prop["WikiPrefix"]+"-"+"SR Link Failure", wiki_file:"SRLinkFailureWiki.txt"],
- "SROnosFailure": [wiki_link:prop["WikiPrefix"]+"-"+"SR Onos node Failure", wiki_file:"SROnosFailureWiki.txt"],
- "SRClusterRestart": [wiki_link:prop["WikiPrefix"]+"-"+"SR Cluster Restart", wiki_file:"SRClusterRestartWiki.txt"],
- "SRDynamic": [wiki_link:prop["WikiPrefix"]+"-"+"SR Dynamic Config", wiki_file:"SRDynamicWiki.txt"],
- "SRHighAvailability": [wiki_link:prop["WikiPrefix"]+"-"+"SR High Availability", wiki_file:"SRHighAvailabilityWiki.txt"],
- "USECASE_SdnipFunction": [wiki_link:prop["WikiPrefix"]+"-"+"SDNIP Function", wiki_file:"USECASE_SdnipFunctionWiki.txt"],
- "USECASE_SdnipFunctionCluster": [wiki_link:prop["WikiPrefix"]+"-"+"SDNIP Function Cluster", wiki_file:"USECASE_SdnipFunctionClusterWiki.txt"]
+ "FUNCvirNetNB" : [ wiki_link:prop[ "WikiPrefix" ] + "-" + "FUNCvirNetNB", wiki_file:"FUNCvirNetNBWiki.txt" ],
+ "FUNCbgpls" : [ wiki_link:prop[ "WikiPrefix" ] + "-" + "FUNCbgpls", wiki_file:"FUNCbgplsWiki.txt" ],
+ "VPLSBasic" : [ wiki_link:prop[ "WikiPrefix" ] + "-" + "VPLSBasic", wiki_file:"VPLSBasicWiki.txt" ],
+ "VPLSfailsafe" : [ wiki_link:prop[ "WikiPrefix" ] + "-" + "VPLSfailsafe", wiki_file:"VPLSfailsafeWiki.txt" ],
+ "PLATdockertest": [ wiki_link:"Docker Images sanity test", wiki_file:"PLATdockertestTableWiki.txt" ],
+ "SRSanity": [ wiki_link:prop[ "WikiPrefix" ] + "-" + "SR Sanity", wiki_file:"SRSanityWiki.txt" ],
+ "SRSwitchFailure": [ wiki_link:prop[ "WikiPrefix" ] + "-" + "SR Switch Failure", wiki_file:"SRSwitchFailureWiki.txt" ],
+ "SRLinkFailure": [ wiki_link:prop[ "WikiPrefix" ] + "-" + "SR Link Failure", wiki_file:"SRLinkFailureWiki.txt" ],
+ "SROnosFailure": [ wiki_link:prop[ "WikiPrefix" ] + "-" + "SR Onos node Failure", wiki_file:"SROnosFailureWiki.txt" ],
+ "SRClusterRestart": [ wiki_link:prop[ "WikiPrefix" ] + "-" + "SR Cluster Restart", wiki_file:"SRClusterRestartWiki.txt" ],
+ "SRDynamic": [ wiki_link:prop[ "WikiPrefix" ] + "-" + "SR Dynamic Config", wiki_file:"SRDynamicWiki.txt" ],
+ "SRHighAvailability": [ wiki_link:prop[ "WikiPrefix" ] + "-" + "SR High Availability", wiki_file:"SRHighAvailabilityWiki.txt" ],
+ "USECASE_SdnipFunction": [ wiki_link:prop[ "WikiPrefix" ] + "-" + "SDNIP Function", wiki_file:"USECASE_SdnipFunctionWiki.txt" ],
+ "USECASE_SdnipFunctionCluster": [ wiki_link:prop[ "WikiPrefix" ] + "-" + "SDNIP Function Cluster", wiki_file:"USECASE_SdnipFunctionClusterWiki.txt" ]
]
-
-table_name = "executed_test_tests"
-result_name = "executed_test_results"
graph_generator_file = "~/OnosSystemTest/TestON/JenkinsFile/scripts/testCaseGraphGenerator.R"
-graph_saved_directory = "/var/jenkins/workspace/Pipeline_postjob_BM/"
+graph_saved_directory = "/var/jenkins/workspace/postjob-BM/"
-echo("Testcases:")
-testsToRun = prop["Tests"].tokenize("\n;, ")
-for ( String test : testsToRun ) {
- println test
-}
+echo( "Testcases:" )
+def testsToRun = null
+testsToRun = funcs.getTestsToRun( prop[ "Tests" ] )
+funcs.printTestToRun( testsToRun )
def tests = [:]
for( String test : USECASE.keySet() ){
toBeRun = testsToRun.contains( test )
def stepName = ( toBeRun ? "" : "Not " ) + "Running $test"
- tests[stepName] = USECASETest(test, toBeRun, prop)
+ tests[ stepName ] = funcs.runTest( test, toBeRun, prop, test, false, USECASE, graph_generator_file, graph_saved_directory )
}
-def now = new Date()
+start = funcs.getCurrentTime()
// run the tests
for ( test in tests.keySet() ){
- tests[test].call()
+ tests[ test ].call()
}
-try{
- if( prop["manualRun"] == "false" ){
- def end = new Date()
- TimeDuration duration = TimeCategory.minus( end, now )
- slackSend( color:"#5816EE", message: "USECASE tests ended at: " + end.toString() + "\nTime took : " + duration )
- }
-}
-catch(all){}
-
-// The testName should be the key from the FUNC
-def USECASETest( testName, toBeRun, prop ) {
- return {
- catchError{
- stage(testName) {
- if ( toBeRun ){
- workSpace = "/var/jenkins/workspace/"+testName
- def fileContents = ""
- node("TestStation-BMs"){
- withEnv(['ONOSBranch='+prop["ONOSBranch"],
- 'ONOSJVMHeap='+prop["ONOSJVMHeap"],
- 'TestONBranch='+prop["TestONBranch"],
- 'ONOSTag='+prop["ONOSTag"],
- 'WikiPrefix='+prop["WikiPrefix"],
- 'WORKSPACE='+workSpace]){
- sh '''#!/bin/bash -l
- set -i # interactive
- set +e
- shopt -s expand_aliases # expand alias in non-interactive mode
- export PYTHONUNBUFFERED=1
-
- ifconfig
-
- echo "ONOS Branch is: $ONOSBranch"
- echo "TestON Branch is: $TestONBranch"
- echo "Test date: "
- date
-
- cd ~
- export PATH=$PATH:onos/tools/test/bin
-
- . .bash_killcmd
- killTestONall
- onos-group uninstall
- timeout 240 stc teardown | head -100
-
- cd ~/OnosSystemTest/TestON/bin
- git log |head
- ./cleanup.sh -f
- ''' + "./cli.py run " + testName + '''
- ./cleanup.sh -f
- cd ~/onos/tools/package/config
- git clean -df'''
-
- // For the Wiki page
- sh '''#!/bin/bash -i
- set +e
- echo "ONOS Branch is: ${ONOSBranch}"
- echo "TestON Branch is: ${TestONBranch}"
-
- echo "Job name is: "''' + testName + '''
- echo "Workspace is: ${WORKSPACE}/"
-
- echo "Wiki page to post is: ${WikiPrefix}-"
-
- # remove any leftover files from previous tests
- sudo rm ${WORKSPACE}/*Wiki.txt
- sudo rm ${WORKSPACE}/*Summary.txt
- sudo rm ${WORKSPACE}/*Result.txt
- sudo rm ${WORKSPACE}/*.csv
-
- #copy files to workspace
- cd `ls -t ~/OnosSystemTest/TestON/logs/*/ | head -1 | sed 's/://'`
- sudo cp *.txt ${WORKSPACE}/
- sudo cp *.csv ${WORKSPACE}/
- cd ${WORKSPACE}/
- for i in *.csv
- do mv "$i" "$WikiPrefix"-"$i"
- done
- ls -al
- cd '''
-
- if( prop["manualRun"] == "false" || prop["postResult"] == "true" ){
- // Post Results
- withCredentials([
- string(credentialsId: 'db_pass', variable: 'pass'),
- string(credentialsId: 'db_user', variable: 'user'),
- string(credentialsId: 'db_host', variable: 'host'),
- string(credentialsId: 'db_port', variable: 'port')]) {
- def database_command = pass + "|psql --host=" + host + " --port=" + port + " --username=" + user + " --password --dbname onostest -c \"INSERT INTO " + table_name + " VALUES('\$DATE','" + result_name + "','" + testName + "',\$BUILD_NUMBER, '\$ONOSBranch', \$line);\""
-
- sh '''#!/bin/bash
- export DATE=\$(date +%F_%T)
- cd ~
- pwd
- sed 1d ''' + workSpace + "/" + prop["WikiPrefix"] + "-" + testName + '''.csv | while read line
- do
- echo \$line
- echo ''' + database_command + '''
-
- done
- Rscript ''' + graph_generator_file + " " + host + " " + port + " " + user + " " + pass + " " + testName + " " + prop["ONOSBranch"] + " 20 " + graph_saved_directory
-
- }
- }
- // Fetch Logs
- sh '''#!/bin/bash
- set +e
- cd ~/OnosSystemTest/TestON/logs
- echo "Job Name is: " + ''' + testName + '''
- TestONlogDir=$(ls -t | grep ${TEST_NAME}_ |head -1)
- echo "########################################################################################"
- echo "##### copying ONOS logs from all nodes to TestON/logs directory: ${TestONlogDir}"
- echo "########################################################################################"
- cd $TestONlogDir
- if [ $? -eq 1 ]
- then
- echo "Job name does not match any test suite name to move log!"
- else
- pwd
- for i in $OC{1..7}; do onos-fetch-logs $i || echo log does not exist; done
- fi
- cd'''
- fileContents = readFile workSpace+"/"+USECASE[testName]['wiki_file']
-
- }
- }
- if( prop["manualRun"] == "false" || prop["postResult"] == "true" ){
- def post = build job: "Pipeline_postjob_BM", propagate: false,
- parameters: [
- string(name: 'Wiki_Contents', value: fileContents),
- string(name: 'Wiki_Link', value: USECASE[testName]['wiki_link'])
- ]
- }
- node("TestStation-BMs"){
- resultContents = readFile workSpace + "/" + testName + "Result.txt"
- resultContents = resultContents.split("\n")
- if( resultContents[ 0 ] == "1" ){
- print "All passed"
- }else{
- print "Failed"
- if( prop["manualRun"] == "false" )
- slackSend(color:"FF0000", message: "[" + prop["ONOSBranch"] + "]" + testName + " : Failed!\n"
- + resultContents[ 1 ] + "\n"
- + "https://onos-jenkins.onlab.us/blue/organizations/jenkins/${env.JOB_NAME}/detail/${env.JOB_NAME}/${env.BUILD_NUMBER}/pipeline" )
- Failed
- }
- }
- }
- }
- }
- }
-}
\ No newline at end of file
+funcs.sendResultToSlack( start, prop[ "manualRun" ], prop[ "WikiPrefix" ] )
+funcs.generateOverallGraph( prop, USECASE, graph_saved_directory )
\ No newline at end of file
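
Taken together, the rewritten pipelines all funnel into a single funcs.runTest() entry point whose declaration lives in JenkinsCommonFuncs.groovy and is not part of this diff. The stub below only records the positional shape inferred from the call sites here, so the parameter names are illustrative, not the real signature:

    // Hypothetical stub; the real implementation is in JenkinsCommonFuncs.groovy.
    def runTest( testName, toBeRun, prop, pureTestName, graphOnly,
                 testCategory, graphGeneratorFile, graphSavedDirectory ) {
        // FUNC/USECASE: runTest( test, toBeRun, prop, test, false, FUNC, graph_generator_file, graph_saved_directory )
        // SCPF:         runTest( test, toBeRun, prop, pureTestName, false, SCPFfuncs.SCPF, "", "" )
        // graph jobs:   runTest( test, true, prop, pureTestName, true, [], ... )
        return { -> println( ( toBeRun ? "" : "Not " ) + "Running " + testName ) }
    }
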
diff --git a/TestON/drivers/common/api/controller/onosrestdriver.py b/TestON/drivers/common/api/controller/onosrestdriver.py
index cf3eec6..915c29b 100755
--- a/TestON/drivers/common/api/controller/onosrestdriver.py
+++ b/TestON/drivers/common/api/controller/onosrestdriver.py
@@ -1416,7 +1416,7 @@
"type": "IP_PROTO",
"protocol": ipProto } )
- return self.sendFlow( deviceId=deviceId, flowJson=flowJson, debug=debug )
+ return self.sendFlow( deviceId=deviceId, flowJson=flowJson, debug=debug, ip=ip, port=port )
except ( AttributeError, TypeError ):
main.log.exception( self.name + ": Object not as expected" )
diff --git a/TestON/tests/HA/HAsingleInstanceRestart/HAsingleInstanceRestart.py b/TestON/tests/HA/HAsingleInstanceRestart/HAsingleInstanceRestart.py
index 90ff577..c8110d2 100644
--- a/TestON/tests/HA/HAsingleInstanceRestart/HAsingleInstanceRestart.py
+++ b/TestON/tests/HA/HAsingleInstanceRestart/HAsingleInstanceRestart.py
@@ -100,9 +100,9 @@
extraApply=[ main.testSetUp.createApplyCell,
main.HA.startingMininet,
main.testSetUp.createApplyCell ],
- applyArgs=[ [ main.Cluster, True, cellName, "", "localhost", True, ip ],
+ applyArgs=[ [ main.Cluster, True, cellName, "", "", True, ip ],
None,
- [ main.Cluster, True, "SingleHA", "", "localhost",
+ [ main.Cluster, True, "SingleHA", "", "",
True, main.Cluster.runningNodes[ 0 ].ipAddress ] ] )
main.HA.initialSetUp()
diff --git a/TestON/tests/PLAT/PLATdockertest/PLATdockertest.params b/TestON/tests/PLAT/PLATdockertest/PLATdockertest.params
index 4e66dea..f9558d5 100755
--- a/TestON/tests/PLAT/PLATdockertest/PLATdockertest.params
+++ b/TestON/tests/PLAT/PLATdockertest/PLATdockertest.params
@@ -1,6 +1,6 @@
<PARAMS>
- <testcases>0,[1,1000,10,110,900,1000]*12,1001</testcases>
+ <testcases>0,[1,1000,10,110,120,130,140,299,900,1000]*12,1001</testcases>
<DOCKER>
<repo>onosproject/onos</repo>
@@ -15,7 +15,10 @@
<CASE110>
<clustercmdpath>../tests/PLAT/PLATdockertest/dependencies</clustercmdpath>
- <apps>org.onosproject.proxyarp,org.onosproject.fwd</apps>
+ <apps>org.onosproject.proxyarp,org.onosproject.fwd,org.onosproject.openflow</apps>
+ <mnCmd>mn --topo tree,2 --controller remote,ip=</mnCmd>
+ <swDPID>of:0000000000000002</swDPID>
+ <debug>on</debug>
</CASE110>
<ENV>
diff --git a/TestON/tests/PLAT/PLATdockertest/PLATdockertest.py b/TestON/tests/PLAT/PLATdockertest/PLATdockertest.py
index dba226c..d871fae 100644
--- a/TestON/tests/PLAT/PLATdockertest/PLATdockertest.py
+++ b/TestON/tests/PLAT/PLATdockertest/PLATdockertest.py
@@ -171,12 +171,13 @@
def CASE110( self, main ):
"""
+ Docker init testing
+
Steps:
1 ) check default startup standalone onos applications status;
2 ) form onos cluster with all nodes;
3 ) check onos applications status;
4 ) activate apps per params and check app status;
- 5 ) deactivate apps and check app status
"""
import time
@@ -185,15 +186,17 @@
main.case( "Form onos cluster and check status of onos apps for onos image {}".format( DOCKERTAG ) )
startupSleep = int( main.params[ "SLEEP" ][ "startup" ] )
+ main.swDPID = main.params[ "CASE110" ][ "swDPID" ]
+ main.debug = main.params[ "CASE110" ][ "debug" ]
appToAct = main.params[ "CASE110" ][ "apps" ]
- stepResult = main.FALSE
+ main.initResult = main.FALSE
main.log.info( "Wait for startup, sleep (sec): " + str( startupSleep ) )
time.sleep( startupSleep )
main.step( "Check initial app states from onos1 for onos image {}".format( DOCKERTAG ) )
- stepResult = main.TRUE
+ main.initResult = main.TRUE
response = main.ONOSbenchRest.apps( ip=IPlist[ 0 ], port=8181 )
main.log.debug( "Rest call response is: " + response )
if response is not main.FALSE:
@@ -201,20 +204,20 @@
if item[ "state" ] not in [ "ACTIVE", "INSTALLED" ]:
main.log.info( "Some bundles are not in correct state. " )
main.log.info( "App states are: " + response )
- stepResult = main.FALSE
+ main.initResult = main.FALSE
break
if ( item[ "description" ] == "Builtin device drivers" ) and ( item[ "state" ] != "ACTIVE" ):
main.log.info( "Driver app is not in 'ACTIVE' state, but in: " + item[ "state" ] )
- stepResult = main.FALSE
+ main.initResult = main.FALSE
break
- utilities.assert_equals( expect=main.TRUE, actual=stepResult,
+ utilities.assert_equals( expect=main.TRUE, actual=main.initResult,
onpass="ONOS successfully started",
onfail="Failed to start ONOS correctly" )
- if stepResult is main.FALSE:
+ if main.initResult is main.FALSE:
main.skipCase()
main.step( "Form onos cluster using 'dependencies/onos-form-cluster' util" )
- stepResult = main.FALSE
+ main.initResult = main.FALSE
clcmdpath = main.params[ "CASE110" ][ "clustercmdpath" ]
main.log.info( "onos-form-cluster cmd path is: " + clcmdpath )
dkruser = main.params[ "DOCKER" ][ "user" ]
@@ -233,52 +236,196 @@
main.log.debug( " IPlist is:" + ",".join( IPlist ) )
main.log.debug( " cluster IP is" + ",".join( clusterIP ) )
if set( IPlist ) == set( clusterIP ):
- stepResult = main.TRUE
+ main.initResult = main.TRUE
- utilities.assert_equals( expect=main.TRUE, actual=stepResult,
+ utilities.assert_equals( expect=main.TRUE, actual=main.initResult,
onpass="ONOS successfully started",
onfail="Failed to start ONOS correctly" )
- if stepResult is main.FALSE:
+ if main.initResult is main.FALSE:
main.skipCase()
main.step( "Check cluster app status" )
- stepResult = main.TRUE
+ main.initResult = main.TRUE
response = main.ONOSbenchRest.apps( ip=IPlist[ 0 ], port=8181 )
if response is not main.FALSE:
for item in json.loads( response ):
if item[ "state" ] not in [ "ACTIVE", "INSTALLED" ]:
main.log.info( "Some bundles are not in correct state. " )
main.log.info( "App states are: " + response )
- stepResult = main.FALSE
+ main.initResult = main.FALSE
break
if ( item[ "description" ] == "Builtin device drivers" ) and ( item[ "state" ] != "ACTIVE" ):
main.log.info( "Driver app is not in 'ACTIVE' state, but in: " + item[ "state" ] )
- stepResult = main.FALSE
+ main.initResult = main.FALSE
break
- utilities.assert_equals( expect=main.TRUE, actual=stepResult,
+ utilities.assert_equals( expect=main.TRUE, actual=main.initResult,
onpass="ONOS successfully started",
onfail="Failed to start ONOS correctly" )
- if stepResult is main.FALSE:
+ if main.initResult is main.FALSE:
main.skipCase()
main.step( " Activate an APP from REST and check APP status" )
appResults = list()
- stepResult = main.TRUE
+ main.initResult = main.TRUE
applist = main.params[ "CASE110" ][ "apps" ].split( "," )
main.log.info( "List of apps to activate: " + str( applist ) )
for app in applist:
appRslt = main.ONOSbenchRest.activateApp( appName=app, ip=IPlist[ 0 ], port=8181, check=True )
time.sleep( 5 )
appResults.append( appRslt )
- stepResult = stepResult and appRslt
+ main.initResult = main.initResult and appRslt
main.log.debug( "Apps activation result for " + ",".join( applist ) + ": " + str( appResults ) )
- utilities.assert_equals( expect=main.TRUE, actual=stepResult,
+ utilities.assert_equals( expect=main.TRUE, actual=main.initResult,
onpass="Successfully activated apps",
onfail="Failed to activated apps correctly" )
- if stepResult is main.FALSE:
+
+ def CASE120( self, main ):
+ """
+ Docker Mininet testing
+ """
+ import time
+ import json
+ from operator import itemgetter
+
+ if main.initResult is main.FALSE:
+ main.mininetResult = main.FALSE
main.skipCase()
- main.step( " Deactivate an APP from REST and check APP status" )
+ main.step( "Loading Mininet Topology." )
+
+ mnCmd = main.params[ "CASE110" ][ "mnCmd" ]
+ main.mininetResult = main.Mininet1.startNet( mnCmd=mnCmd + IPlist[ 0 ] )
+ utilities.assert_equals( expect=main.TRUE,
+ actual=main.mininetResult,
+ onpass="Successfully loaded topology.",
+ onfail="Failed to load topology" )
+
+ if main.mininetResult is main.FALSE:
+ main.skipCase()
+
+ main.mininetResult = utilities.retry( f=main.Mininet1.pingall,
+ retValue=main.FALSE,
+ attempts=3,
+ sleep=5 )
+
+ utilities.assert_equals( expect=main.TRUE,
+ actual=main.mininetResult,
+ onpass="Successfully pinged all hosts.",
+ onfail="Failed to ping all hosts" )
+
+ def CASE130( self, main ):
+ """
+ Docker Intents testing
+ """
+ import time
+ import json
+ from operator import itemgetter
+
+ if main.initResult is main.FALSE or main.mininetResult is main.FALSE:
+ main.intentResult = False
+ main.skipCase()
+
+ main.hosts = sorted( json.loads( main.ONOSbenchRest.hosts( ip=IPlist[ 0 ] ) ), key=itemgetter( "ipAddresses" ) )
+ main.ONOSbenchRest.addHostIntent( main.hosts[ 0 ][ "id" ], main.hosts[ -1 ][ "id" ], ip=IPlist[ 0 ] )
+ main.ONOSbenchRest.addHostIntent( main.hosts[ 1 ][ "id" ], main.hosts[ -2 ][ "id" ], ip=IPlist[ 0 ] )
+
+ main.log.info( "Sleeping for 5 seconds to avoid potential race condition..." )
+ time.sleep( 5 )
+
+ main.step( "Get the intents from each controller" )
+ main.ONOSIntents = main.ONOSbenchRest.intents( IPlist[ 0 ] )
+ main.intentResult = True
+ for i in range( 0, len( IPlist ) ):
+ node = str( IPlist[ i ] )
+ if not main.ONOSIntents[ i ] or "Error" in main.ONOSIntents[ i ]:
+ main.log.error( "Error in getting " + node + " intents" )
+ main.log.warn( node + " intents response: " +
+ repr( main.ONOSIntents[ i ] ) )
+ main.intentResult = False
+
+ utilities.assert_equals( expect=True,
+ actual=main.intentResult,
+ onpass="No error in reading intents output",
+ onfail="Error in reading intents from ONOS" )
+
+ if not main.intentResult:
+ main.skipCase()
+
+ main.step( "Checking intent state" )
+
+ main.intentResult = json.loads( main.ONOSIntents )[ 0 ][ "state" ] == "INSTALLED"
+
+ utilities.assert_equals( expect=True,
+ actual=main.intentResult,
+ onpass="Intent check successful.",
+ onfail="Intent check failed." )
+
+ def CASE140( self, main ):
+ """
+ Docker Flows testing
+ """
+ import time
+ import json
+
+ if main.initResult is main.FALSE or not main.intentResult:
+ main.skipCase()
+
+ main.step( "Adding flows." )
+
+ ingress = 1
+ egress = 2
+
+ main.log.info( "Add flow with MAC selectors." )
+ main.flowResult = main.ONOSbenchRest.addFlow( deviceId=main.swDPID,
+ egressPort=egress,
+ ingressPort=ingress,
+ ethSrc=main.hosts[ 0 ][ 'mac' ],
+ ethDst=main.hosts[ 1 ][ 'mac' ],
+ debug=main.debug,
+ ip=IPlist[ 0 ] )
+
+ main.log.info( "Sleeping for 10 seconds..." )
+ time.sleep( 10 )
+
+ utilities.assert_equals( expect=main.TRUE,
+ actual=main.flowResult,
+ onpass="Successfully added flows",
+ onfail="Failed to add flows" )
+
+ def CASE299( self, main ):
+ """
+ Cleanup Docker testing
+ """
+ import time
+ import json
+
+ if main.initResult is main.FALSE:
+ main.skipCase()
+
+ if main.flowResult is main.TRUE:
+ main.step( "Remove flow." )
+
+ prevFlow = json.loads( main.ONOSbenchRest.getFlows( main.swDPID, ip=IPlist[ 0 ] ) )[ -1 ]
+ stepResult = main.ONOSbenchRest.removeFlow( main.swDPID, prevFlow[ 'id' ], ip=IPlist[ 0 ] )
+ utilities.assert_equals( expect=main.TRUE, actual=stepResult,
+ onpass="Successfully removed flow.",
+ onfail="Failed to remove flow." )
+
+ if main.intentResult:
+ main.step( "Remove intents." )
+ results = []
+ for i in range( 0, len( json.loads( main.ONOSIntents ) ) ):
+ intentID = json.loads( main.ONOSbenchRest.intents( IPlist[ 0 ] ) )[ 0 ][ 'id' ]
+ results.append( main.ONOSbenchRest.removeIntent( intentID, ip=IPlist[ 0 ] ) == main.TRUE )
+
+ utilities.assert_equals( expect=True, actual=all( results ),
+ onpass="Successfully removed intents.",
+ onfail="Failed to remove intents." )
+
+ if main.mininetResult is main.TRUE:
+ main.Mininet1.stopNet()
+
+ main.step( "Deactivate an APP from REST and check APP status" )
appResults = list()
stepResult = main.TRUE
applist = main.params[ "CASE110" ][ "apps" ].split( "," )
@@ -290,10 +437,8 @@
stepResult = stepResult and appRslt
main.log.debug( "Apps deactivation result for " + ",".join( applist ) + ": " + str( appResults ) )
utilities.assert_equals( expect=main.TRUE, actual=stepResult,
- onpass="Successfully deactivated apps",
- onfail="Failed to deactivated apps correctly" )
- if stepResult is main.FALSE:
- main.skipCase()
+ onpass="Successfully deactivated apps",
+ onfail="Failed to deactivate apps correctly" )
def CASE900( self, main ):
"""
diff --git a/TestON/tests/USECASE/SegmentRouting/SRBridging/SRBridging.py b/TestON/tests/USECASE/SegmentRouting/SRBridging/SRBridging.py
index 6f8c9fc..d587f69 100644
--- a/TestON/tests/USECASE/SegmentRouting/SRBridging/SRBridging.py
+++ b/TestON/tests/USECASE/SegmentRouting/SRBridging/SRBridging.py
@@ -16,7 +16,7 @@
test_idx = 1,
topology = '0x1',
onosNodes = 1,
- description = "Bridging test between two untagged hosts" )
+ description = "Bridging test between two untagged hosts on vlan-untagged port" )
def CASE2( self, main ):
"""
@@ -32,7 +32,7 @@
test_idx = 2,
topology = '0x2',
onosNodes = 1,
- description = "Bridging test between two untagged hosts" )
+ description = "Bridging test between two untagged hosts on vlan-untagged port" )
def CASE3( self, main ):
"""
@@ -48,7 +48,7 @@
test_idx = 3,
topology = '2x2',
onosNodes = 1,
- description = "Bridging test between two untagged hosts" )
+ description = "Bridging test between two untagged hosts on vlan-untagged port" )
def CASE4( self, main ):
"""
@@ -64,7 +64,7 @@
test_idx = 4,
topology = '2x4',
onosNodes = 1,
- description = "Bridging test between two untagged hosts" )
+ description = "Bridging test between two untagged hosts on vlan-untagged port" )
def CASE5( self, main ):
"""
@@ -80,7 +80,7 @@
test_idx = 5,
topology = '0x1',
onosNodes = 3,
- description = "Bridging test between two untagged hosts" )
+ description = "Bridging test between two untagged hosts on vlan-untagged port" )
def CASE6( self, main ):
"""
@@ -96,7 +96,7 @@
test_idx = 6,
topology = '0x2',
onosNodes = 3,
- description = "Bridging test between two untagged hosts" )
+ description = "Bridging test between two untagged hosts on vlan-untagged port" )
def CASE7( self, main ):
"""
@@ -112,7 +112,7 @@
test_idx = 7,
topology = '2x2',
onosNodes = 3,
- description = "Bridging test between two untagged hosts" )
+ description = "Bridging test between two untagged hosts on vlan-untagged port" )
def CASE8( self, main ):
"""
@@ -128,7 +128,7 @@
test_idx = 8,
topology = '2x4',
onosNodes = 3,
- description = "Bridging test between two untagged hosts" )
+ description = "Bridging test between two untagged hosts on vlan-untagged port" )
def CASE11( self, main ):
"""
@@ -280,7 +280,7 @@
test_idx = 21,
topology = '0x1',
onosNodes = 1,
- description = "Bridging test between two untagged hosts" )
+ description = "Bridging test between two untagged hosts on vlan-native port" )
def CASE22( self, main ):
"""
@@ -296,7 +296,7 @@
test_idx = 22,
topology = '0x2',
onosNodes = 1,
- description = "Bridging test between two untagged hosts" )
+ description = "Bridging test between two untagged hosts on vlan-native port" )
def CASE23( self, main ):
"""
@@ -312,7 +312,7 @@
test_idx = 23,
topology = '2x2',
onosNodes = 1,
- description = "Bridging test between two untagged hosts" )
+ description = "Bridging test between two untagged hosts on vlan-native port" )
def CASE24( self, main ):
"""
@@ -328,7 +328,7 @@
test_idx = 24,
topology = '2x4',
onosNodes = 1,
- description = "Bridging test between two untagged hosts" )
+ description = "Bridging test between two untagged hosts on vlan-native port" )
def CASE25( self, main ):
"""
@@ -344,7 +344,7 @@
test_idx = 25,
topology = '0x1',
onosNodes = 3,
- description = "Bridging test between two untagged hosts" )
+ description = "Bridging test between two untagged hosts on vlan-native port" )
def CASE26( self, main ):
"""
@@ -360,7 +360,7 @@
test_idx = 26,
topology = '0x2',
onosNodes = 3,
- description = "Bridging test between two untagged hosts" )
+ description = "Bridging test between two untagged hosts on vlan-native port" )
def CASE27( self, main ):
"""
@@ -376,7 +376,7 @@
test_idx = 27,
topology = '2x2',
onosNodes = 3,
- description = "Bridging test between two untagged hosts" )
+ description = "Bridging test between two untagged hosts on vlan-native port" )
def CASE28( self, main ):
"""
@@ -392,7 +392,7 @@
test_idx = 28,
topology = '2x4',
onosNodes = 3,
- description = "Bridging test between two untagged hosts" )
+ description = "Bridging test between two untagged hosts on vlan-native port" )
def CASE31( self, main ):
"""
diff --git a/TestON/tests/USECASE/SegmentRouting/SRBridging/dependencies/SRBridgingTest.py b/TestON/tests/USECASE/SegmentRouting/SRBridging/dependencies/SRBridgingTest.py
index ac79922..6b869c1 100644
--- a/TestON/tests/USECASE/SegmentRouting/SRBridging/dependencies/SRBridgingTest.py
+++ b/TestON/tests/USECASE/SegmentRouting/SRBridging/dependencies/SRBridgingTest.py
@@ -51,7 +51,6 @@
main.cfgName = 'CASE%01d%01d' % ( test_idx / 10, ( ( test_idx - 1 ) % 10 ) % 4 + 1 )
main.configPath = main.path + "/dependencies/"
- main.resultFileName = 'CASE%02d' % test_idx
main.Cluster.setRunningNode( onosNodes )
run.installOnos( main, skipPackage=skipPackage, cliSleep=5 )
if hasattr( main, 'Mininet1' ):
@@ -72,7 +71,7 @@
leaf_dpid = [ "of:%016d" % ( ls + 1 ) for ls in range( topo[ topology ][ 1 ] ) ]
for dpid in leaf_dpid:
run.checkFlowsByDpid( main, dpid, topo[ topology ][ 4 ], sleep=5 )
- run.pingAll( main, 'CASE%02d' % test_idx )
+ run.pingAll( main )
if hasattr( main, 'Mininet1' ):
run.cleanup( main )
diff --git a/TestON/tests/USECASE/SegmentRouting/SRClusterRestart/SRClusterRestart.params b/TestON/tests/USECASE/SegmentRouting/SRClusterRestart/SRClusterRestart.params
index 23057dd..abe305e 100755
--- a/TestON/tests/USECASE/SegmentRouting/SRClusterRestart/SRClusterRestart.params
+++ b/TestON/tests/USECASE/SegmentRouting/SRClusterRestart/SRClusterRestart.params
@@ -18,7 +18,7 @@
<ENV>
<cellName>productionCell</cellName>
- <cellApps>drivers,segmentrouting,openflow-base,netcfghostprovider,netcfglinksprovider</cellApps>
+ <cellApps>drivers,segmentrouting,openflow,netcfghostprovider,netcfglinksprovider</cellApps>
</ENV>
<GIT>
diff --git a/TestON/tests/USECASE/SegmentRouting/SRClusterRestart/SRClusterRestart.py b/TestON/tests/USECASE/SegmentRouting/SRClusterRestart/SRClusterRestart.py
index ce3020c..0e8844a 100644
--- a/TestON/tests/USECASE/SegmentRouting/SRClusterRestart/SRClusterRestart.py
+++ b/TestON/tests/USECASE/SegmentRouting/SRClusterRestart/SRClusterRestart.py
@@ -48,11 +48,11 @@
run.startMininet( main, 'cord_fabric.py' )
# pre-configured routing and bridging test
run.checkFlows( main, minFlowCount=116 )
- run.pingAll( main, "CASE1" )
+ run.pingAll( main )
run.killOnos( main, [ 0, 1, 2 ], '4', '8', '0' )
run.pingAll( main, 'CASE1_Failure', dumpflows=False )
run.recoverOnos( main, [ 0, 1, 2 ], '4', '8', '3' )
- run.checkFlows( main, minFlowCount=116 )
+ run.checkFlows( main, minFlowCount=116, tag='CASE1_Failure' )
run.pingAll( main, 'CASE1_Failure' )
# TODO Dynamic config of hosts in subnet
# TODO Dynamic config of host not in subnet
@@ -81,11 +81,11 @@
run.startMininet( main, 'cord_fabric.py', args="--leaf=4 --spine=4" )
# pre-configured routing and bridging test
run.checkFlows( main, minFlowCount=350 )
- run.pingAll( main, 'CASE2' )
+ run.pingAll( main )
run.killOnos( main, [ 0, 1, 2 ], '8', '32', '0' )
run.pingAll( main, 'CASE2_Failure', dumpflows=False )
run.recoverOnos( main, [ 0, 1, 2 ], '8', '32', '3' )
- run.checkFlows( main, minFlowCount=350 )
+ run.checkFlows( main, minFlowCount=350, tag='CASE2_Failure' )
run.pingAll( main, 'CASE3_Recovery' )
# TODO Dynamic config of hosts in subnet
# TODO Dynamic config of host not in subnet
@@ -114,11 +114,11 @@
run.startMininet( main, 'cord_fabric.py', args="--leaf=1 --spine=0" )
# pre-configured routing and bridging test
run.checkFlows( main, minFlowCount=15 )
- run.pingAll( main, 'CASE3' )
+ run.pingAll( main )
run.killOnos( main, [ 0, 1, 2 ], '1', '0', '0' )
run.pingAll( main, 'CASE3_Failure', dumpflows=False )
run.recoverOnos( main, [ 0, 1, 2 ], '1', '0', '3' )
- run.checkFlows( main, minFlowCount=15 )
+ run.checkFlows( main, minFlowCount=15, tag='CASE3_Failure' )
run.pingAll( main, 'CASE3_Failure' )
# TODO Dynamic config of hosts in subnet
# TODO Dynamic config of host not in subnet
diff --git a/TestON/tests/USECASE/SegmentRouting/SRClusterRestart/SRClusterRestart.topo b/TestON/tests/USECASE/SegmentRouting/SRClusterRestart/SRClusterRestart.topo
index f6661f8..5c4cc02 100755
--- a/TestON/tests/USECASE/SegmentRouting/SRClusterRestart/SRClusterRestart.topo
+++ b/TestON/tests/USECASE/SegmentRouting/SRClusterRestart/SRClusterRestart.topo
@@ -9,7 +9,7 @@
<connect_order>1</connect_order>
<COMPONENTS>
<cluster_name></cluster_name> # Used as a prefix for cluster components. Defaults to 'ONOS'
- <diff_clihost>True</diff_clihost> # if it has different host other than localhost for CLI. True or empty. OC# will be used if True.
+ <diff_clihost></diff_clihost> # Set True if the CLI host differs from localhost. True or empty. OC# will be used if True.
<karaf_username></karaf_username>
<karaf_password></karaf_password>
<web_user></web_user>
diff --git a/TestON/tests/USECASE/SegmentRouting/SRDynamic/SRDynamic.params b/TestON/tests/USECASE/SegmentRouting/SRDynamic/SRDynamic.params
index fba7e9f..fceb91a 100755
--- a/TestON/tests/USECASE/SegmentRouting/SRDynamic/SRDynamic.params
+++ b/TestON/tests/USECASE/SegmentRouting/SRDynamic/SRDynamic.params
@@ -18,7 +18,7 @@
<ENV>
<cellName>productionCell</cellName>
- <cellApps>drivers,segmentrouting,openflow-base,netcfghostprovider,netcfglinksprovider</cellApps>
+ <cellApps>drivers,segmentrouting,openflow,netcfghostprovider,netcfglinksprovider</cellApps>
</ENV>
<GIT>
diff --git a/TestON/tests/USECASE/SegmentRouting/SRDynamic/SRDynamic.py b/TestON/tests/USECASE/SegmentRouting/SRDynamic/SRDynamic.py
index 1af681c..6a228f3 100644
--- a/TestON/tests/USECASE/SegmentRouting/SRDynamic/SRDynamic.py
+++ b/TestON/tests/USECASE/SegmentRouting/SRDynamic/SRDynamic.py
@@ -54,7 +54,7 @@
run.pingAll( main, dumpflows=False, )
run.addHostCfg( main )
run.checkFlows( main, minFlowCount=140, dumpflows=False )
- run.pingAll( main, "CASE1" )
+ run.pingAll( main )
run.cleanup( main )
def CASE2( self, main ):
@@ -79,7 +79,7 @@
run.pingAll( main, dumpflows=False )
run.addHostCfg( main )
run.checkFlows( main, minFlowCount=380, dumpflows=False )
- run.pingAll( main, 'CASE2' )
+ run.pingAll( main )
run.cleanup( main )
def CASE3( self, main ):
@@ -104,7 +104,7 @@
run.pingAll( main, dumpflows=False )
run.addHostCfg( main )
run.checkFlows( main, minFlowCount=18, dumpflows=False )
- run.pingAll( main, 'CASE3' )
+ run.pingAll( main )
run.cleanup( main )
def CASE4( self, main ):
@@ -129,7 +129,7 @@
run.pingAll( main, dumpflows=False )
run.addHostCfg( main )
run.checkFlows( main, minFlowCount=140, dumpflows=False )
- run.pingAll( main, "CASE4" )
+ run.pingAll( main )
run.killOnos( main, [ 0 ], '4', '8', '2' )
run.delHostCfg( main )
run.checkFlows( main, minFlowCount=116, dumpflows=False )
@@ -158,7 +158,7 @@
run.pingAll( main, dumpflows=False )
run.addHostCfg( main )
run.checkFlows( main, minFlowCount=380, dumpflows=False )
- run.pingAll( main, 'CASE5' )
+ run.pingAll( main )
run.killOnos( main, [ 0 ], '8', '32', '2' )
run.delHostCfg( main )
run.checkFlows( main, minFlowCount=350, dumpflows=False )
@@ -187,7 +187,7 @@
run.pingAll( main, dumpflows=False )
run.addHostCfg( main )
run.checkFlows( main, minFlowCount=20, dumpflows=False )
- run.pingAll( main, 'CASE6' )
+ run.pingAll( main )
run.killOnos( main, [ 0 ], '1', '0', '2' )
run.delHostCfg( main )
run.checkFlows( main, minFlowCount=15, dumpflows=False )
diff --git a/TestON/tests/USECASE/SegmentRouting/SRDynamic/SRDynamic.topo b/TestON/tests/USECASE/SegmentRouting/SRDynamic/SRDynamic.topo
index f6661f8..5c4cc02 100755
--- a/TestON/tests/USECASE/SegmentRouting/SRDynamic/SRDynamic.topo
+++ b/TestON/tests/USECASE/SegmentRouting/SRDynamic/SRDynamic.topo
@@ -9,7 +9,7 @@
<connect_order>1</connect_order>
<COMPONENTS>
<cluster_name></cluster_name> # Used as a prefix for cluster components. Defaults to 'ONOS'
- <diff_clihost>True</diff_clihost> # if it has different host other than localhost for CLI. True or empty. OC# will be used if True.
+ <diff_clihost></diff_clihost> # Set to True if a host other than localhost is used for the CLI. True or empty. OC# will be used if True.
<karaf_username></karaf_username>
<karaf_password></karaf_password>
<web_user></web_user>
diff --git a/TestON/tests/USECASE/SegmentRouting/SRHighAvailability/SRHighAvailability.params b/TestON/tests/USECASE/SegmentRouting/SRHighAvailability/SRHighAvailability.params
index cdb841b..dfee950 100644
--- a/TestON/tests/USECASE/SegmentRouting/SRHighAvailability/SRHighAvailability.params
+++ b/TestON/tests/USECASE/SegmentRouting/SRHighAvailability/SRHighAvailability.params
@@ -18,7 +18,7 @@
<ENV>
<cellName>productionCell</cellName>
- <cellApps>drivers,segmentrouting,openflow-base,netcfghostprovider,netcfglinksprovider</cellApps>
+ <cellApps>drivers,segmentrouting,openflow,netcfghostprovider,netcfglinksprovider</cellApps>
</ENV>
<GIT>
diff --git a/TestON/tests/USECASE/SegmentRouting/SRHighAvailability/SRHighAvailability.py b/TestON/tests/USECASE/SegmentRouting/SRHighAvailability/SRHighAvailability.py
index f84b39c..91757f0 100644
--- a/TestON/tests/USECASE/SegmentRouting/SRHighAvailability/SRHighAvailability.py
+++ b/TestON/tests/USECASE/SegmentRouting/SRHighAvailability/SRHighAvailability.py
@@ -61,7 +61,7 @@
run.killOnos( main, [ toKill ], '4', '8', '2' )
run.pingAll( main, 'CASE1_Failure%d' % ( i + 1 ) )
run.recoverOnos( main, [ toKill ], '4', '8', '3' )
- run.checkFlows( main, minFlowCount=116 )
+ run.checkFlows( main, minFlowCount=116, tag='CASE1_Recovery%d' % ( i + 1 ) )
run.pingAll( main, 'CASE1_Recovery%d' % ( i + 1 ) )
run.cleanup( main )
@@ -96,7 +96,7 @@
run.killOnos( main, [ toKill ], '4', '8', '2' )
run.pingAll( main, 'CASE2_Failure%d' % ( i + 1 ) )
run.recoverOnos( main, [ toKill ], '4', '8', '3' )
- run.checkFlows( main, minFlowCount=116 )
+ run.checkFlows( main, minFlowCount=116, tag='CASE2_Recovery%d' % ( i + 1 ) )
run.pingAll( main, 'CASE2_Recovery%d' % ( i + 1 ) )
run.cleanup( main )
@@ -127,7 +127,7 @@
run.killOnos( main, [ toKill ], '8', '32', '2' )
run.pingAll( main, 'CASE3_Failure%d' % ( i + 1 ) )
run.recoverOnos( main, [ toKill ], '8', '32', '3' )
- run.checkFlows( main, minFlowCount=350 )
+ run.checkFlows( main, minFlowCount=350, tag='CASE3_Recovery%d' % ( i + 1 ) )
run.pingAll( main, 'CASE3_Recovery%d' % ( i + 1 ) )
run.cleanup( main )
@@ -162,7 +162,7 @@
run.killOnos( main, [ toKill ], '8', '32', '2' )
run.pingAll( main, 'CASE4_Failure%d' % ( i + 1 ) )
run.recoverOnos( main, [ toKill ], '8', '32', '3' )
- run.checkFlows( main, minFlowCount=350 )
+ run.checkFlows( main, minFlowCount=350, tag='CASE4_Recovery%d' % ( i + 1 ) )
run.pingAll( main, 'CASE4_Recovery%d' % ( i + 1 ) )
run.cleanup( main )
@@ -200,10 +200,10 @@
time.sleep( main.switchSleep )
run.pingAll( main, "CASE5_SWITCH_Failure%d" % ( i + 1 ) )
run.recoverSwitch( main, main.spines[ switchToKill ][ 'name' ], switches='4', links='8' )
- run.checkFlows( main, minFlowCount=116 )
+ run.checkFlows( main, minFlowCount=116, tag="CASE5_SWITCH_Recovery%d" % ( i + 1 ) )
run.pingAll( main, "CASE5_SWITCH_Recovery%d" % ( i + 1 ) )
run.recoverOnos( main, [ onosToKill ], '4', '8', '3' )
- run.checkFlows( main, minFlowCount=116 )
+ run.checkFlows( main, minFlowCount=116, tag='CASE5_ONOS_Recovery%d' % ( i + 1 ) )
run.pingAll( main, 'CASE5_ONOS_Recovery%d' % ( i + 1 ) )
run.cleanup( main )
@@ -244,10 +244,10 @@
time.sleep( main.switchSleep )
run.pingAll( main, "CASE6_SWITCH_Failure%d" % ( i + 1 ) )
run.recoverSwitch( main, main.spines[ switchToKill ][ 'name' ], switches='4', links='8' )
- run.checkFlows( main, minFlowCount=116 )
+ run.checkFlows( main, minFlowCount=116, tag="CASE6_SWITCH_Recovery%d" % ( i + 1 ) )
run.pingAll( main, "CASE6_SWITCH_Recovery%d" % ( i + 1 ) )
run.recoverOnos( main, [ onosToKill ], '4', '8', '3' )
- run.checkFlows( main, minFlowCount=116 )
+ run.checkFlows( main, minFlowCount=116, tag='CASE6_ONOS_Recovery%d' % ( i + 1 ) )
run.pingAll( main, 'CASE6_ONOS_Recovery%d' % ( i + 1 ) )
run.cleanup( main )
@@ -285,10 +285,10 @@
time.sleep( main.switchSleep )
run.pingAll( main, "CASE7_SWITCH_Failure%d" % ( i + 1 ) )
run.recoverSwitch( main, main.spines[ switchToKill ][ 'name' ], switches='8', links='32' )
- run.checkFlows( main, minFlowCount=350 )
+ run.checkFlows( main, minFlowCount=350, tag="CASE7_SWITCH_Recovery%d" % ( i + 1 ) )
run.pingAll( main, "CASE7_SWITCH_Recovery%d" % ( i + 1 ) )
run.recoverOnos( main, [ onosToKill ], '8', '32', '3' )
- run.checkFlows( main, minFlowCount=350 )
+ run.checkFlows( main, minFlowCount=350, tag='CASE7_ONOS_Recovery%d' % ( i + 1 ) )
run.pingAll( main, 'CASE7_ONOS_Recovery%d' % ( i + 1 ) )
run.cleanup( main )
@@ -329,9 +329,9 @@
time.sleep( main.switchSleep )
run.pingAll( main, "CASE8_SWITCH_Failure%d" % ( i + 1 ) )
run.recoverSwitch( main, main.spines[ switchToKill ][ 'name' ], switches='8', links='32' )
- run.checkFlows( main, minFlowCount=350 )
+ run.checkFlows( main, minFlowCount=350, tag="CASE8_SWITCH_Recovery%d" % ( i + 1 ) )
run.pingAll( main, "CASE8_SWITCH_Recovery%d" % ( i + 1 ) )
run.recoverOnos( main, [ onosToKill ], '8', '32', '3' )
- run.checkFlows( main, minFlowCount=350 )
+ run.checkFlows( main, minFlowCount=350, tag='CASE8_ONOS_Recovery%d' % ( i + 1 ) )
run.pingAll( main, 'CASE8_ONOS_Recovery%d' % ( i + 1 ) )
run.cleanup( main )
diff --git a/TestON/tests/USECASE/SegmentRouting/SRHighAvailability/SRHighAvailability.topo b/TestON/tests/USECASE/SegmentRouting/SRHighAvailability/SRHighAvailability.topo
index f6661f8..5c4cc02 100644
--- a/TestON/tests/USECASE/SegmentRouting/SRHighAvailability/SRHighAvailability.topo
+++ b/TestON/tests/USECASE/SegmentRouting/SRHighAvailability/SRHighAvailability.topo
@@ -9,7 +9,7 @@
<connect_order>1</connect_order>
<COMPONENTS>
<cluster_name></cluster_name> # Used as a prefix for cluster components. Defaults to 'ONOS'
- <diff_clihost>True</diff_clihost> # if it has different host other than localhost for CLI. True or empty. OC# will be used if True.
+ <diff_clihost></diff_clihost> # Set to True if a host other than localhost is used for the CLI. True or empty. OC# will be used if True.
<karaf_username></karaf_username>
<karaf_password></karaf_password>
<web_user></web_user>
diff --git a/TestON/tests/USECASE/SegmentRouting/SRLinkFailure/SRLinkFailure.params b/TestON/tests/USECASE/SegmentRouting/SRLinkFailure/SRLinkFailure.params
index a5d4811..ea3112c 100755
--- a/TestON/tests/USECASE/SegmentRouting/SRLinkFailure/SRLinkFailure.params
+++ b/TestON/tests/USECASE/SegmentRouting/SRLinkFailure/SRLinkFailure.params
@@ -18,7 +18,7 @@
<ENV>
<cellName>productionCell</cellName>
- <cellApps>drivers,segmentrouting,openflow-base,netcfghostprovider,netcfglinksprovider</cellApps>
+ <cellApps>drivers,segmentrouting,openflow,netcfghostprovider,netcfglinksprovider</cellApps>
</ENV>
<GIT>
diff --git a/TestON/tests/USECASE/SegmentRouting/SRLinkFailure/SRLinkFailure.topo b/TestON/tests/USECASE/SegmentRouting/SRLinkFailure/SRLinkFailure.topo
index f6661f8..5c4cc02 100755
--- a/TestON/tests/USECASE/SegmentRouting/SRLinkFailure/SRLinkFailure.topo
+++ b/TestON/tests/USECASE/SegmentRouting/SRLinkFailure/SRLinkFailure.topo
@@ -9,7 +9,7 @@
<connect_order>1</connect_order>
<COMPONENTS>
<cluster_name></cluster_name> # Used as a prefix for cluster components. Defaults to 'ONOS'
- <diff_clihost>True</diff_clihost> # if it has different host other than localhost for CLI. True or empty. OC# will be used if True.
+ <diff_clihost></diff_clihost> # Set to True if a host other than localhost is used for the CLI. True or empty. OC# will be used if True.
<karaf_username></karaf_username>
<karaf_password></karaf_password>
<web_user></web_user>
diff --git a/TestON/tests/USECASE/SegmentRouting/SROnosFailure/SROnosFailure.params b/TestON/tests/USECASE/SegmentRouting/SROnosFailure/SROnosFailure.params
index 23057dd..abe305e 100755
--- a/TestON/tests/USECASE/SegmentRouting/SROnosFailure/SROnosFailure.params
+++ b/TestON/tests/USECASE/SegmentRouting/SROnosFailure/SROnosFailure.params
@@ -18,7 +18,7 @@
<ENV>
<cellName>productionCell</cellName>
- <cellApps>drivers,segmentrouting,openflow-base,netcfghostprovider,netcfglinksprovider</cellApps>
+ <cellApps>drivers,segmentrouting,openflow,netcfghostprovider,netcfglinksprovider</cellApps>
</ENV>
<GIT>
diff --git a/TestON/tests/USECASE/SegmentRouting/SROnosFailure/SROnosFailure.py b/TestON/tests/USECASE/SegmentRouting/SROnosFailure/SROnosFailure.py
index 6c972b2..0e2b135 100644
--- a/TestON/tests/USECASE/SegmentRouting/SROnosFailure/SROnosFailure.py
+++ b/TestON/tests/USECASE/SegmentRouting/SROnosFailure/SROnosFailure.py
@@ -48,12 +48,12 @@
run.startMininet( main, 'cord_fabric.py' )
# pre-configured routing and bridging test
run.checkFlows( main, minFlowCount=116 )
- run.pingAll( main, "CASE1" )
+ run.pingAll( main )
run.killOnos( main, [ 0 ], '4', '8', '2' )
run.pingAll( main, 'CASE1_Failure' )
run.recoverOnos( main, [ 0 ], '4', '8', '3' )
- run.checkFlows( main, minFlowCount=116 )
- run.pingAll( main, 'CASE1_Failure' )
+ run.checkFlows( main, minFlowCount=116, tag='CASE1_Recovery' )
+ run.pingAll( main, 'CASE1_Recovery' )
# TODO Dynamic config of hosts in subnet
# TODO Dynamic config of host not in subnet
# TODO Dynamic config of vlan xconnect
@@ -85,8 +85,8 @@
run.killOnos( main, [ 0 ], '8', '32', '2' )
run.pingAll( main, 'CASE2_Failure' )
run.recoverOnos( main, [ 0 ], '8', '32', '3' )
- run.checkFlows( main, minFlowCount=350 )
- run.pingAll( main, 'CASE3_Recovery' )
+ run.checkFlows( main, minFlowCount=350, tag='CASE2_Recovery' )
+ run.pingAll( main, 'CASE2_Recovery' )
# TODO Dynamic config of hosts in subnet
# TODO Dynamic config of host not in subnet
# TODO Dynamic config of vlan xconnect
@@ -118,8 +118,8 @@
run.killOnos( main, [ 0 ], '1', '0', '2' )
run.pingAll( main, 'CASE3_Failure' )
run.recoverOnos( main, [ 0 ], '1', '0', '3' )
- run.checkFlows( main, minFlowCount=15 )
- run.pingAll( main, 'CASE3_Failure' )
+ run.checkFlows( main, minFlowCount=15, tag='CASE3_Recovery' )
+ run.pingAll( main, 'CASE3_Recovery' )
# TODO Dynamic config of hosts in subnet
# TODO Dynamic config of host not in subnet
# TODO Dynamic config of vlan xconnect
diff --git a/TestON/tests/USECASE/SegmentRouting/SROnosFailure/SROnosFailure.topo b/TestON/tests/USECASE/SegmentRouting/SROnosFailure/SROnosFailure.topo
index e0b1d43..28ecb9a 100755
--- a/TestON/tests/USECASE/SegmentRouting/SROnosFailure/SROnosFailure.topo
+++ b/TestON/tests/USECASE/SegmentRouting/SROnosFailure/SROnosFailure.topo
@@ -9,7 +9,7 @@
<connect_order>1</connect_order>
<COMPONENTS>
<cluster_name></cluster_name> # Used as a prefix for cluster components. Defaults to 'ONOS'
- <diff_clihost>True</diff_clihost> # if it has different host other than localhost for CLI. True or empty. OC# will be used if True.
+ <diff_clihost></diff_clihost> # Set to True if a host other than localhost is used for the CLI. True or empty. OC# will be used if True.
<karaf_username></karaf_username>
<karaf_password></karaf_password>
<web_user></web_user>
diff --git a/TestON/tests/USECASE/SegmentRouting/SRRouting/README.md b/TestON/tests/USECASE/SegmentRouting/SRRouting/README.md
new file mode 100644
index 0000000..6d461c4
--- /dev/null
+++ b/TestON/tests/USECASE/SegmentRouting/SRRouting/README.md
@@ -0,0 +1,21 @@
+This test verifies basic L2/L3 connectivity over a SegmentRouting fabric via pingall.
+
+It consists of:
+
+1) Configuring and installing an ONOS cluster
+2) Starting Mininet and checking the flow state
+3) Pingall
+
+<h3>Requirements</h3>
+ - Trellis leaf-spine fabric: please visit the following URL to set up a Trellis leaf-spine fabric:
+ https://github.com/opennetworkinglab/routing/tree/master/trellis
+ - ONOS_APPS=drivers,openflow,segmentrouting,fpm,netcfghostprovider
+
+<h3>Topologies</h3>
+- 2x5 Comcast Topology.
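+
+<h3>Running the test</h3>
+A minimal sketch, assuming a standard TestON checkout with the cell and ONOS_APPS configured as above:
+
+    cd TestON/bin
+    ./cli.py run SRRouting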
diff --git a/TestON/tests/USECASE/SegmentRouting/SRRouting/SRRouting.params b/TestON/tests/USECASE/SegmentRouting/SRRouting/SRRouting.params
new file mode 100644
index 0000000..6ce7f81
--- /dev/null
+++ b/TestON/tests/USECASE/SegmentRouting/SRRouting/SRRouting.params
@@ -0,0 +1,41 @@
+<PARAMS>
+ <testcases>1, 2</testcases>
+
+ <GRAPH>
+ <nodeCluster>VM</nodeCluster>
+ <builds>20</builds>
+ </GRAPH>
+
+ <SCALE>
+ <size>3</size>
+ <max>3</max>
+ </SCALE>
+
+ <DEPENDENCY>
+ <topology>comcast_fabric.py</topology>
+ <lib>routinglib.py,trellislib.py,trellis_fabric.py,dhcpd.conf</lib>
+ </DEPENDENCY>
+
+ <ENV>
+ <cellName>productionCell</cellName>
+ <cellApps>drivers,openflow,segmentrouting,fpm,dhcprelay,netcfghostprovider,routeradvertisement</cellApps>
+ </ENV>
+
+ <GIT>
+ <pull>False</pull>
+ <branch>master</branch>
+ </GIT>
+
+ <CTRL>
+ <port>6653</port>
+ </CTRL>
+
+ <timers>
+ <LinkDiscovery>12</LinkDiscovery>
+ <SwitchDiscovery>12</SwitchDiscovery>
+ </timers>
+
+ <SLEEP>
+ <startup>10</startup>
+ </SLEEP>
+</PARAMS>
diff --git a/TestON/tests/USECASE/SegmentRouting/SRRouting/SRRouting.py b/TestON/tests/USECASE/SegmentRouting/SRRouting/SRRouting.py
new file mode 100644
index 0000000..b3d61f3
--- /dev/null
+++ b/TestON/tests/USECASE/SegmentRouting/SRRouting/SRRouting.py
@@ -0,0 +1,38 @@
+
+class SRRouting:
+ def __init__( self ):
+ self.default = ''
+
+ def CASE1( self, main ):
+ """
+ Ping between all ipv4 hosts in the topology.
+ """
+
+ from tests.USECASE.SegmentRouting.SRRouting.dependencies.SRRoutingTest import SRRoutingTest
+
+ SRRoutingTest.runTest( main,
+ test_idx=1,
+ onosNodes=3,
+ dhcp=0,
+ routers=0,
+ ipv4=1,
+ ipv6=0,
+ description="Ping between all ipv4 hosts in the topology" )
+
+ def CASE2( self, main ):
+ """
+ Ping between all ipv6 hosts in the topology.
+ """
+
+ from tests.USECASE.SegmentRouting.SRRouting.dependencies.SRRoutingTest import SRRoutingTest
+
+ SRRoutingTest.runTest( main,
+ test_idx=2,
+ onosNodes=3,
+ dhcp=0,
+ routers=0,
+ ipv4=0,
+ ipv6=1,
+ description="Ping between all ipv6 hosts in the topology" )
+
+
diff --git a/TestON/tests/USECASE/SegmentRouting/SRRouting/SRRouting.topo b/TestON/tests/USECASE/SegmentRouting/SRRouting/SRRouting.topo
new file mode 100644
index 0000000..01316b6
--- /dev/null
+++ b/TestON/tests/USECASE/SegmentRouting/SRRouting/SRRouting.topo
@@ -0,0 +1,36 @@
+<TOPOLOGY>
+ <COMPONENT>
+ <ONOScell>
+ <host>localhost</host> # ONOS "bench" machine
+ <user>sdn</user>
+ <password>rocks</password>
+ <type>OnosClusterDriver</type>
+ <connect_order>1</connect_order>
+ <COMPONENTS>
+ <cluster_name></cluster_name> # Used as a prefix for cluster components. Defaults to 'ONOS'
+ <diff_clihost></diff_clihost> # Set to True if a host other than localhost is used for the CLI. True or empty. OC# will be used if True.
+ <karaf_username></karaf_username>
+ <karaf_password></karaf_password>
+ <web_user></web_user>
+ <web_pass></web_pass>
+ <rest_port></rest_port>
+ <prompt></prompt> # TODO: we technically need a few of these, one per component
+ <onos_home></onos_home> # defines where onos home is
+ <nodes>3</nodes> # number of nodes in the cluster
+ </COMPONENTS>
+ </ONOScell>
+
+ <Mininet1>
+ <host>OCN</host>
+ <user>sdn</user>
+ <password>rocks</password>
+ <type>MininetCliDriver</type>
+ <connect_order>2</connect_order>
+ <COMPONENTS>
+ <home>~/mininet/custom/</home>
+ <prompt></prompt>
+ </COMPONENTS>
+ </Mininet1>
+
+ </COMPONENT>
+</TOPOLOGY>
diff --git a/TestON/tests/USECASE/SegmentRouting/SRRouting/__init__.py b/TestON/tests/USECASE/SegmentRouting/SRRouting/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/TestON/tests/USECASE/SegmentRouting/SRRouting/__init__.py
diff --git a/TestON/tests/USECASE/SegmentRouting/SRRouting/dependencies/SRRoutingTest.py b/TestON/tests/USECASE/SegmentRouting/SRRouting/dependencies/SRRoutingTest.py
new file mode 100644
index 0000000..0f0caac
--- /dev/null
+++ b/TestON/tests/USECASE/SegmentRouting/SRRouting/dependencies/SRRoutingTest.py
@@ -0,0 +1,83 @@
+"""
+Copyright 2017 Open Networking Foundation ( ONF )
+
+Please refer questions to either the onos test mailing list at <onos-test@onosproject.org>,
+the System Testing Plans and Results wiki page at <https://wiki.onosproject.org/x/voMg>,
+or the System Testing Guide page at <https://wiki.onosproject.org/x/WYQg>
+
+ TestON is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 2 of the License, or
+ ( at your option ) any later version.
+
+ TestON is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with TestON. If not, see <http://www.gnu.org/licenses/>.
+"""
+
+from tests.USECASE.SegmentRouting.dependencies.Testcaselib import Testcaselib as run
+import time
+
+class SRRoutingTest():
+
+ topo = {}
+
+ def __init__( self ):
+ self.default = ''
+
+ @staticmethod
+ def runTest( main, test_idx, onosNodes, dhcp, routers, ipv4, ipv6, description ):
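+ # test_idx numbers the case; it is reused for the result file name and ping tag below.
+ # dhcp/routers/ipv4/ipv6 are 0/1 flags forwarded to the Mininet topology script.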
+
+ skipPackage = False
+ init = False
+ if not hasattr( main, 'apps' ):
+ init = True
+ run.initTest( main )
+
+ # Skip onos packaging if the cluster size stays the same
+ if not init and onosNodes == main.Cluster.numCtrls:
+ skipPackage = True
+
+ main.case( '%s, %d ONOS instances' %
+ ( description, onosNodes ) )
+
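+ # cfgName selects the netcfg JSON and ping chart files under dependencies/json/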
+ main.cfgName = 'COMCAST_CONFIG_ipv4=%d_ipv6=%d_dhcp=%d_routers=%d' % \
+ (ipv4, ipv6, dhcp, routers)
+ main.configPath = main.path + "/dependencies/"
+ main.resultFileName = 'CASE%02d' % test_idx
+ main.Cluster.setRunningNode(onosNodes)
+
+ run.installOnos(main, skipPackage=skipPackage, cliSleep=5,
+ parallel=False)
+
+ # Give ONOS a moment to settle after installation
+ time.sleep(5)
+
+ if hasattr(main, 'Mininet1'):
+ # Run the test with Mininet
+ mininet_args = ' --dhcp=%s --routers=%s --ipv6=%s --ipv4=%s' % (dhcp, routers, ipv6, ipv4)
+ run.startMininet(main, 'comcast_fabric.py', args=mininet_args)
+ else:
+ # Run the test with physical devices
+ # TODO: connect TestON to the physical network
+ pass
+
+ # Wait some time for ONOS to install the flow rules
+ time.sleep(15)
+
+ # ping hosts
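+ # Expected reachability per IP version is listed in the matching .chart file added under dependencies/json/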
+ run.pingAllBasedOnIp( main, 'CASE%02d' % test_idx )
+
+ if hasattr(main, 'Mininet1'):
+ run.cleanup(main)
+ else:
+ # TODO: disconnect TestON from the physical network
+ pass
diff --git a/TestON/tests/USECASE/SegmentRouting/SRRouting/dependencies/__init__.py b/TestON/tests/USECASE/SegmentRouting/SRRouting/dependencies/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/TestON/tests/USECASE/SegmentRouting/SRRouting/dependencies/__init__.py
diff --git a/TestON/tests/USECASE/SegmentRouting/SRRouting/dependencies/json/COMCAST_CONFIG_ipv4=0_ipv6=1_dhcp=0_routers=0.chart b/TestON/tests/USECASE/SegmentRouting/SRRouting/dependencies/json/COMCAST_CONFIG_ipv4=0_ipv6=1_dhcp=0_routers=0.chart
new file mode 100644
index 0000000..e77d1c5
--- /dev/null
+++ b/TestON/tests/USECASE/SegmentRouting/SRRouting/dependencies/json/COMCAST_CONFIG_ipv4=0_ipv6=1_dhcp=0_routers=0.chart
@@ -0,0 +1,4 @@
+{
+ "ipv6": {"expect": "True",
+ "hosts":["h1v6", "h2v6" ,"h3v6", "h4v6", "h5v6", "h6v6", "h7v6", "h8v6", "h9v6", "h10v6", "h11v6"]}
+}
diff --git a/TestON/tests/USECASE/SegmentRouting/SRRouting/dependencies/json/COMCAST_CONFIG_ipv4=0_ipv6=1_dhcp=0_routers=0.json b/TestON/tests/USECASE/SegmentRouting/SRRouting/dependencies/json/COMCAST_CONFIG_ipv4=0_ipv6=1_dhcp=0_routers=0.json
new file mode 100644
index 0000000..da381ab
--- /dev/null
+++ b/TestON/tests/USECASE/SegmentRouting/SRRouting/dependencies/json/COMCAST_CONFIG_ipv4=0_ipv6=1_dhcp=0_routers=0.json
@@ -0,0 +1,321 @@
+{
+ "ports" : {
+ "of:0000000000000001/3" : {
+ "interfaces" : [
+ {
+ "ips" : [ "1000::3ff/120" ],
+ "vlan-untagged": 10
+ }
+ ]
+ },
+ "of:0000000000000001/4" : {
+ "interfaces" : [
+ {
+ "ips" : [ "1001::3ff/120" ],
+ "vlan-untagged": 10
+ }
+ ]
+ },
+ "of:0000000000000002/6" : {
+ "interfaces" : [
+ {
+ "ips" : [ "1002::3ff/120" ],
+ "vlan-untagged": 10
+ }
+ ]
+ },
+ "of:0000000000000002/7" : {
+ "interfaces" : [
+ {
+ "ips" : [ "1003::3ff/120" ],
+ "vlan-untagged": 15
+ }
+ ]
+ },
+ "of:0000000000000002/8" : {
+ "interfaces" : [
+ {
+ "ips" : [ "1004::3ff/120" ],
+ "vlan-tagged": [30]
+ }
+ ]
+ },
+ "of:0000000000000003/8" : {
+ "interfaces" : [
+ {
+ "ips" : [ "1005::3ff/120" ],
+ "vlan-tagged": [20]
+ }
+ ]
+ },
+ "of:0000000000000003/9" : {
+ "interfaces" : [
+ {
+ "ips" : [ "1006::3ff/120" ],
+ "vlan-tagged": [40]
+ }
+ ]
+ },
+ "of:0000000000000003/6" : {
+ "interfaces" : [
+ {
+ "ips" : [ "1003::3ff/120" ],
+ "vlan-untagged": 15
+ }
+ ]
+ },
+ "of:0000000000000003/7" : {
+ "interfaces" : [
+ {
+ "ips" : [ "1004::3ff/120" ],
+ "vlan-tagged": [30]
+ }
+ ]
+ },
+ "of:0000000000000004/6" : {
+ "interfaces" : [
+ {
+ "ips" : [ "1007::3ff/120" ],
+ "vlan-tagged": [30]
+ }
+ ]
+ },
+ "of:0000000000000004/7" : {
+ "interfaces" : [
+ {
+ "ips" : [ "1008::3ff/120" ],
+ "vlan-tagged": [40]
+ }
+ ]
+ },
+ "of:0000000000000004/8" : {
+ "interfaces" : [
+ {
+ "ips" : [ "1009::3ff/120" ],
+ "vlan-tagged": [40]
+ }
+ ]
+ },
+ "of:0000000000000005/8" : {
+ "interfaces" : [
+ {
+ "ips" : [ "1010::3ff/120" ],
+ "vlan-tagged": [40]
+ }
+ ]
+ },
+ "of:0000000000000005/6" : {
+ "interfaces" : [
+ {
+ "ips" : [ "1008::3ff/120" ],
+ "vlan-tagged": [40]
+ }
+ ]
+ },
+ "of:0000000000000005/7" : {
+ "interfaces" : [
+ {
+ "ips" : [ "1009::3ff/120" ],
+ "vlan-tagged": [40]
+ }
+ ]
+ }
+ },
+ "hosts" : {
+ "00:aa:00:00:00:01/None" : {
+ "basic": {
+ "locations": ["of:0000000000000001/3"],
+ "ips": ["1000::3fe"]
+ }
+ },
+ "00:aa:00:00:01:01/None" : {
+ "basic": {
+ "locations": ["of:0000000000000001/4"],
+ "ips": ["1001::3fe"]
+ }
+ },
+ "00:aa:00:00:00:02/None" : {
+ "basic": {
+ "locations": ["of:0000000000000002/6"],
+ "ips": ["1002::3fe"]
+ }
+ },
+ "00:aa:00:00:00:03/None" : {
+ "basic": {
+ "locations": ["of:0000000000000002/7", "of:0000000000000003/6"],
+ "ips": ["1003::3fe"]
+ }
+ },
+ "00:aa:00:00:00:04/30" : {
+ "basic": {
+ "locations": ["of:0000000000000003/7","of:0000000000000002/8"],
+ "ips": ["1004::3fe"]
+ }
+ },
+ "00:aa:00:00:00:05/20" : {
+ "basic": {
+ "locations": ["of:0000000000000003/8"],
+ "ips": ["1005::3fe"]
+ }
+ },
+ "00:aa:00:00:01:05/40" : {
+ "basic": {
+ "locations": ["of:0000000000000003/9"],
+ "ips": ["1006::3fe"]
+ }
+ },
+ "00:aa:00:00:00:06/30" : {
+ "basic": {
+ "locations": ["of:0000000000000004/6"],
+ "ips": ["1007::3fe"]
+ }
+ },
+ "00:aa:00:00:00:07/40" : {
+ "basic": {
+ "locations": ["of:0000000000000005/6", "of:0000000000000004/7"],
+ "ips": ["1008::3fe"]
+ }
+ },
+ "00:aa:00:00:00:08/40" : {
+ "basic": {
+ "locations": ["of:0000000000000004/8","of:0000000000000005/7"],
+ "ips": ["1009::3fe"]
+ }
+ },
+ "00:aa:00:00:00:0A/40" : {
+ "basic": {
+ "locations": ["of:0000000000000005/8"],
+ "ips": ["1010::3fe"]
+ }
+ }
+ },
+ "devices" : {
+ "of:0000000000000001" : {
+ "segmentrouting" : {
+ "name" : "s001",
+ "ipv4NodeSid" : 1,
+ "ipv6NodeSid" : 101,
+ "ipv6Loopback" : "2000::c0a8:0001",
+ "ipv4Loopback" : "192.168.0.1",
+ "routerMac" : "00:00:00:00:00:01",
+ "isEdgeRouter" : true,
+ "adjacencySids" : []
+ },
+ "basic" : {
+ "driver" : "ofdpa-ovs",
+ "latitude":34,
+ "longitude":-105
+ }
+ },
+ "of:0000000000000002" : {
+ "segmentrouting" : {
+ "name" : "s002",
+ "ipv4NodeSid" : 2,
+ "ipv4Loopback" : "192.168.0.2",
+ "ipv6NodeSid" : 102,
+ "ipv6Loopback" : "2000::c0a8:0002",
+ "routerMac" : "00:00:00:00:00:02",
+ "isEdgeRouter" : true,
+ "pairLocalPort" : 5,
+ "pairDeviceId": "of:0000000000000003",
+ "adjacencySids" : []
+ },
+ "basic" : {
+ "driver" : "ofdpa-ovs",
+ "latitude":34,
+ "longitude":-95
+ }
+ },
+ "of:0000000000000003" : {
+ "segmentrouting" : {
+ "name" : "s003",
+ "ipv4NodeSid" : 3,
+ "ipv4Loopback" : "192.168.0.3",
+ "ipv6NodeSid" : 103,
+ "ipv6Loopback" : "2000::c0a8:0003",
+ "routerMac" : "00:00:00:00:00:02",
+ "isEdgeRouter" : true,
+ "pairLocalPort" : 5,
+ "pairDeviceId": "of:0000000000000002",
+ "adjacencySids" : []
+ },
+ "basic" : {
+ "driver" : "ofdpa-ovs",
+ "latitude":34,
+ "longitude":-90
+ }
+ },
+ "of:0000000000000004" : {
+ "segmentrouting" : {
+ "name" : "s004",
+ "ipv4NodeSid" : 4,
+ "ipv4Loopback" : "192.168.0.4",
+ "ipv6NodeSid" : 104,
+ "ipv6Loopback" : "2000::c0a8:0004",
+ "routerMac" : "00:00:00:00:00:04",
+ "isEdgeRouter" : true,
+ "pairLocalPort" : 5,
+ "pairDeviceId": "of:0000000000000005",
+ "adjacencySids" : []
+ },
+ "basic" : {
+ "driver" : "ofdpa-ovs",
+ "latitude":34,
+ "longitude":-85
+ }
+ },
+ "of:0000000000000005" : {
+ "segmentrouting" : {
+ "name" : "s005",
+ "ipv4NodeSid" : 5,
+ "ipv4Loopback" : "192.168.0.5",
+ "ipv6NodeSid" : 105,
+ "ipv6Loopback" : "2000::c0a8:0005",
+ "routerMac" : "00:00:00:00:00:04",
+ "isEdgeRouter" : true,
+ "pairLocalPort" : 5,
+ "pairDeviceId": "of:0000000000000004",
+ "adjacencySids" : []
+ },
+ "basic" : {
+ "driver" : "ofdpa-ovs",
+ "latitude":34,
+ "longitude":-80
+ }
+ },
+ "of:0000000000000101" : {
+ "segmentrouting" : {
+ "name" : "s101",
+ "ipv4NodeSid" : 101,
+ "ipv4Loopback" : "192.168.0.101",
+ "ipv6NodeSid" : 201,
+ "ipv6Loopback" : "2000::c0a8:0101",
+ "routerMac" : "00:00:00:00:01:01",
+ "isEdgeRouter" : false,
+ "adjacencySids" : []
+ },
+ "basic" : {
+ "driver" : "ofdpa-ovs",
+ "latitude":42,
+ "longitude":-100
+ }
+ },
+ "of:0000000000000102" : {
+ "segmentrouting" : {
+ "name" : "s102",
+ "ipv4NodeSid" : 102,
+ "ipv4Loopback" : "192.168.0.102",
+ "ipv6NodeSid" : 202,
+ "ipv6Loopback" : "2000::c0a8:0202",
+ "routerMac" : "00:00:00:00:01:02",
+ "isEdgeRouter" : false,
+ "adjacencySids" : []
+ },
+ "basic" : {
+ "driver" : "ofdpa-ovs",
+ "latitude":42,
+ "longitude":-95
+ }
+ }
+ }
+}
diff --git a/TestON/tests/USECASE/SegmentRouting/SRRouting/dependencies/json/COMCAST_CONFIG_ipv4=0_ipv6=1_dhcp=1_routers=0.json b/TestON/tests/USECASE/SegmentRouting/SRRouting/dependencies/json/COMCAST_CONFIG_ipv4=0_ipv6=1_dhcp=1_routers=0.json
new file mode 100644
index 0000000..d4f36ea
--- /dev/null
+++ b/TestON/tests/USECASE/SegmentRouting/SRRouting/dependencies/json/COMCAST_CONFIG_ipv4=0_ipv6=1_dhcp=1_routers=0.json
@@ -0,0 +1,332 @@
+{
+ "ports" : {
+ "of:0000000000000001/3" : {
+ "interfaces" : [
+ {
+ "ips" : [ "1000::3ff/120" ],
+ "vlan-untagged": 10
+ }
+ ]
+ },
+ "of:0000000000000001/4" : {
+ "interfaces" : [
+ {
+ "ips" : [ "1001::3ff/120" ],
+ "vlan-untagged": 10
+ }
+ ]
+ },
+ "of:0000000000000002/6" : {
+ "interfaces" : [
+ {
+ "ips" : [ "1002::3ff/120" ],
+ "vlan-untagged": 10
+ }
+ ]
+ },
+ "of:0000000000000002/7" : {
+ "interfaces" : [
+ {
+ "ips" : [ "1003::3ff/120" ],
+ "vlan-untagged": 15
+ }
+ ]
+ },
+ "of:0000000000000002/8" : {
+ "interfaces" : [
+ {
+ "ips" : [ "1004::3ff/120" ],
+ "vlan-tagged": [30]
+ }
+ ]
+ },
+ "of:0000000000000003/8" : {
+ "interfaces" : [
+ {
+ "ips" : [ "1005::3ff/120" ],
+ "vlan-tagged": [20]
+ }
+ ]
+ },
+ "of:0000000000000003/9" : {
+ "interfaces" : [
+ {
+ "ips" : [ "1006::3ff/120" ],
+ "vlan-tagged": [40]
+ }
+ ]
+ },
+ "of:0000000000000003/6" : {
+ "interfaces" : [
+ {
+ "ips" : [ "1003::3ff/120" ],
+ "vlan-untagged": 15
+ }
+ ]
+ },
+ "of:0000000000000003/7" : {
+ "interfaces" : [
+ {
+ "ips" : [ "1004::3ff/120" ],
+ "vlan-tagged": [30]
+ }
+ ]
+ },
+ "of:0000000000000004/6" : {
+ "interfaces" : [
+ {
+ "ips" : [ "1007::3ff/120" ],
+ "vlan-tagged": [30]
+ }
+ ]
+ },
+ "of:0000000000000004/7" : {
+ "interfaces" : [
+ {
+ "ips" : [ "1008::3ff/120" ],
+ "vlan-tagged": [40]
+ }
+ ]
+ },
+ "of:0000000000000004/8" : {
+ "interfaces" : [
+ {
+ "ips" : [ "1009::3ff/120" ],
+ "vlan-tagged": [40]
+ }
+ ]
+ },
+ "of:0000000000000005/8" : {
+ "interfaces" : [
+ {
+ "ips" : [ "1010::3ff/120" ],
+ "vlan-tagged": [40]
+ }
+ ]
+ },
+ "of:0000000000000005/6" : {
+ "interfaces" : [
+ {
+ "ips" : [ "1008::3ff/120" ],
+ "vlan-tagged": [40]
+ }
+ ]
+ },
+ "of:0000000000000005/7" : {
+ "interfaces" : [
+ {
+ "ips" : [ "1009::3ff/120" ],
+ "vlan-tagged": [40]
+ }
+ ]
+ }
+ },
+ "hosts" : {
+ "00:aa:00:00:00:01/None" : {
+ "basic": {
+ "locations": ["of:0000000000000001/3"],
+ "ips": ["1000::3fe"]
+ }
+ },
+ "00:aa:00:00:01:01/None" : {
+ "basic": {
+ "locations": ["of:0000000000000001/4"],
+ "ips": ["1001::3fe"]
+ }
+ },
+ "00:aa:00:00:00:02/None" : {
+ "basic": {
+ "locations": ["of:0000000000000002/6"],
+ "ips": ["1002::3fe"]
+ }
+ },
+ "00:aa:00:00:00:03/None" : {
+ "basic": {
+ "locations": ["of:0000000000000002/7", "of:0000000000000003/6"],
+ "ips": ["1003::3fe"]
+ }
+ },
+ "00:aa:00:00:00:04/30" : {
+ "basic": {
+ "locations": ["of:0000000000000003/7","of:0000000000000002/8"],
+ "ips": ["1004::3fe"]
+ }
+ },
+ "00:aa:00:00:00:05/20" : {
+ "basic": {
+ "locations": ["of:0000000000000003/8"],
+ "ips": ["1005::3fe"]
+ }
+ },
+ "00:aa:00:00:01:05/40" : {
+ "basic": {
+ "locations": ["of:0000000000000003/9"],
+ "ips": ["1006::3fe"]
+ }
+ },
+ "00:aa:00:00:00:06/30" : {
+ "basic": {
+ "locations": ["of:0000000000000004/6"],
+ "ips": ["1007::3fe"]
+ }
+ },
+ "00:aa:00:00:00:07/40" : {
+ "basic": {
+ "locations": ["of:0000000000000005/6", "of:0000000000000004/7"],
+ "ips": ["1008::3fe"]
+ }
+ },
+ "00:aa:00:00:00:08/40" : {
+ "basic": {
+ "locations": ["of:0000000000000004/8","of:0000000000000005/7"],
+ "ips": ["1009::3fe"]
+ }
+ },
+ "00:aa:00:00:00:0A/40" : {
+ "basic": {
+ "locations": ["of:0000000000000005/8"],
+ "ips": ["1010::3fe"]
+ }
+ }
+ },
+ "devices" : {
+ "of:0000000000000001" : {
+ "segmentrouting" : {
+ "name" : "s001",
+ "ipv4NodeSid" : 1,
+ "ipv6NodeSid" : 101,
+ "ipv6Loopback" : "2000::c0a8:0001",
+ "ipv4Loopback" : "192.168.0.1",
+ "routerMac" : "00:00:00:00:00:01",
+ "isEdgeRouter" : true,
+ "adjacencySids" : []
+ },
+ "basic" : {
+ "driver" : "ofdpa-ovs",
+ "latitude":34,
+ "longitude":-105
+ }
+ },
+ "of:0000000000000002" : {
+ "segmentrouting" : {
+ "name" : "s002",
+ "ipv4NodeSid" : 2,
+ "ipv4Loopback" : "192.168.0.2",
+ "ipv6NodeSid" : 102,
+ "ipv6Loopback" : "2000::c0a8:0002",
+ "routerMac" : "00:00:00:00:00:02",
+ "isEdgeRouter" : true,
+ "pairLocalPort" : 5,
+ "pairDeviceId": "of:0000000000000003",
+ "adjacencySids" : []
+ },
+ "basic" : {
+ "driver" : "ofdpa-ovs",
+ "latitude":34,
+ "longitude":-95
+ }
+ },
+ "of:0000000000000003" : {
+ "segmentrouting" : {
+ "name" : "s003",
+ "ipv4NodeSid" : 3,
+ "ipv4Loopback" : "192.168.0.3",
+ "ipv6NodeSid" : 103,
+ "ipv6Loopback" : "2000::c0a8:0003",
+ "routerMac" : "00:00:00:00:00:02",
+ "isEdgeRouter" : true,
+ "pairLocalPort" : 5,
+ "pairDeviceId": "of:0000000000000002",
+ "adjacencySids" : []
+ },
+ "basic" : {
+ "driver" : "ofdpa-ovs",
+ "latitude":34,
+ "longitude":-90
+ }
+ },
+ "of:0000000000000004" : {
+ "segmentrouting" : {
+ "name" : "s004",
+ "ipv4NodeSid" : 4,
+ "ipv4Loopback" : "192.168.0.4",
+ "ipv6NodeSid" : 104,
+ "ipv6Loopback" : "2000::c0a8:0004",
+ "routerMac" : "00:00:00:00:00:04",
+ "isEdgeRouter" : true,
+ "pairLocalPort" : 5,
+ "pairDeviceId": "of:0000000000000005",
+ "adjacencySids" : []
+ },
+ "basic" : {
+ "driver" : "ofdpa-ovs",
+ "latitude":34,
+ "longitude":-85
+ }
+ },
+ "of:0000000000000005" : {
+ "segmentrouting" : {
+ "name" : "s005",
+ "ipv4NodeSid" : 5,
+ "ipv4Loopback" : "192.168.0.5",
+ "ipv6NodeSid" : 105,
+ "ipv6Loopback" : "2000::c0a8:0005",
+ "routerMac" : "00:00:00:00:00:04",
+ "isEdgeRouter" : true,
+ "pairLocalPort" : 5,
+ "pairDeviceId": "of:0000000000000004",
+ "adjacencySids" : []
+ },
+ "basic" : {
+ "driver" : "ofdpa-ovs",
+ "latitude":34,
+ "longitude":-80
+ }
+ },
+ "of:0000000000000101" : {
+ "segmentrouting" : {
+ "name" : "s101",
+ "ipv4NodeSid" : 101,
+ "ipv4Loopback" : "192.168.0.101",
+ "ipv6NodeSid" : 201,
+ "ipv6Loopback" : "2000::c0a8:0101",
+ "routerMac" : "00:00:00:00:01:01",
+ "isEdgeRouter" : false,
+ "adjacencySids" : []
+ },
+ "basic" : {
+ "driver" : "ofdpa-ovs",
+ "latitude":42,
+ "longitude":-100
+ }
+ },
+ "of:0000000000000102" : {
+ "segmentrouting" : {
+ "name" : "s102",
+ "ipv4NodeSid" : 102,
+ "ipv4Loopback" : "192.168.0.102",
+ "ipv6NodeSid" : 202,
+ "ipv6Loopback" : "2000::c0a8:0202",
+ "routerMac" : "00:00:00:00:01:02",
+ "isEdgeRouter" : false,
+ "adjacencySids" : []
+ },
+ "basic" : {
+ "driver" : "ofdpa-ovs",
+ "latitude":42,
+ "longitude":-95
+ }
+ }
+ },
+ "apps" : {
+ "org.onosproject.dhcprelay" : {
+ "default" : [
+ {
+ "dhcpServerConnectPoint": "of:0000000000000102/7",
+ "serverIps": ["10.0.3.253", "2000::3fd"]
+ }
+ ]
+ }
+ }
+
+}
diff --git a/TestON/tests/USECASE/SegmentRouting/SRRouting/dependencies/json/COMCAST_CONFIG_ipv4=1_ipv6=0_dhcp=0_routers=0.chart b/TestON/tests/USECASE/SegmentRouting/SRRouting/dependencies/json/COMCAST_CONFIG_ipv4=1_ipv6=0_dhcp=0_routers=0.chart
new file mode 100644
index 0000000..8fc87ff
--- /dev/null
+++ b/TestON/tests/USECASE/SegmentRouting/SRRouting/dependencies/json/COMCAST_CONFIG_ipv4=1_ipv6=0_dhcp=0_routers=0.chart
@@ -0,0 +1,4 @@
+{
+ "ipv4": {"expect": "True",
+ "hosts":["h1v4", "h2v4" ,"h3v4", "h4v4", "h5v4", "h6v4", "h7v4", "h8v4", "h9v4", "h10v4", "h11v4"]}
+}
diff --git a/TestON/tests/USECASE/SegmentRouting/SRRouting/dependencies/json/COMCAST_CONFIG_ipv4=1_ipv6=0_dhcp=0_routers=0.json b/TestON/tests/USECASE/SegmentRouting/SRRouting/dependencies/json/COMCAST_CONFIG_ipv4=1_ipv6=0_dhcp=0_routers=0.json
new file mode 100644
index 0000000..85e920f
--- /dev/null
+++ b/TestON/tests/USECASE/SegmentRouting/SRRouting/dependencies/json/COMCAST_CONFIG_ipv4=1_ipv6=0_dhcp=0_routers=0.json
@@ -0,0 +1,307 @@
+{
+ "ports" : {
+ "of:0000000000000001/3" : {
+ "interfaces" : [
+ {
+ "ips" : [ "10.1.0.254/24" ],
+ "vlan-untagged": 10
+ }
+ ]
+ },
+ "of:0000000000000001/4" : {
+ "interfaces" : [
+ {
+ "ips" : [ "10.1.10.254/24" ],
+ "vlan-untagged": 10
+ }
+ ]
+ },
+ "of:0000000000000002/6" : {
+ "interfaces" : [
+ {
+ "ips" : [ "10.2.0.254/24" ],
+ "vlan-untagged": 10
+ }
+ ]
+ },
+ "of:0000000000000002/7" : {
+ "interfaces" : [
+ {
+ "ips" : [ "10.2.30.254/24" ],
+ "vlan-untagged": 15
+ }
+ ]
+ },
+ "of:0000000000000002/8" : {
+ "interfaces" : [
+ {
+ "ips" : [ "10.2.20.254/24" ],
+ "vlan-tagged": [30]
+ }
+ ]
+ },
+ "of:0000000000000003/8" : {
+ "interfaces" : [
+ {
+ "ips" : [ "10.2.10.254/24" ],
+ "vlan-tagged": [20]
+ }
+ ]
+ },
+ "of:0000000000000003/9" : {
+ "interfaces" : [
+ {
+ "ips" : [ "10.2.40.254/24" ],
+ "vlan-tagged": [40]
+ }
+ ]
+ },
+ "of:0000000000000003/6" : {
+ "interfaces" : [
+ {
+ "ips" : [ "10.2.30.254/24" ],
+ "vlan-untagged": 15
+ }
+ ]
+ },
+ "of:0000000000000003/7" : {
+ "interfaces" : [
+ {
+ "ips" : [ "10.2.20.254/24" ],
+ "vlan-tagged": [30]
+ }
+ ]
+ },
+ "of:0000000000000004/6" : {
+ "interfaces" : [
+ {
+ "ips" : [ "10.3.0.254/24" ],
+ "vlan-tagged": [30]
+ }
+ ]
+ },
+ "of:0000000000000004/7" : {
+ "interfaces" : [
+ {
+ "ips" : [ "10.3.10.254/24" ],
+ "vlan-tagged": [40]
+ }
+ ]
+ },
+ "of:0000000000000004/8" : {
+ "interfaces" : [
+ {
+ "ips" : [ "10.3.30.254/24" ],
+ "vlan-tagged": [40]
+ }
+ ]
+ },
+ "of:0000000000000005/8" : {
+ "interfaces" : [
+ {
+ "ips" : [ "10.3.20.254/24" ],
+ "vlan-tagged": [40]
+ }
+ ]
+ },
+ "of:0000000000000005/6" : {
+ "interfaces" : [
+ {
+ "ips" : [ "10.3.10.254/24" ],
+ "vlan-tagged": [40]
+ }
+ ]
+ },
+ "of:0000000000000005/7" : {
+ "interfaces" : [
+ {
+ "ips" : [ "10.3.30.254/24" ],
+ "vlan-tagged": [40]
+ }
+ ]
+ }
+ },
+ "hosts" : {
+ "00:aa:00:00:00:01/None" : {
+ "basic": {
+ "locations": ["of:0000000000000001/3"],
+ "ips": ["10.1.0.1"]
+ }
+ },
+ "00:aa:00:00:01:01/None" : {
+ "basic": {
+ "locations": ["of:0000000000000001/4"],
+ "ips": ["10.1.10.1"]
+ }
+ },
+ "00:aa:00:00:00:02/None" : {
+ "basic": {
+ "locations": ["of:0000000000000002/6"],
+ "ips": ["10.2.0.1"]
+ }
+ },
+ "00:aa:00:00:00:03/None" : {
+ "basic": {
+ "locations": ["of:0000000000000002/7", "of:0000000000000003/6"],
+ "ips": ["10.2.30.1"]
+ }
+ },
+ "00:aa:00:00:00:04/30" : {
+ "basic": {
+ "locations": ["of:0000000000000003/7","of:0000000000000002/8"],
+ "ips": ["10.2.20.1"]
+ }
+ },
+ "00:aa:00:00:00:05/20" : {
+ "basic": {
+ "locations": ["of:0000000000000003/8"],
+ "ips": ["10.2.10.1"]
+ }
+ },
+ "00:aa:00:00:01:05/40" : {
+ "basic": {
+ "locations": ["of:0000000000000003/9"],
+ "ips": ["10.2.40.1"]
+ }
+ },
+ "00:aa:00:00:00:06/30" : {
+ "basic": {
+ "locations": ["of:0000000000000004/6"],
+ "ips": ["10.3.0.1"]
+ }
+ },
+ "00:aa:00:00:00:07/40" : {
+ "basic": {
+ "locations": ["of:0000000000000005/6", "of:0000000000000004/7"],
+ "ips": ["10.3.10.1"]
+ }
+ },
+ "00:aa:00:00:00:08/40" : {
+ "basic": {
+ "locations": ["of:0000000000000004/8","of:0000000000000005/7"],
+ "ips": ["10.3.30.1"]
+ }
+ },
+ "00:aa:00:00:00:0A/40" : {
+ "basic": {
+ "locations": ["of:0000000000000005/8"],
+ "ips": ["10.3.20.1"]
+ }
+ }
+ },
+ "devices" : {
+ "of:0000000000000001" : {
+ "segmentrouting" : {
+ "name" : "s001",
+ "ipv4NodeSid" : 1,
+ "ipv4Loopback" : "192.168.0.1",
+ "routerMac" : "00:00:00:00:00:01",
+ "isEdgeRouter" : true,
+ "adjacencySids" : []
+ },
+ "basic" : {
+ "driver" : "ofdpa-ovs",
+ "latitude":34,
+ "longitude":-105
+ }
+ },
+ "of:0000000000000002" : {
+ "segmentrouting" : {
+ "name" : "s002",
+ "ipv4NodeSid" : 2,
+ "ipv4Loopback" : "192.168.0.2",
+ "routerMac" : "00:00:00:00:00:02",
+ "isEdgeRouter" : true,
+ "pairLocalPort" : 5,
+ "pairDeviceId": "of:0000000000000003",
+ "adjacencySids" : []
+ },
+ "basic" : {
+ "driver" : "ofdpa-ovs",
+ "latitude":34,
+ "longitude":-95
+ }
+ },
+ "of:0000000000000003" : {
+ "segmentrouting" : {
+ "name" : "s003",
+ "ipv4NodeSid" : 3,
+ "ipv4Loopback" : "192.168.0.3",
+ "routerMac" : "00:00:00:00:00:02",
+ "isEdgeRouter" : true,
+ "pairLocalPort" : 5,
+ "pairDeviceId": "of:0000000000000002",
+ "adjacencySids" : []
+ },
+ "basic" : {
+ "driver" : "ofdpa-ovs",
+ "latitude":34,
+ "longitude":-90
+ }
+ },
+ "of:0000000000000004" : {
+ "segmentrouting" : {
+ "name" : "s004",
+ "ipv4NodeSid" : 4,
+ "ipv4Loopback" : "192.168.0.4",
+ "routerMac" : "00:00:00:00:00:04",
+ "isEdgeRouter" : true,
+ "pairLocalPort" : 5,
+ "pairDeviceId": "of:0000000000000005",
+ "adjacencySids" : []
+ },
+ "basic" : {
+ "driver" : "ofdpa-ovs",
+ "latitude":34,
+ "longitude":-85
+ }
+ },
+ "of:0000000000000005" : {
+ "segmentrouting" : {
+ "name" : "s005",
+ "ipv4NodeSid" : 5,
+ "ipv4Loopback" : "192.168.0.5",
+ "routerMac" : "00:00:00:00:00:04",
+ "isEdgeRouter" : true,
+ "pairLocalPort" : 5,
+ "pairDeviceId": "of:0000000000000004",
+ "adjacencySids" : []
+ },
+ "basic" : {
+ "driver" : "ofdpa-ovs",
+ "latitude":34,
+ "longitude":-80
+ }
+ },
+ "of:0000000000000101" : {
+ "segmentrouting" : {
+ "name" : "s101",
+ "ipv4NodeSid" : 101,
+ "ipv4Loopback" : "192.168.0.101",
+ "routerMac" : "00:00:00:00:01:01",
+ "isEdgeRouter" : false,
+ "adjacencySids" : []
+ },
+ "basic" : {
+ "driver" : "ofdpa-ovs",
+ "latitude":42,
+ "longitude":-100
+ }
+ },
+ "of:0000000000000102" : {
+ "segmentrouting" : {
+ "name" : "s102",
+ "ipv4NodeSid" : 102,
+ "ipv4Loopback" : "192.168.0.102",
+ "routerMac" : "00:00:00:00:01:02",
+ "isEdgeRouter" : false,
+ "adjacencySids" : []
+ },
+ "basic" : {
+ "driver" : "ofdpa-ovs",
+ "latitude":42,
+ "longitude":-95
+ }
+ }
+ }
+}
diff --git a/TestON/tests/USECASE/SegmentRouting/SRRouting/dependencies/json/COMCAST_CONFIG_ipv4=1_ipv6=0_dhcp=1_routers=0.json b/TestON/tests/USECASE/SegmentRouting/SRRouting/dependencies/json/COMCAST_CONFIG_ipv4=1_ipv6=0_dhcp=1_routers=0.json
new file mode 100644
index 0000000..f943f35
--- /dev/null
+++ b/TestON/tests/USECASE/SegmentRouting/SRRouting/dependencies/json/COMCAST_CONFIG_ipv4=1_ipv6=0_dhcp=1_routers=0.json
@@ -0,0 +1,318 @@
+{
+ "ports" : {
+ "of:0000000000000001/3" : {
+ "interfaces" : [
+ {
+ "ips" : [ "10.1.0.254/24" ],
+ "vlan-untagged": 10
+ }
+ ]
+ },
+ "of:0000000000000001/4" : {
+ "interfaces" : [
+ {
+ "ips" : [ "10.1.10.254/24" ],
+ "vlan-untagged": 10
+ }
+ ]
+ },
+ "of:0000000000000002/6" : {
+ "interfaces" : [
+ {
+ "ips" : [ "10.2.0.254/24" ],
+ "vlan-untagged": 10
+ }
+ ]
+ },
+ "of:0000000000000002/7" : {
+ "interfaces" : [
+ {
+ "ips" : [ "10.2.30.254/24" ],
+ "vlan-untagged": 15
+ }
+ ]
+ },
+ "of:0000000000000002/8" : {
+ "interfaces" : [
+ {
+ "ips" : [ "10.2.20.254/24" ],
+ "vlan-tagged": [30]
+ }
+ ]
+ },
+ "of:0000000000000003/8" : {
+ "interfaces" : [
+ {
+ "ips" : [ "10.2.10.254/24" ],
+ "vlan-tagged": [20]
+ }
+ ]
+ },
+ "of:0000000000000003/9" : {
+ "interfaces" : [
+ {
+ "ips" : [ "10.2.40.254/24" ],
+ "vlan-tagged": [40]
+ }
+ ]
+ },
+ "of:0000000000000003/6" : {
+ "interfaces" : [
+ {
+ "ips" : [ "10.2.30.254/24" ],
+ "vlan-untagged": 15
+ }
+ ]
+ },
+ "of:0000000000000003/7" : {
+ "interfaces" : [
+ {
+ "ips" : [ "10.2.20.254/24" ],
+ "vlan-tagged": [30]
+ }
+ ]
+ },
+ "of:0000000000000004/6" : {
+ "interfaces" : [
+ {
+ "ips" : [ "10.3.0.254/24" ],
+ "vlan-tagged": [30]
+ }
+ ]
+ },
+ "of:0000000000000004/7" : {
+ "interfaces" : [
+ {
+ "ips" : [ "10.3.10.254/24" ],
+ "vlan-tagged": [40]
+ }
+ ]
+ },
+ "of:0000000000000004/8" : {
+ "interfaces" : [
+ {
+ "ips" : [ "10.3.30.254/24" ],
+ "vlan-tagged": [40]
+ }
+ ]
+ },
+ "of:0000000000000005/8" : {
+ "interfaces" : [
+ {
+ "ips" : [ "10.3.20.254/24" ],
+ "vlan-tagged": [40]
+ }
+ ]
+ },
+ "of:0000000000000005/6" : {
+ "interfaces" : [
+ {
+ "ips" : [ "10.3.10.254/24" ],
+ "vlan-tagged": [40]
+ }
+ ]
+ },
+ "of:0000000000000005/7" : {
+ "interfaces" : [
+ {
+ "ips" : [ "10.3.30.254/24" ],
+ "vlan-tagged": [40]
+ }
+ ]
+ }
+ },
+ "hosts" : {
+ "00:aa:00:00:00:01/None" : {
+ "basic": {
+ "locations": ["of:0000000000000001/3"],
+ "ips": ["10.1.0.1"]
+ }
+ },
+ "00:aa:00:00:01:01/None" : {
+ "basic": {
+ "locations": ["of:0000000000000001/4"],
+ "ips": ["10.1.10.1"]
+ }
+ },
+ "00:aa:00:00:00:02/None" : {
+ "basic": {
+ "locations": ["of:0000000000000002/6"],
+ "ips": ["10.2.0.1"]
+ }
+ },
+ "00:aa:00:00:00:03/None" : {
+ "basic": {
+ "locations": ["of:0000000000000002/7", "of:0000000000000003/6"],
+ "ips": ["10.2.30.1"]
+ }
+ },
+ "00:aa:00:00:00:04/30" : {
+ "basic": {
+ "locations": ["of:0000000000000003/7","of:0000000000000002/8"],
+ "ips": ["10.2.20.1"]
+ }
+ },
+ "00:aa:00:00:00:05/20" : {
+ "basic": {
+ "locations": ["of:0000000000000003/8"],
+ "ips": ["10.2.10.1"]
+ }
+ },
+ "00:aa:00:00:01:05/40" : {
+ "basic": {
+ "locations": ["of:0000000000000003/9"],
+ "ips": ["10.2.40.1"]
+ }
+ },
+ "00:aa:00:00:00:06/30" : {
+ "basic": {
+ "locations": ["of:0000000000000004/6"],
+ "ips": ["10.3.0.1"]
+ }
+ },
+ "00:aa:00:00:00:07/40" : {
+ "basic": {
+ "locations": ["of:0000000000000005/6", "of:0000000000000004/7"],
+ "ips": ["10.3.10.1"]
+ }
+ },
+ "00:aa:00:00:00:08/40" : {
+ "basic": {
+ "locations": ["of:0000000000000004/8","of:0000000000000005/7"],
+ "ips": ["10.3.30.1"]
+ }
+ },
+ "00:aa:00:00:00:0A/40" : {
+ "basic": {
+ "locations": ["of:0000000000000005/8"],
+ "ips": ["10.3.20.1"]
+ }
+ }
+ },
+ "devices" : {
+ "of:0000000000000001" : {
+ "segmentrouting" : {
+ "name" : "s001",
+ "ipv4NodeSid" : 1,
+ "ipv4Loopback" : "192.168.0.1",
+ "routerMac" : "00:00:00:00:00:01",
+ "isEdgeRouter" : true,
+ "adjacencySids" : []
+ },
+ "basic" : {
+ "driver" : "ofdpa-ovs",
+ "latitude":34,
+ "longitude":-105
+ }
+ },
+ "of:0000000000000002" : {
+ "segmentrouting" : {
+ "name" : "s002",
+ "ipv4NodeSid" : 2,
+ "ipv4Loopback" : "192.168.0.2",
+ "routerMac" : "00:00:00:00:00:02",
+ "isEdgeRouter" : true,
+ "pairLocalPort" : 5,
+ "pairDeviceId": "of:0000000000000003",
+ "adjacencySids" : []
+ },
+ "basic" : {
+ "driver" : "ofdpa-ovs",
+ "latitude":34,
+ "longitude":-95
+ }
+ },
+ "of:0000000000000003" : {
+ "segmentrouting" : {
+ "name" : "s003",
+ "ipv4NodeSid" : 3,
+ "ipv4Loopback" : "192.168.0.3",
+ "routerMac" : "00:00:00:00:00:02",
+ "isEdgeRouter" : true,
+ "pairLocalPort" : 5,
+ "pairDeviceId": "of:0000000000000002",
+ "adjacencySids" : []
+ },
+ "basic" : {
+ "driver" : "ofdpa-ovs",
+ "latitude":34,
+ "longitude":-90
+ }
+ },
+ "of:0000000000000004" : {
+ "segmentrouting" : {
+ "name" : "s004",
+ "ipv4NodeSid" : 4,
+ "ipv4Loopback" : "192.168.0.4",
+ "routerMac" : "00:00:00:00:00:04",
+ "isEdgeRouter" : true,
+ "pairLocalPort" : 5,
+ "pairDeviceId": "of:0000000000000005",
+ "adjacencySids" : []
+ },
+ "basic" : {
+ "driver" : "ofdpa-ovs",
+ "latitude":34,
+ "longitude":-85
+ }
+ },
+ "of:0000000000000005" : {
+ "segmentrouting" : {
+ "name" : "s005",
+ "ipv4NodeSid" : 5,
+ "ipv4Loopback" : "192.168.0.5",
+ "routerMac" : "00:00:00:00:00:04",
+ "isEdgeRouter" : true,
+ "pairLocalPort" : 5,
+ "pairDeviceId": "of:0000000000000004",
+ "adjacencySids" : []
+ },
+ "basic" : {
+ "driver" : "ofdpa-ovs",
+ "latitude":34,
+ "longitude":-80
+ }
+ },
+ "of:0000000000000101" : {
+ "segmentrouting" : {
+ "name" : "s101",
+ "ipv4NodeSid" : 101,
+ "ipv4Loopback" : "192.168.0.101",
+ "routerMac" : "00:00:00:00:01:01",
+ "isEdgeRouter" : false,
+ "adjacencySids" : []
+ },
+ "basic" : {
+ "driver" : "ofdpa-ovs",
+ "latitude":42,
+ "longitude":-100
+ }
+ },
+ "of:0000000000000102" : {
+ "segmentrouting" : {
+ "name" : "s102",
+ "ipv4NodeSid" : 102,
+ "ipv4Loopback" : "192.168.0.102",
+ "routerMac" : "00:00:00:00:01:02",
+ "isEdgeRouter" : false,
+ "adjacencySids" : []
+ },
+ "basic" : {
+ "driver" : "ofdpa-ovs",
+ "latitude":42,
+ "longitude":-95
+ }
+ }
+ },
+ "apps" : {
+ "org.onosproject.dhcprelay" : {
+ "default" : [
+ {
+ "dhcpServerConnectPoint": "of:0000000000000102/10",
+ "serverIps": ["10.0.3.253"]
+ }
+ ]
+ }
+ }
+
+}
diff --git a/TestON/tests/USECASE/SegmentRouting/SRRouting/dependencies/json/COMCAST_CONFIG_ipv4=1_ipv6=1_dhcp=0_routers=0.chart b/TestON/tests/USECASE/SegmentRouting/SRRouting/dependencies/json/COMCAST_CONFIG_ipv4=1_ipv6=1_dhcp=0_routers=0.chart
new file mode 100644
index 0000000..e23a8b4
--- /dev/null
+++ b/TestON/tests/USECASE/SegmentRouting/SRRouting/dependencies/json/COMCAST_CONFIG_ipv4=1_ipv6=1_dhcp=0_routers=0.chart
@@ -0,0 +1,6 @@
+{
+ "ipv4": {"expect": "True",
+ "hosts":["h1v4", "h2v4" ,"h3v4", "h4v4", "h5v4", "h6v4", "h7v4", "h8v4", "h9v4", "h10v4", "h11v4"]}
+ "ipv6": {"expect": "True",
+ "hosts":["h1v6", "h2v6" ,"h3v6", "h4v6", "h5v6", "h6v6", "h7v6", "h8v6", "h9v6", "h10v6", "h11v6"]}
+}
diff --git a/TestON/tests/USECASE/SegmentRouting/SRSanity/SRSanity.params b/TestON/tests/USECASE/SegmentRouting/SRSanity/SRSanity.params
index fba7e9f..fceb91a 100755
--- a/TestON/tests/USECASE/SegmentRouting/SRSanity/SRSanity.params
+++ b/TestON/tests/USECASE/SegmentRouting/SRSanity/SRSanity.params
@@ -18,7 +18,7 @@
<ENV>
<cellName>productionCell</cellName>
- <cellApps>drivers,segmentrouting,openflow-base,netcfghostprovider,netcfglinksprovider</cellApps>
+ <cellApps>drivers,segmentrouting,openflow,netcfghostprovider,netcfglinksprovider</cellApps>
</ENV>
<GIT>
diff --git a/TestON/tests/USECASE/SegmentRouting/SRSanity/SRSanity.py b/TestON/tests/USECASE/SegmentRouting/SRSanity/SRSanity.py
index 2919500..5a863a9 100644
--- a/TestON/tests/USECASE/SegmentRouting/SRSanity/SRSanity.py
+++ b/TestON/tests/USECASE/SegmentRouting/SRSanity/SRSanity.py
@@ -51,7 +51,7 @@
run.startMininet( main, 'cord_fabric.py' )
# pre-configured routing and bridging test
run.checkFlows( main, minFlowCount=116 )
- run.pingAll( main, "CASE1" )
+ run.pingAll( main )
# TODO Dynamic config of hosts in subnet
# TODO Dynamic config of host not in subnet
# TODO Dynamic config of vlan xconnect
@@ -77,7 +77,7 @@
run.startMininet( main, 'cord_fabric.py', args="--leaf=4 --spine=4" )
# pre-configured routing and bridging test
run.checkFlows( main, minFlowCount=350 )
- run.pingAll( main, 'CASE2' )
+ run.pingAll( main )
# TODO Dynamic config of hosts in subnet
# TODO Dynamic config of host not in subnet
# TODO Dynamic config of vlan xconnect
@@ -103,7 +103,7 @@
run.startMininet( main, 'cord_fabric.py', args="--leaf=1 --spine=0" )
# pre-configured routing and bridging test
run.checkFlows( main, minFlowCount=15 )
- run.pingAll( main, 'CASE3' )
+ run.pingAll( main )
# TODO Dynamic config of hosts in subnet
# TODO Dynamic config of host not in subnet
# TODO Dynamic config of vlan xconnect
@@ -129,7 +129,7 @@
run.startMininet( main, 'cord_fabric.py' )
# pre-configured routing and bridging test
run.checkFlows( main, minFlowCount=116 )
- run.pingAll( main, 'CASE4' )
+ run.pingAll( main )
# TODO Dynamic config of hosts in subnet
# TODO Dynamic config of host not in subnet
# TODO Dynamic config of vlan xconnect
@@ -155,7 +155,7 @@
run.startMininet( main, 'cord_fabric.py', args="--leaf=4 --spine=4" )
# pre-configured routing and bridging test
run.checkFlows( main, minFlowCount=350 )
- run.pingAll( main, 'CASE5' )
+ run.pingAll( main )
# TODO Dynamic config of hosts in subnet
# TODO Dynamic config of host not in subnet
# TODO Dynamic config of vlan xconnect
@@ -181,7 +181,7 @@
run.startMininet( main, 'cord_fabric.py', args="--leaf=1 --spine=0" )
# pre-configured routing and bridging test
run.checkFlows( main, minFlowCount=15 )
- run.pingAll( main, 'CASE4' )
+ run.pingAll( main )
# TODO Dynamic config of hosts in subnet
# TODO Dynamic config of host not in subnet
# TODO Dynamic config of vlan xconnect
diff --git a/TestON/tests/USECASE/SegmentRouting/SRSanity/SRSanity.topo b/TestON/tests/USECASE/SegmentRouting/SRSanity/SRSanity.topo
index e0b1d43..28ecb9a 100755
--- a/TestON/tests/USECASE/SegmentRouting/SRSanity/SRSanity.topo
+++ b/TestON/tests/USECASE/SegmentRouting/SRSanity/SRSanity.topo
@@ -9,7 +9,7 @@
<connect_order>1</connect_order>
<COMPONENTS>
<cluster_name></cluster_name> # Used as a prefix for cluster components. Defaults to 'ONOS'
- <diff_clihost>True</diff_clihost> # if it has different host other than localhost for CLI. True or empty. OC# will be used if True.
+ <diff_clihost></diff_clihost> # Set to True if a host other than localhost is used for the CLI. True or empty. OC# will be used if True.
<karaf_username></karaf_username>
<karaf_password></karaf_password>
<web_user></web_user>
diff --git a/TestON/tests/USECASE/SegmentRouting/SRSwitchFailure/SRSwitchFailure.params b/TestON/tests/USECASE/SegmentRouting/SRSwitchFailure/SRSwitchFailure.params
index a5d4811..ea3112c 100755
--- a/TestON/tests/USECASE/SegmentRouting/SRSwitchFailure/SRSwitchFailure.params
+++ b/TestON/tests/USECASE/SegmentRouting/SRSwitchFailure/SRSwitchFailure.params
@@ -18,7 +18,7 @@
<ENV>
<cellName>productionCell</cellName>
- <cellApps>drivers,segmentrouting,openflow-base,netcfghostprovider,netcfglinksprovider</cellApps>
+ <cellApps>drivers,segmentrouting,openflow,netcfghostprovider,netcfglinksprovider</cellApps>
</ENV>
<GIT>
diff --git a/TestON/tests/USECASE/SegmentRouting/SRSwitchFailure/SRSwitchFailure.py b/TestON/tests/USECASE/SegmentRouting/SRSwitchFailure/SRSwitchFailure.py
index d0d79c8..1d013a0 100644
--- a/TestON/tests/USECASE/SegmentRouting/SRSwitchFailure/SRSwitchFailure.py
+++ b/TestON/tests/USECASE/SegmentRouting/SRSwitchFailure/SRSwitchFailure.py
@@ -58,7 +58,7 @@
run.killSwitch( main, switch, switches='3', links='4' )
run.pingAll( main, "CASE1_Failure" )
run.recoverSwitch( main, switch, switches='4', links='8' )
- run.checkFlows( main, minFlowCount=116 )
+ run.checkFlows( main, minFlowCount=116, tag="CASE1_Recovery" )
run.pingAll( main, "CASE1_Recovery" )
# TODO Dynamic config of hosts in subnet
# TODO Dynamic config of host not in subnet
@@ -95,7 +95,7 @@
time.sleep( main.switchSleep )
run.pingAll( main, "CASE2_Failure" )
run.recoverSwitch( main, switch, switches='8', links='32' )
- run.checkFlows( main, minFlowCount=350 )
+ run.checkFlows( main, minFlowCount=350, tag="CASE2_Recovery" )
run.pingAll( main, "CASE2_Recovery" )
# TODO Dynamic config of hosts in subnet
# TODO Dynamic config of host not in subnet
@@ -128,10 +128,10 @@
# switch failure
switch = main.params[ 'kill' ][ 'switch' ]
run.killSwitch( main, switch, switches='3', links='4' )
- run.pingAll( main, "CASE3_Failure" )
+ run.pingAll( main, "CASE4_Failure" )
run.recoverSwitch( main, switch, switches='4', links='8' )
- run.checkFlows( main, minFlowCount=116 )
- run.pingAll( main, "CASE3_Recovery" )
+ run.checkFlows( main, minFlowCount=116, tag="CASE4_Recovery" )
+ run.pingAll( main, "CASE4_Recovery" )
# TODO Dynamic config of hosts in subnet
# TODO Dynamic config of host not in subnet
# TODO Dynamic config of vlan xconnect
@@ -165,10 +165,10 @@
switch = main.params[ 'kill' ][ 'switch' ]
run.killSwitch( main, switch, switches='7', links='24' )
time.sleep( main.switchSleep )
- run.pingAll( main, "CASE4_Failure" )
+ run.pingAll( main, "CASE5_Failure" )
run.recoverSwitch( main, switch, switches='8', links='32' )
- run.checkFlows( main, minFlowCount=350 )
- run.pingAll( main, "CASE4_Recovery" )
+ run.checkFlows( main, minFlowCount=350, tag="CASE5_Recovery" )
+ run.pingAll( main, "CASE5_Recovery" )
# TODO Dynamic config of hosts in subnet
# TODO Dynamic config of host not in subnet
# TODO preconfigured xconnect
diff --git a/TestON/tests/USECASE/SegmentRouting/SRSwitchFailure/SRSwitchFailure.topo b/TestON/tests/USECASE/SegmentRouting/SRSwitchFailure/SRSwitchFailure.topo
index ee6ce56..67a973e 100755
--- a/TestON/tests/USECASE/SegmentRouting/SRSwitchFailure/SRSwitchFailure.topo
+++ b/TestON/tests/USECASE/SegmentRouting/SRSwitchFailure/SRSwitchFailure.topo
@@ -9,7 +9,7 @@
<connect_order>1</connect_order>
<COMPONENTS>
<cluster_name></cluster_name> # Used as a prefix for cluster components. Defaults to 'ONOS'
- <diff_clihost>True</diff_clihost> # if it has different host other than localhost for CLI. True or empty. OC# will be used for True.
+ <diff_clihost></diff_clihost> # Set to True if a host other than localhost is used for the CLI. True or empty. OC# will be used if True.
<karaf_username></karaf_username>
<karaf_password></karaf_password>
<web_user></web_user>
diff --git a/TestON/tests/USECASE/SegmentRouting/dependencies/COMCAST_CONFIG.json b/TestON/tests/USECASE/SegmentRouting/dependencies/COMCAST_CONFIG.json
new file mode 100644
index 0000000..8a7eff8
--- /dev/null
+++ b/TestON/tests/USECASE/SegmentRouting/dependencies/COMCAST_CONFIG.json
@@ -0,0 +1,279 @@
+{
+ "ports" : {
+ "of:0000000000000001/3" : {
+ "interfaces" : [
+ {
+ "ips" : [ "10.1.0.254/24" ],
+ "vlan-untagged": 10
+ }
+ ]
+ },
+ "of:0000000000000002/6" : {
+ "interfaces" : [
+ {
+ "ips" : [ "10.2.0.254/24" ],
+ "vlan-untagged": 10
+ }
+ ]
+ },
+ "of:0000000000000002/7" : {
+ "interfaces" : [
+ {
+ "ips" : [ "10.2.30.254/24" ],
+ "vlan-untagged": 15
+ }
+ ]
+ },
+ "of:0000000000000002/8" : {
+ "interfaces" : [
+ {
+ "ips" : [ "10.2.20.254/24" ],
+ "vlan-tagged": [30]
+ }
+ ]
+ },
+ "of:0000000000000003/8" : {
+ "interfaces" : [
+ {
+ "ips" : [ "10.2.10.254/24" ],
+ "vlan-tagged": [20]
+ }
+ ]
+ },
+ "of:0000000000000003/6" : {
+ "interfaces" : [
+ {
+ "ips" : [ "10.2.30.254/24" ],
+ "vlan-untagged": 15
+ }
+ ]
+ },
+ "of:0000000000000003/7" : {
+ "interfaces" : [
+ {
+ "ips" : [ "10.2.20.254/24" ],
+ "vlan-tagged": [30]
+ }
+ ]
+ },
+ "of:0000000000000004/6" : {
+ "interfaces" : [
+ {
+ "ips" : [ "10.3.0.254/24" ],
+ "vlan-tagged": [30]
+ }
+ ]
+ },
+ "of:0000000000000004/7" : {
+ "interfaces" : [
+ {
+ "ips" : [ "10.3.10.254/24" ],
+ "vlan-tagged": [40]
+ }
+ ]
+ },
+ "of:0000000000000004/8" : {
+ "interfaces" : [
+ {
+ "ips" : [ "10.3.30.254/24" ],
+ "vlan-tagged": [40]
+ }
+ ]
+ },
+ "of:0000000000000005/8" : {
+ "interfaces" : [
+ {
+ "ips" : [ "10.3.20.254/24" ],
+ "vlan-tagged": [40]
+ }
+ ]
+ },
+ "of:0000000000000005/6" : {
+ "interfaces" : [
+ {
+ "ips" : [ "10.3.10.254/24" ],
+ "vlan-tagged": [40]
+ }
+ ]
+ },
+ "of:0000000000000005/7" : {
+ "interfaces" : [
+ {
+ "ips" : [ "10.3.30.254/24" ],
+ "vlan-tagged": [40]
+ }
+ ]
+ }
+ },
+ "hosts" : {
+ "00:aa:00:00:00:01/10" : {
+ "basic": {
+ "locations": ["of:0000000000000001/3"],
+ "ips": ["10.1.0.1"]
+ }
+ },
+ "00:aa:00:00:00:02/None" : {
+ "basic": {
+ "locations": ["of:0000000000000002/6"],
+ "ips": ["10.2.0.1"]
+ }
+ },
+ "00:aa:00:00:00:03/None" : {
+ "basic": {
+ "locations": ["of:0000000000000002/7", "of:0000000000000003/6"],
+ "ips": ["10.2.30.1"]
+ }
+ },
+ "00:aa:00:00:00:04/30" : {
+ "basic": {
+ "locations": ["of:0000000000000003/7","of:0000000000000002/8"],
+ "ips": ["10.2.20.1"]
+ }
+ },
+ "00:aa:00:00:00:05/20" : {
+ "basic": {
+ "locations": ["of:0000000000000003/8"],
+ "ips": ["10.2.10.1"]
+ }
+ },
+ "00:aa:00:00:00:06/30" : {
+ "basic": {
+ "locations": ["of:0000000000000004/6"],
+ "ips": ["10.3.0.1"]
+ }
+ },
+ "00:aa:00:00:00:07/40" : {
+ "basic": {
+ "locations": ["of:0000000000000005/6", "of:0000000000000004/7"],
+ "ips": ["10.3.10.1"]
+ }
+ },
+ "00:aa:00:00:00:08/40" : {
+ "basic": {
+ "locations": ["of:0000000000000004/8","of:0000000000000005/7"],
+ "ips": ["10.3.30.1"]
+ }
+ },
+ "00:aa:00:00:00:0A/40" : {
+ "basic": {
+ "locations": ["of:0000000000000005/8"],
+ "ips": ["10.3.20.1"]
+ }
+ }
+ },
+ "devices" : {
+ "of:0000000000000001" : {
+ "segmentrouting" : {
+ "name" : "s001",
+ "ipv4NodeSid" : 1,
+ "ipv4Loopback" : "192.168.0.1",
+ "routerMac" : "00:00:00:00:00:01",
+ "isEdgeRouter" : true,
+ "adjacencySids" : []
+ },
+ "basic" : {
+ "driver" : "ofdpa-ovs",
+ "latitude":34,
+ "longitude":-105
+ }
+ },
+ "of:0000000000000002" : {
+ "segmentrouting" : {
+ "name" : "s002",
+ "ipv4NodeSid" : 2,
+ "ipv4Loopback" : "192.168.0.2",
+ "routerMac" : "00:00:00:00:00:02",
+ "isEdgeRouter" : true,
+ "pairLocalPort" : 5,
+ "pairDeviceId": "of:0000000000000003",
+ "adjacencySids" : []
+ },
+ "basic" : {
+ "driver" : "ofdpa-ovs",
+ "latitude":34,
+ "longitude":-95
+ }
+ },
+ "of:0000000000000003" : {
+ "segmentrouting" : {
+ "name" : "s003",
+ "ipv4NodeSid" : 3,
+ "ipv4Loopback" : "192.168.0.3",
+ "routerMac" : "00:00:00:00:00:03",
+ "isEdgeRouter" : true,
+ "pairLocalPort" : 5,
+ "pairDeviceId": "of:0000000000000002",
+ "adjacencySids" : []
+ },
+ "basic" : {
+ "driver" : "ofdpa-ovs",
+ "latitude":34,
+ "longitude":-90
+ }
+ },
+ "of:0000000000000004" : {
+ "segmentrouting" : {
+ "name" : "s004",
+ "ipv4NodeSid" : 4,
+ "ipv4Loopback" : "192.168.0.4",
+ "routerMac" : "00:00:00:00:00:04",
+ "isEdgeRouter" : true,
+ "pairLocalPort" : 5,
+ "pairDeviceId": "of:0000000000000005",
+ "adjacencySids" : []
+ },
+ "basic" : {
+ "driver" : "ofdpa-ovs",
+ "latitude":34,
+ "longitude":-85
+ }
+ },
+ "of:0000000000000005" : {
+ "segmentrouting" : {
+ "name" : "s005",
+ "ipv4NodeSid" : 5,
+ "ipv4Loopback" : "192.168.0.5",
+ "routerMac" : "00:00:00:00:00:05",
+ "isEdgeRouter" : true,
+ "pairLocalPort" : 5,
+ "pairDeviceId": "of:0000000000000004",
+ "adjacencySids" : []
+ },
+ "basic" : {
+ "driver" : "ofdpa-ovs",
+ "latitude":34,
+ "longitude":-80
+ }
+ },
+ "of:0000000000000101" : {
+ "segmentrouting" : {
+ "name" : "s101",
+ "ipv4NodeSid" : 101,
+ "ipv4Loopback" : "192.168.0.101",
+ "routerMac" : "00:00:00:00:01:01",
+ "isEdgeRouter" : false,
+ "adjacencySids" : []
+ },
+ "basic" : {
+ "driver" : "ofdpa-ovs",
+ "latitude":42,
+ "longitude":-100
+ }
+ },
+ "of:0000000000000102" : {
+ "segmentrouting" : {
+ "name" : "s102",
+ "ipv4NodeSid" : 102,
+ "ipv4Loopback" : "192.168.0.102",
+ "routerMac" : "00:00:00:00:01:02",
+ "isEdgeRouter" : false,
+ "adjacencySids" : []
+ },
+ "basic" : {
+ "driver" : "ofdpa-ovs",
+ "latitude":42,
+ "longitude":-95
+ }
+ }
+ }
+}
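
One useful sanity check on a fabric config like the one above is that every
dual-homed host's two locations sit on a leaf pair whose devices point at each
other via pairDeviceId. A minimal sketch, assuming the file path added above:

    # A minimal sketch: check dual-homed host locations against paired leaves.
    import json

    with open( "TestON/tests/USECASE/SegmentRouting/dependencies/COMCAST_CONFIG.json" ) as f:
        cfg = json.load( f )

    # Map each device to its configured pair, if any
    pairs = dict( ( dev, body[ "segmentrouting" ].get( "pairDeviceId" ) )
                  for dev, body in cfg[ "devices" ].items() )

    for host, body in cfg[ "hosts" ].items():
        locs = [ loc.split( "/" )[ 0 ] for loc in body[ "basic" ][ "locations" ] ]
        if len( locs ) == 2:
            a, b = locs
            assert pairs.get( a ) == b and pairs.get( b ) == a, \
                "%s is dual-homed on unpaired devices %s" % ( host, locs )
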
diff --git a/TestON/tests/USECASE/SegmentRouting/dependencies/Testcaselib.py b/TestON/tests/USECASE/SegmentRouting/dependencies/Testcaselib.py
index 78c96cc..c912ea2 100644
--- a/TestON/tests/USECASE/SegmentRouting/dependencies/Testcaselib.py
+++ b/TestON/tests/USECASE/SegmentRouting/dependencies/Testcaselib.py
@@ -49,7 +49,7 @@
main.cleanAndExit()
from tests.dependencies.Network import Network
main.Network = Network()
- main.testSetUp.envSetupDescription()
+ main.testSetUp.envSetupDescription( False )
stepResult = main.FALSE
try:
main.step( "Constructing test variables" )
@@ -66,14 +66,15 @@
main.maxNodes = int( main.params[ 'SCALE' ][ 'max' ] )
main.startUpSleep = int( main.params[ 'SLEEP' ][ 'startup' ] )
- stepResult = main.testSetUp.envSetup()
+ stepResult = main.testSetUp.envSetup( False )
except Exception as e:
main.testSetUp.envSetupException( e )
main.testSetUp.evnSetupConclusion( stepResult )
@staticmethod
- def installOnos( main, vlanCfg=True, skipPackage=False, cliSleep=10 ):
+ def installOnos( main, vlanCfg=True, skipPackage=False, cliSleep=10,
+ parallel=True ):
"""
- Set up cell
- Create cell file
@@ -92,7 +93,9 @@
main.log.info( ''.join( main.Cluster.getIps() ) )
main.dynamicHosts = [ 'in1', 'out1' ]
main.testSetUp.ONOSSetUp( main.Cluster, newCell=True, cellName=main.cellName,
- skipPack=skipPackage, useSSH=Testcaselib.useSSH )
+ skipPack=skipPackage,
+ useSSH=Testcaselib.useSSH,
+ installParallel=parallel, includeCaseDesc=False )
ready = utilities.retry( main.Cluster.active( 0 ).CLI.summary,
main.FALSE,
sleep=cliSleep,
@@ -103,12 +106,10 @@
onpass="ONOS summary command succeded",
onfail="ONOS summary command failed" )
- with open( "%s/json/%s.json" % (
- main.configPath, main.cfgName ) ) as cfg:
- main.Cluster.active( 0 ).REST.setNetCfg( json.load( cfg ) )
- with open( "%s/json/%s.chart" % (
- main.configPath, main.cfgName ) ) as chart:
- main.pingChart = json.load( chart )
+ with open( "%s/json/%s.json" % (main.configPath, main.cfgName)) as cfg:
+ main.Cluster.active( 0 ).REST.setNetCfg(json.load(cfg))
+ with open("%s/json/%s.chart" % (main.configPath, main.cfgName)) as chart:
+ main.pingChart = json.load(chart)
if not ready:
main.log.error( "ONOS startup failed!" )
main.cleanAndExit()
@@ -200,9 +201,11 @@
main.cleanAndExit()
@staticmethod
- def checkFlows( main, minFlowCount, dumpflows=True, sleep=10 ):
+ def checkFlows( main, minFlowCount, tag="", dumpflows=True, sleep=10 ):
main.step(
" Check whether the flow count is bigger than %s" % minFlowCount )
+ if tag == "":
+ tag = 'CASE%d' % main.CurrentTestCaseNumber
count = utilities.retry( main.Cluster.active( 0 ).CLI.checkFlowCount,
main.FALSE,
kwargs={ 'min': minFlowCount },
@@ -229,11 +232,11 @@
main.ONOSbench.dumpONOSCmd( main.Cluster.active( 0 ).ipAddress,
"flows",
main.logdir,
- main.resultFileName + "_FlowsBefore" )
+ tag + "_FlowsBefore" )
main.ONOSbench.dumpONOSCmd( main.Cluster.active( 0 ).ipAddress,
"groups",
main.logdir,
- main.resultFileName + "_GroupsBefore" )
+ tag + "_GroupsBefore" )
@staticmethod
def checkFlowsByDpid( main, dpid, minFlowCount, sleep=10 ):
@@ -251,9 +254,47 @@
onfail="Flow count looks wrong: " + count )
@staticmethod
+ def pingAllBasedOnIp( main, tag="", dumpflows=True ):
+ main.log.report( "Check full connectivity" )
+ print main.pingChart
+ if tag == "":
+ tag = 'CASE%d' % main.CurrentTestCaseNumber
+ for entry in main.pingChart.itervalues():
+ print entry
+ hosts, expect = entry[ 'hosts' ], entry[ 'expect' ]
+ try:
+ expect = main.TRUE if str(expect).lower() == 'true' else main.FALSE
+ except:
+ expect = main.FALSE
+ main.step( "Connectivity for %s %s" % ( str( hosts ), tag ) )
+
+ if ("v4" in hosts[0]):
+ pa = main.Network.pingallHosts( hosts )
+ utilities.assert_equals( expect=expect, actual=pa,
+ onpass="IPv4 connectivity successfully tested",
+ onfail="IPv4 connectivity failed" )
+ if ("v6" in hosts[0]):
+ pa = main.Network.pingIpv6Hosts( hosts )
+ utilities.assert_equals( expect=expect, actual=pa,
+ onpass="IPv6 connectivity successfully tested",
+ onfail="IPv6 connectivity failed" )
+
+ if dumpflows:
+ main.ONOSbench.dumpONOSCmd( main.Cluster.active( 0 ).ipAddress,
+ "flows",
+ main.logdir,
+ tag + "_FlowsOn" )
+ main.ONOSbench.dumpONOSCmd( main.Cluster.active( 0 ).ipAddress,
+ "groups",
+ main.logdir,
+ tag + "_GroupsOn" )
+
+ @staticmethod
def pingAll( main, tag="", dumpflows=True ):
main.log.report( "Check full connectivity" )
print main.pingChart
+ if tag == "":
+ tag = 'CASE%d' % main.CurrentTestCaseNumber
for entry in main.pingChart.itervalues():
print entry
hosts, expect = entry[ 'hosts' ], entry[ 'expect' ]
@@ -328,8 +369,8 @@
ctrl = main.Cluster.runningNodes[ i ]
onosIsUp = main.ONOSbench.isup( ctrl.ipAddress )
if onosIsUp == main.TRUE:
- ctrl.CLI.portstate( dpid=dpid1, port=port1 )
- ctrl.CLI.portstate( dpid=dpid2, port=port2 )
+ ctrl.CLI.portstate( dpid=dpid1, port=port1, state='Enable' )
+ ctrl.CLI.portstate( dpid=dpid2, port=port2, state='Enable' )
time.sleep( main.linkSleep )
result = main.Cluster.active( 0 ).CLI.checkStatus( numoswitch=switches,
@@ -406,7 +447,7 @@
main.utils.mininetCleanup( main.Mininet1 )
- main.utils.copyKarafLog( main.resultFileName, before=True )
+ main.utils.copyKarafLog( "CASE%d" % main.CurrentTestCaseNumber, before=True, includeCaseDesc=False )
for ctrl in main.Cluster.active():
main.ONOSbench.onosStop( ctrl.ipAddress )
@@ -447,7 +488,7 @@
main.log.error( "Failed to kill ONOS, stopping test" )
main.cleanAndExit()
- topology = utilities.retry( main.Cluster.active( 0 ).checkStatus,
+ topology = utilities.retry( main.Cluster.active( 0 ).CLI.checkStatus,
main.FALSE,
kwargs={ 'numoswitch': switches,
'numolink': links,
@@ -475,7 +516,7 @@
for i in nodes:
main.step( "Checking if ONOS CLI is ready" )
ctrl = main.Cluster.runningNodes[ i ]
- ctrl.CLI.startCellCli()
+ # ctrl.CLI.startCellCli()
cliResult = ctrl.CLI.startOnosCli( ctrl.ipAddress,
commandlineTimeout=60,
onosStartTimeout=100 )
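
The new pingAllBasedOnIp() above dispatches on a "v4"/"v6" substring in the
first host name of each ping-chart entry. A minimal sketch of the entry shape
it consumes; the group keys and host names here are hypothetical:

    # A minimal sketch of a ping chart (Python 2, matching the library).
    pingChart = {
        "ipv4-full": { "hosts": [ "h1v4", "h2v4", "h3v4" ], "expect": "True" },
        "ipv6-full": { "hosts": [ "h1v6", "h2v6" ], "expect": "True" },
    }
    for entry in pingChart.itervalues():
        hosts, expect = entry[ "hosts" ], entry[ "expect" ]
        useIpv6 = "v6" in hosts[ 0 ]  # selects pingIpv6Hosts() over pingallHosts()
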
diff --git a/TestON/tests/USECASE/SegmentRouting/dependencies/comcast_fabric.py b/TestON/tests/USECASE/SegmentRouting/dependencies/comcast_fabric.py
new file mode 100644
index 0000000..e08e5a6
--- /dev/null
+++ b/TestON/tests/USECASE/SegmentRouting/dependencies/comcast_fabric.py
@@ -0,0 +1,301 @@
+#!/usr/bin/python
+import os
+import re
+from optparse import OptionParser
+from ipaddress import ip_network
+from mininet.node import RemoteController, OVSBridge, Host
+from mininet.link import TCLink
+from mininet.log import setLogLevel
+from mininet.net import Mininet
+from mininet.topo import Topo
+from mininet.nodelib import NAT
+from mininet.cli import CLI
+
+from routinglib import BgpRouter, RoutedHost
+from trellislib import DhcpServer, TaggedRoutedHost, DualHomedRoutedHost, DualHomedTaggedRoutedHost, DhcpClient, Dhcp6Client, DhcpServer, Dhcp6Server, TrellisHost
+
+# Parse command line options and dump results
+def parseOptions():
+ "Parse command line options"
+ parser = OptionParser()
+ parser.add_option( '--dhcp', dest='dhcp', type='int', default=0,
+ help='Configure hosts with dhcp or not' )
+ parser.add_option( '--routers', dest='routers', type='int', default=0,
+ help='Configure external routers or not in the topology' )
+ parser.add_option( '--ipv6', dest='ipv6', type='int', default=0,
+ help='Configure hosts with ipv6 or not' )
+ parser.add_option( '--ipv4', dest='ipv4', type='int', default=1,
+ help='Configure hosts with ipv4 or not' )
+    parser.add_option( '--onos-ip', dest='onosIp', type='str', default='',
+                       help='Comma-separated list of ONOS controller IP addresses' )
+
+ ( options, args ) = parser.parse_args()
+ return options, args
+
+opts, args = parseOptions()
+
+class ComcastLeafSpineFabric(Topo):
+
+ spines = dict()
+ leafs = dict()
+ hosts_dict = dict()
+
+ def createIpv4Hosts(self, dhcp):
+
+ h1 = self.addHost('h1v4', cls=TrellisHost,
+ mac='00:aa:00:00:00:01', ips=['10.1.0.1/24'],
+ gateway='10.1.0.254', dhcpClient=dhcp)
+ self.addLink(h1, self.leafs[0])
+ self.hosts_dict['h1v4'] = h1
+
+ h2 = self.addHost('h2v4', cls=TrellisHost,
+ mac='00:aa:00:00:01:01', ips=['10.1.10.1/24'],
+ gateway='10.1.10.254', dhcpClient=dhcp)
+ self.addLink(h2, self.leafs[0])
+ self.hosts_dict['h2v4'] = h2
+
+ h3 = self.addHost('h3v4', cls=TrellisHost,
+ mac='00:aa:00:00:00:02', ips=['10.2.0.1/24'],
+ gateway='10.2.0.254', dhcpClient=dhcp)
+ self.addLink(h3, self.leafs[1])
+ self.hosts_dict['h3v4'] = h3
+
+ h4 = self.addHost('h4v4', cls=TrellisHost,
+ mac='00:aa:00:00:00:03', ips=['10.2.30.1/24'],
+ gateway='10.2.30.254', dhcpClient=dhcp,
+ dualHomed=True)
+ self.addLink(h4, self.leafs[1])
+ self.addLink(h4, self.leafs[2])
+ self.hosts_dict['h4v4'] = h4
+
+ h5 = self.addHost('h5v4', cls=TrellisHost,
+ mac='00:aa:00:00:00:04', ips=['10.2.20.1/24'],
+ gateway='10.2.20.254', dhcpClient=dhcp, vlan=30,
+ dualHomed=True)
+ self.addLink(h5, self.leafs[1])
+ self.addLink(h5, self.leafs[2])
+ self.hosts_dict['h5v4'] = h5
+
+ h6 = self.addHost('h6v4', cls=TrellisHost,
+ mac='00:aa:00:00:00:05', ips=['10.2.10.1/24'],
+ gateway='10.2.10.254', dhcpClient=dhcp, vlan=20)
+ self.addLink(h6, self.leafs[2])
+ self.hosts_dict['h6v4'] = h6
+
+ h7 = self.addHost('h7v4', cls=TrellisHost,
+ mac='00:aa:00:00:01:05', ips=['10.2.40.1/24'],
+ gateway='10.2.40.254', dhcpClient=dhcp, vlan=40)
+ self.addLink(h7, self.leafs[2])
+ self.hosts_dict['h7v4'] = h7
+
+ h8 = self.addHost('h8v4', cls=TrellisHost,
+ mac='00:aa:00:00:00:06', ips=['10.3.0.1/24'],
+ gateway='10.3.0.254', dhcpClient=dhcp, vlan=30)
+ self.addLink(h8, self.leafs[3])
+ self.hosts_dict['h8v4'] = h8
+
+ h9 = self.addHost('h9v4', cls=TrellisHost,
+ mac='00:aa:00:00:00:07', ips=['10.3.10.1/24'],
+ gateway='10.3.10.254', dhcpClient=dhcp, vlan=40,
+ dualHomed=True)
+ self.addLink(h9, self.leafs[3])
+ self.addLink(h9, self.leafs[4])
+ self.hosts_dict['h9v4'] = h9
+
+ h10 = self.addHost('h10v4', cls=TrellisHost,
+ mac='00:aa:00:00:00:08', ips=['10.3.30.1/24'],
+ gateway='10.3.30.254', dhcpClient=dhcp, vlan=40,
+ dualHomed=True)
+ self.addLink(h10, self.leafs[3])
+ self.addLink(h10, self.leafs[4])
+ self.hosts_dict['h10v4'] = h10
+
+ h11 = self.addHost('h11v4', cls=TrellisHost,
+ mac='00:aa:00:00:00:0a', ips=['10.3.20.1/24'],
+ gateway='10.3.20.254', dhcpClient=dhcp, vlan=40)
+ self.addLink(h11, self.leafs[4])
+ self.hosts_dict['h11v4'] = h11
+
+ return
+
+ def createIpv6Hosts(self, dhcp):
+
+ h1 = self.addHost('h1v6', cls=TrellisHost,
+ mac='00:aa:00:00:00:01', ips=["1000::3fe/120"],
+ gateway='1000::3ff', dhcpClient=dhcp)
+ self.addLink(h1, self.leafs[0])
+ self.hosts_dict['h1v6'] = h1
+
+ h2 = self.addHost('h2v6', cls=TrellisHost,
+ mac='00:aa:00:00:01:01', ips=['1001::3fe/120'],
+ gateway='1001::3ff', dhcpClient=dhcp)
+ self.addLink(h2, self.leafs[0])
+ self.hosts_dict['h2v6'] = h2
+
+ h3 = self.addHost('h3v6', cls=TrellisHost,
+ mac='00:aa:00:00:00:02', ips=['1002::3fe/120'],
+ gateway='1002::3ff', dhcpClient=dhcp)
+ self.addLink(h3, self.leafs[1])
+ self.hosts_dict['h3v6'] = h3
+
+ h4 = self.addHost('h4v6', cls=TrellisHost,
+ mac='00:aa:00:00:00:03', ips=['1003::3fe/120'],
+ gateway='1003::3ff', dhcpClient=dhcp,
+ dualHomed=True)
+ self.addLink(h4, self.leafs[1])
+ self.addLink(h4, self.leafs[2])
+ self.hosts_dict['h4v6'] = h4
+
+ h5 = self.addHost('h5v6', cls=TrellisHost,
+ mac='00:aa:00:00:00:04', ips=['1004::3fe/120'],
+ gateway='1004::3ff', dhcpClient=dhcp, vlan=30,
+ dualHomed=True)
+ self.addLink(h5, self.leafs[1])
+ self.addLink(h5, self.leafs[2])
+ self.hosts_dict['h5v6'] = h5
+
+ h6 = self.addHost('h6v6', cls=TrellisHost,
+ mac='00:aa:00:00:00:05', ips=['1005::3fe/120'],
+ gateway='1005::3ff', dhcpClient=dhcp, vlan=20)
+ self.addLink(h6, self.leafs[2])
+ self.hosts_dict['h6v6'] = h6
+
+ h7 = self.addHost('h7v6', cls=TrellisHost,
+ mac='00:aa:00:00:01:05', ips=['1006::3fe/120'],
+ gateway='1006::3ff', dhcpClient=dhcp, vlan=40)
+ self.addLink(h7, self.leafs[2])
+ self.hosts_dict['h7v6'] = h7
+
+ h8 = self.addHost('h8v6', cls=TrellisHost,
+ mac='00:aa:00:00:00:06', ips=['1007::3fe/120'],
+ gateway='1007::3ff', dhcpClient=dhcp, vlan=30)
+ self.addLink(h8, self.leafs[3])
+ self.hosts_dict['h8v6'] = h8
+
+ h9 = self.addHost('h9v6', cls=TrellisHost,
+ mac='00:aa:00:00:00:07', ips=['1008::3fe/120'],
+ gateway='1008::3ff', dhcpClient=dhcp, vlan=40,
+ dualHomed=True)
+ self.addLink(h9, self.leafs[3])
+ self.addLink(h9, self.leafs[4])
+ self.hosts_dict['h9v6'] = h9
+
+ h10 = self.addHost('h10v6', cls=TrellisHost,
+ mac='00:aa:00:00:00:08', ips=['1009::3fe/120'],
+ gateway='1009::3ff', dhcpClient=dhcp, vlan=40,
+ dualHomed=True)
+ self.addLink(h10, self.leafs[3])
+ self.addLink(h10, self.leafs[4])
+ self.hosts_dict['h10v6'] = h10
+
+ h11 = self.addHost('h11v6', cls=TrellisHost,
+ mac='00:aa:00:00:00:0a', ips=['1010::3fe/120'],
+ gateway='1010::3ff', dhcpClient=dhcp, vlan=40)
+ self.addLink(h11, self.leafs[4])
+ self.hosts_dict['h11v6'] = h11
+
+ return
+
+ '''
+    Creates the 2x5 leaf-spine fabric topology employed by Comcast.
+
+ S1 S2
+
+ L1 L2 L3 L4 L5
+
+ Where L2/L3 and L4/L5 are paired switches.
+ Parameters for this topology :
+ dhcp = True/False : set up dhcp servers
+ routers = True/False : set up external routers
+ '''
+ def __init__(self, dhcp=False, routers=False, ipv4=False, ipv6=False, **opts):
+ Topo.__init__(self, **opts)
+
+ # TODO: support IPv6 hosts
+ linkopts = dict( bw=10 )
+
+ spine = 2
+ leaf = 5
+
+ # Create spine switches
+ for s in range(spine):
+ self.spines[s] = self.addSwitch('spine10%s' % (s + 1), dpid = "00000000010%s" % (s + 1) )
+
+ # Create leaf switches
+ for ls in range(leaf):
+ self.leafs[ls] = self.addSwitch('leaf%s' % (ls + 1), dpid = "00000000000%s" % ( ls + 1) )
+
+        # connect leafs to spines; leafs 2-5 get a double link to each spine
+ for s in range( spine ):
+ spine_switch = self.spines[s]
+
+ for ls in range( leaf ):
+ leaf_switch = self.leafs[ls]
+
+ self.addLink( spine_switch, leaf_switch, **linkopts )
+ if ls > 0:
+ self.addLink( spine_switch, leaf_switch, **linkopts )
+
+ # connect paired leafs
+ self.addLink(self.leafs[1], self.leafs[2], **linkopts)
+ self.addLink(self.leafs[3], self.leafs[4], **linkopts)
+
+ # create dhcp servers
+ if dhcp:
+ if ipv4:
+ dhcp4 = self.addHost( 'dhcp', cls=TrellisHost,
+ mac="00:bb:00:00:00:01", ips=["10.0.3.253/24"],
+ gateway="10.0.3.254", dhcpServer=True)
+ self.addLink(self.spines[1], dhcp4, **linkopts)
+ if ipv6:
+                dhcp6 = self.addHost( 'dhcp', cls=TrellisHost,
+                                      mac="00:bb:00:00:00:02", ips=["2000::3fd/120"],
+                                      gateway="2000::3ff", dhcpServer=True, ipv6=True)
+                self.addLink(self.spines[1], dhcp6, **linkopts)
+        # create quagga routers
+ if routers:
+ print("NYI (quagga)!")
+
+ # create hosts
+ if ipv6:
+ self.createIpv6Hosts(dhcp)
+
+ if ipv4:
+ self.createIpv4Hosts(dhcp)
+
+ if not ipv4 and not ipv6:
+ print("No hosts were created!")
+
+def config( opts ):
+
+ dhcp = bool(opts.dhcp)
+ routers = bool(opts.routers)
+ ipv6 = bool(opts.ipv6)
+ ipv4 = bool(opts.ipv4)
+
+ if opts.onosIp != '':
+ controllers = opts.onosIp.split( ',' )
+ else:
+ controllers = ['127.0.0.1']
+ topo = ComcastLeafSpineFabric(dhcp=dhcp, routers=routers, ipv6=ipv6,
+ ipv4=ipv4)
+
+ net = Mininet( topo=topo, link=TCLink, build=False,
+ controller=None, autoSetMacs=True )
+ i = 0
+ for ip in controllers:
+ net.addController( "c%s" % ( i ), controller=RemoteController, ip=ip )
+ i += 1
+
+ net.build()
+ net.start()
+ CLI( net )
+ net.stop()
+
+
+if __name__ == '__main__':
+ setLogLevel('info')
+ config(opts)
+ os.system('sudo mn -c')
+
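
A hedged sketch of driving the new topology class directly; note that
comcast_fabric.py parses its own command-line options at import time, so
running the script itself is the intended entry point and importing it from
another argv-bearing program may not work as-is. The controller IP below is a
placeholder:

    # A minimal sketch, assuming mininet and comcast_fabric.py are importable.
    from mininet.net import Mininet
    from mininet.node import RemoteController
    from mininet.link import TCLink
    from comcast_fabric import ComcastLeafSpineFabric

    topo = ComcastLeafSpineFabric( dhcp=False, routers=False, ipv4=True, ipv6=False )
    net = Mininet( topo=topo, link=TCLink, build=False,
                   controller=None, autoSetMacs=True )
    net.addController( 'c0', controller=RemoteController, ip='127.0.0.1' )
    net.build()
    net.start()
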
diff --git a/TestON/tests/USECASE/SegmentRouting/dependencies/cord_fabric.py b/TestON/tests/USECASE/SegmentRouting/dependencies/cord_fabric.py
index 1012fef..0c15cdc 100644
--- a/TestON/tests/USECASE/SegmentRouting/dependencies/cord_fabric.py
+++ b/TestON/tests/USECASE/SegmentRouting/dependencies/cord_fabric.py
@@ -27,7 +27,7 @@
from ipaddress import IPv6Network, IPv4Network
from mininet.net import Mininet
from mininet.topo import Topo
-from mininet.node import RemoteController, UserSwitch, Host, OVSBridge
+from mininet.node import RemoteController, Host, OVSBridge
from mininet.link import TCLink
from mininet.log import setLogLevel
from mininet.cli import CLI
@@ -43,8 +43,8 @@
help='number of leaf switches, default=2' )
parser.add_option( '--fanout', dest='fanout', type='int', default=2,
help='number of hosts per leaf switch, default=2' )
- parser.add_option( '--onos', dest='onos', type='int', default=0,
- help='number of ONOS Instances, default=0, 0 means localhost, 1 will use OC1 and so on' )
+    parser.add_option( '--onos-ip', dest='onosIp', type='str', default='',
+                       help='Comma-separated list of ONOS controller IP addresses; replaces the old --onos option' )
parser.add_option( '--vlan', dest='vlan', type='int', default=-1,
help='vid of cross connect, default=-1, -1 means utilize default value' )
parser.add_option( '--ipv6', action="store_true", dest='ipv6',
@@ -266,9 +266,11 @@
fanout = opts.fanout
vlan = opts.vlan
ipv6 = opts.ipv6
- controllers = [ os.environ[ 'OC%s' % i ] for i in
- range( 1, opts.onos + 1 ) ] if ( opts.onos ) else [
- '127.0.0.1' ]
+ if opts.onosIp != '':
+ controllers = opts.onosIp.split( ',' )
+ else:
+ controllers = ['127.0.0.1']
+
if not ipv6:
topo = LeafAndSpine(
spine=spine,
@@ -285,7 +287,7 @@
ipv6=ipv6
)
net = Mininet( topo=topo, link=TCLink, build=False,
- switch=UserSwitch, controller=None, autoSetMacs=True )
+ controller=None, autoSetMacs=True )
i = 0
for ip in controllers:
net.addController( "c%s" % ( i ), controller=RemoteController, ip=ip )
diff --git a/TestON/tests/USECASE/SegmentRouting/dependencies/json/0x1.json b/TestON/tests/USECASE/SegmentRouting/dependencies/json/0x1.json
index 36955e1..90cf052 100755
--- a/TestON/tests/USECASE/SegmentRouting/dependencies/json/0x1.json
+++ b/TestON/tests/USECASE/SegmentRouting/dependencies/json/0x1.json
@@ -3,39 +3,43 @@
"of:0000000000000001/1" : {
"interfaces" : [
{
- "ips" : [ "10.0.1.254/24" ]
+ "ips" : [ "10.0.1.254/24" ],
+ "vlan-untagged" : 100
}
]
},
"of:0000000000000001/2" : {
"interfaces" : [
{
- "ips" : [ "10.0.1.254/24" ]
+ "ips" : [ "10.0.1.254/24" ],
+ "vlan-untagged" : 100
}
]
},
"of:0000000000000001/3" : {
"interfaces" : [
{
- "ips" : [ "10.0.1.254/24" ]
+ "ips" : [ "10.0.1.254/24" ],
+ "vlan-untagged" : 100
}
]
},
"of:0000000000000001/4" : {
"interfaces" : [
{
- "ips" : [ "10.0.1.254/24" ]
+ "ips" : [ "10.0.1.254/24" ],
+ "vlan-untagged" : 100
}
]
}
},
"devices" : {
"of:0000000000000001" : {
- "basic":{ "driver" : "ofdpa-cpqd" },
+ "basic":{ "driver" : "ofdpa-ovs" },
"segmentrouting" : {
"name" : "Leaf-R1",
- "nodeSid" : 1,
- "routerIp" : "192.168.0.1",
+ "ipv4NodeSid" : 1,
+ "ipv4Loopback" : "192.168.0.1",
"routerMac" : "10:00:00:00:00:01",
"isEdgeRouter" : true,
"adjacencySids" : []
@@ -46,13 +50,13 @@
"00:00:00:00:00:01/-1" : {
"basic": {
"ips": ["10.0.1.1"],
- "location": "of:0000000000000001/1"
+ "locations": ["of:0000000000000001/1"]
}
},
"00:00:00:00:00:02/-1" : {
"basic": {
"ips": ["10.0.1.2"],
- "location": "of:0000000000000001/2"
+ "locations": ["of:0000000000000001/2"]
}
}
},
diff --git a/TestON/tests/USECASE/SegmentRouting/dependencies/json/2x2.json b/TestON/tests/USECASE/SegmentRouting/dependencies/json/2x2.json
index 433b594..52625c7 100755
--- a/TestON/tests/USECASE/SegmentRouting/dependencies/json/2x2.json
+++ b/TestON/tests/USECASE/SegmentRouting/dependencies/json/2x2.json
@@ -3,86 +3,92 @@
"of:0000000000000001/1" : {
"interfaces" : [
{
- "ips" : [ "10.0.1.254/24" ]
+ "ips" : [ "10.0.1.254/24" ],
+ "vlan-untagged" : 100
}
]
},
"of:0000000000000001/2" : {
"interfaces" : [
{
- "ips" : [ "10.0.1.254/24" ]
+ "ips" : [ "10.0.1.254/24" ],
+ "vlan-untagged" : 100
}
]
},
"of:0000000000000001/3" : {
"interfaces" : [
{
- "ips" : [ "10.0.1.254/24" ]
+ "ips" : [ "10.0.1.254/24" ],
+ "vlan-untagged" : 100
}
]
},
"of:0000000000000001/4" : {
"interfaces" : [
{
- "ips" : [ "10.0.1.254/24" ]
+ "ips" : [ "10.0.1.254/24" ],
+ "vlan-untagged" : 100
}
]
},
"of:0000000000000002/1" : {
"interfaces" : [
{
- "ips" : [ "10.0.2.254/24" ]
+ "ips" : [ "10.0.2.254/24" ],
+ "vlan-untagged" : 200
}
]
},
"of:0000000000000002/2" : {
"interfaces" : [
{
- "ips" : [ "10.0.2.254/24" ]
+ "ips" : [ "10.0.2.254/24" ],
+ "vlan-untagged" : 200
}
]
}
},
"devices" : {
"of:0000000000000001" : {
- "basic":{ "driver" : "ofdpa-cpqd" },
+ "basic":{ "driver" : "ofdpa-ovs" },
"segmentrouting" : {
"name" : "Leaf-R1",
- "nodeSid" : 1,
- "routerIp" : "192.168.0.1",
+ "ipv4NodeSid" : 1,
+ "ipv4Loopback" : "192.168.0.1",
"routerMac" : "10:00:00:00:00:01",
"isEdgeRouter" : true,
"adjacencySids" : []
}
},
"of:0000000000000002" : {
- "basic":{ "driver" : "ofdpa-cpqd" },
+ "basic":{ "driver" : "ofdpa-ovs" },
"segmentrouting" : {
"name" : "Leaf-R2",
- "nodeSid" : 2,
- "routerIp" : "192.168.0.2",
+ "ipv4NodeSid" : 2,
+ "ipv4Loopback" : "192.168.0.2",
"routerMac" : "10:00:00:00:00:02",
"isEdgeRouter" : true,
"adjacencySids" : []
}
},
"of:0000000000000101" : {
- "basic":{ "driver" : "ofdpa-cpqd" },
+ "basic":{ "driver" : "ofdpa-ovs" },
"segmentrouting" : {
"name" : "Spine-R1",
- "nodeSid" : 101,
- "routerIp" : "192.168.0.101",
+ "ipv4NodeSid" : 101,
+ "ipv4Loopback" : "192.168.0.101",
"routerMac" : "10:00:00:00:01:01",
"isEdgeRouter" : false,
"adjacencySids" : []
}
},
"of:0000000000000102" : {
- "basic":{ "driver" : "ofdpa-cpqd" },
+ "basic":{ "driver" : "ofdpa-ovs" },
"segmentrouting" : {
"name" : "Spine-R2",
- "nodeSid" : 102,
- "routerIp" : "192.168.0.102",
+ "ipv4NodeSid" : 102,
+ "ipv4Loopback" : "192.168.0.102",
"routerMac" : "10:00:00:00:01:02",
"isEdgeRouter" : false,
"adjacencySids" : []
@@ -93,25 +99,25 @@
"00:00:00:00:00:01/-1" : {
"basic": {
"ips": ["10.0.1.1"],
- "location": "of:0000000000000001/1"
+ "locations": ["of:0000000000000001/1"]
}
},
"00:00:00:00:00:02/-1" : {
"basic": {
"ips": ["10.0.1.2"],
- "location": "of:0000000000000001/2"
+ "locations": ["of:0000000000000001/2"]
}
},
"00:00:00:00:00:03/-1" : {
"basic": {
"ips": ["10.0.2.1"],
- "location": "of:0000000000000002/1"
+ "locations": ["of:0000000000000002/1"]
}
},
"00:00:00:00:00:04/-1" : {
"basic": {
"ips": ["10.0.2.2"],
- "location": "of:0000000000000002/2"
+ "locations": ["of:0000000000000002/2"]
}
}
},
diff --git a/TestON/tests/USECASE/SegmentRouting/dependencies/json/4x4.json b/TestON/tests/USECASE/SegmentRouting/dependencies/json/4x4.json
index 8230261..42223b6 100755
--- a/TestON/tests/USECASE/SegmentRouting/dependencies/json/4x4.json
+++ b/TestON/tests/USECASE/SegmentRouting/dependencies/json/4x4.json
@@ -3,158 +3,168 @@
"of:0000000000000001/1" : {
"interfaces" : [
{
- "ips" : [ "10.0.1.254/24" ]
+ "ips" : [ "10.0.1.254/24" ],
+ "vlan-untagged" : 100
}
]
},
"of:0000000000000001/2" : {
"interfaces" : [
{
- "ips" : [ "10.0.1.254/24" ]
+ "ips" : [ "10.0.1.254/24" ],
+ "vlan-untagged" : 100
}
]
},
"of:0000000000000001/3" : {
"interfaces" : [
{
- "ips" : [ "10.0.1.254/24" ]
+ "ips" : [ "10.0.1.254/24" ],
+ "vlan-untagged" : 100
}
]
},
"of:0000000000000001/4" : {
"interfaces" : [
{
- "ips" : [ "10.0.1.254/24" ]
+ "ips" : [ "10.0.1.254/24" ],
+ "vlan-untagged" : 100
}
]
},
"of:0000000000000002/1" : {
"interfaces" : [
{
- "ips" : [ "10.0.2.254/24" ]
+ "ips" : [ "10.0.2.254/24" ],
+ "vlan-untagged" : 200
}
]
},
"of:0000000000000002/2" : {
"interfaces" : [
{
- "ips" : [ "10.0.2.254/24" ]
+ "ips" : [ "10.0.2.254/24" ],
+ "vlan-untagged" : 200
}
]
},
"of:0000000000000003/1" : {
"interfaces" : [
{
- "ips" : [ "10.0.3.254/24" ]
+ "ips" : [ "10.0.3.254/24" ],
+ "vlan-untagged" : 300
}
]
},
"of:0000000000000003/2" : {
"interfaces" : [
{
- "ips" : [ "10.0.3.254/24" ]
+ "ips" : [ "10.0.3.254/24" ],
+ "vlan-untagged" : 300
}
]
},
"of:0000000000000004/1" : {
"interfaces" : [
{
- "ips" : [ "10.0.4.254/24" ]
+ "ips" : [ "10.0.4.254/24" ],
+ "vlan-untagged" : 400
}
]
},
"of:0000000000000004/2" : {
"interfaces" : [
{
- "ips" : [ "10.0.4.254/24"]
+ "ips" : [ "10.0.4.254/24"],
+ "vlan-untagged" : 400
}
]
}
},
"devices" : {
"of:0000000000000001" : {
- "basic":{ "driver" : "ofdpa-cpqd" },
+ "basic":{ "driver" : "ofdpa-ovs" },
"segmentrouting" : {
"name" : "Leaf-R1",
- "nodeSid" : 1,
- "routerIp" : "192.168.0.1",
+ "ipv4NodeSid" : 1,
+ "ipv4Loopback" : "192.168.0.1",
"routerMac" : "10:00:00:00:00:01",
"isEdgeRouter" : true,
"adjacencySids" : []
}
},
"of:0000000000000002" : {
- "basic":{ "driver" : "ofdpa-cpqd" },
+ "basic":{ "driver" : "ofdpa-ovs" },
"segmentrouting" : {
"name" : "Leaf-R2",
- "nodeSid" : 2,
- "routerIp" : "192.168.0.2",
+ "ipv4NodeSid" : 2,
+ "ipv4Loopback" : "192.168.0.2",
"routerMac" : "10:00:00:00:00:02",
"isEdgeRouter" : true,
"adjacencySids" : []
}
},
"of:0000000000000003" : {
- "basic" :{ "driver" : "ofdpa-cpqd" },
+ "basic" :{ "driver" : "ofdpa-ovs" },
"segmentrouting" : {
"name" : "Leaf-R3",
- "nodeSid" : 3,
- "routerIp" : "192.168.0.3",
+ "ipv4NodeSid" : 3,
+ "ipv4Loopback" : "192.168.0.3",
"routerMac" : "10:00:00:00:00:03",
"isEdgeRouter" : true,
"adjacencySids" : []
}
},
"of:0000000000000004" : {
- "basic":{ "driver" : "ofdpa-cpqd" },
+ "basic":{ "driver" : "ofdpa-ovs" },
"segmentrouting" : {
"name" : "Leaf-R4",
- "nodeSid" : 4,
- "routerIp" : "192.168.0.4",
+ "ipv4NodeSid" : 4,
+ "ipv4Loopback" : "192.168.0.4",
"routerMac" : "10:00:00:00:00:04",
"isEdgeRouter" : true,
"adjacencySids" : []
}
},
"of:0000000000000101" : {
- "basic":{ "driver" : "ofdpa-cpqd" },
+ "basic":{ "driver" : "ofdpa-ovs" },
"segmentrouting" : {
"name" : "Spine-R1",
- "nodeSid" : 101,
- "routerIp" : "192.168.0.101",
+ "ipv4NodeSid" : 101,
+ "ipv4Loopback" : "192.168.0.101",
"routerMac" : "10:00:00:00:01:01",
"isEdgeRouter" : false,
"adjacencySids" : []
}
},
"of:0000000000000102" : {
- "basic":{ "driver" : "ofdpa-cpqd" },
+ "basic":{ "driver" : "ofdpa-ovs" },
"segmentrouting" : {
"name" : "Spine-R2",
- "nodeSid" : 102,
- "routerIp" : "192.168.0.102",
+ "ipv4NodeSid" : 102,
+ "ipv4Loopback" : "192.168.0.102",
"routerMac" : "10:00:00:00:01:02",
"isEdgeRouter" : false,
"adjacencySids" : []
}
},
"of:0000000000000103" : {
- "basic":{ "driver" : "ofdpa-cpqd" },
+ "basic":{ "driver" : "ofdpa-ovs" },
"segmentrouting" : {
"name" : "Spine-R3",
- "nodeSid" : 103,
- "routerIp" : "192.168.0.103",
+ "ipv4NodeSid" : 103,
+ "ipv4Loopback" : "192.168.0.103",
"routerMac" : "10:00:00:00:01:03",
"isEdgeRouter" : false,
"adjacencySids" : []
}
},
"of:0000000000000104" : {
- "basic":{ "driver" : "ofdpa-cpqd" },
+ "basic":{ "driver" : "ofdpa-ovs" },
"segmentrouting" : {
"name" : "Spine-R4",
- "nodeSid" : 104,
- "routerIp" : "192.168.0.104",
+ "ipv4NodeSid" : 104,
+ "ipv4Loopback" : "192.168.0.104",
"routerMac" : "10:00:00:00:01:04",
"isEdgeRouter" : false,
"adjacencySids" : []
@@ -165,49 +175,49 @@
"00:00:00:00:00:01/-1" : {
"basic": {
"ips": ["10.0.1.1"],
- "location": "of:0000000000000001/1"
+ "locations": ["of:0000000000000001/1"]
}
},
"00:00:00:00:00:02/-1" : {
"basic": {
"ips": ["10.0.1.2"],
- "location": "of:0000000000000001/2"
+ "locations": ["of:0000000000000001/2"]
}
},
"00:00:00:00:00:03/-1" : {
"basic": {
"ips": ["10.0.2.1"],
- "location": "of:0000000000000002/1"
+ "locations": ["of:0000000000000002/1"]
}
},
"00:00:00:00:00:04/-1" : {
"basic": {
"ips": ["10.0.2.2"],
- "location": "of:0000000000000002/2"
+ "locations": ["of:0000000000000002/2"]
}
},
"00:00:00:00:00:05/-1" : {
"basic": {
"ips": ["10.0.3.1"],
- "location": "of:0000000000000003/1"
+ "locations": ["of:0000000000000003/1"]
}
},
"00:00:00:00:00:06/-1" : {
"basic": {
"ips": ["10.0.3.2"],
- "location": "of:0000000000000003/2"
+ "locations": ["of:0000000000000003/2"]
}
},
"00:00:00:00:00:07/-1" : {
"basic": {
"ips": ["10.0.4.1"],
- "location": "of:0000000000000004/1"
+ "locations": ["of:0000000000000004/1"]
}
},
"00:00:00:00:00:08/-1" : {
"basic": {
"ips": ["10.0.4.2"],
- "location": "of:0000000000000004/2"
+ "locations": ["of:0000000000000004/2"]
}
}
},
diff --git a/TestON/tests/USECASE/SegmentRouting/dependencies/json/extra.json b/TestON/tests/USECASE/SegmentRouting/dependencies/json/extra.json
index 992a04c..e3544bf 100755
--- a/TestON/tests/USECASE/SegmentRouting/dependencies/json/extra.json
+++ b/TestON/tests/USECASE/SegmentRouting/dependencies/json/extra.json
@@ -3,13 +3,13 @@
"00:00:00:00:00:09/-1" : {
"basic": {
"ips": ["10.0.1.9"],
- "location": "of:0000000000000001/3"
+ "locations": ["of:0000000000000001/3"]
}
},
"00:00:00:00:09:01/-1" : {
"basic": {
"ips": ["10.0.9.1"],
- "location": "of:0000000000000001/4"
+ "locations": ["of:0000000000000001/4"]
}
}
}
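
The host schema change from a single "location" string to a "locations" list
recurs across all of these netcfg files. A minimal sketch of a one-shot
migration for any remaining configs; the input path is hypothetical:

    # A minimal sketch: rewrite "location": "<cp>" as "locations": [ "<cp>" ].
    import json

    def migrateHosts( cfg ):
        for body in cfg.get( "hosts", {} ).values():
            basic = body.get( "basic", {} )
            if "location" in basic:
                basic[ "locations" ] = [ basic.pop( "location" ) ]
        return cfg

    with open( "old_netcfg.json" ) as f:  # hypothetical path
        cfg = migrateHosts( json.load( f ) )
    print( json.dumps( cfg, indent=2 ) )
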
diff --git a/TestON/tests/USECASE/SegmentRouting/dependencies/trellis_fabric.py b/TestON/tests/USECASE/SegmentRouting/dependencies/trellis_fabric.py
index 98b5316..b0b26a1 100644
--- a/TestON/tests/USECASE/SegmentRouting/dependencies/trellis_fabric.py
+++ b/TestON/tests/USECASE/SegmentRouting/dependencies/trellis_fabric.py
@@ -12,8 +12,8 @@
from mininet.nodelib import NAT
from mininet.cli import CLI
-from routinglib import BgpRouter, RoutedHost
-from trellislib import DhcpServer, TaggedRoutedHost, DualHomedRoutedHost, DualHomedTaggedRoutedHost
+from routinglib import BgpRouter
+from trellislib import TrellisHost
# Parse command line options and dump results
def parseOptions():
@@ -124,19 +124,21 @@
if vlan_id[ dual_ls * fanout + f] != 0:
host = self.addHost(
name='h%s' % ( dual_ls * fanout + f + 1),
- cls=DualHomedTaggedRoutedHost,
+ cls=TrellisHost,
ips=['10.0.%d.%d/%d' % ( dual_ls + 2, f + 1, IP4_SUBNET_CLASS)],
gateway='10.0.%d.254' % ( dual_ls + 2),
mac='00:aa:00:00:00:%02x' % (dual_ls * fanout + f + 1),
- vlan=vlan_id[ dual_ls*fanout + f ]
+ vlan=vlan_id[ dual_ls*fanout + f ],
+ dualHomed=True
)
else:
host = self.addHost(
name='h%s' % (dual_ls * fanout + f + 1),
- cls= DualHomedRoutedHost,
+ cls=TrellisHost,
ips=['10.0.%d.%d/%d' % (dual_ls+2, f+1, IP4_SUBNET_CLASS)],
gateway='10.0.%d.254' % (dual_ls+2),
- mac='00:aa:00:00:00:%02x' % (dual_ls * fanout + f + 1)
+ mac='00:aa:00:00:00:%02x' % (dual_ls * fanout + f + 1),
+ dualHomed=True
)
self.addLink(host, leafs[ls], **linkopts)
self.addLink(host, leafs[ls-1], **linkopts)
@@ -145,8 +147,8 @@
last_paired_ls = leafs[leaf-1]
# Create common components
# DHCP server
- dhcp = self.addHost('dhcp', cls=DhcpServer, mac='00:99:00:00:00:01', ips=['10.0.3.253/24'],
- gateway='10.0.3.254')
+ dhcp = self.addHost('dhcp', cls=TrellisHost, mac='00:99:00:00:00:01', ips=['10.0.3.253/24'],
+ gateway='10.0.3.254', dhcpServer=True)
# Control plane switch (for DHCP servers)
cs1 = self.addSwitch('cs1', cls=OVSBridge)
@@ -194,7 +196,7 @@
self.addLink(r1, last_paired_ls)
# External IPv4 Host behind r1
- rh1 = self.addHost('rh1', cls=RoutedHost, ips=['10.0.99.2/24'], gateway='10.0.99.1')
+ rh1 = self.addHost('rh1', cls=TrellisHost, ips=['10.0.99.2/24'], gateway='10.0.99.1')
self.addLink(r1, rh1)
# External Quagga r2
@@ -208,7 +210,7 @@
self.addLink(r2, last_paired_ls)
# External IPv4 Host behind r2
- rh2 = self.addHost('rh2', cls=RoutedHost, ips=['10.0.99.2/24'], gateway='10.0.99.1')
+ rh2 = self.addHost('rh2', cls=TrellisHost, ips=['10.0.99.2/24'], gateway='10.0.99.1')
self.addLink(r2, rh2)
class LeafSpineFabric (Topo) :
@@ -241,7 +243,7 @@
if vlan_id[ls * fanout + f] != 0:
host = self.addHost(
name='h%s' % (ls * fanout + f + 1),
- cls=TaggedRoutedHost,
+ cls=TrellisHost,
ips=['10.0.%d.%d/%d' % (ls+2, f+1, IP4_SUBNET_CLASS)],
gateway='10.0.%d.254' % (ls+2),
mac='00:aa:00:00:00:%02x' % (ls * fanout + f + 1),
@@ -250,7 +252,7 @@
else:
host = self.addHost(
name='h%s' % (ls * fanout + f + 1),
- cls= RoutedHost,
+ cls=TrellisHost,
ips=['10.0.%d.%d/%d' % (ls+2, f+1, IP4_SUBNET_CLASS)],
gateway='10.0.%d.254' % (ls+2),
mac='00:aa:00:00:00:%02x' % (ls * fanout + f + 1)
@@ -260,8 +262,8 @@
last_ls = leafs[leaf-1]
# Create common components
# DHCP server
- dhcp = self.addHost('dhcp', cls=DhcpServer, mac='00:99:00:00:00:01', ips=['10.0.3.253/24'],
- gateway='10.0.3.254')
+ dhcp = self.addHost('dhcp', cls=TrellisHost, mac='00:99:00:00:00:01', ips=['10.0.3.253/24'],
+ gateway='10.0.3.254', dhcpServer=True)
# Control plane switch (for DHCP servers)
cs1 = self.addSwitch('cs1', cls=OVSBridge)
@@ -297,10 +299,9 @@
self.addLink(r1, last_ls)
# External IPv4 Host behind r1
- rh1 = self.addHost('rh1', cls=RoutedHost, ips=['10.0.99.2/24'], gateway='10.0.99.1')
+ rh1 = self.addHost('rh1', cls=TrellisHost, ips=['10.0.99.2/24'], gateway='10.0.99.1')
self.addLink(r1, rh1)
-
def config( opts ):
spine = opts.spine
leaf = opts.leaf
diff --git a/TestON/tests/USECASE/SegmentRouting/dependencies/trellislib.py b/TestON/tests/USECASE/SegmentRouting/dependencies/trellislib.py
index 08dcf2f..4c827f0 100644
--- a/TestON/tests/USECASE/SegmentRouting/dependencies/trellislib.py
+++ b/TestON/tests/USECASE/SegmentRouting/dependencies/trellislib.py
@@ -252,6 +252,93 @@
self.cmd('rm -rf %s' % self.pidFile)
super(DualHomedDhcpClient, self).terminate()
+class TrellisHost(Host):
+ def __init__(self, name, ips=[], gateway="", dualHomed=False, vlan=None, dhcpClient=False, dhcpServer=False, ipv6=False, *args, **kwargs):
+ super(TrellisHost, self).__init__(name, *args, **kwargs)
+ self.dualHomed = dualHomed
+ self.bond0 = None
+ self.vlan = vlan
+ self.vlanIntf = None
+ self.dhcpClient = dhcpClient
+ self.dhcpServer = dhcpServer
+ if dhcpClient:
+ self.pidFile = '/run/dhclient-%s.pid' % self.name
+ self.leaseFile = '/var/lib/dhcp/dhcpclient%s-%s.lease' % ("6" if ipv6 else "", self.name)
+ else:
+ self.ips = ips
+ self.gateway = gateway
+ if dhcpServer:
+ self.binFile = '/usr/sbin/dhcpd'
+ self.pidFile = '/run/dhcp-server-dhcpd%s.pid' % ("6" if ipv6 else "")
+ self.configFile = './dhcpd%s.conf' % ("6" if ipv6 else "")
+ self.leasesFile = '/var/lib/dhcp/dhcpd%s.leases' % ("6" if ipv6 else "")
+ self.ipv6 = ipv6
+
+ def config(self, **kwargs):
+ super(TrellisHost, self).config(**kwargs)
+
+ if self.dualHomed:
+ # Setup bond0 interface
+ intf0 = self.intfs[0].name
+ intf1 = self.intfs[1].name
+ self.bond0 = "%s-bond0" % self.name
+ self.cmd('modprobe bonding')
+ self.cmd('ip link add %s type bond' % self.bond0)
+ self.cmd('ip link set %s down' % intf0)
+ self.cmd('ip link set %s down' % intf1)
+ self.cmd('ip link set %s master %s' % (intf0, self.bond0))
+ self.cmd('ip link set %s master %s' % (intf1, self.bond0))
+ self.cmd('ip addr flush dev %s' % intf0)
+ self.cmd('ip addr flush dev %s' % intf1)
+ self.cmd('ip link set %s up' % self.bond0)
+ defaultIntf = self.defaultIntf()
+ defaultIntf.name = self.bond0
+ self.nameToIntf[self.bond0] = defaultIntf
+
+ self.cmd('ip %s addr flush dev %s' % ("-4" if self.ipv6 else "", self.defaultIntf()))
+
+ if self.vlan:
+ # Setup vlan interface
+ defaultIntf = self.defaultIntf()
+ self.vlanIntf = "%s.%s" % (defaultIntf, self.vlan)
+ self.cmd('ip link add link %s name %s type vlan id %s' % (defaultIntf, self.vlanIntf, self.vlan))
+ self.cmd('ip link set up %s' % self.vlanIntf)
+ defaultIntf.name = self.vlanIntf
+ self.nameToIntf[self.vlanIntf] = defaultIntf
+
+ if self.dhcpClient:
+ if self.vlan or self.dualHomed:
+                # Why is a lease file not required here?
+ self.cmd('dhclient -q -%s -nw -pf %s %s' % (6 if self.ipv6 else 4, self.pidFile, self.defaultIntf()))
+ else:
+ self.cmd('dhclient -q -%s -nw -pf %s -lf %s %s' % (6 if self.ipv6 else 4, self.pidFile, self.leaseFile, self.defaultIntf()))
+ else:
+ # Setup IP addresses
+ for ip in self.ips:
+ self.cmd('ip addr add %s dev %s' % (ip, self.defaultIntf()))
+ self.cmd('ip route add default via %s' % self.gateway)
+
+ if self.dhcpServer:
+ if self.ipv6:
+ linkLocalAddr = mac_to_ipv6_linklocal(kwargs['mac'])
+ self.cmd('ip -6 addr add dev %s scope link %s' % (self.defaultIntf(), linkLocalAddr))
+ self.cmd('touch %s' % self.leasesFile)
+ self.cmd('%s -q -%s -pf %s -cf %s %s' % (self.binFile, 6 if self.ipv6 else 4, self.pidFile, self.configFile, self.defaultIntf()))
+
+ def terminate(self, **kwargs):
+ if self.vlan:
+ self.cmd('ip link remove link %s' % self.vlanIntf)
+ if self.dualHomed:
+ self.cmd('ip link set %s down' % self.bond0)
+ self.cmd('ip link delete %s' % self.bond0)
+ if self.dhcpClient:
+ self.cmd('kill -9 `cat %s`' % self.pidFile)
+ self.cmd('rm -rf %s' % self.pidFile)
+ if self.dhcpServer:
+ self.cmd('kill -9 `cat %s`' % self.pidFile)
+ self.cmd('rm -rf %s' % self.pidFile)
+ super(TrellisHost, self).terminate()
+
# Utility for IPv6
def mac_to_ipv6_linklocal(mac):
'''
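
The single TrellisHost class added above subsumes the RoutedHost,
TaggedRoutedHost, DualHomed* and Dhcp* variants; which setup runs is chosen by
the vlan, dualHomed, dhcpClient and dhcpServer flags. A minimal sketch of the
equivalent call sites, mirroring the substitutions made in trellis_fabric.py
(names are hypothetical):

    # A minimal sketch of TrellisHost replacing the old host classes.
    from mininet.topo import Topo
    from trellislib import TrellisHost

    class TinyTopo( Topo ):
        def build( self ):
            leaf = self.addSwitch( 'leaf1' )
            # formerly TaggedRoutedHost: vlan triggers the tagged setup
            h1 = self.addHost( 'h1', cls=TrellisHost, mac='00:aa:00:00:00:01',
                               ips=[ '10.0.2.1/24' ], gateway='10.0.2.254', vlan=20 )
            # formerly DhcpServer: dhcpServer=True triggers the dhcpd setup
            dhcp = self.addHost( 'dhcp', cls=TrellisHost, mac='00:99:00:00:00:01',
                                 ips=[ '10.0.3.253/24' ], gateway='10.0.3.254',
                                 dhcpServer=True )
            self.addLink( h1, leaf )
            self.addLink( dhcp, leaf )
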
diff --git a/TestON/tests/dependencies/ONOSSetup.py b/TestON/tests/dependencies/ONOSSetup.py
index cbf7f8c..bcecf45 100644
--- a/TestON/tests/dependencies/ONOSSetup.py
+++ b/TestON/tests/dependencies/ONOSSetup.py
@@ -27,14 +27,15 @@
def __init__( self ):
self.default = ''
- def envSetupDescription( self ):
+ def envSetupDescription( self, includeCaseDesc=True ):
"""
Introduction part of the test. It will initialize some basic variables.
"""
- main.case( "Constructing test variables and building ONOS package" )
- main.step( "Constructing test variables" )
- main.caseExplanation = "For loading from params file, and pull" + \
- " and build the latest ONOS package"
+ if includeCaseDesc:
+ main.case( "Constructing test variables and building ONOS package" )
+ main.caseExplanation = "For loading from params file, and pull" + \
+ " and build the latest ONOS package"
+ main.step("Constructing test variables")
try:
from tests.dependencies.Cluster import Cluster
except ImportError:
@@ -47,11 +48,14 @@
main.ONOSbench = main.Cluster.controllers[ 0 ].Bench
main.testOnDirectory = re.sub( "(/tests)$", "", main.testDir )
- def gitPulling( self ):
+ def gitPulling( self, includeCaseDesc=True ):
"""
it will do git checkout or pull if they are enabled from the params file.
"""
- main.case( "Pull onos branch and build onos on Teststation." )
+
+ if includeCaseDesc:
+ main.case( "Pull onos branch and build onos on Teststation." )
+
gitPull = main.params[ 'GIT' ][ 'pull' ]
gitBranch = main.params[ 'GIT' ][ 'branch' ]
if gitPull == 'True':
@@ -76,7 +80,7 @@
else:
main.log.info( "Skipped git checkout and pull as they are disabled in params file" )
- def envSetup( self, includeGitPull=True ):
+ def envSetup( self, includeGitPull=True, includeCaseDesc=True ):
"""
Description:
some environment setup for the test.
@@ -86,7 +90,7 @@
Returns main.TRUE
"""
if includeGitPull:
- self.gitPulling()
+ self.gitPulling( includeCaseDesc )
try:
from tests.dependencies.Cluster import Cluster
@@ -355,7 +359,7 @@
def ONOSSetUp( self, cluster, hasMultiNodeRounds=False, startOnos=True, newCell=True,
cellName="temp", cellApps="drivers", mininetIp="", removeLog=False, extraApply=None, applyArgs=None,
extraClean=None, cleanArgs=None, skipPack=False, installMax=False, useSSH=True,
- killRemoveMax=True, stopOnos=False, installParallel=True, cellApply=True ):
+ killRemoveMax=True, stopOnos=False, installParallel=True, cellApply=True, includeCaseDesc=True ):
"""
Description:
Initial ONOS setting up of the tests. It will also verify the result of each steps.
@@ -395,11 +399,11 @@
Returns main.TRUE if everything proceeded successfully.
"""
self.setNumCtrls( hasMultiNodeRounds )
-
- main.case( "Starting up " + str( cluster.numCtrls ) +
- " node(s) ONOS cluster" )
- main.caseExplanation = "Set up ONOS with " + str( cluster.numCtrls ) + \
- " node(s) ONOS cluster"
+ if includeCaseDesc:
+ main.case( "Starting up " + str( cluster.numCtrls ) +
+ " node(s) ONOS cluster" )
+ main.caseExplanation = "Set up ONOS with " + str( cluster.numCtrls ) + \
+ " node(s) ONOS cluster"
killResult = self.killingAllOnos( cluster, killRemoveMax, stopOnos )
main.log.info( "NODE COUNT = " + str( cluster.numCtrls ) )
diff --git a/TestON/tests/dependencies/topology.py b/TestON/tests/dependencies/topology.py
index 96109d3..04e6246 100644
--- a/TestON/tests/dependencies/topology.py
+++ b/TestON/tests/dependencies/topology.py
@@ -130,7 +130,7 @@
return currentCompareResult
- def compareTopos( self, Mininet, attempts=1 ):
+ def compareTopos( self, Mininet, attempts=1, includeCaseDesc=True ):
"""
Description:
Compares the links, hosts, and switches in ONOS to those in Mininet.
@@ -142,9 +142,10 @@
Returns main.TRUE if the results are matching else
Returns main.FALSE
"""
- main.case( "Compare ONOS Topology view to Mininet topology" )
- main.caseExplanation = "Compare topology elements between Mininet" +\
- " and ONOS"
+ if includeCaseDesc:
+ main.case( "Compare ONOS Topology view to Mininet topology" )
+ main.caseExplanation = "Compare topology elements between Mininet" +\
+ " and ONOS"
main.log.info( "Gathering topology information from Mininet" )
devicesResults = main.FALSE # Overall Boolean for device correctness
linksResults = main.FALSE # Overall Boolean for link correctness
diff --git a/TestON/tests/dependencies/utils.py b/TestON/tests/dependencies/utils.py
index 6155201..6537afc 100644
--- a/TestON/tests/dependencies/utils.py
+++ b/TestON/tests/dependencies/utils.py
@@ -23,7 +23,7 @@
def __init__( self ):
self.default = ''
- def mininetCleanIntro( self ):
+ def mininetCleanIntro( self, includeCaseDesc=True ):
"""
Description:
Introduction information of the mininet clean up
@@ -31,9 +31,9 @@
Returns:
"""
main.log.report( "Stop Mininet" )
-
- main.case( "Stop Mininet" )
- main.caseExplanation = "Stopping the current mininet to start up fresh"
+ if includeCaseDesc:
+ main.case( "Stop Mininet" )
+ main.caseExplanation = "Stopping the current mininet to start up fresh"
def mininetCleanup( self, Mininet, timeout=5, exitTimeout=1000 ):
"""
@@ -54,7 +54,7 @@
onfail="Failed to stopped mininet" )
return topoResult
- def copyKarafLog( self, copyFileName="", before=False ):
+ def copyKarafLog( self, copyFileName="", before=False, includeCaseDesc=True ):
"""
Description:
copy the karaf log and verify it.
@@ -65,9 +65,10 @@
"""
# TODO: Also grab the rotated karaf logs
main.log.report( "Copy karaf logs" )
- main.case( "Copy karaf logs" )
- main.caseExplanation = "Copying the karaf logs to preserve them through" +\
- "reinstalling ONOS"
+ if includeCaseDesc:
+ main.case( "Copy karaf logs" )
+ main.caseExplanation = "Copying the karaf logs to preserve them through" +\
+ "reinstalling ONOS"
main.step( "Copying karaf logs" )
stepResult = main.TRUE
scpResult = main.TRUE
@@ -78,11 +79,10 @@
"/tmp/karaf.log",
direction="from" )
copyResult = copyResult and main.ONOSbench.cpLogsToDir( "/tmp/karaf.log", main.logdir,
- copyFileName= ( copyFileName + "_karaf.log." +
- ctrl.name + "_" )
- if before else
- ( "karaf.log." + ctrl.name +
- "." + copyFileName ) )
+ copyFileName=( copyFileName + "_karaf.log." +
+ ctrl.name + "_" ) if before else
+ ( "karaf.log." + ctrl.name +
+ "." + copyFileName ) )
if scpResult and copyResult:
stepResult = main.TRUE and stepResult
else:
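
The reflowed conditional above yields two naming schemes depending on the
before flag; a minimal sketch of the resulting karaf-log names, with a
hypothetical tag and node name:

    # A minimal sketch of copyKarafLog() naming.
    copyFileName, node = "CASE2", "ONOScli1"
    beforeName = copyFileName + "_karaf.log." + node + "_"  # before=True
    afterName = "karaf.log." + node + "." + copyFileName    # before=False
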