Merge "[ONOS-7529]: Update TestON Documentation (README.md in this case)"
diff --git a/TestON/JenkinsFile/JenkinsCommonFuncs.groovy b/TestON/JenkinsFile/JenkinsCommonFuncs.groovy
index f9af19a..7ba926b 100644
--- a/TestON/JenkinsFile/JenkinsCommonFuncs.groovy
+++ b/TestON/JenkinsFile/JenkinsCommonFuncs.groovy
@@ -28,7 +28,7 @@
def init( type ){
machineType = [ "FUNC" : "VM",
"HA" : "VM",
- "SR" : "VM",
+ "SR" : "Fabric",
"SCPF" : "BM",
"USECASE" : "BM" ]
testType = type;
@@ -193,8 +193,8 @@
}
}
}
-def publishToConfluence( prop, wikiLink, file ){
- if( isPostingResult( prop[ "manualRun" ], prop[ "postResult" ] ) ){
+def publishToConfluence( isManualRun, isPostResult, wikiLink, file ){
+ if( isPostingResult( isManualRun, isPostResult ) ){
publishConfluence siteName: 'wiki.onosproject.org', pageName: wikiLink, spaceName: 'ONOS',
attachArchivedArtifacts: true, buildIfUnstable: true,
editorList: [
@@ -226,7 +226,9 @@
if( ! graphOnly ){
sh fetchLogs( pureTestName )
if( !isSCPF )
- publishToConfluence( prop, testCategory[ testName ][ 'wiki_link' ], workSpace + "/" + testCategory[ testName ][ 'wiki_file' ] )
+ publishToConfluence( prop[ "manualRun" ], prop[ "postResult" ],
+ testCategory[ testName ][ 'wiki_link' ],
+ workSpace + "/" + testCategory[ testName ][ 'wiki_file' ] )
}
}
diff --git a/TestON/JenkinsFile/JenkinsTestONTests.groovy b/TestON/JenkinsFile/JenkinsTestONTests.groovy
index 9ffc948..8364774 100644
--- a/TestON/JenkinsFile/JenkinsTestONTests.groovy
+++ b/TestON/JenkinsFile/JenkinsTestONTests.groovy
@@ -50,20 +50,23 @@
"FUNCbgpls" : [ "basic":true, "extra_A":false, "extra_B":false, "new_Test":false, "day":"", wiki_link:wikiPrefix + "-" + "FUNCbgpls", wiki_file:"FUNCbgplsWiki.txt" ],
"VPLSBasic" : [ "basic":true, "extra_A":false, "extra_B":false, "new_Test":false, "day":"", wiki_link:wikiPrefix + "-" + "VPLSBasic", wiki_file:"VPLSBasicWiki.txt" ],
"VPLSfailsafe" : [ "basic":true, "extra_A":false, "extra_B":false, "new_Test":false, "day":"", wiki_link:wikiPrefix + "-" + "VPLSfailsafe", wiki_file:"VPLSfailsafeWiki.txt" ],
- "PLATdockertest": [ "basic":true, "extra_A":true, "extra_B":false, "new_Test":false, "day":"", wiki_link:"Docker Images sanity test", wiki_file:"PLATdockertestTableWiki.txt" ],
- "SRSanity": [ "basic":false, "extra_A":false, "extra_B":false, "new_Test":false, "day":"", wiki_link:wikiPrefix + "-" + "SR Sanity", wiki_file:"SRSanityWiki.txt" ],
- "SRSwitchFailure": [ "basic":false, "extra_A":false, "extra_B":false, "new_Test":false, "day":"", wiki_link:wikiPrefix + "-" + "SR Switch Failure", wiki_file:"SRSwitchFailureWiki.txt" ],
- "SRLinkFailure": [ "basic":false, "extra_A":false, "extra_B":false, "new_Test":false, "day":"", wiki_link:wikiPrefix + "-" + "SR Link Failure", wiki_file:"SRLinkFailureWiki.txt" ],
- "SROnosFailure": [ "basic":false, "extra_A":false, "extra_B":false, "new_Test":false, "day":"", wiki_link:wikiPrefix + "-" + "SR Onos node Failure", wiki_file:"SROnosFailureWiki.txt" ],
- "SRClusterRestart": [ "basic":false, "extra_A":false, "extra_B":false, "new_Test":false, "day":"", wiki_link:wikiPrefix + "-" + "SR Cluster Restart", wiki_file:"SRClusterRestartWiki.txt" ],
- "SRDynamic": [ "basic":false, "extra_A":false, "extra_B":false, "new_Test":false, "day":"", wiki_link:wikiPrefix + "-" + "SR Dynamic Config", wiki_file:"SRDynamicWiki.txt" ],
- "SRHighAvailability": [ "basic":false, "extra_A":false, "extra_B":false, "new_Test":false, "day":"", wiki_link:wikiPrefix + "-" + "SR High Availability", wiki_file:"SRHighAvailabilityWiki.txt" ],
"USECASE_SdnipFunction": [ "basic":true, "extra_A":false, "extra_B":false, "new_Test":false, "day":"", wiki_link:wikiPrefix + "-" + "SDNIP Function", wiki_file:"USECASE_SdnipFunctionWiki.txt" ],
- "USECASE_SdnipFunctionCluster": [ "basic":true, "extra_A":false, "extra_B":false, "new_Test":false, "day":"", wiki_link:wikiPrefix + "-" + "SDNIP Function Cluster", wiki_file:"USECASE_SdnipFunctionClusterWiki.txt" ]
+ "USECASE_SdnipFunctionCluster": [ "basic":true, "extra_A":false, "extra_B":false, "new_Test":false, "day":"", wiki_link:wikiPrefix + "-" + "SDNIP Function Cluster", wiki_file:"USECASE_SdnipFunctionClusterWiki.txt" ],
+ "PLATdockertest": [ "basic":false, "extra_A":true, "extra_B":false, "new_Test":false, "day":"", wiki_link:"Docker Images sanity test", wiki_file:"PLATdockertestTableWiki.txt" ]
],
"SR":[
"SRBridging": [ "basic":true, "extra_A":false, "extra_B":false, "new_Test":false, "day":"", wiki_link:wikiPrefix + "-" + "SR Bridging", wiki_file:"SRBridgingWiki.txt" ],
- "SRRouting": [ "basic":true, "extra_A":false, "extra_B":false, "new_Test":false, "day":"", wiki_link:wikiPrefix + "-" + "SR Routing", wiki_file:"SRRoutingWiki.txt" ]
+ "SRRouting": [ "basic":true, "extra_A":false, "extra_B":false, "new_Test":false, "day":"", wiki_link:wikiPrefix + "-" + "SR Routing", wiki_file:"SRRoutingWiki.txt" ],
+ "SRDhcprelay": [ "basic":true, "extra_A":false, "extra_B":false, "new_Test":false, "day":"", wiki_link:wikiPrefix + "-" + "SR Dhcp Relay", wiki_file:"SRDhcprelayWiki.txt" ],
+ "SRDynamicConf": [ "basic":true, "extra_A":false, "extra_B":false, "new_Test":false, "day":"", wiki_link:wikiPrefix + "-" + "SR Dynamic Config", wiki_file:"SRDynamicConfWiki.txt" ],
+ "SRMulticast": [ "basic":true, "extra_A":false, "extra_B":false, "new_Test":false, "day":"", wiki_link:wikiPrefix + "-" + "SR Multi Cast", wiki_file:"SRMulticastWiki.txt" ],
+ "SRSanity": [ "basic":false, "extra_A":true, "extra_B":false, "new_Test":false, "day":"", wiki_link:wikiPrefix + "-" + "SR Sanity", wiki_file:"SRSanityWiki.txt" ],
+ "SRSwitchFailure": [ "basic":false, "extra_A":true, "extra_B":false, "new_Test":false, "day":"", wiki_link:wikiPrefix + "-" + "SR Switch Failure", wiki_file:"SRSwitchFailureWiki.txt" ],
+ "SRLinkFailure": [ "basic":false, "extra_A":true, "extra_B":false, "new_Test":false, "day":"", wiki_link:wikiPrefix + "-" + "SR Link Failure", wiki_file:"SRLinkFailureWiki.txt" ],
+ "SROnosFailure": [ "basic":false, "extra_A":true, "extra_B":false, "new_Test":false, "day":"", wiki_link:wikiPrefix + "-" + "SR Onos node Failure", wiki_file:"SROnosFailureWiki.txt" ],
+ "SRClusterRestart": [ "basic":false, "extra_A":false, "extra_B":true, "new_Test":false, "day":"", wiki_link:wikiPrefix + "-" + "SR Cluster Restart", wiki_file:"SRClusterRestartWiki.txt" ],
+ "SRDynamic": [ "basic":false, "extra_A":false, "extra_B":true, "new_Test":false, "day":"", wiki_link:wikiPrefix + "-" + "SR Dynamic", wiki_file:"SRDynamicWiki.txt" ],
+ "SRHighAvailability": [ "basic":false, "extra_A":false, "extra_B":true, "new_Test":false, "day":"", wiki_link:wikiPrefix + "-" + "SR High Availability", wiki_file:"SRHighAvailabilityWiki.txt" ]
]
];
}
diff --git a/TestON/JenkinsFile/JenkinsfileTrigger b/TestON/JenkinsFile/JenkinsfileTrigger
index c624984..39230f9 100644
--- a/TestON/JenkinsFile/JenkinsfileTrigger
+++ b/TestON/JenkinsFile/JenkinsfileTrigger
@@ -2,17 +2,19 @@
funcs = evaluate readTrusted( 'TestON/JenkinsFile/JenkinsCommonFuncs.groovy' )
test_lists = evaluate readTrusted( 'TestON/JenkinsFile/JenkinsTestONTests.groovy' )
+triggerFuncs = evaluate readTrusted( 'TestON/JenkinsFile/TriggerFuncs.groovy' )
previous_version = "1.12"
before_previous_version = "1.11"
funcs.initializeTrend( "VM" );
-
+triggerFuncs.init( funcs )
+wikiContents = ""
testcases = [
- "FUNC" : [ tests : "" , nodeName : "VM" ],
- "HA" : [ tests : "" , nodeName : "VM" ],
- "SR" : [ tests : "", nodeName : "VM" ],
- "SCPF" : [ tests : "" , nodeName : "BM" ],
- "USECASE" : [ tests : "" , nodeName : "BM" ]
+ "FUNC" : [ tests : "" , nodeName : "VM", wikiContent : "" ],
+ "HA" : [ tests : "" , nodeName : "VM", wikiContent : "" ],
+ "SCPF" : [ tests : "" , nodeName : "BM", wikiContent : "" ],
+ "SR" : [ tests : "", nodeName : "Fabric", wikiContent : "" ],
+ "USECASE" : [ tests : "" , nodeName : "BM", wikiContent : "" ]
]
Prefix_organizer = [
"FU" : "FUNC",
@@ -64,15 +66,15 @@
+ "Starting tests on : " + now.toString()
+ "\n:sparkles::sparkles::sparkles::sparkles::sparkles::sparkles::sparkles::sparkles::sparkles::sparkles::sparkles::sparkles::sparkles::sparkles::sparkles::sparkles:" )
testDivider( today )
- FUNC_choices = lastCommaRemover( FUNC_choices )
- HA_choices = lastCommaRemover( HA_choices )
- SCPF_choices = lastCommaRemover( SCPF_choices )
- USECASE_choices = lastCommaRemover( USECASE_choices )
- SR_choices = lastCommaRemover( SR_choices )
+ FUNC_choices = triggerFuncs.lastCommaRemover( FUNC_choices )
+ HA_choices = triggerFuncs.lastCommaRemover( HA_choices )
+ SCPF_choices = triggerFuncs.lastCommaRemover( SCPF_choices )
+ USECASE_choices = triggerFuncs.lastCommaRemover( USECASE_choices )
+ SR_choices = triggerFuncs.lastCommaRemover( SR_choices )
}
if ( manually_run ){
- organize_tests( params.Tests )
+ testcases = triggerFuncs.organize_tests( params.Tests, testcases )
isOldFlow = params.isOldFlow
println "Tests to be run manually : "
@@ -85,7 +87,7 @@
println "Defaulting to " + day + " tests:"
}
-print_tests( testcases )
+triggerFuncs.print_tests( testcases )
def runTest = [
"VM" : [:],
@@ -94,12 +96,12 @@
for( String test in testcases.keySet() ){
println test
if ( testcases[ test ][ "tests" ] != "" ){
- runTest[ testcases[ test ][ "nodeName" ] ][ test ] = trigger_pipeline( onos_b, testcases[ test ][ "tests" ], testcases[ test ][ "nodeName" ], test, manually_run, onos_tag )
+ runTest[ testcases[ test ][ "nodeName" ] ][ test ] = triggerFuncs.trigger_pipeline( onos_b, testcases[ test ][ "tests" ], testcases[ test ][ "nodeName" ], test, manually_run, onos_tag )
}
}
def finalList = [:]
-finalList[ "VM" ] = runTestSeq( runTest[ "VM" ] )
-finalList[ "BM" ] = runTestSeq( runTest[ "BM" ] )
+finalList[ "VM" ] = triggerFuncs.runTestSeq( runTest[ "VM" ] )
+finalList[ "BM" ] = triggerFuncs.runTestSeq( runTest[ "BM" ] )
parallel finalList
//finalList[ "BM" ].call()
@@ -114,349 +116,292 @@
def testDivider( today ){
switch ( today ) {
case Calendar.MONDAY:
+ initHtmlForWiki()
monday( true )
tuesday( true, false )
wednesday( true, false )
thursday( true, false )
friday( true, false )
+ saturday( false, false )
+ sunday( false, false )
day = "Monday"
+ closeHtmlForWiki()
+ postToWiki( wikiContents )
-        slackSend( color:'#FFD988', message:"Tests to be run this weekdays : \n" + printDaysForTest() )
+        slackSend( color:'#FFD988', message:"Tests to be run these weekdays : \n" + triggerFuncs.printDaysForTest( AllTheTests ) )
break
case Calendar.TUESDAY:
- tuesday( true, true )
+ tuesday( false, true )
day = "Tuesday"
break
case Calendar.WEDNESDAY:
- wednesday( true, true )
+ wednesday( false, true )
day = "Wednesday"
break
case Calendar.THURSDAY:
- thursday( true, true )
+ thursday( false, true )
day = "Thursday"
- isOldFlow = false
break
case Calendar.FRIDAY:
- friday( true, true )
+ friday( false, true )
day = "Friday"
- isOldFlow = false
break
case Calendar.SATURDAY:
- saturday()
+ saturday( false, true )
day = "Saturday"
break
case Calendar.SUNDAY:
- sunday()
+            sunday( false, true )
day = "Sunday"
isOldFlow = false
break
}
}
-def printDaysForTest(){
- result = ""
- for ( String test in AllTheTests.keySet() ){
- result += test + " : \n"
- for( String each in AllTheTests[ test ].keySet() ){
- AllTheTests[ test ][ each ][ "day" ] = lastCommaRemover( AllTheTests[ test ][ each ][ "day" ] )
- result += " " + each + ":[" + AllTheTests[ test ][ each ][ "day" ] + "]\n"
- }
- result += "\n"
- }
- return result
-}
-def lastCommaRemover( str ){
- if ( str.size() > 0 && str[ str.size() - 1 ] == ',' ){
- str = str.substring( 0,str.size() - 1 )
- }
- return str
-}
def monday( getResult ){
+ addingHeader( "FUNC" )
FUNC_choices += adder( "FUNC", "basic", true, "M", getResult )
FUNC_choices += adder( "FUNC", "new_Test", true, "M", getResult )
FUNC_choices += adder( "FUNC", "extra_A", true, "M", getResult )
+ closingHeader( "FUNC" )
+ addingHeader( "HA" )
HA_choices += adder( "HA", "basic", true, "M", getResult )
HA_choices += adder( "HA", "extra_A", true, "M", getResult )
//HA_choices += adder( "HA", "new_Test", true, "M", getResult )
- SR_choices += adder( "SR", "basic", true, "M", getResult )
+ closingHeader( "HA" )
+ addingHeader( "SCPF" )
SCPF_choices += adder( "SCPF", "basic", true, "M", getResult )
SCPF_choices += adder( "SCPF", "extra_B", true, "M", getResult )
+ closingHeader( "SCPF" )
+ addingHeader( "SR" )
+ SR_choices += adder( "SR", "basic", true, "M", false )
+ closingHeader( "SR" )
+ addingHeader( "USECASE" )
+ closingHeader( "USECASE" )
}
def tuesday( getDay, getResult ){
+ addingHeader( "FUNC" )
FUNC_choices += adder( "FUNC", "basic", getDay, "T", getResult )
FUNC_choices += adder( "FUNC", "new_Test", getDay, "T", getResult )
FUNC_choices += adder( "FUNC", "extra_B", getDay, "T", getResult )
+ closingHeader( "FUNC" )
+ addingHeader( "HA" )
HA_choices += adder( "HA", "basic", getDay, "T", getResult )
HA_choices += adder( "HA", "extra_B", getDay, "T", getResult )
HA_choices += adder( "HA", "new_Test", getDay, "T", getResult )
- SR_choices += adder( "SR", "basic", getDay, "T", getResult )
+ closingHeader( "HA" )
+ addingHeader( "SCPF" )
SCPF_choices += adder( "SCPF", "basic", getDay, "T", getResult )
SCPF_choices += adder( "SCPF", "extra_C", getDay, "T", getResult )
+ closingHeader( "SCPF" )
+ addingHeader( "SR" )
+ SR_choices += adder( "SR", "basic", getDay, "T", false )
+ closingHeader( "SR" )
+ addingHeader( "USECASE" )
USECASE_choices += adder( "USECASE", "basic", getDay, "T", getResult )
USECASE_choices += adder( "USECASE", "extra_A", getDay, "T", getResult )
- USECASE_choices += adder( "USECASE", "new_Test", getDay, "T", getResult )
+ closingHeader( "USECASE" )
}
def wednesday( getDay, getResult ){
+ addingHeader( "FUNC" )
FUNC_choices += adder( "FUNC", "basic", getDay, "W", getResult )
FUNC_choices += adder( "FUNC", "new_Test", getDay, "W", getResult )
FUNC_choices += adder( "FUNC", "extra_A", getDay, "W", getResult )
+ closingHeader( "FUNC" )
+ addingHeader( "HA" )
HA_choices += adder( "HA", "basic", getDay, "W", getResult )
HA_choices += adder( "HA", "extra_A", getDay, "W", getResult )
//HA_choices += adder( "HA", "new_Test", getDay, "W", getResult )
- SR_choices += adder( "SR", "basic", getDay, "W", getResult )
+ closingHeader( "HA" )
+ addingHeader( "SCPF" )
SCPF_choices += adder( "SCPF", "basic", getDay, "W", getResult )
SCPF_choices += adder( "SCPF", "extra_A", getDay, "W", getResult )
SCPF_choices += adder( "SCPF", "new_Test", getDay, "W", getResult )
+ closingHeader( "SCPF" )
+ addingHeader( "SR" )
+ SR_choices += adder( "SR", "basic", getDay, "W", false )
+ closingHeader( "SR" )
+ addingHeader( "USECASE" )
+ closingHeader( "USECASE" )
}
def thursday( getDay, getResult ){
+ addingHeader( "FUNC" )
FUNC_choices += adder( "FUNC", "basic", getDay, "Th", getResult )
FUNC_choices += adder( "FUNC", "new_Test", getDay, "Th", getResult )
FUNC_choices += adder( "FUNC", "extra_B", getDay, "Th", getResult )
+ closingHeader( "FUNC" )
+ addingHeader( "HA" )
HA_choices += adder( "HA", "basic", getDay, "Th", getResult )
HA_choices += adder( "HA", "extra_B", getDay, "Th", getResult )
HA_choices += adder( "HA", "new_Test", getDay, "Th", getResult )
- SR_choices += adder( "SR", "basic", getDay, "Th", getResult )
+ closingHeader( "HA" )
+ addingHeader( "SCPF" )
SCPF_choices += adder( "SCPF", "basic", getDay, "Th", getResult )
SCPF_choices += adder( "SCPF", "extra_B", getDay, "Th", getResult )
+ closingHeader( "SCPF" )
+ addingHeader( "SR" )
+ SR_choices += adder( "SR", "basic", getDay, "Th", false )
+ closingHeader( "SR" )
+ addingHeader( "USECASE" )
+ closingHeader( "USECASE" )
}
def friday( getDay, getResult ){
+ addingHeader( "FUNC" )
FUNC_choices += adder( "FUNC", "basic", getDay, "F", getResult )
FUNC_choices += adder( "FUNC", "new_Test", getDay, "F", getResult )
FUNC_choices += adder( "FUNC", "extra_A", getDay, "F", getResult )
+ closingHeader( "FUNC" )
+ addingHeader( "HA" )
HA_choices += adder( "HA", "basic", getDay, "F", getResult )
HA_choices += adder( "HA", "extra_A", getDay, "F", getResult )
//HA_choices += adder( "HA", "new_Test", getDay, "F", getResult )
- SR_choices += adder( "SR", "basic", getDay, "F", getResult )
+ closingHeader( "HA" )
+ addingHeader( "SCPF" )
SCPF_choices += adder( "SCPF", "basic", getDay, "F", getResult )
SCPF_choices += adder( "SCPF", "extra_A", getDay, "F", getResult )
SCPF_choices += adder( "SCPF", "extra_D", getDay, "F", getResult )
+ closingHeader( "SCPF" )
+ addingHeader( "SR" )
+ SR_choices += adder( "SR", "basic", getDay, "F", false )
+ SR_choices += adder( "SR", "extra_A", getDay, "F", false )
+ closingHeader( "SR" )
+ addingHeader( "USECASE" )
+ closingHeader( "USECASE" )
}
-def saturday(){
- FUNC_choices += adder( "FUNC", "basic", false, "Sa", true )
- FUNC_choices += adder( "FUNC", "extra_A", false, "Sa", true )
- FUNC_choices += adder( "FUNC", "extra_B", false, "Sa", true )
- FUNC_choices += adder( "FUNC", "new_Test", true, "Sa", true )
- HA_choices += adder( "HA", "basic", false, "Sa", true )
- HA_choices += adder( "HA", "extra_A", false, "Sa", true )
- HA_choices += adder( "HA", "extra_B", false, "Sa", true )
- HA_choices += adder( "HA", "new_Test", false, "Sa", true )
- SR_choices += adder( "SR", "basic", false, "Sa", true )
- SCPF_choices += adder( "SCPF", "basic", false, "Sa", true )
- SCPF_choices += adder( "SCPF", "extra_A", false, "Sa", true )
- SCPF_choices += adder( "SCPF", "extra_B", false, "Sa", true )
- SCPF_choices += adder( "SCPF", "extra_C", false, "Sa", true )
- SCPF_choices += adder( "SCPF", "extra_D", false, "Sa", true )
- SCPF_choices += adder( "SCPF", "new_Test", false, "Sa", true )
- USECASE_choices += adder( "USECASE", "basic", false, "Sa", true )
+def saturday( getDay, getResult ){
+ addingHeader( "FUNC" )
+ FUNC_choices += adder( "FUNC", "basic", getDay, "Sa", getResult )
+ FUNC_choices += adder( "FUNC", "extra_A", getDay, "Sa", getResult )
+ FUNC_choices += adder( "FUNC", "extra_B", getDay, "Sa", getResult )
+ FUNC_choices += adder( "FUNC", "new_Test", getDay, "Sa", getResult )
+ closingHeader( "FUNC" )
+ addingHeader( "HA" )
+ HA_choices += adder( "HA", "basic", getDay, "Sa", getResult )
+ HA_choices += adder( "HA", "extra_A", getDay, "Sa", getResult )
+ HA_choices += adder( "HA", "extra_B", getDay, "Sa", getResult )
+ HA_choices += adder( "HA", "new_Test", getDay, "Sa", getResult )
+ closingHeader( "HA" )
+ addingHeader( "SCPF" )
+ SCPF_choices += adder( "SCPF", "basic", getDay, "Sa", getResult )
+ SCPF_choices += adder( "SCPF", "extra_A", getDay, "Sa", getResult )
+ SCPF_choices += adder( "SCPF", "extra_B", getDay, "Sa", getResult )
+ SCPF_choices += adder( "SCPF", "extra_C", getDay, "Sa", getResult )
+ SCPF_choices += adder( "SCPF", "extra_D", getDay, "Sa", getResult )
+ SCPF_choices += adder( "SCPF", "new_Test", getDay, "Sa", getResult )
+ closingHeader( "SCPF" )
+ addingHeader( "SR" )
+ SR_choices += adder( "SR", "basic", getDay, "Sa", false )
+ SR_choices += adder( "SR", "extra_B", getDay, "Sa", false )
+ closingHeader( "SR" )
+ addingHeader( "USECASE" )
+ USECASE_choices += adder( "USECASE", "basic", getDay, "Sa", getResult )
+ closingHeader( "USECASE" )
}
-def sunday(){
- FUNC_choices += adder( "FUNC", "basic", false, "S", true )
- FUNC_choices += adder( "FUNC", "extra_A", false, "S", true )
- FUNC_choices += adder( "FUNC", "extra_B", false, "S", true )
- HA_choices += adder( "HA", "basic", false, "S", true )
- HA_choices += adder( "HA", "extra_A", false, "S", true )
- HA_choices += adder( "HA", "extra_B", false, "S", true )
- SR_choices += adder( "SR", "basic", false, "S", true )
- SCPF_choices += adder( "SCPF", "basic", false, "S", true )
- SCPF_choices += adder( "SCPF", "extra_A", false, "S", true )
- SCPF_choices += adder( "SCPF", "extra_B", false, "S", true )
- SCPF_choices += adder( "SCPF", "extra_C", false, "S", true )
- SCPF_choices += adder( "SCPF", "extra_D", false, "S", true )
- USECASE_choices += adder( "USECASE", "basic", false, "S", true )
+def sunday( getDay, getResult ){
+ addingHeader( "FUNC" )
+ FUNC_choices += adder( "FUNC", "basic", getDay, "S", getResult )
+ FUNC_choices += adder( "FUNC", "extra_A", getDay, "S", getResult )
+ FUNC_choices += adder( "FUNC", "extra_B", getDay, "S", getResult )
+ closingHeader( "FUNC" )
+ addingHeader( "HA" )
+ HA_choices += adder( "HA", "basic", getDay, "S", getResult )
+ HA_choices += adder( "HA", "extra_A", getDay, "S", getResult )
+ HA_choices += adder( "HA", "extra_B", getDay, "S", getResult )
+ closingHeader( "HA" )
+ addingHeader( "SCPF" )
+ SCPF_choices += adder( "SCPF", "basic", getDay, "S", getResult )
+ SCPF_choices += adder( "SCPF", "extra_A", getDay, "S", getResult )
+ SCPF_choices += adder( "SCPF", "extra_B", getDay, "S", getResult )
+ SCPF_choices += adder( "SCPF", "extra_C", getDay, "S", getResult )
+ SCPF_choices += adder( "SCPF", "extra_D", getDay, "S", getResult )
+ closingHeader( "SCPF" )
+ addingHeader( "SR" )
+ SR_choices += adder( "SR", "basic", getDay, "S", false )
+ closingHeader( "SR" )
+ addingHeader( "USECASE" )
+ USECASE_choices += adder( "USECASE", "basic", getDay, "S", getResult )
+ closingHeader( "USECASE" )
}
def adder( testCat, set, dayAdding, day, getResult ){
result = ""
- for( String test in AllTheTests[ testCat ].keySet() ){
+ for( String test in AllTheTests[ testCat ].keySet() ){
if( AllTheTests[ testCat ][ test ][ set ] ){
if( getResult )
result += test + ","
if( dayAdding )
dayAdder( testCat, test, day )
+ makeHtmlColList( testCat, test )
}
}
return result
}
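+// The helpers below assemble the Confluence schedule table: initHtmlForWiki
+// opens the table and a <tr> per test category, addingHeader opens a day's
+// <td><ul> cell, makeHtmlColList adds an <li> per scheduled test,
+// closingHeader closes the cell, and closeHtmlForWiki closes each row and
+// appends the footer notes before postToWiki publishes the page.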
+def initHtmlForWiki(){
+ wikiContents = '''
+ <table class="wrapped confluenceTable">
+ <colgroup>
+ <col />
+ <col />
+ <col />
+ <col />
+ <col />
+ <col />
+ </colgroup>
+ <tbody>
+ <tr>
+ <th colspan="1" class="confluenceTh">
+ <br />
+ </th>
+ <th class="confluenceTh"><p>Monday</p></th>
+ <th class="confluenceTh"><p>Tuesday</p></th>
+ <th class="confluenceTh"><p>Wednesday</p></th>
+ <th class="confluenceTh"><p>Thursday</p></th>
+ <th class="confluenceTh"><p>Friday</p></th>
+ <th class="confluenceTh"><p>Saturday</p></th>
+ <th class="confluenceTh"><p>Sunday</p></th>
+ </tr>'''
+ for( String test in testcases.keySet() ){
+ testcases[ test ][ 'wikiContent' ] = '''
+ <tr>
+ <th colspan="1" class="confluenceTh">''' + test + '''</th>'''
+ }
+}
+def addingHeader( testCategory ){
+ testcases[ testCategory ][ 'wikiContent' ] += '''
+ <td class="confluenceTd">
+ <ul>'''
+}
+def makeHtmlColList( testCategory, testName ){
+ testcases[ testCategory ][ 'wikiContent' ] += '''
+ <li>'''+ testName +'''</li>'''
+
+}
+def closingHeader( testCategory ){
+ testcases[ testCategory ][ 'wikiContent' ] += '''
+ </ul>
+ </td>'''
+}
+def closeHtmlForWiki(){
+ for( String test in testcases.keySet() ){
+ wikiContents += testcases[ test ][ 'wikiContent' ]
+ wikiContents += '''
+ </tr>'''
+ }
+ wikiContents += '''
+ </tbody>
+ </table>
+ <p><strong>Everyday</strong>, all SegmentRouting tests are built and run on every branch.</p>
+ <p>On <strong>Weekdays</strong>, all the other tests are built and run on the master branch.</p>
+ <p>On <strong>Saturdays</strong>, all the other tests are built and run on the '''+ funcs.branchWithPrefix( previous_version ) +''' branch.</p>
+ <p>On <strong>Sundays</strong>, all the other tests are built and run on the '''+ funcs.branchWithPrefix( before_previous_version ) +''' branch.</p>'''
+}
+def postToWiki( contents ){
+ node( testMachine ){
+ workspace = "/var/jenkins/workspace/all-pipeline-trigger/"
+ filename = "jenkinsSchedule.txt"
+ writeFile file: workspace + filename, text: contents
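+        // arguments: isManualRun = "false", isPostResult = "true" ( see funcs.publishToConfluence )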
+ funcs.publishToConfluence( "false", "true",
+ "Automated Test Schedule",
+ workspace + filename )
+ }
+}
def dayAdder( testCat, testName, dayOfWeek ){
AllTheTests[ testCat ][ testName ][ "day" ] += dayOfWeek + ","
-}
-def runTestSeq( testList ){
- return{
- for ( test in testList.keySet() ){
- testList[ test ].call()
- }
- }
-}
-
-def print_tests( tests ){
- for( String test in tests.keySet() ){
- if( tests[ test ][ "tests" ] != "" ){
- println test + ":"
- println tests[ test ][ "tests" ]
- }
- }
-}
-def organize_tests( tests ){
- testList = tests.tokenize( "\n;, " )
- for( String test in testList )
- testcases [ Prefix_organizer[ ( test == "FUNCbgpls" || test == "FUNCvirNetNB" ? "US" : ( test[ 0 ] + test[ 1 ] ) ) ] ][ "tests" ] += test + ","
-}
-def borrow_mn( jobOn ){
- result = ""
- if( jobOn == "SR" ){
- result = "~/cell_borrow.sh"
- }
- return result
-}
-def trigger( branch, tests, nodeName, jobOn, manuallyRun, onosTag ){
- println jobOn + "-pipeline-" + manuallyRun ? "manually" : branch
- wiki = branch
- branch = funcs.branchWithPrefix( branch )
- test_branch = "master"
- node( "TestStation-" + nodeName + "s" ){
- envSetup( branch, test_branch, onosTag, jobOn, manuallyRun )
-
- exportEnvProperty( branch, test_branch, wiki, tests, post_result, manuallyRun, onosTag, isOldFlow )
- }
-
- jobToRun = jobOn + "-pipeline-" + ( manuallyRun ? "manually" : wiki )
- build job: jobToRun, propagate: false
-}
-def trigger_pipeline( branch, tests, nodeName, jobOn, manuallyRun, onosTag ){
-// nodeName : "BM" or "VM"
-// jobOn : "SCPF" or "USECASE" or "FUNC" or "HA"
- return{
- if( jobOn == "SR" ){
- trigger( "1.11", tests, nodeName, jobOn, manuallyRun, onosTag )
- trigger( "1.12", tests, nodeName, jobOn, manuallyRun, onosTag )
- trigger( "master", tests, nodeName, jobOn, manuallyRun, onosTag )
- // returnCell( nodeName )
- }else{
- trigger( branch, tests, nodeName, jobOn, manuallyRun, onosTag )
- }
- }
-}
-
-// export Environment properties.
-def exportEnvProperty( onos_branch, test_branch, wiki, tests, postResult, manually_run, onosTag, isOldFlow ){
- stage( "export Property" ){
- sh '''
- echo "ONOSBranch=''' + onos_branch +'''" > /var/jenkins/TestONOS.property
- echo "TestONBranch=''' + test_branch +'''" >> /var/jenkins/TestONOS.property
- echo "ONOSTag='''+ onosTag +'''" >> /var/jenkins/TestONOS.property
- echo "WikiPrefix=''' + wiki +'''" >> /var/jenkins/TestONOS.property
- echo "ONOSJVMHeap='''+ env.ONOSJVMHeap +'''" >> /var/jenkins/TestONOS.property
- echo "Tests=''' + tests +'''" >> /var/jenkins/TestONOS.property
- echo "postResult=''' + postResult +'''" >> /var/jenkins/TestONOS.property
- echo "manualRun=''' + manually_run +'''" >> /var/jenkins/TestONOS.property
- echo "isOldFlow=''' + isOldFlow +'''" >> /var/jenkins/TestONOS.property
-
- '''
- }
-}
-// Initialize the environment Setup for the onos and OnosSystemTest
-def envSetup( onos_branch, test_branch, onos_tag, jobOn, manuallyRun ){
- stage( "envSetup" ) {
- // after env: ''' + borrow_mn( jobOn ) + '''
- sh '''#!/bin/bash -l
- set +e
- . ~/.bashrc
- env
- ''' + preSetup( onos_branch, test_branch, onos_tag, manuallyRun ) + '''
- ''' + oldFlowCheck( jobOn, onos_branch ) + '''
- ''' + postSetup( onos_branch, test_branch, onos_tag, manuallyRun )
- }
-}
-def tagCheck( onos_tag, onos_branch ){
- result = "git checkout "
- if ( onos_tag == "" )
- result += onos_branch //create new local branch
- else
- result += onos_tag //checkout the tag
- return result
-}
-def preSetup( onos_branch, test_branch, onos_tag, isManual ){
- result = ""
- if( !isManual ){
- result = '''echo -e "\n##### Set TestON Branch #####"
- echo "TestON Branch is set on: ''' + test_branch + '''"
-
- cd ~/OnosSystemTest/
- git checkout HEAD~1 # Make sure you aren't pn a branch
- git branch | grep -v "detached from" | xargs git branch -d # delete all local branches merged with remote
- git branch -D ''' + test_branch + ''' # just incase there are local changes. This will normally result in a branch not found error
- git clean -df # clean any local files
- git fetch --all # update all caches from remotes
- git reset --hard origin/''' + test_branch +''' # force local index to match remote branch
- git clean -df # clean any local files
- git checkout ''' + test_branch + ''' #create new local branch
- git branch
- git log -1 --decorate
-
-
- echo -e "\n##### Set ONOS Branch #####"
- echo "ONOS Branch is set on: ''' + onos_branch + '''"
-
- echo -e "\n #### check karaf version ######"
- env |grep karaf
-
- cd ~/onos
- rm -rf buck-out/*
- ~/onos/tools/build/onos-buck clean
- git checkout HEAD~1 # Make sure you aren't pn a branch
- git branch | grep -v "detached from" | xargs git branch -d # delete all local branches merged with remote
- git branch -D ''' + onos_branch + ''' # just incase there are local changes. This will normally result in a branch not found error
- git clean -df # clean any local files
- git fetch --all # update all caches from remotes
- git reset --hard origin/''' + onos_branch + ''' # force local index to match remote branch
- git clean -df # clean any local files
- ''' + tagCheck( onos_tag, onos_branch ) + '''
- git branch
- git log -1 --decorate
-
-
- echo -e "\n##### set jvm heap size to 8G #####"
- echo ${ONOSJVMHeap}
-
- inserted_line="export JAVA_OPTS=\"\${ONOSJVMHeap}\""
- sed -i "s/bash/bash\\n$inserted_line/" ~/onos/tools/package/bin/onos-service
-
- echo "##### Check onos-service setting..... #####"
- cat ~/onos/tools/package/bin/onos-service
-
- export JAVA_HOME=/usr/lib/jvm/java-8-oracle'''
- }
- return result
-}
-def oldFlowCheck( jobOn, onos_branch ){
- result = ""
- if( jobOn == "SCPF" && ( onos_branch== "master" || onos_branch=="onos-1.12" ) )
- result = '''sed -i -e 's/@Component(immediate = true)/@Component(enabled = false)/g' ~/onos/core/store/dist/src/main/java/org/onosproject/store/flow/impl/''' + ( isOldFlow ? "DistributedFlowRuleStore" : "ECFlowRuleStore" ) + '''.java
- sed -i -e 's/@Component(enabled = false)/@Component(immediate = true)/g' ~/onos/core/store/dist/src/main/java/org/onosproject/store/flow/impl/''' + ( isOldFlow ? "ECFlowRuleStore" : "DistributedFlowRuleStore" ) + ".java"
- return result
-}
-def postSetup( onos_branch, test_branch, onos_tag, isManual ){
- result = ""
- if( !isManual ){
- result = '''echo -e "\n##### build ONOS skip unit tests ######"
- #mvn clean install -DskipTests
- # Force buck update
- rm -f ~/onos/bin/buck
- ~/onos/tools/build/onos-buck build onos
-
- sleep 30
- echo -e "\n##### Stop all running instances of Karaf #####"
- kill $(ps -efw | grep karaf | grep -v grep | awk '{print $2}')
- sleep 30
-
- git branch'''
- }
- return result
-}
-def returnCell( nodeName ){
- node( "TestStation-" + nodeName + "s" ){
- sh '''#!/bin/bash -l
- set +e
- . ~/.bashrc
- env
- ~/./return_cell.sh
- '''
- }
-}
+}
\ No newline at end of file
diff --git a/TestON/JenkinsFile/PerformanceFuncs.groovy b/TestON/JenkinsFile/PerformanceFuncs.groovy
index 5761497..0fa4795 100644
--- a/TestON/JenkinsFile/PerformanceFuncs.groovy
+++ b/TestON/JenkinsFile/PerformanceFuncs.groovy
@@ -14,7 +14,7 @@
SCPFscaleTopo: [ flows:false, test:'SCPFscaleTopo', table:'scale_topo_latency_details', results:'scale_topo_latency_results', file:'/tmp/scaleTopoResultDb', rFile:'SCPFscaleTopo.R', extra:none, finalResult:1, graphTitle:[ 'Scale Topology Test' ], dbCols:[ 'first_connection_to_last_connection, last_connection_to_last_role_request, last_role_request_to_last_topology' ], dbWhere:'AND scale=20' , y_axis:'Latency (s)' ],
SCPFswitchLat: [ flows:false, test:'SCPFswitchLat', table:'switch_latency_details', results:'switch_latency_results', file:'/tmp/switchEventResultDb', rFile:'SCPFswitchLat.R', extra:none, finalResult:1, graphTitle:[ 'Switch Latency Test - Switch Up','Switch Latency Test - Switch Down' ], dbCols:[ 'tcp_to_feature_reply_avg,feature_reply_to_role_request_avg,role_request_to_role_reply_avg,role_reply_to_device_avg,up_device_to_graph_avg', 'fin_ack_to_ack_avg,ack_to_device_avg,down_device_to_graph_avg' ], dbWhere:'AND scale=5', y_axis:'Latency (ms)' ],
SCPFbatchFlowResp: [ flows:true, test:'SCPFbatchFlowResp', table:'batch_flow_tests', results:'batch_flow_results', file:'SCPFbatchFlowRespData', rFile:'SCPFbatchFlowResp.R', extra:none, finalResult:1, graphTitle:[ 'Batch Flow Test - Post', 'Batch Flow Test - Del' ], dbCols:[ 'elapsepost, posttoconfrm', 'elapsedel, deltoconfrm' ], dbWhere:'', y_axis:'Latency (ms)' ],
- SCPFintentEventTp: [ flows:true, test:'SCPFintentEventTp', table:'intent_tp_tests', results:'intent_tp_results', file:'IntentEventTPDB', rFile:'SCPFintentEventTp.R n', extra:neighbors, finalResult:1, graphTitle:[ 'Intent Throughput Test - neighbors=0','Intent Throughput Test - neighbors=4' ], dbCols:'SUM( avg ) as avg', dbWhere:[ 'AND scale=5 AND neighbors=0 GROUP BY date','AND scale=5 AND NOT neighbors=0 GROUP BY date' ], y_axis:'Throughput (Ops/sec)' ],
+ SCPFintentEventTp: [ flows:true, test:'SCPFintentEventTp', table:'intent_tp_tests', results:'intent_tp_results', file:'IntentEventTPDB', rFile:'SCPFintentEventTp.R n', extra:neighbors, finalResult:1, graphTitle:[ 'Intent Throughput Test - neighbors=0','Intent Throughput Test - neighbors=4' ], dbCols:'SUM( avg ) as avg', dbWhere:[ 'AND scale=5 AND neighbors=0 GROUP BY date,build','AND scale=5 AND NOT neighbors=0 GROUP BY date,build' ], y_axis:'Throughput (Ops/sec)' ],
SCPFintentRerouteLat: [ flows:true, test:'SCPFintentRerouteLat', table:'intent_reroute_latency_tests', results:'intent_reroute_latency_results', file:'IntentRerouteLatDB', rFile:'SCPFIntentInstallWithdrawRerouteLat.R n', extra:batches, finalResult:1, graphTitle:[ 'Intent Reroute Test' ], dbCols:'avg', dbWhere:'AND scale=5 AND batch_size=100', y_axis:'Latency (ms)' ],
SCPFscalingMaxIntents: [ flows:true, test:'SCPFscalingMaxIntents', table:'max_intents_tests', results:'max_intents_results', file:'ScalingMaxIntentDB', rFile:'SCPFscalingMaxIntents.R n', extra:none, finalResult:0 ],
SCPFintentEventTpWithFlowObj: [ flows:true, test:'SCPFintentEventTp --params TEST/flowObj=True', table:'intent_tp_fobj_tests', results:'intent_tp_fobj_results', file:'IntentEventTPflowObjDB', rFile:'SCPFintentEventTp.R y', extra:neighbors,finalResult:0 ],
@@ -43,7 +43,7 @@
for ( int i=0; i< SCPF[ testName ][ 'graphTitle' ].size(); i++ ){
result += generalFuncs.basicGraphPart( generalFuncs.rScriptLocation + "SCPFLineGraph.R", host, port, user, pass, "\"" + SCPF[ testName ][ 'graphTitle' ][ i ] + "\"", branchName ) +
- " " + 50 + " \"SELECT " + checkIfList( testName, 'dbCols', i ) + " FROM " + SCPF[ testName ][ 'table' ] + " WHERE branch=\'" + branchName + "\' " + sqlOldFlow( isOldFlow, testName ) +
+ " " + 50 + " \"SELECT " + checkIfList( testName, 'dbCols', i ) + ", build FROM " + SCPF[ testName ][ 'table' ] + " WHERE branch=\'" + branchName + "\' " + sqlOldFlow( isOldFlow, testName ) +
checkIfList( testName, 'dbWhere', i ) + " ORDER BY date DESC LIMIT 50\" \"" + SCPF[ testName ][ 'y_axis' ] + "\" " + hasOldFlow( isOldFlow, testName ) + graph_saved_directory + ";"
}
return result
@@ -87,4 +87,4 @@
return getGraphCommand( SCPF[ testName ][ 'rFile' ], SCPF[ testName ][ 'extra' ], host, port, user, pass, testName, prop[ "ONOSBranch" ], isOldFlow ) + '''
    ''' + ( SCPF[ testName ][ 'finalResult' ] ? generateCombinedResultGraph( host, port, user, pass, testName, prop[ "ONOSBranch" ], isOldFlow ) : "" )
}
-return this;
\ No newline at end of file
+return this;
diff --git a/TestON/JenkinsFile/SRJenkinsFile b/TestON/JenkinsFile/SRJenkinsFile
index 0cd424c..e4e0139 100644
--- a/TestON/JenkinsFile/SRJenkinsFile
+++ b/TestON/JenkinsFile/SRJenkinsFile
@@ -9,7 +9,7 @@
SR = test_lists.getAllTheTests( prop[ "WikiPrefix" ] )[ "SR" ]
graph_generator_file = "~/OnosSystemTest/TestON/JenkinsFile/scripts/testCaseGraphGenerator.R"
-graph_saved_directory = "/var/jenkins/workspace/postjob-VM/"
+graph_saved_directory = "/var/jenkins/workspace/postjob-Fabric/"
echo( "Testcases:" )
def testsToRun = null
diff --git a/TestON/JenkinsFile/SRJenkinsfileTrigger b/TestON/JenkinsFile/SRJenkinsfileTrigger
new file mode 100644
index 0000000..72f0604
--- /dev/null
+++ b/TestON/JenkinsFile/SRJenkinsfileTrigger
@@ -0,0 +1,116 @@
+#!groovy
+
+funcs = evaluate readTrusted( 'TestON/JenkinsFile/JenkinsCommonFuncs.groovy' )
+test_lists = evaluate readTrusted( 'TestON/JenkinsFile/JenkinsTestONTests.groovy' )
+triggerFuncs = evaluate readTrusted( 'TestON/JenkinsFile/TriggerFuncs.groovy' )
+
+previous_version = "1.12"
+before_previous_version = "1.11"
+funcs.initializeTrend( "Fabric" );
+funcs.initialize( "Fabric" )
+triggerFuncs.init( funcs )
+
+wikiContents = ""
+testcases = [
+ "FUNC" : [ tests : "" , nodeName : "VM", wikiContent : "" ],
+ "HA" : [ tests : "" , nodeName : "VM", wikiContent : "" ],
+ "SCPF" : [ tests : "" , nodeName : "BM", wikiContent : "" ],
+ "SR" : [ tests : "", nodeName : "Fabric", wikiContent : "" ],
+ "USECASE" : [ tests : "" , nodeName : "BM", wikiContent : "" ]
+]
+Prefix_organizer = [
+ "FU" : "FUNC",
+ "HA" : "HA",
+ "PL" : "USECASE",
+ "SA" : "USECASE",
+ "SC" : "SCPF",
+ "SR" : "SR",
+ "US" : "USECASE",
+ "VP" : "USECASE"
+]
+
+manually_run = params.manual_run
+onos_b = "master"
+test_branch = ""
+onos_tag = params.ONOSTag
+isOldFlow = true
+
+// Set tests based on day of week
+def now = funcs.getCurrentTime()
+print now.toString()
+today = now[ Calendar.DAY_OF_WEEK ]
+
+if ( manually_run ){
+ onos_b = params.ONOSVersion
+}
+AllTheTests = test_lists.getAllTheTests( onos_b )
+
+day = ""
+SCPF_choices = ""
+USECASE_choices = ""
+FUNC_choices = ""
+HA_choices = ""
+SR_choices = ""
+stat_graph_generator_file = "testCategoryBuildStats.R"
+pie_graph_generator_file = "testCategoryPiePassFail.R"
+graph_saved_directory = "/var/jenkins/workspace/postjob-Fabric/"
+
+post_result = params.PostResult
+if( !manually_run ){
+ slackSend( channel:'sr-failures', color:'#03CD9F',
+ message:":sparkles::sparkles::sparkles::sparkles::sparkles::sparkles::sparkles::sparkles::sparkles::sparkles::sparkles::sparkles::sparkles::sparkles::sparkles::sparkles:\n"
+ + "Starting tests on : " + now.toString()
+ + "\n:sparkles::sparkles::sparkles::sparkles::sparkles::sparkles::sparkles::sparkles::sparkles::sparkles::sparkles::sparkles::sparkles::sparkles::sparkles::sparkles:" )
+
+ SR_choices += adder( "SR", "basic", true )
+ if ( today == Calendar.FRIDAY ){
+ SR_choices += adder( "SR", "extra_A", true )
+ } else if( today == Calendar.SATURDAY ){
+ SR_choices += adder( "SR", "extra_B", true )
+ }
+ SR_choices = triggerFuncs.lastCommaRemover( SR_choices )
+}
+if ( manually_run ){
+ testcases = triggerFuncs.organize_tests( params.Tests, testcases )
+
+ isOldFlow = params.isOldFlow
+ println "Tests to be run manually : "
+}else{
+ testcases[ "SR" ][ "tests" ] = SR_choices
+ println "Defaulting to " + day + " tests:"
+}
+
+triggerFuncs.print_tests( testcases )
+
+def runTest = [
+ "VM" : [:],
+ "BM" : [:],
+ "Fabric" : [:]
+]
+for( String test in testcases.keySet() ){
+ println test
+ if ( testcases[ test ][ "tests" ] != "" ){
+ runTest[ testcases[ test ][ "nodeName" ] ][ test ] = triggerFuncs.trigger_pipeline( onos_b, testcases[ test ][ "tests" ], testcases[ test ][ "nodeName" ], test, manually_run, onos_tag )
+ }
+}
+def finalList = [:]
+finalList[ "Fabric" ] = triggerFuncs.runTestSeq( runTest[ "Fabric" ] )
+parallel finalList
+/*
+if ( !manually_run ){
+ funcs.generateStatGraph( funcs.branchWithPrefix( onos_b ),
+ AllTheTests,
+ stat_graph_generator_file,
+ pie_graph_generator_file,
+ graph_saved_directory )
+}*/
+def adder( testCat, set, getResult ){
+ result = ""
+ for( String test in AllTheTests[ testCat ].keySet() ){
+ if( AllTheTests[ testCat ][ test ][ set ] ){
+ if( getResult )
+ result += test + ","
+ }
+ }
+ return result
+}
\ No newline at end of file
diff --git a/TestON/JenkinsFile/TriggerFuncs.groovy b/TestON/JenkinsFile/TriggerFuncs.groovy
new file mode 100644
index 0000000..c6fa9ca
--- /dev/null
+++ b/TestON/JenkinsFile/TriggerFuncs.groovy
@@ -0,0 +1,194 @@
+#!groovy
+
+def init( commonFuncs ){
+ funcs = commonFuncs
+}
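+// Trims a single trailing comma from a comma-separated test list, e.g.
+// lastCommaRemover( "SRBridging,SRRouting," ) returns "SRBridging,SRRouting".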
+def lastCommaRemover( str ){
+ if ( str.size() > 0 && str[ str.size() - 1 ] == ',' ){
+ str = str.substring( 0,str.size() - 1 )
+ }
+ return str
+}
+def printDaysForTest( AllTheTests ){
+ result = ""
+ for ( String test in AllTheTests.keySet() ){
+ result += test + " : \n"
+ for( String each in AllTheTests[ test ].keySet() ){
+ AllTheTests[ test ][ each ][ "day" ] = lastCommaRemover( AllTheTests[ test ][ each ][ "day" ] )
+ result += " " + each + ":[" + AllTheTests[ test ][ each ][ "day" ] + "]\n"
+ }
+ result += "\n"
+ }
+ return result
+}
+def runTestSeq( testList ){
+ return{
+ for ( test in testList.keySet() ){
+ testList[ test ].call()
+ }
+ }
+}
+def print_tests( tests ){
+ for( String test in tests.keySet() ){
+ if( tests[ test ][ "tests" ] != "" ){
+ println test + ":"
+ println tests[ test ][ "tests" ]
+ }
+ }
+}
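+// NOTE: relies on Prefix_organizer being defined in the calling trigger script's binding.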
+def organize_tests( tests, testcases ){
+ testList = tests.tokenize( "\n;, " )
+ for( String test in testList )
+ testcases [ Prefix_organizer[ ( test == "FUNCbgpls" || test == "FUNCvirNetNB" ? "US" : ( test[ 0 ] + test[ 1 ] ) ) ] ][ "tests" ] += test + ","
+ return testcases
+}
+def borrow_mn( jobOn ){
+ result = ""
+ if( jobOn == "SR" ){
+ result = "~/cell_borrow.sh"
+ }
+ return result
+}
+def trigger( branch, tests, nodeName, jobOn, manuallyRun, onosTag ){
+    println jobOn + "-pipeline-" + ( manuallyRun ? "manually" : branch )
+ wiki = branch
+ branch = funcs.branchWithPrefix( branch )
+ test_branch = "master"
+ node( "TestStation-" + nodeName + "s" ){
+ envSetup( branch, test_branch, onosTag, jobOn, manuallyRun )
+
+ exportEnvProperty( branch, test_branch, wiki, tests, post_result, manuallyRun, onosTag, isOldFlow )
+ }
+
+ jobToRun = jobOn + "-pipeline-" + ( manuallyRun ? "manually" : wiki )
+ build job: jobToRun, propagate: false
+}
+def trigger_pipeline( branch, tests, nodeName, jobOn, manuallyRun, onosTag ){
+// nodeName : "BM", "VM", or "Fabric"
+// jobOn : "SCPF", "USECASE", "FUNC", "HA", or "SR"
+ return{
+ if( jobOn == "SR" ){
+ trigger( "1.11", tests, nodeName, jobOn, manuallyRun, onosTag )
+ trigger( "1.12", tests, nodeName, jobOn, manuallyRun, onosTag )
+ trigger( "master", tests, nodeName, jobOn, manuallyRun, onosTag )
+ // returnCell( nodeName )
+ }else{
+ trigger( branch, tests, nodeName, jobOn, manuallyRun, onosTag )
+ }
+ }
+}
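+// Example ( hypothetical values ): the returned closure is collected per node
+// type and invoked later through runTestSeq, e.g.
+//     runTest[ "Fabric" ][ "SR" ] =
+//         trigger_pipeline( "master", "SRBridging,SRRouting", "Fabric", "SR", false, "" )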
+// export Environment properties.
+def exportEnvProperty( onos_branch, test_branch, wiki, tests, postResult, manually_run, onosTag, isOldFlow ){
+ stage( "export Property" ){
+ sh '''
+ echo "ONOSBranch=''' + onos_branch +'''" > /var/jenkins/TestONOS.property
+ echo "TestONBranch=''' + test_branch +'''" >> /var/jenkins/TestONOS.property
+ echo "ONOSTag='''+ onosTag +'''" >> /var/jenkins/TestONOS.property
+ echo "WikiPrefix=''' + wiki +'''" >> /var/jenkins/TestONOS.property
+ echo "ONOSJVMHeap='''+ env.ONOSJVMHeap +'''" >> /var/jenkins/TestONOS.property
+ echo "Tests=''' + tests +'''" >> /var/jenkins/TestONOS.property
+ echo "postResult=''' + postResult +'''" >> /var/jenkins/TestONOS.property
+ echo "manualRun=''' + manually_run +'''" >> /var/jenkins/TestONOS.property
+ echo "isOldFlow=''' + isOldFlow +'''" >> /var/jenkins/TestONOS.property
+ '''
+ }
+}
+// Initialize the environment Setup for the onos and OnosSystemTest
+def envSetup( onos_branch, test_branch, onos_tag, jobOn, manuallyRun ){
+ stage( "envSetup" ) {
+ // after env: ''' + borrow_mn( jobOn ) + '''
+ sh '''#!/bin/bash -l
+ set +e
+ . ~/.bashrc
+ env
+ ''' + preSetup( onos_branch, test_branch, onos_tag, manuallyRun ) + '''
+ ''' + oldFlowCheck( jobOn, onos_branch ) + '''
+ ''' + postSetup( onos_branch, test_branch, onos_tag, manuallyRun )
+ }
+}
+def tagCheck( onos_tag, onos_branch ){
+ result = "git checkout "
+ if ( onos_tag == "" )
+ result += onos_branch //create new local branch
+ else
+ result += onos_tag //checkout the tag
+ return result
+}
+def preSetup( onos_branch, test_branch, onos_tag, isManual ){
+ result = ""
+ if( !isManual ){
+ result = '''echo -e "\n##### Set TestON Branch #####"
+ echo "TestON Branch is set on: ''' + test_branch + '''"
+ cd ~/OnosSystemTest/
+        git checkout HEAD~1 # Make sure you aren't on a branch
+ git branch | grep -v "detached from" | xargs git branch -d # delete all local branches merged with remote
+        git branch -D ''' + test_branch + ''' # just in case there are local changes. This will normally result in a branch not found error
+ git clean -df # clean any local files
+ git fetch --all # update all caches from remotes
+ git reset --hard origin/''' + test_branch +''' # force local index to match remote branch
+ git clean -df # clean any local files
+ git checkout ''' + test_branch + ''' #create new local branch
+ git branch
+ git log -1 --decorate
+ echo -e "\n##### Set ONOS Branch #####"
+ echo "ONOS Branch is set on: ''' + onos_branch + '''"
+ echo -e "\n #### check karaf version ######"
+ env |grep karaf
+ cd ~/onos
+ rm -rf buck-out/*
+ ~/onos/tools/build/onos-buck clean
+        git checkout HEAD~1 # Make sure you aren't on a branch
+ git branch | grep -v "detached from" | xargs git branch -d # delete all local branches merged with remote
+        git branch -D ''' + onos_branch + ''' # just in case there are local changes. This will normally result in a branch not found error
+ git clean -df # clean any local files
+ git fetch --all # update all caches from remotes
+ git reset --hard origin/''' + onos_branch + ''' # force local index to match remote branch
+ git clean -df # clean any local files
+ ''' + tagCheck( onos_tag, onos_branch ) + '''
+ git branch
+ git log -1 --decorate
+ echo -e "\n##### set jvm heap size to 8G #####"
+ echo ${ONOSJVMHeap}
+ inserted_line="export JAVA_OPTS=\"\${ONOSJVMHeap}\""
+ sed -i "s/bash/bash\\n$inserted_line/" ~/onos/tools/package/bin/onos-service
+ echo "##### Check onos-service setting..... #####"
+ cat ~/onos/tools/package/bin/onos-service
+ export JAVA_HOME=/usr/lib/jvm/java-8-oracle'''
+ }
+ return result
+}
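+// With isOldFlow set, the sed pair below disables DistributedFlowRuleStore and
+// re-enables ECFlowRuleStore before the ONOS build; otherwise the reverse.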
+def oldFlowCheck( jobOn, onos_branch ){
+ result = ""
+    if( jobOn == "SCPF" && ( onos_branch == "master" || onos_branch == "onos-1.12" ) )
+ result = '''sed -i -e 's/@Component(immediate = true)/@Component(enabled = false)/g' ~/onos/core/store/dist/src/main/java/org/onosproject/store/flow/impl/''' + ( isOldFlow ? "DistributedFlowRuleStore" : "ECFlowRuleStore" ) + '''.java
+ sed -i -e 's/@Component(enabled = false)/@Component(immediate = true)/g' ~/onos/core/store/dist/src/main/java/org/onosproject/store/flow/impl/''' + ( isOldFlow ? "ECFlowRuleStore" : "DistributedFlowRuleStore" ) + ".java"
+ return result
+}
+def postSetup( onos_branch, test_branch, onos_tag, isManual ){
+ result = ""
+ if( !isManual ){
+ result = '''echo -e "\n##### build ONOS skip unit tests ######"
+ #mvn clean install -DskipTests
+ # Force buck update
+ rm -f ~/onos/bin/buck
+ ~/onos/tools/build/onos-buck build onos
+ sleep 30
+ echo -e "\n##### Stop all running instances of Karaf #####"
+ kill $(ps -efw | grep karaf | grep -v grep | awk '{print $2}')
+ sleep 30
+ git branch'''
+ }
+ return result
+}
+def returnCell( nodeName ){
+ node( "TestStation-" + nodeName + "s" ){
+ sh '''#!/bin/bash -l
+ set +e
+ . ~/.bashrc
+ env
+ ~/./return_cell.sh
+ '''
+ }
+}
+
+return this;
\ No newline at end of file
diff --git a/TestON/drivers/common/cli/emulator/lincoemininetdriver.py b/TestON/drivers/common/cli/emulator/lincoemininetdriver.py
index df1abd7..ba54a75 100644
--- a/TestON/drivers/common/cli/emulator/lincoemininetdriver.py
+++ b/TestON/drivers/common/cli/emulator/lincoemininetdriver.py
@@ -73,7 +73,7 @@
self.handle.sendline( cmd )
lincStart = self.handle.expect( [ "mininet>", pexpect.TIMEOUT ], timeout=120 )
if lincStart == 1:
- self.handle.sendline( "\x03" )
+ self.handle.send( "\x03" )
self.handle.sendline( "sudo mn -c" )
self.handle.sendline( cmd )
lincStart = self.handle.expect( [ "mininet>", pexpect.TIMEOUT ], timeout=120 )
diff --git a/TestON/drivers/common/cli/emulator/mininetclidriver.py b/TestON/drivers/common/cli/emulator/mininetclidriver.py
index 8e1108b..1da21b9 100644
--- a/TestON/drivers/common/cli/emulator/mininetclidriver.py
+++ b/TestON/drivers/common/cli/emulator/mininetclidriver.py
@@ -477,16 +477,15 @@
# One of the host to host pair is unreachable
isReachable = main.FALSE
failedPings += 1
+ main.log.warn( "Cannot ping between {} and {}".format( host, temp ) )
pingResponse += "\n"
- if not isReachable:
- main.log.warn( "Cannot ping between {} and {}".format( host, temp ) )
main.log.info( pingResponse + "Failed pings: " + str( failedPings ) )
return isReachable
except pexpect.TIMEOUT:
main.log.exception( self.name + ": TIMEOUT exception" )
response = self.handle.before
# NOTE: Send ctrl-c to make sure command is stopped
- self.handle.sendline( "\x03" )
+ self.handle.send( "\x03" )
self.handle.expect( "Interrupt" )
response += self.handle.before + self.handle.after
self.handle.expect( "mininet>" )
@@ -545,9 +544,8 @@
pingResponse += " X"
isReachable = main.FALSE
failedPingsTotal += 1
+ main.log.warn( "Cannot ping between {} and {}".format( host, temp ) )
pingResponse += "\n"
- if not isReachable:
- main.log.warn( "Cannot ping between {} and {}".format( host, temp ) )
main.log.info( pingResponse + "Failed pings: " + str( failedPingsTotal ) )
return isReachable
@@ -555,7 +553,7 @@
main.log.exception( self.name + ": TIMEOUT exception" )
response = self.handle.before
# NOTE: Send ctrl-c to make sure command is stopped
- self.handle.sendline( "\x03" )
+ self.handle.send( "\x03" )
self.handle.expect( "Interrupt" )
response += self.handle.before + self.handle.after
self.handle.expect( "mininet>" )
@@ -618,7 +616,7 @@
main.log.exception( self.name + ": TIMEOUT exception" )
response = self.handle.before
# NOTE: Send ctrl-c to make sure command is stopped
- self.handle.sendline( "\x03" )
+ self.handle.send( "\x03" )
self.handle.expect( "Interrupt" )
response += self.handle.before + self.handle.after
self.handle.expect( "mininet>" )
@@ -765,7 +763,7 @@
main.log.exception( self.name + ": TIMEOUT exception" )
response = self.handle.before
# NOTE: Send ctrl-c to make sure command is stopped
- self.handle.sendline( "\x03" )
+ self.handle.send( "\x03" )
self.handle.expect( "Interrupt" )
response += self.handle.before + self.handle.after
self.handle.expect( "mininet>" )
@@ -1472,7 +1470,7 @@
main.log.error( self.name + " response: " +
repr( self.handle.before ) )
# NOTE: Send ctrl-c to make sure iperf is done
- self.handle.sendline( "\x03" )
+ self.handle.send( "\x03" )
self.handle.expect( "Interrupt" )
self.handle.expect( "mininet>" )
return main.FALSE
@@ -1507,7 +1505,7 @@
except pexpect.TIMEOUT:
main.log.error( self.name + ": TIMEOUT exception found" )
main.log.error( self.name + " response: " + repr( self.handle.before ) )
- self.handle.sendline( "\x03" )
+ self.handle.send( "\x03" )
self.handle.expect( "Interrupt" )
self.handle.expect( "mininet>" )
return main.FALSE
diff --git a/TestON/drivers/common/cli/emulator/scapyclidriver.py b/TestON/drivers/common/cli/emulator/scapyclidriver.py
index 418aa9e..96b9c17 100644
--- a/TestON/drivers/common/cli/emulator/scapyclidriver.py
+++ b/TestON/drivers/common/cli/emulator/scapyclidriver.py
@@ -718,7 +718,7 @@
# TODO: add all params, or use kwargs
ifaceName = str( ifaceName ) if ifaceName else self.ifaceName
# Set interface
- self.handle.sendline( ' conf.iface = "' + ifaceName + '"' )
+ self.handle.sendline( 'conf.iface = "' + ifaceName + '"' )
self.handle.expect( self.scapyPrompt )
cmd = 'pkt = sniff(count = ' + str( sniffCount ) +\
', filter = "' + str( pktFilter ) + '")'
diff --git a/TestON/drivers/common/cli/hostdriver.py b/TestON/drivers/common/cli/hostdriver.py
new file mode 100644
index 0000000..238721e
--- /dev/null
+++ b/TestON/drivers/common/cli/hostdriver.py
@@ -0,0 +1,198 @@
+#!/usr/bin/env python
+"""
+Copyright 2018 Open Networking Foundation (ONF)
+
+Please refer questions to either the onos test mailing list at <onos-test@onosproject.org>,
+the System Testing Plans and Results wiki page at <https://wiki.onosproject.org/x/voMg>,
+or the System Testing Guide page at <https://wiki.onosproject.org/x/WYQg>
+
+TestON is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 2 of the License, or
+( at your option ) any later version.
+
+TestON is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with TestON. If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import pexpect
+import re
+import sys
+import types
+import os
+import time
+from math import pow
+from drivers.common.clidriver import CLI
+
+class HostDriver( CLI ):
+ """
+ This class is created as a standalone host driver.
+ """
+ def __init__( self ):
+ super( HostDriver, self ).__init__()
+ self.handle = self
+ self.name = None
+ self.shortName = None
+ self.home = None
+
+ def connect( self, **connectargs ):
+ """
+ Creates ssh handle for host.
+ NOTE:
+        The ip_address comes from the topo file's host tag; its value can be
+        an environment variable, or "localhost" to get the ip address needed
+        to ssh to the "bench"
+ """
+ try:
+ for key in connectargs:
+ vars( self )[ key ] = connectargs[ key ]
+ self.name = self.options[ 'name' ]
+ self.shortName = self.options[ 'shortName' ]
+
+ try:
+ if os.getenv( str( self.ip_address ) ) is not None:
+ self.ip_address = os.getenv( str( self.ip_address ) )
+ else:
+ main.log.info( self.name +
+ ": Trying to connect to " +
+ self.ip_address )
+ except KeyError:
+ main.log.info( "Invalid host name," +
+ " connecting to local host instead" )
+ self.ip_address = 'localhost'
+ except Exception as inst:
+ main.log.error( "Uncaught exception: " + str( inst ) )
+
+ self.handle = super(
+ HostDriver,
+ self ).connect(
+ user_name=self.user_name,
+ ip_address=self.ip_address,
+ port=None,
+ pwd=self.pwd )
+
+ if self.handle:
+ main.log.info( "Connection successful to the " +
+ self.user_name +
+ "@" +
+ self.ip_address )
+ self.handle.sendline( "" )
+ self.handle.expect( self.prompt )
+ return main.TRUE
+ else:
+ main.log.error( "Connection failed to " +
+ self.user_name +
+ "@" +
+ self.ip_address )
+ return main.FALSE
+ except pexpect.EOF:
+ main.log.error( self.name + ": EOF exception found" )
+ main.log.error( self.name + ": " + self.handle.before )
+ main.cleanAndExit()
+ except Exception:
+ main.log.exception( self.name + ": Uncaught exception!" )
+ main.cleanAndExit()
+
+ def disconnect( self, **connectargs ):
+ """
+ Called when test is complete to disconnect the handle.
+ """
+ response = main.TRUE
+ try:
+ if self.handle:
+ # Disconnect from the host
+ self.handle.sendline( "" )
+ self.handle.expect( self.prompt )
+ self.handle.sendline( "exit" )
+ i = self.handle.expect( [ "closed", pexpect.TIMEOUT ], timeout=2 )
+ if i == 1:
+ main.log.error(
+ self.name +
+ ": timeout when waiting for response" )
+ main.log.error( "response: " + str( self.handle.before ) )
+ except TypeError:
+ main.log.exception( self.name + ": Object not as expected" )
+ response = main.FALSE
+ except pexpect.EOF:
+ main.log.error( self.name + ": EOF exception found" )
+ main.log.error( self.name + ": " + self.handle.before )
+ except ValueError:
+ main.log.exception( "Exception in disconnect of " + self.name )
+ response = main.TRUE
+ except Exception:
+ main.log.exception( self.name + ": Connection failed to the host" )
+ response = main.FALSE
+ return response
+
+ def ping( self, dst, ipv6=False, wait=3 ):
+ """
+ Description:
+ Ping from this host to another
+ Required:
+ dst: IP address of destination host
+ Optional:
+ ipv6: will use ping6 command if True; otherwise use ping command
+ wait: timeout for ping command
+ """
+ try:
+ command = "ping6" if ipv6 else "ping"
+ command += " -c 1 -i 1 -W " + str( wait ) + " " + str( dst )
+ main.log.info( self.name + ": Sending: " + command )
+ self.handle.sendline( command )
+ i = self.handle.expect( [ self.prompt, pexpect.TIMEOUT ],
+ timeout=wait + 1 )
+ if i == 1:
+ main.log.error(
+ self.name +
+ ": timeout when waiting for response" )
+ main.log.error( "response: " + str( self.handle.before ) )
+ self.handle.sendline( "" )
+ self.handle.expect( self.prompt )
+ response = self.handle.before
+ if re.search( ',\s0\%\spacket\sloss', response ):
+ main.log.info( self.name + ": no packets lost, host is reachable" )
+ return main.TRUE
+ else:
+ main.log.warn(
+ self.name +
+ ": PACKET LOST, HOST IS NOT REACHABLE" )
+ return main.FALSE
+ except pexpect.EOF:
+ main.log.error( self.name + ": EOF exception found" )
+ main.log.error( self.name + ": " + self.handle.before )
+ main.cleanAndExit()
+ except Exception:
+ main.log.exception( self.name + ": Uncaught exception!" )
+ main.cleanAndExit()
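+
+    # Example usage from a test script ( hypothetical component name "h1" ):
+    #     main.h1.ping( "10.0.0.2", wait=3 )       # main.TRUE on 0% packet loss
+    #     main.h1.ping( "2000::102", ipv6=True )   # uses ping6 instead of ping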
+
+ def ifconfig( self, wait=3 ):
+ """
+ Run ifconfig command on host and return output
+ """
+ try:
+ command = "ifconfig"
+ main.log.info( self.name + ": Sending: " + command )
+ self.handle.sendline( command )
+ i = self.handle.expect( [ self.prompt, pexpect.TIMEOUT ],
+ timeout=wait + 1 )
+ if i == 1:
+ main.log.error(
+ self.name +
+ ": timeout when waiting for response" )
+ main.log.error( "response: " + str( self.handle.before ) )
+ self.handle.sendline( "" )
+ self.handle.expect( self.prompt )
+ response = self.handle.before
+ return response
+ except pexpect.EOF:
+ main.log.error( self.name + ": EOF exception found" )
+ main.log.error( self.name + ": " + self.handle.before )
+ main.cleanAndExit()
+ except Exception:
+ main.log.exception( self.name + ": Uncaught exception!" )
+ main.cleanAndExit()
diff --git a/TestON/drivers/common/cli/networkdriver.py b/TestON/drivers/common/cli/networkdriver.py
index d1a7faf..38d7531 100755
--- a/TestON/drivers/common/cli/networkdriver.py
+++ b/TestON/drivers/common/cli/networkdriver.py
@@ -122,9 +122,9 @@
try:
for key, value in main.componentDictionary.items():
if hasattr( main, key ):
- if value[ 'type' ] in [ 'MininetSwitchDriver' ]:
+ if value[ 'type' ] in [ 'MininetSwitchDriver', 'OFDPASwitchDriver' ]:
self.switches[ key ] = getattr( main, key )
- elif value[ 'type' ] in [ 'MininetHostDriver' ]:
+ elif value[ 'type' ] in [ 'MininetHostDriver', 'HostDriver' ]:
self.hosts[ key ] = getattr( main, key )
return main.TRUE
except Exception:
@@ -138,7 +138,7 @@
hosts = {}
try:
for hostComponent in self.hosts.values():
- #TODO: return more host data
+ # TODO: return more host data
hosts[ hostComponent.options[ 'shortName' ] ] = {}
except Exception:
main.log.error( self.name + ": host component not as expected" )
diff --git a/TestON/drivers/common/cli/ofdpa/__init__.py b/TestON/drivers/common/cli/ofdpa/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/TestON/drivers/common/cli/ofdpa/__init__.py
diff --git a/TestON/drivers/common/cli/ofdpa/ofagent.conf.template b/TestON/drivers/common/cli/ofdpa/ofagent.conf.template
new file mode 100644
index 0000000..9dccbcf
--- /dev/null
+++ b/TestON/drivers/common/cli/ofdpa/ofagent.conf.template
@@ -0,0 +1,99 @@
+# Controller
+#-----------------------
+#CTRL1_IP="127.0.0.1"
+#CTRL1_PORT="6653"
+#CTRL2_IP="127.0.0.1"
+#CTRL2_PORT="6653"
+#CTRL3_IP="127.0.0.1"
+#CTRL3_PORT="6653"
+
+#LISTEN_IP="0.0.0.0"
+#LISTEN_PORT="6653"
+
+# Datapath ID
+#-----------------------
+#DPID=`cat /sys/class/net/ma1/address | sed 's/://g'`
+#DPID="1"
+
+# In-band management
+#-----------------------
+#IB_MGMT_VLAN="1"
+#IB_MGMT_PORT_ARG="-p 5"
+
+# Debug options
+#-----------------------
+#OPT_ARGS="-a2 -d4 -c1 -c2 -c3 -c4 -c5"
+
+# Maximum number of log files (valid: 0-10; 0: disable logging)
+MAX_LOG_NUM=0
+
+#----------------------------------------------------------------------------
+# OPT_ARGS:
+#----------------------------------------------------------------------------
+#
+# Controllers:
+# -i, --dpid=DATAPATHID The Datapath ID for this switch.
+# -l, --listen=IP[:PORT] A local IP address on which to listen for
+# controllers (may use this option multiple times)
+# -t, --controller=IP[:PORT] A Controller IP address (may use this option
+# multiple times)
+#
+# TLS:
+# --cacert=CACERTIFICATE The Certificate Authority certificate
+# --cert=CERTIFICATE The SSL public certificate file for the switch
+# --cipher=CIPHER The list of ciphers to use
+# --key=KEYFILE The SSL private key file for the switch
+#
+# Management VLAN:
+# -p, --port=MGMTPORT A port in the mgmt VLAN (may use this option
+# multiple times)
+# -v, --vlan=MGMTVLAN The VLAN to be reserved for management.
+#
+# Debugging:
+# -a, --agentdebuglvl=AGENTDEBUGLVL
+# The verbosity of OF Agent debug messages.
+# -c, --ofdpadebugcomp=OFPDACOMPONENT
+# The OF-DPA component for which debug messages are
+# enabled.
+# -d, --ofdpadebuglvl=OFDPADEBUGLVL
+# The verbosity of OF-DPA debug messages.
+#
+#
+# Note:
+# IPv6 address parameters are specified following RFC3986.
+# To include a port number, enclose the IPv6 address in square brackets:
+# Example: -t [2001:db8:1f70::999:de8:7648:6e8]:6653
+#
+# To use TLS when connecting to a controller, prefix the IP address with "tls:".
+# Example: -t tls:[2001:db8:1f70::999:de8:7648:6e8]:6653
+#
+# Note: it is necessary to have a private key and public certificate to use TLS.
+# If the CA certificate is not provided, then the switch does not validate
+# certificates. This can be helpful if self-signed certificates are being used.
+#
+# Default values:
+# No controllers connections
+# Note: may listen on multiple IP addresses. E.g., IPv4 and IPv6.
+# OFAGENTDEBUGLVL = 0
+# Valid OF Agent debug levels are 0 - 2.
+# OFDPADEBUGLVL = 0
+# Valid OF-DPA debug levels are 0 - 4.
+# No components enabled for debug:
+# Valid OF-DPA components are:
+# 1 = API
+# 2 = Mapping
+# 3 = RPC
+# 4 = OFDB
+# 5 = Datapath
+# 6 = G8131
+# 7 = Y1731
+# 8 = sFlow
+# 9 = SDK
+# DATAPATHID = 0xda7a
+# No defaults for the management VLAN and port(s). The management VLAN feature
+# is disabled by default.
+# CIPHER = HIGH
+# CACERTIFICATE =
+# KEYFILE = /etc/ssl/private/switch.key
+# CERTIFICATE = /etc/ssl/certs/switch.crt
+#----------------------------------------------------------------------------
diff --git a/TestON/drivers/common/cli/ofdpa/ofdpaswitchdriver.py b/TestON/drivers/common/cli/ofdpa/ofdpaswitchdriver.py
new file mode 100644
index 0000000..b080c96
--- /dev/null
+++ b/TestON/drivers/common/cli/ofdpa/ofdpaswitchdriver.py
@@ -0,0 +1,327 @@
+#!/usr/bin/env python
+"""
+Copyright 2018 Open Networking Foundation (ONF)
+
+Please refer questions to either the onos test mailing list at <onos-test@onosproject.org>,
+the System Testing Plans and Results wiki page at <https://wiki.onosproject.org/x/voMg>,
+or the System Testing Guide page at <https://wiki.onosproject.org/x/WYQg>
+
+TestON is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 2 of the License, or
+( at your option ) any later version.
+
+TestON is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with TestON. If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import pexpect
+import re
+import json
+import types
+import time
+import os
+from drivers.common.clidriver import CLI
+from core import utilities
+from shutil import copyfile
+
+class OFDPASwitchDriver( CLI ):
+
+ def __init__( self ):
+ """
+ Initialize client
+ """
+ super( CLI, self ).__init__()
+ self.name = None
+ self.handle = None
+ self.prompt = "~#"
+ # Path relative to the bin folder
+ self.home = "../drivers/common/cli/ofdpa/"
+ # Local home for functions using scp
+ self.tempDirectory = "/tmp/"
+ self.conf = "ofagent.conf"
+ self.switchDirectory = "/etc/ofagent/"
+
+ def connect( self, **connectargs ):
+ """
+ Creates an SSH handle for the Accton CLI.
+ """
+ try:
+ # Parse keys in xml object
+ for key in connectargs:
+ vars( self )[ key ] = connectargs[ key ]
+ # Get the name
+ self.name = self.options[ 'name' ]
+ # Get the dpid
+ self.dpid = self.options[ 'dpid' ]
+ # Parse the IP address
+ try:
+ if os.getenv( str( self.ip_address ) ) is not None:
+ self.ip_address = os.getenv( str( self.ip_address ) )
+ # Otherwise assume it is a literal IP address
+ else:
+ main.log.info( self.name + ": Trying to connect to " + self.ip_address )
+ # Error handling
+ except KeyError:
+ main.log.info( "Invalid host name," + " connecting to local host instead" )
+ self.ip_address = 'localhost'
+ except Exception as inst:
+ main.log.error( "Uncaught exception: " + str( inst ) )
+ # Build the handle using the above information
+ self.handle = super( OFDPASwitchDriver, self ).connect(
+ user_name=self.user_name,
+ ip_address=self.ip_address,
+ port=None,
+ pwd=self.pwd )
+ # Successful connection
+ if self.handle:
+ main.log.info( "Connection successful to the host " + self.user_name + "@" + self.ip_address )
+ self.handle.sendline( "" )
+ self.handle.expect( self.prompt )
+ return main.TRUE
+ # Connection failed
+ else:
+ main.log.error( "Connection failed to the host " + self.user_name + "@" + self.ip_address )
+ main.log.error( "Failed to connect to the OFDPA CLI" )
+ return main.FALSE
+ # Error handling
+ except TypeError:
+ main.log.exception( self.name + ": Object not as expected" )
+ return None
+ except pexpect.EOF:
+ main.log.error( self.name + ": EOF exception found" )
+ main.log.error( self.name + ": " + self.handle.before )
+ main.cleanAndExit()
+ except Exception:
+ main.log.exception( self.name + ": Uncaught exception!" )
+ main.cleanAndExit()
+
+ def disconnect( self ):
+ """
+ Called when the test is complete to disconnect the OFDPASwitchDriver handle.
+ """
+ response = main.TRUE
+ try:
+ if self.handle:
+ # Stop the ofagent
+ self.stopOfAgent()
+ # Disconnect from the device
+ self.handle.sendline( "" )
+ self.handle.expect( self.prompt )
+ self.handle.sendline( "exit" )
+ self.handle.expect( "closed" )
+ # Error handling
+ except pexpect.TIMEOUT:
+ main.log.error( self.name + ": pexpect.TIMEOUT found" )
+ return main.FALSE
+ except TypeError:
+ main.log.exception( self.name + ": Object not as expected" )
+ response = main.FALSE
+ except pexpect.EOF:
+ main.log.error( self.name + ": EOF exception found" )
+ main.log.error( self.name + ": " + self.handle.before )
+ except ValueError:
+ main.log.exception( "Exception in disconnect of " + self.name )
+ response = main.TRUE
+ except Exception:
+ main.log.exception( self.name + ": Connection failed to the host" )
+ response = main.FALSE
+ return response
+
+ def assignSwController( self, ip, port="6653", ptcp="" ):
+ """
+ Description:
+ The assignment is realized by creating an ofagent.conf file
+ for each switch and then pushing it to the device.
+ Required:
+ ip - IP address(es) of the controllers; can be a list or a string.
+ Optional:
+ port - controller port (ignored by this driver)
+ ptcp - ptcp information (ignored by this driver)
+ Return:
+ Returns main.TRUE if the switch is correctly assigned to the controllers,
+ otherwise main.FALSE, or an appropriate exception is raised
+ """
+ assignResult = main.TRUE
+ # Initial arguments for OFDPA
+ opt_args = 'OPT_ARGS="-d 2 -c 2 -c 4 '
+ onosIp = ""
+ # Parses the controller option
+ try:
+ if isinstance( ip, types.StringType ):
+ onosIp = "-t " + str( ip )
+ elif isinstance( ip, types.ListType ):
+ for ipAddress in ip:
+ onosIp += "-t " + str( ipAddress ) + " "
+ else:
+ main.log.error( self.name + ": Invalid ip address" )
+ return main.FALSE
+ # Complete the arguments adding the dpid
+ opt_args += onosIp + '-i %s' % self.dpid + '"'
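+ # e.g. opt_args is now: OPT_ARGS="-d 2 -c 2 -c 4 -t 10.0.0.1 -i 0x205" (illustrative values)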
+ # Create a copy of the cfg file using the template
+ self.createCfg()
+ # Load the cfg file and adds the missing option
+ self.updateCfg( opt_args )
+ # Backup the cfg on the switch
+ self.backupCfg()
+ # Push the new cfg on the device
+ self.pushCfg()
+ # Start the ofagent on the device
+ self.startOfAgent()
+ # Enable all the ports
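+ # Retried because the ofagent may still be initializing after start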
+ assignResult = utilities.retry(
+ self.enablePorts,
+ main.FALSE,
+ kwargs={},
+ attempts=5,
+ sleep=10)
+ # Done return true
+ return assignResult
+ # Error handling
+ except pexpect.TIMEOUT:
+ main.log.error( self.name + ": pexpect.TIMEOUT found" )
+ return main.FALSE
+ except pexpect.EOF:
+ main.log.error( self.name + ": EOF exception found" )
+ main.log.error( self.name + ": " + self.handle.before )
+ main.cleanAndExit()
+ except Exception:
+ main.log.exception( self.name + ": Uncaught exception!" )
+ main.cleanAndExit()
+
+ def createCfg( self ):
+ """
+ Create in bench context a new config file starting from the template
+ """
+ copyfile( self.home + self.conf + ".template", self.tempDirectory + self.conf )
+
+ def updateCfg( self, opt_args ):
+ """
+ Add the arguments related to the current switch (self)
+ """
+ with open( self.tempDirectory + self.conf, "a" ) as cfg:
+ cfg.write( opt_args + "\n" )
+
+ def backupCfg( self ):
+ """
+ Create a backup file of the old configuration on the switch
+ """
+ self.handle.sendline( "" )
+ self.handle.expect( self.prompt )
+ self.handle.sendline( "cp %s%s %s%s.backup" % (self.switchDirectory, self.conf, self.switchDirectory, self.conf) )
+ self.handle.expect( self.prompt )
+
+ def pushCfg( self ):
+ """
+ Push the new configuration from the network bench
+ """
+ # We use os.system to send the command from TestON cluster
+ # to the switches. This means that passwordless access is
+ # necessary in order to push the configuration file
+ os.system( "scp " + self.tempDirectory + self.conf + " " +
+ self.user_name + "@" + self.ip_address + ":" + self.switchDirectory)
+
+ def startOfAgent( self ):
+ """
+ Start the ofagent on the device
+ """
+ self.handle.sendline( "" )
+ self.handle.expect( self.prompt )
+ self.handle.sendline( "service ofagentd start" )
+ self.handle.expect( self.prompt )
+
+ def stopOfAgent( self ):
+ """
+ Stop the ofagent on the device
+ """
+ self.handle.sendline( "" )
+ self.handle.expect( self.prompt )
+ self.handle.sendline( "service ofagentd stop" )
+ self.handle.expect( self.prompt )
+
+ def dumpFlows( self ):
+ """
+ Dump the flows from the devices
+ FIXME need changes in the workflow in order to be used
+ """
+ try:
+ self.handle.sendline( "" )
+ self.handle.expect( self.prompt )
+ # Create the dump of the flows locally on the switches
+ self.handle.sendline( "client_flowtable_dump" )
+ self.handle.expect( self.prompt )
+ response = self.handle.before
+ # Write back in the tmp folder - needs to be changed in future
+ with open(self.tempDirectory + "flows_%s.txt" % self.dpid, "w") as flows:
+ flows.write(response + "\n")
+ flows.close()
+ # Done return for further processing
+ return response
+ # Error handling
+ except pexpect.TIMEOUT:
+ main.log.error( self.name + ": pexpect.TIMEOUT found" )
+ return main.FALSE
+ except pexpect.EOF:
+ main.log.error( self.name + ": EOF exception found" )
+ main.log.error( self.name + ": " + self.handle.before )
+ main.cleanAndExit()
+ except Exception:
+ main.log.exception( self.name + ": Uncaught exception!" )
+ main.cleanAndExit()
+
+ def dumpGroups( self ):
+ """
+ Dump the groups from the devices
+ FIXME need changes in the workflow in order to be used
+ """
+ try:
+ self.handle.sendline( "" )
+ self.handle.expect( self.prompt )
+ self.handle.sendline( "client_grouptable_dump > groups.txt" )
+ self.handle.expect( self.prompt )
+ response = self.handle.before
+ # Write back in the tmp folder - needs to be changed in future
+ with open(self.tempDirectory + "groups_%s.txt" % self.dpid, "w") as groups:
+ groups.write(response + "\n")
+ groups.close()
+ # Done return for further processing
+ return response
+ # Error handling
+ except pexpect.TIMEOUT:
+ main.log.error( self.name + ": pexpect.TIMEOUT found" )
+ return main.FALSE
+ except pexpect.EOF:
+ main.log.error( self.name + ": EOF exception found" )
+ main.log.error( self.name + ": " + self.handle.before )
+ main.cleanAndExit()
+ except Exception:
+ main.log.exception( self.name + ": Uncaught exception!" )
+ main.cleanAndExit()
+
+ def enablePorts( self ):
+ """
+ Enable all the ports on the device.
+ It needs to wait for the switch to finish booting.
+ """
+ self.handle.sendline( "" )
+ self.handle.expect( self.prompt )
+ self.handle.sendline( "client_port_table_dump" )
+ self.handle.expect( self.prompt )
+ response = self.handle.before
+ if "Error from ofdpaClientInitialize()" in response:
+ main.log.warn(
+ self.name +
+ ": Not yet started" )
+ return main.FALSE
+ main.log.info( self.name + ": started" )
+ self.handle.sendline( "sh portspeed.sh" )
+ self.handle.expect( self.prompt )
+ return main.TRUE
diff --git a/TestON/drivers/common/cli/onosclidriver.py b/TestON/drivers/common/cli/onosclidriver.py
index 52ef98a..b7d10bd 100755
--- a/TestON/drivers/common/cli/onosclidriver.py
+++ b/TestON/drivers/common/cli/onosclidriver.py
@@ -3035,7 +3035,7 @@
main.log.exception( self.name + ": Uncaught exception!" )
main.cleanAndExit()
- def checkStatus( self, numoswitch, numolink, numoctrl = -1, logLevel="info" ):
+ def checkStatus( self, numoswitch, numolink = -1, numoctrl = -1, logLevel="info" ):
"""
Checks the number of switches & links that ONOS sees against the
supplied values. By default this will report to main.log, but the
@@ -3071,7 +3071,7 @@
return main.ERROR
switchCheck = ( int( devices ) == int( numoswitch ) )
# Is the number of links is what we expected
- linkCheck = ( int( links ) == int( numolink ) )
+ linkCheck = ( int( links ) == int( numolink ) ) or int( numolink ) == -1
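+ # A numolink of -1 (the new default) skips the link count check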
nodeCheck = ( int( nodes ) == int( numoctrl ) ) or int( numoctrl ) == -1
if switchCheck and linkCheck and nodeCheck:
# We expected the correct numbers
@@ -3085,8 +3085,9 @@
result = main.FALSE
output = output + "\n ONOS sees %i devices" % int( devices )
output = output + " (%i expected) " % int( numoswitch )
- output = output + "and %i links " % int( links )
- output = output + "(%i expected)" % int( numolink )
+ if int( numolink ) > 0:
+ output = output + "and %i links " % int( links )
+ output = output + "(%i expected)" % int( numolink )
if int( numoctrl ) > 0:
output = output + "and %i controllers " % int( nodes )
output = output + "(%i expected)" % int( numoctrl )
diff --git a/TestON/drivers/common/cli/onosdriver.py b/TestON/drivers/common/cli/onosdriver.py
index 6058cbd..8324677 100755
--- a/TestON/drivers/common/cli/onosdriver.py
+++ b/TestON/drivers/common/cli/onosdriver.py
@@ -795,7 +795,7 @@
tempCount = tempCount + 1
cellFile.write( "export OCI=$OC1\n" )
- cellFile.write( mnString + "\"" + mnIpAddrs + "\"\n" )
+ cellFile.write( mnString + "\"" + str(mnIpAddrs) + "\"\n" )
cellFile.write( appString + "\n" )
cellFile.write( onosGroup + "\n" )
cellFile.write( onosUser + "\n" )
diff --git a/TestON/tests/FUNC/FUNCbgpls/FUNCbgpls.py b/TestON/tests/FUNC/FUNCbgpls/FUNCbgpls.py
index dd2d2cc..8275033 100644
--- a/TestON/tests/FUNC/FUNCbgpls/FUNCbgpls.py
+++ b/TestON/tests/FUNC/FUNCbgpls/FUNCbgpls.py
@@ -187,7 +187,7 @@
main.log.info( "Kill Scapy process" )
bgplsConfig.Comments()
- main.Scapy1.handle.sendline( "\x03" )
+ main.Scapy1.handle.send( "\x03" )
time.sleep( 90 ) # This Sleep time gives time for the socket to close.
def CASE3( self, main ):
@@ -282,7 +282,7 @@
bgplsConfig.Comments()
main.log.info( "Kill Scapy process" )
bgplsConfig.Comments()
- main.Scapy1.handle.sendline( "\x03" )
+ main.Scapy1.handle.send( "\x03" )
time.sleep( 90 ) # This Sleep time gives time for the socket to close.
def CASE4( self, main ):
@@ -352,7 +352,7 @@
bgplsConfig.Comments()
main.log.info( "Kill Scapy process" )
bgplsConfig.Comments()
- main.Scapy1.handle.sendline( "\x03" )
+ main.Scapy1.handle.send( "\x03" )
time.sleep( 90 )
def CASE5( self, main ):
@@ -423,7 +423,7 @@
bgplsConfig.Comments()
main.log.info( "Kill Scapy process" )
bgplsConfig.Comments()
- main.Scapy1.handle.sendline( "\x03" )
+ main.Scapy1.handle.send( "\x03" )
time.sleep( 90 )
def CASE6( self, main ):
diff --git a/TestON/tests/FUNC/FUNCovsdbtest/FUNCovsdbtest.py b/TestON/tests/FUNC/FUNCovsdbtest/FUNCovsdbtest.py
index fd5c9a7..4570863 100644
--- a/TestON/tests/FUNC/FUNCovsdbtest/FUNCovsdbtest.py
+++ b/TestON/tests/FUNC/FUNCovsdbtest/FUNCovsdbtest.py
@@ -466,7 +466,7 @@
port2postdata = port2.DictoJson()
main.step( "Post Network Data via HTTP(Post port need post network)" )
- Poststatus, result = main.Cluster.active( 0 ).REST.send( ctrlip, httpport, '', path + 'networks/',
+ Poststatus, result = main.Cluster.active( 0 ).REST.send( 'networks/', ctrlip, httpport, path,
'POST', None, networkpostdata )
utilities.assert_equals(
expect='200',
@@ -475,7 +475,7 @@
onfail="Post Network Failed " + str( Poststatus ) + "," + str( result ) )
main.step( "Post Subnet Data via HTTP(Post port need post subnet)" )
- Poststatus, result = main.Cluster.active( 0 ).REST.send( ctrlip, httpport, '', path + 'subnets/',
+ Poststatus, result = main.Cluster.active( 0 ).REST.send( 'subnets/', ctrlip, httpport, path,
'POST', None, subnetpostdata )
utilities.assert_equals(
expect='202',
@@ -484,7 +484,7 @@
onfail="Post Subnet Failed " + str( Poststatus ) + "," + str( result ) )
main.step( "Post Port1 Data via HTTP" )
- Poststatus, result = main.Cluster.active( 0 ).REST.send( ctrlip, httpport, '', path + 'ports/',
+ Poststatus, result = main.Cluster.active( 0 ).REST.send( 'ports/', ctrlip, httpport, path,
'POST', None, port1postdata )
utilities.assert_equals(
expect='200',
@@ -493,7 +493,7 @@
onfail="Post Port Failed " + str( Poststatus ) + "," + str( result ) )
main.step( "Post Port2 Data via HTTP" )
- Poststatus, result = main.Cluster.active( 0 ).REST.send( ctrlip, httpport, '', path + 'ports/',
+ Poststatus, result = main.Cluster.active( 0 ).REST.send( 'ports/', ctrlip, httpport, path,
'POST', None, port2postdata )
utilities.assert_equals(
expect='200',
diff --git a/TestON/tests/USECASE/SegmentRouting/SRBridging/SRBridging.params b/TestON/tests/USECASE/SegmentRouting/SRBridging/SRBridging.params
index 9ca0543..748844e 100644
--- a/TestON/tests/USECASE/SegmentRouting/SRBridging/SRBridging.params
+++ b/TestON/tests/USECASE/SegmentRouting/SRBridging/SRBridging.params
@@ -3,7 +3,7 @@
<testcases>5,6,7,8,15,16,17,18,25,26,27,28,35,36,37,38,45,46,47,48,55,56,57,58,65,66,67,68,75,76,77,78</testcases>
<GRAPH>
- <nodeCluster>VM</nodeCluster>
+ <nodeCluster>Fabric</nodeCluster>
<builds>20</builds>
</GRAPH>
diff --git a/TestON/tests/USECASE/SegmentRouting/SRClusterRestart/SRClusterRestart.params b/TestON/tests/USECASE/SegmentRouting/SRClusterRestart/SRClusterRestart.params
index 9a0a8a1..778370e 100755
--- a/TestON/tests/USECASE/SegmentRouting/SRClusterRestart/SRClusterRestart.params
+++ b/TestON/tests/USECASE/SegmentRouting/SRClusterRestart/SRClusterRestart.params
@@ -3,7 +3,7 @@
<testcases>1,2,3</testcases>
<GRAPH>
- <nodeCluster>BM</nodeCluster>
+ <nodeCluster>Fabric</nodeCluster>
<builds>20</builds>
</GRAPH>
diff --git a/TestON/tests/USECASE/SegmentRouting/SRDhcprelay/SRDhcprelay.params b/TestON/tests/USECASE/SegmentRouting/SRDhcprelay/SRDhcprelay.params
index 29b381d..0fba048 100644
--- a/TestON/tests/USECASE/SegmentRouting/SRDhcprelay/SRDhcprelay.params
+++ b/TestON/tests/USECASE/SegmentRouting/SRDhcprelay/SRDhcprelay.params
@@ -2,7 +2,7 @@
<testcases>1,2,11,12,21,22,31,41,51,61,71</testcases>
<GRAPH>
- <nodeCluster>VM</nodeCluster>
+ <nodeCluster>Fabric</nodeCluster>
<builds>20</builds>
</GRAPH>
diff --git a/TestON/tests/USECASE/SegmentRouting/SRDynamic/SRDynamic.params b/TestON/tests/USECASE/SegmentRouting/SRDynamic/SRDynamic.params
index 541c836..8beeccd 100755
--- a/TestON/tests/USECASE/SegmentRouting/SRDynamic/SRDynamic.params
+++ b/TestON/tests/USECASE/SegmentRouting/SRDynamic/SRDynamic.params
@@ -3,7 +3,7 @@
<testcases>1,2,3,4,5,6</testcases>
<GRAPH>
- <nodeCluster>BM</nodeCluster>
+ <nodeCluster>Fabric</nodeCluster>
<builds>20</builds>
</GRAPH>
diff --git a/TestON/tests/USECASE/SegmentRouting/SRDynamicConf/SRDynamicConf.params b/TestON/tests/USECASE/SegmentRouting/SRDynamicConf/SRDynamicConf.params
index 69062b3..551b649 100644
--- a/TestON/tests/USECASE/SegmentRouting/SRDynamicConf/SRDynamicConf.params
+++ b/TestON/tests/USECASE/SegmentRouting/SRDynamicConf/SRDynamicConf.params
@@ -2,7 +2,7 @@
<testcases>range(13, 251, 10)</testcases>
<GRAPH>
- <nodeCluster>VM</nodeCluster>
+ <nodeCluster>Fabric</nodeCluster>
<builds>20</builds>
</GRAPH>
diff --git a/TestON/tests/USECASE/SegmentRouting/SRHighAvailability/SRHighAvailability.params b/TestON/tests/USECASE/SegmentRouting/SRHighAvailability/SRHighAvailability.params
index 236a21c..0870eab 100644
--- a/TestON/tests/USECASE/SegmentRouting/SRHighAvailability/SRHighAvailability.params
+++ b/TestON/tests/USECASE/SegmentRouting/SRHighAvailability/SRHighAvailability.params
@@ -3,7 +3,7 @@
<testcases>1,2,3,4,5,6,7,8</testcases>
<GRAPH>
- <nodeCluster>BM</nodeCluster>
+ <nodeCluster>Fabric</nodeCluster>
<builds>20</builds>
</GRAPH>
diff --git a/TestON/tests/USECASE/SegmentRouting/SRLinkFailure/SRLinkFailure.params b/TestON/tests/USECASE/SegmentRouting/SRLinkFailure/SRLinkFailure.params
index 098f9da..8e0bbbf 100755
--- a/TestON/tests/USECASE/SegmentRouting/SRLinkFailure/SRLinkFailure.params
+++ b/TestON/tests/USECASE/SegmentRouting/SRLinkFailure/SRLinkFailure.params
@@ -3,7 +3,7 @@
<testcases>1,2,4,5</testcases>
<GRAPH>
- <nodeCluster>BM</nodeCluster>
+ <nodeCluster>Fabric</nodeCluster>
<builds>20</builds>
</GRAPH>
diff --git a/TestON/tests/USECASE/SegmentRouting/SRMulticast/README.md b/TestON/tests/USECASE/SegmentRouting/SRMulticast/README.md
new file mode 100644
index 0000000..2fd9f95
--- /dev/null
+++ b/TestON/tests/USECASE/SegmentRouting/SRMulticast/README.md
@@ -0,0 +1 @@
+TBD
\ No newline at end of file
diff --git a/TestON/tests/USECASE/SegmentRouting/SRMulticast/SRMulticast.params b/TestON/tests/USECASE/SegmentRouting/SRMulticast/SRMulticast.params
new file mode 100644
index 0000000..0189744
--- /dev/null
+++ b/TestON/tests/USECASE/SegmentRouting/SRMulticast/SRMulticast.params
@@ -0,0 +1,43 @@
+<PARAMS>
+ <testcases>1</testcases>
+
+ <GRAPH>
+ <nodeCluster>Fabric</nodeCluster>
+ <builds>20</builds>
+ </GRAPH>
+
+ <SCALE>
+ <size>1</size>
+ <max>1</max>
+ </SCALE>
+
+ <DEPENDENCY>
+ <useCommonConf>False</useCommonConf>
+ <useCommonTopo>True</useCommonTopo>
+ <topology>trellis_fabric.py</topology>
+ <lib>routinglib.py,trellislib.py</lib>
+ </DEPENDENCY>
+
+ <ENV>
+ <cellName>productionCell</cellName>
+ <cellApps>drivers,segmentrouting,openflow,fpm,netcfghostprovider</cellApps>
+ </ENV>
+
+ <GIT>
+ <pull>False</pull>
+ <branch>master</branch>
+ </GIT>
+
+ <CTRL>
+ <port>6653</port>
+ </CTRL>
+
+ <timers>
+ <LinkDiscovery>12</LinkDiscovery>
+ <SwitchDiscovery>12</SwitchDiscovery>
+ </timers>
+
+ <SLEEP>
+ <startup>10</startup>
+ </SLEEP>
+</PARAMS>
diff --git a/TestON/tests/USECASE/SegmentRouting/SRMulticast/SRMulticast.py b/TestON/tests/USECASE/SegmentRouting/SRMulticast/SRMulticast.py
new file mode 100644
index 0000000..616ba4c
--- /dev/null
+++ b/TestON/tests/USECASE/SegmentRouting/SRMulticast/SRMulticast.py
@@ -0,0 +1,198 @@
+class SRMulticast:
+ def __init__( self ):
+ self.default = ''
+
+ def CASE1( self, main ):
+ """
+ Sets up a single ONOS instance
+ Starts a 2x2 topology of hardware switches
+ """
+ try:
+ from tests.USECASE.SegmentRouting.SRMulticast.dependencies.SRMulticastTest import SRMulticastTest
+ except ImportError:
+ main.log.error( "SRMulticastTest not found. Exiting the test" )
+ main.cleanAndExit()
+ try:
+ main.funcs
+ except ( NameError, AttributeError ):
+ main.funcs = SRMulticastTest()
+ main.funcs.runTest( main,
+ test_idx=1,
+ topology='2x2',
+ onosNodes=1,
+ description="TBD" )
+
+ def CASE01( self, main ):
+ """
+ Sets up 3 ONOS instances, starts a 2x5 topology
+ Create a Multicast flow between a source and sink on the same dual-tor leaf
+ Verify flows and groups
+ Verify traffic
+ Remove sink
+ Verify flows and groups
+ """
+ pass
+
+ def CASE02( self, main ):
+ """
+ Sets up 3 ONOS instances, starts a 2x5 topology
+ Create a Multicast flow between a source and sink on different dual-tor leaves
+ Verify flows and groups
+ Verify traffic
+ Remove sink
+ Verify flows and groups
+ """
+ pass
+
+ def CASE03( self, main ):
+ """
+ Sets up 3 ONOS instances, starts a 2x5 topology
+ Create a Multicast flow between a source and sink on different leaves (sink on single-tor)
+ Verify flows and groups
+ Verify traffic
+ Remove sink
+ Verify flows and groups
+ """
+ pass
+
+ def CASE04( self, main ):
+ """
+ Sets up 3 ONOS instances, starts a 2x5 topology
+ Combines CASE01 and CASE02
+ Verify flows and groups
+ Verify traffic
+ Remove sinks
+ Verify flows and groups
+ """
+ pass
+
+ def CASE05( self, main ):
+ """
+ Sets up 3 ONOS instances, starts a 2x5 topology
+ Combines CASE02 and CASE03
+ Verify flows and groups
+ Verify traffic
+ Remove sinks
+ Verify flows and groups
+ """
+ pass
+
+ def CASE06( self, main ):
+ """
+ Sets up 3 ONOS instances, starts a 2x5 topology
+ Combines CASE01 and CASE03
+ Verify flows and groups
+ Verify traffic
+ Remove sinks
+ Verify flows and groups
+ """
+ pass
+
+ def CASE07( self, main ):
+ """
+ Sets up 3 ONOS instances, starts a 2x5 topology
+ Combines CASE01, CASE02 and CASE03
+ Verify flows and groups
+ Verify traffic
+ Remove sinks
+ Verify flows and groups
+ """
+ pass
+
+ def CASE08( self, main ):
+ """
+ Sets up 3 ONOS instances, starts a 2x5 topology
+ Combines CASE07 with route removal
+ Verify flows and groups
+ """
+ pass
+
+ def CASE101( self, main ):
+ """
+ Sets up 3 ONOS instances, starts a 2x5 topology
+ Combines CASE07 with a link failure (link ingress-spine)
+ Verify flows and groups
+ Verify traffic
+ """
+ pass
+
+ def CASE102( self, main ):
+ """
+ Sets up 3 ONOS instances, starts a 2x5 topology
+ Combines CASE07 with a link failure (link spine-egress-dt-leaf)
+ Verify flows and groups
+ Verify traffic
+ """
+ pass
+
+ def CASE103( self, main ):
+ """
+ Sets up 3 ONOS instances, starts a 2x5 topology
+ Combines CASE07 with a link failure (link spine-egress-st-leaf)
+ Verify flows and groups
+ Verify traffic
+ """
+ pass
+
+ def CASE201( self, main ):
+ """
+ Sets up 3 ONOS instances, starts a 2x5 topology
+ Combines CASE07 with spine failure
+ Verify flows and groups
+ Verify traffic
+ """
+ pass
+
+ def CASE202( self, main ):
+ """
+ Sets up 3 ONOS instances, starts a 2x5 topology
+ Combines CASE07 with ingress failure and recovery
+ Verify flows and groups are removed (failure)
+ Verify flows and groups (recovery)
+ Verify traffic (recovery)
+ """
+ pass
+
+ def CASE203( self, main ):
+ """
+ Sets up 3 ONOS instances, starts a 2x5 topology
+ Combines CASE07 with egress-dt-leaf failure and recovery
+ Verify flows and groups are removed for the failing sink (failure)
+ Verify traffic on remaining sinks (failure)
+ Verify flows and groups (recovery)
+ Verify traffic (recovery)
+ """
+ pass
+
+ def CASE204( self, main ):
+ """
+ Sets up 3 ONOS instances, starts a 2x5 topology
+ Combines CASE07 with egress-st-leaf failure and recovery
+ Verify flows and groups are removed for the failing sink (failure)
+ Verify traffic on remaining sinks (failure)
+ Verify flows and groups (recovery)
+ Verify traffic (recovery)
+ """
+ pass
+
+ def CASE205( self, main ):
+ """
+ Sets up 3 ONOS instances, starts a 2x5 topology
+ Combines CASE07 with egress leaves failure and recovery
+ Verify flows and groups are removed for the failing sinks (failure)
+ Verify traffic on remaining sink (failure)
+ Verify flows and groups (recovery)
+ Verify traffic (recovery)
+ """
+ pass
+
+ def CASE301( self, main ):
+ """
+ Sets up 3 ONOS instances, starts a 2x5 topology
+ Combines CASE07 with ONOS failure and recovery
+ Verify flows and groups (failure)
+ Verify traffic (failure)
+ Verify flows and groups (recovery)
+ Verify traffic (recovery)
+ """
+ pass
diff --git a/TestON/tests/USECASE/SegmentRouting/SRMulticast/SRMulticast.topo b/TestON/tests/USECASE/SegmentRouting/SRMulticast/SRMulticast.topo
new file mode 100644
index 0000000..34a2013
--- /dev/null
+++ b/TestON/tests/USECASE/SegmentRouting/SRMulticast/SRMulticast.topo
@@ -0,0 +1,166 @@
+<TOPOLOGY>
+ <COMPONENT>
+ <ONOScell>
+ <host>localhost</host> # ONOS "bench" machine
+ <user>sdn</user>
+ <password>rocks</password>
+ <type>OnosClusterDriver</type>
+ <connect_order>1</connect_order>
+ <COMPONENTS>
+ <cluster_name></cluster_name> # Used as a prefix for cluster components. Defaults to 'ONOS'
+ <diff_clihost></diff_clihost> # set True if the CLI runs on a host other than localhost; OC# will be used if True
+ <karaf_username></karaf_username>
+ <karaf_password></karaf_password>
+ <web_user></web_user>
+ <web_pass></web_pass>
+ <rest_port></rest_port>
+ <prompt></prompt> # TODO: we technically need a few of these, one per component
+ <onos_home></onos_home> # defines where onos home is
+ <nodes>1</nodes> # number of nodes in the cluster
+ </COMPONENTS>
+ </ONOScell>
+
+ <OFDPASwitchLeaf205>
+ <host>10.128.0.205</host>
+ <user>root</user>
+ <password>onl</password>
+ <type>OFDPASwitchDriver</type>
+ <connect_order>2</connect_order>
+ <COMPONENTS>
+ <shortName>leaf205</shortName>
+ <dpid>0x205</dpid>
+ <port1>49</port1>
+ <link1>OFDPASwitchSpine227</link1>
+ <port2>51</port2>
+ <link2>OFDPASwitchSpine228</link2>
+ <port3>33</port3>
+ <link3>Host1</link3>
+ <port4>44</port4>
+ <link4>Host2</link4>
+ </COMPONENTS>
+ </OFDPASwitchLeaf205>
+
+ <OFDPASwitchLeaf206>
+ <host>10.128.0.206</host>
+ <user>root</user>
+ <password>onl</password>
+ <type>OFDPASwitchDriver</type>
+ <connect_order>3</connect_order>
+ <COMPONENTS>
+ <shortName>leaf206</shortName>
+ <dpid>0x206</dpid>
+ <port1>49</port1>
+ <link1>OFDPASwitchSpine227</link1>
+ <port2>51</port2>
+ <link2>OFDPASwitchSpine228</link2>
+ <port3>33</port3>
+ <link3>Host3</link3>
+ <port4>44</port4>
+ <link4>Host4</link4>
+ </COMPONENTS>
+ </OFDPASwitchLeaf206>
+
+ <OFDPASwitchSpine227>
+ <host>10.128.0.227</host>
+ <user>root</user>
+ <password>onl</password>
+ <type>OFDPASwitchDriver</type>
+ <connect_order>4</connect_order>
+ <COMPONENTS>
+ <shortName>spine227</shortName>
+ <dpid>0x227</dpid>
+ <port1>25</port1>
+ <link1>OFDPASwitchLeaf205</link1>
+ <port2>27</port2>
+ <link2>OFDPASwitchLeaf206</link2>
+ </COMPONENTS>
+ </OFDPASwitchSpine227>
+
+ <OFDPASwitchSpine228>
+ <host>10.128.0.228</host>
+ <user>root</user>
+ <password>onl</password>
+ <type>OFDPASwitchDriver</type>
+ <connect_order>5</connect_order>
+ <COMPONENTS>
+ <shortName>spine228</shortName>
+ <dpid>0x228</dpid>
+ <port1>25</port1>
+ <link1>OFDPASwitchLeaf205</link1>
+ <port2>27</port2>
+ <link2>OFDPASwitchLeaf206</link2>
+ </COMPONENTS>
+ </OFDPASwitchSpine228>
+
+ <Host1>
+ <host>10.128.100.58</host>
+ <user>mininet</user>
+ <password>mininet</password>
+ <type>HostDriver</type>
+ <connect_order>6</connect_order>
+ <COMPONENTS>
+ <ip>10.0.10.10</ip>
+ <ip6></ip6>
+ <shortName>h1</shortName>
+ <port1>0</port1>
+ <link1>OFDPASwitchLeaf205</link1>
+ </COMPONENTS>
+ </Host1>
+
+ <Host2>
+ <host>10.128.100.59</host>
+ <user>mininet</user>
+ <password>mininet</password>
+ <type>HostDriver</type>
+ <connect_order>7</connect_order>
+ <COMPONENTS>
+ <ip>10.0.10.20</ip>
+ <ip6></ip6>
+ <shortName>h2</shortName>
+ <port1>0</port1>
+ <link1>OFDPASwitchLeaf205</link1>
+ </COMPONENTS>
+ </Host2>
+
+ <Host3>
+ <host>10.128.100.60</host>
+ <user>mininet</user>
+ <password>mininet</password>
+ <type>HostDriver</type>
+ <connect_order>8</connect_order>
+ <COMPONENTS>
+ <ip>10.0.20.10</ip>
+ <ip6></ip6>
+ <shortName>h3</shortName>
+ <port1>0</port1>
+ <link1>OFDPASwitchLeaf206</link1>
+ </COMPONENTS>
+ </Host3>
+
+ <Host4>
+ <host>10.128.100.61</host>
+ <user>mininet</user>
+ <password>mininet</password>
+ <type>HostDriver</type>
+ <connect_order>9</connect_order>
+ <COMPONENTS>
+ <ip>10.0.20.20</ip>
+ <ip6></ip6>
+ <shortName>h4</shortName>
+ <port1>0</port1>
+ <link1>OFDPASwitchLeaf206</link1>
+ </COMPONENTS>
+ </Host4>
+
+ <NetworkBench>
+ <host>localhost</host>
+ <user>sdn</user>
+ <password>rocks</password>
+ <type>NetworkDriver</type>
+ <connect_order>10</connect_order>
+ <COMPONENTS>
+ </COMPONENTS>
+ </NetworkBench>
+
+ </COMPONENT>
+</TOPOLOGY>
diff --git a/TestON/tests/USECASE/SegmentRouting/SRMulticast/__init__.py b/TestON/tests/USECASE/SegmentRouting/SRMulticast/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/TestON/tests/USECASE/SegmentRouting/SRMulticast/__init__.py
diff --git a/TestON/tests/USECASE/SegmentRouting/SRMulticast/dependencies/SRMulticastTest.py b/TestON/tests/USECASE/SegmentRouting/SRMulticast/dependencies/SRMulticastTest.py
new file mode 100644
index 0000000..e822c29
--- /dev/null
+++ b/TestON/tests/USECASE/SegmentRouting/SRMulticast/dependencies/SRMulticastTest.py
@@ -0,0 +1,61 @@
+"""
+Copyright 2018 Open Networking Foundation ( ONF )
+
+Please refer questions to either the onos test mailing list at <onos-test@onosproject.org>,
+the System Testing Plans and Results wiki page at <https://wiki.onosproject.org/x/voMg>,
+or the System Testing Guide page at <https://wiki.onosproject.org/x/WYQg>
+
+ TestON is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 2 of the License, or
+ ( at your option ) any later version.
+
+ TestON is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with TestON. If not, see <http://www.gnu.org/licenses/>.
+"""
+
+from tests.USECASE.SegmentRouting.dependencies.Testcaselib import Testcaselib as run
+
+class SRMulticastTest ():
+
+ def __init__( self ):
+ self.default = ''
+ self.topo = dict()
+ # ( number of spine switches, number of leaf switches, dual-homed, description, minFlowCount per leaf )
+ self.topo[ '2x2' ] = ( 2, 2, False, '2x2 leaf-spine topology', 1 )
+ self.switchNames = {}
+ self.switchNames[ '2x2' ] = [ "leaf205", "leaf206", "spine227", "spine228" ]
+
+ def runTest( self, main, test_idx, topology, onosNodes, description, vlan = [] ):
+ skipPackage = False
+ init = False
+ if not hasattr( main, 'apps' ):
+ init = True
+ run.initTest( main )
+ # Skip onos packaging if the cluster size stays the same
+ if not init and onosNodes == main.Cluster.numCtrls:
+ skipPackage = True
+
+ main.case( '%s, with %s and %d ONOS instance%s' %
+ ( description, self.topo[ topology ][ 3 ], onosNodes, 's' if onosNodes > 1 else '' ) )
+
+ main.cfgName = 'CASE%01d%01d' % ( test_idx / 10, ( ( test_idx - 1 ) % 10 ) % 4 + 1 )
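+ # Maps test_idx to a config name, e.g. test_idx=1 -> CASE01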
+ main.Cluster.setRunningNode( onosNodes )
+ run.installOnos( main, skipPackage=skipPackage, cliSleep=5 )
+ if hasattr( main, 'Mininet1' ):
+ # TODO Mininet implementation
+ pass
+ else:
+ # Run the test with physical devices
+ run.connectToPhysicalNetwork( main, self.switchNames[ topology ] )
+ # Check if the devices are up
+ run.checkDevices( main, switches=len( self.switchNames[ topology ] ) )
+ # Check the flows against the devices ( minFlowCount per leaf times the number of leaves )
+ run.checkFlows( main, minFlowCount=self.topo[ topology ][ 4 ] * self.topo[ topology ][ 1 ], sleep=5 )
+ # Clean up the environment
+ run.cleanup( main, physical=( not hasattr( main, 'Mininet1' ) ) )
diff --git a/TestON/tests/USECASE/SegmentRouting/SRMulticast/dependencies/__init__.py b/TestON/tests/USECASE/SegmentRouting/SRMulticast/dependencies/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/TestON/tests/USECASE/SegmentRouting/SRMulticast/dependencies/__init__.py
diff --git a/TestON/tests/USECASE/SegmentRouting/SRMulticast/dependencies/conf/bgpdbgp1.conf b/TestON/tests/USECASE/SegmentRouting/SRMulticast/dependencies/conf/bgpdbgp1.conf
new file mode 100644
index 0000000..8870fb4
--- /dev/null
+++ b/TestON/tests/USECASE/SegmentRouting/SRMulticast/dependencies/conf/bgpdbgp1.conf
@@ -0,0 +1,81 @@
+log file /var/log/quagga/bgpdbgp1.log
+hostname bgp1
+password quagga
+!
+! Different next hop for IPv4
+!
+ip prefix-list 1 seq 10 permit 10.0.2.0/24
+ip prefix-list 1 seq 20 permit 10.1.2.0/24
+ip prefix-list 1 seq 30 permit 10.0.3.0/24
+ip prefix-list 1 seq 40 permit 10.0.4.0/24
+!
+route-map NEXTHOP41 permit 10
+match ip address prefix-list 1
+set ip next-hop 10.0.1.254
+!
+!
+route-map NEXTHOP47 permit 10
+match ip address prefix-list 1
+set ip next-hop 10.0.7.254
+!
+! Different next hop for IPv6
+!
+ipv6 prefix-list 2 seq 10 permit 2000::200/120
+ipv6 prefix-list 2 seq 20 permit 2000::300/120
+!
+route-map NEXTHOP61 permit 10
+match ipv6 address prefix-list 2
+set ipv6 next-hop global 2000::1ff
+set ipv6 next-hop local 2000::1ff
+!
+!
+route-map NEXTHOP67 permit 10
+match ipv6 address prefix-list 2
+set ipv6 next-hop global 2000::7ff
+set ipv6 next-hop local 2000::7ff
+!
+! Basic router config
+!
+router bgp 65003
+bgp router-id 172.16.0.3
+timers bgp 3 9
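+! keepalive 3s, hold time 9s for fast failure detection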
+!
+! IPv4
+!
+neighbor 10.0.1.1 remote-as 65001
+neighbor 10.0.1.1 ebgp-multihop
+neighbor 10.0.1.1 timers connect 5
+neighbor 10.0.1.1 advertisement-interval 5
+neighbor 10.0.1.1 route-map NEXTHOP41 out
+!
+neighbor 2000::101 remote-as 65001
+neighbor 2000::101 timers connect 5
+neighbor 2000::101 advertisement-interval 1
+no neighbor 2000::101 activate
+!
+neighbor 10.0.7.1 remote-as 65002
+neighbor 10.0.7.1 ebgp-multihop
+neighbor 10.0.7.1 timers connect 5
+neighbor 10.0.7.1 advertisement-interval 5
+neighbor 10.0.7.1 route-map NEXTHOP47 out
+!
+neighbor 2000::701 remote-as 65002
+neighbor 2000::701 timers connect 5
+neighbor 2000::701 advertisement-interval 1
+no neighbor 2000::701 activate
+!
+network 10.0.2.0/24
+network 10.1.2.0/24
+network 10.0.3.0/24
+network 10.0.4.0/24
+!
+! IPv6
+!
+address-family ipv6
+network 2000::200/120
+network 2000::300/120
+neighbor 2000::101 activate
+neighbor 2000::101 route-map NEXTHOP61 out
+neighbor 2000::701 activate
+neighbor 2000::701 route-map NEXTHOP67 out
+exit-address-family
diff --git a/TestON/tests/USECASE/SegmentRouting/SRMulticast/dependencies/conf/bgpdbgp2.conf b/TestON/tests/USECASE/SegmentRouting/SRMulticast/dependencies/conf/bgpdbgp2.conf
new file mode 100644
index 0000000..e554de4
--- /dev/null
+++ b/TestON/tests/USECASE/SegmentRouting/SRMulticast/dependencies/conf/bgpdbgp2.conf
@@ -0,0 +1,81 @@
+log file /var/log/quagga/bgpdbgp2.log
+hostname bgp2
+password quagga
+!
+! Different next hop for IPv4
+!
+ip prefix-list 1 seq 10 permit 10.0.2.0/24
+ip prefix-list 1 seq 20 permit 10.1.2.0/24
+ip prefix-list 1 seq 30 permit 10.0.3.0/24
+ip prefix-list 1 seq 40 permit 10.0.4.0/24
+!
+route-map NEXTHOP45 permit 10
+match ip address prefix-list 1
+set ip next-hop 10.0.5.254
+!
+!
+route-map NEXTHOP46 permit 10
+match ip address prefix-list 1
+set ip next-hop 10.0.6.254
+!
+! Different next hop for IPv6
+!
+ipv6 prefix-list 2 seq 10 permit 2000::200/120
+ipv6 prefix-list 2 seq 20 permit 2000::300/120
+!
+route-map NEXTHOP65 permit 10
+match ipv6 address prefix-list 2
+set ipv6 next-hop global 2000::5ff
+set ipv6 next-hop local 2000::5ff
+!
+!
+route-map NEXTHOP66 permit 10
+match ipv6 address prefix-list 2
+set ipv6 next-hop global 2000::6ff
+set ipv6 next-hop local 2000::6ff
+!
+! Basic router config
+!
+router bgp 65003
+bgp router-id 172.16.0.4
+timers bgp 3 9
+!
+! IPv4
+!
+neighbor 10.0.5.1 remote-as 65001
+neighbor 10.0.5.1 ebgp-multihop
+neighbor 10.0.5.1 timers connect 5
+neighbor 10.0.5.1 advertisement-interval 5
+neighbor 10.0.5.1 route-map NEXTHOP45 out
+!
+neighbor 2000::501 remote-as 65001
+neighbor 2000::501 timers connect 5
+neighbor 2000::501 advertisement-interval 1
+no neighbor 2000::501 activate
+!
+neighbor 10.0.6.1 remote-as 65002
+neighbor 10.0.6.1 ebgp-multihop
+neighbor 10.0.6.1 timers connect 5
+neighbor 10.0.6.1 advertisement-interval 5
+neighbor 10.0.6.1 route-map NEXTHOP46 out
+!
+neighbor 2000::601 remote-as 65002
+neighbor 2000::601 timers connect 5
+neighbor 2000::601 advertisement-interval 1
+no neighbor 2000::601 activate
+!
+network 10.0.2.0/24
+network 10.1.2.0/24
+network 10.0.3.0/24
+network 10.0.4.0/24
+!
+! IPv6
+!
+address-family ipv6
+network 2000::200/120
+network 2000::300/120
+neighbor 2000::501 activate
+neighbor 2000::501 route-map NEXTHOP65 out
+neighbor 2000::601 activate
+neighbor 2000::601 route-map NEXTHOP66 out
+exit-address-family
diff --git a/TestON/tests/USECASE/SegmentRouting/SRMulticast/dependencies/conf/bgpdr1.conf b/TestON/tests/USECASE/SegmentRouting/SRMulticast/dependencies/conf/bgpdr1.conf
new file mode 100644
index 0000000..9e526b8
--- /dev/null
+++ b/TestON/tests/USECASE/SegmentRouting/SRMulticast/dependencies/conf/bgpdr1.conf
@@ -0,0 +1,42 @@
+log file /var/log/quagga/bgpdr1.log
+hostname r1
+password quagga
+!
+! Basic router config
+!
+router bgp 65001
+bgp router-id 10.0.1.1
+timers bgp 3 9
+!
+! IPv4
+!
+neighbor 10.0.1.2 remote-as 65003
+neighbor 10.0.1.2 ebgp-multihop
+neighbor 10.0.1.2 timers connect 5
+neighbor 10.0.1.2 advertisement-interval 5
+!
+neighbor 2000::102 remote-as 65003
+neighbor 2000::102 timers connect 5
+neighbor 2000::102 advertisement-interval 1
+no neighbor 2000::102 activate
+!
+neighbor 10.0.5.2 remote-as 65003
+neighbor 10.0.5.2 ebgp-multihop
+neighbor 10.0.5.2 timers connect 5
+neighbor 10.0.5.2 advertisement-interval 5
+!
+neighbor 2000::502 remote-as 65003
+neighbor 2000::502 timers connect 5
+neighbor 2000::502 advertisement-interval 1
+no neighbor 2000::502 activate
+!
+network 10.0.99.0/24
+!
+! IPv6
+!
+address-family ipv6
+network 2000::7700/120
+network 2000::9900/120
+neighbor 2000::102 activate
+neighbor 2000::502 activate
+exit-address-family
diff --git a/TestON/tests/USECASE/SegmentRouting/SRMulticast/dependencies/conf/bgpdr2.conf b/TestON/tests/USECASE/SegmentRouting/SRMulticast/dependencies/conf/bgpdr2.conf
new file mode 100644
index 0000000..49553e2
--- /dev/null
+++ b/TestON/tests/USECASE/SegmentRouting/SRMulticast/dependencies/conf/bgpdr2.conf
@@ -0,0 +1,42 @@
+log file /var/log/quagga/bgpdr2.log
+hostname r2
+password quagga
+!
+! Basic router config
+!
+router bgp 65002
+bgp router-id 10.0.6.1
+timers bgp 3 9
+!
+! IPv4
+!
+neighbor 10.0.6.2 remote-as 65003
+neighbor 10.0.6.2 ebgp-multihop
+neighbor 10.0.6.2 timers connect 5
+neighbor 10.0.6.2 advertisement-interval 5
+!
+neighbor 2000::602 remote-as 65003
+neighbor 2000::602 timers connect 5
+neighbor 2000::602 advertisement-interval 1
+no neighbor 2000::602 activate
+!
+neighbor 10.0.7.2 remote-as 65003
+neighbor 10.0.7.2 ebgp-multihop
+neighbor 10.0.7.2 timers connect 5
+neighbor 10.0.7.2 advertisement-interval 5
+!
+neighbor 2000::702 remote-as 65003
+neighbor 2000::702 timers connect 5
+neighbor 2000::702 advertisement-interval 1
+no neighbor 2000::702 activate
+!
+network 10.0.99.0/24
+!
+! IPv6
+!
+address-family ipv6
+network 2000::8800/120
+network 2000::9900/120
+neighbor 2000::602 activate
+neighbor 2000::702 activate
+exit-address-family
diff --git a/TestON/tests/USECASE/SegmentRouting/SRMulticast/dependencies/conf/dhcpd.conf b/TestON/tests/USECASE/SegmentRouting/SRMulticast/dependencies/conf/dhcpd.conf
new file mode 100644
index 0000000..aa559d2
--- /dev/null
+++ b/TestON/tests/USECASE/SegmentRouting/SRMulticast/dependencies/conf/dhcpd.conf
@@ -0,0 +1,55 @@
+ddns-update-style none;
+
+default-lease-time 600;
+max-lease-time 7200;
+
+option domain-name-servers 8.8.8.8, 8.8.4.4;
+option domain-name "trellis.local";
+
+subnet 10.0.2.0 netmask 255.255.255.0 {
+ range 10.0.2.100 10.0.2.240;
+ option routers 10.0.2.254;
+}
+
+subnet 10.1.2.0 netmask 255.255.255.0 {
+ range 10.1.2.100 10.1.2.240;
+ option routers 10.1.2.254;
+}
+
+subnet 10.0.3.0 netmask 255.255.255.0 {
+ range 10.0.3.100 10.0.3.240;
+ option routers 10.0.3.254;
+}
+
+subnet 10.0.4.0 netmask 255.255.255.0 {
+ range 10.0.4.100 10.0.4.240;
+ option routers 10.0.4.254;
+}
+
+subnet 10.0.99.3 netmask 255.255.255.255 {
+}
+
+host h1 {
+ hardware ethernet 00:aa:00:00:00:01;
+ fixed-address 10.0.2.1;
+}
+
+host h2 {
+ hardware ethernet 00:aa:00:00:00:02;
+ fixed-address 10.0.2.2;
+}
+
+host h3 {
+ hardware ethernet 00:aa:00:00:00:03;
+ fixed-address 10.0.3.1;
+}
+
+host h4 {
+ hardware ethernet 00:aa:00:00:00:04;
+ fixed-address 10.0.3.2;
+}
+
+host dh1 {
+ hardware ethernet 00:cc:00:00:00:01;
+ fixed-address 10.1.2.1;
+}
diff --git a/TestON/tests/USECASE/SegmentRouting/SRMulticast/dependencies/conf/dhcpd6.conf b/TestON/tests/USECASE/SegmentRouting/SRMulticast/dependencies/conf/dhcpd6.conf
new file mode 100644
index 0000000..526de85
--- /dev/null
+++ b/TestON/tests/USECASE/SegmentRouting/SRMulticast/dependencies/conf/dhcpd6.conf
@@ -0,0 +1,37 @@
+default-lease-time 600;
+max-lease-time 7200;
+
+option dhcp6.next-hop code 242 = ip6-address;
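+# Site-specific option (code 242); assumed to be consumed by the ONOS DHCP relay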
+
+subnet6 2000::200/120 {
+ range6 2000::260 2000::2fe;
+ option dhcp6.next-hop 2000::02ff;
+}
+
+subnet6 2000::300/120 {
+ range6 2000::360 2000::3fe;
+ option dhcp6.next-hop 2000::03ff;
+}
+
+subnet6 2000::9903/128 {
+}
+
+host h1v6 {
+ hardware ethernet 00:bb:00:00:00:01;
+ fixed-address6 2000::201;
+}
+
+host h2v6 {
+ hardware ethernet 00:bb:00:00:00:02;
+ fixed-address6 2000::202;
+}
+
+host h3v6 {
+ hardware ethernet 00:bb:00:00:00:03;
+ fixed-address6 2000::301;
+}
+
+host h4v6 {
+ hardware ethernet 00:bb:00:00:00:04;
+ fixed-address6 2000::302;
+}
diff --git a/TestON/tests/USECASE/SegmentRouting/SRMulticast/dependencies/conf/zebradbgp1.conf b/TestON/tests/USECASE/SegmentRouting/SRMulticast/dependencies/conf/zebradbgp1.conf
new file mode 100644
index 0000000..51991a4
--- /dev/null
+++ b/TestON/tests/USECASE/SegmentRouting/SRMulticast/dependencies/conf/zebradbgp1.conf
@@ -0,0 +1,9 @@
+log file /var/log/quagga/zebradbgp1.log
+hostname zebra-bgp1
+password quagga
+!
+! Default route via virtual management switch
+!
+ip route 0.0.0.0/0 172.16.0.1
+!
+fpm connection ip 192.168.56.11 port 2620
diff --git a/TestON/tests/USECASE/SegmentRouting/SRMulticast/dependencies/conf/zebradbgp2.conf b/TestON/tests/USECASE/SegmentRouting/SRMulticast/dependencies/conf/zebradbgp2.conf
new file mode 100644
index 0000000..dce218d
--- /dev/null
+++ b/TestON/tests/USECASE/SegmentRouting/SRMulticast/dependencies/conf/zebradbgp2.conf
@@ -0,0 +1,9 @@
+log file /var/log/quagga/zebradbgp2.log
+hostname zebra-bgp2
+password quagga
+!
+! Default route via virtual management switch
+!
+ip route 0.0.0.0/0 172.16.0.1
+!
+fpm connection ip 192.168.56.11 port 2620
diff --git a/TestON/tests/USECASE/SegmentRouting/SROnosFailure/SROnosFailure.params b/TestON/tests/USECASE/SegmentRouting/SROnosFailure/SROnosFailure.params
index 9a0a8a1..778370e 100755
--- a/TestON/tests/USECASE/SegmentRouting/SROnosFailure/SROnosFailure.params
+++ b/TestON/tests/USECASE/SegmentRouting/SROnosFailure/SROnosFailure.params
@@ -3,7 +3,7 @@
<testcases>1,2,3</testcases>
<GRAPH>
- <nodeCluster>BM</nodeCluster>
+ <nodeCluster>Fabric</nodeCluster>
<builds>20</builds>
</GRAPH>
diff --git a/TestON/tests/USECASE/SegmentRouting/SRRouting/SRRouting.params b/TestON/tests/USECASE/SegmentRouting/SRRouting/SRRouting.params
index b5feb3a..39448c2 100644
--- a/TestON/tests/USECASE/SegmentRouting/SRRouting/SRRouting.params
+++ b/TestON/tests/USECASE/SegmentRouting/SRRouting/SRRouting.params
@@ -2,7 +2,7 @@
<testcases>1,2,3,4,5,6,7,8,9</testcases>
<GRAPH>
- <nodeCluster>VM</nodeCluster>
+ <nodeCluster>Fabric</nodeCluster>
<builds>20</builds>
</GRAPH>
diff --git a/TestON/tests/USECASE/SegmentRouting/SRRouting/dependencies/SRRoutingTest.py b/TestON/tests/USECASE/SegmentRouting/SRRouting/dependencies/SRRoutingTest.py
index f6dbf03..45e8c1b 100644
--- a/TestON/tests/USECASE/SegmentRouting/SRRouting/dependencies/SRRoutingTest.py
+++ b/TestON/tests/USECASE/SegmentRouting/SRRouting/dependencies/SRRoutingTest.py
@@ -46,7 +46,7 @@
if not init and onosNodes == main.Cluster.numCtrls:
skipPackage = True
- main.case( '%s, ONOS instance%s' % ( description, onosNodes ) )
+ main.case( '%s, ONOS cluster size: %s' % ( description, onosNodes ) )
main.cfgName = 'COMCAST_CONFIG_ipv4=%d_ipv6=%d_dhcp=%d_routers=%d' % \
( ipv4, ipv6, dhcp, routers )
@@ -103,9 +103,11 @@
# Test switch failures
if switchFailure:
for switch, expected in main.switchFailureChart.items():
+ main.step( "Killing switch {}" % switch )
run.killSwitch( main, switch, expected['switches_after_failure'], expected['links_after_failure'] )
SRRoutingTest.runChecks( main, test_idx, countFlowsGroups )
+ main.step( "Restoring switch {}" % switch )
run.recoverSwitch( main, switch, expected['switches_before_failure'], expected['links_before_failure'] )
SRRoutingTest.runChecks( main, test_idx, countFlowsGroups )
@@ -117,9 +119,11 @@
linksBefore = info['links_before']
linksAfter = info['links_after']
+ main.step( "Killing links {}" % linksToRemove )
run.killLinkBatch( main, linksToRemove, linksAfter )
SRRoutingTest.runChecks( main, test_idx, countFlowsGroups )
+ main.step( "Restoring links {}" % linksToRemove )
run.restoreLinkBatch( main, linksToRemove, linksBefore )
SRRoutingTest.runChecks( main, test_idx, countFlowsGroups )
diff --git a/TestON/tests/USECASE/SegmentRouting/SRRouting/dependencies/conf/zebradbgp1.conf b/TestON/tests/USECASE/SegmentRouting/SRRouting/dependencies/conf/zebradbgp1.conf
index b30eaa1..d3dac23 100644
--- a/TestON/tests/USECASE/SegmentRouting/SRRouting/dependencies/conf/zebradbgp1.conf
+++ b/TestON/tests/USECASE/SegmentRouting/SRRouting/dependencies/conf/zebradbgp1.conf
@@ -6,4 +6,4 @@
!
ip route 0.0.0.0/0 172.16.0.1
!
-fpm connection ip 10.192.19.231 port 2620
+fpm connection ip 10.192.19.41 port 2620
diff --git a/TestON/tests/USECASE/SegmentRouting/SRRouting/dependencies/conf/zebradbgp2.conf b/TestON/tests/USECASE/SegmentRouting/SRRouting/dependencies/conf/zebradbgp2.conf
index fe27a14..147bcbc 100644
--- a/TestON/tests/USECASE/SegmentRouting/SRRouting/dependencies/conf/zebradbgp2.conf
+++ b/TestON/tests/USECASE/SegmentRouting/SRRouting/dependencies/conf/zebradbgp2.conf
@@ -6,4 +6,4 @@
!
ip route 0.0.0.0/0 172.16.0.1
!
-fpm connection ip 10.192.19.231 port 2620
+fpm connection ip 10.192.19.41 port 2620
diff --git a/TestON/tests/USECASE/SegmentRouting/SRSwitchFailure/SRSwitchFailure.params b/TestON/tests/USECASE/SegmentRouting/SRSwitchFailure/SRSwitchFailure.params
index 098f9da..8e0bbbf 100755
--- a/TestON/tests/USECASE/SegmentRouting/SRSwitchFailure/SRSwitchFailure.params
+++ b/TestON/tests/USECASE/SegmentRouting/SRSwitchFailure/SRSwitchFailure.params
@@ -3,7 +3,7 @@
<testcases>1,2,4,5</testcases>
<GRAPH>
- <nodeCluster>BM</nodeCluster>
+ <nodeCluster>Fabric</nodeCluster>
<builds>20</builds>
</GRAPH>
diff --git a/TestON/tests/USECASE/SegmentRouting/dependencies/Testcaselib.py b/TestON/tests/USECASE/SegmentRouting/dependencies/Testcaselib.py
index 5455847..45d166e 100644
--- a/TestON/tests/USECASE/SegmentRouting/dependencies/Testcaselib.py
+++ b/TestON/tests/USECASE/SegmentRouting/dependencies/Testcaselib.py
@@ -316,6 +316,21 @@
tag + "_GroupsBefore" )
@staticmethod
+ def checkDevices( main, switches, tag="", sleep=10 ):
+ main.step(
+ "Check whether the switches count is equal to %s" % switches )
+ if tag == "":
+ tag = 'CASE%d' % main.CurrentTestCaseNumber
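+ # Only numoswitch is passed; numolink defaults to -1, so the link count check is skipped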
+ result = utilities.retry( main.Cluster.active( 0 ).CLI.checkStatus,
+ main.FALSE,
+ kwargs={ 'numoswitch': switches },
+ attempts=10,
+ sleep=sleep )
+ utilities.assert_equals( expect=main.TRUE, actual=result,
+ onpass="Device up successful",
+ onfail="Failed to boot up devices?" )
+
+ @staticmethod
def checkFlowsByDpid( main, dpid, minFlowCount, sleep=10 ):
main.step(
" Check whether the flow count of device %s is bigger than %s" % ( dpid, minFlowCount ) )
@@ -613,7 +628,7 @@
onfail="Failed to recover switch?" )
@staticmethod
- def cleanup( main ):
+ def cleanup( main, physical=False ):
"""
Stop Onos-cluster.
Stops Mininet
@@ -629,7 +644,8 @@
except ( NameError, AttributeError ):
main.utils = Utils()
- main.utils.mininetCleanup( main.Mininet1 )
+ if not physical:
+ main.utils.mininetCleanup( main.Mininet1 )
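+ # Physical runs have no Mininet instance to clean up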
main.utils.copyKarafLog( "CASE%d" % main.CurrentTestCaseNumber, before=True, includeCaseDesc=False )
diff --git a/TestON/tests/dependencies/Network.py b/TestON/tests/dependencies/Network.py
index 6377c0c..e5c2200 100644
--- a/TestON/tests/dependencies/Network.py
+++ b/TestON/tests/dependencies/Network.py
@@ -38,7 +38,7 @@
We will look into each of the network component handles to try
to find the attribute.
"""
- #FIXME: allow to call a specific driver
+ # FIXME: allow to call a specific driver
for component in self.components:
if hasattr( component, name ):
main.log.debug( "%s has attribute '%s'" % ( component.options[ 'name' ], name ) )
@@ -53,6 +53,6 @@
# Get a list of network components that are created in the test
self.components = []
for key, value in main.componentDictionary.items():
- if value[ 'type' ] in [ 'MininetCliDriver', 'RemoteMininetDriver', 'NetworkDriver' ] and hasattr( main, key ):
+ if value[ 'type' ] in [ 'MininetCliDriver', 'RemoteMininetDriver', 'NetworkDriver', 'OFDPASwitchDriver' ] and hasattr( main, key ):
self.components.append( getattr( main, key ) )
main.log.debug( "%s initialized with components: %s" % ( self.name, self.components ) )