Merge "Add tests on SegmentRouting - SRBridging (+ fix some bugs)"
diff --git a/TestON/JenkinsFile/FUNCJenkinsFile b/TestON/JenkinsFile/FUNCJenkinsFile
index de05cee..857ff54 100644
--- a/TestON/JenkinsFile/FUNCJenkinsFile
+++ b/TestON/JenkinsFile/FUNCJenkinsFile
@@ -15,7 +15,8 @@
"FUNCnetconf" : [wiki_link:prop["WikiPrefix"]+"-"+"FUNCnetconf", wiki_file:"FUNCnetconfWiki.txt"],
"FUNCgroup" : [wiki_link:prop["WikiPrefix"]+"-"+"FUNCgroup", wiki_file:"FUNCgroupWiki.txt"],
"FUNCintent" : [wiki_link:prop["WikiPrefix"]+"-"+"FUNCintent", wiki_file:"FUNCintentWiki.txt"],
-"FUNCintentRest" : [wiki_link:prop["WikiPrefix"]+"-"+"FUNCintentRest", wiki_file:"FUNCintentRestWiki.txt"]
+"FUNCintentRest" : [wiki_link:prop["WikiPrefix"]+"-"+"FUNCintentRest", wiki_file:"FUNCintentRestWiki.txt"],
+"FUNCformCluster" : [wiki_link:prop["WikiPrefix"]+"-"+"FUNCformCluster", wiki_file:"FUNCformClusterWiki.txt"]
]
table_name = "executed_test_tests"
result_name = "executed_test_results"
@@ -40,15 +41,18 @@
for ( test in tests.keySet() ){
tests[test].call()
}
-if( prop["manualRun"] == "false" ){
- def end = new Date()
- TimeDuration duration = TimeCategory.minus( end, now )
- slackSend( color:"#5816EE", message: "FUNC tests ended at: " + end.toString() + "\nTime took : " + duration )
+try{
+ if( prop["manualRun"] == "false" ){
+ def end = new Date()
+ TimeDuration duration = TimeCategory.minus( end, now )
+ slackSend( color:"#5816EE", message: "FUNC tests ended at: " + end.toString() + "\nTime took : " + duration )
+ }
}
+catch(all){}
// The testName should be the key from the FUNC map
def FUNCTest( testName, toBeRun, prop ) {
return {
- try{
+ catchError{
stage(testName) {
if ( toBeRun ){
workSpace = "/var/jenkins/workspace/"+testName
@@ -115,7 +119,7 @@
done
ls -al
cd '''
- if( prop["manualRun"] == "false" ){
+ if( prop["manualRun"] == "false" || prop["postResult"] == "true" ){
// Post Results
withCredentials([
string(credentialsId: 'db_pass', variable: 'pass'),
@@ -160,7 +164,7 @@
}
}
- if( prop["manualRun"] == "false" ){
+ if( prop["manualRun"] == "false" || prop["postResult"] == "true" ){
def post = build job: "Pipeline_postjob_VM", propagate: false,
parameters: [
string(name: 'Wiki_Contents', value: fileContents),
@@ -168,25 +172,21 @@
]
}
node("TestStation-VMs"){
- sh '''#!/bin/bash
-
- if [ -e ''' + workSpace + "/" + testName + "Result.txt ] && grep -q \"1\" " + workSpace + "/" + testName + "Result.txt" + '''
- then
- echo ''' + testName + " : All passed." + '''
- else
- echo ''' + testName + " : not all passed." + '''
- DoingThisToSetTheResultFalse
- fi'''
+ resultContents = readFile workSpace + "/" + testName + "Result.txt"
+ resultContents = resultContents.split("\n")
+ if( resultContents[ 0 ] == "1" ){
+ print "All passed"
+ }else{
+ print "Failed"
+ if( prop["manualRun"] == "false" )
+ slackSend(color:"FF0000", message: "[" + prop["ONOSBranch"] + "]" + testName + " : Failed!\n"
+ + resultContents[ 1 ] + "\n"
+ + "https://onos-jenkins.onlab.us/blue/organizations/jenkins/${env.JOB_NAME}/detail/${env.JOB_NAME}/${env.BUILD_NUMBER}/pipeline" )
+ Failed
+ }
}
}
}
- }catch (all) {
- catchError{
- if( prop["manualRun"] == "false" )
- slackSend(color:"FF0000", message: "[" + prop["ONOSBranch"] + "]" + testName + " : Failed!\n"
- + "https://onos-jenkins.onlab.us/blue/organizations/jenkins/${env.JOB_NAME}/detail/${env.JOB_NAME}/${env.BUILD_NUMBER}/pipeline" )
- Failed
- }
}
}
}
\ No newline at end of file
diff --git a/TestON/JenkinsFile/HAJenkinsFile b/TestON/JenkinsFile/HAJenkinsFile
index 685ffd6..8493f3c 100644
--- a/TestON/JenkinsFile/HAJenkinsFile
+++ b/TestON/JenkinsFile/HAJenkinsFile
@@ -41,15 +41,19 @@
for ( test in tests.keySet() ){
tests[test].call()
}
-if( prop["manualRun"] == "false" ){
- def end = new Date()
- TimeDuration duration = TimeCategory.minus( end, now )
- slackSend( color:"#5816EE", message: "HA tests ended at: " + end.toString() + "\nTime took : " + duration )
+try{
+ if( prop["manualRun"] == "false" ){
+ def end = new Date()
+ TimeDuration duration = TimeCategory.minus( end, now )
+ slackSend( color:"#5816EE", message: "HA tests ended at: " + end.toString() + "\nTime took : " + duration )
+ }
}
+catch(all){}
+
// The testName should be the key from the HA map
def HATest( testName, toBeRun, prop ) {
return {
- try{
+ catchError{
stage(testName) {
if ( toBeRun ){
workSpace = "/var/jenkins/workspace/"+testName
@@ -169,25 +173,21 @@
]
}
node("TestStation-VMs"){
- sh '''#!/bin/bash
-
- if [ -e ''' + workSpace + "/" + testName + "Result.txt ] && grep -q \"1\" " + workSpace + "/" + testName + "Result.txt" + '''
- then
- echo ''' + testName + " : All passed." + '''
- else
- echo ''' + testName + " : not all passed." + '''
- DoingThisToSetTheResultFalse
- fi'''
+ resultContents = readFile workSpace + "/" + testName + "Result.txt"
+ resultContents = resultContents.split("\n")
+ if( resultContents[ 0 ] == "1" ){
+ print "All passed"
+ }else{
+ print "Failed"
+ if( prop["manualRun"] == "false" )
+ slackSend(color:"FF0000", message: "[" + prop["ONOSBranch"] + "]" + testName + " : Failed!\n"
+ + resultContents[ 1 ] + "\n"
+ + "https://onos-jenkins.onlab.us/blue/organizations/jenkins/${env.JOB_NAME}/detail/${env.JOB_NAME}/${env.BUILD_NUMBER}/pipeline" )
+ Failed
+ }
}
}
}
- }catch (all) {
- catchError{
- if( prop["manualRun"] == "false" )
- slackSend(color:"FF0000", message: "[" + prop["ONOSBranch"] + "]" + testName + " : Failed!\n"
- + "https://onos-jenkins.onlab.us/blue/organizations/jenkins/${env.JOB_NAME}/detail/${env.JOB_NAME}/${env.BUILD_NUMBER}/pipeline" )
- Failed
- }
}
}
}
\ No newline at end of file
diff --git a/TestON/JenkinsFile/JenkinsfileTrigger b/TestON/JenkinsFile/JenkinsfileTrigger
index fd856eb..6b1da7b 100644
--- a/TestON/JenkinsFile/JenkinsfileTrigger
+++ b/TestON/JenkinsFile/JenkinsfileTrigger
@@ -1,18 +1,21 @@
#!groovy
// This is a Jenkinsfile for a scripted pipeline for the SCPF tests
// Define sets of tests
+previous_version = "1.11"
+before_previous_version = "1.10"
AllTheTests=
[
"FUNC":[
- "FUNCipv6Intent" : ["basic":true, "extra_A":false, "extra_B":false, "day":""],
- "FUNCoptical" : ["basic":true, "extra_A":false, "extra_B":false, "day":""],
- "FUNCflow" : ["basic":true, "extra_A":false, "extra_B":false, "day":""],
- "FUNCnetCfg": ["basic":true, "extra_A":false, "extra_B":false, "day":""],
- "FUNCovsdbtest" : ["basic":true, "extra_A":false, "extra_B":false, "day":""],
- "FUNCnetconf" : ["basic":true, "extra_A":false, "extra_B":false, "day":""],
- "FUNCgroup" : ["basic":true, "extra_A":false, "extra_B":false, "day":""],
- "FUNCintent" : ["basic":false, "extra_A":true, "extra_B":false, "day":""],
- "FUNCintentRest" : ["basic":false, "extra_A":false, "extra_B":true, "day":""]
+ "FUNCipv6Intent" : ["basic":true, "extra_A":false, "extra_B":false, "new_Test":false, "day":""],
+ "FUNCoptical" : ["basic":true, "extra_A":false, "extra_B":false, "new_Test":false, "day":""],
+ "FUNCflow" : ["basic":true, "extra_A":false, "extra_B":false, "new_Test":false, "day":""],
+ "FUNCnetCfg": ["basic":true, "extra_A":false, "extra_B":false, "new_Test":false, "day":""],
+ "FUNCovsdbtest" : ["basic":true, "extra_A":false, "extra_B":false, "new_Test":false, "day":""],
+ "FUNCnetconf" : ["basic":true, "extra_A":false, "extra_B":false, "new_Test":false, "day":""],
+ "FUNCgroup" : ["basic":true, "extra_A":false, "extra_B":false, "new_Test":false, "day":""],
+ "FUNCformCluster" :["basic":false, "extra_A":false, "extra_B":false, "new_Test":true, "day":""],
+ "FUNCintent" : ["basic":false, "extra_A":true, "extra_B":false, "new_Test":false, "day":""],
+ "FUNCintentRest" : ["basic":false, "extra_A":false, "extra_B":true, "new_Test":false, "day":""],
],
"HA":[
"HAsanity" : ["basic":true, "extra_A":false, "extra_B":false, "day":""],
@@ -32,7 +35,7 @@
"SCPFintentEventTp": ["basic":true, "extra_A":false, "extra_B":false, "extra_C":false, "extra_D":false, "new_Test":false, day:""],
"SCPFhostLat": ["basic":false, "extra_A":true, "extra_B":false, "extra_C":false, "extra_D":false, "new_Test":false, day:""],
// batch is now on extra_A after the fix on the ONOS side.
- "SCPFbatchFlowResp": ["basic":false, "extra_A":false, "extra_B":false, "extra_C":false, "extra_D":false, "new_Test":false, day:""],
+ "SCPFbatchFlowResp": ["basic":false, "extra_A":true, "extra_B":false, "extra_C":false, "extra_D":false, "new_Test":false, day:""],
"SCPFintentRerouteLat": ["basic":false, "extra_A":true, "extra_B":false, "extra_C":false, "extra_D":false, "new_Test":false, day:""],
"SCPFintentInstallWithdrawLat": ["basic":false, "extra_A":true, "extra_B":false, "extra_C":false, "extra_D":false, "new_Test":false, day:""],
"SCPFflowTp1gWithFlowObj": ["basic":false, "extra_A":false, "extra_B":true, "extra_C":false, "extra_D":false, "new_Test":false, day:""],
@@ -79,7 +82,9 @@
]
onos_branch = "master"
+test_branch = ""
onos_tag = params.ONOSTag
+isOldFlow = false
// Set tests based on day of week
def now = new Date()
echo(now.toString())
@@ -106,7 +111,7 @@
if ( manually_run ){
organize_tests( params.Tests )
onos_branch = params.ONOSVersion
- test_branch = params.TestONBranch
+ isOldFlow = params.isOldFlow
println "Tests to be run manually : "
}else{
testcases["SCPF"]["tests"] = SCPF_choices
@@ -116,6 +121,20 @@
println "Defaulting to " + day + " tests:"
}
print_tests( testcases )
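+// Collect each test category's pipeline closure under its node (VM or BM), then run the two node sequences in parallel.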
+def runTest = [
+ "VM" : [:],
+ "BM" : [:]
+]
+for( String test in testcases.keySet() ){
+ println test
+ if (testcases[test]["tests"] != ""){
+ runTest[testcases[test]["nodeName"]][test] = trigger_pipeline( onos_branch, testcases[test]["tests"], testcases[test]["nodeName"], test, manually_run, onos_tag )
+ }
+}
+def finalList = [:]
+finalList["VM"] = runTestSeq( runTest["VM"] )
+finalList["BM"] = runTestSeq( runTest["BM"] )
+parallel finalList
def testDivider( today ){
switch (today) {
@@ -137,22 +156,25 @@
day = "Wednesday"
break
case Calendar.THURSDAY:
- thursday( true, true)
+ thursday( true, true )
day = "Thursday"
+ isOldFlow = true
break
case Calendar.FRIDAY:
- friday( true, true)
+ friday( true, true )
day = "Friday"
+ isOldFlow = true
break
case Calendar.SATURDAY:
saturday()
- onos_branch= "1.11"
+ onos_branch= previous_version
day = "Saturday"
break
case Calendar.SUNDAY:
sunday()
- onos_branch= "1.10"
+ onos_branch= before_previous_version
day = "Sunday"
+ isOldFlow = true
break
}
}
@@ -176,6 +198,7 @@
}
def monday( getResult ){
FUNC_choices += adder( "FUNC", "basic", true, "M", getResult )
+ FUNC_choices += adder( "FUNC", "new_Test", true, "M", getResult )
FUNC_choices += adder( "FUNC", "extra_A", true, "M", getResult )
HA_choices += adder( "HA", "basic", true, "M", getResult )
HA_choices += adder( "HA", "extra_A", true, "M", getResult )
@@ -184,6 +207,7 @@
}
def tuesday( getDay, getResult ){
FUNC_choices += adder( "FUNC", "basic", getDay, "T", getResult )
+ FUNC_choices += adder( "FUNC", "new_Test", getDay, "T", getResult )
FUNC_choices += adder( "FUNC", "extra_B", getDay, "T", getResult )
HA_choices += adder( "HA", "basic", getDay, "T", getResult )
HA_choices += adder( "HA", "extra_B", getDay, "T", getResult )
@@ -195,6 +219,7 @@
}
def wednesday( getDay, getResult ){
FUNC_choices += adder( "FUNC", "basic", getDay, "W", getResult )
+ FUNC_choices += adder( "FUNC", "new_Test", getDay, "W", getResult )
FUNC_choices += adder( "FUNC", "extra_A", getDay, "W", getResult )
HA_choices += adder( "HA", "basic", getDay, "W", getResult )
HA_choices += adder( "HA", "extra_A", getDay, "W", getResult )
@@ -204,6 +229,7 @@
}
def thursday( getDay, getResult ){
FUNC_choices += adder( "FUNC", "basic", getDay, "Th", getResult )
+ FUNC_choices += adder( "FUNC", "new_Test", getDay, "Th", getResult )
FUNC_choices += adder( "FUNC", "extra_B", getDay, "Th", getResult )
HA_choices += adder( "HA", "basic", getDay, "Th", getResult )
HA_choices += adder( "HA", "extra_B", getDay, "Th", getResult )
@@ -212,6 +238,7 @@
}
def friday( getDay, getResult ){
FUNC_choices += adder( "FUNC", "basic", getDay, "F", getResult )
+ FUNC_choices += adder( "FUNC", "new_Test", getDay, "F", getResult )
FUNC_choices += adder( "FUNC", "extra_A", getDay, "F", getResult )
HA_choices += adder( "HA", "basic", getDay, "F", getResult )
HA_choices += adder( "HA", "extra_A", getDay, "F", getResult )
@@ -260,21 +287,6 @@
def dayAdder( testCat, testName, dayOfWeek ){
AllTheTests[ testCat ][ testName ][ "day" ] += dayOfWeek + ","
}
-def runTest = [
- "VM" : [:],
- "BM" : [:]
-]
-for( String test in testcases.keySet() ){
- println test
- if (testcases[test]["tests"] != ""){
- runTest[testcases[test]["nodeName"]][test] = trigger_pipeline( onos_branch, testcases[test]["tests"], testcases[test]["nodeName"], test, manually_run, onos_tag )
- }
-}
-def finalList = [:]
-finalList["VM"] = runTestSeq( runTest["VM"] )
-finalList["BM"] = runTestSeq( runTest["BM"] )
-parallel finalList
-
def runTestSeq( testList ){
return{
for ( test in testList.keySet() ){
@@ -306,16 +318,14 @@
onos_branch = "onos-" + branch
}
wiki = branch
- if ( !manuallyRun )
- test_branch = onos_branch
- if (onos_branch == "onos-1.11")
+ test_branch = onos_branch
+ if (onos_branch == "onos-" + previous_version)
test_branch = "master"
println jobOn + "_Pipeline_" + manuallyRun ? "manually" : branch
node("TestStation-" + nodeName + "s"){
- if (!manuallyRun)
- envSetup(onos_branch, test_branch, onosTag)
+ envSetup(onos_branch, test_branch, onosTag, jobOn, manuallyRun )
- exportEnvProperty( onos_branch, test_branch, wiki, tests, post_result, manuallyRun, onosTag )
+ exportEnvProperty( onos_branch, test_branch, wiki, tests, post_result, manuallyRun, onosTag, isOldFlow )
}
jobToRun = jobOn + "_Pipeline_" + ( manuallyRun ? "manually" : branch )
@@ -324,7 +334,7 @@
}
// export Environment properties.
-def exportEnvProperty( onos_branch, test_branch, wiki, tests, postResult, manually_run, onosTag ){
+def exportEnvProperty( onos_branch, test_branch, wiki, tests, postResult, manually_run, onosTag, isOldFlow ){
stage("export Property"){
sh '''
echo "ONOSBranch=''' + onos_branch +'''" > /var/jenkins/TestONOS.property
@@ -335,6 +345,8 @@
echo "Tests=''' + tests +'''" >> /var/jenkins/TestONOS.property
echo "postResult=''' + postResult +'''" >> /var/jenkins/TestONOS.property
echo "manualRun=''' + manually_run +'''" >> /var/jenkins/TestONOS.property
+ echo "isOldFlow=''' + isOldFlow +'''" >> /var/jenkins/TestONOS.property
+
'''
}
}
@@ -342,14 +354,29 @@
slackSend(color:color, message: message)
}
// Initialize the environment Setup for the onos and OnosSystemTest
-def envSetup( onos_branch, test_branch, onos_tag ){
+def envSetup( onos_branch, test_branch, onos_tag, jobOn, manuallyRun ){
stage("envSetup") {
sh '''#!/bin/bash -l
set +e
. ~/.bashrc
env
-
- echo -e "\n##### Set TestON Branch #####"
+ ''' + preSetup( onos_branch, test_branch, onos_tag, manuallyRun ) + '''
+ ''' + oldFlowCheck( jobOn, onos_branch ) + '''
+ ''' + postSetup( onos_branch, test_branch, onos_tag, manuallyRun )
+ }
+}
+def tagCheck(onos_tag, onos_branch){
+ result = "git checkout "
+ if (onos_tag == "" )
+ result += onos_branch //create new local branch
+ else
+ result += onos_tag //checkout the tag
+ return result
+}
+def preSetup( onos_branch, test_branch, onos_tag, isManual ){
+ result = ""
+ if( !isManual ){
+ result = '''echo -e "\n##### Set TestON Branch #####"
echo "TestON Branch is set on: ''' + test_branch + '''"
cd ~/OnosSystemTest/
@@ -395,9 +422,21 @@
echo "##### Check onos-service setting..... #####"
cat ~/onos/tools/package/bin/onos-service
- export JAVA_HOME=/usr/lib/jvm/java-8-oracle
-
- echo -e "\n##### build ONOS skip unit tests ######"
+ export JAVA_HOME=/usr/lib/jvm/java-8-oracle'''
+ }
+ return result
+}
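+// For old-flow SCPF runs on master, disable the new DistributedFlowRuleStore and re-enable the eventually consistent ECFlowRuleStore before the build.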
+def oldFlowCheck( jobOn, onos_branch ){
+ result = ""
+ if( isOldFlow && jobOn == "SCPF" && onos_branch== "master" )
+ result = '''sed -i -e 's/@Component(immediate = true)/@Component(enabled = false)/g' ~/onos/core/store/dist/src/main/java/org/onosproject/store/flow/impl/DistributedFlowRuleStore.java
+ sed -i -e 's/@Component(enabled = false)/@Component(immediate = true)/g' ~/onos/core/store/dist/src/main/java/org/onosproject/store/flow/impl/ECFlowRuleStore.java'''
+ return result
+}
+def postSetup( onos_branch, test_branch, onos_tag, isManual ){
+ result = ""
+ if( !isManual ){
+ result = '''echo -e "\n##### build ONOS skip unit tests ######"
#mvn clean install -DskipTests
# Force buck update
rm -f ~/onos/bin/buck
@@ -410,12 +449,5 @@
git branch'''
}
-}
-def tagCheck(onos_tag, onos_branch){
- result = "git checkout "
- if (onos_tag == "" )
- result += onos_branch //create new local branch
- else
- result += onos_tag //checkout the tag
return result
}
\ No newline at end of file
diff --git a/TestON/JenkinsFile/SCPFJenkinsFile b/TestON/JenkinsFile/SCPFJenkinsFile
index d9593c4..1ca7ab2 100644
--- a/TestON/JenkinsFile/SCPFJenkinsFile
+++ b/TestON/JenkinsFile/SCPFJenkinsFile
@@ -10,23 +10,23 @@
neighbors = ['y', 'n']
times = [ 'y', 'n' ]
SCPF = [
- SCPFcbench: [ test:'SCPFcbench', table:'cbench_bm_tests', results:'cbench_bm_results', file:'CbenchDB', rFile:'SCPFcbench.R', extra:none, finalResult:1, graphTitle:['Cbench Test'], dbCols:'avg', dbWhere:'', y_axis:'Throughput (Responses/sec)'],
- SCPFhostLat: [ test:'SCPFhostLat', table:'host_latency_tests', results:'host_latency_results', file:'HostAddLatency', rFile:'SCPFhostLat.R', extra:none,finalResult:1, graphTitle:['Host Latency Test'], dbCols:'avg', dbWhere:'AND scale=5', y_axis:'Latency (ms)'],
- SCPFportLat: [ test:'SCPFportLat', table:'port_latency_details', results:'port_latency_results', file:'/tmp/portEventResultDb', rFile:'SCPFportLat.R', extra:none, finalResult:1, graphTitle:['Port Latency Test - Port Up','Port Latency Test - Port Down'], dbCols:[ 'up_ofp_to_dev_avg,up_dev_to_link_avg,up_link_to_graph_avg', 'down_ofp_to_dev_avg,down_dev_to_link_avg,down_link_to_graph_avg' ], dbWhere:'AND scale=5', y_axis:'Latency (ms)' ],
- SCPFflowTp1g: [ test:'SCPFflowTp1g --params TEST/flows=12250', table:'flow_tp_tests', results:'flow_tp_results', file:'flowTP1gDB', rFile:'SCPFflowTp1g.R n', extra:neighbors,finalResult:1, graphTitle:['Flow Throughput Test - neighbors=0','Flow Throughput Test - neighbors=4'], dbCols:'avg', dbWhere:[ 'AND scale=5 AND neighbors=0 ','AND scale=5 AND NOT neighbors=0' ], y_axis:'Throughput (,000 Flows/sec)' ],
- SCPFflowTp1gWithFlowObj: [ test:'SCPFflowTp1g --params TEST/flowObj=True', table:'flow_tp_fobj_tests', results:'flow_tp_fobj_results', file:'flowTP1gDBFlowObj', rFile:'SCPFflowTp1g.R y', extra:neighbors, finalResult:0],
- SCPFscaleTopo: [ test:'SCPFscaleTopo', table:'scale_topo_latency_details', results:'scale_topo_latency_results', file:'/tmp/scaleTopoResultDb', rFile:'SCPFscaleTopo.R', extra:none, finalResult:1, graphTitle:['Scale Topology Test'], dbCols:[ 'first_connection_to_last_connection, last_connection_to_last_role_request, last_role_request_to_last_topology' ], dbWhere:'AND scale=20' , y_axis:'Latency (s)'],
- SCPFswitchLat: [ test:'SCPFswitchLat', table:'switch_latency_details', results:'switch_latency_results', file:'/tmp/switchEventResultDb', rFile:'SCPFswitchLat.R', extra:none, finalResult:1, graphTitle:['Switch Latency Test - Switch Up','Switch Latency Test - Switch Down'], dbCols:[ 'tcp_to_feature_reply_avg,feature_reply_to_role_request_avg,role_request_to_role_reply_avg,role_reply_to_device_avg,up_device_to_graph_avg', 'fin_ack_to_ack_avg,ack_to_device_avg,down_device_to_graph_avg' ], dbWhere:'AND scale=5', y_axis:'Latency (ms)' ],
- SCPFbatchFlowResp: [ test:'SCPFbatchFlowResp', table:'batch_flow_tests', results:'batch_flow_results', file:'SCPFbatchFlowRespData', rFile:'SCPFbatchFlowResp.R', extra:none, finalResult:1, graphTitle:['Batch Flow Test - Post', 'Batch Flow Test - Del'], dbCols:[ 'elapsepost, posttoconfrm', 'elapsedel, deltoconfrm' ], dbWhere:'', y_axis:'Latency (ms)'],
- SCPFintentEventTp: [ test:'SCPFintentEventTp', table:'intent_tp_tests', results:'intent_tp_results', file:'IntentEventTPDB', rFile:'SCPFintentEventTp.R n', extra:neighbors, finalResult:1, graphTitle:['Intent Throughput Test - neighbors=0','Intent Throughput Test - neighbors=4'], dbCols:'SUM( avg ) as avg', dbWhere:[ 'AND scale=5 AND neighbors=0 GROUP BY date','AND scale=5 AND NOT neighbors=0 GROUP BY date' ], y_axis:'Throughput (Ops/sec)'],
- SCPFintentRerouteLat: [ test:'SCPFintentRerouteLat', table:'intent_reroute_latency_tests', results:'intent_reroute_latency_results', file:'IntentRerouteLatDB', rFile:'SCPFIntentInstallWithdrawRerouteLat.R n', extra:batches, finalResult:1, graphTitle:['Intent Reroute Test'], dbCols:'avg', dbWhere:'AND scale=5 AND batch_size=100', y_axis:'Latency (ms)'],
- SCPFscalingMaxIntents: [ test:'SCPFscalingMaxIntents', table:'max_intents_tests', results:'max_intents_results', file:'ScalingMaxIntentDB', rFile:'SCPFscalingMaxIntents.R n', extra:times, finalResult:0],
- SCPFintentEventTpWithFlowObj: [ test:'SCPFintentEventTp --params TEST/flowObj=True', table:'intent_tp_fobj_tests', results:'intent_tp_fobj_results', file:'IntentEventTPflowObjDB', rFile:'SCPFintentEventTp.R y', extra:neighbors,finalResult:0],
- SCPFintentInstallWithdrawLat: [ test:'SCPFintentInstallWithdrawLat', table:'intent_latency_tests', results:'intent_latency_results', file:'IntentInstallWithdrawLatDB', rFile:'SCPFIntentInstallWithdrawRerouteLat.R n', extra:batches,finalResult:1, graphTitle:['Intent Installation Test','Intent Withdrawal Test'], dbCols:[ 'install_avg','withdraw_avg' ], dbWhere:'AND scale=5 AND batch_size=100', y_axis:'Latency (ms)'],
- SCPFintentRerouteLatWithFlowObj: [ test:'SCPFintentRerouteLat --params TEST/flowObj=True', table:'intent_reroute_latency_fobj_tests', results:'intent_reroute_latency_fobj_results', file:'IntentRerouteLatDBWithFlowObj', rFile:'SCPFIntentInstallWithdrawRerouteLat.R y', extra:batches, finalResult:0],
- SCPFscalingMaxIntentsWithFlowObj: [ test:'SCPFscalingMaxIntents --params TEST/flowObj=True', table:'max_intents_fobj_tests', results:'max_intents_fobj_results', file:'ScalingMaxIntentDBWFO', rFile:'SCPFscalingMaxIntents.R y', extra:times, finalResult:0],
- SCPFintentInstallWithdrawLatWithFlowObj: [ test:'SCPFintentInstallWithdrawLat --params TEST/flowObj=True', table:'intent_latency_fobj_tests', results:'intent_latency_fobj_results', file:'IntentInstallWithdrawLatDBWFO', rFile:'SCPFIntentInstallWithdrawRerouteLat.R y', extra:batches, finalResult:0],
- SCPFmastershipFailoverLat: [test:'SCPFmastershipFailoverLat', table:'mastership_failover_tests', results:'mastership_failover_results', file:'mastershipFailoverLatDB', rFile:'SCPFmastershipFailoverLat.R', extra:none, finalResult:1, graphTitle:['Mastership Failover Test'], dbCols:[ 'kill_deact_avg,deact_role_avg' ], dbWhere:'AND scale=5', y_axis:'Latency (ms)' ]
+ SCPFcbench: [ flows:false, test:'SCPFcbench', table:'cbench_bm_tests', results:'cbench_bm_results', file:'CbenchDB', rFile:'SCPFcbench.R', extra:none, finalResult:1, graphTitle:['Cbench Test'], dbCols:'avg', dbWhere:'', y_axis:'Throughput (Responses/sec)'],
+ SCPFhostLat: [ flows:false, test:'SCPFhostLat', table:'host_latency_tests', results:'host_latency_results', file:'HostAddLatency', rFile:'SCPFhostLat.R', extra:none,finalResult:1, graphTitle:['Host Latency Test'], dbCols:'avg', dbWhere:'AND scale=5', y_axis:'Latency (ms)'],
+ SCPFportLat: [ flows:false, test:'SCPFportLat', table:'port_latency_details', results:'port_latency_results', file:'/tmp/portEventResultDb', rFile:'SCPFportLat.R', extra:none, finalResult:1, graphTitle:['Port Latency Test - Port Up','Port Latency Test - Port Down'], dbCols:[ 'up_ofp_to_dev_avg,up_dev_to_link_avg,up_link_to_graph_avg', 'down_ofp_to_dev_avg,down_dev_to_link_avg,down_link_to_graph_avg' ], dbWhere:'AND scale=5', y_axis:'Latency (ms)' ],
+ SCPFflowTp1g: [ flows:true, test:'SCPFflowTp1g', table:'flow_tp_tests', results:'flow_tp_results', file:'flowTP1gDB', rFile:'SCPFflowTp1g.R n', extra:neighbors,finalResult:1, graphTitle:['Flow Throughput Test - neighbors=0','Flow Throughput Test - neighbors=4'], dbCols:'avg', dbWhere:[ 'AND scale=5 AND neighbors=0 ','AND scale=5 AND NOT neighbors=0' ], y_axis:'Throughput (,000 Flows/sec)' ],
+ SCPFflowTp1gWithFlowObj: [ flows:true, test:'SCPFflowTp1g --params TEST/flowObj=True', table:'flow_tp_fobj_tests', results:'flow_tp_fobj_results', file:'flowTP1gDBFlowObj', rFile:'SCPFflowTp1g.R y', extra:neighbors, finalResult:0],
+ SCPFscaleTopo: [ flows:false, test:'SCPFscaleTopo', table:'scale_topo_latency_details', results:'scale_topo_latency_results', file:'/tmp/scaleTopoResultDb', rFile:'SCPFscaleTopo.R', extra:none, finalResult:1, graphTitle:['Scale Topology Test'], dbCols:[ 'first_connection_to_last_connection, last_connection_to_last_role_request, last_role_request_to_last_topology' ], dbWhere:'AND scale=20' , y_axis:'Latency (s)'],
+ SCPFswitchLat: [ flows:false, test:'SCPFswitchLat', table:'switch_latency_details', results:'switch_latency_results', file:'/tmp/switchEventResultDb', rFile:'SCPFswitchLat.R', extra:none, finalResult:1, graphTitle:['Switch Latency Test - Switch Up','Switch Latency Test - Switch Down'], dbCols:[ 'tcp_to_feature_reply_avg,feature_reply_to_role_request_avg,role_request_to_role_reply_avg,role_reply_to_device_avg,up_device_to_graph_avg', 'fin_ack_to_ack_avg,ack_to_device_avg,down_device_to_graph_avg' ], dbWhere:'AND scale=5', y_axis:'Latency (ms)' ],
+ SCPFbatchFlowResp: [ flows:true, test:'SCPFbatchFlowResp', table:'batch_flow_tests', results:'batch_flow_results', file:'SCPFbatchFlowRespData', rFile:'SCPFbatchFlowResp.R', extra:none, finalResult:1, graphTitle:['Batch Flow Test - Post', 'Batch Flow Test - Del'], dbCols:[ 'elapsepost, posttoconfrm', 'elapsedel, deltoconfrm' ], dbWhere:'', y_axis:'Latency (ms)'],
+ SCPFintentEventTp: [ flows:true, test:'SCPFintentEventTp', table:'intent_tp_tests', results:'intent_tp_results', file:'IntentEventTPDB', rFile:'SCPFintentEventTp.R n', extra:neighbors, finalResult:1, graphTitle:['Intent Throughput Test - neighbors=0','Intent Throughput Test - neighbors=4'], dbCols:'SUM( avg ) as avg', dbWhere:[ 'AND scale=5 AND neighbors=0 GROUP BY date','AND scale=5 AND NOT neighbors=0 GROUP BY date' ], y_axis:'Throughput (Ops/sec)'],
+ SCPFintentRerouteLat: [ flows:true, test:'SCPFintentRerouteLat', table:'intent_reroute_latency_tests', results:'intent_reroute_latency_results', file:'IntentRerouteLatDB', rFile:'SCPFIntentInstallWithdrawRerouteLat.R n', extra:batches, finalResult:1, graphTitle:['Intent Reroute Test'], dbCols:'avg', dbWhere:'AND scale=5 AND batch_size=100', y_axis:'Latency (ms)'],
+ SCPFscalingMaxIntents: [ flows:true, test:'SCPFscalingMaxIntents', table:'max_intents_tests', results:'max_intents_results', file:'ScalingMaxIntentDB', rFile:'SCPFscalingMaxIntents.R n', extra:times, finalResult:0],
+ SCPFintentEventTpWithFlowObj: [ flows:true, test:'SCPFintentEventTp --params TEST/flowObj=True', table:'intent_tp_fobj_tests', results:'intent_tp_fobj_results', file:'IntentEventTPflowObjDB', rFile:'SCPFintentEventTp.R y', extra:neighbors,finalResult:0],
+ SCPFintentInstallWithdrawLat: [ flows:true, test:'SCPFintentInstallWithdrawLat', table:'intent_latency_tests', results:'intent_latency_results', file:'IntentInstallWithdrawLatDB', rFile:'SCPFIntentInstallWithdrawRerouteLat.R n', extra:batches,finalResult:1, graphTitle:['Intent Installation Test','Intent Withdrawal Test'], dbCols:[ 'install_avg','withdraw_avg' ], dbWhere:'AND scale=5 AND batch_size=100', y_axis:'Latency (ms)'],
+ SCPFintentRerouteLatWithFlowObj: [ flows:true, test:'SCPFintentRerouteLat --params TEST/flowObj=True', table:'intent_reroute_latency_fobj_tests', results:'intent_reroute_latency_fobj_results', file:'IntentRerouteLatDBWithFlowObj', rFile:'SCPFIntentInstallWithdrawRerouteLat.R y', extra:batches, finalResult:0],
+ SCPFscalingMaxIntentsWithFlowObj: [ flows:true, test:'SCPFscalingMaxIntents --params TEST/flowObj=True', table:'max_intents_fobj_tests', results:'max_intents_fobj_results', file:'ScalingMaxIntentDBWFO', rFile:'SCPFscalingMaxIntents.R y', extra:times, finalResult:0],
+ SCPFintentInstallWithdrawLatWithFlowObj: [ flows:true, test:'SCPFintentInstallWithdrawLat --params TEST/flowObj=True', table:'intent_latency_fobj_tests', results:'intent_latency_fobj_results', file:'IntentInstallWithdrawLatDBWFO', rFile:'SCPFIntentInstallWithdrawRerouteLat.R y', extra:batches, finalResult:0],
+ SCPFmastershipFailoverLat: [ flows:false, test:'SCPFmastershipFailoverLat', table:'mastership_failover_tests', results:'mastership_failover_results', file:'mastershipFailoverLatDB', rFile:'SCPFmastershipFailoverLat.R', extra:none, finalResult:1, graphTitle:['Mastership Failover Test'], dbCols:[ 'kill_deact_avg,deact_role_avg' ], dbWhere:'AND scale=5', y_axis:'Latency (ms)' ]
]
echo("Testcases:")
@@ -41,7 +41,8 @@
println test
}
}
-
+isOldFlow = prop[ "isOldFlow" ]
+oldFlowRuleCheck( isOldFlow )
def tests = [:]
for( String test : SCPF.keySet() ){
toBeRun = testsToRun.contains( test )
@@ -54,15 +55,19 @@
for ( test in tests.keySet() ){
tests[test].call()
}
-if( prop["manualRun"] == "false" ){
- def end = new Date()
- TimeDuration duration = TimeCategory.minus( end, now )
- slackSend( color:"#5816EE", message: "SCPF tests ended at: " + end.toString() + "\nTime took : " + duration )
+try{
+ if( prop["manualRun"] == "false" ){
+ def end = new Date()
+ TimeDuration duration = TimeCategory.minus( end, now )
+ slackSend( color:"#5816EE", message: "SCPF tests ended at: " + end.toString() + "\nTime took : " + duration )
+ }
}
+catch(all){}
+
// The testName should be the key from the SCPF map
def SCPFTest( testName, toBeRun, prop ) {
return {
- try{
+ catchError{
stage(testName) {
if ( toBeRun ){
workSpace = "/var/jenkins/workspace/"+testName
@@ -117,7 +122,7 @@
string(credentialsId: 'db_user', variable: 'user'),
string(credentialsId: 'db_host', variable: 'host'),
string(credentialsId: 'db_port', variable: 'port')]) {
- def database_command = pass + "|psql --host=" + host + " --port=" + port + " --username=" + user + " --password --dbname onostest -c \"INSERT INTO " + SCPF[testName]['table'] + " VALUES('\$DATE','" + SCPF[testName]['results'] + "','\$BUILD_NUMBER', '\$ONOSBranch', \$line);\""
+ def database_command = pass + "|psql --host=" + host + " --port=" + port + " --username=" + user + " --password --dbname onostest -c \"INSERT INTO " + SCPF[testName]['table'] + " VALUES('\$DATE','" + SCPF[testName]['results'] + "','\$BUILD_NUMBER', '\$ONOSBranch', " + affectedByOldFlow( isOldFlow, testName ) + "\$line);\""
if (testName == "SCPFscaleTopo" || testName == "SCPFswitchLat" || testName == "SCPFportLat") {
database_command = pass + "|psql --host=" + host + " --port=" + port + " --username=" + user + " --password --dbname onostest -c \"INSERT INTO " + SCPF[testName]['table'] + " VALUES('\$DATE','" + SCPF[testName]['results'] + "','\$BUILD_NUMBER', \$line, '\$ONOSBranch');\""
}
@@ -134,8 +139,8 @@
echo ''' + database_command + '''
done< ''' + SCPF[testName]['file'] + '''
- ''' + getGraphCommand( SCPF[testName]['rFile'], SCPF[testName]['extra'], host, port, user, pass, testName, prop["ONOSBranch"] ) + '''
- ''' + ( SCPF[testName]['finalResult'] ? generateCombinedResultGraph( host,port, user, pass, testName, prop["ONOSBranch"] ) : "" )
+ ''' + getGraphCommand( SCPF[testName]['rFile'], SCPF[testName]['extra'], host, port, user, pass, testName, prop["ONOSBranch"], isOldFlow ) + '''
+ ''' + ( SCPF[testName]['finalResult'] ? generateCombinedResultGraph( host, port, user, pass, testName, prop["ONOSBranch"], isOldFlow ) : "" )
}
}
// Fetch Logs
@@ -161,47 +166,63 @@
def post = build job: "Pipeline_postjob_BM", propagate: false
}
node("TestStation-BMs"){
- sh '''#!/bin/bash
-
- if [ -e ''' + workSpace + "/*Result.txt ] && grep -q \"1\" " + workSpace + "/*Result.txt" + '''
- then
- echo ''' + testName + " : All passed." + '''
- else
- echo ''' + testName + " : not all passed." + '''
- DoingThisToSetTheResultFalse
- fi'''
+ resultContents = readFile workSpace + "/" + testName.replaceAll("WithFlowObj","") + "Result.txt"
+ resultContents = resultContents.split("\n")
+ if( resultContents[ 0 ] == "1" ){
+ print "All passed"
+ }else{
+ print "Failed"
+ if( prop["manualRun"] == "false" )
+ slackSend(color:"FF0000", message: "[" + prop["ONOSBranch"] + "]" + testName + " : Failed!\n"
+ + resultContents[ 1 ] + "\n"
+ + "https://onos-jenkins.onlab.us/blue/organizations/jenkins/${env.JOB_NAME}/detail/${env.JOB_NAME}/${env.BUILD_NUMBER}/pipeline" )
+ Failed
+ }
}
}
}
- }catch (all) {
- catchError{
- if( prop["manualRun"] == "false" )
- slackSend(color:"FF0000", message: "[" + prop["ONOSBranch"] + "]" + testName + " : Failed!\n"
- + "https://onos-jenkins.onlab.us/blue/organizations/jenkins/${env.JOB_NAME}/detail/${env.JOB_NAME}/${env.BUILD_NUMBER}/pipeline" )
- Failed
- }
}
}
}
-def getGraphCommand( rFileName, extras, host, port, user, pass, testName, branchName ){
+def getGraphCommand( rFileName, extras, host, port, user, pass, testName, branchName, isOldFlow ){
result = ""
for( extra in extras ){
- result += generateGraph( rFileName, " " + extra, host, port, user, pass, testName, branchName ) + ";"
+ result += generateGraph( rFileName, " " + extra, host, port, user, pass, testName, branchName, isOldFlow ) + ";"
}
return result
}
-def generateGraph( rFileName, batch, host, port, user, pass, testName, branchName){
- return "Rscript " + graph_generator_directory + rFileName + " " + host + " " + port + " " + user + " " + pass + " " + testName + " " + branchName + " " + batch + " " + graph_saved_directory
+def generateGraph( rFileName, batch, host, port, user, pass, testName, branchName, isOldFlow ){
+ return "Rscript " + graph_generator_directory + rFileName + " " + host + " " + port + " " + user + " " + pass + " " +
+ testName + " " + branchName + " " + batch + " " + usingOldFlow( isOldFlow, testName ) + graph_saved_directory
}
-def generateCombinedResultGraph( host, port, user, pass, testName, branchName ){
+def generateCombinedResultGraph( host, port, user, pass, testName, branchName, isOldFlow ){
result = ""
for ( int i=0; i< SCPF[testName]['graphTitle'].size(); i++){
- result += "Rscript " + graph_generator_directory + "SCPFLineGraph.R " + host + " " + port + " " + user + " " + pass + " \"" + SCPF[testName]['graphTitle'][i] + "\" " + branchName + " " + 100 + " \"SELECT " +
- checkIfList( testName, 'dbCols', i ) + " FROM " + SCPF[testName]['table'] + " WHERE branch=\'" + branchName + "\' " + checkIfList( testName, 'dbWhere', i ) +
- " ORDER BY date DESC LIMIT 100\" \"" + SCPF[testName]['y_axis'] + "\" " + graph_saved_directory + ";"
+ result += "Rscript " + graph_generator_directory + "SCPFLineGraph.R " + host + " " + port + " " + user + " " + pass + " \"" + SCPF[testName]['graphTitle'][i] + "\" " +
+ branchName + " " + 50 + " \"SELECT " + checkIfList( testName, 'dbCols', i ) + " FROM " + SCPF[testName]['table'] + " WHERE branch=\'" + branchName + "\' " + sqlOldFlow( isOldFlow, testName ) +
+ checkIfList( testName, 'dbWhere', i ) + " ORDER BY date DESC LIMIT 50\" \"" + SCPF[testName]['y_axis'] + "\" " + hasOldFlow( isOldFlow, testName ) + graph_saved_directory + ";"
}
return result
}
def checkIfList( testName, forWhich, pos ){
return SCPF[testName][forWhich].getClass().getName() != "java.lang.String" ? SCPF[testName][forWhich][pos] : SCPF[testName][forWhich]
+}
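+// Returns the extra WHERE clause filtering on is_old_flow for tests affected by the flow rule store change.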
+def sqlOldFlow( isOldFlow, testName ){
+ return SCPF[ testName ][ 'flows' ] ? " AND " + ( isOldFlow == "true" ? "" : "NOT " ) + "is_old_flow " : ""
+}
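+// When running against the new flow rule store, append params that shrink the load of the flow-affected tests.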
+def oldFlowRuleCheck( isOldFlow ){
+ if( isOldFlow == "false" ){
+ SCPF[ 'SCPFflowTp1g' ][ 'test' ] += " --params TEST/flows=6125"
+ SCPF[ 'SCPFbatchFlowResp' ][ 'test' ] += " --params CASE1000/batchSize=100"
+ SCPF[ 'SCPFintentEventTp' ][ 'test' ] += " --params TEST/numKeys=4000"
+ }
+}
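+// Returns the is_old_flow value fragment inserted into the database row for flow-affected tests.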
+def affectedByOldFlow( isOldFlow, testName ){
+ return SCPF[ testName ][ 'flows' ] ? "" + isOldFlow + ", " : ""
+}
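+// Returns the old-flow flag ( "y"/"n" ) passed to the per-test R graph script for flow-affected tests.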
+def usingOldFlow( isOldFlow, testName ){
+ return SCPF[ testName ][ 'flows' ] ? ( isOldFlow == "true" ? "y" : "n" ) + " " : ""
+}
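+// Returns the old-flow flag ( "y"/"n" ) passed to the front page (SCPFLineGraph.R) combined-result graph script.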
+def hasOldFlow( isOldFlow, testName ){
+ return ( SCPF[ testName ][ 'flows' ] && isOldFlow == "true" ? "y" : "n" ) + " "
}
\ No newline at end of file
diff --git a/TestON/JenkinsFile/USECASEJenkinsFile b/TestON/JenkinsFile/USECASEJenkinsFile
index 63551b9..bfae6c1 100644
--- a/TestON/JenkinsFile/USECASEJenkinsFile
+++ b/TestON/JenkinsFile/USECASEJenkinsFile
@@ -48,15 +48,19 @@
for ( test in tests.keySet() ){
tests[test].call()
}
-if( prop["manualRun"] == "false" ){
- def end = new Date()
- TimeDuration duration = TimeCategory.minus( end, now )
- slackSend( color:"#5816EE", message: "USECASE tests ended at: " + end.toString() + "\nTime took : " + duration )
+try{
+ if( prop["manualRun"] == "false" ){
+ def end = new Date()
+ TimeDuration duration = TimeCategory.minus( end, now )
+ slackSend( color:"#5816EE", message: "USECASE tests ended at: " + end.toString() + "\nTime took : " + duration )
+ }
}
+catch(all){}
+
// The testName should be the key from the USECASE map
def USECASETest( testName, toBeRun, prop ) {
return {
- try{
+ catchError{
stage(testName) {
if ( toBeRun ){
workSpace = "/var/jenkins/workspace/"+testName
@@ -178,25 +182,21 @@
]
}
node("TestStation-BMs"){
- sh '''#!/bin/bash
-
- if [ -e ''' + workSpace + "/" + testName + "Result.txt ] && grep -q \"1\" " + workSpace + "/" + testName + "Result.txt" + '''
- then
- echo ''' + testName + " : All passed." + '''
- else
- echo ''' + testName + " : not all passed." + '''
- DoingThisToSetTheResultFalse
- fi'''
+ resultContents = readFile workSpace + "/" + testName + "Result.txt"
+ resultContents = resultContents.split("\n")
+ if( resultContents[ 0 ] == "1" ){
+ print "All passed"
+ }else{
+ print "Failed"
+ if( prop["manualRun"] == "false" )
+ slackSend(color:"FF0000", message: "[" + prop["ONOSBranch"] + "]" + testName + " : Failed!\n"
+ + resultContents[ 1 ] + "\n"
+ + "https://onos-jenkins.onlab.us/blue/organizations/jenkins/${env.JOB_NAME}/detail/${env.JOB_NAME}/${env.BUILD_NUMBER}/pipeline" )
+ Failed
+ }
}
}
}
- }catch (all) {
- catchError{
- if( prop["manualRun"] == "false" )
- slackSend(color:"FF0000", message: "[" + prop["ONOSBranch"] + "]" + testName + " : Failed!\n"
- + "https://onos-jenkins.onlab.us/blue/organizations/jenkins/${env.JOB_NAME}/detail/${env.JOB_NAME}/${env.BUILD_NUMBER}/pipeline" )
- Failed
- }
}
}
}
\ No newline at end of file
diff --git a/TestON/JenkinsFile/scripts/README.md b/TestON/JenkinsFile/scripts/README.md
new file mode 100644
index 0000000..dab3f68
--- /dev/null
+++ b/TestON/JenkinsFile/scripts/README.md
@@ -0,0 +1,23 @@
+<h1>Wiki Graph Scripts</h1>
+
+The scripts that generate the graphs are written in the R programming language.
+
+The scripts are structured in the following format (a minimal sketch follows the list):
+1. Data Management
+ * Data is obtained from the databases through SQL. CLI arguments, filenames, and titles are also handled here.
+ 1. Importing libraries
+ 2. Command line arguments
+ 3. Title of the graph
+ 4. Filename
+ 5. SQL initialization and data gathering
+2. Organize Data
+ * Raw data is sorted into a data frame, which is used in generating the graph.
+ 1. Combining data into a single list
+ 2. Using the list to construct a data frame
+ 3. Adding data as columns to the data frame
+3. Generate Graphs
+ * The graphs are formatted and constructed here.
+ 1. Main plot generated
+ 2. Fundamental variables assigned
+ 3. Generate specific graph format
+ 4. Exporting graph to file
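+
+Below is a minimal, self-contained sketch of that three-step structure. It is illustrative only: `example_table`, its columns, and the argument layout are hypothetical rather than taken from any real script.
+
+```r
+# STEP 1: Data Management. Import libraries, read CLI args, and query the database.
+library( ggplot2 )
+library( RPostgreSQL )
+
+args <- commandArgs( trailingOnly = TRUE )  # host, port, user, password, save-directory
+
+con <- dbConnect( dbDriver( "PostgreSQL" ),
+                  dbname = "onostest",
+                  host = args[ 1 ],
+                  port = strtoi( args[ 2 ] ),
+                  user = args[ 3 ],
+                  password = args[ 4 ] )
+
+fileData <- dbGetQuery( con, "SELECT scale, avg FROM example_table" )  # hypothetical table
+
+# STEP 2: Organize Data. Sort the raw query result into a data frame for plotting.
+dataFrame <- data.frame( scale = fileData$scale, ms = fileData$avg )
+
+# STEP 3: Generate Graphs. Format, construct, and export the plot.
+mainPlot <- ggplot( data = dataFrame, aes( x = scale, y = ms ) ) +
+    geom_bar( stat = "identity" ) +
+    ggtitle( "Example Graph" )
+
+ggsave( paste( args[ 5 ], "example_graph.jpg", sep="" ),
+        plot = mainPlot,
+        width = 10, height = 6, dpi = 200 )
+```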
diff --git a/TestON/JenkinsFile/scripts/SCPFIntentInstallWithdrawRerouteLat.R b/TestON/JenkinsFile/scripts/SCPFIntentInstallWithdrawRerouteLat.R
index 98447ca..037b6d4 100644
--- a/TestON/JenkinsFile/scripts/SCPFIntentInstallWithdrawRerouteLat.R
+++ b/TestON/JenkinsFile/scripts/SCPFIntentInstallWithdrawRerouteLat.R
@@ -21,172 +21,362 @@
# please contact Jeremy Ronquillo: j_ronquillo@u.pacific.edu
# **********************************************************
-# STEP 1: File management.
+# STEP 1: Data management.
# **********************************************************
-
-print( "STEP 1: File management." )
+print( "**********************************************************" )
+print( "STEP 1: Data management." )
+print( "**********************************************************" )
+has_flow_obj = 1
+database_host = 2
+database_port = 3
+database_u_id = 4
+database_pw = 5
+test_name = 6
+branch_name = 7
+batch_size = 8
+old_flow = 9
+save_directory = 10
# Command line arguments are read.
print( "Reading commmand-line args." )
args <- commandArgs( trailingOnly=TRUE )
-# Import libraries to be used for graphing and organizing data, respectively.
-# Find out more about ggplot2: https://github.com/tidyverse/ggplot2
-# reshape2: https://github.com/hadley/reshape
+# ----------------
+# Import Libraries
+# ----------------
+
print( "Importing libraries." )
library( ggplot2 )
library( reshape2 )
library( RPostgreSQL ) # For databases
-# Check if sufficient args are provided.
-if ( is.na( args[ 9 ] ) ){
- print( "Usage: Rscript SCPFIntentInstallWithdrawRerouteLat.R <isFlowObj> <database-host> <database-port> <database-user-id> <database-password> <test-name> <branch-name> <batch-size> <directory-to-save-graphs>" )
- q() # basically exit(), but in R
+# -------------------
+# Check CLI Arguments
+# -------------------
+
+print( "Verifying CLI args." )
+
+if ( is.na( args[ save_directory ] ) ){
+
+ print( paste( "Usage: Rscript SCPFIntentInstallWithdrawRerouteLat.R",
+ "<isFlowObj>" ,
+ "<database-host>",
+ "<database-port>",
+ "<database-user-id>",
+ "<database-password>",
+ "<test-name>",
+ "<branch-name>",
+ "<batch-size>",
+ "<using-old-flow>",
+ "<directory-to-save-graphs>",
+ sep=" " ) )
+ quit( status = 1 ) # basically exit(), but in R
}
+# -----------------------------------
+# Create File Name and Title of Graph
+# -----------------------------------
+
+print( "Creating filename and title of graph." )
+
+chartTitle <- "Intent Install, Withdraw, & Reroute Latencies"
flowObjFileModifier <- ""
-if ( args[ 1 ] == "y" ){
- flowObjFileModifier <- "fobj_"
-}
+errBarOutputFile <- paste( args[ save_directory ],
+ "SCPFIntentInstallWithdrawRerouteLat_",
+ args[ branch_name ],
+ sep="" )
-# paste() is used to concatenate strings
-errBarOutputFile <- paste( args[ 9 ], "SCPFIntentInstallWithdrawRerouteLat", sep="" )
-errBarOutputFile <- paste( errBarOutputFile, args[ 7 ], sep="_" )
-if ( args[ 1 ] == "y" ){
+if ( args[ has_flow_obj ] == "y" ){
errBarOutputFile <- paste( errBarOutputFile, "_fobj", sep="" )
+ flowObjFileModifier <- "fobj_"
+ chartTitle <- paste( chartTitle, "w/ FlowObj" )
}
-errBarOutputFile <- paste( errBarOutputFile, "_", sep="" )
-errBarOutputFile <- paste( errBarOutputFile, args[ 8 ], sep="" )
-errBarOutputFile <- paste( errBarOutputFile, "-batchSize", sep="" )
-errBarOutputFile <- paste( errBarOutputFile, "_graph.jpg", sep="" )
+if ( args[ old_flow ] == "y" ){
+ errBarOutputFile <- paste( errBarOutputFile, "_OldFlow", sep="" )
+ chartTitle <- paste( chartTitle,
+ "With Eventually Consistent Flow Rule Store",
+ sep="\n" )
+}
+errBarOutputFile <- paste( errBarOutputFile,
+ "_",
+ args[ batch_size ],
+ "-batchSize_graph.jpg",
+ sep="" )
-print( "Reading from databases." )
+chartTitle <- paste( chartTitle,
+ "\nBatch Size =",
+ args[ batch_size ],
+ sep=" " )
-con <- dbConnect( dbDriver( "PostgreSQL" ), dbname="onostest", host=args[ 2 ], port=strtoi( args[ 3 ] ), user=args[ 4 ],password=args[ 5 ] )
+# ------------------
+# SQL Initialization
+# ------------------
-command1 <- paste( "SELECT * FROM intent_latency_", flowObjFileModifier, sep="" )
-command1 <- paste( command1, "tests WHERE batch_size=", sep="" )
-command1 <- paste( command1, args[ 8 ], sep="" )
-command1 <- paste( command1, " AND branch = '", sep="" )
-command1 <- paste( command1, args[ 7 ], sep="" )
-command1 <- paste( command1, "' AND date IN ( SELECT MAX( date ) FROM intent_latency_", sep="" )
-command1 <- paste( command1, flowObjFileModifier, sep="" )
-command1 <- paste( command1, "tests WHERE branch='", sep="" )
-command1 <- paste( command1, args[ 7 ], sep="" )
-command1 <- paste( command1, "')", sep="" )
+print( "Initializing SQL" )
-print( paste( "Sending SQL command:", command1 ) )
+con <- dbConnect( dbDriver( "PostgreSQL" ),
+ dbname = "onostest",
+ host = args[ database_host ],
+ port = strtoi( args[ database_port ] ),
+ user = args[ database_u_id ],
+ password = args[ database_pw ] )
-fileData1 <- dbGetQuery( con, command1 )
+# ---------------------------------------
+# Intent Install and Withdraw SQL Command
+# ---------------------------------------
+print( "Generating Intent Install and Withdraw SQL Command" )
-command2 <- paste( "SELECT * FROM intent_reroute_latency_", flowObjFileModifier, sep="" )
-command2 <- paste( command2, "tests WHERE batch_size=", sep="" )
-command2 <- paste( command2, args[ 8 ], sep="" )
-command2 <- paste( command2, " AND branch = '", sep="" )
-command2 <- paste( command2, args[ 7 ], sep="" )
-command2 <- paste( command2, "' AND date IN ( SELECT MAX( date ) FROM intent_reroute_latency_", sep="" )
-command2 <- paste( command2, flowObjFileModifier, sep="" )
-command2 <- paste( command2, "tests WHERE branch='", sep="" )
-command2 <- paste( command2, args[ 7 ], sep="" )
-command2 <- paste( command2, "')", sep="" )
+installWithdrawSQLCommand <- paste( "SELECT * FROM intent_latency_",
+ flowObjFileModifier,
+ "tests WHERE batch_size=",
+ args[ batch_size ],
+ " AND branch = '",
+ args[ branch_name ],
+ "' AND date IN ( SELECT MAX( date ) FROM intent_latency_",
+ flowObjFileModifier,
+ "tests WHERE branch='",
+ args[ branch_name ],
+ "' AND ",
+ ( if( args[ old_flow ] == 'y' ) "" else "NOT " ) ,
+ "is_old_flow",
+ ")",
+ sep="" )
-print( paste( "Sending SQL command:", command2 ) )
+print( "Sending Intent Install and Withdraw SQL command:" )
+print( installWithdrawSQLCommand )
+installWithdrawData <- dbGetQuery( con, installWithdrawSQLCommand )
-fileData2 <- dbGetQuery( con, command2 )
+# --------------------------
+# Intent Reroute SQL Command
+# --------------------------
+
+print( "Generating Intent Reroute SQL Command" )
+
+rerouteSQLCommand <- paste( "SELECT * FROM intent_reroute_latency_",
+ flowObjFileModifier,
+ "tests WHERE batch_size=",
+ args[ batch_size ],
+ " AND branch = '",
+ args[ branch_name ],
+ "' AND date IN ( SELECT MAX( date ) FROM intent_reroute_latency_",
+ flowObjFileModifier,
+ "tests WHERE branch='",
+ args[ branch_name ],
+ "' AND ",
+ ( if( args[ old_flow ] == 'y' ) "" else "NOT " ) ,
+ "is_old_flow",
+ ")",
+ sep="" )
+
+print( "Sending Intent Reroute SQL command:" )
+print( rerouteSQLCommand )
+rerouteData <- dbGetQuery( con, rerouteSQLCommand )
# **********************************************************
-# STEP 2: Organize data.
+# STEP 2: Organize Data.
# **********************************************************
-print( "STEP 2: Organize data." )
+print( "**********************************************************" )
+print( "STEP 2: Organize Data." )
+print( "**********************************************************" )
-# Create lists c() and organize data into their corresponding list.
-print( "Sorting data." )
-if ( ncol( fileData2 ) == 0 ){
- avgs <- c( fileData1[ 'install_avg' ], fileData1[ 'withdraw_avg' ] )
+# -------------------------------------------------------
+# Combining Install, Withdraw, and Reroute Latencies Data
+# -------------------------------------------------------
+
+print( "Combining Install, Withdraw, and Reroute Latencies Data" )
+
+if ( ncol( rerouteData ) == 0 ){ # Checks if rerouteData exists, so we can exclude it if necessary
+
+ requiredColumns <- c( "install_avg",
+ "withdraw_avg" )
+
+ tryCatch( avgs <- c( installWithdrawData[ requiredColumns ] ),
+ error = function( e ) {
+ print( "[ERROR] One or more expected columns are missing from the data. Please check that the data and SQL command are valid, then try again." )
+ print( "Required columns: " )
+ print( requiredColumns )
+ print( "Actual columns: " )
+ print( names( installWithdrawData ) )
+ print( "Error dump:" )
+ print( e )
+ quit( status = 1 )
+ }
+ )
} else{
- colnames( fileData2 ) <- c( "date", "name", "date", "branch", "commit", "scale", "batch_size", "reroute_avg", "reroute_std" )
- avgs <- c( fileData1[ 'install_avg' ], fileData1[ 'withdraw_avg' ], fileData2[ 'reroute_avg' ] )
+ colnames( rerouteData ) <- c( "date",
+ "name",
+ "date",
+ "branch",
+ "is_old_flow",
+ "commit",
+ "scale",
+ "batch_size",
+ "reroute_avg",
+ "reroute_std" )
+
+ tryCatch( avgs <- c( installWithdrawData[ 'install_avg' ],
+ installWithdrawData[ 'withdraw_avg' ],
+ rerouteData[ 'reroute_avg' ] ),
+ error = function( e ) {
+ print( "[ERROR] One or more expected columns are missing from the data. Please check that the data and SQL command are valid, then try again." )
+ print( "Required columns: " )
+ print( requiredColumns )
+ print( "Actual columns: " )
+ print( names( fileData ) )
+ print( "Error dump:" )
+ print( e )
+ quit( status = 1 )
+ }
+ )
+
}
-# Parse lists into data frames.
-dataFrame <- melt( avgs ) # This is where reshape2 comes in. Avgs list is converted to data frame
+# Combine lists into data frames.
+dataFrame <- melt( avgs )
-if ( ncol( fileData2 ) == 0 ){
- dataFrame$scale <- c( fileData1$scale, fileData1$scale ) # Add node scaling to the data frame.
- dataFrame$stds <- c( fileData1$install_std, fileData1$withdraw_std )
+# --------------------
+# Construct Data Frame
+# --------------------
+
+print( "Constructing data frame." )
+
+if ( ncol( rerouteData ) == 0 ){ # Checks if rerouteData exists (due to batch size) for the dataFrame this time
+ dataFrame$scale <- c( installWithdrawData$scale,
+ installWithdrawData$scale )
+
+ dataFrame$stds <- c( installWithdrawData$install_std,
+ installWithdrawData$withdraw_std )
} else{
- dataFrame$scale <- c( fileData1$scale, fileData1$scale, fileData2$scale ) # Add node scaling to the data frame.
- dataFrame$stds <- c( fileData1$install_std, fileData1$withdraw_std, fileData2$reroute_std )
+ dataFrame$scale <- c( installWithdrawData$scale,
+ installWithdrawData$scale,
+ rerouteData$scale )
+
+ dataFrame$stds <- c( installWithdrawData$install_std,
+ installWithdrawData$withdraw_std,
+ rerouteData$reroute_std )
}
-colnames( dataFrame ) <- c( "ms", "type", "scale", "stds" )
+
+colnames( dataFrame ) <- c( "ms",
+ "type",
+ "scale",
+ "stds" )
# Format data frame so that the data is in the same order as it appeared in the file.
dataFrame$type <- as.character( dataFrame$type )
dataFrame$type <- factor( dataFrame$type, levels=unique( dataFrame$type ) )
-dataFrame <- na.omit( dataFrame ) # Omit any data that doesn't exist
-
-
+dataFrame <- na.omit( dataFrame ) # Omit any data that doesn't exist
print( "Data Frame Results:" )
print( dataFrame )
# **********************************************************
-# STEP 3: Generate graphs.
+# STEP 3: Generate graph.
# **********************************************************
-print( "STEP 3: Generate graphs." )
+print( "**********************************************************" )
+print( "STEP 3: Generate Graph." )
+print( "**********************************************************" )
-# 1. Graph fundamental data is generated first.
-# These are variables that apply to all of the graphs being generated, regardless of type.
-#
-# 2. Type specific graph data is generated.
-# Data specific for the error bar and stacked bar graphs are generated.
-#
-# 3. Generate and save the graphs.
-# Graphs are saved to the filename above, in the directory provided in command line args
+# -------------------
+# Main Plot Generated
+# -------------------
+
+print( "Creating the main plot." )
+
+mainPlot <- ggplot( data = dataFrame, aes( x = scale,
+ y = ms,
+ ymin = ms,
+ ymax = ms + stds,
+ fill = type ) )
+
+# ------------------------------
+# Fundamental Variables Assigned
+# ------------------------------
print( "Generating fundamental graph data." )
-theme_set( theme_grey( base_size = 20 ) ) # set the default text size of the graph.
-
-mainPlot <- ggplot( data = dataFrame, aes( x = scale, y = ms, ymin = ms - stds, ymax = ms + stds,fill = type ) )
-
-# Formatting the plot
-width <- 1.3 # Width of the bars.
-xScaleConfig <- scale_x_continuous( breaks=c( 1, 3, 5, 7, 9) )
+theme_set( theme_grey( base_size = 22 ) )
+barWidth <- 1.3
+xScaleConfig <- scale_x_continuous( breaks = c( 1, 3, 5, 7, 9) )
xLabel <- xlab( "Scale" )
yLabel <- ylab( "Latency (ms)" )
fillLabel <- labs( fill="Type" )
-chartTitle <- "Intent Install, Withdraw, & Reroute Latencies"
-if ( args[ 1 ] == "y" ){
- chartTitle <- paste( chartTitle, "w/ FlowObj" )
-}
-chartTitle <- paste( chartTitle, "\nBatch Size =" )
-chartTitle <- paste( chartTitle, fileData1[ 1,'batch_size' ] )
+title <- ggtitle( chartTitle )
+imageWidth <- 15
+imageHeight <- 10
+imageDPI <- 200
+errorBarColor <- rgb( 140, 140, 140, maxColorValue=255 )
-theme <- theme( plot.title=element_text( hjust = 0.5, size = 22, face='bold' ) )
+theme <- theme( plot.title=element_text( hjust = 0.5, size = 32, face='bold' ),
+ legend.position="bottom",
+ legend.text=element_text( size=22 ),
+ legend.title = element_blank(),
+ legend.key.size = unit( 1.5, 'lines' ) )
-
+colors <- scale_fill_manual( values=c( "#F77670",
+ "#619DFA",
+ "#18BA48" ) )
# Store plot configurations as 1 variable
-fundamentalGraphData <- mainPlot + xScaleConfig + xLabel + yLabel + fillLabel + theme
+fundamentalGraphData <- mainPlot +
+ xScaleConfig +
+ xLabel +
+ yLabel +
+ fillLabel +
+ theme +
+ title +
+ colors
-# Create the bar graph with error bars.
-# geom_bar contains:
-# - stat: data formatting (usually "identity")
-# - width: the width of the bar types (declared above)
-# geom_errorbar contains similar arguments as geom_bar.
+# ---------------------------
+# Generating Bar Graph Format
+# ---------------------------
+
print( "Generating bar graph with error bars." )
-barGraphFormat <- geom_bar( stat = "identity", width = width, position = "dodge" )
-errorBarFormat <- geom_errorbar( width = width, position = "dodge", color=rgb( 140, 140, 140, maxColorValue=255 ) )
-title <- ggtitle( chartTitle )
-values <- geom_text( aes( x=dataFrame$scale, y=dataFrame$ms + 0.035 * max( dataFrame$ms ), label = format( dataFrame$ms, digits=3, big.mark = ",", scientific = FALSE ) ), position=position_dodge( width=1.3 ), size = 3.2, fontface = "bold" )
-result <- fundamentalGraphData + barGraphFormat + errorBarFormat + title + values
+barGraphFormat <- geom_bar( stat = "identity",
+ width = barWidth,
+ position = "dodge" )
-# Save graph to file
+errorBarFormat <- geom_errorbar( width = barWidth,
+ position = position_dodge( barWidth ),
+ color = errorBarColor )
+
+values <- geom_text( aes( x = dataFrame$scale,
+ y = dataFrame$ms + 0.035 * max( dataFrame$ms ),
+ label = format( dataFrame$ms,
+ digits = 3,
+ big.mark = ",",
+ scientific = FALSE ) ),
+ position = position_dodge( width = barWidth ),
+ size = 5.5,
+ fontface = "bold" )
+
+wrapLegend <- guides( fill = guide_legend( nrow = 1, byrow = TRUE ) )
+
+result <- fundamentalGraphData +
+ barGraphFormat +
+ errorBarFormat +
+ values +
+ wrapLegend
+
+# -----------------------
+# Exporting Graph to File
+# -----------------------
+
print( paste( "Saving bar chart with error bars to", errBarOutputFile ) )
-ggsave( errBarOutputFile, width = 10, height = 6, dpi = 200 )
-print( paste( "Successfully wrote bar chart with error bars out to", errBarOutputFile ) )
+
+tryCatch( ggsave( errBarOutputFile,
+ width = imageWidth,
+ height = imageHeight,
+ dpi = imageDPI ),
+ error = function( e ){
+ print( "[ERROR] There was a problem saving the graph due to a graph formatting exception. Error dump:" )
+ print( e )
+ quit( status = 1 )
+ }
+ )
+
+print( paste( "[SUCCESS] Successfully wrote bar chart with error bars out to", errBarOutputFile ) )
+quit( status = 0 )
diff --git a/TestON/JenkinsFile/scripts/SCPFLineGraph.R b/TestON/JenkinsFile/scripts/SCPFLineGraph.R
index 376014d..d063d0a 100644
--- a/TestON/JenkinsFile/scripts/SCPFLineGraph.R
+++ b/TestON/JenkinsFile/scripts/SCPFLineGraph.R
@@ -22,58 +22,124 @@
# This is the R script that generates the SCPF front page graphs.
+
# **********************************************************
# STEP 1: Data management.
# **********************************************************
-print( "STEP 1: Data management." )
+database_host = 1
+database_port = 2
+database_u_id = 3
+database_pw = 4
+graph_title = 5
+branch_name = 6
+num_dates = 7
+sql_commands = 8
+y_axis = 9
+old_flow = 10
+save_directory = 11
-# Import libraries to be used for graphing and organizing data, respectively.
-# Find out more about ggplot2: https://github.com/tidyverse/ggplot2
-# reshape2: https://github.com/hadley/reshape
-# RPostgreSQL: https://code.google.com/archive/p/rpostgresql/
+print( "**********************************************************" )
+print( "STEP 1: Data management." )
+print( "**********************************************************" )
+
+# ----------------
+# Import Libraries
+# ----------------
+
print( "Importing libraries." )
library( ggplot2 )
library( reshape2 )
library( RPostgreSQL )
+# -------------------
+# Check CLI Arguments
+# -------------------
+
+print( "Verifying CLI args." )
+
# Command line arguments are read. Args include the database credentials, test name, branch name, and the directory to output files.
print( "Reading commmand-line args." )
args <- commandArgs( trailingOnly=TRUE )
# Check if sufficient args are provided.
-if ( is.na( args[ 10 ] ) ){
- print( "Usage: Rscript testresultgraph.R <database-host> <database-port> <database-user-id> <database-password> <test-name> <branch-name> <#-dates> <SQL-command> <y-axis> <directory-to-save-graph>" )
- q() # basically exit(), but in R
+if ( is.na( args[ save_directory ] ) ){
+
+ print( paste( "Usage: Rscript testresultgraph.R",
+ "<database-host>",
+ "<database-port>",
+ "<database-user-id>",
+ "<database-password>",
+ "<graph-title>", # part of the output filename as well
+ "<branch-name>", # part of the output filename
+ "<#-dates>", # part of the output filename
+ "<SQL-command>",
+                "<y-axis-title>", # the y-axis label differs among SCPF graphs (e.g. batch size, latency)
+ "<using-old-flow>",
+ "<directory-to-save-graph>",
+ sep = " " ) )
+ quit( status = 1 ) # basically exit(), but in R
}
-# Filenames for the output graph include the testname, branch, and the graph type.
+# -------------------------------
+# Create Title and Graph Filename
+# -------------------------------
-outputFile <- paste( args[ 10 ], "SCPF_Front_Page" , sep="" )
-outputFile <- paste( outputFile, gsub( " ", "_", args[ 5 ] ), sep="_" )
-outputFile <- paste( outputFile, args[ 6 ], sep="_" )
-outputFile <- paste( outputFile, args[ 7 ], sep="_" )
-outputFile <- paste( outputFile, "dates", sep="-" )
-outputFile <- paste( outputFile, "_graph.jpg", sep="" )
-
-# From RPostgreSQL
-print( "Reading from databases." )
-con <- dbConnect( dbDriver( "PostgreSQL" ), dbname="onostest", host=args[ 1 ], port=strtoi( args[ 2 ] ), user=args[ 3 ],password=args[ 4 ] )
-
-print( "Sending SQL command." )
-fileData <- dbGetQuery( con, args[ 8 ] )
+print( "Creating title of graph" )
# Title of graph based on command line args.
-title <- args[ 5 ]
+
+title <- args[ graph_title ]
+title <- paste( title, if( args[ old_flow ] == "y" ) "\nWith Eventually Consistent Flow Rule Store" else "", sep="" )
+
+print( "Creating graph filename." )
+
+# Filenames for the output graph include the testname, branch, and the graph type.
+outputFile <- paste( args[ save_directory ],
+ "SCPF_Front_Page_",
+ gsub( " ", "_", args[ graph_title ] ),
+ "_",
+ args[ branch_name ],
+ "_",
+ args[ num_dates ],
+ "-dates",
+ if( args[ old_flow ] == "y" ) "_OldFlow" else "",
+ "_graph.jpg",
+ sep="" )
+
+# ------------------
+# SQL Initialization
+# ------------------
+
+print( "Initializing SQL" )
+con <- dbConnect( dbDriver( "PostgreSQL" ),
+ dbname = "onostest",
+ host = args[ database_host ],
+ port = strtoi( args[ database_port ] ),
+ user = args[ database_u_id ],
+ password = args[ database_pw ] )
+
+print( "Sending SQL command:" )
+print( args[ sql_commands ] )
+
+fileData <- dbGetQuery( con, args[ sql_commands ] )
+
+# Check if data has been received
+if ( nrow( fileData ) == 0 ){
+ print( "[ERROR]: No data received from the databases. Please double check this by manually running the SQL command." )
+ quit( status = 1 )
+}
# **********************************************************
# STEP 2: Organize data.
# **********************************************************
-print( "STEP 2: Organize data." )
+print( "**********************************************************" )
+print( "STEP 2: Organize Data." )
+print( "**********************************************************" )
# Create lists c() and organize data into their corresponding list.
-print( "Sorting data into new data frame." )
+print( "Combine data retrieved from databases into a list." )
if ( ncol( fileData ) > 1 ){
for ( i in 2:ncol( fileData ) ){
@@ -81,13 +147,18 @@
}
}
-# Parse lists into data frames.
-# This is where reshape2 comes in. Avgs list is converted to data frame.
-dataFrame <- melt( fileData )
+# --------------------
+# Construct Data Frame
+# --------------------
+
+print( "Constructing data frame from combined data." )
+
+dataFrame <- melt( fileData )
dataFrame$date <- fileData$date
-colnames( dataFrame ) <- c( "Legend", "Values" )
+colnames( dataFrame ) <- c( "Legend",
+ "Values" )
# Format data frame so that the data is in the same order as it appeared in the file.
dataFrame$Legend <- as.character( dataFrame$Legend )
@@ -105,7 +176,13 @@
# STEP 3: Generate graphs.
# **********************************************************
-print( "STEP 3: Generate graphs." )
+print( "**********************************************************" )
+print( "STEP 3: Generate Graph." )
+print( "**********************************************************" )
+
+# -------------------
+# Main Plot Generated
+# -------------------
print( "Creating main plot." )
# Create the primary plot here.
@@ -115,33 +192,98 @@
# - x: x-axis values (usually iterative, but it will become date # later)
# - y: y-axis values (usually tests)
# - color: the category of the colored lines (usually legend of test)
-theme_set( theme_grey( base_size = 20 ) ) # set the default text size of the graph.
-mainPlot <- ggplot( data = dataFrame, aes( x = iterative, y = Values, color = Legend ) )
+
+mainPlot <- ggplot( data = dataFrame, aes( x = iterative,
+ y = Values,
+ color = Legend ) )
+
+# -------------------
+# Main Plot Formatted
+# -------------------
print( "Formatting main plot." )
-# Store plot configurations as 1 variable
-fundamentalGraphData <- mainPlot + expand_limits( y = 0 )
+limitExpansion <- expand_limits( y = 0 )
-yScaleConfig <- scale_y_continuous( breaks = seq( 0, max( dataFrame$Values ) * 1.05, by = ceiling( max( dataFrame$Values ) / 10 ) ) )
+maxYDisplay <- max( dataFrame$Values ) * 1.05
+yBreaks <- ceiling( max( dataFrame$Values ) / 10 )
+yScaleConfig <- scale_y_continuous( breaks = seq( 0, maxYDisplay, by = yBreaks ) )
-xLabel <- xlab( "Date" )
-yLabel <- ylab( args[ 9 ] )
-fillLabel <- labs( fill="Type" )
-legendLabels <- scale_colour_discrete( labels = names( fileData ) )
-centerTitle <- theme( plot.title=element_text( hjust = 0.5 ) ) # To center the title text
-theme <- theme( axis.text.x = element_blank(), axis.ticks.x = element_blank(), plot.title = element_text( size = 28, face='bold' ) )
+# ------------------------------
+# Fundamental Variables Assigned
+# ------------------------------
-fundamentalGraphData <- fundamentalGraphData + yScaleConfig + xLabel + yLabel + fillLabel + legendLabels + centerTitle + theme
-print( "Generating line graph." )
+print( "Generating fundamental graph data." )
-lineGraphFormat <- geom_line()
-pointFormat <- geom_point( size = 0.2 )
+theme_set( theme_grey( base_size = 22 ) ) # set the default text size of the graph.
+xLabel <- xlab( "Build" )
+yLabel <- ylab( args[ y_axis ] )
+
+imageWidth <- 15
+imageHeight <- 10
+imageDPI <- 200
+
+# Set other graph configurations here.
+theme <- theme( axis.text.x = element_blank(),
+ axis.ticks.x = element_blank(),
+                plot.title = element_text( size = 32, face = 'bold', hjust = 0.5 ),
+ legend.position = "bottom",
+ legend.text = element_text( size=22 ),
+ legend.title = element_blank(),
+ legend.key.size = unit( 1.5, 'lines' ),
+ legend.direction = 'horizontal' )
+
+# Colors used for the lines.
+# Note: graphs that have X lines will use the first X colors in this list.
+colors <- scale_color_manual( values=c( "#111111", # black
+ "#008CFF", # blue
+ "#FF3700", # red
+ "#00E043", # green
+ "#EEB600", # yellow
+ "#E500FF") ) # purple (not used)
+
+wrapLegend <- guides( color = guide_legend( nrow = 2, byrow = TRUE ) )
title <- ggtitle( title )
-result <- fundamentalGraphData + lineGraphFormat + pointFormat + title
+fundamentalGraphData <- mainPlot +
+ limitExpansion +
+ yScaleConfig +
+ xLabel +
+ yLabel +
+ theme +
+ colors +
+ wrapLegend +
+ title
-# Save graph to file
+# ----------------------------
+# Generating Line Graph Format
+# ----------------------------
+
+print( "Generating line graph." )
+
+lineGraphFormat <- geom_line( size = 0.75 )
+pointFormat <- geom_point( size = 1.75 )
+
+result <- fundamentalGraphData +
+ lineGraphFormat +
+ pointFormat
+
+# -----------------------
+# Exporting Graph to File
+# -----------------------
+
print( paste( "Saving result graph to", outputFile ) )
-ggsave( outputFile, width = 10, height = 6, dpi = 200 )
-print( paste( "Successfully wrote result graph out to", outputFile ) )
\ No newline at end of file
+
+tryCatch( ggsave( outputFile,
+ width = imageWidth,
+ height = imageHeight,
+ dpi = imageDPI ),
+ error = function( e ){
+ print( "[ERROR] There was a problem saving the graph due to a graph formatting exception. Error dump:" )
+ print( e )
+ quit( status = 1 )
+ }
+ )
+
+print( paste( "[SUCCESS] Successfully wrote result graph out to", outputFile ) )
+quit( status = 0 )
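
The melt() step that feeds these line graphs can be reproduced in isolation as below; the query columns are invented, but the wide-to-long reshape is the same one STEP 2 performs before plotting.

library( reshape2 )

# Invented stand-in for a wide SQL result: one row per date, one column per metric.
fileData <- data.frame( date = c( "2018-05-01", "2018-05-02" ),
                        latency = c( 12.1, 11.8 ),
                        throughput = c( 95.2, 97.4 ) )

# melt() stacks the metric columns into ( variable, value ) pairs,
# which is the long form ggplot2 expects for a multi-line graph.
dataFrame <- melt( fileData, id.vars = "date" )
colnames( dataFrame ) <- c( "date", "Legend", "Values" )
print( dataFrame )   # one row per ( date, metric ) combination
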
diff --git a/TestON/JenkinsFile/scripts/SCPFbatchFlowResp.R b/TestON/JenkinsFile/scripts/SCPFbatchFlowResp.R
index 72f66c7..d90c53e 100644
--- a/TestON/JenkinsFile/scripts/SCPFbatchFlowResp.R
+++ b/TestON/JenkinsFile/scripts/SCPFbatchFlowResp.R
@@ -21,162 +21,394 @@
# please contact Jeremy Ronquillo: j_ronquillo@u.pacific.edu
# **********************************************************
-# STEP 1: File management.
+# STEP 1: Data management.
# **********************************************************
+database_host = 1
+database_port = 2
+database_u_id = 3
+database_pw = 4
+test_name = 5
+branch_name = 6
+old_flow = 7
+save_directory = 8
-print( "STEP 1: File management." )
+print( "**********************************************************" )
+print( "STEP 1: Data management." )
+print( "**********************************************************" )
# Command line arguments are read.
print( "Reading commmand-line args." )
args <- commandArgs( trailingOnly=TRUE )
-# Import libraries to be used for graphing and organizing data, respectively.
-# Find out more about ggplot2: https://github.com/tidyverse/ggplot2
-# reshape2: https://github.com/hadley/reshape
+# ----------------
+# Import Libraries
+# ----------------
+
print( "Importing libraries." )
library( ggplot2 )
library( reshape2 )
library( RPostgreSQL ) # For databases
-# Check if sufficient args are provided.
-if ( is.na( args[ 7 ] ) ){
- print( "Usage: Rscript SCPFbatchFlowResp <database-host> <database-port> <database-user-id> <database-password> <test-name> <branch-name> <directory-to-save-graphs>" )
- q() # basically exit(), but in R
+# -------------------
+# Check CLI Arguments
+# -------------------
+
+print( "Verifying CLI args." )
+
+if ( is.na( args[ save_directory ] ) ){
+
+ print( paste( "Usage: Rscript SCPFbatchFlowResp.R",
+ "<database-host>",
+ "<database-port>",
+ "<database-user-id>",
+ "<database-password>",
+ "<test-name>",
+ "<branch-name>",
+ "<using-old-flow>",
+ "<directory-to-save-graphs>",
+ sep=" " ) )
+
+ quit( status = 1 ) # basically exit(), but in R
}
-# paste() is used to concatenate strings.
-errBarOutputFile <- paste( args[ 7 ], args[ 5 ], sep="" )
-errBarOutputFile <- paste( errBarOutputFile, args[ 6 ], sep="_" )
-errBarOutputFile <- paste( errBarOutputFile, "_PostGraph.jpg", sep="" )
+# -----------------
+# Create File Names
+# -----------------
-print( "Reading from databases." )
+print( "Creating filenames and title of graph." )
-con <- dbConnect( dbDriver( "PostgreSQL" ), dbname="onostest", host=args[ 1 ], port=strtoi( args[ 2 ] ), user=args[ 3 ],password=args[ 4 ] )
+postOutputFile <- paste( args[ save_directory ],
+ args[ test_name ],
+ "_",
+ args[ branch_name ],
+ if( args[ old_flow ] == "y" ) "_OldFlow" else "",
+ "_PostGraph.jpg",
+ sep="" )
-command <- paste( "SELECT * FROM batch_flow_tests WHERE branch='", args[ 6 ], sep="" )
-command <- paste( command, "' ORDER BY date DESC LIMIT 3", sep="" )
+delOutputFile <- paste( args[ save_directory ],
+ args[ test_name ],
+ "_",
+ args[ branch_name ],
+ if( args[ old_flow ] == "y" ) "_OldFlow" else "",
+ "_DelGraph.jpg",
+ sep="" )
-print( paste( "Sending SQL command:", command ) )
+postChartTitle <- paste( "Single Bench Flow Latency - Post\n",
+ "Last 3 Builds",
+ if( args[ old_flow ] == "y" ) "\nWith Eventually Consistent Flow Rule Store" else "",
+ sep = "" )
+delChartTitle <- paste( "Single Bench Flow Latency - Del\n",
+ "Last 3 Builds",
+ if( args[ old_flow ] == "y" ) "\nWith Eventually Consistent Flow Rule Store" else "",
+ sep = "" )
+
+# ------------------
+# SQL Initialization
+# ------------------
+
+print( "Initializing SQL" )
+
+con <- dbConnect( dbDriver( "PostgreSQL" ),
+ dbname = "onostest",
+ host = args[ database_host ],
+ port = strtoi( args[ database_port ] ),
+ user = args[ database_u_id ],
+ password = args[ database_pw ] )
+
+# ---------------------------
+# Batch Flow Resp SQL Command
+# ---------------------------
+
+print( "Generating Batch Flow Resp SQL Command" )
+
+command <- paste( "SELECT * FROM batch_flow_tests WHERE branch='",
+ args[ branch_name ],
+                  "' AND ",
+                  ( if( args[ old_flow ] == 'y' ) "" else "NOT " ),
+ "is_old_flow",
+ " ORDER BY date DESC LIMIT 3",
+ sep="" )
+
+print( "Sending SQL command:" )
+print( command )
fileData <- dbGetQuery( con, command )
-chartTitle <- paste( "Single Bench Flow Latency - Post", "Last 3 Builds", sep = "\n" )
# **********************************************************
# STEP 2: Organize data.
# **********************************************************
-avgs <- c()
+print( "**********************************************************" )
+print( "STEP 2: Organize Data." )
+print( "**********************************************************" )
-print( "Sorting data." )
-avgs <- c( fileData[ 'posttoconfrm' ], fileData[ 'elapsepost' ] )
+# -----------------
+# Post Data Sorting
+# -----------------
-dataFrame <- melt( avgs )
-dataFrame$scale <- fileData$scale
-dataFrame$date <- fileData$date
-dataFrame$iterative <- dataFrame$iterative <- rev( seq( 1, nrow( fileData ), by = 1 ) )
+print( "Sorting data for Post." )
-colnames( dataFrame ) <- c( "ms", "type", "scale", "date", "iterative" )
+requiredColumns <- c( "posttoconfrm", "elapsepost" )
+
+tryCatch( postAvgs <- c( fileData[ requiredColumns ] ),
+ error = function( e ) {
+ print( "[ERROR] One or more expected columns are missing from the data. Please check that the data and SQL command are valid, then try again." )
+ print( "Required columns: " )
+ print( requiredColumns )
+ print( "Actual columns: " )
+ print( names( fileData ) )
+ print( "Error dump:" )
+ print( e )
+ quit( status = 1 )
+ }
+ )
+
+# -------------------------
+# Post Construct Data Frame
+# -------------------------
+
+postDataFrame <- melt( postAvgs )
+postDataFrame$scale <- fileData$scale
+postDataFrame$date <- fileData$date
+postDataFrame$iterative <- rev( seq( 1, nrow( fileData ), by = 1 ) )
+
+colnames( postDataFrame ) <- c( "ms",
+ "type",
+ "scale",
+ "date",
+ "iterative" )
# Format data frame so that the data is in the same order as it appeared in the file.
-dataFrame$type <- as.character( dataFrame$type )
-dataFrame$type <- factor( dataFrame$type, levels=unique( dataFrame$type ) )
+postDataFrame$type <- as.character( postDataFrame$type )
+postDataFrame$type <- factor( postDataFrame$type,
+ levels = unique( postDataFrame$type ) )
-dataFrame <- na.omit( dataFrame ) # Omit any data that doesn't exist
+postDataFrame <- na.omit( postDataFrame ) # Omit any data that doesn't exist
-print( "Data Frame Results:" )
-print( dataFrame )
+print( "Post Data Frame Results:" )
+print( postDataFrame )
+
+# ----------------
+# Del Data Sorting
+# ----------------
+
+requiredColumns <- c( "deltoconfrm", "elapsedel" )
+
+tryCatch( delAvgs <- c( fileData[ requiredColumns ] ),
+ error = function( e ) {
+ print( "[ERROR] One or more expected columns are missing from the data. Please check that the data and SQL command are valid, then try again." )
+ print( "Required columns: " )
+ print( requiredColumns )
+ print( "Actual columns: " )
+ print( names( fileData ) )
+ print( "Error dump:" )
+ print( e )
+ quit( status = 1 )
+ }
+ )
+
+# ------------------------
+# Del Construct Data Frame
+# ------------------------
+
+delDataFrame <- melt( delAvgs )
+delDataFrame$scale <- fileData$scale
+delDataFrame$date <- fileData$date
+delDataFrame$iterative <- rev( seq( 1, nrow( fileData ), by = 1 ) )
+
+colnames( delDataFrame ) <- c( "ms",
+ "type",
+ "scale",
+ "date",
+ "iterative" )
+
+# Format data frame so that the data is in the same order as it appeared in the file.
+delDataFrame$type <- as.character( delDataFrame$type )
+delDataFrame$type <- factor( delDataFrame$type,
+ levels = unique( delDataFrame$type ) )
+
+delDataFrame <- na.omit( delDataFrame ) # Omit any data that doesn't exist
+
+print( "Del Data Frame Results:" )
+print( delDataFrame )
# **********************************************************
# STEP 3: Generate graphs.
# **********************************************************
-print( "Generating fundamental graph data." )
+print( "**********************************************************" )
+print( "STEP 3: Generate Graph." )
+print( "**********************************************************" )
-theme_set( theme_grey( base_size = 20 ) ) # set the default text size of the graph.
+# ------------------------------------------
+# Initializing variables used in both graphs
+# ------------------------------------------
-mainPlot <- ggplot( data = dataFrame, aes( x = iterative, y = ms, fill = type ) )
-xScaleConfig <- scale_x_continuous( breaks = dataFrame$iterative, label = dataFrame$date )
+print( "Initializing variables used in both graphs." )
+
+theme_set( theme_grey( base_size = 22 ) ) # set the default text size of the graph.
xLabel <- xlab( "Build Date" )
yLabel <- ylab( "Latency (ms)" )
fillLabel <- labs( fill="Type" )
-theme <- theme( plot.title=element_text( hjust = 0.5, size = 28, face='bold' ) )
+colors <- scale_fill_manual( values=c( "#F77670", "#619DFA" ) )
+wrapLegend <- guides( fill=guide_legend( nrow=1, byrow=TRUE ) )
+barWidth <- 0.3
+imageWidth <- 15
+imageHeight <- 10
+imageDPI <- 200
-fundamentalGraphData <- mainPlot + xScaleConfig + xLabel + yLabel + fillLabel + theme
+theme <- theme( plot.title = element_text( hjust = 0.5, size = 32, face = 'bold' ),
+ legend.position = "bottom",
+ legend.text = element_text( size = 22 ),
+ legend.title = element_blank(),
+ legend.key.size = unit( 1.5, 'lines' ) )
+
+barGraphFormat <- geom_bar( stat = "identity",
+ width = barWidth )
+
+# -----------------------
+# Post Generate Main Plot
+# -----------------------
+
+print( "Creating main plot for Post graph." )
+
+mainPlot <- ggplot( data = postDataFrame, aes( x = iterative,
+ y = ms,
+ fill = type ) )
+
+# -----------------------------------
+# Post Fundamental Variables Assigned
+# -----------------------------------
+
+print( "Generating fundamental graph data for Post graph." )
+
+xScaleConfig <- scale_x_continuous( breaks = postDataFrame$iterative,
+ label = postDataFrame$date )
+title <- ggtitle( postChartTitle )
-print( "Generating bar graph." )
-width <- 0.3
-barGraphFormat <- geom_bar( stat="identity", width = width )
-sum <- fileData[ 'posttoconfrm' ] + fileData[ 'elapsepost' ]
-values <- geom_text( aes( x=dataFrame$iterative, y=sum + 0.04 * max( sum ), label = format( sum, digits=3, big.mark = ",", scientific = FALSE ) ), size = 5, fontface = "bold" )
-title <- ggtitle( chartTitle )
-result <- fundamentalGraphData + barGraphFormat + title + values
+fundamentalGraphData <- mainPlot +
+ xScaleConfig +
+ xLabel +
+ yLabel +
+ fillLabel +
+ theme +
+ wrapLegend +
+ colors +
+ title
+# --------------------------------
+# Post Generating Bar Graph Format
+# --------------------------------
-print( paste( "Saving bar chart to", errBarOutputFile ) )
-ggsave( errBarOutputFile, width = 10, height = 6, dpi = 200 )
+print( "Generating bar graph for Post graph." )
-print( paste( "Successfully wrote stacked bar chart out to", errBarOutputFile ) )
+sum <- fileData[ 'posttoconfrm' ] +
+ fileData[ 'elapsepost' ]
+values <- geom_text( aes( x = postDataFrame$iterative,
+ y = sum + 0.03 * max( sum ),
+ label = format( sum,
+ digits = 3,
+ big.mark = ",",
+ scientific = FALSE ) ),
+ size = 7.0,
+ fontface = "bold" )
-# **********************************************************
-# STEP 2: Organize data.
-# **********************************************************
+result <- fundamentalGraphData +
+ barGraphFormat +
+ values
-avgs <- c()
+# ----------------------------
+# Post Exporting Graph to File
+# ----------------------------
-print( "Sorting data." )
-avgs <- c( fileData[ 'deltoconfrm' ], fileData[ 'elapsedel' ] )
+print( paste( "Saving Post bar chart to", postOutputFile ) )
-dataFrame <- melt( avgs )
-dataFrame$scale <- fileData$scale
-dataFrame$date <- fileData$date
-dataFrame$iterative <- dataFrame$iterative <- rev( seq( 1, nrow( fileData ), by = 1 ) )
+tryCatch( ggsave( postOutputFile,
+ width = imageWidth,
+ height = imageHeight,
+ dpi = imageDPI ),
+ error = function( e ){
+ print( "[ERROR] There was a problem saving the graph due to a graph formatting exception. Error dump:" )
+ print( e )
+ quit( status = 1 )
+ }
+ )
-colnames( dataFrame ) <- c( "ms", "type", "scale", "date", "iterative" )
+print( paste( "[SUCCESS] Successfully wrote stacked bar chart out to", postOutputFile ) )
-# Format data frame so that the data is in the same order as it appeared in the file.
-dataFrame$type <- as.character( dataFrame$type )
-dataFrame$type <- factor( dataFrame$type, levels=unique( dataFrame$type ) )
+# ----------------------
+# Del Generate Main Plot
+# ----------------------
-dataFrame <- na.omit( dataFrame ) # Omit any data that doesn't exist
+print( "Creating main plot for Del graph." )
-print( "Data Frame Results:" )
-print( dataFrame )
+mainPlot <- ggplot( data = delDataFrame, aes( x = iterative,
+ y = ms,
+ fill = type ) )
-# **********************************************************
-# STEP 3: Generate graphs.
-# **********************************************************
+# ----------------------------------
+# Del Fundamental Variables Assigned
+# ----------------------------------
-print( "Generating fundamental graph data." )
+print( "Generating fundamental graph data for Del graph." )
-theme_set( theme_grey( base_size = 20 ) ) # set the default text size of the graph.
+xScaleConfig <- scale_x_continuous( breaks = delDataFrame$iterative,
+ label = delDataFrame$date )
+title <- ggtitle( delChartTitle )
-mainPlot <- ggplot( data = dataFrame, aes( x = iterative, y = ms, fill = type ) )
-xScaleConfig <- scale_x_continuous( breaks = dataFrame$iterative, label = dataFrame$date )
-xLabel <- xlab( "Build Date" )
-yLabel <- ylab( "Latency (ms)" )
-fillLabel <- labs( fill="Type" )
-theme <- theme( plot.title=element_text( hjust = 0.5, size = 28, face='bold' ) )
+fundamentalGraphData <- mainPlot +
+ xScaleConfig +
+ xLabel +
+ yLabel +
+ fillLabel +
+ theme +
+ wrapLegend +
+ colors +
+ title
-fundamentalGraphData <- mainPlot + xScaleConfig + xLabel + yLabel + fillLabel + theme
+# -------------------------------
+# Del Generating Bar Graph Format
+# -------------------------------
+print( "Generating bar graph for Del graph." )
-print( "Generating bar graph." )
-width <- 0.3
-barGraphFormat <- geom_bar( stat="identity", width = width )
-sum <- fileData[ 'deltoconfrm' ] + fileData[ 'elapsedel' ]
-values <- geom_text( aes( x=dataFrame$iterative, y=sum + 0.04 * max( sum ), label = format( sum, digits=3, big.mark = ",", scientific = FALSE ) ), size = 5, fontface = "bold" )
-chartTitle <- paste( "Single Bench Flow Latency - Del", "Last 3 Builds", sep = "\n" )
-title <- ggtitle( chartTitle )
-result <- fundamentalGraphData + barGraphFormat + title + values
+sum <- fileData[ 'deltoconfrm' ] +
+ fileData[ 'elapsedel' ]
-errBarOutputFile <- paste( args[ 7 ], args[ 5 ], sep="" )
-errBarOutputFile <- paste( errBarOutputFile, args[ 6 ], sep="_" )
-errBarOutputFile <- paste( errBarOutputFile, "_DelGraph.jpg", sep="" )
+values <- geom_text( aes( x = delDataFrame$iterative,
+ y = sum + 0.03 * max( sum ),
+ label = format( sum,
+ digits = 3,
+ big.mark = ",",
+ scientific = FALSE ) ),
+ size = 7.0,
+ fontface = "bold" )
-print( paste( "Saving bar chart to", errBarOutputFile ) )
-ggsave( errBarOutputFile, width = 10, height = 6, dpi = 200 )
+result <- fundamentalGraphData +
+          barGraphFormat +
+          values
-print( paste( "Successfully wrote stacked bar chart out to", errBarOutputFile ) )
\ No newline at end of file
+# ---------------------------
+# Del Exporting Graph to File
+# ---------------------------
+
+print( paste( "Saving Del bar chart to", delOutputFile ) )
+
+tryCatch( ggsave( delOutputFile,
+ width = imageWidth,
+ height = imageHeight,
+ dpi = imageDPI ),
+ error = function( e ){
+ print( "[ERROR] There was a problem saving the graph due to a graph formatting exception. Error dump:" )
+ print( e )
+ quit( status = 1 )
+ }
+ )
+
+print( paste( "[SUCCESS] Successfully wrote stacked bar chart out to", delOutputFile ) )
+quit( status = 0 )
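
The required-columns guard added in this script works because subsetting a data frame by a name it does not contain raises an error, which the tryCatch handler converts into a clean exit. A self-contained sketch, with a deliberately missing column so the handler fires:

# Invented data; "missing_col" is intentionally absent.
fileData <- data.frame( posttoconfrm = c( 1.2, 1.3 ),
                        elapsepost = c( 0.4, 0.5 ) )
requiredColumns <- c( "posttoconfrm", "missing_col" )

tryCatch( avgs <- c( fileData[ requiredColumns ] ),
          error = function( e ) {
              print( "[ERROR] Missing columns:" )
              print( setdiff( requiredColumns, names( fileData ) ) )
              quit( status = 1 )
          } )
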
diff --git a/TestON/JenkinsFile/scripts/SCPFcbench.R b/TestON/JenkinsFile/scripts/SCPFcbench.R
index c9a6c37..0a28024 100644
--- a/TestON/JenkinsFile/scripts/SCPFcbench.R
+++ b/TestON/JenkinsFile/scripts/SCPFcbench.R
@@ -21,66 +21,143 @@
# please contact Jeremy Ronquillo: j_ronquillo@u.pacific.edu
# **********************************************************
-# STEP 1: File management.
+# STEP 1: Data management.
# **********************************************************
-print( "STEP 1: File management." )
-
+print( "**********************************************************" )
+print( "STEP 1: Data management." )
+print( "**********************************************************" )
+database_host = 1
+database_port = 2
+database_u_id = 3
+database_pw = 4
+test_name = 5
+branch_name = 6
+save_directory = 7
# Command line arguments are read.
print( "Reading commmand-line args." )
args <- commandArgs( trailingOnly=TRUE )
-# Import libraries to be used for graphing and organizing data, respectively.
-# Find out more about ggplot2: https://github.com/tidyverse/ggplot2
-# reshape2: https://github.com/hadley/reshape
+# ----------------
+# Import Libraries
+# ----------------
+
print( "Importing libraries." )
library( ggplot2 )
library( reshape2 )
library( RPostgreSQL ) # For databases
-# Normal usage
-# Check if sufficient args are provided.
-if ( is.na( args[ 7 ] ) ){
- print( "Usage: Rscript SCPFcbench <database-host> <database-port> <database-user-id> <database-password> <test-name> <branch-name> <directory-to-save-graphs>" )
- q() # basically exit(), but in R
+# -------------------
+# Check CLI Arguments
+# -------------------
+
+print( "Verifying CLI args." )
+
+if ( is.na( args[ save_directory ] ) ){
+
+  print( paste( "Usage: Rscript SCPFcbench.R",
+ "<database-host>",
+ "<database-port>",
+ "<database-user-id>",
+ "<database-password>",
+ "<test-name>",
+ "<branch-name>",
+ "<directory-to-save-graphs>",
+ sep=" " ) )
+
+ quit( status = 1 ) # basically exit(), but in R
}
-# paste() is used to concatenate strings.
-errBarOutputFile <- paste( args[ 7 ], args[ 5 ], sep="" )
-errBarOutputFile <- paste( errBarOutputFile, args[ 6 ], sep="_" )
-errBarOutputFile <- paste( errBarOutputFile, "_errGraph.jpg", sep="" )
+# -----------------
+# Create File Names
+# -----------------
-print( "Reading from databases." )
+print( "Creating filenames and title of graph." )
-con <- dbConnect( dbDriver( "PostgreSQL" ), dbname="onostest", host=args[ 1 ], port=strtoi( args[ 2 ] ), user=args[ 3 ],password=args[ 4 ] )
-
-command <- paste( "SELECT * FROM cbench_bm_tests WHERE branch='", args[ 6 ], sep="" )
-command <- paste( command, "' ORDER BY date DESC LIMIT 3", sep="" )
-
-print( paste( "Sending SQL command:", command ) )
-
-fileData <- dbGetQuery( con, command )
+errBarOutputFile <- paste( args[ save_directory ],
+ args[ test_name ],
+ "_",
+ args[ branch_name ],
+ "_errGraph.jpg",
+ sep="" )
chartTitle <- paste( "Single-Node CBench Throughput", "Last 3 Builds", sep = "\n" )
+# ------------------
+# SQL Initialization
+# ------------------
+
+print( "Initializing SQL" )
+
+con <- dbConnect( dbDriver( "PostgreSQL" ),
+ dbname = "onostest",
+ host = args[ database_host ],
+ port = strtoi( args[ database_port ] ),
+ user = args[ database_u_id ],
+ password = args[ database_pw ] )
+
+# ------------------
+# Cbench SQL Command
+# ------------------
+
+print( "Generating Scale Topology SQL Command" )
+
+command <- paste( "SELECT * FROM cbench_bm_tests WHERE branch='",
+ args[ branch_name ],
+ "' ORDER BY date DESC LIMIT 3",
+ sep="" )
+
+print( "Sending SQL command:" )
+print( command )
+
+fileData <- dbGetQuery( con, command )
+
# **********************************************************
# STEP 2: Organize data.
# **********************************************************
-fileDataNames <- names( fileData )
+print( "**********************************************************" )
+print( "STEP 2: Organize Data." )
+print( "**********************************************************" )
-avgs <- c()
-stds <- c()
+# ------------
+# Data Sorting
+# ------------
print( "Sorting data." )
-avgs <- c( fileData[ 'avg' ] )
+
+requiredColumns <- c( "avg" )
+
+tryCatch( avgs <- c( fileData[ requiredColumns ] ),
+ error = function( e ) {
+ print( "[ERROR] One or more expected columns are missing from the data. Please check that the data and SQL command are valid, then try again." )
+ print( "Required columns: " )
+ print( requiredColumns )
+ print( "Actual columns: " )
+ print( names( fileData ) )
+ print( "Error dump:" )
+ print( e )
+ quit( status = 1 )
+ }
+ )
+
+# --------------------
+# Construct Data Frame
+# --------------------
+
+print( "Constructing Data Frame" )
dataFrame <- melt( avgs )
dataFrame$std <- c( fileData$std )
dataFrame$date <- c( fileData$date )
dataFrame$iterative <- rev( seq( 1, nrow( fileData ), by = 1 ) )
-colnames( dataFrame ) <- c( "ms", "type", "std", "date", "iterative" )
+colnames( dataFrame ) <- c( "ms",
+ "type",
+ "std",
+ "date",
+ "iterative" )
dataFrame <- na.omit( dataFrame ) # Omit any data that doesn't exist
@@ -91,29 +168,97 @@
# STEP 3: Generate graphs.
# **********************************************************
+print( "**********************************************************" )
+print( "STEP 3: Generate Graph." )
+print( "**********************************************************" )
+
+# ------------------
+# Generate Main Plot
+# ------------------
+
+print( "Creating main plot." )
+
+mainPlot <- ggplot( data = dataFrame, aes( x = iterative,
+ y = ms,
+ ymin = ms,
+ ymax = ms + std ) )
+
+# ------------------------------
+# Fundamental Variables Assigned
+# ------------------------------
+
print( "Generating fundamental graph data." )
-theme_set( theme_grey( base_size = 20 ) ) # set the default text size of the graph.
-
-mainPlot <- ggplot( data = dataFrame, aes( x = iterative, y = ms, ymin = ms - std, ymax = ms + std ) )
-xScaleConfig <- scale_x_continuous( breaks = dataFrame$iterative, label = dataFrame$date )
+theme_set( theme_grey( base_size = 22 ) ) # set the default text size of the graph.
+barWidth <- 0.3
+xScaleConfig <- scale_x_continuous( breaks = dataFrame$iterative,
+ label = dataFrame$date )
xLabel <- xlab( "Build Date" )
yLabel <- ylab( "Responses / sec" )
-fillLabel <- labs( fill="Type" )
-theme <- theme( plot.title=element_text( hjust = 0.5, size = 28, face='bold' ) )
+fillLabel <- labs( fill = "Type" )
+imageWidth <- 15
+imageHeight <- 10
+imageDPI <- 200
+errorBarColor <- rgb( 140, 140, 140, maxColorValue = 255 )
-fundamentalGraphData <- mainPlot + xScaleConfig + xLabel + yLabel + fillLabel + theme
+theme <- theme( plot.title = element_text( hjust = 0.5, size = 32, face = 'bold' ),
+ legend.position = "bottom",
+ legend.text = element_text( size = 18, face = "bold" ),
+ legend.title = element_blank() )
+title <- ggtitle( chartTitle )
+
+fundamentalGraphData <- mainPlot +
+ xScaleConfig +
+ xLabel +
+ yLabel +
+ fillLabel +
+ theme +
+ title
+
+# ---------------------------
+# Generating Bar Graph Format
+# ---------------------------
print( "Generating bar graph with error bars." )
-width <- 0.3
-barGraphFormat <- geom_bar( stat="identity", position = position_dodge(), width = width, fill="#00AA13" )
-errorBarFormat <- geom_errorbar( width = width, color=rgb( 140, 140,140, maxColorValue=255 ) )
-values <- geom_text( aes( x=dataFrame$iterative, y=fileData[ 'avg' ] + 0.025 * max( fileData[ 'avg' ] ), label = format( fileData[ 'avg' ], digits=3, big.mark = ",", scientific = FALSE ) ), size = 4.5, fontface = "bold" )
-title <- ggtitle( chartTitle )
-result <- fundamentalGraphData + barGraphFormat + errorBarFormat + title + values
+barGraphFormat <- geom_bar( stat = "identity",
+ position = position_dodge(),
+ width = barWidth,
+ fill = "#00AA13" )
+
+errorBarFormat <- geom_errorbar( width = barWidth,
+ color = errorBarColor )
+
+values <- geom_text( aes( x = dataFrame$iterative,
+                          y = fileData[ 'avg' ] + 0.025 * max( fileData[ 'avg' ] ),
+                          label = format( fileData[ 'avg' ],
+                                          digits = 3,
+ big.mark = ",",
+ scientific = FALSE ) ),
+ size = 7.0,
+ fontface = "bold" )
+
+result <- fundamentalGraphData +
+ barGraphFormat +
+ errorBarFormat +
+ values
+
+# -----------------------
+# Exporting Graph to File
+# -----------------------
print( paste( "Saving bar chart with error bars to", errBarOutputFile ) )
-ggsave( errBarOutputFile, width = 10, height = 6, dpi = 200 )
-print( paste( "Successfully wrote bar chart with error bars out to", errBarOutputFile ) )
+
+tryCatch( ggsave( errBarOutputFile,
+ width = imageWidth,
+ height = imageHeight,
+ dpi = imageDPI ),
+ error = function( e ){
+ print( "[ERROR] There was a problem saving the graph due to a graph formatting exception. Error dump:" )
+ print( e )
+ quit( status = 1 )
+ }
+ )
+
+print( paste( "[SUCCESS] Successfully wrote bar chart with error bars out to", errBarOutputFile ) )
diff --git a/TestON/JenkinsFile/scripts/SCPFflowTp1g.R b/TestON/JenkinsFile/scripts/SCPFflowTp1g.R
index 8ec053e..3d3a95e 100644
--- a/TestON/JenkinsFile/scripts/SCPFflowTp1g.R
+++ b/TestON/JenkinsFile/scripts/SCPFflowTp1g.R
@@ -21,89 +21,190 @@
# please contact Jeremy Ronquillo: j_ronquillo@u.pacific.edu
# **********************************************************
-# STEP 1: File management.
+# STEP 1: Data management.
# **********************************************************
+has_flow_obj = 1
+database_host = 2
+database_port = 3
+database_u_id = 4
+database_pw = 5
+test_name = 6
+branch_name = 7
+has_neighbors = 8
+old_flow = 9
+save_directory = 10
-print( "STEP 1: File management." )
+print( "**********************************************************" )
+print( "STEP 1: Data management." )
+print( "**********************************************************" )
# Command line arguments are read.
print( "Reading commmand-line args." )
args <- commandArgs( trailingOnly=TRUE )
-# Import libraries to be used for graphing and organizing data, respectively.
-# Find out more about ggplot2: https://github.com/tidyverse/ggplot2
-# reshape2: https://github.com/hadley/reshape
+# ----------------
+# Import Libraries
+# ----------------
+
print( "Importing libraries." )
library( ggplot2 )
library( reshape2 )
library( RPostgreSQL ) # For databases
-# Normal usage
-# Check if sufficient args are provided.
-if ( is.na( args[ 9 ] ) ){
- print( "Usage: Rscript SCPFflowTp1g.R <has-flow-obj> <database-host> <database-port> <database-user-id> <database-password> <test-name> <branch-name> <has-neighbors> <directory-to-save-graphs>" )
- q() # basically exit(), but in R
+# -------------------
+# Check CLI Arguments
+# -------------------
+
+print( "Verifying CLI args." )
+
+if ( is.na( args[ save_directory ] ) ){
+
+ print( paste( "Usage: Rscript SCPFflowTp1g.R",
+ "<has-flow-obj>",
+ "<database-host>",
+ "<database-port>",
+ "<database-user-id>",
+ "<database-password>",
+ "<test-name>",
+ "<branch-name>",
+ "<has-neighbors>",
+ "<using-old-flow>",
+ "<directory-to-save-graphs>",
+ sep=" " ) )
+
+ quit( status = 1 ) # basically exit(), but in R
}
-# paste() is used to concatenate strings.
-errBarOutputFile <- paste( args[ 9 ], args[ 6 ], sep="" )
-errBarOutputFile <- paste( errBarOutputFile, args[ 7 ], sep="_" )
-if ( args[ 8 ] == 'y' ){
- errBarOutputFile <- paste( errBarOutputFile, "all-neighbors", sep="_" )
-} else {
- errBarOutputFile <- paste( errBarOutputFile, "no-neighbors", sep="_" )
-}
-if ( args[ 1 ] == 'y' ){
- errBarOutputFile <- paste( errBarOutputFile, "flowObj", sep="_")
-}
-errBarOutputFile <- paste( errBarOutputFile, "_graph.jpg", sep="" )
+# -----------------
+# Create File Names
+# -----------------
-print( "Reading from databases." )
-con <- dbConnect( dbDriver( "PostgreSQL" ), dbname="onostest", host=args[ 2 ], port=strtoi( args[ 3 ] ), user=args[ 4 ],password=args[ 5 ] )
+print( "Creating filenames and title of graph." )
+chartTitle <- "Flow Throughput Test"
+fileNeighborsModifier <- "no"
commandNeighborModifier <- ""
-flowObjModifier <- ""
-if ( args[ 1 ] == 'y' ){
- flowObjModifier <- "_fobj"
+fileFlowObjModifier <- ""
+sqlFlowObjModifier <- ""
+if ( args[ has_flow_obj ] == 'y' ){
+ fileFlowObjModifier <- "_flowObj"
+ sqlFlowObjModifier <- "_fobj"
+ chartTitle <- paste( chartTitle, " with Flow Objectives", sep="" )
}
-if ( args[ 8 ] == 'y' ){
+
+chartTitle <- paste( chartTitle, "\nNeighbors =", sep="" )
+
+fileOldFlowModifier <- ""
+if ( args[ has_neighbors ] == 'y' ){
+ fileNeighborsModifier <- "all"
commandNeighborModifier <- "scale=1 OR NOT "
+ chartTitle <- paste( chartTitle, "Cluster Size - 1" )
+} else {
+ chartTitle <- paste( chartTitle, "0" )
}
+if ( args[ old_flow ] == 'y' ){
+ fileOldFlowModifier <- "_OldFlow"
+ chartTitle <- paste( chartTitle, "With Eventually Consistent Flow Rule Store", sep="\n" )
+}
+errBarOutputFile <- paste( args[ save_directory ],
+ args[ test_name ],
+ "_",
+ args[ branch_name ],
+ "_",
+ fileNeighborsModifier,
+ "-neighbors",
+ fileFlowObjModifier,
+ fileOldFlowModifier,
+ "_graph.jpg",
+ sep="" )
+# ------------------
+# SQL Initialization
+# ------------------
-command <- paste( "SELECT scale, avg( avg ), avg( std ) FROM flow_tp", flowObjModifier, sep="" )
-command <- paste( command, "_tests WHERE (", sep="" )
-command <- paste( command, commandNeighborModifier, sep="" )
-command <- paste( command, "neighbors = 0 ) AND branch = '", sep="" )
-command <- paste( command, args[ 7 ], sep="" )
-command <- paste( command, "' AND date IN ( SELECT max( date ) FROM flow_tp", sep="" )
-command <- paste( command, flowObjModifier, sep="" )
-command <- paste( command, "_tests WHERE branch='", sep="" )
-command <- paste( command, args[ 7 ], sep="" )
-command <- paste( command, "' ) GROUP BY scale ORDER BY scale", sep="" )
+print( "Initializing SQL" )
-print( paste( "Sending SQL command:", command ) )
+con <- dbConnect( dbDriver( "PostgreSQL" ),
+ dbname = "onostest",
+ host = args[ database_host ],
+ port = strtoi( args[ database_port ] ),
+ user = args[ database_u_id ],
+ password = args[ database_pw ] )
+
+# ---------------------------
+# Flow Throughput SQL Command
+# ---------------------------
+
+print( "Generating Flow Throughput SQL command." )
+
+command <- paste( "SELECT scale, avg( avg ), avg( std ) FROM flow_tp",
+ sqlFlowObjModifier,
+ "_tests WHERE (",
+ commandNeighborModifier,
+ "neighbors = 0 ) AND branch = '",
+ args[ branch_name ],
+ "' AND date IN ( SELECT max( date ) FROM flow_tp",
+ sqlFlowObjModifier,
+ "_tests WHERE branch='",
+ args[ branch_name ],
+ "' AND ",
+ ( if( args[ old_flow ] == 'y' ) "" else "NOT " ),
+ "is_old_flow",
+ " ) GROUP BY scale ORDER BY scale",
+ sep="" )
+
+print( "Sending SQL command:" )
+print( command )
fileData <- dbGetQuery( con, command )
-title <- paste( args[ 6 ], args[ 7 ], sep="_" )
-
# **********************************************************
# STEP 2: Organize data.
# **********************************************************
-print( "STEP 2: Organize data." )
+print( "**********************************************************" )
+print( "STEP 2: Organize Data." )
+print( "**********************************************************" )
-# Create lists c() and organize data into their corresponding list.
-print( "Sorting data." )
-colnames( fileData ) <- c( "scale", "avg", "std" )
-avgs <- c( fileData[ 'avg' ] )
+# ------------
+# Data Sorting
+# ------------
-# Parse lists into data frames.
+print( "Sorting data for Flow Throughput." )
+
+colnames( fileData ) <- c( "scale",
+ "avg",
+ "std" )
+
+requiredColumns <- c( "avg" )
+
+tryCatch( avgs <- c( fileData[ requiredColumns ] ),
+ error = function( e ) {
+ print( "[ERROR] One or more expected columns are missing from the data. Please check that the data and SQL command are valid, then try again." )
+ print( "Required columns: " )
+ print( requiredColumns )
+ print( "Actual columns: " )
+ print( names( fileData ) )
+ print( "Error dump:" )
+ print( e )
+ quit( status = 1 )
+ }
+ )
+
+# ----------------------------
+# Flow TP Construct Data Frame
+# ----------------------------
+
+print( "Constructing Flow TP data frame." )
+
dataFrame <- melt( avgs ) # This is where reshape2 comes in. Avgs list is converted to data frame
-dataFrame$scale <- fileData$scale # Add node scaling to the data frame.
+dataFrame$scale <- fileData$scale # Add node scaling to the data frame.
dataFrame$std <- fileData$std
-colnames( dataFrame ) <- c( "throughput", "type", "scale", "std" )
+colnames( dataFrame ) <- c( "throughput",
+ "type",
+ "scale",
+ "std" )
dataFrame <- na.omit( dataFrame ) # Omit any data that doesn't exist
@@ -114,19 +215,15 @@
# STEP 3: Generate graphs.
# **********************************************************
-print( "STEP 3: Generate graphs." )
+print( "**********************************************************" )
+print( "STEP 3: Generate Graph." )
+print( "**********************************************************" )
-# 1. Graph fundamental data is generated first.
-# These are variables that apply to all of the graphs being generated, regardless of type.
-#
-# 2. Type specific graph data is generated.
-# Data specific for the error bar and stacked bar graphs are generated.
-#
-# 3. Generate and save the graphs.
-# Graphs are saved to the filename above, in the directory provided in command line args
+# ------------------
+# Generate Main Plot
+# ------------------
-print( "Generating fundamental graph data." )
-
+print( "Generating main plot." )
# Create the primary plot here.
# ggplot contains the following arguments:
# - data: the data frame that the graph will be based off of
@@ -135,33 +232,47 @@
# - y: y-axis values (usually time in milliseconds)
# - fill: the category of the colored side-by-side bars (usually type)
-theme_set( theme_grey( base_size = 20 ) ) # set the default text size of the graph.
+mainPlot <- ggplot( data = dataFrame, aes( x = scale,
+ y = throughput,
+ ymin = throughput,
+ ymax = throughput + std,
+ fill = type ) )
+# ------------------------------
+# Fundamental Variables Assigned
+# ------------------------------
-mainPlot <- ggplot( data = dataFrame, aes( x = scale, y = throughput, ymin = throughput - std, ymax = throughput + std, fill = type ) )
+print( "Generating fundamental graph data." )
# Formatting the plot
+theme_set( theme_grey( base_size = 22 ) ) # set the default text size of the graph.
width <- 0.7 # Width of the bars.
-xScaleConfig <- scale_x_continuous( breaks = dataFrame$scale, label = dataFrame$scale )
+xScaleConfig <- scale_x_continuous( breaks = dataFrame$scale,
+ label = dataFrame$scale )
xLabel <- xlab( "Scale" )
yLabel <- ylab( "Throughput (,000 Flows/sec)" )
fillLabel <- labs( fill="Type" )
-chartTitle <- "Flow Throughput Test"
-if ( args[ 1 ] == 'y' ){
- chartTitle <- paste( chartTitle, " with Flow Objectives", sep="" )
-}
-chartTitle <- paste( chartTitle, "\nNeighbors =", sep="" )
-if ( args[ 8 ] == 'y' ){
- chartTitle <- paste( chartTitle, "Cluster Size - 1" )
-} else {
- chartTitle <- paste( chartTitle, "0" )
-}
+imageWidth <- 15
+imageHeight <- 10
+imageDPI <- 200
+errorBarColor <- rgb( 140, 140, 140, maxColorValue=255 )
-theme <- theme( plot.title=element_text( hjust = 0.5, size = 28, face='bold' ) )
-
+theme <- theme( plot.title = element_text( hjust = 0.5,
+ size = 32,
+ face = 'bold' ) )
+title <- ggtitle( chartTitle )
# Store plot configurations as 1 variable
-fundamentalGraphData <- mainPlot + xScaleConfig + xLabel + yLabel + fillLabel + theme
+fundamentalGraphData <- mainPlot +
+ xScaleConfig +
+ xLabel +
+ yLabel +
+ fillLabel +
+ theme +
+ title
+# ---------------------------
+# Generating Bar Graph Format
+# ---------------------------
# Create the stacked bar graph with error bars.
# geom_bar contains:
@@ -169,13 +280,44 @@
# - width: the width of the bar types (declared above)
# geom_errorbar contains similar arguments as geom_bar.
print( "Generating bar graph with error bars." )
-barGraphFormat <- geom_bar( stat = "identity", width = width, fill="#FFA94F" )
-errorBarFormat <- geom_errorbar( width = width, position=position_dodge(), color=rgb( 140,140,140, maxColorValue=255 ) )
-values <- geom_text( aes( x=dataFrame$scale, y=dataFrame$throughput + 0.04 * max( dataFrame$throughput ), label = format( dataFrame$throughput, digits=3, big.mark = ",", scientific = FALSE ) ), size = 5, fontface = "bold" )
-title <- ggtitle( paste( chartTitle, "" ) )
-result <- fundamentalGraphData + barGraphFormat + errorBarFormat + title + values
+barGraphFormat <- geom_bar( stat = "identity",
+ width = width,
+ fill = "#FFAA3C" )
-# Save graph to file
+errorBarFormat <- geom_errorbar( width = width,
+ position = position_dodge(),
+ color = errorBarColor )
+
+values <- geom_text( aes( x = dataFrame$scale,
+ y = dataFrame$throughput + 0.03 * max( dataFrame$throughput ),
+ label = format( dataFrame$throughput,
+                                          digits = 3,
+ big.mark = ",",
+ scientific = FALSE ) ),
+ size = 7.0,
+ fontface = "bold" )
+
+result <- fundamentalGraphData +
+ barGraphFormat +
+ errorBarFormat +
+ values
+
+# -----------------------
+# Exporting Graph to File
+# -----------------------
+
print( paste( "Saving bar chart with error bars to", errBarOutputFile ) )
-ggsave( errBarOutputFile, width = 10, height = 6, dpi = 200 )
-print( paste( "Successfully wrote bar chart with error bars out to", errBarOutputFile ) )
+
+tryCatch( ggsave( errBarOutputFile,
+ width = imageWidth,
+ height = imageHeight,
+ dpi = imageDPI ),
+ error = function( e ){
+ print( "[ERROR] There was a problem saving the graph due to a graph formatting exception. Error dump:" )
+ print( e )
+ quit( status = 1 )
+ }
+ )
+
+print( paste( "[SUCCESS] Successfully wrote bar chart with error bars out to", errBarOutputFile ) )
+quit( status = 0 )
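
The modifier flags above splice directly into a single SQL string; the sketch below assembles the command for a hypothetical branch with flow objectives and neighbors enabled. Since the arguments are pasted into the query verbatim, these scripts rely on Jenkins supplying trusted values.

# Invented flag values, mirroring the 'y' branches above.
sqlFlowObjModifier <- "_fobj"
commandNeighborModifier <- "scale=1 OR NOT "
branch <- "master"

command <- paste( "SELECT scale, avg( avg ), avg( std ) FROM flow_tp",
                  sqlFlowObjModifier,
                  "_tests WHERE ( ",
                  commandNeighborModifier,
                  "neighbors = 0 ) AND branch = '",
                  branch,
                  "' GROUP BY scale ORDER BY scale",
                  sep = "" )
print( command )   # SELECT ... FROM flow_tp_fobj_tests WHERE ( scale=1 OR NOT neighbors = 0 ) ...
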
diff --git a/TestON/JenkinsFile/scripts/SCPFhostLat.R b/TestON/JenkinsFile/scripts/SCPFhostLat.R
index 738765a..90781a3 100644
--- a/TestON/JenkinsFile/scripts/SCPFhostLat.R
+++ b/TestON/JenkinsFile/scripts/SCPFhostLat.R
@@ -21,67 +21,142 @@
# please contact Jeremy Ronquillo: j_ronquillo@u.pacific.edu
# **********************************************************
-# STEP 1: File management.
-# **********************************************************
-
-print( "STEP 1: File management." )
+# STEP 1: Data management.
+# **********************************************************
+print( "**********************************************************" )
+print( "STEP 1: Data management." )
+print( "**********************************************************" )
+database_host = 1
+database_port = 2
+database_u_id = 3
+database_pw = 4
+test_name = 5
+branch_name = 6
+save_directory = 7
# Command line arguments are read.
print( "Reading commmand-line args." )
args <- commandArgs( trailingOnly=TRUE )
-# Import libraries to be used for graphing and organizing data, respectively.
-# Find out more about ggplot2: https://github.com/tidyverse/ggplot2
-# reshape2: https://github.com/hadley/reshape
+# ----------------
+# Import Libraries
+# ----------------
+
print( "Importing libraries." )
library( ggplot2 )
library( reshape2 )
library( RPostgreSQL ) # For databases
+# -------------------
+# Check CLI Arguments
+# -------------------
-# Check if sufficient args are provided.
-if ( is.na( args[ 7 ] ) ){
- print( "Usage: Rscript SCPFhostLat <database-host> <database-port> <database-user-id> <database-password> <test-name> <branch-name> <directory-to-save-graphs>" )
- q() # basically exit(), but in R
+print( "Verifying CLI args." )
+
+if ( is.na( args[ save_directory ] ) ){
+
+  print( paste( "Usage: Rscript SCPFhostLat.R",
+ "<database-host>",
+ "<database-port>",
+ "<database-user-id>",
+ "<database-password>",
+ "<test-name>",
+ "<branch-name>",
+ "<directory-to-save-graphs>",
+ sep=" " ) )
+
+ quit( status = 1 ) # basically exit(), but in R
}
-# paste() is used to concatenate strings
-errBarOutputFile <- paste( args[ 7 ], args[ 5 ], sep="" )
-errBarOutputFile <- paste( errBarOutputFile, args[ 6 ], sep="_" )
-errBarOutputFile <- paste( errBarOutputFile, "_errGraph.jpg", sep="" )
+# -----------------
+# Create File Names
+# -----------------
-print( "Reading from databases." )
+print( "Creating filenames and title of graph." )
-con <- dbConnect( dbDriver( "PostgreSQL" ), dbname="onostest", host=args[ 1 ], port=strtoi( args[ 2 ] ), user=args[ 3 ],password=args[ 4 ] )
-
-command <- paste( "SELECT * FROM host_latency_tests WHERE branch = '", args[ 6 ], sep = "" )
-command <- paste( command, "' AND date IN ( SELECT MAX( date ) FROM host_latency_tests WHERE branch = '", sep = "" )
-command <- paste( command, args[ 6 ], sep = "" )
-command <- paste( command, "' ) ", sep="" )
-
-print( paste( "Sending SQL command:", command ) )
-
-fileData <- dbGetQuery( con, command )
+errBarOutputFile <- paste( args[ save_directory ],
+ args[ test_name ],
+ "_",
+ args[ branch_name ],
+ "_errGraph.jpg",
+ sep="" )
chartTitle <- "Host Latency"
+# ------------------
+# SQL Initialization
+# ------------------
+
+print( "Initializing SQL" )
+
+con <- dbConnect( dbDriver( "PostgreSQL" ),
+ dbname = "onostest",
+ host = args[ database_host ],
+ port = strtoi( args[ database_port ] ),
+ user = args[ database_u_id ],
+ password = args[ database_pw ] )
+
+# ------------------------
+# Host Latency SQL Command
+# ------------------------
+
+print( "Generating Host Latency SQL Command" )
+
+command <- paste( "SELECT * FROM host_latency_tests WHERE branch = '",
+ args[ branch_name ],
+ "' AND date IN ( SELECT MAX( date ) FROM host_latency_tests WHERE branch = '",
+ args[ branch_name ],
+ "' ) ",
+ sep = "" )
+
+print( "Sending SQL command:" )
+print( command )
+
+fileData <- dbGetQuery( con, command )
# **********************************************************
# STEP 2: Organize data.
# **********************************************************
-print( "STEP 2: Organize data." )
+print( "**********************************************************" )
+print( "STEP 2: Organize Data." )
+print( "**********************************************************" )
-avgs <- c()
+# ------------
+# Data Sorting
+# ------------
print( "Sorting data." )
-avgs <- c( fileData[ 'avg' ] )
+
+requiredColumns <- c( "avg" )
+
+tryCatch( avgs <- c( fileData[ requiredColumns ] ),
+ error = function( e ) {
+ print( "[ERROR] One or more expected columns are missing from the data. Please check that the data and SQL command are valid, then try again." )
+ print( "Required columns: " )
+ print( requiredColumns )
+ print( "Actual columns: " )
+ print( names( fileData ) )
+ print( "Error dump:" )
+ print( e )
+ quit( status = 1 )
+ }
+ )
+
+# --------------------
+# Construct Data Frame
+# --------------------
+
+print( "Constructing Data Frame" )
dataFrame <- melt( avgs )
dataFrame$scale <- fileData$scale
dataFrame$std <- fileData$std
-colnames( dataFrame ) <- c( "ms", "type", "scale", "std" )
+colnames( dataFrame ) <- c( "ms",
+ "type",
+ "scale",
+ "std" )
dataFrame <- na.omit( dataFrame ) # Omit any data that doesn't exist
@@ -92,29 +167,93 @@
# STEP 3: Generate graphs.
# **********************************************************
+print( "**********************************************************" )
+print( "STEP 3: Generate Graph." )
+print( "**********************************************************" )
+
+# ------------------
+# Generate Main Plot
+# ------------------
+
+print( "Creating main plot." )
+
+mainPlot <- ggplot( data = dataFrame, aes( x = scale,
+ y = ms,
+ ymin = ms,
+ ymax = ms + std ) )
+
+# ------------------------------
+# Fundamental Variables Assigned
+# ------------------------------
+
print( "Generating fundamental graph data." )
-theme_set( theme_grey( base_size = 20 ) ) # set the default text size of the graph.
-mainPlot <- ggplot( data = dataFrame, aes( x = scale, y = ms, ymin = ms - std, ymax = ms + std ) )
-xScaleConfig <- scale_x_continuous( breaks=c( 1, 3, 5, 7, 9) )
+theme_set( theme_grey( base_size = 22 ) ) # set the default text size of the graph.
+xScaleConfig <- scale_x_continuous( breaks = c( 1, 3, 5, 7, 9 ) )
xLabel <- xlab( "Scale" )
yLabel <- ylab( "Latency (ms)" )
fillLabel <- labs( fill="Type" )
-theme <- theme( plot.title=element_text( hjust = 0.5, size = 28, face='bold' ) )
+theme <- theme( plot.title = element_text( hjust = 0.5, size = 32, face = 'bold' ) )
+title <- ggtitle( chartTitle )
+errorBarColor <- rgb( 140, 140, 140, maxColorValue = 255 )
+imageWidth <- 15
+imageHeight <- 10
+imageDPI <- 200
-fundamentalGraphData <- mainPlot + xScaleConfig + xLabel + yLabel + fillLabel + theme
+fundamentalGraphData <- mainPlot +
+ xScaleConfig +
+ xLabel +
+ yLabel +
+ fillLabel +
+ theme +
+ title
+# ---------------------------
+# Generating Bar Graph Format
+# ---------------------------
print( "Generating bar graph with error bars." )
-width <- 0.9
-barGraphFormat <- geom_bar( stat="identity", position=position_dodge( ), width = width, fill="#E8BD00" )
-errorBarFormat <- geom_errorbar( position=position_dodge(), width = width, color=rgb( 140, 140, 140, maxColorValue=255 ) )
-values <- geom_text( aes( x=dataFrame$scale, y=dataFrame$ms + 0.08 * max( dataFrame$ms ), label = format( dataFrame$ms, digits=3, big.mark = ",", scientific = FALSE ) ), size = 5, fontface = "bold" )
-title <- ggtitle( paste( chartTitle, "with Standard Error Bars" ) )
-result <- fundamentalGraphData + barGraphFormat + errorBarFormat + title + values
+barWidth <- 0.9
+barGraphFormat <- geom_bar( stat = "identity",
+ position = position_dodge(),
+ width = barWidth,
+ fill = "#A700EF" )
+
+errorBarFormat <- geom_errorbar( position = position_dodge(),
+ width = barWidth,
+ color = errorBarColor )
+
+values <- geom_text( aes( x = dataFrame$scale,
+                          y = dataFrame$ms + 0.06 * max( dataFrame$ms ),
+                          label = format( dataFrame$ms,
+                                          digits = 3,
+ big.mark = ",",
+ scientific = FALSE ) ),
+ size = 7.0,
+ fontface = "bold" )
+
+result <- fundamentalGraphData +
+ barGraphFormat +
+ errorBarFormat +
+ values
+
+# -----------------------
+# Exporting Graph to File
+# -----------------------
print( paste( "Saving bar chart with error bars to", errBarOutputFile ) )
-ggsave( errBarOutputFile, width = 10, height = 6, dpi = 200 )
-print( paste( "Successfully wrote bar chart out to", errBarOutputFile ) )
\ No newline at end of file
+tryCatch( ggsave( errBarOutputFile,
+ width = imageWidth,
+ height = imageHeight,
+ dpi = imageDPI ),
+ error = function( e ){
+ print( "[ERROR] There was a problem saving the graph due to a graph formatting exception. Error dump:" )
+ print( e )
+ quit( status = 1 )
+ }
+ )
+
+print( paste( "[SUCCESS] Successfully wrote bar chart out to", errBarOutputFile ) )
+quit( status = 0 )
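
Every bar chart in this change labels its bars with a geom_text layer offset by a fixed fraction of the y-range (6% in this script). A reduced sketch with invented data; referencing dataFrame$ columns inside aes() mirrors the scripts' style, though bare column names would also work:

library( ggplot2 )

dataFrame <- data.frame( scale = c( 1, 3, 5 ), ms = c( 4.2, 5.1, 6.9 ) )

values <- geom_text( aes( x = dataFrame$scale,
                          y = dataFrame$ms + 0.06 * max( dataFrame$ms ),
                          label = format( dataFrame$ms,
                                          digits = 3,
                                          big.mark = ",",
                                          scientific = FALSE ) ),
                     size = 7.0,
                     fontface = "bold" )

result <- ggplot( dataFrame, aes( x = scale, y = ms ) ) +
          geom_bar( stat = "identity", width = 0.9, fill = "#A700EF" ) +
          values
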
diff --git a/TestON/JenkinsFile/scripts/SCPFintentEventTp.R b/TestON/JenkinsFile/scripts/SCPFintentEventTp.R
index 420b444..0b168ba 100644
--- a/TestON/JenkinsFile/scripts/SCPFintentEventTp.R
+++ b/TestON/JenkinsFile/scripts/SCPFintentEventTp.R
@@ -21,88 +21,185 @@
# please contact Jeremy Ronquillo: j_ronquillo@u.pacific.edu
# **********************************************************
-# STEP 1: File management.
+# STEP 1: Data management.
# **********************************************************
-print( "STEP 1: File management." )
+print( "**********************************************************" )
+print( "STEP 1: Data management." )
+print( "**********************************************************" )
+has_flow_obj = 1
+database_host = 2
+database_port = 3
+database_u_id = 4
+database_pw = 5
+test_name = 6
+branch_name = 7
+has_neighbors = 8
+old_flow = 9
+save_directory = 10
-# Command line arguments are read. Args usually include the database filename and the output
-# directory for the graphs to save to.
+# Command line arguments are read.
print( "Reading commmand-line args." )
args <- commandArgs( trailingOnly=TRUE )
-# Import libraries to be used for graphing and organizing data, respectively.
-# Find out more about ggplot2: https://github.com/tidyverse/ggplot2
-# reshape2: https://github.com/hadley/reshape
+# ----------------
+# Import Libraries
+# ----------------
+
print( "Importing libraries." )
library( ggplot2 )
library( reshape2 )
library( RPostgreSQL ) # For databases
-# Normal usage
-# Check if sufficient args are provided.
-if ( is.na( args[ 9 ] ) ){
- print( "Usage: Rscript SCPFIntentEventTp.R <has-flow-obj> <database-host> <database-port> <database-user-id> <database-password> <test-name> <branch-name> <has-neighbors> <directory-to-save-graphs>" )
- q() # basically exit(), but in R
+# -------------------
+# Check CLI Arguments
+# -------------------
+
+print( "Verifying CLI args." )
+
+if ( is.na( args[ save_directory ] ) ){
+
+ print( paste( "Usage: Rscript SCPFIntentEventTp.R",
+ "<has-flow-obj>",
+ "<database-host>",
+ "<database-port>",
+ "<database-user-id>",
+ "<database-password>",
+ "<test-name>",
+ "<branch-name>",
+ "<has-neighbors>",
+ "<using-old-flow>",
+ "<directory-to-save-graphs>",
+ sep=" " ) )
+
+ quit( status = 1 ) # basically exit(), but in R
}
-# paste() is used to concatenate strings.
-errBarOutputFile <- paste( args[ 9 ], args[ 6 ], sep="" )
-errBarOutputFile <- paste( errBarOutputFile, args[ 7 ], sep="_" )
-if ( args[ 8 ] == 'y' ){
- errBarOutputFile <- paste( errBarOutputFile, "all-neighbors", sep="_" )
-} else {
- errBarOutputFile <- paste( errBarOutputFile, "no-neighbors", sep="_" )
-}
-if ( args[ 1 ] == 'y' ){
- errBarOutputFile <- paste( errBarOutputFile, "flowObj", sep="_")
-}
-errBarOutputFile <- paste( errBarOutputFile, "_graph.jpg", sep="" )
+# -----------------
+# Create File Names
+# -----------------
-print( "Reading from databases." )
-con <- dbConnect( dbDriver( "PostgreSQL" ), dbname="onostest", host=args[ 2 ], port=strtoi( args[ 3 ] ), user=args[ 4 ],password=args[ 5 ] )
+print( "Creating filenames and title of graph." )
+chartTitle <- "Intent Event Throughput"
+fileNeighborsModifier <- "no"
commandNeighborModifier <- ""
-flowObjModifier <- ""
-if ( args[ 1 ] == 'y' ){
- flowObjModifier <- "_fobj"
+fileFlowObjModifier <- ""
+sqlFlowObjModifier <- ""
+
+if ( args[ has_flow_obj ] == 'y' ){
+ fileFlowObjModifier <- "_flowObj"
+ sqlFlowObjModifier <- "_fobj"
+ chartTitle <- paste( chartTitle, " with Flow Objectives", sep="" )
}
-if ( args[ 8 ] == 'y' ){
+
+chartTitle <- paste( chartTitle, "\nevents/second with Neighbors =", sep="" )
+
+fileOldFlowModifier <- ""
+if ( args[ has_neighbors ] == 'y' ){
+ fileNeighborsModifier <- "all"
commandNeighborModifier <- "scale=1 OR NOT "
+ chartTitle <- paste( chartTitle, "all" )
+} else {
+ chartTitle <- paste( chartTitle, "0" )
+}
+if ( args[ old_flow ] == 'y' ){
+ fileOldFlowModifier <- "_OldFlow"
+ chartTitle <- paste( chartTitle, "With Eventually Consistent Flow Rule Store", sep="\n" )
}
-command <- paste( "SELECT scale, SUM( avg ) as avg FROM intent_tp", flowObjModifier, sep="" )
-command <- paste( command, "_tests WHERE (", sep="" )
-command <- paste( command, commandNeighborModifier, sep="" )
-command <- paste( command, "neighbors = 0 ) AND branch = '", sep="")
-command <- paste( command, args[ 7 ], sep="" )
-command <- paste( command, "' AND date IN ( SELECT max( date ) FROM intent_tp", sep="" )
-command <- paste( command, flowObjModifier, sep="" )
-command <- paste( command, "_tests WHERE branch='", sep="" )
-command <- paste( command, args[ 7 ], sep="" )
-command <- paste( command, "' ) GROUP BY scale ORDER BY scale", sep="" )
+errBarOutputFile <- paste( args[ save_directory ],
+ args[ test_name ],
+ "_",
+ args[ branch_name ],
+ "_",
+ fileNeighborsModifier,
+ "-neighbors",
+ fileFlowObjModifier,
+ fileOldFlowModifier,
+ "_graph.jpg",
+ sep="" )
-print( paste( "Sending SQL command:", command ) )
+# ------------------
+# SQL Initialization
+# ------------------
+
+print( "Initializing SQL" )
+
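+# The port argument arrives as a string from the CLI; strtoi() converts it to an integer.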
+con <- dbConnect( dbDriver( "PostgreSQL" ),
+ dbname = "onostest",
+ host = args[ database_host ],
+ port = strtoi( args[ database_port ] ),
+ user = args[ database_u_id ],
+ password = args[ database_pw ] )
+
+# -----------------------------------
+# Intent Event Throughput SQL Command
+# -----------------------------------
+
+print( "Generating Intent Event Throughput SQL command." )
+
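+# Read only the latest run per branch; the is_old_flow filter is inverted with
+# "NOT " when the eventually consistent (old) flow rule store was not requested.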
+command <- paste( "SELECT scale, SUM( avg ) as avg FROM intent_tp",
+ sqlFlowObjModifier,
+ "_tests WHERE (",
+ commandNeighborModifier,
+ "neighbors = 0 ) AND branch = '",
+ args[ branch_name ],
+ "' AND date IN ( SELECT max( date ) FROM intent_tp",
+ sqlFlowObjModifier,
+ "_tests WHERE branch='",
+ args[ branch_name ],
+ "' AND ",
+ ( if( args[ old_flow ] == 'y' ) "" else "NOT " ),
+ "is_old_flow",
+ " ) GROUP BY scale ORDER BY scale",
+ sep="" )
+
+print( "Sending SQL command:" )
+print( command )
fileData <- dbGetQuery( con, command )
-title <- paste( args[ 6 ], args[ 7 ], sep="_" )
-
# **********************************************************
# STEP 2: Organize data.
# **********************************************************
-print( "STEP 2: Organize data." )
+print( "**********************************************************" )
+print( "STEP 2: Organize Data." )
+print( "**********************************************************" )
-# Create lists c() and organize data into their corresponding list.
+# ------------
+# Data Sorting
+# ------------
+
print( "Sorting data." )
-avgs <- c( fileData[ 'avg' ] )
-# Parse lists into data frames.
+requiredColumns <- c( "avg" )
+
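+# Subsetting a data frame by a column name that does not exist raises an error
+# in R; tryCatch converts that into a readable message and a non-zero exit code.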
+tryCatch( avgs <- c( fileData[ requiredColumns ] ),
+ error = function( e ) {
+ print( "[ERROR] One or more expected columns are missing from the data. Please check that the data and SQL command are valid, then try again." )
+ print( "Required columns: " )
+ print( requiredColumns )
+ print( "Actual columns: " )
+ print( names( fileData ) )
+ print( "Error dump:" )
+ print( e )
+ quit( status = 1 )
+ }
+ )
+
+# --------------------
+# Construct Data Frame
+# --------------------
+
+print( "Constructing data frame." )
dataFrame <- melt( avgs ) # This is where reshape2 comes in. Avgs list is converted to data frame
dataFrame$scale <- fileData$scale # Add node scaling to the data frame.
-colnames( dataFrame ) <- c( "throughput", "type", "scale" )
+colnames( dataFrame ) <- c( "throughput",
+ "type",
+ "scale" )
dataFrame <- na.omit( dataFrame ) # Omit any data that doesn't exist
@@ -114,18 +211,15 @@
# STEP 3: Generate graphs.
# **********************************************************
-print( "STEP 3: Generate graphs." )
+print( "**********************************************************" )
+print( "STEP 3: Generate Graph." )
+print( "**********************************************************" )
-# 1. Graph fundamental data is generated first.
-# These are variables that apply to all of the graphs being generated, regardless of type.
-#
-# 2. Type specific graph data is generated.
-#
-# 3. Generate and save the graphs.
-# Graphs are saved to the filename above, in the directory provided in command line args
+# ------------------
+# Generate Main Plot
+# ------------------
-print( "Generating fundamental graph data." )
-
+print( "Generating main plot." )
# Create the primary plot here.
# ggplot contains the following arguments:
# - data: the data frame that the graph will be based off of
@@ -133,41 +227,81 @@
# - x: x-axis values (usually node scaling)
# - y: y-axis values (usually time in milliseconds)
# - fill: the category of the colored side-by-side bars (usually type)
-theme_set( theme_grey( base_size = 20 ) ) # set the default text size of the graph.
-mainPlot <- ggplot( data = dataFrame, aes( x = scale, y = throughput, fill = type ) )
+mainPlot <- ggplot( data = dataFrame, aes( x = scale,
+ y = throughput,
+ fill = type ) )
+# ------------------------------
+# Fundamental Variables Assigned
+# ------------------------------
+
+print( "Generating fundamental graph data." )
# Formatting the plot
+theme_set( theme_grey( base_size = 22 ) ) # set the default text size of the graph.
width <- 0.7 # Width of the bars.
xScaleConfig <- scale_x_continuous( breaks = dataFrame$scale, label = dataFrame$scale )
xLabel <- xlab( "Scale" )
yLabel <- ylab( "Throughput (events/second)" )
fillLabel <- labs( fill="Type" )
-chartTitle <- "Intent Event Throughput"
-if ( args[ 1 ] == 'y' ){
- chartTitle <- paste( chartTitle, " With Flow Objectives", sep="" )
-}
-chartTitle <- paste( chartTitle, "\nevents/second with Neighbors =", sep="" )
-if ( args[ 8 ] == 'y' ){
- chartTitle <- paste( chartTitle, "all" )
-} else {
- chartTitle <- paste( chartTitle, "0" )
-}
+imageWidth <- 15
+imageHeight <- 10
+imageDPI <- 200
-theme <- theme( plot.title=element_text( hjust = 0.5, size = 28, face='bold' ) )
-values <- geom_text( aes( x=dataFrame$scale, y=dataFrame$throughput + 0.04 * max( dataFrame$throughput ), label = format( dataFrame$throughput, digits=3, big.mark = ",", scientific = FALSE ) ), size = 5, fontface = "bold" )
+theme <- theme( plot.title = element_text( hjust = 0.5, size = 32, face = 'bold' ),
+ legend.position = "bottom",
+ legend.text = element_text( size = 18, face = "bold" ),
+ legend.title = element_blank() )
+
+values <- geom_text( aes( x = dataFrame$scale,
+ y = dataFrame$throughput + 0.03 * max( dataFrame$throughput ),
+ label = format( dataFrame$throughput,
+ digits=3,
+ big.mark = ",",
+ scientific = FALSE ) ),
+ size = 7,
+ fontface = "bold" )
# Store plot configurations as 1 variable
-fundamentalGraphData <- mainPlot + xScaleConfig + xLabel + yLabel + fillLabel + theme + values
+fundamentalGraphData <- mainPlot +
+ xScaleConfig +
+ xLabel +
+ yLabel +
+ fillLabel +
+ theme +
+ values
+# ---------------------------
+# Generating Bar Graph Format
+# ---------------------------
print( "Generating bar graph." )
-barGraphFormat <- geom_bar( stat = "identity", width = width, fill="#169EFF" )
-title <- ggtitle( paste( chartTitle, "" ) )
-result <- fundamentalGraphData + barGraphFormat + title
+barGraphFormat <- geom_bar( stat = "identity",
+ width = width,
+ fill = "#169EFF" )
-# Save graph to file
+title <- ggtitle( chartTitle )
+
+result <- fundamentalGraphData +
+ barGraphFormat +
+ title
+
+# -----------------------
+# Exporting Graph to File
+# -----------------------
+
print( paste( "Saving bar chart to", errBarOutputFile ) )
-ggsave( errBarOutputFile, width = 10, height = 6, dpi = 200 )
-print( paste( "Successfully wrote bar chart out to", errBarOutputFile ) )
+tryCatch( ggsave( errBarOutputFile,
+ width = imageWidth,
+ height = imageHeight,
+ dpi = imageDPI ),
+ error = function( e ){
+ print( "[ERROR] There was a problem saving the graph due to a graph formatting exception. Error dump:" )
+ print( e )
+ quit( status = 1 )
+ }
+ )
+
+print( paste( "[SUCCESS] Successfully wrote bar chart out to", errBarOutputFile ) )
+quit( status = 0 )
diff --git a/TestON/JenkinsFile/scripts/SCPFmastershipFailoverLat.R b/TestON/JenkinsFile/scripts/SCPFmastershipFailoverLat.R
index 9eea330..30f7bca 100644
--- a/TestON/JenkinsFile/scripts/SCPFmastershipFailoverLat.R
+++ b/TestON/JenkinsFile/scripts/SCPFmastershipFailoverLat.R
@@ -21,134 +21,330 @@
# please contact Jeremy Ronquillo: j_ronquillo@u.pacific.edu
# **********************************************************
-# STEP 1: File management.
+# STEP 1: Data management.
# **********************************************************
-print( "STEP 1: File management." )
+print( "**********************************************************" )
+print( "STEP 1: Data management." )
+print( "**********************************************************" )
+database_host = 1
+database_port = 2
+database_u_id = 3
+database_pw = 4
+test_name = 5
+branch_name = 6
+save_directory = 7
# Command line arguments are read.
print( "Reading commmand-line args." )
args <- commandArgs( trailingOnly=TRUE )
-# Import libraries to be used for graphing and organizing data, respectively.
-# Find out more about ggplot2: https://github.com/tidyverse/ggplot2
-# reshape2: https://github.com/hadley/reshape
+# ----------------
+# Import Libraries
+# ----------------
+
print( "Importing libraries." )
library( ggplot2 )
library( reshape2 )
library( RPostgreSQL ) # For databases
-# Check if sufficient args are provided.
-if ( is.na( args[ 7 ] ) ){
- print( "Usage: Rscript SCPFmastershipFailoverLat <database-host> <database-port> <database-user-id> <database-password> <test-name> <branch-name> <directory-to-save-graphs>" )
- q() # basically exit(), but in R
+# -------------------
+# Check CLI Arguments
+# -------------------
+
+print( "Verifying CLI args." )
+if ( is.na( args[ save_directory ] ) ){
+ print( paste( "Usage: Rscript SCPFmastershipFailoverLat",
+ "<database-host>",
+ "<database-port>",
+ "<database-user-id>",
+ "<database-password>",
+ "<test-name>",
+ "<branch-name>",
+ "<directory-to-save-graphs>",
+ sep=" " ) )
+
+ quit( status = 1 ) # basically exit(), but in R
}
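+
+# Example invocation (all values hypothetical; the save directory must end
+# with a trailing slash, since filenames are appended to it directly):
+#   Rscript SCPFmastershipFailoverLat.R localhost 5432 dbuser dbpass \
+#       SCPFmastershipFailoverLat master /path/to/graphs/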
-# paste() is used to concatenate strings.
-errBarOutputFile <- paste( args[ 7 ], args[ 5 ], sep="" )
-errBarOutputFile <- paste( errBarOutputFile, args[ 6 ], sep="_" )
-errBarOutputFile <- paste( errBarOutputFile, "_errGraph.jpg", sep="" )
+# -----------------
+# Create File Names
+# -----------------
-stackedBarOutputFile <- paste( args[ 7 ], args[ 5 ], sep="" )
-stackedBarOutputFile <- paste( stackedBarOutputFile, args[ 6 ], sep="_" )
-stackedBarOutputFile <- paste( stackedBarOutputFile, "_stackedGraph.jpg", sep="" )
-
-print( "Reading from databases." )
-
-con <- dbConnect( dbDriver( "PostgreSQL" ), dbname="onostest", host=args[ 1 ], port=strtoi( args[ 2 ] ), user=args[ 3 ],password=args[ 4 ] )
-
-command <- paste( "SELECT * FROM mastership_failover_tests WHERE branch = '", args[ 6 ], sep = "" )
-command <- paste( command, "' AND date IN ( SELECT MAX( date ) FROM mastership_failover_tests WHERE branch = '", sep = "" )
-command <- paste( command, args[ 6 ], sep = "" )
-command <- paste( command, "' ) ", sep="" )
-
-print( paste( "Sending SQL command:", command ) )
-
-fileData <- dbGetQuery( con, command )
+print( "Creating filenames and title of graph." )
chartTitle <- "Mastership Failover Latency"
+errBarOutputFile <- paste( args[ save_directory ],
+ args[ test_name ],
+ "_",
+ args[ branch_name ],
+ "_errGraph.jpg",
+ sep="" )
+
+stackedBarOutputFile <- paste( args[ save_directory ],
+ args[ test_name ],
+ "_",
+ args[ branch_name ],
+ "_stackedGraph.jpg",
+ sep="" )
+
+# ------------------
+# SQL Initialization
+# ------------------
+
+print( "Initializing SQL" )
+
+con <- dbConnect( dbDriver( "PostgreSQL" ),
+ dbname = "onostest",
+ host = args[ database_host ],
+ port = strtoi( args[ database_port ] ),
+ user = args[ database_u_id ],
+ password = args[ database_pw ] )
+
+# ---------------------------------------
+# Mastership Failover Latency SQL Command
+# ---------------------------------------
+
+print( "Generating Mastership Failover Latency SQL command" )
+
+command <- paste( "SELECT * FROM mastership_failover_tests WHERE branch = '",
+ args[ branch_name ],
+ "' AND date IN ( SELECT MAX( date ) FROM mastership_failover_tests WHERE branch = '",
+ args[ branch_name ],
+ "' ) ",
+ sep = "" )
+
+print( "Sending SQL command:" )
+print( command )
+
+fileData <- dbGetQuery( con, command )
# **********************************************************
# STEP 2: Organize data.
# **********************************************************
-fileDataNames <- names( fileData )
+print( "**********************************************************" )
+print( "STEP 2: Organize Data." )
+print( "**********************************************************" )
-avgs <- c()
-stds <- c()
+# ------------
+# Data Sorting
+# ------------
+print( "Combining averages into a list." )
-print( "Sorting data." )
-for ( name in fileDataNames ){
- nameLen <- nchar( name )
- if ( nameLen > 2 ){
- if ( substring( name, nameLen - 2, nameLen ) == "avg" ){
- avgs <- c( avgs, fileData[ name ] )
- }
- if ( substring( name, nameLen - 2, nameLen ) == "std" ){
- stds <- c( stds, fileData[ name ] )
- }
- }
-}
+requiredColumns <- c( "kill_deact_avg", "deact_role_avg" )
-avgData <- melt( avgs )
-avgData$scale <- fileData$scale
-colnames( avgData ) <- c( "ms", "type", "scale" )
+tryCatch( avgs <- c( fileData[ requiredColumns ] ),
+ error = function( e ) {
+ print( "[ERROR] One or more expected columns are missing from the data. Please check that the data and SQL command are valid, then try again." )
+ print( "Required columns: " )
+ print( requiredColumns )
+ print( "Actual columns: " )
+ print( names( fileData ) )
+ print( "Error dump:" )
+ print( e )
+ quit( status = 1 )
+ }
+ )
-stdData <- melt( stds )
-colnames( stdData ) <- c( "ms", "type" )
+# --------------------
+# Construct Data Frame
+# --------------------
-dataFrame <- na.omit( avgData ) # Omit any data that doesn't exist
+print( "Constructing Data Frame from list." )
+
+dataFrame <- melt( avgs )
+dataFrame$scale <- fileData$scale
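+# Stack the standard deviations in the same order that melt() stacked the averages.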
+dataFrame$stds <- c( fileData$kill_deact_std,
+ fileData$deact_role_std )
+
+colnames( dataFrame ) <- c( "ms",
+ "type",
+ "scale",
+ "stds" )
+
+dataFrame <- na.omit( dataFrame ) # Omit any data that doesn't exist
+
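+# Total latency across both phases; used to place value labels on the stacked bar chart.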
+sum <- fileData[ 'deact_role_avg' ] +
+ fileData[ 'kill_deact_avg' ]
print( "Data Frame Results:" )
-print( avgData )
+print( dataFrame )
# **********************************************************
# STEP 3: Generate graphs.
# **********************************************************
-print( "Generating fundamental graph data." )
+print( "**********************************************************" )
+print( "STEP 3: Generate Graph." )
+print( "**********************************************************" )
-theme_set( theme_grey( base_size = 20 ) ) # set the default text size of the graph.
+# ------------------------------------
+# Initialize Variables for Both Graphs
+# ------------------------------------
-mainPlot <- ggplot( data = avgData, aes( x = scale, y = ms, ymin = ms - stdData$ms, ymax = ms + stdData$ms,fill = type ) )
-xScaleConfig <- scale_x_continuous( breaks=c( 1, 3, 5, 7, 9) )
+print( "Initializing variables used in both graphs." )
+
+theme_set( theme_grey( base_size = 22 ) ) # set the default text size of the graph.
+xScaleConfig <- scale_x_continuous( breaks = c( 1, 3, 5, 7, 9) )
xLabel <- xlab( "Scale" )
yLabel <- ylab( "Latency (ms)" )
-fillLabel <- labs( fill="Type" )
-theme <- theme( plot.title=element_text( hjust = 0.5, size = 28, face='bold' ) )
+fillLabel <- labs( fill = "Type" )
+barWidth <- 0.9
+imageWidth <- 15
+imageHeight <- 10
+imageDPI <- 200
-fundamentalGraphData <- mainPlot + xScaleConfig + xLabel + yLabel + fillLabel + theme
+theme <- theme( plot.title = element_text( hjust = 0.5, size = 32, face='bold' ),
+ legend.position = "bottom",
+ legend.text = element_text( size=22 ),
+ legend.title = element_blank(),
+ legend.key.size = unit( 1.5, 'lines' ) )
+barColors <- scale_fill_manual( values=c( "#F77670",
+ "#619DFA" ) )
+
+wrapLegend <- guides( fill=guide_legend( nrow=1, byrow=TRUE ) )
+
+# ----------------------------------
+# Error Bar Graph Generate Main Plot
+# ----------------------------------
+
+print( "Creating main plot." )
+
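+# ymin/ymax give asymmetric error bars that extend upward only, from ms to ms + std.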
+mainPlot <- ggplot( data = dataFrame, aes( x = scale,
+ y = ms,
+ ymin = ms,
+ ymax = ms + stds,
+ fill = type ) )
+
+# ----------------------------------------------
+# Error Bar Graph Fundamental Variables Assigned
+# ----------------------------------------------
+
+print( "Generating fundamental graph data for the error bar graph." )
+
+errorBarColor <- rgb( 140, 140, 140, maxColorValue=255 )
+
+title <- ggtitle( chartTitle )
+
+fundamentalGraphData <- mainPlot +
+ xScaleConfig +
+ xLabel +
+ yLabel +
+ fillLabel +
+ theme +
+ title +
+ wrapLegend
+
+# -------------------------------------------
+# Error Bar Graph Generating Bar Graph Format
+# -------------------------------------------
print( "Generating bar graph with error bars." )
-width <- 0.9
-barGraphFormat <- geom_bar( stat="identity", position=position_dodge(), width = width )
-errorBarFormat <- geom_errorbar( width = width, position=position_dodge(), color=rgb( 140, 140, 140, maxColorValue=255 ) )
-values <- geom_text( aes( x=avgData$scale, y=avgData$ms + 0.04 * max( avgData$ms ), label = format( avgData$ms, digits=3, big.mark = ",", scientific = FALSE ) ), size = 5, fontface = "bold", position=position_dodge( 0.9 ) )
-title <- ggtitle( paste( chartTitle, "" ) )
-result <- fundamentalGraphData + barGraphFormat + errorBarFormat + title + values
+barGraphFormat <- geom_bar( stat = "identity",
+ position = position_dodge(),
+ width = barWidth )
+
+errorBarFormat <- geom_errorbar( width = barWidth,
+ position = position_dodge(),
+ color = errorBarColor )
+
+values <- geom_text( aes( x = dataFrame$scale,
+ y = dataFrame$ms + 0.02 * max( dataFrame$ms ),
+ label = format( dataFrame$ms,
+ digits = 3,
+ big.mark = ",",
+ scientific = FALSE ) ),
+ size = 7.0,
+ fontface = "bold",
+ position = position_dodge( 0.9 ) )
+
+result <- fundamentalGraphData +
+ barGraphFormat +
+ barColors +
+ errorBarFormat +
+ values
+
+# ---------------------------------------
+# Error Bar Graph Exporting Graph to File
+# ---------------------------------------
print( paste( "Saving bar chart with error bars to", errBarOutputFile ) )
-ggsave( errBarOutputFile, width = 10, height = 6, dpi = 200 )
+tryCatch( ggsave( errBarOutputFile,
+ width = imageWidth,
+ height = imageHeight,
+ dpi = imageDPI ),
+ error = function( e ){
+ print( "[ERROR] There was a problem saving the graph due to a graph formatting exception. Error dump:" )
+ print( e )
+ quit( status = 1 )
+ }
+ )
-print( paste( "Successfully wrote bar chart with error bars out to", errBarOutputFile ) )
+print( paste( "[SUCCESS] Successfully wrote bar chart with error bars out to", errBarOutputFile ) )
+# ------------------------------------------------
+# Stacked Bar Graph Fundamental Variables Assigned
+# ------------------------------------------------
+
+print( "Generating fundamental graph data for the stacked bar graph." )
+
+title <- ggtitle( chartTitle )
+
+fundamentalGraphData <- mainPlot +
+ xScaleConfig +
+ xLabel +
+ yLabel +
+ fillLabel +
+ theme +
+ title +
+ wrapLegend
+
+# ---------------------------------------------
+# Stacked Bar Graph Generating Bar Graph Format
+# ---------------------------------------------
print( "Generating stacked bar chart." )
-stackedBarFormat <- geom_bar( stat="identity", width=width )
-title <- ggtitle( paste( chartTitle, "" ) )
-sum <- fileData[ 'deact_role_avg' ] + fileData[ 'kill_deact_avg' ]
-values <- geom_text( aes( x=avgData$scale, y=sum + 0.04 * max( sum ), label = format( sum, digits=3, big.mark = ",", scientific = FALSE ) ), size = 5, fontface = "bold" )
-result <- fundamentalGraphData + stackedBarFormat + title + values
+stackedBarFormat <- geom_bar( stat = "identity",
+ width = barWidth )
+values <- geom_text( aes( x = dataFrame$scale,
+ y = sum + 0.02 * max( sum ),
+ label = format( sum,
+ digits = 3,
+ big.mark = ",",
+ scientific = FALSE ) ),
+ size = 7.0,
+ fontface = "bold" )
+
+result <- fundamentalGraphData +
+ stackedBarFormat +
+ barColors +
+ title +
+ values
+
+# -----------------------------------------
+# Stacked Bar Graph Exporting Graph to File
+# -----------------------------------------
print( paste( "Saving stacked bar chart to", stackedBarOutputFile ) )
-ggsave( stackedBarOutputFile, width = 10, height = 6, dpi = 200 )
+tryCatch( ggsave( stackedBarOutputFile,
+ width = imageWidth,
+ height = imageHeight,
+ dpi = imageDPI ),
+ error = function( e ){
+ print( "[ERROR] There was a problem saving the graph due to a graph formatting exception. Error dump:" )
+ print( e )
+ quit( status = 1 )
+ }
+ )
-print( paste( "Successfully wrote stacked bar chart out to", stackedBarOutputFile ) )
\ No newline at end of file
+print( paste( "[SUCCESS] Successfully wrote stacked bar chart out to", stackedBarOutputFile ) )
+quit( status = 0 )
diff --git a/TestON/JenkinsFile/scripts/SCPFportLat.R b/TestON/JenkinsFile/scripts/SCPFportLat.R
index fd24c8d..4637072 100644
--- a/TestON/JenkinsFile/scripts/SCPFportLat.R
+++ b/TestON/JenkinsFile/scripts/SCPFportLat.R
@@ -21,142 +21,386 @@
# please contact Jeremy Ronquillo: j_ronquillo@u.pacific.edu
# **********************************************************
-# STEP 1: File management.
+# STEP 1: Data management.
# **********************************************************
-
-print( "STEP 1: File management." )
+print( "**********************************************************" )
+print( "STEP 1: Data management." )
+print( "**********************************************************" )
+database_host = 1
+database_port = 2
+database_u_id = 3
+database_pw = 4
+test_name = 5
+branch_name = 6
+save_directory = 7
# Command line arguments are read.
print( "Reading commmand-line args." )
args <- commandArgs( trailingOnly=TRUE )
-# Import libraries to be used for graphing and organizing data, respectively.
-# Find out more about ggplot2: https://github.com/tidyverse/ggplot2
-# reshape2: https://github.com/hadley/reshape
+# ----------------
+# Import Libraries
+# ----------------
+
print( "Importing libraries." )
library( ggplot2 )
library( reshape2 )
library( RPostgreSQL ) # For databases
-# Check if sufficient args are provided.
-if ( is.na( args[ 7 ] ) ){
- print( "Usage: Rscript SCPFportLat <database-host> <database-port> <database-user-id> <database-password> <test-name> <branch-name> <directory-to-save-graphs>" )
- q() # basically exit(), but in R
+# -------------------
+# Check CLI Arguments
+# -------------------
+
+print( "Verifying CLI args." )
+
+if ( is.na( args[ save_directory ] ) ){
+
+ print( paste( "Usage: Rscript SCPFportLat",
+ "<database-host>",
+ "<database-port>",
+ "<database-user-id>",
+ "<database-password>",
+ "<test-name>",
+ "<branch-name>",
+ "<directory-to-save-graphs>",
+ sep=" " ) )
+
+ quit( status = 1 ) # basically exit(), but in R
}
-# paste() is used to concatenate strings.
-errBarOutputFileUp <- paste( args[ 7 ], "SCPFportLat_", sep = "" )
-errBarOutputFileUp <- paste( errBarOutputFileUp, args[ 6 ], sep = "" )
-errBarOutputFileUp <- paste( errBarOutputFileUp, "_UpErrBarWithStack.jpg", sep = "" )
+# -----------------
+# Create File Names
+# -----------------
-errBarOutputFileDown <- paste( args[ 7 ], "SCPFportLat_", sep = "" )
-errBarOutputFileDown <- paste( errBarOutputFileDown, args[ 6 ], sep = "" )
-errBarOutputFileDown <- paste( errBarOutputFileDown, "_DownErrBarWithStack.jpg", sep = "" )
+print( "Creating filenames and title of graph." )
+errBarOutputFileUp <- paste( args[ save_directory ],
+ "SCPFportLat_",
+ args[ branch_name ],
+ "_UpErrBarWithStack.jpg",
+ sep = "" )
-print( "Reading from databases." )
+errBarOutputFileDown <- paste( args[ save_directory ],
+ "SCPFportLat_",
+ args[ branch_name ],
+ "_DownErrBarWithStack.jpg",
+ sep = "" )
-con <- dbConnect( dbDriver( "PostgreSQL" ), dbname="onostest", host=args[ 1 ], port=strtoi( args[ 2 ] ), user=args[ 3 ],password=args[ 4 ] )
+# ------------------
+# SQL Initialization
+# ------------------
+print( "Initializing SQL" )
-command <- paste( "SELECT * FROM port_latency_details WHERE branch = '", args[ 6 ], sep = "" )
-command <- paste( command, "' AND date IN ( SELECT MAX( date ) FROM port_latency_details WHERE branch = '", sep = "" )
-command <- paste( command, args[ 6 ], sep = "" )
-command <- paste( command, "' ) ", sep="" )
+con <- dbConnect( dbDriver( "PostgreSQL" ),
+ dbname = "onostest",
+ host = args[ database_host ],
+ port = strtoi( args[ database_port ] ),
+ user = args[ database_u_id ],
+ password = args[ database_pw ] )
-print( paste( "Sending SQL command:", command ) )
+# ------------------------
+# Port Latency SQL Command
+# ------------------------
+
+print( "Generating Port Latency SQL Command" )
+
+command <- paste( "SELECT * FROM port_latency_details WHERE branch = '",
+ args[ branch_name ],
+ "' AND date IN ( SELECT MAX( date ) FROM port_latency_details WHERE branch = '",
+ args[ branch_name ],
+ "' ) ",
+ sep = "" )
+
+print( "Sending SQL command:" )
+print( command )
fileData <- dbGetQuery( con, command )
-chartTitle <- paste( "Port Latency", args[ 6 ], sep = " - " )
-chartTitle <- paste( chartTitle, "\n" )
-
-
# **********************************************************
# STEP 2: Organize data.
# **********************************************************
-print( "Sorting data." )
+print( "**********************************************************" )
+print( "STEP 2: Organize Data." )
+print( "**********************************************************" )
-upAvgs <- c( fileData[ 'up_ofp_to_dev_avg' ], fileData[ 'up_dev_to_link_avg' ], fileData[ 'up_link_to_graph_avg' ] )
-upAvgsData <- melt( upAvgs )
-upAvgsData$scale <- fileData$scale
-upAvgsData$up_std <- fileData$up_std
+# -----------------------------
+# Port Up Averages Data Sorting
+# -----------------------------
+
+print( "Sorting data for Port Up Averages." )
+
+requiredColumns <- c( "up_ofp_to_dev_avg", "up_dev_to_link_avg", "up_link_to_graph_avg" )
+
+tryCatch( upAvgs <- c( fileData[ requiredColumns ] ),
+ error = function( e ) {
+ print( "[ERROR] One or more expected columns are missing from the data. Please check that the data and SQL command are valid, then try again." )
+ print( "Required columns: " )
+ print( requiredColumns )
+ print( "Actual columns: " )
+ print( names( fileData ) )
+ print( "Error dump:" )
+ print( e )
+ quit( status = 1 )
+ }
+ )
-colnames( upAvgsData ) <- c( "ms", "type", "scale", "stds" )
-upAvgsData$type <- as.character( upAvgsData$type )
-upAvgsData$type <- factor( upAvgsData$type, levels=unique( upAvgsData$type ) )
+# ----------------------------
+# Port Up Construct Data Frame
+# ----------------------------
-downAvgs <- c( fileData[ 'down_ofp_to_dev_avg' ], fileData[ 'down_dev_to_link_avg' ], fileData[ 'down_link_to_graph_avg' ] )
-downAvgsData <- melt( downAvgs )
-downAvgsData$scale <- fileData$scale
-downAvgsData$down_std <- fileData$down_std
+print( "Constructing Port Up data frame." )
-colnames( downAvgsData ) <- c( "ms", "type", "scale", "stds" )
-downAvgsData$type <- as.character( downAvgsData$type )
-downAvgsData$type <- factor( downAvgsData$type, levels=unique( downAvgsData$type ) )
+upAvgsDataFrame <- melt( upAvgs )
+upAvgsDataFrame$scale <- fileData$scale
+upAvgsDataFrame$up_std <- fileData$up_std
-upAvgsData <- na.omit( upAvgsData ) # Omit any data that doesn't exist
-downAvgsData <- na.omit( downAvgsData ) # Omit any data that doesn't exist
+colnames( upAvgsDataFrame ) <- c( "ms",
+ "type",
+ "scale",
+ "stds" )
+
+upAvgsDataFrame <- na.omit( upAvgsDataFrame )
+
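+# Keep 'type' in its original (melt) row order instead of alphabetical factor order.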
+upAvgsDataFrame$type <- as.character( upAvgsDataFrame$type )
+upAvgsDataFrame$type <- factor( upAvgsDataFrame$type, levels=unique( upAvgsDataFrame$type ) )
+
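+# Stacked total per scale; used to place value labels above the Port Up bars.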
+sumOfUpAvgs <- fileData[ 'up_ofp_to_dev_avg' ] +
+ fileData[ 'up_dev_to_link_avg' ] +
+ fileData[ 'up_link_to_graph_avg' ]
print( "Up Averages Results:" )
-print( upAvgsData )
+print( upAvgsDataFrame )
+
+# -------------------------------
+# Port Down Averages Data Sorting
+# -------------------------------
+
+print( "Sorting data for Port Down Averages." )
+
+requiredColumns <- c( "down_ofp_to_dev_avg", "down_dev_to_link_avg", "down_link_to_graph_avg" )
+
+tryCatch( downAvgs <- c( fileData[ requiredColumns ] ),
+ error = function( e ) {
+ print( "[ERROR] One or more expected columns are missing from the data. Please check that the data and SQL command are valid, then try again." )
+ print( "Required columns: " )
+ print( requiredColumns )
+ print( "Actual columns: " )
+ print( names( fileData ) )
+ print( "Error dump:" )
+ print( e )
+ quit( status = 1 )
+ }
+ )
+
+# ------------------------------
+# Port Down Construct Data Frame
+# ------------------------------
+
+print( "Constructing Port Down data frame." )
+
+downAvgsDataFrame <- melt( downAvgs )
+downAvgsDataFrame$scale <- fileData$scale
+downAvgsDataFrame$down_std <- fileData$down_std
+
+colnames( downAvgsDataFrame ) <- c( "ms",
+ "type",
+ "scale",
+ "stds" )
+
+downAvgsDataFrame <- na.omit( downAvgsDataFrame )
+
+downAvgsDataFrame$type <- as.character( downAvgsDataFrame$type )
+downAvgsDataFrame$type <- factor( downAvgsDataFrame$type, levels=unique( downAvgsDataFrame$type ) )
+
+sumOfDownAvgs <- fileData[ 'down_ofp_to_dev_avg' ] +
+ fileData[ 'down_dev_to_link_avg' ] +
+ fileData[ 'down_link_to_graph_avg' ]
print( "Down Averages Results:" )
-print( downAvgsData )
+print( downAvgsDataFrame )
# **********************************************************
# STEP 3: Generate graphs.
# **********************************************************
+print( "**********************************************************" )
+print( "STEP 3: Generate Graph." )
+print( "**********************************************************" )
-print( "Generating fundamental graph data (Port Up Latency)." )
-width <- 1
-theme_set( theme_grey( base_size = 20 ) ) # set the default text size of the graph.
+# ------------------------------------
+# Initialize Variables For Both Graphs
+# ------------------------------------
-mainPlot <- ggplot( data = upAvgsData, aes( x = scale, y = ms, fill = type, ymin = fileData[ 'up_end_to_end_avg' ] - stds, ymax = fileData[ 'up_end_to_end_avg' ] + stds ) )
-xScaleConfig <- scale_x_continuous( breaks=c( 1, 3, 5, 7, 9) )
+print( "Initializing variables used in both graphs." )
+
+theme_set( theme_grey( base_size = 22 ) ) # set the default text size of the graph.
+barWidth <- 1
+xScaleConfig <- scale_x_continuous( breaks=c( 1, 3, 5, 7, 9 ) )
xLabel <- xlab( "Scale" )
yLabel <- ylab( "Latency (ms)" )
fillLabel <- labs( fill="Type" )
-theme <- theme( plot.title=element_text( hjust = 0.5, size = 28, face='bold' ) )
+wrapLegend <- guides( fill=guide_legend( nrow=1, byrow=TRUE ) )
+imageWidth <- 15
+imageHeight <- 10
+imageDPI <- 200
+errorBarColor <- rgb( 140, 140, 140, maxColorValue=255 )
-fundamentalGraphData <- mainPlot + xScaleConfig + xLabel + yLabel + fillLabel + theme
+theme <- theme( plot.title=element_text( hjust = 0.5, size = 32, face='bold' ),
+ legend.position="bottom",
+ legend.text=element_text( size=22 ),
+ legend.title = element_blank(),
+ legend.key.size = unit( 1.5, 'lines' ) )
+
+colors <- scale_fill_manual( values=c( "#F77670",
+ "#619DFA",
+ "#18BA48" ) )
+
+# --------------------------
+# Port Up Generate Main Plot
+# --------------------------
+
+print( "Generating main plot (Port Up Latency)." )
+
+mainPlot <- ggplot( data = upAvgsDataFrame, aes( x = scale,
+ y = ms,
+ fill = type,
+ ymin = fileData[ 'up_end_to_end_avg' ],
+ ymax = fileData[ 'up_end_to_end_avg' ] + stds ) )
+
+# --------------------------------------
+# Port Up Fundamental Variables Assigned
+# --------------------------------------
+
+print( "Generating fundamental graph data (Port Up Latency)." )
+
+title <- ggtitle( "Port Up Latency" )
+
+fundamentalGraphData <- mainPlot +
+ xScaleConfig +
+ xLabel +
+ yLabel +
+ fillLabel +
+ theme +
+ wrapLegend +
+ title +
+ colors
+
+# -----------------------------------
+# Port Up Generating Bar Graph Format
+# -----------------------------------
print( "Generating bar graph with error bars (Port Up Latency)." )
-barGraphFormat <- geom_bar( stat="identity", width = width )
-errorBarFormat <- geom_errorbar( width = width, color=rgb( 140, 140, 140, maxColorValue=255 ) )
-sum <- fileData[ 'up_ofp_to_dev_avg' ] + fileData[ 'up_dev_to_link_avg' ] + fileData[ 'up_link_to_graph_avg' ]
-values <- geom_text( aes( x=upAvgsData$scale, y=sum + 0.04 * max( sum ), label = format( sum, digits=3, big.mark = ",", scientific = FALSE ) ), size = 5, fontface = "bold" )
-title <- ggtitle( "Port Up Latency" )
-result <- fundamentalGraphData + barGraphFormat + errorBarFormat + title + values
+barGraphFormat <- geom_bar( stat = "identity",
+ width = barWidth )
+errorBarFormat <- geom_errorbar( width = barWidth,
+ color = errorBarColor )
+
+values <- geom_text( aes( x = upAvgsDataFrame$scale,
+ y = sumOfUpAvgs + 0.03 * max( sumOfUpAvgs ),
+ label = format( sumOfUpAvgs,
+ digits=3,
+ big.mark = ",",
+ scientific = FALSE ) ),
+ size = 7.0,
+ fontface = "bold" )
+
+result <- fundamentalGraphData +
+ barGraphFormat +
+ errorBarFormat +
+ values
+
+# -------------------------------
+# Port Up Exporting Graph to File
+# -------------------------------
print( paste( "Saving bar chart with error bars (Port Up Latency) to", errBarOutputFileUp ) )
-ggsave( errBarOutputFileUp, width = 10, height = 6, dpi = 200 )
+tryCatch( ggsave( errBarOutputFileUp,
+ width = imageWidth,
+ height = imageHeight,
+ dpi = imageDPI ),
+ error = function( e ){
+ print( "[ERROR] There was a problem saving the graph due to a graph formatting exception. Error dump:" )
+ print( e )
+ quit( status = 1 )
+ }
+ )
-print( paste( "Successfully wrote bar chart with error bars (Port Up Latency) out to", errBarOutputFileUp ) )
+print( paste( "[SUCCESS] Successfully wrote bar chart with error bars (Port Up Latency) out to", errBarOutputFileUp ) )
+# ----------------------------
+# Port Down Generate Main Plot
+# ----------------------------
+
+print( "Generating main plot (Port Down Latency)." )
+
+mainPlot <- ggplot( data = downAvgsDataFrame, aes( x = scale,
+ y = ms,
+ fill = type,
+ ymin = fileData[ 'down_end_to_end_avg' ],
+ ymax = fileData[ 'down_end_to_end_avg' ] + stds ) )
+
+# ----------------------------------------
+# Port Down Fundamental Variables Assigned
+# ----------------------------------------
print( "Generating fundamental graph data (Port Down Latency)." )
-mainPlot <- ggplot( data = downAvgsData, aes( x = scale, y = ms, fill = type, ymin = fileData[ 'down_end_to_end_avg' ] - stds, ymax = fileData[ 'down_end_to_end_avg' ] + stds ) )
-theme <- theme( plot.title=element_text( hjust = 0.5, size = 28, face='bold' ) )
+title <- ggtitle( "Port Down Latency" )
-fundamentalGraphData <- mainPlot + xScaleConfig + xLabel + yLabel + fillLabel + theme
+fundamentalGraphData <- mainPlot +
+ xScaleConfig +
+ xLabel +
+ yLabel +
+ fillLabel +
+ theme +
+ wrapLegend +
+ title +
+ colors
+
+# -------------------------------------
+# Port Down Generating Bar Graph Format
+# -------------------------------------
print( "Generating bar graph with error bars (Port Down Latency)." )
-barGraphFormat <- geom_bar( stat="identity", width = width )
-errorBarFormat <- geom_errorbar( width = width, color=rgb( 140, 140, 140, maxColorValue=255 ) )
-sum <- fileData[ 'down_ofp_to_dev_avg' ] + fileData[ 'down_dev_to_link_avg' ] + fileData[ 'down_link_to_graph_avg' ]
-values <- geom_text( aes( x=downAvgsData$scale, y=sum + 0.04 * max( sum ), label = format( sum, digits=3, big.mark = ",", scientific = FALSE ) ), size = 5, fontface = "bold" )
-title <- ggtitle( "Port Down Latency" )
-result <- fundamentalGraphData + barGraphFormat + errorBarFormat + title + values
+barGraphFormat <- geom_bar( stat = "identity",
+ width = barWidth )
+errorBarFormat <- geom_errorbar( width = barWidth,
+ color = errorBarColor )
+values <- geom_text( aes( x = downAvgsDataFrame$scale,
+ y = sumOfDownAvgs + 0.03 * max( sumOfDownAvgs ),
+ label = format( sumOfDownAvgs,
+ digits=3,
+ big.mark = ",",
+ scientific = FALSE ) ),
+ size = 7.0,
+ fontface = "bold" )
+
+result <- fundamentalGraphData +
+ barGraphFormat +
+ errorBarFormat +
+ values
+
+# ---------------------------------
+# Port Down Exporting Graph to File
+# ---------------------------------
print( paste( "Saving bar chart with error bars (Port Down Latency) to", errBarOutputFileDown ) )
-ggsave( errBarOutputFileDown, width = 10, height = 6, dpi = 200 )
+tryCatch( ggsave( errBarOutputFileDown,
+ width = imageWidth,
+ height = imageHeight,
+ dpi = imageDPI ),
+ error = function( e ){
+ print( "[ERROR] There was a problem saving the graph due to a graph formatting exception. Error dump:" )
+ print( e )
+ quit( status = 1 )
+ }
+ )
-print( paste( "Successfully wrote bar chart with error bars (Port Down Latency) out to", errBarOutputFileDown ) )
+print( paste( "[SUCCESS] Successfully wrote bar chart with error bars (Port Down Latency) out to", errBarOutputFileDown ) )
+quit( status = 0 )
diff --git a/TestON/JenkinsFile/scripts/SCPFscaleTopo.R b/TestON/JenkinsFile/scripts/SCPFscaleTopo.R
index 195019d..e69a383 100644
--- a/TestON/JenkinsFile/scripts/SCPFscaleTopo.R
+++ b/TestON/JenkinsFile/scripts/SCPFscaleTopo.R
@@ -21,64 +21,139 @@
# please contact Jeremy Ronquillo: j_ronquillo@u.pacific.edu
# **********************************************************
-# STEP 1: File management.
+# STEP 1: Data management.
# **********************************************************
-
-print( "STEP 1: File management." )
+print( "**********************************************************" )
+print( "STEP 1: Data management." )
+print( "**********************************************************" )
+database_host = 1
+database_port = 2
+database_u_id = 3
+database_pw = 4
+test_name = 5
+branch_name = 6
+save_directory = 7
# Command line arguments are read.
print( "Reading commmand-line args." )
args <- commandArgs( trailingOnly=TRUE )
-# Import libraries to be used for graphing and organizing data, respectively.
-# Find out more about ggplot2: https://github.com/tidyverse/ggplot2
-# reshape2: https://github.com/hadley/reshape
+# ----------------
+# Import Libraries
+# ----------------
+
print( "Importing libraries." )
library( ggplot2 )
library( reshape2 )
library( RPostgreSQL ) # For databases
-# Check if sufficient args are provided.
-if ( is.na( args[ 7 ] ) ){
- print( "Usage: Rscript SCPFgraphGenerator <database-host> <database-port> <database-user-id> <database-password> <test-name> <branch-name> <directory-to-save-graphs>" )
- q() # basically exit(), but in R
+# -------------------
+# Check CLI Arguments
+# -------------------
+
+print( "Verifying CLI args." )
+
+if ( is.na( args[ save_directory ] ) ){
+
+ print( paste( "Usage: Rscript SCPFgraphGenerator",
+ "<database-host>",
+ "<database-port>",
+ "<database-user-id>",
+ "<database-password>",
+ "<test-name>",
+ "<branch-name>",
+ "<directory-to-save-graphs>",
+                  sep=" " ) )
+
+ quit( status = 1 ) # basically exit(), but in R
}
-# paste() is used to concatenate strings
-outputFile <- paste( args[ 7 ], args[ 5 ], sep="" )
-outputFile <- paste( outputFile, args[ 6 ], sep="_" )
-outputFile <- paste( outputFile, "_graph.jpg", sep="" )
+# -----------------
+# Create File Names
+# -----------------
-print( "Reading from databases." )
+print( "Creating filenames and title of graph." )
-con <- dbConnect( dbDriver( "PostgreSQL" ), dbname="onostest", host=args[ 1 ], port=strtoi( args[ 2 ] ), user=args[ 3 ],password=args[ 4 ] )
+outputFile <- paste( args[ save_directory ],
+ args[ test_name ],
+ "_",
+ args[ branch_name ],
+ "_graph.jpg",
+ sep="" )
-command <- paste( "SELECT * FROM scale_topo_latency_details WHERE branch = '", args[ 6 ], sep = "" )
-command <- paste( command, "' AND date IN ( SELECT MAX( date ) FROM scale_topo_latency_details WHERE branch = '", sep = "" )
-command <- paste( command, args[ 6 ], sep = "" )
-command <- paste( command, "' ) ", sep="" )
+chartTitle <- "Scale Topology Latency Test"
-print( paste( "Sending SQL command:", command ) )
+# ------------------
+# SQL Initialization
+# ------------------
+print( "Initializing SQL" )
+
+con <- dbConnect( dbDriver( "PostgreSQL" ),
+ dbname = "onostest",
+ host = args[ database_host ],
+ port = strtoi( args[ database_port ] ),
+ user = args[ database_u_id ],
+ password = args[ database_pw ] )
+
+# --------------------------
+# Scale Topology SQL Command
+# --------------------------
+
+print( "Generating Scale Topology SQL Command" )
+
+command <- paste( "SELECT * FROM scale_topo_latency_details WHERE branch = '",
+ args[ branch_name ],
+ "' AND date IN ( SELECT MAX( date ) FROM scale_topo_latency_details WHERE branch = '",
+ args[ branch_name ],
+ "' ) ",
+ sep = "" )
+
+print( "Sending SQL command:" )
+print( command )
fileData <- dbGetQuery( con, command )
-title <- paste( args[ 5 ], args[ 6 ], sep="_" )
-
# **********************************************************
# STEP 2: Organize data.
# **********************************************************
-print( "STEP 2: Organize data." )
+print( "**********************************************************" )
+print( "STEP 2: Organize Data." )
+print( "**********************************************************" )
-# Create lists c() and organize data into their corresponding list.
+# ------------
+# Data Sorting
+# ------------
+
print( "Sorting data." )
-avgs <- c( fileData[ 'last_role_request_to_last_topology' ], fileData[ 'last_connection_to_last_role_request' ], fileData[ 'first_connection_to_last_connection' ] )
+
+requiredColumns <- c( "last_role_request_to_last_topology", "last_connection_to_last_role_request", "first_connection_to_last_connection" )
+
+tryCatch( avgs <- c( fileData[ requiredColumns ] ),
+ error = function( e ) {
+ print( "[ERROR] One or more expected columns are missing from the data. Please check that the data and SQL command are valid, then try again." )
+ print( "Required columns: " )
+ print( requiredColumns )
+ print( "Actual columns: " )
+ print( names( fileData ) )
+ print( "Error dump:" )
+ print( e )
+ quit( status = 1 )
+ }
+ )
+
+# --------------------
+# Construct Data Frame
+# --------------------
+
+print( "Constructing Data Frame" )
# Parse lists into data frames.
-dataFrame <- melt( avgs ) # This is where reshape2 comes in. Avgs list is converted to data frame
-dataFrame$scale <- fileData$scale # Add node scaling to the data frame.
-colnames( dataFrame ) <- c( "ms", "type", "scale")
-
+dataFrame <- melt( avgs )
+dataFrame$scale <- fileData$scale
+colnames( dataFrame ) <- c( "s",
+ "type",
+                            "scale" )
# Format data frame so that the data is in the same order as it appeared in the file.
dataFrame$type <- as.character( dataFrame$type )
@@ -87,7 +162,9 @@
dataFrame <- na.omit( dataFrame ) # Omit any data that doesn't exist
-sum <- fileData[ 'last_role_request_to_last_topology' ] + fileData[ 'last_connection_to_last_role_request' ] + fileData[ 'first_connection_to_last_connection' ]
+sum <- fileData[ 'last_role_request_to_last_topology' ] +
+ fileData[ 'last_connection_to_last_role_request' ] +
+ fileData[ 'first_connection_to_last_connection' ]
print( "Data Frame Results:" )
print( dataFrame )
@@ -96,48 +173,93 @@
# STEP 3: Generate graphs.
# **********************************************************
-print( "STEP 3: Generate graphs." )
+print( "**********************************************************" )
+print( "STEP 3: Generate Graph." )
+print( "**********************************************************" )
-# 1. Graph fundamental data is generated first.
-# These are variables that apply to all of the graphs being generated, regardless of type.
-#
-# 2. Type specific graph data is generated.
-#
-# 3. Generate and save the graphs.
-# Graphs are saved to the filename above, in the directory provided in command line args
+# ------------------
+# Generate Main Plot
+# ------------------
+
+print( "Creating main plot." )
+
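+# 'iterative' (built during data organization) provides the x positions;
+# the axis labels still show the node scale.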
+mainPlot <- ggplot( data = dataFrame, aes( x = iterative,
+ y = s,
+ fill = type ) )
+
+# ------------------------------
+# Fundamental Variables Assigned
+# ------------------------------
print( "Generating fundamental graph data." )
theme_set( theme_grey( base_size = 20 ) ) # set the default text size of the graph.
-
-# Create the primary plot here.
-# ggplot contains the following arguments:
-# - data: the data frame that the graph will be based off of
-# - aes: the asthetics of the graph which require:
-# - x: x-axis values (usually node scaling)
-# - y: y-axis values (usually time in milliseconds)
-# - fill: the category of the colored side-by-side bars (usually type)
-mainPlot <- ggplot( data = dataFrame, aes( x = iterative, y = ms, fill = type ) )
-
-# Formatting the plot
width <- 0.6 # Width of the bars.
-xScaleConfig <- scale_x_continuous( breaks = dataFrame$iterative, label = dataFrame$scale )
+xScaleConfig <- scale_x_continuous( breaks = dataFrame$iterative,
+ label = dataFrame$scale )
xLabel <- xlab( "Scale" )
-yLabel <- ylab( "Latency (ms)" )
+yLabel <- ylab( "Latency (s)" )
fillLabel <- labs( fill="Type" )
-chartTitle <- paste( "Scale Topology Latency Test" )
-theme <- theme( plot.title=element_text( hjust = 0.5, size = 28, face='bold' ) )
-values <- geom_text( aes( x=dataFrame$iterative, y=sum + 0.02 * max( sum ), label = format( sum, big.mark = ",", scientific = FALSE ), fontface = "bold" ) )
+imageWidth <- 15
+imageHeight <- 10
+imageDPI <- 200
+
+theme <- theme( plot.title = element_text( hjust = 0.5, size = 32, face = 'bold' ),
+ legend.position = "bottom",
+ legend.text = element_text( size=22 ),
+ legend.title = element_blank(),
+ legend.key.size = unit( 1.5, 'lines' ) )
+
+values <- geom_text( aes( x = dataFrame$iterative,
+ y = sum + 0.02 * max( sum ),
+ label = format( sum,
+ big.mark = ",",
+ scientific = FALSE ),
+ fontface = "bold" ),
+ size = 7.0 )
+
+wrapLegend <- guides( fill = guide_legend( nrow=2, byrow=TRUE ) )
+
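+# The empty string supplies a blank subtitle, adding space beneath the title.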
+title <- ggtitle( chartTitle, "" )
# Store plot configurations as 1 variable
-fundamentalGraphData <- mainPlot + xScaleConfig + xLabel + yLabel + fillLabel + theme + values
+fundamentalGraphData <- mainPlot +
+ xScaleConfig +
+ xLabel +
+ yLabel +
+ fillLabel +
+ theme +
+ values +
+ wrapLegend +
+ title
+
+# ---------------------------
+# Generating Bar Graph Format
+# ---------------------------
print( "Generating bar graph." )
-barGraphFormat <- geom_bar( stat = "identity", width = width )
-title <- ggtitle( paste( chartTitle, "" ) )
-result <- fundamentalGraphData + barGraphFormat + title
-# Save graph to file
+barGraphFormat <- geom_bar( stat = "identity", width = width )
+
+result <- fundamentalGraphData +
+ barGraphFormat
+
+# -----------------------
+# Exporting Graph to File
+# -----------------------
+
print( paste( "Saving bar chart to", outputFile ) )
-ggsave( outputFile, width = 10, height = 6, dpi = 200 )
-print( paste( "Successfully wrote bar chart out to", outputFile ) )
+
+tryCatch( ggsave( outputFile,
+ width = imageWidth,
+ height = imageHeight,
+ dpi = imageDPI ),
+ error = function( e ){
+ print( "[ERROR] There was a problem saving the graph due to a graph formatting exception. Error dump:" )
+ print( e )
+ quit( status = 1 )
+ }
+ )
+
+print( paste( "[SUCCESS] Successfully wrote bar chart out to", outputFile ) )
+quit( status = 0 )
diff --git a/TestON/JenkinsFile/scripts/SCPFscalingMaxIntents.R b/TestON/JenkinsFile/scripts/SCPFscalingMaxIntents.R
index 02f4f28..21dd70f 100644
--- a/TestON/JenkinsFile/scripts/SCPFscalingMaxIntents.R
+++ b/TestON/JenkinsFile/scripts/SCPFscalingMaxIntents.R
@@ -21,75 +21,157 @@
# please contact Jeremy Ronquillo: j_ronquillo@u.pacific.edu
# **********************************************************
-# STEP 1: File management.
+# STEP 1: Data management.
# **********************************************************
-print( "STEP 1: File management." )
+print( "**********************************************************" )
+print( "STEP 1: Data management." )
+print( "**********************************************************" )
+has_flow_obj = 1
+database_host = 2
+database_port = 3
+database_u_id = 4
+database_pw = 5
+test_name = 6
+branch_name = 7
+old_flow = 8
+save_directory = 9
print( "Reading commmand-line args." )
args <- commandArgs( trailingOnly=TRUE )
-# Import libraries to be used for graphing and organizing data, respectively.
-# Find out more about ggplot2: https://github.com/tidyverse/ggplot2
-# reshape2: https://github.com/hadley/reshape
+# ----------------
+# Import Libraries
+# ----------------
+
print( "Importing libraries." )
library( ggplot2 )
library( reshape2 )
library( RPostgreSQL ) # For databases
-# Normal usage
-# Check if sufficient args are provided.
-if ( is.na( args[ 8 ] ) ){
- print( "Usage: Rscript SCPFInstalledIntentsFlows <has-flowObj> <database-host> <database-port> <database-user-id> <database-password> <test-name> <branch-name> <directory-to-save-graphs>" )
- q() # basically exit(), but in R
+# -------------------
+# Check CLI Arguments
+# -------------------
+
+print( "Verifying CLI args." )
+
+if ( is.na( args[ save_directory ] ) ){
+ print( paste( "Usage: Rscript SCPFInstalledIntentsFlows",
+ "<has-flowObj>",
+ "<database-host>",
+ "<database-port>",
+ "<database-user-id>",
+ "<database-password>",
+ "<test-name>",
+ "<branch-name>",
+ "<using-old-flow>",
+ "<directory-to-save-graphs>",
+ sep=" " ) )
+
+ quit( status = 1 ) # basically exit(), but in R
}
-# paste() is used to concatenate strings.
-outputFile <- paste( args[ 8 ], args[ 6 ], sep="" )
-if ( args[ 1 ] == "y" ){
- outputFile <- paste( outputFile, "flowObj", sep="_" )
-}
-outputFile <- paste( outputFile, args[ 7 ], sep="_" )
-outputFile <- paste( outputFile, "_errGraph.jpg", sep="" )
+# -----------------
+# Create File Names
+# -----------------
-print( "Reading from databases." )
+print( "Creating filenames and title of graph." )
-con <- dbConnect( dbDriver( "PostgreSQL" ), dbname="onostest", host=args[ 2 ], port=strtoi( args[ 3 ] ), user=args[ 4 ],password=args[ 5 ] )
+fileFlowObjModifier <- ""
+sqlFlowObjModifier <- ""
+chartTitle <- "Number of Installed Intents & Flows"
-command <- "SELECT * FROM max_intents_"
-if ( args[ 1 ] == "y" ){
- command <- paste( command, "fobj_", sep="" )
-}
-command <- paste( command, "tests WHERE branch = '", sep = "" )
-command <- paste( command, args[ 7 ], sep="" )
-command <- paste( command, "' AND date IN ( SELECT MAX( date ) FROM max_intents_", sep="" )
-if ( args[ 1 ] == "y" ){
- command <- paste( command, "fobj_", sep="" )
-}
-command <- paste( command, "tests WHERE branch = '", sep = "" )
-command <- paste( command, args[ 7 ], sep = "" )
-command <- paste( command, "' ) ", sep="" )
-
-print( paste( "Sending SQL command:", command ) )
-
-fileData <- dbGetQuery( con, command )
-
-if ( args[ 1 ] == "y" ){
+if ( args[ has_flow_obj ] == "y" ){
+ fileFlowObjModifier <- "_flowObj"
+ sqlFlowObjModifier <- "fobj_"
chartTitle <- "Number of Installed Intents & Flows\n with Flow Objectives"
-} else {
- chartTitle <- "Number of Installed Intents & Flows"
}
+fileOldFlowModifier <- ""
+if ( args[ old_flow ] == 'y' ){
+ fileOldFlowModifier <- "_OldFlow"
+ chartTitle <- paste( chartTitle, "With Eventually Consistent Flow Rule Store", sep="\n" )
+}
+
+outputFile <- paste( args[ save_directory ],
+ args[ test_name ],
+ fileFlowObjModifier,
+ fileOldFlowModifier,
+ "_",
+ args[ branch_name ],
+ "_errGraph.jpg",
+ sep="" )
+
+# ------------------
+# SQL Initialization
+# ------------------
+
+print( "Initializing SQL" )
+
+con <- dbConnect( dbDriver( "PostgreSQL" ),
+ dbname = "onostest",
+ host = args[ database_host ],
+ port = strtoi( args[ database_port ] ),
+ user = args[ database_u_id ],
+ password = args[ database_pw ] )
+
+# -------------------------------
+# Scaling Max Intents SQL Command
+# -------------------------------
+
+print( "Scaling Max Intents SQL Command" )
+
+command <- paste( "SELECT * FROM max_intents_",
+ sqlFlowObjModifier,
+ "tests WHERE branch = '",
+ args[ branch_name ],
+ "' AND date IN ( SELECT MAX( date ) FROM max_intents_",
+ sqlFlowObjModifier,
+ "tests WHERE branch = '",
+ args[ branch_name ],
+ "' AND ",
+ ( if( args[ old_flow ] == 'y' ) "" else "NOT " ),
+ "is_old_flow",
+ " ) ",
+ sep="" )
+
+print( "Sending SQL command:" )
+print( command )
+fileData <- dbGetQuery( con, command )
# **********************************************************
# STEP 2: Organize data.
# **********************************************************
-fileDataNames <- names( fileData )
+print( "**********************************************************" )
+print( "STEP 2: Organize Data." )
+print( "**********************************************************" )
-avgs <- c()
+# ------------
+# Data Sorting
+# ------------
print( "Sorting data." )
-avgs <- c( fileData[ 'max_intents_ovs' ], fileData[ 'max_flows_ovs' ] )
+
+requiredColumns <- c( "max_intents_ovs", "max_flows_ovs" )
+
+tryCatch( avgs <- c( fileData[ requiredColumns ] ),
+ error = function( e ) {
+ print( "[ERROR] One or more expected columns are missing from the data. Please check that the data and SQL command are valid, then try again." )
+ print( "Required columns: " )
+ print( requiredColumns )
+ print( "Actual columns: " )
+ print( names( fileData ) )
+ print( "Error dump:" )
+ print( e )
+ quit( status = 1 )
+ }
+ )
+
+# --------------------
+# Construct Data Frame
+# --------------------
+
+print( "Constructing Data Frame" )
dataFrame <- melt( avgs )
dataFrame$scale <- fileData$scale
@@ -108,29 +190,97 @@
# STEP 3: Generate graphs.
# **********************************************************
+print( "**********************************************************" )
+print( "STEP 3: Generate Graph." )
+print( "**********************************************************" )
+
+# ------------------
+# Generate Main Plot
+# ------------------
+
+print( "Creating main plot." )
+mainPlot <- ggplot( data = dataFrame, aes( x = scale,
+ y = ms,
+ fill = type ) )
+
+# ------------------------------
+# Fundamental Variables Assigned
+# ------------------------------
+
print( "Generating fundamental graph data." )
-theme_set( theme_grey( base_size = 20 ) ) # set the default text size of the graph.
-
-mainPlot <- ggplot( data = dataFrame, aes( x = scale, y = ms, fill = type ) )
-xScaleConfig <- scale_x_continuous( breaks=c( 1, 3, 5, 7, 9) )
+barWidth <- 1.3
+theme_set( theme_grey( base_size = 22 ) ) # set the default text size of the graph.
+xScaleConfig <- scale_x_continuous( breaks=c( 1, 3, 5, 7, 9 ) )
xLabel <- xlab( "Scale" )
yLabel <- ylab( "Max Number of Intents/Flow Rules" )
fillLabel <- labs( fill="Type" )
-theme <- theme( plot.title=element_text( hjust = 0.5, size = 28, face='bold' ) )
+imageWidth <- 15
+imageHeight <- 10
+imageDPI <- 200
-fundamentalGraphData <- mainPlot + xScaleConfig + xLabel + yLabel + fillLabel + theme
+theme <- theme( plot.title = element_text( hjust = 0.5, size = 32, face = 'bold' ),
+ legend.position = "bottom",
+ legend.text = element_text( size=22 ),
+ legend.title = element_blank(),
+ legend.key.size = unit( 1.5, 'lines' ) )
+colors <- scale_fill_manual( values = c( "#F77670",
+ "#619DFA" ) )
-print( "Generating bar graph bars." )
-width <- 1.3
-barGraphFormat <- geom_bar( stat="identity", position=position_dodge( ), width = width )
-values <- geom_text( aes( x=dataFrame$scale, y=dataFrame$ms + 0.02 * max( dataFrame$ms ), label = format( dataFrame$ms, digits=3, big.mark = ",", scientific = FALSE ) ), size = 3.2, fontface = "bold", position=position_dodge( width=1.25 ) )
+wrapLegend <- guides( fill = guide_legend( nrow = 1, byrow = TRUE ) )
title <- ggtitle( chartTitle )
-result <- fundamentalGraphData + barGraphFormat + title + values
+fundamentalGraphData <- mainPlot +
+ xScaleConfig +
+ xLabel +
+ yLabel +
+ fillLabel +
+ theme +
+ wrapLegend +
+ title +
+ colors
+
+# ---------------------------
+# Generating Bar Graph Format
+# ---------------------------
+
+print( "Generating bar graph." )
+
+barGraphFormat <- geom_bar( stat = "identity",
+ position = position_dodge(),
+ width = barWidth )
+
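+# Offset each label by 1.5% of the tallest bar so the text sits just above
+# its bar instead of overlapping it.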
+values <- geom_text( aes( x = dataFrame$scale,
+ y = dataFrame$ms + 0.015 * max( dataFrame$ms ),
+ label = format( dataFrame$ms,
+ digits=3,
+ big.mark = ",",
+ scientific = FALSE ) ),
+ size = 5.2,
+ fontface = "bold",
+ position = position_dodge( width = 1.25 ) )
+
+result <- fundamentalGraphData +
+ barGraphFormat +
+ values
+
+# -----------------------
+# Exporting Graph to File
+# -----------------------
print( paste( "Saving bar chart to", outputFile ) )
-ggsave( outputFile, width = 10, height = 6, dpi = 200 )
-print( paste( "Successfully wrote bar chart out to", outputFile ) )
+tryCatch( ggsave( outputFile,
+ width = imageWidth,
+ height = imageHeight,
+ dpi = imageDPI ),
+ error = function( e ){
+ print( "[ERROR] There was a problem saving the graph due to a graph formatting exception. Error dump:" )
+ print( e )
+ quit( status = 1 )
+ }
+ )
+
+print( paste( "[SUCCESS] Successfully wrote bar chart out to", outputFile ) )
+quit( status = 0 )
diff --git a/TestON/JenkinsFile/scripts/SCPFswitchLat.R b/TestON/JenkinsFile/scripts/SCPFswitchLat.R
index 8a03863..de506a3 100644
--- a/TestON/JenkinsFile/scripts/SCPFswitchLat.R
+++ b/TestON/JenkinsFile/scripts/SCPFswitchLat.R
@@ -21,48 +21,99 @@
# please contact Jeremy Ronquillo: j_ronquillo@u.pacific.edu
# **********************************************************
-# STEP 1: File management.
+# STEP 1: Data management.
# **********************************************************
-print( "STEP 1: File management." )
+print( "**********************************************************" )
+print( "STEP 1: Data management." )
+print( "**********************************************************" )
+database_host = 1
+database_port = 2
+database_u_id = 3
+database_pw = 4
+test_name = 5
+branch_name = 6
+save_directory = 7
# Command line arguments are read.
print( "Reading commmand-line args." )
args <- commandArgs( trailingOnly=TRUE )
-# Import libraries to be used for graphing and organizing data, respectively.
-# Find out more about ggplot2: https://github.com/tidyverse/ggplot2
-# reshape2: https://github.com/hadley/reshape
+# ----------------
+# Import Libraries
+# ----------------
+
print( "Importing libraries." )
library( ggplot2 )
library( reshape2 )
library( RPostgreSQL ) # For databases
-# Check if sufficient args are provided.
-if ( is.na( args[ 7 ] ) ){
- print( "Usage: Rscript SCPFswitchLat <database-host> <database-port> <database-user-id> <database-password> <test-name> <branch-name> <directory-to-save-graphs>" )
- q() # basically exit(), but in R
+# -------------------
+# Check CLI Arguments
+# -------------------
+
+print( "Verifying CLI args." )
+
+if ( is.na( args[ save_directory ] ) ){
+
+ print( paste( "Usage: Rscript SCPFswitchLat",
+ "<database-host>",
+ "<database-port>",
+ "<database-user-id>",
+ "<database-password>",
+ "<test-name>",
+ "<branch-name>",
+ "<directory-to-save-graphs>",
+ sep=" ") )
+
+ quit( status = 1 ) # basically exit(), but in R
}
-# paste() is used to concatenate strings.
-errBarOutputFileUp <- paste( args[ 7 ], "SCPFswitchLat_", sep = "" )
-errBarOutputFileUp <- paste( errBarOutputFileUp, args[ 6 ], sep = "" )
-errBarOutputFileUp <- paste( errBarOutputFileUp, "_UpErrBarWithStack.jpg", sep = "" )
+# -----------------
+# Create File Names
+# -----------------
-errBarOutputFileDown <- paste( args[ 7 ], "SCPFswitchLat_", sep = "" )
-errBarOutputFileDown <- paste( errBarOutputFileDown, args[ 6 ], sep = "" )
-errBarOutputFileDown <- paste( errBarOutputFileDown, "_DownErrBarWithStack.jpg", sep = "" )
+print( "Creating filenames and title of graph." )
-print( "Reading from databases." )
+errBarOutputFileUp <- paste( args[ save_directory ],
+ "SCPFswitchLat_",
+ args[ branch_name ],
+ "_UpErrBarWithStack.jpg",
+ sep="" )
-con <- dbConnect( dbDriver( "PostgreSQL" ), dbname="onostest", host=args[ 1 ], port=strtoi( args[ 2 ] ), user=args[ 3 ],password=args[ 4 ] )
+errBarOutputFileDown <- paste( args[ save_directory ],
+ "SCPFswitchLat_",
+ args[ branch_name ],
+ "_DownErrBarWithStack.jpg",
+ sep="" )
+# ------------------
+# SQL Initialization
+# ------------------
-command <- paste( "SELECT * FROM switch_latency_details WHERE branch = '", args[ 6 ], sep="" )
-command <- paste( command, "' AND date IN ( SELECT MAX( date ) FROM switch_latency_details WHERE branch='", sep = "")
-command <- paste( command, args[ 6 ], sep="" )
-command <- paste( command, "' )", sep="" )
+print( "Initializing SQL" )
-print( paste( "Sending SQL command:", command ) )
+con <- dbConnect( dbDriver( "PostgreSQL" ),
+ dbname = "onostest",
+ host = args[ database_host ],
+ port = strtoi( args[ database_port ] ),
+ user = args[ database_u_id ],
+ password = args[ database_pw ] )
+
+# --------------------------
+# Switch Latency SQL Command
+# --------------------------
+
+print( "Generating Switch Latency SQL Command" )
+
+command <- paste( "SELECT * FROM switch_latency_details WHERE branch = '",
+ args[ branch_name ],
+ "' AND date IN ( SELECT MAX( date ) FROM switch_latency_details WHERE branch='",
+ args[ branch_name ],
+ "' )",
+ sep="" )
+
+print( "Sending SQL command:" )
+print( command )
fileData <- dbGetQuery( con, command )
@@ -70,31 +121,109 @@
# STEP 2: Organize data.
# **********************************************************
-print( "Sorting data." )
+print( "**********************************************************" )
+print( "STEP 2: Organize Data." )
+print( "**********************************************************" )
-upAvgs <- c( fileData[ 'up_device_to_graph_avg' ], fileData[ 'role_reply_to_device_avg' ], fileData[ 'role_request_to_role_reply_avg' ], fileData[ 'feature_reply_to_role_request_avg' ], fileData[ 'tcp_to_feature_reply_avg' ] )
+# -------------------------------
+# Switch Up Averages Data Sorting
+# -------------------------------
+
+print( "Sorting data for Switch Up Averages." )
+
+requiredColumns <- c( "up_device_to_graph_avg",
+ "role_reply_to_device_avg",
+ "role_request_to_role_reply_avg",
+ "feature_reply_to_role_request_avg",
+ "tcp_to_feature_reply_avg" )
+
+tryCatch( upAvgs <- c( fileData[ requiredColumns ] ),
+ error = function( e ) {
+ print( "[ERROR] One or more expected columns are missing from the data. Please check that the data and SQL command are valid, then try again." )
+ print( "Required columns: " )
+ print( requiredColumns )
+ print( "Actual columns: " )
+ print( names( fileData ) )
+ print( "Error dump:" )
+ print( e )
+ quit( status = 1 )
+ }
+ )
+
+# ------------------------------
+# Switch Up Construct Data Frame
+# ------------------------------
+
+print( "Constructing Switch Up data frame." )
+
upAvgsData <- melt( upAvgs )
upAvgsData$scale <- fileData$scale
upAvgsData$up_std <- fileData$up_std
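+# Omit any data that doesn't exist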
+upAvgsData <- na.omit( upAvgsData )
-colnames( upAvgsData ) <- c( "ms", "type", "scale", "stds" )
+colnames( upAvgsData ) <- c( "ms",
+ "type",
+ "scale",
+ "stds" )
+
upAvgsData$type <- as.character( upAvgsData$type )
upAvgsData$type <- factor( upAvgsData$type, levels=unique( upAvgsData$type ) )
-downAvgs <- c( fileData[ 'down_device_to_graph_avg' ], fileData[ 'ack_to_device_avg' ], fileData[ 'fin_ack_to_ack_avg' ] )
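+# Per-scale totals of the stacked components, used below to place the text
+# labels on top of each stacked bar.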
+sumOfUpAvgs <- fileData[ 'up_device_to_graph_avg' ] +
+ fileData[ 'role_reply_to_device_avg' ] +
+ fileData[ 'role_request_to_role_reply_avg' ] +
+ fileData[ 'feature_reply_to_role_request_avg' ] +
+ fileData[ 'tcp_to_feature_reply_avg' ]
+
+print( "Up Averages Results:" )
+print( upAvgsData )
+
+# ---------------------------------
+# Switch Down Averages Data Sorting
+# ---------------------------------
+
+print( "Sorting data for Switch Down Averages." )
+
+requiredColumns <- c( "down_device_to_graph_avg",
+ "ack_to_device_avg",
+ "fin_ack_to_ack_avg" )
+
+tryCatch( downAvgs <- c( fileData[ requiredColumns ] ),
+ error = function( e ) {
+ print( "[ERROR] One or more expected columns are missing from the data. Please check that the data and SQL command are valid, then try again." )
+ print( "Required columns: " )
+ print( requiredColumns )
+ print( "Actual columns: " )
+ print( names( fileData ) )
+ print( "Error dump:" )
+ print( e )
+ quit( status = 1 )
+ }
+ )
+
+# --------------------------------
+# Switch Down Construct Data Frame
+# --------------------------------
+
+print( "Constructing Switch Down data frame." )
+
downAvgsData <- melt( downAvgs )
downAvgsData$scale <- fileData$scale
downAvgsData$down_std <- fileData$down_std
-colnames( downAvgsData ) <- c( "ms", "type", "scale", "stds" )
+colnames( downAvgsData ) <- c( "ms",
+ "type",
+ "scale",
+ "stds" )
+
downAvgsData$type <- as.character( downAvgsData$type )
downAvgsData$type <- factor( downAvgsData$type, levels=unique( downAvgsData$type ) )
-upAvgsData <- na.omit( upAvgsData ) # Omit any data that doesn't exist
downAvgsData <- na.omit( downAvgsData )
-print( "Up Averages Results:" )
-print( upAvgsData )
+sumOfDownAvgs <- fileData[ 'down_device_to_graph_avg' ] +
+ fileData[ 'ack_to_device_avg' ] +
+ fileData[ 'fin_ack_to_ack_avg' ]
print( "Down Averages Results:" )
print( downAvgsData )
@@ -103,57 +232,177 @@
# STEP 3: Generate graphs.
# **********************************************************
+print( "**********************************************************" )
+print( "STEP 3: Generate Graph." )
+print( "**********************************************************" )
-print( "Generating fundamental graph data (Switch Up Latency)." )
-width <- 1
+# ------------------------------------
+# Initialize Variables For Both Graphs
+# ------------------------------------
-theme_set( theme_grey( base_size = 20 ) ) # set the default text size of the graph.
+print( "Initializing variables used in both graphs." )
-mainPlot <- ggplot( data = upAvgsData, aes( x = scale, y = ms, fill = type, ymin = fileData[ 'up_end_to_end_avg' ] - stds, ymax = fileData[ 'up_end_to_end_avg' ] + stds ) )
-xScaleConfig <- scale_x_continuous( breaks=c( 1, 3, 5, 7, 9) )
+theme_set( theme_grey( base_size = 22 ) ) # set the default text size of the graphs
+xScaleConfig <- scale_x_continuous( breaks = c( 1, 3, 5, 7, 9 ) )
xLabel <- xlab( "Scale" )
yLabel <- ylab( "Latency (ms)" )
-fillLabel <- labs( fill="Type" )
-theme <- theme( plot.title=element_text( hjust = 0.5, size = 28, face='bold' ) )
+imageWidth <- 15
+imageHeight <- 10
+imageDPI <- 200
+errorBarColor <- rgb( 140, 140, 140, maxColorValue = 255 )
+barWidth <- 1
-fundamentalGraphData <- mainPlot + xScaleConfig + xLabel + yLabel + fillLabel + theme
+theme <- theme( plot.title = element_text( hjust = 0.5, size = 32, face = 'bold' ),
+ legend.position = "bottom",
+ legend.text = element_text( size = 22 ),
+ legend.title = element_blank(),
+ legend.key.size = unit( 1.5, 'lines' ) )
+
+# ----------------------------
+# Switch Up Generate Main Plot
+# ----------------------------
+
+print( "Creating main plot (Switch Up Latency)." )
+
+mainPlot <- ggplot( data = upAvgsData, aes( x = scale,
+ y = ms,
+ fill = type,
+ ymin = fileData[ 'up_end_to_end_avg' ],
+ ymax = fileData[ 'up_end_to_end_avg' ] + stds ) )
+
+# ----------------------------------------
+# Switch Up Fundamental Variables Assigned
+# ----------------------------------------
+
+print( "Generating fundamental graph data (Switch Up Latency)." )
+
+title <- ggtitle( "Switch Up Latency" )
+
+fundamentalGraphData <- mainPlot +
+ xScaleConfig +
+ xLabel +
+ yLabel +
+ theme +
+ title
+
+# -------------------------------------
+# Switch Up Generating Bar Graph Format
+# -------------------------------------
print( "Generating bar graph with error bars (Switch Up Latency)." )
-barGraphFormat <- geom_bar( stat="identity", width = width )
-errorBarFormat <- geom_errorbar( width = width, color=rgb( 140, 140, 140, maxColorValue=255 ) )
-sum <- fileData[ 'up_device_to_graph_avg' ] + fileData[ 'role_reply_to_device_avg' ] + fileData[ 'role_request_to_role_reply_avg' ] + fileData[ 'feature_reply_to_role_request_avg' ] + fileData[ 'tcp_to_feature_reply_avg' ]
-values <- geom_text( aes( x=upAvgsData$scale, y=sum + 0.04 * max( sum ), label = format( sum, digits=3, big.mark = ",", scientific = FALSE ) ), size = 5, fontface = "bold" )
-title <- ggtitle( "Switch Up Latency" )
-result <- fundamentalGraphData + barGraphFormat + errorBarFormat + title + values
+barGraphFormat <- geom_bar( stat = "identity", width = barWidth )
+errorBarFormat <- geom_errorbar( width = barWidth, color = errorBarColor )
+
+barGraphValues <- geom_text( aes( x = upAvgsData$scale,
+ y = sumOfUpAvgs + 0.04 * max( sumOfUpAvgs ),
+ label = format( sumOfUpAvgs,
+ digits = 3,
+ big.mark = ",",
+ scientific = FALSE ) ),
+ size = 7.0,
+ fontface = "bold" )
+
+wrapLegend <- guides( fill = guide_legend( nrow = 2, byrow = TRUE ) )
+
+result <- fundamentalGraphData +
+ barGraphFormat +
+ errorBarFormat +
+ barGraphValues +
+ wrapLegend
+
+# ---------------------------------
+# Switch Up Exporting Graph to File
+# ---------------------------------
print( paste( "Saving bar chart with error bars (Switch Up Latency) to", errBarOutputFileUp ) )
-ggsave( errBarOutputFileUp, width = 10, height = 6, dpi = 200 )
-print( paste( "Successfully wrote bar chart with error bars (Switch Up Latency) out to", errBarOutputFileUp ) )
+tryCatch( ggsave( errBarOutputFileUp,
+ width = imageWidth,
+ height = imageHeight,
+ dpi = imageDPI ),
+ error = function( e ){
+ print( "[ERROR] There was a problem saving the graph due to a graph formatting exception. Error dump:" )
+ print( e )
+ quit( status = 1 )
+ }
+ )
-# Generate switch down latency graph
+print( paste( "[SUCCESS] Successfully wrote bar chart with error bars (Switch Up Latency) out to", errBarOutputFileUp ) )
+
+# ------------------------------
+# Switch Down Generate Main Plot
+# ------------------------------
+
+print( "Creating main plot (Switch Down Latency)." )
+
+mainPlot <- ggplot( data = downAvgsData, aes( x = scale,
+ y = ms,
+ fill = type,
+ ymin = fileData[ 'down_end_to_end_avg' ],
+ ymax = fileData[ 'down_end_to_end_avg' ] + stds ) )
+
+# ------------------------------------------
+# Switch Down Fundamental Variables Assigned
+# ------------------------------------------
print( "Generating fundamental graph data (Switch Down Latency)." )
-mainPlot <- ggplot( data = downAvgsData, aes( x = scale, y = ms, fill = type, ymin = fileData[ 'down_end_to_end_avg' ] - stds, ymax = fileData[ 'down_end_to_end_avg' ] + stds ) )
-theme <- theme( plot.title=element_text( hjust = 0.5, size = 28, face='bold' ) )
-
-
-fundamentalGraphData <- mainPlot + xScaleConfig + xLabel + yLabel + fillLabel + theme
-
-print( "Generating bar graph with error bars (Switch Down Latency)." )
-barGraphFormat <- geom_bar( stat="identity", width = width )
-errorBarFormat <- geom_errorbar( width = width, color=rgb( 140, 140, 140, maxColorValue=255 ) )
+colors <- scale_fill_manual( values = c( "#F77670", # Red
+ "#619DFA", # Blue
+ "#18BA48" ) ) # Green
title <- ggtitle( "Switch Down Latency" )
-sum <- fileData[ 'down_device_to_graph_avg' ] + fileData[ 'ack_to_device_avg' ] + fileData[ 'fin_ack_to_ack_avg' ]
-values <- geom_text( aes( x=downAvgsData$scale, y=sum + 0.04 * max( sum ), label = format( sum, digits=3, big.mark = ",", scientific = FALSE ) ), size = 5, fontface = "bold" )
-result <- fundamentalGraphData + barGraphFormat + errorBarFormat + title + values
+fundamentalGraphData <- mainPlot +
+ xScaleConfig +
+ xLabel +
+ yLabel +
+ theme +
+ title
+
+# ---------------------------------------
+# Switch Down Generating Bar Graph Format
+# ---------------------------------------
+
+print( "Generating bar graph with error bars (Switch Down Latency)." )
+barGraphFormat <- geom_bar( stat = "identity", width = barWidth )
+errorBarFormat <- geom_errorbar( width = barWidth, color = errorBarColor )
+
+barGraphValues <- geom_text( aes( x = downAvgsData$scale,
+ y = sumOfDownAvgs + 0.04 * max( sumOfDownAvgs ),
+ label = format( sumOfDownAvgs,
+ digits = 3,
+ big.mark = ",",
+ scientific = FALSE ) ),
+ size = 7.0,
+ fontface = "bold" )
+
+wrapLegend <- guides( fill = guide_legend( nrow = 1, byrow = TRUE ) )
+
+result <- fundamentalGraphData +
+ barGraphFormat +
+ colors +
+ errorBarFormat +
+ barGraphValues +
+ wrapLegend
+
+# -----------------------------------
+# Switch Down Exporting Graph to File
+# -----------------------------------
print( paste( "Saving bar chart with error bars (Switch Down Latency) to", errBarOutputFileDown ) )
-ggsave( errBarOutputFileDown, width = 10, height = 6, dpi = 200 )
+tryCatch( ggsave( errBarOutputFileDown,
+ width = imageWidth,
+ height = imageHeight,
+ dpi = imageDPI ),
+ error = function( e ){
+ print( "[ERROR] There was a problem saving the graph due to a graph formatting exception. Error dump:" )
+ print( e )
+ quit( status = 1 )
+ }
+ )
-print( paste( "Successfully wrote bar chart with error bars (Switch Down Latency) out to", errBarOutputFileDown ) )
+print( paste( "[SUCCESS] Successfully wrote bar chart with error bars (Switch Down Latency) out to", errBarOutputFileDown ) )
+quit( status = 0 )
diff --git a/TestON/JenkinsFile/scripts/testCaseGraphGenerator.R b/TestON/JenkinsFile/scripts/testCaseGraphGenerator.R
index 395823d..1938ceb 100644
--- a/TestON/JenkinsFile/scripts/testCaseGraphGenerator.R
+++ b/TestON/JenkinsFile/scripts/testCaseGraphGenerator.R
@@ -26,72 +26,148 @@
# STEP 1: Data management.
# **********************************************************
+print( "**********************************************************" )
print( "STEP 1: Data management." )
+print( "**********************************************************" )
# Command line arguments are read. Args include the database credentials, test name, branch name, and the directory to output files.
print( "Reading commmand-line args." )
args <- commandArgs( trailingOnly=TRUE )
-# Import libraries to be used for graphing and organizing data, respectively.
-# Find out more about ggplot2: https://github.com/tidyverse/ggplot2
-# reshape2: https://github.com/hadley/reshape
-# RPostgreSQL: https://code.google.com/archive/p/rpostgresql/
+# ----------------
+# Import Libraries
+# ----------------
+
print( "Importing libraries." )
library( ggplot2 )
library( reshape2 )
library( RPostgreSQL )
-# Check if sufficient args are provided.
+# -------------------
+# Check CLI Arguments
+# -------------------
+
+print( "Verifying CLI args." )
+
if ( is.na( args[ 8 ] ) ){
- print( "Usage: Rscript testCaseGraphGenerator.R <database-host> <database-port> <database-user-id> <database-password> <test-name> <branch-name> <#-builds-to-show> <directory-to-save-graphs>" )
- q() # basically exit(), but in R
+
+ print( paste( "Usage: Rscript testCaseGraphGenerator.R",
+ "<database-host>",
+ "<database-port>",
+ "<database-user-id>",
+ "<database-password>",
+ "<test-name>", # part of the output filename
+ "<branch-name>", # for sql and output filename
+ "<#-builds-to-show>", # for sql and output filename
+ "<directory-to-save-graphs>",
+ sep=" " ) )
+
+ quit( status = 1 ) # basically exit(), but in R
}
-# Filenames for the output graph include the testname, branch, and the graph type.
-outputFile <- paste( args[ 8 ], args[ 5 ], sep="" )
-outputFile <- paste( outputFile, args[ 6 ], sep="_" )
-outputFile <- paste( outputFile, args[ 7 ], sep="_" )
-outputFile <- paste( outputFile, "builds", sep="-" )
-outputFile <- paste( outputFile, "_graph.jpg", sep="" )
+# -------------------------------
+# Create Title and Graph Filename
+# -------------------------------
-# From RPostgreSQL
-print( "Reading from databases." )
-con <- dbConnect( dbDriver( "PostgreSQL" ), dbname="onostest", host=args[ 1 ], port=strtoi( args[ 2 ] ), user=args[ 3 ],password=args[ 4 ] )
+print( "Creating title of graph." )
-print( "Creating SQL command." )
-# Creating SQL command based on command line args.
-command <- paste( "SELECT * FROM executed_test_tests WHERE actual_test_name='", args[ 5 ], sep="" )
-command <- paste( command, "' AND branch='", sep="" )
-command <- paste( command, args[ 6 ], sep="" )
-command <- paste( command, "' ORDER BY date DESC LIMIT ", sep="" )
-command <- paste( command, args[ 7 ], sep="" )
+title <- paste( args[ 5 ],
+ " - ",
+ args[ 6 ],
+ " \n Results of Last ",
+ args[ 7 ],
+ " Builds",
+ sep="" )
+
+print( "Creating graph filename." )
+
+outputFile <- paste( args[ 8 ],
+ args[ 5 ],
+ "_",
+ args[ 6 ],
+ "_",
+ args[ 7 ],
+ "-builds_graph.jpg",
+ sep="" )
+
+# ------------------
+# SQL Initialization
+# ------------------
+
+print( "Initializing SQL" )
+
+con <- dbConnect( dbDriver( "PostgreSQL" ),
+ dbname = "onostest",
+ host = args[ 1 ],
+ port = strtoi( args[ 2 ] ),
+ user = args[ 3 ],
+ password = args[ 4 ] )
+
+# ---------------------
+# Test Case SQL Command
+# ---------------------
+print( "Generating Test Case SQL command." )
+
+command <- paste( "SELECT * FROM executed_test_tests WHERE actual_test_name='",
+ args[ 5 ],
+ "' AND branch='",
+ args[ 6 ],
+ "' ORDER BY date DESC LIMIT ",
+ args[ 7 ],
+ sep="" )
+
+print( "Sending SQL command:" )
+print( command )
fileData <- dbGetQuery( con, command )
-# Title of graph based on command line args.
-title <- paste( args[ 5 ], args[ 6 ], sep=" - " )
-title <- paste( title, "Results of Last ", sep=" \n " )
-title <- paste( title, args[ 7 ], sep="" )
-title <- paste( title, " Builds", sep="" )
# **********************************************************
# STEP 2: Organize data.
# **********************************************************
-print( "STEP 2: Organize data." )
+print( "**********************************************************" )
+print( "STEP 2: Organize Data." )
+print( "**********************************************************" )
-# Create lists c() and organize data into their corresponding list.
-print( "Sorting data into new data frame." )
-categories <- c( fileData[ 'num_failed' ], fileData[ 'num_passed' ], fileData[ 'num_planned' ] )
+# ------------------------------------------
+# Combining Passed, Failed, and Planned Data
+# ------------------------------------------
-# Parse lists into data frames.
-# This is where reshape2 comes in. Avgs list is converted to data frame.
+print( "Combining Passed, Failed, and Planned Data." )
+
+requiredColumns <- c( "num_failed", "num_passed", "num_planned" )
+
+tryCatch( categories <- c( fileData[ requiredColumns ] ),
+ error = function( e ) {
+ print( "[ERROR] One or more expected columns are missing from the data. Please check that the data and SQL command are valid, then try again." )
+ print( "Required columns: " )
+ print( requiredColumns )
+ print( "Actual columns: " )
+ print( names( fileData ) )
+ print( "Error dump:" )
+ print( e )
+ quit( status = 1 )
+ }
+ )
+
+# --------------------
+# Construct Data Frame
+# --------------------
+
+print( "Constructing data frame from combined data." )
+
dataFrame <- melt( categories )
+
+# Rename column names in dataFrame
+colnames( dataFrame ) <- c( "Tests",
+ "Status" )
+
+# Add build dates to the dataFrame
dataFrame$build <- fileData$build
-colnames( dataFrame ) <- c( "Tests", "Status", "Build" )
# Format data frame so that the data is in the same order as it appeared in the file.
dataFrame$Status <- as.character( dataFrame$Status )
-dataFrame$Status <- factor( dataFrame$Status, levels=unique( dataFrame$Status ) )
+dataFrame$Status <- factor( dataFrame$Status, levels = unique( dataFrame$Status ) )
# Add planned, passed, and failed results to the dataFrame (for the fill below the lines)
dataFrame$num_planned <- fileData$num_planned
@@ -101,7 +177,8 @@
# Adding a temporary reversed iterative list to the dataFrame so that there are no gaps in-between build numbers.
dataFrame$iterative <- rev( seq( 1, nrow( fileData ), by = 1 ) )
-dataFrame <- na.omit( dataFrame ) # Omit any data that doesn't exist
+# Omit any data that doesn't exist
+dataFrame <- na.omit( dataFrame )
print( "Data Frame Results:" )
print( dataFrame )
@@ -110,7 +187,13 @@
# STEP 3: Generate graphs.
# **********************************************************
-print( "STEP 3: Generate graphs." )
+print( "**********************************************************" )
+print( "STEP 3: Generate Graph." )
+print( "**********************************************************" )
+
+# -------------------
+# Main Plot Generated
+# -------------------
print( "Creating main plot." )
# Create the primary plot here.
@@ -120,37 +203,118 @@
# - x: x-axis values (usually iterative, but it will become build # later)
# - y: y-axis values (usually tests)
# - color: the category of the colored lines (usually status of test)
-theme_set( theme_grey( base_size = 20 ) ) # set the default text size of the graph.
-mainPlot <- ggplot( data = dataFrame, aes( x = iterative, y = Tests, color = Status ) )
+
+mainPlot <- ggplot( data = dataFrame, aes( x = iterative,
+ y = Tests,
+ color = Status ) )
+
+# -------------------
+# Main Plot Formatted
+# -------------------
print( "Formatting main plot." )
-# geom_ribbon is used so that there is a colored fill below the lines. These values shouldn't be changed.
-failedColor <- geom_ribbon( aes( ymin = 0, ymax = dataFrame$num_failed ), fill = "red", linetype = 0, alpha = 0.07 )
-passedColor <- geom_ribbon( aes( ymin = 0, ymax = dataFrame$num_passed ), fill = "green", linetype = 0, alpha = 0.05 )
-plannedColor <- geom_ribbon( aes( ymin = 0, ymax = dataFrame$num_planned ), fill = "blue", linetype = 0, alpha = 0.01 )
-xScaleConfig <- scale_x_continuous( breaks = dataFrame$iterative, label = dataFrame$Build )
-yScaleConfig <- scale_y_continuous( breaks = seq( 0, max( dataFrame$Tests ), by = ceiling( max( dataFrame$Tests ) / 10 ) ) )
+# geom_ribbon is used so that there is a colored fill below the lines. These values shouldn't be changed.
+failedColor <- geom_ribbon( aes( ymin = 0,
+ ymax = dataFrame$num_failed ),
+ fill = "red",
+ linetype = 0,
+ alpha = 0.07 )
+
+passedColor <- geom_ribbon( aes( ymin = 0,
+ ymax = dataFrame$num_passed ),
+ fill = "green",
+ linetype = 0,
+ alpha = 0.05 )
+
+plannedColor <- geom_ribbon( aes( ymin = 0,
+ ymax = dataFrame$num_planned ),
+ fill = "blue",
+ linetype = 0,
+ alpha = 0.01 )
+
+# Colors for the lines
+lineColors <- scale_color_manual( values = c( "#E80000", # red
+ "#00B208", # green
+ "#00A5FF" ) ) # blue
+
+# ------------------------------
+# Fundamental Variables Assigned
+# ------------------------------
+
+print( "Generating fundamental graph data." )
+
+theme_set( theme_grey( base_size = 26 ) ) # set the default text size of the graph.
+
+xScaleConfig <- scale_x_continuous( breaks = dataFrame$iterative,
+ label = dataFrame$build )
+yScaleConfig <- scale_y_continuous( breaks = seq( 0, max( dataFrame$Tests ),
+ by = ceiling( max( dataFrame$Tests ) / 10 ) ) )
xLabel <- xlab( "Build Number" )
yLabel <- ylab( "Test Cases" )
-fillLabel <- labs( fill="Type" )
-legendLabels <- scale_colour_discrete( labels = c( "Failed", "Passed", "Planned" ) )
-centerTitle <- theme( plot.title=element_text( hjust = 0.5 ) ) # To center the title text
-theme <- theme( plot.title = element_text( size = 28, face='bold' ), axis.text.x = element_text( angle = 45, size = 10 ) )
+
+imageWidth <- 15
+imageHeight <- 10
+imageDPI <- 200
+
+legendLabels <- scale_colour_discrete( labels = c( "Failed Cases",
+ "Passed Cases",
+ "Planned Cases" ) )
+
+# Set other graph configurations here.
+theme <- theme( plot.title = element_text( hjust = 0.5, size = 32, face = 'bold' ),
+ axis.text.x = element_text( angle = 0, size = 14 ),
+ legend.position = "bottom",
+ legend.text = element_text( size = 22 ),
+ legend.title = element_blank(),
+ legend.key.size = unit( 1.5, 'lines' ) )
+
+title <- ggtitle( title )
# Store plot configurations as 1 variable
-fundamentalGraphData <- mainPlot + plannedColor + passedColor + failedColor + xScaleConfig + yScaleConfig + xLabel + yLabel + fillLabel + legendLabels + centerTitle + theme
+fundamentalGraphData <- mainPlot +
+ plannedColor +
+ passedColor +
+ failedColor +
+ xScaleConfig +
+ yScaleConfig +
+ xLabel +
+ yLabel +
+ lineColors +
+ legendLabels +
+ theme +
+ title
+
+# ----------------------------
+# Generating Line Graph Format
+# ----------------------------
print( "Generating line graph." )
lineGraphFormat <- geom_line( size = 1.1 )
pointFormat <- geom_point( size = 3 )
-title <- ggtitle( title )
-result <- fundamentalGraphData + lineGraphFormat + pointFormat + title
+result <- fundamentalGraphData +
+ lineGraphFormat +
+ pointFormat
-# Save graph to file
+# -----------------------
+# Exporting Graph to File
+# -----------------------
+
print( paste( "Saving result graph to", outputFile ) )
-ggsave( outputFile, width = 10, height = 6, dpi = 200 )
-print( paste( "Successfully wrote result graph out to", outputFile ) )
+
+tryCatch( ggsave( outputFile,
+ width = imageWidth,
+ height = imageHeight,
+ dpi = imageDPI ),
+ error = function( e ){
+ print( "[ERROR] There was a problem saving the graph due to a graph formatting exception. Error dump:" )
+ print( e )
+ quit( status = 1 )
+ }
+ )
+
+print( paste( "[SUCCESS] Successfully wrote result graph out to", outputFile ) )
+quit( status = 0 )
diff --git a/TestON/JenkinsFile/scripts/testCategoryBuildStats.R b/TestON/JenkinsFile/scripts/testCategoryBuildStats.R
new file mode 100644
index 0000000..c0211bc
--- /dev/null
+++ b/TestON/JenkinsFile/scripts/testCategoryBuildStats.R
@@ -0,0 +1,318 @@
+# Copyright 2017 Open Networking Foundation (ONF)
+#
+# Please refer questions to either the onos test mailing list at <onos-test@onosproject.org>,
+# the System Testing Plans and Results wiki page at <https://wiki.onosproject.org/x/voMg>,
+# or the System Testing Guide page at <https://wiki.onosproject.org/x/WYQg>
+#
+# TestON is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 2 of the License, or
+# (at your option) any later version.
+#
+# TestON is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with TestON. If not, see <http://www.gnu.org/licenses/>.
+#
+# If you have any questions, or if you don't understand R,
+# please contact Jeremy Ronquillo: j_ronquillo@u.pacific.edu
+
+# **********************************************************
+# STEP 1: Data management.
+# **********************************************************
+
+print( "**********************************************************" )
+print( "STEP 1: Data management." )
+print( "**********************************************************" )
+
+# Command line arguments are read. Args include the database credentials, test name, branch name, and the directory to output files.
+print( "Reading commmand-line args." )
+args <- commandArgs( trailingOnly=TRUE )
+
+databaseHost <- 1
+databasePort <- 2
+databaseUserID <- 3
+databasePassword <- 4
+testSuiteName <- 5
+branchName <- 6
+testsToInclude <- 7
+buildToShow <- 8
+displayStatus <- 9
+scaleOfPercent <- 10
+saveDirectory <- 11
+
+# ----------------
+# Import Libraries
+# ----------------
+
+print( "Importing libraries." )
+library( ggplot2 )
+library( reshape2 )
+library( RPostgreSQL )
+
+# -------------------
+# Check CLI Arguments
+# -------------------
+
+print( "Verifying CLI args." )
+
+if ( is.na( args[ saveDirectory ] ) ){
+
+ print( paste( "Usage: Rscript testCategoryTrend.R",
+ "<database-host>",
+ "<database-port>",
+ "<database-user-id>",
+ "<database-password>",
+ "<test-suite-name>",
+ "<branch-name>",
+ "<tests-to-include-(as-one-string)>",
+ "<build-to-show>",
+ "<pass/fail/plan>",
+ "<percent-scale>",
+ "<directory-to-save-graphs>",
+ sep=" " ) )
+
+ quit( status = 1 ) # basically exit(), but in R
+}
+
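+# Example invocation (hypothetical values):
+# Rscript testCategoryBuildStats.R localhost 5432 user pass FUNC master \
+# "FUNCflow,FUNCintent" latest pass 10 /tmp/graphs/
+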
+# ------------------
+# SQL Initialization
+# ------------------
+
+print( "Initializing SQL" )
+
+con <- dbConnect( dbDriver( "PostgreSQL" ),
+ dbname = "onostest",
+ host = args[ databaseHost ],
+ port = strtoi( args[ databasePort ] ),
+ user = args[ databaseUserID ],
+ password = args[ databasePassword ] )
+
+# ---------------------
+# Test Case SQL Command
+# ---------------------
+print( "Generating Test Case SQL command." )
+
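+# Build the quoted list for the SQL IN clause: e.g. "FUNCflow,FUNCintent"
+# becomes 'FUNCflow','FUNCintent'; substr() trims the trailing ",'" left
+# by the final loop iteration.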
+tests <- "'"
+for ( test in as.list( strsplit( args[ testsToInclude ], "," )[[1]] ) ){
+ tests <- paste( tests, test, "','", sep="" )
+}
+tests <- substr( tests, 0, nchar( tests ) - 2 )
+
+fileBuildToShow <- args[ buildToShow ]
+operator <- "= "
+if ( args[ buildToShow ] == "latest" ){
+ operator <- ">= "
+ args[ buildToShow ] <- "1000"
+}
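+# "latest" is mapped to "build >= 1000" ( assumed to be the first pipeline
+# build number ), then trimmed to the newest build after the query returns.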
+
+command <- paste( "SELECT * ",
+ "FROM executed_test_tests a ",
+ "WHERE ( SELECT COUNT( * ) FROM executed_test_tests b ",
+ "WHERE b.branch='",
+ args[ branchName ],
+ "' AND b.actual_test_name IN (",
+ tests,
+ ") AND a.actual_test_name = b.actual_test_name AND a.date <= b.date AND b.build ", operator,
+ args[ buildToShow ],
+ " ) = ",
+ 1,
+ " AND a.branch='",
+ args[ branchName ],
+ "' AND a.actual_test_name IN (",
+ tests,
+ ") AND a.build ", operator,
+ args[ buildToShow ],
+ " ORDER BY a.actual_test_name DESC, a.date DESC",
+ sep="")
+
+print( "Sending SQL command:" )
+print( command )
+dbResult <- dbGetQuery( con, command )
+
+maxBuild <- max( dbResult[ 'build' ] )
+dbResult <- dbResult[ which( dbResult[ ,4 ] >= maxBuild ), ]
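+# Keep only rows from the newest build returned; column 4 is assumed to be
+# the build number in executed_test_tests.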
+
+# -------------------------------
+# Create Title and Graph Filename
+# -------------------------------
+
+print( "Creating title of graph." )
+
+titleDisplayStatus <- ""
+if ( args[ displayStatus ] == "fail" ){
+ titleDisplayStatus <- "Failed"
+} else if ( args[ displayStatus ] == "plan" ){
+ titleDisplayStatus <- "Executed"
+} else if ( args[ displayStatus ] == "pass" ){
+ titleDisplayStatus <- "Succeeded"
+} else {
+ print( paste( "[ERROR]: Invalid histogram display status: ", args[ displayStatus ], sep="" ) )
+ quit( status = 1 )
+}
+
+title <- paste( args[ testSuiteName ],
+ " Tests ",
+ titleDisplayStatus,
+ " - ",
+ args[ branchName ],
+ " \n Build #",
+ max( dbResult[ 'build' ] ),
+ sep="" )
+
+print( "Creating graph filename." )
+
+outputFile <- paste( args[ saveDirectory ],
+ args[ testSuiteName ],
+ "_",
+ args[ branchName ],
+ "_build-",
+ fileBuildToShow,
+ "_",
+ args[ scaleOfPercent ],
+ "-scaling",
+ "_",
+ args[ displayStatus ],
+ "_histogram.jpg",
+ sep="" )
+
+print( dbResult )
+
+# **********************************************************
+# STEP 2: Organize data.
+# **********************************************************
+
+print( "**********************************************************" )
+print( "STEP 2: Organize Data." )
+print( "**********************************************************" )
+
+t <- subset( dbResult, select=c( "actual_test_name", "num_passed", "num_failed", "num_planned" ) )
+t$passed_percent <- t$num_passed / t$num_planned * 100
+t$failed_percent <- t$num_failed / t$num_planned * 100
+t$planned_percent <- ( t$num_passed + t$num_failed ) / t$num_planned * 100
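+# "planned_percent" is really the executed share: passed plus failed over
+# planned, matching the "Executed" display status above.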
+
+# --------------------
+# Construct Data Frame
+# --------------------
+
+dataFrame <- aggregate( t$passed_percent, by=list( Category=t$actual_test_name ), FUN=sum )
+if ( args[ displayStatus ] == "fail" ){
+ dataFrame <- aggregate( t$failed_percent, by=list( Category=t$actual_test_name ), FUN=sum )
+} else if ( args[ displayStatus ] == "plan" ){
+ dataFrame <- aggregate( t$planned_percent, by=list( Category=t$actual_test_name ), FUN=sum )
+}
+
+colnames( dataFrame ) <- c( "Test", paste( titleDisplayStatus, "%", sep="" ) )
+
+print( "Data Frame Results:" )
+print( dataFrame )
+
+# **********************************************************
+# STEP 3: Generate graphs.
+# **********************************************************
+
+print( "**********************************************************" )
+print( "STEP 3: Generate Graph." )
+print( "**********************************************************" )
+
+# -------------------
+# Main Plot Generated
+# -------------------
+
+print( "Creating main plot." )
+# Create the primary plot here.
+# ggplot contains the following arguments:
+# - data: the data frame that the graph will be based off of
+# - aes: the aesthetics of the graph, which here is only the x values:
+# the percent column of the data frame, binned into a histogram below
+
+mainPlot <- ggplot( data = dataFrame, aes( dataFrame[ ,2 ] ) )
+
+# -------------------
+# Main Plot Formatted
+# -------------------
+
+print( "Formatting main plot." )
+
+# ------------------------------
+# Fundamental Variables Assigned
+# ------------------------------
+
+print( "Generating fundamental graph data." )
+
+theme_set( theme_grey( base_size = 26 ) ) # set the default text size of the graph.
+
+xScaleConfig <- scale_x_continuous( breaks = seq( 0, 100, by = 10 ) )
+yScaleConfig <- scale_y_continuous( breaks = seq( 0, nrow( dbResult ), by = 1 ),
+ limits = c( 0, nrow( dbResult ) ) )
+
+xLabel <- xlab( paste( titleDisplayStatus, "%" ) )
+yLabel <- ylab( "Frequency" )
+
+imageWidth <- 15
+imageHeight <- 10
+imageDPI <- 200
+
+# Set other graph configurations here.
+theme <- theme( plot.title = element_text( hjust = 0.5, size = 32, face = 'bold' ),
+ axis.text.x = element_text( angle = 0, size = 14 ),
+ legend.position = "bottom",
+ legend.text = element_text( size = 22 ),
+ legend.title = element_blank(),
+ legend.key.size = unit( 1.5, 'lines' ) )
+
+title <- ggtitle( title )
+
+# Store plot configurations as 1 variable
+fundamentalGraphData <- mainPlot +
+ xScaleConfig +
+ yScaleConfig +
+ xLabel +
+ yLabel +
+ theme +
+ title
+
+# ----------------------------
+# Generating Line Graph Format
+# ----------------------------
+
+print( "Generating line graph." )
+
+barColor <- "#00B208"
+if ( args[ displayStatus ] == "fail" ){
+ barColor <- "#E80000"
+} else if ( args[ displayStatus ] == "plan" ){
+ barColor <- "#00A5FF"
+}
+
+histogramFormat <- geom_histogram( col = "#000000",
+ fill = barColor,
+ breaks = seq( 0, 100, by = strtoi( args[ scaleOfPercent ] ) ),
+ lwd = 0.5 )
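+# Bin width comes from the <percent-scale> argument, e.g. a value of 10
+# gives ten 10%-wide buckets across 0-100%.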
+
+result <- fundamentalGraphData +
+ histogramFormat
+
+# -----------------------
+# Exporting Graph to File
+# -----------------------
+
+print( paste( "Saving result graph to", outputFile ) )
+
+tryCatch( ggsave( outputFile,
+ width = imageWidth,
+ height = imageHeight,
+ dpi = imageDPI ),
+ error = function( e ){
+ print( "[ERROR] There was a problem saving the graph due to a graph formatting exception. Error dump:" )
+ print( e )
+ quit( status = 1 )
+ }
+ )
+
+print( paste( "[SUCCESS] Successfully wrote result graph out to", outputFile ) )
+quit( status = 0 )
diff --git a/TestON/JenkinsFile/scripts/testCategoryTrend.R b/TestON/JenkinsFile/scripts/testCategoryTrend.R
new file mode 100644
index 0000000..7552f65
--- /dev/null
+++ b/TestON/JenkinsFile/scripts/testCategoryTrend.R
@@ -0,0 +1,322 @@
+# Copyright 2017 Open Networking Foundation (ONF)
+#
+# Please refer questions to either the onos test mailing list at <onos-test@onosproject.org>,
+# the System Testing Plans and Results wiki page at <https://wiki.onosproject.org/x/voMg>,
+# or the System Testing Guide page at <https://wiki.onosproject.org/x/WYQg>
+#
+# TestON is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 2 of the License, or
+# (at your option) any later version.
+#
+# TestON is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with TestON. If not, see <http://www.gnu.org/licenses/>.
+#
+# If you have any questions, or if you don't understand R,
+# please contact Jeremy Ronquillo: j_ronquillo@u.pacific.edu
+
+pipelineMinValue = 1000
+
+# **********************************************************
+# STEP 1: Data management.
+# **********************************************************
+
+print( "**********************************************************" )
+print( "STEP 1: Data management." )
+print( "**********************************************************" )
+
+# Command line arguments are read. Args include the database credentials, test name, branch name, and the directory to output files.
+print( "Reading commmand-line args." )
+args <- commandArgs( trailingOnly=TRUE )
+
+databaseHost <- 1
+databasePort <- 2
+databaseUserID <- 3
+databasePassword <- 4
+testSuiteName <- 5
+branchName <- 6
+testsToInclude <- 7
+buildsToShow <- 8
+saveDirectory <- 9
+
+# ----------------
+# Import Libraries
+# ----------------
+
+print( "Importing libraries." )
+library( ggplot2 )
+library( reshape2 )
+library( RPostgreSQL )
+
+# -------------------
+# Check CLI Arguments
+# -------------------
+
+print( "Verifying CLI args." )
+
+if ( is.na( args[ saveDirectory ] ) ){
+
+ print( paste( "Usage: Rscript testCategoryTrend.R",
+ "<database-host>",
+ "<database-port>",
+ "<database-user-id>",
+ "<database-password>",
+ "<test-suite-name>",
+ "<branch-name>",
+ "<tests-to-include-(as-one-string)>",
+ "<builds-to-show>",
+ "<directory-to-save-graphs>",
+ sep=" " ) )
+
+ quit( status = 1 ) # basically exit(), but in R
+}
+
+# -------------------------------
+# Create Title and Graph Filename
+# -------------------------------
+
+print( "Creating title of graph." )
+
+title <- paste( args[ testSuiteName ],
+ " Test Results Trend - ",
+ args[ branchName ],
+ " \n Results of Last ",
+ args[ buildsToShow ],
+ " Nightly Builds",
+ sep="" )
+
+print( "Creating graph filename." )
+
+outputFile <- paste( args[ saveDirectory ],
+ args[ testSuiteName ],
+ "_",
+ args[ branchName ],
+ "_overview.jpg",
+ sep="" )
+
+# ------------------
+# SQL Initialization
+# ------------------
+
+print( "Initializing SQL" )
+
+con <- dbConnect( dbDriver( "PostgreSQL" ),
+ dbname = "onostest",
+ host = args[ databaseHost ],
+ port = strtoi( args[ databasePort ] ),
+ user = args[ databaseUserID ],
+ password = args[ databasePassword ] )
+
+# ---------------------
+# Test Case SQL Command
+# ---------------------
+print( "Generating Test Case SQL command." )
+
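+# Quote each test name for the SQL IN clause; the trailing ",'" is trimmed below.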
+tests <- "'"
+for ( test in as.list( strsplit( args[ testsToInclude ], "," )[[1]] ) ){
+ tests <- paste( tests, test, "','", sep="" )
+}
+tests <- substr( tests, 0, nchar( tests ) - 2 )
+
+command <- paste( "SELECT * ",
+ "FROM executed_test_tests a ",
+ "WHERE ( SELECT COUNT( * ) FROM executed_test_tests b ",
+ "WHERE b.branch='",
+ args[ branchName ],
+ "' AND b.actual_test_name IN (",
+ tests,
+ ") AND a.actual_test_name = b.actual_test_name AND a.date <= b.date AND b.build >= ",
+ pipelineMinValue,
+ " ) <= ",
+ args[ buildsToShow ],
+ " AND a.branch='",
+ args[ branchName ],
+ "' AND a.actual_test_name IN (",
+ tests,
+ ") AND a.build >= ",
+ pipelineMinValue,
+ " ORDER BY a.actual_test_name DESC, a.date DESC",
+ sep="")
+
+print( "Sending SQL command:" )
+print( command )
+dbResult <- dbGetQuery( con, command )
+maxBuild <- max( dbResult[ 'build' ] ) - strtoi( args[ buildsToShow ] )
+dbResult <- dbResult[ which( dbResult[ ,4 ] > maxBuild ), ]
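+# Trim to the most recent <builds-to-show> builds; column 4 is assumed to
+# be the build number.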
+print( dbResult )
+
+# **********************************************************
+# STEP 2: Organize data.
+# **********************************************************
+
+print( "**********************************************************" )
+print( "STEP 2: Organize Data." )
+print( "**********************************************************" )
+
+t <- subset( dbResult, select=c( "actual_test_name", "build", "num_failed" ) )
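+# Collapse failure counts to a 0/1 flag: ceiling( n / ( n + 1 ) ) is 0 when
+# n == 0 and 1 otherwise, so the per-build sum counts failing test runs.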
+t$num_failed <- ceiling( t$num_failed / ( t$num_failed + 1 ) )
+t$num_planned <- 1
+
+fileData <- aggregate( t$num_failed, by=list( Category=t$build ), FUN=sum )
+colnames( fileData ) <- c( "build", "num_failed" )
+
+fileData$num_planned <- ( aggregate( t$num_planned, by=list( Category=t$build ), FUN=sum ) )$x
+fileData$num_passed <- fileData$num_planned - fileData$num_failed
+
+print( fileData )
+
+# --------------------
+# Construct Data Frame
+# --------------------
+
+dataFrame <- melt( subset( fileData, select=c( "num_failed", "num_passed", "num_planned" ) ) )
+dataFrame$build <- fileData$build
+colnames( dataFrame ) <- c( "status", "results", "build" )
+
+dataFrame$num_failed <- fileData$num_failed
+dataFrame$num_passed <- fileData$num_passed
+dataFrame$num_planned <- fileData$num_planned
+dataFrame$iterative <- seq( 1, nrow( fileData ), by = 1 )
+
+print( "Data Frame Results:" )
+print( dataFrame )
+
+# **********************************************************
+# STEP 3: Generate graphs.
+# **********************************************************
+
+print( "**********************************************************" )
+print( "STEP 3: Generate Graph." )
+print( "**********************************************************" )
+
+# -------------------
+# Main Plot Generated
+# -------------------
+
+print( "Creating main plot." )
+# Create the primary plot here.
+# ggplot contains the following arguments:
+# - data: the data frame that the graph will be based off of
+# - aes: the aesthetics of the graph which require:
+# - x: x-axis values (usually iterative, but it will become build # later)
+# - y: y-axis values (usually tests)
+# - color: the category of the colored lines (usually status of test)
+
+mainPlot <- ggplot( data = dataFrame, aes( x = iterative,
+ y = results,
+ color = status ) )
+
+# -------------------
+# Main Plot Formatted
+# -------------------
+
+print( "Formatting main plot." )
+
+# geom_ribbon is used so that there is a colored fill below the lines. These values shouldn't be changed.
+failedColor <- geom_ribbon( aes( ymin = 0,
+ ymax = dataFrame$num_failed ),
+ fill = "#ff0000",
+ linetype = 0,
+ alpha = 0.07 )
+
+passedColor <- geom_ribbon( aes( ymin = 0,
+ ymax = dataFrame$num_passed ),
+ fill = "#0083ff",
+ linetype = 0,
+ alpha = 0.05 )
+
+plannedColor <- geom_ribbon( aes( ymin = 0,
+ ymax = dataFrame$num_planned ),
+ fill = "#000000",
+ linetype = 0,
+ alpha = 0.01 )
+
+# Colors for the lines
+lineColors <- scale_color_manual( values = c( "#ff0000", # fail
+ "#0083ff", # pass
+ "#000000" ), # planned
+ labels = c( "Containing Failures",
+ "No Failures",
+ "Total Built" ) )
+
+# ------------------------------
+# Fundamental Variables Assigned
+# ------------------------------
+
+print( "Generating fundamental graph data." )
+
+theme_set( theme_grey( base_size = 26 ) ) # set the default text size of the graph.
+
+xScaleConfig <- scale_x_continuous( breaks = dataFrame$iterative,
+ label = dataFrame$build )
+yScaleConfig <- scale_y_continuous( breaks = seq( 0, max( dataFrame$results ),
+ by = ceiling( max( dataFrame$results ) / 10 ) ) )
+
+xLabel <- xlab( "Build Number" )
+yLabel <- ylab( "Tests" )
+
+imageWidth <- 15
+imageHeight <- 10
+imageDPI <- 200
+
+# Set other graph configurations here.
+theme <- theme( plot.title = element_text( hjust = 0.5, size = 32, face = 'bold' ),
+ axis.text.x = element_text( angle = 0, size = 14 ),
+ legend.position = "bottom",
+ legend.text = element_text( size = 22 ),
+ legend.title = element_blank(),
+ legend.key.size = unit( 1.5, 'lines' ) )
+
+title <- ggtitle( title )
+
+# Store plot configurations as 1 variable
+fundamentalGraphData <- mainPlot +
+ plannedColor +
+ passedColor +
+ failedColor +
+ xScaleConfig +
+ yScaleConfig +
+ xLabel +
+ yLabel +
+ theme +
+ title +
+ lineColors
+
+# ----------------------------
+# Generating Line Graph Format
+# ----------------------------
+
+print( "Generating line graph." )
+
+lineGraphFormat <- geom_line( size = 1.1 )
+pointFormat <- geom_point( size = 3 )
+
+result <- fundamentalGraphData +
+ lineGraphFormat +
+ pointFormat
+
+# -----------------------
+# Exporting Graph to File
+# -----------------------
+
+print( paste( "Saving result graph to", outputFile ) )
+
+tryCatch( ggsave( outputFile,
+ width = imageWidth,
+ height = imageHeight,
+ dpi = imageDPI ),
+ error = function( e ){
+ print( "[ERROR] There was a problem saving the graph due to a graph formatting exception. Error dump:" )
+ print( e )
+ quit( status = 1 )
+ }
+ )
+
+print( paste( "[SUCCESS] Successfully wrote result graph out to", outputFile ) )
+quit( status = 0 )
diff --git a/TestON/bin/cli.py b/TestON/bin/cli.py
index a7e1297..8e48311 100755
--- a/TestON/bin/cli.py
+++ b/TestON/bin/cli.py
@@ -46,16 +46,16 @@
import threading
import __builtin__
import pprint
-dump = pprint.PrettyPrinter(indent=4)
+dump = pprint.PrettyPrinter( indent=4 )
__builtin__.testthread = False
introduction = "TestON is the testing framework \nDeveloped by Paxterra Solutions (www.paxterrasolutions.com)"
__builtin__.COLORS = False
-path = re.sub( "/bin$", "", sys.path[0] )
+path = re.sub( "/bin$", "", sys.path[ 0 ] )
sys.path.insert( 1, path )
from core.teston import *
-class CLI( threading.Thread,Cmd,object ):
+class CLI( threading.Thread, Cmd, object ):
"command-line interface to execute the test."
prompt = 'teston> '
@@ -64,7 +64,7 @@
self.teston = teston
self._mainevent = threading.Event()
- threading.Thread.__init__(self)
+ threading.Thread.__init__( self )
self.main_stop = False
self.locals = { 'test': teston }
self.stdin = stdin
@@ -86,7 +86,8 @@
Cmd.do_help( self, line )
if line is '':
output( self.helpStr )
- def do_run(self,args):
+
+ def do_run( self, args ):
'''
run command will execute the test with following optional command line arguments
logdir <directory to store logs in>
@@ -97,22 +98,22 @@
try:
args = args.split()
options = {}
- options = self.parseArgs(args,options)
- options = dictToObj(options)
+ options = self.parseArgs( args, options )
+ options = dictToObj( options )
if not testthread:
- test = TestThread(options)
+ test = TestThread( options )
test.start()
while test.isAlive():
- test.join(1)
+ test.join( 1 )
else:
- print main.TEST+ " test execution paused, please resume that before executing to another test"
+ print main.TEST + " test execution is paused, please resume it before executing another test"
except KeyboardInterrupt, SystemExit:
print "Interrupt called, Exiting."
test._Thread__stop()
main.cleanup()
main.exit()
- def do_resume(self, line):
+ def do_resume( self, line ):
'''
resume command will continue the execution of paused test.
teston>resume
@@ -122,10 +123,10 @@
'''
if testthread:
testthread.play()
- else :
+ else:
print "There is no test to resume"
- def do_nextstep(self,line):
+ def do_nextstep( self, line ):
'''
nextstep will execute the next-step of the paused test and
it will pause the test after finishing of step.
@@ -140,14 +141,14 @@
'''
if testthread:
- main.log.info("Executing the nextstep, Will pause test execution, after completion of the step")
+ main.log.info( "Executing the nextstep, Will pause test execution, after completion of the step" )
testthread.play()
- time.sleep(.1)
+ time.sleep( .1 )
testthread.pause()
else:
print "There is no paused test "
- def do_dumpvar(self,line):
+ def do_dumpvar( self, line ):
'''
dumpvar will print all the test data in raw format.
usage:
@@ -162,16 +163,16 @@
'''
if testthread:
if line == "main":
- dump.pprint(vars(main))
- else :
- try :
- dump.pprint(vars(main)[line])
+ dump.pprint( vars( main ) )
+ else:
+ try:
+ dump.pprint( vars( main )[ line ] )
except KeyError as e:
print e
- else :
+ else:
print "There is no paused test "
- def do_currentcase(self,line):
+ def do_currentcase( self, line ):
'''
currentcase will return the current case in the test execution.
@@ -180,12 +181,11 @@
'''
if testthread:
- print "Currently executing test case is: "+str(main.CurrentTestCaseNumber)
- else :
+ print "Currently executing test case is: " + str( main.CurrentTestCaseNumber )
+ else:
print "There is no paused test "
-
- def do_currentstep(self,line):
+ def do_currentstep( self, line ):
'''
currentstep will return the current step in the test execution.
@@ -193,12 +193,11 @@
Currently executing test step is: 2.3
'''
if testthread:
- print "Currently executing test step is: "+str(main.CurrentTestCaseNumber)+'.'+str(main.stepCount)
- else :
+ print "Currently executing test step is: " + str( main.CurrentTestCaseNumber ) + '.' + str( main.stepCount )
+ else:
print "There is no paused test "
-
- def do_stop(self,line):
+ def do_stop( self, line ):
'''
Will stop the paused test, if any !
'''
@@ -207,7 +206,7 @@
return 'exited by user command'
- def do_gettest(self,line):
+ def do_gettest( self, line ):
'''
gettest will return the test name which is under execution or recently executed.
@@ -218,16 +217,16 @@
Test recently executed:
Recently executed test is: MininetTest
'''
- try :
- if testthread :
- print "Currently executing Test is: "+main.TEST
- else :
- print "Recently executed test is: "+main.TEST
+ try:
+ if testthread:
+ print "Currently executing Test is: " + main.TEST
+ else:
+ print "Recently executed test is: " + main.TEST
except NameError:
print "There is no previously executed Test"
- def do_showlog(self,line):
+ def do_showlog( self, line ):
'''
showlog will show the test's Log
teston>showlog
@@ -237,16 +236,16 @@
Currently executing Test's log is: /home/openflow/TestON/logs/PoxTest_07_Jan_2013_21_46_58/PoxTest_07_Jan_2013_21_46_58.log
.....
'''
- try :
- if testthread :
- print "Currently executing Test's log is: "+main.LogFileName
+ try:
+ if testthread:
+ print "Currently executing Test's log is: " + main.LogFileName
- else :
- print "Last executed test's log is : "+main.LogFileName
+ else:
+ print "Last executed test's log is : " + main.LogFileName
logFile = main.LogFileName
- logFileHandler = open(logFile, 'r')
- for msg in logFileHandler.readlines() :
+ logFileHandler = open( logFile, 'r' )
+ for msg in logFileHandler.readlines():
print msg,
logFileHandler.close()
@@ -254,79 +253,77 @@
except NameError:
print "There is no previously executed Test"
-
-
- def parseArgs(self,args,options):
+ def parseArgs( self, args, options ):
'''
This will parse the command line arguments.
'''
- options = self.initOptions(options)
- try :
+ options = self.initOptions( options )
+ try:
index = 0
while index < len( args ):
- option = args[index]
- if index > 0 :
- if re.match("--params", option, flags=0):
+ option = args[ index ]
+ if index > 0:
+ if re.match( "--params", option, flags=0 ):
# check if there is a params
- options['params'].append(args[index+1])
- elif re.match("logdir|mail|example|testdir|testcases|onoscell", option, flags = 0):
- options[option] = args[index+1]
- options = self.testcasesInRange(index+1,option,args,options)
+ options[ 'params' ].append( args[ index + 1 ] )
+ elif re.match( "logdir|mail|example|testdir|testcases|onoscell", option, flags = 0 ):
+ options[ option ] = args[ index + 1 ]
+ options = self.testcasesInRange( index + 1, option, args, options )
index += 2
- else :
- options['testname'] = option
+ else:
+ options[ 'testname' ] = option
index += 1
except IndexError as e:
- print (e)
+ print ( e )
main.cleanup()
main.exit()
return options
- def initOptions(self,options):
+ def initOptions( self, options ):
'''
This will initialize the command-line options.
'''
- options['logdir'] = None
- options['mail'] = None
- options['example'] = None
- options['testdir'] = None
- options['testcases'] = None
- options['onoscell'] = None
+ options[ 'logdir' ] = None
+ options[ 'mail' ] = None
+ options[ 'example' ] = None
+ options[ 'testdir' ] = None
+ options[ 'testcases' ] = None
+ options[ 'onoscell' ] = None
# init params as an empty list
- options['params'] = []
+ options[ 'params' ] = []
return options
- def testcasesInRange(self,index,option,args,options):
+ def testcasesInRange( self, index, option, args, options ):
'''
This method will handle a testcases list specified as a range, e.g. [1-10].
'''
- if re.match("testcases",option,1):
+ if re.match( "testcases", option, 1 ):
testcases = []
- args[index] = re.sub("\[|\]","",args[index],0)
- m = re.match("(\d+)\-(\d+)",args[index],flags=0)
+ args[ index ] = re.sub( "\[|\]", "", args[ index ], 0 )
+ m = re.match( "(\d+)\-(\d+)", args[ index ], flags=0 )
if m:
- start_case = eval(m.group(1))
- end_case = eval(m.group(2))
- if (start_case <= end_case):
+ start_case = eval( m.group( 1 ) )
+ end_case = eval( m.group( 2 ) )
+ if ( start_case <= end_case ):
i = start_case
while i <= end_case:
- testcases.append(i)
- i= i+1
- else :
+ testcases.append( i )
+ i = i + 1
+ else:
print "Please specify testcases properly like 1-5"
- else :
- options[option] = args[index]
+ else:
+ options[ option ] = args[ index ]
return options
- options[option] = str(testcases)
+ options[ option ] = str( testcases )
return options
- def cmdloop(self, intro=introduction):
+ def cmdloop( self, intro=introduction ):
print introduction
while True:
try:
- super(CLI, self).cmdloop(intro="")
+ super( CLI, self ).cmdloop( intro="" )
self.postloop()
except KeyboardInterrupt:
if testthread:
@@ -339,7 +336,7 @@
'''
Echoing of given input.
'''
- output(line)
+ output( line )
def do_sh( self, line ):
'''
@@ -349,7 +346,6 @@
'''
call( line, shell=True )
-
def do_py( self, line ):
'''
Evaluate a Python expression.
@@ -363,7 +359,7 @@
except Exception as e:
output( str( e ) + '\n' )
- def do_interpret(self,line):
+ def do_interpret( self, line ):
'''
interpret will translate a single-line OpenSpeak statement to the equivalent Python script.
@@ -373,13 +369,13 @@
'''
from core import openspeak
ospk = openspeak.OpenSpeak()
- try :
- translated_code = ospk.interpret(text=line)
+ try:
+ translated_code = ospk.interpret( text=line )
print translated_code
except AttributeError as e:
print 'Dynamic params are not allowed in single statement translations'
- def do_do (self,line):
+ def do_do( self, line ):
'''
Do will translate and execute the openspeak statement for the paused test.
do <OpenSpeak statement>
@@ -387,16 +383,16 @@
if testthread:
from core import openspeak
ospk = openspeak.OpenSpeak()
- try :
- translated_code = ospk.interpret(text=line)
- eval(translated_code)
+ try:
+ translated_code = ospk.interpret( text=line )
+ eval( translated_code )
except ( AttributeError, SyntaxError ) as e:
print 'Dynamic params are not allowed in single statement translations:'
print e
- else :
+ else:
print "Do will translate and execute the openspeak statement for the paused test.\nPlease use interpret to translate the OpenSpeak statement."
- def do_compile(self,line):
+ def do_compile( self, line ):
'''
compile will translate the OpenSpeak (.ospk) file into a TestON test script (Python).
It will receive the OpenSpeak file path as input and will generate
@@ -411,11 +407,11 @@
from core import openspeak
openspeak = openspeak.OpenSpeak()
openspeakfile = line
- if os.path.exists(openspeakfile) :
- openspeak.compiler(openspeakfile=openspeakfile,writetofile=1)
- print "Auto-generated test-script file is "+ re.sub("ospk","py",openspeakfile,0)
+ if os.path.exists( openspeakfile ):
+ openspeak.compiler( openspeakfile=openspeakfile, writetofile=1 )
+ print "Auto-generated test-script file is " + re.sub( "ospk", "py", openspeakfile, 0 )
else:
- print 'There is no such file : '+line
+ print 'There is no such file : ' + line
def do_exit( self, _line ):
"Exit"
@@ -454,7 +450,7 @@
'''
args = line.split()
- if len(args) != 1:
+ if len( args ) != 1:
error( 'usage: source <file>\n' )
return
try:
@@ -471,43 +467,43 @@
def do_time( self, line ):
"Measure time taken for any command in TestON."
start = time.time()
- self.onecmd(line)
+ self.onecmd( line )
elapsed = time.time() - start
- self.stdout.write("*** Elapsed time: %0.6f secs\n" % elapsed)
+ self.stdout.write( "*** Elapsed time: %0.6f secs\n" % elapsed )
def default( self, line ):
"""Called on an input line when the command prefix is not recognized."""
first, args, line = self.parseline( line )
if not args:
return
- if args and len(args) > 0 and args[ -1 ] == '\n':
- args = args[ :-1 ]
+ if args and len( args ) > 0 and args[ -1 ] == '\n':
+ args = args[ :-1 ]
rest = args.split( ' ' )
error( '*** Unknown command: %s\n' % first )
-class TestThread(threading.Thread):
+class TestThread( threading.Thread ):
'''
The TestThread class handles the test execution and communicates with the thread in do_run.
'''
- def __init__(self,options):
+ def __init__( self, options ):
self._stopevent = threading.Event()
- threading.Thread.__init__(self)
+ threading.Thread.__init__( self )
self.is_stop = False
self.options = options
__builtin__.testthread = self
- def run(self):
+ def run( self ):
'''
Will execute the test.
'''
- while not self.is_stop :
+ while not self.is_stop:
if not self._stopevent.isSet():
- self.test_on = TestON(self.options)
- try :
+ self.test_on = TestON( self.options )
+ try:
if self.test_on.init_result:
result = self.test_on.run()
- if not self.is_stop :
+ if not self.is_stop:
result = self.test_on.cleanup()
self.is_stop = True
except KeyboardInterrupt:
@@ -517,7 +513,7 @@
__builtin__.testthread = False
- def pause(self):
+ def pause( self ):
'''
Will pause the test.
'''
@@ -533,14 +529,14 @@
result = self.test_on.cleanup()
self.is_stop = True
- def play(self):
+ def play( self ):
'''
Will resume the paused test.
'''
self._stopevent.clear()
cli.pause = False
- def stop(self):
+ def stop( self ):
'''
Will stop the test execution.
'''
@@ -550,39 +546,40 @@
cli.stop = True
__builtin__.testthread = False
-def output(msg):
+def output( msg ):
'''
Simply print the message to the console.
'''
print msg
-def error(msg):
+def error( msg ):
'''
Print the error message.
'''
print msg
-def dictToObj(dictionary):
+def dictToObj( dictionary ):
'''
This facilitates converting a dictionary to an object.
This method helps to send options in object form to the test.
'''
- if isinstance(dictionary, list):
- dictionary = [dictToObj(x) for x in dictionary]
- if not isinstance(dictionary, dict):
+ if isinstance( dictionary, list ):
+ dictionary = [ dictToObj( x ) for x in dictionary ]
+ if not isinstance( dictionary, dict ):
return dictionary
- class Convert(object):
+
+ class Convert( object ):
pass
obj = Convert()
for k in dictionary:
- obj.__dict__[k] = dictToObj(dictionary[k])
+ obj.__dict__[ k ] = dictToObj( dictionary[ k ] )
return obj
if __name__ == '__main__':
- if len(sys.argv) > 1:
+ if len( sys.argv ) > 1:
__builtin__.COLORS = True
- CLI("test").onecmd(' '.join(sys.argv[1:]))
+ CLI( "test" ).onecmd( ' '.join( sys.argv[ 1: ] ) )
else:
__builtin__.COLORS = False
- CLI("test").cmdloop()
+ CLI( "test" ).cmdloop()
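Reviewer note: testcasesInRange still parses the range bounds with eval(); int() would be safer and equivalent for these digit-only matches. A hedged sketch of that alternative (not part of this patch):

    import re

    def expandTestcaseRange( token ):
        # Turn "[1-5]" into [ 1, 2, 3, 4, 5 ]; assumes integer bounds.
        token = re.sub( r"\[|\]", "", token )
        m = re.match( r"(\d+)-(\d+)", token )
        if not m:
            return token  # not a range; leave unchanged, as parseArgs does
        start, end = int( m.group( 1 ) ), int( m.group( 2 ) )
        if start > end:
            raise ValueError( "Please specify testcases properly like 1-5" )
        return list( range( start, end + 1 ) )

    assert expandTestcaseRange( "[1-3]" ) == [ 1, 2, 3 ]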
diff --git a/TestON/core/Thread.py b/TestON/core/Thread.py
index 4c040a6..0907dd8 100644
--- a/TestON/core/Thread.py
+++ b/TestON/core/Thread.py
@@ -38,8 +38,8 @@
if self.target is not None:
self.result = self.target( *self.args, **self.kwargs )
except Exception as e:
- print "ThreadID:" + str( self.threadID ) + ", Name:" +\
- self.name + "- something went wrong with " +\
- str( self.target.im_class ) + "." +\
+ print "ThreadID:" + str( self.threadID ) + ", Name:" + \
+ self.name + "- something went wrong with " + \
+ str( self.target.im_class ) + "." + \
str( self.target.im_func ) + " method: "
print e
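Reviewer note: the continuation-backslash change above is purely stylistic. For context, Thread.py wraps a target callable and keeps its return value for the caller; a minimal sketch of that pattern (illustrative names, not TestON's class):

    import threading

    class ResultThread( threading.Thread ):
        def __init__( self, target, *args, **kwargs ):
            threading.Thread.__init__( self )
            self.target, self.args, self.kwargs = target, args, kwargs
            self.result = None

        def run( self ):
            # Store the target's return value so callers can read it after join()
            if self.target is not None:
                self.result = self.target( *self.args, **self.kwargs )

    t = ResultThread( pow, 2, 10 )
    t.start()
    t.join()
    assert t.result == 1024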
diff --git a/TestON/core/ast.py b/TestON/core/ast.py
index fd5dfdb..c876b45 100644
--- a/TestON/core/ast.py
+++ b/TestON/core/ast.py
@@ -29,58 +29,58 @@
from _ast import __version__
-def parse(source, filename='<unknown>', mode='exec'):
+def parse( source, filename='<unknown>', mode='exec' ):
"""
Parse the source into an AST node.
Equivalent to compile(source, filename, mode, PyCF_ONLY_AST).
"""
- return compile(source, filename, mode, PyCF_ONLY_AST)
+ return compile( source, filename, mode, PyCF_ONLY_AST )
-def literal_eval(node_or_string):
+def literal_eval( node_or_string ):
"""
Safely evaluate an expression node or a string containing a Python
expression. The string or node provided may only consist of the following
Python literal structures: strings, numbers, tuples, lists, dicts, booleans,
and None.
"""
- _safe_names = {'None': None, 'True': True, 'False': False}
- if isinstance(node_or_string, basestring):
- node_or_string = parse(node_or_string, mode='eval')
- if isinstance(node_or_string, Expression):
+ _safe_names = { 'None': None, 'True': True, 'False': False }
+ if isinstance( node_or_string, basestring ):
+ node_or_string = parse( node_or_string, mode='eval' )
+ if isinstance( node_or_string, Expression ):
node_or_string = node_or_string.body
- def _convert(node):
- if isinstance(node, Str):
+
+ def _convert( node ):
+ if isinstance( node, Str ):
return node.s
- elif isinstance(node, Num):
+ elif isinstance( node, Num ):
return node.n
- elif isinstance(node, Tuple):
- return tuple(map(_convert, node.elts))
- elif isinstance(node, List):
- return list(map(_convert, node.elts))
- elif isinstance(node, Dict):
- return dict((_convert(k), _convert(v)) for k, v
- in zip(node.keys, node.values))
- elif isinstance(node, Name):
+ elif isinstance( node, Tuple ):
+ return tuple( map( _convert, node.elts ) )
+ elif isinstance( node, List ):
+ return list( map( _convert, node.elts ) )
+ elif isinstance( node, Dict ):
+ return dict( ( _convert( k ), _convert( v ) ) for k, v in zip( node.keys, node.values ) )
+ elif isinstance( node, Name ):
if node.id in _safe_names:
- return _safe_names[node.id]
- elif isinstance(node, BinOp) and \
- isinstance(node.op, (Add, Sub)) and \
- isinstance(node.right, Num) and \
- isinstance(node.right.n, complex) and \
- isinstance(node.left, Num) and \
- isinstance(node.left.n, (int, long, float)):
+ return _safe_names[ node.id ]
+ elif isinstance( node, BinOp ) and \
+ isinstance( node.op, ( Add, Sub ) ) and \
+ isinstance( node.right, Num ) and \
+ isinstance( node.right.n, complex ) and \
+ isinstance( node.left, Num ) and \
+ isinstance( node.left.n, ( int, long, float ) ):
left = node.left.n
right = node.right.n
- if isinstance(node.op, Add):
+ if isinstance( node.op, Add ):
return left + right
else:
return left - right
- raise ValueError('malformed string')
- return _convert(node_or_string)
+ raise ValueError( 'malformed string' )
+ return _convert( node_or_string )
-def dump(node, annotate_fields=True, include_attributes=False):
+def dump( node, annotate_fields=True, include_attributes=False ):
"""
Return a formatted dump of the tree in *node*. This is mainly useful for
debugging purposes. The returned string will show the names and the values
@@ -89,40 +89,39 @@
numbers and column offsets are not dumped by default. If this is wanted,
*include_attributes* can be set to True.
"""
- def _format(node):
- if isinstance(node, AST):
- fields = [(a, _format(b)) for a, b in iter_fields(node)]
- rv = '%s(%s' % (node.__class__.__name__, ', '.join(
- ('%s=%s' % field for field in fields)
+ def _format( node ):
+ if isinstance( node, AST ):
+ fields = [ ( a, _format( b ) ) for a, b in iter_fields( node ) ]
+ rv = '%s(%s' % ( node.__class__.__name__, ', '.join(
+ ( '%s=%s' % field for field in fields )
if annotate_fields else
- (b for a, b in fields)
- ))
+ ( b for a, b in fields )
+ ) )
if include_attributes and node._attributes:
rv += fields and ', ' or ' '
- rv += ', '.join('%s=%s' % (a, _format(getattr(node, a)))
- for a in node._attributes)
+ rv += ', '.join( '%s=%s' % ( a, _format( getattr( node, a ) ) ) for a in node._attributes )
return rv + ')'
- elif isinstance(node, list):
- return '[%s]' % ', '.join(_format(x) for x in node)
- return repr(node)
- if not isinstance(node, AST):
- raise TypeError('expected AST, got %r' % node.__class__.__name__)
- return _format(node)
+ elif isinstance( node, list ):
+ return '[%s]' % ', '.join( _format( x ) for x in node )
+ return repr( node )
+ if not isinstance( node, AST ):
+ raise TypeError( 'expected AST, got %r' % node.__class__.__name__ )
+ return _format( node )
-def copy_location(new_node, old_node):
+def copy_location( new_node, old_node ):
"""
Copy source location (`lineno` and `col_offset` attributes) from
*old_node* to *new_node* if possible, and return *new_node*.
"""
for attr in 'lineno', 'col_offset':
if attr in old_node._attributes and attr in new_node._attributes \
- and hasattr(old_node, attr):
- setattr(new_node, attr, getattr(old_node, attr))
+ and hasattr( old_node, attr ):
+ setattr( new_node, attr, getattr( old_node, attr ) )
return new_node
-def fix_missing_locations(node):
+def fix_missing_locations( node ):
"""
When you compile a node tree with compile(), the compiler expects lineno and
col_offset attributes for every node that supports them. This is rather
@@ -130,91 +129,91 @@
recursively where not already set, by setting them to the values of the
parent node. It works recursively starting at *node*.
"""
- def _fix(node, lineno, col_offset):
+ def _fix( node, lineno, col_offset ):
if 'lineno' in node._attributes:
- if not hasattr(node, 'lineno'):
+ if not hasattr( node, 'lineno' ):
node.lineno = lineno
else:
lineno = node.lineno
if 'col_offset' in node._attributes:
- if not hasattr(node, 'col_offset'):
+ if not hasattr( node, 'col_offset' ):
node.col_offset = col_offset
else:
col_offset = node.col_offset
- for child in iter_child_nodes(node):
- _fix(child, lineno, col_offset)
- _fix(node, 1, 0)
+ for child in iter_child_nodes( node ):
+ _fix( child, lineno, col_offset )
+ _fix( node, 1, 0 )
return node
-def increment_lineno(node, n=1):
+def increment_lineno( node, n=1 ):
"""
Increment the line number of each node in the tree starting at *node* by *n*.
This is useful to "move code" to a different location in a file.
"""
- for child in walk(node):
+ for child in walk( node ):
if 'lineno' in child._attributes:
- child.lineno = getattr(child, 'lineno', 0) + n
+ child.lineno = getattr( child, 'lineno', 0 ) + n
return node
-def iter_fields(node):
+def iter_fields( node ):
"""
Yield a tuple of ``(fieldname, value)`` for each field in ``node._fields``
that is present on *node*.
"""
for field in node._fields:
try:
- yield field, getattr(node, field)
+ yield field, getattr( node, field )
except AttributeError:
pass
-def iter_child_nodes(node):
+def iter_child_nodes( node ):
"""
Yield all direct child nodes of *node*, that is, all fields that are nodes
and all items of fields that are lists of nodes.
"""
- for name, field in iter_fields(node):
- if isinstance(field, AST):
+ for name, field in iter_fields( node ):
+ if isinstance( field, AST ):
yield field
- elif isinstance(field, list):
+ elif isinstance( field, list ):
for item in field:
- if isinstance(item, AST):
+ if isinstance( item, AST ):
yield item
-def get_docstring(node, clean=True):
+def get_docstring( node, clean=True ):
"""
Return the docstring for the given node or None if no docstring can
be found. If the node provided does not have docstrings a TypeError
will be raised.
"""
- if not isinstance(node, (FunctionDef, ClassDef, Module)):
- raise TypeError("%r can't have docstrings" % node.__class__.__name__)
- if node.body and isinstance(node.body[0], Expr) and \
- isinstance(node.body[0].value, Str):
+ if not isinstance( node, ( FunctionDef, ClassDef, Module ) ):
+ raise TypeError( "%r can't have docstrings" % node.__class__.__name__ )
+ if node.body and isinstance( node.body[ 0 ], Expr ) and \
+ isinstance( node.body[ 0 ].value, Str ):
if clean:
import inspect
- return inspect.cleandoc(node.body[0].value.s)
- return node.body[0].value.s
+ return inspect.cleandoc( node.body[ 0 ].value.s )
+ return node.body[ 0 ].value.s
-def walk(node):
+def walk( node ):
"""
Recursively yield all descendant nodes in the tree starting at *node*
(including *node* itself), in no specified order. This is useful if you
only want to modify nodes in place and don't care about the context.
"""
from collections import deque
- todo = deque([node])
+ todo = deque( [ node ] )
while todo:
node = todo.popleft()
- todo.extend(iter_child_nodes(node))
+ todo.extend( iter_child_nodes( node ) )
yield node
-class NodeVisitor(object):
+class NodeVisitor( object ):
"""
A node visitor base class that walks the abstract syntax tree and calls a
visitor function for every node found. This function may return a value
@@ -234,24 +233,24 @@
allows modifications.
"""
- def visit(self, node):
+ def visit( self, node ):
"""Visit a node."""
method = 'visit_' + node.__class__.__name__
- visitor = getattr(self, method, self.generic_visit)
- return visitor(node)
+ visitor = getattr( self, method, self.generic_visit )
+ return visitor( node )
- def generic_visit(self, node):
+ def generic_visit( self, node ):
"""Called if no explicit visitor function exists for a node."""
- for field, value in iter_fields(node):
- if isinstance(value, list):
+ for field, value in iter_fields( node ):
+ if isinstance( value, list ):
for item in value:
- if isinstance(item, AST):
- self.visit(item)
- elif isinstance(value, AST):
- self.visit(value)
+ if isinstance( item, AST ):
+ self.visit( item )
+ elif isinstance( value, AST ):
+ self.visit( value )
-class NodeTransformer(NodeVisitor):
+class NodeTransformer( NodeVisitor ):
"""
A :class:`NodeVisitor` subclass that walks the abstract syntax tree and
allows modification of nodes.
@@ -287,25 +286,25 @@
node = YourTransformer().visit(node)
"""
- def generic_visit(self, node):
- for field, old_value in iter_fields(node):
- old_value = getattr(node, field, None)
- if isinstance(old_value, list):
+ def generic_visit( self, node ):
+ for field, old_value in iter_fields( node ):
+ old_value = getattr( node, field, None )
+ if isinstance( old_value, list ):
new_values = []
for value in old_value:
- if isinstance(value, AST):
- value = self.visit(value)
+ if isinstance( value, AST ):
+ value = self.visit( value )
if value is None:
continue
- elif not isinstance(value, AST):
- new_values.extend(value)
+ elif not isinstance( value, AST ):
+ new_values.extend( value )
continue
- new_values.append(value)
- old_value[:] = new_values
- elif isinstance(old_value, AST):
- new_node = self.visit(old_value)
+ new_values.append( value )
+ old_value[ : ] = new_values
+ elif isinstance( old_value, AST ):
+ new_node = self.visit( old_value )
if new_node is None:
- delattr(node, field)
+ delattr( node, field )
else:
- setattr(node, field, new_node)
+ setattr( node, field, new_node )
return node
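Reviewer note: this file is a vendored copy of the stdlib ast module, so the hunks are formatting-only. A quick usage check of the literal_eval reformatted above, which evaluates only literals and is therefore safe on untrusted input where eval() is not:

    from ast import literal_eval

    assert literal_eval( "{'a': [1, 2], 'b': (3, 4)}" ) == { 'a': [ 1, 2 ], 'b': ( 3, 4 ) }
    try:
        literal_eval( "__import__('os')" )  # rejected: not a literal
    except ValueError:
        pass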
diff --git a/TestON/core/graph.py b/TestON/core/graph.py
index b1cbdb2..7c2988e 100644
--- a/TestON/core/graph.py
+++ b/TestON/core/graph.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+# !/usr/bin/env python
'''
Copyright 2016 Open Networking Foundation (ONF)
@@ -69,7 +69,7 @@
self.graphDict = graphDict
return main.TRUE
- def compareGraphs( self, graphDictA, graphDictB, vertexAttributes=['edges'], edgeAttributes=['port'] ):
+ def compareGraphs( self, graphDictA, graphDictB, vertexAttributes=[ 'edges' ], edgeAttributes=[ 'port' ] ):
"""
Compare two graphs.
By default only the adjacency relationship, i.e. 'port' attribute in
@@ -150,8 +150,8 @@
attributeValueA,
attributeValueB ) )
if not result:
- #main.log.debug( "Graph: graphDictA: {}".format( graphDictA ) )
- #main.log.debug( "Graph: graphDictB: {}".format( graphDictB ) )
+ # main.log.debug( "Graph: graphDictA: {}".format( graphDictA ) )
+ # main.log.debug( "Graph: graphDictB: {}".format( graphDictB ) )
pass
return result
except TypeError:
@@ -183,7 +183,7 @@
for chain in self.chains:
for edge in chain:
nonCutEdges.append( edge )
- #main.log.debug( 'Non-cut-edges: {}'.format( nonCutEdges ) )
+ # main.log.debug( 'Non-cut-edges: {}'.format( nonCutEdges ) )
return nonCutEdges
except Exception:
main.log.exception( "Graph: Uncaught exception" )
@@ -207,16 +207,16 @@
# chain, the chain is a cycle chain
if chain[ 0 ][ 0 ] == chain[ -1 ][ 1 ]:
cycleChains.append( chain )
- #main.log.debug( 'Cycle chains: {}'.format( cycleChains ) )
+ # main.log.debug( 'Cycle chains: {}'.format( cycleChains ) )
# Get a set of vertices which are the first vertices of a cycle chain (excluding the first
# cycle chain), and these vertices are a subset of all cut-vertices
subsetOfCutVertices = []
if len( cycleChains ) > 1:
for cycleChain in cycleChains[ 1: ]:
subsetOfCutVertices.append( cycleChain[ 0 ][ 0 ] )
- #main.log.debug( 'Subset of cut vertices: {}'.format( subsetOfCutVertices ) )
+ # main.log.debug( 'Subset of cut vertices: {}'.format( subsetOfCutVertices ) )
nonCutVertices = []
- assert nonCutEdges != None
+ assert nonCutEdges is not None
for vertex in self.graphDict.keys():
if vertex in subsetOfCutVertices:
continue
@@ -224,12 +224,12 @@
for neighbor in self.graphDict[ vertex ][ 'edges' ].keys():
edge = [ vertex, neighbor ]
backwardEdge = [ neighbor, vertex ]
- if not edge in nonCutEdges and not backwardEdge in nonCutEdges:
+ if edge not in nonCutEdges and backwardEdge not in nonCutEdges:
vertexIsNonCut = False
break
if vertexIsNonCut:
nonCutVertices.append( vertex )
- #main.log.debug( 'Non-cut-vertices: {}'.format( nonCutVertices ) )
+ # main.log.debug( 'Non-cut-vertices: {}'.format( nonCutVertices ) )
return nonCutVertices
except KeyError:
main.log.exception( "Graph: KeyError exception found" )
@@ -247,7 +247,7 @@
as generates the back edges
"""
try:
- assert self.graphDict != None and len( self.graphDict ) != 0
+ assert self.graphDict is not None and len( self.graphDict ) != 0
for vertex in self.graphDict.keys():
self.DFI[ vertex ] = -1
self.parentVertexInDFS[ vertex ] = ''
@@ -288,14 +288,14 @@
else:
key = self.DFI[ neighbor ]
if key in self.backEdges.keys():
- if not edge in self.backEdges[ key ] and\
- not backwardEdge in self.backEdges[ key ]:
+ if edge not in self.backEdges[ key ] and \
+ backwardEdge not in self.backEdges[ key ]:
self.backEdges[ key ].append( backwardEdge )
else:
tempKey = self.DFI[ vertex ]
if tempKey in self.backEdges.keys():
- if not edge in self.backEdges[ tempKey ] and\
- not backwardEdge in self.backEdges[ tempKey ]:
+ if edge not in self.backEdges[ tempKey ] and \
+ backwardEdge not in self.backEdges[ tempKey ]:
self.backEdges[ key ] = [ backwardEdge ]
else:
self.backEdges[ key ] = [ backwardEdge ]
@@ -329,7 +329,7 @@
nextVertex = currentEdge[ 1 ]
vertexIsVisited[ currentVertex ] = True
chain.append( currentEdge )
- if nextVertex == sourceVertex or vertexIsVisited[ nextVertex ] == True:
+ if nextVertex == sourceVertex or vertexIsVisited[ nextVertex ] is True:
break
currentEdge = self.parentEdgeInDFS[ nextVertex ]
self.chains.append( chain )
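Reviewer note: the graph.py hunks swap "not x in y" for the idiomatic "x not in y" and "!= None" for "is not None"; for the list and None values used here the pairs are behaviorally identical. A quick check with illustrative values:

    edges = [ [ 'a', 'b' ], [ 'b', 'c' ] ]
    assert ( [ 'a', 'c' ] not in edges ) == ( not [ 'a', 'c' ] in edges )
    graphDict = None
    assert ( graphDict is not None ) == ( graphDict != None )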
diff --git a/TestON/core/logger.py b/TestON/core/logger.py
index ed9b0bd..dc2b2b2 100644
--- a/TestON/core/logger.py
+++ b/TestON/core/logger.py
@@ -1,7 +1,7 @@
-#/usr/bin/env python
+# /usr/bin/env python
'''
Created on 07-Jan-2013
-Modified 2015 by ON.Lab
+Modified 2015 by Open Networking Foundation
Please refer questions to either the onos test mailing list at <onos-test@onosproject.org>,
the System Testing Plans and Results wiki page at <https://wiki.onosproject.org/x/voMg>,
@@ -33,93 +33,93 @@
@author: Raghav Kashyap(raghavkashyap@paxterrasolutions.com)
'''
- def _printHeader(self,main) :
+ def _printHeader( self, main ):
'''
The log's header will be appended to the log file.
'''
- logmsg = "\n"+" " * 32+"+----------------+\n" +"-" * 30+" { Script And Files } "+"-" * 30+"\n" +" " * 32+"+----------------+\n";
+ logmsg = "\n" + " " * 32 + "+----------------+\n" + "-" * 30 + " { Script And Files } " + "-" * 30 + "\n" + " " * 32 + "+----------------+\n"
logmsg = logmsg + "\n\tScript Log File : " + main.LogFileName + ""
logmsg = logmsg + "\n\tReport Log File : " + main.ReportFileName + ""
for component in main.componentDictionary.keys():
- logmsg = logmsg + "\n\t"+component+" Session Log : " + main.logdir+"/"+component+".session" + ""
+ logmsg = logmsg + "\n\t" + component + " Session Log : " + main.logdir + "/" + component + ".session" + ""
- logmsg = logmsg + "\n\tTest Script :" + path + "Tests/" + main.TEST + ".py"+ ""
+ logmsg = logmsg + "\n\tTest Script :" + path + "Tests/" + main.TEST + ".py" + ""
logmsg = logmsg + "\n\tTest Params : " + path + "Tests/" + main.TEST + ".params" + ""
- logmsg = logmsg + "\n\tTopology : " + path + "Tests/" +main.TEST + ".topo" + ""
- logmsg = logmsg + "\n"+" " * 30+"+" +"-" * 18+"+" +"\n" +"-" * 27+" { Script Exec Params } "+"-" * 27 +"\n" +" " * 30 +"+"+"-" * 18 +"+\n";
- values = "\n\t" + str(main.params)
- values = re.sub(",", "\n\t", values)
- values = re.sub("{", "\n\t", values)
- values = re.sub("}", "\n\t", values)
+ logmsg = logmsg + "\n\tTopology : " + path + "Tests/" + main.TEST + ".topo" + ""
+ logmsg = logmsg + "\n" + " " * 30 + "+" + "-" * 18 + "+" + "\n" + "-" * 27 + " { Script Exec Params } " + "-" * 27 + "\n" + " " * 30 + "+" + "-" * 18 + "+\n"
+ values = "\n\t" + str( main.params )
+ values = re.sub( ",", "\n\t", values )
+ values = re.sub( "{", "\n\t", values )
+ values = re.sub( "}", "\n\t", values )
logmsg = logmsg + values
- logmsg = logmsg + "\n\n"+" " * 31+"+---------------+\n" +"-" * 29+" { Components Used } " +"-" * 29+"\n"+" " * 31+"+---------------+\n"
+ logmsg = logmsg + "\n\n" + " " * 31 + "+---------------+\n" + "-" * 29 + " { Components Used } " + "-" * 29 + "\n" + " " * 31 + "+---------------+\n"
component_list = []
- component_list.append(None)
+ component_list.append( None )
# List the components in order; the test_target component should be first.
- if type(main.componentDictionary) == dict:
+ if type( main.componentDictionary ) == dict:
for key in main.componentDictionary.keys():
- if main.test_target == key :
- component_list[0] = key+"-Test Target"
- else :
- component_list.append(key)
+ if main.test_target == key:
+ component_list[ 0 ] = key + "-Test Target"
+ else:
+ component_list.append( key )
- for index in range(len(component_list)) :
- if index==0:
- if component_list[index]:
- logmsg+="\t"+component_list[index]+"\n"
- elif index > 0 :
- logmsg+="\t"+str(component_list[index])+"\n"
+ for index in range( len( component_list ) ):
+ if index == 0:
+ if component_list[ index ]:
+ logmsg += "\t" + component_list[ index ] + "\n"
+ elif index > 0:
+ logmsg += "\t" + str( component_list[ index ] ) + "\n"
- logmsg = logmsg + "\n\n"+" " * 30+"+--------+\n" +"-" * 28+" { Topology } "+"-" * 28 +"\n" +" " * 30+"+--------+\n"
- values = "\n\t" + str(main.topology['COMPONENT'])
- values = re.sub(",", "\n\t", values)
- values = re.sub("{", "\n\t", values)
- values = re.sub("}", "\n\t", values)
+ logmsg = logmsg + "\n\n" + " " * 30 + "+--------+\n" + "-" * 28 + " { Topology } " + "-" * 28 + "\n" + " " * 30 + "+--------+\n"
+ values = "\n\t" + str( main.topology[ 'COMPONENT' ] )
+ values = re.sub( ",", "\n\t", values )
+ values = re.sub( "{", "\n\t", values )
+ values = re.sub( "}", "\n\t", values )
logmsg = logmsg + values
- logmsg = logmsg + "\n"+"-" * 60+"\n"
+ logmsg = logmsg + "\n" + "-" * 60 + "\n"
# write all headers into the log file
- logfile = open(main.LogFileName,"w+")
- logfile.write (logmsg)
+ logfile = open( main.LogFileName, "w+" )
+ logfile.write( logmsg )
print logmsg
main.logHeader = logmsg
logfile.close()
- #enter into report file all headers
- main.reportFile = open(main.ReportFileName,"w+")
- main.reportFile.write(logmsg)
+ # write all headers into the report file
+ main.reportFile = open( main.ReportFileName, "w+" )
+ main.reportFile.write( logmsg )
main.reportFile.close()
- #Sumamry file header
- currentTime = str( main.STARTTIME.strftime("%d %b %Y %H:%M:%S") )
+ # Summary file header
+ currentTime = str( main.STARTTIME.strftime( "%d %b %Y %H:%M:%S" ) )
main.summaryFile = open( main.SummaryFileName, "w+" )
main.summaryFile.write( main.TEST + " at " + currentTime + "\n" )
main.summaryFile.close()
- #wiki file header
- currentTime = str( main.STARTTIME.strftime("%d %b %Y %H:%M:%S") )
+ # wiki file header
+ currentTime = str( main.STARTTIME.strftime( "%d %b %Y %H:%M:%S" ) )
main.wikiFile = open( main.WikiFileName, "w+" )
main.wikiFile.write( main.TEST + " at " + currentTime + "<p></p>\n" )
main.wikiFile.close()
- def initlog(self,main):
+ def initlog( self, main ):
'''
Initialise all the log handles.
'''
main._getTest()
main.STARTTIME = datetime.datetime.now()
- currentTime = re.sub("-|\s|:|\.", "_", str(main.STARTTIME.strftime("%d %b %Y %H:%M:%S")))
+ currentTime = re.sub( "-|\s|:|\.", "_", str( main.STARTTIME.strftime( "%d %b %Y %H:%M:%S" ) ) )
if main.logdir:
- main.logdir = main.logdir+ "/"+main.TEST + "_" + currentTime
+ main.logdir = main.logdir + "/" + main.TEST + "_" + currentTime
else:
main.logdir = main.logs_path + main.TEST + "_" + currentTime
- os.mkdir(main.logdir)
+ os.mkdir( main.logdir )
- main.LogFileName = main.logdir + "/" + main.TEST + "_" +str(currentTime) + ".log"
- main.ReportFileName = main.logdir + "/" + main.TEST + "_" + str(currentTime) + ".rpt"
+ main.LogFileName = main.logdir + "/" + main.TEST + "_" + str( currentTime ) + ".log"
+ main.ReportFileName = main.logdir + "/" + main.TEST + "_" + str( currentTime ) + ".rpt"
main.WikiFileName = main.logdir + "/" + main.TEST + "Wiki.txt"
main.SummaryFileName = main.logdir + "/" + main.TEST + "Summary.txt"
main.JenkinsCSV = main.logdir + "/" + main.TEST + ".csv"
@@ -127,23 +127,24 @@
main.TOTAL_TC_SUCCESS = 0
- #### Add log-level - Report
- logging.addLevelName(9, "REPORT")
- logging.addLevelName(7, "EXACT")
- logging.addLevelName(11, "CASE")
- logging.addLevelName(12, "STEP")
- main.log = logging.getLogger(main.TEST)
- def report(msg):
+ # Add log-level - Report
+ logging.addLevelName( 9, "REPORT" )
+ logging.addLevelName( 7, "EXACT" )
+ logging.addLevelName( 11, "CASE" )
+ logging.addLevelName( 12, "STEP" )
+ main.log = logging.getLogger( main.TEST )
+
+ def report( msg ):
'''
Will append the report message to the logs.
'''
- main.log._log(9,msg,"OpenFlowAutoMattion","OFAutoMation")
+ main.log._log( 9, msg, "OpenFlowAutoMattion", "OFAutoMation" )
currentTime = datetime.datetime.now()
- currentTime = currentTime.strftime("%d %b %Y %H:%M:%S")
- newmsg = "\n[REPORT] " +"["+ str(currentTime)+"] "+msg
+ currentTime = currentTime.strftime( "%d %b %Y %H:%M:%S" )
+ newmsg = "\n[REPORT] " + "[" + str( currentTime ) + "] " + msg
print newmsg
- main.reportFile = open(main.ReportFileName,"a+")
- main.reportFile.write(newmsg)
+ main.reportFile = open( main.ReportFileName, "a+" )
+ main.reportFile.write( newmsg )
main.reportFile.close()
main.log.report = report
@@ -152,9 +153,9 @@
'''
Will append the message to the txt file for the summary.
'''
- main.log._log(6,msg,"OpenFlowAutoMattion","OFAutoMation")
- main.summaryFile = open(main.SummaryFileName,"a+")
- main.summaryFile.write(msg+"\n")
+ main.log._log( 6, msg, "OpenFlowAutoMattion", "OFAutoMation" )
+ main.summaryFile = open( main.SummaryFileName, "a+" )
+ main.summaryFile.write( msg + "\n" )
main.summaryFile.close()
main.log.summary = summary
@@ -163,90 +164,91 @@
'''
Will append the message to the txt file for the wiki.
'''
- main.log._log(6,msg,"OpenFlowAutoMattion","OFAutoMation")
- main.wikiFile = open(main.WikiFileName,"a+")
- main.wikiFile.write(msg+"\n")
+ main.log._log( 6, msg, "OpenFlowAutoMattion", "OFAutoMation" )
+ main.wikiFile = open( main.WikiFileName, "a+" )
+ main.wikiFile.write( msg + "\n" )
main.wikiFile.close()
main.log.wiki = wiki
- def exact(exmsg):
+ def exact( exmsg ):
'''
Will append the raw formatted message to the logs
'''
- main.log._log(7,exmsg,"OpenFlowAutoMattion","OFAutoMation")
- main.reportFile = open(main.ReportFileName,"a+")
- main.reportFile.write(exmsg)
+ main.log._log( 7, exmsg, "OpenFlowAutoMattion", "OFAutoMation" )
+ main.reportFile = open( main.ReportFileName, "a+" )
+ main.reportFile.write( exmsg )
main.reportFile.close()
- logfile = open(main.LogFileName,"a")
- logfile.write("\n"+ str(exmsg) +"\n")
+ logfile = open( main.LogFileName, "a" )
+ logfile.write( "\n" + str( exmsg ) + "\n" )
logfile.close()
print exmsg
main.log.exact = exact
- def case(msg):
+ def case( msg ):
'''
Format of the case type log defined here.
'''
- main.log._log(9,msg,"OpenFlowAutoMattion","OFAutoMation")
+ main.log._log( 9, msg, "OpenFlowAutoMattion", "OFAutoMation" )
currentTime = datetime.datetime.now()
- newmsg = "["+str(currentTime)+"] " + "["+main.TEST+"] " + "[CASE] " +msg
- logfile = open(main.LogFileName,"a")
- logfile.write("\n"+ str(newmsg) +"\n")
+ newmsg = "[" + str( currentTime ) + "] " + "[" + main.TEST + "] " + "[CASE] " + msg
+ logfile = open( main.LogFileName, "a" )
+ logfile.write( "\n" + str( newmsg ) + "\n" )
logfile.close()
print newmsg
main.log.case = case
- def step(msg):
+ def step( msg ):
'''
Format of the step type log defined here.
'''
- main.log._log(9,msg,"OpenFlowAutoMattion","OFAutoMation")
+ main.log._log( 9, msg, "OpenFlowAutoMattion", "OFAutoMation" )
currentTime = datetime.datetime.now()
- newmsg = "["+str(currentTime)+"] " + "["+main.TEST+"] " + "[STEP] " +msg
- logfile = open(main.LogFileName,"a")
- logfile.write("\n"+ str(newmsg) +"\n")
+ newmsg = "[" + str( currentTime ) + "] " + "[" + main.TEST + "] " + "[STEP] " + msg
+ logfile = open( main.LogFileName, "a" )
+ logfile.write( "\n" + str( newmsg ) + "\n" )
logfile.close()
print newmsg
main.log.step = step
- main.LogFileHandler = logging.FileHandler(main.LogFileName)
- self._printHeader(main)
+ main.LogFileHandler = logging.FileHandler( main.LogFileName )
+ self._printHeader( main )
- ### initializing logging module and settig log level
- main.log.setLevel(logging.INFO)
- main.log.setLevel(logging.DEBUG) # Temporary
- main.LogFileHandler.setLevel(logging.INFO)
+ # initializing logging module and setting log level
+ main.log.setLevel( logging.INFO )
+ main.log.setLevel( logging.DEBUG ) # Temporary
+ main.LogFileHandler.setLevel( logging.INFO )
# create console handler with a higher log level
main.ConsoleHandler = logging.StreamHandler()
- main.ConsoleHandler.setLevel(logging.INFO)
- main.ConsoleHandler.setLevel(logging.DEBUG) #Temporary
+ main.ConsoleHandler.setLevel( logging.INFO )
+ main.ConsoleHandler.setLevel( logging.DEBUG ) # Temporary
# create formatter and add it to the handlers
- #formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+ # formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+
class MyFormatter( logging.Formatter ):
colors = { 'cyan': '\033[96m', 'purple': '\033[95m',
'blue': '\033[94m', 'green': '\033[92m',
'yellow': '\033[93m', 'red': '\033[91m',
'end': '\033[0m' }
- FORMATS = {'DEFAULT': '%(asctime)s - %(name)s - %(levelname)s - %(message)s'}
+ FORMATS = { 'DEFAULT': '%(asctime)s - %(name)s - %(levelname)s - %(message)s' }
if COLORS: # NOTE:colors will only be loaded if command is run from one line
# IE: './cli.py run testname'
# This is to prevent issues with Jenkins parsing
# TODO: Make colors configurable
- levels = { logging.ERROR : colors['red'] +
- FORMATS['DEFAULT'] +
- colors['end'],
- logging.WARN : colors['yellow'] +
- FORMATS['DEFAULT'] +
- colors['end'],
- logging.DEBUG : colors['purple'] +
- FORMATS['DEFAULT'] +
- colors['end'] }
+ levels = { logging.ERROR: colors[ 'red' ] +
+ FORMATS[ 'DEFAULT' ] +
+ colors[ 'end' ],
+ logging.WARN: colors[ 'yellow' ] +
+ FORMATS[ 'DEFAULT' ] +
+ colors[ 'end' ],
+ logging.DEBUG: colors[ 'purple' ] +
+ FORMATS[ 'DEFAULT' ] +
+ colors[ 'end' ] }
FORMATS.update( levels )
def format( self, record ):
@@ -254,89 +256,89 @@
self.FORMATS[ 'DEFAULT' ] )
return logging.Formatter.format( self, record )
formatter = MyFormatter()
- main.ConsoleHandler.setFormatter(formatter)
- main.LogFileHandler.setFormatter(formatter)
+ main.ConsoleHandler.setFormatter( formatter )
+ main.LogFileHandler.setFormatter( formatter )
# add the handlers to logger
- main.log.addHandler(main.ConsoleHandler)
- main.log.addHandler(main.LogFileHandler)
+ main.log.addHandler( main.ConsoleHandler )
+ main.log.addHandler( main.LogFileHandler )
- def testSummary(self,main):
+ def testSummary( self, main ):
'''
testSummary will take care of the summary of the test.
'''
main.ENDTIME = datetime.datetime.now()
main.EXECTIME = main.ENDTIME - main.STARTTIME
- if (main.TOTAL_TC_PASS == 0):
+ if ( main.TOTAL_TC_PASS == 0 ):
main.TOTAL_TC_SUCCESS = 0
else:
- main.TOTAL_TC_SUCCESS = str((main.TOTAL_TC_PASS*100)/main.TOTAL_TC_RUN)
- if (main.TOTAL_TC_RUN == 0) :
+ main.TOTAL_TC_SUCCESS = str( ( main.TOTAL_TC_PASS * 100 ) / main.TOTAL_TC_RUN )
+ if ( main.TOTAL_TC_RUN == 0 ):
main.TOTAL_TC_EXECPERCENT = 0
- else :
- main.TOTAL_TC_EXECPERCENT = str((main.TOTAL_TC_RUN*100)/main.TOTAL_TC_PLANNED)
- testResult = "\n\n"+"*" * 37+"\n" + "\tTest Execution Summary\n" + "\n"+"*" * 37+" \n"
- testResult = testResult + "\n Test Start : " + str(main.STARTTIME.strftime("%d %b %Y %H:%M:%S"))
- testResult = testResult + "\n Test End : " + str(main.ENDTIME.strftime("%d %b %Y %H:%M:%S"))
- testResult = testResult + "\n Execution Time : " + str(main.EXECTIME)
- testResult = testResult + "\n Total tests planned : " + str(main.TOTAL_TC_PLANNED)
- testResult = testResult + "\n Total tests RUN : " + str(main.TOTAL_TC_RUN)
- testResult = testResult + "\n Total Pass : " + str(main.TOTAL_TC_PASS)
- testResult = testResult + "\n Total Fail : " + str(main.TOTAL_TC_FAIL)
- testResult = testResult + "\n Total No Result : " + str(main.TOTAL_TC_NORESULT)
- testResult = testResult + "\n Success Percentage : " + str(main.TOTAL_TC_SUCCESS) + "%"
- testResult = testResult + "\n Execution Result : " + str(main.TOTAL_TC_EXECPERCENT) + "%\n"
+ else:
+ main.TOTAL_TC_EXECPERCENT = str( ( main.TOTAL_TC_RUN * 100 ) / main.TOTAL_TC_PLANNED )
+ testResult = "\n\n" + "*" * 37 + "\n" + "\tTest Execution Summary\n" + "\n" + "*" * 37 + " \n"
+ testResult = testResult + "\n Test Start : " + str( main.STARTTIME.strftime( "%d %b %Y %H:%M:%S" ) )
+ testResult = testResult + "\n Test End : " + str( main.ENDTIME.strftime( "%d %b %Y %H:%M:%S" ) )
+ testResult = testResult + "\n Execution Time : " + str( main.EXECTIME )
+ testResult = testResult + "\n Total tests planned : " + str( main.TOTAL_TC_PLANNED )
+ testResult = testResult + "\n Total tests RUN : " + str( main.TOTAL_TC_RUN )
+ testResult = testResult + "\n Total Pass : " + str( main.TOTAL_TC_PASS )
+ testResult = testResult + "\n Total Fail : " + str( main.TOTAL_TC_FAIL )
+ testResult = testResult + "\n Total No Result : " + str( main.TOTAL_TC_NORESULT )
+ testResult = testResult + "\n Success Percentage : " + str( main.TOTAL_TC_SUCCESS ) + "%"
+ testResult = testResult + "\n Execution Result : " + str( main.TOTAL_TC_EXECPERCENT ) + "%\n"
if main.failedCase:
- testResult = testResult + "\n Case Failed : " + str( main.failedCase )
+ testResult = testResult + "\n Case Failed : " + str( main.failedCase )
if main.noResultCase:
- testResult = testResult + "\n Case NoResult : " + str( main.noResultCase )
- testResult = testResult + "\n Case Executed : " + str( main.executedCase )
- testResult = testResult + "\n Case Not Executed : " + str( main.leftCase )
- #main.log.report(testResult)
+ testResult = testResult + "\n Case NoResult : " + str( main.noResultCase )
+ testResult = testResult + "\n Case Executed : " + str( main.executedCase )
+ testResult = testResult + "\n Case Not Executed : " + str( main.leftCase )
+ # main.log.report(testResult)
main.testResult = testResult
- main.log.exact(testResult)
+ main.log.exact( testResult )
- ##CSV output needed for Jenkin's plot plugin
- #NOTE: the elements were orded based on the colors assigned to the data
- logfile = open(main.JenkinsCSV ,"w")
- logfile.write(",".join( ['Tests Failed', 'Tests Passed', 'Tests Planned'] ) + "\n")
- logfile.write(",".join( [str(int(main.TOTAL_TC_FAIL)), str(int(main.TOTAL_TC_PASS)), str(int(main.TOTAL_TC_PLANNED))] ) + "\n")
+ # CSV output needed for Jenkins' plot plugin
+ # NOTE: the elements were ordered based on the colors assigned to the data
+ logfile = open( main.JenkinsCSV, "w" )
+ logfile.write( ",".join( [ 'Tests Failed', 'Tests Passed', 'Tests Planned' ] ) + "\n" )
+ logfile.write( ",".join( [ str( int( main.TOTAL_TC_FAIL ) ), str( int( main.TOTAL_TC_PASS ) ), str( int( main.TOTAL_TC_PLANNED ) ) ] ) + "\n" )
logfile.close()
- executedStatus = open(main.resultFile, "w")
+ executedStatus = open( main.resultFile, "w" )
if main.TOTAL_TC_FAIL == 0 and main.TOTAL_TC_NORESULT + main.TOTAL_TC_PASS == main.TOTAL_TC_PLANNED:
- executedStatus.write("1\n")
+ executedStatus.write( "1\n" )
else:
- executedStatus.write("0\n")
+ executedStatus.write( "0\n" )
+ executedStatus.write( "[Total]:" + str( main.TOTAL_TC_PLANNED ) + " [Executed]:" + str( main.TOTAL_TC_RUN ) + " [Failed]:" + str( main.TOTAL_TC_FAIL ) + "\n" )
executedStatus.close()
- def updateCaseResults(self,main):
+ def updateCaseResults( self, main ):
'''
Update the case result based on the steps' execution and the assertion of each step in the test case.
'''
- case = str(main.CurrentTestCaseNumber)
- currentResult = main.testCaseResult.get(case, 2)
+ case = str( main.CurrentTestCaseNumber )
+ currentResult = main.testCaseResult.get( case, 2 )
if currentResult == 2:
- main.TOTAL_TC_RUN = main.TOTAL_TC_RUN + 1
+ main.TOTAL_TC_RUN = main.TOTAL_TC_RUN + 1
main.TOTAL_TC_NORESULT = main.TOTAL_TC_NORESULT + 1
- main.log.exact("\n "+"*" * 29+"\n" + "\n Result: No Assertion Called \n"+"*" * 29+"\n")
- line = "Case "+case+": "+main.CurrentTestCase+" - No Result"
+ main.log.exact( "\n " + "*" * 29 + "\n" + "\n Result: No Assertion Called \n" + "*" * 29 + "\n" )
+ line = "Case " + case + ": " + main.CurrentTestCase + " - No Result"
elif currentResult == 1:
- main.TOTAL_TC_RUN = main.TOTAL_TC_RUN + 1
- main.TOTAL_TC_PASS = main.TOTAL_TC_PASS + 1
- main.log.exact("\n"+"*" * 29+"\n Result: Pass \n"+"*" * 29+"\n")
- line = "Case "+case+": "+main.CurrentTestCase+" - PASS"
+ main.TOTAL_TC_RUN = main.TOTAL_TC_RUN + 1
+ main.TOTAL_TC_PASS = main.TOTAL_TC_PASS + 1
+ main.log.exact( "\n" + "*" * 29 + "\n Result: Pass \n" + "*" * 29 + "\n" )
+ line = "Case " + case + ": " + main.CurrentTestCase + " - PASS"
elif currentResult == 0:
- main.TOTAL_TC_RUN = main.TOTAL_TC_RUN + 1
+ main.TOTAL_TC_RUN = main.TOTAL_TC_RUN + 1
main.TOTAL_TC_FAIL = main.TOTAL_TC_FAIL + 1
- main.log.exact("\n"+"*" * 29+"\n Result: Failed \n"+"*" * 29+"\n")
- line = "Case "+case+": "+main.CurrentTestCase+" - FAIL"
+ main.log.exact( "\n" + "*" * 29 + "\n Result: Failed \n" + "*" * 29 + "\n" )
+ line = "Case " + case + ": " + main.CurrentTestCase + " - FAIL"
else:
main.log.error( " Unknown result of case " + case +
". Result was: " + currentResult )
- line = "Case "+case+": "+main.CurrentTestCase+" - ERROR"
+ line = "Case " + case + ": " + main.CurrentTestCase + " - ERROR"
main.log.wiki( "<h3>" + line + "</h3>" )
main.log.summary( line )
-
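Reviewer note: initlog() registers custom numeric levels via logging.addLevelName, as reformatted above. A minimal standalone sketch of that stdlib pattern:

    import logging

    logging.addLevelName( 9, "REPORT" )
    logging.basicConfig( level=5, format='%(levelname)s - %(message)s' )
    log = logging.getLogger( "demo" )
    log.log( 9, "this record prints with the level name REPORT" )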
diff --git a/TestON/core/openspeak.py b/TestON/core/openspeak.py
index b98c68b..1b351f9 100644
--- a/TestON/core/openspeak.py
+++ b/TestON/core/openspeak.py
@@ -1,4 +1,4 @@
-#/usr/bin/env python
+# /usr/bin/env python
'''
Created on 20-Dec-2012
Modified 2015 by ON.Lab
@@ -20,764 +20,745 @@
You should have received a copy of the GNU General Public License
along with TestON. If not, see <http://www.gnu.org/licenses/>.
-
'''
import re
import inspect
-
class OpenSpeak:
- def __init__(self):
+ def __init__( self ):
self.default = ''
self.flag = 0
self.CurrentStep = 0
self.grtrOrLssr = 0
- def compiler(self,**compileParameters):
+ def compiler( self, **compileParameters ):
'''
This method will parse the OpenSpeak file and write the equivalent translations to a Python module.
It can accept OpenSpeak syntax as a string or an OpenSpeak file as an input parameter.
The translated form will be written into a Python module if the attribute "WRITETOFILE" is set to 1.
'''
- args = self.parse_args(["OPENSPEAKFILE","TEXT","WRITETOFILE","FILEHANDLE"],**compileParameters)
+ args = self.parse_args( [ "OPENSPEAKFILE", "TEXT", "WRITETOFILE", "FILEHANDLE" ], **compileParameters )
resultString = ''
Test = "Mininet"
- args["WRITETOFILE"] = args["WRITETOFILE"] if args["WRITETOFILE"] != None else 1
+ args[ "WRITETOFILE" ] = args[ "WRITETOFILE" ] if args[ "WRITETOFILE" ] is not None else 1
self.CurrentStep = 0
self.CurrentCase = ''
- ## here Open Speak file will be parsed by each line and translated.
- if args["OPENSPEAKFILE"] !=None and args["TEXT"] ==None and args["FILEHANDLE"] == None:
- self.openspeakfile = args["OPENSPEAKFILE"]
- openSpeakFile = open(args["OPENSPEAKFILE"],"r").readlines()
+ # here the OpenSpeak file will be parsed line by line and translated.
+ if args[ "OPENSPEAKFILE" ] is not None and args[ "TEXT" ] is None and args[ "FILEHANDLE" ] is None:
+ self.openspeakfile = args[ "OPENSPEAKFILE" ]
+ openSpeakFile = open( args[ "OPENSPEAKFILE" ], "r" ).readlines()
- elif args["OPENSPEAKFILE"] ==None and args["TEXT"] and args["FILEHANDLE"] == None:
- openSpeakFile = args["TEXT"].split("\n")
- elif args["FILEHANDLE"] and args["OPENSPEAKFILE"] ==None and args["TEXT"] ==None:
- openSpeakFile = args["FILEHANDLE"].readlines()
+ elif args[ "OPENSPEAKFILE" ] is None and args[ "TEXT" ] and args[ "FILEHANDLE" ] is None:
+ openSpeakFile = args[ "TEXT" ].split( "\n" )
+ elif args[ "FILEHANDLE" ] and args[ "OPENSPEAKFILE" ] is None and args[ "TEXT" ] is None:
+ openSpeakFile = args[ "FILEHANDLE" ].readlines()
index = 0
outputFile = []
- testName = re.search("\/(.*)\.ospk$",self.openspeakfile,0)
- testName = testName.group(1)
- testName = testName.split("/")
- testName = testName[len(testName)-1]
- outputFile.append("\nclass " + testName + " :" + "\n")
- outputFile.append("\n" + " " * 4 + "def __init__(self) :")
- outputFile.append("\n" + " " * 8 + "self.default = \'\'" + "\n")
+ testName = re.search( "\/(.*)\.ospk$", self.openspeakfile, 0 )
+ testName = testName.group( 1 )
+ testName = testName.split( "/" )
+ testName = testName[ len( testName ) - 1 ]
+ outputFile.append( "\nclass " + testName + " :" + "\n" )
+ outputFile.append( "\n" + " " * 4 + "def __init__(self) :" )
+ outputFile.append( "\n" + " " * 8 + "self.default = \'\'" + "\n" )
- while index < len(openSpeakFile):
- ifelseMatch = re.match("\s+IF|\s+ELSE|\s+ELIF",openSpeakFile[index],flags=0)
- line = openSpeakFile[index]
- repeatMatch = re.match("\s*REPEAT", openSpeakFile[index], flags=0)
- if ifelseMatch :
- result = self.verify_and_translate(line)
- initialSpaces = len(line) -len(line.lstrip())
+ while index < len( openSpeakFile ):
+ ifelseMatch = re.match( "\s+IF|\s+ELSE|\s+ELIF", openSpeakFile[ index ], flags=0 )
+ line = openSpeakFile[ index ]
+ repeatMatch = re.match( "\s*REPEAT", openSpeakFile[ index ], flags=0 )
+ if ifelseMatch:
+ result = self.verify_and_translate( line )
+ initialSpaces = len( line ) - len( line.lstrip() )
self.outLoopSpace = initialSpaces
- nextLine = openSpeakFile[index+1]
- nextinitialSpaces = len(nextLine) -len(nextLine.lstrip())
+ nextLine = openSpeakFile[ index + 1 ]
+ nextinitialSpaces = len( nextLine ) - len( nextLine.lstrip() )
-
- while nextinitialSpaces > initialSpaces :
- try :
- elseMatch = re.match("\s*ELSE|\s*ELIF",nextLine,flags=0)
- if elseMatch :
- self.flag = self.flag -1
- result = result + self.verify_and_translate(nextLine)
- nextLine = openSpeakFile[index + 1]
- nextinitialSpaces = len(nextLine) -len(nextLine.lstrip())
+ while nextinitialSpaces > initialSpaces:
+ try:
+ elseMatch = re.match( "\s*ELSE|\s*ELIF", nextLine, flags=0 )
+ if elseMatch:
+ self.flag = self.flag - 1
+ result = result + self.verify_and_translate( nextLine )
+ nextLine = openSpeakFile[ index + 1 ]
+ nextinitialSpaces = len( nextLine ) - len( nextLine.lstrip() )
except IndexError:
pass
index = index + 1
self.flag = 0
elif repeatMatch:
self.flag = 0
- result = self.verify_and_translate(line)
+ result = self.verify_and_translate( line )
index = index + 1
- endMatch = re.match("\s*END",openSpeakFile[index],flags=0)
- while not endMatch :
- try :
+ endMatch = re.match( "\s*END", openSpeakFile[ index ], flags=0 )
+ while not endMatch:
+ try:
self.flag = self.flag + 1
- result = result + self.verify_and_translate(openSpeakFile[index])
+ result = result + self.verify_and_translate( openSpeakFile[ index ] )
index = index + 1
- except IndexError :
+ except IndexError:
pass
-
- else :
+ else:
self.flag = 0
- result = self.verify_and_translate(line)
+ result = self.verify_and_translate( line )
index = index + 1
- outputFile.append(result)
+ outputFile.append( result )
- if args["WRITETOFILE"] == 1 :
- testscript = re.sub("ospk","py",self.openspeakfile,0)
- testScript = open(testscript,"w")
- for lines in outputFile :
- testScript.write(lines)
+ if args[ "WRITETOFILE" ] == 1:
+ testscript = re.sub( "ospk", "py", self.openspeakfile, 0 )
+ testScript = open( testscript, "w" )
+ for lines in outputFile:
+ testScript.write( lines )
testScript.close()
return resultString
- def verify_and_translate(self,line):
+ def verify_and_translate( self, line ):
'''
It will accept each line and call the suitable API to convert it into the equivalent Python syntax.
It will return the translated Python syntax.
'''
- lineSpace = re.match("^\s+",line,flags=0)
- initialSpaces = len(line) -len(line.lstrip())
- line = re.sub("^\s+","",line) if lineSpace else line
-
+ lineSpace = re.match( "^\s+", line, flags=0 )
+ initialSpaces = len( line ) - len( line.lstrip() )
+ line = re.sub( "^\s+", "", line ) if lineSpace else line
resultString = None
- resultString = "\n" + " " * 4 if str(inspect.stack()[1][3]) == "compiler" else "\n"
- indent = " " *(4 + 4 * self.flag) if self.flag > 0 else " " * 4
- caseMatch = re.search("^CASE\s+(\d+)",line,flags=0)
- nameMatch = re.match("^NAME\s+\"(.*)\"",line,flags=0)
- commentMatch = re.match("^COMMENT\s+\"(.*)\"",line,flags=0)
- stepMatch = re.match("^STEP\s+\"(.*)\"",line,flags=0)
- connectMatch = re.match("^CONNECT\s+(\w+)\s+USING\s+(.*)",line,flags=0)
- disconnectMatch = re.match("^DISCONNECT\s+(.*)",line,flags=0)
- ondoMatch = re.match("^ON\s+(.*)\s+DO\s+(.*)",line,flags=0)
+ resultString = "\n" + " " * 4 if str( inspect.stack()[ 1 ][ 3 ] ) == "compiler" else "\n"
+ indent = " " * ( 4 + 4 * self.flag ) if self.flag > 0 else " " * 4
+ caseMatch = re.search( "^CASE\s+(\d+)", line, flags=0 )
+ nameMatch = re.match( "^NAME\s+\"(.*)\"", line, flags=0 )
+ commentMatch = re.match( "^COMMENT\s+\"(.*)\"", line, flags=0 )
+ stepMatch = re.match( "^STEP\s+\"(.*)\"", line, flags=0 )
+ connectMatch = re.match( "^CONNECT\s+(\w+)\s+USING\s+(.*)", line, flags=0 )
+ disconnectMatch = re.match( "^DISCONNECT\s+(.*)", line, flags=0 )
+ ondoMatch = re.match( "^ON\s+(.*)\s+DO\s+(.*)", line, flags=0 )
- storeMatch = re.match("^STORE\s+(.*)\s+IN\s+(.*)",line,flags=0)
- variableMatch = re.match("^(.*)\s+=\s+(.*)",line,flags=0)
- assertMatch = re.match("^ASSERT\s+(\w+)\s+(.*)\s+(.*)\s+ONPASS\s+(.*)\s+ONFAIL\s+(.*)",line,flags=0)
- logMatch = re.match("^(ERROR|INFO|DEBUG|CRITICAL|REPORT|EXACT|WARN)\s+(.*)",line,flags=0)
- ifloop = re.match("IF\s+(\w+)\s*(..|\w+)\s*(.*)",line,flags=0)
- elseloopMatch = re.match("ELSE\s*$",line,flags=0)
- elifloop = re.match("ELSE\sIF\s+(\w+)\s*(..|\w+)\s*(.*)",line,flags=0)
- forloopMatch = re.match("\s*REPEAT\s+(/d+)\s+TIMES",line,flags=0)
- experimentalMatch = re.match("EXPERIMENTAL\s+MODE\s+(\w+)",line,flags=0)
- repeatMatch = re.match("\s*REPEAT\s+(\d+)\s+TIMES", line, flags=0)
+ storeMatch = re.match( "^STORE\s+(.*)\s+IN\s+(.*)", line, flags=0 )
+ variableMatch = re.match( "^(.*)\s+=\s+(.*)", line, flags=0 )
+ assertMatch = re.match( "^ASSERT\s+(\w+)\s+(.*)\s+(.*)\s+ONPASS\s+(.*)\s+ONFAIL\s+(.*)", line, flags=0 )
+ logMatch = re.match( "^(ERROR|INFO|DEBUG|CRITICAL|REPORT|EXACT|WARN)\s+(.*)", line, flags=0 )
+ ifloop = re.match( "IF\s+(\w+)\s*(..|\w+)\s*(.*)", line, flags=0 )
+ elseloopMatch = re.match( "ELSE\s*$", line, flags=0 )
+ elifloop = re.match( "ELSE\sIF\s+(\w+)\s*(..|\w+)\s*(.*)", line, flags=0 )
+ forloopMatch = re.match( "\s*REPEAT\s+(/d+)\s+TIMES", line, flags=0 )
+ experimentalMatch = re.match( "EXPERIMENTAL\s+MODE\s+(\w+)", line, flags=0 )
+ repeatMatch = re.match( "\s*REPEAT\s+(\d+)\s+TIMES", line, flags=0 )
- response_pasrse = re.match("\s*PARSE\s+(\w+)\s+AS\s+(\w+)\s+INTO\s+(\w+)", line, flags=0)
+ response_parse = re.match( "\s*PARSE\s+(\w+)\s+AS\s+(\w+)\s+INTO\s+(\w+)", line, flags=0 )
- if caseMatch :
+ if caseMatch:
self.CurrentStep = 0
- self.CurrentCase = "CASE" + caseMatch.group(1)
- resultString = resultString + self.translate_case_block(casenumber=caseMatch.group(1))
+ self.CurrentCase = "CASE" + caseMatch.group( 1 )
+ resultString = resultString + self.translate_case_block( casenumber=caseMatch.group( 1 ) )
elif repeatMatch:
- resultString = resultString + indent + self.translate_repeat(repeat=repeatMatch.group(1))
- elif nameMatch :
- resultString = resultString + indent + self.translate_testcase_name(testname=nameMatch.group(1))
- elif commentMatch :
- resultString = resultString + indent + self.translate_comment(comment=commentMatch.group(1))
- elif stepMatch :
+ resultString = resultString + indent + self.translate_repeat( repeat=repeatMatch.group( 1 ) )
+ elif nameMatch:
+ resultString = resultString + indent + self.translate_testcase_name( testname=nameMatch.group( 1 ) )
+ elif commentMatch:
+ resultString = resultString + indent + self.translate_comment( comment=commentMatch.group( 1 ) )
+ elif stepMatch:
self.CurrentStep = self.CurrentStep + 1
- resultString = resultString + indent + self.translate_step(step=stepMatch.group(1))
- elif connectMatch :
- resultString = resultString + indent + self.translate_connect(component=connectMatch.group(1),
- arguments=connectMatch.group(2) )
- elif disconnectMatch :
- resultString = resultString + indent + self.translate_disconnect(component=disconnectMatch.group(1))
- elif ondoMatch :
- resultString = resultString + indent + self.translate_onDOAs(component=ondoMatch.group(1),action=ondoMatch.group(2))
- elif storeMatch :
- resultString = resultString + indent + self.translate_store(variable=storeMatch.group(2),
- value=storeMatch.group(1))
- elif variableMatch :
- resultString = resultString + indent + self.translate_store(variable=variableMatch.group(1),
- value=variableMatch.group(2))
- elif assertMatch :
- resultString = resultString + indent + self.translate_assertion(leftvalue=assertMatch.group(1),
- operator=assertMatch.group(2),
- rightvalue=assertMatch.group(3),
- onpass=assertMatch.group(4),
- onfail=assertMatch.group(5))
- elif logMatch :
- resultString = resultString + indent + self.translate_logs(loglevel=logMatch.group(1),
- message=logMatch.group(2))
- elif ifloop :
+ resultString = resultString + indent + self.translate_step( step=stepMatch.group( 1 ) )
+ elif connectMatch:
+ resultString = resultString + indent + self.translate_connect( component=connectMatch.group( 1 ),
+ arguments=connectMatch.group( 2 ) )
+ elif disconnectMatch:
+ resultString = resultString + indent + self.translate_disconnect( component=disconnectMatch.group( 1 ) )
+ elif ondoMatch:
+ resultString = resultString + indent + self.translate_onDOAs( component=ondoMatch.group( 1 ), action=ondoMatch.group( 2 ) )
+ elif storeMatch:
+ resultString = resultString + indent + self.translate_store( variable=storeMatch.group( 2 ),
+ value=storeMatch.group( 1 ) )
+ elif variableMatch:
+ resultString = resultString + indent + self.translate_store( variable=variableMatch.group( 1 ),
+ value=variableMatch.group( 2 ) )
+ elif assertMatch:
+ resultString = resultString + indent + self.translate_assertion( leftvalue=assertMatch.group( 1 ),
+ operator=assertMatch.group( 2 ),
+ rightvalue=assertMatch.group( 3 ),
+ onpass=assertMatch.group( 4 ),
+ onfail=assertMatch.group( 5 ) )
+ elif logMatch:
+ resultString = resultString + indent + self.translate_logs( loglevel=logMatch.group( 1 ),
+ message=logMatch.group( 2 ) )
+ elif ifloop:
self.initSpace = initialSpaces
- operand = ifloop.group(1)
- operator = ifloop.group(2)
- value = ifloop.group(3)
- resultString = resultString + indent + "if " + operand + self.translate_if_else_operator(conditionoperator=operator) + value + ":"
+ operand = ifloop.group( 1 )
+ operator = ifloop.group( 2 )
+ value = ifloop.group( 3 )
+ resultString = resultString + indent + "if " + operand + self.translate_if_else_operator( conditionoperator=operator ) + value + ":"
self.flag = self.flag + 1
- elif experimentalMatch :
- resultString = resultString + indent + self.translate_experimental_mode(mode=experimentalMatch.group(1))
+ elif experimentalMatch:
+ resultString = resultString + indent + self.translate_experimental_mode( mode=experimentalMatch.group( 1 ) )
- elif elseloopMatch :
+ elif elseloopMatch:
if initialSpaces == self.initSpace or initialSpaces == self.outLoopSpace:
resultString = resultString + indent + "else :"
self.flag = self.flag + 1
- else :
- indent = " " *(4 + 4 * (self.flag-1))
+ else:
+ indent = " " * ( 4 + 4 * ( self.flag - 1 ) )
resultString = resultString + indent + "else :"
self.flag = self.flag + 1
- elif elifloop :
+ elif elifloop:
- operand = elifloop.group(1)
- operator = elifloop.group(2)
- value = elifloop.group(3)
+ operand = elifloop.group( 1 )
+ operator = elifloop.group( 2 )
+ value = elifloop.group( 3 )
if initialSpaces == self.initSpace or initialSpaces == self.outLoopSpace:
- resultString = resultString + indent + "elif " + operand + self.translate_if_else_operator(conditionoperator=operator) + value + ":"
+ resultString = resultString + indent + "elif " + operand + self.translate_if_else_operator( conditionoperator=operator ) + value + ":"
self.flag = self.flag + 1
- else :
- indent = " " *(4 + 4 * (self.flag-1))
- resultString = resultString + indent + "elif " + operand + self.translate_if_else_operator(conditionoperator=operator) + value + ":"
+ else:
+ indent = " " * ( 4 + 4 * ( self.flag - 1 ) )
+ resultString = resultString + indent + "elif " + operand + self.translate_if_else_operator( conditionoperator=operator ) + value + ":"
self.flag = self.flag + 1
- elif response_pasrse :
- output_string = response_pasrse.group(1)
- req_format = response_pasrse.group(2)
- store_in = response_pasrse.group(3)
- resultString = resultString + indent + store_in +'= main.response_parser('+output_string+",\""+req_format+"\")"
+ elif response_pasrse:
+ output_string = response_pasrse.group( 1 )
+ req_format = response_pasrse.group( 2 )
+ store_in = response_pasrse.group( 3 )
+ resultString = resultString + indent + store_in + ' = main.response_parser(' + output_string + ",\"" + req_format + "\")"
self.flag = self.flag + 1
return resultString
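# Illustrative end-to-end examples (not part of the patch): a few OpenSpeak
# lines and the python the branches above produce for them; the exact LOG
# statement form is assumed, the component name is hypothetical:
#   NAME "PingallExample"   ->  main.case("PingallExample")
#   STEP "Ping all hosts"   ->  main.step("Ping all hosts")
#   ON Mininet1 DO pingall  ->  main.Mininet1.pingall()
#   LOG INFO "pingall done" ->  main.log.info( "pingall done" )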
- def translate_repeat(self,**repeatStatement):
+ def translate_repeat( self, **repeatStatement ):
'''
This will translate the REPEAT statement into a python equivalent while loop.
'''
- args = self.parse_args(["REPEAT"],**repeatStatement)
+ args = self.parse_args( [ "REPEAT" ], **repeatStatement )
resultString = ''
resultString = "i = 0"
- resultString = resultString + "\n" + " " * 8 +"while i<" + args["REPEAT"] + " :"
+ resultString = resultString + "\n" + " " * 8 + "while i<" + args[ "REPEAT" ] + " :"
return resultString
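# Illustrative sketch (not part of the patch): the REPEAT rule above as a
# standalone function; the name repeat_sketch is hypothetical.
def repeat_sketch( repeat ):
    # "REPEAT 5" becomes a counter initialisation plus a while-loop header,
    # indented with the hard-coded 8 spaces used above
    return "i = 0" + "\n" + " " * 8 + "while i<" + repeat + " :"

print repeat_sketch( "5" )
# i = 0
#         while i<5 :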
- def translate_if_else_operator(self,**loopBlock):
+ def translate_if_else_operator( self, **loopBlock ):
'''
This method will translate an if-else block into its equivalent python code.
The whole loop block will be passed in as the loopBlock list.
It returns the translated result as a string.
'''
- args = self.parse_args(["CONDITIONOPERATOR"],**loopBlock)
+ args = self.parse_args( [ "CONDITIONOPERATOR" ], **loopBlock )
resultString = ''
# process the loopBlock List translate all statements underlying the given loop block
- equalsMatch = re.match("EQUALS$|==\s*$",args["CONDITIONOPERATOR"],flags=0)
- greaterMatch = re.match("GREATER\s+THAN$|>\s*$",args["CONDITIONOPERATOR"],flags=0)
- lesserMatch = re.match("LESSER\s+THAN$|<\s*$",args["CONDITIONOPERATOR"],flags=0)
- greaterEqualMatch = re.match("GREATER\s+THAN\s+OR\s+EQUALS$|>=\s*$",args["CONDITIONOPERATOR"],flags=0)
- lesserEqualMatch = re.match("LESSER\s+THAN\s+OR\s+EQUALS$|<=\s*$",args["CONDITIONOPERATOR"],flags=0)
- if equalsMatch :
+ equalsMatch = re.match( "EQUALS$|==\s*$", args[ "CONDITIONOPERATOR" ], flags=0 )
+ greaterMatch = re.match( "GREATER\s+THAN$|>\s*$", args[ "CONDITIONOPERATOR" ], flags=0 )
+ lesserMatch = re.match( "LESSER\s+THAN$|<\s*$", args[ "CONDITIONOPERATOR" ], flags=0 )
+ greaterEqualMatch = re.match( "GREATER\s+THAN\s+OR\s+EQUALS$|>=\s*$", args[ "CONDITIONOPERATOR" ], flags=0 )
+ lesserEqualMatch = re.match( "LESSER\s+THAN\s+OR\s+EQUALS$|<=\s*$", args[ "CONDITIONOPERATOR" ], flags=0 )
+ if equalsMatch:
resultString = resultString + " == "
- elif greaterMatch :
+ elif greaterMatch:
resultString = resultString + " > "
- elif lesserMatch :
+ elif lesserMatch:
resultString = resultString + " < "
elif greaterEqualMatch:
resultString = resultString + " >= "
- elif lesserEqualMatch :
+ elif lesserEqualMatch:
resultString = resultString + " <= "
- else :
+ else:
print "\n Error: Given Operator is not listed "
return resultString
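# Illustrative sketch (not part of the patch): the keyword-to-symbol mapping
# the method above implements, as a plain dictionary; OPERATOR_MAP is a
# hypothetical name, and the symbolic aliases ( ==, >, <, >=, <= ) map to
# themselves with padding.
OPERATOR_MAP = {
    "EQUALS": " == ",
    "GREATER THAN": " > ",
    "LESSER THAN": " < ",
    "GREATER THAN OR EQUALS": " >= ",
    "LESSER THAN OR EQUALS": " <= ",
}
print "if count" + OPERATOR_MAP[ "GREATER THAN" ] + "3" + ":"
# if count > 3: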
- def translate_experimental_mode(self,**modeType):
+ def translate_experimental_mode( self, **modeType ):
'''
This API will translate the statement EXPERIMENTAL MODE ON/OFF into its python equivalent.
It will return the translated value.
'''
- args = self.parse_args(["MODE"],**modeType)
+ args = self.parse_args( [ "MODE" ], **modeType )
resultString = ''
- ONmatch = re.match("\s*ON",args["MODE"],flags=0)
- OFFmatch = re.match("\sOFF",args["MODE"],flags=0)
+ ONmatch = re.match( "\s*ON", args[ "MODE" ], flags=0 )
+ OFFmatch = re.match( "\s*OFF", args[ "MODE" ], flags=0 )
- if ONmatch :
+ if ONmatch:
resultString = "main.EXPERIMENTAL_MODE = main.TRUE"
- elif OFFmatch :
+ elif OFFmatch:
resultString = "main.EXPERIMENTAL_MODE = main.FALSE"
return resultString
- def interpret(self,**interpetParameters):
+ def interpret( self, **interpetParameters ):
'''
This method will accept the OpenSpeak syntax as a string and will return
the python equivalent translated statement
'''
- args = self.parse_args(["TEXT","WRITETOFILE"],**interpetParameters)
+ args = self.parse_args( [ "TEXT", "WRITETOFILE" ], **interpetParameters )
resultString = ''
- ## here Open Speak syntax will be translated into python equivalent.
- resultString = self.verify_and_translate(args["TEXT"])
- lineSpace = re.match("^\s+",resultString,flags=0)
+ # here Open Speak syntax will be translated into python equivalent.
+ resultString = self.verify_and_translate( args[ "TEXT" ] )
+ lineSpace = re.match( "^\s+", resultString, flags=0 )
- resultString = re.sub("^\s+","",resultString) if lineSpace else resultString
+ resultString = re.sub( "^\s+", "", resultString ) if lineSpace else resultString
return resultString
- def translate_logs(self,**logStatement):
+ def translate_logs( self, **logStatement ):
'''
This will translate the OpenSpeak log message statements into python equivalent
to resultString and returns resultString
'''
- args = self.parse_args(["LOGLEVEL","MESSAGE"],**logStatement)
+ args = self.parse_args( [ "LOGLEVEL", "MESSAGE" ], **logStatement )
resultString = ''
# convert the statement here
- message = self.translate_log_message(message=args["MESSAGE"])
- if args["LOGLEVEL"] == "INFO" :
- resultString = resultString + "main.log.info(" + message + ")"
- elif args["LOGLEVEL"] == "ERROR" :
- resultString = resultString + "main.log.error(" + message + ")"
- elif args["LOGLEVEL"] == "DEBUG" :
- resultString = resultString + "main.log.debug(" + message + ")"
- elif args["LOGLEVEL"] == "REPORT" :
- resultString = resultString + "main.log.report(" + message + ")"
- elif args["LOGLEVEL"] == "CRITICAL" :
- resultString = resultString + "main.log.critical(" + message + ")"
- elif args["LOGLEVEL"] == "WARN" :
- resultString = resultString + "main.log.warn(" + args["MESSAGE"] + ")"
- elif args["LOGLEVEL"] == "EXACT" :
- resultString = resultString + "main.log.exact(" + args["MESSAGE"] + ")"
-
+ message = self.translate_log_message( message=args[ "MESSAGE" ] )
+ if args[ "LOGLEVEL" ] == "INFO":
+ resultString = resultString + "main.log.info( " + message + " )"
+ elif args[ "LOGLEVEL" ] == "ERROR":
+ resultString = resultString + "main.log.error( " + message + " )"
+ elif args[ "LOGLEVEL" ] == "DEBUG":
+ resultString = resultString + "main.log.debug( " + message + " )"
+ elif args[ "LOGLEVEL" ] == "REPORT":
+ resultString = resultString + "main.log.report( " + message + " )"
+ elif args[ "LOGLEVEL" ] == "CRITICAL":
+ resultString = resultString + "main.log.critical( " + message + " )"
+ elif args[ "LOGLEVEL" ] == "WARN":
+ resultString = resultString + "main.log.warn( " + args[ "MESSAGE" ] + ")"
+ elif args[ "LOGLEVEL" ] == "EXACT":
+ resultString = resultString + "main.log.exact( " + args[ "MESSAGE" ] + ")"
return resultString
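# Illustrative sketch (not part of the patch): the seven-way dispatch above
# reduces to "main.log.<level lowercased>( <message> )"; log_call_sketch is
# a hypothetical name.
def log_call_sketch( loglevel, message ):
    return "main.log." + loglevel.lower() + "( " + message + " )"

print log_call_sketch( "INFO", "\"Pinging all hosts\"" )
# main.log.info( "Pinging all hosts" )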
- def translate_log_message(self,**messageStatement) :
+ def translate_log_message( self, **messageStatement ):
'''
This API will translate log messages if it is a string or Variable or combination
of string and variable.
It will return the analysed and translated message.
'''
- args = self.parse_args(["MESSAGE"],**messageStatement)
+ args = self.parse_args( [ "MESSAGE" ], **messageStatement )
resultString = ''
- paramsMatch = re.match("PARAMS\[(.*)\]|STEP\[(.*)\]|TOPO\[(.*)\]|CASE\[(.*)\]|LAST_RESULT|LAST_RESPONSE",args["MESSAGE"],flags=0)
- stringMatch = re.match("\s*\"(.*)\"\s*$",args["MESSAGE"],flags=0)
- stringWidVariableMatch = re.match("\"(.*)\"\s+\+\s+(.*)",args["MESSAGE"],flags=0)
- varRefMatch = re.search("\<(\w+)\>",args["MESSAGE"],flags=0)
- if paramsMatch :
- resultString = resultString + self.translate_parameters(parameters=args["MESSAGE"])
- elif stringMatch :
- resultString = resultString + args["MESSAGE"]
+ paramsMatch = re.match( "PARAMS\[(.*)\]|STEP\[(.*)\]|TOPO\[(.*)\]|CASE\[(.*)\]|LAST_RESULT|LAST_RESPONSE", args[ "MESSAGE" ], flags=0 )
+ stringMatch = re.match( "\s*\"(.*)\"\s*$", args[ "MESSAGE" ], flags=0 )
+ stringWidVariableMatch = re.match( "\"(.*)\"\s+\+\s+(.*)", args[ "MESSAGE" ], flags=0 )
+ varRefMatch = re.search( "\<(\w+)\>", args[ "MESSAGE" ], flags=0 )
+ if paramsMatch:
+ resultString = resultString + self.translate_parameters( parameters=args[ "MESSAGE" ] )
+ elif stringMatch:
+ resultString = resultString + args[ "MESSAGE" ]
elif stringWidVariableMatch:
- quoteWord = stringWidVariableMatch.group(1)
- variableRef = stringWidVariableMatch.group(2)
- varMatch = re.search("PARAMS\[(.*)\]|STEP\[(.*)\]|TOPO\[(.*)\]|CASE\[(.*)\]",variableRef,flags=0)
- varRefMatch = re.search("\<(\w+)\>",variableRef,flags=0)
- if varMatch :
- resultString = resultString + "\"" + quoteWord + "\"" + " + " + self.translate_parameters(parameters=variableRef)
- elif varRefMatch :
- resultString = resultString + "\"" + quoteWord + "\"" + " + " + varRefMatch.group(1)
+ quoteWord = stringWidVariableMatch.group( 1 )
+ variableRef = stringWidVariableMatch.group( 2 )
+ varMatch = re.search( "PARAMS\[(.*)\]|STEP\[(.*)\]|TOPO\[(.*)\]|CASE\[(.*)\]", variableRef, flags=0 )
+ varRefMatch = re.search( "\<(\w+)\>", variableRef, flags=0 )
+ if varMatch:
+ resultString = resultString + "\"" + quoteWord + "\"" + " + " + self.translate_parameters( parameters=variableRef )
+ elif varRefMatch:
+ resultString = resultString + "\"" + quoteWord + "\"" + " + " + varRefMatch.group( 1 )
elif varRefMatch:
- resultString = resultString + varRefMatch.group(1)
- else :
- print "\nError : Syntax error , Not defined way to give log message" + args["MESSAGE"]
+ resultString = resultString + varRefMatch.group( 1 )
+ else:
+ print "\nError : Syntax error , Not defined way to give log message" + args[ "MESSAGE" ]
return resultString
- def translate_assertion(self,**assertStatement):
+ def translate_assertion( self, **assertStatement ):
'''
This will translate the ASSERT <value1> <COMPARISON OPERATOR> <value2> into python
equivalent to resultString and returns resultString
'''
- args = self.parse_args(["LEFTVALUE","OPERATOR","RIGHTVALUE","ONPASS","ONFAIL"],**assertStatement)
+ args = self.parse_args( [ "LEFTVALUE", "OPERATOR", "RIGHTVALUE", "ONPASS", "ONFAIL" ], **assertStatement )
resultString = ''
# convert the statement here
- notOperatorMatch = re.search("NOT\s+(.*)",args["OPERATOR"],flags=0)
- notOperatorSymbMatch = re.search("\!(.*)",args["OPERATOR"],flags=0)
+ notOperatorMatch = re.search( "NOT\s+(.*)", args[ "OPERATOR" ], flags=0 )
+ notOperatorSymbMatch = re.search( "\!(.*)", args[ "OPERATOR" ], flags=0 )
operator = ''
- lastresultMatch = re.match("LAST_RESULT",args["RIGHTVALUE"],flags=0)
- lastresponseMatch = re.match("LAST_RESPONSE",args["RIGHTVALUE"],flags=0)
- if lastresultMatch :
+ lastresultMatch = re.match( "LAST_RESULT", args[ "RIGHTVALUE" ], flags=0 )
+ lastresponseMatch = re.match( "LAST_RESPONSE", args[ "RIGHTVALUE" ], flags=0 )
+ if lastresultMatch:
operator = "main.last_result"
- elif lastresponseMatch :
+ elif lastresponseMatch:
operator = "main.last_response"
- else :
- operator = args["RIGHTVALUE"]
+ else:
+ operator = args[ "RIGHTVALUE" ]
- if args["OPERATOR"] == None or args["OPERATOR"] == "" :
+ if args[ "OPERATOR" ] is None or args[ "OPERATOR" ] == "":
print "\n Error : Operator has not been specified !!!"
elif notOperatorMatch or notOperatorSymbMatch:
- operators = notOperatorMatch.group(1) if notOperatorMatch else notOperatorSymbMatch.group(1)
- operators = self.translate_operator(operator=operators)
- if self.grtrOrLssr == 0 :
- resultString = resultString + "utilities.assert_not_" + operators + "(expect=" +\
- self.translate_response_result(operator=args["RIGHTVALUE"]) + ",actual=" + self.translate_response_result(operator=args["LEFTVALUE"]) +\
- ",onpass=" + self.translate_assertMessage(message=args["ONPASS"]) +\
- ",onfail=" + self.translate_assertMessage(message=args["ONFAIL"]) + ")"
- else :
- resultString = resultString + "utilities.assert_not_" + operators + "(expect=" +\
- self.translate_response_result(operator=args["LEFTVALUE"]) + ",actual=" + self.translate_response_result(operator=args["RIGHTVALUE"]) +\
- ",onpass=" + self.translate_assertMessage(message=args["ONPASS"]) +\
- ",onfail=" + self.translate_assertMessage(message=args["ONFAIL"]) + ")"
+ operators = notOperatorMatch.group( 1 ) if notOperatorMatch else notOperatorSymbMatch.group( 1 )
+ operators = self.translate_operator( operator=operators )
+ if self.grtrOrLssr == 0:
+ resultString = resultString + "utilities.assert_not_" + operators + "(expect=" + \
+ self.translate_response_result( operator=args[ "RIGHTVALUE" ] ) + ",actual=" + self.translate_response_result( operator=args[ "LEFTVALUE" ] ) + \
+ ",onpass=" + self.translate_assertMessage( message=args[ "ONPASS" ] ) + \
+ ",onfail=" + self.translate_assertMessage( message=args[ "ONFAIL" ] ) + ")"
+ else:
+ resultString = resultString + "utilities.assert_not_" + operators + "(expect=" + \
+ self.translate_response_result( operator=args[ "LEFTVALUE" ] ) + ",actual=" + self.translate_response_result( operator=args[ "RIGHTVALUE" ] ) + \
+ ",onpass=" + self.translate_assertMessage( message=args[ "ONPASS" ] ) + \
+ ",onfail=" + self.translate_assertMessage( message=args[ "ONFAIL" ] ) + ")"
- else :
- operators = self.translate_operator(operator=args["OPERATOR"])
- if self.grtrOrLssr == 0 :
- resultString = resultString + "utilities.assert_" + operators + "(expect=" +\
- self.translate_response_result(operator=args["RIGHTVALUE"]) +\
- ",actual=" + self.translate_response_result(operator=args["LEFTVALUE"]) +\
- ",onpass=" + self.translate_assertMessage(message=args["ONPASS"]) +\
- ",onfail=" + self.translate_assertMessage(message=args["ONFAIL"]) + ")"
- else :
- resultString = resultString + "utilities.assert_" + operators + "(expect=" +\
- self.translate_response_result(operator=args["LEFTVALUE"]) +\
- ",actual=" + self.translate_response_result(operator=args["RIGHTVALUE"]) +\
- ",onpass=" + self.translate_assertMessage(message=args["ONPASS"]) +\
- ",onfail=" + self.translate_assertMessage(message=args["ONFAIL"]) + ")"
-
+ else:
+ operators = self.translate_operator( operator=args[ "OPERATOR" ] )
+ if self.grtrOrLssr == 0:
+ resultString = resultString + "utilities.assert_" + operators + "(expect=" + \
+ self.translate_response_result( operator=args[ "RIGHTVALUE" ] ) + \
+ ",actual=" + self.translate_response_result( operator=args[ "LEFTVALUE" ] ) + \
+ ",onpass=" + self.translate_assertMessage( message=args[ "ONPASS" ] ) + \
+ ",onfail=" + self.translate_assertMessage( message=args[ "ONFAIL" ] ) + ")"
+ else:
+ resultString = resultString + "utilities.assert_" + operators + "(expect=" + \
+ self.translate_response_result( operator=args[ "LEFTVALUE" ] ) + \
+ ",actual=" + self.translate_response_result( operator=args[ "RIGHTVALUE" ] ) + \
+ ",onpass=" + self.translate_assertMessage( message=args[ "ONPASS" ] ) + \
+ ",onfail=" + self.translate_assertMessage( message=args[ "ONFAIL" ] ) + ")"
return resultString
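# Illustrative example (not part of the patch), assuming the OpenSpeak line
#   ASSERT result EQUALS main.TRUE ONPASS "ping ok" ONFAIL "ping failed"
# With grtrOrLssr == 0, the equals branch above generates roughly:
#   utilities.assert_equals(expect=main.TRUE,actual=result,
#                           onpass="ping ok",onfail="ping failed")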
- def translate_response_result(self,**operatorStatement):
+ def translate_response_result( self, **operatorStatement ):
'''
It will translate the LAST_RESPONSE or LAST_RESULT statement into its equivalent.
It returns the translated form in resultString.
'''
- args = self.parse_args(["OPERATOR"],**operatorStatement)
+ args = self.parse_args( [ "OPERATOR" ], **operatorStatement )
resultString = ''
- lastResultMatch = re.match("LAST_RESULT",args["OPERATOR"],flags=0)
- lastResponseMatch = re.match("LAST_RESPONSE",args["OPERATOR"],flags=0)
- if lastResultMatch :
+ lastResultMatch = re.match( "LAST_RESULT", args[ "OPERATOR" ], flags=0 )
+ lastResponseMatch = re.match( "LAST_RESPONSE", args[ "OPERATOR" ], flags=0 )
+ if lastResultMatch:
resultString = resultString + "main.last_result"
elif lastResponseMatch:
resultString = resultString + "main.last_response"
- else :
- resultString = resultString + args["OPERATOR"]
+ else:
+ resultString = resultString + args[ "OPERATOR" ]
return resultString
-
- def translate_assertMessage(self,**messageStatement) :
+ def translate_assertMessage( self, **messageStatement ):
'''
This API will facilitate the translation of assert ONPASS or ONFAIL messages. The message can be
a string or a call to another API in OpenSpeak syntax.
It will return the translated message
'''
- args = self.parse_args(["MESSAGE"],**messageStatement)
+ args = self.parse_args( [ "MESSAGE" ], **messageStatement )
- connectMatch = re.search("CONNECT\s+(\w+)\s+USING\s+(.*)",args["MESSAGE"],flags=0)
- disconnectMatch = re.search("DISCONNECT\s+(.*)",args["MESSAGE"],flags=0)
- ondoMatch = re.search("ON\s+(.*)\s+DO\s+(.*)",args["MESSAGE"],flags=0)
- paramsMatch = re.search("PARAMS\[(.*)\]|STEP\[(.*)\]|TOPO\[(.*)\]|CASE\[(.*)\]",args["MESSAGE"],flags=0)
- stringMatch = re.search("\"(.*)\"|\'(.*)\'",args["MESSAGE"],flags=0)
- variableMatch = re.search("\<(.*)\>",args["MESSAGE"],flags=0)
+ connectMatch = re.search( "CONNECT\s+(\w+)\s+USING\s+(.*)", args[ "MESSAGE" ], flags=0 )
+ disconnectMatch = re.search( "DISCONNECT\s+(.*)", args[ "MESSAGE" ], flags=0 )
+ ondoMatch = re.search( "ON\s+(.*)\s+DO\s+(.*)", args[ "MESSAGE" ], flags=0 )
+ paramsMatch = re.search( "PARAMS\[(.*)\]|STEP\[(.*)\]|TOPO\[(.*)\]|CASE\[(.*)\]", args[ "MESSAGE" ], flags=0 )
+ stringMatch = re.search( "\"(.*)\"|\'(.*)\'", args[ "MESSAGE" ], flags=0 )
+ variableMatch = re.search( "\<(.*)\>", args[ "MESSAGE" ], flags=0 )
resultString = ''
- if connectMatch :
- resultString = resultString + self.translate_connect(component=connectMatch.group(1),
- arguments=connectMatch.group(2) )
- elif disconnectMatch :
- resultString = resultString + self.translate_disconnect(component=disconnectMatch.group(1))
- elif ondoMatch :
- resultString = resultString + self.translate_onDOAs(component=ondoMatch.group(1),
- action=ondoMatch.group(2))
- elif paramsMatch :
- resultString = resultString + self.translate_parameters(parameters=args["MESSAGE"])
- elif stringMatch :
- resultString = resultString + "\"" + stringMatch.group(1) + "\""
- elif variableMatch :
- resultString = resultString + variableMatch.group(1)
- elif args["MESSAGE"] == None :
+ if connectMatch:
+ resultString = resultString + self.translate_connect( component=connectMatch.group( 1 ),
+ arguments=connectMatch.group( 2 ) )
+ elif disconnectMatch:
+ resultString = resultString + self.translate_disconnect( component=disconnectMatch.group( 1 ) )
+ elif ondoMatch:
+ resultString = resultString + self.translate_onDOAs( component=ondoMatch.group( 1 ),
+ action=ondoMatch.group( 2 ) )
+ elif paramsMatch:
+ resultString = resultString + self.translate_parameters( parameters=args[ "MESSAGE" ] )
+ elif stringMatch:
+ resultString = resultString + "\"" + stringMatch.group( 1 ) + "\""
+ elif variableMatch:
+ resultString = resultString + variableMatch.group( 1 )
+ elif args[ "MESSAGE" ] is None:
print "\n Error : Please pass a message or action for assertion "
return resultString
- def translate_operator(self,**operatorStatement) :
+ def translate_operator( self, **operatorStatement ):
'''
It will translate the operator for an assertion by checking it against the given arguments.
It will return the translated assertion operator.
'''
- args = self.parse_args(["OPERATOR"],**operatorStatement)
+ args = self.parse_args( [ "OPERATOR" ], **operatorStatement )
resultString = ''
- equalsMatch = re.match("EQUALS$|==$",args["OPERATOR"],flags=0)
- greaterMatch = re.match("GREATER\s+THAN$|>$",args["OPERATOR"],flags=0)
- lesserMatch = re.match("LESSER\s+THAN$|<$",args["OPERATOR"],flags=0)
- stringMatch = re.match("MATCHES|~$",args["OPERATOR"],flags=0)
- greaterEqualMatch = re.match("GREATER\s+THAN\s+OR\s+EQUALS$|>=$",args["OPERATOR"],flags=0)
- lesserEqualMatch = re.match("LESSER\s+THAN\s+OR\s+EQUALS$|<=$",args["OPERATOR"],flags=0)
- if equalsMatch :
+ equalsMatch = re.match( "EQUALS$|==$", args[ "OPERATOR" ], flags=0 )
+ greaterMatch = re.match( "GREATER\s+THAN$|>$", args[ "OPERATOR" ], flags=0 )
+ lesserMatch = re.match( "LESSER\s+THAN$|<$", args[ "OPERATOR" ], flags=0 )
+ stringMatch = re.match( "MATCHES|~$", args[ "OPERATOR" ], flags=0 )
+ greaterEqualMatch = re.match( "GREATER\s+THAN\s+OR\s+EQUALS$|>=$", args[ "OPERATOR" ], flags=0 )
+ lesserEqualMatch = re.match( "LESSER\s+THAN\s+OR\s+EQUALS$|<=$", args[ "OPERATOR" ], flags=0 )
+ if equalsMatch:
resultString = resultString + "equals"
- elif greaterMatch :
+ elif greaterMatch:
self.grtrOrLssr = self.grtrOrLssr + 1
resultString = resultString + "greater"
- elif lesserMatch :
+ elif lesserMatch:
self.grtrOrLssr = self.grtrOrLssr + 1
resultString = resultString + "lesser"
- elif stringMatch :
+ elif stringMatch:
resultString = resultString + "matches"
elif greaterEqualMatch:
resultString = resultString + "greater_equals"
- elif lesserEqualMatch :
+ elif lesserEqualMatch:
resultString = resultString + "lesser_equals"
- else :
+ else:
print "\n Error: Given Operator is not listed for assertion"
return resultString
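# Illustrative mapping (not part of the patch) from operator keyword or symbol
# to the utilities.assert_* suffix returned above:
#   EQUALS or ==                  ->  "equals"
#   GREATER THAN or >             ->  "greater"        (also bumps grtrOrLssr)
#   LESSER THAN or <              ->  "lesser"         (also bumps grtrOrLssr)
#   MATCHES or ~                  ->  "matches"
#   GREATER THAN OR EQUALS or >=  ->  "greater_equals"
#   LESSER THAN OR EQUALS or <=   ->  "lesser_equals"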
- def translate_store(self,**storeStatement):
+ def translate_store( self, **storeStatement ):
'''
This will translate the STORE <variable> IN <value> or <variable> = <value>
into python equivalent to resultString and returns resultString
'''
- args = self.parse_args(["VARIABLE","VALUE"],**storeStatement)
+ args = self.parse_args( [ "VARIABLE", "VALUE" ], **storeStatement )
resultString = ''
# convert the statement here
- ondoMatch = re.match("^\s*ON\s+(.*)\s+DO\s+(.*)",args["VALUE"],flags=0)
- paramsMatch = re.match("^\s*PARAMS\[(.*)\]|STEP\[(.*)\]|TOPO\[(.*)\]|CASE\[(.*)\]|LAST_RESULT|LAST_RESPONSE",args["VALUE"],flags=0)
- if paramsMatch :
- argString = self.translate_parameters(parameters=args["VALUE"])
- resultString = args["VARIABLE"] + " = " + argString
- elif ondoMatch :
- resultString = args["VARIABLE"] + " = " + self.translate_onDOAs(component=ondoMatch.group(1),action=ondoMatch.group(2))
- else :
- resultString = args["VARIABLE"] + " = " + args["VALUE"]
-
+ ondoMatch = re.match( "^\s*ON\s+(.*)\s+DO\s+(.*)", args[ "VALUE" ], flags=0 )
+ paramsMatch = re.match( "^\s*PARAMS\[(.*)\]|STEP\[(.*)\]|TOPO\[(.*)\]|CASE\[(.*)\]|LAST_RESULT|LAST_RESPONSE", args[ "VALUE" ], flags=0 )
+ if paramsMatch:
+ argString = self.translate_parameters( parameters=args[ "VALUE" ] )
+ resultString = args[ "VARIABLE" ] + " = " + argString
+ elif ondoMatch:
+ resultString = args[ "VARIABLE" ] + " = " + self.translate_onDOAs( component=ondoMatch.group( 1 ), action=ondoMatch.group( 2 ) )
+ else:
+ resultString = args[ "VARIABLE" ] + " = " + args[ "VALUE" ]
return resultString
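# Illustrative examples (not part of the patch) of the STORE translation; the
# component and variable names are hypothetical:
#   STORE "1" IN count                   ->  count = "1"
#   STORE PARAMS["CASE1"] IN caseParams  ->  caseParams = main.params['CASE1']
#   STORE ON Mininet1 DO pingall IN res  ->  res = main.Mininet1.pingall()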
- def translate_disconnect(self,**disconnectStatement):
+ def translate_disconnect( self, **disconnectStatement ):
'''
This will translate the DISCONNECT <component_name> into python
equivalent to resultString and returns resultString
'''
- args = self.parse_args(["COMPONENT"],**disconnectStatement)
+ args = self.parse_args( [ "COMPONENT" ], **disconnectStatement )
resultString = ''
# convert the statement here
- resultString = "main." + args["COMPONENT"] + ".disconnect()"
+ resultString = "main." + args[ "COMPONENT" ] + ".disconnect()"
return resultString
- def translate_onDOAs(self,**onDoStatement):
+ def translate_onDOAs( self, **onDoStatement ):
'''
This will translate the ON <component> DO <action> USING <arg1> AS <value1>,<arg2> AS <value2>
into python equivalent to resultString and returns resultString
'''
- args = self.parse_args(["COMPONENT","ACTION","ARGUMENTS"],**onDoStatement)
+ args = self.parse_args( [ "COMPONENT", "ACTION", "ARGUMENTS" ], **onDoStatement )
subString = ''
- usingMatch = re.match("\s*(.*)\s+USING\s+(.*)",args["ACTION"],flags=0)
+ usingMatch = re.match( "\s*(.*)\s+USING\s+(.*)", args[ "ACTION" ], flags=0 )
action = ''
- if usingMatch :
- action = usingMatch.group(1)
- arguments = usingMatch.group(2)
- subString = self.translate_usingas(arguments=arguments)
+ if usingMatch:
+ action = usingMatch.group( 1 )
+ arguments = usingMatch.group( 2 )
+ subString = self.translate_usingas( arguments=arguments )
- else :
- andCheck = re.search ("(.*)\s+AND\s+(.*)",args["ACTION"],flags=0)
+ else:
+ andCheck = re.search( "(.*)\s+AND\s+(.*)", args[ "ACTION" ], flags=0 )
action = action + "()"
if andCheck:
- action = andCheck.group(1) + "()"
- subString = subString + self.handle_conjuction(statement=andCheck.group(2))
- else :
- action = args["ACTION"]
+ action = andCheck.group( 1 ) + "()"
+ subString = subString + self.handle_conjuction( statement=andCheck.group( 2 ) )
+ else:
+ action = args[ "ACTION" ]
action = action + "()"
# convert the statement here
- resultString = "main." + args["COMPONENT"] + "." + action + subString
+ resultString = "main." + args[ "COMPONENT" ] + "." + action + subString
return resultString
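# Illustrative examples (not part of the patch), assuming an instance `t` of
# this translator class and hypothetical component/action names:
#   t.translate_onDOAs( component="Mininet1", action="pingall" )
#       -> "main.Mininet1.pingall()"
#   t.translate_onDOAs( component="Mininet1", action='pingHost USING src AS "h1"' )
#       -> 'main.Mininet1.pingHost(src="h1")'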
-
- def handle_conjuction(self,**conjuctStatement):
+ def handle_conjuction( self, **conjuctStatement ):
'''
This will handle the conjunctions joined by AND.
'''
- args = self.parse_args(["STATEMENT"],**conjuctStatement)
+ args = self.parse_args( [ "STATEMENT" ], **conjuctStatement )
subSentence = ''
- storeMatch = re.match("\s*STORE\s+(.*)\s+IN\s+(.*)",args["STATEMENT"],flags=0)
- assertMatch = re.match("\s*ASSERT\s+(\w+)\s+(.*)\s+(.*)\s+ONPASS\s+(.*)\s+ONFAIL\s+(.*)",args["STATEMENT"],flags=0)
- if storeMatch :
- subSentence = "\n" + " " * 8 + self.translate_store(variable=storeMatch.group(2),
- value=storeMatch.group(1))
- elif assertMatch :
- subSentence = "\n" + " " * 8 + self.translate_assertion(leftvalue=assertMatch.group(1),
- operator=assertMatch.group(2),
- rightvalue=assertMatch.group(3),
- onpass=assertMatch.group(4),
- onfail=assertMatch.group(5))
+ storeMatch = re.match( "\s*STORE\s+(.*)\s+IN\s+(.*)", args[ "STATEMENT" ], flags=0 )
+ assertMatch = re.match( "\s*ASSERT\s+(\w+)\s+(.*)\s+(.*)\s+ONPASS\s+(.*)\s+ONFAIL\s+(.*)", args[ "STATEMENT" ], flags=0 )
+ if storeMatch:
+ subSentence = "\n" + " " * 8 + self.translate_store( variable=storeMatch.group( 2 ),
+ value=storeMatch.group( 1 ) )
+ elif assertMatch:
+ subSentence = "\n" + " " * 8 + self.translate_assertion( leftvalue=assertMatch.group( 1 ),
+ operator=assertMatch.group( 2 ),
+ rightvalue=assertMatch.group( 3 ),
+ onpass=assertMatch.group( 4 ),
+ onfail=assertMatch.group( 5 ) )
return subSentence
- def translate_usingas(self,**argumentAS) :
+ def translate_usingas( self, **argumentAS ):
'''
This will translate the USING <argument> AS <value> statement into equivalent argument passing.
It will return the translated form in resultString
'''
- args = self.parse_args(["ARGUMENTS"],**argumentAS)
+ args = self.parse_args( [ "ARGUMENTS" ], **argumentAS )
resultString = ''
argsList = []
subString = ''
subSentence = ''
line = ''
- andCheck = re.search ("(.*)\s+AND\s+(.*)",args["ARGUMENTS"],flags=0)
+ andCheck = re.search( "(.*)\s+AND\s+(.*)", args[ "ARGUMENTS" ], flags=0 )
if andCheck:
- line = andCheck.group(1)
- subSentence = self.handle_conjuction(statement=andCheck.group(2))
- else :
- line = args["ARGUMENTS"]
+ line = andCheck.group( 1 )
+ subSentence = self.handle_conjuction( statement=andCheck.group( 2 ) )
+ else:
+ line = args[ "ARGUMENTS" ]
+ argsMatch = re.search( "(.*),(.*)", line, flags=0 )
-
- argsMatch = re.search("(.*),(.*)",line,flags=0)
-
-
- if args["ARGUMENTS"] == None or args["ARGUMENTS"] == '' :
+ if args[ "ARGUMENTS" ] is None or args[ "ARGUMENTS" ] == '':
subString = ''
- elif argsMatch :
+ elif argsMatch:
- argsList = line.split(",")
- for index, arguments in enumerate(argsList):
- argMatch = re.search("(.*)\s+AS\s+(.*)",arguments,flags=0)
+ argsList = line.split( "," )
+ for index, arguments in enumerate( argsList ):
+ argMatch = re.search( "(.*)\s+AS\s+(.*)", arguments, flags=0 )
if argMatch:
- argsKey = argMatch.group(1)
- argsValue = argMatch.group(2)
- paramsMatch = re.search("PARAMS\[(.*)\]|STEP\[(.*)\]|TOPO\[(.*)\]|CASE\[(.*)\]|LAST_RESPONSE|LAST_RESULT",argsValue,flags=0)
- if not paramsMatch :
- if index == len(argsList) - 1 :
- subString = subString + argsKey + "=" + argsValue
- else :
- subString = subString + argsKey + "=" + argsValue + ","
- else :
- argString = self.translate_parameters(parameters=argsValue)
- if index == len(argsList) - 1 :
- subString = subString + argsKey + "=" + argString
- else :
- subString = subString + argsKey + "=" + argString + ","
- else :
- if index == len(argsList) - 1 :
- subString = subString + arguments
- else :
- subString = subString + arguments + ","
- else :
- argMatch = re.search("(.*)\s+AS\s+(.*)",args["ARGUMENTS"],flags=0)
+ argsKey = argMatch.group( 1 )
+ argsValue = argMatch.group( 2 )
+ paramsMatch = re.search( "PARAMS\[(.*)\]|STEP\[(.*)\]|TOPO\[(.*)\]|CASE\[(.*)\]|LAST_RESPONSE|LAST_RESULT", argsValue, flags=0 )
+ if not paramsMatch:
+ if index == len( argsList ) - 1:
+ subString = subString + argsKey + "=" + argsValue
+ else:
+ subString = subString + argsKey + "=" + argsValue + ", "
+ else:
+ argString = self.translate_parameters( parameters=argsValue )
+ if index == len( argsList ) - 1:
+ subString = subString + argsKey + "=" + argString
+ else:
+ subString = subString + argsKey + "=" + argString + ", "
+ else:
+ if index == len( argsList ) - 1:
+ subString = subString + arguments
+ else:
+ subString = subString + arguments + ", "
+ else:
+ argMatch = re.search( "(.*)\s+AS\s+(.*)", args[ "ARGUMENTS" ], flags=0 )
if argMatch:
- argsKey = argMatch.group(1)
- argsValue = argMatch.group(2)
- paramsMatch = re.search("PARAMS\[(.*)\]|STEP\[(.*)\]|TOPO\[(.*)\]|CASE\[(.*)\]|LAST_RESPONSE|LAST_RESULT",argsValue,flags=0)
- if not paramsMatch :
- subString = subString + argsKey + "=" + argsValue
- else :
- argString = self.translate_parameters(parameters=argsValue)
- subString = subString + argsKey + "=" + argString
- else :
- paramsMatch = re.match("PARAMS\[(.*)\]|STEP\[(.*)\]|TOPO\[(.*)\]|CASE\[(.*)\]|LAST_RESPONSE|LAST_RESULT",line,flags=0)
- if paramsMatch :
- subString = subString + self.translate_parameters(parameters=line)
- else :
- subString = subString + line
- resultString = "(" + subString + ")"+ subSentence
+ argsKey = argMatch.group( 1 )
+ argsValue = argMatch.group( 2 )
+ paramsMatch = re.search( "PARAMS\[(.*)\]|STEP\[(.*)\]|TOPO\[(.*)\]|CASE\[(.*)\]|LAST_RESPONSE|LAST_RESULT", argsValue, flags=0 )
+ if not paramsMatch:
+ subString = subString + argsKey + "=" + argsValue
+ else:
+ argString = self.translate_parameters( parameters=argsValue )
+ subString = subString + argsKey + "=" + argString
+ else:
+ paramsMatch = re.match( "PARAMS\[(.*)\]|STEP\[(.*)\]|TOPO\[(.*)\]|CASE\[(.*)\]|LAST_RESPONSE|LAST_RESULT", line, flags=0 )
+ if paramsMatch:
+ subString = subString + self.translate_parameters( parameters=line )
+ else:
+ subString = subString + line
+ resultString = "(" + subString + ")" + subSentence
return resultString
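# Illustrative examples (not part of the patch), assuming an instance `t`:
#   t.translate_usingas( arguments='user AS "admin",attempts AS 3' )
#       -> '(user="admin", attempts=3)'
#   t.translate_usingas( arguments='ip AS PARAMS["CTRL"]["ip"]' )
#       -> "(ip=main.params['CTRL']['ip'])"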
-
- def translate_connect(self,**connectStatement):
+ def translate_connect( self, **connectStatement ):
'''
This will translate the CONNECT <component_name> USING <arg1> AS <value1>, <arg2> AS <value2>
into python equivalent to resultString and returns resultString
'''
- args = self.parse_args(["COMPONENT","ARGUMENTS"],**connectStatement)
+ args = self.parse_args( [ "COMPONENT", "ARGUMENTS" ], **connectStatement )
resultString = ''
- subString = self.translate_usingas(arguments=args["ARGUMENTS"])
+ subString = self.translate_usingas( arguments=args[ "ARGUMENTS" ] )
# convert the statement here
- resultString = "main." + args["COMPONENT"] + ".connect(" + subString + ")"
+ resultString = "main." + args[ "COMPONENT" ] + ".connect( " + subString + " )"
return resultString
-
- def translate_parameters(self,**parameterStatement):
+ def translate_parameters( self, **parameterStatement ):
'''
This will translate the OpenSpeak Case and Params parameters into python equivalent
to resultString and returns resultString
'''
- args = self.parse_args(["PARAMETERS"],**parameterStatement)
- argument = args["PARAMETERS"]
+ args = self.parse_args( [ "PARAMETERS" ], **parameterStatement )
+ argument = args[ "PARAMETERS" ]
resultString = ''
- ### match arguments
- paramsMatch = re.search("PARAMS((\[(.*)\])*)",argument,flags=0)
- stepsMatch = re.search("STEP((\[(.*)\])*)",argument,flags=0)
- casesMatch = re.search("CASE((\[(.*)\])*)",argument,flags=0)
- topoMatch = re.search("TOPO((\[(.*)\])*)",argument,flags=0)
- lastResultMatch = re.match("LAST_RESULT",argument,flags=0)
- lastResponseMatch = re.match("LAST_RESPONSE",argument,flags=0)
+ # match arguments
+ paramsMatch = re.search( "PARAMS((\[(.*)\])*)", argument, flags=0 )
+ stepsMatch = re.search( "STEP((\[(.*)\])*)", argument, flags=0 )
+ casesMatch = re.search( "CASE((\[(.*)\])*)", argument, flags=0 )
+ topoMatch = re.search( "TOPO((\[(.*)\])*)", argument, flags=0 )
+ lastResultMatch = re.match( "LAST_RESULT", argument, flags=0 )
+ lastResponseMatch = re.match( "LAST_RESPONSE", argument, flags=0 )
# convert the statement here
- if paramsMatch :
- params = paramsMatch.group(1)
- resultString = resultString + "main.params" + self._argsCheck(checkvar=params)
- elif stepsMatch :
- resultString = resultString +"main.params[\'" + self.CurrentCase +\
- "\'][\'STEP" + str(self.CurrentStep) + "\']" +\
- self._argsCheck(checkvar=stepsMatch.group(1))
- elif casesMatch :
- resultString = resultString + "main.params[\'" + self.CurrentCase + "\']" +\
- self._argsCheck(checkvar=casesMatch.group(1))
- elif topoMatch :
- resultString = resultString + "main.componentDictionary" +\
- self._argsCheck(checkvar=topoMatch.group(1))
- elif lastResultMatch :
+ if paramsMatch:
+ params = paramsMatch.group( 1 )
+ resultString = resultString + "main.params" + self._argsCheck( checkvar=params )
+ elif stepsMatch:
+ resultString = resultString + "main.params[\'" + self.CurrentCase + \
+ "\'][\'STEP" + str( self.CurrentStep ) + "\']" + \
+ self._argsCheck( checkvar=stepsMatch.group( 1 ) )
+ elif casesMatch:
+ resultString = resultString + "main.params[\'" + self.CurrentCase + "\']" + \
+ self._argsCheck( checkvar=casesMatch.group( 1 ) )
+ elif topoMatch:
+ resultString = resultString + "main.componentDictionary" + \
+ self._argsCheck( checkvar=topoMatch.group( 1 ) )
+ elif lastResultMatch:
resultString = resultString + "main.last_result"
- elif lastResponseMatch :
+ elif lastResponseMatch:
resultString = resultString + "main.last_response"
return resultString
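# Illustrative examples (not part of the patch), assuming CurrentCase is
# "CASE1" and CurrentStep is 2:
#   PARAMS["SLEEP"]["startup"]  ->  main.params['SLEEP']['startup']
#   STEP["wait"]                ->  main.params['CASE1']['STEP2']['wait']
#   TOPO["Mininet1"]["host"]    ->  main.componentDictionary['Mininet1']['host']
#   LAST_RESULT                 ->  main.last_result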
- def _argsCheck(self,**args):
+ def _argsCheck( self, **args ):
''' This API will check if the given argument is a variable reference or a String and will translate accordingly.
It will return the translated form in resultString.
'''
- args = self.parse_args(["CHECKVAR"],**args)
- params = args["CHECKVAR"]
- argsList = params.split("]")
+ args = self.parse_args( [ "CHECKVAR" ], **args )
+ params = args[ "CHECKVAR" ]
+ argsList = params.split( "]" )
resultString = ''
- del argsList[len(argsList) - 1]
- for index,paramArgs in enumerate(argsList) :
- argsWidVariable = re.search("(\"|\')\s*(\w+)\s*(\'|\")",paramArgs,flags=0)
- if argsWidVariable :
- resultString = resultString + "[\'" + argsWidVariable.group(2) + "\']"
- else :
+ del argsList[ len( argsList ) - 1 ]
+ for index, paramArgs in enumerate( argsList ):
+ argsWidVariable = re.search( "(\"|\')\s*(\w+)\s*(\'|\")", paramArgs, flags=0 )
+ if argsWidVariable:
+ resultString = resultString + "[\'" + argsWidVariable.group( 2 ) + "\']"
+ else:
resultString = resultString + paramArgs + "]"
return resultString
- def translate_step(self,**stepStatement):
+ def translate_step( self, **stepStatement ):
'''
This will translate the STEP "DO SOMETHING HERE" into python equivalent
to resultString and returns resultString
'''
- args = self.parse_args(["STEP"],**stepStatement)
+ args = self.parse_args( [ "STEP" ], **stepStatement )
resultString = ''
- resultString = "main.step(\"" + args["STEP"] + "\")"
+ resultString = "main.step(\"" + args[ "STEP" ] + "\")"
# convert the statement here
return resultString
-
- def translate_comment(self,**commentStatement):
+ def translate_comment( self, **commentStatement ):
'''
This will translate the COMMENT "DO SOMETHING HERE" into python equivalent
to resultString and returns resultString
'''
- args = self.parse_args(["COMMENT"],**commentStatement)
+ args = self.parse_args( [ "COMMENT" ], **commentStatement )
resultString = ''
- resultString = "#" + args["COMMENT"]
+ resultString = "#" + args[ "COMMENT" ]
# convert the statement here
return resultString
- def translate_testcase_name(self,**nameStatement):
+ def translate_testcase_name( self, **nameStatement ):
'''
This method will convert NAME "<Testcase_name>" into python equivalent statement
to resultString and returns resultString
'''
- args = self.parse_args(["TESTNAME"],**nameStatement)
+ args = self.parse_args( [ "TESTNAME" ], **nameStatement )
resultString = ''
- resultString = "main.case(\"" + args["TESTNAME"] + "\")"
+ resultString = "main.case(\"" + args[ "TESTNAME" ] + "\")"
# convert the statement here
return resultString
-
- def translate_case_block(self,**caseBlock):
+ def translate_case_block( self, **caseBlock ):
'''
This method will translate the case block in the test script.
It returns the translated equivalent python code for the test script
'''
- args = self.parse_args(["CASENUMBER"],**caseBlock)
+ args = self.parse_args( [ "CASENUMBER" ], **caseBlock )
resultString = ""
- resultString = "def CASE" + str(args["CASENUMBER"]) + "(self,main) :\n"
+ resultString = "def CASE" + str( args[ "CASENUMBER" ] ) + "(self,main) :\n"
# process the caseBlock List translate all statements underlying the given case
return resultString
-
-
- def translate_loop_block(self,*loopBlock):
+ def translate_loop_block( self, *loopBlock ):
'''
This method will translate a for-loop block into its equivalent python code.
The whole loop block will be passed in as the loopBlock list.
@@ -787,8 +768,7 @@
# process the loopBlock List translate all statements underlying the given loop block
return resultString
-
- def translate_conjuction(self,conjuctionStatement):
+ def translate_conjuction( self, conjuctionStatement ):
'''
This will translate the AND conjunction statements into python equivalent
to resultString and returns resultString
@@ -797,21 +777,16 @@
# convert the statement here
return resultString
-
- def parse_args(self,args, **kwargs):
+ def parse_args( self, args, **kwargs ):
'''
It will accept the (key,value) pairs and will return the (key,value) pairs with keys in uppercase.
'''
newArgs = {}
- for key,value in kwargs.iteritems():
- #currentKey = str.upper(key)
- if isinstance(args,list) and str.upper(key) in args:
+ for key, value in kwargs.iteritems():
+ if isinstance( args, list ) and str.upper( key ) in args:
for each in args:
- if each==str.upper(key):
- newArgs [str(each)] = value
- elif each != str.upper(key) and (newArgs.has_key(str(each)) == False ):
- newArgs[str(each)] = None
-
-
-
+ if each == str.upper( key ):
+ newArgs[ str( each ) ] = value
+ elif each != str.upper( key ) and str( each ) not in newArgs:
+ newArgs[ str( each ) ] = None
return newArgs
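# Illustrative sketch (not part of the patch) of the normalisation performed
# above: keys are upper-cased and expected keys that were not passed default
# to None. Assuming an instance `t`:
#   t.parse_args( [ "STEP" ], step="Ping all hosts" )
#       -> { 'STEP': 'Ping all hosts' }
#   t.parse_args( [ "COMPONENT", "ARGUMENTS" ], component="Mininet1" )
#       -> { 'COMPONENT': 'Mininet1', 'ARGUMENTS': None }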
diff --git a/TestON/core/teston.py b/TestON/core/teston.py
index 89315dc..6794564 100644
--- a/TestON/core/teston.py
+++ b/TestON/core/teston.py
@@ -110,19 +110,19 @@
verifyOptions( options )
load_logger()
self.componentDictionary = {}
- self.componentDictionary = self.topology['COMPONENT']
+ self.componentDictionary = self.topology[ 'COMPONENT' ]
self.driversList = []
- if isinstance( self.componentDictionary, str):
+ if isinstance( self.componentDictionary, str ):
self.componentDictionary = dict( self.componentDictionary )
for component in self.componentDictionary:
- self.driversList.append( self.componentDictionary[component]['type'] )
+ self.driversList.append( self.componentDictionary[ component ][ 'type' ] )
self.driversList = list( set( self.driversList ) ) # Removing duplicates.
# Checking the test_target option set for the component or not
if isinstance( self.componentDictionary, dict ):
for component in self.componentDictionary.keys():
- if 'test_target' in self.componentDictionary[component].keys():
+ if 'test_target' in self.componentDictionary[ component ].keys():
self.test_target = component
# Checking for the openspeak file and test script
@@ -136,12 +136,12 @@
components_connect_order = {}
if isinstance( self.componentDictionary, dict ):
for component in self.componentDictionary.keys():
- if 'connect_order' not in self.componentDictionary[component].keys():
- self.componentDictionary[component]['connect_order'] = str( self.get_random() )
- components_connect_order[component] = eval( self.componentDictionary[component]['connect_order'] )
+ if 'connect_order' not in self.componentDictionary[ component ].keys():
+ self.componentDictionary[ component ][ 'connect_order' ] = str( self.get_random() )
+ components_connect_order[ component ] = eval( self.componentDictionary[ component ][ 'connect_order' ] )
# Ordering components based on the connect order.
ordered_component_list = sorted( components_connect_order,
- key=lambda key: components_connect_order[key] )
+ key=lambda key: components_connect_order[ key ] )
print ordered_component_list
for component in ordered_component_list:
self.componentInit( component )
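# Illustrative sketch (not part of the patch): ordering components by their
# evaluated connect_order, as above; the component names are hypothetical.
components_connect_order = { 'ONOScli': 2, 'Mininet1': 1, 'ONOSrest': 3 }
ordered_component_list = sorted( components_connect_order,
                                 key=lambda key: components_connect_order[ key ] )
print ordered_component_list
# ['Mininet1', 'ONOScli', 'ONOSrest']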
@@ -169,40 +169,37 @@
self.initiated = False
self.log.info( "Creating component Handle: " + component )
driver_options = {}
- if 'COMPONENTS' in self.componentDictionary[component].keys():
- driver_options = dict( self.componentDictionary[component]['COMPONENTS'] )
- driver_options['name'] = component
- driverName = self.componentDictionary[component]['type']
- driver_options['type'] = driverName
+ if 'COMPONENTS' in self.componentDictionary[ component ].keys():
+ driver_options = dict( self.componentDictionary[ component ][ 'COMPONENTS' ] )
+ driver_options[ 'name' ] = component
+ driverName = self.componentDictionary[ component ][ 'type' ]
+ driver_options[ 'type' ] = driverName
classPath = self.getDriverPath( driverName.lower() )
driverModule = importlib.import_module( classPath )
driverClass = getattr( driverModule, driverName )
driverObject = driverClass()
- if "OCN" in self.componentDictionary[component]['host'] and\
+ if "OCN" in self.componentDictionary[ component ][ 'host' ] and\
main.onoscell:
- self.componentDictionary[component]['host'] = main.mnIP
+ self.componentDictionary[ component ][ 'host' ] = main.mnIP
- user_name = self.componentDictionary[component].get( 'user',
- getpass.getuser() )
- ip_address = self.componentDictionary[component].get( 'host',
- 'localhost' )
- pwd = self.componentDictionary[component].get( 'password',
- 'changeme' )
- port = self.componentDictionary[component].get( 'port' )
+ user_name = self.componentDictionary[ component ].get( 'user', getpass.getuser() )
+ ip_address = self.componentDictionary[ component ].get( 'host', 'localhost' )
+ pwd = self.componentDictionary[ component ].get( 'password', 'changeme' )
+ port = self.componentDictionary[ component ].get( 'port' )
connect_result = driverObject.connect( user_name=user_name,
ip_address=ip_address,
pwd=pwd,
port=port,
- options=driver_options)
+ options=driver_options )
if not connect_result:
self.log.error( "Exiting from the test execution because connecting to the " +
component + " component failed." )
self.exit()
- vars( self )[component] = driverObject
+ vars( self )[ component ] = driverObject
self.initiated = True
return driverObject
@@ -270,12 +267,12 @@
self.CASERESULT = self.NORESULT
stopped = False
try:
- self.code[self.testCaseNumber]
+ self.code[ self.testCaseNumber ]
except KeyError:
self.log.error( "There is no Test-Case " + self.testCaseNumber )
return self.FALSE
self.stepCount = 0
- while self.stepCount < len( self.code[self.testCaseNumber].keys() ):
+ while self.stepCount < len( self.code[ self.testCaseNumber ].keys() ):
result = self.runStep( self.code, self.testCaseNumber )
if result == self.FALSE:
break
@@ -299,7 +296,7 @@
self.CASERESULT = self.TRUE
else:
self.CASERESULT = self.NORESULT
- self.testCaseResult[str( self.CurrentTestCaseNumber )] = self.CASERESULT
+ self.testCaseResult[ str( self.CurrentTestCaseNumber ) ] = self.CASERESULT
self.organizeResult( self.CurrentTestCaseNumber, self.CASERESULT )
self.logger.updateCaseResults( self )
self.log.wiki( "<p>" + self.caseExplanation + "</p>" )
@@ -350,7 +347,7 @@
# NOTE: This is needed to catch results of main.step()'s
# called inside functions or loops
self.stepResults = ( [], [], [], [] )
- exec code[testCaseNumber][step] in module.__dict__
+ exec code[ testCaseNumber ][ step ] in module.__dict__
self.stepCount = self.stepCount + 1
self.parseStepResults( testCaseNumber )
except SkipCase: # Raised in self.skipCase()
@@ -363,7 +360,7 @@
return self.FALSE
except StandardError as e:
try:
- stepNo = self.stepResults[0][ self.stepNumber - 1 ]
+ stepNo = self.stepResults[ 0 ][ self.stepNumber - 1 ]
except IndexError:
stepNo = "<IndexError>"
main.log.warn( "Error trying to get step number. " +
@@ -371,7 +368,7 @@
str( self.stepNumber ) + " and step " +
str( self.stepNumber + 1 ) )
try:
- stepName = self.stepResults[1][ self.stepNumber - 1 ]
+ stepName = self.stepResults[ 1 ][ self.stepNumber - 1 ]
except IndexError:
stepName = "<IndexError>"
self.log.error( "\nException in the following section of" +
@@ -401,7 +398,7 @@
if cli.stop:
cli.stop = False
self.TOTAL_TC_NORESULT = self.TOTAL_TC_NORESULT + 1
- self.testCaseResult[str( self.CurrentTestCaseNumber )] = "Stopped"
+ self.testCaseResult[ str( self.CurrentTestCaseNumber ) ] = "Stopped"
self.logger.updateCaseResults( self )
result = self.cleanup()
return self.FALSE
@@ -412,12 +409,12 @@
"""
try:
# Iterate through each of the steps and print them
- for index in range( len( self.stepResults[0] ) ):
+ for index in range( len( self.stepResults[ 0 ] ) ):
# stepResults = ( stepNo, stepName, stepResult, onFail )
- stepNo = self.stepResults[0][ index ]
- stepName = self.stepResults[1][ index ]
- stepResult = self.stepResults[2][ index ]
- onFail = self.stepResults[3][ index ]
+ stepNo = self.stepResults[ 0 ][ index ]
+ stepName = self.stepResults[ 1 ][ index ]
+ stepResult = self.stepResults[ 2 ][ index ]
+ onFail = self.stepResults[ 3 ][ index ]
self.stepCache += "\t" + str( testCaseNumber ) + "."
self.stepCache += str( stepNo ) + " "
self.stepCache += stepName + " - "
@@ -454,27 +451,27 @@
raise SkipCase
def addCaseHeader( self ):
- caseHeader = "\n" + "*" * 30 + "\n Result summary for Testcase" +\
+ caseHeader = "\n" + "*" * 30 + "\n Result summary for Testcase" + \
str( self.CurrentTestCaseNumber ) + "\n" + "*" * 30 + "\n"
self.log.exact( caseHeader )
- caseHeader = "\n" + "*" * 40 + "\nStart of Test Case" +\
+ caseHeader = "\n" + "*" * 40 + "\nStart of Test Case" + \
str( self.CurrentTestCaseNumber ) + " : "
for driver in self.componentDictionary.keys():
- vars( self )[driver + 'log'].info( caseHeader )
+ vars( self )[ driver + 'log' ].info( caseHeader )
def addCaseFooter( self ):
- stepNo = self.stepResults[0][-2]
+ stepNo = self.stepResults[ 0 ][ -2 ]
if stepNo > 0:
- previousStep = " " + str( self.CurrentTestCaseNumber ) + "." +\
+ previousStep = " " + str( self.CurrentTestCaseNumber ) + "." + \
str( stepNo ) + ": " + str( self.stepName )
- stepHeader = "\n" + "*" * 40 + "\nEnd of Step " + previousStep +\
+ stepHeader = "\n" + "*" * 40 + "\nEnd of Step " + previousStep + \
"\n" + "*" * 40 + "\n"
- caseFooter = "\n" + "*" * 40 + "\nEnd of Test case " +\
+ caseFooter = "\n" + "*" * 40 + "\nEnd of Test case " + \
str( self.CurrentTestCaseNumber ) + "\n" + "*" * 40 + "\n"
for driver in self.driversList:
- vars( self )[driver].write( stepHeader + "\n" + caseFooter )
+ vars( self )[ driver ].write( stepHeader + "\n" + caseFooter )
def cleanup( self ):
'''
@@ -497,12 +494,12 @@
self.logger.testSummary( self )
components = self.componentDictionary
for component in sorted( components,
- key=lambda item: components[item]['connect_order'],
+ key=lambda item: components[ item ][ 'connect_order' ],
reverse=True ):
try:
- tempObject = vars( self )[component]
- print "Disconnecting from " + str( tempObject.name ) +\
- ": " + str( tempObject.__class__)
+ tempObject = vars( self )[ component ]
+ print "Disconnecting from " + str( tempObject.name ) + \
+ ": " + str( tempObject.__class__ )
tempObject.disconnect()
except KeyboardInterrupt:
pass
@@ -517,7 +514,7 @@
# Closing all the driver's session files
for driver in self.componentDictionary.keys():
try:
- vars( self )[driver].close_log_handles()
+ vars( self )[ driver ].close_log_handles()
except KeyboardInterrupt:
pass
except KeyError:
@@ -556,7 +553,7 @@
if not components:
try:
for component in self.componentDictionary.keys():
- tempObject = vars( self )[component]
+ tempObject = vars( self )[ component ]
result = tempObject.onfail()
except StandardError as e:
print str( e )
@@ -564,7 +561,7 @@
else:
try:
for component in components:
- tempObject = vars( self )[component]
+ tempObject = vars( self )[ component ]
result = tempObject.onfail()
except StandardError as e:
print str( e )
@@ -605,25 +602,25 @@
'''
The step information of the test-case will be appended to the logs.
'''
- previousStep = " " + str( self.CurrentTestCaseNumber ) + "." +\
+ previousStep = " " + str( self.CurrentTestCaseNumber ) + "." + \
str( self.stepNumber ) + ": " + str( self.stepName )
self.stepName = stepDesc
self.stepNumber += 1
- self.stepResults[0].append( self.stepNumber )
- self.stepResults[1].append( stepDesc )
- self.stepResults[2].append( self.NORESULT )
- self.stepResults[3].append( "No on fail message given" )
+ self.stepResults[ 0 ].append( self.stepNumber )
+ self.stepResults[ 1 ].append( stepDesc )
+ self.stepResults[ 2 ].append( self.NORESULT )
+ self.stepResults[ 3 ].append( "No on fail message given" )
- stepName = " " + str( self.CurrentTestCaseNumber ) + "." +\
+ stepName = " " + str( self.CurrentTestCaseNumber ) + "." + \
str( self.stepNumber ) + ": " + str( stepDesc )
- self.log.step(stepName)
+ self.log.step( stepName )
stepHeader = ""
line = "\n" + "-" * 45 + "\n"
if self.stepNumber > 1:
stepHeader = line + "End of Step " + previousStep + line
stepHeader += line + "Start of Step" + stepName + line
for driver in self.componentDictionary.keys():
- vars( self )[driver + 'log'].info( stepHeader )
+ vars( self )[ driver + 'log' ].info( stepHeader )
def case( self, testCaseName ):
'''
@@ -634,7 +631,7 @@
self.log.case( testCaseName )
caseHeader = testCaseName + "\n" + "*" * 40 + "\n"
for driver in self.componentDictionary.keys():
- vars( self )[driver + 'log'].info( caseHeader )
+ vars( self )[ driver + 'log' ].info( caseHeader )
def testDesc( self, description ):
'''
@@ -654,7 +651,7 @@
counter = 0
for index in range( len( testFileList ) ):
lineMatch = re.match( '\s+def CASE(\d+)(.*):',
- testFileList[index],
+ testFileList[ index ],
0 )
if lineMatch:
counter = counter + 1
@@ -721,7 +718,7 @@
table format'''
table_data = ""
if isinstance( value_to_convert, dict ):
- table_data = table_data + '\t'.join( value_to_convert ) +\
+ table_data = table_data + '\t'.join( value_to_convert ) + \
"\n"
for temp_val in value_to_convert.values():
table_data = table_data + get_table( temp_val )
@@ -770,7 +767,7 @@
# NOTE: We should catch any exceptions while trying to
# close the thread so that we can try to close the other
# threads as well
- print str( thread.getName() ) +\
+ print str( thread.getName() ) + \
' could not be terminated'
os.system( "stty sane" ) # fix format if necessary
sys.exit()
@@ -859,16 +856,16 @@
# Mail-To: field
if options.mail: # Test run specific
main.mail = options.mail
- elif main.params.get('mail'): # Test suite specific
+ elif main.params.get( 'mail' ): # Test suite specific
main.mail = main.params.get( 'mail' )
else: # TestON specific
- main.mail = main.config['config'].get( 'mail_to' )
+ main.mail = main.config[ 'config' ].get( 'mail_to' )
# Mail-From: field
- main.sender = main.config['config'].get( 'mail_from' )
+ main.sender = main.config[ 'config' ].get( 'mail_from' )
# Mail smtp server
- main.smtp = main.config['config'].get( 'mail_server' )
+ main.smtp = main.config[ 'config' ].get( 'mail_server' )
# Mail-From account password
- main.senderPwd = main.config['config'].get( 'mail_pass' )
+ main.senderPwd = main.config[ 'config' ].get( 'mail_pass' )
def evalTestCase( tempList ):
tList = []
@@ -876,7 +873,7 @@
if isinstance( tcase, list ):
tList.extend( evalTestCase( tcase ) )
else:
- tList.extend( [tcase] )
+ tList.extend( [ tcase ] )
return tList
def verifyTestCases( options ):
@@ -888,10 +885,10 @@
main.testcases_list = eval( testcases_list + "," )
else:
if 'testcases' in main.params.keys():
- temp = eval( main.params['testcases'] + "," )
+ temp = eval( main.params[ 'testcases' ] + "," )
main.testcases_list = evalTestCase( list( temp ) )
else:
- print "Testcases not specifed in params, please provide in " +\
+ print "Testcases not specifed in params, please provide in " + \
"params file or 'testcases' commandline argument"
sys.exit()
@@ -902,20 +899,20 @@
main.ONOSip = []
main.mnIP = ""
cellCMD = ". ~/onos/tools/dev/bash_profile; cell " + main.onoscell
- output = subprocess.check_output( ["bash", '-c', cellCMD] )
+ output = subprocess.check_output( [ "bash", '-c', cellCMD ] )
splitOutput = output.splitlines()
main.apps = ""
for i in range( len( splitOutput ) ):
- if re.match( "OCN", splitOutput[i] ):
- mnNode = splitOutput[i].split( "=" )
- main.mnIP = mnNode[1]
+ if re.match( "OCN", splitOutput[ i ] ):
+ mnNode = splitOutput[ i ].split( "=" )
+ main.mnIP = mnNode[ 1 ]
# cell already sorts OC variables in bash, so no need to
# sort in TestON
- elif re.match( "OC[1-9]", splitOutput[i] ):
- onosNodes = splitOutput[i].split( "=" )
- main.ONOSip.append( onosNodes[1] )
- elif re.match( "ONOS_APPS", splitOutput[i] ):
- main.apps = ( splitOutput[i].split( "=" ) )[1]
+ elif re.match( "OC[1-9]", splitOutput[ i ] ):
+ onosNodes = splitOutput[ i ].split( "=" )
+ main.ONOSip.append( onosNodes[ 1 ] )
+ elif re.match( "ONOS_APPS", splitOutput[ i ] ):
+ main.apps = ( splitOutput[ i ].split( "=" ) )[ 1 ]
else:
main.onoscell = main.FALSE
@@ -929,13 +926,13 @@
pass
else:
directory = ""
- for root, dirs, files in os.walk( main.testDir, topdown=True):
+ for root, dirs, files in os.walk( main.testDir, topdown=True ):
if not directory:
for name in dirs:
if name == main.TEST:
directory = ( os.path.join( root, name ) )
index = directory.find( "/tests/" ) + 1
- main.classPath = directory[index:].replace( '/', '.' ) + "." + main.TEST
+ main.classPath = directory[ index: ].replace( '/', '.' ) + "." + main.TEST
break
openspeakfile = directory + "/" + main.TEST + ".ospk"
main.testFile = directory + "/" + main.TEST + ".py"
@@ -946,8 +943,8 @@
# No openspeak found, using python file instead
pass
else:
- print "\nThere is no \"" + main.TEST + "\" test script.\nPlease provide a " +\
- "Python or OpenSpeak test script in the tests folder: " +\
+ print "\nThere is no \"" + main.TEST + "\" test script.\nPlease provide a " + \
+ "Python or OpenSpeak test script in the tests folder: " + \
main.testDir + "/" + main.TEST + "/"
__builtin__.testthread = None
main.exit()
@@ -955,10 +952,10 @@
testModule = __import__( main.classPath,
globals(),
locals(),
- [main.TEST],
+ [ main.TEST ],
-1 )
except ImportError:
- print "There was an import error, it might mean that there is " +\
+ print "There was an import error, it might mean that there is " + \
"no test named " + main.TEST
main.exit()
@@ -970,19 +967,19 @@
def verifyParams( options ):
try:
- main.params = main.params['PARAMS']
+ main.params = main.params[ 'PARAMS' ]
except KeyError:
- print "Error with the params file: Either the file not specified " +\
+ print "Error with the params file: Either the file not specified " + \
"or the format is not correct"
main.exit()
try:
- main.topology = main.topology['TOPOLOGY']
+ main.topology = main.topology[ 'TOPOLOGY' ]
except KeyError:
- print "Error with the Topology file: Either the file not specified " +\
+ print "Error with the Topology file: Either the file not specified " + \
"or the format is not correct"
main.exit()
# Overwrite existing params variables if they are specified from command line
- if len(options.params) > 0:
+ if len( options.params ) > 0:
# Some params variables are specified from command line
for param in options.params:
if not re.search( ".=.", param ):
@@ -997,7 +994,7 @@
# Get the innermost dictionary
try:
while len( keyList ) > 1:
- key = keyList.pop(0)
+ key = keyList.pop( 0 )
assert isinstance( paramDict[ key ], dict )
paramDict = paramDict[ key ]
except KeyError:
@@ -1007,14 +1004,14 @@
print( "Error when parsing params: \"" + key + "\" is already the innermost level in main.params" )
main.exit()
# Change the value
- if not paramDict.has_key( keyList[0] ):
- print( "Error when parsing params: key \"" + keyList[0] + "\" not found in main.params" )
+ if keyList[ 0 ] not in paramDict:
+ print( "Error when parsing params: key \"" + keyList[ 0 ] + "\" not found in main.params" )
main.exit()
- elif isinstance( paramDict[ keyList[0] ], dict ):
- print( "Error when parsing params: more levels under key \"" + keyList[0] + "\" in main.params" )
+ elif isinstance( paramDict[ keyList[ 0 ] ], dict ):
+ print( "Error when parsing params: more levels under key \"" + keyList[ 0 ] + "\" in main.params" )
main.exit()
else:
- paramDict[ keyList[0] ] = value
+ paramDict[ keyList[ 0 ] ] = value
def load_parser():
'''
@@ -1025,20 +1022,20 @@
'''
confighash = main.configDict
- if 'file' in confighash['config']['parser'] and\
- 'class' in confighash['config']['parser']:
- path = confighash['config']['parser']['file']
+ if 'file' in confighash[ 'config' ][ 'parser' ] and\
+ 'class' in confighash[ 'config' ][ 'parser' ]:
+ path = confighash[ 'config' ][ 'parser' ][ 'file' ]
if path is not None or\
- confighash['config']['parser']['class'] is not None:
+ confighash[ 'config' ][ 'parser' ][ 'class' ] is not None:
try:
module = re.sub( r".py\s*$", "", path )
- moduleList = module.split("/")
- newModule = ".".join( moduleList[-2:] )
- parsingClass = confighash['config']['parser']['class']
+ moduleList = module.split( "/" )
+ newModule = ".".join( moduleList[ -2: ] )
+ parsingClass = confighash[ 'config' ][ 'parser' ][ 'class' ]
parsingModule = __import__( newModule,
globals(),
locals(),
- [parsingClass],
+ [ parsingClass ],
-1 )
parsingClass = getattr( parsingModule, parsingClass )
main.parser = parsingClass()
@@ -1050,11 +1047,11 @@
print "Invalid parser format"
main.exit()
except ImportError:
- print "Could not find the file " + path +\
+ print "Could not find the file " + path + \
" using default parser."
load_defaultParser()
- elif confighash['config']['parser']['file'] is None or\
- confighash['config']['parser']['class'] is None:
+ elif confighash[ 'config' ][ 'parser' ][ 'file' ] is None or\
+ confighash[ 'config' ][ 'parser' ][ 'class' ] is None:
load_defaultParser()
else:
load_defaultParser()
@@ -1064,14 +1061,14 @@
It will load the default parser which is xml parser to parse the params and
topology file.
'''
- moduleList = main.parserPath.split("/")
- newModule = ".".join( moduleList[-2:] )
+ moduleList = main.parserPath.split( "/" )
+ newModule = ".".join( moduleList[ -2: ] )
try:
parsingClass = main.parsingClass
parsingModule = __import__( newModule,
globals(),
locals(),
- [parsingClass],
+ [ parsingClass ],
-1 )
parsingClass = getattr( parsingModule, parsingClass )
main.parser = parsingClass()
@@ -1082,7 +1079,7 @@
else:
main.exit()
except ImportError:
- print sys.exc_info()[1]
+ print sys.exc_info()[ 1 ]
def load_logger():
'''
@@ -1092,29 +1089,29 @@
file.
'''
confighash = main.configDict
- if 'file' in confighash['config']['logger'] and\
- 'class' in confighash['config']['logger']:
- path = confighash['config']['logger']['file']
+ if 'file' in confighash[ 'config' ][ 'logger' ] and\
+ 'class' in confighash[ 'config' ][ 'logger' ]:
+ path = confighash[ 'config' ][ 'logger' ][ 'file' ]
if path is not None or\
- confighash['config']['logger']['class'] is not None:
+ confighash[ 'config' ][ 'logger' ][ 'class' ] is not None:
try:
module = re.sub( r".py\s*$", "", path )
moduleList = module.split( "/" )
- newModule = ".".join( moduleList[-2:] )
- loggerClass = confighash['config']['logger']['class']
+ newModule = ".".join( moduleList[ -2: ] )
+ loggerClass = confighash[ 'config' ][ 'logger' ][ 'class' ]
loggerModule = __import__( newModule,
globals(),
locals(),
- [loggerClass],
+ [ loggerClass ],
-1 )
loggerClass = getattr( loggerModule, loggerClass )
main.logger = loggerClass()
except ImportError:
- print "Could not find the file " + path +\
+ print "Could not find the file " + path + \
" using default logger."
load_defaultlogger()
- elif confighash['config']['parser']['file'] is None or\
- confighash['config']['parser']['class'] is None:
+ elif confighash[ 'config' ][ 'parser' ][ 'file' ] is None or\
+ confighash[ 'config' ][ 'parser' ][ 'class' ] is None:
load_defaultlogger()
else:
load_defaultlogger()
@@ -1124,20 +1121,20 @@
It will load the default parser which is xml parser to parse the params and
topology file.
'''
- moduleList = main.loggerPath.split("/")
- newModule = ".".join( moduleList[-2:] )
+ moduleList = main.loggerPath.split( "/" )
+ newModule = ".".join( moduleList[ -2: ] )
try:
loggerClass = main.loggerClass
loggerModule = __import__( newModule,
globals(),
locals(),
- [loggerClass],
+ [ loggerClass ],
-1 )
loggerClass = getattr( loggerModule, loggerClass )
main.logger = loggerClass()
except ImportError:
- print sys.exc_info()[1]
+ print sys.exc_info()[ 1 ]
main.exit()
def _echo( self ):
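For readers following the load_parser()/load_logger() changes above: both helpers turn a file path from the .cfg into a dotted module name before importing the named class. A minimal sketch of that mechanism (the path and class name are illustrative, not taken from a real config; note the escaped dot, which the original r".py\s*$" pattern leaves unescaped):

    import importlib
    import re

    def load_class_from_path( path, class_name ):
        # "core/xmlparser.py" -> "core.xmlparser"
        module = re.sub( r"\.py\s*$", "", path )
        newModule = ".".join( module.split( "/" )[ -2: ] )
        mod = importlib.import_module( newModule )  # same effect as __import__( ..., -1 ) on py2
        return getattr( mod, class_name )           # raises AttributeError if the class is missing

    # Hypothetical usage mirroring load_defaultParser():
    # parserClass = load_class_from_path( "core/xmlparser.py", "xmlparser" )
    # main.parser = parserClass()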
diff --git a/TestON/core/testparser.py b/TestON/core/testparser.py
index 904ebc0..1dea300 100644
--- a/TestON/core/testparser.py
+++ b/TestON/core/testparser.py
@@ -25,12 +25,12 @@
import re
import sys
class TestParser:
- def __init__(self,testFile):
- try :
- testFileHandler = open(testFile, 'r')
+ def __init__( self, testFile ):
+ try:
+ testFileHandler = open( testFile, 'r' )
except IOError:
- print "No such file "+testFile
- sys.exit(0)
+ print "No such file " + testFile
+ sys.exit( 0 )
testFileList = testFileHandler.readlines()
self.testscript = testFileList
@@ -39,89 +39,89 @@
self.statementsList = []
index = 0
self.statementsList = []
- #initialSpaces = len(line) -len(line.lstrip())
- while index < len(testFileList):
- testFileList[index] = re.sub("^\s{8}|^\s{4}", "", testFileList[index])
+ # initialSpaces = len(line) -len(line.lstrip())
+ while index < len( testFileList ):
+ testFileList[ index ] = re.sub( "^\s{8}|^\s{4}", "", testFileList[ index ] )
# Skip multiline comments
- if re.match('^(\'\'\')|^(\"\"\")',testFileList[index],0) :
+ if re.match( '^(\'\'\')|^(\"\"\")', testFileList[ index ], 0 ):
index = index + 1
- try :
- while not re.match('^\s*(\'\'\')|^\s*(\"\"\")',testFileList[index],0) :
+ try:
+ while not re.match( '^\s*(\'\'\')|^\s*(\"\"\")', testFileList[ index ], 0 ):
index = index + 1
except IndexError:
print ''
# skip empty lines and single line comments
- elif not re.match('#|^\s*$',testFileList[index],0):
- self.statementsList.append(testFileList[index])
+ elif not re.match( '#|^\s*$', testFileList[ index ], 0 ):
+ self.statementsList.append( testFileList[ index ] )
index = index + 1
- def case_code(self):
+ def case_code( self ):
index = 0
statementsList = self.statementsList
- while index < len(statementsList):
- m= re.match('def\s+CASE(\d+)',statementsList[index],0)
+ while index < len( statementsList ):
+ m = re.match( 'def\s+CASE(\d+)', statementsList[ index ], 0 )
self.caseBlock = []
if m:
index = index + 1
- try :
- while not re.match('\s*def\s+CASE(\d+)',statementsList[index],0) :
- self.caseBlock.append(statementsList[index])
- if index < len(statementsList)-1:
+ try:
+ while not re.match( '\s*def\s+CASE(\d+)', statementsList[ index ], 0 ):
+ self.caseBlock.append( statementsList[ index ] )
+ if index < len( statementsList ) - 1:
index = index + 1
- else :
+ else:
break
index = index - 1
except IndexError:
print ''
- self.caseCode [str(m.group(1))] = self.caseBlock
+ self.caseCode[ str( m.group( 1 ) ) ] = self.caseBlock
index = index + 1
return self.caseCode
- def step_code(self,caseStatements):
+ def step_code( self, caseStatements ):
index = 0
step = 0
stepCode = {}
step_flag = False
- while index < len(caseStatements):
- m= re.match('main\.step',caseStatements[index],0)
+ while index < len( caseStatements ):
+ m = re.match( 'main\.step', caseStatements[ index ], 0 )
stepBlock = ''
if m:
step_flag = True
- if step == 0 :
+ if step == 0:
i = 0
block = ''
- while i < index :
- block += caseStatements[i]
+ while i < index:
+ block += caseStatements[ i ]
i = i + 1
- stepCode[step] = block
+ stepCode[ step ] = block
step = step + 1
- stepBlock = stepBlock + caseStatements[index]
+ stepBlock = stepBlock + caseStatements[ index ]
index = index + 1
- try :
- while not re.match('main\.step',caseStatements[index],0) :
- stepBlock = stepBlock + caseStatements[index]
- if index < len(caseStatements)-1:
+ try:
+ while not re.match( 'main\.step', caseStatements[ index ], 0 ):
+ stepBlock = stepBlock + caseStatements[ index ]
+ if index < len( caseStatements ) - 1:
index = index + 1
- else :
+ else:
break
index = index - 1
except IndexError:
print ''
- stepCode[step] = stepBlock
+ stepCode[ step ] = stepBlock
step = step + 1
index = index + 1
# If there is no step defined !!
- if not step_flag :
- stepCode[step] = "".join(caseStatements)
+ if not step_flag:
+ stepCode[ step ] = "".join( caseStatements )
return stepCode
- def getStepCode(self):
+ def getStepCode( self ):
case_step_code = {}
case_block = self.case_code()
- for case in case_block :
- case_step_code[case] = {}
- step_block = self.step_code(case_block[case])
- for step in step_block :
- case_step_code[case][step] = step_block[step]
+ for case in case_block:
+ case_step_code[ case ] = {}
+ step_block = self.step_code( case_block[ case ] )
+ for step in step_block:
+ case_step_code[ case ][ step ] = step_block[ step ]
return case_step_code
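The reformatted case_code()/step_code() pair above walks the statement list by index; the grouping it implements can be re-expressed more compactly (a simplified sketch, not the parser code itself):

    import re

    def split_cases( statements ):
        # Group statements under the "def CASEn" header that precedes them,
        # mirroring what TestParser.case_code() builds into self.caseCode.
        cases, current = {}, None
        for line in statements:
            m = re.match( r'\s*def\s+CASE(\d+)', line )
            if m:
                current = m.group( 1 )
                cases[ current ] = []
            elif current is not None:
                cases[ current ].append( line )
        return cases

    # split_cases( [ "def CASE1():\n", "x = 1\n", "def CASE2():\n" ] )
    # -> { '1': [ 'x = 1\n' ], '2': [] }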
diff --git a/TestON/core/utilities.py b/TestON/core/utilities.py
index 2f7e5bb..91fc105 100644
--- a/TestON/core/utilities.py
+++ b/TestON/core/utilities.py
@@ -46,47 +46,47 @@
* Parsing the params or topology file.
'''
- def __init__(self):
- self.wrapped = sys.modules[__name__]
+ def __init__( self ):
+ self.wrapped = sys.modules[ __name__ ]
- def __getattr__(self, name):
+ def __getattr__( self, name ):
'''
This will invoke, if the attribute wasn't found the usual ways.
Here it will look for assert_attribute and will execute when AttributeError occurs.
It will return the result of the assert_attribute.
'''
try:
- return getattr(self.wrapped, name)
+ return getattr( self.wrapped, name )
except AttributeError:
- def assertHandling(**kwargs):
- nameVar = re.match("^assert",name,flags=0)
- matchVar = re.match("assert(_not_|_)(equals|matches|greater|lesser)",name,flags=0)
+ def assertHandling( **kwargs ):
+ nameVar = re.match( "^assert", name, flags=0 )
+ matchVar = re.match( "assert(_not_|_)(equals|matches|greater|lesser)", name, flags=0 )
notVar = 0
operators = ""
- try :
- if matchVar.group(1) == "_not_" and matchVar.group(2) :
+ try:
+ if matchVar.group( 1 ) == "_not_" and matchVar.group( 2 ):
notVar = 1
- operators = matchVar.group(2)
- elif matchVar.group(1) == "_" and matchVar.group(2):
- operators = matchVar.group(2)
+ operators = matchVar.group( 2 )
+ elif matchVar.group( 1 ) == "_" and matchVar.group( 2 ):
+ operators = matchVar.group( 2 )
except AttributeError:
- if matchVar==None and nameVar:
- operators ='equals'
- result = self._assert(NOT=notVar,operator=operators,**kwargs)
+ if matchVar is None and nameVar:
+ operators = 'equals'
+ result = self._assert( NOT=notVar, operator=operators, **kwargs )
if result == main.TRUE:
- main.log.info("Assertion Passed")
+ main.log.info( "Assertion Passed" )
main.STEPRESULT = main.TRUE
elif result == main.FALSE:
- main.log.warn("Assertion Failed")
+ main.log.warn( "Assertion Failed" )
main.STEPRESULT = main.FALSE
else:
- main.log.error("There is an Error in Assertion")
+ main.log.error( "There is an Error in Assertion" )
main.STEPRESULT = main.ERROR
return result
return assertHandling
- def _assert (self,**assertParam):
+ def _assert( self, **assertParam ):
'''
It will take the arguments :
expect:'Expected output'
@@ -100,28 +100,28 @@
'''
- arguments = self.parse_args(["EXPECT","ACTUAL","ONPASS","ONFAIL","NOT","OPERATOR"],**assertParam)
+ arguments = self.parse_args( [ "EXPECT", "ACTUAL", "ONPASS", "ONFAIL", "NOT", "OPERATOR" ], **assertParam )
result = 0
valuetype = ''
- operation = "not "+ str(arguments["OPERATOR"]) if arguments['NOT'] and arguments['NOT'] == 1 else arguments["OPERATOR"]
- operators = {'equals':{'STR':'==','NUM':'=='}, 'matches' : '=~', 'greater':'>' ,'lesser':'<'}
+ operation = "not " + str( arguments[ "OPERATOR" ] ) if arguments[ 'NOT' ] and arguments[ 'NOT' ] == 1 else arguments[ "OPERATOR" ]
+ operators = { 'equals': { 'STR': '==', 'NUM': '==' }, 'matches': '=~', 'greater': '>', 'lesser': '<' }
- expectMatch = re.match('^\s*[+-]?0(e0)?\s*$', str(arguments["EXPECT"]), re.I+re.M)
- if not ((not expectMatch) and (arguments["EXPECT"]==0)):
+ expectMatch = re.match( '^\s*[+-]?0(e0)?\s*$', str( arguments[ "EXPECT" ] ), re.I + re.M )
+ if not ( ( not expectMatch ) and ( arguments[ "EXPECT" ] == 0 ) ):
valuetype = 'NUM'
- else :
- if arguments["OPERATOR"] == 'greater' or arguments["OPERATOR"] == 'lesser':
- main.log.error("Numeric comparison on strings is not possibele")
+ else:
+ if arguments[ "OPERATOR" ] == 'greater' or arguments[ "OPERATOR" ] == 'lesser':
+ main.log.error( "Numeric comparison on strings is not possibele" )
return main.ERROR
valuetype = 'STR'
- arguments["ACTUAL"] = str(arguments["ACTUAL"])
- if arguments["OPERATOR"] != 'matches':
- arguments["EXPECT"] = str(arguments["EXPECT"])
+ arguments[ "ACTUAL" ] = str( arguments[ "ACTUAL" ] )
+ if arguments[ "OPERATOR" ] != 'matches':
+ arguments[ "EXPECT" ] = str( arguments[ "EXPECT" ] )
- try :
- opcode = operators[str(arguments["OPERATOR"])][valuetype] if arguments["OPERATOR"] == 'equals' else operators[str(arguments["OPERATOR"])]
+ try:
+ opcode = operators[ str( arguments[ "OPERATOR" ] ) ][ valuetype ] if arguments[ "OPERATOR" ] == 'equals' else operators[ str( arguments[ "OPERATOR" ] ) ]
except KeyError as e:
print "Key Error in assertion"
@@ -130,111 +130,111 @@
if opcode == '=~':
try:
- assert re.search(str(arguments["EXPECT"]),str(arguments["ACTUAL"]))
+ assert re.search( str( arguments[ "EXPECT" ] ), str( arguments[ "ACTUAL" ] ) )
result = main.TRUE
except AssertionError:
- try :
- assert re.match(str(arguments["EXPECT"]),str(arguments["ACTUAL"]))
+ try:
+ assert re.match( str( arguments[ "EXPECT" ] ), str( arguments[ "ACTUAL" ] ) )
result = main.TRUE
except AssertionError:
- main.log.error("Assertion Failed")
+ main.log.error( "Assertion Failed" )
result = main.FALSE
- else :
+ else:
try:
- if str(opcode)=="==":
- main.log.info("Verifying the Expected is equal to the actual or not using assert_equal")
- if (arguments["EXPECT"] == arguments["ACTUAL"]):
+ if str( opcode ) == "==":
+ main.log.info( "Verifying the Expected is equal to the actual or not using assert_equal" )
+ if( arguments[ "EXPECT" ] == arguments[ "ACTUAL" ] ):
result = main.TRUE
- else :
+ else:
result = main.FALSE
- elif str(opcode) == ">":
- main.log.info("Verifying the Expected is Greater than the actual or not using assert_greater")
- if (ast.literal_eval(arguments["EXPECT"]) > ast.literal_eval(arguments["ACTUAL"])) :
+ elif str( opcode ) == ">":
+ main.log.info( "Verifying the Expected is Greater than the actual or not using assert_greater" )
+ if( ast.literal_eval( arguments[ "EXPECT" ] ) > ast.literal_eval( arguments[ "ACTUAL" ] ) ):
result = main.TRUE
- else :
+ else:
result = main.FALSE
- elif str(opcode) == "<":
- main.log.info("Verifying the Expected is Lesser than the actual or not using assert_lesser")
- if (ast.literal_eval(arguments["EXPECT"]) < ast.literal_eval(arguments["ACTUAL"])):
+ elif str( opcode ) == "<":
+ main.log.info( "Verifying the Expected is Lesser than the actual or not using assert_lesser" )
+ if( ast.literal_eval( arguments[ "EXPECT" ] ) < ast.literal_eval( arguments[ "ACTUAL" ] ) ):
result = main.TRUE
- else :
+ else:
result = main.FALSE
except AssertionError:
- main.log.error("Assertion Failed")
+ main.log.error( "Assertion Failed" )
result = main.FALSE
result = result if result else 0
- result = not result if arguments["NOT"] and arguments["NOT"] == 1 else result
+ result = not result if arguments[ "NOT" ] and arguments[ "NOT" ] == 1 else result
resultString = ""
- if result :
- resultString = str(resultString) + "PASS"
- main.log.info(arguments["ONPASS"])
- else :
- resultString = str(resultString) + "FAIL"
- if not isinstance(arguments["ONFAIL"],str):
- eval(str(arguments["ONFAIL"]))
- else :
- main.log.error(arguments["ONFAIL"])
- main.log.report(arguments["ONFAIL"])
+ if result:
+ resultString = str( resultString ) + "PASS"
+ main.log.info( arguments[ "ONPASS" ] )
+ else:
+ resultString = str( resultString ) + "FAIL"
+ if not isinstance( arguments[ "ONFAIL" ], str ):
+ eval( str( arguments[ "ONFAIL" ] ) )
+ else:
+ main.log.error( arguments[ "ONFAIL" ] )
+ main.log.report( arguments[ "ONFAIL" ] )
main.onFailMsg = arguments[ 'ONFAIL' ]
- msg = arguments["ON" + str(resultString)]
+ msg = arguments[ "ON" + str( resultString ) ]
- if not isinstance(msg,str):
+ if not isinstance( msg, str ):
try:
- eval(str(msg))
+ eval( str( msg ) )
except SyntaxError as e:
print "function definition is not right"
print e
main.last_result = result
- if main.stepResults[2]:
- main.stepResults[2][-1] = result
+ if main.stepResults[ 2 ]:
+ main.stepResults[ 2 ][ -1 ] = result
try:
- main.stepResults[3][-1] = arguments[ 'ONFAIL' ]
+ main.stepResults[ 3 ][ -1 ] = arguments[ 'ONFAIL' ]
except AttributeError:
pass
else:
main.log.warn( "Assertion called before a test step" )
return result
- def parse_args(self,args, **kwargs):
+ def parse_args( self, args, **kwargs ):
'''
It will accept the (key,value) pair and will return the (key,value) pairs with keys in uppercase.
'''
newArgs = {}
- for key,value in kwargs.iteritems():
- if isinstance(args,list) and str.upper(key) in args:
+ for key, value in kwargs.iteritems():
+ if isinstance( args, list ) and str.upper( key ) in args:
for each in args:
- if each==str.upper(key):
- newArgs [str(each)] = value
- elif each != str.upper(key) and (newArgs.has_key(str(each)) == False ):
- newArgs[str(each)] = None
+ if each == str.upper( key ):
+ newArgs[ str( each ) ] = value
+ elif each != str.upper( key ) and str( each ) not in newArgs:
+ newArgs[ str( each ) ] = None
return newArgs
- def send_mail(self):
+ def send_mail( self ):
# Create a text/plain message
msg = email.mime.Multipart.MIMEMultipart()
- try :
+ try:
if main.test_target:
- sub = "Result summary of \"" + main.TEST + "\" run on component \"" +\
- main.test_target + "\" Version \"" +\
- vars( main )[main.test_target].get_version() + "\": " +\
+ sub = "Result summary of \"" + main.TEST + "\" run on component \"" + \
+ main.test_target + "\" Version \"" + \
+ vars( main )[ main.test_target ].get_version() + "\": " + \
str( main.TOTAL_TC_SUCCESS ) + "% Passed"
- else :
- sub = "Result summary of \"" + main.TEST + "\": " +\
+ else:
+ sub = "Result summary of \"" + main.TEST + "\": " + \
str( main.TOTAL_TC_SUCCESS ) + "% Passed"
- except ( KeyError, AttributeError ):
- sub = "Result summary of \"" + main.TEST + "\": " +\
+ except( KeyError, AttributeError ):
+ sub = "Result summary of \"" + main.TEST + "\": " + \
str( main.TOTAL_TC_SUCCESS ) + "% Passed"
- msg['Subject'] = sub
- msg['From'] = main.sender
- msg['To'] = main.mail
+ msg[ 'Subject' ] = sub
+ msg[ 'From' ] = main.sender
+ msg[ 'To' ] = main.mail
# The main body is just another attachment
body = email.mime.Text.MIMEText( main.logHeader + "\n" +
- main.testResult)
+ main.testResult )
msg.attach( body )
# Attachments
@@ -252,7 +252,7 @@
smtp = smtplib.SMTP( main.smtp )
smtp.starttls()
smtp.login( main.sender, main.senderPwd )
- smtp.sendmail( msg['From'], [msg['To']], msg.as_string() )
+ smtp.sendmail( msg[ 'From' ], [ msg[ 'To' ] ], msg.as_string() )
smtp.quit()
except Exception:
main.log.exception( "Error sending email" )
@@ -265,32 +265,32 @@
# Create a text/plain message
msg = email.mime.Multipart.MIMEMultipart()
- msg['Subject'] = subject
- msg['From'] = main.sender
- msg['To'] = main.mail
+ msg[ 'Subject' ] = subject
+ msg[ 'From' ] = main.sender
+ msg[ 'To' ] = main.mail
smtp = smtplib.SMTP( main.smtp )
smtp.starttls()
smtp.login( main.sender, main.senderPwd )
- smtp.sendmail( msg['From'], [msg['To']], msg.as_string() )
+ smtp.sendmail( msg[ 'From' ], [ msg[ 'To' ] ], msg.as_string() )
smtp.quit()
except Exception:
main.log.exception( "" )
return main.FALSE
return main.TRUE
- def parse(self,fileName):
+ def parse( self, fileName ):
'''
This will parse the params or topo or cfg file and return content in the file as Dictionary
'''
self.fileName = fileName
- matchFileName = re.match(r'(.*)\.(cfg|params|topo)',self.fileName,re.M|re.I)
+ matchFileName = re.match( r'(.*)\.(cfg|params|topo)', self.fileName, re.M | re.I )
if matchFileName:
- try :
- parsedInfo = ConfigObj(self.fileName)
+ try:
+ parsedInfo = ConfigObj( self.fileName )
return parsedInfo
except StandardError:
- print "There is no such file to parse "+fileName
+ print "There is no such file to parse " + fileName
else:
return 0
@@ -326,8 +326,7 @@
startTime = time.time()
for i in range( 0, attempts ):
ret = f( *args, **kwargs )
- if ret not in retValue:
- # NOTE that False in [ 0 ] == True
+ if ret not in retValue: # NOTE that False in [ 0 ] == True
break
if randomTime:
sleeptime = random.randint( 0, sleep )
@@ -350,4 +349,4 @@
if __name__ != "__main__":
import sys
- sys.modules[__name__] = Utilities()
+ sys.modules[ __name__ ] = Utilities()
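The retained comment in Utilities.retry(), "NOTE that False in [ 0 ] == True", relies on Python treating booleans as integers, so a False return counts as retryable whenever retValue contains 0. A stripped-down sketch of that loop (the real method also handles sleep, randomTime and attempt timing):

    def retry( f, attempts, retValue=( False, ), args=(), kwargs={} ):
        ret = None
        for _ in range( attempts ):
            ret = f( *args, **kwargs )
            if ret not in retValue:  # False == 0, so False in [ 0 ] is True
                break
        return ret

    assert ( False in [ 0 ] ) is True   # the quirk the NOTE refers to
    assert ( True in [ 1 ] ) is True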
diff --git a/TestON/core/xmldict.py b/TestON/core/xmldict.py
index 808b365..41bb670 100644
--- a/TestON/core/xmldict.py
+++ b/TestON/core/xmldict.py
@@ -31,141 +31,141 @@
"""
import datetime
-def xml_to_dict(root_or_str, strict=True):
+def xml_to_dict( root_or_str, strict=True ):
"""
Converts `root_or_str` which can be parsed xml or a xml string to dict.
"""
root = root_or_str
- if isinstance(root, str):
+ if isinstance( root, str ):
import xml.etree.cElementTree as ElementTree
- root = ElementTree.XML(root_or_str)
- try :
- return {root.tag: _from_xml(root, strict)}
+ root = ElementTree.XML( root_or_str )
+ try:
+ return { root.tag: _from_xml( root, strict ) }
except StandardError:
return None
-def dict_to_xml(dict_xml):
+def dict_to_xml( dict_xml ):
"""
Converts `dict_xml` which is a python dict to corresponding xml.
"""
- return _to_xml(dict_xml)
+ return _to_xml( dict_xml )
-def _to_xml(el):
+def _to_xml( el ):
"""
Converts `el` to its xml representation.
"""
val = None
- if isinstance(el, dict):
- val = _dict_to_xml(el)
- elif isinstance(el, bool):
- val = str(el).lower()
+ if isinstance( el, dict ):
+ val = _dict_to_xml( el )
+ elif isinstance( el, bool ):
+ val = str( el ).lower()
else:
val = el
- if val is None: val = 'null'
+ if val is None:
+ val = 'null'
return val
-def _extract_attrs(els):
+def _extract_attrs( els ):
"""
Extracts attributes from dictionary `els`. Attributes are keys which start
with '@'
"""
- if not isinstance(els, dict):
+ if not isinstance( els, dict ):
return ''
- return ''.join(' %s="%s"' % (key[1:], value) for key, value in els.iteritems()
- if key.startswith('@'))
+ return ''.join( ' %s="%s"' % ( key[ 1: ], value ) for key, value in els.iteritems() if key.startswith( '@' ) )
-def _dict_to_xml(els):
+def _dict_to_xml( els ):
"""
Converts `els` which is a python dict to corresponding xml.
"""
- def process_content(tag, content):
- attrs = _extract_attrs(content)
- text = isinstance(content, dict) and content.get('#text', '') or ''
- return '<%s%s>%s%s</%s>' % (tag, attrs, _to_xml(content), text, tag)
+ def process_content( tag, content ):
+ attrs = _extract_attrs( content )
+ text = isinstance( content, dict ) and content.get( '#text', '' ) or ''
+ return '<%s%s>%s%s</%s>' % ( tag, attrs, _to_xml( content ), text, tag )
tags = []
for tag, content in els.iteritems():
# Text and attributes
- if tag.startswith('@') or tag == '#text':
+ if tag.startswith( '@' ) or tag == '#text':
continue
- elif isinstance(content, list):
+ elif isinstance( content, list ):
for el in content:
- tags.append(process_content(tag, el))
- elif isinstance(content, dict):
- tags.append(process_content(tag, content))
+ tags.append( process_content( tag, el ) )
+ elif isinstance( content, dict ):
+ tags.append( process_content( tag, content ) )
else:
- tags.append('<%s>%s</%s>' % (tag, _to_xml(content), tag))
- return ''.join(tags)
+ tags.append( '<%s>%s</%s>' % ( tag, _to_xml( content ), tag ) )
+ return ''.join( tags )
-def _is_xml_el_dict(el):
+def _is_xml_el_dict( el ):
"""
Returns true if `el` is supposed to be a dict.
This function makes sense only in the context of making dicts out of xml.
"""
- if len(el) == 1 or el[0].tag != el[1].tag:
+ if len( el ) == 1 or el[ 0 ].tag != el[ 1 ].tag:
return True
return False
-def _is_xml_el_list(el):
+def _is_xml_el_list( el ):
"""
Returns true if `el` is supposed to be a list.
This function makes sense only in the context of making lists out of xml.
"""
- if len(el) > 1 and el[0].tag == el[1].tag:
+ if len( el ) > 1 and el[ 0 ].tag == el[ 1 ].tag:
return True
return False
-def _str_to_datetime(date_str):
+def _str_to_datetime( date_str ):
try:
- val = datetime.datetime.strptime(date_str, "%Y-%m-%dT%H:%M:%SZ")
+ val = datetime.datetime.strptime( date_str, "%Y-%m-%dT%H:%M:%SZ" )
except ValueError:
val = date_str
return val
-def _str_to_boolean(bool_str):
- if bool_str.lower() != 'false' and bool(bool_str):
+def _str_to_boolean( bool_str ):
+ if bool_str.lower() != 'false' and bool( bool_str ):
return True
return False
-def _from_xml(el, strict):
+def _from_xml( el, strict ):
"""
Extracts value of xml element element `el`.
"""
val = None
# Parent node.
if el:
- if _is_xml_el_dict(el):
- val = _dict_from_xml(el, strict)
- elif _is_xml_el_list(el):
- val = _list_from_xml(el, strict)
+ if _is_xml_el_dict( el ):
+ val = _dict_from_xml( el, strict )
+ elif _is_xml_el_list( el ):
+ val = _list_from_xml( el, strict )
# Simple node.
else:
attribs = el.items()
# An element with attributes.
if attribs and strict:
- val = dict(('@%s' % k, v) for k, v in dict(attribs).iteritems())
+ val = dict( ( '@%s' % k, v ) for k, v in dict( attribs ).iteritems() )
if el.text:
- converted = _val_and_maybe_convert(el)
- val['#text'] = el.text
+ converted = _val_and_maybe_convert( el )
+ val[ '#text' ] = el.text
if converted != el.text:
- val['#value'] = converted
+ val[ '#value' ] = converted
elif el.text:
# An element with no subelements but text.
- val = _val_and_maybe_convert(el)
+ val = _val_and_maybe_convert( el )
elif attribs:
- val = dict(attribs)
+ val = dict( attribs )
return val
-def _val_and_maybe_convert(el):
+def _val_and_maybe_convert( el ):
"""
Converts `el.text` if `el` has attribute `type` with valid value.
"""
text = el.text.strip()
- data_type = el.get('type')
- convertor = _val_and_maybe_convert.convertors.get(data_type)
+ data_type = el.get( 'type' )
+ convertor = _val_and_maybe_convert.convertors.get( data_type )
if convertor:
- return convertor(text)
+ return convertor( text )
else:
return text
_val_and_maybe_convert.convertors = {
@@ -174,23 +174,23 @@
'integer': int
}
-def _list_from_xml(els, strict):
+def _list_from_xml( els, strict ):
"""
Converts xml elements list `el_list` to a python list.
"""
temp = {}
for el in els:
- tag = el.attrib["name"]
- temp[tag] = (_from_xml(el, strict))
+ tag = el.attrib[ "name" ]
+ temp[ tag ] = ( _from_xml( el, strict ) )
return temp
-def _dict_from_xml(els, strict):
+def _dict_from_xml( els, strict ):
"""
Converts xml doc with root `root` to a python dict.
"""
# An element with subelements.
res = {}
for el in els:
- res[el.tag] = _from_xml(el, strict)
+ res[ el.tag ] = _from_xml( el, strict )
return res
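As a quick reference for the xmldict helpers above, a self-contained mini-converter shows the output shape for nested, non-repeating elements (attribute and list handling from _from_xml is deliberately omitted, and the element names are made up):

    import xml.etree.ElementTree as ElementTree

    def element_to_dict( el ):
        # Leaf elements map to their text; nested elements recurse per child.
        if len( el ) == 0:
            return el.text
        return dict( ( child.tag, element_to_dict( child ) ) for child in el )

    root = ElementTree.XML( '<PARAMS><testcases>1,2,3</testcases></PARAMS>' )
    print( { root.tag: element_to_dict( root ) } )
    # -> {'PARAMS': {'testcases': '1,2,3'}}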
diff --git a/TestON/core/xmlparser.py b/TestON/core/xmlparser.py
index a41ed92..12a3f61 100644
--- a/TestON/core/xmlparser.py
+++ b/TestON/core/xmlparser.py
@@ -1,4 +1,4 @@
-#/usr/bin/env python
+# /usr/bin/env python
'''
Created on 07-Jan-2013
Copyright 2013 Open Networking Foundation (ONF)
@@ -28,45 +28,44 @@
import xmldict
import re
-class xmlparser :
+class xmlparser:
- def __init__(self) :
+ def __init__( self ):
self.default = ''
- def parse(self,fileName) :
+ def parse( self, fileName ):
'''
This will parse the params or topo or cfg file and return content in the file as Dictionary
'''
self.fileName = fileName
- matchFileName = re.match(r'(.*)\.(params|topo|cfg)', self.fileName, re.M | re.I)
+ matchFileName = re.match( r'(.*)\.(params|topo|cfg)', self.fileName, re.M | re.I )
if matchFileName:
- xml = open(fileName).read()
- try :
- parsedInfo = xmldict.xml_to_dict(xml)
+ xml = open( fileName ).read()
+ try:
+ parsedInfo = xmldict.xml_to_dict( xml )
return parsedInfo
except StandardError as e:
print "Error parsing file " + fileName + ": " + e.message
- else :
+ else:
print "File name is not correct"
- def parseParams(self,paramsPath):
+ def parseParams( self, paramsPath ):
'''
It will take the params file path and will return the params dictionary
'''
- paramsPath = re.sub("\.","/",paramsPath)
- paramsPath = re.sub("tests|examples","",paramsPath)
- params = self.parse(main.tests_path+paramsPath+".params")
- paramsAsString = str(params)
- return eval(paramsAsString)
+ paramsPath = re.sub( "\.", "/", paramsPath )
+ paramsPath = re.sub( "tests|examples", "", paramsPath )
+ params = self.parse( main.tests_path + paramsPath + ".params" )
+ paramsAsString = str( params )
+ return eval( paramsAsString )
- def parseTopology(self,topologyPath):
+ def parseTopology( self, topologyPath ):
'''
It will take topology file path and will return topology dictionary
'''
- topologyPath = re.sub("\.","/",topologyPath)
- topologyPath = re.sub("tests|examples","",topologyPath)
- #topology = self.parse(main.tests_path+"/"+topologyPath+".topo")
- topology = self.parse(main.tests_path+topologyPath+".topo")
- topoAsString = str(topology)
- return eval(topoAsString)
+ topologyPath = re.sub( "\.", "/", topologyPath )
+ topologyPath = re.sub( "tests|examples", "", topologyPath )
+ topology = self.parse( main.tests_path + topologyPath + ".topo" )
+ topoAsString = str( topology )
+ return eval( topoAsString )
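Both parseParams() and parseTopology() above apply the same path mangling before delegating to parse(). Sketched below with a hypothetical test name; note that the original re.sub removes every occurrence of "tests" or "examples" in the path, not just the leading directory:

    import re

    def params_file_for( class_path, tests_path ):
        p = re.sub( r"\.", "/", class_path )    # dots -> directory separators
        p = re.sub( "tests|examples", "", p )   # strips every match, as in the driver
        return tests_path + p + ".params"

    # params_file_for( "tests.FUNC.FUNCexample.FUNCexample", "/home/user/TestON/tests" )
    # -> "/home/user/TestON/tests/FUNC/FUNCexample/FUNCexample.params"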
diff --git a/TestON/drivers/common/api/controller/onosrestdriver.py b/TestON/drivers/common/api/controller/onosrestdriver.py
index 312f1f1..cf3eec6 100755
--- a/TestON/drivers/common/api/controller/onosrestdriver.py
+++ b/TestON/drivers/common/api/controller/onosrestdriver.py
@@ -48,7 +48,7 @@
except Exception as e:
main.log.exception( e )
try:
- if os.getenv( str( self.ip_address ) ) != None:
+ if os.getenv( str( self.ip_address ) ) is not None:
self.ip_address = os.getenv( str( self.ip_address ) )
else:
main.log.info( self.name + ": ip set to " + self.ip_address )
@@ -74,7 +74,7 @@
if isinstance( jsonObject, str ):
jsonObject = json.loads( jsonObject )
return json.dumps( jsonObject, sort_keys=True,
- indent=4, separators=(',', ': '))
+ indent=4, separators=( ',', ': ' ) )
except ( TypeError, ValueError ):
main.log.exception( "Error parsing jsonObject" )
return None
@@ -99,20 +99,20 @@
# TODO: Do we need to allow for other protocols besides http?
# ANSWER: Not yet, but potentially https with certificates
if ip == "DEFAULT":
- main.log.warn( "No ip given, reverting to ip from topo file" )
- ip = self.ip_address
+ main.log.warn( "No ip given, reverting to ip from topo file" )
+ ip = self.ip_address
if port == "DEFAULT":
- main.log.warn( "No port given, reverting to port " +
- "from topo file" )
- port = self.port
+ main.log.warn( "No port given, reverting to port " +
+ "from topo file" )
+ port = self.port
try:
path = "http://" + str( ip ) + ":" + str( port ) + base + url
if self.user_name and self.pwd:
- main.log.info("user/passwd is: " + self.user_name + "/" + self.pwd)
- auth = (self.user_name, self.pwd)
+ main.log.info( "user/passwd is: " + self.user_name + "/" + self.pwd )
+ auth = ( self.user_name, self.pwd )
else:
- auth=None
+ auth = None
main.log.info( "Sending request " + path + " using " +
method.upper() + " method." )
response = requests.request( method.upper(),
@@ -273,7 +273,7 @@
query = "/" + str( appName ) + "/active"
response = self.send( method="POST",
url="/applications" + query,
- ip = ip, port = port)
+ ip = ip, port = port )
if response:
output = response[ 1 ]
app = json.loads( output )
@@ -414,17 +414,17 @@
error on requests; Returns None for exceptions
"""
try:
- intentJson = {"two": str( hostIdTwo ),
- "selector": {"criteria": []}, "priority": 7,
- "treatment": {"deferred": [], "instructions": []},
- "appId": appId, "one": str( hostIdOne ),
- "type": "HostToHostIntent",
- "constraints": [{"type": "LinkTypeConstraint",
- "types": ["OPTICAL"],
- "inclusive": 'false' }]}
+ intentJson = { "two": str( hostIdTwo ),
+ "selector": { "criteria": [] }, "priority": 7,
+ "treatment": { "deferred": [], "instructions": [] },
+ "appId": appId, "one": str( hostIdOne ),
+ "type": "HostToHostIntent",
+ "constraints": [ { "type": "LinkTypeConstraint",
+ "types": [ "OPTICAL" ],
+ "inclusive": 'false' } ] }
if vlanId:
- intentJson[ 'selector' ][ 'criteria' ].append( { "type":"VLAN_VID",
- "vlanId":vlanId } )
+ intentJson[ 'selector' ][ 'criteria' ].append( { "type": "VLAN_VID",
+ "vlanId": vlanId } )
output = None
if ip == "DEFAULT":
main.log.warn( "No ip given, reverting to ip from topo file" )
@@ -520,63 +520,63 @@
main.log.debug( self.name + ": Egress port not specified" )
return main.FALSE
- intentJson ={ "ingressPoint": { "device": ingressDevice,
- "port": ingressPort },
- "selector": { "criteria": [] },
- "priority": 55,
- "treatment": { "deferred": [],
- "instructions": [] },
- "egressPoint": { "device": egressDevice,
- "port": egressPort },
- "appId": appId,
- "type": "PointToPointIntent",
- "constraints": [ { "type": "LinkTypeConstraint",
- "types": [ "OPTICAL" ],
- "inclusive": "false" } ] }
+ intentJson = { "ingressPoint": { "device": ingressDevice,
+ "port": ingressPort },
+ "selector": { "criteria": [] },
+ "priority": 55,
+ "treatment": { "deferred": [],
+ "instructions": [] },
+ "egressPoint": { "device": egressDevice,
+ "port": egressPort },
+ "appId": appId,
+ "type": "PointToPointIntent",
+ "constraints": [ { "type": "LinkTypeConstraint",
+ "types": [ "OPTICAL" ],
+ "inclusive": "false" } ] }
# if protected:
# intentJson['constraints'].append( { "type": "Protection", "types": ["Protection"], "inclusive": "true" } )
if ethType == "IPV4":
intentJson[ 'selector' ][ 'criteria' ].append( {
- "type":"ETH_TYPE",
- "ethType":2048 } )
+ "type": "ETH_TYPE",
+ "ethType": 2048 } )
elif ethType:
intentJson[ 'selector' ][ 'criteria' ].append( {
- "type":"ETH_TYPE",
- "ethType":ethType } )
+ "type": "ETH_TYPE",
+ "ethType": ethType } )
if ethSrc:
intentJson[ 'selector' ][ 'criteria' ].append(
- { "type":"ETH_SRC",
- "mac":ethSrc } )
+ { "type": "ETH_SRC",
+ "mac": ethSrc } )
if ethDst:
intentJson[ 'selector' ][ 'criteria' ].append(
- { "type":"ETH_DST",
- "mac":ethDst } )
+ { "type": "ETH_DST",
+ "mac": ethDst } )
if ipSrc:
intentJson[ 'selector' ][ 'criteria' ].append(
- { "type":"IPV4_SRC",
- "ip":ipSrc } )
+ { "type": "IPV4_SRC",
+ "ip": ipSrc } )
if ipDst:
intentJson[ 'selector' ][ 'criteria' ].append(
- { "type":"IPV4_DST",
- "ip":ipDst } )
+ { "type": "IPV4_DST",
+ "ip": ipDst } )
if tcpSrc:
intentJson[ 'selector' ][ 'criteria' ].append(
- { "type":"TCP_SRC",
+ { "type": "TCP_SRC",
"tcpPort": tcpSrc } )
if tcpDst:
intentJson[ 'selector' ][ 'criteria' ].append(
- { "type":"TCP_DST",
+ { "type": "TCP_DST",
"tcpPort": tcpDst } )
if ipProto:
intentJson[ 'selector' ][ 'criteria' ].append(
- { "type":"IP_PROTO",
+ { "type": "IP_PROTO",
"protocol": ipProto } )
if vlanId:
intentJson[ 'selector' ][ 'criteria' ].append(
- { "type":"VLAN_VID",
+ { "type": "VLAN_VID",
"vlanId": vlanId } )
# TODO: Bandwidth and Lambda will be implemented if needed
@@ -613,26 +613,26 @@
main.log.exception( self.name + ": Uncaught exception!" )
main.cleanAndExit()
- def addSinglepointToMultipointIntent(self,
- ingressDevice,
- egressDeviceList,
- portEgressList,
- appId='org.onosproject.cli',
- portIngress="",
- ethType="",
- ethSrc="",
- ethDst="",
- bandwidth="",
- lambdaAlloc=False,
- ipProto="",
- ipSrc="",
- ipDst="",
- tcpSrc="",
- tcpDst="",
- partial=False,
- ip="DEFAULT",
- port="DEFAULT",
- vlanId="" ):
+ def addSinglepointToMultipointIntent( self,
+ ingressDevice,
+ egressDeviceList,
+ portEgressList,
+ appId='org.onosproject.cli',
+ portIngress="",
+ ethType="",
+ ethSrc="",
+ ethDst="",
+ bandwidth="",
+ lambdaAlloc=False,
+ ipProto="",
+ ipSrc="",
+ ipDst="",
+ tcpSrc="",
+ tcpDst="",
+ partial=False,
+ ip="DEFAULT",
+ port="DEFAULT",
+ vlanId="" ):
"""
Description:
Adds a point-to-multi point intent ( uni-directional ) by
@@ -694,7 +694,7 @@
"appId": appId,
"type": "SinglePointToMultiPointIntent",
"constraints": [ { "type": "LinkTypeConstraint",
- "types": ["OPTICAL"],
+ "types": [ "OPTICAL" ],
"inclusive": "false" } ] }
index = 0
@@ -752,16 +752,16 @@
"from topo file" )
port = self.port
response = self.send( method="POST",
- url="/intents", ip=ip, port=port,
- data=json.dumps( intentJson ) )
+ url="/intents", ip=ip, port=port,
+ data=json.dumps( intentJson ) )
- main.log.debug(intentJson)
+ main.log.debug( intentJson )
if response:
if "201" in str( response[ 0 ] ):
main.log.info( self.name + ": Successfully POST point" +
" intent between ingress: " + ingressDevice +
- " and egress: " + str(egressDeviceList) + " devices" )
+ " and egress: " + str( egressDeviceList ) + " devices" )
return main.TRUE
else:
main.log.error( "Error with REST request, response was: " + str( response ) )
@@ -834,7 +834,7 @@
main.log.exception( self.name + ": Uncaught exception!" )
main.cleanAndExit()
- def removeAllIntents( self, intentIdList ='ALL',appId='org.onosproject.cli',
+ def removeAllIntents( self, intentIdList='ALL', appId='org.onosproject.cli',
ip="DEFAULT", port="DEFAULT", delay=5 ):
"""
Description:
@@ -864,7 +864,7 @@
import time
time.sleep( delay )
intentRemain = len( json.loads( self.intents() ) )
- if all( result==main.TRUE for result in results ) and \
+ if all( result == main.TRUE for result in results ) and \
intentRemain == 0:
main.log.info( self.name + ": All intents are removed " )
return main.TRUE
@@ -947,10 +947,10 @@
query = "/" + mac + "/" + vlan
response = self.send( url="/hosts" + query, ip = ip, port = port )
if response:
- # NOTE: What if the person wants other values? would it be better
- # to have a function that gets a key and return a value instead?
- # This function requires mac and vlan and returns an ID which
- # makes this current function useless
+ # NOTE: What if the person wants other values? would it be better
+ # to have a function that gets a key and return a value instead?
+ # This function requires mac and vlan and returns an ID which
+ # makes this current function useless
if 200 <= response[ 0 ] <= 299:
output = response[ 1 ]
hostId = json.loads( output ).get( 'id' )
@@ -1095,7 +1095,7 @@
main.cleanAndExit()
def checkIntentState( self, intentsId="ALL", expectedState='INSTALLED',
- ip="DEFAULT", port="DEFAULT"):
+ ip="DEFAULT", port="DEFAULT" ):
"""
Description:
Check intents state based on expected state which defaults to
@@ -1264,7 +1264,8 @@
"""
try:
- if debug: main.log.debug( "Adding flow: " + self.pprint( flowJson ) )
+ if debug:
+ main.log.debug( "Adding flow: " + self.pprint( flowJson ) )
output = None
if ip == "DEFAULT":
main.log.warn( "No ip given, reverting to ip from topo file" )
@@ -1344,75 +1345,75 @@
of the ONOS node
"""
try:
- flowJson = { "priority":priority,
- "isPermanent":"true",
- "timeout":0,
- "deviceId":deviceId,
- "treatment":{"instructions":[]},
- "selector": {"criteria":[]}}
+ flowJson = { "priority": priority,
+ "isPermanent": "true",
+ "timeout": 0,
+ "deviceId": deviceId,
+ "treatment": { "instructions": [] },
+ "selector": { "criteria": [] }}
if appId:
flowJson[ "appId" ] = appId
if groupId:
flowJson[ 'treatment' ][ 'instructions' ].append( {
- "type":"GROUP",
- "groupId":groupId } )
+ "type": "GROUP",
+ "groupId": groupId } )
if egressPort:
flowJson[ 'treatment' ][ 'instructions' ].append( {
- "type":"OUTPUT",
- "port":egressPort } )
+ "type": "OUTPUT",
+ "port": egressPort } )
if ingressPort:
flowJson[ 'selector' ][ 'criteria' ].append( {
- "type":"IN_PORT",
- "port":ingressPort } )
+ "type": "IN_PORT",
+ "port": ingressPort } )
if ethType:
flowJson[ 'selector' ][ 'criteria' ].append( {
- "type":"ETH_TYPE",
- "ethType":ethType } )
+ "type": "ETH_TYPE",
+ "ethType": ethType } )
if ethSrc:
flowJson[ 'selector' ][ 'criteria' ].append( {
- "type":"ETH_SRC",
- "mac":ethSrc } )
+ "type": "ETH_SRC",
+ "mac": ethSrc } )
if ethDst:
flowJson[ 'selector' ][ 'criteria' ].append( {
- "type":"ETH_DST",
- "mac":ethDst } )
+ "type": "ETH_DST",
+ "mac": ethDst } )
if vlan:
flowJson[ 'selector' ][ 'criteria' ].append( {
- "type":"VLAN_VID",
- "vlanId":vlan } )
+ "type": "VLAN_VID",
+ "vlanId": vlan } )
if mpls:
flowJson[ 'selector' ][ 'criteria' ].append( {
- "type":"MPLS_LABEL",
- "label":mpls } )
+ "type": "MPLS_LABEL",
+ "label": mpls } )
if ipSrc:
flowJson[ 'selector' ][ 'criteria' ].append( {
- "type":ipSrc[0],
- "ip":ipSrc[1] } )
+ "type": ipSrc[ 0 ],
+ "ip": ipSrc[ 1 ] } )
if ipDst:
flowJson[ 'selector' ][ 'criteria' ].append( {
- "type":ipDst[0],
- "ip":ipDst[1] } )
+ "type": ipDst[ 0 ],
+ "ip": ipDst[ 1 ] } )
if tcpSrc:
flowJson[ 'selector' ][ 'criteria' ].append( {
- "type":"TCP_SRC",
+ "type": "TCP_SRC",
"tcpPort": tcpSrc } )
if tcpDst:
flowJson[ 'selector' ][ 'criteria' ].append( {
- "type":"TCP_DST",
+ "type": "TCP_DST",
"tcpPort": tcpDst } )
if udpSrc:
flowJson[ 'selector' ][ 'criteria' ].append( {
- "type":"UDP_SRC",
+ "type": "UDP_SRC",
"udpPort": udpSrc } )
if udpDst:
flowJson[ 'selector' ][ 'criteria' ].append( {
- "type":"UDP_DST",
+ "type": "UDP_DST",
"udpPort": udpDst } )
if ipProto:
flowJson[ 'selector' ][ 'criteria' ].append( {
- "type":"IP_PROTO",
+ "type": "IP_PROTO",
"protocol": ipProto } )
return self.sendFlow( deviceId=deviceId, flowJson=flowJson, debug=debug )
@@ -1628,30 +1629,30 @@
main.cleanAndExit()
def createFlowBatch( self,
- numSw = 1,
- swIndex = 1,
- batchSize = 1,
- batchIndex = 1,
- deviceIdpreFix = "of:",
- appId=0,
- deviceID="",
- ingressPort="",
- egressPort="",
- ethType="",
- ethSrc="",
- ethDst="",
- vlan="",
- ipProto="",
- ipSrc=(),
- ipDst=(),
- tcpSrc="",
- tcpDst="",
- udpDst="",
- udpSrc="",
- mpls="",
- ip="DEFAULT",
- port="DEFAULT",
- debug=False ):
+ numSw = 1,
+ swIndex = 1,
+ batchSize = 1,
+ batchIndex = 1,
+ deviceIdpreFix = "of:",
+ appId=0,
+ deviceID="",
+ ingressPort="",
+ egressPort="",
+ ethType="",
+ ethSrc="",
+ ethDst="",
+ vlan="",
+ ipProto="",
+ ipSrc=(),
+ ipDst=(),
+ tcpSrc="",
+ tcpDst="",
+ udpDst="",
+ udpSrc="",
+ mpls="",
+ ip="DEFAULT",
+ port="DEFAULT",
+ debug=False ):
"""
Description:
Creates batches of MAC-rule flows for POST.
@@ -1681,111 +1682,110 @@
The ip and port option are for the requests input's ip and port
of the ONOS node
"""
- #from pprint import pprint
+ # from pprint import pprint
flowJsonList = []
- flowJsonBatch = {"flows":flowJsonList}
+ flowJsonBatch = { "flows": flowJsonList }
dev = swIndex
- for fl in range(1, batchSize + 1):
- flowJson = { "priority":100,
- "deviceId":"",
- "isPermanent":"true",
- "timeout":0,
- "treatment":{"instructions":[]},
- "selector": {"criteria":[]}}
+ for fl in range( 1, batchSize + 1 ):
+ flowJson = { "priority": 100,
+ "deviceId": "",
+ "isPermanent": "true",
+ "timeout": 0,
+ "treatment": { "instructions": [] },
+ "selector": { "criteria": [] }}
- #main.log.info("fl: " + str(fl))
+ # main.log.info("fl: " + str(fl))
if dev <= numSw:
- deviceId = deviceIdpreFix + "{0:0{1}x}".format(dev,16)
- #print deviceId
- flowJson['deviceId'] = deviceId
+ deviceId = deviceIdpreFix + "{0:0{1}x}".format( dev, 16 )
+ # print deviceId
+ flowJson[ 'deviceId' ] = deviceId
dev += 1
else:
dev = 1
- deviceId = deviceIdpreFix + "{0:0{1}x}".format(dev,16)
- #print deviceId
- flowJson['deviceId'] = deviceId
+ deviceId = deviceIdpreFix + "{0:0{1}x}".format( dev, 16 )
+ # print deviceId
+ flowJson[ 'deviceId' ] = deviceId
dev += 1
# ethSrc starts with "0"; ethDst starts with "1"
# 2 Hex digit of device number; 5 digits of batch index number; 5 digits of batch size
- ethS = "%02X" %int( "0" + "{0:0{1}b}".format(dev,7), 2 ) + \
- "{0:0{1}x}".format(batchIndex,5) + "{0:0{1}x}".format(fl,5)
- ethSrc = ':'.join(ethS[i:i+2] for i in range(0,len(ethS),2))
- ethD = "%02X" %int( "1" + "{0:0{1}b}".format(dev,7), 2 ) + \
- "{0:0{1}x}".format(batchIndex,5) + "{0:0{1}x}".format(fl,5)
- ethDst = ':'.join(ethD[i:i+2] for i in range(0,len(ethD),2))
+ ethS = "%02X" % int( "0" + "{0:0{1}b}".format( dev, 7 ), 2 ) + \
+ "{0:0{1}x}".format( batchIndex, 5 ) + "{0:0{1}x}".format( fl, 5 )
+ ethSrc = ':'.join( ethS[ i: i+2 ] for i in range( 0, len( ethS ), 2 ) )
+ ethD = "%02X" % int( "1" + "{0:0{1}b}".format( dev, 7 ), 2 ) + \
+ "{0:0{1}x}".format( batchIndex, 5 ) + "{0:0{1}x}".format( fl, 5 )
+ ethDst = ':'.join( ethD[ i: i+2 ] for i in range( 0, len( ethD ), 2 ) )
if appId:
flowJson[ "appId" ] = appId
if egressPort:
flowJson[ 'treatment' ][ 'instructions' ].append( {
- "type":"OUTPUT",
- "port":egressPort } )
+ "type": "OUTPUT",
+ "port": egressPort } )
if ingressPort:
flowJson[ 'selector' ][ 'criteria' ].append( {
- "type":"IN_PORT",
- "port":ingressPort } )
+ "type": "IN_PORT",
+ "port": ingressPort } )
if ethType:
flowJson[ 'selector' ][ 'criteria' ].append( {
- "type":"ETH_TYPE",
- "ethType":ethType } )
+ "type": "ETH_TYPE",
+ "ethType": ethType } )
if ethSrc:
flowJson[ 'selector' ][ 'criteria' ].append( {
- "type":"ETH_SRC",
- "mac":ethSrc } )
+ "type": "ETH_SRC",
+ "mac": ethSrc } )
if ethDst:
flowJson[ 'selector' ][ 'criteria' ].append( {
- "type":"ETH_DST",
- "mac":ethDst } )
+ "type": "ETH_DST",
+ "mac": ethDst } )
if vlan:
flowJson[ 'selector' ][ 'criteria' ].append( {
- "type":"VLAN_VID",
- "vlanId":vlan } )
+ "type": "VLAN_VID",
+ "vlanId": vlan } )
if mpls:
flowJson[ 'selector' ][ 'criteria' ].append( {
- "type":"MPLS_LABEL",
- "label":mpls } )
+ "type": "MPLS_LABEL",
+ "label": mpls } )
if ipSrc:
flowJson[ 'selector' ][ 'criteria' ].append( {
- "type":ipSrc[0],
- "ip":ipSrc[1] } )
+ "type": ipSrc[ 0 ],
+ "ip": ipSrc[ 1 ] } )
if ipDst:
flowJson[ 'selector' ][ 'criteria' ].append( {
- "type":ipDst[0],
- "ip":ipDst[1] } )
+ "type": ipDst[ 0 ],
+ "ip": ipDst[ 1 ] } )
if tcpSrc:
flowJson[ 'selector' ][ 'criteria' ].append( {
- "type":"TCP_SRC",
+ "type": "TCP_SRC",
"tcpPort": tcpSrc } )
if tcpDst:
flowJson[ 'selector' ][ 'criteria' ].append( {
- "type":"TCP_DST",
+ "type": "TCP_DST",
"tcpPort": tcpDst } )
if udpSrc:
flowJson[ 'selector' ][ 'criteria' ].append( {
- "type":"UDP_SRC",
+ "type": "UDP_SRC",
"udpPort": udpSrc } )
if udpDst:
flowJson[ 'selector' ][ 'criteria' ].append( {
- "type":"UDP_DST",
+ "type": "UDP_DST",
"udpPort": udpDst } )
if ipProto:
flowJson[ 'selector' ][ 'criteria' ].append( {
- "type":"IP_PROTO",
+ "type": "IP_PROTO",
"protocol": ipProto } )
- #pprint(flowJson)
- flowJsonList.append(flowJson)
+ # pprint(flowJson)
+ flowJsonList.append( flowJson )
- main.log.info("Number of flows in batch: " + str( len(flowJsonList) ) )
- flowJsonBatch['flows'] = flowJsonList
- #pprint(flowJsonBatch)
+ main.log.info( "Number of flows in batch: " + str( len( flowJsonList ) ) )
+ flowJsonBatch[ 'flows' ] = flowJsonList
+ # pprint(flowJsonBatch)
return flowJsonBatch
-
def sendFlowBatch( self, batch={}, ip="DEFAULT", port="DEFAULT", debug=False ):
"""
Description:
@@ -1803,7 +1803,8 @@
import time
try:
- if debug: main.log.debug( "Adding flow: " + self.pprint( batch ) )
+ if debug:
+ main.log.debug( "Adding flow: " + self.pprint( batch ) )
output = None
if ip == "DEFAULT":
main.log.warn( "No ip given, reverting to ip from topo file" )
@@ -1816,8 +1817,8 @@
response = self.send( method="POST",
url=url, ip = ip, port = port,
data=json.dumps( batch ) )
- #main.log.info("Post response is: ", str(response[0]))
- if response[0] == 200:
+ # main.log.info("Post response is: ", str(response[0]))
+ if response[ 0 ] == 200:
main.log.info( self.name + ": Successfully POST flow batch" )
return main.TRUE, response
else:
@@ -1834,7 +1835,7 @@
main.cleanAndExit()
def removeFlowBatch( self, batch={},
- ip="DEFAULT", port="DEFAULT" ):
+ ip="DEFAULT", port="DEFAULT" ):
"""
Description:
Remove a batch of flows
@@ -1857,7 +1858,7 @@
response = self.send( method="DELETE",
url="/flows/", ip = ip, port = port,
- data = json.dumps(batch) )
+ data = json.dumps( batch ) )
if response:
if 200 <= response[ 0 ] <= 299:
return main.TRUE
@@ -1882,7 +1883,7 @@
import json
try:
# either onos:topology or 'topology' will work in CLI
- topology = json.loads(topologyOutput)
+ topology = json.loads( topologyOutput )
main.log.debug( topology )
return topology
except pexpect.EOF:
@@ -1914,7 +1915,7 @@
"""
try:
topology = self.getTopology( self.topology() )
- #summary = self.summary()
+ # summary = self.summary()
if topology == {}:
return main.ERROR
output = ""
@@ -1978,7 +1979,7 @@
"appCookie": appCookie,
"groupId": groupId,
"buckets": bucketList
- }
+ }
return self.sendGroup( deviceId=deviceId, groupJson=groupJson, ip="DEFAULT", port="DEFAULT", debug=False )
except ( AttributeError, TypeError ):
@@ -2004,7 +2005,8 @@
of the ONOS node
"""
try:
- if debug: main.log.debug( "Adding group: " + self.pprint( groupJson ) )
+ if debug:
+ main.log.debug( "Adding group: " + self.pprint( groupJson ) )
output = None
if ip == "DEFAULT":
main.log.warn( "No ip given, reverting to ip from topo file" )
@@ -2064,7 +2066,7 @@
if deviceId:
url += "/" + deviceId
if appCookie:
- url += "/" + appCookie
+ url += "/" + appCookie
response = self.send( url=url, ip = ip, port = port )
if response:
if 200 <= response[ 0 ] <= 299:
@@ -2126,4 +2128,3 @@
except Exception:
main.log.exception( self.name + ": Uncaught exception!" )
main.cleanAndExit()
-
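The flow-construction changes above only touch spacing, but for orientation this is the kind of payload sendFlow() ends up POSTing to the ONOS REST API (the IP, device ID and MAC below are illustrative; "onos"/"rocks" are the stock ONOS credentials):

    import json
    import requests

    flow = { "priority": 100,
             "isPermanent": "true",
             "timeout": 0,
             "deviceId": "of:0000000000000001",
             "treatment": { "instructions": [ { "type": "OUTPUT", "port": "2" } ] },
             "selector": { "criteria": [ { "type": "ETH_DST",
                                           "mac": "00:00:00:00:00:01" } ] } }

    # endpoint: /onos/v1/flows/{deviceId}
    resp = requests.post( "http://127.0.0.1:8181/onos/v1/flows/" + flow[ "deviceId" ],
                          auth=( "onos", "rocks" ),
                          data=json.dumps( flow ) )
    print( resp.status_code )  # the driver treats "201" as success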
diff --git a/TestON/drivers/common/api/controllerdriver.py b/TestON/drivers/common/api/controllerdriver.py
index 956374c..863b643 100644
--- a/TestON/drivers/common/api/controllerdriver.py
+++ b/TestON/drivers/common/api/controllerdriver.py
@@ -32,4 +32,3 @@
def __init__( self ):
super( Controller, self ).__init__()
-
diff --git a/TestON/drivers/common/api/dockerapidriver.py b/TestON/drivers/common/api/dockerapidriver.py
index 3fac610..1393e18 100644
--- a/TestON/drivers/common/api/dockerapidriver.py
+++ b/TestON/drivers/common/api/dockerapidriver.py
@@ -55,7 +55,7 @@
self.home = "/var/tmp"
self.handle = super( DockerApiDriver, self ).connect()
- self.dockerClient = Client(base_url='unix://var/run/docker.sock')
+ self.dockerClient = Client( base_url='unix://var/run/docker.sock' )
return self.handle
except Exception as e:
main.log.exception( e )
@@ -72,7 +72,7 @@
if imageDict[ 'RepoTags' ] is not None:
if len( imageDict[ 'RepoTags' ] ) > 1:
duplicateTagDetected = 1
- imageListToSend.append( imageDict['RepoTags'][0].encode('UTF8').split(':')[1] )
+ imageListToSend.append( imageDict[ 'RepoTags' ][ 0 ].encode( 'UTF8' ).split( ':' )[ 1 ] )
return imageListToSend, duplicateTagDetected
except Exception as e:
main.log.exception( e )
@@ -83,18 +83,18 @@
"""
try:
main.log.info( self.name +
- ": Pulling Docker image " + onosRepo + ":"+ onosTag )
- for line in self.dockerClient.pull( repository = onosRepo, \
- tag = onosTag, stream = True ):
- print "#",
- main.log.info(json.dumps(json.loads(line), indent =4))
+ ": Pulling Docker image " + onosRepo + ":" + onosTag )
+ for line in self.dockerClient.pull( repository = onosRepo,
+ tag = onosTag, stream = True ):
+ print "#",
+ main.log.info( json.dumps( json.loads( line ), indent=4 ) )
- #response = json.dumps( json.load( pullResult ), indent=4 )
+ # response = json.dumps( json.load( pullResult ), indent=4 )
if re.search( "for onosproject/onos:" + onosTag, line ):
main.log.info( "onos docker image pulled is: " + line )
return main.TRUE
else:
- main.log.error( "Failed to download image from: " + onosRepo +":"+ onosTag )
+ main.log.error( "Failed to download image from: " + onosRepo + ":" + onosTag )
main.log.error( "Error respone: " )
main.log.error( line )
return main.FALSE
@@ -109,17 +109,17 @@
try:
main.log.info( self.name +
": Creating Docker container for node: " + onosNode )
- response = self.dockerClient.create_container( image=onosImage, \
- tty=True, name=onosNode, detach=True )
- #print response
- #print response.get("Id")
- #print response.get("Warnings")
- if( str( response.get("Warnings") ) == 'None' ):
- main.log.info( "Created container for node: " + onosNode + "; container id is: " + response.get("Id") )
- return ( main.TRUE, response.get("Id") )
+ response = self.dockerClient.create_container( image=onosImage,
+ tty=True, name=onosNode, detach=True )
+ # print response
+ # print response.get("Id")
+ # print response.get("Warnings")
+ if( str( response.get( "Warnings" ) ) == 'None' ):
+ main.log.info( "Created container for node: " + onosNode + "; container id is: " + response.get( "Id" ) )
+ return ( main.TRUE, response.get( "Id" ) )
else:
main.log.info( "Noticed warnings during create" )
- return ( main.FALSE, null)
+ return ( main.FALSE, None )
except Exception:
main.log.exception( self.name + ": Uncaught exception!" )
main.cleanAndExit()
@@ -157,11 +157,11 @@
main.log.info( "Noticed warnings during stop" )
return main.FALSE
except errors.NotFound:
- main.log.info( ctName + " not found! Continue on tests...")
+ main.log.info( ctName + " not found! Continue on tests..." )
return main.TRUE
except Exception:
main.log.exception( self.name + ": Uncaught exception!" )
- #main.cleanAndExit()
+ # main.cleanAndExit()
def dockerRestartCT( self, ctName ):
"""
@@ -181,27 +181,27 @@
main.log.exception( self.name + ": Uncaught exception!" )
main.cleanAndExit()
- def dockerCheckCTName( self, ctName):
+ def dockerCheckCTName( self, ctName ):
"""
Check Docker container status
"""
try:
main.log.info( self.name +
": Checking Docker Status for CT with 'Names' " + ctName )
- namelist = [response["Names"] for response in self.dockerClient.containers(all=True) if not []]
- main.log.info("Name list is: " + str(namelist) )
- if( [ctName] in namelist):
+ namelist = [ response[ "Names" ] for response in self.dockerClient.containers( all=True ) if not [] ]
+ main.log.info( "Name list is: " + str( namelist ) )
+ if( [ ctName ] in namelist ):
main.log.info( "Container " + ctName + " exists" )
return main.TRUE
else:
main.log.info( "Container " + ctName + " does not exist" )
return main.FALSE
except errors.NotFound:
- main.log.warn( ctName + "not found! Continue with the tests...")
+ main.log.warn( ctName + " not found! Continue with the tests..." )
return main.FALSE
except Exception:
main.log.exception( self.name + ": Uncaught exception! Continue tests..." )
- #main.cleanAndExit()
+ # main.cleanAndExit()
def dockerRemoveCT( self, ctName ):
"""
@@ -215,15 +215,15 @@
main.log.info( "Removed container for node: " + ctName )
return main.TRUE
else:
- main.log.info( "Noticed warnings during Remove " + ctName)
+ main.log.info( "Noticed warnings during Remove " + ctName )
return main.FALSE
- main.log.exception(self.name + ": not found, continuing...")
+ main.log.exception( self.name + ": not found, continuing..." )
except errors.NotFound:
- main.log.warn( ctName + "not found! Continue with the tests...")
+ main.log.warn( ctName + " not found! Continue with the tests..." )
return main.TRUE
except Exception:
main.log.exception( self.name + ": Uncaught exception! Continuing..." )
- #main.cleanAndExit()
+ # main.cleanAndExit()
def dockerRemoveImage( self, imageRepoTag=None ):
"""
@@ -234,13 +234,13 @@
main.log.info( "No docker image found" )
return rmResult
else:
- imageList = [ image["Id"] for image in self.dockerClient.images()
- if image["RepoTags"] is None
- or imageRepoTag in image["RepoTags"] ]
+ imageList = [ image[ "Id" ] for image in self.dockerClient.images()
+ if image[ "RepoTags" ] is None
+ or imageRepoTag in image[ "RepoTags" ] ]
for id in imageList:
try:
main.log.info( self.name + ": Removing Docker image " + id )
- response = self.dockerClient.remove_image(id, force = True)
+ response = self.dockerClient.remove_image( id, force = True )
if response is None:
main.log.info( "Removed Docker image: " + id )
rmResult = rmResult and main.TRUE
@@ -248,12 +248,12 @@
main.log.info( "Noticed warnings during Remove " + id )
rmResult = rmResult and main.FALSE
except errors.NotFound:
- main.log.warn( image + "not found! Continue with the tests...")
+ main.log.warn( image + "not found! Continue with the tests..." )
rmResult = rmResult and main.TRUE
except Exception:
main.log.exception( self.name + ": Uncaught exception! Continuing..." )
rmResult = rmResult and main.FALSE
- #main.cleanAndExit()
+ # main.cleanAndExit()
return rmResult
def fetchLatestClusterFile( self, branch="master" ):
@@ -263,7 +263,7 @@
try:
command = "wget -N https://raw.githubusercontent.com/opennetworkinglab/\
onos/" + branch + "/tools/package/bin/onos-form-cluster"
- subprocess.call( command ) # output checks are missing for now
+ subprocess.call( command ) # output checks are missing for now
command = "chmod u+x " + "onos-form-cluster"
subprocess.call( command )
return main.TRUE
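Note that subprocess.call() receives a single string here; without shell=True that raises OSError, because the whole string is treated as the executable name. A sketch of the safer argv-list form, same URL scheme as above with the branch as a parameter:

    import subprocess

    def fetchClusterScript( branch="master" ):
        url = ( "https://raw.githubusercontent.com/opennetworkinglab/onos/" +
                branch + "/tools/package/bin/onos-form-cluster" )
        # Passing argv as a list avoids the shell entirely.
        if subprocess.call( [ "wget", "-N", url ] ) != 0:
            return False
        return subprocess.call( [ "chmod", "u+x", "onos-form-cluster" ] ) == 0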
@@ -276,7 +276,7 @@
Form ONOS cluster for IP addresses in onosIPs list
"""
try:
- onosIPs = " ".join(onosIPs)
+ onosIPs = " ".join( onosIPs )
command = "{}/onos-form-cluster -u {} -p {} {}".format( cmdPath,
user,
passwd,
@@ -285,7 +285,7 @@
if result == 0:
return main.TRUE
else:
- main.log.info("Something is not right in forming cluster>")
+ main.log.info( "Something is not right in forming cluster>" )
return main.FALSE
except Exception:
main.log.exception( self.name + ": Uncaught exception!" )
@@ -297,11 +297,10 @@
"""
try:
output = self.dockerClient.inspect_container( ctName )
- nodeIP = output['NetworkSettings']['IPAddress']
- main.log.info( " Docker IP " + str(nodeIP) )
- return str(nodeIP)
+ nodeIP = output[ 'NetworkSettings' ][ 'IPAddress' ]
+ main.log.info( " Docker IP " + str( nodeIP ) )
+ return str( nodeIP )
except Exception:
main.log.exception( self.name + ": Uncaught exception!" )
main.cleanAndExit()
-
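inspect_container() returns the daemon's full JSON for the container; the driver only wants NetworkSettings.IPAddress. A minimal sketch against the same low-level client (on user-defined networks the address lives under NetworkSettings.Networks instead, which this driver does not handle):

    def containerIP( client, ctName ):
        settings = client.inspect_container( ctName )[ 'NetworkSettings' ]
        # An empty string means no address on the default bridge.
        return settings.get( 'IPAddress' ) or None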
diff --git a/TestON/drivers/common/api/fvtapidriver.py b/TestON/drivers/common/api/fvtapidriver.py
index 4f20c89..2a1c0cb 100644
--- a/TestON/drivers/common/api/fvtapidriver.py
+++ b/TestON/drivers/common/api/fvtapidriver.py
@@ -188,4 +188,3 @@
# self.logfile_handler.close()
return main.TRUE
-
diff --git a/TestON/drivers/common/apidriver.py b/TestON/drivers/common/apidriver.py
index ec416ee..3e5615f 100644
--- a/TestON/drivers/common/apidriver.py
+++ b/TestON/drivers/common/apidriver.py
@@ -44,4 +44,3 @@
super( API, self ).connect()
return main.TRUE
-
diff --git a/TestON/drivers/common/cli/dpclidriver.py b/TestON/drivers/common/cli/dpclidriver.py
index 2386c45..a152273 100644
--- a/TestON/drivers/common/cli/dpclidriver.py
+++ b/TestON/drivers/common/cli/dpclidriver.py
@@ -38,9 +38,9 @@
self.name = self.options[ 'name' ]
self.handle = super( DPCliDriver, self ).connect( user_name=self.user_name,
- ip_address=self.ip_address,
- port=self.port,
- pwd=self.pwd )
+ ip_address=self.ip_address,
+ port=self.port,
+ pwd=self.pwd )
if self.handle:
return self.handle
@@ -148,7 +148,7 @@
self.handle.sendline( "sudo fping -S " + str( netsrc ) + "." +
str( netstrt ) + ".1.1 -f /tmp/ip_table" +
str( netdst ) + ".txt" )
- while 1:
+ while True:
i = self.handle.expect( [
"reachable",
"unreachable",
@@ -210,4 +210,3 @@
main.log.exception( "Connection failed to the host" )
response = main.FALSE
return response
-
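The while 1 to while True change above sits in the standard pexpect idiom: expect() takes a list of patterns and returns the index of whichever fired, so the loop consumes fping output until EOF or a timeout breaks it. A self-contained sketch of that idiom, assuming fping's plain "is alive" / "is unreachable" output and placeholder target addresses:

    import pexpect

    child = pexpect.spawn( 'fping 10.0.0.1 10.0.0.2' )
    reachable, unreachable = 0, 0
    while True:
        # The return value tells us which pattern matched.
        i = child.expect( [ 'alive', 'unreachable',
                            pexpect.EOF, pexpect.TIMEOUT ], timeout=10 )
        if i == 0:
            reachable += 1
        elif i == 1:
            unreachable += 1
        else:
            break  # EOF or timeout ends the loop
    print "reachable=%d unreachable=%d" % ( reachable, unreachable )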
diff --git a/TestON/drivers/common/cli/emulator/lincoemininetdriver.py b/TestON/drivers/common/cli/emulator/lincoemininetdriver.py
index bfa466f..df1abd7 100644
--- a/TestON/drivers/common/cli/emulator/lincoemininetdriver.py
+++ b/TestON/drivers/common/cli/emulator/lincoemininetdriver.py
@@ -32,6 +32,7 @@
class LincOEMininetDriver( MininetCliDriver ):
+
def runOpticalMnScript( self, onosDirectory = 'onos', ctrllerIP = None, topology = 'opticalTest' ):
import time
import types
@@ -49,14 +50,14 @@
$OC1 can be accepted
"""
try:
- if ctrllerIP == None:
+ if ctrllerIP is None:
main.log.error( "You need to specify the IP" )
return main.FALSE
else:
controller = ''
if isinstance( ctrllerIP, types.ListType ):
for i in xrange( len( ctrllerIP ) ):
- controller += ctrllerIP[i] + ' '
+ controller += ctrllerIP[ i ] + ' '
main.log.info( "Mininet topology is being loaded with " +
"controllers: " + controller )
elif isinstance( ctrllerIP, types.StringType ):
@@ -70,12 +71,12 @@
cmd = "sudo -E python {0} {1}".format( topoFile, controller )
main.log.info( self.name + ": cmd = " + cmd )
self.handle.sendline( cmd )
- lincStart = self.handle.expect( [ "mininet>", pexpect.TIMEOUT ],timeout=120 )
+ lincStart = self.handle.expect( [ "mininet>", pexpect.TIMEOUT ], timeout=120 )
if lincStart == 1:
self.handle.sendline( "\x03" )
self.handle.sendline( "sudo mn -c" )
self.handle.sendline( cmd )
- lincStart = self.handle.expect( [ "mininet>", pexpect.TIMEOUT ],timeout=120 )
+ lincStart = self.handle.expect( [ "mininet>", pexpect.TIMEOUT ], timeout=120 )
if lincStart == 1:
main.log.error( "OpticalTest.py failed to start." )
return main.FALSE
@@ -85,7 +86,7 @@
main.log.error( self.name + ": " + self.handle.before )
return main.FALSE
except Exception:
- main.log.exception(self.name + ": Uncaught exception!")
+ main.log.exception( self.name + ": Uncaught exception!" )
main.cleanAndExit()
return main.FALSE
@@ -120,7 +121,7 @@
main.log.error( self.name + ": " + self.handle.before )
main.cleanAndExit()
except Exception:
- main.log.exception(self.name + ": Uncaught exception!")
+ main.log.exception( self.name + ": Uncaught exception!" )
main.cleanAndExit()
main.log.info( self.name + ": Ping Response: " + response )
if re.search( ',\s0\%\spacket\sloss', response ):
diff --git a/TestON/drivers/common/cli/emulator/mininetclidriver.py b/TestON/drivers/common/cli/emulator/mininetclidriver.py
index d69fbeb..25cafe0 100644
--- a/TestON/drivers/common/cli/emulator/mininetclidriver.py
+++ b/TestON/drivers/common/cli/emulator/mininetclidriver.py
@@ -45,6 +45,7 @@
class MininetCliDriver( Emulator ):
+
"""
MininetCliDriver is the basic driver which will handle
the Mininet functions"""
@@ -184,7 +185,7 @@
main.log.info(
"Starting Mininet from topo file " +
topoFile )
- cmdString += "-E python " + topoFile + " "
+ cmdString += "-E python " + topoFile + " "
if args is None:
args = ''
# TODO: allow use of args from .topo file?
@@ -434,7 +435,6 @@
main.log.exception( self.name + ": Uncaught exception!" )
main.cleanAndExit()
-
def pingallHosts( self, hostList, wait=1 ):
"""
Ping all specified IPv4 hosts
@@ -462,7 +462,7 @@
pingList = hostList[ :listIndex ] + \
hostList[ ( listIndex + 1 ): ]
- pingResponse += str(str(host) + " -> ")
+ pingResponse += str( str( host ) + " -> " )
for temp in pingList:
# Current host pings all other hosts specified
@@ -471,14 +471,14 @@
self.handle.expect( "mininet>", timeout=wait + 1 )
response = self.handle.before
if re.search( ',\s0\%\spacket\sloss', response ):
- pingResponse += str(" h" + str( temp[1:] ))
+ pingResponse += str( " h" + str( temp[ 1: ] ) )
else:
pingResponse += " X"
# One of the host to host pair is unreachable
isReachable = main.FALSE
failedPings += 1
pingResponse += "\n"
- main.log.info( pingResponse + "Failed pings: " + str(failedPings) )
+ main.log.info( pingResponse + "Failed pings: " + str( failedPings ) )
return isReachable
except pexpect.TIMEOUT:
main.log.exception( self.name + ": TIMEOUT exception" )
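pingallHosts() above builds one report line per host of the form "h1 -> h2 X h4", marking unreachable targets with X and counting failures. A condensed sketch of that pairwise loop, independent of Mininet, assuming the caller supplies a ping( src, dst ) predicate:

    def pingMatrix( hosts, ping ):
        # ping( src, dst ) -> bool is assumed to be supplied by the caller.
        report, failed = "", 0
        for src in hosts:
            report += str( src ) + " ->"
            for dst in hosts:
                if src == dst:
                    continue
                if ping( src, dst ):
                    report += " " + str( dst )
                else:
                    report += " X"
                    failed += 1
            report += "\n"
        return report, failed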
@@ -513,23 +513,23 @@
pingList = hostList[ :listIndex ] + \
hostList[ ( listIndex + 1 ): ]
- pingResponse += str(str(host) + " -> ")
+ pingResponse += str( str( host ) + " -> " )
for temp in pingList:
# Current host pings all other hosts specified
- pingCmd = str( host ) + cmd + str( self.getIPAddress(temp,proto='IPv6') )
+ pingCmd = str( host ) + cmd + str( self.getIPAddress( temp, proto='IPv6' ) )
self.handle.sendline( pingCmd )
self.handle.expect( "mininet>", timeout=wait + 1 )
response = self.handle.before
if re.search( ',\s0\%\spacket\sloss', response ):
- pingResponse += str(" h" + str( temp[1:] ))
+ pingResponse += str( " h" + str( temp[ 1: ] ) )
else:
pingResponse += " X"
# One of the host to host pair is unreachable
isReachable = main.FALSE
failedPings += 1
pingResponse += "\n"
- main.log.info( pingResponse + "Failed pings: " + str(failedPings) )
+ main.log.info( pingResponse + "Failed pings: " + str( failedPings ) )
return isReachable
except pexpect.TIMEOUT:
@@ -549,7 +549,7 @@
Currently the only supported Params: SRC, TARGET, and WAIT
"""
args = utilities.parse_args( [ "SRC", "TARGET", 'WAIT' ], **pingParams )
- wait = args['WAIT']
+ wait = args[ 'WAIT' ]
wait = int( wait if wait else 1 )
command = args[ "SRC" ] + " ping " + \
args[ "TARGET" ] + " -c 1 -i 1 -W " + str( wait ) + " "
@@ -594,7 +594,7 @@
Example: main.Mininet1.ping6pair( src="h1", target="1000::2" )
"""
args = utilities.parse_args( [ "SRC", "TARGET", 'WAIT' ], **pingParams )
- wait = args['WAIT']
+ wait = args[ 'WAIT' ]
wait = int( wait if wait else 1 )
command = args[ "SRC" ] + " ping6 " + \
args[ "TARGET" ] + " -c 1 -i 1 -W " + str( wait ) + " "
@@ -838,8 +838,8 @@
return main.TRUE
except pexpect.TIMEOUT:
- main.log.error(self.name + ": TIMEOUT exception found")
- main.log.error(self.name + ": " + self.handle.before)
+ main.log.error( self.name + ": TIMEOUT exception found" )
+ main.log.error( self.name + ": " + self.handle.before )
main.cleanAndExit()
except pexpect.EOF:
main.log.error( self.name + ": EOF exception found" )
@@ -877,7 +877,7 @@
self.handle.expect( "mininet>" )
# Determine ip and mac address of the host-oldSw interface
- cmd = "px ipaddr = " + str(IP)
+ cmd = "px ipaddr = " + str( IP )
print "cmd3= ", cmd
self.handle.sendline( cmd )
self.handle.expect( "mininet>" )
@@ -888,7 +888,7 @@
self.handle.expect( "mininet>" )
# Detach interface between oldSw-host
- cmd = "px " + oldSw + ".detach( sintf )"
+ cmd = "px " + oldSw + ".detach(sintf)"
print "cmd4= ", cmd
self.handle.sendline( cmd )
self.handle.expect( "mininet>" )
@@ -907,31 +907,31 @@
self.handle.expect( "mininet>" )
# Attach interface between newSw-host
- cmd = "px " + newSw + ".attach( sintf )"
+ cmd = "px " + newSw + ".attach(sintf)"
print "cmd6= ", cmd
self.handle.sendline( cmd )
self.handle.expect( "mininet>" )
# Set macaddress of the host-newSw interface
- cmd = "px " + host + ".setMAC( mac = macaddr, intf = hintf)"
+ cmd = "px " + host + ".setMAC(mac = macaddr, intf = hintf)"
print "cmd7 = ", cmd
self.handle.sendline( cmd )
self.handle.expect( "mininet>" )
# Set ipaddress of the host-newSw interface
- cmd = "px " + host + ".setIP( ip = ipaddr, intf = hintf)"
+ cmd = "px " + host + ".setIP(ip = ipaddr, intf = hintf)"
print "cmd8 = ", cmd
self.handle.sendline( cmd )
self.handle.expect( "mininet>" )
cmd = host + " ifconfig"
- print "cmd9 =",cmd
- response = self.execute( cmd = cmd, prompt="mininet>" ,timeout=10 )
+ print "cmd9 =", cmd
+ response = self.execute( cmd = cmd, prompt="mininet>", timeout=10 )
print response
pattern = "h\d-eth([\w])"
ipAddressSearch = re.search( pattern, response )
- print ipAddressSearch.group(1)
- intf= host + "-eth" + str(ipAddressSearch.group(1))
+ print ipAddressSearch.group( 1 )
+ intf = host + "-eth" + str( ipAddressSearch.group( 1 ) )
cmd = host + " ip -6 addr add %s dev %s" % ( IP, intf )
print "cmd10 = ", cmd
self.handle.sendline( cmd )
@@ -952,8 +952,8 @@
return main.TRUE
except pexpect.TIMEOUT:
- main.log.error(self.name + ": TIMEOUT exception found")
- main.log.error(self.name + ": " + self.handle.before)
+ main.log.error( self.name + ": TIMEOUT exception found" )
+ main.log.error( self.name + ": " + self.handle.before )
main.cleanAndExit()
except pexpect.EOF:
main.log.error( self.name + ": EOF exception found" )
@@ -982,8 +982,8 @@
newIP )
return main.TRUE
except pexpect.TIMEOUT:
- main.log.error(self.name + ": TIMEOUT exception found")
- main.log.error(self.name + ": " + self.handle.before)
+ main.log.error( self.name + ": TIMEOUT exception found" )
+ main.log.error( self.name + ": " + self.handle.before )
main.cleanAndExit()
except pexpect.EOF:
main.log.error( self.name + ": EOF exception found" )
@@ -1011,8 +1011,8 @@
newGW )
return main.TRUE
except pexpect.TIMEOUT:
- main.log.error(self.name + ": TIMEOUT exception found")
- main.log.error(self.name + ": " + self.handle.before)
+ main.log.error( self.name + ": TIMEOUT exception found" )
+ main.log.error( self.name + ": " + self.handle.before )
main.cleanAndExit()
except pexpect.EOF:
main.log.error( self.name + ": EOF exception found" )
@@ -1040,8 +1040,8 @@
macaddr )
return main.TRUE
except pexpect.TIMEOUT:
- main.log.error(self.name + ": TIMEOUT exception found")
- main.log.error(self.name + ": " + self.handle.before)
+ main.log.error( self.name + ": TIMEOUT exception found" )
+ main.log.error( self.name + ": " + self.handle.before )
main.cleanAndExit()
except pexpect.EOF:
main.log.error( self.name + ": EOF exception found" )
@@ -1064,8 +1064,8 @@
main.log.info( host + " arp -an = " + response )
return main.TRUE
except pexpect.TIMEOUT:
- main.log.error(self.name + ": TIMEOUT exception found")
- main.log.error(self.name + ": " + self.handle.before)
+ main.log.error( self.name + ": TIMEOUT exception found" )
+ main.log.error( self.name + ": " + self.handle.before )
main.cleanAndExit()
except pexpect.EOF:
main.log.error( self.name + ": EOF exception found" )
@@ -1138,7 +1138,7 @@
else:
main.log.error( "Connection failed to the host" )
- def getIPAddress( self, host , proto='IPV4'):
+ def getIPAddress( self, host , proto='IPV4' ):
"""
Verifies the host's ip configured or not."""
if self.handle:
@@ -1214,8 +1214,8 @@
response = self.handle.before
return response
except pexpect.TIMEOUT:
- main.log.error(self.name + ": TIMEOUT exception found")
- main.log.error(self.name + ": " + self.handle.before)
+ main.log.error( self.name + ": TIMEOUT exception found" )
+ main.log.error( self.name + ": " + self.handle.before )
main.cleanAndExit()
except pexpect.EOF:
main.log.error( self.name + ": EOF exception found" )
@@ -1307,7 +1307,7 @@
main.cleanAndExit()
return response
- def iperftcpAll(self, hosts, timeout=6):
+ def iperftcpAll( self, hosts, timeout=6 ):
'''
Runs the iperftcp function with a given set of hosts and specified timeout.
@@ -1319,13 +1319,13 @@
for host1 in hosts:
for host2 in hosts:
if host1 != host2:
- if self.iperftcp(host1, host2, timeout) == main.FALSE:
- main.log.error(self.name + ": iperftcp test failed for " + host1 + " and " + host2)
+ if self.iperftcp( host1, host2, timeout ) == main.FALSE:
+ main.log.error( self.name + ": iperftcp test failed for " + host1 + " and " + host2 )
except Exception:
main.log.exception( self.name + ": Uncaught exception!" )
main.cleanAndExit()
- def iperftcp(self, host1="h1", host2="h2", timeout=6):
+ def iperftcp( self, host1="h1", host2="h2", timeout=6 ):
'''
Creates an iperf TCP test between two hosts. Returns main.TRUE if test results
are valid.
@@ -1344,26 +1344,26 @@
# checks if there are results in the mininet response
if "Results:" in response:
- main.log.report(self.name + ": iperf test completed")
+ main.log.report( self.name + ": iperf test completed" )
# parse the mn results
- response = response.split("\r\n")
- response = response[len(response)-2]
- response = response.split(": ")
- response = response[len(response)-1]
- response = response.replace("[", "")
- response = response.replace("]", "")
- response = response.replace("\'", "")
+ response = response.split( "\r\n" )
+ response = response[ len( response )-2 ]
+ response = response.split( ": " )
+ response = response[ len( response )-1 ]
+ response = response.replace( "[", "" )
+ response = response.replace( "]", "" )
+ response = response.replace( "\'", "" )
# this is the bandwidth to and from the two hosts
- bandwidth = response.split(", ")
+ bandwidth = response.split( ", " )
# there should be two elements in the bandwidth list
# ['host1 to host2', 'host2 to host1"]
- if len(bandwidth) == 2:
- main.log.report(self.name + ": iperf test successful")
+ if len( bandwidth ) == 2:
+ main.log.report( self.name + ": iperf test successful" )
return main.TRUE
else:
- main.log.error(self.name + ": invalid iperf results")
+ main.log.error( self.name + ": invalid iperf results" )
return main.FALSE
else:
main.log.error( self.name + ": iperf test failed" )
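The parsing above depends on the exact shape of Mininet's iperf output, whose second-to-last line looks like "Results: ['9.3 Mbits/sec', '9.5 Mbits/sec']"; stripping the brackets and quotes and splitting on ", " should then yield exactly two bandwidth strings. A standalone sketch of that parse under the same assumption about the format:

    def parseIperf( response ):
        # Expected tail: "Results: ['9.3 Mbits/sec', '9.5 Mbits/sec']"
        line = response.split( "\r\n" )[ -2 ]
        line = line.split( ": " )[ -1 ]
        for ch in "[]'":
            line = line.replace( ch, "" )
        bandwidth = line.split( ", " )
        return bandwidth if len( bandwidth ) == 2 else None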
@@ -1385,22 +1385,22 @@
main.log.exception( self.name + ": Uncaught exception!" )
main.cleanAndExit()
- def iperftcpipv6(self, host1="h1", host2="h2", timeout=50):
+ def iperftcpipv6( self, host1="h1", host2="h2", timeout=50 ):
main.log.info( self.name + ": Simple iperf TCP test between two hosts" )
try:
IP1 = self.getIPAddress( host1, proto='IPV6' )
- cmd1 = host1 +' iperf -V -sD -B '+ str(IP1)
+ cmd1 = host1 + ' iperf -V -sD -B ' + str( IP1 )
self.handle.sendline( cmd1 )
- outcome1 = self.handle.expect( "mininet>")
- cmd2 = host2 +' iperf -V -c '+ str(IP1) +' -t 5'
+ outcome1 = self.handle.expect( "mininet>" )
+ cmd2 = host2 + ' iperf -V -c ' + str( IP1 ) + ' -t 5'
self.handle.sendline( cmd2 )
- outcome2 = self.handle.expect( "mininet>")
+ outcome2 = self.handle.expect( "mininet>" )
response1 = self.handle.before
response2 = self.handle.after
- print response1,response2
- pattern = "connected with "+ str(IP1)
+ print response1, response2
+ pattern = "connected with " + str( IP1 )
if pattern in response1:
- main.log.report(self.name + ": iperf test completed")
+ main.log.report( self.name + ": iperf test completed" )
return main.TRUE
else:
main.log.error( self.name + ": iperf test failed" )
@@ -1420,7 +1420,7 @@
main.log.exception( self.name + ": Uncaught exception!" )
main.cleanAndExit()
- def iperfudpAll(self, hosts, bandwidth="10M"):
+ def iperfudpAll( self, hosts, bandwidth="10M" ):
'''
Runs the iperfudp function with a given set of hosts and specified
bandwidth
@@ -1432,17 +1432,16 @@
for host1 in hosts:
for host2 in hosts:
if host1 != host2:
- if self.iperfudp(host1, host2, bandwidth) == main.FALSE:
- main.log.error(self.name + ": iperfudp test failed for " + host1 + " and " + host2)
+ if self.iperfudp( host1, host2, bandwidth ) == main.FALSE:
+ main.log.error( self.name + ": iperfudp test failed for " + host1 + " and " + host2 )
except TypeError:
- main.log.exception(self.name + ": Object not as expected")
+ main.log.exception( self.name + ": Object not as expected" )
return main.FALSE
except Exception:
main.log.exception( self.name + ": Uncaught exception!" )
main.cleanAndExit()
- def iperfudp( self, bandwidth="10M", host1="h1", host2="h2"):
-
+ def iperfudp( self, bandwidth="10M", host1="h1", host2="h2" ):
'''
Creates an iperf UDP test with a specific bandwidth.
Returns true if results are valid.
@@ -1450,47 +1449,47 @@
@param:
bandwidth: the targeted bandwidth, in megabits ('M'), to run the test
'''
- main.log.info(self.name + ": Simple iperf UDP test between two hosts")
+ main.log.info( self.name + ": Simple iperf UDP test between two hosts" )
try:
# setup the mininet command
cmd = 'iperfudp ' + bandwidth + " " + host1 + " " + host2
- self.handle.sendline(cmd)
- self.handle.expect("mininet>")
+ self.handle.sendline( cmd )
+ self.handle.expect( "mininet>" )
response = self.handle.before
# check if there are results in the mininet response
if "Results:" in response:
- main.log.report(self.name + ": iperfudp test completed")
+ main.log.report( self.name + ": iperfudp test completed" )
# parse the results
- response = response.split("\r\n")
- response = response[len(response)-2]
- response = response.split(": ")
- response = response[len(response)-1]
- response = response.replace("[", "")
- response = response.replace("]", "")
- response = response.replace("\'", "")
+ response = response.split( "\r\n" )
+ response = response[ len( response )-2 ]
+ response = response.split( ": " )
+ response = response[ len( response )-1 ]
+ response = response.replace( "[", "" )
+ response = response.replace( "]", "" )
+ response = response.replace( "\'", "" )
- mnBandwidth = response.split(", ")
+ mnBandwidth = response.split( ", " )
# check to see if there are at least three entries
# ['bandwidth', 'host1 to host2', 'host2 to host1']
- if len(mnBandwidth) == 3:
+ if len( mnBandwidth ) == 3:
# if one entry is blank then something is wrong
for item in mnBandwidth:
if item == "":
- main.log.error(self.name + ": Could not parse iperf output")
- main.log.error(self.name + ": invalid iperfudp results")
+ main.log.error( self.name + ": Could not parse iperf output" )
+ main.log.error( self.name + ": invalid iperfudp results" )
return main.FALSE
# otherwise results are valid
- main.log.report(self.name + ": iperfudp test successful")
+ main.log.report( self.name + ": iperfudp test successful" )
return main.TRUE
else:
- main.log.error(self.name + ": invalid iperfudp results")
+ main.log.error( self.name + ": invalid iperfudp results" )
return main.FALSE
except pexpect.TIMEOUT:
- main.log.error(self.name + ": TIMEOUT exception found")
- main.log.error(self.name + ": " + self.handle.before)
+ main.log.error( self.name + ": TIMEOUT exception found" )
+ main.log.error( self.name + ": " + self.handle.before )
main.cleanAndExit()
except pexpect.EOF:
main.log.error( self.name + ": EOF exception found" )
@@ -1579,8 +1578,8 @@
self.handle.sendline( command )
self.handle.expect( "mininet>" )
except pexpect.TIMEOUT:
- main.log.error(self.name + ": TIMEOUT exception found")
- main.log.error(self.name + ": " + self.handle.before)
+ main.log.error( self.name + ": TIMEOUT exception found" )
+ main.log.error( self.name + ": " + self.handle.before )
main.cleanAndExit()
except pexpect.EOF:
main.log.error( self.name + ": EOF exception found" )
@@ -1622,7 +1621,7 @@
args = utilities.parse_args( [ "SW", "INTF" ], **yankargs )
sw = args[ "SW" ] if args[ "SW" ] is not None else ""
intf = args[ "INTF" ] if args[ "INTF" ] is not None else ""
- command = "py " + str( sw ) + '.detach("' + str(intf) + '")'
+ command = "py " + str( sw ) + '.detach("' + str( intf ) + '")'
try:
response = self.execute(
cmd=command,
@@ -1644,7 +1643,7 @@
args = utilities.parse_args( [ "SW", "INTF" ], **plugargs )
sw = args[ "SW" ] if args[ "SW" ] is not None else ""
intf = args[ "INTF" ] if args[ "INTF" ] is not None else ""
- command = "py " + str( sw ) + '.attach("' + str(intf) + '")'
+ command = "py " + str( sw ) + '.attach("' + str( intf ) + '")'
try:
response = self.execute(
cmd=command,
@@ -1966,11 +1965,11 @@
candidateSwitches.append( switchName )
else:
graphDict = self.getGraphDict( timeout=timeout, useId=False )
- if graphDict == None:
+ if graphDict is None:
return None
self.graph.update( graphDict )
candidateSwitches = self.graph.getNonCutVertices()
- if candidateSwitches == None:
+ if candidateSwitches is None:
return None
elif len( candidateSwitches ) == 0:
main.log.info( self.name + ": No candidate switch for deletion" )
@@ -2000,7 +1999,7 @@
"""
try:
switch = self.getSwitchRandom( timeout, nonCut )
- if switch == None:
+ if switch is None:
return None
else:
deletionResult = self.delSwitch( switch )
@@ -2105,11 +2104,11 @@
candidateLinks.append( [ link[ 'node1' ], link[ 'node2' ] ] )
else:
graphDict = self.getGraphDict( timeout=timeout, useId=False )
- if graphDict == None:
+ if graphDict is None:
return None
self.graph.update( graphDict )
candidateLinks = self.graph.getNonCutEdges()
- if candidateLinks == None:
+ if candidateLinks is None:
return None
elif len( candidateLinks ) == 0:
main.log.info( self.name + ": No candidate link for deletion" )
@@ -2139,7 +2138,7 @@
"""
try:
link = self.getLinkRandom( timeout, nonCut )
- if link == None:
+ if link is None:
return None
else:
deletionResult = self.delLink( link[ 0 ], link[ 1 ] )
@@ -2302,8 +2301,8 @@
fileName +
"\" | grep -v grep | awk '{print $2}'\`" )
except pexpect.TIMEOUT:
- main.log.error(self.name + ": TIMEOUT exception found")
- main.log.error(self.name + ": " + self.handle.before)
+ main.log.error( self.name + ": TIMEOUT exception found" )
+ main.log.error( self.name + ": " + self.handle.before )
main.cleanAndExit()
except pexpect.EOF:
main.log.error( self.name + ": EOF exception found" )
@@ -2331,7 +2330,7 @@
ethDevice = '-I ' + ethDevice + ' '
cmd = srcHost + " arping -c1 "
if noResult:
- cmd += "-w10 " # If we don't want the actural arping result, set -w10, arping will exit after 10 ms.
+ cmd += "-w10 " # If we don't want the actural arping result, set -w10, arping will exit after 10 ms.
cmd += ethDevice + dstHost
try:
if output:
@@ -2416,11 +2415,11 @@
assert flowTable1, "flowTable1 is empty or None"
assert flowTable2, "flowTable2 is empty or None"
returnValue = main.TRUE
- if len(flowTable1) != len(flowTable2):
+ if len( flowTable1 ) != len( flowTable2 ):
main.log.warn( "Flow table lengths do not match" )
returnValue = main.FALSE
- dFields = ["n_bytes", "cookie", "n_packets", "duration"]
- for flow1, flow2 in zip(flowTable1, flowTable2):
+ dFields = [ "n_bytes", "cookie", "n_packets", "duration" ]
+ for flow1, flow2 in zip( flowTable1, flowTable2 ):
for field in dFields:
try:
flow1.pop( field )
@@ -2430,10 +2429,10 @@
flow2.pop( field )
except KeyError:
pass
- for i in range( len(flowTable1) ):
- if flowTable1[i] not in flowTable2:
+ for i in range( len( flowTable1 ) ):
+ if flowTable1[ i ] not in flowTable2:
main.log.warn( "Flow tables do not match:" )
- main.log.warn( "Old flow:\n{}\n not in new flow table".format( flowTable1[i] ) )
+ main.log.warn( "Old flow:\n{}\n not in new flow table".format( flowTable1[ i ] ) )
returnValue = main.FALSE
break
return returnValue
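The comparison above first drops the fields that legitimately differ between two dumps of the same table (byte and packet counters, duration, cookie) and then requires every remaining flow from the first dump to appear in the second. A minimal sketch of the same idea over plain dicts:

    VOLATILE = ( "n_bytes", "cookie", "n_packets", "duration" )

    def sameFlowTables( table1, table2 ):
        def strip( flows ):
            # Drop counters that change between dumps of the same table.
            return [ { k: v for k, v in f.items() if k not in VOLATILE }
                     for f in flows ]
        t1, t2 = strip( table1 ), strip( table2 )
        return len( t1 ) == len( t2 ) and all( f in t2 for f in t1 )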
@@ -2461,63 +2460,64 @@
for flow in flowTable:
jsonFlow = {}
# split up the fields of the flow
- parsedFlow = flow.split(", ")
+ parsedFlow = flow.split( ", " )
# get rid of any spaces in front of the field
- for i in range( len(parsedFlow) ):
- item = parsedFlow[i]
- if item[0] == " ":
- parsedFlow[i] = item[1:]
+ for i in range( len( parsedFlow ) ):
+ item = parsedFlow[ i ]
+ if item[ 0 ] == " ":
+ parsedFlow[ i ] = item[ 1: ]
# grab the selector and treatment from the parsed flow
# the last element is the selector and the treatment
- temp = parsedFlow.pop(-1)
+ temp = parsedFlow.pop( -1 )
# split up the selector and the treatment
- temp = temp.split(" ")
+ temp = temp.split( " " )
index = 0
# parse the flags
# NOTE: This only parses one flag
flag = {}
if version == "1.3":
- flag = {"flag":[temp[index]]}
+ flag = { "flag": [ temp[ index ] ] }
index += 1
# the first element is the selector and split it up
- sel = temp[index]
+ sel = temp[ index ]
index += 1
- sel = sel.split(",")
+ sel = sel.split( "," )
# the priority is stuck in the selector so put it back
# in the flow
- parsedFlow.append(sel.pop(0))
+ parsedFlow.append( sel.pop( 0 ) )
# parse selector
criteria = []
for item in sel:
# this is the type of the packet e.g. "arp"
if "=" not in item:
- criteria.append( {"type":item} )
+ criteria.append( { "type": item } )
else:
- field = item.split("=")
- criteria.append( {field[0]:field[1]} )
- selector = {"selector": {"criteria":sorted(criteria)} }
- treat = temp[index]
+ field = item.split( "=" )
+ criteria.append( { field[ 0 ]: field[ 1 ] } )
+ selector = { "selector": { "criteria": sorted( criteria ) } }
+ treat = temp[ index ]
# get rid of the action part e.g. "action=output:2"
# we will add it back later
- treat = treat.split("=")
- treat.pop(0)
+ treat = treat.split( "=" )
+ treat.pop( 0 )
# parse treatment
action = []
for item in treat:
- field = item.split(":")
- action.append( {field[0]:field[1]} )
+ field = item.split( ":" )
+ action.append( { field[ 0 ]: field[ 1 ] } )
# create the treatment field and add the actions
- treatment = {"treatment": {"action":sorted(action)} }
+ treatment = { "treatment": { "action": sorted( action ) } }
# parse the rest of the flow
for item in parsedFlow:
- field = item.split("=")
- jsonFlow.update( {field[0]:field[1]} )
+ field = item.split( "=" )
+ jsonFlow.update( { field[ 0 ]: field[ 1 ] } )
# add the treatment and the selector to the json flow
jsonFlow.update( selector )
jsonFlow.update( treatment )
jsonFlow.update( flag )
- if debug: main.log.debug( "\033[94mJson flow:\033[0m\n{}\n".format(jsonFlow) )
+ if debug:
+ main.log.debug( "\033[94mJson flow:\033[0m\n{}\n".format( jsonFlow ) )
# add the json flow to the json flow table
jsonFlowTable.append( jsonFlow )
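parseFlowTable() above turns each ovs-ofctl dump-flows line into a nested dict: the comma-separated key=value pairs become top-level fields, the match (selector) is carved out of the final space-separated token, and whatever follows actions= becomes the treatment. A much-reduced sketch of the same decomposition for a single OpenFlow 1.0-style line; the sample flow in the comment is illustrative only:

    def parseFlow( line ):
        # e.g. "cookie=0x0, duration=1.2s, table=0, priority=40000,arp actions=output:2"
        fields = [ f.strip() for f in line.split( ", " ) ]
        selAndAction = fields.pop( -1 ).split( " " )
        match = selAndAction[ 0 ].split( "," )
        # The priority rides along with the selector, so pull it back out.
        flow = { "priority": match.pop( 0 ).split( "=" )[ 1 ] }
        flow[ "selector" ] = [ m.split( "=" ) if "=" in m else m for m in match ]
        flow[ "treatment" ] = selAndAction[ 1 ].split( "=" )[ 1 ]
        for f in fields:
            key, value = f.split( "=" )
            flow[ key ] = value
        return flow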
@@ -2535,7 +2535,7 @@
main.log.exception( self.name + ": Uncaught exception!" )
main.cleanAndExit()
- def getFlowTable( self, sw, version="", debug=False):
+ def getFlowTable( self, sw, version="", debug=False ):
'''
Description: Returns the flow table(s) on a switch or switches in a list.
Each element is a flow.
@@ -2549,9 +2549,10 @@
'''
try:
switches = []
- if type(sw) is list:
- switches.extend(sw)
- else: switches.append(sw)
+ if isinstance( sw, list ):
+ switches.extend( sw )
+ else:
+ switches.append( sw )
flows = []
for s in switches:
@@ -2571,10 +2572,11 @@
# the first element is the command that was sent
# the second is the table header
# the last element is empty
- response = response[2:-1]
+ response = response[ 2:-1 ]
flows.extend( response )
- if debug: print "Flows:\n{}\n\n".format(flows)
+ if debug:
+ print "Flows:\n{}\n\n".format( flows )
return self.parseFlowTable( flows, version, debug )
@@ -2602,13 +2604,14 @@
try:
main.log.info( "Getting flows from Mininet" )
flows = self.getFlowTable( sw, version, debug )
- if flows == None:
+ if flows is None:
return main.ERROR
- if debug: print "flow ids:\n{}\n\n".format(flowId)
+ if debug:
+ print "flow ids:\n{}\n\n".format( flowId )
# Check flowId is a list or a string
- if type( flowId ) is str:
+ if isinstance( flowId, str ):
result = False
for f in flows:
if flowId in f.get( 'cookie' ):
@@ -2622,7 +2625,8 @@
# Save the IDs that are not in Mininet
absentIds = [ x for x in flowId if x not in mnFlowIds ]
- if debug: print "mn flow ids:\n{}\n\n".format(mnFlowIds)
+ if debug:
+ print "mn flow ids:\n{}\n\n".format( mnFlowIds )
# Print out the IDs that are not in Mininet
if absentIds:
@@ -2639,7 +2643,6 @@
main.log.exception( self.name + ": Uncaught exception!" )
main.cleanAndExit()
-
def startTcpdump( self, filename, intf="eth0", port="port 6653" ):
"""
Runs tcpdump on an interface and saves the file
@@ -2704,8 +2707,8 @@
self.handle.sendline( "" )
self.handle.expect( "mininet>" )
except pexpect.TIMEOUT:
- main.log.error(self.name + ": TIMEOUT exception found")
- main.log.error(self.name + ": " + self.handle.before)
+ main.log.error( self.name + ": TIMEOUT exception found" )
+ main.log.error( self.name + ": " + self.handle.before )
main.cleanAndExit()
except pexpect.EOF:
main.log.error( self.name + ": EOF exception found" )
@@ -2948,7 +2951,7 @@
"""
try:
self.update()
- response = self.links(timeout=timeout).split( '\n' )
+ response = self.links( timeout=timeout ).split( '\n' )
# Examples:
# s1-eth3<->s2-eth1 (OK OK)
@@ -3271,11 +3274,11 @@
main.log.info( output )
return hostResults
except pexpect.EOF:
- main.log.error(self.name + ": EOF exception found")
- main.log.error(self.name + ": " + self.handle.before)
+ main.log.error( self.name + ": EOF exception found" )
+ main.log.error( self.name + ": " + self.handle.before )
main.cleanAndExit()
except Exception:
- main.log.exception(self.name + ": Uncaught exception!")
+ main.log.exception( self.name + ": Uncaught exception!" )
main.cleanAndExit()
def getHostsOld( self ):
@@ -3304,8 +3307,8 @@
return hostList
except pexpect.TIMEOUT:
- main.log.error(self.name + ": TIMEOUT exception found")
- main.log.error(self.name + ": " + self.handle.before)
+ main.log.error( self.name + ": TIMEOUT exception found" )
+ main.log.error( self.name + ": " + self.handle.before )
main.cleanAndExit()
except pexpect.EOF:
main.log.error( self.name + ": EOF exception found" )
@@ -3357,8 +3360,8 @@
return switchList
except pexpect.TIMEOUT:
- main.log.error(self.name + ": TIMEOUT exception found")
- main.log.error(self.name + ": " + self.handle.before)
+ main.log.error( self.name + ": TIMEOUT exception found" )
+ main.log.error( self.name + ": " + self.handle.before )
main.cleanAndExit()
except pexpect.EOF:
main.log.error( self.name + ": EOF exception found" )
@@ -3414,7 +3417,7 @@
# Get port index from OVS
# The index extracted from port name may be inconsistent with ONOS
portIndex = -1
- if not nodeName1 in portDict.keys():
+ if nodeName1 not in portDict.keys():
portList = self.getOVSPorts( nodeName1 )
if len( portList ) == 0:
main.log.warn( self.name + ": No port found on switch " + nodeName1 )
@@ -3433,21 +3436,21 @@
else:
node1 = nodeName1
node2 = nodeName2
- if not node1 in graphDict.keys():
+ if node1 not in graphDict.keys():
if useId:
- graphDict[ node1 ] = { 'edges':{},
- 'dpid':switches[ nodeName1 ][ 'dpid' ],
- 'name':nodeName1,
- 'ports':switches[ nodeName1 ][ 'ports' ],
- 'swClass':switches[ nodeName1 ][ 'swClass' ],
- 'pid':switches[ nodeName1 ][ 'pid' ],
- 'options':switches[ nodeName1 ][ 'options' ] }
+ graphDict[ node1 ] = { 'edges': {},
+ 'dpid': switches[ nodeName1 ][ 'dpid' ],
+ 'name': nodeName1,
+ 'ports': switches[ nodeName1 ][ 'ports' ],
+ 'swClass': switches[ nodeName1 ][ 'swClass' ],
+ 'pid': switches[ nodeName1 ][ 'pid' ],
+ 'options': switches[ nodeName1 ][ 'options' ] }
else:
- graphDict[ node1 ] = { 'edges':{} }
+ graphDict[ node1 ] = { 'edges': {} }
else:
# Assert node2 is not connected to any current links of node1
assert node2 not in graphDict[ node1 ][ 'edges' ].keys()
- graphDict[ node1 ][ 'edges' ][ node2 ] = { 'port':portIndex }
+ graphDict[ node1 ][ 'edges' ][ node2 ] = { 'port': portIndex }
# Swap two nodes/ports
nodeName1, nodeName2 = nodeName2, nodeName1
port1, port2 = port2, port1
@@ -3484,8 +3487,8 @@
return main.TRUE
except pexpect.TIMEOUT:
- main.log.error(self.name + ": TIMEOUT exception found")
- main.log.error(self.name + ": " + self.handle.before)
+ main.log.error( self.name + ": TIMEOUT exception found" )
+ main.log.error( self.name + ": " + self.handle.before )
main.cleanAndExit()
except pexpect.EOF:
main.log.error( self.name + ": EOF exception found" )
@@ -3545,8 +3548,8 @@
return main.TRUE
except pexpect.TIMEOUT:
- main.log.error(self.name + ": TIMEOUT exception found")
- main.log.error(self.name + ": " + self.handle.before)
+ main.log.error( self.name + ": TIMEOUT exception found" )
+ main.log.error( self.name + ": " + self.handle.before )
main.cleanAndExit()
except pexpect.EOF:
main.log.error( self.name + ": EOF exception found" )
@@ -3572,8 +3575,8 @@
getattr( main, name )
except AttributeError:
# namespace is clear, creating component
- main.componentDictionary[name] = main.componentDictionary[self.name].copy()
- main.componentDictionary[name]['connect_order'] = str( int( main.componentDictionary[name]['connect_order'] ) + 1 )
+ main.componentDictionary[ name ] = main.componentDictionary[ self.name ].copy()
+ main.componentDictionary[ name ][ 'connect_order' ] = str( int( main.componentDictionary[ name ][ 'connect_order' ] ) + 1 )
main.componentInit( name )
except pexpect.EOF:
main.log.error( self.name + ": EOF exception found" )
@@ -3606,7 +3609,7 @@
# Delete component
delattr( main, name )
# Delete component from ComponentDictionary
- del( main.componentDictionary[name] )
+ del( main.componentDictionary[ name ] )
except pexpect.EOF:
main.log.error( self.name + ": EOF exception found" )
main.log.error( self.name + ": " + self.handle.before )
@@ -3656,25 +3659,24 @@
try:
cmd = devicename + " ifconfig " + intf + " " + status
self.handle.sendline( cmd )
- self.handle.expect("mininet>")
+ self.handle.expect( "mininet>" )
return main.TRUE
except pexpect.TIMEOUT:
- main.log.exception(self.name + ": Command timed out")
+ main.log.exception( self.name + ": Command timed out" )
return main.FALSE
except pexpect.EOF:
- main.log.exception(self.name + ": connection closed.")
+ main.log.exception( self.name + ": connection closed." )
main.cleanAndExit()
except TypeError:
- main.log.exception(self.name + ": TypeError")
+ main.log.exception( self.name + ": TypeError" )
main.cleanAndExit()
except Exception:
- main.log.exception(self.name + ": Uncaught exception!")
+ main.log.exception( self.name + ": Uncaught exception!" )
main.cleanAndExit()
else:
- main.log.warn("Interface status should be up or down!")
+ main.log.warn( "Interface status should be up or down!" )
return main.FALSE
-
if __name__ != "__main__":
sys.modules[ __name__ ] = MininetCliDriver()
diff --git a/TestON/drivers/common/cli/emulator/remotemininetdriver.py b/TestON/drivers/common/cli/emulator/remotemininetdriver.py
index 71de94c..41284cb 100644
--- a/TestON/drivers/common/cli/emulator/remotemininetdriver.py
+++ b/TestON/drivers/common/cli/emulator/remotemininetdriver.py
@@ -56,7 +56,7 @@
self.name = self.options[ 'name' ]
try:
- if os.getenv( str( self.ip_address ) ) != None:
+ if os.getenv( str( self.ip_address ) ) is not None:
self.ip_address = os.getenv( str( self.ip_address ) )
else:
main.log.info( self.name +
@@ -152,7 +152,7 @@
self.handle.expect( self.prompt )
return main.TRUE
except TypeError:
- main.log.exception(self.name + ": Object not as expected")
+ main.log.exception( self.name + ": Object not as expected" )
return main.FALSE
except pexpect.TIMEOUT:
main.log.exception( self.name + ": TIMEOUT exception found in pingLong" )
@@ -166,7 +166,6 @@
main.log.exception( self.name + ": Uncaught exception!" )
main.cleanAndExit()
-
def pingstatus( self, **pingParams ):
"""
Tails the respective ping output file and checks that
@@ -203,7 +202,7 @@
main.log.error( self.name + ": " + self.handle.before )
main.cleanAndExit()
except Exception:
- main.log.exception(self.name + ": Uncaught exception!")
+ main.log.exception( self.name + ": Uncaught exception!" )
main.cleanAndExit()
def pingKill( self, testONUser, testONIP ):
@@ -262,11 +261,11 @@
main.log.error( self.name + ": " + self.handle.before )
return main.FALSE
except pexpect.EOF:
- main.log.error(self.name + ": EOF exception found")
- main.log.error(self.name + ": " + self.handle.before)
+ main.log.error( self.name + ": EOF exception found" )
+ main.log.error( self.name + ": " + self.handle.before )
main.cleanAndExit()
except Exception:
- main.log.exception(self.name + ": Uncaught exception!")
+ main.log.exception( self.name + ": Uncaught exception!" )
main.cleanAndExit()
def pingHostOptical( self, **pingParams ):
@@ -336,11 +335,11 @@
main.lastResult = main.FALSE
return main.FALSE
except pexpect.EOF:
- main.log.error(self.name + ": EOF exception found")
- main.log.error(self.name + ": " + self.handle.before)
+ main.log.error( self.name + ": EOF exception found" )
+ main.log.error( self.name + ": " + self.handle.before )
main.cleanAndExit()
except Exception:
- main.log.exception(self.name + ": Uncaught exception!")
+ main.log.exception( self.name + ": Uncaught exception!" )
main.cleanAndExit()
def checknum( self, num ):
@@ -407,7 +406,7 @@
self.handle.sendline( "" )
self.handle.sendline( "" )
i = self.handle.expect( [ 'No\ssuch\device', 'listening\son',
- pexpect.TIMEOUT, self.prompt ], timeout=10 )
+ pexpect.TIMEOUT, self.prompt ], timeout=10 )
main.log.info( self.handle.before + self.handle.after )
if i == 0:
main.log.error( self.name + ": tcpdump - No such device exists.\
@@ -468,15 +467,15 @@
self.handle.sendline( "" )
self.handle.expect( self.prompt )
self.handle.sendline( "cd ~/" + name + "/tools/test/topos" )
- self.handle.expect( "topos"+ self.prompt )
- if ctrllerIP == None:
+ self.handle.expect( "topos" + self.prompt )
+ if ctrllerIP is None:
main.log.info( "You need to specify the IP" )
return main.FALSE
else:
controller = ''
if isinstance( ctrllerIP, types.ListType ):
for i in xrange( len( ctrllerIP ) ):
- controller += ctrllerIP[i] + ' '
+ controller += ctrllerIP[ i ] + ' '
main.log.info( "Mininet topology is being loaded with " +
"controllers: " + controller )
elif isinstance( ctrllerIP, types.StringType ):
@@ -489,10 +488,10 @@
cmd = "sudo -E python opticalTest.py " + controller
main.log.info( self.name + ": cmd = " + cmd )
self.handle.sendline( cmd )
- time.sleep(30)
+ time.sleep( 30 )
self.handle.sendline( "" )
self.handle.sendline( "" )
- self.handle.expect("mininet>")
+ self.handle.expect( "mininet>" )
return main.TRUE
except pexpect.EOF:
main.log.error( self.name + ": EOF exception found" )
@@ -536,7 +535,7 @@
elif i == 1:
self.handle.sendline( "exit" )
self.handle.expect( "exit" )
- self.handle.expect(self.prompt)
+ self.handle.expect( self.prompt )
self.handle.sendline( "exit" )
self.handle.expect( "exit" )
self.handle.expect( "closed" )
@@ -548,11 +547,11 @@
main.log.error( self.name + ": " + self.handle.before )
return main.FALSE
except pexpect.EOF:
- main.log.error(self.name + ": EOF exception found")
- main.log.error(self.name + ": " + self.handle.before)
+ main.log.error( self.name + ": EOF exception found" )
+ main.log.error( self.name + ": " + self.handle.before )
main.cleanAndExit()
except Exception:
- main.log.exception(self.name + ": Uncaught exception!")
+ main.log.exception( self.name + ": Uncaught exception!" )
main.cleanAndExit()
def setIpTablesOUTPUT( self, dstIp, dstPort, action='add',
diff --git a/TestON/drivers/common/cli/emulator/scapyclidriver.py b/TestON/drivers/common/cli/emulator/scapyclidriver.py
index 74905f8..6ef63c4 100644
--- a/TestON/drivers/common/cli/emulator/scapyclidriver.py
+++ b/TestON/drivers/common/cli/emulator/scapyclidriver.py
@@ -199,8 +199,8 @@
getattr( main, name )
except AttributeError:
# namespace is clear, creating component
- main.componentDictionary[name] = main.componentDictionary[self.name].copy()
- main.componentDictionary[name]['connect_order'] = str( int( main.componentDictionary[name]['connect_order'] ) + 1 )
+ main.componentDictionary[ name ] = main.componentDictionary[ self.name ].copy()
+ main.componentDictionary[ name ][ 'connect_order' ] = str( int( main.componentDictionary[ name ][ 'connect_order' ] ) + 1 )
main.componentInit( name )
except Exception:
main.log.exception( self.name + ": Uncaught exception!" )
@@ -228,7 +228,7 @@
# Delete component
delattr( main, name )
# Delete component from ComponentDictionary
- del( main.componentDictionary[name] )
+ del( main.componentDictionary[ name ] )
return main.TRUE
except Exception:
main.log.exception( self.name + ": Uncaught exception!" )
@@ -267,12 +267,12 @@
mplsPath - The path where the MPLS class is located
NOTE: This can be a relative path from the user's home dir
"""
- mplsLines = ['import imp',
- 'imp.load_source( "mplsClass", "{}mplsClass.py" )'.format(mplsPath),
- 'from mplsClass import MPLS',
- 'bind_layers(Ether, MPLS, type = 0x8847)',
- 'bind_layers(MPLS, MPLS, bottom_of_label_stack = 0)',
- 'bind_layers(MPLS, IP)']
+ mplsLines = [ 'import imp',
+ 'imp.load_source( "mplsClass", "{}mplsClass.py" )'.format( mplsPath ),
+ 'from mplsClass import MPLS',
+ 'bind_layers(Ether, MPLS, type = 0x8847)',
+ 'bind_layers(MPLS, MPLS, bottom_of_label_stack = 0)',
+ 'bind_layers(MPLS, IP)' ]
try:
self.handle.sendline( "scapy" )
@@ -649,7 +649,7 @@
self.handle.sendline( "packet = ether/ipv6/sctp" )
else:
main.log.error( "Unrecognized option for ipVersion, given " +
- repr( ipVersion ) )
+ repr( ipVersion ) )
return main.FALSE
self.handle.expect( self.scapyPrompt )
if "Traceback" in self.handle.before:
@@ -777,7 +777,7 @@
self.handle.sendline( "packet = ether/ipv6/icmp6" )
else:
main.log.error( "Unrecognized option for ipVersion, given " +
- repr( ipVersion ) )
+ repr( ipVersion ) )
return main.FALSE
self.handle.expect( self.scapyPrompt )
if "Traceback" in self.handle.before:
@@ -991,7 +991,7 @@
match = re.search( pattern, self.handle.before )
if match:
# NOTE: The command will return 0.0.0.0 if the iface doesn't exist
- if IPv6 != True:
+ if IPv6 is not True:
if match.group() == '0.0.0.0':
main.log.warn( 'iface {0} has no IPv4 address'.format( ifaceName ) )
return match.group()
@@ -1006,14 +1006,14 @@
if ifaceName == "lo":
continue
ip = getIPofInterface( ifaceName )
- if ip != None:
- newip =ip
+ if ip is not None:
+ newip = ip
tmp = newip.split( "\\x" )
ip = ""
counter = 0
for i in tmp:
if i != "":
- counter = counter + 1;
+ counter = counter + 1
if counter % 2 == 0 and counter < 16:
ip = ip + i + ":"
else:
@@ -1049,7 +1049,7 @@
self.handle.sendline( 'get_if_list()' )
self.handle.expect( self.scapyPrompt )
ifList = self.handle.before.split( '\r\n' )
- ifList = ifList[ 1 ].replace( "'","" )[ 1:-1 ].split( ', ' )
+ ifList = ifList[ 1 ].replace( "'", "" )[ 1:-1 ].split( ', ' )
return ifList
except pexpect.TIMEOUT:
diff --git a/TestON/drivers/common/cli/onosclidriver.py b/TestON/drivers/common/cli/onosclidriver.py
index f7f488d..0e5e376 100755
--- a/TestON/drivers/common/cli/onosclidriver.py
+++ b/TestON/drivers/common/cli/onosclidriver.py
@@ -61,10 +61,11 @@
self.graph = Graph()
super( OnosCliDriver, self ).__init__()
- def checkOptions(self, var, defaultVar):
+ def checkOptions( self, var, defaultVar ):
if var is None or var == "":
return defaultVar
return var
+
def connect( self, **connectargs ):
"""
Creates ssh handle for ONOS cli.
@@ -81,9 +82,9 @@
elif key == "karaf_password":
self.karafPass = self.options[ key ]
- self.home = self.checkOptions(self.home, "~/onos")
- self.karafUser = self.checkOptions(self.karafUser, self.user_name)
- self.karafPass = self.checkOptions(self.karafPass, self.pwd )
+ self.home = self.checkOptions( self.home, "~/onos" )
+ self.karafUser = self.checkOptions( self.karafUser, self.user_name )
+ self.karafPass = self.checkOptions( self.karafPass, self.pwd )
for key in self.options:
if key == 'onosIp':
@@ -227,12 +228,12 @@
# Expect the cellname in the ONOSCELL variable.
# Note that this variable name is subject to change
# and that this driver will have to change accordingly
- self.handle.expect(str(cellname))
+ self.handle.expect( str( cellname ) )
handleBefore = self.handle.before
handleAfter = self.handle.after
# Get the rest of the handle
- self.handle.sendline("")
- self.handle.expect(self.prompt)
+ self.handle.sendline( "" )
+ self.handle.expect( self.prompt )
handleMore = self.handle.before
main.log.info( "Cell call returned: " + handleBefore +
@@ -272,7 +273,7 @@
# Check if we are already in the cli
self.handle.sendline( "" )
x = self.handle.expect( [
- self.prompt, "onos>" ], commandlineTimeout)
+ self.prompt, "onos>" ], commandlineTimeout )
if x == 1:
main.log.info( "ONOS cli is already running" )
return main.TRUE
@@ -355,7 +356,7 @@
try:
self.handle.sendline( "" )
x = self.handle.expect( [
- self.prompt, "onos>" ], commandlineTimeout)
+ self.prompt, "onos>" ], commandlineTimeout )
if x == 1:
main.log.info( "ONOS cli is already running" )
@@ -478,7 +479,7 @@
self.handle.sendline( "" )
i = self.handle.expect( [ "onos>", self.prompt, pexpect.TIMEOUT ] )
if i == 1:
- main.log.error( self.name + ": onos cli session closed. ")
+ main.log.error( self.name + ": onos cli session closed. " )
if self.onosIp:
main.log.warn( "Trying to reconnect " + self.onosIp )
reconnectResult = self.startOnosCli( self.onosIp )
@@ -504,9 +505,9 @@
self.log( logStr, noExit=noExit )
self.handle.sendline( cmdStr )
if dollarSign:
- i = self.handle.expect( ["onos>"], timeout )
+ i = self.handle.expect( [ "onos>" ], timeout )
else:
- i = self.handle.expect( ["onos>", self.prompt], timeout )
+ i = self.handle.expect( [ "onos>", self.prompt ], timeout )
response = self.handle.before
# TODO: do something with i
main.log.info( "Command '" + str( cmdStr ) + "' sent to "
@@ -541,7 +542,7 @@
main.log.debug( self.name + ": split output" )
for r in output:
main.log.debug( self.name + ": " + repr( r ) )
- output = output[1].strip()
+ output = output[ 1 ].strip()
if showResponse:
main.log.info( "Response from ONOS: {}".format( output ) )
return output
@@ -647,7 +648,7 @@
main.log.exception( self.name + ": Uncaught exception!" )
main.cleanAndExit()
- def nodes( self, jsonFormat=True):
+ def nodes( self, jsonFormat=True ):
"""
List the nodes currently visible
Issues command: 'nodes'
@@ -852,7 +853,7 @@
main.log.info( "\n" + self.checkMasters( False ) )
return main.FALSE
main.log.info( "Mastership balanced between " +
- str( len(masters) ) + " masters" )
+ str( len( masters ) ) + " masters" )
return main.TRUE
except ( TypeError, ValueError ):
main.log.exception( "{}: Object not as expected: {!r}".format( self.name, mastersOutput ) )
@@ -1186,9 +1187,9 @@
else:
main.log.info( "Host intent installed between " +
str( hostIdOne ) + " and " + str( hostIdTwo ) )
- match = re.search('id=0x([\da-f]+),', handle)
+ match = re.search( 'id=0x([\da-f]+),', handle )
if match:
- return match.group()[3:-1]
+ return match.group()[ 3:-1 ]
else:
main.log.error( "Error, intent ID not found" )
main.log.debug( "Response from ONOS was: " +
@@ -1234,9 +1235,9 @@
main.log.info( "Optical intent installed between " +
str( ingressDevice ) + " and " +
str( egressDevice ) )
- match = re.search('id=0x([\da-f]+),', handle)
+ match = re.search( 'id=0x([\da-f]+),', handle )
if match:
- return match.group()[3:-1]
+ return match.group()[ 3:-1 ]
else:
main.log.error( "Error, intent ID not found" )
return None
@@ -1374,9 +1375,9 @@
main.log.info( "Point-to-point intent installed between " +
str( ingressDevice ) + " and " +
str( egressDevice ) )
- match = re.search('id=0x([\da-f]+),', handle)
+ match = re.search( 'id=0x([\da-f]+),', handle )
if match:
- return match.group()[3:-1]
+ return match.group()[ 3:-1 ]
else:
main.log.error( "Error, intent ID not found" )
return None
@@ -1532,9 +1533,9 @@
"intent" )
return None
else:
- match = re.search('id=0x([\da-f]+),', handle)
+ match = re.search( 'id=0x([\da-f]+),', handle )
if match:
- return match.group()[3:-1]
+ return match.group()[ 3:-1 ]
else:
main.log.error( "Error, intent ID not found" )
return None
@@ -1691,9 +1692,9 @@
"intent" )
return None
else:
- match = re.search('id=0x([\da-f]+),', handle)
+ match = re.search( 'id=0x([\da-f]+),', handle )
if match:
- return match.group()[3:-1]
+ return match.group()[ 3:-1 ]
else:
main.log.error( "Error, intent ID not found" )
return None
@@ -1729,7 +1730,7 @@
tcpDst="",
ingressLabel="",
egressLabel="",
- priority=""):
+ priority="" ):
"""
Required:
* ingressDevice: device id of ingress device
@@ -1824,9 +1825,9 @@
main.log.info( "MPLS intent installed between " +
str( ingressDevice ) + " and " +
str( egressDevice ) )
- match = re.search('id=0x([\da-f]+),', handle)
+ match = re.search( 'id=0x([\da-f]+),', handle )
if match:
- return match.group()[3:-1]
+ return match.group()[ 3:-1 ]
else:
main.log.error( "Error, intent ID not found" )
return None
@@ -1956,6 +1957,30 @@
main.log.exception( self.name + ": Uncaught exception!" )
main.cleanAndExit()
+ def wipeout( self ):
+ """
+ Wipe out the flows, intents, links, devices, hosts, and groups from ONOS.
+ """
+ try:
+ cmdStr = "wipe-out please"
+ handle = self.sendline( cmdStr, timeout=60 )
+ assert handle is not None, "Error in sendline"
+ assert "Command not found:" not in handle, handle
+ return main.TRUE
+ except AssertionError:
+ main.log.exception( "" )
+ return None
+ except TypeError:
+ main.log.exception( self.name + ": Object not as expected" )
+ return None
+ except pexpect.EOF:
+ main.log.error( self.name + ": EOF exception found" )
+ main.log.error( self.name + ": " + self.handle.before )
+ main.cleanAndExit()
+ except Exception:
+ main.log.exception( self.name + ": Uncaught exception!" )
+ main.cleanAndExit()
+
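The new wipeout() wraps the Karaf shell's wipe-out please command, which clears flows, intents, links, devices, hosts and groups in one shot; the "Command not found:" assertion catches ONOS builds where the command is missing. A hedged usage sketch from inside a TestON test case, assuming the usual utilities.assert_equals helper and a connected OnosCliDriver component named ONOScli1:

    # main.ONOScli1 is assumed to be a connected OnosCliDriver component.
    wiped = main.ONOScli1.wipeout()
    utilities.assert_equals( expect=main.TRUE, actual=wiped,
                             onpass="ONOS state wiped clean",
                             onfail="wipe-out please failed" )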
def routes( self, jsonFormat=False ):
"""
NOTE: This method should be used after installing application:
@@ -2000,7 +2025,7 @@
assert handle is not None, "Error in sendline"
assert "Command not found:" not in handle, handle
jsonResult = json.loads( handle )
- return len(jsonResult['routes4'])
+ return len( jsonResult[ 'routes4' ] )
except AssertionError:
main.log.exception( "" )
return None
@@ -2015,7 +2040,7 @@
main.log.exception( self.name + ": Uncaught exception!" )
main.cleanAndExit()
- #=============Function to check Bandwidth allocation========
+ # =============Function to check Bandwidth allocation========
def allocations( self, jsonFormat = True, dollarSign = True ):
"""
Description:
@@ -2043,7 +2068,7 @@
main.log.exception( self.name + ": Uncaught exception!" )
main.cleanAndExit()
- def intents( self, jsonFormat = True, summary = False, **intentargs):
+ def intents( self, jsonFormat = True, summary = False, **intentargs ):
"""
Description:
Obtain intents from the ONOS cli.
@@ -2091,7 +2116,7 @@
main.log.exception( self.name + ": Uncaught exception!" )
main.cleanAndExit()
- def getIntentState(self, intentsId, intentsJson=None):
+ def getIntentState( self, intentsId, intentsJson=None ):
"""
Description:
Gets intent state. Accepts a single intent ID (string type) or a
@@ -2169,15 +2194,15 @@
returnValue = main.TRUE
# Generating a dictionary: intent id as a key and state as value
- #intentsDict = self.getIntentState( intentsId )
+ # intentsDict = self.getIntentState( intentsId )
intentsDict = []
for intent in json.loads( self.intents() ):
- if isinstance ( intentsId, types.StringType) \
- and intent.get('id') == intentsId:
- intentsDict.append(intent)
- elif isinstance ( intentsId, types.ListType ) \
+ if isinstance( intentsId, types.StringType ) \
+ and intent.get( 'id' ) == intentsId:
+ intentsDict.append( intent )
+ elif isinstance( intentsId, types.ListType ) \
and any( intent.get( 'id' ) == ids for ids in intentsId ):
- intentsDict.append(intent)
+ intentsDict.append( intent )
if not intentsDict:
main.log.info( self.name + ": There is something wrong " +
@@ -2248,10 +2273,10 @@
expected = expected.rstrip()
main.log.debug( "Expect: {}\nactual: {}".format( expected, actual ) )
if actual != expected and 'allocated' in actual and 'allocated' in expected:
- marker1 = actual.find('allocated')
- m1 = actual[:marker1]
- marker2 = expected.find('allocated')
- m2 = expected[:marker2]
+ marker1 = actual.find( 'allocated' )
+ m1 = actual[ :marker1 ]
+ marker2 = expected.find( 'allocated' )
+ m2 = expected[ :marker2 ]
if m1 != m2:
bandwidthFailed = True
elif actual != expected and 'allocated' not in actual and 'allocated' not in expected:
@@ -2260,7 +2285,7 @@
ONOSOutput.close()
if bandwidthFailed:
- main.log.error("Bandwidth not allocated correctly using Intents!!")
+ main.log.error( "Bandwidth not allocated correctly using Intents!!" )
returnValue = main.FALSE
return returnValue
except TypeError:
@@ -2350,10 +2375,12 @@
# get total and installed number, see if they are match
allState = response.get( 'all' )
- if allState.get('total') == allState.get('installed'):
- main.log.info( 'Total Intents: {} Installed Intents: {}'.format( allState.get('total'), allState.get('installed') ) )
+ if allState.get( 'total' ) == allState.get( 'installed' ):
+ main.log.info( 'Total Intents: {} Installed Intents: {}'.format(
+ allState.get( 'total' ), allState.get( 'installed' ) ) )
return main.TRUE
- main.log.info( 'Verified Intents failed Excepte intetnes: {} installed intents: {}'.format( allState.get('total'), allState.get('installed') ) )
+ main.log.info( 'Intent verification failed. Expected intents: {} installed intents: {}'.format(
+ allState.get( 'total' ), allState.get( 'installed' ) ) )
return main.FALSE
except ( TypeError, ValueError ):
@@ -2415,7 +2442,7 @@
main.log.exception( self.name + ": Uncaught exception!" )
main.cleanAndExit()
- def checkFlowCount(self, min=0, timeout=60 ):
+ def checkFlowCount( self, min=0, timeout=60 ):
count = self.getTotalFlowsNum( timeout=timeout )
count = int( count ) if count else 0
return count if ( count > min ) else False
@@ -2435,9 +2462,9 @@
parameter is set true, return main.FALSE otherwise.
"""
try:
- states = ["PENDING_ADD", "PENDING_REMOVE", "REMOVED", "FAILED"]
+ states = [ "PENDING_ADD", "PENDING_REMOVE", "REMOVED", "FAILED" ]
checkedStates = []
- statesCount = [0, 0, 0, 0]
+ statesCount = [ 0, 0, 0, 0 ]
for s in states:
rawFlows = self.flows( state=s, timeout = timeout )
if rawFlows:
@@ -2447,19 +2474,19 @@
else:
return main.FALSE
for i in range( len( states ) ):
- for c in checkedStates[i]:
+ for c in checkedStates[ i ]:
try:
- statesCount[i] += int( c.get( "flowCount" ) )
+ statesCount[ i ] += int( c.get( "flowCount" ) )
except TypeError:
main.log.exception( "Json object not as expected" )
- main.log.info( states[i] + " flows: " + str( statesCount[i] ) )
+ main.log.info( states[ i ] + " flows: " + str( statesCount[ i ] ) )
# We want to count PENDING_ADD if isPENDING is true
if isPENDING:
- if statesCount[1] + statesCount[2] + statesCount[3] > 0:
+ if statesCount[ 1 ] + statesCount[ 2 ] + statesCount[ 3 ] > 0:
return main.FALSE
else:
- if statesCount[0] + statesCount[1] + statesCount[2] + statesCount[3] > 0:
+ if statesCount[ 0 ] + statesCount[ 1 ] + statesCount[ 2 ] + statesCount[ 3 ] > 0:
return main.FALSE
return main.TRUE
except ( TypeError, ValueError ):
@@ -2554,12 +2581,12 @@
cmd = "flows -c added"
rawFlows = self.sendline( cmd, timeout=timeout, noExit=noExit )
if rawFlows:
- rawFlows = rawFlows.split("\n")
+ rawFlows = rawFlows.split( "\n" )
totalFlows = 0
for l in rawFlows:
- totalFlows += int(l.split("Count=")[1])
+ totalFlows += int( l.split( "Count=" )[ 1 ] )
else:
- main.log.error("Response not as expected!")
+ main.log.error( "Response not as expected!" )
return None
return totalFlows
@@ -2596,7 +2623,7 @@
if response is None:
return -1
response = json.loads( response )
- return int( response.get("intents") )
+ return int( response.get( "intents" ) )
except ( TypeError, ValueError ):
main.log.exception( "{}: Object not as expected: {!r}".format( self.name, response ) )
return None
@@ -2690,7 +2717,7 @@
"""
try:
# Obtain output of intents function
- intentsStr = self.intents(jsonFormat=True)
+ intentsStr = self.intents( jsonFormat=True )
if intentsStr is None:
raise TypeError
# Convert to a dictionary
@@ -2800,7 +2827,7 @@
main.log.info( "There are no nodes to get id from" )
return idList
nodesJson = json.loads( nodesStr )
- idList = [ node.get('id') for node in nodesJson ]
+ idList = [ node.get( 'id' ) for node in nodesJson ]
return idList
except ( TypeError, ValueError ):
main.log.exception( "{}: Object not as expected: {!r}".format( self.name, nodesStr ) )
@@ -2852,7 +2879,7 @@
import json
try:
# either onos:topology or 'topology' will work in CLI
- topology = json.loads(topologyOutput)
+ topology = json.loads( topologyOutput )
main.log.debug( topology )
return topology
except ( TypeError, ValueError ):
@@ -2866,7 +2893,7 @@
main.log.exception( self.name + ": Uncaught exception!" )
main.cleanAndExit()
- def checkStatus(self, numoswitch, numolink, numoctrl = -1, logLevel="info"):
+ def checkStatus( self, numoswitch, numolink, numoctrl = -1, logLevel="info" ):
"""
Checks the number of switches & links that ONOS sees against the
supplied values. By default this will report to main.log, but the
@@ -2966,7 +2993,7 @@
return main.TRUE
else:
main.log.error( "Invalid 'role' given to device_role(). " +
- "Value was '" + str(role) + "'." )
+ "Value was '" + str( role ) + "'." )
return main.FALSE
except AssertionError:
main.log.exception( "" )
@@ -3314,9 +3341,9 @@
output = json.loads( rawOutput )
results = []
for dict in output:
- if dict["topic"] == topic:
- leader = dict["leader"]
- candidates = re.split( ", ", dict["candidates"][1:-1] )
+ if dict[ "topic" ] == topic:
+ leader = dict[ "leader" ]
+ candidates = re.split( ", ", dict[ "candidates" ][ 1:-1 ] )
results.append( leader )
results.extend( candidates )
return results
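+ # For a matching topic the returned list has the shape
+ # [ leader, candidate1, candidate2, ... ], leader first.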
@@ -3455,8 +3482,8 @@
appsJson = json.loads( output )
state = None
for app in appsJson:
- if appName == app.get('name'):
- state = app.get('state')
+ if appName == app.get( 'name' ):
+ state = app.get( 'state' )
break
if state == "ACTIVE" or state == "INSTALLED":
return state
@@ -3508,7 +3535,7 @@
# Invalid option
main.log.error( "The ONOS app command argument only takes " +
"the values: (activate|deactivate|uninstall)" +
- "; was given '" + option + "'")
+ "; was given '" + option + "'" )
return main.FALSE
cmdStr = "onos:app " + option + " " + appName
output = self.sendline( cmdStr )
@@ -3566,7 +3593,7 @@
if status == "INSTALLED":
response = self.app( appName, "activate" )
if check and response == main.TRUE:
- for i in range(10): # try 10 times then give up
+ for i in range( 10 ): # try 10 times then give up
status = self.appStatus( appName )
if status == "ACTIVE":
return main.TRUE
@@ -3619,7 +3646,7 @@
elif status == "ACTIVE":
response = self.app( appName, "deactivate" )
if check and response == main.TRUE:
- for i in range(10): # try 10 times then give up
+ for i in range( 10 ): # try 10 times then give up
status = self.appStatus( appName )
if status == "INSTALLED":
return main.TRUE
@@ -3668,7 +3695,7 @@
if status == "INSTALLED":
response = self.app( appName, "uninstall" )
if check and response == main.TRUE:
- for i in range(10): # try 10 times then give up
+ for i in range( 10 ): # try 10 times then give up
status = self.appStatus( appName )
if status == "UNINSTALLED":
return main.TRUE
@@ -3683,7 +3710,7 @@
"currently active." )
response = self.app( appName, "uninstall" )
if check and response == main.TRUE:
- for i in range(10): # try 10 times then give up
+ for i in range( 10 ): # try 10 times then give up
status = self.appStatus( appName )
if status == "UNINSTALLED":
return main.TRUE
@@ -3788,8 +3815,8 @@
# there is more than one app with this ID
result = main.FALSE
# We will log this later in the method
- elif not current[0][ 'name' ] == appName:
- currentName = current[0][ 'name' ]
+ elif not current[ 0 ][ 'name' ] == appName:
+ currentName = current[ 0 ][ 'name' ]
result = main.FALSE
main.log.error( "'app-ids' has " + str( currentName ) +
" registered under id:" + str( appID ) +
@@ -3804,12 +3831,12 @@
namesList.append( item[ 'name' ] )
if len( idsList ) != len( set( idsList ) ) or\
len( namesList ) != len( set( namesList ) ):
- main.log.error( "'app-ids' has some duplicate entries: \n"
- + json.dumps( ids,
- sort_keys=True,
- indent=4,
- separators=( ',', ': ' ) ) )
- result = main.FALSE
+ main.log.error( "'app-ids' has some duplicate entries: \n"
+ + json.dumps( ids,
+ sort_keys=True,
+ indent=4,
+ separators=( ',', ': ' ) ) )
+ result = main.FALSE
return result
except ( TypeError, ValueError ):
main.log.exception( "{}: Object not as expected: {!r}".format( self.name, rawJson ) )
@@ -3994,9 +4021,9 @@
output = self.distPrimitivesSend( cmdStr )
positiveMatch = "\[(.*)\] was added to the set " + str( setName )
negativeMatch = "\[(.*)\] was already in set " + str( setName )
- if re.search( positiveMatch, output):
+ if re.search( positiveMatch, output ):
return main.TRUE
- elif re.search( negativeMatch, output):
+ elif re.search( negativeMatch, output ):
return main.FALSE
else:
main.log.error( self.name + ": setTestAdd did not" +
@@ -4678,14 +4705,14 @@
cmd += " -j"
cmd += " " + uri
if isinstance( ip, str ):
- ip = [ip]
+ ip = [ ip ]
for item in ip:
if ":" in item:
sitem = item.split( ":" )
- if len(sitem) == 3:
+ if len( sitem ) == 3:
cmd += " " + item
- elif "." in sitem[1]:
- cmd += " {}:{}".format(item, port)
+ elif "." in sitem[ 1 ]:
+ cmd += " {}:{}".format( item, port )
else:
main.log.error( "Malformed entry: " + item )
raise TypeError
@@ -4725,7 +4752,7 @@
If a device cannot be removed, then this function will return main.FALSE
'''
try:
- if type( device ) is str:
+ if isinstance( device, str ):
deviceStr = device
device = []
device.append( deviceStr )
@@ -4766,7 +4793,7 @@
If a host cannot be removed, then this function will return main.FALSE
'''
try:
- if type( host ) is str:
+ if isinstance( host, str ):
host = list( host )
for h in host:
@@ -4927,7 +4954,7 @@
assert idToDevice[ nodeA ][ 'available' ] and idToDevice[ nodeB ][ 'available' ]
if nodeA not in graphDict.keys():
graphDict[ nodeA ] = { 'edges': {},
- 'dpid': idToDevice[ nodeA ][ 'id' ][3:],
+ 'dpid': idToDevice[ nodeA ][ 'id' ][ 3: ],
'type': idToDevice[ nodeA ][ 'type' ],
'available': idToDevice[ nodeA ][ 'available' ],
'role': idToDevice[ nodeA ][ 'role' ],
@@ -4978,9 +5005,9 @@
# Delete any white space in line
temp = re.sub( r'\s+', '', l )
temp = temp.split( ":" )
- respDic[ temp[0] ] = temp[ 1 ]
+ respDic[ temp[ 0 ] ] = temp[ 1 ]
- except (TypeError, ValueError):
+ except ( TypeError, ValueError ):
main.log.exception( self.name + ": Object not as expected" )
return None
except KeyError:
@@ -5022,7 +5049,7 @@
total: return how many lines in karaf log
"""
try:
- assert type( searchTerm ) is str
+ assert isinstance( searchTerm, str )
# Build the log paths string
logPath = '/opt/onos/log/karaf.log.'
logPaths = '/opt/onos/log/karaf.log'
@@ -5030,7 +5057,8 @@
logPaths = logPath + str( i ) + " " + logPaths
cmd = "cat " + logPaths
if startLine:
- # 100000000 is just a extreme large number to make sure this function can grep all the lines after startLine
+ # 100000000 is just an extremely large number to make sure this function can
+ # grep all the lines after startLine
cmd = cmd + " | grep -A 100000000 \'" + startLine + "\'"
if mode == 'all':
cmd = cmd + " | grep \'" + searchTerm + "\'"
@@ -5044,14 +5072,14 @@
return num
elif mode == 'total':
totalLines = self.sendline( "cat /opt/onos/log/karaf.log | wc -l" )
- return int(totalLines)
+ return int( totalLines )
else:
main.log.error( self.name + " unsupported mode" )
return main.ERROR
before = self.sendline( cmd )
before = before.splitlines()
# make sure the returned list only contains the search term
- returnLines = [line for line in before if searchTerm in line]
+ returnLines = [ line for line in before if searchTerm in line ]
return returnLines
except AssertionError:
main.log.error( self.name + " searchTerm is not string type" )
@@ -5097,7 +5125,7 @@
main.log.error( self.name + ": " + self.handle.before )
main.cleanAndExit()
except NotImplementedError:
- main.log.exception( self.name + ": Json output not supported")
+ main.log.exception( self.name + ": Json output not supported" )
return None
except Exception:
main.log.exception( self.name + ": Uncaught exception!" )
@@ -5119,7 +5147,7 @@
for match in mIter:
item = {}
item[ 'name' ] = match.group( 'name' )
- ifaces = match.group( 'interfaces' ).split( ', ')
+ ifaces = match.group( 'interfaces' ).split( ', ' )
if ifaces == [ "" ]:
ifaces = []
item[ 'interfaces' ] = ifaces
@@ -5159,7 +5187,7 @@
main.log.error( self.name + ": " + self.handle.before )
main.cleanAndExit()
except NotImplementedError:
- main.log.exception( self.name + ": Json output not supported")
+ main.log.exception( self.name + ": Json output not supported" )
return None
except Exception:
main.log.exception( self.name + ": Uncaught exception!" )
@@ -5391,7 +5419,7 @@
main.log.error( self.name + ": " + self.handle.before )
main.cleanAndExit()
except NotImplementedError:
- main.log.exception( self.name + ": Json output not supported")
+ main.log.exception( self.name + ": Json output not supported" )
return None
except Exception:
main.log.exception( self.name + ": Uncaught exception!" )
@@ -5413,15 +5441,15 @@
Please look at the "logsearch" Function in onosclidriver.py
'''
if logNum < 0:
- main.log.error("Get wrong log number ")
+ main.log.error( "Get wrong log number ")
return main.ERROR
lines = self.logSearch( mode=mode, searchTerm=searchTerm, startLine=startLine, logNum=logNum )
- if len(lines) == 0:
+ if len( lines ) == 0:
main.log.warn( "Captured timestamp string is empty" )
return main.ERROR
lines = lines[ 0 ]
try:
- assert type(lines) is str
+ assert isinstance( lines, str )
# get the target value
line = lines.split( splitTerm_before )
key = line[ 1 ].split( splitTerm_after )
@@ -5589,7 +5617,7 @@
return None
else:
match = re.search( pattern, output )
- return match.group(0)
+ return match.group( 0 )
except ( AttributeError, TypeError ):
main.log.exception( self.name + ": Object not as expected; " + str( output ) )
return None
@@ -5622,7 +5650,7 @@
return None
else:
match = re.search( pattern, output )
- return match.group(0)
+ return match.group( 0 )
except ( AttributeError, TypeError ):
main.log.exception( self.name + ": Object not as expected; " + str( output ) )
return None
@@ -5655,7 +5683,7 @@
return None
else:
match = re.search( pattern, output )
- return match.group(0)
+ return match.group( 0 )
except ( AttributeError, TypeError ):
main.log.exception( self.name + ": Object not as expected; " + str( output ) )
return None
@@ -5709,4 +5737,238 @@
main.cleanAndExit()
except Exception:
main.log.exception( self.name + ": Uncaught exception!" )
- main.cleanAndExit()
\ No newline at end of file
+ main.cleanAndExit()
+
+ def issu( self ):
+ """
+ Short summary of In-Service Software Upgrade status
+
+ Returns the output of the cli command or None on error
+ """
+ try:
+ cmdStr = "issu"
+ handle = self.sendline( cmdStr )
+ assert handle is not None, "Error in sendline"
+ assert "Command not found:" not in handle, handle
+ assert "Unsupported command:" not in handle, handle
+ return handle
+ except AssertionError:
+ main.log.exception( "" )
+ return None
+ except TypeError:
+ main.log.exception( self.name + ": Object not as expected" )
+ return None
+ except pexpect.EOF:
+ main.log.error( self.name + ": EOF exception found" )
+ main.log.error( self.name + ": " + self.handle.before )
+ main.cleanAndExit()
+ except Exception:
+ main.log.exception( self.name + ": Uncaught exception!" )
+ main.cleanAndExit()
+
+ def issuInit( self ):
+ """
+ Initiates an In-Service Software Upgrade
+
+ Returns main.TRUE on success, main.ERROR on error, else main.FALSE
+ """
+ try:
+ cmdStr = "issu init"
+ handle = self.sendline( cmdStr )
+ assert handle is not None, "Error in sendline"
+ assert "Command not found:" not in handle, handle
+ assert "Unsupported command:" not in handle, handle
+ if "Initialized" in handle:
+ return main.TRUE
+ else:
+ return main.FALSE
+ except AssertionError:
+ main.log.exception( "" )
+ return main.ERROR
+ except TypeError:
+ main.log.exception( self.name + ": Object not as expected" )
+ return main.ERROR
+ except pexpect.EOF:
+ main.log.error( self.name + ": EOF exception found" )
+ main.log.error( self.name + ": " + self.handle.before )
+ main.cleanAndExit()
+ except Exception:
+ main.log.exception( self.name + ": Uncaught exception!" )
+ main.cleanAndExit()
+
+ def issuUpgrade( self ):
+ """
+ Transitions stores to upgraded nodes
+
+ Returns main.TRUE on success, main.ERROR on error, else main.FALSE
+ """
+ try:
+ cmdStr = "issu upgrade"
+ handle = self.sendline( cmdStr )
+ assert handle is not None, "Error in sendline"
+ assert "Command not found:" not in handle, handle
+ assert "Unsupported command:" not in handle, handle
+ if "Upgraded" in handle:
+ return main.TRUE
+ else:
+ return main.FALSE
+ except AssertionError:
+ main.log.exception( "" )
+ return main.ERROR
+ except TypeError:
+ main.log.exception( self.name + ": Object not as expected" )
+ return main.ERROR
+ except pexpect.EOF:
+ main.log.error( self.name + ": EOF exception found" )
+ main.log.error( self.name + ": " + self.handle.before )
+ main.cleanAndExit()
+ except Exception:
+ main.log.exception( self.name + ": Uncaught exception!" )
+ main.cleanAndExit()
+
+ def issuCommit( self ):
+ """
+ Finalizes an In-Service Software Upgrade
+
+ Returns main.TRUE on success, main.ERROR on error, else main.FALSE
+ """
+ try:
+ cmdStr = "issu commit"
+ handle = self.sendline( cmdStr )
+ assert handle is not None, "Error in sendline"
+ assert "Command not found:" not in handle, handle
+ assert "Unsupported command:" not in handle, handle
+ # TODO: Check the version returned by this command
+ if "Committed version" in handle:
+ return main.TRUE
+ else:
+ return main.FALSE
+ except AssertionError:
+ main.log.exception( "" )
+ return main.ERROR
+ except TypeError:
+ main.log.exception( self.name + ": Object not as expected" )
+ return main.ERROR
+ except pexpect.EOF:
+ main.log.error( self.name + ": EOF exception found" )
+ main.log.error( self.name + ": " + self.handle.before )
+ main.cleanAndExit()
+ except Exception:
+ main.log.exception( self.name + ": Uncaught exception!" )
+ main.cleanAndExit()
+
+ def issuRollback( self ):
+ """
+ Rolls back an In-Service Software Upgrade
+
+ Returns main.TRUE on success, main.ERROR on error, else main.FALSE
+ """
+ try:
+ cmdStr = "issu rollback"
+ handle = self.sendline( cmdStr )
+ assert handle is not None, "Error in sendline"
+ assert "Command not found:" not in handle, handle
+ assert "Unsupported command:" not in handle, handle
+ # TODO: Check the version returned by this command
+ if "Rolled back to version" in handle:
+ return main.TRUE
+ else:
+ return main.FALSE
+ except AssertionError:
+ main.log.exception( "" )
+ return main.ERROR
+ except TypeError:
+ main.log.exception( self.name + ": Object not as expected" )
+ return main.ERROR
+ except pexpect.EOF:
+ main.log.error( self.name + ": EOF exception found" )
+ main.log.error( self.name + ": " + self.handle.before )
+ main.cleanAndExit()
+ except Exception:
+ main.log.exception( self.name + ": Uncaught exception!" )
+ main.cleanAndExit()
+
+ def issuReset( self ):
+ """
+ Resets the In-Service Software Upgrade status after a rollback
+
+ Returns main.TRUE on success, main.ERROR on error, else main.FALSE
+ """
+ try:
+ cmdStr = "issu reset"
+ handle = self.sendline( cmdStr )
+ assert handle is not None, "Error in sendline"
+ assert "Command not found:" not in handle, handle
+ assert "Unsupported command:" not in handle, handle
+ # TODO: Check the version returned by this command
+ if "Reset version" in handle:
+ return main.TRUE
+ else:
+ return main.FALSE
+ except AssertionError:
+ main.log.exception( "" )
+ return main.ERROR
+ except TypeError:
+ main.log.exception( self.name + ": Object not as expected" )
+ return main.ERROR
+ except pexpect.EOF:
+ main.log.error( self.name + ": EOF exception found" )
+ main.log.error( self.name + ": " + self.handle.before )
+ main.cleanAndExit()
+ except Exception:
+ main.log.exception( self.name + ": Uncaught exception!" )
+ main.cleanAndExit()
+
+ def issuStatus( self ):
+ """
+ Status of an In-Service Software Upgrade
+
+ Returns the output of the cli command or None on error
+ """
+ try:
+ cmdStr = "issu status"
+ handle = self.sendline( cmdStr )
+ assert handle is not None, "Error in sendline"
+ assert "Command not found:" not in handle, handle
+ assert "Unsupported command:" not in handle, handle
+ return handle
+ except AssertionError:
+ main.log.exception( "" )
+ return None
+ except TypeError:
+ main.log.exception( self.name + ": Object not as expected" )
+ return None
+ except pexpect.EOF:
+ main.log.error( self.name + ": EOF exception found" )
+ main.log.error( self.name + ": " + self.handle.before )
+ main.cleanAndExit()
+ except Exception:
+ main.log.exception( self.name + ": Uncaught exception!" )
+ main.cleanAndExit()
+
+ def issuVersion( self ):
+ """
+ Get the version of an In-Service Software Upgrade
+
+ Returns the output of the cli command or None on error
+ """
+ try:
+ cmdStr = "issu version"
+ handle = self.sendline( cmdStr )
+ assert handle is not None, "Error in sendline"
+ assert "Command not found:" not in handle, handle
+ assert "Unsupported command:" not in handle, handle
+ return handle
+ except AssertionError:
+ main.log.exception( "" )
+ return None
+ except TypeError:
+ main.log.exception( self.name + ": Object not as expected" )
+ return None
+ except pexpect.EOF:
+ main.log.error( self.name + ": EOF exception found" )
+ main.log.error( self.name + ": " + self.handle.before )
+ main.cleanAndExit()
+ except Exception:
+ main.log.exception( self.name + ": Uncaught exception!" )
+ main.cleanAndExit()
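+
+ # Hedged usage sketch for the ISSU wrappers above; "main.ONOScli1" is an
+ # assumed component handle and a real test should check issuStatus()
+ # between steps:
+ # if main.ONOScli1.issuInit() == main.TRUE:
+ # if main.ONOScli1.issuUpgrade() == main.TRUE:
+ # main.ONOScli1.issuCommit()
+ # else:
+ # main.ONOScli1.issuRollback()
+ # main.ONOScli1.issuReset()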
diff --git a/TestON/drivers/common/cli/onosclusterdriver.py b/TestON/drivers/common/cli/onosclusterdriver.py
index 5093cfe..af59a60 100755
--- a/TestON/drivers/common/cli/onosclusterdriver.py
+++ b/TestON/drivers/common/cli/onosclusterdriver.py
@@ -34,10 +34,12 @@
# FIXME: Move this to its own file?
class Controller():
+
def __str__( self ):
return self.name
+
def __repr__( self ):
- #TODO use repr() for components?
+ # TODO use repr() for components?
return "%s<IP=%s, CLI=%s, REST=%s, Bench=%s >" % ( self.name,
self.ipAddress,
self.CLI,
@@ -77,10 +79,8 @@
return f
raise AttributeError( "Could not find the attribute %s in %r or it's component handles" % ( name, self ) )
-
-
def __init__( self, name, ipAddress, CLI=None, REST=None, Bench=None, pos=None, userName=None, server=None ):
- #TODO: validate these arguments
+ # TODO: validate these arguments
self.name = str( name )
self.ipAddress = ipAddress
self.CLI = CLI
@@ -131,7 +131,7 @@
elif key == "cluster_name":
prefix = self.options[ key ]
- self.home = self.checkOptions(self.home, "~/onos")
+ self.home = self.checkOptions( self.home, "~/onos" )
self.karafUser = self.checkOptions(self.karafUser, self.user_name)
self.karafPass = self.checkOptions(self.karafPass, self.pwd )
prefix = self.checkOptions( prefix, "ONOS" )
@@ -366,7 +366,6 @@
main.log.error( name + " component already exists!" )
main.cleanAndExit()
-
def setServerOptions( self, name, ipAddress ):
"""
Parse the cluster options to create an ONOS "server" component with the given name
@@ -383,7 +382,6 @@
main.componentDictionary[name]['connect_order'] = str( int( main.componentDictionary[name]['connect_order'] ) + 1 )
main.log.debug( main.componentDictionary[name] )
-
def createServerComponent( self, name, ipAddress ):
"""
Creates a new onos "server" component. This will be connected to the
@@ -414,7 +412,6 @@
main.log.error( name + " component already exists!" )
main.cleanAndExit()
-
def createComponents( self, prefix='', createServer=True ):
"""
Creates a CLI and REST component for each nodes in the cluster
diff --git a/TestON/drivers/common/cli/onosdriver.py b/TestON/drivers/common/cli/onosdriver.py
index eed4057..31f26d5 100755
--- a/TestON/drivers/common/cli/onosdriver.py
+++ b/TestON/drivers/common/cli/onosdriver.py
@@ -72,10 +72,9 @@
break
self.maxNodes = None
- if self.maxNodes == None or self.maxNodes == "":
+ if self.maxNodes is None or self.maxNodes == "":
self.maxNodes = 100
-
# Grabs all OC environment variables based on max number of nodes
self.onosIps = {} # Dictionary of all possible ONOS ip
@@ -110,7 +109,7 @@
main.log.error( "Uncaught exception: " + str( inst ) )
try:
- if os.getenv( str( self.ip_address ) ) != None:
+ if os.getenv( str( self.ip_address ) ) is not None:
self.ip_address = os.getenv( str( self.ip_address ) )
else:
main.log.info( self.name +
@@ -305,7 +304,7 @@
'Runtime\sEnvironment\sto\scontinue',
'BUILD\sFAILURE',
'BUILD\sSUCCESS',
- 'onos' + self.prompt, #TODO: fix this to be more generic?
+ 'onos' + self.prompt, # TODO: fix this to be more generic?
'ONOS' + self.prompt,
pexpect.TIMEOUT ], mciTimeout )
if i == 0:
@@ -417,7 +416,7 @@
self.handle.expect( self.prompt )
cmd = "git pull"
if comp1 != "":
- cmd += ' ' + comp1
+ cmd += ' ' + comp1
if fastForward:
cmd += ' ' + " --ff-only"
self.handle.sendline( cmd )
@@ -498,7 +497,7 @@
self.handle.expect( self.prompt )
return main.ERROR
except Exception:
- main.log.exception( "Couldn't exit editor prompt!")
+ main.log.exception( "Couldn't exit editor prompt!" )
main.cleanAndExit()
elif i == 10: # In the middle of a merge commit
@@ -652,11 +651,11 @@
self.handle.sendline( "git name-rev --name-only HEAD" )
self.handle.expect( "git name-rev --name-only HEAD" )
self.handle.expect( self.prompt )
- lines = self.handle.before.splitlines()
- if lines[1] == "master" or re.search( "^onos-\d+(\.\d+)+$", lines[1] ):
- return lines[1]
+ lines = self.handle.before.splitlines()
+ if lines[ 1 ] == "master" or re.search( "^onos-\d+(\.\d+)+$", lines[ 1 ] ):
+ return lines[ 1 ]
else:
- main.log.info( lines[1] )
+ main.log.info( lines[ 1 ] )
return "unexpected ONOS branch"
except pexpect.EOF:
main.log.error( self.name + ": EOF exception found" )
@@ -702,11 +701,11 @@
# as xml specific tags that cause errors
line = line.replace( "<", "[" )
line = line.replace( ">", "]" )
- #main.log.wiki( "\t" + line )
+ # main.log.wiki( "\t" + line )
main.log.wiki( line + "<br /> " )
main.log.summary( line )
main.log.wiki( "</blockquote>" )
- main.log.summary("\n")
+ main.log.summary( "\n" )
return lines[ 2 ]
except pexpect.EOF:
main.log.error( self.name + ": EOF exception found" )
@@ -837,14 +836,14 @@
handleAfter = self.handle.after
# Get the rest of the handle
self.handle.expect( self.prompt )
- time.sleep(10)
+ time.sleep( 10 )
handleMore = self.handle.before
cell_result = handleBefore + handleAfter + handleMore
- #print cell_result
+ # print cell_result
if( re.search( "No such cell", cell_result ) ):
main.log.error( "Cell call returned: " + handleBefore +
- handleAfter + handleMore )
+ handleAfter + handleMore )
main.cleanAndExit()
return main.TRUE
@@ -899,11 +898,11 @@
self.handle.sendline( "" )
self.handle.expect( ":~" )
self.handle.sendline( cfgStr )
- self.handle.expect("cfg set")
+ self.handle.expect( "cfg set" )
self.handle.expect( ":~" )
- paramValue = configParam.split(" ")[1]
- paramName = configParam.split(" ")[0]
+ paramValue = configParam.split( " " )[ 1 ]
+ paramName = configParam.split( " " )[ 0 ]
checkStr = 'onos {} cfg get " {} {} " '.format( ONOSIp, configName, paramName )
@@ -911,7 +910,7 @@
self.handle.expect( ":~" )
if "value=" + paramValue + "," in self.handle.before:
- main.log.info("cfg " + configName + " successfully set to " + configParam)
+ main.log.info( "cfg " + configName + " successfully set to " + configParam )
return main.TRUE
except pexpect.ExceptionPexpect as e:
main.log.exception( self.name + ": Pexpect exception found: " )
@@ -967,7 +966,7 @@
main.log.exception( self.name + ": Uncaught exception!" )
main.cleanAndExit()
- def onosSecureSSH( self, userName="onos", userPWD="rocks", node=""):
+ def onosSecureSSH( self, userName="onos", userPWD="rocks", node="" ):
"""
Enables secure access to ONOS console
by removing default users & keys.
@@ -985,9 +984,9 @@
# NOTE: this timeout may need to change depending on the network
# and size of ONOS
# TODO: Handle the other possible error
- i = self.handle.expect([ "Network\sis\sunreachable",
- self.prompt,
- pexpect.TIMEOUT ], timeout=180 )
+ i = self.handle.expect( [ "Network\sis\sunreachable",
+ self.prompt,
+ pexpect.TIMEOUT ], timeout=180 )
if i == 0:
# can't reach ONOS node
main.log.warn( "Network is unreachable" )
@@ -996,8 +995,8 @@
elif i == 1:
# Process started
main.log.info(
- "Secure SSH performed on " +
- node)
+ "Secure SSH performed on " +
+ node )
return main.TRUE
except pexpect.EOF:
main.log.error( self.name + ": EOF exception found" )
@@ -1007,7 +1006,6 @@
main.log.exception( self.name + ": Uncaught exception!" )
main.cleanAndExit()
-
def onosInstall( self, options="-f", node="" ):
"""
Installs ONOS bits on the designated cell machine.
@@ -1337,7 +1335,7 @@
self.handle.sendline( "onos-wait-for-start " + node )
self.handle.expect( "onos-wait-for-start" )
# NOTE: this timeout is arbitrary
- i = self.handle.expect([self.prompt, pexpect.TIMEOUT], timeout)
+ i = self.handle.expect( [ self.prompt, pexpect.TIMEOUT ], timeout )
if i == 0:
main.log.info( self.name + ": " + node + " is up" )
return main.TRUE
@@ -1376,7 +1374,6 @@
main.log.exception( self.name + ": Uncaught exception!" )
main.cleanAndExit()
-
def pushTestIntentsShell(
self,
dpidSrc,
@@ -1518,7 +1515,7 @@
self.handle.expect( self.prompt )
self.handle.sendline( "" )
if grepOptions:
- grepStr = "grep "+str(grepOptions)
+ grepStr = "grep " + str( grepOptions )
else:
grepStr = "grep"
@@ -1527,12 +1524,12 @@
str( interface ) +
" -t e | " +
grepStr + " --line-buffered \"" +
- str(grep) +
+ str( grep ) +
"\" >" +
directory +
" &" )
- self.handle.sendline(cmd)
- main.log.info(cmd)
+ self.handle.sendline( cmd )
+ main.log.info( cmd )
self.handle.expect( "Capturing on" )
self.handle.sendline( "\n" )
self.handle.expect( self.prompt )
@@ -1600,7 +1597,7 @@
main.log.exception( self.name + ": Uncaught exception!" )
main.cleanAndExit()
- def dumpONOSCmd(self, ONOSIp, CMD, destDir, filename, options=""):
+ def dumpONOSCmd( self, ONOSIp, CMD, destDir, filename, options="" ):
"""
Dump Cmd to a desired directory.
For debugging purposes, you may want to use
@@ -1622,8 +1619,8 @@
localtime = localtime.replace( ":", "" )
if destDir[ -1: ] != "/":
destDir += "/"
- cmd=CMD + " " + options + " > " + str( destDir ) + str( filename ) + localtime
- return self.onosCli(ONOSIp, cmd)
+ cmd = CMD + " " + options + " > " + str( destDir ) + str( filename ) + localtime
+ return self.onosCli( ONOSIp, cmd )
def cpLogsToDir( self, logToCopy,
destDir, copyFileName="" ):
@@ -1678,7 +1675,7 @@
except Exception:
main.log.exception( "Copying files failed" )
- def checkLogs( self, onosIp, restart=False):
+ def checkLogs( self, onosIp, restart=False ):
"""
runs onos-check-logs on the given onos node
If restart is True, use the old version of onos-check-logs which
@@ -1804,14 +1801,14 @@
cmd = "sudo iptables " + actionFlag + " " +\
direction +\
" -s " + str( ip )
- # " -p " + str( packet_type ) +\
+ # " -p " + str( packet_type ) +\
if packet_type:
cmd += " -p " + str( packet_type )
if port:
cmd += " --dport " + str( port )
if states:
cmd += " -m state --state="
- #FIXME- Allow user to configure which states to block
+ # FIXME- Allow user to configure which states to block
cmd += "INVALID,ESTABLISHED,NEW,RELATED,UNTRACKED"
cmd += " -j " + str( rule )
@@ -1840,7 +1837,7 @@
main.log.exception( self.name + ": Uncaught exception!" )
main.cleanAndExit()
- def detailed_status(self, log_filename):
+ def detailed_status( self, log_filename ):
"""
This method is used by STS to check the status of the controller
Reports RUNNING, STARTING, STOPPED, FROZEN, ERROR (and reason)
@@ -1884,7 +1881,7 @@
main.log.exception( self.name + ": Uncaught exception!" )
main.cleanAndExit()
- def createLinkGraphFile( self, benchIp, ONOSIpList, deviceCount):
+ def createLinkGraphFile( self, benchIp, ONOSIpList, deviceCount ):
'''
Create/formats the LinkGraph.cfg file based on arguments
-only creates a linear topology and connects islands
@@ -1895,114 +1892,113 @@
deviceCount - number of switches to be assigned
'''
- main.log.info("Creating link graph configuration file." )
+ main.log.info( "Creating link graph configuration file." )
linkGraphPath = self.home + "/tools/package/etc/linkGraph.cfg"
tempFile = "/tmp/linkGraph.cfg"
- linkGraph = open(tempFile, 'w+')
- linkGraph.write("# NullLinkProvider topology description (config file).\n")
- linkGraph.write("# The NodeId is only added if the destination is another node's device.\n")
- linkGraph.write("# Bugs: Comments cannot be appended to a line to be read.\n")
+ linkGraph = open( tempFile, 'w+' )
+ linkGraph.write( "# NullLinkProvider topology description (config file).\n" )
+ linkGraph.write( "# The NodeId is only added if the destination is another node's device.\n" )
+ linkGraph.write( "# Bugs: Comments cannot be appended to a line to be read.\n" )
- clusterCount = len(ONOSIpList)
+ clusterCount = len( ONOSIpList )
- if type(deviceCount) is int or type(deviceCount) is str:
- deviceCount = int(deviceCount)
- switchList = [0]*(clusterCount+1)
+ if isinstance( deviceCount, int ) or isinstance( deviceCount, str ):
+ deviceCount = int( deviceCount )
+ switchList = [ 0 ]*( clusterCount+1 )
baselineSwitchCount = deviceCount/clusterCount
- for node in range(1, clusterCount + 1):
- switchList[node] = baselineSwitchCount
+ for node in range( 1, clusterCount + 1 ):
+ switchList[ node ] = baselineSwitchCount
- for node in range(1, (deviceCount%clusterCount)+1):
- switchList[node] += 1
+ for node in range( 1, ( deviceCount % clusterCount )+1 ):
+ switchList[ node ] += 1
- if type(deviceCount) is list:
- main.log.info("Using provided device distribution")
- switchList = [0]
+ if isinstance( deviceCount, list ):
+ main.log.info( "Using provided device distribution" )
+ switchList = [ 0 ]
for i in deviceCount:
- switchList.append(int(i))
+ switchList.append( int( i ) )
- tempList = ['0']
- tempList.extend(ONOSIpList)
+ tempList = [ '0' ]
+ tempList.extend( ONOSIpList )
ONOSIpList = tempList
myPort = 6
lastSwitch = 0
- for node in range(1, clusterCount+1):
- if switchList[node] == 0:
+ for node in range( 1, clusterCount+1 ):
+ if switchList[ node ] == 0:
continue
- linkGraph.write("graph " + ONOSIpList[node] + " {\n")
+ linkGraph.write( "graph " + ONOSIpList[ node ] + " {\n" )
if node > 1:
- #connect to last device on previous node
- line = ("\t0:5 -> " + str(lastSwitch) + ":6:" + lastIp + "\n") #ONOSIpList[node-1]
- linkGraph.write(line)
+ # connect to last device on previous node
+ line = ( "\t0:5 -> " + str( lastSwitch ) + ":6:" + lastIp + "\n" ) # ONOSIpList[node-1]
+ linkGraph.write( line )
lastSwitch = 0
- for switch in range (0, switchList[node]-1):
+ for switch in range( 0, switchList[ node ]-1 ):
line = ""
- line = ("\t" + str(switch) + ":" + str(myPort))
+ line = ( "\t" + str( switch ) + ":" + str( myPort ) )
line += " -- "
- line += (str(switch+1) + ":" + str(myPort-1) + "\n")
- linkGraph.write(line)
+ line += ( str( switch+1 ) + ":" + str( myPort-1 ) + "\n" )
+ linkGraph.write( line )
lastSwitch = switch+1
- lastIp = ONOSIpList[node]
+ lastIp = ONOSIpList[ node ]
- #lastSwitch += 1
- if node < (clusterCount):
- #connect to first device on the next node
- line = ("\t" + str(lastSwitch) + ":6 -> 0:5:" + ONOSIpList[node+1] + "\n")
- linkGraph.write(line)
+ # lastSwitch += 1
+ if node < ( clusterCount ):
+ # connect to first device on the next node
+ line = ( "\t" + str( lastSwitch ) + ":6 -> 0:5:" + ONOSIpList[ node+1 ] + "\n" )
+ linkGraph.write( line )
- linkGraph.write("}\n")
+ linkGraph.write( "}\n" )
linkGraph.close()
- #SCP
- os.system( "scp " + tempFile + " " + self.user_name + "@" + benchIp + ":" + linkGraphPath)
- main.log.info("linkGraph.cfg creation complete")
+ # SCP
+ os.system( "scp " + tempFile + " " + self.user_name + "@" + benchIp + ":" + linkGraphPath )
+ main.log.info( "linkGraph.cfg creation complete" )
- def configNullDev( self, ONOSIpList, deviceCount, numPorts=10):
-
+ def configNullDev( self, ONOSIpList, deviceCount, numPorts=10 ):
'''
ONOSIpList = list of IP addresses of nodes the switches will be divided amongst
deviceCount = number of switches to distribute, or list of values to use as custom distribution
numPorts = number of ports per device. Defaults to 10 both in this function and in ONOS. Optional arg
'''
- main.log.info("Configuring Null Device Provider" )
- clusterCount = len(ONOSIpList)
+ main.log.info( "Configuring Null Device Provider" )
+ clusterCount = len( ONOSIpList )
try:
- if type(deviceCount) is int or type(deviceCount) is str:
- main.log.info("Creating device distribution")
- deviceCount = int(deviceCount)
- switchList = [0]*(clusterCount+1)
+ if isinstance( deviceCount, int ) or isinstance( deviceCount, str ):
+ main.log.info( "Creating device distribution" )
+ deviceCount = int( deviceCount )
+ switchList = [ 0 ]*( clusterCount+1 )
baselineSwitchCount = deviceCount/clusterCount
- for node in range(1, clusterCount + 1):
- switchList[node] = baselineSwitchCount
+ for node in range( 1, clusterCount + 1 ):
+ switchList[ node ] = baselineSwitchCount
- for node in range(1, (deviceCount%clusterCount)+1):
- switchList[node] += 1
+ for node in range( 1, ( deviceCount % clusterCount )+1 ):
+ switchList[ node ] += 1
- if type(deviceCount) is list:
- main.log.info("Using provided device distribution")
+ if isinstance( deviceCount, list ):
+ main.log.info( "Using provided device distribution" )
- if len(deviceCount) == clusterCount:
- switchList = ['0']
- switchList.extend(deviceCount)
+ if len( deviceCount ) == clusterCount:
+ switchList = [ '0' ]
+ switchList.extend( deviceCount )
- if len(deviceCount) == (clusterCount + 1):
- if deviceCount[0] == '0' or deviceCount[0] == 0:
+ if len( deviceCount ) == ( clusterCount + 1 ):
+ if deviceCount[ 0 ] == '0' or deviceCount[ 0 ] == 0:
switchList = deviceCount
- assert len(switchList) == (clusterCount + 1)
+ assert len( switchList ) == ( clusterCount + 1 )
except AssertionError:
- main.log.error( "Bad device/Ip list match")
+ main.log.error( "Bad device/Ip list match" )
except TypeError:
main.log.exception( self.name + ": Object not as expected" )
return None
@@ -2010,73 +2006,71 @@
main.log.exception( self.name + ": Uncaught exception!" )
main.cleanAndExit()
+ ONOSIp = [ 0 ]
+ ONOSIp.extend( ONOSIpList )
- ONOSIp = [0]
- ONOSIp.extend(ONOSIpList)
-
- devicesString = "devConfigs = "
- for node in range(1, len(ONOSIp)):
- devicesString += (ONOSIp[node] + ":" + str(switchList[node] ))
+ devicesString = "devConfigs = "
+ for node in range( 1, len( ONOSIp ) ):
+ devicesString += ( ONOSIp[ node ] + ":" + str( switchList[ node ] ) )
if node < clusterCount:
- devicesString += (",")
+ devicesString += ( "," )
try:
- self.handle.sendline("onos $OC1 cfg set org.onosproject.provider.nil.device.impl.NullDeviceProvider devConfigs " + devicesString )
- self.handle.expect(":~")
- self.handle.sendline("onos $OC1 cfg set org.onosproject.provider.nil.device.impl.NullDeviceProvider numPorts " + str(numPorts) )
- self.handle.expect(":~")
+ self.handle.sendline( "onos $OC1 cfg set org.onosproject.provider.nil.device.impl.NullDeviceProvider devConfigs " + devicesString )
+ self.handle.expect( ":~" )
+ self.handle.sendline( "onos $OC1 cfg set org.onosproject.provider.nil.device.impl.NullDeviceProvider numPorts " + str( numPorts ) )
+ self.handle.expect( ":~" )
- for i in range(10):
- self.handle.sendline("onos $OC1 cfg get org.onosproject.provider.nil.device.impl.NullDeviceProvider")
- self.handle.expect(":~")
+ for i in range( 10 ):
+ self.handle.sendline( "onos $OC1 cfg get org.onosproject.provider.nil.device.impl.NullDeviceProvider" )
+ self.handle.expect( ":~" )
verification = self.handle.before
- if (" value=" + str(numPorts)) in verification and (" value=" + devicesString) in verification:
+ if ( " value=" + str( numPorts ) ) in verification and ( " value=" + devicesString ) in verification:
break
else:
- time.sleep(1)
+ time.sleep( 1 )
- assert ("value=" + str(numPorts)) in verification and (" value=" + devicesString) in verification
+ assert ( "value=" + str( numPorts ) ) in verification and ( " value=" + devicesString ) in verification
except AssertionError:
- main.log.error("Incorrect Config settings: " + verification)
+ main.log.error( "Incorrect Config settings: " + verification )
except Exception:
main.log.exception( self.name + ": Uncaught exception!" )
main.cleanAndExit()
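+
+ # Illustration with assumed IPs: ONOSIpList=[ '10.0.0.1', '10.0.0.2' ] and
+ # deviceCount=5 push devConfigs = 10.0.0.1:3,10.0.0.2:2 above, i.e. each
+ # node gets its share of null devices with the remainder assigned first.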
- def configNullLink( self,fileName="/opt/onos/apache-karaf-3.0.3/etc/linkGraph.cfg", eventRate=0):
+ def configNullLink( self, fileName="/opt/onos/apache-karaf-3.0.3/etc/linkGraph.cfg", eventRate=0 ):
'''
fileName default is currently the same as the default on ONOS, specify alternate file if
you want to use a different topology file than linkGraph.cfg
'''
-
try:
- self.handle.sendline("onos $OC1 cfg set org.onosproject.provider.nil.link.impl.NullLinkProvider eventRate " + str(eventRate))
- self.handle.expect(":~")
- self.handle.sendline("onos $OC1 cfg set org.onosproject.provider.nil.link.impl.NullLinkProvider cfgFile " + fileName )
- self.handle.expect(":~")
+ self.handle.sendline( "onos $OC1 cfg set org.onosproject.provider.nil.link.impl.NullLinkProvider eventRate " + str( eventRate ) )
+ self.handle.expect( ":~" )
+ self.handle.sendline( "onos $OC1 cfg set org.onosproject.provider.nil.link.impl.NullLinkProvider cfgFile " + fileName )
+ self.handle.expect( ":~" )
- for i in range(10):
- self.handle.sendline("onos $OC1 cfg get org.onosproject.provider.nil.link.impl.NullLinkProvider")
- self.handle.expect(":~")
+ for i in range( 10 ):
+ self.handle.sendline( "onos $OC1 cfg get org.onosproject.provider.nil.link.impl.NullLinkProvider" )
+ self.handle.expect( ":~" )
verification = self.handle.before
- if (" value=" + str(eventRate)) in verification and (" value=" + fileName) in verification:
+ if ( " value=" + str( eventRate ) ) in verification and ( " value=" + fileName ) in verification:
break
else:
- time.sleep(1)
+ time.sleep( 1 )
- assert ("value=" + str(eventRate)) in verification and (" value=" + fileName) in verification
+ assert ( "value=" + str( eventRate ) ) in verification and ( " value=" + fileName ) in verification
except pexpect.EOF:
main.log.error( self.name + ": EOF exception found" )
main.log.error( self.name + ": " + self.handle.before )
main.cleanAndExit()
except AssertionError:
- main.log.info("Settings did not post to ONOS")
- main.log.error(varification)
+ main.log.info( "Settings did not post to ONOS" )
+ main.log.error( verification )
except Exception:
main.log.exception( self.name + ": Uncaught exception!" )
- main.log.error(varification)
+ main.log.error( verification )
main.cleanAndExit()
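+
+ # Minimal usage sketch ( file path assumed ):
+ # self.configNullLink( fileName="/tmp/linkGraph.cfg", eventRate=0 )
+ # The retry loop above polls "cfg get" until both values are visible in ONOS.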
def getOnosIps( self ):
@@ -2136,16 +2130,16 @@
"""
try:
main.log.info( " Log Report for {} ".format( nodeIp ).center( 70, '=' ) )
- if type( searchTerms ) is str:
- searchTerms = [searchTerms]
+ if isinstance( searchTerms, str ):
+ searchTerms = [ searchTerms ]
numTerms = len( searchTerms )
outputMode = outputMode.lower()
totalHits = 0
logLines = []
for termIndex in range( numTerms ):
- term = searchTerms[termIndex]
- logLines.append( [term] )
+ term = searchTerms[ termIndex ]
+ logLines.append( [ term ] )
if startStr and endStr:
cmd = "onos-ssh {} \"sed -n '/{}/,/{}/p' /opt/onos/log/karaf.log | grep {}\"".format( nodeIp,
startStr,
@@ -2162,7 +2156,7 @@
if term in line and "grep" not in line:
count += 1
if before.index( line ) > ( len( before ) - 7 ):
- logLines[termIndex].append( line )
+ logLines[ termIndex ].append( line )
main.log.info( "{}: {}".format( term, count ) )
totalHits += count
if termIndex == numTerms - 1:
@@ -2170,10 +2164,10 @@
if outputMode != "s":
outputString = ""
for term in logLines:
- outputString = term[0] + ": \n"
+ outputString = term[ 0 ] + ": \n"
for line in range( 1, len( term ) ):
- outputString += ( "\t" + term[line] + "\n" )
- if outputString != ( term[0] + ": \n" ):
+ outputString += ( "\t" + term[ line ] + "\n" )
+ if outputString != ( term[ 0 ] + ": \n" ):
main.log.info( outputString )
main.log.info( "=" * 70 )
return totalHits
@@ -2243,22 +2237,22 @@
main.log.error( self.name + ": " + self.handle.before )
main.cleanAndExit()
- def jvmSet(self, memory=8):
+ def jvmSet( self, memory=8 ):
import os
- homeDir = os.path.expanduser('~')
+ homeDir = os.path.expanduser( '~' )
filename = "/onos/tools/package/bin/onos-service"
- serviceConfig = open(homeDir + filename, 'w+')
- serviceConfig.write("#!/bin/bash\n ")
- serviceConfig.write("#------------------------------------- \n ")
- serviceConfig.write("# Starts ONOS Apache Karaf container\n ")
- serviceConfig.write("#------------------------------------- \n ")
- serviceConfig.write("#export JAVA_HOME=${JAVA_HOME:-/usr/lib/jvm/java-7-openjdk-amd64/}\n ")
- serviceConfig.write("""export JAVA_OPTS="${JAVA_OPTS:--Xms""" + str(memory) + "G -Xmx" + str(memory) + """G}" \n """)
- serviceConfig.write("[ -d $ONOS_HOME ] && cd $ONOS_HOME || ONOS_HOME=$(dirname $0)/..\n")
- serviceConfig.write("""${ONOS_HOME}/apache-karaf-$KARAF_VERSION/bin/karaf "$@" \n """)
+ serviceConfig = open( homeDir + filename, 'w+' )
+ serviceConfig.write( "#!/bin/bash\n " )
+ serviceConfig.write( "#------------------------------------- \n " )
+ serviceConfig.write( "# Starts ONOS Apache Karaf container\n " )
+ serviceConfig.write( "#------------------------------------- \n " )
+ serviceConfig.write( "#export JAVA_HOME=${JAVA_HOME:-/usr/lib/jvm/java-7-openjdk-amd64/}\n " )
+ serviceConfig.write( """export JAVA_OPTS="${JAVA_OPTS:--Xms""" + str( memory ) + "G -Xmx" + str( memory ) + """G}" \n """ )
+ serviceConfig.write( "[ -d $ONOS_HOME ] && cd $ONOS_HOME || ONOS_HOME=$(dirname $0)/..\n" )
+ serviceConfig.write( """${ONOS_HOME}/apache-karaf-$KARAF_VERSION/bin/karaf "$@" \n """ )
serviceConfig.close()
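+
+ # With the default memory=8 the rewritten onos-service pins the JVM heap via:
+ # export JAVA_OPTS="${JAVA_OPTS:--Xms8G -Xmx8G}"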
def createDBFile( self, testData ):
@@ -2267,7 +2261,7 @@
DBString = ""
for item in testData:
- if type( item ) is string:
+ if isinstance( item, str ):
item = "'" + item + "'"
if testData.index( item ) < len( testData - 1 ):
item += ","
@@ -2279,14 +2273,14 @@
def verifySummary( self, ONOSIp, *deviceCount ):
- self.handle.sendline( "onos " + ONOSIp + " summary" )
+ self.handle.sendline( "onos " + ONOSIp + " summary" )
self.handle.expect( ":~" )
summaryStr = self.handle.before
print "\nSummary\n==============\n" + summaryStr + "\n\n"
- #passed = "SCC(s)=1" in summaryStr
- #if deviceCount:
+ # passed = "SCC(s)=1" in summaryStr
+ # if deviceCount:
# passed = "devices=" + str(deviceCount) + "," not in summaryStr
passed = False
@@ -2298,7 +2292,7 @@
if deviceCount:
print" ============================="
- checkStr = "devices=" + str( deviceCount[0] ) + ","
+ checkStr = "devices=" + str( deviceCount[ 0 ] ) + ","
print "Checkstr: " + checkStr
if checkStr not in summaryStr:
passed = False
@@ -2330,7 +2324,7 @@
# IF self.ip_address is an ip address and matches
# self.nicAddr: return self.ip_address
if match:
- curIp = match.group(0)
+ curIp = match.group( 0 )
if nicPat:
nicMatch = re.search( nicPat, curIp )
if nicMatch:
@@ -2345,7 +2339,7 @@
ips = re.findall( ifPat, raw )
if iface:
if ips:
- ip = ips[0]
+ ip = ips[ 0 ]
self.ip_address = ip
return ip
else:
@@ -2359,8 +2353,8 @@
return ip
else: # If only one non-localhost ip, return that
tmpList = [ ip for ip in ips if ip is not LOCALHOST ]
- if len(tmpList) == 1:
- curIp = tmpList[0]
+ if len( tmpList ) == 1:
+ curIp = tmpList[ 0 ]
self.ip_address = curIp
return curIp
# Either no non-localhost IPs, or more than 1
@@ -2410,7 +2404,7 @@
onosStatus = True
for nd in nodeList:
onosStatus = onosStatus & self.isup( node = nd )
- #print "onosStatus is: " + str( onosStatus )
+ # print "onosStatus is: " + str( onosStatus )
return main.TRUE if onosStatus else main.FALSE
@@ -2443,3 +2437,37 @@
except Exception:
main.log.exception( self.name + ": Uncaught exception!" )
main.cleanAndExit()
+
+ def formCluster( self, onosIPs ):
+ """
+ Form an ONOS cluster from the IP addresses in the onosIPs list
+ """
+ try:
+ onosIPs = " ".join( onosIPs )
+ command = "onos-form-cluster {}".format( onosIPs )
+ main.log.info( "Sending: " + command )
+ self.handle.sendline( "" )
+ self.handle.expect( self.prompt )
+ self.handle.sendline( command )
+ self.handle.expect( self.prompt )
+ handle = self.handle.before
+ main.log.debug( handle )
+ assert handle is not None, "Error in sendline"
+ assert "Command not found:" not in handle, handle
+ assert "Error" not in handle, handle
+ assert "Exception:" not in handle, handle
+ assert "curl:" not in handle, handle
+ return main.TRUE
+ except AssertionError:
+ main.log.exception( "{} Error in onos-form-cluster output:".format( self.name ) )
+ return main.FALSE
+ except TypeError:
+ main.log.exception( self.name + ": Object not as expected" )
+ return main.FALSE
+ except pexpect.EOF:
+ main.log.error( self.name + ": EOF exception found" )
+ main.log.error( self.name + ": " + self.handle.before )
+ main.cleanAndExit()
+ except Exception:
+ main.log.exception( self.name + ": Uncaught exception!" )
+ main.cleanAndExit()
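+
+ # Hedged usage sketch ( "main.ONOSbench" and the IPs are assumed ):
+ # main.ONOSbench.formCluster( [ "10.0.0.1", "10.0.0.2", "10.0.0.3" ] )
+ # runs "onos-form-cluster 10.0.0.1 10.0.0.2 10.0.0.3" on the bench machine
+ # and returns main.TRUE when the output carries no error markers.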
diff --git a/TestON/drivers/common/cli/ovsdbdriver.py b/TestON/drivers/common/cli/ovsdbdriver.py
index 636a7b2..fc029ce 100644
--- a/TestON/drivers/common/cli/ovsdbdriver.py
+++ b/TestON/drivers/common/cli/ovsdbdriver.py
@@ -51,11 +51,11 @@
def connect( self, **connectargs ):
try:
for key in connectargs:
- vars( self)[ key ] = connectargs[ key ]
+ vars( self )[ key ] = connectargs[ key ]
self.name = self.options[ 'name' ]
- if os.getenv( str( self.ip_address ) ) != None:
- self.ip_address = os.getenv(str ( self.ip_address ) )
+ if os.getenv( str( self.ip_address ) ) is not None:
+ self.ip_address = os.getenv( str( self.ip_address ) )
else:
main.log.info( self.name + ": Trying to connect to " +
self.ip_address )
@@ -63,7 +63,7 @@
user_name=self.user_name,
ip_address=self.ip_address,
port=self.port,
- pwd=self.pwd)
+ pwd=self.pwd )
if self.handle:
main.log.info( "Connection successful to the ovsdb node " +
@@ -91,7 +91,7 @@
return response
def setManager( self, ip, port, delaytime="5" ):
- command= "sudo ovs-vsctl set-manager tcp:" + str( ip ) + ":" + str( port )
+ command = "sudo ovs-vsctl set-manager tcp:" + str( ip ) + ":" + str( port )
try:
handle = self.execute(
cmd=command,
@@ -102,7 +102,7 @@
return main.FALSE
else:
main.log.info( "Ovsdb manager " + str( ip ) + " set" )
- #delay time for ovsdb connection create
+ # delay time for ovsdb connection create
main.log.info( "Wait " + str( delaytime ) + " seconds for ovsdb connection create" )
time.sleep( int( delaytime ) )
return main.TRUE
@@ -112,7 +112,7 @@
main.cleanAndExit()
def delManager( self, delaytime="5" ):
- command= "sudo ovs-vsctl del-manager"
+ command = "sudo ovs-vsctl del-manager"
try:
handle = self.execute(
cmd=command,
@@ -123,7 +123,7 @@
return main.FALSE
else:
main.log.info( "Ovsdb manager delete" )
- #delay time for ovsdb connection delete
+ # delay time for ovsdb connection delete
main.log.info( "Wait " + str( delaytime ) + " seconds for ovsdb connection delete" )
time.sleep( int( delaytime ) )
return main.TRUE
@@ -133,7 +133,7 @@
main.cleanAndExit()
def getManager( self ):
- command= "sudo ovs-vsctl get-manager"
+ command = "sudo ovs-vsctl get-manager"
try:
response = self.execute(
cmd=command,
@@ -152,7 +152,7 @@
The output of the command from the linux
or main.FALSE on timeout
"""
- command= "sudo ovs-vsctl list-br"
+ command = "sudo ovs-vsctl list-br"
try:
response = self.execute(
cmd=command,
@@ -174,7 +174,7 @@
The output of the command from the linux
or main.FALSE on timeout
"""
- command= "sudo ovs-vsctl list-ports " + str( sw )
+ command = "sudo ovs-vsctl list-ports " + str( sw )
try:
response = self.execute(
cmd=command,
@@ -199,7 +199,7 @@
try:
response = self.execute(
cmd=command,
- timeout=10)
+ timeout=10 )
if response:
return response
else:
@@ -220,7 +220,7 @@
try:
response = self.execute(
cmd=command,
- timeout=10)
+ timeout=10 )
if response:
return response
else:
@@ -251,8 +251,8 @@
else:
return main.FALSE
except pexpect.EOF:
- main.log.error(self.name + ": EOF exception found")
- main.log.error(self.name + ": " + self.handle.before)
+ main.log.error( self.name + ": EOF exception found" )
+ main.log.error( self.name + ": " + self.handle.before )
main.cleanAndExit()
def createHost( self, hostname ):
@@ -260,7 +260,7 @@
try:
handle = self.execute(
cmd=command,
- timeout=10)
+ timeout=10 )
if re.search( "Error", handle ):
main.log.error( "Error in create host" + str( hostname ) )
main.log.error( handle )
@@ -269,20 +269,20 @@
main.log.info( "Create " + str( hostname ) + " sucess" )
return main.TRUE
except pexpect.EOF:
- main.log.error(self.name + ": EOF exception found")
- main.log.error(self.name + ": " + self.handle.before)
+ main.log.error( self.name + ": EOF exception found" )
+ main.log.error( self.name + ": " + self.handle.before )
main.cleanAndExit()
- def createHostport(self, hostname="host1", hostport="host1-eth0", ovsport="port1", hostportmac="000000000001" ):
- command = "sudo ip link add " + str(hostport) +" type veth peer name " + str(ovsport)
- command += ";" + "sudo ip link set " + str(hostport) + " up"
- command += ";" + "sudo ip link set " + str(ovsport) + " up"
- command += ";" +" sudo ifconfig " + str(hostport) + " hw ether " + str(hostportmac)
- command += ";" +" sudo ip link set " + str(hostport) + " netns " + str(hostname)
+ def createHostport( self, hostname="host1", hostport="host1-eth0", ovsport="port1", hostportmac="000000000001" ):
+ command = "sudo ip link add " + str( hostport ) + " type veth peer name " + str( ovsport )
+ command += ";" + "sudo ip link set " + str( hostport ) + " up"
+ command += ";" + "sudo ip link set " + str( ovsport ) + " up"
+ command += ";" + " sudo ifconfig " + str( hostport ) + " hw ether " + str( hostportmac )
+ command += ";" + " sudo ip link set " + str( hostport ) + " netns " + str( hostname )
try:
handle = self.execute(
cmd=command,
- timeout=10)
+ timeout=10 )
if re.search( "Error", handle ):
main.log.error( "Error in create host port " + str( hostport ) + " on " + str( hostname ) )
main.log.error( handle )
@@ -291,40 +291,40 @@
main.log.info( "Create host port " + str( hostport ) + " on " + str( hostname ) + " sucess" )
return main.TRUE
except pexpect.EOF:
- main.log.error(self.name + ": EOF exception found")
- main.log.error(self.name + ": " + self.handle.before)
+ main.log.error( self.name + ": EOF exception found" )
+ main.log.error( self.name + ": " + self.handle.before )
main.cleanAndExit()
- def addPortToOvs(self, ifaceId, attachedMac, vmuuid, port="port1", ovsname="br-int" ):
- command = "sudo ovs-vsctl add-port " + str(ovsname) +" " + str(port)
+ def addPortToOvs( self, ifaceId, attachedMac, vmuuid, port="port1", ovsname="br-int" ):
+ command = "sudo ovs-vsctl add-port " + str( ovsname ) + " " + str( port )
if ifaceId:
- command += " -- set Interface " + str(port) + " external-ids:iface-id=" + str(ifaceId) + " external-ids:iface-status=active"
+ command += " -- set Interface " + str( port ) + " external-ids:iface-id=" + str( ifaceId ) + " external-ids:iface-status=active"
if attachedMac:
- command += " external-ids:attached-mac=" + str(attachedMac)
+ command += " external-ids:attached-mac=" + str( attachedMac )
if vmuuid:
- command += " external-ids:vm-uuid=" + str(vmuuid)
+ command += " external-ids:vm-uuid=" + str( vmuuid )
try:
handle = self.execute(
cmd=command,
- timeout=10)
+ timeout=10 )
if re.search( "Error", handle ):
- main.log.error( "Error in add port " + str(port) + " to ovs " + str( ovsname ) )
+ main.log.error( "Error in add port " + str( port ) + " to ovs " + str( ovsname ) )
main.log.error( handle )
return main.FALSE
else:
- main.log.info( "Add port " + str(port) + " to ovs " + str( ovsname ) + " sucess" )
+ main.log.info( "Add port " + str( port ) + " to ovs " + str( ovsname ) + " sucess" )
return main.TRUE
except pexpect.EOF:
- main.log.error(self.name + ": EOF exception found")
- main.log.error(self.name + ": " + self.handle.before)
+ main.log.error( self.name + ": EOF exception found" )
+ main.log.error( self.name + ": " + self.handle.before )
main.cleanAndExit()
- def setHostportIp(self, ip, hostname="host1", hostport1="host1-eth0" ):
- command = "sudo ip netns exec " + str(hostname) +" ifconfig " + str(hostport1) + " " + str(ip)
+ def setHostportIp( self, ip, hostname="host1", hostport1="host1-eth0" ):
+ command = "sudo ip netns exec " + str( hostname ) + " ifconfig " + str( hostport1 ) + " " + str( ip )
try:
handle = self.execute(
cmd=command,
- timeout=10)
+ timeout=10 )
if re.search( "Error", handle ):
main.log.error( "Error in set host ip for " + str( hostport1 ) + " on host " + str( hostname ) )
main.log.error( handle )
@@ -333,32 +333,32 @@
main.log.info( "Set host ip for " + str( hostport1 ) + " on host " + str( hostname ) + " sucess" )
return main.TRUE
except pexpect.EOF:
- main.log.error(self.name + ": EOF exception found")
- main.log.error(self.name + ": " + self.handle.before)
+ main.log.error( self.name + ": EOF exception found" )
+ main.log.error( self.name + ": " + self.handle.before )
main.cleanAndExit()
- def hostPing(self, src, target, hostname="host1" ):
+ def hostPing( self, src, target, hostname="host1" ):
if src:
- command = "sudo ip netns exec " + str( hostname ) +" ping -c 1 -S " +\
- str( src ) + " " + str( target )
+ command = "sudo ip netns exec " + str( hostname ) + " ping -c 1 -S " +\
+ str( src ) + " " + str( target )
else:
- command = "sudo ip netns exec " + str( hostname ) +" ping -c 1 " + str( target )
+ command = "sudo ip netns exec " + str( hostname ) + " ping -c 1 " + str( target )
try:
- for i in range(1,5):
+ for i in range( 1, 5 ):
handle = self.execute(
cmd=command,
- timeout=10)
- if re.search(',\s0\%\spacket\sloss', handle):
- main.log.info(self.name + ": no packets lost, host is reachable")
+ timeout=10 )
+ if re.search( ',\s0\%\spacket\sloss', handle ):
+ main.log.info( self.name + ": no packets lost, host is reachable" )
return main.TRUE
break
- time.sleep(5)
+ time.sleep( 5 )
else:
- main.log.info(self.name + ": packets lost, host is unreachable")
+ main.log.info( self.name + ": packets lost, host is unreachable" )
return main.FALSE
except pexpect.EOF:
- main.log.error(self.name + ": EOF exception found")
- main.log.error(self.name + ": " + self.handle.before)
+ main.log.error( self.name + ": EOF exception found" )
+ main.log.error( self.name + ": " + self.handle.before )
main.cleanAndExit()
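+
+ # Usage sketch ( target address assumed ):
+ # self.hostPing( src=None, target="10.0.0.2", hostname="host1" )
+ # retries up to four single pings inside the "host1" namespace and returns
+ # main.TRUE on the first lossless reply.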
def delBr( self, sw ):
@@ -368,7 +368,7 @@
Return:
Returns main.TRUE on successful delete, or main.FALSE if the delete failed
"""
- command= "sudo ovs-vsctl del-br " + str( sw )
+ command = "sudo ovs-vsctl del-br " + str( sw )
try:
response = self.execute(
cmd=command,
@@ -389,7 +389,7 @@
Return:
Returns main.TRUE on successful delete, or main.FALSE if the delete failed
"""
- command= "sudo ip netns delete " + str( hostname )
+ command = "sudo ip netns delete " + str( hostname )
try:
response = self.execute(
cmd=command,
diff --git a/TestON/drivers/common/cli/quaggaclidriver.py b/TestON/drivers/common/cli/quaggaclidriver.py
index e1cbeef..01905c2 100644
--- a/TestON/drivers/common/cli/quaggaclidriver.py
+++ b/TestON/drivers/common/cli/quaggaclidriver.py
@@ -58,7 +58,7 @@
ip_address="1.1.1.1",
port=self.port,
pwd=self.pwd )
- #main.log.info( "connect parameters:" + str( self.user_name ) + ";"
+ # main.log.info( "connect parameters:" + str( self.user_name ) + ";"
# + str( self.ip_address ) + ";" + str( self.port )
# + ";" + str(self.pwd ) )
@@ -208,7 +208,7 @@
routesJsonObj = json.loads( getRoutesResult )
allRoutesActual = []
- for route in routesJsonObj['routes4']:
+ for route in routesJsonObj[ 'routes4' ]:
if 'prefix' in route:
if route[ 'prefix' ] == '172.16.10.0/24':
continue
@@ -221,7 +221,7 @@
routesJsonObj = json.loads( getRoutesResult )
allRoutesActual = []
- for route in routesJsonObj['routes4']:
+ for route in routesJsonObj[ 'routes4' ]:
if route[ 'prefix' ] == '172.16.10.0/24':
continue
allRoutesActual.append(
@@ -237,10 +237,10 @@
intentsJsonObj = json.loads( getIntentsResult )
for intent in intentsJsonObj:
- #if intent[ 'appId' ] != "org.onosproject.sdnip":
+ # if intent[ 'appId' ] != "org.onosproject.sdnip":
# continue
if intent[ 'type' ] == "MultiPointToSinglePointIntent" \
- and intent[ 'state' ] == 'INSTALLED':
+ and intent[ 'state' ] == 'INSTALLED':
egress = str( intent[ 'egress' ][ 'device' ] ) + ":" \
+ str( intent[ 'egress' ][ 'port' ] )
ingress = []
@@ -266,7 +266,7 @@
num = 0
for intent in intentsJsonObj:
if intent[ 'type' ] == "MultiPointToSinglePointIntent" \
- and intent[ 'state' ] == 'INSTALLED':
+ and intent[ 'state' ] == 'INSTALLED':
num = num + 1
return num
@@ -276,7 +276,7 @@
num = 0
for intent in intentsJsonObj:
if intent[ 'type' ] == "PointToPointIntent" \
- and intent[ 'state' ] == 'INSTALLED':
+ and intent[ 'state' ] == 'INSTALLED':
num = num + 1
return num
@@ -288,10 +288,10 @@
intentsJsonObj = json.loads( getIntentsResult )
for intent in intentsJsonObj:
- #if intent[ 'appId' ] != "org.onosproject.sdnip":
+ # if intent[ 'appId' ] != "org.onosproject.sdnip":
# continue
if intent[ 'type' ] == "PointToPointIntent" \
- and "protocol=6" in str( intent[ 'selector' ] ):
+ and "protocol=6" in str( intent[ 'selector' ] ):
ingress = str( intent[ 'ingress' ][ 'device' ] ) + ":" \
+ str( intent[ 'ingress' ][ 'port' ] )
egress = str( intent[ 'egress' ][ 'device' ] ) + ":" + \
@@ -325,15 +325,15 @@
# find out the BGP speaker IP address for this BGP peer
bgpSpeakerIpAddress = ""
for interfaceAddress in \
- sdnipData[ 'bgpSpeakers' ][ 0 ][ 'interfaceAddresses' ]:
+ sdnipData[ 'bgpSpeakers' ][ 0 ][ 'interfaceAddresses' ]:
# if eq( interfaceAddress[ 'interfaceDpid' ],sdnipData[
# 'bgpSpeakers' ][ 0 ][ 'attachmentDpid' ] ) and eq(
# interfaceAddress[ 'interfacePort' ], sdnipData[ 'bgpSpeakers'
# ][ 0 ][ 'attachmentPort' ] ):
if eq( interfaceAddress[ 'interfaceDpid' ],
peer[ 'attachmentDpid' ] ) \
- and eq( interfaceAddress[ 'interfacePort' ],
- peer[ 'attachmentPort' ] ):
+ and eq( interfaceAddress[ 'interfacePort' ],
+ peer[ 'attachmentPort' ] ):
bgpSpeakerIpAddress = interfaceAddress[ 'ipAddress' ]
break
else:
@@ -345,7 +345,7 @@
+ "IPV4_DST{ip=" + peer[ 'ipAddress' ] + "/32}," \
+ "IP_PROTO{protocol=6}, ETH_TYPE{ethType=800}, \
TCP_DST{tcpPort=179}"
- selector = selectorStr.replace( " ", "" ).replace("[", "" )\
+ selector = selectorStr.replace( " ", "" ).replace( "[", "" )\
.replace( "]", "" ).split( "," )
intent = bgpSpeakerAttachmentPoint + "/" + \
bgpPeerAttachmentPoint + "/" + str( sorted( selector ) )
@@ -355,7 +355,7 @@
+ "IPV4_DST{ip=" + peer[ 'ipAddress' ] + "/32}," \
+ "IP_PROTO{protocol=6}, ETH_TYPE{ethType=800}, \
TCP_SRC{tcpPort=179}"
- selector = selectorStr.replace( " ", "" ).replace("[", "" )\
+ selector = selectorStr.replace( " ", "" ).replace( "[", "" )\
.replace( "]", "" ).split( "," )
intent = bgpSpeakerAttachmentPoint + "/" \
+ bgpPeerAttachmentPoint + "/" + str( sorted( selector ) )
@@ -367,7 +367,7 @@
+ "IPV4_DST{ip=" + bgpSpeakerIpAddress + "/32}," \
+ "IP_PROTO{protocol=6}, ETH_TYPE{ethType=800}, \
TCP_DST{tcpPort=179}"
- selector = selectorStr.replace( " ", "" ).replace("[", "" )\
+ selector = selectorStr.replace( " ", "" ).replace( "[", "" )\
.replace( "]", "" ).split( "," )
intent = bgpPeerAttachmentPoint + "/" \
+ bgpSpeakerAttachmentPoint + "/" + str( sorted( selector ) )
@@ -400,23 +400,23 @@
chunk_size = 20
- if len(routes) > chunk_size:
- num_iter = (int) (len(routes) / chunk_size)
+ if len( routes ) > chunk_size:
+ num_iter = int( len( routes ) / chunk_size )
else:
- num_iter = 1;
+ num_iter = 1
total = 0
- for n in range( 0, num_iter + 1):
+ for n in range( 0, num_iter + 1 ):
routeCmd = ""
- if (len( routes ) - (n * chunk_size)) >= chunk_size:
- m = (n + 1) * chunk_size
+ if ( len( routes ) - ( n * chunk_size ) ) >= chunk_size:
+ m = ( n + 1 ) * chunk_size
else:
m = len( routes )
for i in range( n * chunk_size, m ):
routeCmd = routeCmd + "network " + routes[ i ] + "\n"
total = total + 1
- main.log.info(routeCmd)
+ main.log.info( routeCmd )
try:
self.handle.sendline( routeCmd )
self.handle.expect( "bgpd", timeout=5 )
@@ -425,8 +425,8 @@
self.disconnect()
# waitTimer = 1.00 / routeRate
- main.log.info("Total routes so far " + ((str) (total)) + " wait for 0 sec")
- #time.sleep( 1 )
+ main.log.info( "Total routes so far " + ( str( total ) ) + " wait for 0 sec" )
+ # time.sleep( 1 )
if routesAdded == len( routes ):
main.log.info( "Finished adding routes" )
return main.TRUE
@@ -489,7 +489,6 @@
main.log.info( "NO HANDLE" )
return main.FALSE
-
# Please use the generateRoutes plus addRoutes instead of this one!
def addRoute( self, net, numRoutes, routeRate ):
try:
@@ -613,7 +612,7 @@
count = 0
while True:
i = child.expect( [ '17\d\.\d{1,3}\.\d{1,3}\.\d{1,3}',
- 'CLI#', pexpect.TIMEOUT ] )
+ 'CLI#', pexpect.TIMEOUT ] )
if i == 0:
count = count + 1
elif i == 1:
@@ -700,4 +699,3 @@
main.log.error( "Connection failed to the host" )
response = main.FALSE
return response
-
diff --git a/TestON/drivers/common/cli/tool/dpctlclidriver.py b/TestON/drivers/common/cli/tool/dpctlclidriver.py
index 0dd15ee..6375c0f 100644
--- a/TestON/drivers/common/cli/tool/dpctlclidriver.py
+++ b/TestON/drivers/common/cli/tool/dpctlclidriver.py
@@ -1,4 +1,4 @@
-#/usr/bin/env python
+# /usr/bin/env python
"""
Created on 26-Nov-2012
Copyright 2012 Open Networking Foundation (ONF)
@@ -50,9 +50,9 @@
self.handle = super(
DpctlCliDriver, self ).connect( user_name=self.user_name,
- ip_address=self.ip_address,
- port=None,
- pwd=self.pwd )
+ ip_address=self.ip_address,
+ port=None,
+ pwd=self.pwd )
if self.handle:
main.log.info( "Connected to the host" )
return main.TRUE
@@ -74,7 +74,7 @@
"INPORT",
"ACTION",
"TIMEOUT" ],
- **flowParameters )
+ **flowParameters )
cmd = "dpctl add-flow tcp:"
tcpIP = args[ "TCPIP" ] if args[ "TCPIP" ] is not None else "127.0.0.1"
@@ -258,7 +258,7 @@
"TCPIP",
"TCPPORT",
"STRING" ],
- **flowParameters )
+ **flowParameters )
tcpIP = args[ "TCPIP" ] if args[ "TCPIP" ] is not None else "127.0.0.1"
tcpPort = args[ "TCPPORT" ] if args[
@@ -286,7 +286,7 @@
"TCPIP",
"TCPPORT",
"STRING" ],
- **flowParameters )
+ **flowParameters )
tcpIP = args[ "TCPIP" ] if args[ "TCPIP" ] is not None else "127.0.0.1"
tcpPort = args[ "TCPPORT" ] if args[
@@ -306,4 +306,3 @@
if __name__ != "__main__":
import sys
sys.modules[ __name__ ] = DpctlCliDriver()
-
diff --git a/TestON/drivers/common/clidriver.py b/TestON/drivers/common/clidriver.py
index 086ae0a..10b5152 100644
--- a/TestON/drivers/common/clidriver.py
+++ b/TestON/drivers/common/clidriver.py
@@ -10,7 +10,7 @@
TestON is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 2 of the License, or
- ( at your option ) any later version.
+ (at your option) any later version.
TestON is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -34,10 +34,10 @@
def __init__( self ):
super( CLI, self ).__init__()
- def checkPrompt(self):
+ def checkPrompt( self ):
for key in self.options:
- if key == "prompt" and self.options['prompt'] is not None:
- self.prompt = self.options['prompt']
+ if key == "prompt" and self.options[ 'prompt' ] is not None:
+ self.prompt = self.options[ 'prompt' ]
break
def connect( self, **connectargs ):
@@ -56,20 +56,22 @@
self.ip_address + " port 22: Connection refused"
if self.port:
self.handle = pexpect.spawn(
- 'ssh -p ' +
+ 'ssh -X -p ' +
self.port +
' ' +
self.user_name +
'@' +
- self.ip_address,
+ self.ip_address +
+ ' -o ServerAliveInterval=120 -o TCPKeepAlive=yes',
env={ "TERM": "xterm-mono" },
- maxread=50000 )
+ maxread=1000000 )
else:
self.handle = pexpect.spawn(
'ssh -X ' +
self.user_name +
'@' +
- self.ip_address,
+ self.ip_address +
+ ' -o ServerAliveInterval=120 -o TCPKeepAlive=yes',
env={ "TERM": "xterm-mono" },
maxread=1000000,
timeout=60 )
@@ -100,7 +102,7 @@
else:
main.log.info( "Server asked for password, but none was "
"given in the .topo file. Trying "
- "no password.")
+ "no password." )
self.pwd = ""
self.handle.sendline( self.pwd )
j = self.handle.expect( [
@@ -181,8 +183,8 @@
if index == 0:
self.LASTRSP = self.LASTRSP + \
self.handle.before + self.handle.after
- main.log.info( "Executed :" + str(cmd ) +
- " \t\t Expected Prompt '" + str( expectPrompt) +
+ main.log.info( "Executed :" + str( cmd ) +
+ " \t\t Expected Prompt '" + str( expectPrompt ) +
"' Found" )
elif index == 1:
self.LASTRSP = self.LASTRSP + self.handle.before
@@ -397,7 +399,7 @@
else:
main.log.info( "Server asked for password, but none was "
"given in the .topo file. Trying "
- "no password.")
+ "no password." )
pwd = ""
handle.sendline( pwd )
j = handle.expect( [ self.prompt,
@@ -479,16 +481,16 @@
assert "cannot access" not in output
assert "command not found" not in output
main.log.debug( output )
- lines = [ line for line in output.split('\r\n') ]
+ lines = [ line for line in output.split( '\r\n' ) ]
retValue = True
if ignoreRoot:
lastIndex = -2
else:
lastIndex = -1
- for line in lines[1:lastIndex]:
+ for line in lines[ 1:lastIndex ]:
parsed = line.split()
- sizeMatch = parsed[0]
- folder = parsed[1]
+ sizeMatch = parsed[ 0 ]
+ folder = parsed[ 1 ]
match = re.search( sizeRe, sizeMatch )
num = match.group( 'number' )
unitMatch = match.group( 'unit' )
diff --git a/TestON/drivers/component.py b/TestON/drivers/component.py
index 968306e..c3d3297 100644
--- a/TestON/drivers/component.py
+++ b/TestON/drivers/component.py
@@ -117,4 +117,3 @@
if __name__ != "__main__":
import sys
sys.modules[ __name__ ] = Component()
-
diff --git a/TestON/tests/FUNC/FUNCbgpls/FUNCbgpls.params b/TestON/tests/FUNC/FUNCbgpls/FUNCbgpls.params
index cefc4f1..1e5474b 100755
--- a/TestON/tests/FUNC/FUNCbgpls/FUNCbgpls.params
+++ b/TestON/tests/FUNC/FUNCbgpls/FUNCbgpls.params
@@ -12,6 +12,11 @@
<testcases>1,2,3,4,5,6</testcases>
+ <GRAPH>
+ <nodeCluster>BM</nodeCluster>
+ <builds>20</builds>
+ </GRAPH>
+
<DEPENDENCY>
<path>/tests/FUNC/FUNCbgpls/Dependencies/</path>
</DEPENDENCY>
diff --git a/TestON/tests/FUNC/FUNCflow/FUNCflow.params b/TestON/tests/FUNC/FUNCflow/FUNCflow.params
index c0ce79f..6eda334 100755
--- a/TestON/tests/FUNC/FUNCflow/FUNCflow.params
+++ b/TestON/tests/FUNC/FUNCflow/FUNCflow.params
@@ -23,6 +23,11 @@
<testcases>1,2,10,1000,3000,1100,3000,1200,3000,1300,3000,1400,3000,1500,3000,1600,3000,1700,3000,1800,3000,1900,3000,2000,100</testcases>
+ <GRAPH>
+ <nodeCluster>VM</nodeCluster>
+ <builds>20</builds>
+ </GRAPH>
+
<SCALE>
<max>1</max>
</SCALE>
diff --git a/TestON/tests/FUNC/FUNCformCluster/FUNCformCluster.params b/TestON/tests/FUNC/FUNCformCluster/FUNCformCluster.params
new file mode 100644
index 0000000..be399f6
--- /dev/null
+++ b/TestON/tests/FUNC/FUNCformCluster/FUNCformCluster.params
@@ -0,0 +1,44 @@
+<PARAMS>
+ # CASE - Description
+ # 0 - Variable initialization and optional pull and build of the ONOS package
+ # 1 - Install ONOS as independent single-node instances
+ # 2 - Start ONOS, forming a cluster
+ # 3 - Check the ONOS configuration with single nodes
+ # 4 - Check the ONOS configuration with the cluster formed
+ # 5 - Start Mininet and verify the topology
+
+ <testcases>0,1,3,2,4,5</testcases>
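+
+ # Note: case 3 ( the single-node check ) intentionally runs before
+ # case 2 ( cluster formation ), so each instance is verified alone first.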
+
+ <DEPENDENCY>
+ <path>/tests/FUNC/FUNCformCluster/dependencies/</path>
+ <file>formClusterFuncs</file>
+ </DEPENDENCY>
+ <GRAPH>
+ <nodeCluster>VM</nodeCluster>
+ <builds>20</builds>
+ </GRAPH>
+ <ENV>
+ <cellApps>drivers,openflow</cellApps>
+ <additionalApp>org.onosproject.fwd</additionalApp>
+ <cellBasicName>singleTemp</cellBasicName>
+ </ENV>
+ <GIT>
+ <pull>False</pull>
+ <branch>master</branch>
+ </GIT>
+ <TEST>
+ <numNodes>7</numNodes>
+ </TEST>
+ <RETRY>
+ <pingall>2</pingall>
+ <topoCheck>2</topoCheck>
+ </RETRY>
+ <SLEEP>
+ <afterONOSStart>15</afterONOSStart>
+ <pingall>3</pingall>
+ </SLEEP>
+ <MININET>
+ <topo>mn --topo tree,2,2</topo>
+ </MININET>
+
+</PARAMS>
diff --git a/TestON/tests/FUNC/FUNCformCluster/FUNCformCluster.py b/TestON/tests/FUNC/FUNCformCluster/FUNCformCluster.py
new file mode 100644
index 0000000..55b6e41
--- /dev/null
+++ b/TestON/tests/FUNC/FUNCformCluster/FUNCformCluster.py
@@ -0,0 +1,261 @@
+"""
+Copyright 2017 Open Networking Foundation ( ONF )
+
+Please refer questions to either the onos test mailing list at <onos-test@onosproject.org>,
+the System Testing Plans and Results wiki page at <https://wiki.onosproject.org/x/voMg>,
+or the System Testing Guide page at <https://wiki.onosproject.org/x/WYQg>
+
+ TestON is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 2 of the License, or
+ ( at your option ) any later version.
+
+ TestON is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with TestON. If not, see <http://www.gnu.org/licenses/>.
+"""
+
+
+class FUNCformCluster:
+
+ def __init__( self ):
+ self.default = ''
+
+ def CASE0( self, main ):
+ import imp
+ import re
+
+ try:
+ from tests.dependencies.ONOSSetup import ONOSSetup
+ main.testSetUp = ONOSSetup()
+ except ImportError:
+ main.log.error( "ONOSSetup not found. exiting the test" )
+ main.cleanAndExit()
+ main.testSetUp.envSetupDescription()
+ stepResult = main.TRUE
+ try:
+ main.apps = main.params[ 'ENV' ][ 'cellApps' ]
+ main.additionalApp = main.params[ 'ENV' ][ 'additionalApp' ]
+ main.cellBasicName = main.params[ 'ENV' ][ 'cellBasicName' ]
+ main.mnTopo = main.params[ 'MININET' ][ 'topo' ]
+ main.startSleep = int( main.params[ 'SLEEP' ][ 'afterONOSStart' ] )
+ dependencyPath = main.testOnDirectory + \
+ main.params[ 'DEPENDENCY' ][ 'path' ]
+ dependencyFile = main.params[ 'DEPENDENCY' ][ 'file' ]
+ main.numNodes = int( main.params[ 'TEST' ][ 'numNodes' ] )
+ main.funcs = imp.load_source( dependencyFile,
+ dependencyPath +
+ dependencyFile +
+ ".py" )
+ main.pingallRetry = int( main.params[ 'RETRY' ][ 'pingall' ] )
+ main.topoCheckRetry = int( main.params[ 'RETRY' ][ 'topoCheck' ] )
+ main.pingallSleep = int( main.params[ 'SLEEP' ][ 'pingall' ] )
+
+ except Exception as e:
+ main.testSetUp.envSetupException( e )
+ if len( main.Cluster.runningNodes ) != main.numNodes:
+ main.log.error( "The number of the nodes needs to be " + str( main.numNodes ) +
+ "\nExiting Test..." )
+ main.cleanAndExit()
+ main.testSetUp.evnSetupConclusion( stepResult )
+
+ def CASE1( self, main ):
+ """
+ - Create cells with single node
+ - apply each cell to each cluster
+ - install ONOS
+ - ssh-secure
+ - start the ONOS
+ - activate org.onosproject.fwd to cluster 1 only.
+ """
+ main.case( "Starting ONOS with indepenent configuration" )
+ main.caseExplanation = "Starting ONOS with one node itself."
+ main.testSetUp.killingAllOnos( main.Cluster, True, False )
+ threads = []
+ i = 0
+ for cluster in main.Cluster.runningNodes:
+ i += 1
+ t = main.Thread( target=cluster.Bench.createCellFile,
+ name="create-cell",
+ args=[ main.ONOSbench.ip_address,
+ main.cellBasicName + str( i ),
+ main.Mininet1.ip_address,
+ main.apps,
+ cluster.ip_address,
+ main.ONOScell.karafUser,
+ True ] )
+ threads.append( t )
+ t.start()
+ cellResult = main.TRUE
+ for t in threads:
+ t.join()
+ cellResult = cellResult and t.result
+
+ threads = []
+ i = 0
+ for cluster in main.Cluster.runningNodes:
+ i += 1
+ t = main.Thread( target=cluster.Bench.setCell,
+ name="set-cell",
+ args=[ main.cellBasicName + str( i ) ] )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ cellResult = cellResult and t.result
+
+ threads = []
+ i = 0
+ for cluster in main.Cluster.runningNodes:
+ i += 1
+ t = main.Thread( target=cluster.Bench.verifyCell,
+ name="verify-cell" )
+ threads.append( t )
+ t.start()
+ for t in threads:
+ t.join()
+ cellResult = cellResult and t.result
+
+ uninstallResult = main.testSetUp.uninstallOnos( main.Cluster, True )
+ buildResult = main.testSetUp.buildOnos( main.Cluster )
+ installResult = main.testSetUp.installOnos( main.Cluster, True, True )
+ secureSshResult = main.testSetUp.setupSsh( main.Cluster )
+ onosServiceResult = main.testSetUp.checkOnosService( main.Cluster )
+ onosCliResult = main.testSetUp.startOnosClis( main.Cluster )
+ activateResult = main.Cluster.active( 0 ).CLI.activateApp( main.additionalApp )
+
+ result = cellResult and uninstallResult and buildResult and installResult and \
+ secureSshResult and onosServiceResult and onosCliResult and activateResult
+ utilities.assert_equals( expect=main.TRUE,
+ actual=result,
+ onpass="Successfully started the ONOS",
+ onfail="Failed to start the ONOS" )
+
+ def CASE2( self, main ):
+ """
+ - Execute onos-form-cluster to all the nodes.
+ - start the ONOS.
+ - activate org.onosproject.fwd to cluster 1.
+ """
+ main.case( "Starting ONOS with form cluster." )
+ main.caseExplanation = "This will connect all the clusters of the ONOS."
+ main.step( "Executing onos-form-cluster" )
+ formClusterResult = main.ONOSbench.formCluster( main.Cluster.getIps( True, True ) )
+ utilities.assert_equals( expect=main.TRUE,
+ actual=formClusterResult,
+ onpass="Successfully formed the ONOS cluster",
+ onfail="Failed to form the ONOS cluster" )
+ onosServiceResult = main.testSetUp.checkOnosService( main.Cluster )
+ onosCliResult = main.testSetUp.startOnosClis( main.Cluster )
+ activateResult = main.Cluster.active( 0 ).CLI.activateApp( main.additionalApp )
+ result = formClusterResult and onosServiceResult and onosCliResult and activateResult
+ utilities.assert_equals( expect=main.TRUE,
+ actual=result,
+ onpass="Successfully formed clusters to ONOS and started",
+ onfail="Failed to form clusters to ONOS and started" )
+
+ def CASE3( self, main ):
+ """
+ Checking the configuration of ONOS with single-node instances.
+ It will check :
+ - The number of nodes : each instance should report exactly 1 node.
+ - App status : only the first node should have the additional app installed.
+ """
+ import time
+ main.case( "Checking the configuration of the ONOS" )
+ main.caseExplanation = "Checking the number of the nodes and apps"
+ main.step( "Checking the number of the nodes" )
+ main.log.info( "Sleep for " + str( main.startSleep ) + " to give enough time to ONOS")
+ time.sleep( main.startSleep )
+ result = main.funcs.checkingNumNodes( main, 1 )
+ utilities.assert_equals( expect=main.TRUE,
+ actual=result,
+ onpass="Successfully checking the nodes numbers of the ONOS",
+ onfail="Failed to checking the nodes numbers of the ONOS" )
+ main.step( "Checking the app status. Only the first node should have " +
+ main.additionalApp + " installed." )
+ i = 0
+ appResult = main.TRUE
+ for cluster in main.Cluster.active():
+ appResult = appResult and main.funcs.checkingApp( main, main.additionalApp, cluster, i == 0 )
+ i += 1
+ main.Cluster.active( 0 ).CLI.deactivateApp( main.additionalApp )
+ utilities.assert_equals( expect=main.TRUE,
+ actual=appResult,
+ onpass="Successfully checking the app status of the ONOS",
+ onfail="Failed to checking the app status of the ONOS" )
+
+ def CASE4( self, main ):
+ """
+ Checking the configuration of ONOS after form-cluster.
+ It will check :
+ - The number of nodes : the cluster should report all 7 nodes.
+ - The state of each node.
+ - App status : all the nodes should have the additional app.
+ """
+ import time
+ main.case( "Checking the configuration of the ONOS after form-cluster" )
+ main.caseExplanation = "Checking the number of the nodes and apps"
+ main.step( "Checking the number of the nodes" )
+ main.log.info( "Sleep for " + str( main.startSleep ) + " to give enough time to ONOS")
+ time.sleep( main.startSleep )
+ result = main.funcs.checkingNumNodes( main, main.numNodes )
+ utilities.assert_equals( expect=main.TRUE,
+ actual=result,
+ onpass="Successfully checking the nodes numbers of the ONOS",
+ onfail="Failed to checking the nodes numbers of the ONOS" )
+ main.step( "Checking the status of the nodes" )
+ nodeStatusResult = main.TRUE if main.Cluster.nodesCheck() else main.FALSE
+ utilities.assert_equals( expect=main.TRUE,
+ actual=nodeStatusResult,
+ onpass="The status of the nodes were in READY as expected",
+ onfail="The status of the nodes were NOT in READY as expected" )
+ main.step( "Checking the app status. All nodes should have " +
+ main.additionalApp + " installed." )
+ appResult = main.TRUE
+ for cluster in main.Cluster.active():
+ appResult = appResult and main.funcs.checkingApp( main, main.additionalApp, cluster, True )
+ utilities.assert_equals( expect=main.TRUE,
+ actual=appResult,
+ onpass="Successfully checking the app status of the ONOS",
+ onfail="Failed to checking the app status of the ONOS" )
+
+ def CASE5( self, main ):
+ """
+ Run a simple Mininet topology to check connectivity of the ONOS cluster.
+ - It will ping all hosts.
+ - It will compare the topologies of Mininet and ONOS.
+ """
+ try:
+ from tests.dependencies.topology import Topology
+ except ImportError:
+ main.log.error( "Topology not found exiting the test" )
+ main.cleanAndExit()
+ try:
+ main.Topology
+ except ( NameError, AttributeError ):
+ main.Topology = Topology()
+ main.case( "Starting 2x2 Tree Mininet and compare the Topology" )
+ main.caseExplanation = "Starting 2x2 Mininet and assign ONOS controllers to switches."
+ main.step( "Starting Mininet" )
+ for ctrl in main.Cluster.runningNodes:
+ main.mnTopo += " --controller remote,ip=" + ctrl.ipAddress
+ startMnResult = main.Mininet1.startNet( mnCmd=main.mnTopo )
+ utilities.assert_equals( expect=main.TRUE,
+ actual=startMnResult,
+ onpass="Successfully started Mininet",
+ onfail="Failed to start Mininet" )
+ main.step( "Pingall hosts to confirm ONOS discovery" )
+ pingResult = utilities.retry( f=main.Mininet1.pingall,
+ retValue=main.FALSE,
+ attempts=main.pingallRetry,
+ sleep=main.pingallSleep )
+ utilities.assert_equals( expect=main.TRUE,
+ actual=pingResult,
+ onpass="Successfully discovered hosts",
+ onfail="Failed to discover hosts" )
+ main.Topology.compareTopos( main.Mininet1, main.topoCheckRetry )
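+
+ # Illustrative final value of main.mnTopo after the loop above, with
+ # hypothetical controller IPs ( one --controller option is appended per node ):
+ # mn --topo tree,2,2 --controller remote,ip=10.0.0.1 ... --controller remote,ip=10.0.0.7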
diff --git a/TestON/tests/FUNC/FUNCformCluster/FUNCformCluster.topo b/TestON/tests/FUNC/FUNCformCluster/FUNCformCluster.topo
new file mode 100644
index 0000000..c96b419
--- /dev/null
+++ b/TestON/tests/FUNC/FUNCformCluster/FUNCformCluster.topo
@@ -0,0 +1,36 @@
+<TOPOLOGY>
+ <COMPONENT>
+
+ <ONOScell>
+ <host>localhost</host> # ONOS "bench" machine
+ <user>sdn</user>
+ <password>rocks</password>
+ <type>OnosClusterDriver</type>
+ <connect_order>1</connect_order>
+ <COMPONENTS>
+ <cluster_name></cluster_name> # Used as a prefix for cluster components. Defaults to 'ONOS'
+ <diff_clihost></diff_clihost> # Set True if the CLI host differs from localhost; True or empty. OC# will be used when True.
+ <karaf_username></karaf_username>
+ <karaf_password></karaf_password>
+ <web_user></web_user>
+ <web_pass></web_pass>
+ <rest_port></rest_port>
+ <prompt></prompt> # TODO: we technically need a few of these, one per component
+ <onos_home></onos_home> # defines where onos home is
+ <nodes> 7 </nodes> # number of nodes in the cluster
+ </COMPONENTS>
+ </ONOScell>
+
+ <Mininet1>
+ <host>OCN</host>
+ <user>sdn</user>
+ <password>rocks</password>
+ <type>MininetCliDriver</type>
+ <connect_order>2</connect_order>
+ <COMPONENTS>
+ <prompt></prompt>
+ </COMPONENTS>
+ </Mininet1>
+
+ </COMPONENT>
+</TOPOLOGY>
diff --git a/TestON/tests/FUNC/FUNCformCluster/README b/TestON/tests/FUNC/FUNCformCluster/README
new file mode 100644
index 0000000..4ab2cc6
--- /dev/null
+++ b/TestON/tests/FUNC/FUNCformCluster/README
@@ -0,0 +1,13 @@
+Summary:
+ This test checks the functionality of onos-form-cluster.
+ It first runs 7 independent single-node ONOS instances and checks the node count and apps.
+ Since each instance is a single node, each should report exactly 1 node.
+ Then it forms the 7 instances into one cluster and re-checks the number of nodes,
+ the status of the nodes, and the apps.
+ This time it should report 7 nodes, and installing an app on one node should affect
+ the other nodes. The status of the nodes should be "READY".
+ Lastly, it runs Mininet with all 7 nodes as controllers, pings all hosts, and compares
+ the topologies of ONOS and Mininet.
+
+Required:
+ Since the test is fixed at 7 nodes, it will be forced to exit unless 7 nodes are defined.
\ No newline at end of file
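+
+Example ( illustrative only; the node IPs are hypothetical and the exact
+usage of onos-form-cluster may differ between ONOS versions ):
+ onos-form-cluster 10.0.0.1 10.0.0.2 10.0.0.3 10.0.0.4 10.0.0.5 10.0.0.6 10.0.0.7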
diff --git a/TestON/tests/FUNC/FUNCformCluster/__init__.py b/TestON/tests/FUNC/FUNCformCluster/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/TestON/tests/FUNC/FUNCformCluster/__init__.py
diff --git a/TestON/tests/FUNC/FUNCformCluster/dependencies/formClusterFuncs.py b/TestON/tests/FUNC/FUNCformCluster/dependencies/formClusterFuncs.py
new file mode 100644
index 0000000..044c8a3
--- /dev/null
+++ b/TestON/tests/FUNC/FUNCformCluster/dependencies/formClusterFuncs.py
@@ -0,0 +1,64 @@
+"""
+Copyright 2017 Open Networking Foundation ( ONF )
+
+Please refer questions to either the onos test mailing list at <onos-test@onosproject.org>,
+the System Testing Plans and Results wiki page at <https://wiki.onosproject.org/x/voMg>,
+or the System Testing Guide page at <https://wiki.onosproject.org/x/WYQg>
+
+ TestON is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 2 of the License, or
+ ( at your option ) any later version.
+
+ TestON is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with TestON. If not, see <http://www.gnu.org/licenses/>.
+"""
+import json
+
+def checkingNumNodes( main, expected ):
+ """
+ Check the number of nodes reported by each active instance.
+ :param expected:
+ Expected number of nodes
+ :return:
+ main.TRUE if the node count matches on every instance,
+ main.FALSE otherwise.
+ """
+ result = main.TRUE
+ for cluster in main.Cluster.active():
+ actual = json.loads( cluster.CLI.summary() ).get( 'nodes' )
+ thisResult = main.TRUE if expected == actual else main.FALSE
+ if not thisResult:
+ main.log.error( "Number of nodes did not match." +
+ "\nExpected nodes: " + str( expected ) +
+ "\nActual nodes: " + str( actual ) )
+ result = result and thisResult
+ return result
+
+def checkingApp( main, appToBeChecked, cluster, expectedToBeThere ):
+ """
+ Check the installation status of an app on a given cluster node.
+ :param appToBeChecked:
+ Name of the apps to be checked
+ :param cluster:
+ nth cluster to be checked
+ :param expectedToBeThere:
+ True if it is expected to be installed. False if it is expected not to be installed.
+ :return:
+ main.TRUE if they are all matched. Otherwise main.FALSE
+ """
+ result = False
+ appStatus = cluster.CLI.appStatus( appToBeChecked )
+ if appStatus == "ACTIVE" if expectedToBeThere else "UNINSTALL":
+ result = True
+ if result:
+ main.log.info( "App is " + ( "not " if not expectedToBeThere else "" ) + "there as expected" )
+ return main.TRUE
+ else:
+ main.log.error("App is " + ( "" if not expectedToBeThere else "not " ) + "there which should" +
+ ( "n't" if not expectedToBeThere else "" ) + " be there.")
+ return main.FALSE
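+
+# Illustrative usage sketch ( assumes a running TestON `main` with an active
+# cluster; the node count and per-node expectation shown are hypothetical ):
+# nodeResult = checkingNumNodes( main, 7 )
+# appResult = checkingApp( main, "org.onosproject.fwd", main.Cluster.active( 0 ), True )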
diff --git a/TestON/tests/FUNC/FUNCgroup/FUNCgroup.params b/TestON/tests/FUNC/FUNCgroup/FUNCgroup.params
index ca223bd..90a5082 100644
--- a/TestON/tests/FUNC/FUNCgroup/FUNCgroup.params
+++ b/TestON/tests/FUNC/FUNCgroup/FUNCgroup.params
@@ -12,6 +12,11 @@
# 100 - Check logs for Errors and Warnings
<testcases>1,2,3,5,6,7,6,100</testcases>
+ <GRAPH>
+ <nodeCluster>VM</nodeCluster>
+ <builds>20</builds>
+ </GRAPH>
+
<SCALE>
<max>1</max>
</SCALE>
diff --git a/TestON/tests/FUNC/FUNCintent/FUNCintent.params b/TestON/tests/FUNC/FUNCintent/FUNCintent.params
index bff8823..f1ba155 100644
--- a/TestON/tests/FUNC/FUNCintent/FUNCintent.params
+++ b/TestON/tests/FUNC/FUNCintent/FUNCintent.params
@@ -23,6 +23,11 @@
<testcases>1,[2,10,12,13,15,16,1000,2000,3000,4000,5000,6000,18,19]*2,[2,10,12,13,15,16,17,1000,2000,3000,4000,5000,6000,18,19]*2,[2,11,12,13,15,16,1000,2000,3000,4000,5000,6000,18,19]*2,[2,11,12,13,15,16,17,1000,2000,3000,4000,5000,6000,18,19]*2</testcases>
+ <GRAPH>
+ <nodeCluster>VM</nodeCluster>
+ <builds>20</builds>
+ </GRAPH>
+
<SCALE>
<size>1,3,1,3,1,3,1,3</size>
</SCALE>
diff --git a/TestON/tests/FUNC/FUNCintentRest/FUNCintentRest.params b/TestON/tests/FUNC/FUNCintentRest/FUNCintentRest.params
index 7e8b105..d49be07 100644
--- a/TestON/tests/FUNC/FUNCintentRest/FUNCintentRest.params
+++ b/TestON/tests/FUNC/FUNCintentRest/FUNCintentRest.params
@@ -21,6 +21,12 @@
# 5000 - Test host mobility
<testcases>1,[2,10,12,13,15,16,1000,2000,3000,5000,18,19]*2,[2,10,12,13,15,16,17,1000,2000,3000,5000,18,19]*2,[2,11,12,13,15,16,1000,2000,3000,5000,18,19]*2,[2,11,12,13,15,16,17,1000,2000,3000,5000,18,19]*2</testcases>
+
+ <GRAPH>
+ <nodeCluster>VM</nodeCluster>
+ <builds>20</builds>
+ </GRAPH>
+
<SCALE>
<size>1,3,1,3,1,3,1,3</size>
</SCALE>
diff --git a/TestON/tests/FUNC/FUNCipv6Intent/FUNCipv6Intent.params b/TestON/tests/FUNC/FUNCipv6Intent/FUNCipv6Intent.params
index 73a3599..1f36084 100644
--- a/TestON/tests/FUNC/FUNCipv6Intent/FUNCipv6Intent.params
+++ b/TestON/tests/FUNC/FUNCipv6Intent/FUNCipv6Intent.params
@@ -16,6 +16,11 @@
<testcases>1,2,11,12,13,16,1000,2000,3000,4000,5000,6000,14</testcases>
+ <GRAPH>
+ <nodeCluster>VM</nodeCluster>
+ <builds>20</builds>
+ </GRAPH>
+
<SCALE>
<size>1</size>
</SCALE>
diff --git a/TestON/tests/FUNC/FUNCnetCfg/FUNCnetCfg.params b/TestON/tests/FUNC/FUNCnetCfg/FUNCnetCfg.params
index 0f08d38..ce62f92 100644
--- a/TestON/tests/FUNC/FUNCnetCfg/FUNCnetCfg.params
+++ b/TestON/tests/FUNC/FUNCnetCfg/FUNCnetCfg.params
@@ -18,6 +18,11 @@
<testcases>1,25,2,20,11,27,26,21,22,23,24</testcases>
+ <GRAPH>
+ <nodeCluster>VM</nodeCluster>
+ <builds>20</builds>
+ </GRAPH>
+
<DEPENDENCY>
<path>/tests/FUNC/FUNCnetCfg/dependencies/</path>
<wrapper1>startUp</wrapper1>
diff --git a/TestON/tests/FUNC/FUNCnetconf/FUNCnetconf.params b/TestON/tests/FUNC/FUNCnetconf/FUNCnetconf.params
index f5339f2..e233dc5 100644
--- a/TestON/tests/FUNC/FUNCnetconf/FUNCnetconf.params
+++ b/TestON/tests/FUNC/FUNCnetconf/FUNCnetconf.params
@@ -10,6 +10,11 @@
<testcases>1,[2,100,200,300,19]*2</testcases>
+ <GRAPH>
+ <nodeCluster>VM</nodeCluster>
+ <builds>20</builds>
+ </GRAPH>
+
<SCALE>
<size>1,3</size>
</SCALE>
diff --git a/TestON/tests/FUNC/FUNCoptical/FUNCoptical.params b/TestON/tests/FUNC/FUNCoptical/FUNCoptical.params
index ed94439..2b5a733 100644
--- a/TestON/tests/FUNC/FUNCoptical/FUNCoptical.params
+++ b/TestON/tests/FUNC/FUNCoptical/FUNCoptical.params
@@ -14,6 +14,12 @@
# 32 - Add and test bidirectional host intents
<testcases>1,[2,10,22,23,31,32,14,19,2,10,16,22,23,31,32,14,19]*1,[2,10,17,22,23,31,32,14,19,2,10,16,17,22,23,31,32,14,19]*1</testcases>
+
+ <GRAPH>
+ <nodeCluster>VM</nodeCluster>
+ <builds>20</builds>
+ </GRAPH>
+
<SCALE>
<size>1,3,1,3</size>
</SCALE>
diff --git a/TestON/tests/FUNC/FUNCovsdbtest/FUNCovsdbtest.params b/TestON/tests/FUNC/FUNCovsdbtest/FUNCovsdbtest.params
index dba74d1..aa15174 100644
--- a/TestON/tests/FUNC/FUNCovsdbtest/FUNCovsdbtest.params
+++ b/TestON/tests/FUNC/FUNCovsdbtest/FUNCovsdbtest.params
@@ -11,6 +11,11 @@
<testcases>1,3,4,2,5,6,7,8</testcases>
+ <GRAPH>
+ <nodeCluster>VM</nodeCluster>
+ <builds>20</builds>
+ </GRAPH>
+
<DEPENDENCY>
<path>/tests/FUNC/FUNCovsdbtest/dependencies/</path>
</DEPENDENCY>
diff --git a/TestON/tests/FUNC/FUNCvirNetNB/FUNCvirNetNB.params b/TestON/tests/FUNC/FUNCvirNetNB/FUNCvirNetNB.params
index 84cab8c..9a810bb 100644
--- a/TestON/tests/FUNC/FUNCvirNetNB/FUNCvirNetNB.params
+++ b/TestON/tests/FUNC/FUNCvirNetNB/FUNCvirNetNB.params
@@ -13,6 +13,11 @@
<testcases>1,2,3,4,5,6,7,8,9,10,11,12,13</testcases>
+ <GRAPH>
+ <nodeCluster>BM</nodeCluster>
+ <builds>20</builds>
+ </GRAPH>
+
<SLEEP>
<startup>15</startup>
</SLEEP>
diff --git a/TestON/tests/HA/HAclusterRestart/HAclusterRestart.params b/TestON/tests/HA/HAclusterRestart/HAclusterRestart.params
index 8ba346c..bbf11ae 100644
--- a/TestON/tests/HA/HAclusterRestart/HAclusterRestart.params
+++ b/TestON/tests/HA/HAclusterRestart/HAclusterRestart.params
@@ -19,6 +19,11 @@
#CASE17: Check for basic functionality with distributed primitives
<testcases>1,2,8,21,3,8,4,5,14,16,17,[6],8,3,7,4,15,17,9,8,4,10,8,4,11,8,4,12,8,4,13</testcases>
+ <GRAPH>
+ <nodeCluster>VM</nodeCluster>
+ <builds>20</builds>
+ </GRAPH>
+
<apps></apps>
<ONOS_Configuration>
<org.onosproject.net.intent.impl.compiler.IntentConfigurableRegistrator>
@@ -28,7 +33,7 @@
</ONOS_Configuration>
<ENV>
<cellName>HA</cellName>
- <appString>drivers,openflow,proxyarp,mobility</appString>
+ <appString>drivers,openflow,proxyarp,mobility,events</appString>
</ENV>
<GIT>
<pull>False</pull>
diff --git a/TestON/tests/HA/HAclusterRestart/HAclusterRestart.py b/TestON/tests/HA/HAclusterRestart/HAclusterRestart.py
index 2712c3f..2460c94 100644
--- a/TestON/tests/HA/HAclusterRestart/HAclusterRestart.py
+++ b/TestON/tests/HA/HAclusterRestart/HAclusterRestart.py
@@ -65,9 +65,6 @@
start cli sessions
start tcpdump
"""
- import imp
- import time
- import json
main.log.info( "ONOS HA test: Restart all ONOS nodes - " +
"initialization" )
# These are for csv plotting in jenkins
@@ -90,7 +87,6 @@
except Exception as e:
main.testSetUp.envSetupException( e )
main.testSetUp.evnSetupConclusion( stepResult )
- main.HA.generateGraph( "HAclusterRestart" )
main.testSetUp.ONOSSetUp( main.Mininet1, main.Cluster, cellName=cellName, removeLog=True,
extraApply=main.HA.startingMininet )
@@ -144,12 +140,6 @@
except ( NameError, AttributeError ):
main.log.error( "main.HAdata not defined, setting to []" )
main.HAdata = []
- # Reset non-persistent variables
- try:
- iCounterValue = 0
- except NameError:
- main.log.error( "iCounterValue not defined, setting to 0" )
- iCounterValue = 0
main.case( "Restart entire ONOS cluster" )
@@ -217,7 +207,7 @@
" error logs" )
leaderResult = main.FALSE
elif leaderN is None:
- main.log.error( cli.name +
+ main.log.error( ctrl.name +
" shows no leader for the election-app." )
leaderResult = main.FALSE
if len( set( leaderList ) ) != 1:
diff --git a/TestON/tests/HA/HAcontinuousStopNodes/HAcontinuousStopNodes.params b/TestON/tests/HA/HAcontinuousStopNodes/HAcontinuousStopNodes.params
index f1520f7..3d96b4c 100644
--- a/TestON/tests/HA/HAcontinuousStopNodes/HAcontinuousStopNodes.params
+++ b/TestON/tests/HA/HAcontinuousStopNodes/HAcontinuousStopNodes.params
@@ -19,7 +19,12 @@
#CASE15: Check that Leadership Election is still functional
#CASE16: Install Distributed Primitives app
#CASE17: Check for basic functionality with distributed primitives
- <testcases>1,2,8,21,3,4,5,14,16,17,[61,8,7,4,15,17,62]*1000,8,7,4,15,17,9,8,4,10,8,4,11,8,4,12,8,4,13</testcases>
+ <testcases>1,2,8,21,3,4,5,14,16,17,[61,8,7,4,15,17,62,7,8,4,15,17]*1000,8,7,4,15,17,9,8,4,10,8,4,11,8,4,12,8,4,13</testcases>
+
+ <GRAPH>
+ <nodeCluster>VM</nodeCluster>
+ <builds>20</builds>
+ </GRAPH>
<apps></apps>
<ONOS_Configuration>
@@ -30,7 +35,7 @@
</ONOS_Configuration>
<ENV>
<cellName>HA</cellName>
- <appString>drivers,openflow,proxyarp,mobility</appString>
+ <appString>drivers,openflow,proxyarp,mobility,events</appString>
</ENV>
<GIT>
<pull>False</pull>
diff --git a/TestON/tests/HA/HAcontinuousStopNodes/HAcontinuousStopNodes.py b/TestON/tests/HA/HAcontinuousStopNodes/HAcontinuousStopNodes.py
index 811e04f..9a27fea 100644
--- a/TestON/tests/HA/HAcontinuousStopNodes/HAcontinuousStopNodes.py
+++ b/TestON/tests/HA/HAcontinuousStopNodes/HAcontinuousStopNodes.py
@@ -66,10 +66,6 @@
start cli sessions
start tcpdump
"""
- import imp
- import pexpect
- import time
- import json
main.log.info( "ONOS HA test: Stop a minority of ONOS nodes - " +
"initialization" )
# set global variables
@@ -94,7 +90,6 @@
except Exception as e:
main.testSetUp.envSetupException( e )
main.testSetUp.evnSetupConclusion( stepResult )
- main.HA.generateGraph( "HAcontinuousStopNodes" )
main.testSetUp.ONOSSetUp( main.Mininet1, main.Cluster, cellName=cellName, removeLog=True,
extraApply=[ main.HA.startingMininet,
@@ -141,9 +136,9 @@
try:
assert main.nodeIndex is not None, "main.nodeIndex not defined"
assert main.killCount is not None, "main.killCount not defined"
- except AttributeError as e:
+ except AttributeError:
main.log.warn( "Node to kill not selected, defaulting to node 1" )
- main.nodeIndex = 0
+ main.nodeIndex = -1
main.killCount = 1
main.case( "Stopping ONOS nodes - iteration " + str( main.killCount ) )
@@ -169,11 +164,11 @@
utilities.assert_equals( expect=main.TRUE, actual=killResults,
onpass="ONOS nodes stopped successfully",
onfail="ONOS nodes NOT successfully stopped" )
+ main.Cluster.reset()
main.step( "Checking ONOS nodes" )
- nodeResults = utilities.retry( main.HA.nodesCheck,
+ nodeResults = utilities.retry( main.Cluster.nodesCheck,
False,
- args=[ main.Cluster.active() ],
sleep=15,
attempts=5 )
@@ -195,7 +190,7 @@
"""
Bring up the stopped nodes
"""
- main.HA.bringUpStoppedNode( main )
+ main.HA.bringUpStoppedNodes( main )
def CASE7( self, main ):
"""
@@ -212,9 +207,6 @@
# Test of LeadershipElection
leaderList = []
- restarted = []
- for ctrl in main.kill:
- restarted.append( ctrl.ipAddress )
leaderResult = main.TRUE
for ctrl in main.Cluster.active():
@@ -231,11 +223,6 @@
" shows no leader for the election-app was" +
" elected after the old one died" )
leaderResult = main.FALSE
- elif leaderN in restarted:
- main.log.error( ctrl.name + " shows " + str( leaderN ) +
- " as leader for the election-app, but it " +
- "was restarted" )
- leaderResult = main.FALSE
if len( set( leaderList ) ) != 1:
leaderResult = main.FALSE
main.log.error(
diff --git a/TestON/tests/HA/HAfullNetPartition/HAfullNetPartition.params b/TestON/tests/HA/HAfullNetPartition/HAfullNetPartition.params
index 3a8b60f..fe4cd80 100644
--- a/TestON/tests/HA/HAfullNetPartition/HAfullNetPartition.params
+++ b/TestON/tests/HA/HAfullNetPartition/HAfullNetPartition.params
@@ -21,6 +21,11 @@
#CASE17: Check for basic functionality with distributed primitives
<testcases>1,[2,8,21,3,4,5,14,16,17]*1,[61,8,7,4,15,17,62],8,7,4,15,17,9,8,4,10,8,4,11,8,4,12,8,4,13</testcases>
+ <GRAPH>
+ <nodeCluster>VM</nodeCluster>
+ <builds>20</builds>
+ </GRAPH>
+
<apps></apps>
<ONOS_Configuration>
<org.onosproject.net.intent.impl.compiler.IntentConfigurableRegistrator>
@@ -30,7 +35,7 @@
</ONOS_Configuration>
<ENV>
<cellName>HA</cellName>
- <appString>drivers,openflow,proxyarp,mobility</appString>
+ <appString>drivers,openflow,proxyarp,mobility,events</appString>
</ENV>
<GIT>
<pull>False</pull>
diff --git a/TestON/tests/HA/HAfullNetPartition/HAfullNetPartition.py b/TestON/tests/HA/HAfullNetPartition/HAfullNetPartition.py
index cf7fe73..37e106c 100644
--- a/TestON/tests/HA/HAfullNetPartition/HAfullNetPartition.py
+++ b/TestON/tests/HA/HAfullNetPartition/HAfullNetPartition.py
@@ -66,10 +66,6 @@
start cli sessions
start tcpdump
"""
- import imp
- import pexpect
- import time
- import json
main.log.info( "ONOS HA test: Partition ONOS nodes into two sub-clusters - " +
"initialization" )
# set global variables
@@ -93,7 +89,6 @@
except Exception as e:
main.testSetUp.envSetupException( e )
main.testSetUp.evnSetupConclusion( stepResult )
- main.HA.generateGraph( "HAfullNetPartition" )
main.testSetUp.ONOSSetUp( main.Mininet1, main.Cluster, cellName=cellName, removeLog=True,
extraApply=[ main.HA.startingMininet,
@@ -135,7 +130,8 @@
"""
The Failure case.
"""
- import math
+ import pexpect
+ import time
assert main, "main not defined"
assert utilities.assert_equals, "utilities.assert_equals not defined"
main.case( "Partition ONOS nodes into two distinct partitions" )
@@ -164,37 +160,43 @@
if i not in main.partition:
for j in main.partition:
foe = main.Cluster.runningNodes[ j ]
- main.log.warn( "Setting IP Tables rule from {} to {}. ".format( iCtrl.ipAddress, foe.ipAddress ) )
+ main.log.warn( "Setting IP Tables rule from {} to {}. ".format( iCtrl.ipAddress,
+ foe.ipAddress ) )
# CMD HERE
try:
- cmdStr = "sudo iptables -A {} -d {} -s {} -j DROP".format( "INPUT", iCtrl.ipAddress, foe.ipAddress )
+ cmdStr = "sudo iptables -A {} -d {} -s {} -j DROP".format( "INPUT",
+ iCtrl.ipAddress,
+ foe.ipAddress )
this.sendline( cmdStr )
this.expect( "\$" )
main.log.debug( this.before )
except pexpect.EOF:
- main.log.error( self.name + ": EOF exception found" )
- main.log.error( self.name + ": " + self.handle.before )
+ main.log.error( iCtrl.name + ": EOF exception found" )
+ main.log.error( iCtrl.name + ": " + this.before )
main.cleanAndExit()
except Exception:
- main.log.exception( self.name + ": Uncaught exception!" )
+ main.log.exception( iCtrl.name + ": Uncaught exception!" )
main.cleanAndExit()
else:
for j in range( 0, n ):
if j not in main.partition:
foe = main.Cluster.runningNodes[ j ]
- main.log.warn( "Setting IP Tables rule from {} to {}. ".format( iCtrl.ipAddress, foe.ipAddress ) )
+ main.log.warn( "Setting IP Tables rule from {} to {}. ".format( iCtrl.ipAddress,
+ foe.ipAddress ) )
# CMD HERE
- cmdStr = "sudo iptables -A {} -d {} -s {} -j DROP".format( "INPUT", iCtrl.ipAddress, foe.ipAddress )
+ cmdStr = "sudo iptables -A {} -d {} -s {} -j DROP".format( "INPUT",
+ iCtrl.ipAddress,
+ foe.ipAddress )
try:
this.sendline( cmdStr )
this.expect( "\$" )
main.log.debug( this.before )
except pexpect.EOF:
- main.log.error( self.name + ": EOF exception found" )
- main.log.error( self.name + ": " + self.handle.before )
+ main.log.error( iCtrl.name + ": EOF exception found" )
+ main.log.error( iCtrl.name + ": " + this.before )
main.cleanAndExit()
except Exception:
- main.log.exception( self.name + ": Uncaught exception!" )
+ main.log.exception( iCtrl.name + ": Uncaught exception!" )
main.cleanAndExit()
main.Cluster.runningNodes[ i ].active = False
iCtrl.Bench.exitFromSsh( this, iCtrl.ipAddress )
@@ -211,7 +213,6 @@
"""
Healing Partition
"""
- import time
assert main, "main not defined"
assert utilities.assert_equals, "utilities.assert_equals not defined"
assert main.partition, "main.partition not defined"
@@ -233,22 +234,11 @@
for node in main.partition:
main.Cluster.runningNodes[ node ].active = True
- """
- # NOTE : Not sure if this can be removed
- main.activeNodes.sort()
- try:
- assert list( set( main.activeNodes ) ) == main.activeNodes,\
- "List of active nodes has duplicates, this likely indicates something was run out of order"
- except AssertionError:
- main.log.exception( "" )
- main.cleanAndExit()
- """
main.step( "Checking ONOS nodes" )
- nodeResults = utilities.retry( main.HA.nodesCheck,
+ nodeResults = utilities.retry( main.Cluster.nodesCheck,
False,
- args=[ main.Cluster.active() ],
sleep=15,
- attempts=5 )
+ attempts=50 )
utilities.assert_equals( expect=True, actual=nodeResults,
onpass="Nodes check successful",
diff --git a/TestON/tests/HA/HAkillNodes/HAkillNodes.params b/TestON/tests/HA/HAkillNodes/HAkillNodes.params
index d8f3d31..409bd1f 100644
--- a/TestON/tests/HA/HAkillNodes/HAkillNodes.params
+++ b/TestON/tests/HA/HAkillNodes/HAkillNodes.params
@@ -21,6 +21,11 @@
#CASE17: Check for basic functionality with distributed primitives
<testcases>1,2,8,21,3,4,5,14,16,17,[61,8,7,4,15,17,62],8,7,4,15,17,9,8,4,10,8,4,11,8,4,12,8,4,13</testcases>
+ <GRAPH>
+ <nodeCluster>VM</nodeCluster>
+ <builds>20</builds>
+ </GRAPH>
+
<apps></apps>
<ONOS_Configuration>
<org.onosproject.net.intent.impl.compiler.IntentConfigurableRegistrator>
@@ -30,7 +35,7 @@
</ONOS_Configuration>
<ENV>
<cellName>HA</cellName>
- <appString>drivers,openflow,proxyarp,mobility</appString>
+ <appString>drivers,openflow,proxyarp,mobility,events</appString>
</ENV>
<GIT>
<pull>False</pull>
diff --git a/TestON/tests/HA/HAkillNodes/HAkillNodes.py b/TestON/tests/HA/HAkillNodes/HAkillNodes.py
index 01ebe38..cd47131 100644
--- a/TestON/tests/HA/HAkillNodes/HAkillNodes.py
+++ b/TestON/tests/HA/HAkillNodes/HAkillNodes.py
@@ -87,7 +87,6 @@
except Exception as e:
main.testSetUp.envSetupException( e )
main.testSetUp.evnSetupConclusion( stepResult )
- main.HA.generateGraph( "HAkillNodes" )
main.testSetUp.ONOSSetUp( main.Mininet1, main.Cluster, cellName=cellName, removeLog=True,
extraApply=[ main.HA.startingMininet,
@@ -161,9 +160,8 @@
onfail="ONOS nodes NOT successfully killed" )
main.step( "Checking ONOS nodes" )
- nodeResults = utilities.retry( main.HA.nodesCheck,
+ nodeResults = utilities.retry( main.Cluster.nodesCheck,
False,
- args=[ main.Cluster.active() ],
sleep=15,
attempts=5 )
@@ -183,7 +181,7 @@
"""
Bring up the stopped nodes
"""
- main.HA.bringUpStoppedNode( main )
+ main.HA.bringUpStoppedNodes( main )
def CASE7( self, main ):
"""
diff --git a/TestON/tests/HA/HAsanity/HAsanity.params b/TestON/tests/HA/HAsanity/HAsanity.params
index 5c298ec..5a9f8f9 100644
--- a/TestON/tests/HA/HAsanity/HAsanity.params
+++ b/TestON/tests/HA/HAsanity/HAsanity.params
@@ -20,6 +20,11 @@
#1,2,8,21,8,3,4,5,14,16,17,[6],8,7,4,15,17,9,8,4,10,8,4,11,8,4,12,8,4,13
<testcases>1,2,8,21,8,3,4,5,14,16,17,[6],8,7,4,15,17,9,8,4,10,8,4,11,8,4,12,8,4,13</testcases>
+ <GRAPH>
+ <nodeCluster>VM</nodeCluster>
+ <builds>20</builds>
+ </GRAPH>
+
<apps></apps>
<ONOS_Configuration>
<org.onosproject.net.intent.impl.compiler.IntentConfigurableRegistrator>
@@ -29,7 +34,7 @@
</ONOS_Configuration>
<ENV>
<cellName>HA</cellName>
- <appString>drivers,openflow,proxyarp,mobility</appString>
+ <appString>events,drivers,openflow,proxyarp,mobility</appString>
</ENV>
<GIT>
<pull>False</pull>
diff --git a/TestON/tests/HA/HAsanity/HAsanity.py b/TestON/tests/HA/HAsanity/HAsanity.py
index baff818..ee98b25 100644
--- a/TestON/tests/HA/HAsanity/HAsanity.py
+++ b/TestON/tests/HA/HAsanity/HAsanity.py
@@ -86,7 +86,6 @@
except Exception as e:
main.testSetUp.envSetupException( e )
main.testSetUp.evnSetupConclusion( stepResult )
- main.HA.generateGraph( "HAsanity" )
main.testSetUp.ONOSSetUp( main.Mininet1, main.Cluster, cellName=cellName, removeLog=True,
extraApply=main.HA.startingMininet )
@@ -169,7 +168,7 @@
" error logs" )
leaderResult = main.FALSE
elif leaderN is None:
- main.log.error( cli.name +
+ main.log.error( ctrl.name +
" shows no leader for the election-app was" +
" elected after the old one died" )
leaderResult = main.FALSE
diff --git a/TestON/tests/HA/HAscaling/HAscaling.params b/TestON/tests/HA/HAscaling/HAscaling.params
index 9fd1760..233a55d 100644
--- a/TestON/tests/HA/HAscaling/HAscaling.params
+++ b/TestON/tests/HA/HAscaling/HAscaling.params
@@ -19,6 +19,11 @@
#CASE17: Check for basic functionality with distributed primitives
<testcases>1,2,8,21,3,8,4,5,14,16,17,[6,8,7,4,15,17]*9,9,8,4,10,8,4,11,8,4,12,8,4,13</testcases>
+ <GRAPH>
+ <nodeCluster>VM</nodeCluster>
+ <builds>20</builds>
+ </GRAPH>
+
<scaling>1,3b,5b,7b,7,7b,5b,3b,1</scaling>
<server>
<port>8000</port>
diff --git a/TestON/tests/HA/HAscaling/HAscaling.py b/TestON/tests/HA/HAscaling/HAscaling.py
index 63aa1e5..e862d1d 100644
--- a/TestON/tests/HA/HAscaling/HAscaling.py
+++ b/TestON/tests/HA/HAscaling/HAscaling.py
@@ -65,9 +65,6 @@
start cli sessions
start tcpdump
"""
- import time
- import os
- import re
main.log.info( "ONOS HA test: Restart all ONOS nodes - " +
"initialization" )
# set global variables
@@ -81,6 +78,7 @@
main.log.error( "ONOSSetup not found. exiting the test" )
main.cleanAndExit()
main.testSetUp.envSetupDescription()
+ main.Cluster.numCtrls = 1
try:
from tests.HA.dependencies.HA import HA
main.HA = HA()
@@ -94,7 +92,6 @@
except Exception as e:
main.testSetUp.envSetupException( e )
main.testSetUp.evnSetupConclusion( stepResult )
- main.HA.generateGraph( "HAscaling", index=1 )
main.testSetUp.ONOSSetUp( main.Mininet1, main.Cluster, cellName=cellName, removeLog=True,
extraApply=[ main.HA.setServerForCluster,
@@ -203,9 +200,8 @@
main.Cluster.startCLIs()
main.step( "Checking ONOS nodes" )
- nodeResults = utilities.retry( main.HA.nodesCheck,
+ nodeResults = utilities.retry( main.Cluster.nodesCheck,
False,
- args=[ main.Cluster.active() ],
attempts=5 )
utilities.assert_equals( expect=True, actual=nodeResults,
onpass="Nodes check successful",
@@ -262,7 +258,7 @@
" error logs" )
leaderResult = main.FALSE
elif leaderN is None:
- main.log.error( cli.name +
+ main.log.error( ctrl.name +
" shows no leader for the election-app." )
leaderResult = main.FALSE
if len( set( leaderList ) ) != 1:
diff --git a/TestON/tests/HA/HAsingleInstanceRestart/HAsingleInstanceRestart.params b/TestON/tests/HA/HAsingleInstanceRestart/HAsingleInstanceRestart.params
index 67a655a..e93e655 100644
--- a/TestON/tests/HA/HAsingleInstanceRestart/HAsingleInstanceRestart.params
+++ b/TestON/tests/HA/HAsingleInstanceRestart/HAsingleInstanceRestart.params
@@ -18,6 +18,11 @@
#CASE17: Check for basic functionality with distributed primitives
<testcases>1,2,8,3,4,5,14,15,16,17,[6],8,3,7,4,15,17,9,8,4,10,8,4,11,8,4,12,8,4,13</testcases>
+ <GRAPH>
+ <nodeCluster>VM</nodeCluster>
+ <builds>20</builds>
+ </GRAPH>
+
<apps></apps>
<ONOS_Configuration>
<org.onosproject.net.intent.impl.compiler.IntentConfigurableRegistrator>
diff --git a/TestON/tests/HA/HAsingleInstanceRestart/HAsingleInstanceRestart.py b/TestON/tests/HA/HAsingleInstanceRestart/HAsingleInstanceRestart.py
index 584232a..b18f8c4 100644
--- a/TestON/tests/HA/HAsingleInstanceRestart/HAsingleInstanceRestart.py
+++ b/TestON/tests/HA/HAsingleInstanceRestart/HAsingleInstanceRestart.py
@@ -65,9 +65,6 @@
start cli sessions
start tcpdump
"""
- import imp
- import time
- import json
main.log.info( "ONOS Single node cluster restart " +
"HA test - initialization" )
main.case( "Setting up test environment" )
@@ -97,7 +94,6 @@
except Exception as e:
main.testSetUp.envSetupException( e )
main.testSetUp.evnSetupConclusion( stepResult )
- main.HA.generateGraph( "HAsingleInstanceRestart" )
main.Cluster.setRunningNode( int( main.params[ 'num_controllers' ] ) )
ip = main.Cluster.getIps( allNode=True )
main.testSetUp.ONOSSetUp( main.Mininet1, main.Cluster, cellName="SingleHA", removeLog=True,
@@ -168,6 +164,11 @@
main.log.warn( "ONOS1 intents response: " + repr( ONOSIntents ) )
else:
intentCheck = main.TRUE
+ utilities.assert_equals(
+ expect=main.TRUE,
+ actual=intentCheck,
+ onpass="Intents are consistent across all ONOS nodes",
+ onfail="ONOS nodes have different views of intents" )
main.step( "Get the flows from each controller" )
global flowState
@@ -228,9 +229,6 @@
main.log.exception( "Error parsing clusters[0]: " +
repr( clusters[ 0 ] ) )
numClusters = "ERROR"
- clusterResults = main.FALSE
- if numClusters == 1:
- clusterResults = main.TRUE
utilities.assert_equals(
expect=1,
actual=numClusters,
@@ -323,13 +321,6 @@
assert main, "main not defined"
assert utilities.assert_equals, "utilities.assert_equals not defined"
- # Reset non-persistent variables
- try:
- iCounterValue = 0
- except NameError:
- main.log.error( "iCounterValue not defined, setting to 0" )
- iCounterValue = 0
-
main.case( "Restart ONOS node" )
main.caseExplanation = "Killing ONOS process and restart cli " +\
"sessions once onos is up."
@@ -557,7 +548,7 @@
mnSwitches,
json.loads( devices[ controller ] ),
json.loads( ports[ controller ] ) )
- except ( TypeError, ValueError ) as e:
+ except ( TypeError, ValueError ):
main.log.exception( "Object not as expected; devices={!r}\nports={!r}".format(
devices[ controller ], ports[ controller ] ) )
else:
@@ -716,9 +707,8 @@
onpass="Topology Check Test successful",
onfail="Topology Check Test NOT successful" )
main.step( "Checking ONOS nodes" )
- nodeResults = utilities.retry( main.HA.nodesCheck,
+ nodeResults = utilities.retry( main.Cluster.nodesCheck,
False,
- args=[ main.Cluster.active() ],
attempts=5 )
utilities.assert_equals( expect=True, actual=nodeResults,
diff --git a/TestON/tests/HA/HAstopNodes/HAstopNodes.params b/TestON/tests/HA/HAstopNodes/HAstopNodes.params
index d8f3d31..de7f775 100644
--- a/TestON/tests/HA/HAstopNodes/HAstopNodes.params
+++ b/TestON/tests/HA/HAstopNodes/HAstopNodes.params
@@ -21,6 +21,11 @@
#CASE17: Check for basic functionality with distributed primitives
<testcases>1,2,8,21,3,4,5,14,16,17,[61,8,7,4,15,17,62],8,7,4,15,17,9,8,4,10,8,4,11,8,4,12,8,4,13</testcases>
+ <GRAPH>
+ <nodeCluster>VM</nodeCluster>
+ <builds>20</builds>
+ </GRAPH>
+
<apps></apps>
<ONOS_Configuration>
<org.onosproject.net.intent.impl.compiler.IntentConfigurableRegistrator>
@@ -30,7 +35,7 @@
</ONOS_Configuration>
<ENV>
<cellName>HA</cellName>
- <appString>drivers,openflow,proxyarp,mobility</appString>
+ <appString>events,drivers,openflow,proxyarp,mobility</appString>
</ENV>
<GIT>
<pull>False</pull>
diff --git a/TestON/tests/HA/HAstopNodes/HAstopNodes.py b/TestON/tests/HA/HAstopNodes/HAstopNodes.py
index 7b57730..4c8fe1d 100644
--- a/TestON/tests/HA/HAstopNodes/HAstopNodes.py
+++ b/TestON/tests/HA/HAstopNodes/HAstopNodes.py
@@ -87,7 +87,6 @@
except Exception as e:
main.testSetUp.envSetupException( e )
main.testSetUp.evnSetupConclusion( stepResult )
- main.HA.generateGraph( "HAstopNodes" )
main.testSetUp.ONOSSetUp( main.Mininet1, main.Cluster, cellName=cellName, removeLog=True,
extraApply=[ main.HA.startingMininet,
@@ -160,9 +159,8 @@
onfail="ONOS nodes NOT successfully stopped" )
main.step( "Checking ONOS nodes" )
- nodeResults = utilities.retry( main.HA.nodesCheck,
+ nodeResults = utilities.retry( main.Cluster.nodesCheck,
False,
- args=[ main.Cluster.active() ],
sleep=15,
attempts=5 )
@@ -182,7 +180,7 @@
"""
Bring up the stopped nodes
"""
- main.HA.bringUpStoppedNode( main )
+ main.HA.bringUpStoppedNodes( main )
def CASE7( self, main ):
"""
diff --git a/TestON/tests/HA/HAswapNodes/HAswapNodes.params b/TestON/tests/HA/HAswapNodes/HAswapNodes.params
index cf395cb..f78f98d 100644
--- a/TestON/tests/HA/HAswapNodes/HAswapNodes.params
+++ b/TestON/tests/HA/HAswapNodes/HAswapNodes.params
@@ -19,6 +19,11 @@
#CASE17: Check for basic functionality with distributed primitives
<testcases>1,[2,8,21,3,8,4,5,14,16,17]*1,6,[8,3,7,4,15,17,9,8,4,10,8,4,11,8,4,12,8,4]*1,13</testcases>
+ <GRAPH>
+ <nodeCluster>VM</nodeCluster>
+ <builds>20</builds>
+ </GRAPH>
+
<server>
<port>8000</port>
<interface>eth0</interface>
diff --git a/TestON/tests/HA/HAswapNodes/HAswapNodes.py b/TestON/tests/HA/HAswapNodes/HAswapNodes.py
index e0751f6..98a2e30 100644
--- a/TestON/tests/HA/HAswapNodes/HAswapNodes.py
+++ b/TestON/tests/HA/HAswapNodes/HAswapNodes.py
@@ -65,9 +65,6 @@
start cli sessions
start tcpdump
"""
- import time
- import os
- import re
main.log.info( "ONOS HA test: Restart all ONOS nodes - " +
"initialization" )
# set global variables
@@ -93,7 +90,6 @@
except Exception as e:
main.testSetUp.envSetupException( e )
main.testSetUp.evnSetupConclusion( stepResult )
- main.HA.generateGraph( "HAswapNodes" )
main.testSetUp.ONOSSetUp( main.Mininet1, main.Cluster, cellName=cellName, removeLog=True,
extraApply=[ main.HA.setServerForCluster,
@@ -140,7 +136,6 @@
The Scaling case.
"""
import time
- import re
assert main, "main not defined"
assert utilities.assert_equals, "utilities.assert_equals not defined"
try:
@@ -201,9 +196,8 @@
main.testSetUp.startOnosClis( main.Cluster )
main.step( "Checking ONOS nodes" )
- nodeResults = utilities.retry( main.HA.nodesCheck,
+ nodeResults = utilities.retry( main.Cluster.nodesCheck,
False,
- args=[ main.Cluster.active() ],
attempts=5 )
utilities.assert_equals( expect=True, actual=nodeResults,
onpass="Nodes check successful",
diff --git a/TestON/tests/HA/HAupgrade/HAupgrade.params b/TestON/tests/HA/HAupgrade/HAupgrade.params
new file mode 100644
index 0000000..ba5d077
--- /dev/null
+++ b/TestON/tests/HA/HAupgrade/HAupgrade.params
@@ -0,0 +1,98 @@
+<PARAMS>
+ #List of test cases:
+ #CASE1: Compile ONOS and push it to the test machines
+ #CASE2: Assign devices to controllers
+ #CASE21: Assign mastership to controllers
+ #CASE3: Assign intents
+ #CASE4: Ping across added host intents
+ #CASE5: Reading state of ONOS
+ #CASE61: The Failure inducing case.
+ #CASE62: The Failure recovery case.
+ #CASE7: Check state after control plane failure
+ #CASE8: Compare topo
+ #CASE9: Link s3-s28 down
+ #CASE10: Link s3-s28 up
+ #CASE11: Switch down
+ #CASE12: Switch up
+ #CASE13: Clean up
+ #CASE14: start election app on all onos nodes
+ #CASE15: Check that Leadership Election is still functional
+ #CASE16: Install Distributed Primitives app
+ #CASE17: Check for basic functionality with distributed primitives
+ <testcases>1,2,8,21,[3,4,5,14,16,17]*1,[60,8,61,8,62,8,63,8,64,8,7,4,15,17],8,7,4,15,17,9,8,4,10,8,4,11,8,4,12,8,4,13</testcases>
+
+ <GRAPH>
+ <nodeCluster>VM</nodeCluster>
+ <builds>20</builds>
+ </GRAPH>
+
+ <apps></apps>
+ <ONOS_Configuration>
+ <org.onosproject.net.intent.impl.compiler.IntentConfigurableRegistrator>
+ <useFlowObjectives>false</useFlowObjectives>
+ <defaultFlowObjectiveCompiler>org.onosproject.net.intent.impl.compiler.LinkCollectionIntentObjectiveCompiler</defaultFlowObjectiveCompiler>
+ </org.onosproject.net.intent.impl.compiler.IntentConfigurableRegistrator>
+ </ONOS_Configuration>
+ <ENV>
+ <cellName>HA</cellName>
+ <appString>drivers,openflow,proxyarp,mobility</appString>
+ </ENV>
+ <GIT>
+ <pull>False</pull>
+ <branch>master</branch>
+ </GIT>
+ <num_controllers> 7 </num_controllers>
+ <tcpdump> False </tcpdump>
+
+ <CTRL>
+ <port1>6653</port1>
+ <port2>6653</port2>
+ <port3>6653</port3>
+ <port4>6653</port4>
+ <port5>6653</port5>
+ <port6>6653</port6>
+ <port7>6653</port7>
+ </CTRL>
+ <BACKUP>
+ <ENABLED> False </ENABLED>
+ <TESTONUSER>sdn</TESTONUSER>
+ <TESTONIP>10.128.30.9</TESTONIP>
+ </BACKUP>
+ <PING>
+ <source1>h8</source1>
+ <source2>h9</source2>
+ <source3>h10</source3>
+ <source4>h11</source4>
+ <source5>h12</source5>
+ <source6>h13</source6>
+ <source7>h14</source7>
+ <source8>h15</source8>
+ <source9>h16</source9>
+ <source10>h17</source10>
+ <target1>10.0.0.18</target1>
+ <target2>10.0.0.19</target2>
+ <target3>10.0.0.20</target3>
+ <target4>10.0.0.21</target4>
+ <target5>10.0.0.22</target5>
+ <target6>10.0.0.23</target6>
+ <target7>10.0.0.24</target7>
+ <target8>10.0.0.25</target8>
+ <target9>10.0.0.26</target9>
+ <target10>10.0.0.27</target10>
+ </PING>
+ <timers>
+ <LinkDiscovery>12</LinkDiscovery>
+ <SwitchDiscovery>12</SwitchDiscovery>
+ <gossip>5</gossip>
+ </timers>
+ <kill>
+ <switch> s5 </switch>
+ <dpid> 0000000000005000 </dpid>
+ <links> h5 s2 s1 s6 </links>
+ </kill>
+ <MNtcpdump>
+ <intf>eth0</intf>
+ <port> </port>
+ <folder>~/packet_captures/</folder>
+ </MNtcpdump>
+</PARAMS>
diff --git a/TestON/tests/HA/HAupgrade/HAupgrade.py b/TestON/tests/HA/HAupgrade/HAupgrade.py
new file mode 100644
index 0000000..10e8fdc
--- /dev/null
+++ b/TestON/tests/HA/HAupgrade/HAupgrade.py
@@ -0,0 +1,362 @@
+"""
+Copyright 2015 Open Networking Foundation (ONF)
+
+Please refer questions to either the onos test mailing list at <onos-test@onosproject.org>,
+the System Testing Plans and Results wiki page at <https://wiki.onosproject.org/x/voMg>,
+or the System Testing Guide page at <https://wiki.onosproject.org/x/WYQg>
+
+ TestON is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 2 of the License, or
+ (at your option) any later version.
+
+ TestON is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with TestON. If not, see <http://www.gnu.org/licenses/>.
+"""
+
+"""
+Description: This test is to determine if ONOS can handle
+             an in-service software upgrade (ISSU) of its nodes
+
+List of test cases:
+CASE1: Compile ONOS and push it to the test machines
+CASE2: Assign devices to controllers
+CASE21: Assign mastership to controllers
+CASE3: Assign intents
+CASE4: Ping across added host intents
+CASE5: Reading state of ONOS
+CASE60: Initialize the upgrade
+CASE61-64: Upgrade a minority of nodes, transfer to the new version, upgrade the rest, commit
+CASE7: Check state after control plane failure
+CASE8: Compare topo
+CASE9: Link s3-s28 down
+CASE10: Link s3-s28 up
+CASE11: Switch down
+CASE12: Switch up
+CASE13: Clean up
+CASE14: start election app on all onos nodes
+CASE15: Check that Leadership Election is still functional
+CASE16: Install Distributed Primitives app
+CASE17: Check for basic functionality with distributed primitives
+"""
+class HAupgrade:
+
+ def __init__( self ):
+ self.default = ''
+
+ def CASE1( self, main ):
+ """
+ CASE1 is to compile ONOS and push it to the test machines
+
+ Startup sequence:
+ cell <name>
+ onos-verify-cell
+ NOTE: temporary - onos-remove-raft-logs
+ onos-uninstall
+ start mininet
+ git pull
+ mvn clean install
+ onos-package
+ onos-install -f
+ onos-wait-for-start
+ start cli sessions
+ start tcpdump
+ """
+        main.log.info( "ONOS HA test: Upgrade a minority of ONOS nodes - " +
+ "initialization" )
+ # These are for csv plotting in jenkins
+ main.HAlabels = []
+ main.HAdata = []
+ try:
+ from tests.dependencies.ONOSSetup import ONOSSetup
+ main.testSetUp = ONOSSetup()
+ except ImportError:
+        main.log.error( "ONOSSetup not found. Exiting the test" )
+ main.cleanAndExit()
+ main.testSetUp.envSetupDescription()
+ try:
+ from tests.HA.dependencies.HA import HA
+ main.HA = HA()
+ cellName = main.params[ 'ENV' ][ 'cellName' ]
+ main.apps = main.params[ 'ENV' ][ 'appString' ]
+ stepResult = main.testSetUp.envSetup()
+ except Exception as e:
+ main.testSetUp.envSetupException( e )
+ main.testSetUp.evnSetupConclusion( stepResult )
+ main.HA.generateGraph( "HAupgrade" )
+
+ main.testSetUp.ONOSSetUp( main.Mininet1, main.Cluster, cellName=cellName, removeLog=True,
+ extraApply=[ main.HA.startingMininet,
+ main.HA.copyBackupConfig ],
+ extraClean=main.HA.cleanUpGenPartition )
+
+ main.HA.initialSetUp( serviceClean=True )
+
+ def CASE2( self, main ):
+ """
+ Assign devices to controllers
+ """
+ main.HA.assignDevices( main )
+
+ def CASE21( self, main ):
+ """
+ Assign mastership to controllers
+ """
+ main.HA.assignMastership( main )
+
+ def CASE3( self, main ):
+ """
+ Assign intents
+ """
+ main.HA.assignIntents( main )
+
+ def CASE4( self, main ):
+ """
+ Ping across added host intents
+ """
+ main.HA.pingAcrossHostIntent( main )
+
+ def CASE5( self, main ):
+ """
+ Reading state of ONOS
+ """
+ main.HA.readingState( main )
+
+ def CASE60( self, main ):
+ """
+ Initialize the upgrade.
+ """
+ assert main, "main not defined"
+ assert utilities.assert_equals, "utilities.assert_equals not defined"
+ main.case( "Initialize upgrade" )
+ main.HA.upgradeInit( main )
+
+ def CASE61( self, main ):
+ """
+ Upgrade a minority of nodes PHASE 1
+ """
+ assert main, "main not defined"
+ assert utilities.assert_equals, "utilities.assert_equals not defined"
+ main.case( "Upgrade minority of ONOS nodes" )
+
+ main.step( "Checking ONOS Logs for errors" )
+ for ctrl in main.Cluster.active():
+ main.log.debug( "Checking logs for errors on " + ctrl.name + ":" )
+ main.log.warn( ctrl.checkLogs( ctrl.ipAddress ) )
+
+ main.kill = []
+ n = len( main.Cluster.runningNodes ) # Number of nodes
+ p = n / 2 # Number of nodes in the minority
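+        # NOTE: integer division keeps this a strict minority for the odd-sized
+        # clusters this test supports, e.g. 3 of 7, 2 of 5, 1 of 3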
+ for i in range( p ):
+            main.kill.append( main.Cluster.runningNodes[ i ] )  # ONOS nodes to upgrade ( reusing the main.kill list )
+ main.HA.upgradeNodes( main )
+
+ main.step( "Checking ONOS nodes" )
+ nodeResults = utilities.retry( main.Cluster.nodesCheck,
+ False,
+ sleep=15,
+ attempts=5 )
+ utilities.assert_equals( expect=True, actual=nodeResults,
+ onpass="Nodes check successful",
+ onfail="Nodes check NOT successful" )
+
+ if not nodeResults:
+ for ctrl in main.Cluster.active():
+ main.log.debug( "{} components not ACTIVE: \n{}".format(
+ ctrl.name,
+ ctrl.CLI.sendline( "scr:list | grep -v ACTIVE" ) ) )
+ main.log.error( "Failed to start ONOS, stopping test" )
+ main.cleanAndExit()
+
+ def CASE62( self, main ):
+ """
+ Transfer to new version. PHASE 2
+ """
+ assert main, "main not defined"
+ assert utilities.assert_equals, "utilities.assert_equals not defined"
+ main.case( "Start the upgrade" )
+
+ main.step( "Send the command to switch to new version" )
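+        # ISSU phase 2: ask one node to move the cluster's active version over
+        # to the already-upgraded nodes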
+ ctrl = main.Cluster.next().CLI
+ upgraded = ctrl.issuUpgrade()
+ utilities.assert_equals( expect=main.TRUE, actual=upgraded,
+ onpass="Cluster has moved to the upgraded nodes",
+ onfail="Error transitioning to the upgraded nodes" )
+
+ main.step( "Check the status of the upgrade" )
+ ctrl = main.Cluster.next().CLI
+ status = ctrl.issu()
+ main.log.debug( status )
+ # TODO: check things here?
+
+ main.step( "Checking ONOS nodes" )
+ nodeResults = utilities.retry( main.Cluster.nodesCheck,
+ False,
+ sleep=15,
+ attempts=5 )
+ utilities.assert_equals( expect=True, actual=nodeResults,
+ onpass="Nodes check successful",
+ onfail="Nodes check NOT successful" )
+
+ def CASE63( self, main ):
+ """
+ Upgrade the rest of the nodes
+ """
+ main.case( "Upgrade remaining nodes" )
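+        # main.kill currently holds the nodes upgraded in CASE61; swap it to
+        # the remaining old-version nodes so upgradeNodes() targets them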
+ upgraded = main.kill
+ main.kill = []
+ for node in main.Cluster.runningNodes:
+ if node not in upgraded:
+ main.kill.append( node )
+
+ main.HA.upgradeNodes( main )
+
+ def CASE64( self, main ):
+ """
+ Commit to the upgrade.
+ """
+ assert main, "main not defined"
+ assert utilities.assert_equals, "utilities.assert_equals not defined"
+ main.case( "Commit upgrade" )
+
+ main.step( "Send the command to commit the upgrade" )
+ ctrl = main.Cluster.next().CLI
+ committed = ctrl.issuCommit()
+ utilities.assert_equals( expect=main.TRUE, actual=committed,
+ onpass="Upgrade has been committed",
+ onfail="Error committing the upgrade" )
+
+ main.step( "Check the status of the upgrade" )
+ ctrl = main.Cluster.next().CLI
+ status = ctrl.issu()
+ main.log.debug( status )
+ # TODO: check things here?
+
+ def CASE7( self, main ):
+ """
+ Check state after ONOS failure
+ """
+ try:
+ main.kill
+ except AttributeError:
+ main.kill = []
+
+ main.HA.checkStateAfterEvent( main, afterWhich=0 )
+ main.step( "Leadership Election is still functional" )
+ # Test of LeadershipElection
+ leaderList = []
+
+ restarted = []
+ for ctrl in main.kill:
+ restarted.append( ctrl.ipAddress )
+ leaderResult = main.TRUE
+
+ for ctrl in main.Cluster.active():
+ leaderN = ctrl.electionTestLeader()
+ leaderList.append( leaderN )
+ if leaderN == main.FALSE:
+ # error in response
+ main.log.error( "Something is wrong with " +
+ "electionTestLeader function, check the" +
+ " error logs" )
+ leaderResult = main.FALSE
+ elif leaderN is None:
+ main.log.error( ctrl.name +
+ " shows no leader for the election-app was" +
+ " elected after the old one died" )
+ leaderResult = main.FALSE
+ elif leaderN in restarted:
+ main.log.error( ctrl.name + " shows " + str( leaderN ) +
+ " as leader for the election-app, but it " +
+ "was restarted" )
+ leaderResult = main.FALSE
+ if len( set( leaderList ) ) != 1:
+ leaderResult = main.FALSE
+ main.log.error(
+ "Inconsistent view of leader for the election test app" )
+ # TODO: print the list
+ utilities.assert_equals(
+ expect=main.TRUE,
+ actual=leaderResult,
+ onpass="Leadership election passed",
+ onfail="Something went wrong with Leadership election" )
+
+ def CASE8( self, main ):
+ """
+ Compare topo
+ """
+ main.HA.compareTopo( main )
+
+ def CASE9( self, main ):
+ """
+ Link s3-s28 down
+ """
+ main.HA.linkDown( main )
+
+ def CASE10( self, main ):
+ """
+ Link s3-s28 up
+ """
+ main.HA.linkUp( main )
+
+ def CASE11( self, main ):
+ """
+ Switch Down
+ """
+ # NOTE: You should probably run a topology check after this
+ main.HA.switchDown( main )
+
+ def CASE12( self, main ):
+ """
+ Switch Up
+ """
+ # NOTE: You should probably run a topology check after this
+ main.HA.switchUp( main )
+
+ def CASE13( self, main ):
+ """
+ Clean up
+ """
+ main.HAlabels.append( "Restart" )
+ main.HAdata.append( str( main.restartTime ) )
+ main.HA.cleanUp( main )
+
+ def CASE14( self, main ):
+ """
+ start election app on all onos nodes
+ """
+ main.HA.startElectionApp( main )
+
+ def CASE15( self, main ):
+ """
+ Check that Leadership Election is still functional
+ 15.1 Run election on each node
+ 15.2 Check that each node has the same leaders and candidates
+ 15.3 Find current leader and withdraw
+ 15.4 Check that a new node was elected leader
+        15.5 Check that the new leader was the candidate of the old leader
+ 15.6 Run for election on old leader
+ 15.7 Check that oldLeader is a candidate, and leader if only 1 node
+ 15.8 Make sure that the old leader was added to the candidate list
+
+        old and new variable prefixes refer to data from before vs. after
+        withdrawal, and later before withdrawal vs. after re-election
+ """
+ main.HA.isElectionFunctional( main )
+
+ def CASE16( self, main ):
+ """
+ Install Distributed Primitives app
+ """
+ main.HA.installDistributedPrimitiveApp( main )
+
+ def CASE17( self, main ):
+ """
+ Check for basic functionality with distributed primitives
+ """
+ main.HA.checkDistPrimitivesFunc( main )
diff --git a/TestON/tests/HA/HAupgrade/HAupgrade.topo b/TestON/tests/HA/HAupgrade/HAupgrade.topo
new file mode 100644
index 0000000..4bf4bd4
--- /dev/null
+++ b/TestON/tests/HA/HAupgrade/HAupgrade.topo
@@ -0,0 +1,53 @@
+<TOPOLOGY>
+ <COMPONENT>
+
+ <ONOScell>
+ <host>localhost</host> # ONOS "bench" machine
+ <user>sdn</user>
+ <password>rocks</password>
+ <type>OnosClusterDriver</type>
+ <connect_order>1</connect_order>
+ <COMPONENTS>
+ <cluster_name></cluster_name> # Used as a prefix for cluster components. Defaults to 'ONOS'
+            <diff_clihost></diff_clihost> # Set to True if the CLI runs on a host other than localhost; OC# will be used when True
+ <karaf_username></karaf_username>
+ <karaf_password></karaf_password>
+ <web_user></web_user>
+ <web_pass></web_pass>
+ <rest_port></rest_port>
+ <prompt></prompt> # TODO: we technically need a few of these, one per component
+ <onos_home></onos_home> # defines where onos home is
+ <nodes> 7 </nodes> # number of nodes in the cluster
+ </COMPONENTS>
+ </ONOScell>
+
+ <Mininet1>
+ <host>OCN</host>
+ <user>sdn</user>
+ <password>rocks</password>
+ <type>MininetCliDriver</type>
+ <connect_order>2</connect_order>
+ <COMPONENTS>
+            #Specify the options for Mininet
+ <arg1> --custom ~/mininet/custom/obelisk.py </arg1>
+ <arg2> --topo obelisk </arg2>
+ <arg3> --switch ovs,protocols=OpenFlow13 </arg3>
+ <controller> none </controller>
+ <home>~/mininet/custom/</home>
+ <prompt></prompt>
+ </COMPONENTS>
+ </Mininet1>
+
+ <Mininet2>
+ <host>OCN</host>
+ <user>sdn</user>
+ <password>rocks</password>
+ <type>RemoteMininetDriver</type>
+ <connect_order>3</connect_order>
+ <COMPONENTS>
+ <prompt></prompt>
+ </COMPONENTS>
+ </Mininet2>
+
+ </COMPONENT>
+</TOPOLOGY>
diff --git a/TestON/tests/HA/HAupgrade/README b/TestON/tests/HA/HAupgrade/README
new file mode 100644
index 0000000..b3ffbfa
--- /dev/null
+++ b/TestON/tests/HA/HAupgrade/README
@@ -0,0 +1,29 @@
+This test is designed to verify that an ONOS cluster behaves correctly when its
+nodes are upgraded. The test will initialize the upgrade, then stop, upgrade,
+and restart a minority of the nodes in the cluster. Next it will start the first
+phase of the upgrade process to transfer to the new version, then upgrade
+and restart the rest of the cluster. Finally, the test will verify that
+everything works and commit the upgrade.
+
+As written, the test only supports an ONOS cluster of 3, 5, or 7 nodes.
+This is because the test doesn't apply to a single node cluster and ONOS clusters
+should be deployed in odd numbers.
+
+The general structure of the test:
+- Startup
+- Assign switches
+- Verify ONOS state and functionality
+ - Device mastership
+ - Intents
+ - Leadership election
+ - Distributed Primitives
+- Initialize an upgrade
+- Upgrade some ONOS nodes
+- Verify ONOS state and functionality
+- Transfer to new version
+- Upgrade the rest of the nodes
+- Verify ONOS state and functionality
+- Commit the upgrade
+- Dataplane failures
+ - link down and up
+ - switch down and up
diff --git a/TestON/tests/HA/HAupgrade/__init__.py b/TestON/tests/HA/HAupgrade/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/TestON/tests/HA/HAupgrade/__init__.py
diff --git a/TestON/tests/HA/HAupgrade/dependencies/__init__.py b/TestON/tests/HA/HAupgrade/dependencies/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/TestON/tests/HA/HAupgrade/dependencies/__init__.py
diff --git a/TestON/tests/HA/HAupgradeRollback/HAupgradeRollback.params b/TestON/tests/HA/HAupgradeRollback/HAupgradeRollback.params
new file mode 100644
index 0000000..ba5d077
--- /dev/null
+++ b/TestON/tests/HA/HAupgradeRollback/HAupgradeRollback.params
@@ -0,0 +1,98 @@
+<PARAMS>
+ #List of test cases:
+ #CASE1: Compile ONOS and push it to the test machines
+ #CASE2: Assign devices to controllers
+ #CASE21: Assign mastership to controllers
+ #CASE3: Assign intents
+ #CASE4: Ping across added host intents
+ #CASE5: Reading state of ONOS
+    #CASE60: Initialize the upgrade
+    #CASE61-64: Upgrade a minority, transfer to the new version, roll back, reset
+ #CASE7: Check state after control plane failure
+ #CASE8: Compare topo
+ #CASE9: Link s3-s28 down
+ #CASE10: Link s3-s28 up
+ #CASE11: Switch down
+ #CASE12: Switch up
+ #CASE13: Clean up
+ #CASE14: start election app on all onos nodes
+ #CASE15: Check that Leadership Election is still functional
+ #CASE16: Install Distributed Primitives app
+ #CASE17: Check for basic functionality with distributed primitives
+ <testcases>1,2,8,21,[3,4,5,14,16,17]*1,[60,8,61,8,62,8,63,8,64,8,7,4,15,17],8,7,4,15,17,9,8,4,10,8,4,11,8,4,12,8,4,13</testcases>
+
+ <GRAPH>
+ <nodeCluster>VM</nodeCluster>
+ <builds>20</builds>
+ </GRAPH>
+
+ <apps></apps>
+ <ONOS_Configuration>
+ <org.onosproject.net.intent.impl.compiler.IntentConfigurableRegistrator>
+ <useFlowObjectives>false</useFlowObjectives>
+ <defaultFlowObjectiveCompiler>org.onosproject.net.intent.impl.compiler.LinkCollectionIntentObjectiveCompiler</defaultFlowObjectiveCompiler>
+ </org.onosproject.net.intent.impl.compiler.IntentConfigurableRegistrator>
+ </ONOS_Configuration>
+ <ENV>
+ <cellName>HA</cellName>
+ <appString>drivers,openflow,proxyarp,mobility</appString>
+ </ENV>
+ <GIT>
+ <pull>False</pull>
+ <branch>master</branch>
+ </GIT>
+ <num_controllers> 7 </num_controllers>
+ <tcpdump> False </tcpdump>
+
+ <CTRL>
+ <port1>6653</port1>
+ <port2>6653</port2>
+ <port3>6653</port3>
+ <port4>6653</port4>
+ <port5>6653</port5>
+ <port6>6653</port6>
+ <port7>6653</port7>
+ </CTRL>
+ <BACKUP>
+ <ENABLED> False </ENABLED>
+ <TESTONUSER>sdn</TESTONUSER>
+ <TESTONIP>10.128.30.9</TESTONIP>
+ </BACKUP>
+ <PING>
+ <source1>h8</source1>
+ <source2>h9</source2>
+ <source3>h10</source3>
+ <source4>h11</source4>
+ <source5>h12</source5>
+ <source6>h13</source6>
+ <source7>h14</source7>
+ <source8>h15</source8>
+ <source9>h16</source9>
+ <source10>h17</source10>
+ <target1>10.0.0.18</target1>
+ <target2>10.0.0.19</target2>
+ <target3>10.0.0.20</target3>
+ <target4>10.0.0.21</target4>
+ <target5>10.0.0.22</target5>
+ <target6>10.0.0.23</target6>
+ <target7>10.0.0.24</target7>
+ <target8>10.0.0.25</target8>
+ <target9>10.0.0.26</target9>
+ <target10>10.0.0.27</target10>
+ </PING>
+ <timers>
+ <LinkDiscovery>12</LinkDiscovery>
+ <SwitchDiscovery>12</SwitchDiscovery>
+ <gossip>5</gossip>
+ </timers>
+ <kill>
+ <switch> s5 </switch>
+ <dpid> 0000000000005000 </dpid>
+ <links> h5 s2 s1 s6 </links>
+ </kill>
+ <MNtcpdump>
+ <intf>eth0</intf>
+ <port> </port>
+ <folder>~/packet_captures/</folder>
+ </MNtcpdump>
+</PARAMS>
diff --git a/TestON/tests/HA/HAupgradeRollback/HAupgradeRollback.py b/TestON/tests/HA/HAupgradeRollback/HAupgradeRollback.py
new file mode 100644
index 0000000..fd49fb1
--- /dev/null
+++ b/TestON/tests/HA/HAupgradeRollback/HAupgradeRollback.py
@@ -0,0 +1,391 @@
+"""
+Copyright 2015 Open Networking Foundation (ONF)
+
+Please refer questions to either the onos test mailing list at <onos-test@onosproject.org>,
+the System Testing Plans and Results wiki page at <https://wiki.onosproject.org/x/voMg>,
+or the System Testing Guide page at <https://wiki.onosproject.org/x/WYQg>
+
+ TestON is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 2 of the License, or
+ (at your option) any later version.
+
+ TestON is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with TestON. If not, see <http://www.gnu.org/licenses/>.
+"""
+
+"""
+Description: This test is to determine if ONOS can handle
+             rolling back an in-service software upgrade of its nodes
+
+List of test cases:
+CASE1: Compile ONOS and push it to the test machines
+CASE2: Assign devices to controllers
+CASE21: Assign mastership to controllers
+CASE3: Assign intents
+CASE4: Ping across added host intents
+CASE5: Reading state of ONOS
+CASE60: Initialize the upgrade
+CASE61-64: Upgrade a minority of nodes, transfer to the new version, roll back, reset
+CASE7: Check state after control plane failure
+CASE8: Compare topo
+CASE9: Link s3-s28 down
+CASE10: Link s3-s28 up
+CASE11: Switch down
+CASE12: Switch up
+CASE13: Clean up
+CASE14: start election app on all onos nodes
+CASE15: Check that Leadership Election is still functional
+CASE16: Install Distributed Primitives app
+CASE17: Check for basic functionality with distributed primitives
+"""
+class HAupgradeRollback:
+
+ def __init__( self ):
+ self.default = ''
+
+ def CASE1( self, main ):
+ """
+ CASE1 is to compile ONOS and push it to the test machines
+
+ Startup sequence:
+ cell <name>
+ onos-verify-cell
+ NOTE: temporary - onos-remove-raft-logs
+ onos-uninstall
+ start mininet
+ git pull
+ mvn clean install
+ onos-package
+ onos-install -f
+ onos-wait-for-start
+ start cli sessions
+ start tcpdump
+ """
+        main.log.info( "ONOS HA test: Upgrade and roll back ONOS nodes - " +
+ "initialization" )
+ # These are for csv plotting in jenkins
+ main.HAlabels = []
+ main.HAdata = []
+ try:
+ from tests.dependencies.ONOSSetup import ONOSSetup
+ main.testSetUp = ONOSSetup()
+ except ImportError:
+        main.log.error( "ONOSSetup not found. Exiting the test" )
+ main.cleanAndExit()
+ main.testSetUp.envSetupDescription()
+ try:
+ from tests.HA.dependencies.HA import HA
+ main.HA = HA()
+ cellName = main.params[ 'ENV' ][ 'cellName' ]
+ main.apps = main.params[ 'ENV' ][ 'appString' ]
+ stepResult = main.testSetUp.envSetup()
+ except Exception as e:
+ main.testSetUp.envSetupException( e )
+ main.testSetUp.evnSetupConclusion( stepResult )
+        main.HA.generateGraph( "HAupgradeRollback" )
+
+ main.testSetUp.ONOSSetUp( main.Mininet1, main.Cluster, cellName=cellName, removeLog=True,
+ extraApply=[ main.HA.startingMininet,
+ main.HA.copyBackupConfig ],
+ extraClean=main.HA.cleanUpGenPartition )
+
+ main.HA.initialSetUp( serviceClean=True )
+
+ def CASE2( self, main ):
+ """
+ Assign devices to controllers
+ """
+ main.HA.assignDevices( main )
+
+ def CASE21( self, main ):
+ """
+ Assign mastership to controllers
+ """
+ main.HA.assignMastership( main )
+
+ def CASE3( self, main ):
+ """
+ Assign intents
+ """
+ main.HA.assignIntents( main )
+
+ def CASE4( self, main ):
+ """
+ Ping across added host intents
+ """
+ main.HA.pingAcrossHostIntent( main )
+
+ def CASE5( self, main ):
+ """
+ Reading state of ONOS
+ """
+ main.HA.readingState( main )
+
+ def CASE60( self, main ):
+ """
+ Initialize the upgrade.
+ """
+ assert main, "main not defined"
+ assert utilities.assert_equals, "utilities.assert_equals not defined"
+ main.case( "Initialize upgrade" )
+ main.HA.upgradeInit( main )
+
+ def CASE61( self, main ):
+ """
+ Upgrade a minority of nodes PHASE 1
+ """
+ assert main, "main not defined"
+ assert utilities.assert_equals, "utilities.assert_equals not defined"
+ main.case( "Upgrade minority of ONOS nodes" )
+
+ main.step( "Checking ONOS Logs for errors" )
+ for ctrl in main.Cluster.active():
+ main.log.debug( "Checking logs for errors on " + ctrl.name + ":" )
+ main.log.warn( ctrl.checkLogs( ctrl.ipAddress ) )
+
+ main.kill = []
+ n = len( main.Cluster.runningNodes ) # Number of nodes
+ p = n / 2 # Number of nodes in the minority
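+        # NOTE: integer division keeps this a strict minority for the odd-sized
+        # clusters this test supports, e.g. 3 of 7, 2 of 5, 1 of 3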
+ for i in range( p ):
+            main.kill.append( main.Cluster.runningNodes[ i ] )  # ONOS nodes to upgrade ( reusing the main.kill list )
+ main.HA.upgradeNodes( main )
+
+ main.step( "Checking ONOS nodes" )
+ nodeResults = utilities.retry( main.Cluster.nodesCheck,
+ False,
+ sleep=15,
+ attempts=5 )
+ utilities.assert_equals( expect=True, actual=nodeResults,
+ onpass="Nodes check successful",
+ onfail="Nodes check NOT successful" )
+
+ if not nodeResults:
+ for ctrl in main.Cluster.active():
+ main.log.debug( "{} components not ACTIVE: \n{}".format(
+ ctrl.name,
+ ctrl.CLI.sendline( "scr:list | grep -v ACTIVE" ) ) )
+ main.log.error( "Failed to start ONOS, stopping test" )
+ main.cleanAndExit()
+
+ def CASE62( self, main ):
+ """
+ Transfer to new version. PHASE 2
+ """
+ assert main, "main not defined"
+ assert utilities.assert_equals, "utilities.assert_equals not defined"
+ main.case( "Start the upgrade" )
+
+ main.step( "Send the command to switch to new version" )
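+        # ISSU phase 2: ask one node to move the cluster's active version over
+        # to the already-upgraded nodes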
+ ctrl = main.Cluster.next().CLI
+ upgraded = ctrl.issuUpgrade()
+ utilities.assert_equals( expect=main.TRUE, actual=upgraded,
+ onpass="Cluster has moved to the upgraded nodes",
+ onfail="Error transitioning to the upgraded nodes" )
+
+ main.step( "Check the status of the upgrade" )
+ ctrl = main.Cluster.next().CLI
+ status = ctrl.issu()
+ main.log.debug( status )
+ # TODO: check things here?
+
+ main.step( "Checking ONOS nodes" )
+ nodeResults = utilities.retry( main.Cluster.nodesCheck,
+ False,
+ sleep=15,
+ attempts=5 )
+ utilities.assert_equals( expect=True, actual=nodeResults,
+ onpass="Nodes check successful",
+ onfail="Nodes check NOT successful" )
+
+ def CASE63( self, main ):
+ """
+ Rollback the upgrade
+ """
+ main.case( "Rollback the upgrade" )
+        main.step( "Send the command to roll back the upgrade" )
+ # send rollback command
+ ctrl = main.Cluster.next().CLI
+ rollback = ctrl.issuRollback()
+ utilities.assert_equals( expect=main.TRUE, actual=rollback,
+ onpass="Upgrade has been rolled back",
+ onfail="Error rolling back the upgrade" )
+
+ main.step( "Check the status of the upgrade" )
+ ctrl = main.Cluster.next().CLI
+ status = ctrl.issu()
+ main.log.debug( status )
+
+ # restart and reinstall old version on upgrade nodes
+ for ctrl in main.kill:
+ ctrl.onosStop( ctrl.ipAddress )
+ ctrl.onosUninstall( ctrl.ipAddress )
+ ctrl.onosInstall( options="-f", node=ctrl.ipAddress )
+ ctrl.onosSecureSSH( node=ctrl.ipAddress )
+ ctrl.startOnosCli( ctrl.ipAddress, waitForStart=True )
+ main.step( "Checking ONOS nodes" )
+ nodeResults = utilities.retry( main.Cluster.nodesCheck,
+ False,
+ sleep=15,
+ attempts=5 )
+ utilities.assert_equals( expect=True, actual=nodeResults,
+ onpass="Nodes check successful",
+ onfail="Nodes check NOT successful" )
+
+ if not nodeResults:
+ for ctrl in main.Cluster.active():
+ main.log.debug( "{} components not ACTIVE: \n{}".format(
+ ctrl.name,
+ ctrl.CLI.sendline( "scr:list | grep -v ACTIVE" ) ) )
+ main.log.error( "Failed to start ONOS, stopping test" )
+ main.cleanAndExit()
+
+ def CASE64( self, main ):
+ """
+ Reset the upgrade state.
+ """
+ assert main, "main not defined"
+ assert utilities.assert_equals, "utilities.assert_equals not defined"
+ main.case( "Reset the upgrade state" )
+
+ main.step( "Send the command to reset the upgrade" )
+ ctrl = main.Cluster.next().CLI
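+        # NOTE: this reuses issuCommit() to finalize the upgrade state after a
+        # rollback; a dedicated reset command would be clearer if the CLI
+        # driver provides one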
+ committed = ctrl.issuCommit()
+ utilities.assert_equals( expect=main.TRUE, actual=committed,
+ onpass="Upgrade has been committed",
+ onfail="Error committing the upgrade" )
+
+ main.step( "Check the status of the upgrade" )
+ ctrl = main.Cluster.next().CLI
+ status = ctrl.issu()
+ main.log.debug( status )
+ # TODO: check things here?
+
+ def CASE7( self, main ):
+ """
+ Check state after ONOS failure
+ """
+ try:
+ main.kill
+ except AttributeError:
+ main.kill = []
+
+ main.HA.checkStateAfterEvent( main, afterWhich=0 )
+ main.step( "Leadership Election is still functional" )
+ # Test of LeadershipElection
+ leaderList = []
+
+ restarted = []
+ for ctrl in main.kill:
+ restarted.append( ctrl.ipAddress )
+ leaderResult = main.TRUE
+
+ for ctrl in main.Cluster.active():
+ leaderN = ctrl.electionTestLeader()
+ leaderList.append( leaderN )
+ if leaderN == main.FALSE:
+ # error in response
+ main.log.error( "Something is wrong with " +
+ "electionTestLeader function, check the" +
+ " error logs" )
+ leaderResult = main.FALSE
+ elif leaderN is None:
+ main.log.error( ctrl.name +
+ " shows no leader for the election-app was" +
+ " elected after the old one died" )
+ leaderResult = main.FALSE
+ elif leaderN in restarted:
+ main.log.error( ctrl.name + " shows " + str( leaderN ) +
+ " as leader for the election-app, but it " +
+ "was restarted" )
+ leaderResult = main.FALSE
+ if len( set( leaderList ) ) != 1:
+ leaderResult = main.FALSE
+ main.log.error(
+ "Inconsistent view of leader for the election test app" )
+ # TODO: print the list
+ utilities.assert_equals(
+ expect=main.TRUE,
+ actual=leaderResult,
+ onpass="Leadership election passed",
+ onfail="Something went wrong with Leadership election" )
+
+ def CASE8( self, main ):
+ """
+ Compare topo
+ """
+ main.HA.compareTopo( main )
+
+ def CASE9( self, main ):
+ """
+ Link s3-s28 down
+ """
+ main.HA.linkDown( main )
+
+ def CASE10( self, main ):
+ """
+ Link s3-s28 up
+ """
+ main.HA.linkUp( main )
+
+ def CASE11( self, main ):
+ """
+ Switch Down
+ """
+ # NOTE: You should probably run a topology check after this
+ main.HA.switchDown( main )
+
+ def CASE12( self, main ):
+ """
+ Switch Up
+ """
+ # NOTE: You should probably run a topology check after this
+ main.HA.switchUp( main )
+
+ def CASE13( self, main ):
+ """
+ Clean up
+ """
+ main.HAlabels.append( "Restart" )
+ main.HAdata.append( str( main.restartTime ) )
+ main.HA.cleanUp( main )
+
+ def CASE14( self, main ):
+ """
+ start election app on all onos nodes
+ """
+ main.HA.startElectionApp( main )
+
+ def CASE15( self, main ):
+ """
+ Check that Leadership Election is still functional
+ 15.1 Run election on each node
+ 15.2 Check that each node has the same leaders and candidates
+ 15.3 Find current leader and withdraw
+ 15.4 Check that a new node was elected leader
+        15.5 Check that the new leader was the candidate of the old leader
+ 15.6 Run for election on old leader
+ 15.7 Check that oldLeader is a candidate, and leader if only 1 node
+ 15.8 Make sure that the old leader was added to the candidate list
+
+        old and new variable prefixes refer to data from before vs. after
+        withdrawal, and later before withdrawal vs. after re-election
+ """
+ main.HA.isElectionFunctional( main )
+
+ def CASE16( self, main ):
+ """
+ Install Distributed Primitives app
+ """
+ main.HA.installDistributedPrimitiveApp( main )
+
+ def CASE17( self, main ):
+ """
+ Check for basic functionality with distributed primitives
+ """
+ main.HA.checkDistPrimitivesFunc( main )
diff --git a/TestON/tests/HA/HAupgradeRollback/HAupgradeRollback.topo b/TestON/tests/HA/HAupgradeRollback/HAupgradeRollback.topo
new file mode 100644
index 0000000..4bf4bd4
--- /dev/null
+++ b/TestON/tests/HA/HAupgradeRollback/HAupgradeRollback.topo
@@ -0,0 +1,53 @@
+<TOPOLOGY>
+ <COMPONENT>
+
+ <ONOScell>
+ <host>localhost</host> # ONOS "bench" machine
+ <user>sdn</user>
+ <password>rocks</password>
+ <type>OnosClusterDriver</type>
+ <connect_order>1</connect_order>
+ <COMPONENTS>
+ <cluster_name></cluster_name> # Used as a prefix for cluster components. Defaults to 'ONOS'
+            <diff_clihost></diff_clihost> # Set to True if the CLI runs on a host other than localhost; OC# will be used when True
+ <karaf_username></karaf_username>
+ <karaf_password></karaf_password>
+ <web_user></web_user>
+ <web_pass></web_pass>
+ <rest_port></rest_port>
+ <prompt></prompt> # TODO: we technically need a few of these, one per component
+ <onos_home></onos_home> # defines where onos home is
+ <nodes> 7 </nodes> # number of nodes in the cluster
+ </COMPONENTS>
+ </ONOScell>
+
+ <Mininet1>
+ <host>OCN</host>
+ <user>sdn</user>
+ <password>rocks</password>
+ <type>MininetCliDriver</type>
+ <connect_order>2</connect_order>
+ <COMPONENTS>
+            #Specify the options for Mininet
+ <arg1> --custom ~/mininet/custom/obelisk.py </arg1>
+ <arg2> --topo obelisk </arg2>
+ <arg3> --switch ovs,protocols=OpenFlow13 </arg3>
+ <controller> none </controller>
+ <home>~/mininet/custom/</home>
+ <prompt></prompt>
+ </COMPONENTS>
+ </Mininet1>
+
+ <Mininet2>
+ <host>OCN</host>
+ <user>sdn</user>
+ <password>rocks</password>
+ <type>RemoteMininetDriver</type>
+ <connect_order>3</connect_order>
+ <COMPONENTS>
+ <prompt></prompt>
+ </COMPONENTS>
+ </Mininet2>
+
+ </COMPONENT>
+</TOPOLOGY>
diff --git a/TestON/tests/HA/HAupgradeRollback/README b/TestON/tests/HA/HAupgradeRollback/README
new file mode 100644
index 0000000..396c222
--- /dev/null
+++ b/TestON/tests/HA/HAupgradeRollback/README
@@ -0,0 +1,29 @@
+This test is designed to verify that an ONOS cluster behaves correctly when an
+upgrade is rolled back. The test will initialize the upgrade, then stop, upgrade,
+and restart a minority of the nodes in the cluster. Next it will start the first
+phase of the upgrade process to transfer to the new version, then roll back the
+upgrade and restart the upgraded nodes with the older version. Finally, the test
+will verify that everything works and reset the upgrade state.
+
+As written, the test only supports an ONOS cluster of 3, 5, or 7 nodes.
+This is because the test doesn't apply to a single node cluster and ONOS clusters
+should be deployed in odd numbers.
+
+The general structure of the test:
+- Startup
+- Assign switches
+- Verify ONOS state and functionality
+ - Device mastership
+ - Intents
+ - Leadership election
+ - Distributed Primitives
+- Initialize an upgrade
+- Upgrade some ONOS nodes
+- Verify ONOS state and functionality
+- Transfer to new version
+- Roll back the upgrade and reinstall the old version
+- Verify ONOS state and functionality
+- Reset the upgrade state
+- Dataplane failures
+ - link down and up
+ - switch down and up
diff --git a/TestON/tests/HA/HAupgradeRollback/__init__.py b/TestON/tests/HA/HAupgradeRollback/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/TestON/tests/HA/HAupgradeRollback/__init__.py
diff --git a/TestON/tests/HA/HAupgradeRollback/dependencies/__init__.py b/TestON/tests/HA/HAupgradeRollback/dependencies/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/TestON/tests/HA/HAupgradeRollback/dependencies/__init__.py
diff --git a/TestON/tests/HA/dependencies/HA.py b/TestON/tests/HA/dependencies/HA.py
index 2227146..c75ccdf 100644
--- a/TestON/tests/HA/dependencies/HA.py
+++ b/TestON/tests/HA/dependencies/HA.py
@@ -20,6 +20,8 @@
"""
import json
import time
+import pexpect
+import re
class HA():
@@ -69,7 +71,6 @@
onfail="Error starting Mininet" )
def scalingMetadata( self ):
- import re
main.step( "Generate initial metadata file" )
main.scaling = main.params[ 'scaling' ].split( "," )
main.log.debug( main.scaling )
@@ -271,56 +272,10 @@
main.log.error( "Inconsistent leaderboards:" + str( leaderList ) )
return ( result, leaderList )
- def nodesCheck( self, nodes ):
- nodesOutput = []
- results = True
- threads = []
- for node in nodes:
- t = main.Thread( target=node.nodes,
- name="nodes-" + str( node ),
- args=[] )
- threads.append( t )
- t.start()
-
- for t in threads:
- t.join()
- nodesOutput.append( t.result )
- ips = sorted( main.Cluster.getIps( activeOnly=True ) )
- for i in nodesOutput:
- try:
- current = json.loads( i )
- activeIps = []
- currentResult = False
- for node in current:
- if node[ 'state' ] == 'READY':
- activeIps.append( node[ 'ip' ] )
- activeIps.sort()
- if ips == activeIps:
- currentResult = True
- except ( ValueError, TypeError ):
- main.log.error( "Error parsing nodes output" )
- main.log.warn( repr( i ) )
- currentResult = False
- results = results and currentResult
- return results
-
def generateGraph( self, testName, plotName="Plot-HA", index=2 ):
- # GRAPHS
- # NOTE: important params here:
- # job = name of Jenkins job
- # Plot Name = Plot-HA, only can be used if multiple plots
- # index = The number of the graph under plot name
- job = testName
- graphs = '<ac:structured-macro ac:name="html">\n'
- graphs += '<ac:plain-text-body><![CDATA[\n'
- graphs += '<iframe src="https://onos-jenkins.onlab.us/job/' + job +\
- '/plot/' + plotName + '/getPlot?index=' + str( index ) +\
- '&width=500&height=300"' +\
- 'noborder="0" width="500" height="300" scrolling="yes" ' +\
- 'seamless="seamless"></iframe>\n'
- graphs += ']]></ac:plain-text-body>\n'
- graphs += '</ac:structured-macro>\n'
- main.log.wiki( graphs )
+ # DEPRECATED: ONOSSetup.py now creates these graphs.
+
+ main.log.debug( "HA.generateGraph() is deprecated; ONOSSetup now creates these graphs." )
def initialSetUp( self, serviceClean=False ):
"""
@@ -342,10 +297,9 @@
main.ONOSbench.handle.expect( "\$" )
main.step( "Checking ONOS nodes" )
- nodeResults = utilities.retry( self.nodesCheck,
+ nodeResults = utilities.retry( main.Cluster.nodesCheck,
False,
- args=[ main.Cluster.active() ],
- attempts=5 )
+ attempts=9 )
utilities.assert_equals( expect=True, actual=nodeResults,
onpass="Nodes check successful",
@@ -806,7 +760,6 @@
pendingMap = main.Cluster.next().pendingMap()
if not intentAddResult or "key" in pendingMap:
- import time
installedCheck = True
main.log.info( "Sleeping 60 seconds to see if intents are found" )
time.sleep( 60 )
@@ -859,7 +812,7 @@
main.step( "Check Intent state" )
installedCheck = False
loopCount = 0
- while not installedCheck and loopCount < 40:
+ while not installedCheck and loopCount < 90:
installedCheck = True
# Print the intent states
intents = onosCli.CLI.intents()
@@ -1054,7 +1007,6 @@
Reading state of ONOS
"""
import json
- import time
assert main, "main not defined"
assert utilities.assert_equals, "utilities.assert_equals not defined"
try:
@@ -2524,8 +2476,6 @@
"""
Clean up
"""
- import os
- import time
assert main, "main not defined"
assert utilities.assert_equals, "utilities.assert_equals not defined"
@@ -2579,17 +2529,17 @@
else:
main.log.debug( "skipping saving log files" )
+ main.step( "Checking ONOS Logs for errors" )
+ for ctrl in main.Cluster.runningNodes:
+ main.log.debug( "Checking logs for errors on " + ctrl.name + ":" )
+ main.log.warn( main.ONOSbench.checkLogs( ctrl.ipAddress ) )
+
main.step( "Stopping Mininet" )
mnResult = main.Mininet1.stopNet()
utilities.assert_equals( expect=main.TRUE, actual=mnResult,
onpass="Mininet stopped",
onfail="MN cleanup NOT successful" )
- main.step( "Checking ONOS Logs for errors" )
- for ctrl in main.Cluster.runningNodes:
- main.log.debug( "Checking logs for errors on " + ctrl.name + ":" )
- main.log.warn( main.ONOSbench.checkLogs( ctrl.ipAddress ) )
-
try:
timerLog = open( main.logdir + "/Timers.csv", 'w' )
main.log.debug( ", ".join( main.HAlabels ) + "\n" + ", ".join( main.HAdata ) )
@@ -2710,9 +2660,9 @@
"controller",
onfail="Switches were not successfully reassigned" )
- def bringUpStoppedNode( self, main ):
+ def bringUpStoppedNodes( self, main ):
"""
- The bring up stopped nodes
+        Bring up stopped ONOS nodes.
"""
import time
assert main, "main not defined"
@@ -2743,7 +2693,7 @@
onpass="ONOS restarted successfully",
onfail="ONOS restart NOT successful" )
- main.step( "Restarting ONOS nodes" )
+ main.step( "Restarting ONOS CLI" )
cliResults = main.TRUE
for ctrl in main.kill:
cliResults = cliResults and\
@@ -2753,15 +2703,13 @@
onpass="ONOS node(s) restarted",
onfail="ONOS node(s) did not restart" )
- # Grab the time of restart so we chan check how long the gossip
- # protocol has had time to work
+ # Grab the time of restart so we can have some idea of average time
main.restartTime = time.time() - restartTime
main.log.debug( "Restart time: " + str( main.restartTime ) )
# TODO: MAke this configurable. Also, we are breaking the above timer
main.step( "Checking ONOS nodes" )
- nodeResults = utilities.retry( self.nodesCheck,
+ nodeResults = utilities.retry( main.Cluster.nodesCheck,
False,
- args=[ main.Cluster.active() ],
sleep=15,
attempts=5 )
@@ -2786,7 +2734,96 @@
ctrl.electionTestRun()
utilities.assert_equals( expect=main.TRUE, actual=runResults,
onpass="ONOS nodes reran for election topic",
- onfail="Errror rerunning for election" )
+ onfail="Error rerunning for election" )
+
+ def upgradeNodes( self, main ):
+ """
+ Reinstall some nodes with an upgraded version.
+
+ This will reinstall nodes in main.kill with an upgraded version.
+ """
+ import time
+ assert main, "main not defined"
+ assert utilities.assert_equals, "utilities.assert_equals not defined"
+ assert main.kill, "main.kill not defined"
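+        # The caller is expected to populate main.kill with the subset of the
+        # cluster to upgrade before invoking this function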
+ nodeNames = [ node.name for node in main.kill ]
+        main.step( "Upgrading " + str( nodeNames ) + " ONOS nodes" )
+
+ stopResults = main.TRUE
+ uninstallResults = main.TRUE
+ startResults = main.TRUE
+ sshResults = main.TRUE
+ isup = main.TRUE
+ restartTime = time.time()
+ for ctrl in main.kill:
+ stopResults = stopResults and\
+ ctrl.onosStop( ctrl.ipAddress )
+ uninstallResults = uninstallResults and\
+ ctrl.onosUninstall( ctrl.ipAddress )
+ # Install the new version of onos
+ startResults = startResults and\
+ ctrl.onosInstall( options="-fv", node=ctrl.ipAddress )
+ sshResults = sshResults and\
+ ctrl.onosSecureSSH( node=ctrl.ipAddress )
+ isup = isup and ctrl.isup( ctrl.ipAddress )
+ utilities.assert_equals( expect=main.TRUE, actual=stopResults,
+ onpass="ONOS nodes stopped successfully",
+ onfail="ONOS nodes NOT successfully stopped" )
+ utilities.assert_equals( expect=main.TRUE, actual=uninstallResults,
+ onpass="ONOS nodes uninstalled successfully",
+ onfail="ONOS nodes NOT successfully uninstalled" )
+ utilities.assert_equals( expect=main.TRUE, actual=startResults,
+ onpass="ONOS nodes started successfully",
+ onfail="ONOS nodes NOT successfully started" )
+ utilities.assert_equals( expect=main.TRUE, actual=sshResults,
+ onpass="Successfully secured onos ssh",
+ onfail="Failed to secure onos ssh" )
+ utilities.assert_equals( expect=main.TRUE, actual=isup,
+ onpass="ONOS nodes fully started",
+ onfail="ONOS nodes NOT fully started" )
+
+ main.step( "Restarting ONOS CLI" )
+ cliResults = main.TRUE
+ for ctrl in main.kill:
+ cliResults = cliResults and\
+ ctrl.startOnosCli( ctrl.ipAddress )
+ ctrl.active = True
+ utilities.assert_equals( expect=main.TRUE, actual=cliResults,
+ onpass="ONOS node(s) restarted",
+ onfail="ONOS node(s) did not restart" )
+
+ # Grab the time of restart so we can have some idea of average time
+ main.restartTime = time.time() - restartTime
+ main.log.debug( "Restart time: " + str( main.restartTime ) )
+ # TODO: Make this configurable.
+ main.step( "Checking ONOS nodes" )
+ nodeResults = utilities.retry( main.Cluster.nodesCheck,
+ False,
+ sleep=15,
+ attempts=5 )
+
+ utilities.assert_equals( expect=True, actual=nodeResults,
+ onpass="Nodes check successful",
+ onfail="Nodes check NOT successful" )
+
+ if not nodeResults:
+ for ctrl in main.Cluster.active():
+ main.log.debug( "{} components not ACTIVE: \n{}".format(
+ ctrl.name,
+ ctrl.CLI.sendline( "scr:list | grep -v ACTIVE" ) ) )
+ main.log.error( "Failed to start ONOS, stopping test" )
+ main.cleanAndExit()
+
+ self.commonChecks()
+
+ main.step( "Rerun for election on the node(s) that were killed" )
+ runResults = main.TRUE
+ for ctrl in main.kill:
+ runResults = runResults and\
+ ctrl.electionTestRun()
+ utilities.assert_equals( expect=main.TRUE, actual=runResults,
+ onpass="ONOS nodes reran for election topic",
+ onfail="Error rerunning for election" )
def tempCell( self, cellName, ipList ):
main.step( "Create cell file" )
@@ -3096,8 +3133,8 @@
elapsed = time.time() - startTime
cliTime = time.time() - cliStart
- print "Elapsed time: " + str( elapsed )
- print "CLI time: " + str( cliTime )
+ main.log.debug( "Elapsed time: " + str( elapsed ) )
+ main.log.debug( "CLI time: " + str( cliTime ) )
if all( e is None for e in devices ) and\
all( e is None for e in hosts ) and\
@@ -3378,9 +3415,8 @@
# FIXME: move this to an ONOS state case
main.step( "Checking ONOS nodes" )
- nodeResults = utilities.retry( self.nodesCheck,
+ nodeResults = utilities.retry( main.Cluster.nodesCheck,
False,
- args=[ main.Cluster.active() ],
attempts=5 )
utilities.assert_equals( expect=True, actual=nodeResults,
onpass="Nodes check successful",
@@ -3542,15 +3578,18 @@
onfail="Inconsistent leaderboards" )
if sameResult:
+ # Check that the leader is one of the active nodes
+ ips = sorted( main.Cluster.getIps( activeOnly=True ) )
leader = leaders[ 0 ][ 0 ]
- if onosCli.ipAddress in leader:
- correctLeader = True
+ if leader in ips:
+ legitimate = True
else:
- correctLeader = False
- main.step( "First node was elected leader" )
+ legitimate = False
+ main.log.debug( leaders )
+            main.step( "Check that an active node was elected leader" )
utilities.assert_equals(
expect=True,
- actual=correctLeader,
+ actual=legitimate,
onpass="Correct leader was elected",
onfail="Incorrect leader" )
main.Cluster.testLeader = leader
@@ -3669,18 +3708,6 @@
else:
main.log.info( "Expected no leader, got: " + str( newLeader ) )
correctCandidateResult = main.FALSE
- elif len( oldLeaders[ 0 ] ) >= 3:
- if newLeader == oldLeaders[ 0 ][ 2 ]:
- # correct leader was elected
- correctCandidateResult = main.TRUE
- else:
- correctCandidateResult = main.FALSE
- main.log.error( "Candidate {} was elected. {} should have had priority.".format(
- newLeader, oldLeaders[ 0 ][ 2 ] ) )
- else:
- main.log.warn( "Could not determine who should be the correct leader" )
- main.log.debug( oldLeaders[ 0 ] )
- correctCandidateResult = main.FALSE
utilities.assert_equals(
expect=main.TRUE,
actual=correctCandidateResult,
@@ -3708,24 +3735,10 @@
time.sleep( 5 ) # Paremterize
positionResult, reRunLeaders = self.consistentLeaderboards( activeCLIs )
- # Check that the re-elected node is last on the candidate List
- if not reRunLeaders[ 0 ]:
- positionResult = main.FALSE
- elif oldLeader != reRunLeaders[ 0 ][ -1 ]:
- main.log.error( "Old Leader ({}) not in the proper position: {} ".format( str( oldLeader ),
- str( reRunLeaders[ 0 ] ) ) )
- positionResult = main.FALSE
- utilities.assert_equals(
- expect=True,
- actual=positionResult,
- onpass="Old leader successfully re-ran for election",
- onfail="Something went wrong with Leadership election after " +
- "the old leader re-ran for election" )
-
def installDistributedPrimitiveApp( self, main ):
- """
+ '''
Install Distributed Primitives app
- """
+ '''
import time
assert main, "main not defined"
assert utilities.assert_equals, "utilities.assert_equals not defined"
@@ -3747,3 +3760,29 @@
onfail="Primitives app not activated" )
# TODO check on all nodes instead of sleeping
time.sleep( 5 ) # To allow all nodes to activate
+
+ def upgradeInit( self, main ):
+ '''
+        Initiates an upgrade via the ISSU init command
+ '''
+ main.step( "Send the command to initialize the upgrade" )
+ ctrl = main.Cluster.next().CLI
+ initialized = ctrl.issuInit()
+ utilities.assert_equals( expect=main.TRUE, actual=initialized,
+ onpass="ISSU initialized",
+ onfail="Error initializing the upgrade" )
+
+ main.step( "Check the status of the upgrade" )
+ ctrl = main.Cluster.next().CLI
+ status = ctrl.issu()
+ main.log.debug( status )
+ # TODO: check things here?
+
+ main.step( "Checking ONOS nodes" )
+ nodeResults = utilities.retry( main.Cluster.nodesCheck,
+ False,
+ sleep=15,
+ attempts=5 )
+ utilities.assert_equals( expect=True, actual=nodeResults,
+ onpass="Nodes check successful",
+ onfail="Nodes check NOT successful" )
diff --git a/TestON/tests/MISC/SCPFbatchFlowResp/SCPFbatchFlowResp.params b/TestON/tests/MISC/SCPFbatchFlowResp/SCPFbatchFlowResp.params
index 333ac26..01d4f3e 100755
--- a/TestON/tests/MISC/SCPFbatchFlowResp/SCPFbatchFlowResp.params
+++ b/TestON/tests/MISC/SCPFbatchFlowResp/SCPFbatchFlowResp.params
@@ -15,6 +15,10 @@
<!-- <testcases>1,10,100,1000,100,2000,100,110</testcases> -->
<testcases>1,2,10,100,1000,2100,100,3100,100,110,210</testcases>
+ <GRAPH>
+ <nodeCluster>BM</nodeCluster>
+ <builds>20</builds>
+ </GRAPH>
<GIT>
<pull>False</pull>
<branch>master</branch>
diff --git a/TestON/tests/SCPF/SCPFcbench/SCPFcbench.params b/TestON/tests/SCPF/SCPFcbench/SCPFcbench.params
index 633955f..80b2382 100644
--- a/TestON/tests/SCPF/SCPFcbench/SCPFcbench.params
+++ b/TestON/tests/SCPF/SCPFcbench/SCPFcbench.params
@@ -2,6 +2,11 @@
<testcases>1,2</testcases>
+ <GRAPH>
+ <nodeCluster>BM</nodeCluster>
+ <builds>20</builds>
+ </GRAPH>
+
<SCALE>1</SCALE>
<availableNodes>1</availableNodes>
diff --git a/TestON/tests/SCPF/SCPFflowTp1g/SCPFflowTp1g.params b/TestON/tests/SCPF/SCPFflowTp1g/SCPFflowTp1g.params
index 1a910d3..b374227 100644
--- a/TestON/tests/SCPF/SCPFflowTp1g/SCPFflowTp1g.params
+++ b/TestON/tests/SCPF/SCPFflowTp1g/SCPFflowTp1g.params
@@ -2,6 +2,11 @@
<testcases>0,1,2,1,2,1,2,1,2,1,2,1,2,1,2</testcases>
+ <GRAPH>
+ <nodeCluster>BM</nodeCluster>
+ <builds>20</builds>
+ </GRAPH>
+
<SCALE>1,3,3,5,5,7,7</SCALE>
<max>7</max>
diff --git a/TestON/tests/SCPF/SCPFhostLat/SCPFhostLat.params b/TestON/tests/SCPF/SCPFhostLat/SCPFhostLat.params
index 566bd34..17a8bd3 100644
--- a/TestON/tests/SCPF/SCPFhostLat/SCPFhostLat.params
+++ b/TestON/tests/SCPF/SCPFhostLat/SCPFhostLat.params
@@ -1,6 +1,11 @@
<PARAMS>
<testcases>0,2,11,20,2,11,20,2,11,20,2,11,20</testcases>
+ <GRAPH>
+ <nodeCluster>BM</nodeCluster>
+ <builds>20</builds>
+ </GRAPH>
+
<SCALE>1,3,5,7</SCALE>
<ENV>
diff --git a/TestON/tests/SCPF/SCPFintentEventTp/SCPFintentEventTp.params b/TestON/tests/SCPF/SCPFintentEventTp/SCPFintentEventTp.params
index cf5e7e8..3a069ca 100644
--- a/TestON/tests/SCPF/SCPFintentEventTp/SCPFintentEventTp.params
+++ b/TestON/tests/SCPF/SCPFintentEventTp/SCPFintentEventTp.params
@@ -2,6 +2,11 @@
<testcases>0,1,2,1,2,1,2,1,2,1,2,1,2,1,2</testcases>
+ <GRAPH>
+ <nodeCluster>BM</nodeCluster>
+ <builds>20</builds>
+ </GRAPH>
+
<debugMode></debugMode> #nothing means false
<ENV>
diff --git a/TestON/tests/SCPF/SCPFintentInstallWithdrawLat/SCPFintentInstallWithdrawLat.params b/TestON/tests/SCPF/SCPFintentInstallWithdrawLat/SCPFintentInstallWithdrawLat.params
index 081ebdb..126c311 100644
--- a/TestON/tests/SCPF/SCPFintentInstallWithdrawLat/SCPFintentInstallWithdrawLat.params
+++ b/TestON/tests/SCPF/SCPFintentInstallWithdrawLat/SCPFintentInstallWithdrawLat.params
@@ -2,6 +2,11 @@
<testcases>0,1,2,1,2,1,2,1,2</testcases>
+ <GRAPH>
+ <nodeCluster>BM</nodeCluster>
+ <builds>20</builds>
+ </GRAPH>
+
<SCALE>1,3,5,7</SCALE>
<max>7</max>
@@ -13,6 +18,7 @@
<TEST>
<warmUp>5</warmUp>
<sampleSize>20</sampleSize>
+ <deviceCount>7</deviceCount>
<intents>1,100,1000</intents> #list format, will be split on ','
<ingress>null:0000000000000001/6</ingress>
<egress>null:0000000000000007/5</egress>
@@ -24,19 +30,24 @@
<pull>False</pull>
<branch>master</branch>
</GIT>
-
+ <DEPENDENCY>
+ <FILE1>intentInstallLatFuncs</FILE1>
+ <PATH>/tests/SCPF/SCPFintentInstallWithdrawLat/dependencies/</PATH>
+ </DEPENDENCY>
<DATABASE>
<dbName>/tmp/IntentInstallWithdrawLatDB</dbName>
<dbFlowObj>/tmp/IntentInstallWithdrawLatDBWFO</dbFlowObj>
</DATABASE>
<ATTEMPTS>
- <verify>3</verify>
+ <verify>6</verify>
+ <maxInvalidRun>5</maxInvalidRun>
+ <cfg>5</cfg>
</ATTEMPTS>
<SLEEP>
<startup>10</startup>
<install>10</install>
- <verify>3</verify>
+ <verify>5</verify>
<reroute>3</reroute>
# timeout for pexpect
<timeout>300</timeout>
diff --git a/TestON/tests/SCPF/SCPFintentInstallWithdrawLat/SCPFintentInstallWithdrawLat.py b/TestON/tests/SCPF/SCPFintentInstallWithdrawLat/SCPFintentInstallWithdrawLat.py
index 44e0b6e..9232a73 100644
--- a/TestON/tests/SCPF/SCPFintentInstallWithdrawLat/SCPFintentInstallWithdrawLat.py
+++ b/TestON/tests/SCPF/SCPFintentInstallWithdrawLat/SCPFintentInstallWithdrawLat.py
@@ -34,6 +34,8 @@
self.default = ''
def CASE0( self, main ):
+ import imp
+ import os
"""
- GIT
- BUILDING ONOS
@@ -69,9 +71,12 @@
main.nullProviderCfg = main.params[ 'CFG' ][ 'nullProvider' ]
main.linkCollectionIntentCfg = main.params[ 'CFG' ][ 'linkCollectionIntent' ]
main.verifyAttempts = int( main.params[ 'ATTEMPTS' ][ 'verify' ] )
+ main.cfgRetry = int( main.params[ 'ATTEMPTS' ][ 'cfg' ] )
+ main.maxInvalidRun = int( main.params[ 'ATTEMPTS' ][ 'maxInvalidRun' ] )
main.sampleSize = int( main.params[ 'TEST' ][ 'sampleSize' ] )
main.warmUp = int( main.params[ 'TEST' ][ 'warmUp' ] )
main.intentsList = ( main.params[ 'TEST' ][ 'intents' ] ).split( "," )
+ main.deviceCount = int( main.params[ 'TEST' ][ 'deviceCount' ] )
main.ingress = main.params[ 'TEST' ][ 'ingress' ]
main.egress = main.params[ 'TEST' ][ 'egress' ]
main.debug = main.params[ 'TEST' ][ 'debug' ]
@@ -92,6 +97,9 @@
main.log.info( "Create Database file " + main.dbFileName )
resultsDB = open( main.dbFileName, "w+" )
resultsDB.close()
+ file1 = main.params[ "DEPENDENCY" ][ "FILE1" ]
+ main.dependencyPath = os.path.dirname( os.getcwd() ) + main.params[ "DEPENDENCY" ][ "PATH" ]
+ main.intentFuncs = imp.load_source( file1, main.dependencyPath + file1 + ".py" )
except Exception as e:
main.testSetUp.envSetupException( e )
main.testSetUp.evnSetupConclusion( stepResult )
@@ -104,149 +112,191 @@
main.maxNumBatch = 0
main.testSetUp.ONOSSetUp( main.MN1Ip, main.Cluster, True,
cellName=main.cellName, killRemoveMax=False )
+ configRetry = 0
+ main.cfgCheck = False
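+        # Retry the whole configuration block up to cfgRetry times; any single
+        # failed setCfg call falls through to another attempt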
+ while configRetry < main.cfgRetry:
+ # configure apps
+ stepResult = main.TRUE
+ stepResult = stepResult and \
+ main.Cluster.active( 0 ).CLI.setCfg( main.nullProviderCfg,
+ "deviceCount", value=main.deviceCount )
+ stepResult = stepResult and \
+ main.Cluster.active( 0 ).CLI.setCfg( main.nullProviderCfg,
+ "topoShape", value="linear" )
+ stepResult = stepResult and \
+ main.Cluster.active( 0 ).CLI.setCfg( main.nullProviderCfg,
+ "enabled", value="true" )
+ stepResult = stepResult and \
+ main.Cluster.active( 0 ).CLI.setCfg( main.intentManagerCfg,
+ "skipReleaseResourcesOnWithdrawal",
+ value="true" )
+ if main.flowObj:
+ stepResult = stepResult and \
+ main.Cluster.active( 0 ).CLI.setCfg( main.intentConfigRegiCfg,
+ "useFlowObjectives", value="true" )
+ stepResult = stepResult and \
+ main.Cluster.active( 0 ).CLI.setCfg( main.intentConfigRegiCfg,
+ "defaultFlowObjectiveCompiler",
+ value=main.linkCollectionIntentCfg )
+ time.sleep( main.startUpSleep )
- # configure apps
- main.Cluster.active( 0 ).CLI.setCfg( main.nullProviderCfg,
- "deviceCount", value=7 )
- main.Cluster.active( 0 ).CLI.setCfg( main.nullProviderCfg,
- "topoShape", value="linear" )
- main.Cluster.active( 0 ).CLI.setCfg( main.nullProviderCfg,
- "enabled", value="true" )
- main.Cluster.active( 0 ).CLI.setCfg( main.intentManagerCfg,
- "skipReleaseResourcesOnWithdrawal",
- value="true" )
- if main.flowObj:
- main.Cluster.active( 0 ).CLI.setCfg( main.intentConfigRegiCfg,
- "useFlowObjectives", value="true" )
- main.Cluster.active( 0 ).CLI.setCfg( main.intentConfigRegiCfg,
- "defaultFlowObjectiveCompiler",
- value=main.linkCollectionIntentCfg )
- time.sleep( main.startUpSleep )
+ # balanceMasters
+ stepResult = stepResult and \
+ main.Cluster.active( 0 ).CLI.balanceMasters()
+ if stepResult:
+ main.cfgCheck = True
+ break
+ configRetry += 1
+ time.sleep( main.verifySleep )
- # balanceMasters
- main.Cluster.active( 0 ).CLI.balanceMasters()
time.sleep( main.startUpSleep )
+ if not main.cfgCheck:
+            main.log.error( "Failed to set the ONOS configuration. Skipping the rest of the steps" )
def CASE2( self, main ):
import time
import numpy
import json
- print( main.intentsList )
- for batchSize in main.intentsList:
- main.log.report( "Intent Batch size: {}".format( batchSize ) )
- main.installLatList = []
- main.withdrawLatList = []
- validrun = 0
- invalidrun = 0
- # we use two variables to control the iteration
- while validrun <= main.warmUp + main.sampleSize and invalidrun < 20:
- if validrun >= main.warmUp:
- main.log.info( "================================================" )
- main.log.info( "Starting test iteration " + str( validrun - main.warmUp ) )
- main.log.info( "Total test iteration: " + str( invalidrun + validrun ) )
- main.log.info( "================================================" )
- else:
- main.log.info( "====================Warm Up=====================" )
+ testResult = main.TRUE
+        main.case( "Install and withdraw intents" )
+        main.step( "Checking whether the configuration succeeded" )
+ if main.cfgCheck:
+            main.log.debug( str( main.intentsList ) )
+ for batchSize in main.intentsList:
+ main.log.report( "Intent Batch size: {}".format( batchSize ) )
+ main.batchSize = batchSize
+ main.installLatList = []
+ main.withdrawLatList = []
+ main.validrun = 0
+ main.invalidrun = 0
+ # we use two variables to control the iteration
+ while main.validrun <= main.warmUp + main.sampleSize and main.invalidrun <= main.maxInvalidRun:
+ if main.validrun >= main.warmUp:
+ main.log.info( "================================================" )
+ main.log.info( "Starting test iteration " + str( main.validrun - main.warmUp ) )
+ main.log.info( "Total test iteration: " + str( main.invalidrun + main.validrun ) )
+ main.log.info( "================================================" )
+ else:
+ main.log.info( "====================Warm Up=====================" )
- # push intents
- installResult = main.Cluster.active( 0 ).CLI.pushTestIntents( main.ingress,
- main.egress,
- batchSize,
- offset=1,
- options="-i",
- timeout=main.timeout,
- getResponse=True )
- if isinstance( installResult, str ):
- if "Failure" in installResult:
- main.log.error( "Install Intents failure, ignore this iteration." )
- if validrun < main.warmUp:
- validrun += 1
- continue
- else:
- invalidrun += 1
- continue
+ # push intents
+ installResult = main.Cluster.active( 0 ).CLI.pushTestIntents( main.ingress,
+ main.egress,
+ batchSize,
+ offset=1,
+ options="-i",
+ timeout=main.timeout,
+ getResponse=True )
- try:
- latency = int( installResult.split()[ 5 ] )
- main.log.info( installResult )
- except:
- main.log.error( "Failed to get latency, ignore this iteration." )
- main.log.error( "Response from ONOS:" )
- print( installResult )
- if validrun < main.warmUp:
- validrun += 1
- continue
- else:
- invalidrun += 1
- continue
+ time.sleep( 2 )
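+                # Sanity check against the expected state: a linear topology of
+                # deviceCount devices should have ( deviceCount - 1 ) * 2
+                # directed links, and each pushed intent should install one
+                # flow on every device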
+ main.intentFuncs.sanityCheck( main,
+ ( main.deviceCount - 1 ) * 2,
+ batchSize * main.deviceCount,
+ main.batchSize )
+ if not main.verify:
+ main.log.warn( "Sanity check failed, skipping this iteration..." )
+ continue
+ if isinstance( installResult, str ):
+ if "Failure" in installResult:
+ main.log.error( "Install Intents failure, ignore this iteration." )
+ if main.validrun < main.warmUp:
+ main.validrun += 1
+ continue
+ else:
+ main.invalidrun += 1
+ continue
- if validrun >= main.warmUp:
- main.installLatList.append( latency )
- else:
- invalidrun += 1
- continue
- time.sleep( 2 )
- # Withdraw Intents
- withdrawResult = main.Cluster.active( 0 ).CLI.pushTestIntents( main.ingress,
- main.egress,
- batchSize,
- offset=1,
- options="-w",
- timeout=main.timeout,
- getResponse=True )
+ try:
+ latency = int( installResult.split()[ 5 ] )
+ main.log.info( installResult )
+ except:
+ main.log.error( "Failed to get latency, ignore this iteration." )
+ main.log.error( "Response from ONOS:" )
+ print( installResult )
+ if main.validrun < main.warmUp:
+ main.validrun += 1
+ continue
+ else:
+ main.invalidrun += 1
+ continue
- if isinstance( withdrawResult, str ):
- if "Failure" in withdrawResult:
- main.log.error( "withdraw Intents failure, ignore this iteration." )
- if validrun < main.warmUp:
- validrun += 1
- continue
- else:
- invalidrun += 1
- continue
+ if main.validrun >= main.warmUp:
+ main.installLatList.append( latency )
+ else:
+ main.invalidrun += 1
+ continue
+ # Withdraw Intents
+ withdrawResult = main.Cluster.active( 0 ).CLI.pushTestIntents( main.ingress,
+ main.egress,
+ batchSize,
+ offset=1,
+ options="-w",
+ timeout=main.timeout,
+ getResponse=True )
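+ # wait, purge the withdrawn intents, and verify the topology returned to its idle state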
+ time.sleep( 5 )
+ main.Cluster.active( 0 ).CLI.purgeWithdrawnIntents()
+ main.intentFuncs.sanityCheck( main, ( main.deviceCount - 1 ) * 2, 0, 0 )
+ if not main.verify:
+ main.log.warn( "Sanity check failed, skipping this iteration..." )
+ continue
+ if isinstance( withdrawResult, str ):
+ if "Failure" in withdrawResult:
+ main.log.error( "withdraw Intents failure, ignore this iteration." )
+ if main.validrun < main.warmUp:
+ main.validrun += 1
+ continue
+ else:
+ main.invalidrun += 1
+ continue
- try:
- latency = int( withdrawResult.split()[ 5 ] )
- main.log.info( withdrawResult )
- except:
- main.log.error( "Failed to get latency, ignore this iteration." )
- main.log.error( "Response from ONOS:" )
- print( withdrawResult )
- if validrun < main.warmUp:
- validrun += 1
- continue
- else:
- invalidrun += 1
- continue
+ try:
+ latency = int( withdrawResult.split()[ 5 ] )
+ main.log.info( withdrawResult )
+ except:
+ main.log.error( "Failed to get latency, ignore this iteration." )
+ main.log.error( "Response from ONOS:" )
+ print( withdrawResult )
+ if main.validrun < main.warmUp:
+ main.validrun += 1
+ continue
+ else:
+ main.invalidrun += 1
+ continue
- if validrun >= main.warmUp:
- main.withdrawLatList.append( latency )
- else:
- invalidrun += 1
- continue
- time.sleep( 2 )
- main.Cluster.active( 0 ).CLI.purgeWithdrawnIntents()
- validrun += 1
- installave = numpy.average( main.installLatList )
- installstd = numpy.std( main.installLatList )
- withdrawave = numpy.average( main.withdrawLatList )
- withdrawstd = numpy.std( main.withdrawLatList )
- # log report
- main.log.report( "----------------------------------------------------" )
- main.log.report( "Scale: " + str( main.Cluster.numCtrls ) )
- main.log.report( "Intent batch: " + str( batchSize ) )
- main.log.report( "Install average: {} std: {}".format( installave, installstd ) )
- main.log.report( "Withdraw average: {} std: {}".format( withdrawave, withdrawstd ) )
- # write result to database file
- if not ( numpy.isnan( installave ) or numpy.isnan( installstd ) or
- numpy.isnan( withdrawstd ) or numpy.isnan( withdrawave ) ):
- databaseString = "'" + main.commit + "',"
- databaseString += str( main.Cluster.numCtrls ) + ","
- databaseString += str( batchSize ) + ","
- databaseString += str( installave ) + ","
- databaseString += str( installstd ) + ","
- databaseString += str( withdrawave ) + ","
- databaseString += str( withdrawstd ) + "\n"
- resultsDB = open( main.dbFileName, "a" )
- resultsDB.write( databaseString )
- resultsDB.close()
+ if main.validrun >= main.warmUp:
+ main.withdrawLatList.append( latency )
+ else:
+ main.invalidrun += 1
+ continue
+ main.validrun += 1
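+ # summarize this batch: fall back to zeros when the invalid-run budget was exhausted or no samples were collected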
+ result = ( main.TRUE if main.invalidrun <= main.maxInvalidRun else main.FALSE )
+ installave = numpy.average( main.installLatList ) if main.installLatList and result else 0
+ installstd = numpy.std( main.installLatList ) if main.installLatList and result else 0
+ withdrawave = numpy.average( main.withdrawLatList ) if main.withdrawLatList and result else 0
+ withdrawstd = numpy.std( main.withdrawLatList ) if main.withdrawLatList and result else 0
+ testResult = testResult and result
+ # log report
+ main.log.report( "----------------------------------------------------" )
+ main.log.report( "Scale: " + str( main.Cluster.numCtrls ) )
+ main.log.report( "Intent batch: " + str( batchSize ) )
+ main.log.report( "Install average: {} std: {}".format( installave, installstd ) )
+ main.log.report( "Withdraw average: {} std: {}".format( withdrawave, withdrawstd ) )
+ # write result to database file
+ if not ( numpy.isnan( installave ) or numpy.isnan( installstd ) or
+ numpy.isnan( withdrawstd ) or numpy.isnan( withdrawave ) ):
+ databaseString = "'" + main.commit + "',"
+ databaseString += str( main.Cluster.numCtrls ) + ","
+ databaseString += str( batchSize ) + ","
+ databaseString += str( installave ) + ","
+ databaseString += str( installstd ) + ","
+ databaseString += str( withdrawave ) + ","
+ databaseString += str( withdrawstd ) + "\n"
+ resultsDB = open( main.dbFileName, "a" )
+ resultsDB.write( databaseString )
+ resultsDB.close()
+ else:
+ testResult = main.FALSE
+ utilities.assert_equals( expect=main.TRUE,
+ actual=testResult,
+ onpass="Installing and withdrawing intents properly",
+ onfail="There was something wrong installing and withdrawing intents" )
diff --git a/TestON/tests/SCPF/SCPFintentInstallWithdrawLat/dependencies/intentInstallLatFuncs.py b/TestON/tests/SCPF/SCPFintentInstallWithdrawLat/dependencies/intentInstallLatFuncs.py
new file mode 100644
index 0000000..032fed4
--- /dev/null
+++ b/TestON/tests/SCPF/SCPFintentInstallWithdrawLat/dependencies/intentInstallLatFuncs.py
@@ -0,0 +1,83 @@
+"""
+The functions for intentInstallWithdrawLat
+
+"""
+import numpy
+import time
+import json
+
+
+def __init__( self ):
+ self.default = ''
+
+
+def sanityCheck( main, linkNumExpected, flowNumExpected, intentNumExpected ):
+ """
+ Sanity check on numbers of links, flows and intents in ONOS
+ """
+ attempts = 0
+ main.verify = main.FALSE
+ linkNum = 0
+ flowNum = 0
+ intentNum = 0
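+ # poll the ONOS summary until the link, flow and intent counts match the expected values or the attempts run out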
+ while attempts <= main.verifyAttempts:
+ time.sleep( main.verifySleep )
+ summary = json.loads( main.Cluster.active( 0 ).CLI.summary( timeout=main.timeout ) )
+ linkNum = summary.get( "links" )
+ flowNum = main.Cluster.active( 0 ).CLI.getTotalFlowsNum( timeout=600, noExit=True )
+ intentNum = summary.get( "intents" )
+ if linkNum == linkNumExpected and flowNum == flowNumExpected and intentNum == intentNumExpected:
+ main.log.info( "links: {}, flows: {}, intents: {}".format( linkNum, flowNum, intentNum ) )
+ main.verify = main.TRUE
+ break
+ attempts += 1
+ if not main.verify:
+ main.log.warn( "Links or flows or intents number not as expected" )
+ main.log.warn( "[Expected] links: {}, flows: {}, intents: {}".format( linkNumExpected, flowNumExpected, intentNumExpected ) )
+ main.log.warn( "[Actual] links: {}, flows: {}, intents: {}".format( linkNum, flowNum, intentNum ) )
+ # bring back topology
+ bringBackTopology( main )
+ if main.validrun >= main.warmUp:
+ main.invalidrun += 1
+ else:
+ main.validrun += 1
+
+def bringBackTopology( main ):
+ main.log.info( "Bring back topology " )
+
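+ # withdraw and purge leftover intents, disable the null provider, wipe ONOS state, then re-apply the test configuration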
+ main.Cluster.active( 0 ).CLI.pushTestIntents( main.ingress,
+ main.egress,
+ main.batchSize,
+ offset=1,
+ options="-w",
+ timeout=main.timeout )
+ main.Cluster.active( 0 ).CLI.purgeWithdrawnIntents()
+ # configure apps
+ main.Cluster.active( 0 ).CLI.setCfg( main.nullProviderCfg,
+ "deviceCount", value=0 )
+ main.Cluster.active( 0 ).CLI.setCfg( main.nullProviderCfg,
+ "enabled", value="false" )
+ main.Cluster.active( 0 ).CLI.setCfg( main.intentManagerCfg,
+ "skipReleaseResourcesOnWithdrawal",
+ value="false" )
+ if main.flowObj:
+ main.Cluster.active( 0 ).CLI.setCfg( main.intentConfigRegiCfg,
+ "useFlowObjectives", value="false" )
+ time.sleep( main.startUpSleep )
+ main.Cluster.active( 0 ).CLI.wipeout()
+ time.sleep( main.startUpSleep )
+ main.Cluster.active( 0 ).CLI.setCfg( main.nullProviderCfg,
+ "deviceCount", value=main.deviceCount )
+ main.Cluster.active( 0 ).CLI.setCfg( main.nullProviderCfg,
+ "enabled", value="true" )
+ main.Cluster.active( 0 ).CLI.setCfg( main.intentManagerCfg,
+ "skipReleaseResourcesOnWithdrawal",
+ value="true" )
+ if main.flowObj:
+ main.Cluster.active( 0 ).CLI.setCfg( main.intentConfigRegiCfg,
+ "useFlowObjectives", value="true" )
+ time.sleep( main.startUpSleep )
+
+ # balanceMasters
+ main.Cluster.active( 0 ).CLI.balanceMasters()
+ time.sleep( main.startUpSleep )
diff --git a/TestON/tests/SCPF/SCPFintentRerouteLat/SCPFintentRerouteLat.params b/TestON/tests/SCPF/SCPFintentRerouteLat/SCPFintentRerouteLat.params
index 3dfbb97..8a758e3 100644
--- a/TestON/tests/SCPF/SCPFintentRerouteLat/SCPFintentRerouteLat.params
+++ b/TestON/tests/SCPF/SCPFintentRerouteLat/SCPFintentRerouteLat.params
@@ -2,6 +2,11 @@
<testcases>0,1,2,1,2,1,2,1,2</testcases>
+ <GRAPH>
+ <nodeCluster>BM</nodeCluster>
+ <builds>20</builds>
+ </GRAPH>
+
<SCALE>1,3,5,7</SCALE>
<max>7</max>
@@ -51,15 +56,16 @@
</GIT>
<ATTEMPTS>
- <verify>5</verify>
- <maxInvalidRun>10</maxInvalidRun>
+ <verify>6</verify>
+ <maxInvalidRun>5</maxInvalidRun>
+ <cfg>5</cfg>
</ATTEMPTS>
<SLEEP>
<startup>5</startup>
<setmaster>5</setmaster>
<install>10</install>
- <verify>10</verify>
+ <verify>5</verify>
# timeout for pexpect
<timeout>300</timeout>
</SLEEP>
diff --git a/TestON/tests/SCPF/SCPFintentRerouteLat/SCPFintentRerouteLat.py b/TestON/tests/SCPF/SCPFintentRerouteLat/SCPFintentRerouteLat.py
index 41a3408..71bf1aa 100644
--- a/TestON/tests/SCPF/SCPFintentRerouteLat/SCPFintentRerouteLat.py
+++ b/TestON/tests/SCPF/SCPFintentRerouteLat/SCPFintentRerouteLat.py
@@ -72,6 +72,7 @@
main.setMasterSleep = int( main.params[ 'SLEEP' ][ 'setmaster' ] )
main.verifyAttempts = int( main.params[ 'ATTEMPTS' ][ 'verify' ] )
main.maxInvalidRun = int( main.params[ 'ATTEMPTS' ][ 'maxInvalidRun' ] )
+ main.cfgRetry = int( main.params[ 'ATTEMPTS' ][ 'cfg' ] )
main.sampleSize = int( main.params[ 'TEST' ][ 'sampleSize' ] )
main.intentManagerCfg = main.params[ 'CFG' ][ 'intentManager' ]
main.intentConfigRegiCfg = main.params[ 'CFG' ][ 'intentConfigRegi' ]
@@ -122,18 +123,44 @@
main.maxNumBatch = 0
main.testSetUp.ONOSSetUp( main.MN1Ip, main.Cluster, True,
cellName=main.cellName, killRemoveMax=False )
- # configure apps
- main.Cluster.active( 0 ).CLI.setCfg( main.nullProviderCfg, "deviceCount", value=main.deviceCount )
- main.Cluster.active( 0 ).CLI.setCfg( main.nullProviderCfg, "topoShape", value="reroute" )
- main.Cluster.active( 0 ).CLI.setCfg( main.nullProviderCfg, "enabled", value="true" )
- main.Cluster.active( 0 ).CLI.setCfg( main.intentManagerCfg, "skipReleaseResourcesOnWithdrawal",
- value="true" )
- if main.flowObj:
- main.Cluster.active( 0 ).CLI.setCfg( main.intentConfigRegiCfg,
- "useFlowObjectives", value="true" )
- main.Cluster.active( 0 ).CLI.setCfg( main.intentConfigRegiCfg,
- "defaultFlowObjectiveCompiler",
- value=main.linkCollectionIntentCfg )
+ configRetry = 0
+ main.cfgCheck = False
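+ # retry the app configuration up to main.cfgRetry times before giving up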
+ while configRetry < main.cfgRetry:
+ # configure apps
+ stepResult = main.TRUE
+ stepResult = stepResult and \
+ main.Cluster.active( 0 ).CLI.setCfg( main.nullProviderCfg,
+ "deviceCount",
+ value=main.deviceCount )
+
+ stepResult = stepResult and \
+ main.Cluster.active( 0 ).CLI.setCfg( main.nullProviderCfg,
+ "topoShape",
+ value="reroute" )
+ stepResult = stepResult and \
+ main.Cluster.active( 0 ).CLI.setCfg( main.nullProviderCfg,
+ "enabled",
+ value="true" )
+
+ stepResult = stepResult and \
+ main.Cluster.active( 0 ).CLI.setCfg( main.intentManagerCfg,
+ "skipReleaseResourcesOnWithdrawal",
+ value="true" )
+ if main.flowObj:
+ stepResult = stepResult and \
+ main.Cluster.active( 0 ).CLI.setCfg( main.intentConfigRegiCfg,
+ "useFlowObjectives",
+ value="true" )
+ stepResult = stepResult and \
+ main.Cluster.active( 0 ).CLI.setCfg( main.intentConfigRegiCfg,
+ "defaultFlowObjectiveCompiler",
+ value=main.linkCollectionIntentCfg )
+ if stepResult:
+ main.cfgCheck = True
+ break
+ configRetry += 1
+ time.sleep( main.verifySleep )
+
time.sleep( main.startUpSleep )
for ctrl in main.Cluster.active():
ctrl.CLI.logSet( "DEBUG", "org.onosproject.metrics.topology" )
@@ -145,6 +172,8 @@
main.Cluster.active( 0 ).CLI.deviceRole( main.end1[ 'name' ], main.Cluster.active( 0 ).ipAddress )
main.Cluster.active( 0 ).CLI.deviceRole( main.end2[ 'name' ], main.Cluster.active( 0 ).ipAddress )
time.sleep( main.setMasterSleep )
+ if not main.cfgCheck:
+ main.log.error( "Setting configuration to the ONOS failed. Skip the rest of the steps" )
def CASE2( self, main ):
import time
@@ -152,162 +181,174 @@
import datetime
import json
# from scipy import stats
+ testResult = main.TRUE
+ main.case( "Intent Reroute starts" )
+ main.step( "Checking intent reroute" )
+ if main.cfgCheck:
+ print( main.intentsList )
+ for batchSize in main.intentsList:
+ main.batchSize = batchSize
+ main.log.report( "Intent Batch size: " + str( batchSize ) + "\n " )
+ firstLocalLatencies = []
+ lastLocalLatencies = []
+ firstGlobalLatencies = []
+ lastGlobalLatencies = []
+ main.startLine = {}
+ main.validRun = 0
+ main.invalidRun = 0
+ while main.validRun <= main.warmUp + main.sampleSize and main.invalidRun <= main.maxInvalidRun:
+ if main.validRun >= main.warmUp:
+ main.log.info( "================================================" )
+ main.log.info( "Valid iteration: {} ".format( main.validRun - main.warmUp ) )
+ main.log.info( "Total iteration: {}".format( main.validRun + main.invalidRun ) )
+ main.log.info( "================================================" )
+ else:
+ main.log.info( "====================Warm Up=====================" )
- print( main.intentsList )
- for batchSize in main.intentsList:
- main.batchSize = batchSize
- main.log.report( "Intent Batch size: " + str( batchSize ) + "\n " )
- firstLocalLatencies = []
- lastLocalLatencies = []
- firstGlobalLatencies = []
- lastGlobalLatencies = []
- main.startLine = {}
- main.validRun = 0
- main.invalidRun = 0
- while main.validRun <= main.warmUp + main.sampleSize and main.invalidRun <= main.maxInvalidRun:
- if main.validRun >= main.warmUp:
- main.log.info( "================================================" )
- main.log.info( "Valid iteration: {} ".format( main.validRun - main.warmUp ) )
- main.log.info( "Total iteration: {}".format( main.validRun + main.invalidRun ) )
- main.log.info( "================================================" )
- else:
- main.log.info( "====================Warm Up=====================" )
+ # push intents
+ main.Cluster.active( 0 ).CLI.pushTestIntents( main.ingress,
+ main.egress,
+ main.batchSize,
+ offset=1,
+ options="-i",
+ timeout=main.timeout )
- # push intents
- main.Cluster.active( 0 ).CLI.pushTestIntents( main.ingress,
- main.egress,
- main.batchSize,
- offset=1,
- options="-i",
- timeout=main.timeout )
+ # check links, flows and intents
+ main.intentRerouteLatFuncs.sanityCheck( main,
+ main.deviceCount * 2,
+ batchSize * ( main.deviceCount - 1 ),
+ main.batchSize )
+ if not main.verify:
+ main.log.warn( "Sanity check failed, skipping this iteration..." )
+ continue
- # check links, flows and intents
- main.intentRerouteLatFuncs.sanityCheck( main,
- main.deviceCount * 2,
- batchSize * ( main.deviceCount - 1 ),
- main.batchSize )
- if not main.verify:
- main.log.warn( "Sanity check failed, skipping this iteration..." )
- continue
+ # Insert one line in karaf.log before link down
+ main.Cluster.command( "log",
+ args=[ "\'Scale: {}, Batch:{}, Iteration: {}\'".format(
+ main.Cluster.numCtrls, batchSize, main.validRun + main.invalidRun ) ],
+ returnBool=True, specificDriver=2 )
+ # bring link down
+ main.Cluster.active( 0 ).CLI.link( main.end1[ 'port' ], main.end2[ 'port' ], "down",
+ timeout=main.timeout, showResponse=False )
- # Insert one line in karaf.log before link down
- main.Cluster.command( "log",
- args=[ "\'Scale: {}, Batch:{}, Iteration: {}\'".format(
- main.Cluster.numCtrls, batchSize, main.validRun + main.invalidRun ) ],
- returnBool=True, specificDriver=2 )
- # bring link down
- main.Cluster.active( 0 ).CLI.link( main.end1[ 'port' ], main.end2[ 'port' ], "down",
- timeout=main.timeout, showResponse=False )
+ # check links, flows and intents
+ main.intentRerouteLatFuncs.sanityCheck( main,
+ ( main.deviceCount - 1 ) * 2,
+ batchSize * main.deviceCount,
+ main.batchSize )
+ if not main.verify:
+ main.log.warn( "Sanity check failed, skipping this iteration..." )
+ continue
- # check links, flows and intents
- main.intentRerouteLatFuncs.sanityCheck( main,
- ( main.deviceCount - 1 ) * 2,
- batchSize * main.deviceCount,
- main.batchSize )
- if not main.verify:
- main.log.warn( "Sanity check failed, skipping this iteration..." )
- continue
-
- # Get timestamp of last LINK_REMOVED event as separator between iterations
- skip = False
- for i in range( main.Cluster.numCtrls ):
- logNum = main.intentRerouteLatFuncs.getLogNum( main, i )
- timestamp = str( main.Cluster.active( i ).CLI.getTimeStampFromLog( "last",
- "LINK_REMOVED",
- "time = ", " ",
- logNum=logNum ) )
- if timestamp == main.ERROR:
- # Try again in case that the log number just increased
+ # Get timestamp of last LINK_REMOVED event as separator between iterations
+ skip = False
+ for i in range( main.Cluster.numCtrls ):
logNum = main.intentRerouteLatFuncs.getLogNum( main, i )
timestamp = str( main.Cluster.active( i ).CLI.getTimeStampFromLog( "last",
"LINK_REMOVED",
"time = ", " ",
logNum=logNum ) )
- if timestamp == main.ERROR:
- main.log.warn( "Cannot find the event we want in the log, skipping this iteration..." )
+ if timestamp == main.ERROR:
+ # Try again in case that the log number just increased
+ logNum = main.intentRerouteLatFuncs.getLogNum( main, i )
+ timestamp = str( main.Cluster.active( i ).CLI.getTimeStampFromLog( "last",
+ "LINK_REMOVED",
+ "time = ", " ",
+ logNum=logNum ) )
+ if timestamp == main.ERROR:
+ main.log.warn( "Cannot find the event we want in the log, skipping this iteration..." )
+ main.intentRerouteLatFuncs.bringBackTopology( main )
+ if main.validRun >= main.warmUp:
+ main.invalidRun += 1
+ else:
+ main.validRun += 1
+ skip = True
+ break
+ else:
+ main.startLine[ i ] = timestamp
+ main.log.info( "Timestamp of last LINK_REMOVED event on node {} is {}".format( i + 1,
+ main.startLine[ i ] ) )
+ if skip:
+ continue
+
+ # calculate values
+ topologyTimestamps = main.intentRerouteLatFuncs.getTopologyTimestamps( main )
+ intentTimestamps = main.intentRerouteLatFuncs.getIntentTimestamps( main )
+ if intentTimestamps == main.ERROR or topologyTimestamps == main.ERROR:
+ main.log.info( "Got invalid timestamp, skipping this iteration..." )
main.intentRerouteLatFuncs.bringBackTopology( main )
if main.validRun >= main.warmUp:
main.invalidRun += 1
else:
main.validRun += 1
- skip = True
- break
+ continue
else:
- main.startLine[ i ] = timestamp
- main.log.info( "Timestamp of last LINK_REMOVED event on node {} is {}".format( i + 1,
- main.startLine[ i ] ) )
- if skip:
- continue
+ main.log.info( "Got valid timestamps" )
- # calculate values
- topologyTimestamps = main.intentRerouteLatFuncs.getTopologyTimestamps( main )
- intentTimestamps = main.intentRerouteLatFuncs.getIntentTimestamps( main )
- if intentTimestamps == main.ERROR or topologyTimestamps == main.ERROR:
- main.log.info( "Got invalid timestamp, skipping this iteration..." )
- main.intentRerouteLatFuncs.bringBackTopology( main )
- if main.validRun >= main.warmUp:
- main.invalidRun += 1
+ firstLocalLatency, lastLocalLatency, firstGlobalLatency, lastGlobalLatency \
+ = main.intentRerouteLatFuncs.calculateLatency( main, topologyTimestamps, intentTimestamps )
+ if firstLocalLatency < 0:
+ main.log.info( "Got negative latency, skipping this iteration..." )
+ main.intentRerouteLatFuncs.bringBackTopology( main )
+ if main.validRun >= main.warmUp:
+ main.invalidRun += 1
+ else:
+ main.validRun += 1
+ continue
else:
+ main.log.info( "Got valid latencies" )
main.validRun += 1
- continue
- else:
- main.log.info( "Got valid timestamps" )
- firstLocalLatnecy, lastLocalLatnecy, firstGlobalLatency, lastGlobalLatnecy \
- = main.intentRerouteLatFuncs.calculateLatency( main, topologyTimestamps, intentTimestamps )
- if firstLocalLatnecy < 0:
- main.log.info( "Got negative latency, skipping this iteration..." )
- main.intentRerouteLatFuncs.bringBackTopology( main )
if main.validRun >= main.warmUp:
- main.invalidRun += 1
- else:
- main.validRun += 1
- continue
- else:
- main.log.info( "Got valid latencies" )
- main.validRun += 1
+ firstLocalLatencies.append( firstLocalLatency )
+ lastLocalLatencies.append( lastLocalLatency )
+ firstGlobalLatencies.append( firstGlobalLatency )
+ lastGlobalLatencies.append( lastGlobalLatency )
- firstLocalLatencies.append( firstLocalLatnecy )
- lastLocalLatencies.append( lastLocalLatnecy )
- firstGlobalLatencies.append( firstGlobalLatency )
- lastGlobalLatencies.append( lastGlobalLatnecy )
+ # bring up link and withdraw intents
+ main.Cluster.active( 0 ).CLI.link( main.end1[ 'port' ],
+ main.end2[ 'port' ],
+ "up",
+ timeout=main.timeout )
+ main.Cluster.active( 0 ).CLI.pushTestIntents( main.ingress,
+ main.egress,
+ batchSize,
+ offset=1,
+ options="-w",
+ timeout=main.timeout )
+ main.Cluster.active( 0 ).CLI.purgeWithdrawnIntents()
- # bring up link and withdraw intents
- main.Cluster.active( 0 ).CLI.link( main.end1[ 'port' ],
- main.end2[ 'port' ],
- "up",
- timeout=main.timeout )
- main.Cluster.active( 0 ).CLI.pushTestIntents( main.ingress,
- main.egress,
- batchSize,
- offset=1,
- options="-w",
- timeout=main.timeout )
- main.Cluster.active( 0 ).CLI.purgeWithdrawnIntents()
+ # check links, flows and intents
+ main.intentRerouteLatFuncs.sanityCheck( main, main.deviceCount * 2, 0, 0 )
+ if not main.verify:
+ main.log.warn( "Sanity check failed, skipping this iteration..." )
+ continue
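+ # summarize this batch: fall back to zeros when the invalid-run budget was exhausted or no samples were collected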
+ result = ( main.TRUE if main.invalidRun <= main.maxInvalidRun else main.FALSE )
+ aveLocalLatency = numpy.average( lastLocalLatencies ) if lastLocalLatencies and result else 0
+ aveGlobalLatency = numpy.average( lastGlobalLatencies ) if lastGlobalLatencies and result else 0
+ stdLocalLatency = numpy.std( lastLocalLatencies ) if lastLocalLatencies and result else 0
+ stdGlobalLatency = numpy.std( lastGlobalLatencies ) if lastGlobalLatencies and result else 0
+ testResult = testResult and result
- # check links, flows and intents
- main.intentRerouteLatFuncs.sanityCheck( main, main.deviceCount * 2, 0, 0 )
- if not main.verify:
- continue
+ main.log.report( "Scale: " + str( main.Cluster.numCtrls ) + " \tIntent batch: " + str( batchSize ) )
+ main.log.report( "Local latency average:................" + str( aveLocalLatency ) )
+ main.log.report( "Global latency average:................" + str( aveGlobalLatency ) )
+ main.log.report( "Local latency std:................" + str( stdLocalLatency ) )
+ main.log.report( "Global latency std:................" + str( stdGlobalLatency ) )
+ main.log.report( "________________________________________________________" )
- aveLocalLatency = numpy.average( lastLocalLatencies )
- aveGlobalLatency = numpy.average( lastGlobalLatencies )
- stdLocalLatency = numpy.std( lastLocalLatencies )
- stdGlobalLatency = numpy.std( lastGlobalLatencies )
-
- main.log.report( "Scale: " + str( main.Cluster.numCtrls ) + " \tIntent batch: " + str( batchSize ) )
- main.log.report( "Local latency average:................" + str( aveLocalLatency ) )
- main.log.report( "Global latency average:................" + str( aveGlobalLatency ) )
- main.log.report( "Local latency std:................" + str( stdLocalLatency ) )
- main.log.report( "Global latency std:................" + str( stdGlobalLatency ) )
- main.log.report( "________________________________________________________" )
-
- if not ( numpy.isnan( aveLocalLatency ) or numpy.isnan( aveGlobalLatency ) ):
- # check if got NaN for result
- resultsDB = open( main.dbFileName, "a" )
- resultsDB.write( "'" + main.commit + "'," )
- resultsDB.write( str( main.Cluster.numCtrls ) + "," )
- resultsDB.write( str( batchSize ) + "," )
- resultsDB.write( str( aveLocalLatency ) + "," )
- resultsDB.write( str( stdLocalLatency ) + "\n" )
- resultsDB.close()
+ if not ( numpy.isnan( aveLocalLatency ) or numpy.isnan( aveGlobalLatency ) ):
+ # check if got NaN for result
+ resultsDB = open( main.dbFileName, "a" )
+ resultsDB.write( "'" + main.commit + "'," )
+ resultsDB.write( str( main.Cluster.numCtrls ) + "," )
+ resultsDB.write( str( batchSize ) + "," )
+ resultsDB.write( str( aveLocalLatency ) + "," )
+ resultsDB.write( str( stdLocalLatency ) + "\n" )
+ resultsDB.close()
+ else:
+ testResult = main.FALSE
+ utilities.assert_equals( expect=main.TRUE,
+ actual=testResult,
+ onpass="Installing and withdrawing intents reroute properly",
+ onfail="There was something wrong installing and withdrawing intents reroute" )
diff --git a/TestON/tests/SCPF/SCPFintentRerouteLat/dependencies/intentRerouteLatFuncs.py b/TestON/tests/SCPF/SCPFintentRerouteLat/dependencies/intentRerouteLatFuncs.py
index d91f09e..947917b 100644
--- a/TestON/tests/SCPF/SCPFintentRerouteLat/dependencies/intentRerouteLatFuncs.py
+++ b/TestON/tests/SCPF/SCPFintentRerouteLat/dependencies/intentRerouteLatFuncs.py
@@ -24,7 +24,7 @@
time.sleep( main.verifySleep )
summary = json.loads( main.Cluster.active( 0 ).CLI.summary( timeout=main.timeout ) )
linkNum = summary.get( "links" )
- flowNum = summary.get( "flows" )
+ flowNum = main.Cluster.active( 0 ).CLI.getTotalFlowsNum( timeout=600, noExit=True )
intentNum = summary.get( "intents" )
if linkNum == linkNumExpected and flowNum == flowNumExpected and intentNum == intentNumExpected:
main.log.info( "links: {}, flows: {}, intents: {}".format( linkNum, flowNum, intentNum ) )
@@ -33,7 +33,8 @@
attemps += 1
if not main.verify:
main.log.warn( "Links or flows or intents number not as expected" )
- main.log.warn( "links: {}, flows: {}, intents: {}".format( linkNum, flowNum, intentNum ) )
+ main.log.warn( "[Expected] links: {}, flows: {}, intents: {}".format( linkNumExpected, flowNumExpected, intentNumExpected ) )
+ main.log.warn( "[Actual] links: {}, flows: {}, intents: {}".format( linkNum, flowNum, intentNum ) )
# bring back topology
bringBackTopology( main )
if main.validRun >= main.warmUp:
@@ -57,6 +58,9 @@
main.Cluster.active( 0 ).CLI.setCfg( main.nullProviderCfg,
"enabled",
value="false" )
+ time.sleep( main.startUpSleep )
+ main.Cluster.active( 0 ).CLI.wipeout()
+ time.sleep( main.startUpSleep )
main.Cluster.active( 0 ).CLI.setCfg( main.nullProviderCfg,
"deviceCount",
value=main.deviceCount )
diff --git a/TestON/tests/SCPF/SCPFintentRerouteLat/dependencies/rerouteTopo.py b/TestON/tests/SCPF/SCPFintentRerouteLat/dependencies/rerouteTopo.py
new file mode 100644
index 0000000..62850e7
--- /dev/null
+++ b/TestON/tests/SCPF/SCPFintentRerouteLat/dependencies/rerouteTopo.py
@@ -0,0 +1,95 @@
+#!/usr/bin/python
+
+"""
+Copyright 2015 Open Networking Foundation ( ONF )
+
+Please refer questions to either the onos test mailing list at <onos-test@onosproject.org>,
+the System Testing Plans and Results wiki page at <https://wiki.onosproject.org/x/voMg>,
+or the System Testing Guide page at <https://wiki.onosproject.org/x/WYQg>
+
+ TestON is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 2 of the License, or
+ ( at your option ) any later version.
+
+ TestON is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with TestON. If not, see <http://www.gnu.org/licenses/>.
+"""
+"""
+Custom topology for Mininet
+"""
+from mininet.topo import Topo
+from mininet.net import Mininet
+from mininet.node import Host, RemoteController
+from mininet.node import Node
+from mininet.link import TCLink
+from mininet.cli import CLI
+from mininet.log import setLogLevel
+from mininet.util import dumpNodeConnections
+from mininet.node import ( UserSwitch, OVSSwitch, IVSSwitch )
+
+
+class MyTopo( Topo ):
+
+ def __init__( self ):
+ # Initialize topology
+ Topo.__init__( self )
+
+ host1 = self.addHost( 'h1', ip='10.1.0.1/24' )
+ host2 = self.addHost( 'h2', ip='10.1.0.2/24' )
+ host3 = self.addHost( 'h3', ip='10.1.0.3/24' )
+ host4 = self.addHost( 'h4', ip='10.1.0.4/24' )
+ host5 = self.addHost( 'h5', ip='10.1.0.5/24' )
+ host6 = self.addHost( 'h6', ip='10.1.0.6/24' )
+ host7 = self.addHost( 'h7', ip='10.1.0.7/24' )
+ host8 = self.addHost( 'h8', ip='10.1.0.8/24' )
+
+ s1 = self.addSwitch( 's1' )
+ s2 = self.addSwitch( 's2' )
+ s3 = self.addSwitch( 's3' )
+ s4 = self.addSwitch( 's4' )
+ s5 = self.addSwitch( 's5' )
+ s6 = self.addSwitch( 's6' )
+ s7 = self.addSwitch( 's7' )
+ s8 = self.addSwitch( 's8' )
+
+ self.addLink( s1, host1 )
+ self.addLink( s2, host2 )
+ self.addLink( s3, host3 )
+ self.addLink( s4, host4 )
+ self.addLink( s5, host5 )
+ self.addLink( s6, host6 )
+ self.addLink( s7, host7 )
+ self.addLink( s8, host8 )
+
+ self.addLink( s1, s2 )
+ self.addLink( s2, s3 )
+ self.addLink( s3, s4 )
+ self.addLink( s4, s5 )
+ self.addLink( s5, s6 )
+ self.addLink( s6, s7 )
+ self.addLink( s3, s8 )
+ self.addLink( s8, s4 )
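+ # s3-s8-s4 forms an alternate path around the direct s3-s4 link, giving the reroute test a backup route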
+
+topos = { 'mytopo': ( lambda: MyTopo() ) }
+
+# HERE THE CODE DEFINITION OF THE TOPOLOGY ENDS
+
+
+def setupNetwork():
+ "Create network"
+ topo = MyTopo()
+ network = Mininet( topo=topo, autoSetMacs=True, controller=None )
+ network.start()
+ CLI( network )
+ network.stop()
+
+if __name__ == '__main__':
+ setLogLevel( 'info' )
+ # setLogLevel( 'debug' )
+ setupNetwork()
diff --git a/TestON/tests/SCPF/SCPFmastershipFailoverLat/SCPFmastershipFailoverLat.params b/TestON/tests/SCPF/SCPFmastershipFailoverLat/SCPFmastershipFailoverLat.params
index 1b4fc02..497bb9d 100644
--- a/TestON/tests/SCPF/SCPFmastershipFailoverLat/SCPFmastershipFailoverLat.params
+++ b/TestON/tests/SCPF/SCPFmastershipFailoverLat/SCPFmastershipFailoverLat.params
@@ -1,6 +1,11 @@
<PARAMS>
<testcases>0,[1,2,3]*3</testcases>
+ <GRAPH>
+ <nodeCluster>BM</nodeCluster>
+ <builds>20</builds>
+ </GRAPH>
+
<SCALE>3,5,7</SCALE>
<max>7</max>
diff --git a/TestON/tests/SCPF/SCPFmastershipFailoverLat/SCPFmastershipFailoverLat.py b/TestON/tests/SCPF/SCPFmastershipFailoverLat/SCPFmastershipFailoverLat.py
index 0a99c41..e591381 100644
--- a/TestON/tests/SCPF/SCPFmastershipFailoverLat/SCPFmastershipFailoverLat.py
+++ b/TestON/tests/SCPF/SCPFmastershipFailoverLat/SCPFmastershipFailoverLat.py
@@ -186,11 +186,11 @@
for line in reversed( eventOutput ):
if "INSTANCE_DEACTIVATED" in line and len( instanceDeactivatedLats ) == CLInum:
deactivateTime = float( datetime.datetime.strptime(
- line.split()[ 0 ], "%Y-%m-%dT%H:%M:%S.%f" ).strftime( '%s.%f' ) ) * 1000.0
+ line.split()[ 0 ][ : 23 ], "%Y-%m-%dT%H:%M:%S.%f" ).strftime( '%s.%f' ) ) * 1000.0
instanceDeactivatedLats.append( deactivateTime - time1 )
elif "MASTER_CHANGED" in line and len( masterChangedLats ) == CLInum:
changedTime = float( datetime.datetime.strptime(
- line.split()[ 0 ], "%Y-%m-%dT%H:%M:%S.%f" ).strftime( '%s.%f' ) ) * 1000.0
+ line.split()[ 0 ][ : 23 ], "%Y-%m-%dT%H:%M:%S.%f" ).strftime( '%s.%f' ) ) * 1000.0
masterChangedLats.append( changedTime - time1 )
if len( instanceDeactivatedLats ) > CLInum and len( masterChangedLats ) > CLInum:
break
@@ -261,9 +261,8 @@
criticalError = True
main.log.info( "Checking ONOS nodes." )
- nodeResults = utilities.retry( main.HA.nodesCheck,
+ nodeResults = utilities.retry( main.Cluster.nodesCheck,
False,
- args=[ main.Cluster.active() ],
sleep=1,
attempts=3 )
diff --git a/TestON/tests/SCPF/SCPFportLat/SCPFportLat.params b/TestON/tests/SCPF/SCPFportLat/SCPFportLat.params
index 65e02fa..e558365 100644
--- a/TestON/tests/SCPF/SCPFportLat/SCPFportLat.params
+++ b/TestON/tests/SCPF/SCPFportLat/SCPFportLat.params
@@ -1,6 +1,11 @@
<PARAMS>
<testcases>0,1,2,1,2,1,2,1,2</testcases>
+ <GRAPH>
+ <nodeCluster>BM</nodeCluster>
+ <builds>20</builds>
+ </GRAPH>
+
<SCALE>1,3,5,7</SCALE>
<max>7</max>
diff --git a/TestON/tests/SCPF/SCPFscaleTopo/SCPFscaleTopo.params b/TestON/tests/SCPF/SCPFscaleTopo/SCPFscaleTopo.params
index b152611..2ef69df 100755
--- a/TestON/tests/SCPF/SCPFscaleTopo/SCPFscaleTopo.params
+++ b/TestON/tests/SCPF/SCPFscaleTopo/SCPFscaleTopo.params
@@ -10,6 +10,11 @@
# 1,[2,10,300,11,100,300,11,200,300,11,1000]*3
<testcases>1,[3,2,10,300,11,1000]*7,3</testcases>
+ <GRAPH>
+ <nodeCluster>BM</nodeCluster>
+ <builds>20</builds>
+ </GRAPH>
+
<DEPENDENCY>
<path>/tests/SCPF/SCPFscaleTopo/dependencies/</path>
<wrapper1>startUp</wrapper1>
diff --git a/TestON/tests/SCPF/SCPFscalingMaxIntents/SCPFscalingMaxIntents.params b/TestON/tests/SCPF/SCPFscalingMaxIntents/SCPFscalingMaxIntents.params
index 5cb65d0..26fb3f5 100644
--- a/TestON/tests/SCPF/SCPFscalingMaxIntents/SCPFscalingMaxIntents.params
+++ b/TestON/tests/SCPF/SCPFscalingMaxIntents/SCPFscalingMaxIntents.params
@@ -8,6 +8,11 @@
# 0,1,2,10,20,1,2,10,20,1,2,10,20
<testcases>0,1,2,11,20,1,2,11,20,1,2,11,20,1,2,11,20</testcases>
+ <GRAPH>
+ <nodeCluster>BM</nodeCluster>
+ <builds>20</builds>
+ </GRAPH>
+
<reroute>False</reroute>
<SCALE>1,3,5,7</SCALE>
diff --git a/TestON/tests/SCPF/SCPFscalingMaxIntents/SCPFscalingMaxIntents.py b/TestON/tests/SCPF/SCPFscalingMaxIntents/SCPFscalingMaxIntents.py
index 1633c44..87e14a4 100644
--- a/TestON/tests/SCPF/SCPFscalingMaxIntents/SCPFscalingMaxIntents.py
+++ b/TestON/tests/SCPF/SCPFscalingMaxIntents/SCPFscalingMaxIntents.py
@@ -440,6 +440,6 @@
# how many flows we installed before crash
temp += "," + str( totalFlows )
# other columns in database, but we didn't use in this test
- temp += "," + "0,0,0,0,0,0"
+ temp += "," + "0,0,0,0,0,0,0,0"
temp += "\n"
dbFile.write( temp )
diff --git a/TestON/tests/SCPF/SCPFswitchLat/SCPFswitchLat.params b/TestON/tests/SCPF/SCPFswitchLat/SCPFswitchLat.params
index d77eec3..8680b5c 100644
--- a/TestON/tests/SCPF/SCPFswitchLat/SCPFswitchLat.params
+++ b/TestON/tests/SCPF/SCPFswitchLat/SCPFswitchLat.params
@@ -1,6 +1,11 @@
<PARAMS>
<testcases>0,1,2,1,2,1,2,1,2</testcases>
+ <GRAPH>
+ <nodeCluster>BM</nodeCluster>
+ <builds>20</builds>
+ </GRAPH>
+
<SCALE>1,3,5,7</SCALE>
<max>7</max>
diff --git a/TestON/tests/USECASE/SegmentRouting/SRClusterRestart/SRClusterRestart.params b/TestON/tests/USECASE/SegmentRouting/SRClusterRestart/SRClusterRestart.params
index 1a58556..cf284ca 100755
--- a/TestON/tests/USECASE/SegmentRouting/SRClusterRestart/SRClusterRestart.params
+++ b/TestON/tests/USECASE/SegmentRouting/SRClusterRestart/SRClusterRestart.params
@@ -2,6 +2,11 @@
<testcases>1,2,3</testcases>
+ <GRAPH>
+ <nodeCluster>BM</nodeCluster>
+ <builds>20</builds>
+ </GRAPH>
+
<SCALE>
<size>3</size>
<max>3</max>
diff --git a/TestON/tests/USECASE/SegmentRouting/SRDynamic/SRDynamic.params b/TestON/tests/USECASE/SegmentRouting/SRDynamic/SRDynamic.params
index a757a48..2fa1d4e 100755
--- a/TestON/tests/USECASE/SegmentRouting/SRDynamic/SRDynamic.params
+++ b/TestON/tests/USECASE/SegmentRouting/SRDynamic/SRDynamic.params
@@ -2,6 +2,11 @@
<testcases>1,2,3,4,5,6</testcases>
+ <GRAPH>
+ <nodeCluster>BM</nodeCluster>
+ <builds>20</builds>
+ </GRAPH>
+
<SCALE>
<size>3</size>
<max>3</max>
diff --git a/TestON/tests/USECASE/SegmentRouting/SRHighAvailability/SRHighAvailability.params b/TestON/tests/USECASE/SegmentRouting/SRHighAvailability/SRHighAvailability.params
index 5f990fa..9f61a70 100644
--- a/TestON/tests/USECASE/SegmentRouting/SRHighAvailability/SRHighAvailability.params
+++ b/TestON/tests/USECASE/SegmentRouting/SRHighAvailability/SRHighAvailability.params
@@ -2,6 +2,11 @@
<testcases>1,2,3,4,5,6,7,8</testcases>
+ <GRAPH>
+ <nodeCluster>BM</nodeCluster>
+ <builds>20</builds>
+ </GRAPH>
+
<SCALE>
<size>3</size>
<max>3</max>
diff --git a/TestON/tests/USECASE/SegmentRouting/SRLinkFailure/SRLinkFailure.params b/TestON/tests/USECASE/SegmentRouting/SRLinkFailure/SRLinkFailure.params
index 8f3ade9..93e3dbe 100755
--- a/TestON/tests/USECASE/SegmentRouting/SRLinkFailure/SRLinkFailure.params
+++ b/TestON/tests/USECASE/SegmentRouting/SRLinkFailure/SRLinkFailure.params
@@ -2,6 +2,11 @@
<testcases>1,2,4,5</testcases>
+ <GRAPH>
+ <nodeCluster>BM</nodeCluster>
+ <builds>20</builds>
+ </GRAPH>
+
<SCALE>
<size>3</size>
<max>3</max>
diff --git a/TestON/tests/USECASE/SegmentRouting/SROnosFailure/SROnosFailure.params b/TestON/tests/USECASE/SegmentRouting/SROnosFailure/SROnosFailure.params
index 1a58556..cf284ca 100755
--- a/TestON/tests/USECASE/SegmentRouting/SROnosFailure/SROnosFailure.params
+++ b/TestON/tests/USECASE/SegmentRouting/SROnosFailure/SROnosFailure.params
@@ -2,6 +2,11 @@
<testcases>1,2,3</testcases>
+ <GRAPH>
+ <nodeCluster>BM</nodeCluster>
+ <builds>20</builds>
+ </GRAPH>
+
<SCALE>
<size>3</size>
<max>3</max>
diff --git a/TestON/tests/USECASE/SegmentRouting/SRSanity/SRSanity.params b/TestON/tests/USECASE/SegmentRouting/SRSanity/SRSanity.params
index a757a48..2fa1d4e 100755
--- a/TestON/tests/USECASE/SegmentRouting/SRSanity/SRSanity.params
+++ b/TestON/tests/USECASE/SegmentRouting/SRSanity/SRSanity.params
@@ -2,6 +2,11 @@
<testcases>1,2,3,4,5,6</testcases>
+ <GRAPH>
+ <nodeCluster>BM</nodeCluster>
+ <builds>20</builds>
+ </GRAPH>
+
<SCALE>
<size>3</size>
<max>3</max>
diff --git a/TestON/tests/USECASE/SegmentRouting/SRSwitchFailure/SRSwitchFailure.params b/TestON/tests/USECASE/SegmentRouting/SRSwitchFailure/SRSwitchFailure.params
index 8f3ade9..93e3dbe 100755
--- a/TestON/tests/USECASE/SegmentRouting/SRSwitchFailure/SRSwitchFailure.params
+++ b/TestON/tests/USECASE/SegmentRouting/SRSwitchFailure/SRSwitchFailure.params
@@ -2,6 +2,11 @@
<testcases>1,2,4,5</testcases>
+ <GRAPH>
+ <nodeCluster>BM</nodeCluster>
+ <builds>20</builds>
+ </GRAPH>
+
<SCALE>
<size>3</size>
<max>3</max>
diff --git a/TestON/tests/USECASE/USECASE_SdnipFunction/USECASE_SdnipFunction.params b/TestON/tests/USECASE/USECASE_SdnipFunction/USECASE_SdnipFunction.params
index 8b401d6..bf87224 100644
--- a/TestON/tests/USECASE/USECASE_SdnipFunction/USECASE_SdnipFunction.params
+++ b/TestON/tests/USECASE/USECASE_SdnipFunction/USECASE_SdnipFunction.params
@@ -2,6 +2,11 @@
<testcases>101, 100, 200, 102, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10</testcases>
+ <GRAPH>
+ <nodeCluster>BM</nodeCluster>
+ <builds>20</builds>
+ </GRAPH>
+
#Environment variables
<ENV>
#Cells that you use
diff --git a/TestON/tests/USECASE/USECASE_SdnipFunction/dependencies/USECASE_SdnipI2MN.py b/TestON/tests/USECASE/USECASE_SdnipFunction/dependencies/USECASE_SdnipI2MN.py
index 23607ad..14a7c69 100644
--- a/TestON/tests/USECASE/USECASE_SdnipFunction/dependencies/USECASE_SdnipI2MN.py
+++ b/TestON/tests/USECASE/USECASE_SdnipFunction/dependencies/USECASE_SdnipI2MN.py
@@ -39,7 +39,6 @@
QUAGGA_DIR = '/usr/lib/quagga'
QUAGGA_RUN_DIR = '/usr/local/var/run/quagga'
QUAGGA_CONFIG_DIR = '~/OnosSystemTest/TestON/tests/USECASE/USECASE_SdnipFunction/dependencies/'
-# onos1IP = '10.254.1.201'
numSw = 39
diff --git a/TestON/tests/USECASE/USECASE_SdnipFunction/sdnip_single_instance b/TestON/tests/USECASE/USECASE_SdnipFunction/sdnip_single_instance
index c2c51c6..9e0be8e 100644
--- a/TestON/tests/USECASE/USECASE_SdnipFunction/sdnip_single_instance
+++ b/TestON/tests/USECASE/USECASE_SdnipFunction/sdnip_single_instance
@@ -1,8 +1,8 @@
export ONOS_CELL="sdnip_single_instance"
export ONOS_INSTALL_DIR="/opt/onos"
-export ONOS_NIC=10.254.1.*
-export OC1="10.254.1.201"
+export ONOS_NIC=10.192.19.*
+export OC1="10.192.19.68"
export OCN="127.0.0.1"
export OCI="${OC1}"
export ONOS_USER="sdn" # ONOS user on remote system
diff --git a/TestON/tests/USECASE/USECASE_SdnipFunctionCluster/USECASE_SdnipFunctionCluster.params b/TestON/tests/USECASE/USECASE_SdnipFunctionCluster/USECASE_SdnipFunctionCluster.params
index e25086a..3253ed8 100644
--- a/TestON/tests/USECASE/USECASE_SdnipFunctionCluster/USECASE_SdnipFunctionCluster.params
+++ b/TestON/tests/USECASE/USECASE_SdnipFunctionCluster/USECASE_SdnipFunctionCluster.params
@@ -2,6 +2,13 @@
<testcases>101, 100, 200, 102, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12</testcases>
#Environment variables
+
+ <GRAPH>
+ <nodeCluster>BM</nodeCluster>
+ <builds>20</builds>
+ </GRAPH>
+
+
<ENV>
<cellName>SDNIP</cellName>
<appString>drivers,openflow,proxyarp</appString>
diff --git a/TestON/tests/USECASE/USECASE_SdnipFunctionCluster/sdnip_multiple_instance_BM b/TestON/tests/USECASE/USECASE_SdnipFunctionCluster/sdnip_multiple_instance_BM
index 1053083..dc9cfea 100644
--- a/TestON/tests/USECASE/USECASE_SdnipFunctionCluster/sdnip_multiple_instance_BM
+++ b/TestON/tests/USECASE/USECASE_SdnipFunctionCluster/sdnip_multiple_instance_BM
@@ -1,10 +1,10 @@
export ONOS_CELL="sdnip_multiple_instance_BM"
export ONOS_INSTALL_DIR="/opt/onos"
-export ONOS_NIC=10.254.1.*
-export OC1="10.254.1.201"
-export OC2="10.254.1.202"
-export OC3="10.254.1.203"
+export ONOS_NIC=10.192.19.*
+export OC1="10.192.19.68"
+export OC2="10.192.19.67"
+export OC3="10.192.19.66"
export OCN="127.0.0.1"
export OCI="${OC1}"
export ONOS_USER="sdn" # ONOS user on remote system
diff --git a/TestON/tests/USECASE/VPLS/VPLSBasic/VPLSBasic.params b/TestON/tests/USECASE/VPLS/VPLSBasic/VPLSBasic.params
index fc6a16b..355ff57 100755
--- a/TestON/tests/USECASE/VPLS/VPLSBasic/VPLSBasic.params
+++ b/TestON/tests/USECASE/VPLS/VPLSBasic/VPLSBasic.params
@@ -2,6 +2,11 @@
<testcases>1,2,10,11,12,13,14,15,16,11</testcases>
+ <GRAPH>
+ <nodeCluster>BM</nodeCluster>
+ <builds>20</builds>
+ </GRAPH>
+
<num_controllers>3</num_controllers>
<GIT>
diff --git a/TestON/tests/USECASE/VPLS/VPLSfailsafe/VPLSfailsafe.params b/TestON/tests/USECASE/VPLS/VPLSfailsafe/VPLSfailsafe.params
index 53e4c6f..c44bb58 100755
--- a/TestON/tests/USECASE/VPLS/VPLSfailsafe/VPLSfailsafe.params
+++ b/TestON/tests/USECASE/VPLS/VPLSfailsafe/VPLSfailsafe.params
@@ -2,6 +2,11 @@
<testcases>1,2,50,100,200,300,310,400</testcases>
+ <GRAPH>
+ <nodeCluster>BM</nodeCluster>
+ <builds>20</builds>
+ </GRAPH>
+
<num_controllers>3</num_controllers>
<GIT>
diff --git a/TestON/tests/USECASE/VPLS/VPLSfailsafe/VPLSfailsafe.py b/TestON/tests/USECASE/VPLS/VPLSfailsafe/VPLSfailsafe.py
index a7dfa3b..9778b68 100644
--- a/TestON/tests/USECASE/VPLS/VPLSfailsafe/VPLSfailsafe.py
+++ b/TestON/tests/USECASE/VPLS/VPLSfailsafe/VPLSfailsafe.py
@@ -421,9 +421,8 @@
# Checking if all nodes appear with status READY using 'nodes' command
main.step( "Checking ONOS nodes." )
- nodeResults = utilities.retry( main.HA.nodesCheck,
+ nodeResults = utilities.retry( main.Cluster.nodesCheck,
False,
- args=[ main.Cluster.runningNodes ],
sleep=main.timeSleep,
attempts=main.numAttempts )
diff --git a/TestON/tests/dependencies/Cluster.py b/TestON/tests/dependencies/Cluster.py
index 210134f..1532039 100644
--- a/TestON/tests/dependencies/Cluster.py
+++ b/TestON/tests/dependencies/Cluster.py
@@ -18,6 +18,7 @@
You should have received a copy of the GNU General Public License
along with TestON. If not, see <http://www.gnu.org/licenses/>.
"""
+import json
class Cluster():
def __str__( self ):
@@ -339,6 +340,28 @@
self.controllers[ i ].active = True
return result
+ def nodesCheck( self ):
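+ """
+ Description:
+ Check that every active node reports all cluster members as READY.
+ Parses the json output of the 'nodes' command from each active
+ controller and compares the READY IPs against the expected active IPs.
+ Returns:
+ True if the check passes on every node, otherwise False
+ """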
+ results = True
+ nodesOutput = self.command( "nodes", specificDriver=2 )
+ ips = sorted( self.getIps( activeOnly=True ) )
+ for i in nodesOutput:
+ try:
+ current = json.loads( i )
+ activeIps = []
+ currentResult = False
+ for node in current:
+ if node[ 'state' ] == 'READY':
+ activeIps.append( node[ 'ip' ] )
+ activeIps.sort()
+ if ips == activeIps:
+ currentResult = True
+ except ( ValueError, TypeError ):
+ main.log.error( "Error parsing nodes output" )
+ main.log.warn( repr( i ) )
+ currentResult = False
+ results = results and currentResult
+ return results
+
def printResult( self, results, activeList, logLevel="debug" ):
"""
Description:
@@ -471,6 +494,7 @@
maxSize = float( segmentSize ) * float( multiplier )
ret = True
for n in self.runningNodes:
- ret = ret and n.server.folderSize( "/opt/onos/apache-karaf-*/data/partitions/*/*.log",
+ # Partition logs
+ ret = ret and n.server.folderSize( "/opt/onos/apache-karaf-*/data/db/partitions/*/*.log",
size=maxSize, unit=units, ignoreRoot=False )
return ret
diff --git a/TestON/tests/dependencies/ONOSSetup.py b/TestON/tests/dependencies/ONOSSetup.py
index 750dde6..ed4d978 100644
--- a/TestON/tests/dependencies/ONOSSetup.py
+++ b/TestON/tests/dependencies/ONOSSetup.py
@@ -125,8 +125,43 @@
"test variables ",
onfail="Failed to construct test variables" )
+ url = self.generateGraphURL()
+ main.log.wiki( url )
+
main.commit = main.ONOSbench.getVersion( report=True )
+ def generateGraphURL( self, width=525, height=350 ):
+ """
+ Description:
+ Obtain the URL for the graph that corresponds to the test being run.
+ """
+
+ nodeCluster = main.params[ 'GRAPH' ][ 'nodeCluster' ]
+ testname = main.TEST
+ branch = main.ONOSbench.getBranchName()
+ maxBuildsToShow = main.params[ 'GRAPH' ][ 'builds' ]
+
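+ # wrap the image URL in a Confluence 'html' structured-macro so the wiki page embeds the Jenkins trend graph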
+ return '<ac:structured-macro ac:name="html">\n' + \
+ '<ac:plain-text-body><![CDATA[\n' + \
+ '<img src="https://onos-jenkins.onlab.us/job/Pipeline_postjob_' + \
+ nodeCluster + \
+ '/lastSuccessfulBuild/artifact/' + \
+ testname + \
+ '_' + \
+ branch + \
+ '_' + \
+ maxBuildsToShow + \
+ '-builds_graph.jpg", alt="' + \
+ testname + \
+ '", style="width:' + \
+ str( width ) + \
+ 'px;height:' + \
+ str( height ) + \
+ 'px;border:0"' + \
+ '>' + \
+ ']]></ac:plain-text-body>\n' + \
+ '</ac:structured-macro>\n'
+
def setNumCtrls( self, hasMultiNodeRounds ):
"""
Description: