Merge "[ONOS-7112] investigate SCPFswitchLat failure and fix the issue"
diff --git a/TestON/JenkinsFile/FUNCJenkinsFile b/TestON/JenkinsFile/FUNCJenkinsFile
index e7b1d3b..de05cee 100644
--- a/TestON/JenkinsFile/FUNCJenkinsFile
+++ b/TestON/JenkinsFile/FUNCJenkinsFile
@@ -1,4 +1,5 @@
 #!groovy
+import groovy.time.*
 // This is a Jenkinsfile for a scripted pipeline for the FUNC tests
 def prop = null
 node("TestStation-VMs"){
@@ -34,15 +35,20 @@
     tests[stepName] = FUNCTest(test, toBeRun, prop)
 }
 
+def now = new Date()
 // run the tests
 for ( test in tests.keySet() ){
     tests[test].call()
 }
-
+if( prop["manualRun"] == "false" ){
+    def end = new Date()
+    TimeDuration duration = TimeCategory.minus( end, now )
+    slackSend( color:"#5816EE", message: "FUNC tests ended at: " + end.toString() + "\nTime took : " + duration )
+}
 // The testName should be the key from the FUNC
 def FUNCTest( testName, toBeRun, prop ) {
     return {
-        catchError{
+        try{
             stage(testName) {
                 if ( toBeRun ){
                     workSpace = "/var/jenkins/workspace/"+testName
@@ -174,6 +180,13 @@
                     }
                 }
             }
+        }catch (all) {
+            catchError{
+                if( prop["manualRun"] == "false" )
+                    slackSend(color:"#FF0000", message: "[" + prop["ONOSBranch"] + "]" + testName + " : Failed!\n"
+                    + "https://onos-jenkins.onlab.us/blue/organizations/jenkins/${env.JOB_NAME}/detail/${env.JOB_NAME}/${env.BUILD_NUMBER}/pipeline" )
+                Failed // undefined variable reference: the exception it throws lets catchError mark this stage as failed
+            }
         }
     }
 }
\ No newline at end of file
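
For reference, the run-duration Slack summary that this change adds to each Jenkinsfile boils down to the sketch below (Groovy; it assumes a Jenkins pipeline context where the Slack plugin's slackSend step is available, and runAllTests() is only a placeholder for the tests[ test ].call() loop above).

    import groovy.time.TimeCategory
    import groovy.time.TimeDuration

    def start = new Date()
    runAllTests()                                               // placeholder for the tests loop
    def end = new Date()
    TimeDuration duration = TimeCategory.minus( end, start )    // elapsed wall-clock time
    slackSend( color: "#5816EE",
               message: "FUNC tests ended at: " + end.toString() + "\nTime taken : " + duration )
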
diff --git a/TestON/JenkinsFile/HAJenkinsFile b/TestON/JenkinsFile/HAJenkinsFile
index d123075..685ffd6 100644
--- a/TestON/JenkinsFile/HAJenkinsFile
+++ b/TestON/JenkinsFile/HAJenkinsFile
@@ -1,4 +1,5 @@
 #!groovy
+import groovy.time.*
 // This is a Jenkinsfile for a scripted pipeline for the HA tests
 
 def prop = null
@@ -35,16 +36,20 @@
     tests[stepName] = HATest(test, toBeRun, prop)
 }
 
+def now = new Date()
 // run the tests
 for ( test in tests.keySet() ){
     tests[test].call()
 }
-
-
+if( prop["manualRun"] == "false" ){
+    def end = new Date()
+    TimeDuration duration = TimeCategory.minus( end, now )
+    slackSend( color:"#5816EE", message: "HA tests ended at: " + end.toString() + "\nTime took : " + duration )
+}
 // The testName should be the key from the FUNC
 def HATest( testName, toBeRun, prop ) {
     return {
-        catchError{
+        try{
             stage(testName) {
                 if ( toBeRun ){
                     workSpace = "/var/jenkins/workspace/"+testName
@@ -111,7 +116,7 @@
                             ls -al
                             cd '''
 
-                            if( prop["manualRun"] == "false" ){
+                            if( prop["manualRun"] == "false" || prop["postResult"] == "true" ){
                                 // Post Results
                                 withCredentials([
                                     string(credentialsId: 'db_pass', variable: 'pass'),
@@ -156,7 +161,7 @@
                         }
                     }
 
-                    if( prop["manualRun"] == "false" ){
+                    if( prop["manualRun"] == "false" || prop["postResult"] == "true" ){
                         def post = build job: "Pipeline_postjob_VM", propagate: false,
                             parameters: [
                                 string(name: 'Wiki_Contents', value: fileContents),
@@ -176,6 +181,13 @@
                     }
                 }
             }
+        }catch (all) {
+            catchError{
+                if( prop["manualRun"] == "false" )
+                    slackSend(color:"#FF0000", message: "[" + prop["ONOSBranch"] + "]" + testName + " : Failed!\n"
+                    + "https://onos-jenkins.onlab.us/blue/organizations/jenkins/${env.JOB_NAME}/detail/${env.JOB_NAME}/${env.BUILD_NUMBER}/pipeline" )
+                Failed // undefined variable reference: the exception it throws lets catchError mark this stage as failed
+            }
         }
     }
 }
\ No newline at end of file
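
The failure handler added to every test closure above follows the pattern sketched here; the bare reference to the undefined variable Failed deliberately throws, which lets the enclosing catchError record the stage as failed while the rest of the pipeline keeps running. The stage body and the shortened Slack message are illustrative only.

    def exampleTest( testName, prop ) {
        return {
            try {
                stage( testName ) {
                    // ... run the actual test steps here ...
                }
            } catch ( all ) {
                catchError {
                    if ( prop[ "manualRun" ] == "false" )
                        slackSend( color: "#FF0000",
                                   message: "[" + prop[ "ONOSBranch" ] + "]" + testName + " : Failed!" )
                    Failed   // undefined variable: throws so catchError marks the result as FAILURE
                }
            }
        }
    }
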
diff --git a/TestON/JenkinsFile/JenkinsfileTrigger b/TestON/JenkinsFile/JenkinsfileTrigger
index c20db0c..fd856eb 100644
--- a/TestON/JenkinsFile/JenkinsfileTrigger
+++ b/TestON/JenkinsFile/JenkinsfileTrigger
@@ -1,18 +1,66 @@
 #!groovy
 // This is a Jenkinsfile for a scripted pipeline for the SCPF tests
 // Define sets of tests
-SCPF_Basic = "SCPFswitchLat,SCPFportLat,SCPFintentInstallWithdrawLat,SCPFintentEventTp,SCPFflowTp1g,SCPFcbench,SCPFbatchFlowResp"
-SCPF_ExtraSetA = "SCPFintentRerouteLat,SCPFscalingMaxIntents,SCPFhostLat,SCPFscaleTopo,SCPFmastershipFailoverLat"
-SCPF_NEW_TEST = "SCPFmastershipFailoverLat"
-SCPF_ExtraSetB = "SCPFintentInstallWithdrawLatWithFlowObj,SCPFintentEventTpWithFlowObj,SCPFintentRerouteLatWithFlowObj,SCPFscalingMaxIntentsWithFlowObj,SCPFflowTp1gWithFlowObj"
-FUNC_Basic = "FUNCipv6Intent,FUNCoptical,FUNCflow,FUNCnetCfg,FUNCovsdbtest,FUNCnetconf"
-FUNC_ExtraSetA = "FUNCgroup,FUNCintent"
-FUNC_ExtraSetB = "FUNCintentRest"
-HA_Basic = "HAsanity,HAsingleInstanceRestart,HAclusterRestart"
-HA_ExtraSetA = "HAstopNodes,HAfullNetPartition"
-HA_ExtraSetB = "HAkillNodes,HAswapNodes,HAscaling"
-USECASE_Basic = "FUNCvirNetNB,FUNCbgpls,VPLSBasic,PLATdockertest,SRSanity,SRSwitchFailure,SRLinkFailure,SROnosFailure,SRClusterRestart,SRDynamic,SRHA,USECASE_SdnipFunction,USECASE_SdnipFunctionCluster"
-USECASE_NEW_TEST = "VPLSfailsafe"
+AllTheTests=
+[
+    "FUNC":[
+            "FUNCipv6Intent" : ["basic":true, "extra_A":false, "extra_B":false, "day":""],
+            "FUNCoptical" :    ["basic":true, "extra_A":false, "extra_B":false, "day":""],
+            "FUNCflow" :       ["basic":true, "extra_A":false, "extra_B":false, "day":""],
+            "FUNCnetCfg":      ["basic":true, "extra_A":false, "extra_B":false, "day":""],
+            "FUNCovsdbtest" :  ["basic":true, "extra_A":false, "extra_B":false, "day":""],
+            "FUNCnetconf" :    ["basic":true, "extra_A":false, "extra_B":false, "day":""],
+            "FUNCgroup" :      ["basic":true, "extra_A":false, "extra_B":false, "day":""],
+            "FUNCintent" :     ["basic":false, "extra_A":true, "extra_B":false, "day":""],
+            "FUNCintentRest" : ["basic":false, "extra_A":false, "extra_B":true, "day":""]
+    ],
+    "HA":[
+            "HAsanity" :                ["basic":true, "extra_A":false, "extra_B":false, "day":""],
+            "HAclusterRestart" :        ["basic":true, "extra_A":false, "extra_B":false, "day":""],
+            "HAsingleInstanceRestart" : ["basic":true, "extra_A":false, "extra_B":false, "day":""],
+            "HAstopNodes" :             ["basic":false, "extra_A":true, "extra_B":false, "day":""],
+            "HAfullNetPartition" :      ["basic":false, "extra_A":true, "extra_B":false, "day":""],
+            "HAswapNodes" :             ["basic":false, "extra_A":false, "extra_B":true, "day":""],
+            "HAscaling" :               ["basic":false, "extra_A":false, "extra_B":true, "day":""],
+            "HAkillNodes" :             ["basic":false, "extra_A":false, "extra_B":true, "day":""]
+    ],
+    "SCPF":[
+            "SCPFswitchLat":                           ["basic":true, "extra_A":false, "extra_B":false, "extra_C":false, "extra_D":false, "new_Test":false, day:""],
+            "SCPFcbench":                              ["basic":true, "extra_A":false, "extra_B":false, "extra_C":false, "extra_D":false, "new_Test":false, day:""],
+            "SCPFportLat":                             ["basic":true, "extra_A":false, "extra_B":false, "extra_C":false, "extra_D":false, "new_Test":false, day:""],
+            "SCPFflowTp1g":                            ["basic":true, "extra_A":false, "extra_B":false, "extra_C":false, "extra_D":false, "new_Test":false, day:""],
+            "SCPFintentEventTp":                       ["basic":true, "extra_A":false, "extra_B":false, "extra_C":false, "extra_D":false, "new_Test":false, day:""],
+            "SCPFhostLat":                             ["basic":false, "extra_A":true, "extra_B":false, "extra_C":false, "extra_D":false, "new_Test":false, day:""],
+            // SCPFbatchFlowResp will be moved to extra_A after the fix on the ONOS side.
+            "SCPFbatchFlowResp":                       ["basic":false, "extra_A":false, "extra_B":false, "extra_C":false, "extra_D":false, "new_Test":false, day:""],
+            "SCPFintentRerouteLat":                    ["basic":false, "extra_A":true, "extra_B":false, "extra_C":false, "extra_D":false, "new_Test":false, day:""],
+            "SCPFintentInstallWithdrawLat":            ["basic":false, "extra_A":true, "extra_B":false, "extra_C":false, "extra_D":false, "new_Test":false, day:""],
+            "SCPFflowTp1gWithFlowObj":                 ["basic":false, "extra_A":false, "extra_B":true, "extra_C":false, "extra_D":false, "new_Test":false, day:""],
+            "SCPFintentEventTpWithFlowObj":            ["basic":false, "extra_A":false, "extra_B":true, "extra_C":false, "extra_D":false, "new_Test":false, day:""],
+            "SCPFintentRerouteLatWithFlowObj":         ["basic":false, "extra_A":false, "extra_B":true, "extra_C":false, "extra_D":false, "new_Test":false, day:""],
+            "SCPFscalingMaxIntentsWithFlowObj":        ["basic":false, "extra_A":false, "extra_B":true, "extra_C":false, "extra_D":false, "new_Test":false, day:""],
+            "SCPFintentInstallWithdrawLatWithFlowObj": ["basic":false, "extra_A":false, "extra_B":true, "extra_C":false, "extra_D":false, "new_Test":false, day:""],
+            "SCPFscaleTopo":                           ["basic":false, "extra_A":false, "extra_B":false, "extra_C":true, "extra_D":false, "new_Test":false, day:""],
+            "SCPFscalingMaxIntents":                   ["basic":false, "extra_A":false, "extra_B":false, "extra_C":false, "extra_D":true, "new_Test":false, day:""],
+            "SCPFmastershipFailoverLat":               ["basic":false, "extra_A":false, "extra_B":false, "extra_C":false, "extra_D":false, "new_Test":true, day:""]
+    ],
+    "USECASE":[
+            "FUNCvirNetNB" :                ["basic":true, "extra_A":false, "extra_B":false, "new_Test":false, "day":""],
+            "FUNCbgpls" :                   ["basic":true, "extra_A":false, "extra_B":false, "new_Test":false, "day":""],
+            "VPLSBasic" :                   ["basic":true, "extra_A":false, "extra_B":false, "new_Test":false, "day":""],
+            "USECASE_SdnipFunction":        ["basic":true, "extra_A":false, "extra_B":false, "new_Test":false, "day":""],
+            "USECASE_SdnipFunctionCluster": ["basic":true, "extra_A":false, "extra_B":false, "new_Test":false, "day":""],
+            "PLATdockertest":               ["basic":false, "extra_A":true, "extra_B":false, "new_Test":false, "day":""],
+            "SRSanity":                     ["basic":false, "extra_A":false, "extra_B":true, "new_Test":false, "day":""],
+            "SRSwitchFailure":              ["basic":false, "extra_A":false, "extra_B":true, "new_Test":false, "day":""],
+            "SRLinkFailure":                ["basic":false, "extra_A":false, "extra_B":true, "new_Test":false, "day":""],
+            "SROnosFailure":                ["basic":false, "extra_A":false, "extra_B":true, "new_Test":false, "day":""],
+            "SRClusterRestart":             ["basic":false, "extra_A":false, "extra_B":true, "new_Test":false, "day":""],
+            "SRDynamic":                    ["basic":false, "extra_A":false, "extra_B":true, "new_Test":false, "day":""],
+            "SRHighAvailability":           ["basic":false, "extra_A":false, "extra_B":true, "new_Test":false, "day":""],
+            "VPLSfailsafe" :                ["basic":false, "extra_A":false, "extra_B":false, "new_Test":true, "day":""]
+    ]
+]
 testcases = [
     "FUNC" : [ tests : "" , nodeName : "VM"],
     "HA" : [ tests : "" , nodeName : "VM"],
@@ -37,62 +85,24 @@
 echo(now.toString())
 today = now[Calendar.DAY_OF_WEEK]
 day = ""
-SCPF_choices = SCPF_Basic
+SCPF_choices = ""
 USECASE_choices = ""
-FUNC_choices = FUNC_Basic
-HA_choices = HA_Basic
-
-switch (today) {
-    case Calendar.MONDAY:
-        SCPF_choices += "," + SCPF_ExtraSetB
-        FUNC_choices += "," + FUNC_ExtraSetA
-        HA_choices += "," + HA_ExtraSetA
-        day = "Monday"
-        break
-    case Calendar.TUESDAY:
-        day = "Tuesday"
-        USECASE_choices = USECASE_Basic + "," + USECASE_NEW_TEST
-        FUNC_choices += "," + FUNC_ExtraSetB
-        HA_choices += "," + HA_ExtraSetB
-        break
-    case Calendar.WEDNESDAY:
-        SCPF_choices += "," + SCPF_ExtraSetA + "," + SCPF_NEW_TEST
-        FUNC_choices += "," + FUNC_ExtraSetA
-        HA_choices += "," + HA_ExtraSetA
-        day = "Wednesday"
-        break
-    case Calendar.THURSDAY:
-        SCPF_choices += "," + SCPF_ExtraSetB
-        FUNC_choices += "," + FUNC_ExtraSetB
-        HA_choices += "," + HA_ExtraSetB
-        day = "Thursday"
-        break
-    case Calendar.FRIDAY:
-        SCPF_choices += "," + SCPF_ExtraSetA + "," + SCPF_NEW_TEST + "," + SCPF_ExtraSetB
-        FUNC_choices += "," + FUNC_ExtraSetA
-        HA_choices += "," + HA_ExtraSetA
-        day = "Friday"
-        break
-    case Calendar.SATURDAY:
-        SCPF_choices += "," + SCPF_ExtraSetA  + "," + SCPF_ExtraSetB
-        USECASE_choices = USECASE_Basic
-        FUNC_choices += "," + FUNC_ExtraSetA + "," + FUNC_ExtraSetB
-        HA_choices += "," + HA_ExtraSetA + "," + HA_ExtraSetB
-        onos_branch= "1.10"
-        day = "Saturday"
-        break
-    case Calendar.SUNDAY:
-        SCPF_choices += "," + SCPF_ExtraSetA + "," + SCPF_NEW_TEST + "," + SCPF_ExtraSetB
-        USECASE_choices = USECASE_Basic + "," + USECASE_NEW_TEST
-        FUNC_choices += "," + FUNC_ExtraSetA + "," + FUNC_ExtraSetB
-        HA_choices += "," + HA_ExtraSetA + "," + HA_ExtraSetB
-        onos_branch= "1.11"
-        day = "Sunday"
-        break
-}
+FUNC_choices = ""
+HA_choices = ""
 
 manually_run = params.manual_run
 post_result = params.PostResult
+if( !manually_run ){
+    sendToSlack( '#03CD9F', ":sparkles::sparkles::sparkles::sparkles::sparkles::sparkles::sparkles::sparkles::sparkles::sparkles::sparkles::sparkles::sparkles::sparkles::sparkles::sparkles:\n"
+                            + "Starting tests on : " + now.toString()
+                            + "\n:sparkles::sparkles::sparkles::sparkles::sparkles::sparkles::sparkles::sparkles::sparkles::sparkles::sparkles::sparkles::sparkles::sparkles::sparkles::sparkles:" )
+    testDivider( today )
+    FUNC_choices =  lastCommaRemover( FUNC_choices )
+    HA_choices =  lastCommaRemover( HA_choices )
+    SCPF_choices =  lastCommaRemover( SCPF_choices )
+    USECASE_choices =  lastCommaRemover( USECASE_choices )
+}
+
 if ( manually_run ){
     organize_tests( params.Tests )
     onos_branch = params.ONOSVersion
@@ -107,6 +117,149 @@
 }
 print_tests( testcases )
 
+def testDivider( today ){
+    switch (today) {
+        case Calendar.MONDAY:
+            monday( true )
+            tuesday( true, false )
+            wednesday( true, false )
+            thursday( true, false )
+            friday( true, false )
+            day = "Monday"
+            sendToSlack( '#FFD988', "Tests to be run this week : \n" + printDaysForTest() )
+            break
+        case Calendar.TUESDAY:
+            tuesday( true, true )
+            day = "Tuesday"
+            break
+        case Calendar.WEDNESDAY:
+            wednesday( true, true )
+            day = "Wednesday"
+            break
+        case Calendar.THURSDAY:
+            thursday( true, true )
+            day = "Thursday"
+            break
+        case Calendar.FRIDAY:
+            friday( true, true )
+            day = "Friday"
+            break
+        case Calendar.SATURDAY:
+            saturday()
+            onos_branch= "1.11"
+            day = "Saturday"
+            break
+        case Calendar.SUNDAY:
+            sunday()
+            onos_branch= "1.10"
+            day = "Sunday"
+            break
+    }
+}
+def printDaysForTest(){
+    result = ""
+    for ( String test in AllTheTests.keySet() ){
+        result += test + " : \n"
+        for( String each in AllTheTests[ test ].keySet() ){
+            AllTheTests[ test ][ each ][ "day" ] = lastCommaRemover( AllTheTests[ test ][ each ][ "day" ] )
+            result += "    " + each + ":[" + AllTheTests[ test ][ each ][ "day" ] + "]\n"
+        }
+        result += "\n"
+    }
+    return result
+}
+def lastCommaRemover( str ){
+    if ( str.size() > 0 && str[ str.size() - 1 ] == ',' ){
+        str = str.substring( 0,str.size() - 1 )
+    }
+    return str
+}
+def monday( getResult ){
+    FUNC_choices += adder( "FUNC", "basic", true, "M", getResult )
+    FUNC_choices += adder( "FUNC", "extra_A", true, "M", getResult )
+    HA_choices += adder( "HA", "basic", true, "M", getResult )
+    HA_choices += adder( "HA", "extra_A", true, "M", getResult )
+    SCPF_choices += adder( "SCPF", "basic", true, "M", getResult )
+    SCPF_choices += adder( "SCPF", "extra_B", true, "M", getResult )
+}
+def tuesday( getDay, getResult ){
+    FUNC_choices += adder( "FUNC", "basic", getDay, "T", getResult )
+    FUNC_choices += adder( "FUNC", "extra_B", getDay, "T", getResult )
+    HA_choices += adder( "HA", "basic", getDay, "T", getResult )
+    HA_choices += adder( "HA", "extra_B", getDay, "T", getResult )
+    SCPF_choices += adder( "SCPF", "basic", getDay, "T", getResult )
+    SCPF_choices += adder( "SCPF", "extra_C", getDay, "T", getResult )
+    USECASE_choices += adder( "USECASE", "basic", getDay, "T", getResult )
+    USECASE_choices += adder( "USECASE", "extra_A", getDay, "T", getResult )
+    USECASE_choices += adder( "USECASE", "new_Test", getDay, "T", getResult )
+}
+def wednesday( getDay, getResult ){
+    FUNC_choices += adder( "FUNC", "basic", getDay, "W", getResult )
+    FUNC_choices += adder( "FUNC", "extra_A", getDay, "W", getResult )
+    HA_choices += adder( "HA", "basic", getDay, "W", getResult )
+    HA_choices += adder( "HA", "extra_A", getDay, "W", getResult )
+    SCPF_choices += adder( "SCPF", "basic", getDay, "W", getResult )
+    SCPF_choices += adder( "SCPF", "extra_A", getDay, "W", getResult )
+    SCPF_choices += adder( "SCPF", "new_Test", getDay, "W", getResult )
+}
+def thursday( getDay, getResult ){
+    FUNC_choices += adder( "FUNC", "basic", getDay, "Th", getResult )
+    FUNC_choices += adder( "FUNC", "extra_B", getDay, "Th", getResult )
+    HA_choices += adder( "HA", "basic", getDay, "Th", getResult )
+    HA_choices += adder( "HA", "extra_B", getDay, "Th", getResult )
+    SCPF_choices += adder( "SCPF", "basic", getDay, "Th", getResult )
+    SCPF_choices += adder( "SCPF", "extra_B", getDay, "Th", getResult )
+}
+def friday( getDay, getResult ){
+    FUNC_choices += adder( "FUNC", "basic", getDay, "F", getResult )
+    FUNC_choices += adder( "FUNC", "extra_A", getDay, "F", getResult )
+    HA_choices += adder( "HA", "basic", getDay, "F", getResult )
+    HA_choices += adder( "HA", "extra_A", getDay, "F", getResult )
+    SCPF_choices += adder( "SCPF", "basic", getDay, "F", getResult )
+    SCPF_choices += adder( "SCPF", "extra_A", getDay, "F", getResult )
+    SCPF_choices += adder( "SCPF", "extra_D", getDay, "F", getResult )
+}
+def saturday(){
+    FUNC_choices += adder( "FUNC", "basic", false, "Sa", true )
+    FUNC_choices += adder( "FUNC", "extra_A", false, "Sa", true )
+    FUNC_choices += adder( "FUNC", "extra_B", false, "Sa", true )
+    HA_choices += adder( "HA", "basic", false, "Sa", true )
+    HA_choices += adder( "HA", "extra_A", false, "Sa", true )
+    HA_choices += adder( "HA", "extra_B", false, "Sa", true )
+    SCPF_choices += adder( "SCPF", "basic", false, "Sa", true )
+    SCPF_choices += adder( "SCPF", "extra_A", false, "Sa", true )
+    SCPF_choices += adder( "SCPF", "extra_B", false, "Sa", true )
+    SCPF_choices += adder( "SCPF", "extra_C", false, "Sa", true )
+    SCPF_choices += adder( "SCPF", "extra_D", false, "Sa", true )
+    SCPF_choices += adder( "SCPF", "new_Test", false, "Sa", true )
+    USECASE_choices += adder( "USECASE", "basic", false, "Sa", true )
+    USECASE_choices += adder( "USECASE", "new_Test", false, "Sa", true )
+}
+def sunday(){
+    FUNC_choices += adder( "FUNC", "basic", false, "S", true )
+    FUNC_choices += adder( "FUNC", "extra_A", false, "S", true )
+    FUNC_choices += adder( "FUNC", "extra_B", false, "S", true )
+    HA_choices += adder( "HA", "basic", false, "S", true )
+    HA_choices += adder( "HA", "extra_A", false, "S", true )
+    HA_choices += adder( "HA", "extra_B", false, "S", true )
+    SCPF_choices += adder( "SCPF", "basic", false, "S", true )
+    USECASE_choices += adder( "USECASE", "basic", false, "S", true )
+}
+def adder( testCat, set, dayAdding, day, getResult ){
+    result = ""
+    for( String test in AllTheTests[ testCat ].keySet()  ){
+        if( AllTheTests[ testCat ][ test ][ set ] ){
+            if( getResult )
+                result += test + ","
+            if( dayAdding )
+                dayAdder( testCat, test, day )
+        }
+    }
+    return result
+}
+def dayAdder( testCat, testName, dayOfWeek ){
+    AllTheTests[ testCat ][ testName ][ "day" ] += dayOfWeek + ","
+}
 def runTest = [
     "VM" : [:],
     "BM" : [:]
@@ -185,6 +338,9 @@
         '''
     }
 }
+def sendToSlack( color, message ){
+    slackSend(color:color, message: message)
+}
 // Initialize the environment Setup for the onos and OnosSystemTest
 def envSetup( onos_branch, test_branch, onos_tag ){
     stage("envSetup") {
diff --git a/TestON/JenkinsFile/SCPFJenkinsFile b/TestON/JenkinsFile/SCPFJenkinsFile
index 2661cca..d9593c4 100644
--- a/TestON/JenkinsFile/SCPFJenkinsFile
+++ b/TestON/JenkinsFile/SCPFJenkinsFile
@@ -1,4 +1,5 @@
 #!groovy
+import groovy.time.*
 // This is a Jenkinsfile for a scripted pipeline for the SCPF tests
 // properties([pipelineTriggers([cron('30 19 * * *')])])
 
@@ -7,6 +8,7 @@
 none = [ "" ]
 batches = [1,100,1000]
 neighbors = ['y', 'n']
+times = [ 'y', 'n' ]
 SCPF = [
     SCPFcbench: [ test:'SCPFcbench', table:'cbench_bm_tests', results:'cbench_bm_results', file:'CbenchDB', rFile:'SCPFcbench.R', extra:none, finalResult:1, graphTitle:['Cbench Test'], dbCols:'avg', dbWhere:'', y_axis:'Throughput (Responses/sec)'],
     SCPFhostLat: [ test:'SCPFhostLat', table:'host_latency_tests', results:'host_latency_results', file:'HostAddLatency', rFile:'SCPFhostLat.R', extra:none,finalResult:1, graphTitle:['Host Latency Test'], dbCols:'avg', dbWhere:'AND scale=5', y_axis:'Latency (ms)'],
@@ -18,11 +20,11 @@
     SCPFbatchFlowResp: [ test:'SCPFbatchFlowResp', table:'batch_flow_tests', results:'batch_flow_results', file:'SCPFbatchFlowRespData', rFile:'SCPFbatchFlowResp.R', extra:none, finalResult:1, graphTitle:['Batch Flow Test - Post', 'Batch Flow Test - Del'], dbCols:[ 'elapsepost, posttoconfrm', 'elapsedel, deltoconfrm' ], dbWhere:'', y_axis:'Latency (ms)'],
     SCPFintentEventTp: [ test:'SCPFintentEventTp', table:'intent_tp_tests', results:'intent_tp_results', file:'IntentEventTPDB', rFile:'SCPFintentEventTp.R n', extra:neighbors, finalResult:1, graphTitle:['Intent Throughput Test - neighbors=0','Intent Throughput Test - neighbors=4'], dbCols:'SUM( avg ) as avg', dbWhere:[ 'AND scale=5 AND neighbors=0 GROUP BY date','AND scale=5 AND NOT neighbors=0 GROUP BY date' ], y_axis:'Throughput (Ops/sec)'],
     SCPFintentRerouteLat: [ test:'SCPFintentRerouteLat', table:'intent_reroute_latency_tests', results:'intent_reroute_latency_results', file:'IntentRerouteLatDB', rFile:'SCPFIntentInstallWithdrawRerouteLat.R n', extra:batches, finalResult:1, graphTitle:['Intent Reroute Test'], dbCols:'avg', dbWhere:'AND scale=5 AND batch_size=100', y_axis:'Latency (ms)'],
-    SCPFscalingMaxIntents: [ test:'SCPFscalingMaxIntents', table:'max_intents_tests', results:'max_intents_results', file:'ScalingMaxIntentDB', rFile:'SCPFscalingMaxIntents.R n', extra:none, finalResult:0],
+    SCPFscalingMaxIntents: [ test:'SCPFscalingMaxIntents', table:'max_intents_tests', results:'max_intents_results', file:'ScalingMaxIntentDB', rFile:'SCPFscalingMaxIntents.R n', extra:times, finalResult:0],
     SCPFintentEventTpWithFlowObj: [ test:'SCPFintentEventTp --params TEST/flowObj=True', table:'intent_tp_fobj_tests', results:'intent_tp_fobj_results', file:'IntentEventTPflowObjDB', rFile:'SCPFintentEventTp.R y', extra:neighbors,finalResult:0],
     SCPFintentInstallWithdrawLat: [ test:'SCPFintentInstallWithdrawLat', table:'intent_latency_tests', results:'intent_latency_results', file:'IntentInstallWithdrawLatDB', rFile:'SCPFIntentInstallWithdrawRerouteLat.R n', extra:batches,finalResult:1, graphTitle:['Intent Installation Test','Intent Withdrawal Test'], dbCols:[ 'install_avg','withdraw_avg' ], dbWhere:'AND scale=5 AND batch_size=100', y_axis:'Latency (ms)'],
     SCPFintentRerouteLatWithFlowObj: [ test:'SCPFintentRerouteLat --params TEST/flowObj=True', table:'intent_reroute_latency_fobj_tests', results:'intent_reroute_latency_fobj_results', file:'IntentRerouteLatDBWithFlowObj', rFile:'SCPFIntentInstallWithdrawRerouteLat.R y', extra:batches, finalResult:0],
-    SCPFscalingMaxIntentsWithFlowObj: [ test:'SCPFscalingMaxIntents --params TEST/flowObj=True', table:'max_intents_fobj_tests', results:'max_intents_fobj_results', file:'ScalingMaxIntentDBWFO', rFile:'SCPFscalingMaxIntents.R y', extra:none, finalResult:0],
+    SCPFscalingMaxIntentsWithFlowObj: [ test:'SCPFscalingMaxIntents --params TEST/flowObj=True', table:'max_intents_fobj_tests', results:'max_intents_fobj_results', file:'ScalingMaxIntentDBWFO', rFile:'SCPFscalingMaxIntents.R y', extra:times, finalResult:0],
     SCPFintentInstallWithdrawLatWithFlowObj: [ test:'SCPFintentInstallWithdrawLat --params TEST/flowObj=True', table:'intent_latency_fobj_tests', results:'intent_latency_fobj_results', file:'IntentInstallWithdrawLatDBWFO', rFile:'SCPFIntentInstallWithdrawRerouteLat.R y', extra:batches, finalResult:0],
     SCPFmastershipFailoverLat: [test:'SCPFmastershipFailoverLat', table:'mastership_failover_tests', results:'mastership_failover_results', file:'mastershipFailoverLatDB', rFile:'SCPFmastershipFailoverLat.R', extra:none, finalResult:1, graphTitle:['Mastership Failover Test'], dbCols:[ 'kill_deact_avg,deact_role_avg' ], dbWhere:'AND scale=5', y_axis:'Latency (ms)' ]
 ]
@@ -47,15 +49,20 @@
     tests[stepName] = SCPFTest(test, toBeRun, prop)
 }
 
+def now = new Date()
 // run the tests
 for ( test in tests.keySet() ){
     tests[test].call()
 }
-
+if( prop["manualRun"] == "false" ){
+    def end = new Date()
+    TimeDuration duration = TimeCategory.minus( end, now )
+    slackSend( color:"#5816EE", message: "SCPF tests ended at: " + end.toString() + "\nTime took : " + duration )
+}
 // The testName should be the key from the SCPF map
 def SCPFTest( testName, toBeRun, prop ) {
     return {
-        catchError{
+        try{
             stage(testName) {
                 if ( toBeRun ){
                     workSpace = "/var/jenkins/workspace/"+testName
@@ -104,30 +111,32 @@
                             ls -al
                             cd '''
                             // Post Results
-                            withCredentials([
-                                string(credentialsId: 'db_pass', variable: 'pass'),
-                                string(credentialsId: 'db_user', variable: 'user'),
-                                string(credentialsId: 'db_host', variable: 'host'),
-                                string(credentialsId: 'db_port', variable: 'port')]) {
-                                    def database_command = pass + "|psql --host=" + host + " --port=" + port + " --username=" + user + " --password --dbname onostest -c \"INSERT INTO " + SCPF[testName]['table'] + " VALUES('\$DATE','" + SCPF[testName]['results'] + "','\$BUILD_NUMBER', '\$ONOSBranch', \$line);\""
-                                    if (testName == "SCPFscaleTopo" || testName == "SCPFswitchLat" || testName == "SCPFportLat") {
-                                        database_command = pass + "|psql --host=" + host + " --port=" + port + " --username=" + user + " --password --dbname onostest -c \"INSERT INTO " + SCPF[testName]['table'] + " VALUES('\$DATE','" + SCPF[testName]['results'] + "','\$BUILD_NUMBER', \$line, '\$ONOSBranch');\""
-                                    }
-                                    sh '''#!/bin/bash
+                            if( prop["manualRun"] == "false" || prop["postResult"] == "true" ){
+                                withCredentials([
+                                    string(credentialsId: 'db_pass', variable: 'pass'),
+                                    string(credentialsId: 'db_user', variable: 'user'),
+                                    string(credentialsId: 'db_host', variable: 'host'),
+                                    string(credentialsId: 'db_port', variable: 'port')]) {
+                                        def database_command = pass + "|psql --host=" + host + " --port=" + port + " --username=" + user + " --password --dbname onostest -c \"INSERT INTO " + SCPF[testName]['table'] + " VALUES('\$DATE','" + SCPF[testName]['results'] + "','\$BUILD_NUMBER', '\$ONOSBranch', \$line);\""
+                                        if (testName == "SCPFscaleTopo" || testName == "SCPFswitchLat" || testName == "SCPFportLat") {
+                                            database_command = pass + "|psql --host=" + host + " --port=" + port + " --username=" + user + " --password --dbname onostest -c \"INSERT INTO " + SCPF[testName]['table'] + " VALUES('\$DATE','" + SCPF[testName]['results'] + "','\$BUILD_NUMBER', \$line, '\$ONOSBranch');\""
+                                        }
+                                        sh '''#!/bin/bash
 
-                                    export DATE=\$(date +%F_%T)
-                                    cd ~
-                                    pwd
-                                    cd /tmp
-                                    while read line
-                                    do
+                                        export DATE=\$(date +%F_%T)
+                                        cd ~
+                                        pwd
+                                        cd /tmp
+                                        while read line
+                                        do
 
-                                    echo \$line
-                                    echo ''' + database_command + '''
+                                        echo \$line
+                                        echo ''' + database_command + '''
 
-                                    done< ''' + SCPF[testName]['file'] + '''
-                                    ''' + getGraphCommand( SCPF[testName]['rFile'], SCPF[testName]['extra'], host, port, user, pass, testName, prop["ONOSBranch"] ) + '''
-                                    ''' + ( SCPF[testName]['finalResult'] ? generateCombinedResultGraph( host,port, user, pass, testName, prop["ONOSBranch"] ) : "" )
+                                        done< ''' + SCPF[testName]['file'] + '''
+                                        ''' + getGraphCommand( SCPF[testName]['rFile'], SCPF[testName]['extra'], host, port, user, pass, testName, prop["ONOSBranch"] ) + '''
+                                        ''' + ( SCPF[testName]['finalResult'] ? generateCombinedResultGraph( host,port, user, pass, testName, prop["ONOSBranch"] ) : "" )
+                                }
                             }
                             // Fetch Logs
                             sh '''#!/bin/bash
@@ -148,7 +157,7 @@
                             fi'''
                         }
                     }
-                    if( prop["manualRun"] == "false" ){
+                    if( prop["manualRun"] == "false" || prop["postResult"] == "true" ){
                         def post = build job: "Pipeline_postjob_BM", propagate: false
                     }
                     node("TestStation-BMs"){
@@ -164,6 +173,13 @@
                     }
                 }
             }
+        }catch (all) {
+            catchError{
+                if( prop["manualRun"] == "false" )
+                    slackSend(color:"#FF0000", message: "[" + prop["ONOSBranch"] + "]" + testName + " : Failed!\n"
+                    + "https://onos-jenkins.onlab.us/blue/organizations/jenkins/${env.JOB_NAME}/detail/${env.JOB_NAME}/${env.BUILD_NUMBER}/pipeline" )
+                Failed // undefined variable reference: the exception it throws lets catchError mark this stage as failed
+            }
         }
     }
 }
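
The new postResult handling in the SCPF, HA, and USECASE pipelines amounts to the guard sketched below: results are pushed to the database only for scheduled runs, or for manual runs that explicitly set postResult. The credential IDs are the ones used in the hunk above; the psql command and the surrounding bash loop are abbreviated.

    if ( prop[ "manualRun" ] == "false" || prop[ "postResult" ] == "true" ) {
        withCredentials( [ string( credentialsId: 'db_pass', variable: 'pass' ),
                           string( credentialsId: 'db_user', variable: 'user' ),
                           string( credentialsId: 'db_host', variable: 'host' ),
                           string( credentialsId: 'db_port', variable: 'port' ) ] ) {
            def database_command = pass + "|psql --host=" + host + " --port=" + port +
                                   " --username=" + user + " --password --dbname onostest -c \"INSERT ...\""
            // the pipeline embeds database_command in a bash while-read loop over the result file
            // and then calls the R graphing scripts; omitted here for brevity
        }
    }
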
diff --git a/TestON/JenkinsFile/USECASEJenkinsFile b/TestON/JenkinsFile/USECASEJenkinsFile
index 3c78dbe..63551b9 100644
--- a/TestON/JenkinsFile/USECASEJenkinsFile
+++ b/TestON/JenkinsFile/USECASEJenkinsFile
@@ -1,4 +1,5 @@
 #!groovy
+import groovy.time.*
 // This is a Jenkinsfile for a scripted pipeline for the USECASETest tests
 
 // TODO: Exception handling around steps
@@ -19,7 +20,7 @@
     "SROnosFailure": [wiki_link:prop["WikiPrefix"]+"-"+"SR Onos node Failure", wiki_file:"SROnosFailureWiki.txt"],
     "SRClusterRestart": [wiki_link:prop["WikiPrefix"]+"-"+"SR Cluster Restart", wiki_file:"SRClusterRestartWiki.txt"],
     "SRDynamic": [wiki_link:prop["WikiPrefix"]+"-"+"SR Dynamic Config", wiki_file:"SRDynamicWiki.txt"],
-    "SRHA": [wiki_link:prop["WikiPrefix"]+"-"+"SR High Availability", wiki_file:"SRHighAvailabilityWiki.txt"],
+    "SRHighAvailability": [wiki_link:prop["WikiPrefix"]+"-"+"SR High Availability", wiki_file:"SRHighAvailabilityWiki.txt"],
     "USECASE_SdnipFunction": [wiki_link:prop["WikiPrefix"]+"-"+"SDNIP Function", wiki_file:"USECASE_SdnipFunctionWiki.txt"],
     "USECASE_SdnipFunctionCluster": [wiki_link:prop["WikiPrefix"]+"-"+"SDNIP Function Cluster", wiki_file:"USECASE_SdnipFunctionClusterWiki.txt"]
 ]
@@ -42,15 +43,20 @@
     tests[stepName] = USECASETest(test, toBeRun, prop)
 }
 
+def now = new Date()
 // run the tests
 for ( test in tests.keySet() ){
     tests[test].call()
 }
-
+if( prop["manualRun"] == "false" ){
+    def end = new Date()
+    TimeDuration duration = TimeCategory.minus( end, now )
+    slackSend( color:"#5816EE", message: "USECASE tests ended at: " + end.toString() + "\nTime took : " + duration )
+}
 // The testName should be the key from the FUNC
 def USECASETest( testName, toBeRun, prop ) {
     return {
-        catchError{
+        try{
             stage(testName) {
                 if ( toBeRun ){
                     workSpace = "/var/jenkins/workspace/"+testName
@@ -119,7 +125,7 @@
                             ls -al
                             cd '''
 
-                            if( prop["manualRun"] == "false" ){
+                            if( prop["manualRun"] == "false" || prop["postResult"] == "true"  ){
                                 // Post Results
                                 withCredentials([
                                     string(credentialsId: 'db_pass', variable: 'pass'),
@@ -164,7 +170,7 @@
 
                         }
                     }
-                    if( prop["manualRun"] == "false" ){
+                    if( prop["manualRun"] == "false" || prop["postResult"] == "true" ){
                         def post = build job: "Pipeline_postjob_BM", propagate: false,
                                     parameters: [
                                         string(name: 'Wiki_Contents', value: fileContents),
@@ -184,6 +190,13 @@
                     }
                 }
             }
+        }catch (all) {
+            catchError{
+                if( prop["manualRun"] == "false" )
+                    slackSend(color:"#FF0000", message: "[" + prop["ONOSBranch"] + "]" + testName + " : Failed!\n"
+                    + "https://onos-jenkins.onlab.us/blue/organizations/jenkins/${env.JOB_NAME}/detail/${env.JOB_NAME}/${env.BUILD_NUMBER}/pipeline" )
+                Failed // undefined variable reference: the exception it throws lets catchError mark this stage as failed
+            }
         }
     }
 }
\ No newline at end of file
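
The wiki post step that is now gated on manualRun/postResult triggers a downstream job roughly as follows; propagate: false means a failing post job does not fail the test pipeline itself, and fileContents is assumed to hold the wiki text read earlier in the stage.

    if ( prop[ "manualRun" ] == "false" || prop[ "postResult" ] == "true" ) {
        def post = build job: "Pipeline_postjob_BM", propagate: false,
                   parameters: [
                       string( name: 'Wiki_Contents', value: fileContents )
                       // ... remaining wiki parameters as in the hunks above ...
                   ]
    }
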
diff --git a/TestON/JenkinsFile/scripts/SCPFIntentInstallWithdrawRerouteLat.R b/TestON/JenkinsFile/scripts/SCPFIntentInstallWithdrawRerouteLat.R
index 186c1c2..98447ca 100644
--- a/TestON/JenkinsFile/scripts/SCPFIntentInstallWithdrawRerouteLat.R
+++ b/TestON/JenkinsFile/scripts/SCPFIntentInstallWithdrawRerouteLat.R
@@ -127,6 +127,8 @@
 
 dataFrame <- na.omit( dataFrame )   # Omit any data that doesn't exist
 
+
+
 print( "Data Frame Results:" )
 print( dataFrame )
 
@@ -166,10 +168,11 @@
 
 theme <- theme( plot.title=element_text( hjust = 0.5, size = 22, face='bold' ) )
 
+
+
 # Store plot configurations as 1 variable
 fundamentalGraphData <- mainPlot + xScaleConfig + xLabel + yLabel + fillLabel + theme
 
-
 # Create the bar graph with error bars.
 # geom_bar contains:
 #    - stat: data formatting (usually "identity")
@@ -177,9 +180,11 @@
 # geom_errorbar contains similar arguments as geom_bar.
 print( "Generating bar graph with error bars." )
 barGraphFormat <- geom_bar( stat = "identity", width = width, position = "dodge" )
-errorBarFormat <- geom_errorbar( width = width, position = "dodge" )
+errorBarFormat <- geom_errorbar( width = width, position = "dodge", color=rgb( 140, 140, 140, maxColorValue=255 ) )
 title <- ggtitle( chartTitle )
-result <- fundamentalGraphData + barGraphFormat + errorBarFormat + title
+values <- geom_text( aes( x=dataFrame$scale, y=dataFrame$ms + 0.035 * max( dataFrame$ms ), label = format( dataFrame$ms, digits=3, big.mark = ",", scientific = FALSE ) ), position=position_dodge( width=1.3 ), size = 3.2, fontface = "bold" )
+
+result <- fundamentalGraphData + barGraphFormat + errorBarFormat + title + values
 
 # Save graph to file
 print( paste( "Saving bar chart with error bars to", errBarOutputFile ) )
diff --git a/TestON/JenkinsFile/scripts/SCPFbatchFlowResp.R b/TestON/JenkinsFile/scripts/SCPFbatchFlowResp.R
index 3698afb..72f66c7 100644
--- a/TestON/JenkinsFile/scripts/SCPFbatchFlowResp.R
+++ b/TestON/JenkinsFile/scripts/SCPFbatchFlowResp.R
@@ -105,11 +105,13 @@
 fundamentalGraphData <- mainPlot + xScaleConfig + xLabel + yLabel + fillLabel + theme
 
 
-print( "Generating bar graph with error bars." )
+print( "Generating bar graph." )
 width <- 0.3
 barGraphFormat <- geom_bar( stat="identity", width = width )
+sum <- fileData[ 'posttoconfrm' ] + fileData[ 'elapsepost' ]
+values <- geom_text( aes( x=dataFrame$iterative, y=sum + 0.04 * max( sum ), label = format( sum, digits=3, big.mark = ",", scientific = FALSE ) ), size = 5, fontface = "bold" )
 title <- ggtitle( chartTitle )
-result <- fundamentalGraphData + barGraphFormat + title
+result <- fundamentalGraphData + barGraphFormat + title + values
 
 
 print( paste( "Saving bar chart to", errBarOutputFile ) )
@@ -161,12 +163,14 @@
 fundamentalGraphData <- mainPlot + xScaleConfig + xLabel + yLabel + fillLabel + theme
 
 
-print( "Generating bar graph with error bars." )
+print( "Generating bar graph." )
 width <- 0.3
 barGraphFormat <- geom_bar( stat="identity", width = width )
+sum <- fileData[ 'deltoconfrm' ] + fileData[ 'elapsedel' ]
+values <- geom_text( aes( x=dataFrame$iterative, y=sum + 0.04 * max( sum ), label = format( sum, digits=3, big.mark = ",", scientific = FALSE ) ), size = 5, fontface = "bold" )
 chartTitle <- paste( "Single Bench Flow Latency - Del", "Last 3 Builds", sep = "\n" )
 title <- ggtitle( chartTitle )
-result <- fundamentalGraphData + barGraphFormat + title
+result <- fundamentalGraphData + barGraphFormat + title + values
 
 errBarOutputFile <- paste( args[ 7 ], args[ 5 ], sep="" )
 errBarOutputFile <- paste( errBarOutputFile, args[ 6 ], sep="_" )
diff --git a/TestON/JenkinsFile/scripts/SCPFcbench.R b/TestON/JenkinsFile/scripts/SCPFcbench.R
index bccfb38..c9a6c37 100644
--- a/TestON/JenkinsFile/scripts/SCPFcbench.R
+++ b/TestON/JenkinsFile/scripts/SCPFcbench.R
@@ -96,8 +96,8 @@
 theme_set( theme_grey( base_size = 20 ) )   # set the default text size of the graph.
 
 mainPlot <- ggplot( data = dataFrame, aes( x = iterative, y = ms, ymin = ms - std, ymax = ms + std ) )
-xScaleConfig <- scale_x_continuous( breaks = dataFrame$iterative, label = dataFrame$date )
-xLabel <- xlab( "date" )
+xScaleConfig <- scale_x_continuous( breaks = dataFrame$iterative, label = dataFrame$date )
+xLabel <- xlab( "Build Date" )
 yLabel <- ylab( "Responses / sec" )
 fillLabel <- labs( fill="Type" )
 theme <- theme( plot.title=element_text( hjust = 0.5, size = 28, face='bold' ) )
@@ -108,9 +108,10 @@
 print( "Generating bar graph with error bars." )
 width <- 0.3
 barGraphFormat <- geom_bar( stat="identity", position = position_dodge(), width = width, fill="#00AA13" )
-errorBarFormat <- geom_errorbar( position=position_dodge( ), width = width )
+errorBarFormat <- geom_errorbar( width = width, color=rgb( 140, 140, 140, maxColorValue=255 ) )
+values <- geom_text( aes( x=dataFrame$iterative, y=fileData[ 'avg' ] + 0.025 * max( fileData[ 'avg' ] ), label = format( fileData[ 'avg' ], digits=3, big.mark = ",", scientific = FALSE ) ), size = 4.5, fontface = "bold" )
 title <- ggtitle( chartTitle )
-result <- fundamentalGraphData + barGraphFormat + errorBarFormat + title
+result <- fundamentalGraphData + barGraphFormat + errorBarFormat + title + values
 
 
 print( paste( "Saving bar chart with error bars to", errBarOutputFile ) )
diff --git a/TestON/JenkinsFile/scripts/SCPFflowTp1g.R b/TestON/JenkinsFile/scripts/SCPFflowTp1g.R
index 89e2379..8ec053e 100644
--- a/TestON/JenkinsFile/scripts/SCPFflowTp1g.R
+++ b/TestON/JenkinsFile/scripts/SCPFflowTp1g.R
@@ -158,6 +158,7 @@
 
 theme <- theme( plot.title=element_text( hjust = 0.5, size = 28, face='bold' ) )
 
+
 # Store plot configurations as 1 variable
 fundamentalGraphData <- mainPlot + xScaleConfig + xLabel + yLabel + fillLabel + theme
 
@@ -169,9 +170,10 @@
 # geom_errorbar contains similar arguments as geom_bar.
 print( "Generating bar graph with error bars." )
 barGraphFormat <- geom_bar( stat = "identity", width = width, fill="#FFA94F" )
-errorBarFormat <- geom_errorbar( position=position_dodge( ), width = width )
+errorBarFormat <- geom_errorbar( width = width, position=position_dodge(), color=rgb( 140, 140, 140, maxColorValue=255 ) )
+values <- geom_text( aes( x=dataFrame$scale, y=dataFrame$throughput + 0.04 * max( dataFrame$throughput ), label = format( dataFrame$throughput, digits=3, big.mark = ",", scientific = FALSE ) ), size = 5, fontface = "bold" )
 title <- ggtitle( paste( chartTitle, "" ) )
-result <- fundamentalGraphData + barGraphFormat + errorBarFormat + title
+result <- fundamentalGraphData + barGraphFormat + errorBarFormat + title + values
 
 # Save graph to file
 print( paste( "Saving bar chart with error bars to", errBarOutputFile ) )
diff --git a/TestON/JenkinsFile/scripts/SCPFhostLat.R b/TestON/JenkinsFile/scripts/SCPFhostLat.R
index e17cedb..738765a 100644
--- a/TestON/JenkinsFile/scripts/SCPFhostLat.R
+++ b/TestON/JenkinsFile/scripts/SCPFhostLat.R
@@ -108,9 +108,10 @@
 print( "Generating bar graph with error bars." )
 width <- 0.9
 barGraphFormat <- geom_bar( stat="identity", position=position_dodge( ), width = width, fill="#E8BD00" )
-errorBarFormat <- geom_errorbar( position=position_dodge( ), width = width )
+errorBarFormat <- geom_errorbar( position=position_dodge(), width = width, color=rgb( 140, 140, 140, maxColorValue=255 ) )
+values <- geom_text( aes( x=dataFrame$scale, y=dataFrame$ms + 0.08 * max( dataFrame$ms ), label = format( dataFrame$ms, digits=3, big.mark = ",", scientific = FALSE ) ), size = 5, fontface = "bold" )
 title <- ggtitle( paste( chartTitle, "with Standard Error Bars" ) )
-result <- fundamentalGraphData + barGraphFormat + errorBarFormat + title
+result <- fundamentalGraphData + barGraphFormat + errorBarFormat + title + values
 
 
 print( paste( "Saving bar chart with error bars to", errBarOutputFile ) )
diff --git a/TestON/JenkinsFile/scripts/SCPFintentEventTp.R b/TestON/JenkinsFile/scripts/SCPFintentEventTp.R
index 7a55371..420b444 100644
--- a/TestON/JenkinsFile/scripts/SCPFintentEventTp.R
+++ b/TestON/JenkinsFile/scripts/SCPFintentEventTp.R
@@ -120,7 +120,6 @@
 #    These are variables that apply to all of the graphs being generated, regardless of type.
 #
 # 2. Type specific graph data is generated.
-#     Data specific for the error bar and stacked bar graphs are generated.
 #
 # 3. Generate and save the graphs.
 #      Graphs are saved to the filename above, in the directory provided in command line args
@@ -156,16 +155,12 @@
 }
 
 theme <- theme( plot.title=element_text( hjust = 0.5, size = 28, face='bold' ) )
+values <- geom_text( aes( x=dataFrame$scale, y=dataFrame$throughput + 0.04 * max( dataFrame$throughput ), label = format( dataFrame$throughput, digits=3, big.mark = ",", scientific = FALSE ) ), size = 5, fontface = "bold" )
 
 # Store plot configurations as 1 variable
-fundamentalGraphData <- mainPlot + xScaleConfig + xLabel + yLabel + fillLabel + theme
+fundamentalGraphData <- mainPlot + xScaleConfig + xLabel + yLabel + fillLabel + theme + values
 
 
-# Create the stacked bar graph with error bars.
-# geom_bar contains:
-#    - stat: data formatting (usually "identity")
-#    - width: the width of the bar types (declared above)
-# geom_errorbar contains similar arguments as geom_bar.
 print( "Generating bar graph." )
 barGraphFormat <- geom_bar( stat = "identity", width = width, fill="#169EFF" )
 title <- ggtitle( paste( chartTitle, "" ) )
diff --git a/TestON/JenkinsFile/scripts/SCPFmastershipFailoverLat.R b/TestON/JenkinsFile/scripts/SCPFmastershipFailoverLat.R
index 09ac5cb..9eea330 100644
--- a/TestON/JenkinsFile/scripts/SCPFmastershipFailoverLat.R
+++ b/TestON/JenkinsFile/scripts/SCPFmastershipFailoverLat.R
@@ -125,10 +125,11 @@
 
 print( "Generating bar graph with error bars." )
 width <- 0.9
-barGraphFormat <- geom_bar( stat="identity", position=position_dodge( ), width = width )
-errorBarFormat <- geom_errorbar( position=position_dodge( ), width = width )
-title <- ggtitle( paste( chartTitle, "with Standard Error Bars" ) )
-result <- fundamentalGraphData + barGraphFormat + errorBarFormat + title
+barGraphFormat <- geom_bar( stat="identity", position=position_dodge(), width = width )
+errorBarFormat <- geom_errorbar( width = width, position=position_dodge(), color=rgb( 140, 140, 140, maxColorValue=255 ) )
+values <- geom_text( aes( x=avgData$scale, y=avgData$ms + 0.04 * max( avgData$ms ), label = format( avgData$ms, digits=3, big.mark = ",", scientific = FALSE ) ), size = 5, fontface = "bold", position=position_dodge( 0.9 ) )
+title <- ggtitle( paste( chartTitle, "" ) )
+result <- fundamentalGraphData + barGraphFormat + errorBarFormat + title + values
 
 
 print( paste( "Saving bar chart with error bars to", errBarOutputFile ) )
@@ -140,8 +141,10 @@
 
 print( "Generating stacked bar chart." )
 stackedBarFormat <- geom_bar( stat="identity", width=width )
-title <- ggtitle( paste( chartTitle, "Total Latency" ) )
-result <- fundamentalGraphData + stackedBarFormat + title
+title <- ggtitle( paste( chartTitle, "" ) )
+sum <- fileData[ 'deact_role_avg' ] + fileData[ 'kill_deact_avg' ]
+values <- geom_text( aes( x=avgData$scale, y=sum + 0.04 * max( sum ), label = format( sum, digits=3, big.mark = ",", scientific = FALSE ) ), size = 5, fontface = "bold" )
+result <- fundamentalGraphData + stackedBarFormat + title + values
 
 
 print( paste( "Saving stacked bar chart to", stackedBarOutputFile ) )
diff --git a/TestON/JenkinsFile/scripts/SCPFportLat.R b/TestON/JenkinsFile/scripts/SCPFportLat.R
index e4b9d4f..fd24c8d 100644
--- a/TestON/JenkinsFile/scripts/SCPFportLat.R
+++ b/TestON/JenkinsFile/scripts/SCPFportLat.R
@@ -124,10 +124,11 @@
 
 print( "Generating bar graph with error bars (Port Up Latency)." )
 barGraphFormat <- geom_bar( stat="identity", width = width )
-errorBarFormat <- geom_errorbar( width = width )
-
+errorBarFormat <- geom_errorbar( width = width, color=rgb( 140, 140, 140, maxColorValue=255 ) )
+sum <- fileData[ 'up_ofp_to_dev_avg' ] + fileData[ 'up_dev_to_link_avg' ] + fileData[ 'up_link_to_graph_avg' ]
+values <- geom_text( aes( x=upAvgsData$scale, y=sum + 0.04 * max( sum ), label = format( sum, digits=3, big.mark = ",", scientific = FALSE ) ), size = 5, fontface = "bold" )
 title <- ggtitle( "Port Up Latency" )
-result <- fundamentalGraphData + barGraphFormat + errorBarFormat + title
+result <- fundamentalGraphData + barGraphFormat + errorBarFormat + title + values
 
 
 print( paste( "Saving bar chart with error bars (Port Up Latency) to", errBarOutputFileUp ) )
@@ -146,10 +147,12 @@
 
 print( "Generating bar graph with error bars (Port Down Latency)." )
 barGraphFormat <- geom_bar( stat="identity", width = width )
-errorBarFormat <- geom_errorbar( width = width )
+errorBarFormat <- geom_errorbar( width = width, color=rgb( 140, 140, 140, maxColorValue=255 ) )
+sum <- fileData[ 'down_ofp_to_dev_avg' ] + fileData[ 'down_dev_to_link_avg' ] + fileData[ 'down_link_to_graph_avg' ]
+values <- geom_text( aes( x=downAvgsData$scale, y=sum + 0.04 * max( sum ), label = format( sum, digits=3, big.mark = ",", scientific = FALSE ) ), size = 5, fontface = "bold" )
 
 title <- ggtitle( "Port Down Latency" )
-result <- fundamentalGraphData + barGraphFormat + errorBarFormat + title
+result <- fundamentalGraphData + barGraphFormat + errorBarFormat + title + values
 
 
 print( paste( "Saving bar chart with error bars (Port Down Latency) to", errBarOutputFileDown ) )
diff --git a/TestON/JenkinsFile/scripts/SCPFscaleTopo.R b/TestON/JenkinsFile/scripts/SCPFscaleTopo.R
index de22c3a..195019d 100644
--- a/TestON/JenkinsFile/scripts/SCPFscaleTopo.R
+++ b/TestON/JenkinsFile/scripts/SCPFscaleTopo.R
@@ -85,11 +85,10 @@
 dataFrame$type <- factor( dataFrame$type, levels=unique( dataFrame$type ) )
 dataFrame$iterative <- seq( 1, nrow( fileData ), by = 1 )
 
-# Obtain the sum of the averages for the plot size and center of standard deviation bars.
-avgsSum <- fileData$total_time
-
 dataFrame <- na.omit( dataFrame )   # Omit any data that doesn't exist
 
+sum <- fileData[ 'last_role_request_to_last_topology' ] + fileData[ 'last_connection_to_last_role_request' ] + fileData[ 'first_connection_to_last_connection' ]
+
 print( "Data Frame Results:" )
 print( dataFrame )
 
@@ -103,7 +102,6 @@
 #    These are variables that apply to all of the graphs being generated, regardless of type.
 #
 # 2. Type specific graph data is generated.
-#     Data specific for the error bar and stacked bar graphs are generated.
 #
 # 3. Generate and save the graphs.
 #      Graphs are saved to the filename above, in the directory provided in command line args
@@ -129,21 +127,17 @@
 fillLabel <- labs( fill="Type" )
 chartTitle <- paste( "Scale Topology Latency Test" )
 theme <- theme( plot.title=element_text( hjust = 0.5, size = 28, face='bold' ) )
+values <- geom_text( aes( x=dataFrame$iterative, y=sum + 0.02 * max( sum ), label = format( sum, big.mark = ",", scientific = FALSE ) ), fontface = "bold" )
 
 # Store plot configurations as 1 variable
-fundamentalGraphData <- mainPlot + xScaleConfig + xLabel + yLabel + fillLabel + theme
+fundamentalGraphData <- mainPlot + xScaleConfig + xLabel + yLabel + fillLabel + theme + values
 
-# Create the stacked bar graph with error bars.
-# geom_bar contains:
-#    - stat: data formatting (usually "identity")
-#    - width: the width of the bar types (declared above)
-# geom_errorbar contains similar arguments as geom_bar.
-print( "Generating bar graph with error bars." )
+print( "Generating bar graph." )
 barGraphFormat <- geom_bar( stat = "identity", width = width )
 title <- ggtitle( paste( chartTitle, "" ) )
 result <- fundamentalGraphData + barGraphFormat + title
 
 # Save graph to file
-print( paste( "Saving bar chart with error bars to", outputFile ) )
+print( paste( "Saving bar chart to", outputFile ) )
 ggsave( outputFile, width = 10, height = 6, dpi = 200 )
-print( paste( "Successfully wrote bar chart with error bars out to", outputFile ) )
+print( paste( "Successfully wrote bar chart out to", outputFile ) )
diff --git a/TestON/JenkinsFile/scripts/SCPFscalingMaxIntents.R b/TestON/JenkinsFile/scripts/SCPFscalingMaxIntents.R
index bc90806..02f4f28 100644
--- a/TestON/JenkinsFile/scripts/SCPFscalingMaxIntents.R
+++ b/TestON/JenkinsFile/scripts/SCPFscalingMaxIntents.R
@@ -75,7 +75,7 @@
 fileData <- dbGetQuery( con, command )
 
 if ( args[ 1 ] == "y" ){
-    chartTitle <- "Number of Installed Intents & Flows w/ FlowObj"
+    chartTitle <- "Number of Installed Intents & Flows\n with Flow Objectives"
 } else {
     chartTitle <- "Number of Installed Intents & Flows"
 }
@@ -125,8 +125,9 @@
 print( "Generating bar graph bars." )
 width <- 1.3
 barGraphFormat <- geom_bar( stat="identity", position=position_dodge( ), width = width )
+values <- geom_text( aes( x=dataFrame$scale, y=dataFrame$ms + 0.02 * max( dataFrame$ms ), label = format( dataFrame$ms, digits=3, big.mark = ",", scientific = FALSE ) ), size = 3.2, fontface = "bold", position=position_dodge( width=1.25 ) )
 title <- ggtitle( chartTitle )
-result <- fundamentalGraphData + barGraphFormat + title
+result <- fundamentalGraphData + barGraphFormat + title + values
 
 
 print( paste( "Saving bar chart to", outputFile ) )
diff --git a/TestON/JenkinsFile/scripts/SCPFswitchLat.R b/TestON/JenkinsFile/scripts/SCPFswitchLat.R
index 0493859..8a03863 100644
--- a/TestON/JenkinsFile/scripts/SCPFswitchLat.R
+++ b/TestON/JenkinsFile/scripts/SCPFswitchLat.R
@@ -120,32 +120,36 @@
 
 print( "Generating bar graph with error bars (Switch Up Latency)." )
 barGraphFormat <- geom_bar( stat="identity", width = width )
-errorBarFormat <- geom_errorbar( width = width )
-
+errorBarFormat <- geom_errorbar( width = width, color=rgb( 140, 140, 140, maxColorValue=255 ) )
+sum <- fileData[ 'up_device_to_graph_avg' ] + fileData[ 'role_reply_to_device_avg' ] + fileData[ 'role_request_to_role_reply_avg' ] + fileData[ 'feature_reply_to_role_request_avg' ] + fileData[ 'tcp_to_feature_reply_avg' ]
+values <- geom_text( aes( x=upAvgsData$scale, y=sum + 0.04 * max( sum ), label = format( sum, digits=3, big.mark = ",", scientific = FALSE ) ), size = 5, fontface = "bold" )
 title <- ggtitle( "Switch Up Latency" )
-result <- fundamentalGraphData + barGraphFormat + errorBarFormat + title
+result <- fundamentalGraphData + barGraphFormat + errorBarFormat + title + values
 
 
 print( paste( "Saving bar chart with error bars (Switch Up Latency) to", errBarOutputFileUp ) )
 ggsave( errBarOutputFileUp, width = 10, height = 6, dpi = 200 )
 
-
 print( paste( "Successfully wrote bar chart with error bars (Switch Up Latency) out to", errBarOutputFileUp ) )
 
+# Generate switch down latency graph
 
 print( "Generating fundamental graph data (Switch Down Latency)." )
 
 mainPlot <- ggplot( data = downAvgsData, aes( x = scale, y = ms, fill = type, ymin = fileData[ 'down_end_to_end_avg' ] - stds, ymax = fileData[ 'down_end_to_end_avg' ] + stds ) )
 theme <- theme( plot.title=element_text( hjust = 0.5, size = 28, face='bold' ) )
 
+
 fundamentalGraphData <- mainPlot + xScaleConfig + xLabel + yLabel + fillLabel + theme
 
 print( "Generating bar graph with error bars (Switch Down Latency)." )
 barGraphFormat <- geom_bar( stat="identity", width = width )
-errorBarFormat <- geom_errorbar( width = width )
+errorBarFormat <- geom_errorbar( width = width, color=rgb( 140, 140, 140, maxColorValue=255 ) )
 
 title <- ggtitle( "Switch Down Latency" )
-result <- fundamentalGraphData + barGraphFormat + errorBarFormat + title
+sum <- fileData[ 'down_device_to_graph_avg' ] + fileData[ 'ack_to_device_avg' ] + fileData[ 'fin_ack_to_ack_avg' ]
+values <- geom_text( aes( x=downAvgsData$scale, y=sum + 0.04 * max( sum ), label = format( sum, digits=3, big.mark = ",", scientific = FALSE ) ), size = 5, fontface = "bold" )
+result <- fundamentalGraphData + barGraphFormat + errorBarFormat + title + values
 
 
 print( paste( "Saving bar chart with error bars (Switch Down Latency) to", errBarOutputFileDown ) )
diff --git a/TestON/bin/cli.py b/TestON/bin/cli.py
index 5a0b3d5..a7e1297 100755
--- a/TestON/bin/cli.py
+++ b/TestON/bin/cli.py
@@ -1,5 +1,5 @@
 #!/usr/bin/env python
-"""
+'''
 Created on 20-Dec-2012
 Copyright 2012 Open Networking Foundation
 
@@ -10,7 +10,7 @@
     TestON is free software: you can redistribute it and/or modify
     it under the terms of the GNU General Public License as published by
     the Free Software Foundation, either version 2 of the License, or
-    ( at your option ) any later version.
+    (at your option) any later version.
 
     TestON is distributed in the hope that it will be useful,
     but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -21,7 +21,9 @@
     along with TestON.  If not, see <http://www.gnu.org/licenses/>.
 
 
-"""
+'''
+
+
 """
 cli will provide the CLI shell for teston framework.
 
@@ -33,6 +35,7 @@
 teston> run test DpctlTest
 Several useful commands are provided.
 """
+
 from subprocess import call
 from cmd import Cmd
 from os import isatty
@@ -43,18 +46,16 @@
 import threading
 import __builtin__
 import pprint
-dump = pprint.PrettyPrinter( indent=4 )
+dump = pprint.PrettyPrinter(indent=4)
 __builtin__.testthread = False
 introduction = "TestON is the testing framework \nDeveloped by Paxterra Solutions (www.paxterrasolutions.com)"
 __builtin__.COLORS = False
 
-path = re.sub( "/bin$", "", sys.path[ 0 ] )
+path = re.sub( "/bin$", "", sys.path[0] )
 sys.path.insert( 1, path )
 from core.teston import *
 
-
-class CLI( threading.Thread, Cmd, object ):
-
+class CLI( threading.Thread,Cmd,object ):
     "command-line interface to execute the test."
 
     prompt = 'teston> '
@@ -63,7 +64,7 @@
         self.teston = teston
 
         self._mainevent = threading.Event()
-        threading.Thread.__init__( self )
+        threading.Thread.__init__(self)
         self.main_stop = False
         self.locals = { 'test': teston }
         self.stdin = stdin
@@ -85,70 +86,69 @@
         Cmd.do_help( self, line )
         if line is '':
             output( self.helpStr )
-
-    def do_run( self, args ):
-        """
+    def do_run(self,args):
+        '''
         run command will execute the test with the following optional command line arguments
         logdir <directory to store logs in>
         testcases <list of testcases separated by comma or range of testcases separated by hyphen>
         mail <mail-id or list of mail-ids separated by comma>
         example 1, to execute the examples specified in the ~/examples directory.
-        """
+        '''
         try:
             args = args.split()
             options = {}
-            options = self.parseArgs( args, options )
-            options = dictToObj( options )
+            options = self.parseArgs(args,options)
+            options = dictToObj(options)
             if not testthread:
-                test = TestThread( options )
+                test = TestThread(options)
                 test.start()
                 while test.isAlive():
-                    test.join( 1 )
+                    test.join(1)
             else:
-                print main.TEST + " test execution paused, please resume that before executing to another test"
+                print main.TEST+ " test execution paused, please resume that before executing to another test"
         except KeyboardInterrupt, SystemExit:
             print "Interrupt called, Exiting."
             test._Thread__stop()
             main.cleanup()
             main.exit()
 
-    def do_resume( self, line ):
-        """
+    def do_resume(self, line):
+        '''
         resume command will continue the execution of paused test.
         teston>resume
-        [ 2013-01-07 23:03:44.640723 ] [ PoxTest ] [ STEP ]  1.1: Checking the host reachability using pingHost
+        [2013-01-07 23:03:44.640723] [PoxTest] [STEP]  1.1: Checking the host reachability using pingHost
         2013-01-07 23:03:44,858 - PoxTest - INFO - Expected Prompt Found
         ....
-        """
+        '''
         if testthread:
             testthread.play()
-        else:
+        else :
             print "There is no test to resume"
 
-    def do_nextstep( self, line ):
-        """
+    def do_nextstep(self,line):
+        '''
         nextstep will execute the next step of the paused test and
         it will pause the test again after the step finishes.
 
         teston> nextstep
         Will pause the test's execution, after completion of this step.....
 
-        teston> [ 2013-01-07 21:24:26.286601 ] [ PoxTest ] [ STEP ]  1.8: Checking the host reachability using pingHost
+        teston> [2013-01-07 21:24:26.286601] [PoxTest] [STEP]  1.8: Checking the host reachability using pingHost
         2013-01-07 21:24:26,455 - PoxTest - INFO - Expected Prompt Found
         .....
         teston>
 
-        """
+        '''
         if testthread:
-            main.log.info( "Executing the nextstep, Will pause test execution, after completion of the step" )
+            main.log.info("Executing the nextstep, Will pause test execution, after completion of the step")
             testthread.play()
-            time.sleep( .1 )
+            time.sleep(.1)
             testthread.pause()
         else:
             print "There is no paused test "
 
-    def do_dumpvar( self, line ):
-        """
+    def do_dumpvar(self,line):
+        '''
         dumpvar will print all the test data in raw format.
         usage:
         teston>dumpvar main
@@ -159,54 +159,56 @@
 
         teston>dumpvar topology
         here 'topology' will be the topology specification of the test specified in the topo file.
-        """
+        '''
         if testthread:
             if line == "main":
-                dump.pprint( vars( main ) )
-            else:
-                try:
-                    dump.pprint( vars( main )[ line ] )
+                dump.pprint(vars(main))
+            else :
+                try :
+                    dump.pprint(vars(main)[line])
                 except KeyError as e:
                     print e
-        else:
+        else :
             print "There is no paused test "
 
-    def do_currentcase( self, line ):
-        """
+    def do_currentcase(self,line):
+        '''
         currentcase will return the current case in the test execution.
 
         teston>currentcase
         Currently executing test case is: 2
 
-        """
+        '''
         if testthread:
-            print "Currently executing test case is: " + str( main.CurrentTestCaseNumber )
-        else:
+            print "Currently executing test case is: "+str(main.CurrentTestCaseNumber)
+        else :
             print "There is no paused test "
 
-    def do_currentstep( self, line ):
-        """
+
+    def do_currentstep(self,line):
+        '''
         currentstep will return the current step in the test execution.
 
         teston>currentstep
         Currently executing test step is: 2.3
-        """
+        '''
         if testthread:
-            print "Currently executing test step is: " + str( main.CurrentTestCaseNumber ) + '.' + str( main.stepCount )
-        else:
+            print "Currently executing test step is: "+str(main.CurrentTestCaseNumber)+'.'+str(main.stepCount)
+        else :
             print "There is no paused test "
 
-    def do_stop( self, line ):
-        """
+
+    def do_stop(self,line):
+        '''
         Will stop the paused test, if any !
-        """
+        '''
         if testthread:
             testthread.stop()
 
         return 'exited by user command'
 
-    def do_gettest( self, line ):
-        """
+    def do_gettest(self,line):
+        '''
         gettest will return the test name which is under execution or recently executed.
 
         Test under execution:
@@ -215,18 +217,18 @@
 
         Test recently executed:
         Recently executed test is: MininetTest
-        """
-        try:
-            if testthread:
-                print "Currently executing Test is: " + main.TEST
-            else:
-                print "Recently executed test is: " + main.TEST
+        '''
+        try :
+            if testthread :
+                print "Currently executing Test is: "+main.TEST
+            else :
+                print "Recently executed test is: "+main.TEST
 
         except NameError:
             print "There is no previously executed Test"
 
-    def do_showlog( self, line ):
-        """
+    def do_showlog(self,line):
+        '''
         showlog will show the test's Log
         teston>showlog
         Last executed test's log is : //home/openflow/TestON/logs/PoxTest_07_Jan_2013_21_42_11/PoxTest_07_Jan_2013_21_42_11.log
@@ -234,17 +236,17 @@
         teston>showlog
         Currently executing Test's log is: /home/openflow/TestON/logs/PoxTest_07_Jan_2013_21_46_58/PoxTest_07_Jan_2013_21_46_58.log
         .....
-        """
-        try:
-            if testthread:
-                print "Currently executing Test's log is: " + main.LogFileName
+        '''
+        try :
+            if testthread :
+                print "Currently executing Test's log is: "+main.LogFileName
 
-            else:
-                print "Last executed test's log is : " + main.LogFileName
+            else :
+                print "Last executed test's log is : "+main.LogFileName
 
             logFile = main.LogFileName
-            logFileHandler = open( logFile, 'r' )
-            for msg in logFileHandler.readlines():
+            logFileHandler = open(logFile, 'r')
+            for msg in logFileHandler.readlines() :
                 print msg,
 
             logFileHandler.close()
@@ -252,77 +254,79 @@
         except NameError:
             print "There is no previously executed Test"
 
-    def parseArgs( self, args, options ):
-        """
+
+
+    def parseArgs(self,args,options):
+        '''
         This will parse the command line arguments.
-        """
-        options = self.initOptions( options )
-        try:
+        '''
+        options = self.initOptions(options)
+        try :
             index = 0
             while index < len( args ):
-                option = args[ index ]
-                if index > 0:
-                    if re.match( "--params", option, flags=0 ):
+                option = args[index]
+                if index > 0 :
+                    if re.match("--params", option, flags=0):
                         # check if there is a params
-                        options[ 'params' ].append( args[ index + 1 ] )
-                    elif re.match( "logdir|mail|example|testdir|testcases|onoscell", option, flags=0 ):
-                        options[ option ] = args[ index + 1 ]
-                        options = self.testcasesInRange( index + 1, option, args, options )
+                        options['params'].append(args[index+1])
+                    elif re.match("logdir|mail|example|testdir|testcases|onoscell", option, flags = 0):
+                        options[option] = args[index+1]
+                        options = self.testcasesInRange(index+1,option,args,options)
                     index += 2
-                else:
-                    options[ 'testname' ] = option
+                else :
+                    options['testname'] = option
                     index += 1
         except IndexError as e:
-            print ( e )
+            print (e)
             main.cleanup()
             main.exit()
 
         return options
 
-    def initOptions( self, options ):
-        """
+    def initOptions(self,options):
+        '''
         This will initialize the commandline options.
-        """
-        options[ 'logdir' ] = None
-        options[ 'mail' ] = None
-        options[ 'example' ] = None
-        options[ 'testdir' ] = None
-        options[ 'testcases' ] = None
-        options[ 'onoscell' ] = None
+        '''
+        options['logdir'] = None
+        options['mail'] = None
+        options['example'] = None
+        options['testdir'] = None
+        options['testcases'] = None
+        options['onoscell'] = None
         # init params as a empty list
-        options[ 'params' ] = []
+        options['params'] = []
         return options
 
-    def testcasesInRange( self, index, option, args, options ):
-        """
-        This method will handle testcases list,specified in range [ 1-10 ].
-        """
-        if re.match( "testcases", option, 1 ):
+    def testcasesInRange(self,index,option,args,options):
+        '''
+        This method will handle the testcases list, specified in range [1-10].
+        '''
+        if re.match("testcases",option,1):
             testcases = []
-            args[ index ] = re.sub( "\[|\]", "", args[ index ], 0 )
-            m = re.match( "(\d+)\-(\d+)", args[ index ], flags=0 )
+            args[index] = re.sub("\[|\]","",args[index],0)
+            m = re.match("(\d+)\-(\d+)",args[index],flags=0)
             if m:
-                start_case = eval( m.group( 1 ) )
-                end_case = eval( m.group( 2 ) )
-                if ( start_case <= end_case ):
+                start_case = eval(m.group(1))
+                end_case = eval(m.group(2))
+                if (start_case <= end_case):
                     i = start_case
                     while i <= end_case:
-                        testcases.append( i )
-                        i = i + 1
-                else:
+                        testcases.append(i)
+                        i= i+1
+                else :
                     print "Please specify testcases properly like 1-5"
-            else:
-                options[ option ] = args[ index ]
+            else :
+                options[option] = args[index]
                 return options
-            options[ option ] = str( testcases )
+            options[option] = str(testcases)
 
         return options
 
-    def cmdloop( self, intro=introduction ):
+    def cmdloop(self, intro=introduction):
         print introduction
         while True:
             try:
-                super( CLI, self ).cmdloop( intro="" )
+                super(CLI, self).cmdloop(intro="")
                 self.postloop()
             except KeyboardInterrupt:
                 if testthread:
@@ -332,68 +336,69 @@
                     sys.exit()
 
     def do_echo( self, line ):
-        """
+        '''
         Echoing of given input.
-        """
-        output( line )
+        '''
+        output(line)
 
     def do_sh( self, line ):
-        """
+        '''
         Run an external shell command
         sh pwd
         sh ifconfig etc.
-        """
+        '''
         call( line, shell=True )
 
+
     def do_py( self, line ):
-        """
+        '''
         Evaluate a Python expression.
 
-        py main.log.info( "Sample Log Information" )
+        py main.log.info("Sample Log Information")
         2013-01-07 12:07:26,804 - PoxTest - INFO - Sample Log Information
 
-        """
+        '''
         try:
             exec( line )
         except Exception as e:
             output( str( e ) + '\n' )
 
-    def do_interpret( self, line ):
-        """
+    def do_interpret(self,line):
+        '''
         interpret will translate the single line openspeak statement to equivalent python script.
 
         teston> interpret ASSERT result EQUALS main.TRUE ONPASS "Ping executed successfully" ONFAIL "Ping failed"
-        utilities.assert_equals( expect=main.TRUE,actual=result,onpass="Ping executed successfully",onfail="Ping failed" )
+        utilities.assert_equals(expect=main.TRUE,actual=result,onpass="Ping executed successfully",onfail="Ping failed")
 
-        """
+        '''
         from core import openspeak
         ospk = openspeak.OpenSpeak()
-        try:
-            translated_code = ospk.interpret( text=line )
+        try :
+            translated_code = ospk.interpret(text=line)
             print translated_code
         except AttributeError as e:
             print 'Dynamic params are not allowed in single statement translations'
 
-    def do_do( self, line ):
-        """
+    def do_do (self,line):
+        '''
         Do will translate and execute the openspeak statement for the paused test.
         do <OpenSpeak statement>
-        """
+        '''
         if testthread:
             from core import openspeak
             ospk = openspeak.OpenSpeak()
-            try:
-                translated_code = ospk.interpret( text=line )
-                eval( translated_code )
+            try :
+                translated_code = ospk.interpret(text=line)
+                eval(translated_code)
             except ( AttributeError, SyntaxError ) as e:
                 print 'Dynamic params are not allowed in single statement translations:'
                 print e
-        else:
+        else :
             print "Do will translate and execute the openspeak statement for the paused test.\nPlease use interpret to translate the OpenSpeak statement."
 
-    def do_compile( self, line ):
-        """
-        compile will translate the openspeak ( .ospk ) file into TestON test script ( python ).
+    def do_compile(self,line):
+        '''
+        compile will translate the openspeak (.ospk) file into TestON test script (python).
         It will receive the openspeak file path as input and will generate
         equivalent test-script file in the same directory.
 
@@ -402,15 +407,15 @@
         teston>compile /home/openflow/TestON/PoxTest.ospk
 
         Auto-generated test-script file is /home/openflow/TestON/PoxTest.py
-        """
+        '''
         from core import openspeak
         openspeak = openspeak.OpenSpeak()
         openspeakfile = line
-        if os.path.exists( openspeakfile ):
-            openspeak.compiler( openspeakfile=openspeakfile, writetofile=1 )
-            print "Auto-generated test-script file is " + re.sub( "ospk", "py", openspeakfile, 0 )
+        if os.path.exists(openspeakfile) :
+            openspeak.compiler(openspeakfile=openspeakfile,writetofile=1)
+            print "Auto-generated test-script file is "+ re.sub("ospk","py",openspeakfile,0)
         else:
-            print 'There is no such file : ' + line
+            print 'There is no such file : '+line
 
     def do_exit( self, _line ):
         "Exit"
@@ -435,7 +440,7 @@
         return isatty( self.stdin.fileno() )
 
     def do_source( self, line ):
-        """
+        '''
         Read shell commands from an input file and execute them sequentially.
         cmdsource.txt :
 
@@ -446,9 +451,10 @@
         /home/openflow/TestON/bin/
         cli.py  __init__.py
 
-        """
+        '''
+
         args = line.split()
-        if len( args ) != 1:
+        if len(args) != 1:
             error( 'usage: source <file>\n' )
             return
         try:
@@ -465,45 +471,43 @@
     def do_time( self, line ):
         "Measure time taken for any command in TestON."
         start = time.time()
-        self.onecmd( line )
+        self.onecmd(line)
         elapsed = time.time() - start
-        self.stdout.write( "*** Elapsed time: %0.6f secs\n" % elapsed )
+        self.stdout.write("*** Elapsed time: %0.6f secs\n" % elapsed)
 
     def default( self, line ):
-        "Called on an input line when the command prefix is not recognized."
+        """Called on an input line when the command prefix is not recognized."""
         first, args, line = self.parseline( line )
         if not args:
             return
-        if args and len( args ) > 0 and args[ -1 ] == '\n':
+        if args and len(args) > 0 and args[ -1 ] == '\n':
             args = args[ :-1 ]
         rest = args.split( ' ' )
 
         error( '*** Unknown command: %s\n' % first )
 
-
-class TestThread( threading.Thread ):
-
-    """
+class TestThread(threading.Thread):
+    '''
     TestThread class will handle the test execution and will communicate with the thread in the do_run.
-    """
-    def __init__( self, options ):
+    '''
+    def __init__(self,options):
         self._stopevent = threading.Event()
-        threading.Thread.__init__( self )
+        threading.Thread.__init__(self)
         self.is_stop = False
         self.options = options
         __builtin__.testthread = self
 
-    def run( self ):
-        """
+    def run(self):
+        '''
         Will execute the test.
-        """
-        while not self.is_stop:
+        '''
+        while not self.is_stop :
             if not self._stopevent.isSet():
-                self.test_on = TestON( self.options )
-                try:
+                self.test_on = TestON(self.options)
+                try :
                     if self.test_on.init_result:
                         result = self.test_on.run()
-                        if not self.is_stop:
+                        if not self.is_stop :
                             result = self.test_on.cleanup()
                         self.is_stop = True
                 except KeyboardInterrupt:
@@ -513,10 +517,10 @@
 
         __builtin__.testthread = False
 
-    def pause( self ):
-        """
+    def pause(self):
+        '''
         Will pause the test.
-        """
+        '''
         if not cli.pause:
             print "Will pause the test's execution, after completion of this step.....\n\n\n\n"
             cli.pause = True
@@ -529,59 +533,56 @@
             result = self.test_on.cleanup()
             self.is_stop = True
 
-    def play( self ):
-        """
+    def play(self):
+        '''
         Will resume the paused test.
-        """
+        '''
         self._stopevent.clear()
         cli.pause = False
 
-    def stop( self ):
-        """
+    def stop(self):
+        '''
         Will stop the test execution.
-        """
+        '''
+
         print "Stopping the test"
         self.is_stop = True
         cli.stop = True
         __builtin__.testthread = False
 
-
-def output( msg ):
-    """
+def output(msg):
+    '''
     Simply, print the message in console
-    """
+    '''
     print msg
 
-
-def error( msg ):
-    """
+def error(msg):
+    '''
     print the error message.
-    """
+    '''
     print msg
 
-
-def dictToObj( dictionary ):
-    """
+def dictToObj(dictionary):
+    '''
     This facilitates converting the dictionary to an object.
     This method will help to send options as object format to the test.
-    """
-    if isinstance( dictionary, list ):
-        dictionary = [ dictToObj( x ) for x in dictionary ]
-    if not isinstance( dictionary, dict ):
+    '''
+    if isinstance(dictionary, list):
+        dictionary = [dictToObj(x) for x in dictionary]
+    if not isinstance(dictionary, dict):
         return dictionary
-
-    class Convert( object ):
+    class Convert(object):
         pass
     obj = Convert()
     for k in dictionary:
-        obj.__dict__[ k ] = dictToObj( dictionary[ k ] )
+        obj.__dict__[k] = dictToObj(dictionary[k])
     return obj
 
 
 if __name__ == '__main__':
-    if len( sys.argv ) > 1:
+    if len(sys.argv) > 1:
         __builtin__.COLORS = True
-        CLI( "test" ).onecmd( ' '.join( sys.argv[ 1: ] ) )
+        CLI("test").onecmd(' '.join(sys.argv[1:]))
     else:
         __builtin__.COLORS = False
-        CLI( "test" ).cmdloop()
+        CLI("test").cmdloop()
diff --git a/TestON/core/Thread.py b/TestON/core/Thread.py
index 9c655a5..4c040a6 100644
--- a/TestON/core/Thread.py
+++ b/TestON/core/Thread.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python
 
-"""
-Copyright 2015 Open Networking Foundation ( ONF )
+'''
+Copyright 2015 Open Networking Foundation (ONF)
 
 Please refer questions to either the onos test mailing list at <onos-test@onosproject.org>,
 the System Testing Plans and Results wiki page at <https://wiki.onosproject.org/x/voMg>,
@@ -10,7 +10,7 @@
     TestON is free software: you can redistribute it and/or modify
     it under the terms of the GNU General Public License as published by
     the Free Software Foundation, either version 2 of the License, or
-    ( at your option ) any later version.
+    (at your option) any later version.
 
     TestON is distributed in the hope that it will be useful,
     but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -19,12 +19,10 @@
 
     You should have received a copy of the GNU General Public License
     along with TestON.  If not, see <http://www.gnu.org/licenses/>.
-"""
+'''
 import threading
 
-
 class Thread( threading.Thread ):
-
     def __init__( self, target=None, threadID=None, name="", args=(),
                   kwargs={} ):
         super( Thread, self ).__init__()
diff --git a/TestON/core/ast.py b/TestON/core/ast.py
index 33464da..fd5dfdb 100644
--- a/TestON/core/ast.py
+++ b/TestON/core/ast.py
@@ -19,7 +19,7 @@
     Additionally various helper functions are provided that make working with
     the trees simpler.  The main intention of the helper functions and this
     module in general is to provide an easy to use interface for libraries
-    that work tightly with the python syntax ( template engines for example ).
+    that work tightly with the python syntax (template engines for example).
 
 
     :copyright: Copyright 2008 by Armin Ronacher.
@@ -29,59 +29,58 @@
 from _ast import __version__
 
 
-def parse( source, filename='<unknown>', mode='exec' ):
+def parse(source, filename='<unknown>', mode='exec'):
     """
     Parse the source into an AST node.
-    Equivalent to compile( source, filename, mode, PyCF_ONLY_AST ).
+    Equivalent to compile(source, filename, mode, PyCF_ONLY_AST).
     """
-    return compile( source, filename, mode, PyCF_ONLY_AST )
+    return compile(source, filename, mode, PyCF_ONLY_AST)
 
 
-def literal_eval( node_or_string ):
+def literal_eval(node_or_string):
     """
     Safely evaluate an expression node or a string containing a Python
     expression.  The string or node provided may only consist of the following
     Python literal structures: strings, numbers, tuples, lists, dicts, booleans,
     and None.
     """
-    _safe_names = { 'None': None, 'True': True, 'False': False }
-    if isinstance( node_or_string, basestring ):
-        node_or_string = parse( node_or_string, mode='eval' )
-    if isinstance( node_or_string, Expression ):
+    _safe_names = {'None': None, 'True': True, 'False': False}
+    if isinstance(node_or_string, basestring):
+        node_or_string = parse(node_or_string, mode='eval')
+    if isinstance(node_or_string, Expression):
         node_or_string = node_or_string.body
-
-    def _convert( node ):
-        if isinstance( node, Str ):
+    def _convert(node):
+        if isinstance(node, Str):
             return node.s
-        elif isinstance( node, Num ):
+        elif isinstance(node, Num):
             return node.n
-        elif isinstance( node, Tuple ):
-            return tuple( map( _convert, node.elts ) )
-        elif isinstance( node, List ):
-            return list( map( _convert, node.elts ) )
-        elif isinstance( node, Dict ):
-            return dict( ( _convert( k ), _convert( v ) ) for k, v
-                         in zip( node.keys, node.values ) )
-        elif isinstance( node, Name ):
+        elif isinstance(node, Tuple):
+            return tuple(map(_convert, node.elts))
+        elif isinstance(node, List):
+            return list(map(_convert, node.elts))
+        elif isinstance(node, Dict):
+            return dict((_convert(k), _convert(v)) for k, v
+                        in zip(node.keys, node.values))
+        elif isinstance(node, Name):
             if node.id in _safe_names:
-                return _safe_names[ node.id ]
-        elif isinstance( node, BinOp ) and \
-             isinstance( node.op, ( Add, Sub ) ) and \
-             isinstance( node.right, Num ) and \
-             isinstance( node.right.n, complex ) and \
-             isinstance( node.left, Num ) and \
-             isinstance( node.left.n, ( int, long, float ) ):
+                return _safe_names[node.id]
+        elif isinstance(node, BinOp) and \
+             isinstance(node.op, (Add, Sub)) and \
+             isinstance(node.right, Num) and \
+             isinstance(node.right.n, complex) and \
+             isinstance(node.left, Num) and \
+             isinstance(node.left.n, (int, long, float)):
             left = node.left.n
             right = node.right.n
-            if isinstance( node.op, Add ):
+            if isinstance(node.op, Add):
                 return left + right
             else:
                 return left - right
-        raise ValueError( 'malformed string' )
-    return _convert( node_or_string )
+        raise ValueError('malformed string')
+    return _convert(node_or_string)
 
 
-def dump( node, annotate_fields=True, include_attributes=False ):
+def dump(node, annotate_fields=True, include_attributes=False):
     """
     Return a formatted dump of the tree in *node*.  This is mainly useful for
     debugging purposes.  The returned string will show the names and the values
@@ -90,40 +89,40 @@
     numbers and column offsets are not dumped by default.  If this is wanted,
     *include_attributes* can be set to True.
     """
-    def _format( node ):
-        if isinstance( node, AST ):
-            fields = [ ( a, _format( b ) ) for a, b in iter_fields( node ) ]
-            rv = '%s(%s' % ( node.__class__.__name__, ', '.join(
-                ( '%s=%s' % field for field in fields )
+    def _format(node):
+        if isinstance(node, AST):
+            fields = [(a, _format(b)) for a, b in iter_fields(node)]
+            rv = '%s(%s' % (node.__class__.__name__, ', '.join(
+                ('%s=%s' % field for field in fields)
                 if annotate_fields else
-                ( b for a, b in fields )
-            ) )
+                (b for a, b in fields)
+            ))
             if include_attributes and node._attributes:
                 rv += fields and ', ' or ' '
-                rv += ', '.join( '%s=%s' % ( a, _format( getattr( node, a ) ) )
-                                 for a in node._attributes )
+                rv += ', '.join('%s=%s' % (a, _format(getattr(node, a)))
+                                for a in node._attributes)
             return rv + ')'
-        elif isinstance( node, list ):
-            return '[%s]' % ', '.join( _format( x ) for x in node )
-        return repr( node )
-    if not isinstance( node, AST ):
-        raise TypeError( 'expected AST, got %r' % node.__class__.__name__ )
-    return _format( node )
+        elif isinstance(node, list):
+            return '[%s]' % ', '.join(_format(x) for x in node)
+        return repr(node)
+    if not isinstance(node, AST):
+        raise TypeError('expected AST, got %r' % node.__class__.__name__)
+    return _format(node)
 
 
-def copy_location( new_node, old_node ):
+def copy_location(new_node, old_node):
     """
-    Copy source location ( `lineno` and `col_offset` attributes ) from
+    Copy source location (`lineno` and `col_offset` attributes) from
     *old_node* to *new_node* if possible, and return *new_node*.
     """
     for attr in 'lineno', 'col_offset':
         if attr in old_node._attributes and attr in new_node._attributes \
-           and hasattr( old_node, attr ):
-            setattr( new_node, attr, getattr( old_node, attr ) )
+           and hasattr(old_node, attr):
+            setattr(new_node, attr, getattr(old_node, attr))
     return new_node
 
 
-def fix_missing_locations( node ):
+def fix_missing_locations(node):
     """
     When you compile a node tree with compile(), the compiler expects lineno and
     col_offset attributes for every node that supports them.  This is rather
@@ -131,92 +130,91 @@
     recursively where not already set, by setting them to the values of the
     parent node.  It works recursively starting at *node*.
     """
-    def _fix( node, lineno, col_offset ):
+    def _fix(node, lineno, col_offset):
         if 'lineno' in node._attributes:
-            if not hasattr( node, 'lineno' ):
+            if not hasattr(node, 'lineno'):
                 node.lineno = lineno
             else:
                 lineno = node.lineno
         if 'col_offset' in node._attributes:
-            if not hasattr( node, 'col_offset' ):
+            if not hasattr(node, 'col_offset'):
                 node.col_offset = col_offset
             else:
                 col_offset = node.col_offset
-        for child in iter_child_nodes( node ):
-            _fix( child, lineno, col_offset )
-    _fix( node, 1, 0 )
+        for child in iter_child_nodes(node):
+            _fix(child, lineno, col_offset)
+    _fix(node, 1, 0)
     return node
 
 
-def increment_lineno( node, n=1 ):
+def increment_lineno(node, n=1):
     """
     Increment the line number of each node in the tree starting at *node* by *n*.
     This is useful to "move code" to a different location in a file.
     """
-    for child in walk( node ):
+    for child in walk(node):
         if 'lineno' in child._attributes:
-            child.lineno = getattr( child, 'lineno', 0 ) + n
+            child.lineno = getattr(child, 'lineno', 0) + n
     return node
 
 
-def iter_fields( node ):
+def iter_fields(node):
     """
-    Yield a tuple of ``( fieldname, value )`` for each field in ``node._fields``
+    Yield a tuple of ``(fieldname, value)`` for each field in ``node._fields``
     that is present on *node*.
     """
     for field in node._fields:
         try:
-            yield field, getattr( node, field )
+            yield field, getattr(node, field)
         except AttributeError:
             pass
 
 
-def iter_child_nodes( node ):
+def iter_child_nodes(node):
     """
     Yield all direct child nodes of *node*, that is, all fields that are nodes
     and all items of fields that are lists of nodes.
     """
-    for name, field in iter_fields( node ):
-        if isinstance( field, AST ):
+    for name, field in iter_fields(node):
+        if isinstance(field, AST):
             yield field
-        elif isinstance( field, list ):
+        elif isinstance(field, list):
             for item in field:
-                if isinstance( item, AST ):
+                if isinstance(item, AST):
                     yield item
 
 
-def get_docstring( node, clean=True ):
+def get_docstring(node, clean=True):
     """
     Return the docstring for the given node or None if no docstring can
     be found.  If the node provided does not have docstrings a TypeError
     will be raised.
     """
-    if not isinstance( node, ( FunctionDef, ClassDef, Module ) ):
-        raise TypeError( "%r can't have docstrings" % node.__class__.__name__ )
-    if node.body and isinstance( node.body[ 0 ], Expr ) and \
-       isinstance( node.body[ 0 ].value, Str ):
+    if not isinstance(node, (FunctionDef, ClassDef, Module)):
+        raise TypeError("%r can't have docstrings" % node.__class__.__name__)
+    if node.body and isinstance(node.body[0], Expr) and \
+       isinstance(node.body[0].value, Str):
         if clean:
             import inspect
-            return inspect.cleandoc( node.body[ 0 ].value.s )
-        return node.body[ 0 ].value.s
+            return inspect.cleandoc(node.body[0].value.s)
+        return node.body[0].value.s
 
 
-def walk( node ):
+def walk(node):
     """
     Recursively yield all descendant nodes in the tree starting at *node*
-    ( including *node* itself ), in no specified order.  This is useful if you
+    (including *node* itself), in no specified order.  This is useful if you
     only want to modify nodes in place and don't care about the context.
     """
     from collections import deque
-    todo = deque( [ node ] )
+    todo = deque([node])
     while todo:
         node = todo.popleft()
-        todo.extend( iter_child_nodes( node ) )
+        todo.extend(iter_child_nodes(node))
         yield node
 
 
-class NodeVisitor( object ):
-
+class NodeVisitor(object):
     """
     A node visitor base class that walks the abstract syntax tree and calls a
     visitor function for every node found.  This function may return a value
@@ -229,31 +227,31 @@
     class name of the node.  So a `TryFinally` node visit function would
     be `visit_TryFinally`.  This behavior can be changed by overriding
     the `visit` method.  If no visitor function exists for a node
-    ( return value `None` ) the `generic_visit` visitor is used instead.
+    (return value `None`) the `generic_visit` visitor is used instead.
 
     Don't use the `NodeVisitor` if you want to apply changes to nodes during
-    traversing.  For this a special visitor exists ( `NodeTransformer` ) that
+    traversing.  For this a special visitor exists (`NodeTransformer`) that
     allows modifications.
     """
-    def visit( self, node ):
-        "Visit a node."
+
+    def visit(self, node):
+        """Visit a node."""
         method = 'visit_' + node.__class__.__name__
-        visitor = getattr( self, method, self.generic_visit )
-        return visitor( node )
+        visitor = getattr(self, method, self.generic_visit)
+        return visitor(node)
 
-    def generic_visit( self, node ):
-        "Called if no explicit visitor function exists for a node."
-        for field, value in iter_fields( node ):
-            if isinstance( value, list ):
+    def generic_visit(self, node):
+        """Called if no explicit visitor function exists for a node."""
+        for field, value in iter_fields(node):
+            if isinstance(value, list):
                 for item in value:
-                    if isinstance( item, AST ):
-                        self.visit( item )
-            elif isinstance( value, AST ):
-                self.visit( value )
+                    if isinstance(item, AST):
+                        self.visit(item)
+            elif isinstance(value, AST):
+                self.visit(value)
 
 
-class NodeTransformer( NodeVisitor ):
-
+class NodeTransformer(NodeVisitor):
     """
     A :class:`NodeVisitor` subclass that walks the abstract syntax tree and
     allows modification of nodes.
@@ -265,48 +263,49 @@
     original node in which case no replacement takes place.
 
     Here is an example transformer that rewrites all occurrences of name lookups
-    ( ``foo`` ) to ``data[ 'foo' ]``::
+    (``foo``) to ``data['foo']``::
 
-       class RewriteName( NodeTransformer ):
+       class RewriteName(NodeTransformer):
 
-           def visit_Name( self, node ):
-               return copy_location( Subscript(
-                   value=Name( id='data', ctx=Load() ),
-                   slice=Index( value=Str( s=node.id ) ),
+           def visit_Name(self, node):
+               return copy_location(Subscript(
+                   value=Name(id='data', ctx=Load()),
+                   slice=Index(value=Str(s=node.id)),
                    ctx=node.ctx
-               ), node )
+               ), node)
 
     Keep in mind that if the node you're operating on has child nodes you must
     either transform the child nodes yourself or call the :meth:`generic_visit`
     method for the node first.
 
-    For nodes that were part of a collection of statements ( that applies to all
-    statement nodes ), the visitor may also return a list of nodes rather than
+    For nodes that were part of a collection of statements (that applies to all
+    statement nodes), the visitor may also return a list of nodes rather than
     just a single node.
 
     Usually you use the transformer like this::
 
-       node = YourTransformer().visit( node )
+       node = YourTransformer().visit(node)
     """
-    def generic_visit( self, node ):
-        for field, old_value in iter_fields( node ):
-            old_value = getattr( node, field, None )
-            if isinstance( old_value, list ):
+
+    def generic_visit(self, node):
+        for field, old_value in iter_fields(node):
+            old_value = getattr(node, field, None)
+            if isinstance(old_value, list):
                 new_values = []
                 for value in old_value:
-                    if isinstance( value, AST ):
-                        value = self.visit( value )
+                    if isinstance(value, AST):
+                        value = self.visit(value)
                         if value is None:
                             continue
-                        elif not isinstance( value, AST ):
-                            new_values.extend( value )
+                        elif not isinstance(value, AST):
+                            new_values.extend(value)
                             continue
-                    new_values.append( value )
-                old_value[ : ] = new_values
-            elif isinstance( old_value, AST ):
-                new_node = self.visit( old_value )
+                    new_values.append(value)
+                old_value[:] = new_values
+            elif isinstance(old_value, AST):
+                new_node = self.visit(old_value)
                 if new_node is None:
-                    delattr( node, field )
+                    delattr(node, field)
                 else:
-                    setattr( node, field, new_node )
+                    setattr(node, field, new_node)
         return node
diff --git a/TestON/core/graph.py b/TestON/core/graph.py
index b5bf9f6..b1cbdb2 100644
--- a/TestON/core/graph.py
+++ b/TestON/core/graph.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python
 
-"""
-Copyright 2016 Open Networking Foundation ( ONF )
+'''
+Copyright 2016 Open Networking Foundation (ONF)
 
 Please refer questions to either the onos test mailing list at <onos-test@onosproject.org>,
 the System Testing Plans and Results wiki page at <https://wiki.onosproject.org/x/voMg>,
@@ -10,7 +10,7 @@
     TestON is free software: you can redistribute it and/or modify
     it under the terms of the GNU General Public License as published by
     the Free Software Foundation, either version 2 of the License, or
-    ( at your option ) any later version.
+    (at your option) any later version.
 
     TestON is distributed in the hope that it will be useful,
     but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -20,28 +20,29 @@
     You should have received a copy of the GNU General Public License
     along with TestON.  If not, see <http://www.gnu.org/licenses/>.
 
-"""
+'''
+
+
 import time
 import random
 
-
 class Graph:
-
     """
     Graph class provides implementations of graph algorithms.
     The functions currently supported include:
     - Comparing two graphs with specified attributes for vertices and edges
-    - Getting DFI ( Depth First Index ) and back edges during a DFS
+    - Getting DFI (Depth First Index) and back edges during a DFS
     - Chain decomposition of a graph
-    - Finding ( non- )cut-edges and vertices
+    - Finding (non-)cut-edges and vertices
     """
+
     def __init__( self ):
         # We use a dictionary to store all information about the graph
         self.graphDict = {}
         # Depth-first index of each vertex
         self.DFI = {}
         self.currentDFI = 0
-        # Parent vertex ( and edge to that vertex ) of each vertex in depth-first search tree
+        # Parent vertex (and edge to that vertex) of each vertex in depth-first search tree
         self.parentVertexInDFS = {}
         self.parentEdgeInDFS = {}
         # Back edges of the graph generated during DFS
@@ -59,7 +60,7 @@
           vertex2: { 'edges': ..., 'name': ..., 'protocol': ... } }
         Each vertex should at least have an 'edges' attribute which describes the
         adjacency information. The value of 'edges' attribute is also represented by
-        a dictionary, which maps each edge ( identified by the neighbor vertex ) to a
+        a dictionary, which maps each edge (identified by the neighbor vertex) to a
         list of attributes.
         An example of the edges dictionary:
         'edges': { vertex2: { 'port': ..., 'type': ... },
@@ -68,7 +69,7 @@
         self.graphDict = graphDict
         return main.TRUE
 
-    def compareGraphs( self, graphDictA, graphDictB, vertexAttributes=[ 'edges' ], edgeAttributes=[ 'port' ] ):
+    def compareGraphs( self, graphDictA, graphDictB, vertexAttributes=['edges'], edgeAttributes=['port'] ):
         """
         Compare two graphs.
         By default only the adjacency relationship, i.e. 'port' attribute in
@@ -102,7 +103,7 @@
                         # Compare two attributes
                         attributeValueA = graphDictA[ vertex ][ vertexAttribute ]
                         attributeValueB = graphDictB[ vertex ][ vertexAttribute ]
-                        # FIXME: the comparison may not work for ( sub )attribute values that are of list type
+                        # FIXME: the comparison may not work for (sub)attribute values that are of list type
                         # For attributes except for 'edges', we just rely on '==' for comparison
                         if not vertexAttribute == 'edges':
                             if not attributeValueA == attributeValueB:
@@ -149,8 +150,8 @@
                                                                                                                                    attributeValueA,
                                                                                                                                    attributeValueB ) )
             if not result:
-                # main.log.debug( "Graph: graphDictA: {}".format( graphDictA ) )
-                # main.log.debug( "Graph: graphDictB: {}".format( graphDictB ) )
+                #main.log.debug( "Graph: graphDictA: {}".format( graphDictA ) )
+                #main.log.debug( "Graph: graphDictB: {}".format( graphDictB ) )
                 pass
             return result
         except TypeError:
@@ -165,8 +166,8 @@
 
     def getNonCutEdges( self ):
         """
-        Get a list of non-cut-edges ( non-bridges ).
-        The definition of a cut-edge ( bridge ) is: the deletion of a cut-edge will
+        Get a list of non-cut-edges (non-bridges).
+        The definition of a cut-edge (bridge) is: the deletion of a cut-edge will
         increase the number of connected component of a graph.
         The function is realized by implementing Schmidt's algorithm based on
         chain decomposition.
@@ -182,7 +183,7 @@
             for chain in self.chains:
                 for edge in chain:
                     nonCutEdges.append( edge )
-            # main.log.debug( 'Non-cut-edges: {}'.format( nonCutEdges ) )
+            #main.log.debug( 'Non-cut-edges: {}'.format( nonCutEdges ) )
             return nonCutEdges
         except Exception:
             main.log.exception( "Graph: Uncaught exception" )
@@ -206,16 +207,16 @@
                 # chain, the chain is a cycle chain
                 if chain[ 0 ][ 0 ] == chain[ -1 ][ 1 ]:
                     cycleChains.append( chain )
-            # main.log.debug( 'Cycle chains: {}'.format( cycleChains ) )
+            #main.log.debug( 'Cycle chains: {}'.format( cycleChains ) )
             # Get a set of vertices which are the first vertices of a cycle chain (excluding the first
             # cycle chain), and these vertices are a subset of all cut-vertices
             subsetOfCutVertices = []
             if len( cycleChains ) > 1:
                 for cycleChain in cycleChains[ 1: ]:
                     subsetOfCutVertices.append( cycleChain[ 0 ][ 0 ] )
-            # main.log.debug( 'Subset of cut vertices: {}'.format( subsetOfCutVertices ) )
+            #main.log.debug( 'Subset of cut vertices: {}'.format( subsetOfCutVertices ) )
             nonCutVertices = []
-            assert nonCutEdges is not None
+            assert nonCutEdges != None
             for vertex in self.graphDict.keys():
                 if vertex in subsetOfCutVertices:
                     continue
@@ -223,12 +224,12 @@
                 for neighbor in self.graphDict[ vertex ][ 'edges' ].keys():
                     edge = [ vertex, neighbor ]
                     backwardEdge = [ neighbor, vertex ]
-                    if edge not in nonCutEdges and backwardEdge not in nonCutEdges:
+                    if not edge in nonCutEdges and not backwardEdge in nonCutEdges:
                         vertexIsNonCut = False
                         break
                 if vertexIsNonCut:
                     nonCutVertices.append( vertex )
-            # main.log.debug( 'Non-cut-vertices: {}'.format( nonCutVertices ) )
+            #main.log.debug( 'Non-cut-vertices: {}'.format( nonCutVertices ) )
             return nonCutVertices
         except KeyError:
             main.log.exception( "Graph: KeyError exception found" )
@@ -246,7 +247,7 @@
         as generates the back edges
         """
         try:
-            assert self.graphDict is not None and len( self.graphDict ) != 0
+            assert self.graphDict != None and len( self.graphDict ) != 0
             for vertex in self.graphDict.keys():
                 self.DFI[ vertex ] = -1
                 self.parentVertexInDFS[ vertex ] = ''
@@ -287,14 +288,14 @@
                 else:
                     key = self.DFI[ neighbor ]
                     if key in self.backEdges.keys():
-                        if edge not in self.backEdges[ key ] and\
-                                backwardEdge not in self.backEdges[ key ]:
+                        if not edge in self.backEdges[ key ] and\
+                        not backwardEdge in self.backEdges[ key ]:
                             self.backEdges[ key ].append( backwardEdge )
                     else:
                         tempKey = self.DFI[ vertex ]
                         if tempKey in self.backEdges.keys():
-                            if edge not in self.backEdges[ tempKey ] and\
-                                    backwardEdge not in self.backEdges[ tempKey ]:
+                            if not edge in self.backEdges[ tempKey ] and\
+                            not backwardEdge in self.backEdges[ tempKey ]:
                                 self.backEdges[ key ] = [ backwardEdge ]
                         else:
                             self.backEdges[ key ] = [ backwardEdge ]
@@ -310,7 +311,8 @@
         """
         This function finds all the chains in chain-decomposition algorithm
         """
-        keyList = sorted( self.backEdges.keys() )
+        keyList = self.backEdges.keys()
+        keyList.sort()
         vertexIsVisited = {}
         self.chains = []
         for vertex in self.graphDict.keys():
@@ -327,7 +329,7 @@
                         nextVertex = currentEdge[ 1 ]
                         vertexIsVisited[ currentVertex ] = True
                         chain.append( currentEdge )
-                        if nextVertex == sourceVertex or vertexIsVisited[ nextVertex ]:
+                        if nextVertex == sourceVertex or vertexIsVisited[ nextVertex ] == True:
                             break
                         currentEdge = self.parentEdgeInDFS[ nextVertex ]
                     self.chains.append( chain )
diff --git a/TestON/core/logger.py b/TestON/core/logger.py
index 559f84d..ed9b0bd 100644
--- a/TestON/core/logger.py
+++ b/TestON/core/logger.py
@@ -1,5 +1,5 @@
-# /usr/bin/env python
-"""
+#/usr/bin/env python
+'''
 Created on 07-Jan-2013
 Modified 2015 by ON.Lab
 
@@ -10,7 +10,7 @@
     TestON is free software: you can redistribute it and/or modify
     it under the terms of the GNU General Public License as published by
     the Free Software Foundation, either version 2 of the License, or
-    ( at your option ) any later version.
+    (at your option) any later version.
 
     TestON is distributed in the hope that it will be useful,
     but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -21,107 +21,105 @@
     along with TestON.  If not, see <http://www.gnu.org/licenses/>.
 
 
-"""
+'''
+
 import logging
 import datetime
 import re
 import os
-
-
 class Logger:
-
-    """
+    '''
         Add continuous logs and reports of the test.
 
-        author:: Raghav Kashyap( raghavkashyap@paxterrasolutions.com )
-    """
-    def _printHeader( self, main ):
-        """
+        @author: Raghav Kashyap(raghavkashyap@paxterrasolutions.com)
+    '''
+    def _printHeader(self,main) :
+        '''
             Log's header will be appended to the Log file
-        """
-        logmsg = "\n" + " " * 32 + "+----------------+\n" + "-" * 30 + " { Script And Files }  " + "-" * 30 + "\n" + " " * 32 + "+----------------+\n"
+        '''
+        logmsg = "\n"+" " * 32+"+----------------+\n" +"-" * 30+" { Script And Files }  "+"-" * 30+"\n" +" " * 32+"+----------------+\n";
         logmsg = logmsg + "\n\tScript Log File : " + main.LogFileName + ""
         logmsg = logmsg + "\n\tReport Log File : " + main.ReportFileName + ""
         for component in main.componentDictionary.keys():
-            logmsg = logmsg + "\n\t" + component + " Session Log : " + main.logdir + "/" + component + ".session" + ""
+            logmsg = logmsg + "\n\t"+component+" Session Log : " + main.logdir+"/"+component+".session" + ""
 
-        logmsg = logmsg + "\n\tTest Script :" + path + "Tests/" + main.TEST + ".py" + ""
+        logmsg = logmsg + "\n\tTest Script :" + path + "Tests/" + main.TEST + ".py"+ ""
         logmsg = logmsg + "\n\tTest Params : " + path + "Tests/" + main.TEST + ".params" + ""
-        logmsg = logmsg + "\n\tTopology : " + path + "Tests/" + main.TEST + ".topo" + ""
-        logmsg = logmsg + "\n" + " " * 30 + "+" + "-" * 18 + "+" + "\n" + "-" * 27 + "  { Script Exec Params }  " + "-" * 27 + "\n" + " " * 30 + "+" + "-" * 18 + "+\n"
-        values = "\n\t" + str( main.params )
-        values = re.sub( ",", "\n\t", values )
-        values = re.sub( "{", "\n\t", values )
-        values = re.sub( "}", "\n\t", values )
+        logmsg = logmsg + "\n\tTopology : " + path + "Tests/" +main.TEST + ".topo" + ""
+        logmsg = logmsg + "\n"+" " * 30+"+" +"-" * 18+"+" +"\n" +"-" * 27+"  { Script Exec Params }  "+"-" * 27 +"\n" +" " * 30 +"+"+"-" * 18 +"+\n";
+        values = "\n\t" + str(main.params)
+        values = re.sub(",", "\n\t", values)
+        values = re.sub("{", "\n\t", values)
+        values = re.sub("}", "\n\t", values)
         logmsg = logmsg + values
-        logmsg = logmsg + "\n\n" + " " * 31 + "+---------------+\n" + "-" * 29 + " { Components Used }  " + "-" * 29 + "\n" + " " * 31 + "+---------------+\n"
+        logmsg = logmsg + "\n\n"+" " * 31+"+---------------+\n" +"-" * 29+" { Components Used }  " +"-" * 29+"\n"+" " * 31+"+---------------+\n"
         component_list = []
-        component_list.append( None )
+        component_list.append(None)
 
         # Listing the components in the order of test_target component should be first.
-        if isinstance( main.componentDictionary, dict ):
+        if type(main.componentDictionary) == dict:
             for key in main.componentDictionary.keys():
-                if main.test_target == key:
-                    component_list[ 0 ] = key + "-Test Target"
-                else:
-                    component_list.append( key )
+                if main.test_target == key :
+                    component_list[0] = key+"-Test Target"
+                else :
+                    component_list.append(key)
 
-        for index in range( len( component_list ) ):
-            if index == 0:
-                if component_list[ index ]:
-                    logmsg += "\t" + component_list[ index ] + "\n"
-            elif index > 0:
-                logmsg += "\t" + str( component_list[ index ] ) + "\n"
+        for index in range(len(component_list)) :
+            if index==0:
+                if component_list[index]:
+                    logmsg+="\t"+component_list[index]+"\n"
+            elif index > 0 :
+                logmsg+="\t"+str(component_list[index])+"\n"
 
-        logmsg = logmsg + "\n\n" + " " * 30 + "+--------+\n" + "-" * 28 + " { Topology }  " + "-" * 28 + "\n" + " " * 30 + "+--------+\n"
-        values = "\n\t" + str( main.topology[ 'COMPONENT' ] )
-        values = re.sub( ",", "\n\t", values )
-        values = re.sub( "{", "\n\t", values )
-        values = re.sub( "}", "\n\t", values )
+        logmsg = logmsg + "\n\n"+" " * 30+"+--------+\n" +"-" * 28+" { Topology }  "+"-" * 28 +"\n" +" " * 30+"+--------+\n"
+        values = "\n\t" + str(main.topology['COMPONENT'])
+        values = re.sub(",", "\n\t", values)
+        values = re.sub("{", "\n\t", values)
+        values = re.sub("}", "\n\t", values)
         logmsg = logmsg + values
-        logmsg = logmsg + "\n" + "-" * 60 + "\n"
+        logmsg = logmsg + "\n"+"-" * 60+"\n"
 
         # enter into log file all headers
-        logfile = open( main.LogFileName, "w+" )
-        logfile.write( logmsg )
+        logfile = open(main.LogFileName,"w+")
+        logfile.write (logmsg)
         print logmsg
         main.logHeader = logmsg
         logfile.close()
 
-        # enter into report file all headers
-        main.reportFile = open( main.ReportFileName, "w+" )
-        main.reportFile.write( logmsg )
+        #enter into report file all headers
+        main.reportFile = open(main.ReportFileName,"w+")
+        main.reportFile.write(logmsg)
         main.reportFile.close()
 
-        # Sumamry file header
-        currentTime = str( main.STARTTIME.strftime( "%d %b %Y %H:%M:%S" ) )
+        #Summary file header
+        currentTime = str( main.STARTTIME.strftime("%d %b %Y %H:%M:%S") )
         main.summaryFile = open( main.SummaryFileName, "w+" )
         main.summaryFile.write( main.TEST + " at " + currentTime + "\n" )
         main.summaryFile.close()
 
-        # wiki file header
-        currentTime = str( main.STARTTIME.strftime( "%d %b %Y %H:%M:%S" ) )
+        #wiki file header
+        currentTime = str( main.STARTTIME.strftime("%d %b %Y %H:%M:%S") )
         main.wikiFile = open( main.WikiFileName, "w+" )
         main.wikiFile.write( main.TEST + " at " + currentTime + "<p></p>\n" )
         main.wikiFile.close()
 
-    def initlog( self, main ):
-        """
+    def initlog(self,main):
+        '''
             Initialise all the log handles.
-        """
+        '''
         main._getTest()
         main.STARTTIME = datetime.datetime.now()
 
-        currentTime = re.sub( "-|\s|:|\.", "_", str( main.STARTTIME.strftime( "%d %b %Y %H:%M:%S" ) ) )
+        currentTime = re.sub("-|\s|:|\.", "_", str(main.STARTTIME.strftime("%d %b %Y %H:%M:%S")))
         if main.logdir:
-            main.logdir = main.logdir + "/" + main.TEST + "_" + currentTime
+            main.logdir = main.logdir+ "/"+main.TEST + "_" + currentTime
         else:
             main.logdir = main.logs_path + main.TEST + "_" + currentTime
 
-        os.mkdir( main.logdir )
+        os.mkdir(main.logdir)
 
-        main.LogFileName = main.logdir + "/" + main.TEST + "_" + str( currentTime ) + ".log"
-        main.ReportFileName = main.logdir + "/" + main.TEST + "_" + str( currentTime ) + ".rpt"
+        main.LogFileName = main.logdir + "/" + main.TEST + "_" +str(currentTime) + ".log"
+        main.ReportFileName = main.logdir + "/" + main.TEST + "_" + str(currentTime) + ".rpt"
         main.WikiFileName = main.logdir + "/" + main.TEST + "Wiki.txt"
         main.SummaryFileName = main.logdir + "/" + main.TEST + "Summary.txt"
         main.JenkinsCSV = main.logdir + "/" + main.TEST + ".csv"
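# A minimal sketch of the log-file naming scheme built above, assuming a
# hypothetical test name and base path; the timestamp is flattened with the
# same re.sub() pattern used in initlog().
import datetime
import re

TEST = "FUNCflow"                     # hypothetical test name
logs_path = "/tmp/TestON_logs/"       # hypothetical base directory
start = datetime.datetime.now()
stamp = re.sub(r"-|\s|:|\.", "_", str(start.strftime("%d %b %Y %H:%M:%S")))
logdir = logs_path + TEST + "_" + stamp
LogFileName = logdir + "/" + TEST + "_" + stamp + ".log"
# e.g. /tmp/TestON_logs/FUNCflow_20_Dec_2017_14_05_33/FUNCflow_20_Dec_2017_14_05_33.log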
@@ -129,128 +127,126 @@
 
         main.TOTAL_TC_SUCCESS = 0
 
-        # Add log-level - Report
-        logging.addLevelName( 9, "REPORT" )
-        logging.addLevelName( 7, "EXACT" )
-        logging.addLevelName( 11, "CASE" )
-        logging.addLevelName( 12, "STEP" )
-        main.log = logging.getLogger( main.TEST )
-
-        def report( msg ):
-            """
+        #### Add log-level - Report
+        logging.addLevelName(9, "REPORT")
+        logging.addLevelName(7, "EXACT")
+        logging.addLevelName(11, "CASE")
+        logging.addLevelName(12, "STEP")
+        main.log = logging.getLogger(main.TEST)
+        def report(msg):
+            '''
                 Will append the report message to the logs.
-            """
-            main.log._log( 9, msg, "OpenFlowAutoMattion", "OFAutoMation" )
+            '''
+            main.log._log(9,msg,"OpenFlowAutoMattion","OFAutoMation")
             currentTime = datetime.datetime.now()
-            currentTime = currentTime.strftime( "%d %b %Y %H:%M:%S" )
-            newmsg = "\n[REPORT] " + "[" + str( currentTime ) + "] " + msg
+            currentTime = currentTime.strftime("%d %b %Y %H:%M:%S")
+            newmsg = "\n[REPORT] " +"["+ str(currentTime)+"] "+msg
             print newmsg
-            main.reportFile = open( main.ReportFileName, "a+" )
-            main.reportFile.write( newmsg )
+            main.reportFile = open(main.ReportFileName,"a+")
+            main.reportFile.write(newmsg)
             main.reportFile.close()
 
         main.log.report = report
 
         def summary( msg ):
-            """
+            '''
                 Will append the message to the txt file for the summary.
-            """
-            main.log._log( 6, msg, "OpenFlowAutoMattion", "OFAutoMation" )
-            main.summaryFile = open( main.SummaryFileName, "a+" )
-            main.summaryFile.write( msg + "\n" )
+            '''
+            main.log._log(6,msg,"OpenFlowAutoMattion","OFAutoMation")
+            main.summaryFile = open(main.SummaryFileName,"a+")
+            main.summaryFile.write(msg+"\n")
             main.summaryFile.close()
 
         main.log.summary = summary
 
         def wiki( msg ):
-            """
+            '''
                 Will append the message to the txt file for the wiki.
-            """
-            main.log._log( 6, msg, "OpenFlowAutoMattion", "OFAutoMation" )
-            main.wikiFile = open( main.WikiFileName, "a+" )
-            main.wikiFile.write( msg + "\n" )
+            '''
+            main.log._log(6,msg,"OpenFlowAutoMattion","OFAutoMation")
+            main.wikiFile = open(main.WikiFileName,"a+")
+            main.wikiFile.write(msg+"\n")
             main.wikiFile.close()
 
         main.log.wiki = wiki
 
-        def exact( exmsg ):
-            """
+        def exact(exmsg):
+            '''
                Will append the raw formatted message to the logs
-            """
-            main.log._log( 7, exmsg, "OpenFlowAutoMattion", "OFAutoMation" )
-            main.reportFile = open( main.ReportFileName, "a+" )
-            main.reportFile.write( exmsg )
+            '''
+            main.log._log(7,exmsg,"OpenFlowAutoMattion","OFAutoMation")
+            main.reportFile = open(main.ReportFileName,"a+")
+            main.reportFile.write(exmsg)
             main.reportFile.close()
-            logfile = open( main.LogFileName, "a" )
-            logfile.write( "\n" + str( exmsg ) + "\n" )
+            logfile = open(main.LogFileName,"a")
+            logfile.write("\n"+ str(exmsg) +"\n")
             logfile.close()
             print exmsg
 
         main.log.exact = exact
 
-        def case( msg ):
-            """
+        def case(msg):
+            '''
                Format of the case type log defined here.
-            """
-            main.log._log( 9, msg, "OpenFlowAutoMattion", "OFAutoMation" )
+            '''
+            main.log._log(9,msg,"OpenFlowAutoMattion","OFAutoMation")
             currentTime = datetime.datetime.now()
-            newmsg = "[" + str( currentTime ) + "] " + "[" + main.TEST + "] " + "[CASE] " + msg
-            logfile = open( main.LogFileName, "a" )
-            logfile.write( "\n" + str( newmsg ) + "\n" )
+            newmsg = "["+str(currentTime)+"] " + "["+main.TEST+"] " + "[CASE] " +msg
+            logfile = open(main.LogFileName,"a")
+            logfile.write("\n"+ str(newmsg) +"\n")
             logfile.close()
             print newmsg
 
         main.log.case = case
 
-        def step( msg ):
-            """
+        def step(msg):
+            '''
                 Format of the step type log defined here.
-            """
-            main.log._log( 9, msg, "OpenFlowAutoMattion", "OFAutoMation" )
+            '''
+            main.log._log(9,msg,"OpenFlowAutoMattion","OFAutoMation")
             currentTime = datetime.datetime.now()
-            newmsg = "[" + str( currentTime ) + "] " + "[" + main.TEST + "] " + "[STEP] " + msg
-            logfile = open( main.LogFileName, "a" )
-            logfile.write( "\n" + str( newmsg ) + "\n" )
+            newmsg = "["+str(currentTime)+"] " + "["+main.TEST+"] " + "[STEP] " +msg
+            logfile = open(main.LogFileName,"a")
+            logfile.write("\n"+ str(newmsg) +"\n")
             logfile.close()
             print newmsg
 
         main.log.step = step
 
-        main.LogFileHandler = logging.FileHandler( main.LogFileName )
-        self._printHeader( main )
+        main.LogFileHandler = logging.FileHandler(main.LogFileName)
+        self._printHeader(main)
 
-        # initializing logging module and setting log level
-        main.log.setLevel( logging.INFO )
-        main.log.setLevel( logging.DEBUG )  # Temporary
-        main.LogFileHandler.setLevel( logging.INFO )
+        ### initializing logging module and setting log level
+        main.log.setLevel(logging.INFO)
+        main.log.setLevel(logging.DEBUG) # Temporary
+        main.LogFileHandler.setLevel(logging.INFO)
 
         # create console handler with a higher log level
         main.ConsoleHandler = logging.StreamHandler()
-        main.ConsoleHandler.setLevel( logging.INFO )
-        main.ConsoleHandler.setLevel( logging.DEBUG )  # Temporary
+        main.ConsoleHandler.setLevel(logging.INFO)
+        main.ConsoleHandler.setLevel(logging.DEBUG) #Temporary
         # create formatter and add it to the handlers
-        # formatter = logging.Formatter( '%(asctime)s - %(name)s - %(levelname)s - %(message)s' )
-
+        #formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
         class MyFormatter( logging.Formatter ):
             colors = { 'cyan': '\033[96m', 'purple': '\033[95m',
                        'blue': '\033[94m', 'green': '\033[92m',
                        'yellow': '\033[93m', 'red': '\033[91m',
                        'end': '\033[0m' }
 
-            FORMATS = { 'DEFAULT': '%(asctime)s - %(name)s - %(levelname)s - %(message)s' }
+            FORMATS = {'DEFAULT': '%(asctime)s - %(name)s - %(levelname)s - %(message)s'}
             if COLORS:  # NOTE:colors will only be loaded if command is run from one line
                         #      IE:   './cli.py run testname'
                         #      This is to prevent issues with Jenkins parsing
                         # TODO: Make colors configurable
-                levels = { logging.ERROR: colors[ 'red' ] +
-                                           FORMATS[ 'DEFAULT' ] +
-                                           colors[ 'end' ],
-                           logging.WARN: colors[ 'yellow' ] +
-                                          FORMATS[ 'DEFAULT' ] +
-                                          colors[ 'end' ],
-                           logging.DEBUG: colors[ 'purple' ] +
-                                          FORMATS[ 'DEFAULT' ] +
-                                          colors[ 'end' ] }
+                levels = { logging.ERROR : colors['red'] +
+                                           FORMATS['DEFAULT'] +
+                                           colors['end'],
+                           logging.WARN : colors['yellow'] +
+                                          FORMATS['DEFAULT'] +
+                                          colors['end'],
+                           logging.DEBUG : colors['purple'] +
+                                          FORMATS['DEFAULT'] +
+                                          colors['end'] }
                 FORMATS.update( levels )
 
             def format( self, record ):
@@ -258,87 +254,89 @@
                                               self.FORMATS[ 'DEFAULT' ] )
                 return logging.Formatter.format( self, record )
         formatter = MyFormatter()
-        main.ConsoleHandler.setFormatter( formatter )
-        main.LogFileHandler.setFormatter( formatter )
+        main.ConsoleHandler.setFormatter(formatter)
+        main.LogFileHandler.setFormatter(formatter)
 
         # add the handlers to logger
-        main.log.addHandler( main.ConsoleHandler )
-        main.log.addHandler( main.LogFileHandler )
+        main.log.addHandler(main.ConsoleHandler)
+        main.log.addHandler(main.LogFileHandler)
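# A minimal sketch of the custom log-level pattern registered above, using only
# the public logging API (the code above calls the private Logger._log() helper
# directly); the logger name is hypothetical.
import logging

logging.addLevelName(9, "REPORT")
logging.addLevelName(12, "STEP")

log = logging.getLogger("exampleTest")
log.setLevel(1)                        # let the low-numbered custom levels through
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
log.addHandler(handler)

log.log(9, "shows up with level name REPORT")
log.log(12, "shows up with level name STEP")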
 
-    def testSummary( self, main ):
-        """
+    def testSummary(self,main):
+        '''
             testSummary will take care of the summary of the test.
-        """
+        '''
+
         main.ENDTIME = datetime.datetime.now()
         main.EXECTIME = main.ENDTIME - main.STARTTIME
-        if ( main.TOTAL_TC_PASS == 0 ):
+        if (main.TOTAL_TC_PASS == 0):
             main.TOTAL_TC_SUCCESS = 0
         else:
-            main.TOTAL_TC_SUCCESS = str( ( main.TOTAL_TC_PASS * 100 ) / main.TOTAL_TC_RUN )
-        if ( main.TOTAL_TC_RUN == 0 ):
+            main.TOTAL_TC_SUCCESS = str((main.TOTAL_TC_PASS*100)/main.TOTAL_TC_RUN)
+        if (main.TOTAL_TC_RUN == 0) :
             main.TOTAL_TC_EXECPERCENT = 0
-        else:
-            main.TOTAL_TC_EXECPERCENT = str( ( main.TOTAL_TC_RUN * 100 ) / main.TOTAL_TC_PLANNED )
-        testResult = "\n\n" + "*" * 37 + "\n" + "\tTest Execution Summary\n" + "\n" + "*" * 37 + " \n"
-        testResult = testResult + "\n Test Start           : " + str( main.STARTTIME.strftime( "%d %b %Y %H:%M:%S" ) )
-        testResult = testResult + "\n Test End             : " + str( main.ENDTIME.strftime( "%d %b %Y %H:%M:%S" ) )
-        testResult = testResult + "\n Execution Time       : " + str( main.EXECTIME )
-        testResult = testResult + "\n Total tests planned  : " + str( main.TOTAL_TC_PLANNED )
-        testResult = testResult + "\n Total tests RUN      : " + str( main.TOTAL_TC_RUN )
-        testResult = testResult + "\n Total Pass           : " + str( main.TOTAL_TC_PASS )
-        testResult = testResult + "\n Total Fail           : " + str( main.TOTAL_TC_FAIL )
-        testResult = testResult + "\n Total No Result      : " + str( main.TOTAL_TC_NORESULT )
-        testResult = testResult + "\n Success Percentage   : " + str( main.TOTAL_TC_SUCCESS ) + "%"
-        testResult = testResult + "\n Execution Result     : " + str( main.TOTAL_TC_EXECPERCENT ) + "%\n"
+        else :
+            main.TOTAL_TC_EXECPERCENT = str((main.TOTAL_TC_RUN*100)/main.TOTAL_TC_PLANNED)
+        testResult = "\n\n"+"*" * 37+"\n" + "\tTest Execution Summary\n" + "\n"+"*" * 37+" \n"
+        testResult =  testResult + "\n Test Start           : " + str(main.STARTTIME.strftime("%d %b %Y %H:%M:%S"))
+        testResult =  testResult + "\n Test End             : " + str(main.ENDTIME.strftime("%d %b %Y %H:%M:%S"))
+        testResult =  testResult + "\n Execution Time       : " + str(main.EXECTIME)
+        testResult =  testResult + "\n Total tests planned  : " + str(main.TOTAL_TC_PLANNED)
+        testResult =  testResult + "\n Total tests RUN      : " + str(main.TOTAL_TC_RUN)
+        testResult =  testResult + "\n Total Pass           : " + str(main.TOTAL_TC_PASS)
+        testResult =  testResult + "\n Total Fail           : " + str(main.TOTAL_TC_FAIL)
+        testResult =  testResult + "\n Total No Result      : " + str(main.TOTAL_TC_NORESULT)
+        testResult =  testResult + "\n Success Percentage   : " + str(main.TOTAL_TC_SUCCESS) + "%"
+        testResult =  testResult + "\n Execution Result     : " + str(main.TOTAL_TC_EXECPERCENT) + "%\n"
         if main.failedCase:
-            testResult = testResult + "\n Case Failed          : " + str( main.failedCase )
+            testResult =  testResult + "\n Case Failed          : " + str( main.failedCase )
         if main.noResultCase:
-            testResult = testResult + "\n Case NoResult        : " + str( main.noResultCase )
-        testResult = testResult + "\n Case Executed        : " + str( main.executedCase )
-        testResult = testResult + "\n Case Not Executed    : " + str( main.leftCase )
-        # main.log.report( testResult )
+            testResult =  testResult + "\n Case NoResult        : " + str( main.noResultCase )
+        testResult =  testResult + "\n Case Executed        : " + str( main.executedCase )
+        testResult =  testResult + "\n Case Not Executed    : " + str( main.leftCase )
+        #main.log.report(testResult)
         main.testResult = testResult
-        main.log.exact( testResult )
+        main.log.exact(testResult)
 
-        # CSV output needed for Jenkins plot plugin
-        # NOTE: the elements were ordered based on the colors assigned to the data
-        logfile = open( main.JenkinsCSV, "w" )
-        logfile.write( ",".join( [ 'Tests Failed', 'Tests Passed', 'Tests Planned' ] ) + "\n" )
-        logfile.write( ",".join( [ str( int( main.TOTAL_TC_FAIL ) ), str( int( main.TOTAL_TC_PASS ) ), str( int( main.TOTAL_TC_PLANNED ) ) ] ) + "\n" )
+        ##CSV output needed for Jenkins plot plugin
+        #NOTE: the elements were ordered based on the colors assigned to the data
+        logfile = open(main.JenkinsCSV ,"w")
+        logfile.write(",".join( ['Tests Failed', 'Tests Passed', 'Tests Planned'] ) + "\n")
+        logfile.write(",".join( [str(int(main.TOTAL_TC_FAIL)), str(int(main.TOTAL_TC_PASS)), str(int(main.TOTAL_TC_PLANNED))] ) + "\n")
         logfile.close()
 
-        executedStatus = open( main.resultFile, "w" )
+        executedStatus = open(main.resultFile, "w")
         if main.TOTAL_TC_FAIL == 0 and main.TOTAL_TC_NORESULT + main.TOTAL_TC_PASS == main.TOTAL_TC_PLANNED:
-            executedStatus.write( "1\n" )
+            executedStatus.write("1\n")
         else:
-            executedStatus.write( "0\n" )
+            executedStatus.write("0\n")
         executedStatus.close()
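# A minimal sketch of the two result files written above, with made-up counts;
# the two-line CSV is what the Jenkins plot plugin reads, and the result file
# holds a single 1/0 flag meaning "no failures and every planned case either
# passed or had no result".
TOTAL_TC_FAIL, TOTAL_TC_PASS, TOTAL_TC_NORESULT, TOTAL_TC_PLANNED = 0, 9, 1, 10

with open("/tmp/example.csv", "w") as csvfile:            # hypothetical path
    csvfile.write(",".join(['Tests Failed', 'Tests Passed', 'Tests Planned']) + "\n")
    csvfile.write(",".join([str(TOTAL_TC_FAIL), str(TOTAL_TC_PASS), str(TOTAL_TC_PLANNED)]) + "\n")

with open("/tmp/example.result", "w") as resultfile:      # hypothetical path
    ok = TOTAL_TC_FAIL == 0 and TOTAL_TC_NORESULT + TOTAL_TC_PASS == TOTAL_TC_PLANNED
    resultfile.write("1\n" if ok else "0\n")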
 
-    def updateCaseResults( self, main ):
-        """
+    def updateCaseResults(self,main):
+        '''
             Update the case result based on the steps' execution and the assertion of each step in the test case
-        """
-        case = str( main.CurrentTestCaseNumber )
-        currentResult = main.testCaseResult.get( case, 2 )
+        '''
+        case = str(main.CurrentTestCaseNumber)
+        currentResult = main.testCaseResult.get(case, 2)
 
         if currentResult == 2:
-            main.TOTAL_TC_RUN = main.TOTAL_TC_RUN + 1
+            main.TOTAL_TC_RUN  = main.TOTAL_TC_RUN + 1
             main.TOTAL_TC_NORESULT = main.TOTAL_TC_NORESULT + 1
-            main.log.exact( "\n " + "*" * 29 + "\n" + "\n Result: No Assertion Called \n" + "*" * 29 + "\n" )
-            line = "Case " + case + ": " + main.CurrentTestCase + " - No Result"
+            main.log.exact("\n "+"*" * 29+"\n" + "\n Result: No Assertion Called \n"+"*" * 29+"\n")
+            line = "Case "+case+": "+main.CurrentTestCase+" - No Result"
         elif currentResult == 1:
-            main.TOTAL_TC_RUN = main.TOTAL_TC_RUN + 1
-            main.TOTAL_TC_PASS = main.TOTAL_TC_PASS + 1
-            main.log.exact( "\n" + "*" * 29 + "\n Result: Pass \n" + "*" * 29 + "\n" )
-            line = "Case " + case + ": " + main.CurrentTestCase + " - PASS"
+            main.TOTAL_TC_RUN  = main.TOTAL_TC_RUN  + 1
+            main.TOTAL_TC_PASS =  main.TOTAL_TC_PASS + 1
+            main.log.exact("\n"+"*" * 29+"\n Result: Pass \n"+"*" * 29+"\n")
+            line = "Case "+case+": "+main.CurrentTestCase+" - PASS"
         elif currentResult == 0:
-            main.TOTAL_TC_RUN = main.TOTAL_TC_RUN + 1
+            main.TOTAL_TC_RUN  = main.TOTAL_TC_RUN  + 1
             main.TOTAL_TC_FAIL = main.TOTAL_TC_FAIL + 1
-            main.log.exact( "\n" + "*" * 29 + "\n Result: Failed \n" + "*" * 29 + "\n" )
-            line = "Case " + case + ": " + main.CurrentTestCase + " - FAIL"
+            main.log.exact("\n"+"*" * 29+"\n Result: Failed \n"+"*" * 29+"\n")
+            line = "Case "+case+": "+main.CurrentTestCase+" - FAIL"
         else:
             main.log.error( " Unknown result of case " + case +
                             ". Result was: " + currentResult )
-            line = "Case " + case + ": " + main.CurrentTestCase + " - ERROR"
+            line = "Case "+case+": "+main.CurrentTestCase+" - ERROR"
         main.log.wiki( "<h3>" + line + "</h3>" )
         main.log.summary( line )
+
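# A minimal sketch of the case-result convention used by updateCaseResults():
# testCaseResult maps a case number (as a string) to 1 for pass or 0 for fail,
# and .get() falls back to 2 when the case never called an assertion.
testCaseResult = {"1": 1, "2": 0}          # hypothetical per-case results
labels = {1: "PASS", 0: "FAIL", 2: "No Result"}

for case in ["1", "2", "3"]:               # case "3" never asserted anything
    currentResult = testCaseResult.get(case, 2)
    print("Case " + case + ": " + labels[currentResult])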
diff --git a/TestON/core/openspeak.py b/TestON/core/openspeak.py
index dfcfe42..b98c68b 100644
--- a/TestON/core/openspeak.py
+++ b/TestON/core/openspeak.py
@@ -1,5 +1,5 @@
-# /usr/bin/env python
-"""
+#/usr/bin/env python
+'''
 Created on 20-Dec-2012
 Modified 2015 by ON.Lab
 
@@ -10,7 +10,7 @@
     TestON is free software: you can redistribute it and/or modify
     it under the terms of the GNU General Public License as published by
     the Free Software Foundation, either version 2 of the License, or
-    ( at your option ) any later version.
+    (at your option) any later version.
 
     TestON is distributed in the hope that it will be useful,
     but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -21,772 +21,797 @@
     along with TestON.  If not, see <http://www.gnu.org/licenses/>.
 
 
-"""
+'''
 import re
 import inspect
 
 
 class OpenSpeak:
 
-    def __init__( self ):
+    def __init__(self):
         self.default = ''
         self.flag = 0
         self.CurrentStep = 0
         self.grtrOrLssr = 0
 
-    def compiler( self, **compileParameters ):
-        """
+    def compiler(self,**compileParameters):
+        '''
          This method will parse the OpenSpeak file and write the equivalent translations to a Python module.
          It can accept OpenSpeak syntax as a string or an OpenSpeak file as an input parameter.
          The translated form is written to a Python module if the attribute "WRITETOFILE" is set to 1.
-        """
-        args = self.parse_args( [ "OPENSPEAKFILE", "TEXT", "WRITETOFILE", "FILEHANDLE" ], **compileParameters )
+        '''
+
+        args = self.parse_args(["OPENSPEAKFILE","TEXT","WRITETOFILE","FILEHANDLE"],**compileParameters)
         resultString = ''
         Test = "Mininet"
-        args[ "WRITETOFILE" ] = args[ "WRITETOFILE" ] if args[ "WRITETOFILE" ] is not None else 1
+        args["WRITETOFILE"] = args["WRITETOFILE"] if args["WRITETOFILE"] != None else 1
         self.CurrentStep = 0
         self.CurrentCase = ''
 
-        # here Open Speak file will be parsed by each line and translated.
-        if args[ "OPENSPEAKFILE" ] is not None and args[ "TEXT" ] is None and args[ "FILEHANDLE" ] is None:
-            self.openspeakfile = args[ "OPENSPEAKFILE" ]
-            openSpeakFile = open( args[ "OPENSPEAKFILE" ], "r" ).readlines()
+        ## here the OpenSpeak file will be parsed line by line and translated.
+        if args["OPENSPEAKFILE"] !=None and args["TEXT"] ==None and args["FILEHANDLE"] == None:
+            self.openspeakfile = args["OPENSPEAKFILE"]
+            openSpeakFile = open(args["OPENSPEAKFILE"],"r").readlines()
 
-        elif args[ "OPENSPEAKFILE" ] is None and args[ "TEXT" ] and args[ "FILEHANDLE" ] is None:
-            openSpeakFile = args[ "TEXT" ].split( "\n" )
-        elif args[ "FILEHANDLE" ] and args[ "OPENSPEAKFILE" ] is None and args[ "TEXT" ] is None:
-            openSpeakFile = args[ "FILEHANDLE" ].readlines()
+        elif args["OPENSPEAKFILE"] ==None and args["TEXT"] and args["FILEHANDLE"] == None:
+            openSpeakFile =  args["TEXT"].split("\n")
+        elif args["FILEHANDLE"] and args["OPENSPEAKFILE"] ==None and args["TEXT"] ==None:
+            openSpeakFile = args["FILEHANDLE"].readlines()
 
         index = 0
         outputFile = []
-        testName = re.search( "\/(.*)\.ospk$", self.openspeakfile, 0 )
-        testName = testName.group( 1 )
-        testName = testName.split( "/" )
-        testName = testName[ len( testName ) - 1 ]
-        outputFile.append( "\nclass " + testName + " :" + "\n" )
-        outputFile.append( "\n" + " " * 4 + "def __init__(self) :" )
-        outputFile.append( "\n" + " " * 8 + "self.default = \'\'" + "\n" )
+        testName = re.search("\/(.*)\.ospk$",self.openspeakfile,0)
+        testName = testName.group(1)
+        testName = testName.split("/")
+        testName = testName[len(testName)-1]
+        outputFile.append("\nclass " + testName + " :" + "\n")
+        outputFile.append("\n" + " " * 4 + "def __init__(self) :")
+        outputFile.append("\n" + " " * 8 + "self.default = \'\'" + "\n")
 
-        while index < len( openSpeakFile ):
-            ifelseMatch = re.match( "\s+IF|\s+ELSE|\s+ELIF", openSpeakFile[ index ], flags=0 )
-            line = openSpeakFile[ index ]
-            repeatMatch = re.match( "\s*REPEAT", openSpeakFile[ index ], flags=0 )
-            if ifelseMatch:
-                result = self.verify_and_translate( line )
-                initialSpaces = len( line ) - len( line.lstrip() )
+        while index < len(openSpeakFile):
+            ifelseMatch = re.match("\s+IF|\s+ELSE|\s+ELIF",openSpeakFile[index],flags=0)
+            line = openSpeakFile[index]
+            repeatMatch = re.match("\s*REPEAT", openSpeakFile[index], flags=0)
+            if ifelseMatch :
+                result =  self.verify_and_translate(line)
+                initialSpaces = len(line) -len(line.lstrip())
                 self.outLoopSpace = initialSpaces
-                nextLine = openSpeakFile[ index + 1 ]
-                nextinitialSpaces = len( nextLine ) - len( nextLine.lstrip() )
+                nextLine = openSpeakFile[index+1]
+                nextinitialSpaces = len(nextLine) -len(nextLine.lstrip())
 
-                while nextinitialSpaces > initialSpaces:
-                    try:
-                        elseMatch = re.match( "\s*ELSE|\s*ELIF", nextLine, flags=0 )
-                        if elseMatch:
-                            self.flag = self.flag - 1
-                        result = result + self.verify_and_translate( nextLine )
-                        nextLine = openSpeakFile[ index + 1 ]
-                        nextinitialSpaces = len( nextLine ) - len( nextLine.lstrip() )
+
+                while nextinitialSpaces > initialSpaces :
+                    try :
+                        elseMatch = re.match("\s*ELSE|\s*ELIF",nextLine,flags=0)
+                        if elseMatch :
+                            self.flag = self.flag -1
+                        result = result + self.verify_and_translate(nextLine)
+                        nextLine = openSpeakFile[index + 1]
+                        nextinitialSpaces = len(nextLine) -len(nextLine.lstrip())
                     except IndexError:
                         pass
                     index = index + 1
                 self.flag = 0
             elif repeatMatch:
                 self.flag = 0
-                result = self.verify_and_translate( line )
+                result =  self.verify_and_translate(line)
                 index = index + 1
-                endMatch = re.match( "\s*END", openSpeakFile[ index ], flags=0 )
-                while not endMatch:
-                    try:
+                endMatch = re.match("\s*END",openSpeakFile[index],flags=0)
+                while not endMatch :
+                    try :
 
                         self.flag = self.flag + 1
-                        result = result + self.verify_and_translate( openSpeakFile[ index ] )
+                        result =  result + self.verify_and_translate(openSpeakFile[index])
                         index = index + 1
 
-                    except IndexError:
+                    except IndexError :
                         pass
 
-            else:
-                self.flag = 0
-                result = self.verify_and_translate( line )
-                index = index + 1
-            outputFile.append( result )
 
-        if args[ "WRITETOFILE" ] == 1:
-            testscript = re.sub( "ospk", "py", self.openspeakfile, 0 )
-            testScript = open( testscript, "w" )
-            for lines in outputFile:
-                testScript.write( lines )
+            else :
+                self.flag = 0
+                result = self.verify_and_translate(line)
+                index = index + 1
+            outputFile.append(result)
+
+        if args["WRITETOFILE"] == 1 :
+            testscript = re.sub("ospk","py",self.openspeakfile,0)
+            testScript = open(testscript,"w")
+            for lines in outputFile :
+                testScript.write(lines)
             testScript.close()
         return resultString
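# A minimal sketch of what compiler() produces for a hypothetical input file:
# the test name comes from the .ospk file name, a class skeleton is emitted
# first, and the generated module is the same path with "ospk" replaced by "py".
import re

openspeakfile = "/path/to/SAMPstartTemplate.ospk"          # hypothetical input
testName = re.search(r"\/(.*)\.ospk$", openspeakfile).group(1).split("/")[-1]
outputFile = ["\nclass " + testName + " :" + "\n",
              "\n" + " " * 4 + "def __init__(self) :",
              "\n" + " " * 8 + "self.default = ''" + "\n"]
testscript = re.sub("ospk", "py", openspeakfile)           # /path/to/SAMPstartTemplate.py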
 
-    def verify_and_translate( self, line ):
-        """
+    def verify_and_translate(self,line):
+        '''
          It will accept each line and call the suitable API to convert it into the equivalent Python syntax.
          It will return the translated Python syntax.
-        """
-        lineSpace = re.match( "^\s+", line, flags=0 )
-        initialSpaces = len( line ) - len( line.lstrip() )
-        line = re.sub( "^\s+", "", line ) if lineSpace else line
+        '''
+        lineSpace = re.match("^\s+",line,flags=0)
+        initialSpaces = len(line) -len(line.lstrip())
+        line = re.sub("^\s+","",line) if lineSpace else line
+
 
         resultString = None
-        resultString = "\n" + " " * 4 if str( inspect.stack()[ 1 ][ 3 ] ) == "compiler" else "\n"
-        indent = " " * ( 4 + 4 * self.flag ) if self.flag > 0 else " " * 4
-        caseMatch = re.search( "^CASE\s+(\d+)", line, flags=0 )
-        nameMatch = re.match( "^NAME\s+\"( .* )\"", line, flags=0 )
-        commentMatch = re.match( "^COMMENT\s+\"( .* )\"", line, flags=0 )
-        stepMatch = re.match( "^STEP\s+\"( .* )\"", line, flags=0 )
-        connectMatch = re.match( "^CONNECT\s+(\w+)\s+USING\s+(.*)", line, flags=0 )
-        disconnectMatch = re.match( "^DISCONNECT\s+(.*)", line, flags=0 )
-        ondoMatch = re.match( "^ON\s+(.*)\s+DO\s+(.*)", line, flags=0 )
+        resultString = "\n" + " " * 4 if str(inspect.stack()[1][3]) == "compiler" else "\n"
+        indent = " " *(4 + 4 * self.flag) if self.flag > 0 else " " * 4
+        caseMatch = re.search("^CASE\s+(\d+)",line,flags=0)
+        nameMatch = re.match("^NAME\s+\"(.*)\"",line,flags=0)
+        commentMatch = re.match("^COMMENT\s+\"(.*)\"",line,flags=0)
+        stepMatch = re.match("^STEP\s+\"(.*)\"",line,flags=0)
+        connectMatch = re.match("^CONNECT\s+(\w+)\s+USING\s+(.*)",line,flags=0)
+        disconnectMatch = re.match("^DISCONNECT\s+(.*)",line,flags=0)
+        ondoMatch = re.match("^ON\s+(.*)\s+DO\s+(.*)",line,flags=0)
 
-        storeMatch = re.match( "^STORE\s+(.*)\s+IN\s+(.*)", line, flags=0 )
-        variableMatch = re.match( "^(.*)\s+=\s+(.*)", line, flags=0 )
-        assertMatch = re.match( "^ASSERT\s+(\w+)\s+(.*)\s+(.*)\s+ONPASS\s+(.*)\s+ONFAIL\s+(.*)", line, flags=0 )
-        logMatch = re.match( "^(ERROR|INFO|DEBUG|CRITICAL|REPORT|EXACT|WARN)\s+(.*)", line, flags=0 )
-        ifloop = re.match( "IF\s+(\w+)\s*(..|\w+)\s*(.*)", line, flags=0 )
-        elseloopMatch = re.match( "ELSE\s*$", line, flags=0 )
-        elifloop = re.match( "ELSE\sIF\s+(\w+)\s*(..|\w+)\s*(.*)", line, flags=0 )
-        forloopMatch = re.match( "\s*REPEAT\s+(/d+)\s+TIMES", line, flags=0 )
-        experimentalMatch = re.match( "EXPERIMENTAL\s+MODE\s+(\w+)", line, flags=0 )
-        repeatMatch = re.match( "\s*REPEAT\s+(\d+)\s+TIMES", line, flags=0 )
+        storeMatch = re.match("^STORE\s+(.*)\s+IN\s+(.*)",line,flags=0)
+        variableMatch = re.match("^(.*)\s+=\s+(.*)",line,flags=0)
+        assertMatch = re.match("^ASSERT\s+(\w+)\s+(.*)\s+(.*)\s+ONPASS\s+(.*)\s+ONFAIL\s+(.*)",line,flags=0)
+        logMatch = re.match("^(ERROR|INFO|DEBUG|CRITICAL|REPORT|EXACT|WARN)\s+(.*)",line,flags=0)
+        ifloop = re.match("IF\s+(\w+)\s*(..|\w+)\s*(.*)",line,flags=0)
+        elseloopMatch = re.match("ELSE\s*$",line,flags=0)
+        elifloop = re.match("ELSE\sIF\s+(\w+)\s*(..|\w+)\s*(.*)",line,flags=0)
+        forloopMatch = re.match("\s*REPEAT\s+(/d+)\s+TIMES",line,flags=0)
+        experimentalMatch = re.match("EXPERIMENTAL\s+MODE\s+(\w+)",line,flags=0)
+        repeatMatch = re.match("\s*REPEAT\s+(\d+)\s+TIMES", line, flags=0)
 
-        response_pasrse = re.match( "\s*PARSE\s+(\w+)\s+AS\s+(\w+)\s+INTO\s+(\w+)", line, flags=0 )
+        response_pasrse = re.match("\s*PARSE\s+(\w+)\s+AS\s+(\w+)\s+INTO\s+(\w+)", line, flags=0)
 
-        if caseMatch:
+        if caseMatch :
             self.CurrentStep = 0
-            self.CurrentCase = "CASE" + caseMatch.group( 1 )
-            resultString = resultString + self.translate_case_block( casenumber=caseMatch.group( 1 ) )
+            self.CurrentCase = "CASE" + caseMatch.group(1)
+            resultString = resultString + self.translate_case_block(casenumber=caseMatch.group(1))
         elif repeatMatch:
-            resultString = resultString + indent + self.translate_repeat( repeat=repeatMatch.group( 1 ) )
-        elif nameMatch:
-            resultString = resultString + indent + self.translate_testcase_name( testname=nameMatch.group( 1 ) )
-        elif commentMatch:
-            resultString = resultString + indent + self.translate_comment( comment=commentMatch.group( 1 ) )
-        elif stepMatch:
+            resultString = resultString + indent + self.translate_repeat(repeat=repeatMatch.group(1))
+        elif nameMatch :
+            resultString = resultString +  indent + self.translate_testcase_name(testname=nameMatch.group(1))
+        elif commentMatch :
+            resultString = resultString + indent + self.translate_comment(comment=commentMatch.group(1))
+        elif stepMatch :
             self.CurrentStep = self.CurrentStep + 1
-            resultString = resultString + indent + self.translate_step( step=stepMatch.group( 1 ) )
-        elif connectMatch:
-            resultString = resultString + indent + self.translate_connect( component=connectMatch.group( 1 ),
-                                                                           arguments=connectMatch.group( 2 ) )
-        elif disconnectMatch:
-            resultString = resultString + indent + self.translate_disconnect( component=disconnectMatch.group( 1 ) )
-        elif ondoMatch:
-            resultString = resultString + indent + self.translate_onDOAs( component=ondoMatch.group( 1 ), action=ondoMatch.group( 2 ) )
-        elif storeMatch:
-            resultString = resultString + indent + self.translate_store( variable=storeMatch.group( 2 ),
-                                                                         value=storeMatch.group( 1 ) )
-        elif variableMatch:
-            resultString = resultString + indent + self.translate_store( variable=variableMatch.group( 1 ),
-                                                                         value=variableMatch.group( 2 ) )
-        elif assertMatch:
-            resultString = resultString + indent + self.translate_assertion( leftvalue=assertMatch.group( 1 ),
-                                                                             operator=assertMatch.group( 2 ),
-                                                                             rightvalue=assertMatch.group( 3 ),
-                                                                             onpass=assertMatch.group( 4 ),
-                                                                             onfail=assertMatch.group( 5 ) )
-        elif logMatch:
-            resultString = resultString + indent + self.translate_logs( loglevel=logMatch.group( 1 ),
-                                                                        message=logMatch.group( 2 ) )
-        elif ifloop:
+            resultString = resultString + indent + self.translate_step(step=stepMatch.group(1))
+        elif connectMatch :
+            resultString = resultString + indent + self.translate_connect(component=connectMatch.group(1),
+                                                                           arguments=connectMatch.group(2) )
+        elif disconnectMatch :
+            resultString = resultString + indent + self.translate_disconnect(component=disconnectMatch.group(1))
+        elif ondoMatch :
+            resultString = resultString + indent + self.translate_onDOAs(component=ondoMatch.group(1),action=ondoMatch.group(2))
+        elif storeMatch :
+            resultString = resultString + indent + self.translate_store(variable=storeMatch.group(2),
+                                                                         value=storeMatch.group(1))
+        elif variableMatch :
+            resultString = resultString + indent + self.translate_store(variable=variableMatch.group(1),
+                                                                         value=variableMatch.group(2))
+        elif assertMatch :
+            resultString = resultString + indent + self.translate_assertion(leftvalue=assertMatch.group(1),
+                                                                        operator=assertMatch.group(2),
+                                                                            rightvalue=assertMatch.group(3),
+                                                                            onpass=assertMatch.group(4),
+                                                                            onfail=assertMatch.group(5))
+        elif logMatch :
+            resultString = resultString + indent + self.translate_logs(loglevel=logMatch.group(1),
+                                                                        message=logMatch.group(2))
+        elif ifloop :
 
             self.initSpace = initialSpaces
-            operand = ifloop.group( 1 )
-            operator = ifloop.group( 2 )
-            value = ifloop.group( 3 )
-            resultString = resultString + indent + "if " + operand + self.translate_if_else_operator( conditionoperator=operator ) + value + ":"
+            operand = ifloop.group(1)
+            operator = ifloop.group(2)
+            value = ifloop.group(3)
+            resultString = resultString + indent + "if " + operand + self.translate_if_else_operator(conditionoperator=operator) + value + ":"
             self.flag = self.flag + 1
-        elif experimentalMatch:
-            resultString = resultString + indent + self.translate_experimental_mode( mode=experimentalMatch.group( 1 ) )
+        elif experimentalMatch :
+            resultString = resultString + indent + self.translate_experimental_mode(mode=experimentalMatch.group(1))
 
-        elif elseloopMatch:
+        elif elseloopMatch :
             if initialSpaces == self.initSpace or initialSpaces == self.outLoopSpace:
                 resultString = resultString + indent + "else :"
                 self.flag = self.flag + 1
-            else:
-                indent = " " * ( 4 + 4 * ( self.flag - 1 ) )
+            else :
+                indent = " " *(4 + 4 * (self.flag-1))
                 resultString = resultString + indent + "else :"
                 self.flag = self.flag + 1
 
-        elif elifloop:
+        elif elifloop :
 
-            operand = elifloop.group( 1 )
-            operator = elifloop.group( 2 )
-            value = elifloop.group( 3 )
+            operand = elifloop.group(1)
+            operator = elifloop.group(2)
+            value = elifloop.group(3)
             if initialSpaces == self.initSpace or initialSpaces == self.outLoopSpace:
-                resultString = resultString + indent + "elif " + operand + self.translate_if_else_operator( conditionoperator=operator ) + value + ":"
+                resultString = resultString + indent + "elif " + operand + self.translate_if_else_operator(conditionoperator=operator) + value + ":"
                 self.flag = self.flag + 1
-            else:
-                indent = " " * ( 4 + 4 * ( self.flag - 1 ) )
-                resultString = resultString + indent + "elif " + operand + self.translate_if_else_operator( conditionoperator=operator ) + value + ":"
+            else :
+                indent = " " *(4 + 4 * (self.flag-1))
+                resultString = resultString + indent + "elif " + operand + self.translate_if_else_operator(conditionoperator=operator) + value + ":"
                 self.flag = self.flag + 1
-        elif response_pasrse:
-            output_string = response_pasrse.group( 1 )
-            req_format = response_pasrse.group( 2 )
-            store_in = response_pasrse.group( 3 )
-            resultString = resultString + indent + store_in + '= main.response_parser(' + output_string + ",\"" + req_format + "\")"
+        elif response_pasrse :
+            output_string = response_pasrse.group(1)
+            req_format = response_pasrse.group(2)
+            store_in = response_pasrse.group(3)
+            resultString = resultString + indent + store_in +'= main.response_parser('+output_string+",\""+req_format+"\")"
             self.flag = self.flag + 1
 
         return resultString
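# A minimal sketch of the dispatch above: every OpenSpeak line is probed with
# the re patterns and the first match decides which translate_* helper runs;
# the three sample lines are hypothetical.
import re

for line in ['CASE 1', 'NAME "Start up the test"', 'STEP "Check the topology"']:
    if re.search(r"^CASE\s+(\d+)", line):
        print("case block : " + line)
    elif re.match(r"^NAME\s+\"(.*)\"", line):
        print("case name  : " + line)
    elif re.match(r"^STEP\s+\"(.*)\"", line):
        print("step       : " + line)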
 
-    def translate_repeat( self, **repeatStatement ):
-        """
+    def translate_repeat(self,**repeatStatement):
+        '''
         This will translate the repeat statement into an equivalent Python while loop.
-        """
-        args = self.parse_args( [ "REPEAT" ], **repeatStatement )
+        '''
+
+        args = self.parse_args(["REPEAT"],**repeatStatement)
         resultString = ''
 
         resultString = "i = 0"
-        resultString = resultString + "\n" + " " * 8 + "while i<" + args[ "REPEAT" ] + " :"
+        resultString = resultString + "\n" + " " * 8 +"while i<" + args["REPEAT"] + " :"
         return resultString
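# A minimal sketch of the REPEAT translation above: a hypothetical line
#     REPEAT 5 TIMES
# becomes a counter initialisation plus a while loop in the generated script.
generated = "i = 0" + "\n" + " " * 8 + "while i<" + "5" + " :"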
 
-    def translate_if_else_operator( self, **loopBlock ):
-        """
+    def translate_if_else_operator(self,**loopBlock):
+        '''
          This method will translate an if-else loop block into its equivalent Python code.
          The whole loop block will be passed in via the loopBlock list.
          It returns the translated result as a string.
-        """
-        args = self.parse_args( [ "CONDITIONOPERATOR" ], **loopBlock )
+        '''
+        args = self.parse_args(["CONDITIONOPERATOR"],**loopBlock)
         resultString = ''
         # process the loopBlock List translate all statements underlying the given loop block
-        equalsMatch = re.match( "EQUALS$|==\s*$", args[ "CONDITIONOPERATOR" ], flags=0 )
-        greaterMatch = re.match( "GREATER\s+THAN$|>\s*$", args[ "CONDITIONOPERATOR" ], flags=0 )
-        lesserMatch = re.match( "LESSER\s+THAN$|<\s*$", args[ "CONDITIONOPERATOR" ], flags=0 )
-        greaterEqualMatch = re.match( "GREATER\s+THAN\s+OR\s+EQUALS$|>=\s*$", args[ "CONDITIONOPERATOR" ], flags=0 )
-        lesserEqualMatch = re.match( "LESSER\s+THAN\s+OR\s+EQUALS$|<=\s*$", args[ "CONDITIONOPERATOR" ], flags=0 )
-        if equalsMatch:
+        equalsMatch = re.match("EQUALS$|==\s*$",args["CONDITIONOPERATOR"],flags=0)
+        greaterMatch = re.match("GREATER\s+THAN$|>\s*$",args["CONDITIONOPERATOR"],flags=0)
+        lesserMatch = re.match("LESSER\s+THAN$|<\s*$",args["CONDITIONOPERATOR"],flags=0)
+        greaterEqualMatch =  re.match("GREATER\s+THAN\s+OR\s+EQUALS$|>=\s*$",args["CONDITIONOPERATOR"],flags=0)
+        lesserEqualMatch = re.match("LESSER\s+THAN\s+OR\s+EQUALS$|<=\s*$",args["CONDITIONOPERATOR"],flags=0)
+        if equalsMatch :
             resultString = resultString + " == "
-        elif greaterMatch:
+        elif greaterMatch :
             resultString = resultString + " > "
-        elif lesserMatch:
+        elif lesserMatch :
             resultString = resultString + " < "
         elif greaterEqualMatch:
             resultString = resultString + " >= "
-        elif lesserEqualMatch:
+        elif lesserEqualMatch :
             resultString = resultString + " <= "
-        else:
+        else :
             print "\n Error: Given Operator is not listed "
 
         return resultString
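# A minimal sketch of the keyword-to-operator mapping implemented above; both
# the word form and the symbol form are accepted, so a hypothetical
#     IF count GREATER THAN 3
# line ends up as "if count > 3:" in the generated script.
operator_map = {"EQUALS": "==",
                "GREATER THAN": ">",
                "LESSER THAN": "<",
                "GREATER THAN OR EQUALS": ">=",
                "LESSER THAN OR EQUALS": "<="}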
 
-    def translate_experimental_mode( self, **modeType ):
-        """
+    def translate_experimental_mode(self,**modeType):
+        '''
         This API will translate the statement EXPERIMENTAL MODE ON/OFF into its Python equivalent.
         It will return the translated value.
-         """
-        args = self.parse_args( [ "MODE" ], **modeType )
+         '''
+        args = self.parse_args(["MODE"],**modeType)
         resultString = ''
-        ONmatch = re.match( "\s*ON", args[ "MODE" ], flags=0 )
-        OFFmatch = re.match( "\sOFF", args[ "MODE" ], flags=0 )
+        ONmatch = re.match("\s*ON",args["MODE"],flags=0)
+        OFFmatch = re.match("\sOFF",args["MODE"],flags=0)
 
-        if ONmatch:
+        if ONmatch :
             resultString = "main.EXPERIMENTAL_MODE = main.TRUE"
-        elif OFFmatch:
+        elif OFFmatch :
             resultString = "main.EXPERIMENTAL_MODE = main.FALSE"
 
         return resultString
 
-    def interpret( self, **interpetParameters ):
-        """
+    def interpret(self,**interpetParameters):
+        '''
          This method will accept OpenSpeak syntax as a string and will return
          the equivalent Python statement.
-        """
-        args = self.parse_args( [ "TEXT", "WRITETOFILE" ], **interpetParameters )
-        resultString = ''
-        # here Open Speak syntax will be translated into python equivalent.
-        resultString = self.verify_and_translate( args[ "TEXT" ] )
-        lineSpace = re.match( "^\s+", resultString, flags=0 )
+        '''
 
-        resultString = re.sub( "^\s+", "", resultString ) if lineSpace else resultString
+        args = self.parse_args(["TEXT","WRITETOFILE"],**interpetParameters)
+        resultString = ''
+        ## here Open Speak syntax will be translated into python equivalent.
+        resultString = self.verify_and_translate(args["TEXT"])
+        lineSpace = re.match("^\s+",resultString,flags=0)
+
+        resultString = re.sub("^\s+","",resultString) if lineSpace else resultString
         return resultString
 
-    def translate_logs( self, **logStatement ):
-        """
+    def translate_logs(self,**logStatement):
+        '''
          This will translate the OpenSpeak log message statements into their Python equivalent
          in resultString and return resultString.
-        """
-        args = self.parse_args( [ "LOGLEVEL", "MESSAGE" ], **logStatement )
+        '''
+        args = self.parse_args(["LOGLEVEL","MESSAGE"],**logStatement)
         resultString = ''
         # convert the statement here
-        message = self.translate_log_message( message=args[ "MESSAGE" ] )
-        if args[ "LOGLEVEL" ] == "INFO":
+        message = self.translate_log_message(message=args["MESSAGE"])
+        if args["LOGLEVEL"] == "INFO" :
             resultString = resultString + "main.log.info(" + message + ")"
-        elif args[ "LOGLEVEL" ] == "ERROR":
-            resultString = resultString + "main.log.error(" + message + ")"
-        elif args[ "LOGLEVEL" ] == "DEBUG":
+        elif args["LOGLEVEL"] == "ERROR" :
+            resultString = resultString + "main.log.error(" + message  + ")"
+        elif args["LOGLEVEL"] == "DEBUG" :
             resultString = resultString + "main.log.debug(" + message + ")"
-        elif args[ "LOGLEVEL" ] == "REPORT":
+        elif args["LOGLEVEL"] == "REPORT" :
             resultString = resultString + "main.log.report(" + message + ")"
-        elif args[ "LOGLEVEL" ] == "CRITICAL":
+        elif args["LOGLEVEL"] == "CRITICAL" :
             resultString = resultString + "main.log.critical(" + message + ")"
-        elif args[ "LOGLEVEL" ] == "WARN":
-            resultString = resultString + "main.log.warn(" + args[ "MESSAGE" ] + ")"
-        elif args[ "LOGLEVEL" ] == "EXACT":
-            resultString = resultString + "main.log.exact(" + args[ "MESSAGE" ] + ")"
+        elif args["LOGLEVEL"] == "WARN" :
+            resultString = resultString + "main.log.warn(" + args["MESSAGE"] + ")"
+        elif args["LOGLEVEL"] == "EXACT" :
+            resultString = resultString + "main.log.exact(" + args["MESSAGE"] + ")"
+
 
         return resultString
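# A minimal sketch of the log translation above; the OpenSpeak lines on the
# left are hypothetical, the right-hand side is the call emitted for them.
examples = [
    ('INFO "starting the case"',  'main.log.info("starting the case")'),
    ('ERROR "pingall failed"',    'main.log.error("pingall failed")'),
    ('REPORT "case 1 finished"',  'main.log.report("case 1 finished")'),
]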
 
-    def translate_log_message( self, **messageStatement ):
-        """
+    def translate_log_message(self,**messageStatement) :
+        '''
          This API will translate a log message whether it is a string, a variable, or a combination
          of string and variable.
          It will return the analysed and translated message.
-        """
-        args = self.parse_args( [ "MESSAGE" ], **messageStatement )
+        '''
+        args = self.parse_args(["MESSAGE"],**messageStatement)
         resultString = ''
 
-        paramsMatch = re.match( "PARAMS\[(.*)\]|STEP\[(.*)\]|TOPO\[(.*)\]|CASE\[(.*)\]|LAST_RESULT|LAST_RESPONSE", args[ "MESSAGE" ], flags=0 )
-        stringMatch = re.match( "\s*\"( .* )\"\s*$", args[ "MESSAGE" ], flags=0 )
-        stringWidVariableMatch = re.match( "\"( .* )\"\s+\+\s+(.*)", args[ "MESSAGE" ], flags=0 )
-        varRefMatch = re.search( "\<(\w+)\>", args[ "MESSAGE" ], flags=0 )
-        if paramsMatch:
-            resultString = resultString + self.translate_parameters( parameters=args[ "MESSAGE" ] )
-        elif stringMatch:
-            resultString = resultString + args[ "MESSAGE" ]
+        paramsMatch = re.match("PARAMS\[(.*)\]|STEP\[(.*)\]|TOPO\[(.*)\]|CASE\[(.*)\]|LAST_RESULT|LAST_RESPONSE",args["MESSAGE"],flags=0)
+        stringMatch = re.match("\s*\"(.*)\"\s*$",args["MESSAGE"],flags=0)
+        stringWidVariableMatch = re.match("\"(.*)\"\s+\+\s+(.*)",args["MESSAGE"],flags=0)
+        varRefMatch = re.search("\<(\w+)\>",args["MESSAGE"],flags=0)
+        if paramsMatch :
+            resultString = resultString + self.translate_parameters(parameters=args["MESSAGE"])
+        elif stringMatch :
+            resultString = resultString + args["MESSAGE"]
         elif stringWidVariableMatch:
-            quoteWord = stringWidVariableMatch.group( 1 )
-            variableRef = stringWidVariableMatch.group( 2 )
-            varMatch = re.search( "PARAMS\[(.*)\]|STEP\[(.*)\]|TOPO\[(.*)\]|CASE\[(.*)\]", variableRef, flags=0 )
-            varRefMatch = re.search( "\<(\w+)\>", variableRef, flags=0 )
-            if varMatch:
-                resultString = resultString + "\"" + quoteWord + "\"" + " + " + self.translate_parameters( parameters=variableRef )
-            elif varRefMatch:
-                resultString = resultString + "\"" + quoteWord + "\"" + " + " + varRefMatch.group( 1 )
+            quoteWord = stringWidVariableMatch.group(1)
+            variableRef = stringWidVariableMatch.group(2)
+            varMatch = re.search("PARAMS\[(.*)\]|STEP\[(.*)\]|TOPO\[(.*)\]|CASE\[(.*)\]",variableRef,flags=0)
+            varRefMatch = re.search("\<(\w+)\>",variableRef,flags=0)
+            if varMatch :
+                resultString = resultString + "\"" + quoteWord + "\"" + " + " + self.translate_parameters(parameters=variableRef)
+            elif varRefMatch :
+                resultString = resultString + "\"" + quoteWord + "\"" +  " + " + varRefMatch.group(1)
         elif varRefMatch:
-            resultString = resultString + varRefMatch.group( 1 )
-        else:
-            print "\nError : Syntax error , Not defined way to give log message" + args[ "MESSAGE" ]
+            resultString = resultString + varRefMatch.group(1)
+        else :
+            print "\nError : Syntax error , Not defined way to give log message" + args["MESSAGE"]
 
         return resultString
 
-    def translate_assertion( self, **assertStatement ):
-        """
+    def translate_assertion(self,**assertStatement):
+        '''
          This will translate the ASSERT <value1> <COMPARISON OPERATOR> <value2> statement into its Python
          equivalent in resultString and return resultString.
-        """
-        args = self.parse_args( [ "LEFTVALUE", "OPERATOR", "RIGHTVALUE", "ONPASS", "ONFAIL" ], **assertStatement )
+        '''
+        args = self.parse_args(["LEFTVALUE","OPERATOR","RIGHTVALUE","ONPASS","ONFAIL"],**assertStatement)
         resultString = ''
         # convert the statement here
-        notOperatorMatch = re.search( "NOT\s+(.*)", args[ "OPERATOR" ], flags=0 )
-        notOperatorSymbMatch = re.search( "\!(.*)", args[ "OPERATOR" ], flags=0 )
+        notOperatorMatch = re.search("NOT\s+(.*)",args["OPERATOR"],flags=0)
+        notOperatorSymbMatch = re.search("\!(.*)",args["OPERATOR"],flags=0)
         operator = ''
-        lastresultMatch = re.match( "LAST_RESULT", args[ "RIGHTVALUE" ], flags=0 )
-        lastresponseMatch = re.match( "LAST_RESPONSE", args[ "RIGHTVALUE" ], flags=0 )
-        if lastresultMatch:
+        lastresultMatch = re.match("LAST_RESULT",args["RIGHTVALUE"],flags=0)
+        lastresponseMatch = re.match("LAST_RESPONSE",args["RIGHTVALUE"],flags=0)
+        if lastresultMatch :
             operator = "main.last_result"
-        elif lastresponseMatch:
+        elif lastresponseMatch :
             operator = "main.last_response"
-        else:
-            operator = args[ "RIGHTVALUE" ]
+        else :
+            operator = args["RIGHTVALUE"]
 
-        if args[ "OPERATOR" ] is None or args[ "OPERATOR" ] == "":
+        if args["OPERATOR"] == None or args["OPERATOR"] == "" :
             print "\n Error : Operator has not been specified !!!"
         elif notOperatorMatch or notOperatorSymbMatch:
 
-            operators = notOperatorMatch.group( 1 ) if notOperatorMatch else notOperatorSymbMatch.group( 1 )
-            operators = self.translate_operator( operator=operators )
-            if self.grtrOrLssr == 0:
+            operators = notOperatorMatch.group(1) if notOperatorMatch else notOperatorSymbMatch.group(1)
+            operators = self.translate_operator(operator=operators)
+            if self.grtrOrLssr == 0 :
                 resultString = resultString + "utilities.assert_not_" + operators + "(expect=" +\
-                               self.translate_response_result( operator=args[ "RIGHTVALUE" ] ) + ",actual=" + self.translate_response_result( operator=args[ "LEFTVALUE" ] ) +\
-                               ",onpass=" + self.translate_assertMessage( message=args[ "ONPASS" ] ) +\
-                               ",onfail=" + self.translate_assertMessage( message=args[ "ONFAIL" ] ) + ")"
-            else:
+                               self.translate_response_result(operator=args["RIGHTVALUE"]) + ",actual=" + self.translate_response_result(operator=args["LEFTVALUE"]) +\
+                               ",onpass=" + self.translate_assertMessage(message=args["ONPASS"]) +\
+                               ",onfail=" + self.translate_assertMessage(message=args["ONFAIL"]) + ")"
+            else :
                 resultString = resultString + "utilities.assert_not_" + operators + "(expect=" +\
-                               self.translate_response_result( operator=args[ "LEFTVALUE" ] ) + ",actual=" + self.translate_response_result( operator=args[ "RIGHTVALUE" ] ) +\
-                               ",onpass=" + self.translate_assertMessage( message=args[ "ONPASS" ] ) +\
-                               ",onfail=" + self.translate_assertMessage( message=args[ "ONFAIL" ] ) + ")"
+                               self.translate_response_result(operator=args["LEFTVALUE"]) + ",actual=" + self.translate_response_result(operator=args["RIGHTVALUE"]) +\
+                               ",onpass=" + self.translate_assertMessage(message=args["ONPASS"]) +\
+                               ",onfail=" + self.translate_assertMessage(message=args["ONFAIL"]) + ")"
 
-        else:
-            operators = self.translate_operator( operator=args[ "OPERATOR" ] )
-            if self.grtrOrLssr == 0:
+        else :
+            operators = self.translate_operator(operator=args["OPERATOR"])
+            if self.grtrOrLssr == 0 :
                 resultString = resultString + "utilities.assert_" + operators + "(expect=" +\
-                               self.translate_response_result( operator=args[ "RIGHTVALUE" ] ) +\
-                               ",actual=" + self.translate_response_result( operator=args[ "LEFTVALUE" ] ) +\
-                               ",onpass=" + self.translate_assertMessage( message=args[ "ONPASS" ] ) +\
-                               ",onfail=" + self.translate_assertMessage( message=args[ "ONFAIL" ] ) + ")"
-            else:
+                               self.translate_response_result(operator=args["RIGHTVALUE"]) +\
+                               ",actual=" + self.translate_response_result(operator=args["LEFTVALUE"]) +\
+                               ",onpass=" + self.translate_assertMessage(message=args["ONPASS"]) +\
+                               ",onfail=" + self.translate_assertMessage(message=args["ONFAIL"]) + ")"
+            else :
                 resultString = resultString + "utilities.assert_" + operators + "(expect=" +\
-                               self.translate_response_result( operator=args[ "LEFTVALUE" ] ) +\
-                               ",actual=" + self.translate_response_result( operator=args[ "RIGHTVALUE" ] ) +\
-                               ",onpass=" + self.translate_assertMessage( message=args[ "ONPASS" ] ) +\
-                               ",onfail=" + self.translate_assertMessage( message=args[ "ONFAIL" ] ) + ")"
+                               self.translate_response_result(operator=args["LEFTVALUE"]) +\
+                               ",actual=" + self.translate_response_result(operator=args["RIGHTVALUE"]) +\
+                               ",onpass=" + self.translate_assertMessage(message=args["ONPASS"]) +\
+                               ",onfail=" + self.translate_assertMessage(message=args["ONFAIL"]) + ")"
+
 
         return resultString
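# A minimal sketch of the ASSERT translation above, assuming translate_operator()
# maps the keyword EQUALS to "equals"; the OpenSpeak line is hypothetical.
openspeak_line = 'ASSERT result EQUALS main.TRUE ONPASS "ping works" ONFAIL "ping broke"'
generated_call = ('utilities.assert_equals(expect=main.TRUE,actual=result,'
                  'onpass="ping works",onfail="ping broke")')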
 
-    def translate_response_result( self, **operatorStatement ):
-        """
+    def translate_response_result(self,**operatorStatement):
+        '''
          It will translate the LAST_RESPONSE or LAST_RESULT statement into its equivalent.
          It returns the translated form in resultString.
-        """
-        args = self.parse_args( [ "OPERATOR" ], **operatorStatement )
+        '''
+        args = self.parse_args(["OPERATOR"],**operatorStatement)
         resultString = ''
-        lastResultMatch = re.match( "LAST_RESULT", args[ "OPERATOR" ], flags=0 )
-        lastResponseMatch = re.match( "LAST_RESPONSE", args[ "OPERATOR" ], flags=0 )
-        if lastResultMatch:
+        lastResultMatch = re.match("LAST_RESULT",args["OPERATOR"],flags=0)
+        lastResponseMatch = re.match("LAST_RESPONSE",args["OPERATOR"],flags=0)
+        if lastResultMatch :
             resultString = resultString + "main.last_result"
         elif lastResponseMatch:
             resultString = resultString + "main.last_response"
-        else:
-            resultString = resultString + args[ "OPERATOR" ]
+        else :
+            resultString = resultString + args["OPERATOR"]
         return resultString
 
-    def translate_assertMessage( self, **messageStatement ):
-        """
+
+    def translate_assertMessage(self,**messageStatement) :
+        '''
          This API will facilitate the translation of assert ONPASS or ONFAIL messages. The message can be
          a string or calling another API in OpenSpeak syntax.
          It will return the translated message
-        """
-        args = self.parse_args( [ "MESSAGE" ], **messageStatement )
+        '''
+        args = self.parse_args(["MESSAGE"],**messageStatement)
 
-        connectMatch = re.search( "CONNECT\s+(\w+)\s+USING\s+(.*)", args[ "MESSAGE" ], flags=0 )
-        disconnectMatch = re.search( "DISCONNECT\s+(.*)", args[ "MESSAGE" ], flags=0 )
-        ondoMatch = re.search( "ON\s+(.*)\s+DO\s+(.*)", args[ "MESSAGE" ], flags=0 )
-        paramsMatch = re.search( "PARAMS\[(.*)\]|STEP\[(.*)\]|TOPO\[(.*)\]|CASE\[(.*)\]", args[ "MESSAGE" ], flags=0 )
-        stringMatch = re.search( "\"( .* )\"|\'(.*)\'", args[ "MESSAGE" ], flags=0 )
-        variableMatch = re.search( "\<(.*)\>", args[ "MESSAGE" ], flags=0 )
+        connectMatch = re.search("CONNECT\s+(\w+)\s+USING\s+(.*)",args["MESSAGE"],flags=0)
+        disconnectMatch = re.search("DISCONNECT\s+(.*)",args["MESSAGE"],flags=0)
+        ondoMatch = re.search("ON\s+(.*)\s+DO\s+(.*)",args["MESSAGE"],flags=0)
+        paramsMatch = re.search("PARAMS\[(.*)\]|STEP\[(.*)\]|TOPO\[(.*)\]|CASE\[(.*)\]",args["MESSAGE"],flags=0)
+        stringMatch = re.search("\"(.*)\"|\'(.*)\'",args["MESSAGE"],flags=0)
+        variableMatch = re.search("\<(.*)\>",args["MESSAGE"],flags=0)
 
         resultString = ''
-        if connectMatch:
-            resultString = resultString + self.translate_connect( component=connectMatch.group( 1 ),
-                                                                  arguments=connectMatch.group( 2 ) )
-        elif disconnectMatch:
-            resultString = resultString + self.translate_disconnect( component=disconnectMatch.group( 1 ) )
-        elif ondoMatch:
-            resultString = resultString + self.translate_onDOAs( component=ondoMatch.group( 1 ),
-                                                                 action=ondoMatch.group( 2 ) )
-        elif paramsMatch:
-            resultString = resultString + self.translate_parameters( parameters=args[ "MESSAGE" ] )
-        elif stringMatch:
+        if connectMatch :
+            resultString = resultString + self.translate_connect(component=connectMatch.group(1),
+                                                                 arguments=connectMatch.group(2) )
+        elif disconnectMatch :
+            resultString = resultString + self.translate_disconnect(component=disconnectMatch.group(1))
+        elif ondoMatch :
+            resultString = resultString + self.translate_onDOAs(component=ondoMatch.group(1),
+                                                                action=ondoMatch.group(2))
+        elif paramsMatch :
+            resultString = resultString + self.translate_parameters(parameters=args["MESSAGE"])
+        elif stringMatch :
             resultString = resultString + "\"" + stringMatch.group(1) + "\""
-        elif variableMatch:
-            resultString = resultString + variableMatch.group( 1 )
-        elif args[ "MESSAGE" ] is None:
+        elif variableMatch :
+            resultString = resultString + variableMatch.group(1)
+        elif args["MESSAGE"]  == None :
             print "\n Error : Please pass a message or action for assertion "
 
         return resultString
 
-    def translate_operator( self, **operatorStatement ):
-        """
+    def translate_operator(self,**operatorStatement) :
+        '''
           It will translate the assertion operator by matching it against the supported operator keywords.
           It will return the translated assertion operator.
-        """
-        args = self.parse_args( [ "OPERATOR" ], **operatorStatement )
+        '''
+        args = self.parse_args(["OPERATOR"],**operatorStatement)
 
         resultString = ''
-        equalsMatch = re.match( "EQUALS$|==$", args[ "OPERATOR" ], flags=0 )
-        greaterMatch = re.match( "GREATER\s+THAN$|>$", args[ "OPERATOR" ], flags=0 )
-        lesserMatch = re.match( "LESSER\s+THAN$|<$", args[ "OPERATOR" ], flags=0 )
-        stringMatch = re.match( "MATCHES|~$", args[ "OPERATOR" ], flags=0 )
-        greaterEqualMatch = re.match( "GREATER\s+THAN\s+OR\s+EQUALS$|>=$", args[ "OPERATOR" ], flags=0 )
-        lesserEqualMatch = re.match( "LESSER\s+THAN\s+OR\s+EQUALS$|<=$", args[ "OPERATOR" ], flags=0 )
-        if equalsMatch:
+        equalsMatch = re.match("EQUALS$|==$",args["OPERATOR"],flags=0)
+        greaterMatch = re.match("GREATER\s+THAN$|>$",args["OPERATOR"],flags=0)
+        lesserMatch = re.match("LESSER\s+THAN$|<$",args["OPERATOR"],flags=0)
+        stringMatch = re.match("MATCHES|~$",args["OPERATOR"],flags=0)
+        greaterEqualMatch =  re.match("GREATER\s+THAN\s+OR\s+EQUALS$|>=$",args["OPERATOR"],flags=0)
+        lesserEqualMatch = re.match("LESSER\s+THAN\s+OR\s+EQUALS$|<=$",args["OPERATOR"],flags=0)
+        if equalsMatch :
 
             resultString = resultString + "equals"
-        elif greaterMatch:
+        elif greaterMatch :
             self.grtrOrLssr = self.grtrOrLssr + 1
             resultString = resultString + "greater"
-        elif lesserMatch:
+        elif lesserMatch :
             self.grtrOrLssr = self.grtrOrLssr + 1
             resultString = resultString + "lesser"
-        elif stringMatch:
+        elif stringMatch :
 
             resultString = resultString + "matches"
         elif greaterEqualMatch:
 
             resultString = resultString + "greater_equals"
-        elif lesserEqualMatch:
+        elif lesserEqualMatch :
 
             resultString = resultString + "lesser_equals"
-        else:
+        else :
             print "\n Error: Given Operator is not listed for assertion"
         return resultString
 
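For reference, a minimal sketch of the operator keywords this method recognises and the assert suffixes it returns, derived from the matches above; ospk stands for a hypothetical openspeak.OpenSpeak() instance, not part of this change:

    # "EQUALS" / "=="                  -> "equals"
    # "GREATER THAN" / ">"             -> "greater"        (also increments self.grtrOrLssr)
    # "LESSER THAN" / "<"              -> "lesser"         (also increments self.grtrOrLssr)
    # "MATCHES" / "~"                  -> "matches"
    # "GREATER THAN OR EQUALS" / ">="  -> "greater_equals"
    # "LESSER THAN OR EQUALS" / "<="   -> "lesser_equals"
    print ospk.translate_operator( operator="==" )   # prints: equals
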
-    def translate_store( self, **storeStatement ):
-        """
+    def translate_store(self,**storeStatement):
+        '''
          This will translate the STORE <variable> IN <value> or <variable> = <value>
          into python equivalent to resultString and returns resultString
-        """
-        args = self.parse_args( [ "VARIABLE", "VALUE" ], **storeStatement )
+        '''
+        args = self.parse_args(["VARIABLE","VALUE"],**storeStatement)
         resultString = ''
         # convert the statement here
-        ondoMatch = re.match( "^\s*ON\s+(.*)\s+DO\s+(.*)", args[ "VALUE" ], flags=0 )
-        paramsMatch = re.match( "^\s*PARAMS\[(.*)\]|STEP\[(.*)\]|TOPO\[(.*)\]|CASE\[(.*)\]|LAST_RESULT|LAST_RESPONSE", args[ "VALUE" ], flags=0 )
-        if paramsMatch:
-            argString = self.translate_parameters( parameters=args[ "VALUE" ] )
-            resultString = args[ "VARIABLE" ] + " = " + argString
-        elif ondoMatch:
-            resultString = args[ "VARIABLE" ] + " = " + self.translate_onDOAs( component=ondoMatch.group( 1 ), action=ondoMatch.group( 2 ) )
-        else:
-            resultString = args[ "VARIABLE" ] + " = " + args[ "VALUE" ]
+        ondoMatch = re.match("^\s*ON\s+(.*)\s+DO\s+(.*)",args["VALUE"],flags=0)
+        paramsMatch = re.match("^\s*PARAMS\[(.*)\]|STEP\[(.*)\]|TOPO\[(.*)\]|CASE\[(.*)\]|LAST_RESULT|LAST_RESPONSE",args["VALUE"],flags=0)
+        if paramsMatch :
+            argString = self.translate_parameters(parameters=args["VALUE"])
+            resultString = args["VARIABLE"] + " = " + argString
+        elif ondoMatch :
+            resultString = args["VARIABLE"] + " = "  + self.translate_onDOAs(component=ondoMatch.group(1),action=ondoMatch.group(2))
+        else :
+            resultString = args["VARIABLE"] + " = " + args["VALUE"]
+
 
         return resultString
 
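A minimal sketch of the mapping this method produces; the variable names, the PARAMS path and the ospk instance (a hypothetical openspeak.OpenSpeak() object) are illustrative only:

    print ospk.translate_store( variable="ip", value="PARAMS['CASE1']['host']" )
    # -> ip = main.params['CASE1']['host']
    print ospk.translate_store( variable="count", value="5" )
    # -> count = 5
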
-    def translate_disconnect( self, **disconnectStatement ):
-        """
+    def translate_disconnect(self,**disconnectStatement):
+        '''
          This will translate the DISCONNECT <component_name> into python
          equivalent to resultString and returns resultString
-        """
-        args = self.parse_args( [ "COMPONENT" ], **disconnectStatement )
+        '''
+        args = self.parse_args(["COMPONENT"],**disconnectStatement)
         resultString = ''
         # convert the statement here
-        resultString = "main." + args[ "COMPONENT" ] + ".disconnect()"
+        resultString = "main." + args["COMPONENT"] + ".disconnect()"
         return resultString
 
-    def translate_onDOAs( self, **onDoStatement ):
-        """
+    def translate_onDOAs(self,**onDoStatement):
+        '''
          This will translate the ON <component> DO <action> USING <arg1> AS <value1>,<arg2> AS <value2>
          into python equivalent to resultString and returns resultString
-        """
-        args = self.parse_args( [ "COMPONENT", "ACTION", "ARGUMENTS" ], **onDoStatement )
+        '''
+        args = self.parse_args(["COMPONENT","ACTION","ARGUMENTS"],**onDoStatement)
         subString = ''
 
-        usingMatch = re.match( "\s*(.*)\s+USING\s+(.*)", args[ "ACTION" ], flags=0 )
+        usingMatch = re.match("\s*(.*)\s+USING\s+(.*)",args["ACTION"],flags=0)
         action = ''
-        if usingMatch:
-            action = usingMatch.group( 1 )
-            arguments = usingMatch.group( 2 )
-            subString = self.translate_usingas( arguments=arguments )
+        if usingMatch :
+            action = usingMatch.group(1)
+            arguments = usingMatch.group(2)
+            subString = self.translate_usingas(arguments=arguments)
 
-        else:
-            andCheck = re.search( "(.*)\s+AND\s+(.*)", args[ "ACTION" ], flags=0 )
+        else :
+            andCheck = re.search ("(.*)\s+AND\s+(.*)",args["ACTION"],flags=0)
 
             action = action + "()"
             if andCheck:
-                action = andCheck.group( 1 ) + "()"
-                subString = subString + self.handle_conjuction( statement=andCheck.group( 2 ) )
-            else:
-                action = args[ "ACTION" ]
+                action = andCheck.group(1) + "()"
+                subString = subString + self.handle_conjuction(statement=andCheck.group(2))
+            else :
+                action = args["ACTION"]
                 action = action + "()"
         # convert the statement here
-        resultString = "main." + args[ "COMPONENT" ] + "." + action + subString
+        resultString = "main." + args["COMPONENT"] + "." + action + subString
         return resultString
 
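A minimal sketch of the ON ... DO ... translation; the component and action names are invented, and ospk stands for a hypothetical openspeak.OpenSpeak() instance:

    print ospk.translate_onDOAs( component="ONOScli1", action="summary" )
    # -> main.ONOScli1.summary()
    print ospk.translate_onDOAs( component="ONOScli1", action="balanceMasters USING timeout AS 10" )
    # -> main.ONOScli1.balanceMasters(timeout=10)
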
-    def handle_conjuction( self, **conjuctStatement ):
-        """
+
+    def handle_conjuction(self,**conjuctStatement):
+        '''
         This will handle the conjunctions
-        """
-        args = self.parse_args( [ "STATEMENT" ], **conjuctStatement )
+        '''
+
+        args = self.parse_args(["STATEMENT"],**conjuctStatement)
         subSentence = ''
 
-        storeMatch = re.match( "\s*STORE\s+(.*)\s+IN\s+(.*)", args[ "STATEMENT" ], flags=0 )
-        assertMatch = re.match( "\s*ASSERT\s+(\w+)\s+(.*)\s+(.*)\s+ONPASS\s+(.*)\s+ONFAIL\s+(.*)", args[ "STATEMENT" ], flags=0 )
-        if storeMatch:
-            subSentence = "\n" + " " * 8 + self.translate_store( variable=storeMatch.group( 2 ),
-                                                                         value=storeMatch.group( 1 ) )
-        elif assertMatch:
-            subSentence = "\n" + " " * 8 + self.translate_assertion( leftvalue=assertMatch.group( 1 ),
-                                                                     operator=assertMatch.group( 2 ),
-                                                                     rightvalue=assertMatch.group( 3 ),
-                                                                     onpass=assertMatch.group( 4 ),
-                                                                     onfail=assertMatch.group( 5 ) )
+        storeMatch = re.match("\s*STORE\s+(.*)\s+IN\s+(.*)",args["STATEMENT"],flags=0)
+        assertMatch = re.match("\s*ASSERT\s+(\w+)\s+(.*)\s+(.*)\s+ONPASS\s+(.*)\s+ONFAIL\s+(.*)",args["STATEMENT"],flags=0)
+        if storeMatch :
+            subSentence =  "\n" + " " * 8 + self.translate_store(variable=storeMatch.group(2),
+                                                                         value=storeMatch.group(1))
+        elif assertMatch :
+            subSentence = "\n" + " " * 8 + self.translate_assertion(leftvalue=assertMatch.group(1),
+                                                                    operator=assertMatch.group(2),
+                                                                    rightvalue=assertMatch.group(3),
+                                                                    onpass=assertMatch.group(4),
+                                                                    onfail=assertMatch.group(5))
         return subSentence
 
-    def translate_usingas( self, **argumentAS ):
-        """
+    def translate_usingas(self,**argumentAS) :
+        '''
          This will translate the USING argument AS value statement into equivalent argument passing.
          It will return translated form into resultString
-        """
-        args = self.parse_args( [ "ARGUMENTS" ], **argumentAS )
+        '''
+        args = self.parse_args(["ARGUMENTS"],**argumentAS)
         resultString = ''
         argsList = []
         subString = ''
         subSentence = ''
         line = ''
-        andCheck = re.search( "(.*)\s+AND\s+(.*)", args[ "ARGUMENTS" ], flags=0 )
+        andCheck = re.search ("(.*)\s+AND\s+(.*)",args["ARGUMENTS"],flags=0)
         if andCheck:
-            line = andCheck.group( 1 )
-            subSentence = self.handle_conjuction( statement=andCheck.group( 2 ) )
-        else:
-            line = args[ "ARGUMENTS" ]
+            line = andCheck.group(1)
+            subSentence = self.handle_conjuction(statement=andCheck.group(2))
+        else :
+            line = args["ARGUMENTS"]
 
-        argsMatch = re.search( "(.*),(.*)", line, flags=0 )
 
-        if args[ "ARGUMENTS" ] is None or args[ "ARGUMENTS" ] == '':
+
+        argsMatch = re.search("(.*),(.*)",line,flags=0)
+
+
+        if args["ARGUMENTS"] == None or args["ARGUMENTS"] == '' :
             subString = ''
-        elif argsMatch:
+        elif argsMatch :
 
-            argsList = line.split( "," )
-            for index, arguments in enumerate( argsList ):
-                argMatch = re.search( "(.*)\s+AS\s+(.*)", arguments, flags=0 )
+            argsList = line.split(",")
+            for index, arguments in enumerate(argsList):
+                argMatch = re.search("(.*)\s+AS\s+(.*)",arguments,flags=0)
                 if argMatch:
-                    argsKey = argMatch.group( 1 )
-                    argsValue = argMatch.group( 2 )
-                    paramsMatch = re.search( "PARAMS\[(.*)\]|STEP\[(.*)\]|TOPO\[(.*)\]|CASE\[(.*)\]|LAST_RESPONSE|LAST_RESULT", argsValue, flags=0 )
-                    if not paramsMatch:
-                        if index == len( argsList ) - 1:
-                            subString = subString + argsKey + "=" + argsValue
-                        else:
-                            subString = subString + argsKey + "=" + argsValue + ","
-                    else:
-                        argString = self.translate_parameters( parameters=argsValue )
-                        if index == len( argsList ) - 1:
-                            subString = subString + argsKey + "=" + argString
-                        else:
-                            subString = subString + argsKey + "=" + argString + ","
-                else:
-                    if index == len( argsList ) - 1:
-                        subString = subString + arguments
-                    else:
+                    argsKey =  argMatch.group(1)
+                    argsValue = argMatch.group(2)
+                    paramsMatch = re.search("PARAMS\[(.*)\]|STEP\[(.*)\]|TOPO\[(.*)\]|CASE\[(.*)\]|LAST_RESPONSE|LAST_RESULT",argsValue,flags=0)
+                    if not paramsMatch :
+                        if index == len(argsList) - 1 :
+                            subString = subString +  argsKey + "=" + argsValue
+                        else :
+                            subString = subString +  argsKey + "=" + argsValue + ","
+                    else :
+                        argString = self.translate_parameters(parameters=argsValue)
+                        if index == len(argsList) - 1 :
+                            subString = subString +  argsKey + "=" + argString
+                        else :
+                            subString = subString +  argsKey + "=" + argString + ","
+                else :
+                    if index == len(argsList) - 1 :
+                        subString = subString +  arguments
+                    else :
                         subString = subString + arguments + ","
-        else:
-            argMatch = re.search( "(.*)\s+AS\s+(.*)", args[ "ARGUMENTS" ], flags=0 )
+        else :
+            argMatch = re.search("(.*)\s+AS\s+(.*)",args["ARGUMENTS"],flags=0)
             if argMatch:
-                argsKey = argMatch.group( 1 )
-                argsValue = argMatch.group( 2 )
-                paramsMatch = re.search( "PARAMS\[(.*)\]|STEP\[(.*)\]|TOPO\[(.*)\]|CASE\[(.*)\]|LAST_RESPONSE|LAST_RESULT", argsValue, flags=0 )
-                if not paramsMatch:
-                    subString = subString + argsKey + "=" + argsValue
-                else:
-                    argString = self.translate_parameters( parameters=argsValue )
-                    subString = subString + argsKey + "=" + argString
-            else:
-                paramsMatch = re.match( "PARAMS\[(.*)\]|STEP\[(.*)\]|TOPO\[(.*)\]|CASE\[(.*)\]|LAST_RESPONSE|LAST_RESULT", line, flags=0 )
-                if paramsMatch:
-                    subString = subString + self.translate_parameters( parameters=line )
-                else:
-                    subString = subString + line
-        resultString = "(" + subString + ")" + subSentence
+                argsKey =  argMatch.group(1)
+                argsValue = argMatch.group(2)
+                paramsMatch = re.search("PARAMS\[(.*)\]|STEP\[(.*)\]|TOPO\[(.*)\]|CASE\[(.*)\]|LAST_RESPONSE|LAST_RESULT",argsValue,flags=0)
+                if not paramsMatch :
+                    subString = subString +  argsKey + "=" + argsValue
+                else :
+                    argString = self.translate_parameters(parameters=argsValue)
+                    subString = subString +  argsKey + "=" + argString
+            else :
+                paramsMatch = re.match("PARAMS\[(.*)\]|STEP\[(.*)\]|TOPO\[(.*)\]|CASE\[(.*)\]|LAST_RESPONSE|LAST_RESULT",line,flags=0)
+                if paramsMatch :
+                    subString = subString + self.translate_parameters(parameters=line)
+                else :
+                    subString = subString +  line
+        resultString = "(" + subString + ")"+ subSentence
         return resultString
 
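A minimal sketch of how the USING <arg> AS <value> list becomes a Python argument list; the argument names and values below are invented:

    # USING user_name AS 'admin'                        -> (user_name='admin')
    # USING ip AS PARAMS['CTRL']['ip1'], port AS 8181   -> (ip=main.params['CTRL']['ip1'], port=8181)
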
-    def translate_connect( self, **connectStatement ):
-        """
+
+    def translate_connect(self,**connectStatement):
+        '''
          This will translate the CONNECT <component_name> USING <arg1> AS <value1>, <arg2> AS <value2>
          into python equivalent to resultString and returns resultString
-        """
-        args = self.parse_args( [ "COMPONENT", "ARGUMENTS" ], **connectStatement )
+        '''
+        args = self.parse_args(["COMPONENT","ARGUMENTS"],**connectStatement)
         resultString = ''
-        subString = self.translate_usingas( arguments=args[ "ARGUMENTS" ] )
+        subString = self.translate_usingas(arguments=args["ARGUMENTS"])
         # convert the statement here
-        resultString = "main." + args[ "COMPONENT" ] + ".connect(" + subString + ")"
+        resultString = "main." + args["COMPONENT"] + ".connect(" + subString + ")"
         return resultString
 
-    def translate_parameters( self, **parameterStatement ):
-        """
+
+    def translate_parameters(self,**parameterStatement):
+        '''
          This will translate the OpenSpeak Case and Params parameters into python equivalent
          to resultString and returns resultString
-        """
-        args = self.parse_args( [ "PARAMETERS" ], **parameterStatement )
-        argument = args[ "PARAMETERS" ]
+        '''
+        args = self.parse_args(["PARAMETERS"],**parameterStatement)
+        argument = args["PARAMETERS"]
         resultString = ''
-        # match arguments
-        paramsMatch = re.search( "PARAMS((\[(.*)\])*)", argument, flags=0 )
-        stepsMatch = re.search( "STEP((\[(.*)\])*)", argument, flags=0 )
-        casesMatch = re.search( "CASE((\[(.*)\])*)", argument, flags=0 )
-        topoMatch = re.search( "TOPO((\[(.*)\])*)", argument, flags=0 )
-        lastResultMatch = re.match( "LAST_RESULT", argument, flags=0 )
-        lastResponseMatch = re.match( "LAST_RESPONSE", argument, flags=0 )
+        ### match arguments
+        paramsMatch = re.search("PARAMS((\[(.*)\])*)",argument,flags=0)
+        stepsMatch = re.search("STEP((\[(.*)\])*)",argument,flags=0)
+        casesMatch = re.search("CASE((\[(.*)\])*)",argument,flags=0)
+        topoMatch = re.search("TOPO((\[(.*)\])*)",argument,flags=0)
+        lastResultMatch = re.match("LAST_RESULT",argument,flags=0)
+        lastResponseMatch = re.match("LAST_RESPONSE",argument,flags=0)
         # convert the statement here
-        if paramsMatch:
-            params = paramsMatch.group( 1 )
-            resultString = resultString + "main.params" + self._argsCheck( checkvar=params )
-        elif stepsMatch:
-            resultString = resultString + "main.params[\'" + self.CurrentCase +\
-                           "\'][\'STEP" + str( self.CurrentStep ) + "\']" +\
-                           self._argsCheck( checkvar=stepsMatch.group( 1 ) )
-        elif casesMatch:
+        if paramsMatch :
+            params = paramsMatch.group(1)
+            resultString = resultString + "main.params" + self._argsCheck(checkvar=params)
+        elif stepsMatch :
+            resultString = resultString +"main.params[\'" + self.CurrentCase +\
+                           "\'][\'STEP" + str(self.CurrentStep) + "\']" +\
+                           self._argsCheck(checkvar=stepsMatch.group(1))
+        elif casesMatch :
             resultString = resultString + "main.params[\'" + self.CurrentCase + "\']" +\
-                           self._argsCheck( checkvar=casesMatch.group( 1 ) )
-        elif topoMatch:
+                           self._argsCheck(checkvar=casesMatch.group(1))
+        elif topoMatch :
             resultString = resultString + "main.componentDictionary" +\
-                           self._argsCheck( checkvar=topoMatch.group( 1 ) )
-        elif lastResultMatch:
+                           self._argsCheck(checkvar=topoMatch.group(1))
+        elif lastResultMatch :
             resultString = resultString + "main.last_result"
-        elif lastResponseMatch:
+        elif lastResponseMatch :
             resultString = resultString + "main.last_response"
         return resultString
 
-    def _argsCheck( self, **args ):
-        """ This API will check if given argument is varibale reference or String and will translate accordingly.
+    def _argsCheck(self,**args):
+        ''' This API will check if given argument is variable reference or String and will translate accordingly.
            It will return the translated form in resultString.
-         """
-        args = self.parse_args( [ "CHECKVAR" ], **args )
-        params = args[ "CHECKVAR" ]
-        argsList = params.split( "]" )
+         '''
+        args = self.parse_args(["CHECKVAR"],**args)
+        params = args["CHECKVAR"]
+        argsList = params.split("]")
         resultString = ''
-        del argsList[ len( argsList ) - 1 ]
-        for index, paramArgs in enumerate( argsList ):
-            argsWidVariable = re.search( "(\"|\')\s*(\w+)\s*(\'|\")", paramArgs, flags=0 )
-            if argsWidVariable:
+        del argsList[len(argsList) - 1]
+        for index,paramArgs in enumerate(argsList) :
+            argsWidVariable = re.search("(\"|\')\s*(\w+)\s*(\'|\")",paramArgs,flags=0)
+            if argsWidVariable :
                 resultString = resultString + "[\'" + argsWidVariable.group(2) + "\']"
-            else:
+            else :
                 resultString = resultString + paramArgs + "]"
         return resultString
 
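A minimal sketch of the references these two helpers resolve; the key names are invented, and <CurrentCase> stands for whatever case is currently being translated:

    # PARAMS['GIT']['pull']     -> main.params['GIT']['pull']
    # CASE['host']              -> main.params['<CurrentCase>']['host']
    # TOPO['ONOScli1']['type']  -> main.componentDictionary['ONOScli1']['type']
    # LAST_RESPONSE             -> main.last_response
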
-    def translate_step( self, **stepStatement ):
-        """
+    def translate_step(self,**stepStatement):
+        '''
          This will translate the STEP "DO SOMETHING HERE" into python equivalent
          to resultString and returns resultString
-        """
-        args = self.parse_args( [ "STEP" ], **stepStatement )
+        '''
+        args = self.parse_args(["STEP"],**stepStatement)
         resultString = ''
         resultString = "main.step(\"" + args["STEP"] + "\")"
         # convert the statement here
         return resultString
 
-    def translate_comment( self, **commentStatement ):
-        """
+
+    def translate_comment(self,**commentStatement):
+        '''
          This will translate the COMMENT "DO SOMETHING HERE" into python equivalent
          to resultString and returns resultString
-        """
-        args = self.parse_args( [ "COMMENT" ], **commentStatement )
+        '''
+        args = self.parse_args(["COMMENT"],**commentStatement)
         resultString = ''
-        resultString = "#" + args[ "COMMENT" ]
+        resultString = "#" + args["COMMENT"]
         # convert the statement here
         return resultString
 
-    def translate_testcase_name( self, **nameStatement ):
-        """
+    def translate_testcase_name(self,**nameStatement):
+        '''
          This method will convert NAME "<Testcase_name>" into python equivalent statement
          to resultString and returns resultString
-        """
-        args = self.parse_args( [ "TESTNAME" ], **nameStatement )
+        '''
+        args = self.parse_args(["TESTNAME"],**nameStatement)
 
         resultString = ''
-        resultString = "main.case(\"" + args["TESTNAME"] + "\")"
+        resultString = "main.case(\"" + args["TESTNAME"]  + "\")"
         # convert the statement here
         return resultString
 
-    def translate_case_block( self, **caseBlock ):
-        """
+
+    def translate_case_block(self,**caseBlock):
+        '''
          This method will translate the case block in the test script.
          It returns the translated equivalent python code for the test script
-        """
-        args = self.parse_args( [ "CASENUMBER" ], **caseBlock )
+        '''
+        args = self.parse_args(["CASENUMBER"],**caseBlock)
         resultString = ""
-        resultString = "def CASE" + str( args[ "CASENUMBER" ] ) + "(self,main) :\n"
+        resultString = "def CASE" + str(args["CASENUMBER"]) + "(self,main) :\n"
         # process the caseBlock List translate all statements underlying the given case
         return resultString
 
-    def translate_loop_block( self, *loopBlock ):
-        """
+
+
+    def translate_loop_block(self,*loopBlock):
+        '''
          This method will translate for loop block into its equivalent python code.
          Whole loop block will be passed into loopBlock List.
          It returns the translated result as a string.
-        """
+        '''
         resultString = ''
         # process the loopBlock List translate all statements underlying the given loop block
         return resultString
 
-    def translate_conjuction( self, conjuctionStatement ):
-        """
+
+    def translate_conjuction(self,conjuctionStatement):
+        '''
          This will translate the AND conjunction statements into python equivalent
          to resultString and returns resultString
-        """
+        '''
         resultString = ''
         # convert the statement here
         return resultString
 
-    def parse_args( self, args, **kwargs ):
-        """
-        It will accept the ( key,value ) pair and will return the ( key,value ) pairs with keys in uppercase.
-        """
+
+    def parse_args(self,args, **kwargs):
+        '''
+        It will accept the (key,value) pair and will return the (key,value) pairs with keys in uppercase.
+        '''
         newArgs = {}
-        for key, value in kwargs.iteritems():
-            # currentKey =  str.upper( key )
-            if isinstance( args, list ) and str.upper( key ) in args:
+        for key,value in kwargs.iteritems():
+            #currentKey =  str.upper(key)
+            if isinstance(args,list) and str.upper(key) in args:
                 for each in args:
-                    if each == str.upper( key ):
-                        newArgs[ str( each ) ] = value
-                    elif each != str.upper( key ) and not ( ( str( each ) in newArgs ) ):
-                        newArgs[ str( each ) ] = None
+                    if each==str.upper(key):
+                        newArgs [str(each)] = value
+                    elif each != str.upper(key) and (newArgs.has_key(str(each)) == False ):
+                        newArgs[str(each)] = None
+
+
 
         return newArgs
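A minimal sketch of the kwargs normalisation parse_args performs; ospk stands for a hypothetical openspeak.OpenSpeak() instance:

    # keys are upper-cased, and every requested key that was not passed comes back as None
    print ospk.parse_args( [ "COMPONENT", "ARGUMENTS" ], component="ONOScli1" )
    # -> {'COMPONENT': 'ONOScli1', 'ARGUMENTS': None}
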
diff --git a/TestON/core/teston.py b/TestON/core/teston.py
index d6ebc89..89315dc 100644
--- a/TestON/core/teston.py
+++ b/TestON/core/teston.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python
-"""
+'''
 Created on 22-Oct-2012
-Copyright 2012 Open Networking Foundation ( ONF )
+Copyright 2012 Open Networking Foundation (ONF)
 
 Please refer questions to either the onos test mailing list at <onos-test@onosproject.org>,
 the System Testing Plans and Results wiki page at <https://wiki.onosproject.org/x/voMg>,
@@ -10,7 +10,7 @@
     TestON is free software: you can redistribute it and/or modify
     it under the terms of the GNU General Public License as published by
     the Free Software Foundation, either version 2 of the License, or
-    ( at your option ) any later version.
+    (at your option) any later version.
 
     TestON is distributed in the hope that it will be useful,
     but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -24,7 +24,8 @@
 
 teston is the main module.
 
-"""
+'''
+
 import sys
 import getpass
 import os
@@ -54,24 +55,21 @@
 from core.utilities import Utilities
 from core.Thread import Thread
 
-
 class SkipCase( Exception ):
     pass
 
-
 class TestON:
-
-    """
+    '''
     TestON will initiate the specified test.
     The main tasks are:
     * Initiate the required Component handles for the test.
     * Create Log file  Handles.
-    """
+    '''
     def __init__( self, options ):
-        """
+        '''
            Initialise the component handles specified in the topology file of
            the specified test.
-        """
+        '''
         # Initialization of the variables.
         __builtin__.main = self
         __builtin__.path = path
@@ -112,19 +110,19 @@
         verifyOptions( options )
         load_logger()
         self.componentDictionary = {}
-        self.componentDictionary = self.topology[ 'COMPONENT' ]
+        self.componentDictionary = self.topology['COMPONENT']
         self.driversList = []
-        if isinstance( self.componentDictionary, str ):
+        if isinstance( self.componentDictionary, str):
             self.componentDictionary = dict( self.componentDictionary )
 
         for component in self.componentDictionary:
-            self.driversList.append( self.componentDictionary[ component ][ 'type' ] )
+            self.driversList.append( self.componentDictionary[component]['type'] )
 
         self.driversList = list( set( self.driversList ) )  # Removing duplicates.
         # Checking the test_target option set for the component or not
         if isinstance( self.componentDictionary, dict ):
             for component in self.componentDictionary.keys():
-                if 'test_target' in self.componentDictionary[ component ].keys():
+                if 'test_target' in self.componentDictionary[component].keys():
                     self.test_target = component
 
         # Checking for the openspeak file and test script
@@ -138,20 +136,20 @@
         components_connect_order = {}
         if isinstance( self.componentDictionary, dict ):
             for component in self.componentDictionary.keys():
-                if 'connect_order' not in self.componentDictionary[ component ].keys():
-                    self.componentDictionary[ component ][ 'connect_order' ] = str( self.get_random() )
-                components_connect_order[ component ] = eval( self.componentDictionary[ component ][ 'connect_order' ] )
+                if 'connect_order' not in self.componentDictionary[component].keys():
+                    self.componentDictionary[component]['connect_order'] = str( self.get_random() )
+                components_connect_order[component] = eval( self.componentDictionary[component]['connect_order'] )
             # Ordering components based on the connect order.
             ordered_component_list = sorted( components_connect_order,
-                                             key=lambda key: components_connect_order[ key ] )
+                                             key=lambda key: components_connect_order[key] )
             print ordered_component_list
             for component in ordered_component_list:
                 self.componentInit( component )
 
     def configparser( self ):
-        """
-        It will parse the config file ( teston.cfg ) and return as dictionary
-        """
+        '''
+        It will parse the config file (teston.cfg) and return as dictionary
+        '''
         matchFileName = re.match( r'(.*)\.cfg', self.configFile, re.M | re.I )
         if matchFileName:
             xml = open( self.configFile ).read()
@@ -164,57 +162,57 @@
             print "There is no such file to parse " + self.configFile
 
     def componentInit( self, component ):
-        """
+        '''
         This method will initialize specified component
-        """
+        '''
         global driver_options
         self.initiated = False
         self.log.info( "Creating component Handle: " + component )
         driver_options = {}
-        if 'COMPONENTS' in self.componentDictionary[ component ].keys():
-            driver_options = dict( self.componentDictionary[ component ][ 'COMPONENTS' ] )
-        driver_options[ 'name' ] = component
-        driverName = self.componentDictionary[ component ][ 'type' ]
-        driver_options[ 'type' ] = driverName
+        if 'COMPONENTS' in self.componentDictionary[component].keys():
+            driver_options = dict( self.componentDictionary[component]['COMPONENTS'] )
+        driver_options['name'] = component
+        driverName = self.componentDictionary[component]['type']
+        driver_options['type'] = driverName
 
         classPath = self.getDriverPath( driverName.lower() )
         driverModule = importlib.import_module( classPath )
         driverClass = getattr( driverModule, driverName )
         driverObject = driverClass()
 
-        if "OCN" in self.componentDictionary[ component ][ 'host' ] and\
+        if "OCN" in self.componentDictionary[component]['host'] and\
            main.onoscell:
-            self.componentDictionary[ component ][ 'host' ] = main.mnIP
+            self.componentDictionary[component]['host'] = main.mnIP
 
-        user_name = self.componentDictionary[ component ].get( 'user',
-                                                               getpass.getuser() )
-        ip_address = self.componentDictionary[ component ].get( 'host',
-                                                                'localhost' )
-        pwd = self.componentDictionary[ component ].get( 'password',
-                                                         'changeme' )
-        port = self.componentDictionary[ component ].get( 'port' )
+        user_name = self.componentDictionary[component].get( 'user',
+                                                             getpass.getuser() )
+        ip_address = self.componentDictionary[component].get( 'host',
+                                                              'localhost' )
+        pwd = self.componentDictionary[component].get( 'password',
+                                                       'changeme' )
+        port = self.componentDictionary[component].get( 'port' )
         connect_result = driverObject.connect( user_name=user_name,
                                                ip_address=ip_address,
                                                pwd=pwd,
                                                port=port,
-                                               options=driver_options )
+                                               options=driver_options)
 
         if not connect_result:
             self.log.error( "Exiting from the test execution because connecting to the " +
                             component + " component failed." )
             self.exit()
 
-        vars( self )[ component ] = driverObject
+        vars( self )[component] = driverObject
         self.initiated = True
         return driverObject
 
     def run( self ):
-        """
+        '''
            The Execution of the test script's cases listed in the Test params
            file will be done here, and each test case result will be updated.
            This method will return main.TRUE if it executed all the test cases
            successfully, else will return main.FALSE
-        """
+        '''
         self.testCaseResult = {}
         self.TOTAL_TC = 0
         self.TOTAL_TC_RUN = 0
@@ -272,12 +270,12 @@
         self.CASERESULT = self.NORESULT
         stopped = False
         try:
-            self.code[ self.testCaseNumber ]
+            self.code[self.testCaseNumber]
         except KeyError:
             self.log.error( "There is no Test-Case " + self.testCaseNumber )
             return self.FALSE
         self.stepCount = 0
-        while self.stepCount < len( self.code[ self.testCaseNumber ].keys() ):
+        while self.stepCount < len( self.code[self.testCaseNumber].keys() ):
             result = self.runStep( self.code, self.testCaseNumber )
             if result == self.FALSE:
                 break
@@ -301,7 +299,7 @@
                 self.CASERESULT = self.TRUE
             else:
                 self.CASERESULT = self.NORESULT
-            self.testCaseResult[ str( self.CurrentTestCaseNumber ) ] = self.CASERESULT
+            self.testCaseResult[str( self.CurrentTestCaseNumber )] = self.CASERESULT
             self.organizeResult( self.CurrentTestCaseNumber, self.CASERESULT )
             self.logger.updateCaseResults( self )
             self.log.wiki( "<p>" + self.caseExplanation + "</p>" )
@@ -352,7 +350,7 @@
                 # NOTE: This is needed to catch results of main.step()'s
                 #       called inside functions or loops
                 self.stepResults = ( [], [], [], [] )
-                exec code[ testCaseNumber ][ step ] in module.__dict__
+                exec code[testCaseNumber][step] in module.__dict__
                 self.stepCount = self.stepCount + 1
                 self.parseStepResults( testCaseNumber )
             except SkipCase:  # Raised in self.skipCase()
@@ -363,9 +361,9 @@
                 self.stepCache += "\t\t" + self.onFailMsg + "\n"
                 self.stepCount = self.stepCount + 1
                 return self.FALSE
-            except Exception as e:
+            except StandardError as e:
                 try:
-                    stepNo = self.stepResults[ 0 ][ self.stepNumber - 1 ]
+                    stepNo = self.stepResults[0][ self.stepNumber - 1 ]
                 except IndexError:
                     stepNo = "<IndexError>"
                     main.log.warn( "Error trying to get step number. " +
@@ -373,7 +371,7 @@
                                    str( self.stepNumber ) + " and step " +
                                    str( self.stepNumber + 1 ) )
                 try:
-                    stepName = self.stepResults[ 1 ][ self.stepNumber - 1 ]
+                    stepName = self.stepResults[1][ self.stepNumber - 1 ]
                 except IndexError:
                     stepName = "<IndexError>"
                 self.log.error( "\nException in the following section of" +
@@ -403,7 +401,7 @@
         if cli.stop:
             cli.stop = False
             self.TOTAL_TC_NORESULT = self.TOTAL_TC_NORESULT + 1
-            self.testCaseResult[ str( self.CurrentTestCaseNumber ) ] = "Stopped"
+            self.testCaseResult[str( self.CurrentTestCaseNumber )] = "Stopped"
             self.logger.updateCaseResults( self )
             result = self.cleanup()
             return self.FALSE
@@ -414,12 +412,12 @@
         """
         try:
             # Iterate through each of the steps and print them
-            for index in range( len( self.stepResults[ 0 ] ) ):
+            for index in range( len( self.stepResults[0] ) ):
                 # stepResults = ( stepNo, stepName, stepResult, onFail )
-                stepNo = self.stepResults[ 0 ][ index ]
-                stepName = self.stepResults[ 1 ][ index ]
-                stepResult = self.stepResults[ 2 ][ index ]
-                onFail = self.stepResults[ 3 ][ index ]
+                stepNo = self.stepResults[0][ index ]
+                stepName = self.stepResults[1][ index ]
+                stepResult = self.stepResults[2][ index ]
+                onFail = self.stepResults[3][ index ]
                 self.stepCache += "\t" + str( testCaseNumber ) + "."
                 self.stepCache += str( stepNo ) + " "
                 self.stepCache += stepName + " - "
@@ -462,10 +460,10 @@
         caseHeader = "\n" + "*" * 40 + "\nStart of Test Case" +\
                      str( self.CurrentTestCaseNumber ) + " : "
         for driver in self.componentDictionary.keys():
-            vars( self )[ driver + 'log' ].info( caseHeader )
+            vars( self )[driver + 'log'].info( caseHeader )
 
     def addCaseFooter( self ):
-        stepNo = self.stepResults[ 0 ][ -2 ]
+        stepNo = self.stepResults[0][-2]
         if stepNo > 0:
             previousStep = " " + str( self.CurrentTestCaseNumber ) + "." +\
                            str( stepNo ) + ": " + str( self.stepName )
@@ -476,10 +474,10 @@
                      str( self.CurrentTestCaseNumber ) + "\n" + "*" * 40 + "\n"
 
         for driver in self.driversList:
-            vars( self )[ driver ].write( stepHeader + "\n" + caseFooter )
+            vars( self )[driver].write( stepHeader + "\n" + caseFooter )
 
     def cleanup( self ):
-        """
+        '''
         Print a summary of the current test's results then attempt to release
         all the component handles and the close opened file handles.
 
@@ -488,7 +486,7 @@
 
         This will return TRUE if all the component handles and log handles
         closed properly, else return FALSE.
-        """
+        '''
         result = self.TRUE
         lock = self.cleanupLock
         if lock.acquire( False ):
@@ -499,12 +497,12 @@
                         self.logger.testSummary( self )
                     components = self.componentDictionary
                     for component in sorted( components,
-                                             key=lambda item: components[ item ][ 'connect_order' ],
+                                             key=lambda item: components[item]['connect_order'],
                                              reverse=True ):
                         try:
-                            tempObject = vars( self )[ component ]
+                            tempObject = vars( self )[component]
                             print "Disconnecting from " + str( tempObject.name ) +\
-                                  ": " + str( tempObject.__class__ )
+                                  ": " + str( tempObject.__class__)
                             tempObject.disconnect()
                         except KeyboardInterrupt:
                             pass
@@ -512,14 +510,14 @@
                             # Component not created yet
                             self.log.warn( "Could not find the component " +
                                            str( component ) )
-                        except Exception:
+                        except StandardError:
                             self.log.exception( "Exception while disconnecting from " +
                                                  str( component ) )
                             result = self.FALSE
                     # Closing all the driver's session files
                     for driver in self.componentDictionary.keys():
                         try:
-                            vars( self )[ driver ].close_log_handles()
+                            vars( self )[driver].close_log_handles()
                         except KeyboardInterrupt:
                             pass
                         except KeyError:
@@ -527,7 +525,7 @@
                             self.log.warn( "Could not find the component " +
                                            str( driver ) + " while trying to" +
                                            " close log file" )
-                        except Exception:
+                        except StandardError:
                             self.log.exception( "Exception while closing log files for " +
                                                  str( driver ) )
                             result = self.FALSE
@@ -545,41 +543,41 @@
         return result
 
     def pause( self ):
-        """
+        '''
         This function will pause the test's execution, and will continue after
         user provide 'resume' command.
-        """
+        '''
         __builtin__.testthread.pause()
 
     def onfail( self, *components ):
-        """
+        '''
         When test step failed, calling all the components onfail.
-        """
+        '''
         if not components:
             try:
                 for component in self.componentDictionary.keys():
-                    tempObject = vars( self )[ component ]
+                    tempObject = vars( self )[component]
                     result = tempObject.onfail()
-            except Exception as e:
+            except StandardError as e:
                 print str( e )
                 result = self.FALSE
         else:
             try:
                 for component in components:
-                    tempObject = vars( self )[ component ]
+                    tempObject = vars( self )[component]
                     result = tempObject.onfail()
-            except Exception as e:
+            except StandardError as e:
                 print str( e )
                 result = self.FALSE
 
     def getDriverPath( self, driverName ):
-        """
+        '''
            Based on the component 'type' specified in the params, this method
            will find the absolute path by recursively searching for the name of
            the component.
 
            NOTE: This function requires the linux 'find' command.
-        """
+        '''
         import commands
 
         cmd = "find " + drivers_path + " -name " + driverName + ".py"
@@ -604,66 +602,66 @@
         return result
 
     def step( self, stepDesc ):
-        """
+        '''
            The step information of the test-case will append to the logs.
-        """
+        '''
         previousStep = " " + str( self.CurrentTestCaseNumber ) + "." +\
                        str( self.stepNumber ) + ": " + str( self.stepName )
         self.stepName = stepDesc
         self.stepNumber += 1
-        self.stepResults[ 0 ].append( self.stepNumber )
-        self.stepResults[ 1 ].append( stepDesc )
-        self.stepResults[ 2 ].append( self.NORESULT )
-        self.stepResults[ 3 ].append( "No on fail message given" )
+        self.stepResults[0].append( self.stepNumber )
+        self.stepResults[1].append( stepDesc )
+        self.stepResults[2].append( self.NORESULT )
+        self.stepResults[3].append( "No on fail message given" )
 
         stepName = " " + str( self.CurrentTestCaseNumber ) + "." +\
                    str( self.stepNumber ) + ": " + str( stepDesc )
-        self.log.step( stepName )
+        self.log.step(stepName)
         stepHeader = ""
         line = "\n" + "-" * 45 + "\n"
         if self.stepNumber > 1:
             stepHeader = line + "End of Step " + previousStep + line
         stepHeader += line + "Start of Step" + stepName + line
         for driver in self.componentDictionary.keys():
-            vars( self )[ driver + 'log' ].info( stepHeader )
+            vars( self )[driver + 'log'].info( stepHeader )
 
     def case( self, testCaseName ):
-        """
+        '''
            Test's each test-case information will append to the logs.
-        """
+        '''
         self.CurrentTestCase = testCaseName
         testCaseName = " " + str( testCaseName )
         self.log.case( testCaseName )
         caseHeader = testCaseName + "\n" + "*" * 40 + "\n"
         for driver in self.componentDictionary.keys():
-            vars( self )[ driver + 'log' ].info( caseHeader )
+            vars( self )[driver + 'log'].info( caseHeader )
 
     def testDesc( self, description ):
-        """
+        '''
            Test description will append to the logs.
-        """
+        '''
         description = "Test Description : " + str( description )
         self.log.info( description )
 
     def _getTest( self ):
-        """
+        '''
         This method will parse the test script to find required test
         information.
-        """
+        '''
         testFileHandler = open( main.testFile, 'r' )
         testFileList = testFileHandler.readlines()
         testFileHandler.close()
         counter = 0
         for index in range( len( testFileList ) ):
             lineMatch = re.match( '\s+def CASE(\d+)(.*):',
-                                  testFileList[ index ],
+                                  testFileList[index],
                                   0 )
             if lineMatch:
                 counter = counter + 1
                 self.TC_PLANNED = len( self.testcases_list )
 
     def response_parser( self, response, return_format ):
-        " It will load the default response parser "
+        ''' It will load the default response parser '''
         response_dict = {}
         response_dict = self.response_to_dict( response, return_format )
         return_format_string = self.dict_to_return_format( response,
@@ -680,14 +678,14 @@
             self.log.info( "Response is in 'JSON' format, converting to '" +
                            return_format + "' format" )
             # Formatting the json string
-            response = re.sub( r"{\s*'?(\w)", r'{ "\1', response )
+            response = re.sub( r"{\s*'?(\w)", r'{"\1', response )
             response = re.sub( r",\s*'?(\w)", r',"\1', response )
             response = re.sub( r"(\w)'?\s*:", r'\1":', response )
             response = re.sub( r":\s*'(\w)'\s*([,}])", r':"\1"\2', response )
             try:
                 import json
                 response_dict = json.loads( response )
-            except Exception:
+            except StandardError:
                 self.log.exception( "Json Parser is unable to parse the string" )
             return response_dict
         elif ini_match:
@@ -706,21 +704,21 @@
                 response_dict = xmldict.xml_to_dict( "<response> " +
                                                      str( response ) +
                                                      " </response>" )
-            except Exception:
+            except StandardError:
                 self.log.exception()
             return response_dict
 
     def dict_to_return_format( self, response, return_format, response_dict ):
         if return_format == 'table':
-            " Will return in table format"
+            ''' Will return in table format'''
             to_do = "Call the table output formatter"
             global response_table
             response_table = '\n'
             response_table = response_table + '\t'.join( response_dict ) + "\n"
 
             def get_table( value_to_convert ):
-                """ This will parse the dictionary recusrsively and print as
-                    table format"""
+                ''' This will parse the dictionary recursively and print as
+                    table format'''
                 table_data = ""
                 if isinstance( value_to_convert, dict ):
                     table_data = table_data + '\t'.join( value_to_convert ) +\
@@ -736,7 +734,7 @@
             return response_table
 
         elif return_format == 'config':
-            " Will return in config format"
+            ''' Will return in config format'''
             to_do = 'Call dict to config coverter'
             response_string = str( response_dict )
             print response_string
@@ -747,12 +745,12 @@
             response_config = re.sub( ":", " =", response_config )
             return "[response]\n\t " + response_config
         elif return_format == 'xml':
-            " Will return in xml format"
+            ''' Will return in xml format'''
             response_xml = xmldict.dict_to_xml( response_dict )
             response_xml = re.sub( ">\s*<", ">\n<", response_xml )
             return "\n" + response_xml
         elif return_format == 'json':
-            " Will return in json format"
+            ''' Will return in json format'''
             to_do = 'Call dict to xml coverter'
             import json
             response_json = json.dumps( response_dict )
@@ -817,10 +815,10 @@
 
 
 def verifyOptions( options ):
-    """
+    '''
     This will verify the command line options and set to default values,
     if any option not given in command line.
-    """
+    '''
     verifyTest( options )
     verifyExample( options )
     verifyTestScript( options )
@@ -830,7 +828,6 @@
     verifyTestCases( options )
     verifyOnosCell( options )
 
-
 def verifyTest( options ):
     try:
         if options.testname:
@@ -845,14 +842,12 @@
         print "Test or Example not specified please specify the --test <test name > or --example <example name>"
         main.exit()
 
-
 def verifyExample( options ):
     if options.example:
         main.testDir = path + '/examples/'
         main.tests_path = path + "/examples/"
         main.classPath = "examples." + main.TEST + "." + main.TEST
 
-
 def verifyLogdir( options ):
     # Verifying Log directory option
     if options.logdir:
@@ -860,22 +855,20 @@
     else:
         main.logdir = main.FALSE
 
-
 def verifyMail( options ):
     # Mail-To: field
     if options.mail:  # Test run specific
         main.mail = options.mail
-    elif main.params.get( 'mail' ):  # Test suite specific
+    elif main.params.get('mail'):  # Test suite specific
         main.mail = main.params.get( 'mail' )
     else:  # TestON specific
-        main.mail = main.config[ 'config' ].get( 'mail_to' )
+        main.mail = main.config['config'].get( 'mail_to' )
     # Mail-From: field
-    main.sender = main.config[ 'config' ].get( 'mail_from' )
+    main.sender = main.config['config'].get( 'mail_from' )
     # Mail smtp server
-    main.smtp = main.config[ 'config' ].get( 'mail_server' )
+    main.smtp = main.config['config'].get( 'mail_server' )
     # Mail-From account password
-    main.senderPwd = main.config[ 'config' ].get( 'mail_pass' )
-
+    main.senderPwd = main.config['config'].get( 'mail_pass' )
 
 def evalTestCase( tempList ):
     tList = []
@@ -883,10 +876,9 @@
         if isinstance( tcase, list ):
             tList.extend( evalTestCase( tcase ) )
         else:
-            tList.extend( [ tcase ] )
+            tList.extend( [tcase] )
     return tList
 
-
 def verifyTestCases( options ):
     # Getting Test cases list
     if options.testcases:
@@ -896,14 +888,13 @@
         main.testcases_list = eval( testcases_list + "," )
     else:
         if 'testcases' in main.params.keys():
-            temp = eval( main.params[ 'testcases' ] + "," )
+            temp = eval( main.params['testcases'] + "," )
             main.testcases_list = evalTestCase( list( temp ) )
         else:
             print "Testcases not specifed in params, please provide in " +\
                   "params file or 'testcases' commandline argument"
             sys.exit()
 
-
 def verifyOnosCell( options ):
     # Verifying onoscell option
     if options.onoscell:
@@ -911,41 +902,40 @@
         main.ONOSip = []
         main.mnIP = ""
         cellCMD = ". ~/onos/tools/dev/bash_profile; cell " + main.onoscell
-        output = subprocess.check_output( [ "bash", '-c', cellCMD ] )
+        output = subprocess.check_output( ["bash", '-c', cellCMD] )
         splitOutput = output.splitlines()
         main.apps = ""
         for i in range( len( splitOutput ) ):
-            if re.match( "OCN", splitOutput[ i ] ):
-                mnNode = splitOutput[ i ].split( "=" )
-                main.mnIP = mnNode[ 1 ]
+            if re.match( "OCN", splitOutput[i] ):
+                mnNode = splitOutput[i].split( "=" )
+                main.mnIP = mnNode[1]
             # cell already sorts OC variables in bash, so no need to
             # sort in TestON
-            elif re.match( "OC[1-9]", splitOutput[ i ] ):
-                onosNodes = splitOutput[ i ].split( "=" )
-                main.ONOSip.append( onosNodes[ 1 ] )
-            elif re.match( "ONOS_APPS", splitOutput[ i ] ):
-                main.apps = ( splitOutput[ i ].split( "=" ) )[ 1 ]
+            elif re.match( "OC[1-9]", splitOutput[i] ):
+                onosNodes = splitOutput[i].split( "=" )
+                main.ONOSip.append( onosNodes[1] )
+            elif re.match( "ONOS_APPS", splitOutput[i] ):
+                main.apps = ( splitOutput[i].split( "=" ) )[1]
     else:
         main.onoscell = main.FALSE
 
-
 def verifyTestScript( options ):
-    """
+    '''
     Verifies test script.
-    """
+    '''
     main.openspeak = openspeak.OpenSpeak()
     directory = main.testDir + "/" + main.TEST
     if os.path.exists( directory ):
         pass
     else:
         directory = ""
-        for root, dirs, files in os.walk( main.testDir, topdown=True ):
+        for root, dirs, files in os.walk( main.testDir, topdown=True):
             if not directory:
                 for name in dirs:
                     if name == main.TEST:
                         directory = ( os.path.join( root, name ) )
                         index = directory.find( "/tests/" ) + 1
-                        main.classPath = directory[ index: ].replace( '/', '.' ) + "." + main.TEST
+                        main.classPath = directory[index:].replace( '/', '.' ) + "." + main.TEST
                         break
     openspeakfile = directory + "/" + main.TEST + ".ospk"
     main.testFile = directory + "/" + main.TEST + ".py"
@@ -965,7 +955,7 @@
         testModule = __import__( main.classPath,
                                  globals(),
                                  locals(),
-                                 [ main.TEST ],
+                                 [main.TEST],
                                  -1 )
     except ImportError:
         print "There was an import error, it might mean that there is " +\
@@ -978,22 +968,21 @@
     main.params = main.parser.parseParams( main.classPath )
     main.topology = main.parser.parseTopology( main.classPath )
 
-
 def verifyParams( options ):
     try:
-        main.params = main.params[ 'PARAMS' ]
+        main.params = main.params['PARAMS']
     except KeyError:
         print "Error with the params file: Either the file not specified " +\
               "or the format is not correct"
         main.exit()
     try:
-        main.topology = main.topology[ 'TOPOLOGY' ]
+        main.topology = main.topology['TOPOLOGY']
     except KeyError:
         print "Error with the Topology file: Either the file not specified " +\
               "or the format is not correct"
         main.exit()
     # Overwrite existing params variables if they are specified from command line
-    if len( options.params ) > 0:
+    if len(options.params) > 0:
         # Some params variables are specified from command line
         for param in options.params:
             if not re.search( ".=.", param ):
@@ -1008,7 +997,7 @@
             # Get the innermost dictionary
             try:
                 while len( keyList ) > 1:
-                    key = keyList.pop( 0 )
+                    key = keyList.pop(0)
                     assert isinstance( paramDict[ key ], dict )
                     paramDict = paramDict[ key ]
             except KeyError:
@@ -1018,39 +1007,38 @@
                 print( "Error when parsing params: \"" + key + "\" is already the innermost level in main.params" )
                 main.exit()
             # Change the value
-            if keyList[ 0 ] not in paramDict:
+            if not paramDict.has_key( keyList[0] ):
                 print( "Error when parsing params: key \"" + keyList[0] + "\" not found in main.params" )
                 main.exit()
-            elif isinstance( paramDict[ keyList[ 0 ] ], dict ):
+            elif isinstance( paramDict[ keyList[0] ], dict ):
                 print( "Error when parsing params: more levels under key \"" + keyList[0] + "\" in main.params" )
                 main.exit()
             else:
-                paramDict[ keyList[ 0 ] ] = value
-
+                paramDict[ keyList[0] ] = value
 
 def load_parser():
-    """
+    '''
     It facilitates the loading customised parser for topology and params file.
     It loads parser mentioned in tab named parser of teston.cfg file.
     It also loads default xmlparser if no parser have specified in teston.cfg
     file.
 
-    """
+    '''
     confighash = main.configDict
-    if 'file' in confighash[ 'config' ][ 'parser' ] and\
-       'class' in confighash[ 'config' ][ 'parser' ]:
-        path = confighash[ 'config' ][ 'parser' ][ 'file' ]
+    if 'file' in confighash['config']['parser'] and\
+       'class' in confighash['config']['parser']:
+        path = confighash['config']['parser']['file']
         if path is not None or\
-           confighash[ 'config' ][ 'parser' ][ 'class' ] is not None:
+           confighash['config']['parser']['class'] is not None:
             try:
                 module = re.sub( r".py\s*$", "", path )
-                moduleList = module.split( "/" )
-                newModule = ".".join( moduleList[ -2: ] )
-                parsingClass = confighash[ 'config' ][ 'parser' ][ 'class' ]
+                moduleList = module.split("/")
+                newModule = ".".join( moduleList[-2:] )
+                parsingClass = confighash['config']['parser']['class']
                 parsingModule = __import__( newModule,
                                             globals(),
                                             locals(),
-                                            [ parsingClass ],
+                                            [parsingClass],
                                             -1 )
                 parsingClass = getattr( parsingModule, parsingClass )
                 main.parser = parsingClass()
@@ -1065,26 +1053,25 @@
                 print "Could not find the file " + path +\
                       " using default parser."
                 load_defaultParser()
-        elif confighash[ 'config' ][ 'parser' ][ 'file' ] is None or\
-             confighash[ 'config' ][ 'parser' ][ 'class' ] is None:
+        elif confighash['config']['parser']['file'] is None or\
+             confighash['config']['parser']['class'] is None:
             load_defaultParser()
     else:
         load_defaultParser()
 
-
 def load_defaultParser():
-    """
+    '''
     It will load the default parser which is xml parser to parse the params and
     topology file.
-    """
-    moduleList = main.parserPath.split( "/" )
-    newModule = ".".join( moduleList[ -2: ] )
+    '''
+    moduleList = main.parserPath.split("/")
+    newModule = ".".join( moduleList[-2:] )
     try:
         parsingClass = main.parsingClass
         parsingModule = __import__( newModule,
                                     globals(),
                                     locals(),
-                                    [ parsingClass ],
+                                    [parsingClass],
                                     -1 )
         parsingClass = getattr( parsingModule, parsingClass )
         main.parser = parsingClass()
@@ -1095,31 +1082,30 @@
         else:
             main.exit()
     except ImportError:
-        print sys.exc_info()[ 1 ]
-
+        print sys.exc_info()[1]
 
 def load_logger():
-    """
+    '''
     It facilitates the loading customised parser for topology and params file.
     It loads parser mentioned in tab named parser of teston.cfg file.
     It also loads default xmlparser if no parser have specified in teston.cfg
     file.
-    """
+    '''
     confighash = main.configDict
-    if 'file' in confighash[ 'config' ][ 'logger' ] and\
-       'class' in confighash[ 'config' ][ 'logger' ]:
-        path = confighash[ 'config' ][ 'logger' ][ 'file' ]
+    if 'file' in confighash['config']['logger'] and\
+       'class' in confighash['config']['logger']:
+        path = confighash['config']['logger']['file']
         if path is not None or\
-           confighash[ 'config' ][ 'logger' ][ 'class' ] is not None:
+           confighash['config']['logger']['class'] is not None:
             try:
                 module = re.sub( r".py\s*$", "", path )
                 moduleList = module.split( "/" )
-                newModule = ".".join( moduleList[ -2: ] )
-                loggerClass = confighash[ 'config' ][ 'logger' ][ 'class' ]
+                newModule = ".".join( moduleList[-2:] )
+                loggerClass = confighash['config']['logger']['class']
                 loggerModule = __import__( newModule,
                                            globals(),
                                            locals(),
-                                           [ loggerClass ],
+                                           [loggerClass],
                                            -1 )
                 loggerClass = getattr( loggerModule, loggerClass )
                 main.logger = loggerClass()
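
For reference, a minimal sketch of the dynamic class loading that load_parser() and load_logger() above perform: a file path taken from teston.cfg is reduced to a dotted module name and the named class is imported and instantiated. The path and class name in the commented call are invented examples, and this sketch uses importlib rather than the __import__( ..., -1 ) form used in the Python 2 code above.

import importlib
import re

def load_class_from_path( path, class_name ):
    # Strip the ".py" suffix and keep the last two path components as the
    # dotted module name, mirroring the moduleList[-2:] join above.
    module = re.sub( r"\.py\s*$", "", path )
    newModule = ".".join( module.split( "/" )[ -2: ] )
    mod = importlib.import_module( newModule )
    return getattr( mod, class_name )()

# Example (hypothetical): parser = load_class_from_path( "core/xmlparser.py", "xmlparser" )
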
@@ -1127,34 +1113,32 @@
                 print "Could not find the file " + path +\
                       " using default logger."
                 load_defaultlogger()
-        elif confighash[ 'config' ][ 'parser' ][ 'file' ] is None or\
-             confighash[ 'config' ][ 'parser' ][ 'class' ] is None:
+        elif confighash['config']['parser']['file'] is None or\
+             confighash['config']['parser']['class'] is None:
             load_defaultlogger()
     else:
         load_defaultlogger()
 
-
 def load_defaultlogger():
-    """
+    '''
     It will load the default parser which is xml parser to parse the params and
     topology file.
-    """
-    moduleList = main.loggerPath.split( "/" )
-    newModule = ".".join( moduleList[ -2: ] )
+    '''
+    moduleList = main.loggerPath.split("/")
+    newModule = ".".join( moduleList[-2:] )
     try:
         loggerClass = main.loggerClass
         loggerModule = __import__( newModule,
                                    globals(),
                                    locals(),
-                                   [ loggerClass ],
+                                   [loggerClass],
                                    -1 )
         loggerClass = getattr( loggerModule, loggerClass )
         main.logger = loggerClass()
 
     except ImportError:
-        print sys.exc_info()[ 1 ]
+        print sys.exc_info()[1]
         main.exit()
 
-
 def _echo( self ):
     print "THIS IS ECHO"
diff --git a/TestON/core/testparser.py b/TestON/core/testparser.py
index 08e4198..904ebc0 100644
--- a/TestON/core/testparser.py
+++ b/TestON/core/testparser.py
@@ -1,5 +1,5 @@
 #!/usr/bin/env python
-"""
+'''
 Created on 26-Dec-2012
 Modified 2015 by ON.Lab
 
@@ -10,7 +10,7 @@
     TestON is free software: you can redistribute it and/or modify
     it under the terms of the GNU General Public License as published by
     the Free Software Foundation, either version 2 of the License, or
-    ( at your option ) any later version.
+    (at your option) any later version.
 
     TestON is distributed in the hope that it will be useful,
     but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -21,19 +21,16 @@
     along with TestON.  If not, see <http://www.gnu.org/licenses/>.
 
 
-"""
+'''
 import re
 import sys
-
-
 class TestParser:
-
-    def __init__( self, testFile ):
-        try:
-            testFileHandler = open( testFile, 'r' )
+    def __init__(self,testFile):
+        try :
+            testFileHandler = open(testFile, 'r')
         except IOError:
-            print "No such file " + testFile
-            sys.exit( 0 )
+            print "No such file "+testFile
+            sys.exit(0)
 
         testFileList = testFileHandler.readlines()
         self.testscript = testFileList
@@ -42,89 +39,89 @@
         self.statementsList = []
         index = 0
         self.statementsList = []
-        # initialSpaces = len( line ) -len( line.lstrip() )
-        while index < len( testFileList ):
-            testFileList[ index ] = re.sub( "^\s{8}|^\s{4}", "", testFileList[ index ] )
+        #initialSpaces = len(line) -len(line.lstrip())
+        while index < len(testFileList):
+            testFileList[index] = re.sub("^\s{8}|^\s{4}", "", testFileList[index])
             # Skip multiline comments
-            if re.match( '^(\'\'\' )|^( \"\"\" )', testFileList[ index ], 0 ):
+            if re.match('^(\'\'\')|^(\"\"\")',testFileList[index],0) :
                 index = index + 1
-                try:
-                    while not re.match( '^\s*(\'\'\' )|^\s*( \"\"\" )', testFileList[ index ], 0 ):
+                try :
+                    while not re.match('^\s*(\'\'\')|^\s*(\"\"\")',testFileList[index],0) :
                         index = index + 1
                 except IndexError:
                     print ''
 
             # skip empty lines and single line comments
-            elif not re.match( '#|^\s*$', testFileList[ index ], 0 ):
-                self.statementsList.append( testFileList[ index ] )
+            elif not re.match('#|^\s*$',testFileList[index],0):
+                self.statementsList.append(testFileList[index])
             index = index + 1
 
-    def case_code( self ):
+    def case_code(self):
         index = 0
         statementsList = self.statementsList
-        while index < len( statementsList ):
-            m = re.match( 'def\s+CASE(\d+)', statementsList[ index ], 0 )
+        while index < len(statementsList):
+            m= re.match('def\s+CASE(\d+)',statementsList[index],0)
             self.caseBlock = []
             if m:
                 index = index + 1
-                try:
-                    while not re.match( '\s*def\s+CASE(\d+)', statementsList[ index ], 0 ):
-                        self.caseBlock.append( statementsList[ index ] )
-                        if index < len( statementsList ) - 1:
+                try :
+                    while not re.match('\s*def\s+CASE(\d+)',statementsList[index],0) :
+                        self.caseBlock.append(statementsList[index])
+                        if index < len(statementsList)-1:
                             index = index + 1
-                        else:
+                        else :
                             break
                     index = index - 1
                 except IndexError:
                     print ''
-                self.caseCode[ str( m.group( 1 ) ) ] = self.caseBlock
+                self.caseCode [str(m.group(1))] = self.caseBlock
             index = index + 1
         return self.caseCode
 
-    def step_code( self, caseStatements ):
+    def step_code(self,caseStatements):
         index = 0
         step = 0
         stepCode = {}
         step_flag = False
-        while index < len( caseStatements ):
-            m = re.match( 'main\.step', caseStatements[ index ], 0 )
+        while index < len(caseStatements):
+            m= re.match('main\.step',caseStatements[index],0)
             stepBlock = ''
             if m:
                 step_flag = True
-                if step == 0:
+                if step == 0 :
                     i = 0
                     block = ''
-                    while i < index:
-                        block += caseStatements[ i ]
+                    while i < index :
+                        block += caseStatements[i]
                         i = i + 1
-                    stepCode[ step ] = block
+                    stepCode[step] = block
                     step = step + 1
-                stepBlock = stepBlock + caseStatements[ index ]
+                stepBlock = stepBlock + caseStatements[index]
                 index = index + 1
-                try:
-                    while not re.match( 'main\.step', caseStatements[ index ], 0 ):
-                        stepBlock = stepBlock + caseStatements[ index ]
-                        if index < len( caseStatements ) - 1:
+                try :
+                    while not re.match('main\.step',caseStatements[index],0) :
+                        stepBlock = stepBlock + caseStatements[index]
+                        if index < len(caseStatements)-1:
                             index = index + 1
-                        else:
+                        else :
                             break
                     index = index - 1
                 except IndexError:
                     print ''
-                stepCode[ step ] = stepBlock
+                stepCode[step] = stepBlock
                 step = step + 1
             index = index + 1
         # If there is no step defined !!
-        if not step_flag:
-            stepCode[ step ] = "".join( caseStatements )
+        if not step_flag :
+            stepCode[step] = "".join(caseStatements)
         return stepCode
 
-    def getStepCode( self ):
+    def getStepCode(self):
         case_step_code = {}
         case_block = self.case_code()
-        for case in case_block:
-            case_step_code[ case ] = {}
-            step_block = self.step_code( case_block[ case ] )
-            for step in step_block:
-                case_step_code[ case ][ step ] = step_block[ step ]
+        for case in case_block :
+            case_step_code[case] = {}
+            step_block = self.step_code(case_block[case])
+            for step in step_block :
+                case_step_code[case][step] = step_block[step]
         return case_step_code
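
As a rough illustration of the structure TestParser.case_code() above builds, a minimal sketch that groups script lines into blocks keyed by CASE number; the four-line script is an invented example, and the sketch skips the comment and blank-line filtering done in __init__ above.

import re

def split_cases( lines ):
    cases = {}
    current = None
    for line in lines:
        m = re.match( r'def\s+CASE(\d+)', line.strip() )
        if m:
            # Start a new block keyed by the case number, as case_code() does.
            current = m.group( 1 )
            cases[ current ] = []
        elif current is not None:
            cases[ current ].append( line )
    return cases

script = [ "def CASE1( self, main ):", "    main.step( 'start' )",
           "def CASE2( self, main ):", "    main.step( 'stop' )" ]
print( split_cases( script ) )
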
diff --git a/TestON/core/utilities.py b/TestON/core/utilities.py
index fee2899..2f7e5bb 100644
--- a/TestON/core/utilities.py
+++ b/TestON/core/utilities.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python
-"""
+'''
 Created on 23-Oct-2012
-Copyright 2012 Open Networking Foundation ( ONF )
+Copyright 2012 Open Networking Foundation (ONF)
 
 Please refer questions to either the onos test mailing list at <onos-test@onosproject.org>,
 the System Testing Plans and Results wiki page at <https://wiki.onosproject.org/x/voMg>,
@@ -10,7 +10,7 @@
     TestON is free software: you can redistribute it and/or modify
     it under the terms of the GNU General Public License as published by
     the Free Software Foundation, either version 2 of the License, or
-    ( at your option ) any later version.
+    (at your option) any later version.
 
     TestON is distributed in the hope that it will be useful,
     but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -26,7 +26,7 @@
    * parse_args for key-value pair handling
    * Parsing the params or topology file.
 
-"""
+'''
 import re
 from configobj import ConfigObj
 from core import ast as ast
@@ -38,57 +38,56 @@
 import time
 import random
 
-
 class Utilities:
-
-    """
+    '''
        Utilities will take care about the basic functions like :
        * Extended assertion,
        * parse_args for key-value pair handling
        * Parsing the params or topology file.
-    """
-    def __init__( self ):
-        self.wrapped = sys.modules[ __name__ ]
+    '''
 
-    def __getattr__( self, name ):
-        """
+    def __init__(self):
+        self.wrapped = sys.modules[__name__]
+
+    def __getattr__(self, name):
+        '''
         This will invoke, if the attribute wasn't found the usual ways.
         Here it will look for assert_attribute and will execute when AttributeError occurs.
         It will return the result of the assert_attribute.
-        """
+        '''
         try:
-            return getattr( self.wrapped, name )
+            return getattr(self.wrapped, name)
         except AttributeError:
-            def assertHandling( **kwargs ):
-                nameVar = re.match( "^assert", name, flags=0 )
-                matchVar = re.match( "assert(_not_|_)(equals|matches|greater|lesser)", name, flags=0 )
+            def assertHandling(**kwargs):
+                nameVar = re.match("^assert",name,flags=0)
+                matchVar = re.match("assert(_not_|_)(equals|matches|greater|lesser)",name,flags=0)
                 notVar = 0
                 operators = ""
 
-                try:
-                    if matchVar.group( 1 ) == "_not_" and matchVar.group( 2 ):
+                try :
+                    if matchVar.group(1) == "_not_" and matchVar.group(2) :
                         notVar = 1
-                        operators = matchVar.group( 2 )
-                    elif matchVar.group( 1 ) == "_" and matchVar.group( 2 ):
-                        operators = matchVar.group( 2 )
+                        operators = matchVar.group(2)
+                    elif matchVar.group(1) == "_" and matchVar.group(2):
+                        operators = matchVar.group(2)
                 except AttributeError:
-                    if matchVar is None and nameVar:
-                        operators = 'equals'
-                result = self._assert( NOT=notVar, operator=operators, **kwargs )
+                    if matchVar==None and nameVar:
+                        operators ='equals'
+                result = self._assert(NOT=notVar,operator=operators,**kwargs)
                 if result == main.TRUE:
-                    main.log.info( "Assertion Passed" )
+                    main.log.info("Assertion Passed")
                     main.STEPRESULT = main.TRUE
                 elif result == main.FALSE:
-                    main.log.warn( "Assertion Failed" )
+                    main.log.warn("Assertion Failed")
                     main.STEPRESULT = main.FALSE
                 else:
-                    main.log.error( "There is an Error in Assertion" )
+                    main.log.error("There is an Error in Assertion")
                     main.STEPRESULT = main.ERROR
                 return result
             return assertHandling
 
-    def _assert( self, **assertParam ):
-        """
+    def _assert (self,**assertParam):
+        '''
         It will take the arguments :
         expect:'Expected output'
         actual:'Actual output'
@@ -99,29 +98,30 @@
 
         It will return the assertion result.
 
-        """
-        arguments = self.parse_args( [ "EXPECT", "ACTUAL", "ONPASS", "ONFAIL", "NOT", "OPERATOR" ], **assertParam )
+        '''
+
+        arguments = self.parse_args(["EXPECT","ACTUAL","ONPASS","ONFAIL","NOT","OPERATOR"],**assertParam)
 
         result = 0
         valuetype = ''
-        operation = "not " + str( arguments[ "OPERATOR" ] ) if arguments[ 'NOT' ] and arguments[ 'NOT' ] == 1 else arguments[ "OPERATOR" ]
-        operators = { 'equals': { 'STR': '==', 'NUM': '==' }, 'matches': '=~', 'greater': '>', 'lesser': '<' }
+        operation = "not "+ str(arguments["OPERATOR"]) if arguments['NOT'] and arguments['NOT'] == 1 else arguments["OPERATOR"]
+        operators = {'equals':{'STR':'==','NUM':'=='}, 'matches' : '=~', 'greater':'>' ,'lesser':'<'}
 
-        expectMatch = re.match( '^\s*[+-]?0(e0)?\s*$', str( arguments[ "EXPECT" ] ), re.I + re.M )
-        if not ( ( not expectMatch ) and ( arguments[ "EXPECT" ] == 0 ) ):
+        expectMatch = re.match('^\s*[+-]?0(e0)?\s*$', str(arguments["EXPECT"]), re.I+re.M)
+        if not ((not expectMatch) and (arguments["EXPECT"]==0)):
             valuetype = 'NUM'
-        else:
-            if arguments[ "OPERATOR" ] == 'greater' or arguments[ "OPERATOR" ] == 'lesser':
-                main.log.error( "Numeric comparison on strings is not possibele" )
+        else :
+            if arguments["OPERATOR"] == 'greater' or arguments["OPERATOR"] == 'lesser':
+                main.log.error("Numeric comparison on strings is not possible")
                 return main.ERROR
 
         valuetype = 'STR'
-        arguments[ "ACTUAL" ] = str( arguments[ "ACTUAL" ] )
-        if arguments[ "OPERATOR" ] != 'matches':
-            arguments[ "EXPECT" ] = str( arguments[ "EXPECT" ] )
+        arguments["ACTUAL"] = str(arguments["ACTUAL"])
+        if arguments["OPERATOR"] != 'matches':
+            arguments["EXPECT"] = str(arguments["EXPECT"])
 
-        try:
-            opcode = operators[ str( arguments[ "OPERATOR" ] ) ][ valuetype ] if arguments[ "OPERATOR" ] == 'equals' else operators[ str( arguments[ "OPERATOR" ] ) ]
+        try :
+            opcode = operators[str(arguments["OPERATOR"])][valuetype] if arguments["OPERATOR"] == 'equals' else operators[str(arguments["OPERATOR"])]
 
         except KeyError as e:
             print "Key Error in assertion"
@@ -130,111 +130,111 @@
 
         if opcode == '=~':
             try:
-                assert re.search( str( arguments[ "EXPECT" ] ), str( arguments[ "ACTUAL" ] ) )
+                assert re.search(str(arguments["EXPECT"]),str(arguments["ACTUAL"]))
                 result = main.TRUE
             except AssertionError:
-                try:
-                    assert re.match( str( arguments[ "EXPECT" ] ), str( arguments[ "ACTUAL" ] ) )
+                try :
+                    assert re.match(str(arguments["EXPECT"]),str(arguments["ACTUAL"]))
                     result = main.TRUE
                 except AssertionError:
-                    main.log.error( "Assertion Failed" )
+                    main.log.error("Assertion Failed")
                     result = main.FALSE
-        else:
+        else :
             try:
-                if str( opcode ) == "==":
-                    main.log.info( "Verifying the Expected is equal to the actual or not using assert_equal" )
-                    if ( arguments[ "EXPECT" ] == arguments[ "ACTUAL" ] ):
+                if str(opcode)=="==":
+                    main.log.info("Verifying the Expected is equal to the actual or not using assert_equal")
+                    if (arguments["EXPECT"] == arguments["ACTUAL"]):
                         result = main.TRUE
-                    else:
+                    else :
                         result = main.FALSE
-                elif str( opcode ) == ">":
-                    main.log.info( "Verifying the Expected is Greater than the actual or not using assert_greater" )
-                    if ( ast.literal_eval( arguments[ "EXPECT" ] ) > ast.literal_eval( arguments[ "ACTUAL" ] ) ):
+                elif str(opcode) == ">":
+                    main.log.info("Verifying the Expected is Greater than the actual or not using assert_greater")
+                    if (ast.literal_eval(arguments["EXPECT"]) > ast.literal_eval(arguments["ACTUAL"])) :
                         result = main.TRUE
-                    else:
+                    else :
                         result = main.FALSE
-                elif str( opcode ) == "<":
-                    main.log.info( "Verifying the Expected is Lesser than the actual or not using assert_lesser" )
-                    if ( ast.literal_eval( arguments[ "EXPECT" ] ) < ast.literal_eval( arguments[ "ACTUAL" ] ) ):
+                elif str(opcode) == "<":
+                    main.log.info("Verifying the Expected is Lesser than the actual or not using assert_lesser")
+                    if (ast.literal_eval(arguments["EXPECT"]) < ast.literal_eval(arguments["ACTUAL"])):
                         result = main.TRUE
-                    else:
+                    else :
                         result = main.FALSE
             except AssertionError:
-                main.log.error( "Assertion Failed" )
+                main.log.error("Assertion Failed")
                 result = main.FALSE
         result = result if result else 0
-        result = not result if arguments[ "NOT" ] and arguments[ "NOT" ] == 1 else result
+        result = not result if arguments["NOT"] and arguments["NOT"] == 1 else result
         resultString = ""
-        if result:
-            resultString = str( resultString ) + "PASS"
-            main.log.info( arguments[ "ONPASS" ] )
-        else:
-            resultString = str( resultString ) + "FAIL"
-            if not isinstance( arguments[ "ONFAIL" ], str ):
-                eval( str( arguments[ "ONFAIL" ] ) )
-            else:
-                main.log.error( arguments[ "ONFAIL" ] )
-                main.log.report( arguments[ "ONFAIL" ] )
+        if result :
+            resultString = str(resultString) + "PASS"
+            main.log.info(arguments["ONPASS"])
+        else :
+            resultString = str(resultString) + "FAIL"
+            if not isinstance(arguments["ONFAIL"],str):
+                eval(str(arguments["ONFAIL"]))
+            else :
+                main.log.error(arguments["ONFAIL"])
+                main.log.report(arguments["ONFAIL"])
                 main.onFailMsg = arguments[ 'ONFAIL' ]
 
-        msg = arguments[ "ON" + str( resultString ) ]
+        msg = arguments["ON" + str(resultString)]
 
-        if not isinstance( msg, str ):
+        if not isinstance(msg,str):
             try:
-                eval( str( msg ) )
+                eval(str(msg))
             except SyntaxError as e:
                 print "function definition is not right"
                 print e
 
         main.last_result = result
-        if main.stepResults[ 2 ]:
-            main.stepResults[ 2 ][ -1 ] = result
+        if main.stepResults[2]:
+            main.stepResults[2][-1] = result
             try:
-                main.stepResults[ 3 ][ -1 ] = arguments[ 'ONFAIL' ]
+                main.stepResults[3][-1] = arguments[ 'ONFAIL' ]
             except AttributeError:
                 pass
         else:
             main.log.warn( "Assertion called before a test step" )
         return result
 
-    def parse_args( self, args, **kwargs ):
-        """
-        It will accept the ( key,value ) pair and will return the ( key,value ) pairs with keys in uppercase.
-        """
+    def parse_args(self,args, **kwargs):
+        '''
+        It will accept (key, value) pairs and will return them with the keys in uppercase.
+        '''
         newArgs = {}
-        for key, value in kwargs.iteritems():
-            if isinstance( args, list ) and str.upper( key ) in args:
+        for key,value in kwargs.iteritems():
+            if isinstance(args,list) and str.upper(key) in args:
                 for each in args:
-                    if each == str.upper( key ):
-                        newArgs[ str( each ) ] = value
-                    elif each != str.upper( key ) and not ( ( str( each ) in newArgs ) ):
-                        newArgs[ str( each ) ] = None
+                    if each==str.upper(key):
+                        newArgs [str(each)] = value
+                    elif each != str.upper(key) and (newArgs.has_key(str(each)) == False ):
+                        newArgs[str(each)] = None
 
         return newArgs
 
-    def send_mail( self ):
+    def send_mail(self):
         # Create a text/plain message
         msg = email.mime.Multipart.MIMEMultipart()
-        try:
+        try :
             if main.test_target:
                 sub = "Result summary of \"" + main.TEST + "\" run on component \"" +\
                       main.test_target + "\" Version \"" +\
-                      vars( main )[ main.test_target ].get_version() + "\": " +\
+                      vars( main )[main.test_target].get_version() + "\": " +\
                       str( main.TOTAL_TC_SUCCESS ) + "% Passed"
-            else:
+            else :
                 sub = "Result summary of \"" + main.TEST + "\": " +\
                       str( main.TOTAL_TC_SUCCESS ) + "% Passed"
         except ( KeyError, AttributeError ):
             sub = "Result summary of \"" + main.TEST + "\": " +\
                   str( main.TOTAL_TC_SUCCESS ) + "% Passed"
 
-        msg[ 'Subject' ] = sub
-        msg[ 'From' ] = main.sender
-        msg[ 'To' ] = main.mail
+        msg['Subject'] = sub
+        msg['From'] = main.sender
+        msg['To'] = main.mail
 
         # The main body is just another attachment
         body = email.mime.Text.MIMEText( main.logHeader + "\n" +
-                                         main.testResult )
+                                         main.testResult)
         msg.attach( body )
 
         # Attachments
@@ -252,7 +252,7 @@
             smtp = smtplib.SMTP( main.smtp )
             smtp.starttls()
             smtp.login( main.sender, main.senderPwd )
-            smtp.sendmail( msg[ 'From' ], [ msg[ 'To' ] ], msg.as_string() )
+            smtp.sendmail( msg['From'], [msg['To']], msg.as_string() )
             smtp.quit()
         except Exception:
             main.log.exception( "Error sending email" )
@@ -265,32 +265,32 @@
             # Create a text/plain message
             msg = email.mime.Multipart.MIMEMultipart()
 
-            msg[ 'Subject' ] = subject
-            msg[ 'From' ] = main.sender
-            msg[ 'To' ] = main.mail
+            msg['Subject'] = subject
+            msg['From'] = main.sender
+            msg['To'] = main.mail
 
             smtp = smtplib.SMTP( main.smtp )
             smtp.starttls()
             smtp.login( main.sender, main.senderPwd )
-            smtp.sendmail( msg[ 'From' ], [ msg[ 'To' ] ], msg.as_string() )
+            smtp.sendmail( msg['From'], [msg['To']], msg.as_string() )
             smtp.quit()
         except Exception:
             main.log.exception( "" )
             return main.FALSE
         return main.TRUE
 
-    def parse( self, fileName ):
-        """
+    def parse(self,fileName):
+        '''
         This will parse the params or topo or cfg file and return content in the file as Dictionary
-        """
+        '''
         self.fileName = fileName
-        matchFileName = re.match( r'(.*)\.(cfg|params|topo)', self.fileName, re.M | re.I )
+        matchFileName = re.match(r'(.*)\.(cfg|params|topo)',self.fileName,re.M|re.I)
         if matchFileName:
-            try:
-                parsedInfo = ConfigObj( self.fileName )
+            try :
+                parsedInfo = ConfigObj(self.fileName)
                 return parsedInfo
-            except Exception:
-                print "There is no such file to parse " + fileName
+            except StandardError:
+                print "There is no such file to parse "+fileName
         else:
             return 0
 
@@ -303,7 +303,7 @@
 
         Arguments:
         f        - a callable object
-        retValue - Return value( s ) of f to retry on. This can be a list or an
+        retValue - Return value(s) of f to retry on. This can be a list or an
                    object.
         args     - A tuple containing the arguments of f.
         kwargs   - A dictionary containing the keyword arguments of f.
@@ -314,7 +314,7 @@
         random   - Boolean indicating if the wait time is random between 0
                    and sleep or exactly sleep seconds. Defaults to False.
         """
-        # TODO: be able to pass in a conditional statement( s ). For example:
+        # TODO: be able to pass in a conditional statement(s). For example:
         #      retCondition = "< 7"
         #      Then we do something like 'if eval( "ret " + retCondition ):break'
         try:
@@ -327,7 +327,7 @@
             for i in range( 0, attempts ):
                 ret = f( *args, **kwargs )
                 if ret not in retValue:
-                    # NOTE that False in [ 0 ] == True
+                # NOTE that False in [ 0 ] == True
                     break
                 if randomTime:
                     sleeptime = random.randint( 0, sleep )
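
The retry() helper documented in the hunk above retries a callable while its return value is still one of the values to retry on; a minimal standalone sketch of that pattern without the random back-off option (the lambda in the example is invented):

import time

def retry( f, retValue, args=(), kwargs=None, attempts=2, sleep=1 ):
    # Retry while the return value is still one of the values in retValue,
    # up to `attempts` calls, sleeping between calls.
    kwargs = kwargs or {}
    if not isinstance( retValue, list ):
        retValue = [ retValue ]
    ret = None
    for i in range( attempts ):
        ret = f( *args, **kwargs )
        if ret not in retValue:
            break
        if i < attempts - 1:
            time.sleep( sleep )
    return ret

print( retry( lambda x: x + 1, retValue=[ 0 ], args=( 1, ), attempts=3 ) )
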
@@ -350,4 +350,4 @@
 if __name__ != "__main__":
     import sys
 
-    sys.modules[ __name__ ] = Utilities()
+    sys.modules[__name__] = Utilities()
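
For reference, the __getattr__ hook in Utilities above turns names such as assert_not_equals into an operator plus a negation flag; a minimal sketch of just that name parsing, without the logging and result bookkeeping (the sample names are invented):

import re

def parse_assert_name( name ):
    # Mirror the regexes used in assertHandling() above.
    matchVar = re.match( r"assert(_not_|_)(equals|matches|greater|lesser)", name )
    if matchVar:
        negated = matchVar.group( 1 ) == "_not_"
        operator = matchVar.group( 2 )
    elif re.match( r"^assert", name ):
        # A bare "assert..." name with no recognised operator falls back to equals.
        negated, operator = False, "equals"
    else:
        raise AttributeError( name )
    return negated, operator

for name in ( "assert_equals", "assert_not_matches", "assertExists" ):
    print( ( name, parse_assert_name( name ) ) )
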
diff --git a/TestON/core/xmldict.py b/TestON/core/xmldict.py
index a85c9ad..808b365 100644
--- a/TestON/core/xmldict.py
+++ b/TestON/core/xmldict.py
@@ -1,17 +1,17 @@
-"""
+'''
 Created on 03-Dec-2012
-Copyright 2012 Open Networking Foundation ( ONF )
+Copyright 2012 Open Networking Foundation (ONF)
 
 Please refer questions to either the onos test mailing list at <onos-test@onosproject.org>,
 the System Testing Plans and Results wiki page at <https://wiki.onosproject.org/x/voMg>,
 or the System Testing Guide page at <https://wiki.onosproject.org/x/WYQg>
 
-author:: Anil Kumar ( anilkumar.s@paxterrasolutions.com )
+@author: Anil Kumar (anilkumar.s@paxterrasolutions.com)
 
     TestON is free software: you can redistribute it and/or modify
     it under the terms of the GNU General Public License as published by
     the Free Software Foundation, either version 2 of the License, or
-    ( at your option ) any later version.
+    (at your option) any later version.
 
     TestON is distributed in the hope that it will be useful,
     but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -21,7 +21,8 @@
     You should have received a copy of the GNU General Public License
     along with TestON.  If not, see <http://www.gnu.org/licenses/>.
 
-"""
+'''
+
 """
     xmldict
     ~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -30,153 +31,141 @@
 """
 import datetime
 
-
-def xml_to_dict( root_or_str, strict=True ):
+def xml_to_dict(root_or_str, strict=True):
     """
     Converts `root_or_str` which can be parsed xml or a xml string to dict.
 
     """
     root = root_or_str
-    if isinstance( root, str ):
+    if isinstance(root, str):
         import xml.etree.cElementTree as ElementTree
-        root = ElementTree.XML( root_or_str )
-    try:
-        return { root.tag: _from_xml( root, strict ) }
-    except Exception:
+        root = ElementTree.XML(root_or_str)
+    try :
+        return {root.tag: _from_xml(root, strict)}
+    except StandardError:
         return None
 
-
-def dict_to_xml( dict_xml ):
+def dict_to_xml(dict_xml):
     """
     Converts `dict_xml` which is a python dict to corresponding xml.
     """
-    return _to_xml( dict_xml )
+    return _to_xml(dict_xml)
 
-
-def _to_xml( el ):
+def _to_xml(el):
     """
     Converts `el` to its xml representation.
     """
     val = None
-    if isinstance( el, dict ):
-        val = _dict_to_xml( el )
-    elif isinstance( el, bool ):
-        val = str( el ).lower()
+    if isinstance(el, dict):
+        val = _dict_to_xml(el)
+    elif isinstance(el, bool):
+        val = str(el).lower()
     else:
         val = el
-    if val is None:
-        val = 'null'
+    if val is None: val = 'null'
     return val
 
-
-def _extract_attrs( els ):
+def _extract_attrs(els):
     """
     Extracts attributes from dictionary `els`. Attributes are keys which start
     with '@'
     """
-    if not isinstance( els, dict ):
+    if not isinstance(els, dict):
         return ''
-    return ''.join( ' %s="%s"' % ( key[ 1: ], value ) for key, value in els.iteritems()
-                    if key.startswith( '@' ) )
+    return ''.join(' %s="%s"' % (key[1:], value) for key, value in els.iteritems()
+                   if key.startswith('@'))
 
-
-def _dict_to_xml( els ):
+def _dict_to_xml(els):
     """
     Converts `els` which is a python dict to corresponding xml.
     """
-    def process_content( tag, content ):
-        attrs = _extract_attrs( content )
-        text = isinstance( content, dict ) and content.get( '#text', '' ) or ''
-        return '<%s%s>%s%s</%s>' % ( tag, attrs, _to_xml( content ), text, tag )
+    def process_content(tag, content):
+        attrs = _extract_attrs(content)
+        text = isinstance(content, dict) and content.get('#text', '') or ''
+        return '<%s%s>%s%s</%s>' % (tag, attrs, _to_xml(content), text, tag)
 
     tags = []
     for tag, content in els.iteritems():
         # Text and attributes
-        if tag.startswith( '@' ) or tag == '#text':
+        if tag.startswith('@') or tag == '#text':
             continue
-        elif isinstance( content, list ):
+        elif isinstance(content, list):
             for el in content:
-                tags.append( process_content( tag, el ) )
-        elif isinstance( content, dict ):
-            tags.append( process_content( tag, content ) )
+                tags.append(process_content(tag, el))
+        elif isinstance(content, dict):
+            tags.append(process_content(tag, content))
         else:
-            tags.append( '<%s>%s</%s>' % ( tag, _to_xml( content ), tag ) )
-    return ''.join( tags )
+            tags.append('<%s>%s</%s>' % (tag, _to_xml(content), tag))
+    return ''.join(tags)
 
-
-def _is_xml_el_dict( el ):
+def _is_xml_el_dict(el):
     """
     Returns true if `el` is supposed to be a dict.
     This function makes sense only in the context of making dicts out of xml.
     """
-    if len( el ) == 1 or el[ 0 ].tag != el[ 1 ].tag:
+    if len(el) == 1  or el[0].tag != el[1].tag:
         return True
     return False
 
-
-def _is_xml_el_list( el ):
+def _is_xml_el_list(el):
     """
     Returns true if `el` is supposed to be a list.
     This function makes sense only in the context of making lists out of xml.
     """
-    if len( el ) > 1 and el[ 0 ].tag == el[ 1 ].tag:
+    if len(el) > 1 and el[0].tag == el[1].tag:
         return True
     return False
 
-
-def _str_to_datetime( date_str ):
+def _str_to_datetime(date_str):
     try:
-        val = datetime.datetime.strptime( date_str, "%Y-%m-%dT%H:%M:%SZ" )
+        val = datetime.datetime.strptime(date_str,  "%Y-%m-%dT%H:%M:%SZ")
     except ValueError:
         val = date_str
     return val
 
-
-def _str_to_boolean( bool_str ):
-    if bool_str.lower() != 'false' and bool( bool_str ):
+def _str_to_boolean(bool_str):
+    if bool_str.lower() != 'false' and bool(bool_str):
         return True
     return False
 
-
-def _from_xml( el, strict ):
+def _from_xml(el, strict):
     """
     Extracts value of xml element element `el`.
     """
     val = None
     # Parent node.
     if el:
-        if _is_xml_el_dict( el ):
-            val = _dict_from_xml( el, strict )
-        elif _is_xml_el_list( el ):
-            val = _list_from_xml( el, strict )
+        if _is_xml_el_dict(el):
+            val = _dict_from_xml(el, strict)
+        elif _is_xml_el_list(el):
+            val = _list_from_xml(el, strict)
     # Simple node.
     else:
         attribs = el.items()
         # An element with attributes.
         if attribs and strict:
-            val = dict( ( '@%s' % k, v ) for k, v in dict( attribs ).iteritems() )
+            val = dict(('@%s' % k, v) for k, v in dict(attribs).iteritems())
             if el.text:
-                converted = _val_and_maybe_convert( el )
-                val[ '#text' ] = el.text
+                converted = _val_and_maybe_convert(el)
+                val['#text'] = el.text
                 if converted != el.text:
-                    val[ '#value' ] = converted
+                    val['#value'] = converted
         elif el.text:
             # An element with no subelements but text.
-            val = _val_and_maybe_convert( el )
+            val = _val_and_maybe_convert(el)
         elif attribs:
-            val = dict( attribs )
+            val = dict(attribs)
     return val
 
-
-def _val_and_maybe_convert( el ):
+def _val_and_maybe_convert(el):
     """
     Converts `el.text` if `el` has attribute `type` with valid value.
     """
     text = el.text.strip()
-    data_type = el.get( 'type' )
-    convertor = _val_and_maybe_convert.convertors.get( data_type )
+    data_type = el.get('type')
+    convertor = _val_and_maybe_convert.convertors.get(data_type)
     if convertor:
-        return convertor( text )
+        return convertor(text)
     else:
         return text
 _val_and_maybe_convert.convertors = {
@@ -185,24 +174,23 @@
     'integer': int
 }
 
-
-def _list_from_xml( els, strict ):
+def _list_from_xml(els, strict):
     """
     Converts xml elements list `el_list` to a python list.
     """
+
     temp = {}
     for el in els:
-        tag = el.attrib[ "name" ]
-        temp[ tag ] = ( _from_xml( el, strict ) )
+        tag = el.attrib["name"]
+        temp[tag] = (_from_xml(el, strict))
     return temp
 
-
-def _dict_from_xml( els, strict ):
+def _dict_from_xml(els, strict):
     """
     Converts xml doc with root `root` to a python dict.
     """
     # An element with subelements.
     res = {}
     for el in els:
-        res[ el.tag ] = _from_xml( el, strict )
+        res[el.tag] = _from_xml(el, strict)
     return res
diff --git a/TestON/core/xmlparser.py b/TestON/core/xmlparser.py
index 181f646..a41ed92 100644
--- a/TestON/core/xmlparser.py
+++ b/TestON/core/xmlparser.py
@@ -1,18 +1,18 @@
-# /usr/bin/env python
-"""
+#/usr/bin/env python
+'''
 Created on 07-Jan-2013
-Copyright 2013 Open Networking Foundation ( ONF )
+Copyright 2013 Open Networking Foundation (ONF)
 
 Please refer questions to either the onos test mailing list at <onos-test@onosproject.org>,
 the System Testing Plans and Results wiki page at <https://wiki.onosproject.org/x/voMg>,
 or the System Testing Guide page at <https://wiki.onosproject.org/x/WYQg>
 
-author:: Raghav Kashyap( raghavkashyap@paxterrasolutions.com )
+@author: Raghav Kashyap(raghavkashyap@paxterrasolutions.com)
 
     TestON is free software: you can redistribute it and/or modify
     it under the terms of the GNU General Public License as published by
     the Free Software Foundation, either version 2 of the License, or
-    ( at your option ) any later version.
+    (at your option) any later version.
 
     TestON is distributed in the hope that it will be useful,
     but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -23,49 +23,50 @@
     along with TestON.  If not, see <http://www.gnu.org/licenses/>.
 
 
-"""
+'''
+
 import xmldict
 import re
 
+class xmlparser :
 
-class xmlparser:
-
-    def __init__( self ):
+    def __init__(self) :
         self.default = ''
 
-    def parse( self, fileName ):
-        """
+    def parse(self,fileName) :
+        '''
          This will parse the params or topo or cfg file and return content in the file as Dictionary
-        """
+        '''
         self.fileName = fileName
-        matchFileName = re.match( r'(.*)\.(params|topo|cfg)', self.fileName, re.M | re.I )
+        matchFileName = re.match(r'(.*)\.(params|topo|cfg)', self.fileName, re.M | re.I)
         if matchFileName:
-            xml = open( fileName ).read()
-            try:
-                parsedInfo = xmldict.xml_to_dict( xml )
+            xml = open(fileName).read()
+            try :
+                parsedInfo = xmldict.xml_to_dict(xml)
                 return parsedInfo
-            except Exception as e:
+            except StandardError as e:
                 print "Error parsing file " + fileName + ": " + e.message
-        else:
+        else :
             print "File name is not correct"
 
-    def parseParams( self, paramsPath ):
-        """
+    def parseParams(self,paramsPath):
+        '''
          It will take the params file path and will return the params dictionary
-        """
-        paramsPath = re.sub( "\.", "/", paramsPath )
-        paramsPath = re.sub( "tests|examples", "", paramsPath )
-        params = self.parse( main.tests_path + paramsPath + ".params" )
-        paramsAsString = str( params )
-        return eval( paramsAsString )
+        '''
+        paramsPath = re.sub("\.","/",paramsPath)
+        paramsPath = re.sub("tests|examples","",paramsPath)
+        params = self.parse(main.tests_path+paramsPath+".params")
+        paramsAsString = str(params)
+        return eval(paramsAsString)
 
-    def parseTopology( self, topologyPath ):
-        """
+    def parseTopology(self,topologyPath):
+        '''
           It will take topology file path and will return topology dictionary
-        """
-        topologyPath = re.sub( "\.", "/", topologyPath )
-        topologyPath = re.sub( "tests|examples", "", topologyPath )
-        # topology = self.parse( main.tests_path+"/"+topologyPath+".topo" )
-        topology = self.parse( main.tests_path + topologyPath + ".topo" )
-        topoAsString = str( topology )
-        return eval( topoAsString )
+        '''
+        topologyPath = re.sub("\.","/",topologyPath)
+        topologyPath = re.sub("tests|examples","",topologyPath)
+        #topology = self.parse(main.tests_path+"/"+topologyPath+".topo")
+        topology = self.parse(main.tests_path+topologyPath+".topo")
+        topoAsString = str(topology)
+        return eval(topoAsString)
+
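
The parseParams()/parseTopology() methods above derive the file to read from the dotted class path; a minimal sketch of just that path mangling (the tests_path and class path below are invented examples):

import re

def params_file_for( tests_path, class_path, suffix=".params" ):
    # Turn the dotted class path into a relative file path and append the
    # suffix, as xmlparser.parseParams() above does before calling parse().
    rel = re.sub( r"\.", "/", class_path )
    rel = re.sub( "tests|examples", "", rel )
    return tests_path + rel + suffix

print( params_file_for( "/home/user/TestON/tests", "tests.FUNCintent.FUNCintent" ) )
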
diff --git a/TestON/drivers/common/api/controller/onosrestdriver.py b/TestON/drivers/common/api/controller/onosrestdriver.py
old mode 100644
new mode 100755
index b5d2062..312f1f1
--- a/TestON/drivers/common/api/controller/onosrestdriver.py
+++ b/TestON/drivers/common/api/controller/onosrestdriver.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python
 """
 Created on 07-08-2015
-Copyright 2015 Open Networking Foundation ( ONF )
+Copyright 2015 Open Networking Foundation (ONF)
 
 Please refer questions to either the onos test mailing list at <onos-test@onosproject.org>,
 the System Testing Plans and Results wiki page at <https://wiki.onosproject.org/x/voMg>,
@@ -48,7 +48,7 @@
         except Exception as e:
             main.log.exception( e )
         try:
-            if os.getenv( str( self.ip_address ) ) is not None:
+            if os.getenv( str( self.ip_address ) ) != None:
                 self.ip_address = os.getenv( str( self.ip_address ) )
             else:
                 main.log.info( self.name + ": ip set to " + self.ip_address )
@@ -74,12 +74,12 @@
             if isinstance( jsonObject, str ):
                 jsonObject = json.loads( jsonObject )
             return json.dumps( jsonObject, sort_keys=True,
-                               indent=4, separators=( ',', ': ' ) )
+                               indent=4, separators=(',', ': '))
         except ( TypeError, ValueError ):
             main.log.exception( "Error parsing jsonObject" )
             return None
 
-    def send( self, url, ip="DEFAULT", port="DEFAULT", base="/onos/v1", method="GET",
+    def send( self, url, ip = "DEFAULT", port = "DEFAULT", base="/onos/v1", method="GET",
               query=None, data=None, debug=False ):
         """
         Arguments:
@@ -94,25 +94,25 @@
                          the request
             dict data: Dictionary to be sent in the body of the request
         """
-        # TODO: Authentication - simple http ( user,pass ) tuple
+        # TODO: Authentication - simple http (user,pass) tuple
         # TODO: should we maybe just pass kwargs straight to response?
         # TODO: Do we need to allow for other protocols besides http?
         # ANSWER: Not yet, but potentially https with certificates
         if ip == "DEFAULT":
-            main.log.warn( "No ip given, reverting to ip from topo file" )
-            ip = self.ip_address
+                main.log.warn( "No ip given, reverting to ip from topo file" )
+                ip = self.ip_address
         if port == "DEFAULT":
-            main.log.warn( "No port given, reverting to port " +
-                           "from topo file" )
-            port = self.port
+                main.log.warn( "No port given, reverting to port " +
+                               "from topo file" )
+                port = self.port
 
         try:
             path = "http://" + str( ip ) + ":" + str( port ) + base + url
             if self.user_name and self.pwd:
-                main.log.info( "user/passwd is: " + self.user_name + "/" + self.pwd )
-                auth = ( self.user_name, self.pwd )
+                main.log.info("user/passwd is: " + self.user_name + "/" + self.pwd)
+                auth = (self.user_name, self.pwd)
             else:
-                auth = None
+                auth=None
             main.log.info( "Sending request " + path + " using " +
                            method.upper() + " method." )
             response = requests.request( method.upper(),
@@ -148,7 +148,7 @@
                 main.log.warn( "No port given, reverting to port " +
                                "from topo file" )
                 port = self.port
-            response = self.send( url="/intents", ip=ip, port=port )
+            response = self.send( url="/intents", ip = ip, port = port )
             if response:
                 if 200 <= response[ 0 ] <= 299:
                     output = response[ 1 ]
@@ -195,7 +195,7 @@
                 port = self.port
             # NOTE: REST url requires the intent id to be in decimal form
             query = "/" + str( appId ) + "/" + str( intentId )
-            response = self.send( url="/intents" + query, ip=ip, port=port )
+            response = self.send( url="/intents" + query, ip = ip, port = port )
             if response:
                 if 200 <= response[ 0 ] <= 299:
                     output = response[ 1 ]
@@ -229,7 +229,7 @@
                 main.log.warn( "No port given, reverting to port " +
                                "from topo file" )
                 port = self.port
-            response = self.send( url="/applications", ip=ip, port=port )
+            response = self.send( url="/applications", ip = ip, port = port )
             if response:
                 if 200 <= response[ 0 ] <= 299:
                     output = response[ 1 ]
@@ -273,7 +273,7 @@
             query = "/" + str( appName ) + "/active"
             response = self.send( method="POST",
                                   url="/applications" + query,
-                                  ip=ip, port=port )
+                                  ip = ip, port = port)
             if response:
                 output = response[ 1 ]
                 app = json.loads( output )
@@ -329,7 +329,7 @@
             query = "/" + str( appName ) + "/active"
             self.send( method="DELETE",
                        url="/applications" + query,
-                       ip=ip, port=port )
+                       ip = ip, port = port )
             response = self.getApp( appName, ip, port )
             if response:
                 output = response[ 1 ]
@@ -383,7 +383,7 @@
                 port = self.port
             query = "/" + str( appName )
             response = self.send( url="/applications" + query,
-                                  ip=ip, port=port )
+                                  ip = ip, port = port )
             if response:
                 if 200 <= response[ 0 ] <= 299:
                     return response
@@ -414,17 +414,17 @@
             error on requests; Returns None for exceptions
         """
         try:
-            intentJson = { "two": str( hostIdTwo ),
-                           "selector": { "criteria": [] }, "priority": 7,
-                           "treatment": { "deferred": [], "instructions": [] },
-                           "appId": appId, "one": str( hostIdOne ),
-                           "type": "HostToHostIntent",
-                           "constraints": [ { "type": "LinkTypeConstraint",
-                                              "types": [ "OPTICAL" ],
-                                              "inclusive": 'false' } ] }
+            intentJson = {"two": str( hostIdTwo ),
+                          "selector": {"criteria": []}, "priority": 7,
+                          "treatment": {"deferred": [], "instructions": []},
+                          "appId": appId, "one": str( hostIdOne ),
+                          "type": "HostToHostIntent",
+                          "constraints": [{"type": "LinkTypeConstraint",
+                                           "types": ["OPTICAL"],
+                                           "inclusive": 'false' }]}
             if vlanId:
-                intentJson[ 'selector' ][ 'criteria' ].append( { "type": "VLAN_VID",
-                                                                 "vlanId": vlanId } )
+                intentJson[ 'selector' ][ 'criteria' ].append( { "type":"VLAN_VID",
+                                                                 "vlanId":vlanId } )
             output = None
             if ip == "DEFAULT":
                 main.log.warn( "No ip given, reverting to ip from topo file" )
@@ -434,7 +434,7 @@
                                "from topo file" )
                 port = self.port
             response = self.send( method="POST",
-                                  url="/intents", ip=ip, port=port,
+                                  url="/intents", ip = ip, port = port,
                                   data=json.dumps( intentJson ) )
             if response:
                 if "201" in str( response[ 0 ] ):
@@ -485,9 +485,9 @@
             * ethType: specify ethType
             * ethSrc: specify ethSrc ( i.e. src mac addr )
             * ethDst: specify ethDst ( i.e. dst mac addr )
-            * bandwidth: specify bandwidth capacity of link ( TODO )
+            * bandwidth: specify bandwidth capacity of link (TODO)
             * lambdaAlloc: if True, intent will allocate lambda
-              for the specified intent ( TODO )
+              for the specified intent (TODO)
             * ipProto: specify ip protocol
             * ipSrc: specify ip source address with mask eg. ip#/24
             * ipDst: specify ip destination address eg. ip#/24
@@ -520,63 +520,63 @@
                     main.log.debug( self.name + ": Egress port not specified" )
                     return main.FALSE
 
-            intentJson = { "ingressPoint": { "device": ingressDevice,
-                                             "port": ingressPort },
-                           "selector": { "criteria": [] },
-                           "priority": 55,
-                           "treatment": { "deferred": [],
-                                          "instructions": [] },
-                           "egressPoint": { "device": egressDevice,
-                                            "port": egressPort },
-                           "appId": appId,
-                           "type": "PointToPointIntent",
-                           "constraints": [ { "type": "LinkTypeConstraint",
-                                              "types": [ "OPTICAL" ],
-                                              "inclusive": "false" } ] }
+            intentJson ={ "ingressPoint": { "device": ingressDevice,
+                                           "port": ingressPort },
+                          "selector": { "criteria": [] },
+                          "priority": 55,
+                          "treatment": { "deferred": [],
+                                         "instructions": [] },
+                          "egressPoint": { "device": egressDevice,
+                                           "port": egressPort },
+                          "appId": appId,
+                          "type": "PointToPointIntent",
+                          "constraints": [ { "type": "LinkTypeConstraint",
+                                             "types": [ "OPTICAL" ],
+                                             "inclusive": "false" } ] }
 
             # if protected:
-            #     intentJson[ 'constraints' ].append( { "type": "Protection", "types": [ "Protection" ], "inclusive": "true" } )
+            #     intentJson['constraints'].append( { "type": "Protection", "types": ["Protection"], "inclusive": "true" } )
 
             if ethType == "IPV4":
                 intentJson[ 'selector' ][ 'criteria' ].append( {
-                                                         "type": "ETH_TYPE",
-                                                         "ethType": 2048 } )
+                                                         "type":"ETH_TYPE",
+                                                         "ethType":2048 } )
             elif ethType:
                 intentJson[ 'selector' ][ 'criteria' ].append( {
-                                                         "type": "ETH_TYPE",
-                                                         "ethType": ethType } )
+                                                         "type":"ETH_TYPE",
+                                                         "ethType":ethType } )
 
             if ethSrc:
                 intentJson[ 'selector' ][ 'criteria' ].append(
-                                                       { "type": "ETH_SRC",
-                                                         "mac": ethSrc } )
+                                                       { "type":"ETH_SRC",
+                                                         "mac":ethSrc } )
             if ethDst:
                 intentJson[ 'selector' ][ 'criteria' ].append(
-                                                       { "type": "ETH_DST",
-                                                         "mac": ethDst } )
+                                                       { "type":"ETH_DST",
+                                                         "mac":ethDst } )
             if ipSrc:
                 intentJson[ 'selector' ][ 'criteria' ].append(
-                                                       { "type": "IPV4_SRC",
-                                                         "ip": ipSrc } )
+                                                       { "type":"IPV4_SRC",
+                                                         "ip":ipSrc } )
             if ipDst:
                 intentJson[ 'selector' ][ 'criteria' ].append(
-                                                       { "type": "IPV4_DST",
-                                                         "ip": ipDst } )
+                                                       { "type":"IPV4_DST",
+                                                         "ip":ipDst } )
             if tcpSrc:
                 intentJson[ 'selector' ][ 'criteria' ].append(
-                                                       { "type": "TCP_SRC",
+                                                       { "type":"TCP_SRC",
                                                          "tcpPort": tcpSrc } )
             if tcpDst:
                 intentJson[ 'selector' ][ 'criteria' ].append(
-                                                       { "type": "TCP_DST",
+                                                       { "type":"TCP_DST",
                                                          "tcpPort": tcpDst } )
             if ipProto:
                 intentJson[ 'selector' ][ 'criteria' ].append(
-                                                       { "type": "IP_PROTO",
+                                                       { "type":"IP_PROTO",
                                                          "protocol": ipProto } )
             if vlanId:
                 intentJson[ 'selector' ][ 'criteria' ].append(
-                                                       { "type": "VLAN_VID",
+                                                       { "type":"VLAN_VID",
                                                          "vlanId": vlanId } )
 
             # TODO: Bandwidth and Lambda will be implemented if needed
@@ -590,7 +590,7 @@
                                "from topo file" )
                 port = self.port
             response = self.send( method="POST",
-                                  url="/intents", ip=ip, port=port,
+                                  url="/intents", ip = ip, port = port,
                                   data=json.dumps( intentJson ) )
 
             main.log.debug( intentJson )
@@ -613,26 +613,26 @@
             main.log.exception( self.name + ": Uncaught exception!" )
             main.cleanAndExit()
 
-    def addSinglepointToMultipointIntent( self,
-                                          ingressDevice,
-                                          egressDeviceList,
-                                          portEgressList,
-                                          appId='org.onosproject.cli',
-                                          portIngress="",
-                                          ethType="",
-                                          ethSrc="",
-                                          ethDst="",
-                                          bandwidth="",
-                                          lambdaAlloc=False,
-                                          ipProto="",
-                                          ipSrc="",
-                                          ipDst="",
-                                          tcpSrc="",
-                                          tcpDst="",
-                                          partial=False,
-                                          ip="DEFAULT",
-                                          port="DEFAULT",
-                                          vlanId="" ):
+    def addSinglepointToMultipointIntent(self,
+                       ingressDevice,
+                       egressDeviceList,
+                       portEgressList,
+                       appId='org.onosproject.cli',
+                       portIngress="",
+                       ethType="",
+                       ethSrc="",
+                       ethDst="",
+                       bandwidth="",
+                       lambdaAlloc=False,
+                       ipProto="",
+                       ipSrc="",
+                       ipDst="",
+                       tcpSrc="",
+                       tcpDst="",
+                       partial=False,
+                       ip="DEFAULT",
+                       port="DEFAULT",
+                       vlanId="" ):
         """
         Description:
             Adds a point-to-multi point intent ( uni-directional ) by
@@ -647,9 +647,9 @@
             * ethType: specify ethType
             * ethSrc: specify ethSrc ( i.e. src mac addr )
             * ethDst: specify ethDst ( i.e. dst mac addr )
-            * bandwidth: specify bandwidth capacity of link ( TODO )
+            * bandwidth: specify bandwidth capacity of link (TODO)
             * lambdaAlloc: if True, intent will allocate lambda
-              for the specified intent ( TODO )
+              for the specified intent (TODO)
             * ipProto: specify ip protocol
             * ipSrc: specify ip source address with mask eg. ip#/24
             * ipDst: specify ip destination address eg. ip#/24
@@ -694,7 +694,7 @@
                            "appId": appId,
                            "type": "SinglePointToMultiPointIntent",
                            "constraints": [ { "type": "LinkTypeConstraint",
-                                              "types": [ "OPTICAL" ],
+                                              "types": ["OPTICAL"],
                                               "inclusive": "false" } ] }
 
             index = 0
@@ -752,16 +752,16 @@
                                "from topo file" )
                 port = self.port
             response = self.send( method="POST",
-                                  url="/intents", ip=ip, port=port,
-                                  data=json.dumps( intentJson ) )
+                                 url="/intents", ip=ip, port=port,
+                                 data=json.dumps( intentJson ) )
 
-            main.log.debug( intentJson )
+            main.log.debug(intentJson)
 
             if response:
                 if "201" in str( response[ 0 ] ):
                     main.log.info( self.name + ": Successfully POST point" +
                                    " intent between ingress: " + ingressDevice +
-                                   " and egress: " + str( egressDeviceList ) + " devices" )
+                                   " and egress: " + str(egressDeviceList) + " devices" )
                     return main.TRUE
                 else:
                     main.log.error( "Error with REST request, response was: " + str( response ) )
@@ -795,7 +795,7 @@
             # NOTE: REST url requires the intent id to be in decimal form
             query = "/" + str( appId ) + "/" + str( int( intentId, 16 ) )
             response = self.send( method="DELETE",
-                                  url="/intents" + query, ip=ip, port=port )
+                                  url="/intents" + query, ip = ip, port = port )
             if response:
                 if 200 <= response[ 0 ] <= 299:
                     return main.TRUE
@@ -834,7 +834,7 @@
             main.log.exception( self.name + ": Uncaught exception!" )
             main.cleanAndExit()
 
-    def removeAllIntents( self, intentIdList='ALL', appId='org.onosproject.cli',
+    def removeAllIntents( self, intentIdList ='ALL',appId='org.onosproject.cli',
                           ip="DEFAULT", port="DEFAULT", delay=5 ):
         """
         Description:
@@ -864,7 +864,7 @@
                 import time
                 time.sleep( delay )
                 intentRemain = len( json.loads( self.intents() ) )
-                if all( result == main.TRUE for result in results ) and \
+                if all( result==main.TRUE for result in results ) and \
                    intentRemain == 0:
                     main.log.info( self.name + ": All intents are removed " )
                     return main.TRUE
@@ -900,7 +900,7 @@
                 main.log.warn( "No port given, reverting to port " +
                                "from topo file" )
                 port = self.port
-            response = self.send( url="/hosts", ip=ip, port=port )
+            response = self.send( url="/hosts", ip = ip, port = port )
             if response:
                 if 200 <= response[ 0 ] <= 299:
                     output = response[ 1 ]
@@ -945,12 +945,12 @@
                                "from topo file" )
                 port = self.port
             query = "/" + mac + "/" + vlan
-            response = self.send( url="/hosts" + query, ip=ip, port=port )
+            response = self.send( url="/hosts" + query, ip = ip, port = port )
             if response:
-                # NOTE: What if the person wants other values? would it be better
-                # to have a function that gets a key and return a value instead?
-                # This function requires mac and vlan and returns an ID which
-                # makes this current function useless
+            # NOTE: What if the person wants other values? would it be better
+            # to have a function that gets a key and return a value instead?
+            # This function requires mac and vlan and returns an ID which
+            # makes this current function useless
                 if 200 <= response[ 0 ] <= 299:
                     output = response[ 1 ]
                     hostId = json.loads( output ).get( 'id' )
@@ -983,7 +983,7 @@
                 main.log.warn( "No port given, reverting to port " +
                                "from topo file" )
                 port = self.port
-            response = self.send( url="/topology", ip=ip, port=port )
+            response = self.send( url="/topology", ip = ip, port = port )
             if response:
                 if 200 <= response[ 0 ] <= 299:
                     output = response[ 1 ]
@@ -1019,7 +1019,7 @@
                 main.log.warn( "No port given, reverting to port " +
                                "from topo file" )
                 port = self.port
-            response = self.send( url="/devices", ip=ip, port=port )
+            response = self.send( url="/devices", ip = ip, port = port )
             if response:
                 if 200 <= response[ 0 ] <= 299:
                     output = response[ 1 ]
@@ -1043,11 +1043,11 @@
         """
         Description:
             Get intent state.
-            Accepts a single intent ID ( string type ) or a list of intent IDs.
-            Returns the state( string type ) of the id if a single intent ID is
+            Accepts a single intent ID (string type) or a list of intent IDs.
+            Returns the state(string type) of the id if a single intent ID is
             accepted.
         Required:
-            intentId: intent ID ( string type )
+            intentId: intent ID (string type)
             intentsJson: parsed json object from the onos:intents api
         Returns:
             Returns a dictionary with intent IDs as the key and its
@@ -1095,7 +1095,7 @@
             main.cleanAndExit()
 
     def checkIntentState( self, intentsId="ALL", expectedState='INSTALLED',
-                          ip="DEFAULT", port="DEFAULT" ):
+                          ip="DEFAULT", port="DEFAULT"):
         """
         Description:
             Check intents state based on expected state which defaults to
@@ -1103,7 +1103,7 @@
         Required:
             intentsId - List of intents ID to be checked
         Optional:
-            expectedState - Check the expected state( s ) of each intents
+            expectedState - Check the expected state(s) of each intents
                             state in the list.
                             *NOTE: You can pass in a list of expected state,
                             Eg: expectedState = [ 'INSTALLED' , 'INSTALLING' ]
@@ -1227,7 +1227,7 @@
             if flowId:
                 url += "/" + str( int( flowId ) )
             print url
-            response = self.send( url=url, ip=ip, port=port )
+            response = self.send( url=url, ip = ip, port = port )
             if response:
                 if 200 <= response[ 0 ] <= 299:
                     output = response[ 1 ]
@@ -1262,9 +1262,9 @@
             The ip and port option are for the requests input's ip and port
             of the ONOS node
         """
+
         try:
-            if debug:
-                main.log.debug( "Adding flow: " + self.pprint( flowJson ) )
+            if debug: main.log.debug( "Adding flow: " + self.pprint( flowJson ) )
             output = None
             if ip == "DEFAULT":
                 main.log.warn( "No ip given, reverting to ip from topo file" )
@@ -1275,7 +1275,7 @@
                 port = self.port
             url = "/flows/" + deviceId
             response = self.send( method="POST",
-                                  url=url, ip=ip, port=port,
+                                  url=url, ip = ip, port = port,
                                   data=json.dumps( flowJson ) )
             if response:
                 if "201" in str( response[ 0 ] ):
@@ -1330,9 +1330,9 @@
             * ethDst: specify ethDst ( i.e. dst mac addr )
             * ipProto: specify ip protocol
             * ipSrc: specify ip source address with mask eg. ip#/24
-                as a tuple ( type, ip# )
+                as a tuple (type, ip#)
             * ipDst: specify ip destination address eg. ip#/24
-                as a tuple ( type, ip# )
+                as a tuple (type, ip#)
             * tcpSrc: specify tcp source port
             * tcpDst: specify tcp destination port
         Returns:
@@ -1344,75 +1344,75 @@
             of the ONOS node
         """
         try:
-            flowJson = { "priority": priority,
-                           "isPermanent": "true",
-                           "timeout": 0,
-                           "deviceId": deviceId,
-                           "treatment": { "instructions": [] },
-                           "selector": { "criteria": [] } }
+            flowJson = {   "priority":priority,
+                           "isPermanent":"true",
+                           "timeout":0,
+                           "deviceId":deviceId,
+                           "treatment":{"instructions":[]},
+                           "selector": {"criteria":[]}}
             if appId:
                 flowJson[ "appId" ] = appId
 
             if groupId:
                 flowJson[ 'treatment' ][ 'instructions' ].append( {
-                                                        "type": "GROUP",
-                                                        "groupId": groupId } )
+                                                        "type":"GROUP",
+                                                        "groupId":groupId } )
 
             if egressPort:
                 flowJson[ 'treatment' ][ 'instructions' ].append( {
-                                                        "type": "OUTPUT",
-                                                        "port": egressPort } )
+                                                        "type":"OUTPUT",
+                                                        "port":egressPort } )
             if ingressPort:
                 flowJson[ 'selector' ][ 'criteria' ].append( {
-                                                        "type": "IN_PORT",
-                                                        "port": ingressPort } )
+                                                        "type":"IN_PORT",
+                                                        "port":ingressPort } )
             if ethType:
                 flowJson[ 'selector' ][ 'criteria' ].append( {
-                                                        "type": "ETH_TYPE",
-                                                        "ethType": ethType } )
+                                                        "type":"ETH_TYPE",
+                                                        "ethType":ethType } )
             if ethSrc:
                 flowJson[ 'selector' ][ 'criteria' ].append( {
-                                                        "type": "ETH_SRC",
-                                                        "mac": ethSrc } )
+                                                        "type":"ETH_SRC",
+                                                        "mac":ethSrc } )
             if ethDst:
                 flowJson[ 'selector' ][ 'criteria' ].append( {
-                                                        "type": "ETH_DST",
-                                                        "mac": ethDst } )
+                                                        "type":"ETH_DST",
+                                                        "mac":ethDst } )
             if vlan:
                 flowJson[ 'selector' ][ 'criteria' ].append( {
-                                                        "type": "VLAN_VID",
-                                                        "vlanId": vlan } )
+                                                        "type":"VLAN_VID",
+                                                        "vlanId":vlan } )
             if mpls:
                 flowJson[ 'selector' ][ 'criteria' ].append( {
-                                                        "type": "MPLS_LABEL",
-                                                        "label": mpls } )
+                                                        "type":"MPLS_LABEL",
+                                                        "label":mpls } )
             if ipSrc:
                 flowJson[ 'selector' ][ 'criteria' ].append( {
-                                                        "type": ipSrc[ 0 ],
-                                                        "ip": ipSrc[ 1 ] } )
+                                                        "type":ipSrc[0],
+                                                        "ip":ipSrc[1] } )
             if ipDst:
                 flowJson[ 'selector' ][ 'criteria' ].append( {
-                                                        "type": ipDst[ 0 ],
-                                                        "ip": ipDst[ 1 ] } )
+                                                        "type":ipDst[0],
+                                                        "ip":ipDst[1] } )
             if tcpSrc:
                 flowJson[ 'selector' ][ 'criteria' ].append( {
-                                                        "type": "TCP_SRC",
+                                                        "type":"TCP_SRC",
                                                         "tcpPort": tcpSrc } )
             if tcpDst:
                 flowJson[ 'selector' ][ 'criteria' ].append( {
-                                                        "type": "TCP_DST",
+                                                        "type":"TCP_DST",
                                                         "tcpPort": tcpDst } )
             if udpSrc:
                 flowJson[ 'selector' ][ 'criteria' ].append( {
-                                                        "type": "UDP_SRC",
+                                                        "type":"UDP_SRC",
                                                         "udpPort": udpSrc } )
             if udpDst:
                 flowJson[ 'selector' ][ 'criteria' ].append( {
-                                                        "type": "UDP_DST",
+                                                        "type":"UDP_DST",
                                                         "udpPort": udpDst } )
             if ipProto:
                 flowJson[ 'selector' ][ 'criteria' ].append( {
-                                                        "type": "IP_PROTO",
+                                                        "type":"IP_PROTO",
                                                         "protocol": ipProto } )
 
             return self.sendFlow( deviceId=deviceId, flowJson=flowJson, debug=debug )
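
addFlow() above translates its keyword arguments into selector criteria and treatment instructions and hands the result to sendFlow(), which POSTs to /flows/<deviceId>. A minimal sketch of the same payload shape sent directly; device ID, ports, endpoint and credentials are placeholder assumptions:

    import json
    import requests

    deviceId = "of:0000000000000001"  # example switch DPID
    flowJson = { "priority": 40000,
                 "isPermanent": "true",
                 "timeout": 0,
                 "deviceId": deviceId,
                 "treatment": { "instructions": [ { "type": "OUTPUT", "port": "2" } ] },
                 "selector": { "criteria": [ { "type": "IN_PORT", "port": "1" },
                                             { "type": "ETH_TYPE", "ethType": 2048 } ] } }
    resp = requests.post( "http://127.0.0.1:8181/onos/v1/flows/" + deviceId,
                          data=json.dumps( flowJson ),
                          headers={ "Content-Type": "application/json" },
                          auth=( "onos", "rocks" ) )
    print resp.status_code  # sendFlow() treats 201 as success
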
@@ -1448,7 +1448,7 @@
             # NOTE: REST url requires the intent id to be in decimal form
             query = "/" + str( deviceId ) + "/" + str( int( flowId ) )
             response = self.send( method="DELETE",
-                                  url="/flows" + query, ip=ip, port=port )
+                                  url="/flows" + query, ip = ip, port = port )
             if response:
                 if 200 <= response[ 0 ] <= 299:
                     return main.TRUE
@@ -1463,7 +1463,7 @@
             main.log.exception( self.name + ": Uncaught exception!" )
             main.cleanAndExit()
 
-    def checkFlowsState( self, ip="DEFAULT", port="DEFAULT" ):
+    def checkFlowsState( self , ip="DEFAULT", port="DEFAULT" ):
         """
         Description:
             Check if all the current flows are in ADDED state
@@ -1516,7 +1516,7 @@
                     url += "/" + subjectKey
                     if configKey:
                         url += "/" + configKey
-            response = self.send( url=url, ip=ip, port=port )
+            response = self.send( url=url, ip = ip, port = port )
             if response:
                 if 200 <= response[ 0 ] <= 299:
                     output = response[ 1 ]
@@ -1566,7 +1566,7 @@
                     if configKey:
                         url += "/" + configKey
             response = self.send( method="POST",
-                                  url=url, ip=ip, port=port,
+                                  url=url, ip = ip, port = port,
                                   data=json.dumps( cfgJson ) )
             if response:
                 if 200 <= response[ 0 ] <= 299:
@@ -1611,7 +1611,7 @@
                     if configKey:
                         url += "/" + configKey
             response = self.send( method="DELETE",
-                                  url=url, ip=ip, port=port )
+                                  url=url, ip = ip, port = port )
             if response:
                 if 200 <= response[ 0 ] <= 299:
                     main.log.info( self.name + ": Successfully delete cfg" )
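
The getNetCfg/setNetCfg/removeNetCfg hunks above build their URL by appending optional subjectClassKey/subjectKey/configKey segments before calling send(). A short sketch of the corresponding raw calls, assuming the standard /onos/v1/network/configuration endpoint and placeholder device ID and credentials:

    import json
    import requests

    BASE = "http://127.0.0.1:8181/onos/v1/network/configuration"  # assumed endpoint
    AUTH = ( "onos", "rocks" )  # placeholder credentials

    # Whole tree, then narrowed to one subject/config key, as getNetCfg() does.
    print requests.get( BASE, auth=AUTH ).json()
    print requests.get( BASE + "/devices/of:0000000000000001/basic", auth=AUTH ).json()

    # Push a fragment the way setNetCfg() does with json.dumps( cfgJson ).
    resp = requests.post( BASE + "/devices/of:0000000000000001/basic",
                          data=json.dumps( { "allowed": True } ),
                          headers={ "Content-Type": "application/json" },
                          auth=AUTH )
    print resp.status_code
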
@@ -1628,30 +1628,30 @@
             main.cleanAndExit()
 
     def createFlowBatch( self,
-                         numSw=1,
-                         swIndex=1,
-                         batchSize=1,
-                         batchIndex=1,
-                         deviceIdpreFix="of:",
-                         appId=0,
-                         deviceID="",
-                         ingressPort="",
-                         egressPort="",
-                         ethType="",
-                         ethSrc="",
-                         ethDst="",
-                         vlan="",
-                         ipProto="",
-                         ipSrc=(),
-                         ipDst=(),
-                         tcpSrc="",
-                         tcpDst="",
-                         udpDst="",
-                         udpSrc="",
-                         mpls="",
-                         ip="DEFAULT",
-                         port="DEFAULT",
-                         debug=False ):
+                      numSw = 1,
+                      swIndex = 1,
+                      batchSize = 1,
+                      batchIndex = 1,
+                      deviceIdpreFix = "of:",
+                      appId=0,
+                      deviceID="",
+                      ingressPort="",
+                      egressPort="",
+                      ethType="",
+                      ethSrc="",
+                      ethDst="",
+                      vlan="",
+                      ipProto="",
+                      ipSrc=(),
+                      ipDst=(),
+                      tcpSrc="",
+                      tcpDst="",
+                      udpDst="",
+                      udpSrc="",
+                      mpls="",
+                      ip="DEFAULT",
+                      port="DEFAULT",
+                      debug=False ):
         """
         Description:
             Creates batches of MAC-rule flows for POST.
@@ -1668,9 +1668,9 @@
             * ethDst: specify ethDst ( i.e. dst mac addr )
             * ipProto: specify ip protocol
             * ipSrc: specify ip source address with mask eg. ip#/24
-                as a tuple ( type, ip# )
+                as a tuple (type, ip#)
             * ipDst: specify ip destination address eg. ip#/24
-                as a tuple ( type, ip# )
+                as a tuple (type, ip#)
             * tcpSrc: specify tcp source port
             * tcpDst: specify tcp destination port
         Returns:
@@ -1681,110 +1681,111 @@
             The ip and port option are for the requests input's ip and port
             of the ONOS node
         """
-        # from pprint import pprint
+        #from pprint import pprint
 
         flowJsonList = []
-        flowJsonBatch = { "flows": flowJsonList }
+        flowJsonBatch = {"flows":flowJsonList}
         dev = swIndex
 
-        for fl in range( 1, batchSize + 1 ):
-            flowJson = { "priority": 100,
-                           "deviceId": "",
-                           "isPermanent": "true",
-                           "timeout": 0,
-                           "treatment": { "instructions": [] },
-                           "selector": { "criteria": [] } }
+        for fl in range(1, batchSize + 1):
+            flowJson = { "priority":100,
+                           "deviceId":"",
+                           "isPermanent":"true",
+                           "timeout":0,
+                           "treatment":{"instructions":[]},
+                           "selector": {"criteria":[]}}
 
-            # main.log.info( "fl: " + str( fl ) )
+            #main.log.info("fl: " + str(fl))
             if dev <= numSw:
-                deviceId = deviceIdpreFix + "{0:0{1}x}".format( dev, 16 )
-                # print deviceId
-                flowJson[ 'deviceId' ] = deviceId
+                deviceId = deviceIdpreFix + "{0:0{1}x}".format(dev,16)
+                #print deviceId
+                flowJson['deviceId'] = deviceId
                 dev += 1
             else:
                 dev = 1
-                deviceId = deviceIdpreFix + "{0:0{1}x}".format( dev, 16 )
-                # print deviceId
-                flowJson[ 'deviceId' ] = deviceId
+                deviceId = deviceIdpreFix + "{0:0{1}x}".format(dev,16)
+                #print deviceId
+                flowJson['deviceId'] = deviceId
                 dev += 1
 
                 # ethSrc starts with "0"; ethDst starts with "1"
                 # 2 Hex digit of device number; 5 digits of batch index number; 5 digits of batch size
-            ethS = "%02X" % int( "0" + "{0:0{1}b}".format( dev, 7 ), 2 ) + \
-                   "{0:0{1}x}".format( batchIndex, 5 ) + "{0:0{1}x}".format( fl, 5 )
-            ethSrc = ':'.join( ethS[ i:i + 2 ] for i in range( 0, len( ethS ), 2 ) )
-            ethD = "%02X" % int( "1" + "{0:0{1}b}".format( dev, 7 ), 2 ) + \
-                   "{0:0{1}x}".format( batchIndex, 5 ) + "{0:0{1}x}".format( fl, 5 )
-            ethDst = ':'.join( ethD[ i:i + 2 ] for i in range( 0, len( ethD ), 2 ) )
+            ethS = "%02X" %int( "0" + "{0:0{1}b}".format(dev,7), 2 ) + \
+                   "{0:0{1}x}".format(batchIndex,5) + "{0:0{1}x}".format(fl,5)
+            ethSrc = ':'.join(ethS[i:i+2] for i in range(0,len(ethS),2))
+            ethD = "%02X" %int( "1" + "{0:0{1}b}".format(dev,7), 2 ) + \
+                   "{0:0{1}x}".format(batchIndex,5) + "{0:0{1}x}".format(fl,5)
+            ethDst = ':'.join(ethD[i:i+2] for i in range(0,len(ethD),2))
 
             if appId:
                 flowJson[ "appId" ] = appId
 
             if egressPort:
                 flowJson[ 'treatment' ][ 'instructions' ].append( {
-                                                        "type": "OUTPUT",
-                                                        "port": egressPort } )
+                                                        "type":"OUTPUT",
+                                                        "port":egressPort } )
             if ingressPort:
                 flowJson[ 'selector' ][ 'criteria' ].append( {
-                                                        "type": "IN_PORT",
-                                                        "port": ingressPort } )
+                                                        "type":"IN_PORT",
+                                                        "port":ingressPort } )
             if ethType:
                 flowJson[ 'selector' ][ 'criteria' ].append( {
-                                                        "type": "ETH_TYPE",
-                                                        "ethType": ethType } )
+                                                        "type":"ETH_TYPE",
+                                                        "ethType":ethType } )
             if ethSrc:
                 flowJson[ 'selector' ][ 'criteria' ].append( {
-                                                        "type": "ETH_SRC",
-                                                        "mac": ethSrc } )
+                                                        "type":"ETH_SRC",
+                                                        "mac":ethSrc } )
             if ethDst:
                 flowJson[ 'selector' ][ 'criteria' ].append( {
-                                                        "type": "ETH_DST",
-                                                        "mac": ethDst } )
+                                                        "type":"ETH_DST",
+                                                        "mac":ethDst } )
             if vlan:
                 flowJson[ 'selector' ][ 'criteria' ].append( {
-                                                        "type": "VLAN_VID",
-                                                        "vlanId": vlan } )
+                                                        "type":"VLAN_VID",
+                                                        "vlanId":vlan } )
             if mpls:
                 flowJson[ 'selector' ][ 'criteria' ].append( {
-                                                        "type": "MPLS_LABEL",
-                                                        "label": mpls } )
+                                                        "type":"MPLS_LABEL",
+                                                        "label":mpls } )
             if ipSrc:
                 flowJson[ 'selector' ][ 'criteria' ].append( {
-                                                        "type": ipSrc[ 0 ],
-                                                        "ip": ipSrc[ 1 ] } )
+                                                        "type":ipSrc[0],
+                                                        "ip":ipSrc[1] } )
             if ipDst:
                 flowJson[ 'selector' ][ 'criteria' ].append( {
-                                                        "type": ipDst[ 0 ],
-                                                        "ip": ipDst[ 1 ] } )
+                                                        "type":ipDst[0],
+                                                        "ip":ipDst[1] } )
             if tcpSrc:
                 flowJson[ 'selector' ][ 'criteria' ].append( {
-                                                        "type": "TCP_SRC",
+                                                        "type":"TCP_SRC",
                                                         "tcpPort": tcpSrc } )
             if tcpDst:
                 flowJson[ 'selector' ][ 'criteria' ].append( {
-                                                        "type": "TCP_DST",
+                                                        "type":"TCP_DST",
                                                         "tcpPort": tcpDst } )
             if udpSrc:
                 flowJson[ 'selector' ][ 'criteria' ].append( {
-                                                        "type": "UDP_SRC",
+                                                        "type":"UDP_SRC",
                                                         "udpPort": udpSrc } )
             if udpDst:
                 flowJson[ 'selector' ][ 'criteria' ].append( {
-                                                        "type": "UDP_DST",
+                                                        "type":"UDP_DST",
                                                         "udpPort": udpDst } )
             if ipProto:
                 flowJson[ 'selector' ][ 'criteria' ].append( {
-                                                        "type": "IP_PROTO",
+                                                        "type":"IP_PROTO",
                                                         "protocol": ipProto } )
-            # pprint( flowJson )
-            flowJsonList.append( flowJson )
+            #pprint(flowJson)
+            flowJsonList.append(flowJson)
 
-        main.log.info( "Number of flows in batch: " + str( len( flowJsonList ) ) )
-        flowJsonBatch[ 'flows' ] = flowJsonList
-        # pprint( flowJsonBatch )
+        main.log.info("Number of flows in batch: " + str( len(flowJsonList) ) )
+        flowJsonBatch['flows'] = flowJsonList
+        #pprint(flowJsonBatch)
 
         return flowJsonBatch
 
+
     def sendFlowBatch( self, batch={}, ip="DEFAULT", port="DEFAULT", debug=False ):
         """
         Description:
@@ -1802,8 +1803,7 @@
         import time
 
         try:
-            if debug:
-                main.log.debug( "Adding flow: " + self.pprint( batch ) )
+            if debug: main.log.debug( "Adding flow: " + self.pprint( batch ) )
             output = None
             if ip == "DEFAULT":
                 main.log.warn( "No ip given, reverting to ip from topo file" )
@@ -1814,10 +1814,10 @@
                 port = self.port
             url = "/flows/"
             response = self.send( method="POST",
-                                  url=url, ip=ip, port=port,
+                                  url=url, ip = ip, port = port,
                                   data=json.dumps( batch ) )
-            # main.log.info( "Post response is: ", str( response[ 0 ] ) )
-            if response[ 0 ] == 200:
+            #main.log.info("Post response is: ", str(response[0]))
+            if response[0] == 200:
                 main.log.info( self.name + ": Successfully POST flow batch" )
                 return main.TRUE, response
             else:
@@ -1834,7 +1834,7 @@
             main.cleanAndExit()
 
     def removeFlowBatch( self, batch={},
-                         ip="DEFAULT", port="DEFAULT" ):
+                       ip="DEFAULT", port="DEFAULT" ):
         """
         Description:
             Remove a batch of flows
@@ -1856,8 +1856,8 @@
             # NOTE: REST url requires the intent id to be in decimal form
 
             response = self.send( method="DELETE",
-                                  url="/flows/", ip=ip, port=port,
-                                  data=json.dumps( batch ) )
+                                  url="/flows/", ip = ip, port = port,
+                                  data = json.dumps(batch) )
             if response:
                 if 200 <= response[ 0 ] <= 299:
                     return main.TRUE
@@ -1882,7 +1882,7 @@
         import json
         try:
             # either onos:topology or 'topology' will work in CLI
-            topology = json.loads( topologyOutput )
+            topology = json.loads(topologyOutput)
             main.log.debug( topology )
             return topology
         except pexpect.EOF:
@@ -1914,7 +1914,7 @@
         """
         try:
             topology = self.getTopology( self.topology() )
-            # summary = self.summary()
+            #summary = self.summary()
             if topology == {}:
                 return main.ERROR
             output = ""
@@ -1978,7 +1978,7 @@
                           "appCookie": appCookie,
                           "groupId": groupId,
                           "buckets": bucketList
-                          }
+                        }
             return self.sendGroup( deviceId=deviceId, groupJson=groupJson, ip="DEFAULT", port="DEFAULT", debug=False )
 
         except ( AttributeError, TypeError ):
@@ -2004,8 +2004,7 @@
             of the ONOS node
         """
         try:
-            if debug:
-                main.log.debug( "Adding group: " + self.pprint( groupJson ) )
+            if debug: main.log.debug( "Adding group: " + self.pprint( groupJson ) )
             output = None
             if ip == "DEFAULT":
                 main.log.warn( "No ip given, reverting to ip from topo file" )
@@ -2016,7 +2015,7 @@
                 port = self.port
             url = "/groups/" + deviceId
             response = self.send( method="POST",
-                                  url=url, ip=ip, port=port,
+                                  url=url, ip = ip, port = port,
                                   data=json.dumps( groupJson ) )
             if response:
                 if "201" in str( response[ 0 ] ):
@@ -2065,8 +2064,8 @@
             if deviceId:
                 url += "/" + deviceId
                 if appCookie:
-                    url += "/" + appCookie
-            response = self.send( url=url, ip=ip, port=port )
+                   url += "/" + appCookie
+            response = self.send( url=url, ip = ip, port = port )
             if response:
                 if 200 <= response[ 0 ] <= 299:
                     output = response[ 1 ]
@@ -2113,7 +2112,7 @@
                 port = self.port
             query = "/" + str( deviceId ) + "/" + str( appCookie )
             response = self.send( method="DELETE",
-                                  url="/groups" + query, ip=ip, port=port )
+                                  url="/groups" + query, ip = ip, port = port )
             if response:
                 if 200 <= response[ 0 ] <= 299:
                     return main.TRUE
@@ -2127,3 +2126,4 @@
         except Exception:
             main.log.exception( self.name + ": Uncaught exception!" )
             main.cleanAndExit()
+
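
createFlowBatch() in the hunks above derives per-flow MAC addresses by packing a leading 0 (source) or 1 (destination) bit with a 7-bit device number into one byte, followed by five hex digits of batch index and five hex digits of flow index. A small worked example of that packing, with arbitrary values:

    # Reproduces the ethSrc/ethDst construction from createFlowBatch() above.
    dev, batchIndex, fl = 3, 1, 7

    ethS = "%02X" % int( "0" + "{0:0{1}b}".format( dev, 7 ), 2 ) + \
           "{0:0{1}x}".format( batchIndex, 5 ) + "{0:0{1}x}".format( fl, 5 )
    ethSrc = ':'.join( ethS[ i:i + 2 ] for i in range( 0, len( ethS ), 2 ) )
    print ethSrc  # -> 03:00:00:10:00:07

    ethD = "%02X" % int( "1" + "{0:0{1}b}".format( dev, 7 ), 2 ) + \
           "{0:0{1}x}".format( batchIndex, 5 ) + "{0:0{1}x}".format( fl, 5 )
    ethDst = ':'.join( ethD[ i:i + 2 ] for i in range( 0, len( ethD ), 2 ) )
    print ethDst  # -> 83:00:00:10:00:07
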
diff --git a/TestON/drivers/common/api/controllerdriver.py b/TestON/drivers/common/api/controllerdriver.py
index 0c3305c..956374c 100644
--- a/TestON/drivers/common/api/controllerdriver.py
+++ b/TestON/drivers/common/api/controllerdriver.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python
 """
 Created on 29-Nov-2012
-Copyright 2012 Open Networking Foundation ( ONF )
+Copyright 2012 Open Networking Foundation (ONF)
 
 Please refer questions to either the onos test mailing list at <onos-test@onosproject.org>,
 the System Testing Plans and Results wiki page at <https://wiki.onosproject.org/x/voMg>,
@@ -32,3 +32,4 @@
 
     def __init__( self ):
         super( Controller, self ).__init__()
+
diff --git a/TestON/drivers/common/api/dockerapidriver.py b/TestON/drivers/common/api/dockerapidriver.py
index 3b9b77a..3fac610 100644
--- a/TestON/drivers/common/api/dockerapidriver.py
+++ b/TestON/drivers/common/api/dockerapidriver.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python
 """
-Copyright 2016 Open Networking Foundation ( ONF )
+Copyright 2016 Open Networking Foundation (ONF)
 
 Please refer questions to either the onos test mailing list at <onos-test@onosproject.org>,
 the System Testing Plans and Results wiki page at <https://wiki.onosproject.org/x/voMg>,
@@ -9,7 +9,7 @@
     TestON is free software: you can redistribute it and/or modify
     it under the terms of the GNU General Public License as published by
     the Free Software Foundation, either version 2 of the License, or
-    ( at your option ) any later version.
+    (at your option) any later version.
 
     TestON is distributed in the hope that it will be useful,
     but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -19,6 +19,7 @@
     You should have received a copy of the GNU General Public License
     along with TestON.  If not, see <http://www.gnu.org/licenses/>.
 """
+
 import json
 import os
 import re
@@ -27,7 +28,6 @@
 from docker import errors
 from drivers.common.apidriver import API
 
-
 class DockerApiDriver( API ):
 
     def __init__( self ):
@@ -55,7 +55,7 @@
                 self.home = "/var/tmp"
 
             self.handle = super( DockerApiDriver, self ).connect()
-            self.dockerClient = Client( base_url='unix://var/run/docker.sock' )
+            self.dockerClient = Client(base_url='unix://var/run/docker.sock')
             return self.handle
         except Exception as e:
             main.log.exception( e )
@@ -72,29 +72,29 @@
                 if imageDict[ 'RepoTags' ] is not None:
                     if len( imageDict[ 'RepoTags' ] ) > 1:
                         duplicateTagDetected = 1
-                    imageListToSend.append( imageDict[ 'RepoTags' ][ 0 ].encode( 'UTF8' ).split( ':' )[ 1 ] )
+                    imageListToSend.append( imageDict['RepoTags'][0].encode('UTF8').split(':')[1] )
             return imageListToSend, duplicateTagDetected
         except Exception as e:
             main.log.exception( e )
 
-    def dockerPull( self, onosRepo="onosproject/onos", onosTag="latest" ):
+    def dockerPull( self, onosRepo ="onosproject/onos", onosTag="latest" ):
         """
         Pulls Docker image from repository
         """
         try:
             main.log.info( self.name +
-                           ": Pulling Docker image " + onosRepo + ":" + onosTag )
-            for line in self.dockerClient.pull( repository=onosRepo,
-                                                tag=onosTag, stream=True ):
-                print "#",
-            main.log.info( json.dumps( json.loads( line ), indent=4 ) )
+                           ": Pulling Docker image " + onosRepo + ":"+ onosTag )
+            for line in self.dockerClient.pull( repository = onosRepo, \
+                    tag = onosTag, stream = True ):
+                    print "#",
+            main.log.info(json.dumps(json.loads(line), indent =4))
 
-            # response = json.dumps( json.load( pullResult ), indent=4 )
+            #response = json.dumps( json.load( pullResult ), indent=4 )
             if re.search( "for onosproject/onos:" + onosTag, line ):
                 main.log.info( "onos docker image pulled is: " + line )
                 return main.TRUE
             else:
-                main.log.error( "Failed to download image from: " + onosRepo + ":" + onosTag )
+                main.log.error( "Failed to download image from: " + onosRepo +":"+ onosTag )
                 main.log.error( "Error respone: " )
                 main.log.error( line )
                 return main.FALSE
@@ -109,17 +109,17 @@
         try:
             main.log.info( self.name +
                            ": Creating Docker container for node: " + onosNode )
-            response = self.dockerClient.create_container( image=onosImage,
-                                                           tty=True, name=onosNode, detach=True )
-            # print response
-            # print response.get( "Id" )
-            # print response.get( "Warnings" )
-            if( str( response.get( "Warnings" ) ) == 'None' ):
-                main.log.info( "Created container for node: " + onosNode + "; container id is: " + response.get( "Id" ) )
-                return ( main.TRUE, response.get( "Id" ) )
+            response = self.dockerClient.create_container( image=onosImage, \
+                    tty=True, name=onosNode, detach=True )
+            #print response
+            #print response.get("Id")
+            #print response.get("Warnings")
+            if( str( response.get("Warnings") ) == 'None' ):
+                main.log.info( "Created container for node: " + onosNode + "; container id is: " + response.get("Id") )
+                return ( main.TRUE, response.get("Id") )
             else:
                 main.log.info( "Noticed warnings during create" )
-                return ( main.FALSE, null )
+                return ( main.FALSE, null)
         except Exception:
             main.log.exception( self.name + ": Uncaught exception!" )
             main.cleanAndExit()
@@ -131,7 +131,7 @@
         try:
             main.log.info( self.name +
                            ": Starting Docker conatiner Id " + ctID )
-            response = self.dockerClient.start( container=ctID )
+            response = self.dockerClient.start( container = ctID )
             if response is None:
                 main.log.info( "Started container for Id: " + ctID )
                 return main.TRUE
@@ -157,11 +157,11 @@
                 main.log.info( "Noticed warnings during stop" )
                 return main.FALSE
         except errors.NotFound:
-            main.log.info( ctName + " not found! Continue on tests..." )
+            main.log.info( ctName + " not found! Continue on tests...")
             return main.TRUE
         except Exception:
             main.log.exception( self.name + ": Uncaught exception!" )
-            # main.cleanAndExit()
+            #main.cleanAndExit()
 
     def dockerRestartCT( self, ctName ):
         """
@@ -181,27 +181,27 @@
             main.log.exception( self.name + ": Uncaught exception!" )
             main.cleanAndExit()
 
-    def dockerCheckCTName( self, ctName ):
+    def dockerCheckCTName( self, ctName):
         """
             Check Docker conatiner status
         """
         try:
             main.log.info( self.name +
                            ": Checking Docker Status for CT with 'Names'  " + ctName )
-            namelist = [ response[ "Names" ] for response in self.dockerClient.containers( all=True ) if not [] ]
-            main.log.info( "Name list is: " + str( namelist ) )
-            if( [ ctName ] in namelist ):
+            namelist = [response["Names"] for response in self.dockerClient.containers(all=True) if not []]
+            main.log.info("Name list is: " + str(namelist) )
+            if( [ctName] in namelist):
                 main.log.info( "Container " + ctName + " exists" )
                 return main.TRUE
             else:
                 main.log.info( "Container " + ctName + " does not exist" )
                 return main.FALSE
         except errors.NotFound:
-            main.log.warn( ctName + "not found! Continue with the tests..." )
+            main.log.warn( ctName + "not found! Continue with the tests...")
             return main.FALSE
         except Exception:
             main.log.exception( self.name + ": Uncaught exception! Continue tests..." )
-            # main.cleanAndExit()
+            #main.cleanAndExit()
 
     def dockerRemoveCT( self, ctName ):
         """
@@ -215,15 +215,15 @@
                 main.log.info( "Removed container for node: " + ctName )
                 return main.TRUE
             else:
-                main.log.info( "Noticed warnings during Remove " + ctName )
+                main.log.info( "Noticed warnings during Remove " + ctName)
                 return main.FALSE
-            main.log.exception( self.name + ": not found, continuing..." )
+            main.log.exception(self.name + ": not found, continuing...")
         except errors.NotFound:
-            main.log.warn( ctName + "not found! Continue with the tests..." )
+            main.log.warn( ctName + "not found! Continue with the tests...")
             return main.TRUE
         except Exception:
             main.log.exception( self.name + ": Uncaught exception! Continuing..." )
-            # main.cleanAndExit()
+            #main.cleanAndExit()
 
     def dockerRemoveImage( self, imageRepoTag=None ):
         """
@@ -234,13 +234,13 @@
             main.log.info( "No docker image found" )
             return rmResult
         else:
-            imageList = [ image[ "Id" ] for image in self.dockerClient.images()
-                                        if image[ "RepoTags" ] is None
-                                           or imageRepoTag in image[ "RepoTags" ] ]
+            imageList = [ image["Id"] for image in self.dockerClient.images()
+                                        if image["RepoTags"] is None
+                                           or imageRepoTag in image["RepoTags"] ]
             for id in imageList:
                 try:
                     main.log.info( self.name + ": Removing Docker image " + id )
-                    response = self.dockerClient.remove_image( id, force=True )
+                    response = self.dockerClient.remove_image(id, force = True)
                     if response is None:
                         main.log.info( "Removed Docker image: " + id )
                         rmResult = rmResult and main.TRUE
@@ -248,12 +248,12 @@
                         main.log.info( "Noticed warnings during Remove " + id )
                         rmResult = rmResult and main.FALSE
                 except errors.NotFound:
-                    main.log.warn( image + "not found! Continue with the tests..." )
+                    main.log.warn( image + "not found! Continue with the tests...")
                     rmResult = rmResult and main.TRUE
                 except Exception:
                     main.log.exception( self.name + ": Uncaught exception! Continuing..." )
                     rmResult = rmResult and main.FALSE
-                    # main.cleanAndExit()
+                    #main.cleanAndExit()
         return rmResult
 
     def fetchLatestClusterFile( self, branch="master" ):
@@ -263,7 +263,7 @@
         try:
             command = "wget -N https://raw.githubusercontent.com/opennetworkinglab/\
                     onos/" + branch + "/tools/package/bin/onos-form-cluster"
-            subprocess.call( command )  # output checks are missing for now
+            subprocess.call( command ) # output checks are missing for now
             command = "chmod u+x " + "onos-form-cluster"
             subprocess.call( command )
             return main.TRUE
@@ -276,7 +276,7 @@
             From ONOS cluster for IP addresses in onosIPs list
         """
         try:
-            onosIPs = " ".join( onosIPs )
+            onosIPs = " ".join(onosIPs)
             command = "{}/onos-form-cluster -u {} -p {} {}".format( cmdPath,
                                                                     user,
                                                                     passwd,
@@ -285,7 +285,7 @@
             if result == 0:
                 return main.TRUE
             else:
-                main.log.info( "Something is not right in forming cluster>" )
+                main.log.info("Something is not right in forming cluster>")
                 return main.FALSE
         except Exception:
             main.log.exception( self.name + ": Uncaught exception!" )
@@ -297,10 +297,11 @@
         """
         try:
             output = self.dockerClient.inspect_container( ctName )
-            nodeIP = output[ 'NetworkSettings' ][ 'IPAddress' ]
-            main.log.info( " Docker IP " + str( nodeIP ) )
-            return str( nodeIP )
+            nodeIP = output['NetworkSettings']['IPAddress']
+            main.log.info( " Docker IP " + str(nodeIP) )
+            return str(nodeIP)
 
         except Exception:
             main.log.exception( self.name + ": Uncaught exception!" )
             main.cleanAndExit()
+
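The dockerRemoveImage() hunks above filter self.dockerClient.images() by RepoTags and force-remove each match. A minimal standalone sketch of that pattern, assuming the low-level docker-py client (docker.APIClient with images()/remove_image(), as the driver's calls suggest); the helper name and socket URL are illustrative only:

import docker
from docker import errors

def remove_images_by_tag(repo_tag, base_url="unix://var/run/docker.sock"):
    client = docker.APIClient(base_url=base_url)
    # images() returns dicts carrying "Id" and "RepoTags"; RepoTags can be
    # None for dangling images, which the driver also treats as removable.
    ids = [img["Id"] for img in client.images()
           if img["RepoTags"] is None or repo_tag in img["RepoTags"]]
    removed = []
    for image_id in ids:
        try:
            client.remove_image(image_id, force=True)
            removed.append(image_id)
        except errors.NotFound:
            pass  # already gone; keep going, as the driver does
    return removed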
diff --git a/TestON/drivers/common/api/fvtapidriver.py b/TestON/drivers/common/api/fvtapidriver.py
index c44d32d..4f20c89 100644
--- a/TestON/drivers/common/api/fvtapidriver.py
+++ b/TestON/drivers/common/api/fvtapidriver.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python
 """
 Created on 26-Oct-2012
-Copyright 2012 Open Networking Foundation ( ONF )
+Copyright 2012 Open Networking Foundation (ONF)
 
 Please refer questions to either the onos test mailing list at <onos-test@onosproject.org>,
 the System Testing Plans and Results wiki page at <https://wiki.onosproject.org/x/voMg>,
@@ -25,7 +25,8 @@
 
 
 fvtapidriver is the basic driver which will handle the fvtapidriver functions
-""""""
+"""
+"""
 There are two changes required in flowvisor-test framework :
 
 1. In ~/flowvisortests/tests/templatetest.py line : 15 comment 'basic_logger = None'
@@ -187,3 +188,4 @@
         #    self.logfile_handler.close()
 
         return main.TRUE
+
diff --git a/TestON/drivers/common/apidriver.py b/TestON/drivers/common/apidriver.py
index 890e89f..ec416ee 100644
--- a/TestON/drivers/common/apidriver.py
+++ b/TestON/drivers/common/apidriver.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python
 """
 Created on 22-Nov-2012
-Copyright 2012 Open Networking Foundation ( ONF )
+Copyright 2012 Open Networking Foundation (ONF)
 
 Please refer questions to either the onos test mailing list at <onos-test@onosproject.org>,
 the System Testing Plans and Results wiki page at <https://wiki.onosproject.org/x/voMg>,
@@ -44,3 +44,4 @@
         super( API, self ).connect()
 
         return main.TRUE
+
diff --git a/TestON/drivers/common/cli/dpclidriver.py b/TestON/drivers/common/cli/dpclidriver.py
index 5091d1a..2386c45 100644
--- a/TestON/drivers/common/cli/dpclidriver.py
+++ b/TestON/drivers/common/cli/dpclidriver.py
@@ -1,5 +1,5 @@
 """
-Copyright 2015 Open Networking Foundation ( ONF )
+Copyright 2015 Open Networking Foundation (ONF)
 
 Please refer questions to either the onos test mailing list at <onos-test@onosproject.org>,
 the System Testing Plans and Results wiki page at <https://wiki.onosproject.org/x/voMg>,
@@ -8,7 +8,7 @@
     TestON is free software: you can redistribute it and/or modify
     it under the terms of the GNU General Public License as published by
     the Free Software Foundation, either version 2 of the License, or
-    ( at your option ) any later version.
+    (at your option) any later version.
 
     TestON is distributed in the hope that it will be useful,
     but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -38,9 +38,9 @@
 
         self.name = self.options[ 'name' ]
         self.handle = super( DPCliDriver, self ).connect( user_name=self.user_name,
-                                                          ip_address=self.ip_address,
-                                                          port=self.port,
-                                                          pwd=self.pwd )
+                        ip_address=self.ip_address,
+                        port=self.port,
+                        pwd=self.pwd )
 
         if self.handle:
             return self.handle
@@ -148,7 +148,7 @@
         self.handle.sendline( "sudo fping -S " + str( netsrc ) + "." +
                               str( netstrt ) + ".1.1 -f /tmp/ip_table" +
                               str( netdst ) + ".txt" )
-        while True:
+        while 1:
             i = self.handle.expect( [
                                     "reachable",
                                     "unreachable",
@@ -210,3 +210,4 @@
             main.log.exception( "Connection failed to the host" )
             response = main.FALSE
         return response
+
diff --git a/TestON/drivers/common/cli/emulator/flowvisordriver.py b/TestON/drivers/common/cli/emulator/flowvisordriver.py
index 0e53125..e1877ae 100644
--- a/TestON/drivers/common/cli/emulator/flowvisordriver.py
+++ b/TestON/drivers/common/cli/emulator/flowvisordriver.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python
 """
 Created on 26-Mar-2013
-Copyright 2013 Open Networking Foundation ( ONF )
+Copyright 2013 Open Networking Foundation (ONF)
 
 Please refer questions to either the onos test mailing list at <onos-test@onosproject.org>,
 the System Testing Plans and Results wiki page at <https://wiki.onosproject.org/x/voMg>,
@@ -42,7 +42,7 @@
         self.wrapped = sys.modules[ __name__ ]
 
     def connect( self, **connectargs ):
-        # user_name, ip_address, pwd,options ):
+        #,user_name, ip_address, pwd,options ):
         # Here the main is the TestON instance after creating all the log
         # handles.
         for key in connectargs:
@@ -63,7 +63,7 @@
         if self.handle:
             self.execute( cmd='\r', prompt=self.prompt, timeout=10 )
             self.options[ 'path' ] = '/home/openflow/flowvisor/scripts/'
-            # self.handle.logfile = sys.stdout
+            #self.handle.logfile = sys.stdout
             self.execute(
                 cmd='cd ' +
                 self.options[ 'path' ],
@@ -90,9 +90,9 @@
                 self.fvprocess_id,
                 onfail="Failed to start FlowVisor" )
             main.log.info( response )
-            # import time
+            #import time
             # time.sleep( 10 )
-            # response = self.execute( cmd='./start_visualizer.sh & \r',prompt=self.prompt,timeout=10 )
+            #response = self.execute( cmd='./start_visualizer.sh & \r',prompt=self.prompt,timeout=10 )
 
             return main.TRUE
         else:
@@ -192,7 +192,7 @@
 
     def listDevices( self ):
         # self.execute( cmd="clear",prompt=self.prompt,timeout=10 )
-        # self.execute( cmd="./fvctl.sh listDevices ",prompt="passwd:",timeout=10 )
+        #self.execute( cmd="./fvctl.sh listDevices ",prompt="passwd:",timeout=10 )
         # self.execute( cmd="\n",prompt=self.prompt,timeout=10 )
         devices_list = ''
         last_response = re.findall(
diff --git a/TestON/drivers/common/cli/emulator/lincoedriver.py b/TestON/drivers/common/cli/emulator/lincoedriver.py
index f41b6d2..d5e114a 100644
--- a/TestON/drivers/common/cli/emulator/lincoedriver.py
+++ b/TestON/drivers/common/cli/emulator/lincoedriver.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python
 
 """
-Copyright 2014 Open Networking Foundation ( ONF )
+Copyright 2014 Open Networking Foundation (ONF)
 
 Please refer questions to either the onos test mailing list at <onos-test@onosproject.org>,
 the System Testing Plans and Results wiki page at <https://wiki.onosproject.org/x/voMg>,
@@ -10,7 +10,7 @@
     TestON is free software: you can redistribute it and/or modify
     it under the terms of the GNU General Public License as published by
     the Free Software Foundation, either version 2 of the License, or
-    ( at your option ) any later version.
+    (at your option) any later version.
 
     TestON is distributed in the hope that it will be useful,
     but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -35,6 +35,7 @@
 
 OCT 20 2014
 """
+
 import pexpect
 import sys
 from drivers.common.cli.emulatordriver import Emulator
@@ -55,6 +56,7 @@
         """
         Create ssh handle for Linc-OE cli
         """
+
         for key in connectargs:
             vars( self )[ key ] = connectargs[ key ]
 
@@ -219,7 +221,7 @@
         """
             Since executing opticalTest.py will give you mininet
             prompt, you would at some point require to get onto
-            console of LincOE ( ( linc@onosTestBench )1> ) to execute
+            console of LincOE ((linc@onosTestBench)1>) to execute
             commands like bringing an optical port up or down on a ROADM
             You can attach to console of Linc-OE session by a cmd:
             sudo ~/linc-oe/rel/linc/bin/linc attach
diff --git a/TestON/drivers/common/cli/emulator/lincoemininetdriver.py b/TestON/drivers/common/cli/emulator/lincoemininetdriver.py
index 2a85d63..bfa466f 100644
--- a/TestON/drivers/common/cli/emulator/lincoemininetdriver.py
+++ b/TestON/drivers/common/cli/emulator/lincoemininetdriver.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python
 
 """
-Copyright 2016 Open Networking Foundation ( ONF )
+Copyright 2016 Open Networking Foundation (ONF)
 
 Please refer questions to either the onos test mailing list at <onos-test@onosproject.org>,
 the System Testing Plans and Results wiki page at <https://wiki.onosproject.org/x/voMg>,
@@ -23,6 +23,7 @@
 
 LincOEMininetDriver is an extension of the mininetclidriver to handle linc oe
 """
+
 import pexpect
 import re
 import sys
@@ -31,8 +32,7 @@
 
 
 class LincOEMininetDriver( MininetCliDriver ):
-
-    def runOpticalMnScript( self, onosDirectory='onos', ctrllerIP=None, topology='opticalTest' ):
+    def runOpticalMnScript( self, onosDirectory = 'onos', ctrllerIP = None, topology = 'opticalTest' ):
         import time
         import types
         """
@@ -41,22 +41,22 @@
                 It runs python script "opticalTest.py" to create the
                 packet layer( mn ) and optical topology
             Optional:
-                name - Name of onos directory. ( ONOS | onos )
+                name - Name of onos directory. (ONOS | onos)
                 topology - Name of optical topology to activate, defaults to opticalTest.py
             Required:
-                ctrllerIP = Controller( s ) IP address
+                ctrllerIP = Controller(s) IP address
             TODO: If no ctrllerIP is provided, a default
                 $OC1 can be accepted
         """
         try:
-            if ctrllerIP is None:
+            if ctrllerIP == None:
                 main.log.error( "You need to specify the IP" )
                 return main.FALSE
             else:
                 controller = ''
                 if isinstance( ctrllerIP, types.ListType ):
                     for i in xrange( len( ctrllerIP ) ):
-                        controller += ctrllerIP[ i ] + ' '
+                        controller += ctrllerIP[i] + ' '
                     main.log.info( "Mininet topology is being loaded with " +
                                    "controllers: " + controller )
                 elif isinstance( ctrllerIP, types.StringType ):
@@ -70,12 +70,12 @@
                 cmd = "sudo -E python {0} {1}".format( topoFile, controller )
                 main.log.info( self.name + ": cmd = " + cmd )
                 self.handle.sendline( cmd )
-                lincStart = self.handle.expect( [ "mininet>", pexpect.TIMEOUT ], timeout=120 )
+                lincStart = self.handle.expect( [ "mininet>", pexpect.TIMEOUT ],timeout=120 )
                 if lincStart == 1:
                     self.handle.sendline( "\x03" )
                     self.handle.sendline( "sudo mn -c" )
                     self.handle.sendline( cmd )
-                    lincStart = self.handle.expect( [ "mininet>", pexpect.TIMEOUT ], timeout=120 )
+                    lincStart = self.handle.expect( [ "mininet>", pexpect.TIMEOUT ],timeout=120 )
                 if lincStart == 1:
                     main.log.error( "OpticalTest.py failed to start." )
                     return main.FALSE
@@ -85,7 +85,7 @@
             main.log.error( self.name + ":     " + self.handle.before )
             return main.FALSE
         except Exception:
-            main.log.exception( self.name + ": Uncaught exception!" )
+            main.log.exception(self.name + ": Uncaught exception!")
             main.cleanAndExit()
             return main.FALSE
 
@@ -120,7 +120,7 @@
             main.log.error( self.name + ":     " + self.handle.before )
             main.cleanAndExit()
         except Exception:
-            main.log.exception( self.name + ": Uncaught exception!" )
+            main.log.exception(self.name + ": Uncaught exception!")
             main.cleanAndExit()
         main.log.info( self.name + ": Ping Response: " + response )
         if re.search( ',\s0\%\spacket\sloss', response ):
diff --git a/TestON/drivers/common/cli/emulator/mininetclidriver.py b/TestON/drivers/common/cli/emulator/mininetclidriver.py
index a1b691c..d69fbeb 100644
--- a/TestON/drivers/common/cli/emulator/mininetclidriver.py
+++ b/TestON/drivers/common/cli/emulator/mininetclidriver.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python
 """
 Created on 26-Oct-2012
-Copyright 2012 Open Networking Foundation ( ONF )
+Copyright 2012 Open Networking Foundation (ONF)
 
 Please refer questions to either the onos test mailing list at <onos-test@onosproject.org>,
 the System Testing Plans and Results wiki page at <https://wiki.onosproject.org/x/voMg>,
@@ -45,7 +45,6 @@
 
 
 class MininetCliDriver( Emulator ):
-
     """
        MininetCliDriver is the basic driver which will handle
        the Mininet functions"""
@@ -126,12 +125,12 @@
     def startNet( self, topoFile='', args='', mnCmd='', timeout=120 ):
         """
         Description:
-            Starts Mininet accepts a topology( .py ) file and/or an optional
+            Starts Mininet accepts a topology(.py) file and/or an optional
             argument, to start the mininet, as a parameter.
             Can also send regular mininet command to load up desired topology.
             Eg. Pass in a string 'mn --topo=tree,3,3' to mnCmd
         Options:
-            topoFile = file path for topology file ( .py )
+            topoFile = file path for topology file (.py)
             args = extra option added when starting the topology from the file
             mnCmd = Mininet command use to start topology
         Returns:
@@ -185,7 +184,7 @@
                         main.log.info(
                             "Starting Mininet from topo file " +
                             topoFile )
-                        cmdString += "-E python " + topoFile + " "
+                        cmdString +=  "-E python " + topoFile + " "
                         if args is None:
                             args = ''
                             # TODO: allow use of args from .topo file?
@@ -435,6 +434,7 @@
             main.log.exception( self.name + ": Uncaught exception!" )
             main.cleanAndExit()
 
+
     def pingallHosts( self, hostList, wait=1 ):
         """
             Ping all specified IPv4 hosts
@@ -462,7 +462,7 @@
                 pingList = hostList[ :listIndex ] + \
                     hostList[ ( listIndex + 1 ): ]
 
-                pingResponse += str( str( host ) + " -> " )
+                pingResponse += str(str(host) + " -> ")
 
                 for temp in pingList:
                     # Current host pings all other hosts specified
@@ -471,14 +471,14 @@
                     self.handle.expect( "mininet>", timeout=wait + 1 )
                     response = self.handle.before
                     if re.search( ',\s0\%\spacket\sloss', response ):
-                        pingResponse += str( " h" + str( temp[ 1: ] ) )
+                        pingResponse += str(" h" + str( temp[1:] ))
                     else:
                         pingResponse += " X"
                         # One of the host to host pair is unreachable
                         isReachable = main.FALSE
                         failedPings += 1
                 pingResponse += "\n"
-            main.log.info( pingResponse + "Failed pings: " + str( failedPings ) )
+            main.log.info( pingResponse + "Failed pings: " + str(failedPings) )
             return isReachable
         except pexpect.TIMEOUT:
             main.log.exception( self.name + ": TIMEOUT exception" )
@@ -513,23 +513,23 @@
                 pingList = hostList[ :listIndex ] + \
                     hostList[ ( listIndex + 1 ): ]
 
-                pingResponse += str( str( host ) + " -> " )
+                pingResponse += str(str(host) + " -> ")
 
                 for temp in pingList:
                     # Current host pings all other hosts specified
-                    pingCmd = str( host ) + cmd + str( self.getIPAddress( temp, proto='IPv6' ) )
+                    pingCmd = str( host ) + cmd + str( self.getIPAddress(temp,proto='IPv6') )
                     self.handle.sendline( pingCmd )
                     self.handle.expect( "mininet>", timeout=wait + 1 )
                     response = self.handle.before
                     if re.search( ',\s0\%\spacket\sloss', response ):
-                        pingResponse += str( " h" + str( temp[ 1: ] ) )
+                        pingResponse += str(" h" + str( temp[1:] ))
                     else:
                         pingResponse += " X"
                         # One of the host to host pair is unreachable
                         isReachable = main.FALSE
                         failedPings += 1
                 pingResponse += "\n"
-            main.log.info( pingResponse + "Failed pings: " + str( failedPings ) )
+            main.log.info( pingResponse + "Failed pings: " + str(failedPings) )
             return isReachable
 
         except pexpect.TIMEOUT:
@@ -549,7 +549,7 @@
         Currently the only supported Params: SRC, TARGET, and WAIT
         """
         args = utilities.parse_args( [ "SRC", "TARGET", 'WAIT' ], **pingParams )
-        wait = args[ 'WAIT' ]
+        wait = args['WAIT']
         wait = int( wait if wait else 1 )
         command = args[ "SRC" ] + " ping " + \
             args[ "TARGET" ] + " -c 1 -i 1 -W " + str( wait ) + " "
@@ -590,11 +590,11 @@
         """
            IPv6 Ping between a pair of mininet hosts
            Currently the only supported Params are: SRC, TARGET, and WAIT
-           FLOWLABEL and -I ( src interface ) will be added later after running some tests.
+           FLOWLABEL and -I (src interface) will be added later after running some tests.
            Example: main.Mininet1.ping6pair( src="h1", target="1000::2" )
         """
         args = utilities.parse_args( [ "SRC", "TARGET", 'WAIT' ], **pingParams )
-        wait = args[ 'WAIT' ]
+        wait = args['WAIT']
         wait = int( wait if wait else 1 )
         command = args[ "SRC" ] + " ping6 " + \
             args[ "TARGET" ] + " -c 1 -i 1 -W " + str( wait ) + " "
@@ -838,8 +838,8 @@
                 return main.TRUE
 
             except pexpect.TIMEOUT:
-                main.log.error( self.name + ": TIMEOUT exception found" )
-                main.log.error( self.name + ":     " + self.handle.before )
+                main.log.error(self.name + ": TIMEOUT exception found")
+                main.log.error(self.name + ":     " + self.handle.before)
                 main.cleanAndExit()
             except pexpect.EOF:
                 main.log.error( self.name + ": EOF exception found" )
@@ -877,7 +877,7 @@
                 self.handle.expect( "mininet>" )
 
                 # Determine ip and mac address of the host-oldSw interface
-                cmd = "px ipaddr = " + str( IP )
+                cmd = "px ipaddr = " + str(IP)
                 print "cmd3= ", cmd
                 self.handle.sendline( cmd )
                 self.handle.expect( "mininet>" )
@@ -925,13 +925,13 @@
                 self.handle.expect( "mininet>" )
 
                 cmd = host + " ifconfig"
-                print "cmd9 =", cmd
-                response = self.execute( cmd=cmd, prompt="mininet>", timeout=10 )
+                print "cmd9 =",cmd
+                response = self.execute( cmd = cmd, prompt="mininet>" ,timeout=10 )
                 print response
                 pattern = "h\d-eth([\w])"
                 ipAddressSearch = re.search( pattern, response )
-                print ipAddressSearch.group( 1 )
-                intf = host + "-eth" + str( ipAddressSearch.group( 1 ) )
+                print ipAddressSearch.group(1)
+                intf= host + "-eth" + str(ipAddressSearch.group(1))
                 cmd = host + " ip -6 addr add %s dev %s" % ( IP, intf )
                 print "cmd10 = ", cmd
                 self.handle.sendline( cmd )
@@ -952,8 +952,8 @@
 
                 return main.TRUE
             except pexpect.TIMEOUT:
-                main.log.error( self.name + ": TIMEOUT exception found" )
-                main.log.error( self.name + ":     " + self.handle.before )
+                main.log.error(self.name + ": TIMEOUT exception found")
+                main.log.error(self.name + ":     " + self.handle.before)
                 main.cleanAndExit()
             except pexpect.EOF:
                 main.log.error( self.name + ": EOF exception found" )
@@ -982,8 +982,8 @@
                     newIP )
                 return main.TRUE
             except pexpect.TIMEOUT:
-                main.log.error( self.name + ": TIMEOUT exception found" )
-                main.log.error( self.name + ":     " + self.handle.before )
+                main.log.error(self.name + ": TIMEOUT exception found")
+                main.log.error(self.name + ":     " + self.handle.before)
                 main.cleanAndExit()
             except pexpect.EOF:
                 main.log.error( self.name + ": EOF exception found" )
@@ -1011,8 +1011,8 @@
                     newGW )
                 return main.TRUE
             except pexpect.TIMEOUT:
-                main.log.error( self.name + ": TIMEOUT exception found" )
-                main.log.error( self.name + ":     " + self.handle.before )
+                main.log.error(self.name + ": TIMEOUT exception found")
+                main.log.error(self.name + ":     " + self.handle.before)
                 main.cleanAndExit()
             except pexpect.EOF:
                 main.log.error( self.name + ": EOF exception found" )
@@ -1040,8 +1040,8 @@
                     macaddr )
                 return main.TRUE
             except pexpect.TIMEOUT:
-                main.log.error( self.name + ": TIMEOUT exception found" )
-                main.log.error( self.name + ":     " + self.handle.before )
+                main.log.error(self.name + ": TIMEOUT exception found")
+                main.log.error(self.name + ":     " + self.handle.before)
                 main.cleanAndExit()
             except pexpect.EOF:
                 main.log.error( self.name + ": EOF exception found" )
@@ -1064,8 +1064,8 @@
                 main.log.info( host + " arp -an = " + response )
                 return main.TRUE
             except pexpect.TIMEOUT:
-                main.log.error( self.name + ": TIMEOUT exception found" )
-                main.log.error( self.name + ":     " + self.handle.before )
+                main.log.error(self.name + ": TIMEOUT exception found")
+                main.log.error(self.name + ":     " + self.handle.before)
                 main.cleanAndExit()
             except pexpect.EOF:
                 main.log.error( self.name + ": EOF exception found" )
@@ -1138,7 +1138,7 @@
         else:
             main.log.error( "Connection failed to the host" )
 
-    def getIPAddress( self, host, proto='IPV4' ):
+    def getIPAddress( self, host , proto='IPV4'):
         """
            Verifies the host's ip configured or not."""
         if self.handle:
@@ -1214,8 +1214,8 @@
                 response = self.handle.before
                 return response
             except pexpect.TIMEOUT:
-                main.log.error( self.name + ": TIMEOUT exception found" )
-                main.log.error( self.name + ":     " + self.handle.before )
+                main.log.error(self.name + ": TIMEOUT exception found")
+                main.log.error(self.name + ":     " + self.handle.before)
                 main.cleanAndExit()
             except pexpect.EOF:
                 main.log.error( self.name + ": EOF exception found" )
@@ -1307,33 +1307,33 @@
             main.cleanAndExit()
         return response
 
-    def iperftcpAll( self, hosts, timeout=6 ):
-        """
+    def iperftcpAll(self, hosts, timeout=6):
+        '''
         Runs the iperftcp function with a given set of hosts and specified timeout.
 
         @parm:
             timeout: The default timeout is 6 sec to allow enough time for a successful test to complete,
              and short enough to stop an unsuccessful test from quitting and cleaning up mininet.
-        """
+        '''
         try:
             for host1 in hosts:
                 for host2 in hosts:
                     if host1 != host2:
-                        if self.iperftcp( host1, host2, timeout ) == main.FALSE:
-                            main.log.error( self.name + ": iperftcp test failed for " + host1 + " and " + host2 )
+                        if self.iperftcp(host1, host2, timeout) == main.FALSE:
+                            main.log.error(self.name + ": iperftcp test failed for " + host1 + " and " + host2)
         except Exception:
             main.log.exception( self.name + ": Uncaught exception!" )
             main.cleanAndExit()
 
-    def iperftcp( self, host1="h1", host2="h2", timeout=6 ):
-        """
+    def iperftcp(self, host1="h1", host2="h2", timeout=6):
+        '''
         Creates an iperf TCP test between two hosts. Returns main.TRUE if test results
         are valid.
 
         @parm:
             timeout: The default timeout is 6 sec to allow enough time for a successful test to complete,
              and short enough to stop an unsuccessful test from quitting and cleaning up mininet.
-        """
+        '''
         main.log.info( self.name + ": Simple iperf TCP test between two hosts" )
         try:
             # Setup the mininet command
@@ -1344,26 +1344,26 @@
 
             # checks if there are results in the mininet response
             if "Results:" in response:
-                main.log.report( self.name + ": iperf test completed" )
+                main.log.report(self.name + ": iperf test completed")
                 # parse the mn results
-                response = response.split( "\r\n" )
-                response = response[ len( response ) - 2 ]
-                response = response.split( ": " )
-                response = response[ len( response ) - 1 ]
-                response = response.replace( "[", "" )
-                response = response.replace( "]", "" )
-                response = response.replace( "\'", "" )
+                response = response.split("\r\n")
+                response = response[len(response)-2]
+                response = response.split(": ")
+                response = response[len(response)-1]
+                response = response.replace("[", "")
+                response = response.replace("]", "")
+                response = response.replace("\'", "")
 
             # this is the bandwidth to and from the two hosts
-                bandwidth = response.split( ", " )
+                bandwidth = response.split(", ")
 
                 # there should be two elements in the bandwidth list
-                # [ 'host1 to host2', 'host2 to host1" ]
-                if len( bandwidth ) == 2:
-                    main.log.report( self.name + ": iperf test successful" )
+                # ['host1 to host2', 'host2 to host1"]
+                if len(bandwidth) == 2:
+                    main.log.report(self.name + ": iperf test successful")
                     return main.TRUE
                 else:
-                    main.log.error( self.name + ": invalid iperf results" )
+                    main.log.error(self.name + ": invalid iperf results")
                     return main.FALSE
             else:
                 main.log.error( self.name + ": iperf test failed" )
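The string handling above reduces the Mininet CLI iperf output to the two bandwidth figures. A minimal sketch of the same steps as a standalone helper, assuming output of the usual "*** Results: [...]" form; the sample string in the comment is illustrative:

def parse_iperf_results(response):
    # Keep the second-to-last line, drop the "Results:" prefix and the
    # list punctuation, then split the two bandwidth figures apart,
    # mirroring the split/replace chain in the hunk above.
    line = response.split("\r\n")[-2]
    line = line.split(": ")[-1]
    for ch in ("[", "]", "'"):
        line = line.replace(ch, "")
    bandwidth = line.split(", ")
    # A valid run yields exactly two entries: host1->host2 and host2->host1.
    return bandwidth if len(bandwidth) == 2 else None

# parse_iperf_results("iperf h1 h2\r\n*** Results: ['10 Gbits/sec', '10 Gbits/sec']\r\n")
# -> ['10 Gbits/sec', '10 Gbits/sec']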
@@ -1385,22 +1385,22 @@
             main.log.exception( self.name + ": Uncaught exception!" )
             main.cleanAndExit()
 
-    def iperftcpipv6( self, host1="h1", host2="h2", timeout=50 ):
+    def iperftcpipv6(self, host1="h1", host2="h2", timeout=50):
         main.log.info( self.name + ": Simple iperf TCP test between two hosts" )
         try:
             IP1 = self.getIPAddress( host1, proto='IPV6' )
-            cmd1 = host1 + ' iperf -V -sD -B ' + str( IP1 )
+            cmd1 = host1 +' iperf -V -sD -B '+ str(IP1)
             self.handle.sendline( cmd1 )
-            outcome1 = self.handle.expect( "mininet>" )
-            cmd2 = host2 + ' iperf -V -c ' + str( IP1 ) + ' -t 5'
+            outcome1 = self.handle.expect( "mininet>")
+            cmd2 = host2 +' iperf -V -c '+ str(IP1) +' -t 5'
             self.handle.sendline( cmd2 )
-            outcome2 = self.handle.expect( "mininet>" )
+            outcome2 = self.handle.expect( "mininet>")
             response1 = self.handle.before
             response2 = self.handle.after
-            print response1, response2
-            pattern = "connected with " + str( IP1 )
+            print response1,response2
+            pattern = "connected with "+ str(IP1)
             if pattern in response1:
-                main.log.report( self.name + ": iperf test completed" )
+                main.log.report(self.name + ": iperf test completed")
                 return main.TRUE
             else:
                 main.log.error( self.name + ": iperf test failed" )
@@ -1420,76 +1420,77 @@
             main.log.exception( self.name + ": Uncaught exception!" )
             main.cleanAndExit()
 
-    def iperfudpAll( self, hosts, bandwidth="10M" ):
-        """
+    def iperfudpAll(self, hosts, bandwidth="10M"):
+        '''
         Runs the iperfudp function with a given set of hosts and specified
         bandwidth
 
         @param:
-            bandwidth: the targeted bandwidth, in megabits ( 'M' )
-        """
+            bandwidth: the targeted bandwidth, in megabits ('M')
+        '''
         try:
             for host1 in hosts:
                 for host2 in hosts:
                     if host1 != host2:
-                        if self.iperfudp( host1, host2, bandwidth ) == main.FALSE:
-                            main.log.error( self.name + ": iperfudp test failed for " + host1 + " and " + host2 )
+                        if self.iperfudp(host1, host2, bandwidth) == main.FALSE:
+                            main.log.error(self.name + ": iperfudp test failed for " + host1 + " and " + host2)
         except TypeError:
-            main.log.exception( self.name + ": Object not as expected" )
+            main.log.exception(self.name + ": Object not as expected")
             return main.FALSE
         except Exception:
             main.log.exception( self.name + ": Uncaught exception!" )
             main.cleanAndExit()
 
-    def iperfudp( self, bandwidth="10M", host1="h1", host2="h2" ):
-        """
+    def iperfudp( self, bandwidth="10M", host1="h1", host2="h2"):
+
+        '''
         Creates an iperf UDP test with a specific bandwidth.
         Returns true if results are valid.
 
         @param:
-            bandwidth: the targeted bandwidth, in megabits ( 'M' ), to run the test
-        """
-        main.log.info( self.name + ": Simple iperf UDP test between two hosts" )
+            bandwidth: the targeted bandwidth, in megabits ('M'), to run the test
+        '''
+        main.log.info(self.name + ": Simple iperf UDP test between two hosts")
         try:
             # setup the mininet command
             cmd = 'iperfudp ' + bandwidth + " " + host1 + " " + host2
-            self.handle.sendline( cmd )
-            self.handle.expect( "mininet>" )
+            self.handle.sendline(cmd)
+            self.handle.expect("mininet>")
             response = self.handle.before
 
             # check if there are results in the mininet response
             if "Results:" in response:
-                main.log.report( self.name + ": iperfudp test completed" )
+                main.log.report(self.name + ": iperfudp test completed")
                 # parse the results
-                response = response.split( "\r\n" )
-                response = response[ len( response ) - 2 ]
-                response = response.split( ": " )
-                response = response[ len( response ) - 1 ]
-                response = response.replace( "[", "" )
-                response = response.replace( "]", "" )
-                response = response.replace( "\'", "" )
+                response = response.split("\r\n")
+                response = response[len(response)-2]
+                response = response.split(": ")
+                response = response[len(response)-1]
+                response = response.replace("[", "")
+                response = response.replace("]", "")
+                response = response.replace("\'", "")
 
-                mnBandwidth = response.split( ", " )
+                mnBandwidth = response.split(", ")
 
                 # check to see if there are at least three entries
-                # [ 'bandwidth', 'host1 to host2', 'host2 to host1' ]
-                if len( mnBandwidth ) == 3:
+                # ['bandwidth', 'host1 to host2', 'host2 to host1']
+                if len(mnBandwidth) == 3:
                     # if one entry is blank then something is wrong
                     for item in mnBandwidth:
                         if item == "":
-                            main.log.error( self.name + ": Could not parse iperf output" )
-                            main.log.error( self.name + ": invalid iperfudp results" )
+                            main.log.error(self.name + ": Could not parse iperf output")
+                            main.log.error(self.name + ": invalid iperfudp results")
                             return main.FALSE
                     # otherwise results are valid
-                    main.log.report( self.name + ": iperfudp test successful" )
+                    main.log.report(self.name + ": iperfudp test successful")
                     return main.TRUE
                 else:
-                    main.log.error( self.name + ": invalid iperfudp results" )
+                    main.log.error(self.name + ": invalid iperfudp results")
                     return main.FALSE
 
         except pexpect.TIMEOUT:
-            main.log.error( self.name + ": TIMEOUT exception found" )
-            main.log.error( self.name + ":     " + self.handle.before )
+            main.log.error(self.name + ": TIMEOUT exception found")
+            main.log.error(self.name + ":     " + self.handle.before)
             main.cleanAndExit()
         except pexpect.EOF:
             main.log.error( self.name + ": EOF exception found" )
@@ -1578,8 +1579,8 @@
             self.handle.sendline( command )
             self.handle.expect( "mininet>" )
         except pexpect.TIMEOUT:
-            main.log.error( self.name + ": TIMEOUT exception found" )
-            main.log.error( self.name + ":     " + self.handle.before )
+            main.log.error(self.name + ": TIMEOUT exception found")
+            main.log.error(self.name + ":     " + self.handle.before)
             main.cleanAndExit()
         except pexpect.EOF:
             main.log.error( self.name + ": EOF exception found" )
@@ -1599,7 +1600,7 @@
         main.log.info( command )
 
         try:
-            response = self.execute( cmd=command, prompt="mininet>" )
+            response = self.execute( cmd = command, prompt = "mininet>" )
             if re.search( "Unknown command", response ):
                 main.log.warn( response )
                 return main.FALSE
@@ -1741,7 +1742,7 @@
         Return:
             Returns main.TRUE if mininet correctly assigned switches to
             controllers, otherwise it will return main.FALSE or an appropriate
-            exception( s )
+            exception(s)
         """
         assignResult = main.TRUE
         # Initial ovs command
@@ -1948,9 +1949,9 @@
     def getSwitchRandom( self, timeout=60, nonCut=True ):
         """
         Randomly get a switch from Mininet topology.
-        If nonCut is True, it gets a list of non-cut switches ( the deletion
+        If nonCut is True, it gets a list of non-cut switches (the deletion
         of a non-cut switch will not increase the number of connected
-        components of a graph ) and randomly returns one of them, otherwise
+        components of a graph) and randomly returns one of them, otherwise
         it just randomly returns one switch from all current switches in
         Mininet.
         Returns the name of the chosen switch.
@@ -1965,11 +1966,11 @@
                     candidateSwitches.append( switchName )
             else:
                 graphDict = self.getGraphDict( timeout=timeout, useId=False )
-                if graphDict is None:
+                if graphDict == None:
                     return None
                 self.graph.update( graphDict )
                 candidateSwitches = self.graph.getNonCutVertices()
-            if candidateSwitches is None:
+            if candidateSwitches == None:
                 return None
             elif len( candidateSwitches ) == 0:
                 main.log.info( self.name + ": No candidate switch for deletion" )
@@ -1990,16 +1991,16 @@
     def delSwitchRandom( self, timeout=60, nonCut=True ):
         """
         Randomly delete a switch from Mininet topology.
-        If nonCut is True, it gets a list of non-cut switches ( the deletion
+        If nonCut is True, it gets a list of non-cut switches (the deletion
         of a non-cut switch will not increase the number of connected
-        components of a graph ) and randomly chooses one for deletion,
+        components of a graph) and randomly chooses one for deletion,
         otherwise it just randomly delete one switch from all current
         switches in Mininet.
         Returns the name of the deleted switch
         """
         try:
             switch = self.getSwitchRandom( timeout, nonCut )
-            if switch is None:
+            if switch == None:
                 return None
             else:
                 deletionResult = self.delSwitch( switch )
@@ -2084,9 +2085,9 @@
     def getLinkRandom( self, timeout=60, nonCut=True ):
         """
         Randomly get a link from Mininet topology.
-        If nonCut is True, it gets a list of non-cut links ( the deletion
+        If nonCut is True, it gets a list of non-cut links (the deletion
         of a non-cut link will not increase the number of connected
-        component of a graph ) and randomly returns one of them, otherwise
+        component of a graph) and randomly returns one of them, otherwise
         it just randomly returns one link from all current links in
         Mininet.
         Returns the link as a list, e.g. [ 's1', 's2' ]
@@ -2104,11 +2105,11 @@
                     candidateLinks.append( [ link[ 'node1' ], link[ 'node2' ] ] )
             else:
                 graphDict = self.getGraphDict( timeout=timeout, useId=False )
-                if graphDict is None:
+                if graphDict == None:
                     return None
                 self.graph.update( graphDict )
                 candidateLinks = self.graph.getNonCutEdges()
-            if candidateLinks is None:
+            if candidateLinks == None:
                 return None
             elif len( candidateLinks ) == 0:
                 main.log.info( self.name + ": No candidate link for deletion" )
@@ -2129,16 +2130,16 @@
     def delLinkRandom( self, timeout=60, nonCut=True ):
         """
         Randomly delete a link from Mininet topology.
-        If nonCut is True, it gets a list of non-cut links ( the deletion
+        If nonCut is True, it gets a list of non-cut links (the deletion
         of a non-cut link will not increase the number of connected
-        component of a graph ) and randomly chooses one for deletion,
+        component of a graph) and randomly chooses one for deletion,
         otherwise it just randomly delete one link from all current links
         in Mininet.
         Returns the deleted link as a list, e.g. [ 's1', 's2' ]
         """
         try:
             link = self.getLinkRandom( timeout, nonCut )
-            if link is None:
+            if link == None:
                 return None
             else:
                 deletionResult = self.delLink( link[ 0 ], link[ 1 ] )
@@ -2301,8 +2302,8 @@
                         fileName +
                         "\" | grep -v grep | awk '{print $2}'\`" )
             except pexpect.TIMEOUT:
-                main.log.error( self.name + ": TIMEOUT exception found" )
-                main.log.error( self.name + ":     " + self.handle.before )
+                main.log.error(self.name + ": TIMEOUT exception found")
+                main.log.error(self.name + ":     " + self.handle.before)
                 main.cleanAndExit()
             except pexpect.EOF:
                 main.log.error( self.name + ": EOF exception found" )
@@ -2330,7 +2331,7 @@
             ethDevice = '-I ' + ethDevice + ' '
         cmd = srcHost + " arping -c1 "
         if noResult:
-            cmd += "-w10 "  # If we don't want the actural arping result, set -w10, arping will exit after 10 ms.
+            cmd += "-w10 " # If we don't want the actural arping result, set -w10, arping will exit after 10 ms.
         cmd += ethDevice + dstHost
         try:
             if output:
@@ -2415,11 +2416,11 @@
             assert flowTable1, "flowTable1 is empty or None"
             assert flowTable2, "flowTable2 is empty or None"
             returnValue = main.TRUE
-            if len( flowTable1 ) != len( flowTable2 ):
+            if len(flowTable1) != len(flowTable2):
                 main.log.warn( "Flow table lengths do not match" )
                 returnValue = main.FALSE
-            dFields = [ "n_bytes", "cookie", "n_packets", "duration" ]
-            for flow1, flow2 in zip( flowTable1, flowTable2 ):
+            dFields = ["n_bytes", "cookie", "n_packets", "duration"]
+            for flow1, flow2 in zip(flowTable1, flowTable2):
                 for field in dFields:
                     try:
                         flow1.pop( field )
@@ -2429,10 +2430,10 @@
                         flow2.pop( field )
                     except KeyError:
                         pass
-            for i in range( len( flowTable1 ) ):
-                if flowTable1[ i ] not in flowTable2:
+            for i in range( len(flowTable1) ):
+                if flowTable1[i] not in flowTable2:
                     main.log.warn( "Flow tables do not match:" )
-                    main.log.warn( "Old flow:\n{}\n not in new flow table".format( flowTable1[ i ] ) )
+                    main.log.warn( "Old flow:\n{}\n not in new flow table".format( flowTable1[i] ) )
                     returnValue = main.FALSE
                     break
             return returnValue
@@ -2444,7 +2445,7 @@
             main.cleanAndExit()
 
     def parseFlowTable( self, flowTable, version="", debug=True ):
-        """
+        '''
         Description: Parses flows into json format.
         NOTE: this can parse any string that's separated with commas
         Arguments:
@@ -2454,70 +2455,69 @@
                 version: The version of OpenFlow. Currently, 1.3 and 1.0 are supported.
                 debug: prints out the final result
         returns: A list of flows in json format
-        """
+        '''
         jsonFlowTable = []
         try:
             for flow in flowTable:
                 jsonFlow = {}
                 # split up the fields of the flow
-                parsedFlow = flow.split( ", " )
+                parsedFlow = flow.split(", ")
                 # get rid of any spaces in front of the field
-                for i in range( len( parsedFlow ) ):
-                    item = parsedFlow[ i ]
-                    if item[ 0 ] == " ":
-                        parsedFlow[ i ] = item[ 1: ]
+                for i in range( len(parsedFlow) ):
+                    item = parsedFlow[i]
+                    if item[0] == " ":
+                        parsedFlow[i] = item[1:]
                 # grab the selector and treatment from the parsed flow
                 # the last element is the selector and the treatment
-                temp = parsedFlow.pop( -1 )
+                temp = parsedFlow.pop(-1)
                 # split up the selector and the treatment
-                temp = temp.split( " " )
+                temp = temp.split(" ")
                 index = 0
                 # parse the flags
                 # NOTE: This only parses one flag
                 flag = {}
                 if version == "1.3":
-                    flag = { "flag": [ temp[ index ] ] }
+                    flag = {"flag":[temp[index]]}
                     index += 1
                 # the first element is the selector and split it up
-                sel = temp[ index ]
+                sel = temp[index]
                 index += 1
-                sel = sel.split( "," )
+                sel = sel.split(",")
                 # the priority is stuck in the selector so put it back
                 # in the flow
-                parsedFlow.append( sel.pop( 0 ) )
+                parsedFlow.append(sel.pop(0))
                 # parse selector
                 criteria = []
                 for item in sel:
                     # this is the type of the packet e.g. "arp"
                     if "=" not in item:
-                        criteria.append( { "type": item } )
+                        criteria.append( {"type":item} )
                     else:
-                        field = item.split( "=" )
-                        criteria.append( { field[ 0 ]: field[ 1 ] } )
-                selector = { "selector": { "criteria": sorted( criteria ) } }
-                treat = temp[ index ]
+                        field = item.split("=")
+                        criteria.append( {field[0]:field[1]} )
+                selector = {"selector": {"criteria":sorted(criteria)} }
+                treat = temp[index]
                 # get rid of the action part e.g. "action=output:2"
                 # we will add it back later
-                treat = treat.split( "=" )
-                treat.pop( 0 )
+                treat = treat.split("=")
+                treat.pop(0)
                 # parse treatment
                 action = []
                 for item in treat:
-                    field = item.split( ":" )
-                    action.append( { field[ 0 ]: field[ 1 ] } )
+                    field = item.split(":")
+                    action.append( {field[0]:field[1]} )
                 # create the treatment field and add the actions
-                treatment = { "treatment": { "action": sorted( action ) } }
+                treatment = {"treatment": {"action":sorted(action)} }
                 # parse the rest of the flow
                 for item in parsedFlow:
-                    field = item.split( "=" )
-                    jsonFlow.update( { field[ 0 ]: field[ 1 ] } )
+                    field = item.split("=")
+                    jsonFlow.update( {field[0]:field[1]} )
                 # add the treatment and the selector to the json flow
                 jsonFlow.update( selector )
                 jsonFlow.update( treatment )
                 jsonFlow.update( flag )
 
-                if debug:
-                    main.log.debug( "\033[94mJson flow:\033[0m\n{}\n".format( jsonFlow ) )
+                if debug: main.log.debug( "\033[94mJson flow:\033[0m\n{}\n".format(jsonFlow) )
 
                 # add the json flow to the json flow table
                 jsonFlowTable.append( jsonFlow )
@@ -2535,24 +2535,23 @@
             main.log.exception( self.name + ": Uncaught exception!" )
             main.cleanAndExit()
 
-    def getFlowTable( self, sw, version="", debug=False ):
-        """
-        Discription: Returns the flow table( s ) on a switch or switches in a list.
+    def getFlowTable( self, sw, version="", debug=False):
+        '''
+        Description: Returns the flow table(s) on a switch or switches in a list.
             Each element is a flow.
         Arguments:
             Required:
-                sw: The switch name ( "s1" ) to retrive the flow table. Can also be
+                sw: The switch name ("s1") to retrieve the flow table. Can also be
                     a list of switches.
             Optional:
                 version: The version of OpenFlow. Currently, 1.3 and 1.0 are supported.
                 debug: prints out the final result
-        """
+        '''
         try:
             switches = []
-            if isinstance( sw, list ):
-                switches.extend( sw )
-            else:
-                switches.append( sw )
+            if type(sw) is list:
+                switches.extend(sw)
+            else: switches.append(sw)
 
             flows = []
             for s in switches:
@@ -2572,11 +2571,10 @@
                 # the first element is the command that was sent
                 # the second is the table header
                 # the last element is empty
-                response = response[ 2:-1 ]
+                response = response[2:-1]
                 flows.extend( response )
 
-            if debug:
-                print "Flows:\n{}\n\n".format( flows )
+            if debug: print "Flows:\n{}\n\n".format(flows)
 
             return self.parseFlowTable( flows, version, debug )
 
@@ -2588,11 +2586,11 @@
             main.cleanAndExit()
 
     def checkFlowId( self, sw, flowId, version="1.3", debug=True ):
-        """
+        '''
         Description: Checks whether the ID provided matches a flow ID in Mininet
         Arguments:
             Required:
-                sw: The switch name ( "s1" ) to retrive the flow table. Can also be
+                sw: The switch name ("s1") to retrieve the flow table. Can also be
                     a list of switches.
                 flowId: the flow ID in hex format. Can also be a list of IDs
             Optional:
@@ -2600,18 +2598,17 @@
                 debug: prints out the final result
         returns: main.TRUE if all IDs are present, otherwise returns main.FALSE
         NOTE: prints out IDs that are not present
-        """
+        '''
         try:
             main.log.info( "Getting flows from Mininet" )
             flows = self.getFlowTable( sw, version, debug )
-            if flows is None:
+            if flows == None:
                 return main.ERROR
 
-            if debug:
-                print "flow ids:\n{}\n\n".format( flowId )
+            if debug: print "flow ids:\n{}\n\n".format(flowId)
 
             # Check flowId is a list or a string
-            if isinstance( flowId, str ):
+            if type( flowId ) is str:
                 result = False
                 for f in flows:
                     if flowId in f.get( 'cookie' ):
@@ -2625,8 +2622,7 @@
                 # Save the IDs that are not in Mininet
                 absentIds = [ x for x in flowId if x not in mnFlowIds ]
 
-                if debug:
-                    print "mn flow ids:\n{}\n\n".format( mnFlowIds )
+                if debug: print "mn flow ids:\n{}\n\n".format(mnFlowIds)
 
                 # Print out the IDs that are not in Mininet
                 if absentIds:
@@ -2643,6 +2639,7 @@
             main.log.exception( self.name + ": Uncaught exception!" )
             main.cleanAndExit()
 
+
     def startTcpdump( self, filename, intf="eth0", port="port 6653" ):
         """
            Runs tcpdump on an interface and saves the file
@@ -2707,8 +2704,8 @@
             self.handle.sendline( "" )
             self.handle.expect( "mininet>" )
         except pexpect.TIMEOUT:
-            main.log.error( self.name + ": TIMEOUT exception found" )
-            main.log.error( self.name + ":     " + self.handle.before )
+            main.log.error(self.name + ": TIMEOUT exception found")
+            main.log.error(self.name + ":     " + self.handle.before)
             main.cleanAndExit()
         except pexpect.EOF:
             main.log.error( self.name + ": EOF exception found" )
@@ -2789,7 +2786,7 @@
                 for line in response.split( "\n" ):
                     # Regex patterns to parse 'ovs-ofctl dump-ports-desc' output
                     # Example port:
-                    # 1( s1-eth1 ): addr:ae:60:72:77:55:51
+                    # 1(s1-eth1): addr:ae:60:72:77:55:51
                     pattern = "(?P<index>\d+)\((?P<name>[^-]+-eth(?P<port>\d+))\):\saddr:(?P<mac>([a-f0-9]{2}:){5}[a-f0-9]{2})"
                     result = re.search( pattern, line )
                     if result:
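For reference, the dump-ports-desc pattern above can be exercised against the example line it documents; a minimal standalone check using only values taken from the comment in the hunk:

import re

# Same pattern as in the hunk above, applied to the documented example line
# "1(s1-eth1): addr:ae:60:72:77:55:51".
pattern = (r"(?P<index>\d+)\((?P<name>[^-]+-eth(?P<port>\d+))\):"
           r"\saddr:(?P<mac>([a-f0-9]{2}:){5}[a-f0-9]{2})")
match = re.search(pattern, "1(s1-eth1): addr:ae:60:72:77:55:51")
assert match and match.groupdict() == {
    "index": "1", "name": "s1-eth1", "port": "1", "mac": "ae:60:72:77:55:51"
}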
@@ -2889,7 +2886,7 @@
             dump = self.dump().split( "\n" )
             hosts = {}
             for line in dump:
-                if "Host" in line:
+                if "Host" in line :
                     result = re.search( hostRE, line )
                     name = result.group( 'name' )
                     interfaces = []
@@ -2950,12 +2947,12 @@
               hosts, this is just the eth#.
         """
         try:
-            self.update( updateTimeout )
-            response = self.links( timeout=timeout ).split( '\n' )
+            self.update()
+            response = self.links(timeout=timeout).split( '\n' )
 
             # Examples:
-            # s1-eth3<->s2-eth1 ( OK OK )
-            # s13-eth3<->h27-eth0 ( OK OK )
+            # s1-eth3<->s2-eth1 (OK OK)
+            # s13-eth3<->h27-eth0 (OK OK)
             linkRE = "(?P<node1>[\w]+)\-eth(?P<port1>[\d]+)\<\-\>" +\
                      "(?P<node2>[\w]+)\-eth(?P<port2>[\d]+)"
             links = []
@@ -3274,11 +3271,11 @@
                     main.log.info( output )
             return hostResults
         except pexpect.EOF:
-            main.log.error( self.name + ": EOF exception found" )
-            main.log.error( self.name + ":     " + self.handle.before )
+            main.log.error(self.name + ": EOF exception found")
+            main.log.error(self.name + ":     " + self.handle.before)
             main.cleanAndExit()
         except Exception:
-            main.log.exception( self.name + ": Uncaught exception!" )
+            main.log.exception(self.name + ": Uncaught exception!")
             main.cleanAndExit()
 
     def getHostsOld( self ):
@@ -3307,8 +3304,8 @@
 
             return hostList
         except pexpect.TIMEOUT:
-            main.log.error( self.name + ": TIMEOUT exception found" )
-            main.log.error( self.name + ":     " + self.handle.before )
+            main.log.error(self.name + ": TIMEOUT exception found")
+            main.log.error(self.name + ":     " + self.handle.before)
             main.cleanAndExit()
         except pexpect.EOF:
             main.log.error( self.name + ": EOF exception found" )
@@ -3350,7 +3347,7 @@
             nodesList = nodesStr.split( "," )
 
             nodesSet = set( nodesList )
-            # discarding default controller( s ) node
+            # discarding default controller(s) node
             nodesSet.discard( 'c0' )
             nodesSet.discard( 'c1' )
             nodesSet.discard( 'c2' )
@@ -3360,8 +3357,8 @@
 
             return switchList
         except pexpect.TIMEOUT:
-            main.log.error( self.name + ": TIMEOUT exception found" )
-            main.log.error( self.name + ":     " + self.handle.before )
+            main.log.error(self.name + ": TIMEOUT exception found")
+            main.log.error(self.name + ":     " + self.handle.before)
             main.cleanAndExit()
         except pexpect.EOF:
             main.log.error( self.name + ": EOF exception found" )
@@ -3380,7 +3377,7 @@
           vertex2: { 'edges': ..., 'name': ..., 'protocol': ... } }
         Each vertex should at least have an 'edges' attribute which describes the
         adjacency information. The value of 'edges' attribute is also represented by
-        a dictionary, which maps each edge ( identified by the neighbor vertex ) to a
+        a dictionary, which maps each edge (identified by the neighbor vertex) to a
         list of attributes.
         An example of the edges dictionary:
         'edges': { vertex2: { 'port': ..., 'weight': ... },
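A concrete, hypothetical instance of the graph dictionary described above (as built with useId=False) for a two-switch topology with a single s1-s2 link; the port numbers are illustrative:

# Hypothetical graph dictionary for: s1-eth1 <-> s2-eth1 (useId=False).
# Every vertex carries an 'edges' dict keyed by its neighbour vertices.
graph_dict = {
    's1': { 'edges': { 's2': { 'port': 1 } } },
    's2': { 'edges': { 's1': { 'port': 1 } } },
}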
@@ -3388,7 +3385,7 @@
         If useId == True, dpid/mac will be used instead of names to identify
         vertices, which is helpful when e.g. comparing Mininet topology with ONOS
         topology.
-        If includeHost == True, all hosts ( and host-switch links ) will be included
+        If includeHost == True, all hosts (and host-switch links) will be included
         in topology data.
         Note that link or switch that are brought down by 'link x x down' or 'switch
         x down' commands still show in the output of Mininet CLI commands such as
@@ -3417,7 +3414,7 @@
                     # Get port index from OVS
                     # The index extracted from port name may be inconsistent with ONOS
                     portIndex = -1
-                    if nodeName1 not in portDict.keys():
+                    if not nodeName1 in portDict.keys():
                         portList = self.getOVSPorts( nodeName1 )
                         if len( portList ) == 0:
                             main.log.warn( self.name + ": No port found on switch " + nodeName1 )
@@ -3436,21 +3433,21 @@
                     else:
                         node1 = nodeName1
                         node2 = nodeName2
-                    if node1 not in graphDict.keys():
+                    if not node1 in graphDict.keys():
                         if useId:
-                            graphDict[ node1 ] = { 'edges': {},
-                                                   'dpid': switches[ nodeName1 ][ 'dpid' ],
-                                                   'name': nodeName1,
-                                                   'ports': switches[ nodeName1 ][ 'ports' ],
-                                                   'swClass': switches[ nodeName1 ][ 'swClass' ],
-                                                   'pid': switches[ nodeName1 ][ 'pid' ],
-                                                   'options': switches[ nodeName1 ][ 'options' ] }
+                            graphDict[ node1 ] = { 'edges':{},
+                                                   'dpid':switches[ nodeName1 ][ 'dpid' ],
+                                                   'name':nodeName1,
+                                                   'ports':switches[ nodeName1 ][ 'ports' ],
+                                                   'swClass':switches[ nodeName1 ][ 'swClass' ],
+                                                   'pid':switches[ nodeName1 ][ 'pid' ],
+                                                   'options':switches[ nodeName1 ][ 'options' ] }
                         else:
-                            graphDict[ node1 ] = { 'edges': {} }
+                            graphDict[ node1 ] = { 'edges':{} }
                     else:
                         # Assert node2 is not connected to any current links of node1
                         assert node2 not in graphDict[ node1 ][ 'edges' ].keys()
-                    graphDict[ node1 ][ 'edges' ][ node2 ] = { 'port': portIndex }
+                    graphDict[ node1 ][ 'edges' ][ node2 ] = { 'port':portIndex }
                     # Swap two nodes/ports
                     nodeName1, nodeName2 = nodeName2, nodeName1
                     port1, port2 = port2, port1
@@ -3487,8 +3484,8 @@
 
             return main.TRUE
         except pexpect.TIMEOUT:
-            main.log.error( self.name + ": TIMEOUT exception found" )
-            main.log.error( self.name + ":     " + self.handle.before )
+            main.log.error(self.name + ": TIMEOUT exception found")
+            main.log.error(self.name + ":     " + self.handle.before)
             main.cleanAndExit()
         except pexpect.EOF:
             main.log.error( self.name + ": EOF exception found" )
@@ -3548,8 +3545,8 @@
 
                 return main.TRUE
             except pexpect.TIMEOUT:
-                main.log.error( self.name + ": TIMEOUT exception found" )
-                main.log.error( self.name + ":     " + self.handle.before )
+                main.log.error(self.name + ": TIMEOUT exception found")
+                main.log.error(self.name + ":     " + self.handle.before)
                 main.cleanAndExit()
             except pexpect.EOF:
                 main.log.error( self.name + ": EOF exception found" )
@@ -3575,8 +3572,8 @@
             getattr( main, name )
         except AttributeError:
             # namespace is clear, creating component
-            main.componentDictionary[ name ] = main.componentDictionary[ self.name ].copy()
-            main.componentDictionary[ name ][ 'connect_order' ] = str( int( main.componentDictionary[ name ][ 'connect_order' ] ) + 1 )
+            main.componentDictionary[name] = main.componentDictionary[self.name].copy()
+            main.componentDictionary[name]['connect_order'] = str( int( main.componentDictionary[name]['connect_order'] ) + 1 )
             main.componentInit( name )
         except pexpect.EOF:
             main.log.error( self.name + ": EOF exception found" )
@@ -3609,7 +3606,7 @@
             # Delete component
             delattr( main, name )
             # Delete component from ComponentDictionary
-            del( main.componentDictionary[ name ] )
+            del( main.componentDictionary[name] )
         except pexpect.EOF:
             main.log.error( self.name + ": EOF exception found" )
             main.log.error( self.name + ":     " + self.handle.before )
@@ -3645,7 +3642,8 @@
             main.cleanAndExit()
 
     def changeInterfaceStatus( self, devicename, intf, status ):
-        """
+        '''
+
         Args:
             devicename: switch name
             intf: port name on switch
@@ -3653,29 +3651,30 @@
 
         Returns: boolean to show success change status
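+        Example usage (a sketch; switch and interface names are hypothetical):
+            changeInterfaceStatus( "s1", "s1-eth1", "down" )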
 
-        """
+        '''
         if status == "down" or status == "up":
             try:
                 cmd = devicename + " ifconfig " + intf + " " + status
                 self.handle.sendline( cmd )
-                self.handle.expect( "mininet>" )
+                self.handle.expect("mininet>")
                 return main.TRUE
             except pexpect.TIMEOUT:
-                main.log.exception( self.name + ": Command timed out" )
+                main.log.exception(self.name + ": Command timed out")
                 return main.FALSE
             except pexpect.EOF:
-                main.log.exception( self.name + ": connection closed." )
+                main.log.exception(self.name + ": connection closed.")
                 main.cleanAndExit()
             except TypeError:
-                main.log.exception( self.name + ": TypeError" )
+                main.log.exception(self.name + ": TypeError")
                 main.cleanAndExit()
             except Exception:
-                main.log.exception( self.name + ": Uncaught exception!" )
+                main.log.exception(self.name + ": Uncaught exception!")
                 main.cleanAndExit()
         else:
-            main.log.warn( "Interface status should be up or down!" )
+            main.log.warn("Interface status should be up or down!")
             return main.FALSE
 
 
+
 if __name__ != "__main__":
     sys.modules[ __name__ ] = MininetCliDriver()
diff --git a/TestON/drivers/common/cli/emulator/poxclidriver.py b/TestON/drivers/common/cli/emulator/poxclidriver.py
index ec22d0b..de7a7c3 100644
--- a/TestON/drivers/common/cli/emulator/poxclidriver.py
+++ b/TestON/drivers/common/cli/emulator/poxclidriver.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python
 """
 Created on 26-Oct-2012
-Copyright 2012 Open Networking Foundation ( ONF )
+Copyright 2012 Open Networking Foundation (ONF)
 
 Please refer questions to either the onos test mailing list at <onos-test@onosproject.org>,
 the System Testing Plans and Results wiki page at <https://wiki.onosproject.org/x/voMg>,
@@ -45,7 +45,7 @@
         self.wrapped = sys.modules[ __name__ ]
 
     def connect( self, **connectargs ):
-        # user_name, ip_address, pwd,options ):
+        #,user_name, ip_address, pwd,options ):
         """
           this subroutine is to launch pox controller . It must have arguments as :
           user_name  = host name ,
diff --git a/TestON/drivers/common/cli/emulator/remotemininetdriver.py b/TestON/drivers/common/cli/emulator/remotemininetdriver.py
index 6deca9a..71de94c 100644
--- a/TestON/drivers/common/cli/emulator/remotemininetdriver.py
+++ b/TestON/drivers/common/cli/emulator/remotemininetdriver.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python
 """
 Created on 26-Oct-2012
-Copyright 2012 Open Networking Foundation ( ONF )
+Copyright 2012 Open Networking Foundation (ONF)
 
 Please refer questions to either the onos test mailing list at <onos-test@onosproject.org>,
 the System Testing Plans and Results wiki page at <https://wiki.onosproject.org/x/voMg>,
@@ -56,7 +56,7 @@
             self.name = self.options[ 'name' ]
 
             try:
-                if os.getenv( str( self.ip_address ) ) is not None:
+                if os.getenv( str( self.ip_address ) ) != None:
                     self.ip_address = os.getenv( str( self.ip_address ) )
                 else:
                     main.log.info( self.name +
@@ -152,7 +152,7 @@
             self.handle.expect( self.prompt )
             return main.TRUE
         except TypeError:
-            main.log.exception( self.name + ": Object not as expected" )
+            main.log.exception(self.name + ": Object not as expected")
             return main.FALSE
         except pexpect.TIMEOUT:
             main.log.exception( self.name + ": TIMEOUT exception found in pingLong" )
@@ -166,6 +166,7 @@
             main.log.exception( self.name + ": Uncaught exception!" )
             main.cleanAndExit()
 
+
     def pingstatus( self, **pingParams ):
         """
         Tails the respective ping output file and check that
@@ -202,7 +203,7 @@
             main.log.error( self.name + ":     " + self.handle.before )
             main.cleanAndExit()
         except Exception:
-            main.log.exception( self.name + ": Uncaught exception!" )
+            main.log.exception(self.name + ": Uncaught exception!")
             main.cleanAndExit()
 
     def pingKill( self, testONUser, testONIP ):
@@ -261,11 +262,11 @@
             main.log.error( self.name + ":    " + self.handle.before )
             return main.FALSE
         except pexpect.EOF:
-            main.log.error( self.name + ": EOF exception found" )
-            main.log.error( self.name + ":    " + self.handle.before )
+            main.log.error(self.name + ": EOF exception found")
+            main.log.error(self.name + ":    " + self.handle.before)
             main.cleanAndExit()
         except Exception:
-            main.log.exception( self.name + ": Uncaught exception!" )
+            main.log.exception(self.name + ": Uncaught exception!")
             main.cleanAndExit()
 
     def pingHostOptical( self, **pingParams ):
@@ -335,11 +336,11 @@
                 main.lastResult = main.FALSE
                 return main.FALSE
         except pexpect.EOF:
-            main.log.error( self.name + ": EOF exception found" )
-            main.log.error( self.name + ":    " + self.handle.before )
+            main.log.error(self.name + ": EOF exception found")
+            main.log.error(self.name + ":    " + self.handle.before)
             main.cleanAndExit()
         except Exception:
-            main.log.exception( self.name + ": Uncaught exception!" )
+            main.log.exception(self.name + ": Uncaught exception!")
             main.cleanAndExit()
 
     def checknum( self, num ):
@@ -406,7 +407,7 @@
             self.handle.sendline( "" )
             self.handle.sendline( "" )
             i = self.handle.expect( [ 'No\ssuch\device', 'listening\son',
-                                      pexpect.TIMEOUT, self.prompt ], timeout=10 )
+                                    pexpect.TIMEOUT, self.prompt ], timeout=10 )
             main.log.info( self.handle.before + self.handle.after )
             if i == 0:
                 main.log.error( self.name + ": tcpdump - No such device exists.\
@@ -457,9 +458,9 @@
                 It runs python script "opticalTest.py" to create the
                 packet layer( mn ) and optical topology
             Optional:
-                name - Name of onos directory. ( ONOS | onos )
+                name - Name of onos directory. (ONOS | onos)
             Required:
-                ctrllerIP = Controller( s ) IP address
+                ctrllerIP = Controller(s) IP address
             TODO: If no ctrllerIP is provided, a default
                 $OC1 can be accepted
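+            Example usage (a sketch; the controller IP is hypothetical):
+                runOpticalMnScript( ctrllerIP="10.128.10.11", name="onos" )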
         """
@@ -467,15 +468,15 @@
             self.handle.sendline( "" )
             self.handle.expect( self.prompt )
             self.handle.sendline( "cd ~/" + name + "/tools/test/topos" )
-            self.handle.expect( "topos" + self.prompt )
-            if ctrllerIP is None:
+            self.handle.expect( "topos"+ self.prompt )
+            if ctrllerIP == None:
                 main.log.info( "You need to specify the IP" )
                 return main.FALSE
             else:
                 controller = ''
                 if isinstance( ctrllerIP, types.ListType ):
                     for i in xrange( len( ctrllerIP ) ):
-                        controller += ctrllerIP[ i ] + ' '
+                        controller += ctrllerIP[i] + ' '
                     main.log.info( "Mininet topology is being loaded with " +
                                    "controllers: " + controller )
                 elif isinstance( ctrllerIP, types.StringType ):
@@ -488,10 +489,10 @@
                 cmd = "sudo -E python opticalTest.py " + controller
                 main.log.info( self.name + ": cmd = " + cmd )
                 self.handle.sendline( cmd )
-                time.sleep( 30 )
+                time.sleep(30)
                 self.handle.sendline( "" )
                 self.handle.sendline( "" )
-                self.handle.expect( "mininet>" )
+                self.handle.expect("mininet>")
                 return main.TRUE
         except pexpect.EOF:
             main.log.error( self.name + ": EOF exception found" )
@@ -502,7 +503,7 @@
         """
             Since executing opticalTest.py will give you mininet
             prompt, you would at some point require to get onto
-            console of LincOE ( ( linc@onosTestBench )1> ) to execute
+            console of LincOE ((linc@onosTestBench)1>) to execute
             commands like bring a optical port up or down on a ROADM
             You can attach to console of Linc-OE session by a cmd:
             sudo ~/linc-oe/rel/linc/bin/linc attach
@@ -535,7 +536,7 @@
                 elif i == 1:
                     self.handle.sendline( "exit" )
                     self.handle.expect( "exit" )
-                    self.handle.expect( self.prompt )
+                    self.handle.expect(self.prompt)
                     self.handle.sendline( "exit" )
                     self.handle.expect( "exit" )
                     self.handle.expect( "closed" )
@@ -547,11 +548,11 @@
             main.log.error( self.name + ":    " + self.handle.before )
             return main.FALSE
         except pexpect.EOF:
-            main.log.error( self.name + ": EOF exception found" )
-            main.log.error( self.name + ":    " + self.handle.before )
+            main.log.error(self.name + ": EOF exception found")
+            main.log.error(self.name + ":    " + self.handle.before)
             main.cleanAndExit()
         except Exception:
-            main.log.exception( self.name + ": Uncaught exception!" )
+            main.log.exception(self.name + ": Uncaught exception!")
             main.cleanAndExit()
 
     def setIpTablesOUTPUT( self, dstIp, dstPort, action='add',
diff --git a/TestON/drivers/common/cli/emulatordriver.py b/TestON/drivers/common/cli/emulatordriver.py
index 9bfd648..f63a49f 100644
--- a/TestON/drivers/common/cli/emulatordriver.py
+++ b/TestON/drivers/common/cli/emulatordriver.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python
 """
 Created on 26-Oct-2012
-Copyright 2012 Open Networking Foundation ( ONF )
+Copyright 2012 Open Networking Foundation (ONF)
 
 Please refer questions to either the onos test mailing list at <onos-test@onosproject.org>,
 the System Testing Plans and Results wiki page at <https://wiki.onosproject.org/x/voMg>,
@@ -26,7 +26,6 @@
 """
 from drivers.common.clidriver import CLI
 
-
 class Emulator( CLI ):
     # The common functions for emulator included in emulatordriver
 
diff --git a/TestON/drivers/common/cli/onosclidriver.py b/TestON/drivers/common/cli/onosclidriver.py
old mode 100644
new mode 100755
index 4807763..f7f488d
--- a/TestON/drivers/common/cli/onosclidriver.py
+++ b/TestON/drivers/common/cli/onosclidriver.py
@@ -2,7 +2,7 @@
 
 """
 OCT 13 2014
-Copyright 2014 Open Networking Foundation ( ONF )
+Copyright 2014 Open Networking Foundation (ONF)
 
 Please refer questions to either the onos test mailing list at <onos-test@onosproject.org>,
 the System Testing Plans and Results wiki page at <https://wiki.onosproject.org/x/voMg>,
@@ -11,7 +11,7 @@
     TestON is free software: you can redistribute it and/or modify
     it under the terms of the GNU General Public License as published by
     the Free Software Foundation, either version 2 of the License, or
-    ( at your option ) any later version.
+    (at your option) any later version.
 
     TestON is distributed in the hope that it will be useful,
     but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -21,6 +21,7 @@
     You should have received a copy of the GNU General Public License
     along with TestON.  If not, see <http://www.gnu.org/licenses/>.
 """
+
 """
 This driver enters the onos> prompt to issue commands.
 
@@ -46,7 +47,6 @@
 from cStringIO import StringIO
 from itertools import izip
 
-
 class OnosCliDriver( CLI ):
 
     def __init__( self ):
@@ -61,11 +61,10 @@
         self.graph = Graph()
         super( OnosCliDriver, self ).__init__()
 
-    def checkOptions( self, var, defaultVar ):
+    def checkOptions(self, var, defaultVar):
         if var is None or var == "":
             return defaultVar
         return var
-
     def connect( self, **connectargs ):
         """
         Creates ssh handle for ONOS cli.
@@ -82,9 +81,9 @@
                 elif key == "karaf_password":
                     self.karafPass = self.options[ key ]
 
-            self.home = self.checkOptions( self.home, "~/onos" )
-            self.karafUser = self.checkOptions( self.karafUser, self.user_name )
-            self.karafPass = self.checkOptions( self.karafPass, self.pwd )
+            self.home = self.checkOptions(self.home, "~/onos")
+            self.karafUser = self.checkOptions(self.karafUser, self.user_name)
+            self.karafPass = self.checkOptions(self.karafPass, self.pwd )
 
             for key in self.options:
                 if key == 'onosIp':
@@ -164,7 +163,7 @@
         """
         Sends 'logout' command to ONOS cli
         Returns main.TRUE if exited CLI and
-                main.FALSE on timeout ( not guranteed you are disconnected )
+                main.FALSE on timeout (not guaranteed you are disconnected)
                 None on TypeError
                 Exits test on unknown error or pexpect exits unexpectedly
         """
@@ -228,12 +227,12 @@
                 # Expect the cellname in the ONOSCELL variable.
                 # Note that this variable name is subject to change
                 #   and that this driver will have to change accordingly
-                self.handle.expect( str( cellname ) )
+                self.handle.expect(str(cellname))
                 handleBefore = self.handle.before
                 handleAfter = self.handle.after
                 # Get the rest of the handle
-                self.handle.sendline( "" )
-                self.handle.expect( self.prompt )
+                self.handle.sendline("")
+                self.handle.expect(self.prompt)
                 handleMore = self.handle.before
 
                 main.log.info( "Cell call returned: " + handleBefore +
@@ -273,7 +272,7 @@
             # Check if we are already in the cli
             self.handle.sendline( "" )
             x = self.handle.expect( [
-                self.prompt, "onos>" ], commandlineTimeout )
+                self.prompt, "onos>" ], commandlineTimeout)
             if x == 1:
                 main.log.info( "ONOS cli is already running" )
                 return main.TRUE
@@ -352,10 +351,11 @@
         Note: karafTimeout is left as str so that this could be read
         and passed to startOnosCli from PARAMS file as str.
         """
+
         try:
             self.handle.sendline( "" )
             x = self.handle.expect( [
-                self.prompt, "onos>" ], commandlineTimeout )
+                self.prompt, "onos>" ], commandlineTimeout)
 
             if x == 1:
                 main.log.info( "ONOS cli is already running" )
@@ -478,7 +478,7 @@
             self.handle.sendline( "" )
             i = self.handle.expect( [ "onos>", self.prompt, pexpect.TIMEOUT ] )
             if i == 1:
-                main.log.error( self.name + ": onos cli session closed. " )
+                main.log.error( self.name + ": onos cli session closed. ")
                 if self.onosIp:
                     main.log.warn( "Trying to reconnect " + self.onosIp )
                     reconnectResult = self.startOnosCli( self.onosIp )
@@ -504,12 +504,12 @@
                 self.log( logStr, noExit=noExit )
             self.handle.sendline( cmdStr )
             if dollarSign:
-                i = self.handle.expect( [ "onos>" ], timeout )
+                i = self.handle.expect( ["onos>"], timeout )
             else:
-                i = self.handle.expect( [ "onos>", self.prompt ], timeout )
+                i = self.handle.expect( ["onos>", self.prompt], timeout )
             response = self.handle.before
             # TODO: do something with i
-            main.log.info( "Command '" + str(cmdStr) + "' sent to "
+            main.log.info( "Command '" + str( cmdStr ) + "' sent to "
                            + self.name + "." )
             if debug:
                 main.log.debug( self.name + ": Raw output" )
@@ -523,7 +523,7 @@
                 main.log.debug( self.name + ": " + repr( response ) )
 
             # Remove extra return chars that get added
-            response = re.sub( r"\s\r", "", response )
+            response = re.sub(  r"\s\r", "", response )
             if debug:
                 main.log.debug( self.name + ": Removed extra returns " +
                                 "from output" )
@@ -541,7 +541,7 @@
                 main.log.debug( self.name + ": split output" )
                 for r in output:
                     main.log.debug( self.name + ": " + repr( r ) )
-            output = output[ 1 ].strip()
+            output = output[1].strip()
             if showResponse:
                 main.log.info( "Response from ONOS: {}".format( output ) )
             return output
@@ -647,7 +647,7 @@
             main.log.exception( self.name + ": Uncaught exception!" )
             main.cleanAndExit()
 
-    def nodes( self, jsonFormat=True ):
+    def nodes( self, jsonFormat=True):
         """
         List the nodes currently visible
         Issues command: 'nodes'
@@ -794,7 +794,7 @@
             main.log.exception( self.name + ": Uncaught exception!" )
             main.cleanAndExit()
 
-    def checkMasters( self, jsonFormat=True ):
+    def checkMasters( self, jsonFormat=True  ):
         """
             Returns the output of the masters command.
             Optional argument:
@@ -852,7 +852,7 @@
                     main.log.info( "\n" + self.checkMasters( False ) )
                     return main.FALSE
             main.log.info( "Mastership balanced between " +
-                           str( len( masters ) ) + " masters" )
+                           str( len(masters) ) + " masters" )
             return main.TRUE
         except ( TypeError, ValueError ):
             main.log.exception( "{}: Object not as expected: {!r}".format( self.name, mastersOutput ) )
@@ -1186,9 +1186,9 @@
             else:
                 main.log.info( "Host intent installed between " +
                                str( hostIdOne ) + " and " + str( hostIdTwo ) )
-                match = re.search( 'id=0x([\da-f]+),', handle )
+                match = re.search('id=0x([\da-f]+),', handle)
                 if match:
-                    return match.group()[ 3:-1 ]
+                    return match.group()[3:-1]
                 else:
                     main.log.error( "Error, intent ID not found" )
                     main.log.debug( "Response from ONOS was: " +
@@ -1234,9 +1234,9 @@
                 main.log.info( "Optical intent installed between " +
                                str( ingressDevice ) + " and " +
                                str( egressDevice ) )
-                match = re.search( 'id=0x([\da-f]+),', handle )
+                match = re.search('id=0x([\da-f]+),', handle)
                 if match:
-                    return match.group()[ 3:-1 ]
+                    return match.group()[3:-1]
                 else:
                     main.log.error( "Error, intent ID not found" )
                     return None
@@ -1374,9 +1374,9 @@
                 main.log.info( "Point-to-point intent installed between " +
                                str( ingressDevice ) + " and " +
                                str( egressDevice ) )
-                match = re.search( 'id=0x([\da-f]+),', handle )
+                match = re.search('id=0x([\da-f]+),', handle)
                 if match:
-                    return match.group()[ 3:-1 ]
+                    return match.group()[3:-1]
                 else:
                     main.log.error( "Error, intent ID not found" )
                     return None
@@ -1532,9 +1532,9 @@
                                 "intent" )
                 return None
             else:
-                match = re.search( 'id=0x([\da-f]+),', handle )
+                match = re.search('id=0x([\da-f]+),', handle)
                 if match:
-                    return match.group()[ 3:-1 ]
+                    return match.group()[3:-1]
                 else:
                     main.log.error( "Error, intent ID not found" )
                     return None
@@ -1691,9 +1691,9 @@
                                 "intent" )
                 return None
             else:
-                match = re.search( 'id=0x([\da-f]+),', handle )
+                match = re.search('id=0x([\da-f]+),', handle)
                 if match:
-                    return match.group()[ 3:-1 ]
+                    return match.group()[3:-1]
                 else:
                     main.log.error( "Error, intent ID not found" )
                     return None
@@ -1729,7 +1729,7 @@
             tcpDst="",
             ingressLabel="",
             egressLabel="",
-            priority="" ):
+            priority=""):
         """
         Required:
             * ingressDevice: device id of ingress device
@@ -1824,9 +1824,9 @@
                 main.log.info( "MPLS intent installed between " +
                                str( ingressDevice ) + " and " +
                                str( egressDevice ) )
-                match = re.search( 'id=0x([\da-f]+),', handle )
+                match = re.search('id=0x([\da-f]+),', handle)
                 if match:
-                    return match.group()[ 3:-1 ]
+                    return match.group()[3:-1]
                 else:
                     main.log.error( "Error, intent ID not found" )
                     return None
@@ -2000,7 +2000,7 @@
             assert handle is not None, "Error in sendline"
             assert "Command not found:" not in handle, handle
             jsonResult = json.loads( handle )
-            return len( jsonResult[ 'routes4' ] )
+            return len(jsonResult['routes4'])
         except AssertionError:
             main.log.exception( "" )
             return None
@@ -2015,8 +2015,8 @@
             main.log.exception( self.name + ": Uncaught exception!" )
             main.cleanAndExit()
 
-    # =============Function to check Bandwidth allocation========
-    def allocations( self, jsonFormat=True, dollarSign=True ):
+    #=============Function to check Bandwidth allocation========
+    def allocations( self, jsonFormat = True, dollarSign = True ):
         """
         Description:
             Obtain Bandwidth Allocation Information from ONOS cli.
@@ -2043,7 +2043,7 @@
             main.log.exception( self.name + ": Uncaught exception!" )
             main.cleanAndExit()
 
-    def intents( self, jsonFormat=True, summary=False, **intentargs ):
+    def intents( self, jsonFormat = True, summary = False, **intentargs):
         """
         Description:
             Obtain intents from the ONOS cli.
@@ -2091,21 +2091,22 @@
             main.log.exception( self.name + ": Uncaught exception!" )
             main.cleanAndExit()
 
-    def getIntentState( self, intentsId, intentsJson=None ):
+    def getIntentState(self, intentsId, intentsJson=None):
         """
         Description:
-            Gets intent state. Accepts a single intent ID ( string type ) or a
+            Gets intent state. Accepts a single intent ID (string type) or a
             list of intent IDs.
         Parameters:
             intentsId: intent ID, both string type and list type are acceptable
             intentsJson: parsed json object from the onos:intents api
         Returns:
-            Returns the state ( string type ) of the ID if a single intent ID is
+            Returns the state (string type) of the ID if a single intent ID is
             accepted.
             Returns a list of dictionaries if a list of intent IDs is accepted,
             and each dictionary maps 'id' to the Intent ID and 'state' to
             corresponding intent state.
         """
+
         try:
             state = "State is Undefined"
             if not intentsJson:
@@ -2156,7 +2157,7 @@
         Required:
             intentsId - List of intents ID to be checked
         Optional:
-            expectedState - Check the expected state( s ) of each intents
+            expectedState - Check the expected state(s) of each intent's
                             state in the list.
                             *NOTE: You can pass in a list of expected state,
                             Eg: expectedState = [ 'INSTALLED' , 'INSTALLING' ]
@@ -2168,15 +2169,15 @@
             returnValue = main.TRUE
             # Generating a dictionary: intent id as a key and state as value
 
-            # intentsDict = self.getIntentState( intentsId )
+            #intentsDict = self.getIntentState( intentsId )
             intentsDict = []
             for intent in json.loads( self.intents() ):
-                if isinstance( intentsId, types.StringType ) \
-                        and intent.get( 'id' ) == intentsId:
-                    intentsDict.append( intent )
-                elif isinstance( intentsId, types.ListType ) \
+                if isinstance ( intentsId, types.StringType) \
+                        and intent.get('id') == intentsId:
+                    intentsDict.append(intent)
+                elif isinstance ( intentsId, types.ListType ) \
                         and any( intent.get( 'id' ) == ids for ids in intentsId ):
-                    intentsDict.append( intent )
+                            intentsDict.append(intent)
 
             if not intentsDict:
                 main.log.info( self.name + ": There is something wrong " +
@@ -2247,10 +2248,10 @@
                 expected = expected.rstrip()
                 main.log.debug( "Expect: {}\nactual: {}".format( expected, actual ) )
                 if actual != expected and 'allocated' in actual and 'allocated' in expected:
-                    marker1 = actual.find( 'allocated' )
-                    m1 = actual[ :marker1 ]
-                    marker2 = expected.find( 'allocated' )
-                    m2 = expected[ :marker2 ]
+                    marker1 = actual.find('allocated')
+                    m1 = actual[:marker1]
+                    marker2 = expected.find('allocated')
+                    m2 = expected[:marker2]
                     if m1 != m2:
                         bandwidthFailed = True
                 elif actual != expected and 'allocated' not in actual and 'allocated' not in expected:
@@ -2259,7 +2260,7 @@
             ONOSOutput.close()
 
             if bandwidthFailed:
-                main.log.error( "Bandwidth not allocated correctly using Intents!!" )
+                main.log.error("Bandwidth not allocated correctly using Intents!!")
                 returnValue = main.FALSE
             return returnValue
         except TypeError:
@@ -2337,6 +2338,7 @@
             Returns main.TRUE only if the number of all installed intents are the same as total intents number
             , otherwise, returns main.FALSE.
         """
+
         try:
             cmd = "intents -s -j"
 
@@ -2348,10 +2350,10 @@
 
             # get total and installed number, see if they are match
             allState = response.get( 'all' )
-            if allState.get( 'total' ) == allState.get( 'installed' ):
-                main.log.info( 'Total Intents: {}   Installed Intents: {}'.format( allState.get( 'total' ), allState.get( 'installed' ) ) )
+            if allState.get('total') == allState.get('installed'):
+                main.log.info( 'Total Intents: {}   Installed Intents: {}'.format( allState.get('total'), allState.get('installed') ) )
                 return main.TRUE
-            main.log.info( 'Verified Intents failed Excepte intetnes: {} installed intents: {}'.format( allState.get( 'total' ), allState.get( 'installed' ) ) )
+            main.log.info( 'Verified Intents failed. Expected intents: {} installed intents: {}'.format( allState.get('total'), allState.get('installed') ) )
             return main.FALSE
 
         except ( TypeError, ValueError ):
@@ -2413,7 +2415,7 @@
             main.log.exception( self.name + ": Uncaught exception!" )
             main.cleanAndExit()
 
-    def checkFlowCount( self, min=0, timeout=60 ):
+    def checkFlowCount(self, min=0, timeout=60 ):
         count = self.getTotalFlowsNum( timeout=timeout )
         count = int( count ) if count else 0
         return count if ( count > min ) else False
@@ -2433,11 +2435,11 @@
                           parameter is set true, return main.FALSE otherwise.
         """
         try:
-            states = [ "PENDING_ADD", "PENDING_REMOVE", "REMOVED", "FAILED" ]
+            states = ["PENDING_ADD", "PENDING_REMOVE", "REMOVED", "FAILED"]
             checkedStates = []
-            statesCount = [ 0, 0, 0, 0 ]
+            statesCount = [0, 0, 0, 0]
             for s in states:
-                rawFlows = self.flows( state=s, timeout=timeout )
+                rawFlows = self.flows( state=s, timeout = timeout )
                 if rawFlows:
                     # if we didn't get flows or flows function return None, we should return
                     # main.Flase
@@ -2445,19 +2447,19 @@
                 else:
                     return main.FALSE
             for i in range( len( states ) ):
-                for c in checkedStates[ i ]:
+                for c in checkedStates[i]:
                     try:
-                        statesCount[ i ] += int( c.get( "flowCount" ) )
+                        statesCount[i] += int( c.get( "flowCount" ) )
                     except TypeError:
                         main.log.exception( "Json object not as expected" )
-                main.log.info( states[ i ] + " flows: " + str( statesCount[ i ] ) )
+                main.log.info( states[i] + " flows: " + str( statesCount[i] ) )
 
             # We want to count PENDING_ADD if isPENDING is true
             if isPENDING:
-                if statesCount[ 1 ] + statesCount[ 2 ] + statesCount[ 3 ] > 0:
+                if statesCount[1] + statesCount[2] + statesCount[3] > 0:
                     return main.FALSE
             else:
-                if statesCount[ 0 ] + statesCount[ 1 ] + statesCount[ 2 ] + statesCount[ 3 ] > 0:
+                if statesCount[0] + statesCount[1] + statesCount[2] + statesCount[3] > 0:
                     return main.FALSE
             return main.TRUE
         except ( TypeError, ValueError ):
@@ -2479,7 +2481,7 @@
             main.cleanAndExit()
 
     def pushTestIntents( self, ingress, egress, batchSize, offset="",
-                         options="", timeout=10, background=False, noExit=False, getResponse=False ):
+                         options="", timeout=10, background = False, noExit=False, getResponse=False ):
         """
         Description:
             Push a number of intents in a batch format to
@@ -2546,17 +2548,18 @@
             The number of ADDED flows
             Or return None if any exceptions
         """
+
         try:
             # get total added flows number
             cmd = "flows -c added"
             rawFlows = self.sendline( cmd, timeout=timeout, noExit=noExit )
             if rawFlows:
-                rawFlows = rawFlows.split( "\n" )
+                rawFlows = rawFlows.split("\n")
                 totalFlows = 0
                 for l in rawFlows:
-                    totalFlows += int( l.split( "Count=" )[ 1 ] )
+                    totalFlows += int(l.split("Count=")[1])
             else:
-                main.log.error( "Response not as expected!" )
+                main.log.error("Response not as expected!")
                 return None
             return totalFlows
 
@@ -2578,7 +2581,7 @@
                 main.cleanAndExit()
             return None
 
-    def getTotalIntentsNum( self, timeout=60, noExit=False ):
+    def getTotalIntentsNum( self, timeout=60, noExit = False ):
         """
         Description:
             Get the total number of intents, include every states.
@@ -2593,7 +2596,7 @@
             if response is None:
                 return -1
             response = json.loads( response )
-            return int( response.get( "intents" ) )
+            return int( response.get("intents") )
         except ( TypeError, ValueError ):
             main.log.exception( "{}: Object not as expected: {!r}".format( self.name, response ) )
             return None
@@ -2687,7 +2690,7 @@
         """
         try:
             # Obtain output of intents function
-            intentsStr = self.intents( jsonFormat=True )
+            intentsStr = self.intents(jsonFormat=True)
             if intentsStr is None:
                 raise TypeError
             # Convert to a dictionary
@@ -2797,7 +2800,7 @@
                 main.log.info( "There are no nodes to get id from" )
                 return idList
             nodesJson = json.loads( nodesStr )
-            idList = [ node.get( 'id' ) for node in nodesJson ]
+            idList = [ node.get('id') for node in nodesJson ]
             return idList
         except ( TypeError, ValueError ):
             main.log.exception( "{}: Object not as expected: {!r}".format( self.name, nodesStr ) )
@@ -2849,7 +2852,7 @@
         import json
         try:
             # either onos:topology or 'topology' will work in CLI
-            topology = json.loads( topologyOutput )
+            topology = json.loads(topologyOutput)
             main.log.debug( topology )
             return topology
         except ( TypeError, ValueError ):
@@ -2863,7 +2866,7 @@
             main.log.exception( self.name + ": Uncaught exception!" )
             main.cleanAndExit()
 
-    def checkStatus( self, numoswitch, numolink, numoctrl=-1, logLevel="info" ):
+    def checkStatus(self, numoswitch, numolink, numoctrl = -1, logLevel="info"):
         """
         Checks the number of switches & links that ONOS sees against the
         supplied values. By default this will report to main.log, but the
@@ -3222,7 +3225,7 @@
         Returns a dictionary containing the current intent states and the count
         """
         try:
-            intents = self.intents()
+            intents = self.intents( )
             states = []
             for intent in json.loads( intents ):
                 states.append( intent.get( 'state', None ) )
@@ -3298,7 +3301,7 @@
 
     def specificLeaderCandidate( self, topic ):
         """
-        Returns a list in format [ leader,candidate1,candidate2,... ] for a given
+        Returns a list in format [leader,candidate1,candidate2,...] for a given
         topic parameter and an empty list if the topic doesn't exist
         If no leader is elected leader in the returned list will be "none"
         Returns None if there is a type error processing the json object
@@ -3311,9 +3314,9 @@
             output = json.loads( rawOutput )
             results = []
             for dict in output:
-                if dict[ "topic" ] == topic:
-                    leader = dict[ "leader" ]
-                    candidates = re.split( ", ", dict[ "candidates" ][ 1:-1 ] )
+                if dict["topic"] == topic:
+                    leader = dict["leader"]
+                    candidates = re.split( ", ", dict["candidates"][1:-1] )
                     results.append( leader )
                     results.extend( candidates )
             return results
@@ -3402,10 +3405,10 @@
         information about installed ONOS applications
         """
         # Sample JSON object
-        # [ { "name":"org.onosproject.openflow","id":0,"version":"1.2.0",
+        # [{"name":"org.onosproject.openflow","id":0,"version":"1.2.0",
         # "description":"ONOS OpenFlow protocol southbound providers",
         # "origin":"ON.Lab","permissions":"[]","featuresRepo":"",
-        # "features":"[onos-openflow]","state":"ACTIVE" } ]
+        # "features":"[onos-openflow]","state":"ACTIVE"}]
         try:
             cmdStr = "onos:apps"
             if summary:
@@ -3452,8 +3455,8 @@
             appsJson = json.loads( output )
             state = None
             for app in appsJson:
-                if appName == app.get( 'name' ):
-                    state = app.get( 'state' )
+                if appName == app.get('name'):
+                    state = app.get('state')
                     break
             if state == "ACTIVE" or state == "INSTALLED":
                 return state
@@ -3505,7 +3508,7 @@
                 # Invalid option
                 main.log.error( "The ONOS app command argument only takes " +
                                 "the values: (activate|deactivate|uninstall)" +
-                                "; was given '" + option + "'" )
+                                "; was given '" + option + "'")
                 return main.FALSE
             cmdStr = "onos:app " + option + " " + appName
             output = self.sendline( cmdStr )
@@ -3563,7 +3566,7 @@
             if status == "INSTALLED":
                 response = self.app( appName, "activate" )
                 if check and response == main.TRUE:
-                    for i in range( 10 ):  # try 10 times then give up
+                    for i in range(10):  # try 10 times then give up
                         status = self.appStatus( appName )
                         if status == "ACTIVE":
                             return main.TRUE
@@ -3616,7 +3619,7 @@
             elif status == "ACTIVE":
                 response = self.app( appName, "deactivate" )
                 if check and response == main.TRUE:
-                    for i in range( 10 ):  # try 10 times then give up
+                    for i in range(10):  # try 10 times then give up
                         status = self.appStatus( appName )
                         if status == "INSTALLED":
                             return main.TRUE
@@ -3665,7 +3668,7 @@
             if status == "INSTALLED":
                 response = self.app( appName, "uninstall" )
                 if check and response == main.TRUE:
-                    for i in range( 10 ):  # try 10 times then give up
+                    for i in range(10):  # try 10 times then give up
                         status = self.appStatus( appName )
                         if status == "UNINSTALLED":
                             return main.TRUE
@@ -3680,7 +3683,7 @@
                                 "currently active." )
                 response = self.app( appName, "uninstall" )
                 if check and response == main.TRUE:
-                    for i in range( 10 ):  # try 10 times then give up
+                    for i in range(10):  # try 10 times then give up
                         status = self.appStatus( appName )
                         if status == "UNINSTALLED":
                             return main.TRUE
@@ -3785,8 +3788,8 @@
                     # there is more than one app with this ID
                     result = main.FALSE
                     # We will log this later in the method
-                elif not current[ 0 ][ 'name' ] == appName:
-                    currentName = current[ 0 ][ 'name' ]
+                elif not current[0][ 'name' ] == appName:
+                    currentName = current[0][ 'name' ]
                     result = main.FALSE
                     main.log.error( "'app-ids' has " + str( currentName ) +
                                     " registered under id:" + str( appID ) +
@@ -3801,12 +3804,12 @@
                 namesList.append( item[ 'name' ] )
             if len( idsList ) != len( set( idsList ) ) or\
                len( namesList ) != len( set( namesList ) ):
-                main.log.error( "'app-ids' has some duplicate entries: \n"
-                                + json.dumps( ids,
-                                              sort_keys=True,
-                                              indent=4,
-                                              separators=( ',', ': ' ) ) )
-                result = main.FALSE
+                    main.log.error( "'app-ids' has some duplicate entries: \n"
+                                    + json.dumps( ids,
+                                                  sort_keys=True,
+                                                  indent=4,
+                                                  separators=( ',', ': ' ) ) )
+                    result = main.FALSE
             return result
         except ( TypeError, ValueError ):
             main.log.exception( "{}: Object not as expected: {!r}".format( self.name, rawJson ) )
@@ -3877,7 +3880,7 @@
             propName - The case sensitive name of the property to be set/unset
         Optional arguments:
             value - The value to set the property to. If None, will unset the
-                    property and revert it to it's default value( if applicable )
+                    property and revert it to its default value (if applicable)
             check - Boolean, Check whether the option was successfully set this
                     only applies when a value is given.
         returns:
@@ -3977,7 +3980,7 @@
         CLI command to add elements to a distributed set.
         Arguments:
             setName - The name of the set to add to.
-            values - The value( s ) to add to the set, space seperated.
+            values - The value(s) to add to the set, space separated.
         Example usages:
             setTestAdd( "set1", "a b c" )
             setTestAdd( "set2", "1" )
@@ -3991,9 +3994,9 @@
             output = self.distPrimitivesSend( cmdStr )
             positiveMatch = "\[(.*)\] was added to the set " + str( setName )
             negativeMatch = "\[(.*)\] was already in set " + str( setName )
-            if re.search( positiveMatch, output ):
+            if re.search( positiveMatch, output):
                 return main.TRUE
-            elif re.search( negativeMatch, output ):
+            elif re.search( negativeMatch, output):
                 return main.FALSE
             else:
                 main.log.error( self.name + ": setTestAdd did not" +
@@ -4012,11 +4015,11 @@
         CLI command to remove elements from a distributed set.
         Required arguments:
             setName - The name of the set to remove from.
-            values - The value( s ) to remove from the set, space seperated.
+            values - The value(s) to remove from the set, space separated.
         Optional arguments:
             clear - Clear all elements from the set
-            retain - Retain only the  given values. ( intersection of the
-                     original set and the given set )
+            retain - Retain only the  given values. (intersection of the
+                     original set and the given set)
         returns:
             main.TRUE on success OR
             main.FALSE if the set was not changed OR
@@ -4076,7 +4079,7 @@
         Required arguments:
             setName - The name of the set to remove from.
         Optional arguments:
-            values - The value( s ) to check if in the set, space seperated.
+            values - The value(s) to check if in the set, space separated.
         returns:
             main.ERROR on error OR
             A list of elements in the set if no optional arguments are
@@ -4556,8 +4559,8 @@
             None on Error
 
             Example output
-            { 'Key1': { 'oldValue': 'oldTestValue', 'value': 'Testing' },
-              'Key2': { 'value': 'Testing' } }
+            { 'Key1': {'oldValue': 'oldTestValue', 'value': 'Testing'},
+              'Key2': {'value': 'Testing'} }
         """
         try:
             numKeys = str( numKeys )
@@ -4647,10 +4650,10 @@
 
     def setSwController( self, uri, ip, proto="tcp", port="6653", jsonFormat=True ):
         """
-        Descrition: sets the controller( s ) for the specified device
+        Description: sets the controller(s) for the specified device
 
         Parameters:
-            Required: uri - String: The uri of the device( switch ).
+            Required: uri - String: The uri of the device(switch).
                       ip - String or List: The ip address of the controller.
                       This parameter can be formed in a couple of different ways.
                         VALID:
@@ -4675,14 +4678,14 @@
                 cmd += " -j"
             cmd += " " + uri
             if isinstance( ip, str ):
-                ip = [ ip ]
+                ip = [ip]
             for item in ip:
                 if ":" in item:
                     sitem = item.split( ":" )
-                    if len( sitem ) == 3:
+                    if len(sitem) == 3:
                         cmd += " " + item
-                    elif "." in sitem[ 1 ]:
-                        cmd += " {}:{}".format( item, port )
+                    elif "." in sitem[1]:
+                        cmd += " {}:{}".format(item, port)
                     else:
                         main.log.error( "Malformed entry: " + item )
                         raise TypeError
@@ -4710,19 +4713,19 @@
             main.cleanAndExit()
 
     def removeDevice( self, device ):
-        """
+        '''
         Description:
-            Remove a device from ONOS by passing the uri of the device( s ).
+            Remove a device from ONOS by passing the uri of the device(s).
         Parameters:
-            device - ( str or list ) the id or uri of the device ex. "of:0000000000000001"
+            device - (str or list) the id or uri of the device ex. "of:0000000000000001"
         Returns:
             Returns main.FALSE if an exception is thrown or an error is present
             in the response. Otherwise, returns main.TRUE.
         NOTE:
             If a host cannot be removed, then this function will return main.FALSE
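+        Example (using the device id format shown above):
+            removeDevice( "of:0000000000000001" )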
-        """
+        '''
         try:
-            if isinstance( device, str ):
+            if type( device ) is str:
                 deviceStr = device
                 device = []
                 device.append( deviceStr )
@@ -4751,19 +4754,19 @@
             main.cleanAndExit()
 
     def removeHost( self, host ):
-        """
+        '''
         Description:
-            Remove a host from ONOS by passing the id of the host( s )
+            Remove a host from ONOS by passing the id of the host(s)
         Parameters:
-            hostId - ( str or list ) the id or mac of the host ex. "00:00:00:00:00:01"
+            hostId - (str or list) the id or mac of the host ex. "00:00:00:00:00:01"
         Returns:
             Returns main.FALSE if an exception is thrown or an error is present
             in the response. Otherwise, returns main.TRUE.
         NOTE:
             If a host cannot be removed, then this function will return main.FALSE
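+        Example (using the host id format shown above):
+            removeHost( "00:00:00:00:00:01" )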
-        """
+        '''
         try:
-            if isinstance( host, str ):
+            if type( host ) is str:
                 host = list( host )
 
             for h in host:
@@ -4790,16 +4793,16 @@
             main.cleanAndExit()
 
     def link( self, begin, end, state, timeout=30, showResponse=True ):
-        """
+        '''
         Description:
             Bring link down or up in the null-provider.
         params:
-            begin - ( string ) One end of a device or switch.
-            end - ( string ) the other end of the device or switch
+            begin - (string) One end of a device or switch.
+            end - (string) the other end of the device or switch
         returns:
             main.TRUE if no exceptions were thrown and no Errors are
             present in the resoponse. Otherwise, returns main.FALSE
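+        Example (a sketch; the null-provider device numbers are hypothetical):
+            link( "0000000000000001", "0000000000000002", "down" )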
-        """
+        '''
         try:
             cmd = "null-link null:{} null:{} {}".format( begin, end, state )
             response = self.sendline( cmd, showResponse=showResponse, timeout=timeout )
@@ -4824,18 +4827,18 @@
             main.cleanAndExit()
 
     def portstate( self, dpid, port, state ):
-        """
+        '''
         Description:
              Changes the state of port in an OF switch by means of the
              PORTSTATUS OF messages.
         params:
-            dpid - ( string ) Datapath ID of the device. Ex: 'of:0000000000000102'
-            port - ( string ) target port in the device. Ex: '2'
-            state - ( string ) target state ( enable or disable )
+            dpid - (string) Datapath ID of the device. Ex: 'of:0000000000000102'
+            port - (string) target port in the device. Ex: '2'
+            state - (string) target state (enable or disable)
         returns:
             main.TRUE if no exceptions were thrown and no Errors are
             present in the resoponse. Otherwise, returns main.FALSE
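+        Example (using the values shown above):
+            portstate( "of:0000000000000102", "2", "disable" )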
-        """
+        '''
         try:
             state = state.lower()
             assert state == 'enable' or state == 'disable', "Unknown state"
@@ -4898,12 +4901,12 @@
           vertex2: { 'edges': ..., 'name': ..., 'protocol': ... } }
         Each vertex should at least have an 'edges' attribute which describes the
         adjacency information. The value of 'edges' attribute is also represented by
-        a dictionary, which maps each edge ( identified by the neighbor vertex ) to a
+        a dictionary, which maps each edge (identified by the neighbor vertex) to a
         list of attributes.
         An example of the edges dictionary:
         'edges': { vertex2: { 'port': ..., 'weight': ... },
                    vertex3: { 'port': ..., 'weight': ... } }
-        If includeHost == True, all hosts ( and host-switch links ) will be included
+        If includeHost == True, all hosts (and host-switch links) will be included
         in topology data.
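+        Example usage (a sketch; assumes an includeHost keyword as described above):
+            graphDict = getGraphDict( includeHost=False )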
         """
         graphDict = {}
@@ -4924,7 +4927,7 @@
                 assert idToDevice[ nodeA ][ 'available' ] and idToDevice[ nodeB ][ 'available' ]
                 if nodeA not in graphDict.keys():
                     graphDict[ nodeA ] = { 'edges': {},
-                                           'dpid': idToDevice[ nodeA ][ 'id' ][ 3: ],
+                                           'dpid': idToDevice[ nodeA ][ 'id' ][3:],
                                            'type': idToDevice[ nodeA ][ 'type' ],
                                            'available': idToDevice[ nodeA ][ 'available' ],
                                            'role': idToDevice[ nodeA ][ 'role' ],
@@ -4933,7 +4936,7 @@
                                            'sw': idToDevice[ nodeA ][ 'sw' ],
                                            'serial': idToDevice[ nodeA ][ 'serial' ],
                                            'chassisId': idToDevice[ nodeA ][ 'chassisId' ],
-                                           'annotations': idToDevice[ nodeA ][ 'annotations' ] }
+                                           'annotations': idToDevice[ nodeA ][ 'annotations' ]}
                 else:
                     # Assert nodeB is not connected to any current links of nodeA
                     assert nodeB not in graphDict[ nodeA ][ 'edges' ].keys()
@@ -4959,11 +4962,11 @@
             return None
 
     def getIntentPerfSummary( self ):
-        """
+        '''
         Send command to check intent-perf summary
         Returns: dictionary for intent-perf summary
                  if something wrong, function will return None
-        """
+        '''
         cmd = "intent-perf -s"
         respDic = {}
         resp = self.sendline( cmd )
@@ -4975,9 +4978,9 @@
                 # Delete any white space in line
                 temp = re.sub( r'\s+', '', l )
                 temp = temp.split( ":" )
-                respDic[ temp[ 0 ] ] = temp[ 1 ]
+                respDic[ temp[0] ] = temp[ 1 ]
 
-        except ( TypeError, ValueError ):
+        except (TypeError, ValueError):
             main.log.exception( self.name + ": Object not as expected" )
             return None
         except KeyError:
@@ -5019,7 +5022,7 @@
                 total: return how many lines in karaf log
         """
         try:
-            assert isinstance( searchTerm, str )
+            assert type( searchTerm ) is str
             # Build the log paths string
             logPath = '/opt/onos/log/karaf.log.'
             logPaths = '/opt/onos/log/karaf.log'
@@ -5041,14 +5044,14 @@
                 return num
             elif mode == 'total':
                 totalLines = self.sendline( "cat /opt/onos/log/karaf.log | wc -l" )
-                return int( totalLines )
+                return int(totalLines)
             else:
                 main.log.error( self.name + " unsupported mode" )
                 return main.ERROR
             before = self.sendline( cmd )
             before = before.splitlines()
             # make sure the returned list only contains the search term
-            returnLines = [ line for line in before if searchTerm in line ]
+            returnLines = [line for line in before if searchTerm in line]
             return returnLines
         except AssertionError:
             main.log.error( self.name + " searchTerm is not string type" )
@@ -5094,7 +5097,7 @@
             main.log.error( self.name + ":    " + self.handle.before )
             main.cleanAndExit()
         except NotImplementedError:
-            main.log.exception( self.name + ": Json output not supported" )
+            main.log.exception( self.name + ": Json output not supported")
             return None
         except Exception:
             main.log.exception( self.name + ": Uncaught exception!" )
@@ -5116,7 +5119,7 @@
             for match in mIter:
                 item = {}
                 item[ 'name' ] = match.group( 'name' )
-                ifaces = match.group( 'interfaces' ).split( ', ' )
+                ifaces = match.group( 'interfaces' ).split( ', ')
                 if ifaces == [ "" ]:
                     ifaces = []
                 item[ 'interfaces' ] = ifaces
@@ -5156,7 +5159,7 @@
             main.log.error( self.name + ":    " + self.handle.before )
             main.cleanAndExit()
         except NotImplementedError:
-            main.log.exception( self.name + ": Json output not supported" )
+            main.log.exception( self.name + ": Json output not supported")
             return None
         except Exception:
             main.log.exception( self.name + ": Uncaught exception!" )
@@ -5388,14 +5391,14 @@
             main.log.error( self.name + ":    " + self.handle.before )
             main.cleanAndExit()
         except NotImplementedError:
-            main.log.exception( self.name + ": Json output not supported" )
+            main.log.exception( self.name + ": Json output not supported")
             return None
         except Exception:
             main.log.exception( self.name + ": Uncaught exception!" )
             main.cleanAndExit()
 
     def getTimeStampFromLog( self, mode, searchTerm, splitTerm_before, splitTerm_after, startLine='', logNum=1 ):
-        """
+        '''
         Get the timestamp of searchTerm from karaf log.
 
         Arguments:
@@ -5408,17 +5411,17 @@
 
             others:
                 Please look at the "logsearch" Function in onosclidriver.py
-        """
+        '''
         if logNum < 0:
-            main.log.error( "Get wrong log number " )
+            main.log.error("Get wrong log number ")
             return main.ERROR
         lines = self.logSearch( mode=mode, searchTerm=searchTerm, startLine=startLine, logNum=logNum )
-        if len( lines ) == 0:
+        if len(lines) == 0:
             main.log.warn( "Captured timestamp string is empty" )
             return main.ERROR
         lines = lines[ 0 ]
         try:
-            assert isinstance( lines, str )
+            assert type(lines) is str
             # get the target value
             line = lines.split( splitTerm_before )
             key = line[ 1 ].split( splitTerm_after )
@@ -5586,7 +5589,7 @@
                 return None
             else:
                 match = re.search( pattern, output )
-                return match.group( 0 )
+                return match.group(0)
         except ( AttributeError, TypeError ):
             main.log.exception( self.name + ": Object not as expected; " + str( output ) )
             return None
@@ -5619,7 +5622,7 @@
                 return None
             else:
                 match = re.search( pattern, output )
-                return match.group( 0 )
+                return match.group(0)
         except ( AttributeError, TypeError ):
             main.log.exception( self.name + ": Object not as expected; " + str( output ) )
             return None
@@ -5652,7 +5655,7 @@
                 return None
             else:
                 match = re.search( pattern, output )
-                return match.group( 0 )
+                return match.group(0)
         except ( AttributeError, TypeError ):
             main.log.exception( self.name + ": Object not as expected; " + str( output ) )
             return None
@@ -5706,4 +5709,4 @@
             main.cleanAndExit()
         except Exception:
             main.log.exception( self.name + ": Uncaught exception!" )
-            main.cleanAndExit()
+            main.cleanAndExit()
\ No newline at end of file
diff --git a/TestON/drivers/common/cli/onosclusterdriver.py b/TestON/drivers/common/cli/onosclusterdriver.py
old mode 100644
new mode 100755
index cc32b94..5093cfe
--- a/TestON/drivers/common/cli/onosclusterdriver.py
+++ b/TestON/drivers/common/cli/onosclusterdriver.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python
 """
-Copyright 2017 Open Networking Foundation ( ONF )
+Copyright 2017 Open Networking Foundation (ONF)
 
 Please refer questions to either the onos test mailing list at <onos-test@onosproject.org>,
 the System Testing Plans and Results wiki page at <https://wiki.onosproject.org/x/voMg>,
@@ -9,7 +9,7 @@
     TestON is free software: you can redistribute it and/or modify
     it under the terms of the GNU General Public License as published by
     the Free Software Foundation, either version 2 of the License, or
-    ( at your option ) any later version.
+    (at your option) any later version.
 
     TestON is distributed in the hope that it will be useful,
     but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -33,15 +33,11 @@
 from drivers.common.clidriver import CLI
 
 # FIXME: Move this to it's own file?
-
-
 class Controller():
-
     def __str__( self ):
         return self.name
-
     def __repr__( self ):
-        # TODO: use repr() for components?
+        #TODO use repr() for components?
         return "%s<IP=%s, CLI=%s, REST=%s, Bench=%s >" % ( self.name,
                                                            self.ipAddress,
                                                            self.CLI,
@@ -51,9 +47,9 @@
     def __getattr__( self, name ):
         """
         Called when an attribute lookup has not found the attribute
-        in the usual places ( i.e. it is not an instance attribute nor
-        is it found in the class tree for self ). name is the attribute
-        name. This method should return the ( computed ) attribute value
+        in the usual places (i.e. it is not an instance attribute nor
+        is it found in the class tree for self). name is the attribute
+        name. This method should return the (computed) attribute value
         or raise an AttributeError exception.
 
         We will look into each of the node's component handles to try to find the attreibute, looking at REST first
@@ -63,26 +59,28 @@
             main.log.warn( "Rest driver has attribute '%s'" % ( name ) )
             if not usedDriver:
                 usedDriver = True
-                main.log.debug( "Using Rest driver's attribute for '%s'" % ( name ) )
-                f = getattr( self.REST, name )
+                main.log.debug("Using Rest driver's attribute for '%s'" % (name))
+                f = getattr( self.REST, name)
         if hasattr( self.CLI, name ):
             main.log.warn( "CLI driver has attribute '%s'" % ( name ) )
             if not usedDriver:
                 usedDriver = True
-                main.log.debug( "Using CLI driver's attribute for '%s'" % ( name ) )
-                f = getattr( self.CLI, name )
+                main.log.debug("Using CLI driver's attribute for '%s'" % (name))
+                f = getattr( self.CLI, name)
         if hasattr( self.Bench, name ):
             main.log.warn( "Bench driver has attribute '%s'" % ( name ) )
             if not usedDriver:
                 usedDriver = True
-                main.log.debug( "Using Bench driver's attribute for '%s'" % ( name ) )
-                f = getattr( self.Bench, name )
+                main.log.debug("Using Bench driver's attribute for '%s'" % (name))
+                f = getattr( self.Bench, name)
         if usedDriver:
             return f
         raise AttributeError( "Could not find the attribute %s in %r or it's component handles" % ( name, self ) )
 
+
+
     def __init__( self, name, ipAddress, CLI=None, REST=None, Bench=None, pos=None, userName=None, server=None ):
-        # TODO: validate these arguments
+        #TODO: validate these arguments
         self.name = str( name )
         self.ipAddress = ipAddress
         self.CLI = CLI
@@ -94,7 +92,6 @@
         self.user_name = userName
         self.server = server
 
-
 class OnosClusterDriver( CLI ):
 
     def __init__( self ):
@@ -134,9 +131,9 @@
                 elif key == "cluster_name":
                     prefix = self.options[ key ]
 
-            self.home = self.checkOptions( self.home, "~/onos" )
-            self.karafUser = self.checkOptions( self.karafUser, self.user_name )
-            self.karafPass = self.checkOptions( self.karafPass, self.pwd )
+            self.home = self.checkOptions(self.home, "~/onos")
+            self.karafUser = self.checkOptions(self.karafUser, self.user_name)
+            self.karafPass = self.checkOptions(self.karafPass, self.pwd )
             prefix = self.checkOptions( prefix, "ONOS" )
 
             self.name = self.options[ 'name' ]
@@ -249,13 +246,13 @@
         """
         Parse the cluster options to create an ONOS cli component with the given name
         """
-        main.componentDictionary[ name ] = main.componentDictionary[ self.name ].copy()
+        main.componentDictionary[name] = main.componentDictionary[self.name].copy()
         clihost = main.componentDictionary[ name ][ 'COMPONENTS' ].get( "diff_clihost", "" )
         if clihost == "True":
             main.componentDictionary[ name ][ 'host' ] = host
-        main.componentDictionary[ name ][ 'type' ] = "OnosCliDriver"
-        main.componentDictionary[ name ][ 'connect_order' ] = str( int( main.componentDictionary[ name ][ 'connect_order' ] ) + 1 )
-        main.log.debug( main.componentDictionary[ name ] )
+        main.componentDictionary[name]['type'] = "OnosCliDriver"
+        main.componentDictionary[name]['connect_order'] = str( int( main.componentDictionary[name]['connect_order'] ) + 1 )
+        main.log.debug( main.componentDictionary[name] )
 
     def createCliComponent( self, name, host ):
         """
@@ -289,18 +286,18 @@
         """
         Parse the cluster options to create an ONOS cli component with the given name
         """
-        main.componentDictionary[ name ] = main.componentDictionary[ self.name ].copy()
-        main.log.debug( main.componentDictionary[ name ] )
-        user = main.componentDictionary[ name ][ 'COMPONENTS' ].get( "web_user", "onos" )
-        main.componentDictionary[ name ][ 'user' ] = self.checkOptions( user, "onos" )
-        password = main.componentDictionary[ name ][ 'COMPONENTS' ].get( "web_pass", "rocks" )
-        main.componentDictionary[ name ][ 'pass' ] = self.checkOptions( password, "rocks" )
-        main.componentDictionary[ name ][ 'host' ] = host
-        port = main.componentDictionary[ name ][ 'COMPONENTS' ].get( "rest_port", "8181" )
-        main.componentDictionary[ name ][ 'port' ] = self.checkOptions( port, "8181" )
-        main.componentDictionary[ name ][ 'type' ] = "OnosRestDriver"
-        main.componentDictionary[ name ][ 'connect_order' ] = str( int( main.componentDictionary[ name ][ 'connect_order' ] ) + 1 )
-        main.log.debug( main.componentDictionary[ name ] )
+        main.componentDictionary[name] = main.componentDictionary[self.name].copy()
+        main.log.debug( main.componentDictionary[name] )
+        user = main.componentDictionary[name]['COMPONENTS'].get( "web_user", "onos" )
+        main.componentDictionary[name]['user'] = self.checkOptions( user, "onos" )
+        password = main.componentDictionary[name]['COMPONENTS'].get( "web_pass", "rocks" )
+        main.componentDictionary[name]['pass'] = self.checkOptions( password, "rocks" )
+        main.componentDictionary[name]['host'] = host
+        port = main.componentDictionary[name]['COMPONENTS'].get( "rest_port", "8181" )
+        main.componentDictionary[name]['port'] = self.checkOptions( port, "8181" )
+        main.componentDictionary[name]['type'] = "OnosRestDriver"
+        main.componentDictionary[name]['connect_order'] = str( int( main.componentDictionary[name]['connect_order'] ) + 1 )
+        main.log.debug( main.componentDictionary[name] )
 
     def createRestComponent( self, name, ipAddress ):
         """
@@ -334,12 +331,12 @@
         """
         Parse the cluster options to create an ONOS "bench" component with the given name
         """
-        main.componentDictionary[ name ] = main.componentDictionary[ self.name ].copy()
-        main.componentDictionary[ name ][ 'type' ] = "OnosDriver"
-        home = main.componentDictionary[ name ][ 'COMPONENTS' ].get( "onos_home", None )
-        main.componentDictionary[ name ][ 'home' ] = self.checkOptions( home, None )
-        main.componentDictionary[ name ][ 'connect_order' ] = str( int( main.componentDictionary[ name ][ 'connect_order' ] ) + 1 )
-        main.log.debug( main.componentDictionary[ name ] )
+        main.componentDictionary[name] = main.componentDictionary[self.name].copy()
+        main.componentDictionary[name]['type'] = "OnosDriver"
+        home = main.componentDictionary[name]['COMPONENTS'].get( "onos_home", None )
+        main.componentDictionary[name]['home'] = self.checkOptions( home, None )
+        main.componentDictionary[name]['connect_order'] = str( int( main.componentDictionary[name]['connect_order'] ) + 1 )
+        main.log.debug( main.componentDictionary[name] )
 
     def createBenchComponent( self, name ):
         """
@@ -369,6 +366,7 @@
             main.log.error( name + " component already exists!" )
             main.cleanAndExit()
 
+
     def setServerOptions( self, name, ipAddress ):
         """
         Parse the cluster options to create an ONOS "server" component with the given name
@@ -385,6 +383,7 @@
         main.componentDictionary[name]['connect_order'] = str( int( main.componentDictionary[name]['connect_order'] ) + 1 )
         main.log.debug( main.componentDictionary[name] )
 
+
     def createServerComponent( self, name, ipAddress ):
         """
         Creates a new onos "server" component. This will be connected to the
@@ -415,6 +414,7 @@
             main.log.error( name + " component already exists!" )
             main.cleanAndExit()
 
+
     def createComponents( self, prefix='', createServer=True ):
         """
         Creates a CLI and REST component for each nodes in the cluster
@@ -425,7 +425,7 @@
         benchPrefix = prefix + "bench"
         serverPrefix = prefix + "server"
         for i in xrange( 1, self.maxNodes + 1 ):
-            cliName = cliPrefix + str( i )
+            cliName = cliPrefix + str( i  )
             restName = restPrefix + str( i )
             benchName = benchPrefix + str( i )
             serverName = serverPrefix + str( i )
diff --git a/TestON/drivers/common/cli/onosdriver.py b/TestON/drivers/common/cli/onosdriver.py
old mode 100644
new mode 100755
index dadc78d..eed4057
--- a/TestON/drivers/common/cli/onosdriver.py
+++ b/TestON/drivers/common/cli/onosdriver.py
@@ -30,7 +30,6 @@
 from requests.models import Response
 from drivers.common.clidriver import CLI
 
-
 class OnosDriver( CLI ):
 
     def __init__( self ):
@@ -73,9 +72,10 @@
                     break
                 self.maxNodes = None
 
-            if self.maxNodes is None or self.maxNodes == "":
+            if self.maxNodes == None or self.maxNodes == "":
                 self.maxNodes = 100
 
+
             # Grabs all OC environment variables based on max number of nodes
             self.onosIps = {}  # Dictionary of all possible ONOS ip
 
@@ -110,7 +110,7 @@
                 main.log.error( "Uncaught exception: " + str( inst ) )
 
             try:
-                if os.getenv( str( self.ip_address ) ) is not None:
+                if os.getenv( str( self.ip_address ) ) != None:
                     self.ip_address = os.getenv( str( self.ip_address ) )
                 else:
                     main.log.info( self.name +
@@ -172,7 +172,7 @@
         Returns milliseconds since epoch
 
         When checking multiple nodes in a for loop,
-        around a hundred milliseconds of difference ( ascending ) is
+        around a hundred milliseconds of difference (ascending) is
         generally acceptable due to calltime of the function.
         Few seconds, however, is not and it means clocks
         are off sync.
@@ -213,7 +213,7 @@
                     ret = main.FALSE
                     continue  # expect again
                 elif i == 2:
-                    # File( s ) not found
+                    # File(s) not found
                     main.log.error( "onos-package could not find a file or directory" )
                     ret = main.FALSE
                     continue  # expect again
@@ -305,7 +305,7 @@
                     'Runtime\sEnvironment\sto\scontinue',
                     'BUILD\sFAILURE',
                     'BUILD\sSUCCESS',
-                    'onos' + self.prompt,  # TODO: fix this to be more generic?
+                    'onos' + self.prompt,  #TODO: fix this to be more generic?
                     'ONOS' + self.prompt,
                     pexpect.TIMEOUT ], mciTimeout )
                 if i == 0:
@@ -417,7 +417,7 @@
             self.handle.expect( self.prompt )
             cmd = "git pull"
             if comp1 != "":
-                cmd += ' ' + comp1
+                cmd += ' ' +  comp1
             if fastForward:
                 cmd += ' ' + " --ff-only"
             self.handle.sendline( cmd )
@@ -498,7 +498,7 @@
                     self.handle.expect( self.prompt )
                     return main.ERROR
                 except Exception:
-                    main.log.exception( "Couldn't exit editor prompt!" )
+                    main.log.exception( "Couldn't exit editor prompt!")
 
                     main.cleanAndExit()
             elif i == 10:  # In the middle of a merge commit
@@ -652,11 +652,11 @@
             self.handle.sendline( "git name-rev --name-only HEAD" )
             self.handle.expect( "git name-rev --name-only HEAD" )
             self.handle.expect( self.prompt )
-            lines = self.handle.before.splitlines()
-            if lines[ 1 ] == "master" or re.search( "^onos-\d+(\.\d+)+$", lines[ 1 ] ):
-                return lines[ 1 ]
+            lines =  self.handle.before.splitlines()
+            if lines[1] == "master" or re.search( "^onos-\d+(\.\d+)+$", lines[1] ):
+                return lines[1]
             else:
-                main.log.info( lines[ 1 ] )
+                main.log.info( lines[1] )
                 return "unexpected ONOS branch"
         except pexpect.EOF:
             main.log.error( self.name + ": EOF exception found" )
@@ -702,11 +702,11 @@
                     # as xml specific tags that cause errors
                     line = line.replace( "<", "[" )
                     line = line.replace( ">", "]" )
-                    # main.log.wiki( "\t" + line )
+                    #main.log.wiki( "\t" + line )
                     main.log.wiki( line + "<br /> " )
                     main.log.summary( line )
                 main.log.wiki( "</blockquote>" )
-                main.log.summary( "\n" )
+                main.log.summary("\n")
             return lines[ 2 ]
         except pexpect.EOF:
             main.log.error( self.name + ": EOF exception found" )
@@ -733,7 +733,7 @@
                   supported currently
             * ONOS IP addresses ( onosIpAddrs )
                 - Must be passed in as last arguments
-            * ONOS USER ( onosUser )
+            * ONOS USER (onosUser)
                 - optional argument to set ONOS_USER environment variable
 
         NOTE: Assumes cells are located at:
@@ -837,14 +837,14 @@
                 handleAfter = self.handle.after
                 # Get the rest of the handle
                 self.handle.expect( self.prompt )
-                time.sleep( 10 )
+                time.sleep(10)
                 handleMore = self.handle.before
 
                 cell_result = handleBefore + handleAfter + handleMore
-                # print cell_result
+                #print cell_result
                 if( re.search( "No such cell", cell_result ) ):
                     main.log.error( "Cell call returned: " + handleBefore +
-                                    handleAfter + handleMore )
+                               handleAfter + handleMore )
 
                     main.cleanAndExit()
                 return main.TRUE
@@ -886,7 +886,7 @@
         Uses 'onos <node-ip> cfg set' to change a parameter value of an
         application.
 
-        ex )
+        ex)
             onos 10.0.0.1 cfg set org.onosproject.myapp appSetting 1
         ONOSIp = '10.0.0.1'
         configName = 'org.onosproject.myapp'
@@ -899,11 +899,11 @@
             self.handle.sendline( "" )
             self.handle.expect( ":~" )
             self.handle.sendline( cfgStr )
-            self.handle.expect( "cfg set" )
+            self.handle.expect("cfg set")
             self.handle.expect( ":~" )
 
-            paramValue = configParam.split( " " )[ 1 ]
-            paramName = configParam.split( " " )[ 0 ]
+            paramValue = configParam.split(" ")[1]
+            paramName = configParam.split(" ")[0]
 
             checkStr = 'onos {} cfg get " {} {} " '.format( ONOSIp, configName, paramName )
 
@@ -911,7 +911,7 @@
             self.handle.expect( ":~" )
 
             if "value=" + paramValue + "," in self.handle.before:
-                main.log.info( "cfg " + configName + " successfully set to " + configParam )
+                main.log.info("cfg " + configName + " successfully set to " + configParam)
                 return main.TRUE
         except pexpect.ExceptionPexpect as e:
             main.log.exception( self.name + ": Pexpect exception found: " )
@@ -967,7 +967,7 @@
             main.log.exception( self.name + ": Uncaught exception!" )
             main.cleanAndExit()
 
-    def onosSecureSSH( self, userName="onos", userPWD="rocks", node="" ):
+    def onosSecureSSH( self, userName="onos", userPWD="rocks", node=""):
         """
         Enables secure access to ONOS console
         by removing default users & keys.
@@ -976,6 +976,7 @@
 
         Returns: main.TRUE on success and main.FALSE on failure
         """
+
         try:
             self.handle.sendline( "" )
             self.handle.expect( self.prompt )
@@ -984,9 +985,9 @@
             # NOTE: this timeout may need to change depending on the network
             # and size of ONOS
             # TODO: Handle the other possible error
-            i = self.handle.expect( [ "Network\sis\sunreachable",
-                                      self.prompt,
-                                      pexpect.TIMEOUT ], timeout=180 )
+            i = self.handle.expect([ "Network\sis\sunreachable",
+                                     self.prompt,
+                                     pexpect.TIMEOUT ], timeout=180 )
             if i == 0:
                 # can't reach ONOS node
                 main.log.warn( "Network is unreachable" )
@@ -995,8 +996,8 @@
             elif i == 1:
                 # Process started
                 main.log.info(
-                    "Secure SSH performed on " +
-                    node )
+                "Secure SSH performed on " +
+                node)
                 return main.TRUE
         except pexpect.EOF:
             main.log.error( self.name + ": EOF exception found" )
@@ -1006,6 +1007,7 @@
             main.log.exception( self.name + ": Uncaught exception!" )
             main.cleanAndExit()
 
+
     def onosInstall( self, options="-f", node="" ):
         """
         Installs ONOS bits on the designated cell machine.
@@ -1327,7 +1329,7 @@
     def isup( self, node="", timeout=240 ):
         """
         Run's onos-wait-for-start which only returns once ONOS is at run
-        level 100( ready for use )
+        level 100(ready for use)
 
         Returns: main.TRUE if ONOS is running and main.FALSE on timeout
         """
@@ -1335,7 +1337,7 @@
             self.handle.sendline( "onos-wait-for-start " + node )
             self.handle.expect( "onos-wait-for-start" )
             # NOTE: this timeout is arbitrary"
-            i = self.handle.expect( [ self.prompt, pexpect.TIMEOUT ], timeout )
+            i = self.handle.expect([self.prompt, pexpect.TIMEOUT], timeout)
             if i == 0:
                 main.log.info( self.name + ": " + node + " is up" )
                 return main.TRUE
@@ -1374,6 +1376,7 @@
             main.log.exception( self.name + ": Uncaught exception!" )
             main.cleanAndExit()
 
+
     def pushTestIntentsShell(
             self,
             dpidSrc,
@@ -1515,7 +1518,7 @@
             self.handle.expect( self.prompt )
             self.handle.sendline( "" )
             if grepOptions:
-                grepStr = "grep " + str( grepOptions )
+                grepStr = "grep "+str(grepOptions)
             else:
                 grepStr = "grep"
 
@@ -1524,12 +1527,12 @@
                 str( interface ) +
                 " -t e | " +
                 grepStr + " --line-buffered \"" +
-                str( grep ) +
+                str(grep) +
                 "\" >" +
                 directory +
                 " &" )
-            self.handle.sendline( cmd )
-            main.log.info( cmd )
+            self.handle.sendline(cmd)
+            main.log.info(cmd)
             self.handle.expect( "Capturing on" )
             self.handle.sendline( "\n" )
             self.handle.expect( self.prompt )
@@ -1597,7 +1600,7 @@
             main.log.exception( self.name + ": Uncaught exception!" )
             main.cleanAndExit()
 
-    def dumpONOSCmd( self, ONOSIp, CMD, destDir, filename, options="" ):
+    def dumpONOSCmd(self, ONOSIp, CMD, destDir, filename, options=""):
         """
         Dump Cmd to a desired directory.
         For debugging purposes, you may want to use
@@ -1612,14 +1615,15 @@
             * fileName: Name of the file
             * options: Options for ONOS command
         """
+
         localtime = time.strftime( '%x %X' )
         localtime = localtime.replace( "/", "" )
         localtime = localtime.replace( " ", "_" )
         localtime = localtime.replace( ":", "" )
         if destDir[ -1: ] != "/":
             destDir += "/"
-        cmd = CMD + " " + options + " > " + str( destDir ) + str( filename ) + localtime
-        return self.onosCli( ONOSIp, cmd )
+        cmd=CMD + " " + options + " > " + str( destDir ) + str( filename ) + localtime
+        return self.onosCli(ONOSIp, cmd)
 
     def cpLogsToDir( self, logToCopy,
                      destDir, copyFileName="" ):
@@ -1674,7 +1678,7 @@
         except Exception:
             main.log.exception( "Copying files failed" )
 
-    def checkLogs( self, onosIp, restart=False ):
+    def checkLogs( self, onosIp, restart=False):
         """
         runs onos-check-logs on the given onos node
         If restart is True, use the old version of onos-check-logs which
@@ -1737,17 +1741,17 @@
                      direction='INPUT', rule='DROP', states=True ):
         """
         Description:
-            add or remove iptables rule to DROP ( default ) packets from
+            add or remove iptables rule to DROP (default) packets from
             specific IP and PORT
         Usage:
-        * specify action ( 'add' or 'remove' )
+        * specify action ('add' or 'remove')
           when removing, pass in the same argument as you would add. It will
           delete that specific rule.
         * specify the ip to block
-        * specify the destination port to block ( defaults to all ports )
-        * optional packet type to block ( default tcp )
-        * optional iptables rule ( default DROP )
-        * optional direction to block ( default 'INPUT' )
+        * specify the destination port to block (defaults to all ports)
+        * optional packet type to block (default tcp)
+        * optional iptables rule (default DROP)
+        * optional direction to block (default 'INPUT')
         * States boolean toggles adding all supported tcp states to the
           firewall rule
         Returns:
@@ -1758,6 +1762,7 @@
         * This function uses root privilege iptables command which may result
           in unwanted network errors. USE WITH CAUTION
         """
+
         # NOTE*********
         #   The strict checking methods of this driver function is intentional
         #   to discourage any misuse or error of iptables, which can cause
@@ -1799,13 +1804,14 @@
             cmd = "sudo iptables " + actionFlag + " " +\
                   direction +\
                   " -s " + str( ip )
+                  # " -p " + str( packet_type ) +\
             if packet_type:
                 cmd += " -p " + str( packet_type )
             if port:
                 cmd += " --dport " + str( port )
             if states:
                 cmd += " -m state --state="
-                # FIXME- Allow user to configure which states to block
+                #FIXME- Allow user to configure which states to block
                 cmd += "INVALID,ESTABLISHED,NEW,RELATED,UNTRACKED"
             cmd += " -j " + str( rule )
 
@@ -1834,10 +1840,10 @@
             main.log.exception( self.name + ": Uncaught exception!" )
             main.cleanAndExit()
 
-    def detailed_status( self, log_filename ):
+    def detailed_status(self, log_filename):
         """
         This method is used by STS to check the status of the controller
-        Reports RUNNING, STARTING, STOPPED, FROZEN, ERROR ( and reason )
+        Reports RUNNING, STARTING, STOPPED, FROZEN, ERROR (and reason)
         """
         import re
         try:
@@ -1878,8 +1884,8 @@
             main.log.exception( self.name + ": Uncaught exception!" )
             main.cleanAndExit()
 
-    def createLinkGraphFile( self, benchIp, ONOSIpList, deviceCount ):
-        """
+    def createLinkGraphFile( self, benchIp, ONOSIpList, deviceCount):
+        '''
             Create/formats the LinkGraph.cfg file based on arguments
                 -only creates a linear topology and connects islands
                 -evenly distributes devices
@@ -1888,113 +1894,115 @@
                 ONOSIpList - list of all of the node IPs to be used
 
                 deviceCount - number of switches to be assigned
-        """
-        main.log.info( "Creating link graph configuration file." )
+        '''
+        main.log.info("Creating link graph configuration file." )
         linkGraphPath = self.home + "/tools/package/etc/linkGraph.cfg"
         tempFile = "/tmp/linkGraph.cfg"
 
-        linkGraph = open( tempFile, 'w+' )
-        linkGraph.write( "# NullLinkProvider topology description (config file).\n" )
-        linkGraph.write( "# The NodeId is only added if the destination is another node's device.\n" )
-        linkGraph.write( "# Bugs: Comments cannot be appended to a line to be read.\n" )
+        linkGraph = open(tempFile, 'w+')
+        linkGraph.write("# NullLinkProvider topology description (config file).\n")
+        linkGraph.write("# The NodeId is only added if the destination is another node's device.\n")
+        linkGraph.write("# Bugs: Comments cannot be appended to a line to be read.\n")
 
-        clusterCount = len( ONOSIpList )
+        clusterCount = len(ONOSIpList)
 
-        if isinstance( deviceCount, int ) or isinstance( deviceCount, str ):
-            deviceCount = int( deviceCount )
-            switchList = [ 0 ] * ( clusterCount + 1 )
-            baselineSwitchCount = deviceCount / clusterCount
+        if type(deviceCount) is int or type(deviceCount) is str:
+            deviceCount = int(deviceCount)
+            switchList = [0]*(clusterCount+1)
+            baselineSwitchCount = deviceCount/clusterCount
 
-            for node in range( 1, clusterCount + 1 ):
-                switchList[ node ] = baselineSwitchCount
+            for node in range(1, clusterCount + 1):
+                switchList[node] = baselineSwitchCount
 
-            for node in range( 1, ( deviceCount % clusterCount ) + 1 ):
-                switchList[ node ] += 1
+            for node in range(1, (deviceCount%clusterCount)+1):
+                switchList[node] += 1
 
-        if isinstance( deviceCount, list ):
-            main.log.info( "Using provided device distribution" )
-            switchList = [ 0 ]
+        if type(deviceCount) is list:
+            main.log.info("Using provided device distribution")
+            switchList = [0]
             for i in deviceCount:
-                switchList.append( int( i ) )
+                switchList.append(int(i))
 
-        tempList = [ '0' ]
-        tempList.extend( ONOSIpList )
+        tempList = ['0']
+        tempList.extend(ONOSIpList)
         ONOSIpList = tempList
 
         myPort = 6
         lastSwitch = 0
-        for node in range( 1, clusterCount + 1 ):
-            if switchList[ node ] == 0:
+        for node in range(1, clusterCount+1):
+            if switchList[node] == 0:
                 continue
 
-            linkGraph.write( "graph " + ONOSIpList[ node ] + " {\n" )
+            linkGraph.write("graph " + ONOSIpList[node] + " {\n")
 
             if node > 1:
-                # connect to last device on previous node
-                line = ( "\t0:5 -> " + str( lastSwitch ) + ":6:" + lastIp + "\n" )  # ONOSIpList[ node-1 ]
-                linkGraph.write( line )
+                #connect to last device on previous node
+                line = ("\t0:5 -> " + str(lastSwitch) + ":6:" + lastIp + "\n")     #ONOSIpList[node-1]
+                linkGraph.write(line)
 
             lastSwitch = 0
-            for switch in range( 0, switchList[ node ] - 1 ):
+            for switch in range (0, switchList[node]-1):
                 line = ""
-                line = ( "\t" + str( switch ) + ":" + str( myPort ) )
+                line = ("\t" + str(switch) + ":" + str(myPort))
                 line += " -- "
-                line += ( str( switch + 1 ) + ":" + str( myPort - 1 ) + "\n" )
-                linkGraph.write( line )
-                lastSwitch = switch + 1
-            lastIp = ONOSIpList[ node ]
+                line += (str(switch+1) + ":" + str(myPort-1) + "\n")
+                linkGraph.write(line)
+                lastSwitch = switch+1
+            lastIp = ONOSIpList[node]
 
-            # lastSwitch += 1
-            if node < ( clusterCount ):
-                # connect to first device on the next node
-                line = ( "\t" + str( lastSwitch ) + ":6 -> 0:5:" + ONOSIpList[ node + 1 ] + "\n" )
-                linkGraph.write( line )
+            #lastSwitch += 1
+            if node < (clusterCount):
+                #connect to first device on the next node
+                line = ("\t" + str(lastSwitch) + ":6 -> 0:5:" + ONOSIpList[node+1] + "\n")
+                linkGraph.write(line)
 
-            linkGraph.write( "}\n" )
+            linkGraph.write("}\n")
         linkGraph.close()
 
-        # SCP
-        os.system( "scp " + tempFile + " " + self.user_name + "@" + benchIp + ":" + linkGraphPath )
-        main.log.info( "linkGraph.cfg creation complete" )
+        #SCP
+        os.system( "scp " + tempFile + " " + self.user_name + "@" + benchIp + ":" + linkGraphPath)
+        main.log.info("linkGraph.cfg creation complete")
 
-    def configNullDev( self, ONOSIpList, deviceCount, numPorts=10 ):
-        """
+    def configNullDev( self, ONOSIpList, deviceCount, numPorts=10):
+
+        '''
             ONOSIpList = list of Ip addresses of nodes switches will be devided amongst
             deviceCount = number of switches to distribute, or list of values to use as custom distribution
             numPorts = number of ports per device. Defaults to 10 both in this function and in ONOS. Optional arg
-        """
-        main.log.info( "Configuring Null Device Provider" )
-        clusterCount = len( ONOSIpList )
+        '''
+
+        main.log.info("Configuring Null Device Provider" )
+        clusterCount = len(ONOSIpList)
 
         try:
 
-            if isinstance( deviceCount, int ) or isinstance( deviceCount, str ):
-                main.log.info( "Creating device distribution" )
-                deviceCount = int( deviceCount )
-                switchList = [ 0 ] * ( clusterCount + 1 )
-                baselineSwitchCount = deviceCount / clusterCount
+            if type(deviceCount) is int or type(deviceCount) is str:
+                main.log.info("Creating device distribution")
+                deviceCount = int(deviceCount)
+                switchList = [0]*(clusterCount+1)
+                baselineSwitchCount = deviceCount/clusterCount
 
-                for node in range( 1, clusterCount + 1 ):
-                    switchList[ node ] = baselineSwitchCount
+                for node in range(1, clusterCount + 1):
+                    switchList[node] = baselineSwitchCount
 
-                for node in range( 1, ( deviceCount % clusterCount ) + 1 ):
-                    switchList[ node ] += 1
+                for node in range(1, (deviceCount%clusterCount)+1):
+                    switchList[node] += 1
 
-            if isinstance( deviceCount, list ):
-                main.log.info( "Using provided device distribution" )
+            if type(deviceCount) is list:
+                main.log.info("Using provided device distribution")
 
-                if len( deviceCount ) == clusterCount:
-                    switchList = [ '0' ]
-                    switchList.extend( deviceCount )
+                if len(deviceCount) == clusterCount:
+                    switchList = ['0']
+                    switchList.extend(deviceCount)
 
-                if len( deviceCount ) == ( clusterCount + 1 ):
-                    if deviceCount[ 0 ] == '0' or deviceCount[ 0 ] == 0:
+                if len(deviceCount) == (clusterCount + 1):
+                    if deviceCount[0] == '0' or deviceCount[0] == 0:
                         switchList = deviceCount
 
-                assert len( switchList ) == ( clusterCount + 1 )
+                assert len(switchList) == (clusterCount + 1)
 
         except AssertionError:
-            main.log.error( "Bad device/Ip list match" )
+            main.log.error( "Bad device/Ip list match")
         except TypeError:
             main.log.exception( self.name + ": Object not as expected" )
             return None
@@ -2002,76 +2010,80 @@
             main.log.exception( self.name + ": Uncaught exception!" )
             main.cleanAndExit()
 
-        ONOSIp = [ 0 ]
-        ONOSIp.extend( ONOSIpList )
 
-        devicesString = "devConfigs = "
-        for node in range( 1, len( ONOSIp ) ):
-            devicesString += ( ONOSIp[ node ] + ":" + str( switchList[ node ] ) )
+        ONOSIp = [0]
+        ONOSIp.extend(ONOSIpList)
+
+        devicesString  = "devConfigs = "
+        for node in range(1, len(ONOSIp)):
+            devicesString += (ONOSIp[node] + ":" + str(switchList[node] ))
             if node < clusterCount:
-                devicesString += ( "," )
+                devicesString += (",")
 
         try:
-            self.handle.sendline( "onos $OC1 cfg set org.onosproject.provider.nil.device.impl.NullDeviceProvider devConfigs " + devicesString )
-            self.handle.expect( ":~" )
-            self.handle.sendline( "onos $OC1 cfg set org.onosproject.provider.nil.device.impl.NullDeviceProvider numPorts " + str( numPorts ) )
-            self.handle.expect( ":~" )
+            self.handle.sendline("onos $OC1 cfg set org.onosproject.provider.nil.device.impl.NullDeviceProvider devConfigs " + devicesString )
+            self.handle.expect(":~")
+            self.handle.sendline("onos $OC1 cfg set org.onosproject.provider.nil.device.impl.NullDeviceProvider numPorts " + str(numPorts) )
+            self.handle.expect(":~")
 
-            for i in range( 10 ):
-                self.handle.sendline( "onos $OC1 cfg get org.onosproject.provider.nil.device.impl.NullDeviceProvider" )
-                self.handle.expect( ":~" )
+            for i in range(10):
+                self.handle.sendline("onos $OC1 cfg get org.onosproject.provider.nil.device.impl.NullDeviceProvider")
+                self.handle.expect(":~")
                 verification = self.handle.before
-                if ( " value=" + str( numPorts ) ) in verification and ( " value=" + devicesString ) in verification:
+                if (" value=" + str(numPorts)) in verification and (" value=" + devicesString) in verification:
                     break
                 else:
-                    time.sleep( 1 )
+                    time.sleep(1)
 
-            assert ( "value=" + str( numPorts ) ) in verification and ( " value=" + devicesString ) in verification
+            assert ("value=" + str(numPorts)) in verification and (" value=" + devicesString) in verification
 
         except AssertionError:
-            main.log.error( "Incorrect Config settings: " + verification )
+            main.log.error("Incorrect Config settings: " + verification)
         except Exception:
             main.log.exception( self.name + ": Uncaught exception!" )
             main.cleanAndExit()
 
-    def configNullLink( self, fileName="/opt/onos/apache-karaf-3.0.3/etc/linkGraph.cfg", eventRate=0 ):
-        """
+    def configNullLink( self,fileName="/opt/onos/apache-karaf-3.0.3/etc/linkGraph.cfg", eventRate=0):
+        '''
                 fileName default is currently the same as the default on ONOS, specify alternate file if
                 you want to use a different topology file than linkGraph.cfg
-        """
-        try:
-            self.handle.sendline( "onos $OC1 cfg set org.onosproject.provider.nil.link.impl.NullLinkProvider eventRate " + str( eventRate ) )
-            self.handle.expect( ":~" )
-            self.handle.sendline( "onos $OC1 cfg set org.onosproject.provider.nil.link.impl.NullLinkProvider cfgFile " + fileName )
-            self.handle.expect( ":~" )
+        '''
 
-            for i in range( 10 ):
-                self.handle.sendline( "onos $OC1 cfg get org.onosproject.provider.nil.link.impl.NullLinkProvider" )
-                self.handle.expect( ":~" )
+
+        try:
+            self.handle.sendline("onos $OC1 cfg set org.onosproject.provider.nil.link.impl.NullLinkProvider eventRate " + str(eventRate))
+            self.handle.expect(":~")
+            self.handle.sendline("onos $OC1 cfg set org.onosproject.provider.nil.link.impl.NullLinkProvider cfgFile " + fileName )
+            self.handle.expect(":~")
+
+            for i in range(10):
+                self.handle.sendline("onos $OC1 cfg get org.onosproject.provider.nil.link.impl.NullLinkProvider")
+                self.handle.expect(":~")
                 verification = self.handle.before
-                if ( " value=" + str( eventRate ) ) in verification and ( " value=" + fileName ) in verification:
+                if (" value=" + str(eventRate)) in verification and (" value=" + fileName) in verification:
                     break
                 else:
-                    time.sleep( 1 )
+                    time.sleep(1)
 
-            assert ( "value=" + str( eventRate ) ) in verification and ( " value=" + fileName ) in verification
+            assert ("value=" + str(eventRate)) in verification and (" value=" + fileName) in verification
 
         except pexpect.EOF:
             main.log.error( self.name + ": EOF exception found" )
             main.log.error( self.name + ":    " + self.handle.before )
             main.cleanAndExit()
         except AssertionError:
-            main.log.info( "Settings did not post to ONOS" )
-            main.log.error( varification )
+            main.log.info("Settings did not post to ONOS")
+            main.log.error(verification)
         except Exception:
             main.log.exception( self.name + ": Uncaught exception!" )
-            main.log.error( varification )
+            main.log.error(verification)
             main.cleanAndExit()
 
     def getOnosIps( self ):
         """
             Get all onos IPs stored in
         """
+
         return sorted( self.onosIps.values() )
 
     def listLog( self, nodeIp ):
@@ -2124,16 +2136,16 @@
         """
         try:
             main.log.info( " Log Report for {} ".format( nodeIp ).center( 70, '=' ) )
-            if isinstance( searchTerms, str ):
-                searchTerms = [ searchTerms ]
+            if type( searchTerms ) is str:
+                searchTerms = [searchTerms]
             numTerms = len( searchTerms )
             outputMode = outputMode.lower()
 
             totalHits = 0
             logLines = []
             for termIndex in range( numTerms ):
-                term = searchTerms[ termIndex ]
-                logLines.append( [ term ] )
+                term = searchTerms[termIndex]
+                logLines.append( [term] )
                 if startStr and endStr:
                     cmd = "onos-ssh {} \"sed -n '/{}/,/{}/p' /opt/onos/log/karaf.log | grep {}\"".format( nodeIp,
                                                                                                           startStr,
@@ -2150,7 +2162,7 @@
                     if term in line and "grep" not in line:
                         count += 1
                         if before.index( line ) > ( len( before ) - 7 ):
-                            logLines[ termIndex ].append( line )
+                            logLines[termIndex].append( line )
                 main.log.info( "{}: {}".format( term, count ) )
                 totalHits += count
                 if termIndex == numTerms - 1:
@@ -2158,10 +2170,10 @@
             if outputMode != "s":
                 outputString = ""
                 for term in logLines:
-                    outputString = term[ 0 ] + ": \n"
+                    outputString = term[0] + ": \n"
                     for line in range( 1, len( term ) ):
-                        outputString += ( "\t" + term[ line ] + "\n" )
-                    if outputString != ( term[ 0 ] + ": \n" ):
+                        outputString += ( "\t" + term[line] + "\n" )
+                    if outputString != ( term[0] + ": \n" ):
                         main.log.info( outputString )
             main.log.info( "=" * 70 )
             return totalHits
@@ -2178,7 +2190,7 @@
             main.cleanAndExit()
 
     def copyMininetFile( self, fileName, localPath, userName, ip,
-                         mnPath='~/mininet/custom/', timeout=60 ):
+                         mnPath='~/mininet/custom/', timeout = 60 ):
         """
         Description:
             Copy mininet topology file from dependency folder in the test folder
@@ -2194,6 +2206,7 @@
             Return main.TRUE if successfully copied the file otherwise
             return main.FALSE
         """
+
         try:
             cmd = "scp " + localPath + fileName + " " + userName + "@" + \
                   str( ip ) + ":" + mnPath + fileName
@@ -2230,22 +2243,22 @@
             main.log.error( self.name + ":     " + self.handle.before )
             main.cleanAndExit()
 
-    def jvmSet( self, memory=8 ):
+    def jvmSet(self, memory=8):
 
         import os
 
-        homeDir = os.path.expanduser( '~' )
+        homeDir = os.path.expanduser('~')
         filename = "/onos/tools/package/bin/onos-service"
 
-        serviceConfig = open( homeDir + filename, 'w+' )
-        serviceConfig.write( "#!/bin/bash\n " )
-        serviceConfig.write( "#------------------------------------- \n " )
-        serviceConfig.write( "# Starts ONOS Apache Karaf container\n " )
-        serviceConfig.write( "#------------------------------------- \n " )
-        serviceConfig.write( "#export JAVA_HOME=${JAVA_HOME:-/usr/lib/jvm/java-7-openjdk-amd64/}\n " )
-        serviceConfig.write( """export JAVA_OPTS="${ JAVA_OPTS:--Xms""" + str(memory) + "G -Xmx" + str(memory) + """G }" \n """ )
-        serviceConfig.write( "[ -d $ONOS_HOME ] && cd $ONOS_HOME || ONOS_HOME=$(dirname $0)/..\n" )
-        serviceConfig.write( """${ONOS_HOME}/apache-karaf-$KARAF_VERSION/bin/karaf "$@" \n """ )
+        serviceConfig = open(homeDir + filename, 'w+')
+        serviceConfig.write("#!/bin/bash\n ")
+        serviceConfig.write("#------------------------------------- \n ")
+        serviceConfig.write("# Starts ONOS Apache Karaf container\n ")
+        serviceConfig.write("#------------------------------------- \n ")
+        serviceConfig.write("#export JAVA_HOME=${JAVA_HOME:-/usr/lib/jvm/java-7-openjdk-amd64/}\n ")
+        serviceConfig.write("""export JAVA_OPTS="${JAVA_OPTS:--Xms""" + str(memory) + "G -Xmx" + str(memory) + """G}" \n """)
+        serviceConfig.write("[ -d $ONOS_HOME ] && cd $ONOS_HOME || ONOS_HOME=$(dirname $0)/..\n")
+        serviceConfig.write("""${ONOS_HOME}/apache-karaf-$KARAF_VERSION/bin/karaf "$@" \n """)
         serviceConfig.close()
 
     def createDBFile( self, testData ):
@@ -2254,7 +2267,7 @@
         DBString = ""
 
         for item in testData:
-            if isinstance( item, string ):
+            if type( item ) is str:
                 item = "'" + item + "'"
             if testData.index( item ) < len( testData - 1 ):
                 item += ","
@@ -2266,15 +2279,15 @@
 
     def verifySummary( self, ONOSIp, *deviceCount ):
 
-        self.handle.sendline( "onos " + ONOSIp + " summary" )
+        self.handle.sendline( "onos " + ONOSIp  + " summary" )
         self.handle.expect( ":~" )
 
         summaryStr = self.handle.before
         print "\nSummary\n==============\n" + summaryStr + "\n\n"
 
-        # passed = "SCC(s)=1" in summaryStr
-        # if deviceCount:
-        #    passed = "devices=" + str( deviceCount ) + "," not in summaryStr
+        #passed = "SCC(s)=1" in summaryStr
+        #if deviceCount:
+        #    passed = "devices=" + str(deviceCount) + "," not in summaryStr
 
         passed = False
         if "SCC(s)=1," in summaryStr:
@@ -2285,7 +2298,7 @@
 
         if deviceCount:
             print" ============================="
-            checkStr = "devices=" + str( deviceCount[ 0 ] ) + ","
+            checkStr = "devices=" + str( deviceCount[0] ) + ","
             print "Checkstr: " + checkStr
             if checkStr not in summaryStr:
                 passed = False
@@ -2317,7 +2330,7 @@
             # IF self.ip_address is an ip address and matches
             #    self.nicAddr: return self.ip_address
             if match:
-                curIp = match.group( 0 )
+                curIp = match.group(0)
                 if nicPat:
                     nicMatch = re.search( nicPat, curIp )
                     if nicMatch:
@@ -2332,7 +2345,7 @@
             ips = re.findall( ifPat, raw )
             if iface:
                 if ips:
-                    ip = ips[ 0 ]
+                    ip = ips[0]
                     self.ip_address = ip
                     return ip
                 else:
@@ -2346,8 +2359,8 @@
                         return ip
             else:  # If only one non-localhost ip, return that
                 tmpList = [ ip for ip in ips if ip is not LOCALHOST ]
-                if len( tmpList ) == 1:
-                    curIp = tmpList[ 0 ]
+                if len(tmpList) == 1:
+                    curIp = tmpList[0]
                     self.ip_address = curIp
                     return curIp
             # Either no non-localhost IPs, or more than 1
@@ -2361,9 +2374,9 @@
             main.log.exception( "Uncaught exception" )
 
     def startBasicONOS( self, nodeList, opSleep=60, onosStartupSleep=30, onosUser="sdn" ):
-        """
+        '''
         Start onos cluster with defined nodes, but only with drivers app
-        """
+        '''
         import time
 
         self.createCellFile( self.ip_address,
@@ -2396,8 +2409,8 @@
 
         onosStatus = True
         for nd in nodeList:
-            onosStatus = onosStatus & self.isup( node=nd )
-            # print "onosStatus is: " + str( onosStatus )
+            onosStatus = onosStatus & self.isup( node = nd )
+            #print "onosStatus is: " + str( onosStatus )
 
         return main.TRUE if onosStatus else main.FALSE
 
diff --git a/TestON/drivers/common/cli/ovsdbdriver.py b/TestON/drivers/common/cli/ovsdbdriver.py
index 7c2a43d..636a7b2 100644
--- a/TestON/drivers/common/cli/ovsdbdriver.py
+++ b/TestON/drivers/common/cli/ovsdbdriver.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python
 
 """
-Copyright 2015 Open Networking Foundation ( ONF )
+Copyright 2015 Open Networking Foundation (ONF)
 
 Please refer questions to either the onos test mailing list at <onos-test@onosproject.org>,
 the System Testing Plans and Results wiki page at <https://wiki.onosproject.org/x/voMg>,
@@ -10,7 +10,7 @@
     TestON is free software: you can redistribute it and/or modify
     it under the terms of the GNU General Public License as published by
     the Free Software Foundation, either version 2 of the License, or
-    ( at your option ) any later version.
+    (at your option) any later version.
 
     TestON is distributed in the hope that it will be useful,
     but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -21,6 +21,7 @@
     along with TestON.  If not, see <http://www.gnu.org/licenses/>.
 
 """
+
 """
 drivers for ovsdb commands.
 
@@ -50,11 +51,11 @@
     def connect( self, **connectargs ):
         try:
             for key in connectargs:
-                vars( self )[ key ] = connectargs[ key ]
+                vars( self)[ key ] = connectargs[ key ]
 
             self.name = self.options[ 'name' ]
-            if os.getenv( str( self.ip_address ) ) is not None:
-                self.ip_address = os.getenv( str( self.ip_address ) )
+            if os.getenv( str( self.ip_address ) ) != None:
+                self.ip_address = os.getenv(str ( self.ip_address ) )
             else:
                 main.log.info( self.name + ": Trying to connect to " +
                                self.ip_address )
@@ -62,7 +63,7 @@
                     user_name=self.user_name,
                     ip_address=self.ip_address,
                     port=self.port,
-                    pwd=self.pwd )
+                    pwd=self.pwd)
 
             if self.handle:
                 main.log.info( "Connection successful to the ovsdb node " +
@@ -90,7 +91,7 @@
         return response
 
     def setManager( self, ip, port, delaytime="5" ):
-        command = "sudo ovs-vsctl set-manager tcp:" + str( ip ) + ":" + str( port )
+        command= "sudo ovs-vsctl set-manager tcp:" + str( ip ) + ":" + str( port )
         try:
             handle = self.execute(
                 cmd=command,
@@ -101,7 +102,7 @@
                 return main.FALSE
             else:
                 main.log.info( "Ovsdb manager " + str( ip ) + " set" )
-                # delay time  for ovsdb connection create
+                #delay time  for ovsdb connection create
                 main.log.info( "Wait " + str( delaytime ) + " seconds for ovsdb connection create" )
                 time.sleep( int( delaytime ) )
                 return main.TRUE
@@ -111,7 +112,7 @@
             main.cleanAndExit()
 
     def delManager( self, delaytime="5" ):
-        command = "sudo ovs-vsctl del-manager"
+        command= "sudo ovs-vsctl del-manager"
         try:
             handle = self.execute(
                 cmd=command,
@@ -122,7 +123,7 @@
                 return main.FALSE
             else:
                 main.log.info( "Ovsdb manager delete" )
-                # delay time  for ovsdb connection delete
+                #delay time  for ovsdb connection delete
                 main.log.info( "Wait " + str( delaytime ) + " seconds for ovsdb connection delete" )
                 time.sleep( int( delaytime ) )
                 return main.TRUE
@@ -132,7 +133,7 @@
             main.cleanAndExit()
 
     def getManager( self ):
-        command = "sudo ovs-vsctl get-manager"
+        command= "sudo ovs-vsctl get-manager"
         try:
             response = self.execute(
                 cmd=command,
@@ -151,7 +152,7 @@
             The output of the command from the Linux host
             or main.FALSE on timeout
         """
-        command = "sudo ovs-vsctl list-br"
+        command= "sudo ovs-vsctl list-br"
         try:
             response = self.execute(
                 cmd=command,
@@ -173,7 +174,7 @@
             The output of the command from the Linux host
             or main.FALSE on timeout
         """
-        command = "sudo ovs-vsctl list-ports " + str( sw )
+        command= "sudo ovs-vsctl list-ports " + str( sw )
         try:
             response = self.execute(
                 cmd=command,
@@ -198,7 +199,7 @@
         try:
             response = self.execute(
                 cmd=command,
-                timeout=10 )
+                timeout=10)
             if response:
                 return response
             else:
@@ -219,7 +220,7 @@
         try:
             response = self.execute(
                 cmd=command,
-                timeout=10 )
+                timeout=10)
             if response:
                 return response
             else:
@@ -250,8 +251,8 @@
             else:
                 return main.FALSE
         except pexpect.EOF:
-            main.log.error( self.name + ": EOF exception found" )
-            main.log.error( self.name + ":     " + self.handle.before )
+            main.log.error(self.name + ": EOF exception found")
+            main.log.error(self.name + ":     " + self.handle.before)
             main.cleanAndExit()
 
     def createHost( self, hostname ):
@@ -259,7 +260,7 @@
         try:
             handle = self.execute(
                 cmd=command,
-                timeout=10 )
+                timeout=10)
             if re.search( "Error", handle ):
                 main.log.error( "Error in create host" + str( hostname ) )
                 main.log.error( handle )
@@ -268,20 +269,20 @@
                 main.log.info( "Create " + str( hostname ) + " sucess" )
                 return main.TRUE
         except pexpect.EOF:
-            main.log.error( self.name + ": EOF exception found" )
-            main.log.error( self.name + ":     " + self.handle.before )
+            main.log.error(self.name + ": EOF exception found")
+            main.log.error(self.name + ":     " + self.handle.before)
             main.cleanAndExit()
 
-    def createHostport( self, hostname="host1", hostport="host1-eth0", ovsport="port1", hostportmac="000000000001" ):
-        command = "sudo ip link add " + str( hostport ) + " type veth peer name " + str( ovsport )
-        command += ";" + "sudo ip link set " + str( hostport ) + " up"
-        command += ";" + "sudo ip link set " + str( ovsport ) + " up"
-        command += ";" + " sudo ifconfig " + str( hostport ) + " hw ether " + str( hostportmac )
-        command += ";" + " sudo ip link set " + str( hostport ) + " netns " + str( hostname )
+    def createHostport(self, hostname="host1", hostport="host1-eth0", ovsport="port1", hostportmac="000000000001" ):
+        command = "sudo ip link add " + str(hostport) +" type veth peer name " + str(ovsport)
+        command += ";" + "sudo ip link set " + str(hostport) + " up"
+        command += ";" + "sudo ip link set " + str(ovsport) + " up"
+        command += ";" +" sudo ifconfig " + str(hostport) + " hw ether " + str(hostportmac)
+        command += ";" +" sudo ip link set " + str(hostport) + " netns " + str(hostname)
         try:
             handle = self.execute(
                 cmd=command,
-                timeout=10 )
+                timeout=10)
             if re.search( "Error", handle ):
                 main.log.error( "Error in create host port " + str( hostport ) + " on " + str( hostname ) )
                 main.log.error( handle )
@@ -290,40 +291,40 @@
                 main.log.info( "Create host port " + str( hostport ) + " on " + str( hostname ) + " sucess" )
                 return main.TRUE
         except pexpect.EOF:
-            main.log.error( self.name + ": EOF exception found" )
-            main.log.error( self.name + ":     " + self.handle.before )
+            main.log.error(self.name + ": EOF exception found")
+            main.log.error(self.name + ":     " + self.handle.before)
             main.cleanAndExit()
 
-    def addPortToOvs( self, ifaceId, attachedMac, vmuuid, port="port1", ovsname="br-int" ):
-        command = "sudo ovs-vsctl add-port " + str( ovsname ) + " " + str( port )
+    def addPortToOvs(self, ifaceId, attachedMac, vmuuid, port="port1", ovsname="br-int" ):
+        command = "sudo ovs-vsctl add-port " + str(ovsname) +" " + str(port)
         if ifaceId:
-            command += " -- set Interface " + str( port ) + " external-ids:iface-id=" + str( ifaceId ) + " external-ids:iface-status=active"
+            command += " -- set Interface " + str(port) + " external-ids:iface-id=" + str(ifaceId) + " external-ids:iface-status=active"
         if attachedMac:
-            command += " external-ids:attached-mac=" + str( attachedMac )
+            command += " external-ids:attached-mac=" + str(attachedMac)
         if vmuuid:
-            command += " external-ids:vm-uuid=" + str( vmuuid )
+            command += " external-ids:vm-uuid=" + str(vmuuid)
         try:
             handle = self.execute(
                 cmd=command,
-                timeout=10 )
+                timeout=10)
             if re.search( "Error", handle ):
-                main.log.error( "Error in add port " + str( port ) + " to ovs " + str( ovsname ) )
+                main.log.error( "Error in add port " + str(port) + " to ovs " + str( ovsname ) )
                 main.log.error( handle )
                 return main.FALSE
             else:
-                main.log.info( "Add port " + str( port ) + " to ovs " + str( ovsname ) + " sucess" )
+                main.log.info( "Add port " + str(port) + " to ovs " + str( ovsname )  + " sucess" )
                 return main.TRUE
         except pexpect.EOF:
-            main.log.error( self.name + ": EOF exception found" )
-            main.log.error( self.name + ":     " + self.handle.before )
+            main.log.error(self.name + ": EOF exception found")
+            main.log.error(self.name + ":     " + self.handle.before)
             main.cleanAndExit()
 
-    def setHostportIp( self, ip, hostname="host1", hostport1="host1-eth0" ):
-        command = "sudo ip netns exec " + str( hostname ) + " ifconfig " + str( hostport1 ) + " " + str( ip )
+    def setHostportIp(self, ip, hostname="host1", hostport1="host1-eth0" ):
+        command = "sudo ip netns exec " + str(hostname) +" ifconfig " + str(hostport1) + " " + str(ip)
         try:
             handle = self.execute(
                 cmd=command,
-                timeout=10 )
+                timeout=10)
             if re.search( "Error", handle ):
                 main.log.error( "Error in set host ip for " + str( hostport1 ) + " on host " + str( hostname ) )
                 main.log.error( handle )
@@ -332,32 +333,32 @@
                 main.log.info( "Set host ip for " + str( hostport1 ) + " on host " + str( hostname ) + " sucess" )
                 return main.TRUE
         except pexpect.EOF:
-            main.log.error( self.name + ": EOF exception found" )
-            main.log.error( self.name + ":     " + self.handle.before )
+            main.log.error(self.name + ": EOF exception found")
+            main.log.error(self.name + ":     " + self.handle.before)
             main.cleanAndExit()
 
-    def hostPing( self, src, target, hostname="host1" ):
+    def hostPing(self, src, target, hostname="host1" ):
         if src:
-            command = "sudo ip netns exec " + str( hostname ) + " ping -c 1 -S " +\
-                str( src ) + " " + str( target )
+            command = "sudo ip netns exec " + str( hostname ) +" ping -c 1 -S " +\
+             str( src ) + " " + str( target )
         else:
-            command = "sudo ip netns exec " + str( hostname ) + " ping -c 1 " + str( target )
+            command = "sudo ip netns exec " + str( hostname ) +" ping -c 1 " + str( target )
         try:
-            for i in range( 1, 5 ):
+            for i in range(1,5):
                 handle = self.execute(
                     cmd=command,
-                    timeout=10 )
-                if re.search( ',\s0\%\spacket\sloss', handle ):
-                    main.log.info( self.name + ": no packets lost, host is reachable" )
+                    timeout=10)
+                if re.search(',\s0\%\spacket\sloss', handle):
+                    main.log.info(self.name + ": no packets lost, host is reachable")
                     return main.TRUE
                     break
-                time.sleep( 5 )
+                time.sleep(5)
             else:
-                main.log.info( self.name + ": packets lost, host is unreachable" )
+                main.log.info(self.name + ": packets lost, host is unreachable")
                 return main.FALSE
         except pexpect.EOF:
-            main.log.error( self.name + ": EOF exception found" )
-            main.log.error( self.name + ":     " + self.handle.before )
+            main.log.error(self.name + ": EOF exception found")
+            main.log.error(self.name + ":     " + self.handle.before)
             main.cleanAndExit()
 
     def delBr( self, sw ):
@@ -367,7 +368,7 @@
         Return:
            main.TRUE if the delete succeeded, or main.FALSE if it failed
         """
-        command = "sudo ovs-vsctl del-br " + str( sw )
+        command= "sudo ovs-vsctl del-br " + str( sw )
         try:
             response = self.execute(
                 cmd=command,
@@ -388,7 +389,7 @@
         Return:
            main.TRUE if the delete succeeded, or main.FALSE if it failed
         """
-        command = "sudo ip netns delete " + str( hostname )
+        command= "sudo ip netns delete " + str( hostname )
         try:
             response = self.execute(
                 cmd=command,
diff --git a/TestON/drivers/common/cli/quaggaclidriver.py b/TestON/drivers/common/cli/quaggaclidriver.py
index 02c6af3..e1cbeef 100644
--- a/TestON/drivers/common/cli/quaggaclidriver.py
+++ b/TestON/drivers/common/cli/quaggaclidriver.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python
 """
-Copyright 2015 Open Networking Foundation ( ONF )
+Copyright 2015 Open Networking Foundation (ONF)
 
 Please refer questions to either the onos test mailing list at <onos-test@onosproject.org>,
 the System Testing Plans and Results wiki page at <https://wiki.onosproject.org/x/voMg>,
@@ -9,7 +9,7 @@
     TestON is free software: you can redistribute it and/or modify
     it under the terms of the GNU General Public License as published by
     the Free Software Foundation, either version 2 of the License, or
-    ( at your option ) any later version.
+    (at your option) any later version.
 
     TestON is distributed in the hope that it will be useful,
     but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -19,6 +19,7 @@
     You should have received a copy of the GNU General Public License
     along with TestON.  If not, see <http://www.gnu.org/licenses/>.
 """
+
 import time
 import pexpect
 import sys
@@ -57,9 +58,9 @@
                 ip_address="1.1.1.1",
                 port=self.port,
                 pwd=self.pwd )
-        # main.log.info( "connect parameters:" + str( self.user_name ) + ";"
+        #main.log.info( "connect parameters:" + str( self.user_name ) + ";"
         #               + str( self.ip_address ) + ";" + str( self.port )
-        #               + ";" + str( self.pwd ) )
+        #               + ";" + str(self.pwd ) )
 
         if self.handle:
             # self.handle.expect( "",timeout=10 )
@@ -207,7 +208,7 @@
         routesJsonObj = json.loads( getRoutesResult )
 
         allRoutesActual = []
-        for route in routesJsonObj[ 'routes4' ]:
+        for route in routesJsonObj['routes4']:
             if 'prefix' in route:
                 if route[ 'prefix' ] == '172.16.10.0/24':
                     continue
@@ -220,7 +221,7 @@
         routesJsonObj = json.loads( getRoutesResult )
 
         allRoutesActual = []
-        for route in routesJsonObj[ 'routes4' ]:
+        for route in routesJsonObj['routes4']:
             if route[ 'prefix' ] == '172.16.10.0/24':
                 continue
             allRoutesActual.append(
@@ -236,10 +237,10 @@
         intentsJsonObj = json.loads( getIntentsResult )
 
         for intent in intentsJsonObj:
-            # if intent[ 'appId' ] != "org.onosproject.sdnip":
+            #if intent[ 'appId' ] != "org.onosproject.sdnip":
             #    continue
             if intent[ 'type' ] == "MultiPointToSinglePointIntent" \
-                    and intent[ 'state' ] == 'INSTALLED':
+            and intent[ 'state' ] == 'INSTALLED':
                 egress = str( intent[ 'egress' ][ 'device' ] ) + ":" \
                     + str( intent[ 'egress' ][ 'port' ] )
                 ingress = []
@@ -265,7 +266,7 @@
         num = 0
         for intent in intentsJsonObj:
             if intent[ 'type' ] == "MultiPointToSinglePointIntent" \
-                    and intent[ 'state' ] == 'INSTALLED':
+            and intent[ 'state' ] == 'INSTALLED':
                 num = num + 1
         return num
 
@@ -275,7 +276,7 @@
         num = 0
         for intent in intentsJsonObj:
             if intent[ 'type' ] == "PointToPointIntent" \
-                    and intent[ 'state' ] == 'INSTALLED':
+            and intent[ 'state' ] == 'INSTALLED':
                 num = num + 1
         return num
 
@@ -287,10 +288,10 @@
         intentsJsonObj = json.loads( getIntentsResult )
 
         for intent in intentsJsonObj:
-            # if intent[ 'appId' ] != "org.onosproject.sdnip":
+            #if intent[ 'appId' ] != "org.onosproject.sdnip":
             #    continue
             if intent[ 'type' ] == "PointToPointIntent" \
-                    and "protocol=6" in str( intent[ 'selector' ] ):
+            and "protocol=6" in str( intent[ 'selector' ] ):
                 ingress = str( intent[ 'ingress' ][ 'device' ] ) + ":" \
                     + str( intent[ 'ingress' ][ 'port' ] )
                 egress = str( intent[ 'egress' ][ 'device' ] ) + ":" + \
@@ -324,15 +325,15 @@
             # find out the BGP speaker IP address for this BGP peer
             bgpSpeakerIpAddress = ""
             for interfaceAddress in \
-                    sdnipData[ 'bgpSpeakers' ][ 0 ][ 'interfaceAddresses' ]:
+            sdnipData[ 'bgpSpeakers' ][ 0 ][ 'interfaceAddresses' ]:
                 # if eq( interfaceAddress[ 'interfaceDpid' ],sdnipData[
                 # 'bgpSpeakers' ][ 0 ][ 'attachmentDpid' ] ) and eq(
                 # interfaceAddress[ 'interfacePort' ], sdnipData[ 'bgpSpeakers'
                 # ][ 0 ][ 'attachmentPort' ] ):
                 if eq( interfaceAddress[ 'interfaceDpid' ],
                        peer[ 'attachmentDpid' ] ) \
-                    and eq( interfaceAddress[ 'interfacePort' ],
-                            peer[ 'attachmentPort' ] ):
+                and eq( interfaceAddress[ 'interfacePort' ],
+                        peer[ 'attachmentPort' ] ):
                     bgpSpeakerIpAddress = interfaceAddress[ 'ipAddress' ]
                     break
                 else:
@@ -342,9 +343,9 @@
             # direction
             selectorStr = "IPV4_SRC{ip=" + bgpSpeakerIpAddress + "/32}," \
                 + "IPV4_DST{ip=" + peer[ 'ipAddress' ] + "/32}," \
-                + "IP_PROTO{ protocol=6 }, ETH_TYPE{ ethType=800 }, \
-                TCP_DST{ tcpPort=179 }"
-            selector = selectorStr.replace( " ", "" ).replace( "[", "" )\
+                + "IP_PROTO{protocol=6}, ETH_TYPE{ethType=800}, \
+                TCP_DST{tcpPort=179}"
+            selector = selectorStr.replace( " ", "" ).replace("[", "" )\
                 .replace( "]", "" ).split( "," )
             intent = bgpSpeakerAttachmentPoint + "/" + \
                 bgpPeerAttachmentPoint + "/" + str( sorted( selector ) )
@@ -352,9 +353,9 @@
 
             selectorStr = "IPV4_SRC{ip=" + bgpSpeakerIpAddress + "/32}," \
                 + "IPV4_DST{ip=" + peer[ 'ipAddress' ] + "/32}," \
-                + "IP_PROTO{ protocol=6 }, ETH_TYPE{ ethType=800 }, \
-                TCP_SRC{ tcpPort=179 }"
-            selector = selectorStr.replace( " ", "" ).replace( "[", "" )\
+                + "IP_PROTO{protocol=6}, ETH_TYPE{ethType=800}, \
+                TCP_SRC{tcpPort=179}"
+            selector = selectorStr.replace( " ", "" ).replace("[", "" )\
                 .replace( "]", "" ).split( "," )
             intent = bgpSpeakerAttachmentPoint + "/" \
                 + bgpPeerAttachmentPoint + "/" + str( sorted( selector ) )
@@ -364,9 +365,9 @@
             # direction
             selectorStr = "IPV4_SRC{ip=" + peer[ 'ipAddress' ] + "/32}," \
                 + "IPV4_DST{ip=" + bgpSpeakerIpAddress + "/32}," \
-                + "IP_PROTO{ protocol=6 }, ETH_TYPE{ ethType=800 }, \
-                TCP_DST{ tcpPort=179 }"
-            selector = selectorStr.replace( " ", "" ).replace( "[", "" )\
+                + "IP_PROTO{protocol=6}, ETH_TYPE{ethType=800}, \
+                TCP_DST{tcpPort=179}"
+            selector = selectorStr.replace( " ", "" ).replace("[", "" )\
                 .replace( "]", "" ).split( "," )
             intent = bgpPeerAttachmentPoint + "/" \
                 + bgpSpeakerAttachmentPoint + "/" + str( sorted( selector ) )
@@ -374,8 +375,8 @@
 
             selectorStr = "IPV4_SRC{ip=" + peer[ 'ipAddress' ] + "/32}," \
                 + "IPV4_DST{ip=" + bgpSpeakerIpAddress + "/32}," \
-                + "IP_PROTO{ protocol=6 }, ETH_TYPE{ ethType=800 }, \
-                TCP_SRC{ tcpPort=179 }"
+                + "IP_PROTO{protocol=6}, ETH_TYPE{ethType=800}, \
+                TCP_SRC{tcpPort=179}"
             selector = selectorStr.replace( " ", "" ).replace( "[", "" )\
                 .replace( "]", "" ).split( "," )
             intent = bgpPeerAttachmentPoint + "/" \
@@ -399,23 +400,23 @@
 
         chunk_size = 20
 
-        if len( routes ) > chunk_size:
-            num_iter = ( int )( len( routes ) / chunk_size )
+        if len(routes) > chunk_size:
+            num_iter = (int) (len(routes) / chunk_size)
         else:
-            num_iter = 1
+            num_iter = 1;
 
         total = 0
-        for n in range( 0, num_iter + 1 ):
+        for n in range( 0, num_iter + 1):
             routeCmd = ""
-            if ( len( routes ) - ( n * chunk_size ) ) >= chunk_size:
-                m = ( n + 1 ) * chunk_size
+            if (len( routes ) - (n * chunk_size)) >= chunk_size:
+                m = (n + 1) * chunk_size
             else:
                 m = len( routes )
             for i in range( n * chunk_size, m ):
                 routeCmd = routeCmd + "network " + routes[ i ] + "\n"
                 total = total + 1
 
-            main.log.info( routeCmd )
+            main.log.info(routeCmd)
             try:
                 self.handle.sendline( routeCmd )
                 self.handle.expect( "bgpd", timeout=5 )
@@ -424,8 +425,8 @@
                 self.disconnect()
 
             # waitTimer = 1.00 / routeRate
-            main.log.info( "Total routes so far " + ( ( str )( total ) ) + " wait for 0 sec" )
-            # time.sleep( 1 )
+            main.log.info("Total routes so far " + ((str) (total)) + " wait for 0 sec")
+            #time.sleep( 1 )
         if routesAdded == len( routes ):
             main.log.info( "Finished adding routes" )
             return main.TRUE
@@ -488,6 +489,7 @@
             main.log.info( "NO HANDLE" )
             return main.FALSE
 
+
     # Please use the generateRoutes plus addRoutes instead of this one!
     def addRoute( self, net, numRoutes, routeRate ):
         try:
@@ -611,7 +613,7 @@
             count = 0
             while True:
                 i = child.expect( [ '17\d\.\d{1,3}\.\d{1,3}\.\d{1,3}',
-                                    'CLI#', pexpect.TIMEOUT ] )
+                                   'CLI#', pexpect.TIMEOUT ] )
                 if i == 0:
                     count = count + 1
                 elif i == 1:
@@ -698,3 +700,4 @@
             main.log.error( "Connection failed to the host" )
             response = main.FALSE
         return response
+
diff --git a/TestON/drivers/common/cli/remotesysdriver.py b/TestON/drivers/common/cli/remotesysdriver.py
index 3c5dd17..48046f1 100644
--- a/TestON/drivers/common/cli/remotesysdriver.py
+++ b/TestON/drivers/common/cli/remotesysdriver.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python
 """
 Created on 26-Oct-2012
-Copyright 2012 Open Networking Foundation ( ONF )
+Copyright 2012 Open Networking Foundation (ONF)
 
 Please refer questions to either the onos test mailing list at <onos-test@onosproject.org>,
 the System Testing Plans and Results wiki page at <https://wiki.onosproject.org/x/voMg>,
diff --git a/TestON/drivers/common/cli/remotetestbed/floodlightclidriver.py b/TestON/drivers/common/cli/remotetestbed/floodlightclidriver.py
index 58667a1..b923a20 100644
--- a/TestON/drivers/common/cli/remotetestbed/floodlightclidriver.py
+++ b/TestON/drivers/common/cli/remotetestbed/floodlightclidriver.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python
 """
 Created on 12-Feb-2013
-Copyright 2013 Open Networking Foundation ( ONF )
+Copyright 2013 Open Networking Foundation (ONF)
 
 Please refer questions to either the onos test mailing list at <onos-test@onosproject.org>,
 the System Testing Plans and Results wiki page at <https://wiki.onosproject.org/x/voMg>,
@@ -28,7 +28,6 @@
 """
 from drivers.common.cli.remotetestbeddriver import RemoteTestBedDriver
 
-
 class FloodLightCliDriver( RemoteTestBedDriver ):
 
     """
diff --git a/TestON/drivers/common/cli/remotetestbed/necswitchdriver.py b/TestON/drivers/common/cli/remotetestbed/necswitchdriver.py
index 9ab454d..b04b95c 100644
--- a/TestON/drivers/common/cli/remotetestbed/necswitchdriver.py
+++ b/TestON/drivers/common/cli/remotetestbed/necswitchdriver.py
@@ -1,5 +1,5 @@
 """
-Copyright 2015 Open Networking Foundation ( ONF )
+Copyright 2015 Open Networking Foundation (ONF)
 
 Please refer questions to either the onos test mailing list at <onos-test@onosproject.org>,
 the System Testing Plans and Results wiki page at <https://wiki.onosproject.org/x/voMg>,
@@ -18,6 +18,7 @@
     You should have received a copy of the GNU General Public License
     along with TestON.  If not, see <http://www.gnu.org/licenses/>.
 """
+
 class NEC:
 
     def __init__( self ):
diff --git a/TestON/drivers/common/cli/remotetestbed/remotepoxdriver.py b/TestON/drivers/common/cli/remotetestbed/remotepoxdriver.py
index 8369c24..9a556c9 100644
--- a/TestON/drivers/common/cli/remotetestbed/remotepoxdriver.py
+++ b/TestON/drivers/common/cli/remotetestbed/remotepoxdriver.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python
 """
 Created on 12-Feb-2013
-Copyright 2013 Open Networking Foundation ( ONF )
+Copyright 2013 Open Networking Foundation (ONF)
 
 Please refer questions to either the onos test mailing list at <onos-test@onosproject.org>,
 the System Testing Plans and Results wiki page at <https://wiki.onosproject.org/x/voMg>,
diff --git a/TestON/drivers/common/cli/remotetestbed/remotevmdriver.py b/TestON/drivers/common/cli/remotetestbed/remotevmdriver.py
index ebb3d75..5875169 100644
--- a/TestON/drivers/common/cli/remotetestbed/remotevmdriver.py
+++ b/TestON/drivers/common/cli/remotetestbed/remotevmdriver.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python
 """
 Created on 12-Feb-2013
-Copyright 2013 Open Networking Foundation ( ONF )
+Copyright 2013 Open Networking Foundation (ONF)
 
 Please refer questions to either the onos test mailing list at <onos-test@onosproject.org>,
 the System Testing Plans and Results wiki page at <https://wiki.onosproject.org/x/voMg>,
diff --git a/TestON/drivers/common/cli/remotetestbeddriver.py b/TestON/drivers/common/cli/remotetestbeddriver.py
index 8dc73d5..e4bbbad 100644
--- a/TestON/drivers/common/cli/remotetestbeddriver.py
+++ b/TestON/drivers/common/cli/remotetestbeddriver.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python
 """
 Created on 26-Oct-2012
-Copyright 2012 Open Networking Foundation ( ONF )
+Copyright 2012 Open Networking Foundation (ONF)
 
 Please refer questions to either the onos test mailing list at <onos-test@onosproject.org>,
 the System Testing Plans and Results wiki page at <https://wiki.onosproject.org/x/voMg>,
diff --git a/TestON/drivers/common/cli/tool/dpctlclidriver.py b/TestON/drivers/common/cli/tool/dpctlclidriver.py
index dfff992..0dd15ee 100644
--- a/TestON/drivers/common/cli/tool/dpctlclidriver.py
+++ b/TestON/drivers/common/cli/tool/dpctlclidriver.py
@@ -1,7 +1,7 @@
-# /usr/bin/env python
+#/usr/bin/env python
 """
 Created on 26-Nov-2012
-Copyright 2012 Open Networking Foundation ( ONF )
+Copyright 2012 Open Networking Foundation (ONF)
 
 Please refer questions to either the onos test mailing list at <onos-test@onosproject.org>,
 the System Testing Plans and Results wiki page at <https://wiki.onosproject.org/x/voMg>,
@@ -50,9 +50,9 @@
 
         self.handle = super(
                    DpctlCliDriver, self ).connect( user_name=self.user_name,
-                                                   ip_address=self.ip_address,
-                                                   port=None,
-                                                   pwd=self.pwd )
+                   ip_address=self.ip_address,
+                   port=None,
+                   pwd=self.pwd )
         if self.handle:
             main.log.info( "Connected to the host" )
             return main.TRUE
@@ -74,7 +74,7 @@
                                      "INPORT",
                                      "ACTION",
                                      "TIMEOUT" ],
-                                     **flowParameters )
+                             **flowParameters )
 
         cmd = "dpctl add-flow tcp:"
         tcpIP = args[ "TCPIP" ] if args[ "TCPIP" ] is not None else "127.0.0.1"
@@ -258,7 +258,7 @@
                                      "TCPIP",
                                      "TCPPORT",
                                      "STRING" ],
-                                     **flowParameters )
+                            **flowParameters )
 
         tcpIP = args[ "TCPIP" ] if args[ "TCPIP" ] is not None else "127.0.0.1"
         tcpPort = args[ "TCPPORT" ] if args[
@@ -286,7 +286,7 @@
                                      "TCPIP",
                                      "TCPPORT",
                                      "STRING" ],
-                                     **flowParameters )
+                             **flowParameters )
 
         tcpIP = args[ "TCPIP" ] if args[ "TCPIP" ] is not None else "127.0.0.1"
         tcpPort = args[ "TCPPORT" ] if args[
@@ -306,3 +306,4 @@
 if __name__ != "__main__":
     import sys
     sys.modules[ __name__ ] = DpctlCliDriver()
+
diff --git a/TestON/drivers/common/cli/toolsdriver.py b/TestON/drivers/common/cli/toolsdriver.py
index c31dbf7..79a0db2 100644
--- a/TestON/drivers/common/cli/toolsdriver.py
+++ b/TestON/drivers/common/cli/toolsdriver.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python
 """
 Created on 26-Nov-2012
-Copyright 2012 Open Networking Foundation ( ONF )
+Copyright 2012 Open Networking Foundation (ONF)
 
 Please refer questions to either the onos test mailing list at <onos-test@onosproject.org>,
 the System Testing Plans and Results wiki page at <https://wiki.onosproject.org/x/voMg>,
diff --git a/TestON/drivers/common/clidriver.py b/TestON/drivers/common/clidriver.py
index 90b7e3e..086ae0a 100644
--- a/TestON/drivers/common/clidriver.py
+++ b/TestON/drivers/common/clidriver.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python
 """
 Created on 24-Oct-2012
-Copyright 2012 Open Networking Foundation ( ONF )
+Copyright 2012 Open Networking Foundation (ONF)
 
 Please refer questions to either the onos test mailing list at <onos-test@onosproject.org>,
 the System Testing Plans and Results wiki page at <https://wiki.onosproject.org/x/voMg>,
@@ -34,10 +34,10 @@
     def __init__( self ):
         super( CLI, self ).__init__()
 
-    def checkPrompt( self ):
+    def checkPrompt(self):
         for key in self.options:
-            if key == "prompt" and self.options[ 'prompt' ] is not None:
-                self.prompt = self.options[ 'prompt' ]
+            if key == "prompt" and self.options['prompt'] is not None:
+                self.prompt = self.options['prompt']
                 break
 
     def connect( self, **connectargs ):
@@ -100,7 +100,7 @@
                 else:
                     main.log.info( "Server asked for password, but none was "
                                     "given in the .topo file. Trying "
-                                    "no password." )
+                                    "no password.")
                     self.pwd = ""
                 self.handle.sendline( self.pwd )
                 j = self.handle.expect( [
@@ -181,8 +181,8 @@
         if index == 0:
             self.LASTRSP = self.LASTRSP + \
                 self.handle.before + self.handle.after
-            main.log.info( "Executed :" + str( cmd ) +
-                           " \t\t Expected Prompt '" + str( expectPrompt ) +
+            main.log.info( "Executed :" + str(cmd ) +
+                           " \t\t Expected Prompt '" + str( expectPrompt) +
                            "' Found" )
         elif index == 1:
             self.LASTRSP = self.LASTRSP + self.handle.before
@@ -397,7 +397,7 @@
                 else:
                     main.log.info( "Server asked for password, but none was "
                                     "given in the .topo file. Trying "
-                                    "no password." )
+                                    "no password.")
                     pwd = ""
                 handle.sendline( pwd )
                 j = handle.expect( [ self.prompt,
diff --git a/TestON/drivers/component.py b/TestON/drivers/component.py
index 7951c2e..968306e 100644
--- a/TestON/drivers/component.py
+++ b/TestON/drivers/component.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python
 """
 Created on 24-Oct-2012
-Copyright 2012 Open Networking Foundation ( ONF )
+Copyright 2012 Open Networking Foundation (ONF)
 
 Please refer questions to either the onos test mailing list at <onos-test@onosproject.org>,
 the System Testing Plans and Results wiki page at <https://wiki.onosproject.org/x/voMg>,
@@ -45,9 +45,9 @@
     def __getattr__( self, name ):
         """
          Called when an attribute lookup has not found the attribute
-         in the usual places ( i.e. it is not an instance attribute nor
-         is it found in the class tree for self ). name is the attribute
-         name. This method should return the ( computed ) attribute value
+         in the usual places (i.e. it is not an instance attribute nor
+         is it found in the class tree for self). name is the attribute
+         name. This method should return the (computed) attribute value
          or raise an AttributeError exception.
         """
         try:
@@ -117,3 +117,4 @@
 if __name__ != "__main__":
     import sys
     sys.modules[ __name__ ] = Component()
+
diff --git a/TestON/tests/SCPF/SCPFscaleTopo/SCPFscaleTopo.py b/TestON/tests/SCPF/SCPFscaleTopo/SCPFscaleTopo.py
index 923f056..b0b8880 100644
--- a/TestON/tests/SCPF/SCPFscaleTopo/SCPFscaleTopo.py
+++ b/TestON/tests/SCPF/SCPFscaleTopo/SCPFscaleTopo.py
@@ -217,6 +217,7 @@
         # First capture
 
         main.postResult = True
+        main.step( "Grep information from the ONOS log" )
         for i in range( 3 ):
             # Calculate total time
             main.allinfo[ 0 ][ 'info' + str( i ) ][ 'totalTime' ] = main.scaleTopoFunction.getInfoFromLog( main, main.searchTerm[ 'start' ], 'first', main.searchTerm[ 'end' ], 'last', index=i, funcMode='TD' )
@@ -230,7 +231,10 @@
             main.allinfo[ 0 ][ 'info' + str( i ) ][ 'disconnectRate' ] = main.scaleTopoFunction.getInfoFromLog( main, main.searchTerm[ 'Disconnect' ], 'num', main.searchTerm[ 'start' ], 'num', index=i, funcMode='DR' )
         main.log.debug( "The data is " + str( main.allinfo[ 0 ] ) )
         if -1 in main.allinfo[ 0 ][ 'info0' ].values() or -1 in main.allinfo[ 0 ][ 'info1' ].values() or -1 in main.allinfo[ 0 ][ 'info2' ].values():
-            main.log.warn( "Something happened to ONOS. Skip the rest of the steps" )
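+            # Record the failure as a failed assertion so it is reflected in the test results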
+            utilities.assert_equals( expect=main.TRUE,
+                                     actual=main.FALSE,
+                                     onpass="Everything installed properly to the ONOS.",
+                                     onfail="Something happened to ONOS. Skip the rest of the steps." )
             main.postResult = False
         else:
             main.case( "Verifying topology: TORUS %sx%s" % ( main.currScale, main.currScale ) )
@@ -239,87 +243,102 @@
 
             main.log.info( "Gathering topology information" )
             time.sleep( main.MNSleep )
-            stepResult = main.TRUE
-            main.step( "Comparing MN topology to ONOS topology" )
+
             compareRetry = 0
+            main.step( "Checking if ONOS is stable" )
+            main.scaleTopoFunction.checkingONOSStablility( main )
 
-            while compareRetry < 3:
-                currentDevicesResult = main.TRUE
-                currentLinksResult = main.TRUE
-                # While loop for retry
-                devices = main.topoRelated.getAll( "devices" )
-                ports = main.topoRelated.getAll( "ports" )
-                links = main.topoRelated.getAll( "links" )
-                mnSwitches = main.Mininet1.getSwitches( updateTimeout=main.basicMNTime + int( main.currScale ) * main.MNupdateTime )
-                main.log.info( "Comparing switches..." )
-                devicePool = []
-                for controller in range( len( main.Cluster.active() ) ):
-                    t = main.Thread( target=main.topoRelated.compareDevicePort,
-                                     threadID=main.threadID,
-                                     name="Compare-Device-Port",
-                                     args=[ main.Mininet1, controller,
-                                            mnSwitches,
-                                            devices, ports ] )
-                    devicePool.append( t )
-                    t.start()
-                    main.threadID = main.threadID + 1
+            if main.postResult:
+                main.step( "Comparing MN topology to ONOS topology" )
 
-                mnLinks = main.Mininet1.getLinks( timeout=main.basicMNTime + int( main.currScale ) * main.MNLinksTime,
-                                                  updateTimeout=main.basicMNTime + int(main.currScale) * main.MNupdateTime )
-                main.log.info( "Comparing links..." )
-                linkPool = []
-                for controller in range( len( main.Cluster.active() ) ):
-                    t = main.Thread( target=main.topoRelated.compareBase,
-                                     threadID=main.threadID,
-                                     name="Compare-Link-Result",
-                                     args=[ links, controller,
-                                            main.Mininet1.compareLinks,
-                                            [ mnSwitches, mnLinks ] ] )
-                    linkPool.append( t )
-                    t.start()
-                    main.threadID = main.threadID + 1
-
-                for t in devicePool:
-                    t.join()
-                    currentDevicesResult = currentDevicesResult and t.result
-                for t in linkPool:
-                    t.join()
-                    currentLinksResult = currentLinksResult and t.result
-                stepResult = stepResult and currentDevicesResult and currentLinksResult
-                if stepResult:
-                    break
-                compareRetry += 1
-            utilities.assert_equals( expect=main.TRUE,
-                                     actual=stepResult,
-                                     onpass=" Topology match Mininet",
-                                     onfail="ONOS Topology doesn't match Mininet" )
-
-            if stepResult:
-                if main.hostDiscover:
-                    hostList = []
-                    for i in range( 1, int( main.currScale ) + 1 ):
-                        for j in range( 1, int( main.currScale ) + 1 ):
-                            # Generate host list
-                            hoststr = "h" + str( i ) + "x" + str( j )
-                            hostList.append( hoststr )
-                    for i in range( len( hostList ) ):
-                        main.topo.sendArpPackage( main, hostList[ i ] )
-                    time.sleep( 20 )
-                    totalHost = main.topo.getHostNum( main )
-                    if totalHost == int( main.currScale ) * int( main.currScale ):
-                        main.log.info( "Discovered all hosts" )
-                        stepResult = stepResult and main.TRUE
-                    else:
-                        main.log.warn( "Some hosts ware not discovered by ONOS... Topology doesn't match!" )
+                compareRetry = 0
+                while compareRetry < 2:
+                    stepResult = main.TRUE
+                    currentDevicesResult = main.TRUE
+                    currentLinksResult = main.TRUE
+                    # While loop for retry
+                    devices = main.topoRelated.getAll( "devices" )
+                    ports = main.topoRelated.getAll( "ports" )
+                    links = main.topoRelated.getAll( "links" )
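+                    # If any controller failed to return topology data, wait and retry the comparison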
+                    if None in devices or None in ports or None in links:
+                        main.log.warn( "Something went wrong. Retrying..." )
+                        time.sleep( 20 )
                         stepResult = main.FALSE
+                        compareRetry += 1
+                        continue
+                    mnSwitches = main.Mininet1.getSwitches( updateTimeout=main.basicMNTime + int( main.currScale ) * main.MNupdateTime )
+                    main.log.info( "Comparing switches..." )
+                    devicePool = []
+                    for controller in range( len( main.Cluster.active() ) ):
+                        t = main.Thread( target=main.topoRelated.compareDevicePort,
+                                         threadID=main.threadID,
+                                         name="Compare-Device-Port",
+                                         args=[ main.Mininet1, controller,
+                                                mnSwitches,
+                                                devices, ports ] )
+                        devicePool.append( t )
+                        t.start()
+                        main.threadID = main.threadID + 1
+
+                    mnLinks = main.Mininet1.getLinks( timeout=main.basicMNTime + int( main.currScale ) * main.MNLinksTime,
+                                                      updateTimeout=main.basicMNTime + int(main.currScale) * main.MNupdateTime )
+                    main.log.info( "Comparing links..." )
+                    linkPool = []
+                    for controller in range( len( main.Cluster.active() ) ):
+                        t = main.Thread( target=main.topoRelated.compareBase,
+                                         threadID=main.threadID,
+                                         name="Compare-Link-Result",
+                                         args=[ links, controller,
+                                                main.Mininet1.compareLinks,
+                                                [ mnSwitches, mnLinks ] ] )
+                        linkPool.append( t )
+                        t.start()
+                        main.threadID = main.threadID + 1
+
+                    for t in devicePool:
+                        t.join()
+                        currentDevicesResult = currentDevicesResult and t.result
+                    for t in linkPool:
+                        t.join()
+                        currentLinksResult = currentLinksResult and t.result
+                    stepResult = stepResult and currentDevicesResult and currentLinksResult
+                    if stepResult:
+                        break
+                    compareRetry += 1
+                utilities.assert_equals( expect=main.TRUE,
+                                         actual=stepResult,
+                                         onpass=" Topology match Mininet",
+                                         onfail="ONOS Topology doesn't match Mininet" )
+                main.scaleTopoFunction.checkingONOSStablility( main )
+                if stepResult and main.postResult:
+                    if main.hostDiscover:
+                        hostList = []
+                        for i in range( 1, int( main.currScale ) + 1 ):
+                            for j in range( 1, int( main.currScale ) + 1 ):
+                                # Generate host list
+                                hoststr = "h" + str( i ) + "x" + str( j )
+                                hostList.append( hoststr )
+                        for i in range( len( hostList ) ):
+                            main.topo.sendArpPackage( main, hostList[ i ] )
+                        time.sleep( 20 )
+                        totalHost = main.topo.getHostNum( main )
+                        if totalHost == int( main.currScale ) * int( main.currScale ):
+                            main.log.info( "Discovered all hosts" )
+                            stepResult = stepResult and main.TRUE
+                        else:
+                            main.log.warn( "Some hosts ware not discovered by ONOS... Topology doesn't match!" )
+                            stepResult = main.FALSE
+                        utilities.assert_equals( expect=main.TRUE,
+                                                 actual=stepResult,
+                                                 onpass=" Topology match Mininet",
+                                                 onfail="ONOS Topology doesn't match Mininet" )
+                    main.log.info( "Finished this iteration, continue to scale next topology." )
+                else:
                     utilities.assert_equals( expect=main.TRUE,
-                                             actual=stepResult,
-                                             onpass=" Topology match Mininet",
-                                             onfail="ONOS Topology doesn't match Mininet" )
-                main.log.info( "Finished this iteration, continue to scale next topology." )
-            else:
-                main.log.info( "Clean up and exit TestON. Finished this test." )
-                main.cleanAndExit()
+                                             actual=main.FALSE,
+                                             onpass="ONOS is stable.",
+                                             onfail="Something happened to ONOS. Skip the rest of the steps." )
+                    main.postResult = False
 
     def CASE100( self, main ):
         """
diff --git a/TestON/tests/SCPF/SCPFscaleTopo/dependencies/scaleTopoFunction.py b/TestON/tests/SCPF/SCPFscaleTopo/dependencies/scaleTopoFunction.py
index 964f0c2..a48ddf7 100644
--- a/TestON/tests/SCPF/SCPFscaleTopo/dependencies/scaleTopoFunction.py
+++ b/TestON/tests/SCPF/SCPFscaleTopo/dependencies/scaleTopoFunction.py
@@ -440,3 +440,15 @@
         restartResult = main.FALSE
 
     return restartResult
+
+def checkingONOSStablility( main ):
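+    """
+    Check that every active ONOS node still responds to a CLI summary request.
+    If a node does not respond, set main.postResult to False so the remaining
+    steps are skipped.
+    """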
+    compareRetry = 0
+    while compareRetry < 3 and main.postResult:
+        for controller in main.Cluster.active():
+            if controller.CLI.summary() is None:
+                main.log.error( "Something happened to ONOS. Skip the rest of the steps" )
+                main.postResult = False
+                break
+            time.sleep( 5 )
+        compareRetry += 1
+        time.sleep( 10 )