Merge "Temporarily adding 0's to the extra column for scaling max intents test."
diff --git a/TestON/JenkinsFile/FUNCJenkinsFile b/TestON/JenkinsFile/FUNCJenkinsFile
index 3555299..857ff54 100644
--- a/TestON/JenkinsFile/FUNCJenkinsFile
+++ b/TestON/JenkinsFile/FUNCJenkinsFile
@@ -15,7 +15,8 @@
 "FUNCnetconf" : [wiki_link:prop["WikiPrefix"]+"-"+"FUNCnetconf", wiki_file:"FUNCnetconfWiki.txt"],
 "FUNCgroup" : [wiki_link:prop["WikiPrefix"]+"-"+"FUNCgroup", wiki_file:"FUNCgroupWiki.txt"],
 "FUNCintent" : [wiki_link:prop["WikiPrefix"]+"-"+"FUNCintent", wiki_file:"FUNCintentWiki.txt"],
-"FUNCintentRest" : [wiki_link:prop["WikiPrefix"]+"-"+"FUNCintentRest", wiki_file:"FUNCintentRestWiki.txt"]
+"FUNCintentRest" : [wiki_link:prop["WikiPrefix"]+"-"+"FUNCintentRest", wiki_file:"FUNCintentRestWiki.txt"],
+"FUNCformCluster" : [wiki_link:prop["WikiPrefix"]+"-"+"FUNCformCluster", wiki_file:"FUNCformClusterWiki.txt"]
 ]
 table_name = "executed_test_tests"
 result_name = "executed_test_results"
@@ -118,7 +119,7 @@
                             done
                             ls -al
                             cd '''
-                            if( prop["manualRun"] == "false" ){
+                            if( prop["manualRun"] == "false" || prop["postResult"] == "true" ){
                                 // Post Results
                                 withCredentials([
                                     string(credentialsId: 'db_pass', variable: 'pass'),
@@ -163,7 +164,7 @@
                         }
                     }
 
-                    if( prop["manualRun"] == "false" ){
+                    if( prop["manualRun"] == "false" || prop["postResult"] == "true" ){
                         def post = build job: "Pipeline_postjob_VM", propagate: false,
                             parameters: [
                                 string(name: 'Wiki_Contents', value: fileContents),
diff --git a/TestON/JenkinsFile/JenkinsfileTrigger b/TestON/JenkinsFile/JenkinsfileTrigger
index d0ce382..6b1da7b 100644
--- a/TestON/JenkinsFile/JenkinsfileTrigger
+++ b/TestON/JenkinsFile/JenkinsfileTrigger
@@ -6,15 +6,16 @@
 AllTheTests=
 [
     "FUNC":[
-            "FUNCipv6Intent" : ["basic":true, "extra_A":false, "extra_B":false, "day":""],
-            "FUNCoptical" :    ["basic":true, "extra_A":false, "extra_B":false, "day":""],
-            "FUNCflow" :       ["basic":true, "extra_A":false, "extra_B":false, "day":""],
-            "FUNCnetCfg":      ["basic":true, "extra_A":false, "extra_B":false, "day":""],
-            "FUNCovsdbtest" :  ["basic":true, "extra_A":false, "extra_B":false, "day":""],
-            "FUNCnetconf" :    ["basic":true, "extra_A":false, "extra_B":false, "day":""],
-            "FUNCgroup" :      ["basic":true, "extra_A":false, "extra_B":false, "day":""],
-            "FUNCintent" :     ["basic":false, "extra_A":true, "extra_B":false, "day":""],
-            "FUNCintentRest" : ["basic":false, "extra_A":false, "extra_B":true, "day":""]
+            "FUNCipv6Intent" : ["basic":true, "extra_A":false, "extra_B":false, "new_Test":false, "day":""],
+            "FUNCoptical" :    ["basic":true, "extra_A":false, "extra_B":false, "new_Test":false, "day":""],
+            "FUNCflow" :       ["basic":true, "extra_A":false, "extra_B":false, "new_Test":false, "day":""],
+            "FUNCnetCfg":      ["basic":true, "extra_A":false, "extra_B":false, "new_Test":false, "day":""],
+            "FUNCovsdbtest" :  ["basic":true, "extra_A":false, "extra_B":false, "new_Test":false, "day":""],
+            "FUNCnetconf" :    ["basic":true, "extra_A":false, "extra_B":false, "new_Test":false, "day":""],
+            "FUNCgroup" :      ["basic":true, "extra_A":false, "extra_B":false, "new_Test":false, "day":""],
+            "FUNCformCluster" :["basic":false, "extra_A":false, "extra_B":false, "new_Test":true, "day":""],
+            "FUNCintent" :     ["basic":false, "extra_A":true, "extra_B":false, "new_Test":false, "day":""],
+            "FUNCintentRest" : ["basic":false, "extra_A":false, "extra_B":true, "new_Test":false, "day":""]
     ],
     "HA":[
             "HAsanity" :                ["basic":true, "extra_A":false, "extra_B":false, "day":""],
@@ -197,6 +198,7 @@
 }
 def monday( getResult ){
     FUNC_choices += adder( "FUNC", "basic", true, "M", getResult )
+    FUNC_choices += adder( "FUNC", "new_Test", true, "M", getResult )
     FUNC_choices += adder( "FUNC", "extra_A", true, "M", getResult )
     HA_choices += adder( "HA", "basic", true, "M", getResult )
     HA_choices += adder( "HA", "extra_A", true, "M", getResult )
@@ -205,6 +207,7 @@
 }
 def tuesday( getDay, getResult ){
     FUNC_choices += adder( "FUNC", "basic", getDay, "T", getResult )
+    FUNC_choices += adder( "FUNC", "new_Test", getDay, "T", getResult )
     FUNC_choices += adder( "FUNC", "extra_B", getDay, "T", getResult )
     HA_choices += adder( "HA", "basic", getDay, "T", getResult )
     HA_choices += adder( "HA", "extra_B", getDay, "T", getResult )
@@ -216,6 +219,7 @@
 }
 def wednesday( getDay, getResult ){
     FUNC_choices += adder( "FUNC", "basic", getDay, "W", getResult )
+    FUNC_choices += adder( "FUNC", "new_Test", getDay, "W", getResult )
     FUNC_choices += adder( "FUNC", "extra_A", getDay, "W", getResult )
     HA_choices += adder( "HA", "basic", getDay, "W", getResult )
     HA_choices += adder( "HA", "extra_A", getDay, "W", getResult )
@@ -225,6 +229,7 @@
 }
 def thursday( getDay, getResult ){
     FUNC_choices += adder( "FUNC", "basic", getDay, "Th", getResult )
+    FUNC_choices += adder( "FUNC", "new_Test", getDay, "Th", getResult )
     FUNC_choices += adder( "FUNC", "extra_B", getDay, "Th", getResult )
     HA_choices += adder( "HA", "basic", getDay, "Th", getResult )
     HA_choices += adder( "HA", "extra_B", getDay, "Th", getResult )
@@ -233,6 +238,7 @@
 }
 def friday( getDay, getResult ){
     FUNC_choices += adder( "FUNC", "basic", getDay, "F", getResult )
+    FUNC_choices += adder( "FUNC", "new_Test", getDay, "F", getResult )
     FUNC_choices += adder( "FUNC", "extra_A", getDay, "F", getResult )
     HA_choices += adder( "HA", "basic", getDay, "F", getResult )
     HA_choices += adder( "HA", "extra_A", getDay, "F", getResult )
@@ -422,7 +428,7 @@
 }
 def oldFlowCheck( jobOn, onos_branch ){
     result = ""
-    if( isOldFlow && jobOn == "SCPF" && onos_branch="master" )
+    if( isOldFlow && jobOn == "SCPF" && onos_branch == "master" )
         result = '''sed -i -e 's/@Component(immediate = true)/@Component(enabled = false)/g' ~/onos/core/store/dist/src/main/java/org/onosproject/store/flow/impl/DistributedFlowRuleStore.java
         sed -i -e 's/@Component(enabled = false)/@Component(immediate = true)/g' ~/onos/core/store/dist/src/main/java/org/onosproject/store/flow/impl/ECFlowRuleStore.java'''
     return result
diff --git a/TestON/JenkinsFile/scripts/SCPFIntentInstallWithdrawRerouteLat.R b/TestON/JenkinsFile/scripts/SCPFIntentInstallWithdrawRerouteLat.R
index 6c1a220..037b6d4 100644
--- a/TestON/JenkinsFile/scripts/SCPFIntentInstallWithdrawRerouteLat.R
+++ b/TestON/JenkinsFile/scripts/SCPFIntentInstallWithdrawRerouteLat.R
@@ -70,7 +70,7 @@
                                   "<using-old-flow>",
                                   "<directory-to-save-graphs>",
                                   sep=" " ) )
-    q()  # basically exit(), but in R
+    quit( status = 1 )  # basically exit(), but in R
 }
 
 # -----------------------------------
@@ -187,8 +187,22 @@
 print( "Combining Install, Withdraw, and Reroute Latencies Data" )
 
 if ( ncol( rerouteData ) == 0 ){  # Checks if rerouteData exists, so we can exclude it if necessary
-    avgs <- c( installWithdrawData[ 'install_avg' ],
-               installWithdrawData[ 'withdraw_avg' ] )
+
+    requiredColumns <- c( "install_avg",
+                          "withdraw_avg"  )
+
+    tryCatch( avgs <- c( installWithdrawData[ requiredColumns] ),
+              error = function( e ) {
+                  print( "[ERROR] One or more expected columns are missing from the data. Please check that the data and SQL command are valid, then try again." )
+                  print( "Required columns: " )
+                  print( requiredColumns )
+                  print( "Actual columns: " )
+                  print( names( fileData ) )
+                  print( "Error dump:" )
+                  print( e )
+                  quit( status = 1 )
+              }
+             )
 } else{
     colnames( rerouteData ) <- c( "date",
                                   "name",
@@ -201,9 +215,21 @@
                                   "reroute_avg",
                                   "reroute_std" )
 
-    avgs <- c( installWithdrawData[ 'install_avg' ],
-               installWithdrawData[ 'withdraw_avg' ],
-               rerouteData[ 'reroute_avg' ] )
+    tryCatch( avgs <- c( installWithdrawData[ 'install_avg' ],
+                         installWithdrawData[ 'withdraw_avg' ],
+                         rerouteData[ 'reroute_avg' ] ),
+              error = function( e ) {
+                  print( "[ERROR] One or more expected columns are missing from the data. Please check that the data and SQL command are valid, then try again." )
+                  print( "Required columns: " )
+                  print( c( "install_avg", "withdraw_avg", "reroute_avg" ) )
+                  print( "Actual columns: " )
+                  print( names( fileData ) )
+                  print( "Error dump:" )
+                  print( e )
+                  quit( status = 1 )
+              }
+             )
+
 }
 
 # Combine lists into data frames.
@@ -341,9 +367,16 @@
 
 print( paste( "Saving bar chart with error bars to", errBarOutputFile ) )
 
-ggsave( errBarOutputFile,
-        width = imageWidth,
-        height = imageHeight,
-        dpi = imageDPI )
+tryCatch( ggsave( errBarOutputFile,
+                  width = imageWidth,
+                  height = imageHeight,
+                  dpi = imageDPI ),
+          error = function( e ){
+              print( "[ERROR] There was a problem saving the graph due to a graph formatting exception.  Error dump:" )
+              print( e )
+              quit( status = 1 )
+          }
+        )
 
 print( paste( "[SUCCESS] Successfully wrote bar chart with error bars out to", errBarOutputFile ) )
+quit( status = 0 )
diff --git a/TestON/JenkinsFile/scripts/SCPFLineGraph.R b/TestON/JenkinsFile/scripts/SCPFLineGraph.R
index 3f78953..d063d0a 100644
--- a/TestON/JenkinsFile/scripts/SCPFLineGraph.R
+++ b/TestON/JenkinsFile/scripts/SCPFLineGraph.R
@@ -78,7 +78,7 @@
                                     "<using-old-flow>",
                                     "<directory-to-save-graph>",
                   sep = " " ) )
-    q()  # basically exit(), but in R
+    quit( status = 1 )  # basically exit(), but in R
 }
 
 # -------------------------------
@@ -121,8 +121,15 @@
 
 print( "Sending SQL command:" )
 print( args[ sql_commands ] )
+
 fileData <- dbGetQuery( con, args[ sql_commands ] )
 
+# Check if data has been received
+if ( nrow( fileData ) == 0 ){
+    print( "[ERROR]: No data received from the databases. Please double check this by manually running the SQL command." )
+    quit( status = 1 )
+}
+
 # **********************************************************
 # STEP 2: Organize data.
 # **********************************************************
@@ -140,6 +147,7 @@
     }
 }
 
+
 # --------------------
 # Construct Data Frame
 # --------------------
@@ -266,9 +274,16 @@
 
 print( paste( "Saving result graph to", outputFile ) )
 
-ggsave( outputFile,
-        width = imageWidth,
-        height = imageHeight,
-        dpi = imageDPI )
+tryCatch( ggsave( outputFile,
+                  width = imageWidth,
+                  height = imageHeight,
+                  dpi = imageDPI ),
+          error = function( e ){
+              print( "[ERROR] There was a problem saving the graph due to a graph formatting exception.  Error dump:" )
+              print( e )
+              quit( status = 1 )
+          }
+        )
 
 print( paste( "[SUCCESS] Successfully wrote result graph out to", outputFile ) )
+quit( status = 0 )
diff --git a/TestON/JenkinsFile/scripts/SCPFbatchFlowResp.R b/TestON/JenkinsFile/scripts/SCPFbatchFlowResp.R
index a2166c1..d90c53e 100644
--- a/TestON/JenkinsFile/scripts/SCPFbatchFlowResp.R
+++ b/TestON/JenkinsFile/scripts/SCPFbatchFlowResp.R
@@ -57,7 +57,7 @@
 
 if ( is.na( args[ save_directory ] ) ){
 
-    print( paste( "Usage: Rscript SCPFbatchFlowResp",
+    print( paste( "Usage: Rscript SCPFbatchFlowResp.R",
                                   "<database-host>",
                                   "<database-port>",
                                   "<database-user-id>",
@@ -68,7 +68,7 @@
                                   "<directory-to-save-graphs>",
                                   sep=" " ) )
 
-    q()  # basically exit(), but in R
+    quit( status = 1 )  # basically exit(), but in R
 }
 
 # -----------------
@@ -149,8 +149,20 @@
 
 print( "Sorting data for Post." )
 
-postAvgs <- c( fileData[ 'posttoconfrm' ],
-               fileData[ 'elapsepost' ] )
+requiredColumns <- c( "posttoconfrm", "elapsepost" )
+
+tryCatch( postAvgs <- c( fileData[ requiredColumns] ),
+          error = function( e ) {
+              print( "[ERROR] One or more expected columns are missing from the data. Please check that the data and SQL command are valid, then try again." )
+              print( "Required columns: " )
+              print( requiredColumns )
+              print( "Actual columns: " )
+              print( names( fileData ) )
+              print( "Error dump:" )
+              print( e )
+              quit( status = 1 )
+          }
+         )
 
 # -------------------------
 # Post Construct Data Frame
@@ -181,15 +193,27 @@
 # Del Data Sorting
 # ----------------
 
-print( "Sorting data for Del." )
-avgs <- c( fileData[ 'deltoconfrm' ],
-           fileData[ 'elapsedel' ] )
+requiredColumns <- c( "deltoconfrm", "elapsedel" )
+
+tryCatch( delAvgs <- c( fileData[ requiredColumns] ),
+          error = function( e ) {
+              print( "[ERROR] One or more expected columns are missing from the data. Please check that the data and SQL command are valid, then try again." )
+              print( "Required columns: " )
+              print( requiredColumns )
+              print( "Actual columns: " )
+              print( names( fileData ) )
+              print( "Error dump:" )
+              print( e )
+              quit( status = 1 )
+          }
+         )
+
 
 # ------------------------
 # Del Construct Data Frame
 # ------------------------
 
-delDataFrame <- melt( avgs )
+delDataFrame <- melt( delAvgs )
 delDataFrame$scale <- fileData$scale
 delDataFrame$date <- fileData$date
 delDataFrame$iterative <- rev( seq( 1, nrow( fileData ), by = 1 ) )
@@ -303,10 +327,16 @@
 
 print( paste( "Saving Post bar chart to", postOutputFile ) )
 
-ggsave( postOutputFile,
-        width = imageWidth,
-        height = imageHeight,
-        dpi = imageDPI )
+tryCatch( ggsave( postOutputFile,
+                  width = imageWidth,
+                  height = imageHeight,
+                  dpi = imageDPI ),
+          error = function( e ){
+              print( "[ERROR] There was a problem saving the graph due to a graph formatting exception.  Error dump:" )
+              print( e )
+              quit( status = 1 )
+          }
+        )
 
 print( paste( "[SUCCESS] Successfully wrote stacked bar chart out to", postOutputFile ) )
 
@@ -369,9 +399,16 @@
 
 print( paste( "Saving Del bar chart to", delOutputFile ) )
 
-ggsave( delOutputFile,
-        width = imageWidth,
-        height = imageHeight,
-        dpi = imageDPI )
+tryCatch( ggsave( delOutputFile,
+                  width = imageWidth,
+                  height = imageHeight,
+                  dpi = imageDPI ),
+          error = function( e ){
+              print( "[ERROR] There was a problem saving the graph due to a graph formatting exception.  Error dump:" )
+              print( e )
+              quit( status = 1 )
+          }
+        )
 
 print( paste( "[SUCCESS] Successfully wrote stacked bar chart out to", delOutputFile ) )
+quit( status = 0 )
diff --git a/TestON/JenkinsFile/scripts/SCPFcbench.R b/TestON/JenkinsFile/scripts/SCPFcbench.R
index b62fa0f..0a28024 100644
--- a/TestON/JenkinsFile/scripts/SCPFcbench.R
+++ b/TestON/JenkinsFile/scripts/SCPFcbench.R
@@ -65,7 +65,7 @@
                                   "<directory-to-save-graphs>",
                                   sep=" " ) )
 
-    q()  # basically exit(), but in R
+    quit( status = 1 )  # basically exit(), but in R
 }
 
 # -----------------
@@ -126,7 +126,21 @@
 
 print( "Sorting data." )
 
-avgs <- c( fileData[ 'avg' ] )
+requiredColumns <- c( "avg" )
+
+tryCatch( avgs <- c( fileData[ requiredColumns] ),
+          error = function( e ) {
+              print( "[ERROR] One or more expected columns are missing from the data. Please check that the data and SQL command are valid, then try again." )
+              print( "Required columns: " )
+              print( requiredColumns )
+              print( "Actual columns: " )
+              print( names( fileData ) )
+              print( "Error dump:" )
+              print( e )
+              quit( status = 1 )
+          }
+         )
+
 
 # --------------------
 # Construct Data Frame
@@ -236,9 +250,15 @@
 
 print( paste( "Saving bar chart with error bars to", errBarOutputFile ) )
 
-ggsave( errBarOutputFile,
-        width = imageWidth,
-        height = imageHeight,
-        dpi = imageDPI )
+tryCatch( ggsave( errBarOutputFile,
+                  width = imageWidth,
+                  height = imageHeight,
+                  dpi = imageDPI ),
+          error = function( e ){
+              print( "[ERROR] There was a problem saving the graph due to a graph formatting exception.  Error dump:" )
+              print( e )
+              quit( status = 1 )
+          }
+        )
 
 print( paste( "[SUCCESS] Successfully wrote bar chart with error bars out to", errBarOutputFile ) )
diff --git a/TestON/JenkinsFile/scripts/SCPFflowTp1g.R b/TestON/JenkinsFile/scripts/SCPFflowTp1g.R
index 9123085..3d3a95e 100644
--- a/TestON/JenkinsFile/scripts/SCPFflowTp1g.R
+++ b/TestON/JenkinsFile/scripts/SCPFflowTp1g.R
@@ -72,7 +72,7 @@
                                   "<directory-to-save-graphs>",
                                   sep=" " ) )
 
-    q()  # basically exit(), but in R
+    quit( status = 1 )  # basically exit(), but in R
 }
 
 # -----------------
@@ -175,7 +175,20 @@
                            "avg",
                            "std" )
 
-avgs <- c( fileData[ 'avg' ] )
+requiredColumns <- c( "avg" )
+
+tryCatch( avgs <- c( fileData[ requiredColumns] ),
+          error = function( e ) {
+              print( "[ERROR] One or more expected columns are missing from the data. Please check that the data and SQL command are valid, then try again." )
+              print( "Required columns: " )
+              print( requiredColumns )
+              print( "Actual columns: " )
+              print( names( fileData ) )
+              print( "Error dump:" )
+              print( e )
+              quit( status = 1 )
+          }
+         )
 
 
 # ----------------------------
@@ -295,9 +308,16 @@
 
 print( paste( "Saving bar chart with error bars to", errBarOutputFile ) )
 
-ggsave( errBarOutputFile,
-        width = imageWidth,
-        height = imageHeight,
-        dpi = imageDPI )
+tryCatch( ggsave( errBarOutputFile,
+                  width = imageWidth,
+                  height = imageHeight,
+                  dpi = imageDPI ),
+          error = function( e ){
+              print( "[ERROR] There was a problem saving the graph due to a graph formatting exception.  Error dump:" )
+              print( e )
+              quit( status = 1 )
+          }
+        )
 
 print( paste( "[SUCCESS] Successfully wrote bar chart with error bars out to", errBarOutputFile ) )
+quit( status = 0 )
diff --git a/TestON/JenkinsFile/scripts/SCPFhostLat.R b/TestON/JenkinsFile/scripts/SCPFhostLat.R
index 56d0f11..90781a3 100644
--- a/TestON/JenkinsFile/scripts/SCPFhostLat.R
+++ b/TestON/JenkinsFile/scripts/SCPFhostLat.R
@@ -65,7 +65,7 @@
                                   "<directory-to-save-graphs>",
                                   sep=" " ) )
 
-    q()  # basically exit(), but in R
+    quit( status = 1 )  # basically exit(), but in R
 }
 
 # -----------------
@@ -127,7 +127,21 @@
 # ------------
 
 print( "Sorting data." )
-avgs <- c( fileData[ 'avg' ] )
+
+requiredColumns <- c( "avg" )
+
+tryCatch( avgs <- c( fileData[ requiredColumns] ),
+          error = function( e ) {
+              print( "[ERROR] One or more expected columns are missing from the data. Please check that the data and SQL command are valid, then try again." )
+              print( "Required columns: " )
+              print( requiredColumns )
+              print( "Actual columns: " )
+              print( names( fileData ) )
+              print( "Error dump:" )
+              print( e )
+              quit( status = 1 )
+          }
+         )
 
 # --------------------
 # Construct Data Frame
@@ -230,9 +244,16 @@
 
 print( paste( "Saving bar chart with error bars to", errBarOutputFile ) )
 
-ggsave( errBarOutputFile,
-        width = imageWidth,
-        height = imageHeight,
-        dpi = imageDPI )
+tryCatch( ggsave( errBarOutputFile,
+                  width = imageWidth,
+                  height = imageHeight,
+                  dpi = imageDPI ),
+          error = function( e ){
+              print( "[ERROR] There was a problem saving the graph due to a graph formatting exception.  Error dump:" )
+              print( e )
+              quit( status = 1 )
+          }
+        )
 
 print( paste( "[SUCCESS] Successfully wrote bar chart out to", errBarOutputFile ) )
+quit( status = 0 )
diff --git a/TestON/JenkinsFile/scripts/SCPFintentEventTp.R b/TestON/JenkinsFile/scripts/SCPFintentEventTp.R
index e9540f2..0b168ba 100644
--- a/TestON/JenkinsFile/scripts/SCPFintentEventTp.R
+++ b/TestON/JenkinsFile/scripts/SCPFintentEventTp.R
@@ -72,7 +72,7 @@
                                   "<directory-to-save-graphs>",
                                   sep=" " ) )
 
-    q()  # basically exit(), but in R
+    quit( status = 1 )  # basically exit(), but in R
 }
 
 # -----------------
@@ -173,7 +173,21 @@
 # ------------
 
 print( "Sorting data." )
-avgs <- c( fileData[ 'avg' ] )
+
+requiredColumns <- c( "avg" )
+
+tryCatch( avgs <- c( fileData[ requiredColumns] ),
+          error = function( e ) {
+              print( "[ERROR] One or more expected columns are missing from the data. Please check that the data and SQL command are valid, then try again." )
+              print( "Required columns: " )
+              print( requiredColumns )
+              print( "Actual columns: " )
+              print( names( fileData ) )
+              print( "Error dump:" )
+              print( e )
+              quit( status = 1 )
+          }
+         )
 
 # --------------------
 # Construct Data Frame
@@ -278,9 +292,16 @@
 
 print( paste( "Saving bar chart to", errBarOutputFile ) )
 
-ggsave( errBarOutputFile,
-        width = imageWidth,
-        height = imageHeight,
-        dpi = imageDPI )
+tryCatch( ggsave( errBarOutputFile,
+                  width = imageWidth,
+                  height = imageHeight,
+                  dpi = imageDPI ),
+          error = function( e ){
+              print( "[ERROR] There was a problem saving the graph due to a graph formatting exception.  Error dump:" )
+              print( e )
+              quit( status = 1 )
+          }
+        )
 
 print( paste( "[SUCCESS] Successfully wrote bar chart out to", errBarOutputFile ) )
+quit( status = 0 )
diff --git a/TestON/JenkinsFile/scripts/SCPFmastershipFailoverLat.R b/TestON/JenkinsFile/scripts/SCPFmastershipFailoverLat.R
index 8681f29..30f7bca 100644
--- a/TestON/JenkinsFile/scripts/SCPFmastershipFailoverLat.R
+++ b/TestON/JenkinsFile/scripts/SCPFmastershipFailoverLat.R
@@ -64,7 +64,7 @@
                                   "<directory-to-save-graphs>",
                                   sep=" " ) )
 
-        q()  # basically exit(), but in R
+        quit( status = 1 )  # basically exit(), but in R
 }
 
 # -----------------
@@ -134,8 +134,20 @@
 
 print( "Combining averages into a list." )
 
-avgs <- c( fileData[ 'kill_deact_avg' ],
-           fileData[ 'deact_role_avg' ] )
+requiredColumns <- c( "kill_deact_avg", "deact_role_avg" )
+
+tryCatch( avgs <- c( fileData[ requiredColumns] ),
+          error = function( e ) {
+              print( "[ERROR] One or more expected columns are missing from the data. Please check that the data and SQL command are valid, then try again." )
+              print( "Required columns: " )
+              print( requiredColumns )
+              print( "Actual columns: " )
+              print( names( fileData ) )
+              print( "Error dump:" )
+              print( e )
+              quit( status = 1 )
+          }
+         )
 
 # --------------------
 # Construct Data Frame
@@ -193,7 +205,7 @@
                 legend.key.size = unit( 1.5, 'lines' ) )
 
 barColors <- scale_fill_manual( values=c( "#F77670",
-                                       "#619DFA" ) )
+                                          "#619DFA" ) )
 
 wrapLegend <- guides( fill=guide_legend( nrow=1, byrow=TRUE ) )
 
@@ -264,10 +276,16 @@
 
 print( paste( "Saving bar chart with error bars to", errBarOutputFile ) )
 
-ggsave( errBarOutputFile,
-        width = imageWidth,
-        height = imageHeight,
-        dpi = imageDPI )
+tryCatch( ggsave( errBarOutputFile,
+                  width = imageWidth,
+                  height = imageHeight,
+                  dpi = imageDPI ),
+          error = function( e ){
+              print( "[ERROR] There was a problem saving the graph due to a graph formatting exception.  Error dump:" )
+              print( e )
+              quit( status = 1 )
+          }
+        )
 
 print( paste( "[SUCCESS] Successfully wrote bar chart with error bars out to", errBarOutputFile ) )
 
@@ -317,9 +335,16 @@
 
 print( paste( "Saving stacked bar chart to", stackedBarOutputFile ) )
 
-ggsave( stackedBarOutputFile,
-        width = imageWidth,
-        height = imageHeight,
-        dpi = imageDPI )
+tryCatch( ggsave( stackedBarOutputFile,
+                  width = imageWidth,
+                  height = imageHeight,
+                  dpi = imageDPI ),
+          error = function( e ){
+              print( "[ERROR] There was a problem saving the graph due to a graph formatting exception.  Error dump:" )
+              print( e )
+              quit( status = 1 )
+          }
+        )
 
 print( paste( "[SUCCESS] Successfully wrote stacked bar chart out to", stackedBarOutputFile ) )
+quit( status = 0 )
diff --git a/TestON/JenkinsFile/scripts/SCPFportLat.R b/TestON/JenkinsFile/scripts/SCPFportLat.R
index 254b718..4637072 100644
--- a/TestON/JenkinsFile/scripts/SCPFportLat.R
+++ b/TestON/JenkinsFile/scripts/SCPFportLat.R
@@ -65,7 +65,7 @@
                                   "<directory-to-save-graphs>",
                                   sep=" " ) )
 
-    q()  # basically exit(), but in R
+    quit( status = 1 )  # basically exit(), but in R
 }
 
 # -----------------
@@ -129,9 +129,21 @@
 
 print( "Sorting data for Port Up Averages." )
 
-upAvgs <- c( fileData[ 'up_ofp_to_dev_avg' ],
-             fileData[ 'up_dev_to_link_avg' ],
-             fileData[ 'up_link_to_graph_avg' ] )
+requiredColumns <- c( "up_ofp_to_dev_avg", "up_dev_to_link_avg", "up_link_to_graph_avg" )
+
+tryCatch( upAvgs <- c( fileData[ requiredColumns] ),
+          error = function( e ) {
+              print( "[ERROR] One or more expected columns are missing from the data. Please check that the data and SQL command are valid, then try again." )
+              print( "Required columns: " )
+              print( requiredColumns )
+              print( "Actual columns: " )
+              print( names( fileData ) )
+              print( "Error dump:" )
+              print( e )
+              quit( status = 1 )
+          }
+         )
+
 
 # ----------------------------
 # Port Up Construct Data Frame
@@ -166,9 +178,20 @@
 
 print( "Sorting data for Port Down Averages." )
 
-downAvgs <- c( fileData[ 'down_ofp_to_dev_avg' ],
-               fileData[ 'down_dev_to_link_avg' ],
-               fileData[ 'down_link_to_graph_avg' ] )
+requiredColumns <- c( "down_ofp_to_dev_avg", "down_dev_to_link_avg", "down_link_to_graph_avg" )
+
+tryCatch( downAvgs <- c( fileData[ requiredColumns] ),
+          error = function( e ) {
+              print( "[ERROR] One or more expected columns are missing from the data. Please check that the data and SQL command are valid, then try again." )
+              print( "Required columns: " )
+              print( requiredColumns )
+              print( "Actual columns: " )
+              print( names( fileData ) )
+              print( "Error dump:" )
+              print( e )
+              quit( status = 1 )
+          }
+         )
 
 # ------------------------------
 # Port Down Construct Data Frame
@@ -294,10 +317,16 @@
 
 print( paste( "Saving bar chart with error bars (Port Up Latency) to", errBarOutputFileUp ) )
 
-ggsave( errBarOutputFileUp,
-        width = imageWidth,
-        height = imageHeight,
-        dpi = imageDPI )
+tryCatch( ggsave( errBarOutputFileUp,
+                  width = imageWidth,
+                  height = imageHeight,
+                  dpi = imageDPI ),
+          error = function( e ){
+              print( "[ERROR] There was a problem saving the graph due to a graph formatting exception.  Error dump:" )
+              print( e )
+              quit( status = 1 )
+          }
+        )
 
 print( paste( "[SUCCESS] Successfully wrote bar chart with error bars (Port Up Latency) out to", errBarOutputFileUp ) )
 
@@ -362,9 +391,16 @@
 
 print( paste( "Saving bar chart with error bars (Port Down Latency) to", errBarOutputFileDown ) )
 
-ggsave( errBarOutputFileDown,
-        width = imageWidth,
-        height = imageHeight,
-        dpi = imageDPI )
+tryCatch( ggsave( errBarOutputFileDown,
+                  width = imageWidth,
+                  height = imageHeight,
+                  dpi = imageDPI ),
+          error = function( e ){
+              print( "[ERROR] There was a problem saving the graph due to a graph formatting exception.  Error dump:" )
+              print( e )
+              quit( status = 1 )
+          }
+        )
 
 print( paste( "[SUCCESS] Successfully wrote bar chart with error bars (Port Down Latency) out to", errBarOutputFileDown ) )
+quit( status = 0 )
diff --git a/TestON/JenkinsFile/scripts/SCPFscaleTopo.R b/TestON/JenkinsFile/scripts/SCPFscaleTopo.R
index fdb39e4..e69a383 100644
--- a/TestON/JenkinsFile/scripts/SCPFscaleTopo.R
+++ b/TestON/JenkinsFile/scripts/SCPFscaleTopo.R
@@ -65,7 +65,7 @@
                                   "<directory-to-save-graphs>",
                                   sep=" ") )
 
-    q()  # basically exit(), but in R
+    quit( status = 1 )  # basically exit(), but in R
 }
 
 # -----------------
@@ -126,9 +126,21 @@
 # ------------
 
 print( "Sorting data." )
-avgs <- c( fileData[ 'last_role_request_to_last_topology' ],
-           fileData[ 'last_connection_to_last_role_request' ],
-           fileData[ 'first_connection_to_last_connection' ] )
+
+requiredColumns <- c( "last_role_request_to_last_topology", "last_connection_to_last_role_request", "first_connection_to_last_connection" )
+
+tryCatch( avgs <- c( fileData[ requiredColumns] ),
+          error = function( e ) {
+              print( "[ERROR] One or more expected columns are missing from the data. Please check that the data and SQL command are valid, then try again." )
+              print( "Required columns: " )
+              print( requiredColumns )
+              print( "Actual columns: " )
+              print( names( fileData ) )
+              print( "Error dump:" )
+              print( e )
+              quit( status = 1 )
+          }
+         )
 
 # --------------------
 # Construct Data Frame
@@ -238,9 +250,16 @@
 
 print( paste( "Saving bar chart to", outputFile ) )
 
-ggsave( outputFile,
-        width = imageWidth,
-        height = imageHeight,
-        dpi = imageDPI )
+tryCatch( ggsave( outputFile,
+                  width = imageWidth,
+                  height = imageHeight,
+                  dpi = imageDPI ),
+          error = function( e ){
+              print( "[ERROR] There was a problem saving the graph due to a graph formatting exception.  Error dump:" )
+              print( e )
+              quit( status = 1 )
+          }
+        )
 
 print( paste( "[SUCCESS] Successfully wrote bar chart out to", outputFile ) )
+quit( status = 0 )
diff --git a/TestON/JenkinsFile/scripts/SCPFscalingMaxIntents.R b/TestON/JenkinsFile/scripts/SCPFscalingMaxIntents.R
index aed27e5..21dd70f 100644
--- a/TestON/JenkinsFile/scripts/SCPFscalingMaxIntents.R
+++ b/TestON/JenkinsFile/scripts/SCPFscalingMaxIntents.R
@@ -68,7 +68,7 @@
                                   "<directory-to-save-graphs>",
                                   sep=" " ) )
 
-    q()  # basically exit(), but in R
+    quit( status = 1 )  # basically exit(), but in R
 }
 
 # -----------------
@@ -152,8 +152,20 @@
 
 print( "Sorting data." )
 
-avgs <- c( fileData[ 'max_intents_ovs' ],
-           fileData[ 'max_flows_ovs' ] )
+requiredColumns <- c( "max_intents_ovs", "max_flows_ovs" )
+
+tryCatch( avgs <- c( fileData[ requiredColumns] ),
+          error = function( e ) {
+              print( "[ERROR] One or more expected columns are missing from the data. Please check that the data and SQL command are valid, then try again." )
+              print( "Required columns: " )
+              print( requiredColumns )
+              print( "Actual columns: " )
+              print( names( fileData ) )
+              print( "Error dump:" )
+              print( e )
+              quit( status = 1 )
+          }
+         )
 
 # --------------------
 # Construct Data Frame
@@ -259,9 +271,16 @@
 
 print( paste( "Saving bar chart to", outputFile ) )
 
-ggsave( outputFile,
-        width = imageWidth,
-        height = imageHeight,
-        dpi = imageDPI )
+tryCatch( ggsave( outputFile,
+                  width = imageWidth,
+                  height = imageHeight,
+                  dpi = imageDPI ),
+          error = function( e ){
+              print( "[ERROR] There was a problem saving the graph due to a graph formatting exception.  Error dump:" )
+              print( e )
+              quit( status = 1 )
+          }
+        )
 
 print( paste( "[SUCCESS] Successfully wrote bar chart out to", outputFile ) )
+quit( status = 0 )
diff --git a/TestON/JenkinsFile/scripts/SCPFswitchLat.R b/TestON/JenkinsFile/scripts/SCPFswitchLat.R
index 8a68e08..de506a3 100644
--- a/TestON/JenkinsFile/scripts/SCPFswitchLat.R
+++ b/TestON/JenkinsFile/scripts/SCPFswitchLat.R
@@ -66,7 +66,7 @@
                             "<directory-to-save-graphs>",
                             sep=" ") )
 
-    q()  # basically exit(), but in R
+    quit( status = 1 )  # basically exit(), but in R
 }
 
 # -----------------
@@ -131,11 +131,24 @@
 
 print( "Sorting data for Switch Up Averages." )
 
-upAvgs <- c( fileData[ 'up_device_to_graph_avg' ],
-             fileData[ 'role_reply_to_device_avg' ],
-             fileData[ 'role_request_to_role_reply_avg' ],
-             fileData[ 'feature_reply_to_role_request_avg' ],
-             fileData[ 'tcp_to_feature_reply_avg' ] )
+requiredColumns <- c( "up_device_to_graph_avg",
+                      "role_reply_to_device_avg",
+                      "role_request_to_role_reply_avg",
+                      "feature_reply_to_role_request_avg",
+                      "tcp_to_feature_reply_avg" )
+
+tryCatch( upAvgs <- c( fileData[ requiredColumns] ),
+          error = function( e ) {
+              print( "[ERROR] One or more expected columns are missing from the data. Please check that the data and SQL command are valid, then try again." )
+              print( "Required columns: " )
+              print( requiredColumns )
+              print( "Actual columns: " )
+              print( names( fileData ) )
+              print( "Error dump:" )
+              print( e )
+              quit( status = 1 )
+          }
+         )
 
 # ------------------------------
 # Switch Up Construct Data Frame
@@ -171,9 +184,22 @@
 
 print( "Sorting data for Switch Down Averages." )
 
-downAvgs <- c( fileData[ 'down_device_to_graph_avg' ],
-               fileData[ 'ack_to_device_avg' ],
-               fileData[ 'fin_ack_to_ack_avg' ] )
+requiredColumns <- c( "down_device_to_graph_avg",
+                      "ack_to_device_avg",
+                      "fin_ack_to_ack_avg" )
+
+tryCatch( downAvgs <- c( fileData[ requiredColumns] ),
+          error = function( e ) {
+              print( "[ERROR] One or more expected columns are missing from the data. Please check that the data and SQL command are valid, then try again." )
+              print( "Required columns: " )
+              print( requiredColumns )
+              print( "Actual columns: " )
+              print( names( fileData ) )
+              print( "Error dump:" )
+              print( e )
+              quit( status = 1 )
+          }
+         )
 
 # --------------------------------
 # Switch Down Construct Data Frame
@@ -291,10 +317,16 @@
 
 print( paste( "Saving bar chart with error bars (Switch Up Latency) to", errBarOutputFileUp ) )
 
-ggsave( errBarOutputFileUp,
-        width = imageWidth,
-        height = imageHeight,
-        dpi = imageDPI )
+tryCatch( ggsave( errBarOutputFileUp,
+                  width = imageWidth,
+                  height = imageHeight,
+                  dpi = imageDPI ),
+          error = function( e ){
+              print( "[ERROR] There was a problem saving the graph due to a graph formatting exception.  Error dump:" )
+              print( e )
+              quit( status = 1 )
+          }
+        )
 
 print( paste( "[SUCCESS] Successfully wrote bar chart with error bars (Switch Up Latency) out to", errBarOutputFileUp ) )
 
@@ -361,9 +393,16 @@
 
 print( paste( "Saving bar chart with error bars (Switch Down Latency) to", errBarOutputFileDown ) )
 
-ggsave( errBarOutputFileDown,
-        width = imageWidth,
-        height = imageHeight,
-        dpi = imageDPI )
+tryCatch( ggsave( errBarOutputFileDown,
+                  width = imageWidth,
+                  height = imageHeight,
+                  dpi = imageDPI ),
+          error = function( e ){
+              print( "[ERROR] There was a problem saving the graph due to a graph formatting exception.  Error dump:" )
+              print( e )
+              quit( status = 1 )
+          }
+        )
 
 print( paste( "[SUCCESS] Successfully wrote bar chart with error bars (Switch Down Latency) out to", errBarOutputFileDown ) )
+quit( status = 0 )
diff --git a/TestON/JenkinsFile/scripts/testCaseGraphGenerator.R b/TestON/JenkinsFile/scripts/testCaseGraphGenerator.R
index fb70d9c..1938ceb 100644
--- a/TestON/JenkinsFile/scripts/testCaseGraphGenerator.R
+++ b/TestON/JenkinsFile/scripts/testCaseGraphGenerator.R
@@ -62,7 +62,7 @@
                                   "<directory-to-save-graphs>",
                                   sep=" " ) )
 
-    q()  # basically exit(), but in R
+    quit( status = 1 )  # basically exit(), but in R
 }
 
 # -------------------------------
@@ -135,9 +135,20 @@
 
 print( "Combining Passed, Failed, and Planned Data." )
 
-categories <- c( fileData[ 'num_failed' ],
-                 fileData[ 'num_passed' ],
-                 fileData[ 'num_planned' ] )
+requiredColumns <- c( "num_failed", "num_passed", "num_planned" )
+
+tryCatch( categories <- c( fileData[ requiredColumns] ),
+          error = function( e ) {
+              print( "[ERROR] One or more expected columns are missing from the data. Please check that the data and SQL command are valid, then try again." )
+              print( "Required columns: " )
+              print( requiredColumns )
+              print( "Actual columns: " )
+              print( names( fileData ) )
+              print( "Error dump:" )
+              print( e )
+              quit( status = 1 )
+          }
+         )
 
 # --------------------
 # Construct Data Frame
@@ -294,9 +305,16 @@
 
 print( paste( "Saving result graph to", outputFile ) )
 
-ggsave( outputFile,
-        width = imageWidth,
-        height = imageHeight,
-        dpi = imageDPI )
+tryCatch( ggsave( outputFile,
+                  width = imageWidth,
+                  height = imageHeight,
+                  dpi = imageDPI ),
+          error = function( e ){
+              print( "[ERROR] There was a problem saving the graph due to a graph formatting exception.  Error dump:" )
+              print( e )
+              quit( status = 1 )
+          }
+        )
 
 print( paste( "[SUCCESS] Successfully wrote result graph out to", outputFile ) )
+quit( status = 0 )
diff --git a/TestON/drivers/common/cli/onosdriver.py b/TestON/drivers/common/cli/onosdriver.py
index 73b1755..31f26d5 100755
--- a/TestON/drivers/common/cli/onosdriver.py
+++ b/TestON/drivers/common/cli/onosdriver.py
@@ -2437,3 +2437,37 @@
         except Exception:
             main.log.exception( self.name + ": Uncaught exception!" )
             main.cleanAndExit()
+
+    def formCluster( self, onosIPs ):
+        """
+            Form ONOS cluster with the IP addresses in the onosIPs list
+        """
+        try:
+            onosIPs = " ".join( onosIPs )
+            command = "onos-form-cluster {}".format(  onosIPs )
+            main.log.info( "Sending: " + command )
+            self.handle.sendline( "" )
+            self.handle.expect( self.prompt )
+            self.handle.sendline( command )
+            self.handle.expect( self.prompt )
+            handle = self.handle.before
+            main.log.debug( handle )
+            assert handle is not None, "Error in sendline"
+            assert "Command not found:" not in handle, handle
+            assert "Error" not in handle, handle
+            assert "Exception:" not in handle, handle
+            assert "curl:" not in handle, handle
+            return main.TRUE
+        except AssertionError:
+            main.log.exception( "{} Error in onos-form-cluster output:".format( self.name ) )
+            return main.FALSE
+        except TypeError:
+            main.log.exception( self.name + ": Object not as expected" )
+            return main.FALSE
+        except pexpect.EOF:
+            main.log.error( self.name + ": EOF exception found" )
+            main.log.error( self.name + ":    " + self.handle.before )
+            main.cleanAndExit()
+        except Exception:
+            main.log.exception( self.name + ": Uncaught exception!" )
+            main.cleanAndExit()
diff --git a/TestON/drivers/common/clidriver.py b/TestON/drivers/common/clidriver.py
index 0f3140a..10b5152 100644
--- a/TestON/drivers/common/clidriver.py
+++ b/TestON/drivers/common/clidriver.py
@@ -56,20 +56,22 @@
             self.ip_address + " port 22: Connection refused"
         if self.port:
             self.handle = pexpect.spawn(
-                'ssh -p ' +
+                'ssh -X -p ' +
                 self.port +
                 ' ' +
                 self.user_name +
                 '@' +
-                self.ip_address,
+                self.ip_address +
+                ' -o ServerAliveInterval=120 -o TCPKeepAlive=yes',
                 env={ "TERM": "xterm-mono" },
-                maxread=50000 )
+                maxread=1000000 )
         else:
             self.handle = pexpect.spawn(
                 'ssh -X ' +
                 self.user_name +
                 '@' +
-                self.ip_address,
+                self.ip_address +
+                ' -o ServerAliveInterval=120 -o TCPKeepAlive=yes',
                 env={ "TERM": "xterm-mono" },
                 maxread=1000000,
                 timeout=60 )
diff --git a/TestON/tests/FUNC/FUNCbgpls/FUNCbgpls.params b/TestON/tests/FUNC/FUNCbgpls/FUNCbgpls.params
index cefc4f1..1e5474b 100755
--- a/TestON/tests/FUNC/FUNCbgpls/FUNCbgpls.params
+++ b/TestON/tests/FUNC/FUNCbgpls/FUNCbgpls.params
@@ -12,6 +12,11 @@
 
     <testcases>1,2,3,4,5,6</testcases>
 
+    <GRAPH>
+        <nodeCluster>BM</nodeCluster>
+        <builds>20</builds>
+    </GRAPH>
+
     <DEPENDENCY>
         <path>/tests/FUNC/FUNCbgpls/Dependencies/</path>
     </DEPENDENCY>
diff --git a/TestON/tests/FUNC/FUNCflow/FUNCflow.params b/TestON/tests/FUNC/FUNCflow/FUNCflow.params
index c0ce79f..6eda334 100755
--- a/TestON/tests/FUNC/FUNCflow/FUNCflow.params
+++ b/TestON/tests/FUNC/FUNCflow/FUNCflow.params
@@ -23,6 +23,11 @@
 
     <testcases>1,2,10,1000,3000,1100,3000,1200,3000,1300,3000,1400,3000,1500,3000,1600,3000,1700,3000,1800,3000,1900,3000,2000,100</testcases>
 
+    <GRAPH>
+        <nodeCluster>VM</nodeCluster>
+        <builds>20</builds>
+    </GRAPH>
+
     <SCALE>
         <max>1</max>
     </SCALE>
diff --git a/TestON/tests/FUNC/FUNCformCluster/FUNCformCluster.params b/TestON/tests/FUNC/FUNCformCluster/FUNCformCluster.params
new file mode 100644
index 0000000..be399f6
--- /dev/null
+++ b/TestON/tests/FUNC/FUNCformCluster/FUNCformCluster.params
@@ -0,0 +1,44 @@
+<PARAMS>
+    # CASE - Description
+    # 0    - Variable initialization and optional pull and build ONOS package
+    # 1    - install ONOS with single node
+    # 2    - Starting ONOS with forming clusters.
+    # 3    - Checking the ONOS configuration with single node
+    # 4    - Checking the ONOS configuration with cluster formed
+    # 5    - Starting Mininet and verifying topology
+
+    <testcases>0,1,3,2,4,5</testcases>
+
+    <DEPENDENCY>
+        <path>/tests/FUNC/FUNCformCluster/dependencies/</path>
+        <file>formClusterFuncs</file>
+    </DEPENDENCY>
+    <GRAPH>
+        <nodeCluster>VM</nodeCluster>
+        <builds>20</builds>
+    </GRAPH>
+    <ENV>
+        <cellApps>drivers,openflow</cellApps>
+        <additionalApp>org.onosproject.fwd</additionalApp>
+        <cellBasicName>singleTemp</cellBasicName>
+    </ENV>
+    <GIT>
+        <pull>False</pull>
+        <branch>master</branch>
+    </GIT>
+    <TEST>
+        <numNodes>7</numNodes>
+    </TEST>
+    <RETRY>
+        <pingall>2</pingall>
+        <topoCheck>2</topoCheck>
+    </RETRY>
+    <SLEEP>
+        <afterONOSStart>15</afterONOSStart>
+        <pingall>3</pingall>
+    </SLEEP>
+    <MININET>
+        <topo>mn --topo tree,2,2</topo>
+    </MININET>
+
+</PARAMS>
diff --git a/TestON/tests/FUNC/FUNCformCluster/FUNCformCluster.py b/TestON/tests/FUNC/FUNCformCluster/FUNCformCluster.py
new file mode 100644
index 0000000..55b6e41
--- /dev/null
+++ b/TestON/tests/FUNC/FUNCformCluster/FUNCformCluster.py
@@ -0,0 +1,261 @@
+"""
+Copyright 2017 Open Networking Foundation ( ONF )
+
+Please refer questions to either the onos test mailing list at <onos-test@onosproject.org>,
+the System Testing Plans and Results wiki page at <https://wiki.onosproject.org/x/voMg>,
+or the System Testing Guide page at <https://wiki.onosproject.org/x/WYQg>
+
+    TestON is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 2 of the License, or
+    ( at your option ) any later version.
+
+    TestON is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with TestON.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+
+class FUNCformCluster:
+
+    def __init__( self ):
+        self.default = ''
+
+    def CASE0( self, main ):
+        import imp
+        import re
+
+        try:
+            from tests.dependencies.ONOSSetup import ONOSSetup
+            main.testSetUp = ONOSSetup()
+        except ImportError:
+            main.log.error( "ONOSSetup not found. exiting the test" )
+            main.cleanAndExit()
+        main.testSetUp.envSetupDescription()
+        stepResult = main.TRUE
+        try:
+            main.apps = main.params[ 'ENV' ][ 'cellApps' ]
+            main.additionalApp = main.params[ 'ENV' ][ 'additionalApp' ]
+            main.cellBasicName = main.params[ 'ENV' ][ 'cellBasicName' ]
+            main.mnTopo = main.params[ 'MININET' ][ 'topo' ]
+            main.startSleep = int( main.params[ 'SLEEP' ][ 'afterONOSStart' ] )
+            dependencyPath = main.testOnDirectory + \
+                             main.params[ 'DEPENDENCY' ][ 'path' ]
+            dependencyFile = main.params[ 'DEPENDENCY' ][ 'file' ]
+            main.numNodes = int( main.params[ 'TEST' ][ 'numNodes' ] )
+            main.funcs = imp.load_source( dependencyFile,
+                                            dependencyPath +
+                                            dependencyFile +
+                                            ".py" )
+            main.pingallRetry = int( main.params[ 'RETRY' ][ 'pingall' ] )
+            main.topoCheckRetry = int( main.params[ 'RETRY' ][ 'topoCheck' ] )
+            main.pingallSleep = int( main.params[ 'SLEEP' ][ 'pingall' ] )
+
+        except Exception as e:
+            main.testSetUp.envSetupException( e )
+        if len( main.Cluster.runningNodes ) != main.numNodes:
+            main.log.error( "The number of the nodes needs to be " + str( main.numNodes ) +
+                            "\nExiting Test..." )
+            main.cleanAndExit()
+        main.testSetUp.evnSetupConclusion( stepResult )
+
+    def CASE1( self, main ):
+        """
+        - Create cells with single node
+            - apply each cell to each cluster
+        - install ONOS
+        - ssh-secure
+        - start the ONOS
+        - activate org.onosproject.fwd to cluster 1 only.
+        """
+        main.case( "Starting ONOS with independent configuration" )
+        main.caseExplanation = "Starting ONOS with one node itself."
+        main.testSetUp.killingAllOnos( main.Cluster, True, False )
+        threads = []
+        i = 0
+        for cluster in main.Cluster.runningNodes:
+            i += 1
+            t = main.Thread( target=cluster.Bench.createCellFile,
+                             name="create-cell",
+                             args=[ main.ONOSbench.ip_address,
+                                    main.cellBasicName + str( i ),
+                                    main.Mininet1.ip_address,
+                                    main.apps,
+                                    cluster.ip_address,
+                                    main.ONOScell.karafUser,
+                                    True ] )
+            threads.append( t )
+            t.start()
+        cellResult = main.TRUE
+        for t in threads:
+            t.join()
+            cellResult = cellResult and t.result
+
+        threads = []
+        i = 0
+        for cluster in main.Cluster.runningNodes:
+            i += 1
+            t = main.Thread( target=cluster.Bench.setCell,
+                             name="set-cell",
+                             args=[ main.cellBasicName + str( i ) ] )
+            threads.append( t )
+            t.start()
+        for t in threads:
+            t.join()
+            cellResult = cellResult and t.result
+
+        threads = []
+        i = 0
+        for cluster in main.Cluster.runningNodes:
+            i += 1
+            t = main.Thread( target=cluster.Bench.verifyCell,
+                             name="verify-cell" )
+            threads.append( t )
+            t.start()
+        for t in threads:
+            t.join()
+            cellResult = cellResult and t.result
+
+        uninstallResult = main.testSetUp.uninstallOnos( main.Cluster, True )
+        buildResult = main.testSetUp.buildOnos( main.Cluster )
+        installResult = main.testSetUp.installOnos( main.Cluster, True, True )
+        secureSshResult = main.testSetUp.setupSsh( main.Cluster )
+        onosServiceResult = main.testSetUp.checkOnosService( main.Cluster )
+        onosCliResult = main.testSetUp.startOnosClis( main.Cluster )
+        activateResult = main.Cluster.active( 0 ).CLI.activateApp( main.additionalApp )
+
+        result = cellResult and uninstallResult and buildResult and installResult and \
+                 secureSshResult and onosServiceResult and onosCliResult and activateResult
+        utilities.assert_equals( expect=main.TRUE,
+                                 actual=result,
+                                 onpass="Successfully started the ONOS",
+                                 onfail="Failed to start the ONOS" )
+
+    def CASE2( self, main ):
+        """
+        - Execute onos-form-cluster to all the nodes.
+        - start the ONOS.
+        - activate org.onosproject.fwd to cluster 1.
+        """
+        main.case( "Starting ONOS with form cluster." )
+        main.caseExplanation = "This will connect all the clusters of the ONOS."
+        main.step( "Executing onos-form-cluster" )
+        formClusterResult = main.ONOSbench.formCluster( main.Cluster.getIps( True, True ) )
+        utilities.assert_equals( expect=main.TRUE,
+                                 actual=formClusterResult,
+                                 onpass="Successfully formed clusters to ONOS",
+                                 onfail="Failed to form clusters to ONOS" )
+        onosServiceResult = main.testSetUp.checkOnosService( main.Cluster )
+        onosCliResult = main.testSetUp.startOnosClis( main.Cluster )
+        activateResult = main.Cluster.active( 0 ).CLI.activateApp( main.additionalApp )
+        result = formClusterResult and onosServiceResult and onosCliResult and activateResult
+        utilities.assert_equals( expect=main.TRUE,
+                                 actual=result,
+                                 onpass="Successfully formed clusters to ONOS and started",
+                                 onfail="Failed to form clusters to ONOS and started" )
+
+    def CASE3( self, main ):
+        """
+            Checking the configuration of the ONOS with single-node ONOS.
+            It will check :
+                - the number of the node : They should only have 1 node.
+                - App status : Only the first node should have additional app installed.
+        """
+        import time
+        main.case( "Checking the configuration of the ONOS" )
+        main.caseExplanation = "Checking the number of the nodes and apps"
+        main.step( "Checking the number of the nodes" )
+        main.log.info( "Sleep for " + str( main.startSleep ) + " to give enough time to ONOS")
+        time.sleep( main.startSleep )
+        result = main.funcs.checkingNumNodes( main, 1 )
+        utilities.assert_equals( expect=main.TRUE,
+                                 actual=result,
+                                 onpass="Successfully checking the nodes numbers of the ONOS",
+                                 onfail="Failed to checking the nodes numbers of the ONOS" )
+        main.step( "Checking the app status. Only the first node should have " +
+                   main.additionalApp + " installed." )
+        i = 0
+        appResult = main.TRUE
+        for cluster in main.Cluster.active():
+            appResult = appResult and main.funcs.checkingApp( main, main.additionalApp, cluster, True if i == 0 else False )
+            i += 1
+        main.Cluster.active( 0 ).CLI.deactivateApp( main.additionalApp )
+        utilities.assert_equals( expect=main.TRUE,
+                                 actual=appResult,
+                                 onpass="Successfully checking the app status of the ONOS",
+                                 onfail="Failed to checking the app status of the ONOS" )
+
+    def CASE4( self, main ):
+        """
+            Checking the configuration of the ONOS with form-cluster.
+            It will check :
+                - the number of the node : They should only have 7 nodes.
+                - state of the node.
+                - App status : All the nodes should have additional app.
+        """
+        import time
+        main.case( "Checking the configuration of the ONOS after form-cluster" )
+        main.caseExplanation = "Checking the number of the nodes and apps"
+        main.step( "Checking the number of the nodes" )
+        main.log.info( "Sleep for " + str( main.startSleep ) + " to give enough time to ONOS")
+        time.sleep( main.startSleep )
+        result = main.funcs.checkingNumNodes( main, main.numNodes )
+        utilities.assert_equals( expect=main.TRUE,
+                                 actual=result,
+                                 onpass="Successfully checking the nodes numbers of the ONOS",
+                                 onfail="Failed to checking the nodes numbers of the ONOS" )
+        main.step( "Checking the status of the nodes" )
+        nodeStatusResult = main.TRUE if main.Cluster.nodesCheck() else main.FALSE
+        utilities.assert_equals( expect=main.TRUE,
+                                 actual=nodeStatusResult,
+                                 onpass="The status of the nodes were in READY as expected",
+                                 onfail="The status of the nodes were NOT in READY as expected" )
+        main.step( "Checking the app status. All nodes should have " +
+                   main.additionalApp + " installed." )
+        appResult = main.TRUE
+        for cluster in main.Cluster.active():
+            appResult = appResult and main.funcs.checkingApp( main, main.additionalApp, cluster, True )
+        utilities.assert_equals( expect=main.TRUE,
+                                 actual=appResult,
+                                 onpass="Successfully checking the app status of the ONOS",
+                                 onfail="Failed to checking the app status of the ONOS" )
+
+    def CASE5( self, main ):
+        """
+            Run simple mininet to check connectivity of ONOS clusters.
+                - It will do ping all
+                - It will compare topos between mininet and ONOS.
+        """
+        try:
+            from tests.dependencies.topology import Topology
+        except ImportError:
+            main.log.error( "Topology not found exiting the test" )
+            main.cleanAndExit()
+        try:
+            main.Topology
+        except ( NameError, AttributeError ):
+            main.Topology = Topology()
+        main.case( "Starting 2x2 Tree Mininet and compare the Topology" )
+        main.caseExplanation = "Starting 2x2 Mininet and assign ONOS controllers to switches."
+        main.step( "Starting Mininet" )
+        for ctrl in main.Cluster.runningNodes:
+            main.mnTopo += " --controller remote,ip=" + ctrl.ipAddress
+        startMnResult = main.Mininet1.startNet( mnCmd=main.mnTopo )
+        utilities.assert_equals( expect=main.TRUE,
+                                 actual=startMnResult,
+                                 onpass="Successfully started Mininet",
+                                 onfail="Failed to start Mininet" )
+        main.step( "Pingall hosts to confirm ONOS discovery" )
+        pingResult = utilities.retry( f=main.Mininet1.pingall,
+                                       retValue=main.FALSE,
+                                       attempts=main.pingallRetry,
+                                       sleep=main.pingallSleep )
+        utilities.assert_equals( expect=main.TRUE,
+                                 actual=pingResult,
+                                 onpass="Successfully discovered hosts",
+                                 onfail="Failed to discover hosts" )
+        main.Topology.compareTopos( main.Mininet1, main.topoCheckRetry )
diff --git a/TestON/tests/FUNC/FUNCformCluster/FUNCformCluster.topo b/TestON/tests/FUNC/FUNCformCluster/FUNCformCluster.topo
new file mode 100644
index 0000000..c96b419
--- /dev/null
+++ b/TestON/tests/FUNC/FUNCformCluster/FUNCformCluster.topo
@@ -0,0 +1,36 @@
+<TOPOLOGY>
+    <COMPONENT>
+
+        <ONOScell>
+            <host>localhost</host>  # ONOS "bench" machine
+            <user>sdn</user>
+            <password>rocks</password>
+            <type>OnosClusterDriver</type>
+            <connect_order>1</connect_order>
+            <COMPONENTS>
+                <cluster_name></cluster_name>  # Used as a prefix for cluster components. Defaults to 'ONOS'
+                <diff_clihost></diff_clihost> # if it has different host other than localhost for CLI. True or empty. OC# will be used for True.
+                <karaf_username></karaf_username>
+                <karaf_password></karaf_password>
+                <web_user></web_user>
+                <web_pass></web_pass>
+                <rest_port></rest_port>
+                <prompt></prompt>  # TODO: we technically need a few of these, one per component
+                <onos_home></onos_home>  # defines where onos home is
+                <nodes> 7 </nodes>  # number of nodes in the cluster
+            </COMPONENTS>
+        </ONOScell>
+
+        <Mininet1>
+            <host>OCN</host>
+            <user>sdn</user>
+            <password>rocks</password>
+            <type>MininetCliDriver</type>
+            <connect_order>2</connect_order>
+            <COMPONENTS>
+                <prompt></prompt>
+            </COMPONENTS>
+        </Mininet1>
+
+    </COMPONENT>
+</TOPOLOGY>
diff --git a/TestON/tests/FUNC/FUNCformCluster/README b/TestON/tests/FUNC/FUNCformCluster/README
new file mode 100644
index 0000000..4ab2cc6
--- /dev/null
+++ b/TestON/tests/FUNC/FUNCformCluster/README
@@ -0,0 +1,13 @@
+Summary:
+        This test is checking the functionality of onos-form-cluster.
+        It will first run 7 single-node ONOS instances and check each node count and app status.
+        Since each instance is a single node, each of them should report 1 node.
+        Then, it will form a 7-node cluster from those instances and re-check the number of nodes,
+        the status of the nodes, and the app status.
+        This time, each instance should report 7 nodes, and an app installed on one node should appear on the others.
+        The status of the nodes should be "READY".
+        Lastly, it will run the Mininet with controllers of 7 nodes to pingall and compare topology
+        of ONOS and Mininet.
+
+Required:
+        Since the test is fixed at 7 nodes, it will be forced to exit unless it is run with 7 ONOS nodes.
\ No newline at end of file
diff --git a/TestON/tests/FUNC/FUNCformCluster/__init__.py b/TestON/tests/FUNC/FUNCformCluster/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/TestON/tests/FUNC/FUNCformCluster/__init__.py
diff --git a/TestON/tests/FUNC/FUNCformCluster/dependencies/formClusterFuncs.py b/TestON/tests/FUNC/FUNCformCluster/dependencies/formClusterFuncs.py
new file mode 100644
index 0000000..044c8a3
--- /dev/null
+++ b/TestON/tests/FUNC/FUNCformCluster/dependencies/formClusterFuncs.py
@@ -0,0 +1,64 @@
+"""
+Copyright 2017 Open Networking Foundation ( ONF )
+
+Please refer questions to either the onos test mailing list at <onos-test@onosproject.org>,
+the System Testing Plans and Results wiki page at <https://wiki.onosproject.org/x/voMg>,
+or the System Testing Guide page at <https://wiki.onosproject.org/x/WYQg>
+
+    TestON is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 2 of the License, or
+    ( at your option ) any later version.
+
+    TestON is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with TestON.  If not, see <http://www.gnu.org/licenses/>.
+"""
+import json
+
+def checkingNumNodes( main, expected ):
+    """
+    Check that every active cluster node reports the expected node count.
+    :param expected:
+        Expected number of nodes in the cluster
+    :return:
+        main.TRUE if every node reports the expected count,
+        main.FALSE if any node does not.
+    """
+    result = main.TRUE
+    for cluster in main.Cluster.active():
+        actual = json.loads( cluster.CLI.summary() ).get( 'nodes' )
+        if expected != actual:
+            result = main.FALSE  # was never set before; function always returned TRUE
+            main.log.error( "Number of the nodes not matched." +
+                            "\nExpected nodes: " + str( expected ) +
+                            "\nActual nodes: " + str( actual ) )
+    return result
+
+def checkingApp( main, appToBeChecked, cluster, expectedToBeThere ):
+    """
+    Check whether an app's install status matches the expectation.
+    :param appToBeChecked:
+        Name of the app to be checked
+    :param cluster:
+        cluster ( node ) handle on which the check is performed
+    :param expectedToBeThere:
+        True if it is expected to be installed. False if it is expected not to be installed.
+    :return:
+        main.TRUE if the status matches the expectation. Otherwise main.FALSE
+    """
+    appStatus = cluster.CLI.appStatus( appToBeChecked )
+    # NOTE: the expected status must be computed separately; the previous un-parenthesized
+    # ternary bound to the whole comparison, so the check always passed when expectedToBeThere was False.
+    expectedStatus = "ACTIVE" if expectedToBeThere else "UNINSTALL"
+    result = appStatus == expectedStatus
+    if result:
+        main.log.info( "App is " + ( "not " if not expectedToBeThere else "" ) + "there as expected" )
+        return main.TRUE
+    else:
+        main.log.error("App is " + ( "" if not expectedToBeThere else "not " ) + "there which should" +
+                       ( "n't" if not expectedToBeThere else "" ) + " be there.")
+        return main.FALSE
diff --git a/TestON/tests/FUNC/FUNCgroup/FUNCgroup.params b/TestON/tests/FUNC/FUNCgroup/FUNCgroup.params
index ca223bd..90a5082 100644
--- a/TestON/tests/FUNC/FUNCgroup/FUNCgroup.params
+++ b/TestON/tests/FUNC/FUNCgroup/FUNCgroup.params
@@ -12,6 +12,11 @@
     # 100  - Check logs for Errors and Warnings
     <testcases>1,2,3,5,6,7,6,100</testcases>
 
+    <GRAPH>
+        <nodeCluster>VM</nodeCluster>
+        <builds>20</builds>
+    </GRAPH>
+
     <SCALE>
         <max>1</max>
     </SCALE>
diff --git a/TestON/tests/FUNC/FUNCintent/FUNCintent.params b/TestON/tests/FUNC/FUNCintent/FUNCintent.params
index bff8823..f1ba155 100644
--- a/TestON/tests/FUNC/FUNCintent/FUNCintent.params
+++ b/TestON/tests/FUNC/FUNCintent/FUNCintent.params
@@ -23,6 +23,11 @@
 
     <testcases>1,[2,10,12,13,15,16,1000,2000,3000,4000,5000,6000,18,19]*2,[2,10,12,13,15,16,17,1000,2000,3000,4000,5000,6000,18,19]*2,[2,11,12,13,15,16,1000,2000,3000,4000,5000,6000,18,19]*2,[2,11,12,13,15,16,17,1000,2000,3000,4000,5000,6000,18,19]*2</testcases>
 
+    <GRAPH>
+        <nodeCluster>VM</nodeCluster>
+        <builds>20</builds>
+    </GRAPH>
+
     <SCALE>
         <size>1,3,1,3,1,3,1,3</size>
     </SCALE>
diff --git a/TestON/tests/FUNC/FUNCintentRest/FUNCintentRest.params b/TestON/tests/FUNC/FUNCintentRest/FUNCintentRest.params
index 7e8b105..d49be07 100644
--- a/TestON/tests/FUNC/FUNCintentRest/FUNCintentRest.params
+++ b/TestON/tests/FUNC/FUNCintentRest/FUNCintentRest.params
@@ -21,6 +21,12 @@
     # 5000 - Test host mobility
 
     <testcases>1,[2,10,12,13,15,16,1000,2000,3000,5000,18,19]*2,[2,10,12,13,15,16,17,1000,2000,3000,5000,18,19]*2,[2,11,12,13,15,16,1000,2000,3000,5000,18,19]*2,[2,11,12,13,15,16,17,1000,2000,3000,5000,18,19]*2</testcases>
+
+    <GRAPH>
+        <nodeCluster>VM</nodeCluster>
+        <builds>20</builds>
+    </GRAPH>
+
     <SCALE>
         <size>1,3,1,3,1,3,1,3</size>
     </SCALE>
diff --git a/TestON/tests/FUNC/FUNCipv6Intent/FUNCipv6Intent.params b/TestON/tests/FUNC/FUNCipv6Intent/FUNCipv6Intent.params
index 73a3599..1f36084 100644
--- a/TestON/tests/FUNC/FUNCipv6Intent/FUNCipv6Intent.params
+++ b/TestON/tests/FUNC/FUNCipv6Intent/FUNCipv6Intent.params
@@ -16,6 +16,11 @@
 
     <testcases>1,2,11,12,13,16,1000,2000,3000,4000,5000,6000,14</testcases>
 
+    <GRAPH>
+        <nodeCluster>VM</nodeCluster>
+        <builds>20</builds>
+    </GRAPH>
+
     <SCALE>
         <size>1</size>
     </SCALE>
diff --git a/TestON/tests/FUNC/FUNCnetCfg/FUNCnetCfg.params b/TestON/tests/FUNC/FUNCnetCfg/FUNCnetCfg.params
index 0f08d38..ce62f92 100644
--- a/TestON/tests/FUNC/FUNCnetCfg/FUNCnetCfg.params
+++ b/TestON/tests/FUNC/FUNCnetCfg/FUNCnetCfg.params
@@ -18,6 +18,11 @@
 
     <testcases>1,25,2,20,11,27,26,21,22,23,24</testcases>
 
+    <GRAPH>
+        <nodeCluster>VM</nodeCluster>
+        <builds>20</builds>
+    </GRAPH>
+
     <DEPENDENCY>
         <path>/tests/FUNC/FUNCnetCfg/dependencies/</path>
         <wrapper1>startUp</wrapper1>
diff --git a/TestON/tests/FUNC/FUNCnetconf/FUNCnetconf.params b/TestON/tests/FUNC/FUNCnetconf/FUNCnetconf.params
index f5339f2..e233dc5 100644
--- a/TestON/tests/FUNC/FUNCnetconf/FUNCnetconf.params
+++ b/TestON/tests/FUNC/FUNCnetconf/FUNCnetconf.params
@@ -10,6 +10,11 @@
 
     <testcases>1,[2,100,200,300,19]*2</testcases>
 
+    <GRAPH>
+        <nodeCluster>VM</nodeCluster>
+        <builds>20</builds>
+    </GRAPH>
+
     <SCALE>
         <size>1,3</size>
     </SCALE>
diff --git a/TestON/tests/FUNC/FUNCoptical/FUNCoptical.params b/TestON/tests/FUNC/FUNCoptical/FUNCoptical.params
index ed94439..2b5a733 100644
--- a/TestON/tests/FUNC/FUNCoptical/FUNCoptical.params
+++ b/TestON/tests/FUNC/FUNCoptical/FUNCoptical.params
@@ -14,6 +14,12 @@
     # 32 - Add and test bidirectional host intents
 
     <testcases>1,[2,10,22,23,31,32,14,19,2,10,16,22,23,31,32,14,19]*1,[2,10,17,22,23,31,32,14,19,2,10,16,17,22,23,31,32,14,19]*1</testcases>
+
+    <GRAPH>
+        <nodeCluster>VM</nodeCluster>
+        <builds>20</builds>
+    </GRAPH>
+
     <SCALE>
         <size>1,3,1,3</size>
     </SCALE>
diff --git a/TestON/tests/FUNC/FUNCovsdbtest/FUNCovsdbtest.params b/TestON/tests/FUNC/FUNCovsdbtest/FUNCovsdbtest.params
index dba74d1..aa15174 100644
--- a/TestON/tests/FUNC/FUNCovsdbtest/FUNCovsdbtest.params
+++ b/TestON/tests/FUNC/FUNCovsdbtest/FUNCovsdbtest.params
@@ -11,6 +11,11 @@
 
     <testcases>1,3,4,2,5,6,7,8</testcases>
 
+    <GRAPH>
+        <nodeCluster>VM</nodeCluster>
+        <builds>20</builds>
+    </GRAPH>
+
     <DEPENDENCY>
         <path>/tests/FUNC/FUNCovsdbtest/dependencies/</path>
     </DEPENDENCY>
diff --git a/TestON/tests/FUNC/FUNCvirNetNB/FUNCvirNetNB.params b/TestON/tests/FUNC/FUNCvirNetNB/FUNCvirNetNB.params
index 84cab8c..9a810bb 100644
--- a/TestON/tests/FUNC/FUNCvirNetNB/FUNCvirNetNB.params
+++ b/TestON/tests/FUNC/FUNCvirNetNB/FUNCvirNetNB.params
@@ -13,6 +13,11 @@
 
     <testcases>1,2,3,4,5,6,7,8,9,10,11,12,13</testcases>
 
+    <GRAPH>
+        <nodeCluster>BM</nodeCluster>
+        <builds>20</builds>
+    </GRAPH>
+
     <SLEEP>
         <startup>15</startup>
     </SLEEP>
diff --git a/TestON/tests/HA/HAclusterRestart/HAclusterRestart.params b/TestON/tests/HA/HAclusterRestart/HAclusterRestart.params
index 8ba346c..bbf11ae 100644
--- a/TestON/tests/HA/HAclusterRestart/HAclusterRestart.params
+++ b/TestON/tests/HA/HAclusterRestart/HAclusterRestart.params
@@ -19,6 +19,11 @@
     #CASE17: Check for basic functionality with distributed primitives
     <testcases>1,2,8,21,3,8,4,5,14,16,17,[6],8,3,7,4,15,17,9,8,4,10,8,4,11,8,4,12,8,4,13</testcases>
 
+    <GRAPH>
+        <nodeCluster>VM</nodeCluster>
+        <builds>20</builds>
+    </GRAPH>
+
     <apps></apps>
     <ONOS_Configuration>
         <org.onosproject.net.intent.impl.compiler.IntentConfigurableRegistrator>
@@ -28,7 +33,7 @@
     </ONOS_Configuration>
     <ENV>
         <cellName>HA</cellName>
-        <appString>drivers,openflow,proxyarp,mobility</appString>
+        <appString>drivers,openflow,proxyarp,mobility,events</appString>
     </ENV>
     <GIT>
         <pull>False</pull>
diff --git a/TestON/tests/HA/HAclusterRestart/HAclusterRestart.py b/TestON/tests/HA/HAclusterRestart/HAclusterRestart.py
index 2712c3f..96ba015 100644
--- a/TestON/tests/HA/HAclusterRestart/HAclusterRestart.py
+++ b/TestON/tests/HA/HAclusterRestart/HAclusterRestart.py
@@ -90,7 +90,6 @@
         except Exception as e:
             main.testSetUp.envSetupException( e )
         main.testSetUp.evnSetupConclusion( stepResult )
-        main.HA.generateGraph( "HAclusterRestart" )
 
         main.testSetUp.ONOSSetUp( main.Mininet1, main.Cluster, cellName=cellName, removeLog=True,
                                   extraApply=main.HA.startingMininet )
diff --git a/TestON/tests/HA/HAcontinuousStopNodes/HAcontinuousStopNodes.params b/TestON/tests/HA/HAcontinuousStopNodes/HAcontinuousStopNodes.params
index f1520f7..a72a475 100644
--- a/TestON/tests/HA/HAcontinuousStopNodes/HAcontinuousStopNodes.params
+++ b/TestON/tests/HA/HAcontinuousStopNodes/HAcontinuousStopNodes.params
@@ -19,7 +19,7 @@
     #CASE15: Check that Leadership Election is still functional
     #CASE16: Install Distributed Primitives app
     #CASE17: Check for basic functionality with distributed primitives
-    <testcases>1,2,8,21,3,4,5,14,16,17,[61,8,7,4,15,17,62]*1000,8,7,4,15,17,9,8,4,10,8,4,11,8,4,12,8,4,13</testcases>
+    <testcases>1,2,8,21,3,4,5,14,16,17,[61,8,7,4,15,17,62,7,8,4,15,17]*1000,8,7,4,15,17,9,8,4,10,8,4,11,8,4,12,8,4,13</testcases>
 
     <apps></apps>
     <ONOS_Configuration>
@@ -30,7 +30,7 @@
     </ONOS_Configuration>
     <ENV>
         <cellName>HA</cellName>
-        <appString>drivers,openflow,proxyarp,mobility</appString>
+        <appString>drivers,openflow,proxyarp,mobility,events</appString>
     </ENV>
     <GIT>
         <pull>False</pull>
diff --git a/TestON/tests/HA/HAcontinuousStopNodes/HAcontinuousStopNodes.py b/TestON/tests/HA/HAcontinuousStopNodes/HAcontinuousStopNodes.py
index 811e04f..b7385fe 100644
--- a/TestON/tests/HA/HAcontinuousStopNodes/HAcontinuousStopNodes.py
+++ b/TestON/tests/HA/HAcontinuousStopNodes/HAcontinuousStopNodes.py
@@ -94,7 +94,6 @@
         except Exception as e:
             main.testSetUp.envSetupException( e )
         main.testSetUp.evnSetupConclusion( stepResult )
-        main.HA.generateGraph( "HAcontinuousStopNodes" )
 
         main.testSetUp.ONOSSetUp( main.Mininet1, main.Cluster, cellName=cellName, removeLog=True,
                                   extraApply=[ main.HA.startingMininet,
@@ -143,7 +142,7 @@
             assert main.killCount is not None, "main.killCount not defined"
         except AttributeError as e:
             main.log.warn( "Node to kill not selected, defaulting to node 1" )
-            main.nodeIndex = 0
+            main.nodeIndex = -1
             main.killCount = 1
 
         main.case( "Stopping ONOS nodes - iteration " + str( main.killCount ) )
@@ -169,11 +168,11 @@
         utilities.assert_equals( expect=main.TRUE, actual=killResults,
                                  onpass="ONOS nodes stopped successfully",
                                  onfail="ONOS nodes NOT successfully stopped" )
+        main.Cluster.reset()
 
         main.step( "Checking ONOS nodes" )
-        nodeResults = utilities.retry( main.HA.nodesCheck,
+        nodeResults = utilities.retry( main.Cluster.nodesCheck,
                                        False,
-                                       args=[ main.Cluster.active() ],
                                        sleep=15,
                                        attempts=5 )
 
@@ -195,7 +194,7 @@
         """
         The bring up stopped nodes
         """
-        main.HA.bringUpStoppedNode( main )
+        main.HA.bringUpStoppedNodes( main )
 
     def CASE7( self, main ):
         """
diff --git a/TestON/tests/HA/HAfullNetPartition/HAfullNetPartition.params b/TestON/tests/HA/HAfullNetPartition/HAfullNetPartition.params
index 3a8b60f..fe4cd80 100644
--- a/TestON/tests/HA/HAfullNetPartition/HAfullNetPartition.params
+++ b/TestON/tests/HA/HAfullNetPartition/HAfullNetPartition.params
@@ -21,6 +21,11 @@
     #CASE17: Check for basic functionality with distributed primitives
     <testcases>1,[2,8,21,3,4,5,14,16,17]*1,[61,8,7,4,15,17,62],8,7,4,15,17,9,8,4,10,8,4,11,8,4,12,8,4,13</testcases>
 
+    <GRAPH>
+        <nodeCluster>VM</nodeCluster>
+        <builds>20</builds>
+    </GRAPH>
+
     <apps></apps>
     <ONOS_Configuration>
         <org.onosproject.net.intent.impl.compiler.IntentConfigurableRegistrator>
@@ -30,7 +35,7 @@
     </ONOS_Configuration>
     <ENV>
         <cellName>HA</cellName>
-        <appString>drivers,openflow,proxyarp,mobility</appString>
+        <appString>drivers,openflow,proxyarp,mobility,events</appString>
     </ENV>
     <GIT>
         <pull>False</pull>
diff --git a/TestON/tests/HA/HAfullNetPartition/HAfullNetPartition.py b/TestON/tests/HA/HAfullNetPartition/HAfullNetPartition.py
index cf7fe73..13424ed 100644
--- a/TestON/tests/HA/HAfullNetPartition/HAfullNetPartition.py
+++ b/TestON/tests/HA/HAfullNetPartition/HAfullNetPartition.py
@@ -93,7 +93,6 @@
         except Exception as e:
             main.testSetUp.envSetupException( e )
         main.testSetUp.evnSetupConclusion( stepResult )
-        main.HA.generateGraph( "HAfullNetPartition" )
 
         main.testSetUp.ONOSSetUp( main.Mininet1, main.Cluster, cellName=cellName, removeLog=True,
                                   extraApply=[ main.HA.startingMininet,
@@ -244,9 +243,8 @@
             main.cleanAndExit()
         """
         main.step( "Checking ONOS nodes" )
-        nodeResults = utilities.retry( main.HA.nodesCheck,
+        nodeResults = utilities.retry( main.Cluster.nodesCheck,
                                        False,
-                                       args=[ main.Cluster.active() ],
                                        sleep=15,
                                        attempts=5 )
 
diff --git a/TestON/tests/HA/HAkillNodes/HAkillNodes.params b/TestON/tests/HA/HAkillNodes/HAkillNodes.params
index d8f3d31..409bd1f 100644
--- a/TestON/tests/HA/HAkillNodes/HAkillNodes.params
+++ b/TestON/tests/HA/HAkillNodes/HAkillNodes.params
@@ -21,6 +21,11 @@
     #CASE17: Check for basic functionality with distributed primitives
     <testcases>1,2,8,21,3,4,5,14,16,17,[61,8,7,4,15,17,62],8,7,4,15,17,9,8,4,10,8,4,11,8,4,12,8,4,13</testcases>
 
+    <GRAPH>
+        <nodeCluster>VM</nodeCluster>
+        <builds>20</builds>
+    </GRAPH>
+
     <apps></apps>
     <ONOS_Configuration>
         <org.onosproject.net.intent.impl.compiler.IntentConfigurableRegistrator>
@@ -30,7 +35,7 @@
     </ONOS_Configuration>
     <ENV>
         <cellName>HA</cellName>
-        <appString>drivers,openflow,proxyarp,mobility</appString>
+        <appString>drivers,openflow,proxyarp,mobility,events</appString>
     </ENV>
     <GIT>
         <pull>False</pull>
diff --git a/TestON/tests/HA/HAkillNodes/HAkillNodes.py b/TestON/tests/HA/HAkillNodes/HAkillNodes.py
index 01ebe38..cd47131 100644
--- a/TestON/tests/HA/HAkillNodes/HAkillNodes.py
+++ b/TestON/tests/HA/HAkillNodes/HAkillNodes.py
@@ -87,7 +87,6 @@
         except Exception as e:
             main.testSetUp.envSetupException( e )
         main.testSetUp.evnSetupConclusion( stepResult )
-        main.HA.generateGraph( "HAkillNodes" )
 
         main.testSetUp.ONOSSetUp( main.Mininet1, main.Cluster, cellName=cellName, removeLog=True,
                                   extraApply=[ main.HA.startingMininet,
@@ -161,9 +160,8 @@
                                  onfail="ONOS nodes NOT successfully killed" )
 
         main.step( "Checking ONOS nodes" )
-        nodeResults = utilities.retry( main.HA.nodesCheck,
+        nodeResults = utilities.retry( main.Cluster.nodesCheck,
                                        False,
-                                       args=[ main.Cluster.active() ],
                                        sleep=15,
                                        attempts=5 )
 
@@ -183,7 +181,7 @@
         """
         The bring up stopped nodes
         """
-        main.HA.bringUpStoppedNode( main )
+        main.HA.bringUpStoppedNodes( main )
 
     def CASE7( self, main ):
         """
diff --git a/TestON/tests/HA/HAsanity/HAsanity.params b/TestON/tests/HA/HAsanity/HAsanity.params
index 5c298ec..5a9f8f9 100644
--- a/TestON/tests/HA/HAsanity/HAsanity.params
+++ b/TestON/tests/HA/HAsanity/HAsanity.params
@@ -20,6 +20,11 @@
     #1,2,8,21,8,3,4,5,14,16,17,[6],8,7,4,15,17,9,8,4,10,8,4,11,8,4,12,8,4,13
     <testcases>1,2,8,21,8,3,4,5,14,16,17,[6],8,7,4,15,17,9,8,4,10,8,4,11,8,4,12,8,4,13</testcases>
 
+    <GRAPH>
+        <nodeCluster>VM</nodeCluster>
+        <builds>20</builds>
+    </GRAPH>
+
     <apps></apps>
     <ONOS_Configuration>
         <org.onosproject.net.intent.impl.compiler.IntentConfigurableRegistrator>
@@ -29,7 +34,7 @@
     </ONOS_Configuration>
     <ENV>
         <cellName>HA</cellName>
-        <appString>drivers,openflow,proxyarp,mobility</appString>
+        <appString>events,drivers,openflow,proxyarp,mobility</appString>
     </ENV>
     <GIT>
         <pull>False</pull>
diff --git a/TestON/tests/HA/HAsanity/HAsanity.py b/TestON/tests/HA/HAsanity/HAsanity.py
index baff818..eb90c53 100644
--- a/TestON/tests/HA/HAsanity/HAsanity.py
+++ b/TestON/tests/HA/HAsanity/HAsanity.py
@@ -86,7 +86,6 @@
         except Exception as e:
             main.testSetUp.envSetupException( e )
         main.testSetUp.evnSetupConclusion( stepResult )
-        main.HA.generateGraph( "HAsanity" )
 
         main.testSetUp.ONOSSetUp( main.Mininet1, main.Cluster, cellName=cellName, removeLog=True,
                                   extraApply=main.HA.startingMininet )
diff --git a/TestON/tests/HA/HAscaling/HAscaling.params b/TestON/tests/HA/HAscaling/HAscaling.params
index 9fd1760..233a55d 100644
--- a/TestON/tests/HA/HAscaling/HAscaling.params
+++ b/TestON/tests/HA/HAscaling/HAscaling.params
@@ -19,6 +19,11 @@
     #CASE17: Check for basic functionality with distributed primitives
     <testcases>1,2,8,21,3,8,4,5,14,16,17,[6,8,7,4,15,17]*9,9,8,4,10,8,4,11,8,4,12,8,4,13</testcases>
 
+    <GRAPH>
+        <nodeCluster>VM</nodeCluster>
+        <builds>20</builds>
+    </GRAPH>
+
     <scaling>1,3b,5b,7b,7,7b,5b,3b,1</scaling>
     <server>
         <port>8000</port>
diff --git a/TestON/tests/HA/HAscaling/HAscaling.py b/TestON/tests/HA/HAscaling/HAscaling.py
index 63aa1e5..d4b7b32 100644
--- a/TestON/tests/HA/HAscaling/HAscaling.py
+++ b/TestON/tests/HA/HAscaling/HAscaling.py
@@ -94,7 +94,6 @@
         except Exception as e:
             main.testSetUp.envSetupException( e )
         main.testSetUp.evnSetupConclusion( stepResult )
-        main.HA.generateGraph( "HAscaling", index=1 )
 
         main.testSetUp.ONOSSetUp( main.Mininet1, main.Cluster, cellName=cellName, removeLog=True,
                                   extraApply=[ main.HA.setServerForCluster,
@@ -203,9 +202,8 @@
         main.Cluster.startCLIs()
 
         main.step( "Checking ONOS nodes" )
-        nodeResults = utilities.retry( main.HA.nodesCheck,
+        nodeResults = utilities.retry( main.Cluster.nodesCheck,
                                        False,
-                                       args=[ main.Cluster.active() ],
                                        attempts=5 )
         utilities.assert_equals( expect=True, actual=nodeResults,
                                  onpass="Nodes check successful",
diff --git a/TestON/tests/HA/HAsingleInstanceRestart/HAsingleInstanceRestart.params b/TestON/tests/HA/HAsingleInstanceRestart/HAsingleInstanceRestart.params
index 67a655a..e93e655 100644
--- a/TestON/tests/HA/HAsingleInstanceRestart/HAsingleInstanceRestart.params
+++ b/TestON/tests/HA/HAsingleInstanceRestart/HAsingleInstanceRestart.params
@@ -18,6 +18,11 @@
     #CASE17: Check for basic functionality with distributed primitives
     <testcases>1,2,8,3,4,5,14,15,16,17,[6],8,3,7,4,15,17,9,8,4,10,8,4,11,8,4,12,8,4,13</testcases>
 
+    <GRAPH>
+        <nodeCluster>VM</nodeCluster>
+        <builds>20</builds>
+    </GRAPH>
+
     <apps></apps>
     <ONOS_Configuration>
         <org.onosproject.net.intent.impl.compiler.IntentConfigurableRegistrator>
diff --git a/TestON/tests/HA/HAsingleInstanceRestart/HAsingleInstanceRestart.py b/TestON/tests/HA/HAsingleInstanceRestart/HAsingleInstanceRestart.py
index 584232a..74b2fc1 100644
--- a/TestON/tests/HA/HAsingleInstanceRestart/HAsingleInstanceRestart.py
+++ b/TestON/tests/HA/HAsingleInstanceRestart/HAsingleInstanceRestart.py
@@ -97,7 +97,6 @@
         except Exception as e:
             main.testSetUp.envSetupException( e )
         main.testSetUp.evnSetupConclusion( stepResult )
-        main.HA.generateGraph( "HAsingleInstanceRestart" )
         main.Cluster.setRunningNode( int( main.params[ 'num_controllers' ] ) )
         ip = main.Cluster.getIps( allNode=True )
         main.testSetUp.ONOSSetUp( main.Mininet1, main.Cluster, cellName="SingleHA", removeLog=True,
@@ -716,9 +715,8 @@
                                  onpass="Topology Check Test successful",
                                  onfail="Topology Check Test NOT successful" )
         main.step( "Checking ONOS nodes" )
-        nodeResults = utilities.retry( main.HA.nodesCheck,
+        nodeResults = utilities.retry( main.Cluster.nodesCheck,
                                        False,
-                                       args=[ main.Cluster.active() ],
                                        attempts=5 )
 
         utilities.assert_equals( expect=True, actual=nodeResults,
diff --git a/TestON/tests/HA/HAstopNodes/HAstopNodes.params b/TestON/tests/HA/HAstopNodes/HAstopNodes.params
index d8f3d31..de7f775 100644
--- a/TestON/tests/HA/HAstopNodes/HAstopNodes.params
+++ b/TestON/tests/HA/HAstopNodes/HAstopNodes.params
@@ -21,6 +21,11 @@
     #CASE17: Check for basic functionality with distributed primitives
     <testcases>1,2,8,21,3,4,5,14,16,17,[61,8,7,4,15,17,62],8,7,4,15,17,9,8,4,10,8,4,11,8,4,12,8,4,13</testcases>
 
+    <GRAPH>
+        <nodeCluster>VM</nodeCluster>
+        <builds>20</builds>
+    </GRAPH>
+
     <apps></apps>
     <ONOS_Configuration>
         <org.onosproject.net.intent.impl.compiler.IntentConfigurableRegistrator>
@@ -30,7 +35,7 @@
     </ONOS_Configuration>
     <ENV>
         <cellName>HA</cellName>
-        <appString>drivers,openflow,proxyarp,mobility</appString>
+        <appString>events,drivers,openflow,proxyarp,mobility</appString>
     </ENV>
     <GIT>
         <pull>False</pull>
diff --git a/TestON/tests/HA/HAstopNodes/HAstopNodes.py b/TestON/tests/HA/HAstopNodes/HAstopNodes.py
index 7b57730..4c8fe1d 100644
--- a/TestON/tests/HA/HAstopNodes/HAstopNodes.py
+++ b/TestON/tests/HA/HAstopNodes/HAstopNodes.py
@@ -87,7 +87,6 @@
         except Exception as e:
             main.testSetUp.envSetupException( e )
         main.testSetUp.evnSetupConclusion( stepResult )
-        main.HA.generateGraph( "HAstopNodes" )
 
         main.testSetUp.ONOSSetUp( main.Mininet1, main.Cluster, cellName=cellName, removeLog=True,
                                   extraApply=[ main.HA.startingMininet,
@@ -160,9 +159,8 @@
                                  onfail="ONOS nodes NOT successfully stopped" )
 
         main.step( "Checking ONOS nodes" )
-        nodeResults = utilities.retry( main.HA.nodesCheck,
+        nodeResults = utilities.retry( main.Cluster.nodesCheck,
                                        False,
-                                       args=[ main.Cluster.active() ],
                                        sleep=15,
                                        attempts=5 )
 
@@ -182,7 +180,7 @@
         """
         The bring up stopped nodes
         """
-        main.HA.bringUpStoppedNode( main )
+        main.HA.bringUpStoppedNodes( main )
 
     def CASE7( self, main ):
         """
diff --git a/TestON/tests/HA/HAswapNodes/HAswapNodes.params b/TestON/tests/HA/HAswapNodes/HAswapNodes.params
index cf395cb..f78f98d 100644
--- a/TestON/tests/HA/HAswapNodes/HAswapNodes.params
+++ b/TestON/tests/HA/HAswapNodes/HAswapNodes.params
@@ -19,6 +19,11 @@
     #CASE17: Check for basic functionality with distributed primitives
     <testcases>1,[2,8,21,3,8,4,5,14,16,17]*1,6,[8,3,7,4,15,17,9,8,4,10,8,4,11,8,4,12,8,4]*1,13</testcases>
 
+    <GRAPH>
+        <nodeCluster>VM</nodeCluster>
+        <builds>20</builds>
+    </GRAPH>
+
     <server>
         <port>8000</port>
         <interface>eth0</interface>
diff --git a/TestON/tests/HA/HAswapNodes/HAswapNodes.py b/TestON/tests/HA/HAswapNodes/HAswapNodes.py
index e0751f6..b221347 100644
--- a/TestON/tests/HA/HAswapNodes/HAswapNodes.py
+++ b/TestON/tests/HA/HAswapNodes/HAswapNodes.py
@@ -93,7 +93,6 @@
         except Exception as e:
             main.testSetUp.envSetupException( e )
         main.testSetUp.evnSetupConclusion( stepResult )
-        main.HA.generateGraph( "HAswapNodes" )
 
         main.testSetUp.ONOSSetUp( main.Mininet1, main.Cluster, cellName=cellName, removeLog=True,
                                   extraApply=[ main.HA.setServerForCluster,
@@ -201,9 +200,8 @@
         main.testSetUp.startOnosClis( main.Cluster )
 
         main.step( "Checking ONOS nodes" )
-        nodeResults = utilities.retry( main.HA.nodesCheck,
+        nodeResults = utilities.retry( main.Cluster.nodesCheck,
                                        False,
-                                       args=[ main.Cluster.active() ],
                                        attempts=5 )
         utilities.assert_equals( expect=True, actual=nodeResults,
                                  onpass="Nodes check successful",
diff --git a/TestON/tests/HA/dependencies/HA.py b/TestON/tests/HA/dependencies/HA.py
index 2227146..fa75618 100644
--- a/TestON/tests/HA/dependencies/HA.py
+++ b/TestON/tests/HA/dependencies/HA.py
@@ -271,56 +271,10 @@
         main.log.error( "Inconsistent leaderboards:" + str( leaderList ) )
         return ( result, leaderList )
 
-    def nodesCheck( self, nodes ):
-        nodesOutput = []
-        results = True
-        threads = []
-        for node in nodes:
-            t = main.Thread( target=node.nodes,
-                             name="nodes-" + str( node ),
-                             args=[] )
-            threads.append( t )
-            t.start()
-
-        for t in threads:
-            t.join()
-            nodesOutput.append( t.result )
-        ips = sorted( main.Cluster.getIps( activeOnly=True ) )
-        for i in nodesOutput:
-            try:
-                current = json.loads( i )
-                activeIps = []
-                currentResult = False
-                for node in current:
-                    if node[ 'state' ] == 'READY':
-                        activeIps.append( node[ 'ip' ] )
-                activeIps.sort()
-                if ips == activeIps:
-                    currentResult = True
-            except ( ValueError, TypeError ):
-                main.log.error( "Error parsing nodes output" )
-                main.log.warn( repr( i ) )
-                currentResult = False
-            results = results and currentResult
-        return results
-
     def generateGraph( self, testName, plotName="Plot-HA", index=2 ):
-        # GRAPHS
-        # NOTE: important params here:
-        #       job = name of Jenkins job
-        #       Plot Name = Plot-HA, only can be used if multiple plots
-        #       index = The number of the graph under plot name
-        job = testName
-        graphs = '<ac:structured-macro ac:name="html">\n'
-        graphs += '<ac:plain-text-body><![CDATA[\n'
-        graphs += '<iframe src="https://onos-jenkins.onlab.us/job/' + job +\
-                  '/plot/' + plotName + '/getPlot?index=' + str( index ) +\
-                  '&width=500&height=300"' +\
-                  'noborder="0" width="500" height="300" scrolling="yes" ' +\
-                  'seamless="seamless"></iframe>\n'
-        graphs += ']]></ac:plain-text-body>\n'
-        graphs += '</ac:structured-macro>\n'
-        main.log.wiki( graphs )
+        # DEPRECATED: ONOSSetup.py now creates these graphs.
+
+        main.log.debug( "HA.generateGraph() is deprecated; ONOSSetup now creates these graphs." )
 
     def initialSetUp( self, serviceClean=False ):
         """
@@ -342,10 +296,9 @@
             main.ONOSbench.handle.expect( "\$" )
 
         main.step( "Checking ONOS nodes" )
-        nodeResults = utilities.retry( self.nodesCheck,
+        nodeResults = utilities.retry( main.Cluster.nodesCheck,
                                        False,
-                                       args=[ main.Cluster.active() ],
-                                       attempts=5 )
+                                       attempts=9 )
 
         utilities.assert_equals( expect=True, actual=nodeResults,
                                  onpass="Nodes check successful",
@@ -859,7 +812,7 @@
         main.step( "Check Intent state" )
         installedCheck = False
         loopCount = 0
-        while not installedCheck and loopCount < 40:
+        while not installedCheck and loopCount < 90:
             installedCheck = True
             # Print the intent states
             intents = onosCli.CLI.intents()
@@ -2579,17 +2532,17 @@
         else:
             main.log.debug( "skipping saving log files" )
 
+        main.step( "Checking ONOS Logs for errors" )
+        for ctrl in main.Cluster.runningNodes:
+            main.log.debug( "Checking logs for errors on " + ctrl.name + ":" )
+            main.log.warn( main.ONOSbench.checkLogs( ctrl.ipAddress ) )
+
         main.step( "Stopping Mininet" )
         mnResult = main.Mininet1.stopNet()
         utilities.assert_equals( expect=main.TRUE, actual=mnResult,
                                  onpass="Mininet stopped",
                                  onfail="MN cleanup NOT successful" )
 
-        main.step( "Checking ONOS Logs for errors" )
-        for ctrl in main.Cluster.runningNodes:
-            main.log.debug( "Checking logs for errors on " + ctrl.name + ":" )
-            main.log.warn( main.ONOSbench.checkLogs( ctrl.ipAddress ) )
-
         try:
             timerLog = open( main.logdir + "/Timers.csv", 'w' )
             main.log.debug( ", ".join( main.HAlabels ) + "\n" + ", ".join( main.HAdata ) )
@@ -2710,9 +2663,9 @@
                    "controller",
             onfail="Switches were not successfully reassigned" )
 
-    def bringUpStoppedNode( self, main ):
+    def bringUpStoppedNodes( self, main ):
         """
-        The bring up stopped nodes
+        The bring up stopped nodes.
         """
         import time
         assert main, "main not defined"
@@ -2743,7 +2696,7 @@
                                  onpass="ONOS restarted successfully",
                                  onfail="ONOS restart NOT successful" )
 
-        main.step( "Restarting ONOS nodes" )
+        main.step( "Restarting ONOS CLI" )
         cliResults = main.TRUE
         for ctrl in main.kill:
             cliResults = cliResults and\
@@ -2753,15 +2706,13 @@
                                  onpass="ONOS node(s) restarted",
                                  onfail="ONOS node(s) did not restart" )
 
-        # Grab the time of restart so we chan check how long the gossip
-        # protocol has had time to work
+        # Grab the time of restart so we can have some idea of average time
         main.restartTime = time.time() - restartTime
         main.log.debug( "Restart time: " + str( main.restartTime ) )
         # TODO: MAke this configurable. Also, we are breaking the above timer
         main.step( "Checking ONOS nodes" )
-        nodeResults = utilities.retry( self.nodesCheck,
+        nodeResults = utilities.retry( main.Cluster.nodesCheck,
                                        False,
-                                       args=[ main.Cluster.active() ],
                                        sleep=15,
                                        attempts=5 )
 
@@ -2786,7 +2737,96 @@
                          ctrl.electionTestRun()
         utilities.assert_equals( expect=main.TRUE, actual=runResults,
                                  onpass="ONOS nodes reran for election topic",
-                                 onfail="Errror rerunning for election" )
+                                 onfail="Error rerunning for election" )
+
+    def upgradeNodes( self, main ):
+        """
+        Reinstall some nodes with an upgraded version.
+
+        This will reinstall nodes in main.kill with an upgraded version.
+        """
+        import time
+        assert main, "main not defined"
+        assert utilities.assert_equals, "utilities.assert_equals not defined"
+        assert main.kill, "main.kill not defined"
+        nodeNames = [ node.name for node in main.kill ]
+        main.step( "Upgrading " + str( nodeNames ) + " ONOS nodes" )
+
+        stopResults = main.TRUE
+        uninstallResults = main.TRUE
+        startResults = main.TRUE
+        sshResults = main.TRUE
+        isup = main.TRUE
+        restartTime = time.time()
+        for ctrl in main.kill:
+            stopResults = stopResults and\
+                          ctrl.onosStop( ctrl.ipAddress )
+            uninstallResults = uninstallResults and\
+                               ctrl.onosUninstall( ctrl.ipAddress )
+            # Install the new version of onos
+            startResults = startResults and\
+                           ctrl.onosInstall( options="-fv", node=ctrl.ipAddress )
+            sshResults = sshResults and\
+                           ctrl.onosSecureSSH( node=ctrl.ipAddress )
+            isup = isup and ctrl.isup( ctrl.ipAddress )
+        utilities.assert_equals( expect=main.TRUE, actual=stopResults,
+                                 onpass="ONOS nodes stopped successfully",
+                                 onfail="ONOS nodes NOT successfully stopped" )
+        utilities.assert_equals( expect=main.TRUE, actual=uninstallResults,
+                                 onpass="ONOS nodes uninstalled successfully",
+                                 onfail="ONOS nodes NOT successfully uninstalled" )
+        utilities.assert_equals( expect=main.TRUE, actual=startResults,
+                                 onpass="ONOS nodes started successfully",
+                                 onfail="ONOS nodes NOT successfully started" )
+        utilities.assert_equals( expect=main.TRUE, actual=sshResults,
+                                 onpass="Successfully secured onos ssh",
+                                 onfail="Failed to secure onos ssh" )
+        utilities.assert_equals( expect=main.TRUE, actual=isup,
+                                 onpass="ONOS nodes fully started",
+                                 onfail="ONOS nodes NOT fully started" )
+
+        main.step( "Restarting ONOS CLI" )
+        cliResults = main.TRUE
+        for ctrl in main.kill:
+            cliResults = cliResults and\
+                         ctrl.startOnosCli( ctrl.ipAddress )
+            ctrl.active = True
+        utilities.assert_equals( expect=main.TRUE, actual=cliResults,
+                                 onpass="ONOS node(s) restarted",
+                                 onfail="ONOS node(s) did not restart" )
+
+        # Grab the time of restart so we can have some idea of average time
+        main.restartTime = time.time() - restartTime
+        main.log.debug( "Restart time: " + str( main.restartTime ) )
+        # TODO: Make this configurable.
+        main.step( "Checking ONOS nodes" )
+        nodeResults = utilities.retry( main.Cluster.nodesCheck,
+                                       False,
+                                       sleep=15,
+                                       attempts=5 )
+
+        utilities.assert_equals( expect=True, actual=nodeResults,
+                                 onpass="Nodes check successful",
+                                 onfail="Nodes check NOT successful" )
+
+        if not nodeResults:
+            for ctrl in main.Cluster.active():
+                main.log.debug( "{} components not ACTIVE: \n{}".format(
+                    ctrl.name,
+                    ctrl.CLI.sendline( "scr:list | grep -v ACTIVE" ) ) )
+            main.log.error( "Failed to start ONOS, stopping test" )
+            main.cleanAndExit()
+
+        self.commonChecks()
+
+        main.step( "Rerun for election on the node(s) that were killed" )
+        runResults = main.TRUE
+        for ctrl in main.kill:
+            runResults = runResults and\
+                         ctrl.electionTestRun()
+        utilities.assert_equals( expect=main.TRUE, actual=runResults,
+                                 onpass="ONOS nodes reran for election topic",
+                                 onfail="Error rerunning for election" )
 
     def tempCell( self, cellName, ipList ):
         main.step( "Create cell file" )
@@ -3096,8 +3136,8 @@
 
             elapsed = time.time() - startTime
             cliTime = time.time() - cliStart
-            print "Elapsed time: " + str( elapsed )
-            print "CLI time: " + str( cliTime )
+            main.log.debug( "Elapsed time: " + str( elapsed ) )
+            main.log.debug( "CLI time: " + str( cliTime ) )
 
             if all( e is None for e in devices ) and\
                all( e is None for e in hosts ) and\
@@ -3378,9 +3418,8 @@
 
         # FIXME: move this to an ONOS state case
         main.step( "Checking ONOS nodes" )
-        nodeResults = utilities.retry( self.nodesCheck,
+        nodeResults = utilities.retry( main.Cluster.nodesCheck,
                                        False,
-                                       args=[ main.Cluster.active() ],
                                        attempts=5 )
         utilities.assert_equals( expect=True, actual=nodeResults,
                                  onpass="Nodes check successful",
@@ -3542,15 +3581,18 @@
             onfail="Inconsistent leaderboards" )
 
         if sameResult:
+            # Check that the leader is one of the active nodes
+            ips = sorted( main.Cluster.getIps( activeOnly=True ) )
             leader = leaders[ 0 ][ 0 ]
-            if onosCli.ipAddress in leader:
-                correctLeader = True
+            if leader in ips:
+                legitimate = True
             else:
-                correctLeader = False
-            main.step( "First node was elected leader" )
+                legitimate = False
+                main.log.debug( leaders )
+            main.step( "Active node was elected leader?" )
             utilities.assert_equals(
                 expect=True,
-                actual=correctLeader,
+                actual=legitimate,
                 onpass="Correct leader was elected",
                 onfail="Incorrect leader" )
             main.Cluster.testLeader = leader
@@ -3669,18 +3711,6 @@
             else:
                 main.log.info( "Expected no leader, got: " + str( newLeader ) )
                 correctCandidateResult = main.FALSE
-        elif len( oldLeaders[ 0 ] ) >= 3:
-            if newLeader == oldLeaders[ 0 ][ 2 ]:
-                # correct leader was elected
-                correctCandidateResult = main.TRUE
-            else:
-                correctCandidateResult = main.FALSE
-                main.log.error( "Candidate {} was elected. {} should have had priority.".format(
-                                    newLeader, oldLeaders[ 0 ][ 2 ] ) )
-        else:
-            main.log.warn( "Could not determine who should be the correct leader" )
-            main.log.debug( oldLeaders[ 0 ] )
-            correctCandidateResult = main.FALSE
         utilities.assert_equals(
             expect=main.TRUE,
             actual=correctCandidateResult,
@@ -3708,24 +3738,10 @@
         time.sleep( 5 )  # Paremterize
         positionResult, reRunLeaders = self.consistentLeaderboards( activeCLIs )
 
-        # Check that the re-elected node is last on the candidate List
-        if not reRunLeaders[ 0 ]:
-            positionResult = main.FALSE
-        elif oldLeader != reRunLeaders[ 0 ][ -1 ]:
-            main.log.error( "Old Leader ({}) not in the proper position: {} ".format( str( oldLeader ),
-                                                                                      str( reRunLeaders[ 0 ] ) ) )
-            positionResult = main.FALSE
-        utilities.assert_equals(
-            expect=True,
-            actual=positionResult,
-            onpass="Old leader successfully re-ran for election",
-            onfail="Something went wrong with Leadership election after " +
-                   "the old leader re-ran for election" )
-
     def installDistributedPrimitiveApp( self, main ):
-        """
+        '''
         Install Distributed Primitives app
-        """
+        '''
         import time
         assert main, "main not defined"
         assert utilities.assert_equals, "utilities.assert_equals not defined"
diff --git a/TestON/tests/MISC/SCPFbatchFlowResp/SCPFbatchFlowResp.params b/TestON/tests/MISC/SCPFbatchFlowResp/SCPFbatchFlowResp.params
index 333ac26..01d4f3e 100755
--- a/TestON/tests/MISC/SCPFbatchFlowResp/SCPFbatchFlowResp.params
+++ b/TestON/tests/MISC/SCPFbatchFlowResp/SCPFbatchFlowResp.params
@@ -15,6 +15,10 @@
 
     <!-- <testcases>1,10,100,1000,100,2000,100,110</testcases> -->
     <testcases>1,2,10,100,1000,2100,100,3100,100,110,210</testcases>
+    <GRAPH>
+        <nodeCluster>BM</nodeCluster>
+        <builds>20</builds>
+    </GRAPH>
     <GIT>
         <pull>False</pull>
         <branch>master</branch>
diff --git a/TestON/tests/SCPF/SCPFcbench/SCPFcbench.params b/TestON/tests/SCPF/SCPFcbench/SCPFcbench.params
index 633955f..80b2382 100644
--- a/TestON/tests/SCPF/SCPFcbench/SCPFcbench.params
+++ b/TestON/tests/SCPF/SCPFcbench/SCPFcbench.params
@@ -2,6 +2,11 @@
 
     <testcases>1,2</testcases>
 
+    <GRAPH>
+        <nodeCluster>BM</nodeCluster>
+        <builds>20</builds>
+    </GRAPH>
+
     <SCALE>1</SCALE>
     <availableNodes>1</availableNodes>
 
diff --git a/TestON/tests/SCPF/SCPFflowTp1g/SCPFflowTp1g.params b/TestON/tests/SCPF/SCPFflowTp1g/SCPFflowTp1g.params
index 1a910d3..b374227 100644
--- a/TestON/tests/SCPF/SCPFflowTp1g/SCPFflowTp1g.params
+++ b/TestON/tests/SCPF/SCPFflowTp1g/SCPFflowTp1g.params
@@ -2,6 +2,11 @@
 
     <testcases>0,1,2,1,2,1,2,1,2,1,2,1,2,1,2</testcases>
 
+    <GRAPH>
+        <nodeCluster>BM</nodeCluster>
+        <builds>20</builds>
+    </GRAPH>
+
     <SCALE>1,3,3,5,5,7,7</SCALE>
     <max>7</max>
 
diff --git a/TestON/tests/SCPF/SCPFhostLat/SCPFhostLat.params b/TestON/tests/SCPF/SCPFhostLat/SCPFhostLat.params
index 566bd34..17a8bd3 100644
--- a/TestON/tests/SCPF/SCPFhostLat/SCPFhostLat.params
+++ b/TestON/tests/SCPF/SCPFhostLat/SCPFhostLat.params
@@ -1,6 +1,11 @@
 <PARAMS>
     <testcases>0,2,11,20,2,11,20,2,11,20,2,11,20</testcases>
 
+    <GRAPH>
+        <nodeCluster>BM</nodeCluster>
+        <builds>20</builds>
+    </GRAPH>
+
     <SCALE>1,3,5,7</SCALE>
 
     <ENV>
diff --git a/TestON/tests/SCPF/SCPFintentEventTp/SCPFintentEventTp.params b/TestON/tests/SCPF/SCPFintentEventTp/SCPFintentEventTp.params
index cf5e7e8..3a069ca 100644
--- a/TestON/tests/SCPF/SCPFintentEventTp/SCPFintentEventTp.params
+++ b/TestON/tests/SCPF/SCPFintentEventTp/SCPFintentEventTp.params
@@ -2,6 +2,11 @@
 
     <testcases>0,1,2,1,2,1,2,1,2,1,2,1,2,1,2</testcases>
 
+    <GRAPH>
+        <nodeCluster>BM</nodeCluster>
+        <builds>20</builds>
+    </GRAPH>
+
     <debugMode></debugMode>  #nothing means false
 
     <ENV>
diff --git a/TestON/tests/SCPF/SCPFintentInstallWithdrawLat/SCPFintentInstallWithdrawLat.params b/TestON/tests/SCPF/SCPFintentInstallWithdrawLat/SCPFintentInstallWithdrawLat.params
index 9d8fd6e..126c311 100644
--- a/TestON/tests/SCPF/SCPFintentInstallWithdrawLat/SCPFintentInstallWithdrawLat.params
+++ b/TestON/tests/SCPF/SCPFintentInstallWithdrawLat/SCPFintentInstallWithdrawLat.params
@@ -2,6 +2,11 @@
 
     <testcases>0,1,2,1,2,1,2,1,2</testcases>
 
+    <GRAPH>
+        <nodeCluster>BM</nodeCluster>
+        <builds>20</builds>
+    </GRAPH>
+
     <SCALE>1,3,5,7</SCALE>
     <max>7</max>
 
diff --git a/TestON/tests/SCPF/SCPFintentRerouteLat/SCPFintentRerouteLat.params b/TestON/tests/SCPF/SCPFintentRerouteLat/SCPFintentRerouteLat.params
index d1aec26..8a758e3 100644
--- a/TestON/tests/SCPF/SCPFintentRerouteLat/SCPFintentRerouteLat.params
+++ b/TestON/tests/SCPF/SCPFintentRerouteLat/SCPFintentRerouteLat.params
@@ -2,6 +2,11 @@
 
     <testcases>0,1,2,1,2,1,2,1,2</testcases>
 
+    <GRAPH>
+        <nodeCluster>BM</nodeCluster>
+        <builds>20</builds>
+    </GRAPH>
+
     <SCALE>1,3,5,7</SCALE>
     <max>7</max>
 
diff --git a/TestON/tests/SCPF/SCPFmastershipFailoverLat/SCPFmastershipFailoverLat.params b/TestON/tests/SCPF/SCPFmastershipFailoverLat/SCPFmastershipFailoverLat.params
index 1b4fc02..497bb9d 100644
--- a/TestON/tests/SCPF/SCPFmastershipFailoverLat/SCPFmastershipFailoverLat.params
+++ b/TestON/tests/SCPF/SCPFmastershipFailoverLat/SCPFmastershipFailoverLat.params
@@ -1,6 +1,11 @@
 <PARAMS>
     <testcases>0,[1,2,3]*3</testcases>
 
+    <GRAPH>
+        <nodeCluster>BM</nodeCluster>
+        <builds>20</builds>
+    </GRAPH>
+
     <SCALE>3,5,7</SCALE>
     <max>7</max>
 
diff --git a/TestON/tests/SCPF/SCPFmastershipFailoverLat/SCPFmastershipFailoverLat.py b/TestON/tests/SCPF/SCPFmastershipFailoverLat/SCPFmastershipFailoverLat.py
index c534841..e591381 100644
--- a/TestON/tests/SCPF/SCPFmastershipFailoverLat/SCPFmastershipFailoverLat.py
+++ b/TestON/tests/SCPF/SCPFmastershipFailoverLat/SCPFmastershipFailoverLat.py
@@ -261,9 +261,8 @@
                 criticalError = True
 
             main.log.info( "Checking ONOS nodes." )
-            nodeResults = utilities.retry( main.HA.nodesCheck,
+            nodeResults = utilities.retry( main.Cluster.nodesCheck,
                                            False,
-                                           args=[ main.Cluster.active() ],
                                            sleep=1,
                                            attempts=3 )
 
diff --git a/TestON/tests/SCPF/SCPFportLat/SCPFportLat.params b/TestON/tests/SCPF/SCPFportLat/SCPFportLat.params
index 65e02fa..e558365 100644
--- a/TestON/tests/SCPF/SCPFportLat/SCPFportLat.params
+++ b/TestON/tests/SCPF/SCPFportLat/SCPFportLat.params
@@ -1,6 +1,11 @@
 <PARAMS>
     <testcases>0,1,2,1,2,1,2,1,2</testcases>
 
+    <GRAPH>
+        <nodeCluster>BM</nodeCluster>
+        <builds>20</builds>
+    </GRAPH>
+
     <SCALE>1,3,5,7</SCALE>
     <max>7</max>
 
diff --git a/TestON/tests/SCPF/SCPFscaleTopo/SCPFscaleTopo.params b/TestON/tests/SCPF/SCPFscaleTopo/SCPFscaleTopo.params
index b152611..2ef69df 100755
--- a/TestON/tests/SCPF/SCPFscaleTopo/SCPFscaleTopo.params
+++ b/TestON/tests/SCPF/SCPFscaleTopo/SCPFscaleTopo.params
@@ -10,6 +10,11 @@
     # 1,[2,10,300,11,100,300,11,200,300,11,1000]*3
     <testcases>1,[3,2,10,300,11,1000]*7,3</testcases>
 
+    <GRAPH>
+        <nodeCluster>BM</nodeCluster>
+        <builds>20</builds>
+    </GRAPH>
+
     <DEPENDENCY>
         <path>/tests/SCPF/SCPFscaleTopo/dependencies/</path>
         <wrapper1>startUp</wrapper1>
diff --git a/TestON/tests/SCPF/SCPFscalingMaxIntents/SCPFscalingMaxIntents.params b/TestON/tests/SCPF/SCPFscalingMaxIntents/SCPFscalingMaxIntents.params
index 5cb65d0..26fb3f5 100644
--- a/TestON/tests/SCPF/SCPFscalingMaxIntents/SCPFscalingMaxIntents.params
+++ b/TestON/tests/SCPF/SCPFscalingMaxIntents/SCPFscalingMaxIntents.params
@@ -8,6 +8,11 @@
     # 0,1,2,10,20,1,2,10,20,1,2,10,20
    <testcases>0,1,2,11,20,1,2,11,20,1,2,11,20,1,2,11,20</testcases>
 
+    <GRAPH>
+        <nodeCluster>BM</nodeCluster>
+        <builds>20</builds>
+    </GRAPH>
+
     <reroute>False</reroute>
 
     <SCALE>1,3,5,7</SCALE>
diff --git a/TestON/tests/SCPF/SCPFswitchLat/SCPFswitchLat.params b/TestON/tests/SCPF/SCPFswitchLat/SCPFswitchLat.params
index d77eec3..8680b5c 100644
--- a/TestON/tests/SCPF/SCPFswitchLat/SCPFswitchLat.params
+++ b/TestON/tests/SCPF/SCPFswitchLat/SCPFswitchLat.params
@@ -1,6 +1,11 @@
 <PARAMS>
     <testcases>0,1,2,1,2,1,2,1,2</testcases>
 
+    <GRAPH>
+        <nodeCluster>BM</nodeCluster>
+        <builds>20</builds>
+    </GRAPH>
+
     <SCALE>1,3,5,7</SCALE>
     <max>7</max>
 
diff --git a/TestON/tests/USECASE/SegmentRouting/SRClusterRestart/SRClusterRestart.params b/TestON/tests/USECASE/SegmentRouting/SRClusterRestart/SRClusterRestart.params
index 1a58556..cf284ca 100755
--- a/TestON/tests/USECASE/SegmentRouting/SRClusterRestart/SRClusterRestart.params
+++ b/TestON/tests/USECASE/SegmentRouting/SRClusterRestart/SRClusterRestart.params
@@ -2,6 +2,11 @@
 
     <testcases>1,2,3</testcases>
 
+    <GRAPH>
+        <nodeCluster>BM</nodeCluster>
+        <builds>20</builds>
+    </GRAPH>
+
     <SCALE>
         <size>3</size>
         <max>3</max>
diff --git a/TestON/tests/USECASE/SegmentRouting/SRDynamic/SRDynamic.params b/TestON/tests/USECASE/SegmentRouting/SRDynamic/SRDynamic.params
index a757a48..2fa1d4e 100755
--- a/TestON/tests/USECASE/SegmentRouting/SRDynamic/SRDynamic.params
+++ b/TestON/tests/USECASE/SegmentRouting/SRDynamic/SRDynamic.params
@@ -2,6 +2,11 @@
 
     <testcases>1,2,3,4,5,6</testcases>
 
+    <GRAPH>
+        <nodeCluster>BM</nodeCluster>
+        <builds>20</builds>
+    </GRAPH>
+
     <SCALE>
         <size>3</size>
         <max>3</max>
diff --git a/TestON/tests/USECASE/SegmentRouting/SRHighAvailability/SRHighAvailability.params b/TestON/tests/USECASE/SegmentRouting/SRHighAvailability/SRHighAvailability.params
index 5f990fa..9f61a70 100644
--- a/TestON/tests/USECASE/SegmentRouting/SRHighAvailability/SRHighAvailability.params
+++ b/TestON/tests/USECASE/SegmentRouting/SRHighAvailability/SRHighAvailability.params
@@ -2,6 +2,11 @@
 
     <testcases>1,2,3,4,5,6,7,8</testcases>
 
+    <GRAPH>
+        <nodeCluster>BM</nodeCluster>
+        <builds>20</builds>
+    </GRAPH>
+
     <SCALE>
         <size>3</size>
         <max>3</max>
diff --git a/TestON/tests/USECASE/SegmentRouting/SRLinkFailure/SRLinkFailure.params b/TestON/tests/USECASE/SegmentRouting/SRLinkFailure/SRLinkFailure.params
index 8f3ade9..93e3dbe 100755
--- a/TestON/tests/USECASE/SegmentRouting/SRLinkFailure/SRLinkFailure.params
+++ b/TestON/tests/USECASE/SegmentRouting/SRLinkFailure/SRLinkFailure.params
@@ -2,6 +2,11 @@
 
     <testcases>1,2,4,5</testcases>
 
+    <GRAPH>
+        <nodeCluster>BM</nodeCluster>
+        <builds>20</builds>
+    </GRAPH>
+
     <SCALE>
         <size>3</size>
         <max>3</max>
diff --git a/TestON/tests/USECASE/SegmentRouting/SROnosFailure/SROnosFailure.params b/TestON/tests/USECASE/SegmentRouting/SROnosFailure/SROnosFailure.params
index 1a58556..cf284ca 100755
--- a/TestON/tests/USECASE/SegmentRouting/SROnosFailure/SROnosFailure.params
+++ b/TestON/tests/USECASE/SegmentRouting/SROnosFailure/SROnosFailure.params
@@ -2,6 +2,11 @@
 
     <testcases>1,2,3</testcases>
 
+    <GRAPH>
+        <nodeCluster>BM</nodeCluster>
+        <builds>20</builds>
+    </GRAPH>
+
     <SCALE>
         <size>3</size>
         <max>3</max>
diff --git a/TestON/tests/USECASE/SegmentRouting/SRSanity/SRSanity.params b/TestON/tests/USECASE/SegmentRouting/SRSanity/SRSanity.params
index a757a48..2fa1d4e 100755
--- a/TestON/tests/USECASE/SegmentRouting/SRSanity/SRSanity.params
+++ b/TestON/tests/USECASE/SegmentRouting/SRSanity/SRSanity.params
@@ -2,6 +2,11 @@
 
     <testcases>1,2,3,4,5,6</testcases>
 
+    <GRAPH>
+        <nodeCluster>BM</nodeCluster>
+        <builds>20</builds>
+    </GRAPH>
+
     <SCALE>
         <size>3</size>
         <max>3</max>
diff --git a/TestON/tests/USECASE/SegmentRouting/SRSwitchFailure/SRSwitchFailure.params b/TestON/tests/USECASE/SegmentRouting/SRSwitchFailure/SRSwitchFailure.params
index 8f3ade9..93e3dbe 100755
--- a/TestON/tests/USECASE/SegmentRouting/SRSwitchFailure/SRSwitchFailure.params
+++ b/TestON/tests/USECASE/SegmentRouting/SRSwitchFailure/SRSwitchFailure.params
@@ -2,6 +2,11 @@
 
     <testcases>1,2,4,5</testcases>
 
+    <GRAPH>
+        <nodeCluster>BM</nodeCluster>
+        <builds>20</builds>
+    </GRAPH>
+
     <SCALE>
         <size>3</size>
         <max>3</max>
diff --git a/TestON/tests/USECASE/SegmentRouting/dependencies/Testcaselib.py b/TestON/tests/USECASE/SegmentRouting/dependencies/Testcaselib.py
index f542554..07b0188 100644
--- a/TestON/tests/USECASE/SegmentRouting/dependencies/Testcaselib.py
+++ b/TestON/tests/USECASE/SegmentRouting/dependencies/Testcaselib.py
@@ -422,7 +422,7 @@
 
         if len( nodes ) < main.Cluster.numCtrls:
 
-            nodeResults = utilities.retry( Testcaselib.nodesCheck,
+            nodeResults = utilities.retry( main.Cluster.nodesCheck,
                                            False,
                                            attempts=5,
                                            sleep=10 )
@@ -478,9 +478,8 @@
                                      onfail="ONOS CLI is not ready" )
 
         main.step( "Checking ONOS nodes" )
-        nodeResults = utilities.retry( Testcaselib.nodesCheck,
+        nodeResults = utilities.retry( main.Cluster.nodesCheck,
                                        False,
-                                       args=[ nodes ],
                                        attempts=5,
                                        sleep=10 )
         utilities.assert_equals( expect=True, actual=nodeResults,
@@ -587,28 +586,3 @@
         main.Cluster.active( 0 ).REST.removeNetCfg( subjectClass="apps",
                                                     subjectKey="org.onosproject.segmentrouting",
                                                     configKey="xconnect" )
-
-    @staticmethod
-    def nodesCheck( nodes ):
-        results = True
-        nodesOutput = main.Cluster.command( "nodes", specificDriver=2 )
-        ips = sorted( main.Cluster.getIps( activeOnly=True ) )
-        for i in nodesOutput:
-            try:
-                current = json.loads( i )
-                activeIps = []
-                currentResult = False
-                for node in current:
-                    if node[ 'state' ] == 'READY':
-                        activeIps.append( node[ 'ip' ] )
-                currentResult = True
-                for ip in ips:
-                    if ip not in activeIps:
-                        currentResult = False
-                        break
-            except ( ValueError, TypeError ):
-                main.log.error( "Error parsing nodes output" )
-                main.log.warn( repr( i ) )
-                currentResult = False
-            results = results and currentResult
-        return results
diff --git a/TestON/tests/USECASE/USECASE_SdnipFunction/USECASE_SdnipFunction.params b/TestON/tests/USECASE/USECASE_SdnipFunction/USECASE_SdnipFunction.params
index 8b401d6..bf87224 100644
--- a/TestON/tests/USECASE/USECASE_SdnipFunction/USECASE_SdnipFunction.params
+++ b/TestON/tests/USECASE/USECASE_SdnipFunction/USECASE_SdnipFunction.params
@@ -2,6 +2,11 @@
 
     <testcases>101, 100, 200, 102, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10</testcases>
 
+    <GRAPH>
+        <nodeCluster>BM</nodeCluster>
+        <builds>20</builds>
+    </GRAPH>
+
     #Environment variables
     <ENV>
         #Cells that you use
diff --git a/TestON/tests/USECASE/USECASE_SdnipFunction/dependencies/USECASE_SdnipI2MN.py b/TestON/tests/USECASE/USECASE_SdnipFunction/dependencies/USECASE_SdnipI2MN.py
index 23607ad..14a7c69 100644
--- a/TestON/tests/USECASE/USECASE_SdnipFunction/dependencies/USECASE_SdnipI2MN.py
+++ b/TestON/tests/USECASE/USECASE_SdnipFunction/dependencies/USECASE_SdnipI2MN.py
@@ -39,7 +39,6 @@
 QUAGGA_DIR = '/usr/lib/quagga'
 QUAGGA_RUN_DIR = '/usr/local/var/run/quagga'
 QUAGGA_CONFIG_DIR = '~/OnosSystemTest/TestON/tests/USECASE/USECASE_SdnipFunction/dependencies/'
-# onos1IP = '10.254.1.201'
 numSw = 39
 
 
diff --git a/TestON/tests/USECASE/USECASE_SdnipFunction/sdnip_single_instance b/TestON/tests/USECASE/USECASE_SdnipFunction/sdnip_single_instance
index c2c51c6..9e0be8e 100644
--- a/TestON/tests/USECASE/USECASE_SdnipFunction/sdnip_single_instance
+++ b/TestON/tests/USECASE/USECASE_SdnipFunction/sdnip_single_instance
@@ -1,8 +1,8 @@
 export ONOS_CELL="sdnip_single_instance"
 
 export ONOS_INSTALL_DIR="/opt/onos"
-export ONOS_NIC=10.254.1.*
-export OC1="10.254.1.201"
+export ONOS_NIC=10.192.19.*
+export OC1="10.192.19.68"
 export OCN="127.0.0.1"
 export OCI="${OC1}"
 export ONOS_USER="sdn"                  # ONOS user on remote system
diff --git a/TestON/tests/USECASE/USECASE_SdnipFunctionCluster/USECASE_SdnipFunctionCluster.params b/TestON/tests/USECASE/USECASE_SdnipFunctionCluster/USECASE_SdnipFunctionCluster.params
index e25086a..3253ed8 100644
--- a/TestON/tests/USECASE/USECASE_SdnipFunctionCluster/USECASE_SdnipFunctionCluster.params
+++ b/TestON/tests/USECASE/USECASE_SdnipFunctionCluster/USECASE_SdnipFunctionCluster.params
@@ -2,6 +2,13 @@
 
     <testcases>101, 100, 200, 102, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12</testcases>
     #Environment variables
+
+    <GRAPH>
+        <nodeCluster>BM</nodeCluster>
+        <builds>20</builds>
+    </GRAPH>
+
+
     <ENV>
         <cellName>SDNIP</cellName>
         <appString>drivers,openflow,proxyarp</appString>
diff --git a/TestON/tests/USECASE/USECASE_SdnipFunctionCluster/sdnip_multiple_instance_BM b/TestON/tests/USECASE/USECASE_SdnipFunctionCluster/sdnip_multiple_instance_BM
index 1053083..dc9cfea 100644
--- a/TestON/tests/USECASE/USECASE_SdnipFunctionCluster/sdnip_multiple_instance_BM
+++ b/TestON/tests/USECASE/USECASE_SdnipFunctionCluster/sdnip_multiple_instance_BM
@@ -1,10 +1,10 @@
 export ONOS_CELL="sdnip_multiple_instance_BM"
 
 export ONOS_INSTALL_DIR="/opt/onos"
-export ONOS_NIC=10.254.1.*
-export OC1="10.254.1.201"
-export OC2="10.254.1.202"
-export OC3="10.254.1.203"
+export ONOS_NIC=10.192.19.*
+export OC1="10.192.19.68"
+export OC2="10.192.19.67"
+export OC3="10.192.19.66"
 export OCN="127.0.0.1"
 export OCI="${OC1}"
 export ONOS_USER="sdn"                  # ONOS user on remote system
diff --git a/TestON/tests/USECASE/VPLS/VPLSBasic/VPLSBasic.params b/TestON/tests/USECASE/VPLS/VPLSBasic/VPLSBasic.params
index fc6a16b..355ff57 100755
--- a/TestON/tests/USECASE/VPLS/VPLSBasic/VPLSBasic.params
+++ b/TestON/tests/USECASE/VPLS/VPLSBasic/VPLSBasic.params
@@ -2,6 +2,11 @@
 
     <testcases>1,2,10,11,12,13,14,15,16,11</testcases>
 
+    <GRAPH>
+        <nodeCluster>BM</nodeCluster>
+        <builds>20</builds>
+    </GRAPH>
+
     <num_controllers>3</num_controllers>
 
     <GIT>
diff --git a/TestON/tests/USECASE/VPLS/VPLSfailsafe/VPLSfailsafe.params b/TestON/tests/USECASE/VPLS/VPLSfailsafe/VPLSfailsafe.params
index 53e4c6f..c44bb58 100755
--- a/TestON/tests/USECASE/VPLS/VPLSfailsafe/VPLSfailsafe.params
+++ b/TestON/tests/USECASE/VPLS/VPLSfailsafe/VPLSfailsafe.params
@@ -2,6 +2,11 @@
 
     <testcases>1,2,50,100,200,300,310,400</testcases>
 
+    <GRAPH>
+        <nodeCluster>BM</nodeCluster>
+        <builds>20</builds>
+    </GRAPH>
+
     <num_controllers>3</num_controllers>
 
     <GIT>
diff --git a/TestON/tests/USECASE/VPLS/VPLSfailsafe/VPLSfailsafe.py b/TestON/tests/USECASE/VPLS/VPLSfailsafe/VPLSfailsafe.py
index a7dfa3b..9778b68 100644
--- a/TestON/tests/USECASE/VPLS/VPLSfailsafe/VPLSfailsafe.py
+++ b/TestON/tests/USECASE/VPLS/VPLSfailsafe/VPLSfailsafe.py
@@ -421,9 +421,8 @@
 
             # Checking if all nodes appear with status READY using 'nodes' command
             main.step( "Checking ONOS nodes." )
-            nodeResults = utilities.retry( main.HA.nodesCheck,
+            nodeResults = utilities.retry( main.Cluster.nodesCheck,
                                            False,
-                                           args=[ main.Cluster.runningNodes ],
                                            sleep=main.timeSleep,
                                            attempts=main.numAttempts )
 
diff --git a/TestON/tests/dependencies/Cluster.py b/TestON/tests/dependencies/Cluster.py
index 210134f..1532039 100644
--- a/TestON/tests/dependencies/Cluster.py
+++ b/TestON/tests/dependencies/Cluster.py
@@ -18,6 +18,7 @@
     You should have received a copy of the GNU General Public License
     along with TestON.  If not, see <http://www.gnu.org/licenses/>.
 """
+import json
 class Cluster():
 
     def __str__( self ):
@@ -339,6 +340,28 @@
             self.controllers[ i ].active = True
         return result
 
+    def nodesCheck( self ):
+        results = True
+        nodesOutput = self.command( "nodes", specificDriver=2 )
+        ips = sorted( self.getIps( activeOnly=True ) )
+        for i in nodesOutput:
+            try:
+                current = json.loads( i )
+                activeIps = []
+                currentResult = False
+                for node in current:
+                    if node[ 'state' ] == 'READY':
+                        activeIps.append( node[ 'ip' ] )
+                activeIps.sort()
+                if ips == activeIps:
+                    currentResult = True
+            except ( ValueError, TypeError ):
+                main.log.error( "Error parsing nodes output" )
+                main.log.warn( repr( i ) )
+                currentResult = False
+            results = results and currentResult
+        return results
+
     def printResult( self, results, activeList, logLevel="debug" ):
         """
         Description:
@@ -471,6 +494,7 @@
         maxSize = float( segmentSize ) * float( multiplier )
         ret = True
         for n in self.runningNodes:
-            ret = ret and n.server.folderSize( "/opt/onos/apache-karaf-*/data/partitions/*/*.log",
+            # Partition logs
+            ret = ret and n.server.folderSize( "/opt/onos/apache-karaf-*/data/db/partitions/*/*.log",
                                                size=maxSize, unit=units, ignoreRoot=False )
         return ret
diff --git a/TestON/tests/dependencies/ONOSSetup.py b/TestON/tests/dependencies/ONOSSetup.py
index 750dde6..ed4d978 100644
--- a/TestON/tests/dependencies/ONOSSetup.py
+++ b/TestON/tests/dependencies/ONOSSetup.py
@@ -125,8 +125,43 @@
                                         "test variables ",
                                  onfail="Failed to construct test variables" )
 
+        url = self.generateGraphURL()
+        main.log.wiki( url )
+
         main.commit = main.ONOSbench.getVersion( report=True )
 
+    def generateGraphURL( self, width=525, height=350 ):
+        """
+        Description:
+            Obtain the URL for the graph that corresponds to the test being run.
+        """
+
+        nodeCluster = main.params[ 'GRAPH' ][ 'nodeCluster' ]
+        testname = main.TEST
+        branch = main.ONOSbench.getBranchName()
+        maxBuildsToShow = main.params[ 'GRAPH' ][ 'builds' ]
+
+        return '<ac:structured-macro ac:name="html">\n' + \
+                '<ac:plain-text-body><![CDATA[\n' + \
+                '<img src="https://onos-jenkins.onlab.us/job/Pipeline_postjob_' + \
+                nodeCluster + \
+                '/lastSuccessfulBuild/artifact/' + \
+                testname + \
+                '_' + \
+                branch + \
+                '_' + \
+                maxBuildsToShow + \
+                '-builds_graph.jpg", alt="' + \
+                testname + \
+                '", style="width:' + \
+                str( width ) + \
+                'px;height:' + \
+                str( height ) + \
+                'px;border:0"' + \
+                '>' + \
+                ']]></ac:plain-text-body>\n' + \
+                '</ac:structured-macro>\n'
+
     def setNumCtrls( self, hasMultiNodeRounds ):
         """
         Description: