Merge "[ONOS-6957] Create Jenkins File for the other tests"
diff --git a/TestON/JenkinsFile/SCPF/SCPFIntentInstallWithdrawRerouteLat.R b/TestON/JenkinsFile/SCPF/SCPFIntentInstallWithdrawRerouteLat.R
new file mode 100644
index 0000000..897460b
--- /dev/null
+++ b/TestON/JenkinsFile/SCPF/SCPFIntentInstallWithdrawRerouteLat.R
@@ -0,0 +1,193 @@
+# Copyright 2017 Open Networking Foundation (ONF)
+#
+# Please refer questions to either the onos test mailing list at <onos-test@onosproject.org>,
+# the System Testing Plans and Results wiki page at <https://wiki.onosproject.org/x/voMg>,
+# or the System Testing Guide page at <https://wiki.onosproject.org/x/WYQg>
+#
+#     TestON is free software: you can redistribute it and/or modify
+#     it under the terms of the GNU General Public License as published by
+#     the Free Software Foundation, either version 2 of the License, or
+#     (at your option) any later version.
+#
+#     TestON is distributed in the hope that it will be useful,
+#     but WITHOUT ANY WARRANTY; without even the implied warranty of
+#     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#     GNU General Public License for more details.
+#
+#     You should have received a copy of the GNU General Public License
+#     along with TestON.  If not, see <http://www.gnu.org/licenses/>.
+#
+# If you have any questions, or if you don't understand R,
+# please contact Jeremy Ronquillo: jeremyr@opennetworking.org
+
+# **********************************************************
+# STEP 1: File management.
+# **********************************************************
+
+print( "STEP 1: File management." )
+
+# Command line arguments are read. Args usually include the database filename and the output
+# directory for the graphs to save to.
+# ie: Rscript SCPFgraphGenerator SCPFsampleDataDB.csv ~/tmp/
+print( "Reading commmand-line args." )  # NOTE(review): typo "commmand" in the runtime message (repeated in the sibling scripts); left as-is since this is committed patch content
+args <- commandArgs( trailingOnly=TRUE )
+
+# Import libraries to be used for graphing and organizing data, respectively.
+# Find out more about ggplot2: https://github.com/tidyverse/ggplot2
+#                     reshape2: https://github.com/hadley/reshape
+print( "Importing libraries." )
+library( ggplot2 )
+library( reshape2 )
+library( RPostgreSQL )    # For databases
+
+# Check if sufficient args are provided; args[ 9 ] (output directory) is the last required one.
+if ( is.na( args[ 9 ] ) ){
+    print( "Usage: Rscript SCPFIntentInstallWithdrawRerouteLat.R <isFlowObj> <database-host> <database-port> <database-user-id> <database-password> <test-name> <branch-name> <batch-size> <directory-to-save-graphs>" )
+    q()  # basically exit(), but in R
+}
+
+flowObjFileModifier <- ""  # becomes the "fobj_" table-name prefix when flow objectives are enabled
+if ( args[ 1 ] == "y" ){
+    flowObjFileModifier <- "fobj_"
+}
+
+# Filenames for output graphs include the testname and the graph type.
+# See the examples below. paste() is used to concatenate strings.
+
+errBarOutputFile <- paste( args[ 9 ], "SCPFIntentInstallWithdrawRerouteLat", sep="" )
+errBarOutputFile <- paste( errBarOutputFile, args[ 7 ], sep="_" )  # args[ 7 ] = branch name
+if ( args[ 1 ] == "y" ){
+    errBarOutputFile <- paste( errBarOutputFile, "_fobj", sep="" )
+}
+errBarOutputFile <- paste( errBarOutputFile, "_", sep="" )
+errBarOutputFile <- paste( errBarOutputFile, args[ 8 ], sep="" )  # args[ 8 ] = batch size
+errBarOutputFile <- paste( errBarOutputFile, "-batchSize", sep="" )
+errBarOutputFile <- paste( errBarOutputFile, "_graph.jpg", sep="" )
+
+print( "Reading from databases." )
+
+con <- dbConnect( dbDriver( "PostgreSQL" ), dbname="onostest", host=args[ 2 ], port=strtoi( args[ 3 ] ), user=args[ 4 ],password=args[ 5 ] )  # NOTE(review): connection is never dbDisconnect()ed; benign for a short-lived Rscript, but worth closing
+
+command1 <- paste( "SELECT * FROM intent_latency_", flowObjFileModifier, sep="" )  # latest install/withdraw rows for this branch + batch size
+command1 <- paste( command1, "tests WHERE batch_size=", sep="" )
+command1 <- paste( command1, args[ 8 ], sep="" )
+command1 <- paste( command1, " AND branch = '", sep="" )
+command1 <- paste( command1, args[ 7 ], sep="" )  # NOTE(review): args are spliced into SQL unescaped; acceptable only because they come from trusted Jenkins jobs
+command1 <- paste( command1, "' AND date IN ( SELECT MAX( date ) FROM intent_latency_", sep="" )
+command1 <- paste( command1, flowObjFileModifier, sep="" )
+command1 <- paste( command1,  "tests WHERE branch='", sep="" )
+command1 <- paste( command1,  args[ 7 ], sep="" )
+command1 <- paste( command1,  "')", sep="" )
+
+print( paste( "Sending SQL command:", command1 ) )
+
+fileData1 <- dbGetQuery( con, command1 )
+
+command2 <- paste( "SELECT * FROM intent_reroute_latency_", flowObjFileModifier, sep="" )  # same query shape against the reroute table; may legitimately return zero rows
+command2 <- paste( command2, "tests WHERE batch_size=", sep="" )
+command2 <- paste( command2, args[ 8 ], sep="" )
+command2 <- paste( command2, " AND branch = '", sep="" )
+command2 <- paste( command2, args[ 7 ], sep="" )
+command2 <- paste( command2, "' AND date IN ( SELECT MAX( date ) FROM intent_reroute_latency_", sep="" )
+command2 <- paste( command2, flowObjFileModifier, sep="" )
+command2 <- paste( command2,  "tests WHERE branch='", sep="" )
+command2 <- paste( command2,  args[ 7 ], sep="" )
+command2 <- paste( command2,  "')", sep="" )
+
+print( paste( "Sending SQL command:", command2 ) )
+
+fileData2 <- dbGetQuery( con, command2 )
+
+# **********************************************************
+# STEP 2: Organize data.
+# **********************************************************
+
+print( "STEP 2: Organize data." )
+
+# Create lists c() and organize data into their corresponding list.
+print( "Sorting data." )
+if ( ncol( fileData2 ) == 0 ){  # NOTE(review): an empty query result usually still has columns, so this guard may never fire -- was nrow() intended? verify
+    avgs <- c( fileData1[ 'install_avg' ], fileData1[ 'withdraw_avg' ] )
+} else{
+    colnames( fileData2 ) <- c( "date", "name", "date", "branch", "commit", "scale", "batch_size", "reroute_avg", "reroute_std" )  # NOTE(review): "date" is listed twice; the second entry looks wrong -- check the intent_reroute_latency_* schema
+    avgs <- c( fileData1[ 'install_avg' ], fileData1[ 'withdraw_avg' ], fileData2[ 'reroute_avg' ] )
+}
+
+# Parse lists into data frames.
+dataFrame <- melt( avgs )              # This is where reshape2 comes in. Avgs list is converted to data frame
+
+if ( ncol( fileData2 ) == 0 ){
+    dataFrame$scale <- c( fileData1$scale, fileData1$scale )      # Add node scaling to the data frame.
+    dataFrame$stds <- c( fileData1$install_std, fileData1$withdraw_std )
+} else{
+    dataFrame$scale <- c( fileData1$scale, fileData1$scale, fileData2$scale )      # Add node scaling to the data frame.
+    dataFrame$stds <- c( fileData1$install_std, fileData1$withdraw_std, fileData2$reroute_std )
+}
+colnames( dataFrame ) <- c( "ms", "type", "scale", "stds" )
+
+# Format data frame so that the data is in the same order as it appeared in the file.
+dataFrame$type <- as.character( dataFrame$type )
+dataFrame$type <- factor( dataFrame$type, levels=unique( dataFrame$type ) )
+
+# **********************************************************
+# STEP 3: Generate graphs.
+# **********************************************************
+
+print( "STEP 3: Generate graphs." )
+
+# 1. Graph fundamental data is generated first.
+#    These are variables that apply to all of the graphs being generated, regardless of type.
+#
+# 2. Type specific graph data is generated.
+#     Data specific for the error bar and stacked bar graphs are generated.
+#
+# 3. Generate and save the graphs.
+#      Graphs are saved to the filename above, in the directory provided in command line args
+
+print( "Generating fundamental graph data." )
+
+# Calculate window to display graph, based on the lowest and highest points of the data.
+if ( min( dataFrame$ms - dataFrame$stds ) < 0){
+    yWindowMin <- min( dataFrame$ms - dataFrame$stds ) * 1.05
+} else {
+    yWindowMin <- 0
+}
+yWindowMax <- max( dataFrame$ms + dataFrame$stds )
+
+mainPlot <- ggplot( data = dataFrame, aes( x = scale, y = ms, ymin = ms - stds, ymax = ms + stds,fill = type ) )
+
+# Formatting the plot
+width <- 1.3  # Width of the bars.
+xScaleConfig <- scale_x_continuous( breaks=c( 1, 3, 5, 7, 9) )  # tick marks at the cluster sizes the test runs at
+yLimit <- ylim( yWindowMin, yWindowMax )
+xLabel <- xlab( "Scale" )
+yLabel <- ylab( "Latency (ms)" )
+fillLabel <- labs( fill="Type" )
+chartTitle <- "Intent Install, Withdraw, & Reroute Latencies"
+if ( args[ 1 ] == "y" ){
+    chartTitle <- paste( chartTitle, "with Flow Objectives" )
+}
+chartTitle <- paste( chartTitle, "\nBatch Size =" )
+chartTitle <- paste( chartTitle, fileData1[ 1,'batch_size' ] )
+
+theme <- theme( plot.title=element_text( hjust = 0.5, size = 18, face='bold' ) )  # NOTE(review): binds an object over ggplot2's theme(); still works (R resolves function calls separately) but a different variable name would be clearer
+
+# Store plot configurations as 1 variable
+fundamentalGraphData <- mainPlot + xScaleConfig + yLimit + xLabel + yLabel + fillLabel + theme
+
+
+# Create the bar graph with error bars.
+# geom_bar contains:
+#    - stat: data formatting (usually "identity")
+#    - width: the width of the bar types (declared above)
+# geom_errorbar contains similar arguments as geom_bar.
+print( "Generating bar graph with error bars." )
+barGraphFormat <- geom_bar( stat = "identity", width = width, position = "dodge" )
+errorBarFormat <- geom_errorbar( width = width, position = "dodge" )
+title <- ggtitle( chartTitle )
+result <- fundamentalGraphData + barGraphFormat + errorBarFormat + title
+
+# Save graph to file
+print( paste( "Saving bar chart with error bars to", errBarOutputFile ) )
+ggsave( errBarOutputFile, width = 10, height = 6, dpi = 200 )  # NOTE(review): relies on last_plot(), but `result` is never printed in batch mode; safer to pass it explicitly: ggsave( errBarOutputFile, result, ... ) -- confirm
+print( paste( "Successfully wrote bar chart with error bars out to", errBarOutputFile ) )
diff --git a/TestON/JenkinsFile/SCPF/SCPFLineGraph.R b/TestON/JenkinsFile/SCPF/SCPFLineGraph.R
new file mode 100644
index 0000000..f9c6c05
--- /dev/null
+++ b/TestON/JenkinsFile/SCPF/SCPFLineGraph.R
@@ -0,0 +1,144 @@
+# Copyright 2017 Open Networking Foundation (ONF)
+#
+# Please refer questions to either the onos test mailing list at <onos-test@onosproject.org>,
+# the System Testing Plans and Results wiki page at <https://wiki.onosproject.org/x/voMg>,
+# or the System Testing Guide page at <https://wiki.onosproject.org/x/WYQg>
+#
+#     TestON is free software: you can redistribute it and/or modify
+#     it under the terms of the GNU General Public License as published by
+#     the Free Software Foundation, either version 2 of the License, or
+#     (at your option) any later version.
+#
+#     TestON is distributed in the hope that it will be useful,
+#     but WITHOUT ANY WARRANTY; without even the implied warranty of
+#     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#     GNU General Public License for more details.
+#
+#     You should have received a copy of the GNU General Public License
+#     along with TestON.  If not, see <http://www.gnu.org/licenses/>.
+#
+# If you have any questions, or if you don't understand R,
+# please contact Jeremy Ronquillo: jeremyr@opennetworking.org
+
+# This is the R script that generates the SCPF front page graphs.
+
+# **********************************************************
+# STEP 1: Data management.
+# **********************************************************
+
+print( "STEP 1: Data management." )
+
+# Import libraries to be used for graphing and organizing data, respectively.
+# Find out more about ggplot2: https://github.com/tidyverse/ggplot2
+#                     reshape2: https://github.com/hadley/reshape
+#                      RPostgreSQL: https://code.google.com/archive/p/rpostgresql/
+print( "Importing libraries." )
+library( ggplot2 )
+library( reshape2 )
+library( RPostgreSQL )
+
+# Command line arguments are read. Args include the database credentials, test name, branch name, and the directory to output files.
+print( "Reading commmand-line args." )  # NOTE(review): typo "commmand" in the runtime message
+args <- commandArgs( trailingOnly=TRUE )
+
+# Check if sufficient args are provided; args[ 10 ] (output directory) is the last required one.
+if ( is.na( args[ 10 ] ) ){
+    print( "Usage: Rscript testresultgraph.R <database-host> <database-port> <database-user-id> <database-password> <test-name> <branch-name> <#-dates> <SQL-command> <y-axis> <directory-to-save-graph>" )
+    q()  # basically exit(), but in R
+}
+
+# Filenames for the output graph include the testname, branch, and the graph type.
+
+outputFile <- paste( args[ 10 ], "SCPF_Front_Page" , sep="" )
+outputFile <- paste( outputFile, gsub( " ", "_", args[ 5 ] ), sep="_" )  # test name with spaces made filename-safe
+outputFile <- paste( outputFile, args[ 6 ], sep="_" )
+outputFile <- paste( outputFile, args[ 7 ], sep="_" )
+outputFile <- paste( outputFile, "dates", sep="-" )
+outputFile <- paste( outputFile, "_graph.jpg", sep="" )
+
+# From RPostgreSQL
+print( "Reading from databases." )
+con <- dbConnect( dbDriver( "PostgreSQL" ), dbname="onostest", host=args[ 1 ], port=strtoi( args[ 2 ] ), user=args[ 3 ],password=args[ 4 ] )  # NOTE(review): connection is never dbDisconnect()ed; benign for a short-lived Rscript
+
+print( "Sending SQL command." )
+fileData <- dbGetQuery( con, args[ 8 ] )  # NOTE(review): executes a caller-supplied SQL string verbatim; acceptable only because args come from trusted Jenkins jobs
+
+# Title of graph based on command line args.
+title <- args[ 5 ]
+
+# **********************************************************
+# STEP 2: Organize data.
+# **********************************************************
+
+print( "STEP 2: Organize data." )
+
+# Create lists c() and organize data into their corresponding list.
+print( "Sorting data into new data frame." )
+
+if ( ncol( fileData ) > 1 ){
+    for ( i in 2:ncol( fileData ) ){  # running (cumulative) sum across columns; assumes every column is numeric -- TODO confirm against the SQL passed in args[ 8 ]
+        fileData[ i ] <- fileData[ i - 1 ] + fileData[ i ]
+    }
+}
+
+# Parse lists into data frames.
+# This is where reshape2 comes in. Avgs list is converted to data frame.
+dataFrame <- melt( fileData )
+
+dataFrame$date <- fileData$date  # NOTE(review): melt() yields nrow(fileData) * #measure-columns rows; this relies on R recycling fileData$date across them -- verify the lengths divide evenly
+
+colnames( dataFrame ) <- c( "Legend", "Values" )  # NOTE(review): dataFrame has three columns at this point (variable, value, date); the third name is padded to NA -- confirm this is intended
+
+# Format data frame so that the data is in the same order as it appeared in the file.
+dataFrame$Legend <- as.character( dataFrame$Legend )
+dataFrame$Legend <- factor( dataFrame$Legend, levels=unique( dataFrame$Legend ) )
+
+# Adding a temporary reversed iterative list to the dataFrame so that there are no gaps in-between date numbers. (NOTE(review): unlike the sibling scripts this seq() is ascending, not reversed -- confirm which is intended)
+dataFrame$iterative <- seq( 1, nrow( fileData ), by = 1 )  # NOTE(review): length nrow(fileData) is recycled over the longer melted frame -- verify
+
+print( "Data Frame Results:" )
+print( dataFrame )
+
+# **********************************************************
+# STEP 3: Generate graphs.
+# **********************************************************
+
+print( "STEP 3: Generate graphs." )
+
+print( "Creating main plot." )
+# Create the primary plot here.
+# ggplot contains the following arguments:
+#     - data: the data frame that the graph will be based off of
+#    - aes: the aesthetics of the graph which require:
+#        - x: x-axis values (usually iterative, but it will become date # later)
+#        - y: y-axis values (usually tests)
+#        - color: the category of the colored lines (usually legend of test)
+mainPlot <- ggplot( data = dataFrame, aes( x = iterative, y = Values, color = Legend ) )
+
+print( "Formatting main plot." )
+
+# Store plot configurations as 1 variable
+fundamentalGraphData <- mainPlot + expand_limits( y = 0 )
+
+yScaleConfig <- scale_y_continuous( breaks = seq( 0, max( dataFrame$Values ) * 1.05, by = ceiling( max( dataFrame$Values ) / 10 ) ) )  # NOTE(review): if max( dataFrame$Values ) is 0 the by-step is 0 and seq() errors -- consider guarding
+
+xLabel <- xlab( "Date" )
+yLabel <- ylab( args[ 9 ] )
+fillLabel <- labs( fill="Type" )
+legendLabels <- scale_colour_discrete( labels = names( fileData ) )  # NOTE(review): passes ALL column names (including any date column) as legend labels -- verify they line up with the Legend factor levels
+centerTitle <- theme( plot.title=element_text( hjust = 0.5 ) )  # To center the title text
+theme <- theme( axis.text.x = element_blank(), axis.ticks.x = element_blank(), plot.title = element_text( size = 18, face='bold' ) )  # NOTE(review): binds an object over ggplot2's theme(); works, but a different variable name would be clearer
+
+fundamentalGraphData <- fundamentalGraphData + yScaleConfig + xLabel + yLabel + fillLabel + legendLabels + centerTitle + theme
+print( "Generating line graph." )
+
+lineGraphFormat <- geom_line()
+pointFormat <- geom_point( size = 0.2 )
+title <- ggtitle( title )
+
+result <- fundamentalGraphData + lineGraphFormat + pointFormat + title
+
+# Save graph to file
+print( paste( "Saving result graph to", outputFile ) )
+ggsave( outputFile, width = 10, height = 6, dpi = 200 )  # NOTE(review): relies on last_plot(); `result` is never printed in batch mode, so pass it explicitly -- confirm
+print( paste( "Successfully wrote result graph out to", outputFile ) )
\ No newline at end of file
diff --git a/TestON/JenkinsFile/SCPF/SCPFbatchFlowResp.R b/TestON/JenkinsFile/SCPF/SCPFbatchFlowResp.R
new file mode 100644
index 0000000..dbf18e9
--- /dev/null
+++ b/TestON/JenkinsFile/SCPF/SCPFbatchFlowResp.R
@@ -0,0 +1,169 @@
+# Copyright 2017 Open Networking Foundation (ONF)
+#
+# Please refer questions to either the onos test mailing list at <onos-test@onosproject.org>,
+# the System Testing Plans and Results wiki page at <https://wiki.onosproject.org/x/voMg>,
+# or the System Testing Guide page at <https://wiki.onosproject.org/x/WYQg>
+#
+#     TestON is free software: you can redistribute it and/or modify
+#     it under the terms of the GNU General Public License as published by
+#     the Free Software Foundation, either version 2 of the License, or
+#     (at your option) any later version.
+#
+#     TestON is distributed in the hope that it will be useful,
+#     but WITHOUT ANY WARRANTY; without even the implied warranty of
+#     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#     GNU General Public License for more details.
+#
+#     You should have received a copy of the GNU General Public License
+#     along with TestON.  If not, see <http://www.gnu.org/licenses/>.
+#
+# If you have any questions, or if you don't understand R,
+# please contact Jeremy Ronquillo: jeremyr@opennetworking.org
+
+# **********************************************************
+# STEP 1: File management.
+# **********************************************************
+
+print( "STEP 1: File management." )
+
+# Command line arguments are read. Args usually include the database filename and the output
+# directory for the graphs to save to.
+# ie: Rscript SCPFgraphGenerator SCPFsampleDataDB.csv ~/tmp/
+print( "Reading commmand-line args." )  # NOTE(review): typo "commmand" in the runtime message
+args <- commandArgs( trailingOnly=TRUE )
+
+# Import libraries to be used for graphing and organizing data, respectively.
+# Find out more about ggplot2: https://github.com/tidyverse/ggplot2
+#                     reshape2: https://github.com/hadley/reshape
+print( "Importing libraries." )
+library( ggplot2 )
+library( reshape2 )
+library( RPostgreSQL )    # For databases
+
+# Check if sufficient args are provided; args[ 7 ] (output directory) is the last required one.
+if ( is.na( args[ 7 ] ) ){
+    print( "Usage: Rscript SCPFbatchFlowResp <database-host> <database-port> <database-user-id> <database-password> <test-name> <branch-name> <directory-to-save-graphs>" )
+    q()  # basically exit(), but in R
+}
+
+# Filenames for output graphs include the testname and the graph type.
+# See the examples below. paste() is used to concatenate strings.
+
+errBarOutputFile <- paste( args[ 7 ], args[ 5 ], sep="" )
+errBarOutputFile <- paste( errBarOutputFile, args[ 6 ], sep="_" )
+errBarOutputFile <- paste( errBarOutputFile, "_PostGraph.jpg", sep="" )
+
+print( "Reading from databases." )
+
+con <- dbConnect( dbDriver( "PostgreSQL" ), dbname="onostest", host=args[ 1 ], port=strtoi( args[ 2 ] ), user=args[ 3 ],password=args[ 4 ] )  # NOTE(review): connection is never dbDisconnect()ed; benign for a short-lived Rscript
+
+command <- paste( "SELECT * FROM batch_flow_tests WHERE branch='", args[ 6 ], sep="" )  # three most recent builds for this branch
+command <- paste( command, "' ORDER BY date DESC LIMIT 3", sep="" )
+
+print( paste( "Sending SQL command:", command ) )
+
+fileData <- dbGetQuery( con, command )
+
+chartTitle <- paste( "Single Bench Flow Latency - Post", "Last 3 Builds", sep = "\n" )
+
+# **********************************************************
+# STEP 2: Organize data.
+# **********************************************************
+
+avgs <- c()  # NOTE(review): redundant initialisation; immediately overwritten below
+
+print( "Sorting data." )
+avgs <- c( fileData[ 'posttoconfrm' ], fileData[ 'elapsepost' ] )  # POST phase columns from the query result
+
+dataFrame <- melt( avgs )
+dataFrame$scale <- fileData$scale  # recycled across both measure types stacked by melt()
+dataFrame$date <- fileData$date
+dataFrame$iterative <- dataFrame$iterative <- rev( seq( 1, nrow( fileData ), by = 1 ) )  # NOTE(review): duplicated "dataFrame$iterative <-"; harmless, but should be a single assignment
+
+colnames( dataFrame ) <- c( "ms", "type", "scale", "date", "iterative" )
+
+# Format data frame so that the data is in the same order as it appeared in the file.
+dataFrame$type <- as.character( dataFrame$type )
+dataFrame$type <- factor( dataFrame$type, levels=unique( dataFrame$type ) )
+
+# **********************************************************
+# STEP 3: Generate graphs.
+# **********************************************************
+
+print( "Generating fundamental graph data." )
+
+mainPlot <- ggplot( data = dataFrame, aes( x = iterative, y = ms, fill = type ) )
+xScaleConfig <- scale_x_continuous( breaks = dataFrame$iterative, label = dataFrame$date )  # x ticks labelled with the build dates
+xLabel <- xlab( "date" )  # NOTE(review): lower-case "date" here vs "Build Date" in the Del graph below -- probably meant to match
+yLabel <- ylab( "Latency (ms)" )
+fillLabel <- labs( fill="Type" )
+theme <- theme( plot.title=element_text( hjust = 0.5, size = 18, face='bold' ) )  # NOTE(review): binds an object over ggplot2's theme(); works, but a different variable name would be clearer
+
+fundamentalGraphData <- mainPlot + xScaleConfig + xLabel + yLabel + fillLabel + theme
+
+
+print( "Generating bar graph with error bars." )  # NOTE(review): message is stale -- this graph has no error bars
+width <- 0.3
+barGraphFormat <- geom_bar( stat="identity", width = width )
+title <- ggtitle( chartTitle )
+result <- fundamentalGraphData + barGraphFormat + title
+
+
+print( paste( "Saving bar chart to", errBarOutputFile ) )
+ggsave( errBarOutputFile, width = 10, height = 6, dpi = 200 )  # NOTE(review): relies on last_plot(); `result` is never printed in batch mode, so pass it explicitly -- confirm
+
+print( paste( "Successfully wrote stacked bar chart out to", errBarOutputFile ) )
+
+
+# **********************************************************
+# STEP 2: Organize data.
+# **********************************************************
+
+avgs <- c()  # NOTE(review): redundant initialisation; immediately overwritten below
+
+print( "Sorting data." )
+avgs <- c( fileData[ 'deltoconfrm' ], fileData[ 'elapsedel' ] )  # DEL phase columns from the query result
+
+dataFrame <- melt( avgs )
+dataFrame$scale <- fileData$scale
+dataFrame$date <- fileData$date
+dataFrame$iterative <- dataFrame$iterative <- rev( seq( 1, nrow( fileData ), by = 1 ) )  # NOTE(review): duplicated assignment; this whole section nearly duplicates the Post section above and could share a helper
+
+colnames( dataFrame ) <- c( "ms", "type", "scale", "date", "iterative" )
+
+# Format data frame so that the data is in the same order as it appeared in the file.
+dataFrame$type <- as.character( dataFrame$type )
+dataFrame$type <- factor( dataFrame$type, levels=unique( dataFrame$type ) )
+
+
+# **********************************************************
+# STEP 3: Generate graphs.
+# **********************************************************
+
+print( "Generating fundamental graph data." )
+
+mainPlot <- ggplot( data = dataFrame, aes( x = iterative, y = ms, fill = type ) )
+xScaleConfig <- scale_x_continuous( breaks = dataFrame$iterative, label = dataFrame$date )  # x ticks labelled with the build dates
+xLabel <- xlab( "Build Date" )
+yLabel <- ylab( "Latency (ms)" )
+fillLabel <- labs( fill="Type" )
+theme <- theme( plot.title=element_text( hjust = 0.5, size = 18, face='bold' ) )  # NOTE(review): binds an object over ggplot2's theme(); works, but a different variable name would be clearer
+
+fundamentalGraphData <- mainPlot + xScaleConfig + xLabel + yLabel + fillLabel + theme
+
+
+print( "Generating bar graph with error bars." )  # NOTE(review): message is stale -- this graph has no error bars
+width <- 0.3
+barGraphFormat <- geom_bar( stat="identity", width = width )
+chartTitle <- paste( "Single Bench Flow Latency - Del", "Last 3 Builds", sep = "\n" )
+title <- ggtitle( chartTitle )
+result <- fundamentalGraphData + barGraphFormat + title
+
+errBarOutputFile <- paste( args[ 7 ], args[ 5 ], sep="" )
+errBarOutputFile <- paste( errBarOutputFile, args[ 6 ], sep="_" )
+errBarOutputFile <- paste( errBarOutputFile, "_DelGraph.jpg", sep="" )
+
+print( paste( "Saving bar chart to", errBarOutputFile ) )
+ggsave( errBarOutputFile, width = 10, height = 6, dpi = 200 )  # NOTE(review): relies on last_plot(); `result` is never printed in batch mode, so pass it explicitly -- confirm
+
+print( paste( "Successfully wrote stacked bar chart out to", errBarOutputFile ) )
\ No newline at end of file
diff --git a/TestON/JenkinsFile/SCPF/SCPFcbench.R b/TestON/JenkinsFile/SCPF/SCPFcbench.R
new file mode 100644
index 0000000..999504e
--- /dev/null
+++ b/TestON/JenkinsFile/SCPF/SCPFcbench.R
@@ -0,0 +1,114 @@
+# Copyright 2017 Open Networking Foundation (ONF)
+#
+# Please refer questions to either the onos test mailing list at <onos-test@onosproject.org>,
+# the System Testing Plans and Results wiki page at <https://wiki.onosproject.org/x/voMg>,
+# or the System Testing Guide page at <https://wiki.onosproject.org/x/WYQg>
+#
+#     TestON is free software: you can redistribute it and/or modify
+#     it under the terms of the GNU General Public License as published by
+#     the Free Software Foundation, either version 2 of the License, or
+#     (at your option) any later version.
+#
+#     TestON is distributed in the hope that it will be useful,
+#     but WITHOUT ANY WARRANTY; without even the implied warranty of
+#     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#     GNU General Public License for more details.
+#
+#     You should have received a copy of the GNU General Public License
+#     along with TestON.  If not, see <http://www.gnu.org/licenses/>.
+#
+# If you have any questions, or if you don't understand R,
+# please contact Jeremy Ronquillo: jeremyr@opennetworking.org
+
+# **********************************************************
+# STEP 1: File management.
+# **********************************************************
+
+print( "STEP 1: File management." )
+
+# Command line arguments are read. Args usually include the database filename and the output
+# directory for the graphs to save to.
+# ie: Rscript SCPFgraphGenerator SCPFsampleDataDB.csv ~/tmp/
+print( "Reading commmand-line args." )  # NOTE(review): typo "commmand" in the runtime message
+args <- commandArgs( trailingOnly=TRUE )
+
+# Import libraries to be used for graphing and organizing data, respectively.
+# Find out more about ggplot2: https://github.com/tidyverse/ggplot2
+#                     reshape2: https://github.com/hadley/reshape
+print( "Importing libraries." )
+library( ggplot2 )
+library( reshape2 )
+library( RPostgreSQL )    # For databases
+
+# Normal usage
+# Check if sufficient args are provided; args[ 7 ] (output directory) is the last required one.
+if ( is.na( args[ 7 ] ) ){
+    print( "Usage: Rscript SCPFcbench <database-host> <database-port> <database-user-id> <database-password> <test-name> <branch-name> <directory-to-save-graphs>" )
+    q()  # basically exit(), but in R
+}
+
+# Filenames for output graphs include the testname and the graph type.
+# See the examples below. paste() is used to concatenate strings.
+
+errBarOutputFile <- paste( args[ 7 ], args[ 5 ], sep="" )
+errBarOutputFile <- paste( errBarOutputFile, args[ 6 ], sep="_" )
+errBarOutputFile <- paste( errBarOutputFile, "_errGraph.jpg", sep="" )
+
+print( "Reading from databases." )
+
+con <- dbConnect( dbDriver( "PostgreSQL" ), dbname="onostest", host=args[ 1 ], port=strtoi( args[ 2 ] ), user=args[ 3 ],password=args[ 4 ] )  # NOTE(review): connection is never dbDisconnect()ed; benign for a short-lived Rscript
+
+command <- paste( "SELECT * FROM cbench_bm_tests WHERE branch='", args[ 6 ], sep="" )  # three most recent builds for this branch
+command <- paste( command, "' ORDER BY date DESC LIMIT 3", sep="" )
+
+print( paste( "Sending SQL command:", command ) )
+
+fileData <- dbGetQuery( con, command )
+
+chartTitle <- paste( "Single-Node CBench Throughput", "Last 3 Builds", sep = "\n" )
+
+# **********************************************************
+# STEP 2: Organize data.
+# **********************************************************
+
+fileDataNames <- names( fileData )  # NOTE(review): unused in the rest of this script -- candidate for removal
+
+avgs <- c()  # NOTE(review): redundant initialisation; immediately overwritten below
+stds <- c()  # NOTE(review): never used; std values are taken straight from fileData$std below
+
+print( "Sorting data." )
+avgs <- c( fileData[ 'avg' ] )
+
+dataFrame <- melt( avgs )
+dataFrame$std <- c( fileData$std )
+dataFrame$date <- c( fileData$date )
+dataFrame$iterative <- rev( seq( 1, nrow( fileData ), by = 1 ) )
+
+colnames( dataFrame ) <- c( "ms", "type", "std", "date", "iterative" )
+
+# **********************************************************
+# STEP 3: Generate graphs.
+# **********************************************************
+
+print( "Generating fundamental graph data." )
+mainPlot <- ggplot( data = dataFrame, aes( x = iterative, y = ms, ymin = ms - std, ymax = ms + std ) )
+xScaleConfig <- scale_x_continuous( breaks = dataFrame$iterative, label = dataFrame$date )  # x ticks labelled with the build dates
+xLabel <- xlab( "date" )
+yLabel <- ylab( "Responses / sec" )
+fillLabel <- labs( fill="Type" )  # NOTE(review): no fill aesthetic is mapped in this plot, so this label has no effect
+theme <- theme( plot.title=element_text( hjust = 0.5, size = 18, face='bold' ) )  # NOTE(review): binds an object over ggplot2's theme(); works, but a different variable name would be clearer
+
+fundamentalGraphData <- mainPlot + xScaleConfig + xLabel + yLabel + fillLabel + theme
+
+
+print( "Generating bar graph with error bars." )
+width <- 0.3
+barGraphFormat <- geom_bar( stat="identity", position = position_dodge(), width = width, fill="#00AA13" )
+errorBarFormat <- geom_errorbar( position=position_dodge( ), width = width )
+title <- ggtitle( chartTitle )
+result <- fundamentalGraphData + barGraphFormat + errorBarFormat + title
+
+
+print( paste( "Saving bar chart with error bars to", errBarOutputFile ) )
+ggsave( errBarOutputFile, width = 10, height = 6, dpi = 200 )  # NOTE(review): relies on last_plot(); `result` is never printed in batch mode, so pass it explicitly -- confirm
+print( paste( "Successfully wrote bar chart with error bars out to", errBarOutputFile ) )
diff --git a/TestON/JenkinsFile/SCPF/SCPFflowTp1g.R b/TestON/JenkinsFile/SCPF/SCPFflowTp1g.R
new file mode 100644
index 0000000..8350f38
--- /dev/null
+++ b/TestON/JenkinsFile/SCPF/SCPFflowTp1g.R
@@ -0,0 +1,175 @@
# Copyright 2017 Open Networking Foundation (ONF)
#
# Please refer questions to either the onos test mailing list at <onos-test@onosproject.org>,
# the System Testing Plans and Results wiki page at <https://wiki.onosproject.org/x/voMg>,
# or the System Testing Guide page at <https://wiki.onosproject.org/x/WYQg>
#
#     TestON is free software: you can redistribute it and/or modify
#     it under the terms of the GNU General Public License as published by
#     the Free Software Foundation, either version 2 of the License, or
#     (at your option) any later version.
#
#     TestON is distributed in the hope that it will be useful,
#     but WITHOUT ANY WARRANTY; without even the implied warranty of
#     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#     GNU General Public License for more details.
#
#     You should have received a copy of the GNU General Public License
#     along with TestON.  If not, see <http://www.gnu.org/licenses/>.
#
# If you have any questions, or if you don't understand R,
# please contact Jeremy Ronquillo: jeremyr@opennetworking.org

# **********************************************************
# STEP 1: File management.
# **********************************************************

print( "STEP 1: File management." )

# Command line arguments are read. Args include the database connection
# parameters, test/branch identifiers, and the output directory for the graphs.
print( "Reading command-line args." )
args <- commandArgs( trailingOnly=TRUE )

# Import libraries to be used for graphing and organizing data, respectively.
# Find out more about ggplot2: https://github.com/tidyverse/ggplot2
#                     reshape2: https://github.com/hadley/reshape
print( "Importing libraries." )
library( ggplot2 )
library( reshape2 )
library( RPostgreSQL )    # For databases

# Check if sufficient args are provided; bail out with a usage message otherwise.
if ( is.na( args[ 9 ] ) ){
    print( "Usage: Rscript SCPFflowTp1g.R <has-flow-obj> <database-host> <database-port> <database-user-id> <database-password> <test-name> <branch-name> <has-neighbors> <directory-to-save-graphs>" )
    q()  # basically exit(), but in R
}

# Filenames for output graphs include the testname, branch, and variant flags.
errBarOutputFile <- paste0( args[ 9 ], args[ 6 ], "_", args[ 7 ] )
if ( args[ 8 ] == 'y' ){
    errBarOutputFile <- paste0( errBarOutputFile, "_all-neighbors" )
} else {
    errBarOutputFile <- paste0( errBarOutputFile, "_no-neighbors" )
}
if ( args[ 1 ] == 'y' ){
    errBarOutputFile <- paste0( errBarOutputFile, "_flowObj" )
}
errBarOutputFile <- paste0( errBarOutputFile, "_graph.jpg" )

print( "Reading from databases." )
con <- dbConnect( dbDriver( "PostgreSQL" ), dbname="onostest", host=args[ 2 ], port=strtoi( args[ 3 ] ), user=args[ 4 ], password=args[ 5 ] )

# Table-name suffix and WHERE-clause negation driven by the variant flags.
commandNeighborModifier <- ""
flowObjModifier <- ""
if ( args[ 1 ] == 'y' ){
    flowObjModifier <- "_fobj"
}
if ( args[ 8 ] == 'y' ){
    # "NOT neighbors = 0" selects the all-neighbors runs.
    commandNeighborModifier <- "NOT "
}

# NOTE(review): the query is assembled by string concatenation from command-line
# input; acceptable only because Jenkins supplies trusted arguments.
command <- paste0( "SELECT scale, avg( avg ), avg( std ) FROM flow_tp", flowObjModifier,
                   "_tests WHERE ", commandNeighborModifier,
                   "neighbors = 0 AND branch = '", args[ 7 ],
                   "' AND date IN ( SELECT max( date ) FROM flow_tp", flowObjModifier,
                   "_tests WHERE branch='", args[ 7 ],
                   "' ) GROUP BY scale ORDER BY scale" )

print( paste( "Sending SQL command:", command ) )

fileData <- dbGetQuery( con, command )
# Release the connection as soon as the data is in memory.
dbDisconnect( con )

# **********************************************************
# STEP 2: Organize data.
# **********************************************************

print( "STEP 2: Organize data." )

# Name the query result columns, then melt() the averages into the
# long format (value + "type" column) that ggplot2 expects.
print( "Sorting data." )
colnames( fileData ) <- c( "scale", "avg", "std" )
avgs <- c( fileData[ 'avg' ] )

dataFrame <- melt( avgs )                  # reshape2: list of avgs -> long data frame
dataFrame$scale <- fileData$scale          # Add node scaling to the data frame.
dataFrame$std <- fileData$std

colnames( dataFrame ) <- c( "throughput", "type", "scale", "std" )

# **********************************************************
# STEP 3: Generate graphs.
# **********************************************************

print( "STEP 3: Generate graphs." )

# 1. Graph fundamental data is generated first.
#    These are variables that apply to all of the graphs being generated.
# 2. Type specific graph data is generated (error bar / bar formats).
# 3. Generate and save the graphs to the directory provided in command line args.

print( "Generating fundamental graph data." )

# Primary plot:
#     - data: the data frame that the graph will be based off of
#     - aes: the aesthetics of the graph:
#         - x: node scale, y: throughput, ymin/ymax: +/- one std error bars
mainPlot <- ggplot( data = dataFrame, aes( x = scale, y = throughput, ymin = throughput - std, ymax = throughput + std, fill = type ) )

# Formatting the plot
width <- 0.7  # Width of the bars.
xScaleConfig <- scale_x_continuous( breaks = dataFrame$scale, label = dataFrame$scale )
xLabel <- xlab( "Scale" )
yLabel <- ylab( "Throughput (events/second)" )
fillLabel <- labs( fill="Type" )
chartTitle <- "Flow Throughput Test"
if ( args[ 1 ] == 'y' ){
    chartTitle <- paste0( chartTitle, " with Flow Objectives" )
}
chartTitle <- paste0( chartTitle, "\nNeighbors =" )
if ( args[ 8 ] == 'y' ){
    chartTitle <- paste( chartTitle, "Cluster Size - 1" )
} else {
    chartTitle <- paste( chartTitle, "0" )
}

theme <- theme( plot.title=element_text( hjust = 0.5, size = 18, face='bold' ) )

# Store plot configurations as 1 variable
fundamentalGraphData <- mainPlot + xScaleConfig + xLabel + yLabel + fillLabel + theme


# Bar graph with error bars:
#     - stat: data formatting (usually "identity")
#     - width: the width of the bars (declared above)
print( "Generating bar graph with error bars." )
barGraphFormat <- geom_bar( stat = "identity", width = width, fill="#FFA94F" )
errorBarFormat <- geom_errorbar( position=position_dodge( ), width = width )
title <- ggtitle( chartTitle )
result <- fundamentalGraphData + barGraphFormat + errorBarFormat + title

# Save graph to file. Pass the plot explicitly: last_plot() is unset in a
# non-interactive Rscript session.
print( paste( "Saving bar chart with error bars to", errBarOutputFile ) )
ggsave( errBarOutputFile, plot = result, width = 10, height = 6, dpi = 200 )
print( paste( "Successfully wrote bar chart with error bars out to", errBarOutputFile ) )
diff --git a/TestON/JenkinsFile/SCPF/SCPFhostLat.R b/TestON/JenkinsFile/SCPF/SCPFhostLat.R
new file mode 100644
index 0000000..58d0b9b
--- /dev/null
+++ b/TestON/JenkinsFile/SCPF/SCPFhostLat.R
@@ -0,0 +1,118 @@
# Copyright 2017 Open Networking Foundation (ONF)
#
# Please refer questions to either the onos test mailing list at <onos-test@onosproject.org>,
# the System Testing Plans and Results wiki page at <https://wiki.onosproject.org/x/voMg>,
# or the System Testing Guide page at <https://wiki.onosproject.org/x/WYQg>
#
#     TestON is free software: you can redistribute it and/or modify
#     it under the terms of the GNU General Public License as published by
#     the Free Software Foundation, either version 2 of the License, or
#     (at your option) any later version.
#
#     TestON is distributed in the hope that it will be useful,
#     but WITHOUT ANY WARRANTY; without even the implied warranty of
#     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#     GNU General Public License for more details.
#
#     You should have received a copy of the GNU General Public License
#     along with TestON.  If not, see <http://www.gnu.org/licenses/>.
#
# If you have any questions, or if you don't understand R,
# please contact Jeremy Ronquillo: jeremyr@opennetworking.org

# **********************************************************
# STEP 1: File management.
# **********************************************************

print( "STEP 1: File management." )

# Command line arguments are read. Args include the database connection
# parameters, test/branch identifiers, and the output directory for the graphs.
print( "Reading command-line args." )
args <- commandArgs( trailingOnly=TRUE )

# Import libraries to be used for graphing and organizing data, respectively.
# Find out more about ggplot2: https://github.com/tidyverse/ggplot2
#                     reshape2: https://github.com/hadley/reshape
print( "Importing libraries." )
library( ggplot2 )
library( reshape2 )
library( RPostgreSQL )    # For databases


# Check if sufficient args are provided; bail out with a usage message otherwise.
if ( is.na( args[ 7 ] ) ){
    print( "Usage: Rscript SCPFhostLat <database-host> <database-port> <database-user-id> <database-password> <test-name> <branch-name> <directory-to-save-graphs>" )
    q()  # basically exit(), but in R
}

# Filenames for output graphs include the testname and the graph type.
errBarOutputFile <- paste0( args[ 7 ], args[ 5 ], "_", args[ 6 ], "_errGraph.jpg" )

print( "Reading from databases." )

con <- dbConnect( dbDriver( "PostgreSQL" ), dbname="onostest", host=args[ 1 ], port=strtoi( args[ 2 ] ), user=args[ 3 ], password=args[ 4 ] )

# Fetch every column of the most recent run on the requested branch.
command <- paste0( "SELECT * FROM host_latency_tests WHERE branch = '", args[ 6 ],
                   "' AND date IN ( SELECT MAX( date ) FROM host_latency_tests WHERE branch = '",
                   args[ 6 ],
                   "' ) " )

print( paste( "Sending SQL command:", command ) )

fileData <- dbGetQuery( con, command )
# Release the connection as soon as the data is in memory.
dbDisconnect( con )

chartTitle <- "Host Latency"


# **********************************************************
# STEP 2: Organize data.
# **********************************************************

print( "STEP 2: Organize data." )

# melt() the avg column into the long format (value + "type") ggplot2 expects.
print( "Sorting data." )
avgs <- c( fileData[ 'avg' ] )

dataFrame <- melt( avgs )
dataFrame$scale <- fileData$scale
dataFrame$std <- fileData$std

colnames( dataFrame ) <- c( "ms", "type", "scale", "std" )


# **********************************************************
# STEP 3: Generate graphs.
# **********************************************************

print( "Generating fundamental graph data." )

# One bar per cluster scale, with +/- one standard deviation error bars.
mainPlot <- ggplot( data = dataFrame, aes( x = scale, y = ms, ymin = ms - std, ymax = ms + std ) )
xScaleConfig <- scale_x_continuous( breaks=c( 1, 3, 5, 7, 9 ) )
xLabel <- xlab( "Scale" )
yLabel <- ylab( "Latency (ms)" )
fillLabel <- labs( fill="Type" )
theme <- theme( plot.title=element_text( hjust = 0.5, size = 18, face='bold' ) )

fundamentalGraphData <- mainPlot + xScaleConfig + xLabel + yLabel + fillLabel + theme


print( "Generating bar graph with error bars." )
width <- 0.9
barGraphFormat <- geom_bar( stat="identity", position=position_dodge( ), width = width, fill="#E8BD00" )
errorBarFormat <- geom_errorbar( position=position_dodge( ), width = width )
title <- ggtitle( paste( chartTitle, "with Standard Error Bars" ) )
result <- fundamentalGraphData + barGraphFormat + errorBarFormat + title


print( paste( "Saving bar chart with error bars to", errBarOutputFile ) )
# Pass the plot explicitly: last_plot() is unset in a non-interactive session.
ggsave( errBarOutputFile, plot = result, width = 10, height = 6, dpi = 200 )

print( paste( "Successfully wrote bar chart out to", errBarOutputFile ) )
\ No newline at end of file
diff --git a/TestON/JenkinsFile/SCPF/SCPFintentEventTp.R b/TestON/JenkinsFile/SCPF/SCPFintentEventTp.R
new file mode 100644
index 0000000..e7818d1
--- /dev/null
+++ b/TestON/JenkinsFile/SCPF/SCPFintentEventTp.R
@@ -0,0 +1,173 @@
# Copyright 2017 Open Networking Foundation (ONF)
#
# Please refer questions to either the onos test mailing list at <onos-test@onosproject.org>,
# the System Testing Plans and Results wiki page at <https://wiki.onosproject.org/x/voMg>,
# or the System Testing Guide page at <https://wiki.onosproject.org/x/WYQg>
#
#     TestON is free software: you can redistribute it and/or modify
#     it under the terms of the GNU General Public License as published by
#     the Free Software Foundation, either version 2 of the License, or
#     (at your option) any later version.
#
#     TestON is distributed in the hope that it will be useful,
#     but WITHOUT ANY WARRANTY; without even the implied warranty of
#     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#     GNU General Public License for more details.
#
#     You should have received a copy of the GNU General Public License
#     along with TestON.  If not, see <http://www.gnu.org/licenses/>.
#
# If you have any questions, or if you don't understand R,
# please contact Jeremy Ronquillo: jeremyr@opennetworking.org

# **********************************************************
# STEP 1: File management.
# **********************************************************

print( "STEP 1: File management." )

# Command line arguments are read. Args include the database connection
# parameters, test/branch identifiers, and the output directory for the graphs.
print( "Reading command-line args." )
args <- commandArgs( trailingOnly=TRUE )

# Import libraries to be used for graphing and organizing data, respectively.
# Find out more about ggplot2: https://github.com/tidyverse/ggplot2
#                     reshape2: https://github.com/hadley/reshape
print( "Importing libraries." )
library( ggplot2 )
library( reshape2 )
library( RPostgreSQL )    # For databases

# Check if sufficient args are provided; bail out with a usage message otherwise.
if ( is.na( args[ 9 ] ) ){
    print( "Usage: Rscript SCPFIntentEventTp.R <has-flow-obj> <database-host> <database-port> <database-user-id> <database-password> <test-name> <branch-name> <has-neighbors> <directory-to-save-graphs>" )
    q()  # basically exit(), but in R
}

# Filenames for output graphs include the testname, branch, and variant flags.
errBarOutputFile <- paste0( args[ 9 ], args[ 6 ], "_", args[ 7 ] )
if ( args[ 8 ] == 'y' ){
    errBarOutputFile <- paste0( errBarOutputFile, "_all-neighbors" )
} else {
    errBarOutputFile <- paste0( errBarOutputFile, "_no-neighbors" )
}
if ( args[ 1 ] == 'y' ){
    errBarOutputFile <- paste0( errBarOutputFile, "_flowObj" )
}
errBarOutputFile <- paste0( errBarOutputFile, "_graph.jpg" )

print( "Reading from databases." )
con <- dbConnect( dbDriver( "PostgreSQL" ), dbname="onostest", host=args[ 2 ], port=strtoi( args[ 3 ] ), user=args[ 4 ], password=args[ 5 ] )

# Table-name suffix and WHERE-clause negation driven by the variant flags.
commandNeighborModifier <- ""
flowObjModifier <- ""
if ( args[ 1 ] == 'y' ){
    flowObjModifier <- "_fobj"
}
if ( args[ 8 ] == 'y' ){
    # "NOT neighbors = 0" selects the all-neighbors runs.
    commandNeighborModifier <- "NOT "
}

# NOTE(review): the query is assembled by string concatenation from command-line
# input; acceptable only because Jenkins supplies trusted arguments.
command <- paste0( "SELECT scale, avg( avg ) FROM intent_tp", flowObjModifier,
                   "_tests WHERE ", commandNeighborModifier,
                   "neighbors = 0 AND branch = '", args[ 7 ],
                   "' AND date IN ( SELECT max( date ) FROM intent_tp", flowObjModifier,
                   "_tests WHERE branch='", args[ 7 ],
                   "' ) GROUP BY scale ORDER BY scale" )

print( paste( "Sending SQL command:", command ) )

fileData <- dbGetQuery( con, command )
# Release the connection as soon as the data is in memory.
dbDisconnect( con )

# **********************************************************
# STEP 2: Organize data.
# **********************************************************

print( "STEP 2: Organize data." )

# melt() the averages into the long format (value + "type") ggplot2 expects.
print( "Sorting data." )
avgs <- c( fileData[ 'avg' ] )

dataFrame <- melt( avgs )                  # reshape2: list of avgs -> long data frame
dataFrame$scale <- fileData$scale          # Add node scaling to the data frame.

colnames( dataFrame ) <- c( "throughput", "type", "scale" )

# **********************************************************
# STEP 3: Generate graphs.
# **********************************************************

print( "STEP 3: Generate graphs." )

# 1. Graph fundamental data is generated first.
#    These are variables that apply to all of the graphs being generated.
# 2. Type specific graph data is generated (bar format).
# 3. Generate and save the graphs to the directory provided in command line args.

print( "Generating fundamental graph data." )

# Primary plot:
#     - data: the data frame that the graph will be based off of
#     - aes: the aesthetics of the graph:
#         - x: node scale, y: throughput, fill: bar category
mainPlot <- ggplot( data = dataFrame, aes( x = scale, y = throughput, fill = type ) )

# Formatting the plot
width <- 0.7  # Width of the bars.
xScaleConfig <- scale_x_continuous( breaks = dataFrame$scale, label = dataFrame$scale )
xLabel <- xlab( "Scale" )
yLabel <- ylab( "Throughput (events/second)" )
fillLabel <- labs( fill="Type" )
chartTitle <- "Intent Event Throughput"
if ( args[ 1 ] == 'y' ){
    chartTitle <- paste0( chartTitle, " With Flow Objectives" )
}
chartTitle <- paste0( chartTitle, "\nevents/second with Neighbors =" )
if ( args[ 8 ] == 'y' ){
    chartTitle <- paste( chartTitle, "all" )
} else {
    chartTitle <- paste( chartTitle, "0" )
}

theme <- theme( plot.title=element_text( hjust = 0.5, size = 18, face='bold' ) )

# Store plot configurations as 1 variable
fundamentalGraphData <- mainPlot + xScaleConfig + xLabel + yLabel + fillLabel + theme


# Bar graph:
#     - stat: data formatting (usually "identity")
#     - width: the width of the bars (declared above)
print( "Generating bar graph." )
barGraphFormat <- geom_bar( stat = "identity", width = width, fill="#169EFF" )
title <- ggtitle( chartTitle )
result <- fundamentalGraphData + barGraphFormat + title

# Save graph to file. Pass the plot explicitly: last_plot() is unset in a
# non-interactive Rscript session.
print( paste( "Saving bar chart to", errBarOutputFile ) )
ggsave( errBarOutputFile, plot = result, width = 10, height = 6, dpi = 200 )

print( paste( "Successfully wrote bar chart out to", errBarOutputFile ) )
diff --git a/TestON/JenkinsFile/SCPF/SCPFmastershipFailoverLat.R b/TestON/JenkinsFile/SCPF/SCPFmastershipFailoverLat.R
new file mode 100644
index 0000000..6ec3098
--- /dev/null
+++ b/TestON/JenkinsFile/SCPF/SCPFmastershipFailoverLat.R
@@ -0,0 +1,158 @@
# Copyright 2017 Open Networking Foundation (ONF)
#
# Please refer questions to either the onos test mailing list at <onos-test@onosproject.org>,
# the System Testing Plans and Results wiki page at <https://wiki.onosproject.org/x/voMg>,
# or the System Testing Guide page at <https://wiki.onosproject.org/x/WYQg>
#
#     TestON is free software: you can redistribute it and/or modify
#     it under the terms of the GNU General Public License as published by
#     the Free Software Foundation, either version 2 of the License, or
#     (at your option) any later version.
#
#     TestON is distributed in the hope that it will be useful,
#     but WITHOUT ANY WARRANTY; without even the implied warranty of
#     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#     GNU General Public License for more details.
#
#     You should have received a copy of the GNU General Public License
#     along with TestON.  If not, see <http://www.gnu.org/licenses/>.
#
# If you have any questions, or if you don't understand R,
# please contact Jeremy Ronquillo: jeremyr@opennetworking.org

# **********************************************************
# STEP 1: File management.
# **********************************************************

print( "STEP 1: File management." )

# Command line arguments are read. Args include the database connection
# parameters, test/branch identifiers, and the output directory for the graphs.
print( "Reading command-line args." )
args <- commandArgs( trailingOnly=TRUE )

# Import libraries to be used for graphing and organizing data, respectively.
# Find out more about ggplot2: https://github.com/tidyverse/ggplot2
#                     reshape2: https://github.com/hadley/reshape
print( "Importing libraries." )
library( ggplot2 )
library( reshape2 )
library( RPostgreSQL )    # For databases

# Check if sufficient args are provided; bail out with a usage message otherwise.
if ( is.na( args[ 7 ] ) ){
    print( "Usage: Rscript SCPFmastershipFailoverLat <database-host> <database-port> <database-user-id> <database-password> <test-name> <branch-name> <directory-to-save-graphs>" )
    q()  # basically exit(), but in R
}

# Filenames for output graphs include the testname and the graph type.
errBarOutputFile <- paste0( args[ 7 ], args[ 5 ], "_", args[ 6 ], "_errGraph.jpg" )
stackedBarOutputFile <- paste0( args[ 7 ], args[ 5 ], "_", args[ 6 ], "_stackedGraph.jpg" )

print( "Reading from databases." )

con <- dbConnect( dbDriver( "PostgreSQL" ), dbname="onostest", host=args[ 1 ], port=strtoi( args[ 2 ] ), user=args[ 3 ], password=args[ 4 ] )

# Fetch every column of the most recent run on the requested branch.
command <- paste0( "SELECT * FROM mastership_failover_tests WHERE branch = '", args[ 6 ],
                   "' AND date IN ( SELECT MAX( date ) FROM mastership_failover_tests WHERE branch = '",
                   args[ 6 ],
                   "' ) " )

print( paste( "Sending SQL command:", command ) )

fileData <- dbGetQuery( con, command )
# Release the connection as soon as the data is in memory.
dbDisconnect( con )

chartTitle <- "Mastership Failover Latency"


# **********************************************************
# STEP 2: Organize data.
# **********************************************************

fileDataNames <- names( fileData )

avgs <- c()
stds <- c()

# Split columns by suffix: *_avg columns feed the bars, *_std the error bars.
print( "Sorting data." )
for ( name in fileDataNames ){
    nameLen <- nchar( name )
    if ( nameLen > 2 ){
        suffix <- substring( name, nameLen - 2, nameLen )
        if ( suffix == "avg" ){
            avgs <- c( avgs, fileData[ name ] )
        }
        if ( suffix == "std" ){
            stds <- c( stds, fileData[ name ] )
        }
    }
}

avgData <- melt( avgs )
avgData$scale <- fileData$scale
colnames( avgData ) <- c( "ms", "type", "scale" )

stdData <- melt( stds )
colnames( stdData ) <- c( "ms", "type" )


# **********************************************************
# STEP 3: Generate graphs.
# **********************************************************

print( "Generating fundamental graph data." )
# Keep the y-axis anchored at zero unless some value dips below it.
minValue <- min( c( avgData$ms, stdData$ms ) )
if ( minValue < 0 ){
    yMin <- minValue
} else {
    yMin <- 0
}
# Headroom: tallest stacked bar plus 5%.
# NOTE(review): assumes the table exposes deact_role_avg and kill_deact_avg
# columns — confirm against the database schema.
yMax <- max( c( avgData$ms, stdData$ms, max( avgs$deact_role_avg + avgs$kill_deact_avg ) ) ) * 1.05

mainPlot <- ggplot( data = avgData, aes( x = scale, y = ms, ymin = ms - stdData$ms, ymax = ms + stdData$ms, fill = type ) )
xScaleConfig <- scale_x_continuous( breaks=c( 1, 3, 5, 7, 9 ) )
yLimit <- ylim( yMin, yMax )
xLabel <- xlab( "Scale" )
yLabel <- ylab( "Latency (ms)" )
fillLabel <- labs( fill="Type" )
theme <- theme( plot.title=element_text( hjust = 0.5, size = 18, face='bold' ) )

fundamentalGraphData <- mainPlot + xScaleConfig + yLimit + xLabel + yLabel + fillLabel + theme


print( "Generating bar graph with error bars." )
width <- 0.9
barGraphFormat <- geom_bar( stat="identity", position=position_dodge( ), width = width )
errorBarFormat <- geom_errorbar( position=position_dodge( ), width = width )
title <- ggtitle( paste( chartTitle, "with Standard Error Bars" ) )
result <- fundamentalGraphData + barGraphFormat + errorBarFormat + title


print( paste( "Saving bar chart with error bars to", errBarOutputFile ) )
# Pass the plot explicitly: last_plot() is unset in a non-interactive session.
ggsave( errBarOutputFile, plot = result, width = 10, height = 6, dpi = 200 )


print( paste( "Successfully wrote bar chart with error bars out to", errBarOutputFile ) )


print( "Generating stacked bar chart." )
stackedBarFormat <- geom_bar( stat="identity", width=width )
title <- ggtitle( paste( chartTitle, "Total Latency" ) )
result <- fundamentalGraphData + stackedBarFormat + title


print( paste( "Saving stacked bar chart to", stackedBarOutputFile ) )
ggsave( stackedBarOutputFile, plot = result, width = 10, height = 6, dpi = 200 )


print( paste( "Successfully wrote stacked bar chart out to", stackedBarOutputFile ) )
\ No newline at end of file
diff --git a/TestON/JenkinsFile/SCPF/SCPFportLat.R b/TestON/JenkinsFile/SCPF/SCPFportLat.R
new file mode 100644
index 0000000..10af8a9
--- /dev/null
+++ b/TestON/JenkinsFile/SCPF/SCPFportLat.R
@@ -0,0 +1,167 @@
# Copyright 2017 Open Networking Foundation (ONF)
#
# Please refer questions to either the onos test mailing list at <onos-test@onosproject.org>,
# the System Testing Plans and Results wiki page at <https://wiki.onosproject.org/x/voMg>,
# or the System Testing Guide page at <https://wiki.onosproject.org/x/WYQg>
#
#     TestON is free software: you can redistribute it and/or modify
#     it under the terms of the GNU General Public License as published by
#     the Free Software Foundation, either version 2 of the License, or
#     (at your option) any later version.
#
#     TestON is distributed in the hope that it will be useful,
#     but WITHOUT ANY WARRANTY; without even the implied warranty of
#     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#     GNU General Public License for more details.
#
#     You should have received a copy of the GNU General Public License
#     along with TestON.  If not, see <http://www.gnu.org/licenses/>.
#
# If you have any questions, or if you don't understand R,
# please contact Jeremy Ronquillo: jeremyr@opennetworking.org

# **********************************************************
# STEP 1: File management.
# **********************************************************

print( "STEP 1: File management." )

# Command line arguments are read. Args include the database connection
# parameters, test/branch identifiers, and the output directory for the graphs.
print( "Reading command-line args." )
args <- commandArgs( trailingOnly=TRUE )

# Import libraries to be used for graphing and organizing data, respectively.
# Find out more about ggplot2: https://github.com/tidyverse/ggplot2
#                     reshape2: https://github.com/hadley/reshape
print( "Importing libraries." )
library( ggplot2 )
library( reshape2 )
library( RPostgreSQL )    # For databases

# Check if sufficient args are provided; bail out with a usage message otherwise.
if ( is.na( args[ 7 ] ) ){
    print( "Usage: Rscript SCPFportLat <database-host> <database-port> <database-user-id> <database-password> <test-name> <branch-name> <directory-to-save-graphs>" )
    q()  # basically exit(), but in R
}

# Filenames for output graphs include the testname and the graph type.
errBarOutputFileUp <- paste0( args[ 7 ], "SCPFportLat_", args[ 6 ], "_UpErrBarWithStack.jpg" )
errBarOutputFileDown <- paste0( args[ 7 ], "SCPFportLat_", args[ 6 ], "_DownErrBarWithStack.jpg" )

print( "Reading from databases." )

con <- dbConnect( dbDriver( "PostgreSQL" ), dbname="onostest", host=args[ 1 ], port=strtoi( args[ 2 ] ), user=args[ 3 ], password=args[ 4 ] )

# Fetch every column of the most recent run on the requested branch.
command <- paste0( "SELECT * FROM port_latency_details WHERE branch = '", args[ 6 ],
                   "' AND date IN ( SELECT MAX( date ) FROM port_latency_details WHERE branch = '",
                   args[ 6 ],
                   "' ) " )

print( paste( "Sending SQL command:", command ) )

fileData <- dbGetQuery( con, command )
# Release the connection as soon as the data is in memory.
dbDisconnect( con )

chartTitle <- paste( "Port Latency", args[ 6 ], sep = " - " )
chartTitle <- paste( chartTitle, "\n" )


# **********************************************************
# STEP 2: Organize data.
# **********************************************************

print( "Sorting data." )

# Port-up phases, stacked into one bar per scale point.
upAvgs <- c( fileData[ 'up_ofp_to_dev_avg' ], fileData[ 'up_dev_to_link_avg' ], fileData[ 'up_link_to_graph_avg' ] )
upAvgsData <- melt( upAvgs )
upAvgsData$scale <- fileData$scale
upAvgsData$up_std <- fileData$up_std

colnames( upAvgsData ) <- c( "ms", "type", "scale", "stds" )
# Freeze the phase ordering so the stack renders in data order, not alphabetical.
upAvgsData$type <- as.character( upAvgsData$type )
upAvgsData$type <- factor( upAvgsData$type, levels=unique( upAvgsData$type ) )

# Port-down phases, same treatment.
downAvgs <- c( fileData[ 'down_ofp_to_dev_avg' ], fileData[ 'down_dev_to_link_avg' ], fileData[ 'down_link_to_graph_avg' ] )
downAvgsData <- melt( downAvgs )
downAvgsData$scale <- fileData$scale
downAvgsData$down_std <- fileData$down_std

colnames( downAvgsData ) <- c( "ms", "type", "scale", "stds" )
downAvgsData$type <- as.character( downAvgsData$type )
downAvgsData$type <- factor( downAvgsData$type, levels=unique( downAvgsData$type ) )


# **********************************************************
# STEP 3: Generate graphs.
# **********************************************************


print( "Generating fundamental graph data (Port Up Latency)." )
width <- 1
# Anchor the y-axis at zero unless the error bars dip below it.
if ( min( fileData[ 'up_end_to_end_avg' ] - upAvgsData$stds ) < 0 ) {
    yMin <- min( fileData[ 'up_end_to_end_avg' ] - upAvgsData$stds ) * 1.05
} else {
    yMin <- 0
}
yMax <- max( fileData[ 'up_end_to_end_avg' ] + upAvgsData$stds )

# NOTE(review): the error bars are anchored on the end-to-end average (referenced
# straight from fileData inside aes()), not on the per-phase values — confirm
# this recycling against the stacked rows is the intended behavior.
mainPlot <- ggplot( data = upAvgsData, aes( x = scale, y = ms, fill = type, ymin = fileData[ 'up_end_to_end_avg' ] - stds, ymax = fileData[ 'up_end_to_end_avg' ] + stds ) )
xScaleConfig <- scale_x_continuous( breaks=c( 1, 3, 5, 7, 9 ) )
yLimit <- ylim( yMin, yMax )
xLabel <- xlab( "Scale" )
yLabel <- ylab( "Latency (ms)" )
fillLabel <- labs( fill="Type" )
theme <- theme( plot.title=element_text( hjust = 0.5, size = 18, face='bold' ) )

fundamentalGraphData <- mainPlot + yLimit + xScaleConfig + xLabel + yLabel + fillLabel + theme

print( "Generating bar graph with error bars (Port Up Latency)." )
barGraphFormat <- geom_bar( stat="identity", width = width )
errorBarFormat <- geom_errorbar( width = width )

title <- ggtitle( "Port Up Latency" )
result <- fundamentalGraphData + barGraphFormat + errorBarFormat + title


print( paste( "Saving bar chart with error bars (Port Up Latency) to", errBarOutputFileUp ) )
# Pass the plot explicitly: last_plot() is unset in a non-interactive session.
ggsave( errBarOutputFileUp, plot = result, width = 10, height = 6, dpi = 200 )


print( paste( "Successfully wrote bar chart with error bars (Port Up Latency) out to", errBarOutputFileUp ) )


print( "Generating fundamental graph data (Port Down Latency)." )
if ( min( fileData[ 'down_end_to_end_avg' ] - downAvgsData$stds ) < 0 ) {
    yMin <- min( fileData[ 'down_end_to_end_avg' ] - downAvgsData$stds )
} else {
    yMin <- 0
}
yMax <- max( fileData[ 'down_end_to_end_avg' ] + downAvgsData$stds )

mainPlot <- ggplot( data = downAvgsData, aes( x = scale, y = ms, fill = type, ymin = fileData[ 'down_end_to_end_avg' ] - stds, ymax = fileData[ 'down_end_to_end_avg' ] + stds ) )
yLimit <- ylim( yMin, yMax )
theme <- theme( plot.title=element_text( hjust = 0.5, size = 18, face='bold' ) )

fundamentalGraphData <- mainPlot + yLimit + xScaleConfig + xLabel + yLabel + fillLabel + theme

print( "Generating bar graph with error bars (Port Down Latency)." )
barGraphFormat <- geom_bar( stat="identity", width = width )
errorBarFormat <- geom_errorbar( width = width )

title <- ggtitle( "Port Down Latency" )
result <- fundamentalGraphData + barGraphFormat + errorBarFormat + title


print( paste( "Saving bar chart with error bars (Port Down Latency) to", errBarOutputFileDown ) )
ggsave( errBarOutputFileDown, plot = result, width = 10, height = 6, dpi = 200 )


print( paste( "Successfully wrote bar chart with error bars (Port Down Latency) out to", errBarOutputFileDown ) )
diff --git a/TestON/JenkinsFile/SCPF/SCPFscaleTopo.R b/TestON/JenkinsFile/SCPF/SCPFscaleTopo.R
new file mode 100644
index 0000000..9956ec8
--- /dev/null
+++ b/TestON/JenkinsFile/SCPF/SCPFscaleTopo.R
@@ -0,0 +1,155 @@
+# Copyright 2017 Open Networking Foundation (ONF)
+#
+# Please refer questions to either the onos test mailing list at <onos-test@onosproject.org>,
+# the System Testing Plans and Results wiki page at <https://wiki.onosproject.org/x/voMg>,
+# or the System Testing Guide page at <https://wiki.onosproject.org/x/WYQg>
+#
+#     TestON is free software: you can redistribute it and/or modify
+#     it under the terms of the GNU General Public License as published by
+#     the Free Software Foundation, either version 2 of the License, or
+#     (at your option) any later version.
+#
+#     TestON is distributed in the hope that it will be useful,
+#     but WITHOUT ANY WARRANTY; without even the implied warranty of
+#     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#     GNU General Public License for more details.
+#
+#     You should have received a copy of the GNU General Public License
+#     along with TestON.  If not, see <http://www.gnu.org/licenses/>.
+#
+# If you have any questions, or if you don't understand R,
+# please contact Jeremy Ronquillo: jeremyr@opennetworking.org
+
+# **********************************************************
+# STEP 1: File management.
+# **********************************************************
+
+print( "STEP 1: File management." )
+
+# Command line arguments are read. Args usually include the database filename and the output
+# directory for the graphs to save to.
+# ie: Rscript SCPFgraphGenerator SCPFsampleDataDB.csv ~/tmp/
+print( "Reading commmand-line args." )
+args <- commandArgs( trailingOnly=TRUE )
+
+# Import libraries to be used for graphing and organizing data, respectively.
+# Find out more about ggplot2: https://github.com/tidyverse/ggplot2
+#                     reshape2: https://github.com/hadley/reshape
+print( "Importing libraries." )
+library( ggplot2 )
+library( reshape2 )
+library( RPostgreSQL )    # For databases
+
+# Check if sufficient args are provided.
+if ( is.na( args[ 7 ] ) ){
+    print( "Usage: Rscript SCPFgraphGenerator <database-host> <database-port> <database-user-id> <database-password> <test-name> <branch-name> <directory-to-save-graphs>" )
+    q()  # basically exit(), but in R
+}
+
+# Filenames for output graphs include the testname and the graph type.
+# See the examples below. paste() is used to concatenate strings.
+
+outputFile <- paste( args[ 7 ], args[ 5 ], sep="" )
+outputFile <- paste( outputFile, args[ 6 ], sep="_" )
+outputFile <- paste( outputFile, "_graph.jpg", sep="" )
+
+print( "Reading from databases." )
+
+con <- dbConnect( dbDriver( "PostgreSQL" ), dbname="onostest", host=args[ 1 ], port=strtoi( args[ 2 ] ), user=args[ 3 ],password=args[ 4 ] )
+
+command  <- paste( "SELECT * FROM scale_topo_latency_details WHERE branch = '", args[ 6 ], sep = "" )
+command <- paste( command, "' AND date IN ( SELECT MAX( date ) FROM scale_topo_latency_details WHERE branch = '", sep = "" )
+command <- paste( command, args[ 6 ], sep = "" )
+command <- paste( command, "' ) ", sep="" )
+
+print( paste( "Sending SQL command:", command ) )
+
+fileData <- dbGetQuery( con, command )
+
+title <- paste( args[ 5 ], args[ 6 ], sep="_" )
+
+# **********************************************************
+# STEP 2: Organize data.
+# **********************************************************
+
+print( "STEP 2: Organize data." )
+
+# Create lists c() and organize data into their corresponding list.
+print( "Sorting data." )
+avgs <- c( fileData[ 'last_role_request_to_last_topology' ], fileData[ 'last_connection_to_last_role_request' ], fileData[ 'first_connection_to_last_connection' ] )
+
+# Parse lists into data frames.
+dataFrame <- melt( avgs )              # This is where reshape2 comes in. Avgs list is converted to data frame
+dataFrame$scale <- fileData$scale          # Add node scaling to the data frame.
+colnames( dataFrame ) <- c( "ms", "type", "scale")
+
+
+# Format data frame so that the data is in the same order as it appeared in the file.
+dataFrame$type <- as.character( dataFrame$type )
+dataFrame$type <- factor( dataFrame$type, levels=unique( dataFrame$type ) )
+dataFrame$iterative <- seq( 1, nrow( fileData ), by = 1 )
+
+# Obtain the sum of the averages for the plot size and center of standard deviation bars.
+avgsSum <- fileData$total_time
+
+# **********************************************************
+# STEP 3: Generate graphs.
+# **********************************************************
+
+print( "STEP 3: Generate graphs." )
+
+# 1. Graph fundamental data is generated first.
+#    These are variables that apply to all of the graphs being generated, regardless of type.
+#
+# 2. Type specific graph data is generated.
+#     Data specific for the error bar and stacked bar graphs are generated.
+#
+# 3. Generate and save the graphs.
+#      Graphs are saved to the filename above, in the directory provided in command line args
+
+print( "Generating fundamental graph data." )
+
+# Calculate window to display graph, based on the lowest and highest points of the data.
+if ( min( avgsSum ) < 0){
+    yWindowMin <- min( avgsSum ) * 1.05
+} else {
+    yWindowMin <- 0
+}
+yWindowMax <- max( avgsSum )
+
+# Create the primary plot here.
+# ggplot contains the following arguments:
+#     - data: the data frame that the graph will be based off of
+#    - aes: the aesthetics of the graph which require:
+#        - x: x-axis values (usually node scaling)
+#        - y: y-axis values (usually time in milliseconds)
+#        - fill: the category of the colored side-by-side bars (usually type)
+mainPlot <- ggplot( data = dataFrame, aes( x = iterative, y = ms, fill = type ) )
+
+# Formatting the plot
+width <- 0.6  # Width of the bars.
+xScaleConfig <- scale_x_continuous( breaks = dataFrame$iterative, label = dataFrame$scale )
+yLimit <- ylim( yWindowMin, yWindowMax )
+xLabel <- xlab( "Scale" )
+yLabel <- ylab( "Latency (ms)" )
+fillLabel <- labs( fill="Type" )
+chartTitle <- paste( "Topology Scaling Operation Latency" )
+theme <- theme( plot.title=element_text( hjust = 0.5, size = 18, face='bold' ) )
+
+# Store plot configurations as 1 variable
+fundamentalGraphData <- mainPlot + xScaleConfig + yLimit + xLabel + yLabel + fillLabel + theme
+
+# Create the stacked bar graph with error bars.
+# geom_bar contains:
+#    - stat: data formatting (usually "identity")
+#    - width: the width of the bar types (declared above)
+# geom_errorbar contains similar arguments as geom_bar.
+print( "Generating bar graph with error bars." )
+barGraphFormat <- geom_bar( stat = "identity", width = width )
+title <- ggtitle( paste( chartTitle, "" ) )
+result <- fundamentalGraphData + barGraphFormat + title
+
+# Save graph to file
+print( paste( "Saving bar chart with error bars to", outputFile ) )
+ggsave( outputFile, width = 10, height = 6, dpi = 200 )
+print( paste( "Successfully wrote bar chart with error bars out to", outputFile ) )
diff --git a/TestON/JenkinsFile/SCPF/SCPFscalingMaxIntents.R b/TestON/JenkinsFile/SCPF/SCPFscalingMaxIntents.R
new file mode 100644
index 0000000..950083d
--- /dev/null
+++ b/TestON/JenkinsFile/SCPF/SCPFscalingMaxIntents.R
@@ -0,0 +1,133 @@
+# Copyright 2017 Open Networking Foundation (ONF)
+#
+# Please refer questions to either the onos test mailing list at <onos-test@onosproject.org>,
+# the System Testing Plans and Results wiki page at <https://wiki.onosproject.org/x/voMg>,
+# or the System Testing Guide page at <https://wiki.onosproject.org/x/WYQg>
+#
+#     TestON is free software: you can redistribute it and/or modify
+#     it under the terms of the GNU General Public License as published by
+#     the Free Software Foundation, either version 2 of the License, or
+#     (at your option) any later version.
+#
+#     TestON is distributed in the hope that it will be useful,
+#     but WITHOUT ANY WARRANTY; without even the implied warranty of
+#     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#     GNU General Public License for more details.
+#
+#     You should have received a copy of the GNU General Public License
+#     along with TestON.  If not, see <http://www.gnu.org/licenses/>.
+#
+# If you have any questions, or if you don't understand R,
+# please contact Jeremy Ronquillo: jeremyr@opennetworking.org
+
+# **********************************************************
+# STEP 1: File management.
+# **********************************************************
+
+print( "STEP 1: File management." )
+
+# Command line arguments are read. Args usually include the database filename and the output
+# directory for the graphs to save to.
+# ie: Rscript SCPFgraphGenerator SCPFsampleDataDB.csv ~/tmp/
+print( "Reading commmand-line args." )
+args <- commandArgs( trailingOnly=TRUE )
+
+# Import libraries to be used for graphing and organizing data, respectively.
+# Find out more about ggplot2: https://github.com/tidyverse/ggplot2
+#                     reshape2: https://github.com/hadley/reshape
+print( "Importing libraries." )
+library( ggplot2 )
+library( reshape2 )
+library( RPostgreSQL )    # For databases
+
+# Normal usage
+# Check if sufficient args are provided.
+if ( is.na( args[ 8 ] ) ){
+    print( "Usage: Rscript SCPFInstalledIntentsFlows <has-flowObj> <database-host> <database-port> <database-user-id> <database-password> <test-name> <branch-name> <directory-to-save-graphs>" )
+    q()  # basically exit(), but in R
+}
+
+# Filenames for output graphs include the testname and the graph type.
+# See the examples below. paste() is used to concatenate strings.
+
+outputFile <- paste( args[ 8 ], args[ 6 ], sep="" )
+if ( args[ 1 ] == "y" ){
+    outputFile <- paste( outputFile, "flowObj", sep="_" )
+}
+outputFile <- paste( outputFile, args[ 7 ], sep="_" )
+outputFile <- paste( outputFile, "_errGraph.jpg", sep="" )
+
+print( "Reading from databases." )
+
+con <- dbConnect( dbDriver( "PostgreSQL" ), dbname="onostest", host=args[ 2 ], port=strtoi( args[ 3 ] ), user=args[ 4 ],password=args[ 5 ] )
+
+command  <- "SELECT * FROM max_intents_"
+if ( args[ 1 ] == "y" ){
+    command <- paste( command, "fobj_", sep="" )
+}
+command <- paste( command, "tests WHERE branch = '", sep = "" )
+command <- paste( command, args[ 7 ], sep="" )
+command <- paste( command, "' AND date IN ( SELECT MAX( date ) FROM max_intents_", sep="" )
+if ( args[ 1 ] == "y" ){
+    command <- paste( command, "fobj_", sep="" )
+}
+command <- paste( command, "tests WHERE branch = '", sep = "" )
+command <- paste( command, args[ 7 ], sep = "" )
+command <- paste( command, "' ) ", sep="" )
+
+print( paste( "Sending SQL command:", command ) )
+
+fileData <- dbGetQuery( con, command )
+
+if ( args[ 1 ] == "y" ){
+    chartTitle <- "Number of Installed Intents & Flows with Flow Objectives"
+} else {
+    chartTitle <- "Number of Installed Intents & Flows"
+}
+
+# **********************************************************
+# STEP 2: Organize data.
+# **********************************************************
+
+fileDataNames <- names( fileData )
+
+avgs <- c()
+
+print( "Sorting data." )
+avgs <- c( fileData[ 'max_intents_ovs' ], fileData[ 'max_flows_ovs' ] )
+
+dataFrame <- melt( avgs )
+dataFrame$scale <- fileData$scale
+
+colnames( dataFrame ) <- c( "ms", "type", "scale" )
+
+dataFrame$type <- as.character( dataFrame$type )
+dataFrame$type <- factor( dataFrame$type, levels=unique( dataFrame$type ) )
+
+# **********************************************************
+# STEP 3: Generate graphs.
+# **********************************************************
+
+print( "Generating fundamental graph data." )
+
+mainPlot <- ggplot( data = dataFrame, aes( x = scale, y = ms, fill = type ) )
+xScaleConfig <- scale_x_continuous( breaks=c( 1, 3, 5, 7, 9) )
+xLabel <- xlab( "Scale" )
+yLabel <- ylab( "Max Number of Intents/Flow Rules" )
+fillLabel <- labs( fill="Type" )
+theme <- theme( plot.title=element_text( hjust = 0.5, size = 18, face='bold' ) )
+
+fundamentalGraphData <- mainPlot + xScaleConfig + xLabel + yLabel + fillLabel + theme
+
+
+print( "Generating bar graph bars." )
+width <- 1.3
+barGraphFormat <- geom_bar( stat="identity", position=position_dodge( ), width = width )
+title <- ggtitle( chartTitle )
+result <- fundamentalGraphData + barGraphFormat + title
+
+
+print( paste( "Saving bar chart to", outputFile ) )
+ggsave( outputFile, width = 10, height = 6, dpi = 200 )
+
+print( paste( "Successfully wrote bar chart out to", outputFile ) )
diff --git a/TestON/JenkinsFile/SCPF/SCPFswitchLat.R b/TestON/JenkinsFile/SCPF/SCPFswitchLat.R
new file mode 100644
index 0000000..a68b516
--- /dev/null
+++ b/TestON/JenkinsFile/SCPF/SCPFswitchLat.R
@@ -0,0 +1,162 @@
+# Copyright 2017 Open Networking Foundation (ONF)
+#
+# Please refer questions to either the onos test mailing list at <onos-test@onosproject.org>,
+# the System Testing Plans and Results wiki page at <https://wiki.onosproject.org/x/voMg>,
+# or the System Testing Guide page at <https://wiki.onosproject.org/x/WYQg>
+#
+#     TestON is free software: you can redistribute it and/or modify
+#     it under the terms of the GNU General Public License as published by
+#     the Free Software Foundation, either version 2 of the License, or
+#     (at your option) any later version.
+#
+#     TestON is distributed in the hope that it will be useful,
+#     but WITHOUT ANY WARRANTY; without even the implied warranty of
+#     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#     GNU General Public License for more details.
+#
+#     You should have received a copy of the GNU General Public License
+#     along with TestON.  If not, see <http://www.gnu.org/licenses/>.
+#
+# If you have any questions, or if you don't understand R,
+# please contact Jeremy Ronquillo: jeremyr@opennetworking.org
+
+# **********************************************************
+# STEP 1: File management.
+# **********************************************************
+
+print( "STEP 1: File management." )
+
+# Command line arguments are read. Args usually include the database filename and the output
+# directory for the graphs to save to.
+# ie: Rscript SCPFgraphGenerator SCPFsampleDataDB.csv ~/tmp/
+print( "Reading commmand-line args." )
+args <- commandArgs( trailingOnly=TRUE )
+
+# Import libraries to be used for graphing and organizing data, respectively.
+# Find out more about ggplot2: https://github.com/tidyverse/ggplot2
+#                     reshape2: https://github.com/hadley/reshape
+print( "Importing libraries." )
+library( ggplot2 )
+library( reshape2 )
+library( RPostgreSQL )    # For databases
+
+# Check if sufficient args are provided.
+if ( is.na( args[ 7 ] ) ){
+    print( "Usage: Rscript SCPFswitchLat <database-host> <database-port> <database-user-id> <database-password> <test-name> <branch-name> <directory-to-save-graphs>" )
+    q()  # basically exit(), but in R
+}
+
+# Filenames for output graphs include the testname and the graph type.
+# See the examples below. paste() is used to concatenate strings.
+errBarOutputFileUp <- paste( args[ 7 ], "SCPFswitchLat_", sep = "" )
+errBarOutputFileUp <- paste( errBarOutputFileUp, args[ 6 ], sep = "" )
+errBarOutputFileUp <- paste( errBarOutputFileUp, "_UpErrBarWithStack.jpg", sep = "" )
+
+errBarOutputFileDown <- paste( args[ 7 ], "SCPFswitchLat_", sep = "" )
+errBarOutputFileDown <- paste( errBarOutputFileDown, args[ 6 ], sep = "" )
+errBarOutputFileDown <- paste( errBarOutputFileDown, "_DownErrBarWithStack.jpg", sep = "" )
+
+print( "Reading from databases." )
+
+con <- dbConnect( dbDriver( "PostgreSQL" ), dbname="onostest", host=args[ 1 ], port=strtoi( args[ 2 ] ), user=args[ 3 ],password=args[ 4 ] )
+
+command <- paste( "SELECT * FROM switch_latency_details WHERE branch = '", args[ 6 ], sep="" )
+command <- paste( command, "' AND date IN ( SELECT MAX( date ) FROM switch_latency_details WHERE branch='", sep = "")
+command <- paste( command, args[ 6 ], sep="" )
+command <- paste( command, "' )", sep="" )
+
+print( paste( "Sending SQL command:", command ) )
+
+fileData <- dbGetQuery( con, command )
+
+# **********************************************************
+# STEP 2: Organize data.
+# **********************************************************
+
+print( "Sorting data." )
+
+upAvgs <- c( fileData[ 'up_device_to_graph_avg' ], fileData[ 'role_reply_to_device_avg' ], fileData[ 'role_request_to_role_reply_avg' ], fileData[ 'feature_reply_to_role_request_avg' ], fileData[ 'tcp_to_feature_reply_avg' ] )
+upAvgsData <- melt( upAvgs )
+upAvgsData$scale <- fileData$scale
+upAvgsData$up_std <- fileData$up_std
+
+colnames( upAvgsData ) <- c( "ms", "type", "scale", "stds" )
+upAvgsData$type <- as.character( upAvgsData$type )
+upAvgsData$type <- factor( upAvgsData$type, levels=unique( upAvgsData$type ) )
+
+downAvgs <- c( fileData[ 'down_device_to_graph_avg' ], fileData[ 'ack_to_device_avg' ], fileData[ 'fin_ack_to_ack_avg' ] )
+downAvgsData <- melt( downAvgs )
+downAvgsData$scale <- fileData$scale
+downAvgsData$down_std <- fileData$down_std
+
+colnames( downAvgsData ) <- c( "ms", "type", "scale", "stds" )
+downAvgsData$type <- as.character( downAvgsData$type )
+downAvgsData$type <- factor( downAvgsData$type, levels=unique( downAvgsData$type ) )
+
+
+# **********************************************************
+# STEP 3: Generate graphs.
+# **********************************************************
+
+
+print( "Generating fundamental graph data (Switch Up Latency)." )
+width <- 1
+ if ( min( fileData[ 'up_end_to_end_avg' ] - upAvgsData$stds ) < 0 ) {
+     yMin <- min( fileData[ 'up_end_to_end_avg' ] - upAvgsData$stds ) * 1.05
+ } else {
+     yMin <- 0
+ }
+yMax <- max( fileData[ 'up_end_to_end_avg' ] + upAvgsData$stds )
+
+mainPlot <- ggplot( data = upAvgsData, aes( x = scale, y = ms, fill = type, ymin = fileData[ 'up_end_to_end_avg' ] - stds, ymax = fileData[ 'up_end_to_end_avg' ] + stds ) )
+xScaleConfig <- scale_x_continuous( breaks=c( 1, 3, 5, 7, 9) )
+yLimit <- ylim( yMin, yMax )
+xLabel <- xlab( "Scale" )
+yLabel <- ylab( "Latency (ms)" )
+fillLabel <- labs( fill="Type" )
+theme <- theme( plot.title=element_text( hjust = 0.5, size = 18, face='bold' ) )
+
+fundamentalGraphData <- mainPlot + yLimit + xScaleConfig + xLabel + yLabel + fillLabel + theme
+
+print( "Generating bar graph with error bars (Switch Up Latency)." )
+barGraphFormat <- geom_bar( stat="identity", width = width )
+errorBarFormat <- geom_errorbar( width = width )
+
+title <- ggtitle( "Switch Up Latency" )
+result <- fundamentalGraphData + barGraphFormat + errorBarFormat + title
+
+
+print( paste( "Saving bar chart with error bars (Switch Up Latency) to", errBarOutputFileUp ) )
+ggsave( errBarOutputFileUp, width = 10, height = 6, dpi = 200 )
+
+
+print( paste( "Successfully wrote bar chart with error bars (Switch Up Latency) out to", errBarOutputFileUp ) )
+
+
+print( "Generating fundamental graph data (Switch Down Latency)." )
+ if ( min( fileData[ 'down_end_to_end_avg' ] - downAvgsData$stds ) < 0 ) {
+     yMin <- min( fileData[ 'down_end_to_end_avg' ] - downAvgsData$stds )
+ } else {
+     yMin <- 0
+ }
+ yMax <- max( fileData[ 'down_end_to_end_avg' ] + downAvgsData$stds )
+
+mainPlot <- ggplot( data = downAvgsData, aes( x = scale, y = ms, fill = type, ymin = fileData[ 'down_end_to_end_avg' ] - stds, ymax = fileData[ 'down_end_to_end_avg' ] + stds ) )
+yLimit <- ylim( yMin, yMax )
+theme <- theme( plot.title=element_text( hjust = 0.5, size = 18, face='bold' ) )
+
+fundamentalGraphData <- mainPlot + yLimit + xScaleConfig + xLabel + yLabel + fillLabel + theme
+
+print( "Generating bar graph with error bars (Switch Down Latency)." )
+barGraphFormat <- geom_bar( stat="identity", width = width )
+errorBarFormat <- geom_errorbar( width = width )
+
+title <- ggtitle( "Switch Down Latency" )
+result <- fundamentalGraphData + barGraphFormat + errorBarFormat + title
+
+
+print( paste( "Saving bar chart with error bars (Switch Down Latency) to", errBarOutputFileDown ) )
+ggsave( errBarOutputFileDown, width = 10, height = 6, dpi = 200 )
+
+
+print( paste( "Successfully wrote bar chart with error bars (Switch Down Latency) out to", errBarOutputFileDown ) )
diff --git a/TestON/JenkinsFile/testCaseGraphGenerator.R b/TestON/JenkinsFile/testCaseGraphGenerator.R
new file mode 100644
index 0000000..74138db
--- /dev/null
+++ b/TestON/JenkinsFile/testCaseGraphGenerator.R
@@ -0,0 +1,153 @@
+# Copyright 2017 Open Networking Foundation (ONF)
+#
+# Please refer questions to either the onos test mailing list at <onos-test@onosproject.org>,
+# the System Testing Plans and Results wiki page at <https://wiki.onosproject.org/x/voMg>,
+# or the System Testing Guide page at <https://wiki.onosproject.org/x/WYQg>
+#
+#     TestON is free software: you can redistribute it and/or modify
+#     it under the terms of the GNU General Public License as published by
+#     the Free Software Foundation, either version 2 of the License, or
+#     (at your option) any later version.
+#
+#     TestON is distributed in the hope that it will be useful,
+#     but WITHOUT ANY WARRANTY; without even the implied warranty of
+#     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#     GNU General Public License for more details.
+#
+#     You should have received a copy of the GNU General Public License
+#     along with TestON.  If not, see <http://www.gnu.org/licenses/>.
+#
+# If you have any questions, or if you don't understand R,
+# please contact Jeremy Ronquillo: jeremyr@opennetworking.org
+
+# This is the R script that generates the FUNC and HA result graphs.
+
+# **********************************************************
+# STEP 1: Data management.
+# **********************************************************
+
+print( "STEP 1: Data management." )
+
+# Command line arguments are read. Args include the database credentials, test name, branch name, and the directory to output files.
+print( "Reading commmand-line args." )
+args <- commandArgs( trailingOnly=TRUE )
+
+# Import libraries to be used for graphing and organizing data, respectively.
+# Find out more about ggplot2: https://github.com/tidyverse/ggplot2
+#                     reshape2: https://github.com/hadley/reshape
+#                      RPostgreSQL: https://code.google.com/archive/p/rpostgresql/
+print( "Importing libraries." )
+library( ggplot2 )
+library( reshape2 )
+library( RPostgreSQL )
+
+# Check if sufficient args are provided.
+if ( is.na( args[ 8 ] ) ){
+    print( "Usage: Rscript testCaseGraphGenerator.R <database-host> <database-port> <database-user-id> <database-password> <test-name> <branch-name> <#-builds-to-show> <directory-to-save-graphs>" )
+    q()  # basically exit(), but in R
+}
+
+# Filenames for the output graph include the testname, branch, and the graph type.
+outputFile <- paste( args[ 8 ], args[ 5 ], sep="" )
+outputFile <- paste( outputFile, args[ 6 ], sep="_" )
+outputFile <- paste( outputFile, args[ 7 ], sep="_" )
+outputFile <- paste( outputFile, "builds", sep="-" )
+outputFile <- paste( outputFile, "_graph.jpg", sep="" )
+
+# From RPostgreSQL
+print( "Reading from databases." )
+con <- dbConnect( dbDriver( "PostgreSQL" ), dbname="onostest", host=args[ 1 ], port=strtoi( args[ 2 ] ), user=args[ 3 ],password=args[ 4 ] )
+
+print( "Creating SQL command." )
+# Creating SQL command based on command line args.
+command <- paste( "SELECT * FROM executed_test_tests WHERE actual_test_name='", args[ 5 ], sep="" )
+command <- paste( command, "' AND branch='", sep="" )
+command <- paste( command, args[ 6 ], sep="" )
+command <- paste( command, "' ORDER BY date DESC LIMIT ", sep="" )
+command <- paste( command, args[ 7 ], sep="" )
+fileData <- dbGetQuery( con, command )
+
+# Title of graph based on command line args.
+title <- paste( args[ 5 ], args[ 6 ], sep=" - " )
+title <- paste( title, "Results of Last ", sep=" \n " )
+title <- paste( title, args[ 7 ], sep="" )
+title <- paste( title, " Builds", sep="" )
+
+# **********************************************************
+# STEP 2: Organize data.
+# **********************************************************
+
+print( "STEP 2: Organize data." )
+
+# Create lists c() and organize data into their corresponding list.
+print( "Sorting data into new data frame." )
+categories <- c( fileData[ 'num_failed' ], fileData[ 'num_passed' ], fileData[ 'num_planned' ] )
+
+# Parse lists into data frames.
+# This is where reshape2 comes in. Avgs list is converted to data frame.
+dataFrame <- melt( categories )
+dataFrame$build <- fileData$build
+colnames( dataFrame ) <- c( "Tests", "Status", "Build" )
+
+# Format data frame so that the data is in the same order as it appeared in the file.
+dataFrame$Status <- as.character( dataFrame$Status )
+dataFrame$Status <- factor( dataFrame$Status, levels=unique( dataFrame$Status ) )
+
+# Add planned, passed, and failed results to the dataFrame (for the fill below the lines)
+dataFrame$num_planned <- fileData$num_planned
+dataFrame$num_passed <- fileData$num_passed
+dataFrame$num_failed <- fileData$num_failed
+
+# Adding a temporary reversed iterative list to the dataFrame so that there are no gaps in-between build numbers.
+dataFrame$iterative <- rev( seq( 1, nrow( fileData ), by = 1 ) )
+
+print( "Data Frame Results:" )
+print( dataFrame )
+
+# **********************************************************
+# STEP 3: Generate graphs.
+# **********************************************************
+
+print( "STEP 3: Generate graphs." )
+
+print( "Creating main plot." )
+# Create the primary plot here.
+# ggplot contains the following arguments:
+#     - data: the data frame that the graph will be based off of
+#    - aes: the aesthetics of the graph which require:
+#        - x: x-axis values (usually iterative, but it will become build # later)
+#        - y: y-axis values (usually tests)
+#        - color: the category of the colored lines (usually status of test)
+mainPlot <- ggplot( data = dataFrame, aes( x = iterative, y = Tests, color = Status ) )
+
+print( "Formatting main plot." )
+# geom_ribbon is used so that there is a colored fill below the lines. These values shouldn't be changed.
+failedColor <- geom_ribbon( aes( ymin = 0, ymax = dataFrame$num_failed ), fill = "red", linetype = 0, alpha = 0.07 )
+passedColor <- geom_ribbon( aes( ymin = 0, ymax = dataFrame$num_passed ), fill = "green", linetype = 0, alpha = 0.05 )
+plannedColor <- geom_ribbon( aes( ymin = 0, ymax = dataFrame$num_planned ), fill = "blue", linetype = 0, alpha = 0.01 )
+
+xScaleConfig <- scale_x_continuous( breaks = dataFrame$iterative, label = dataFrame$Build )
+yScaleConfig <- scale_y_continuous( breaks = seq( 0, max( dataFrame$Tests ), by = ceiling( max( dataFrame$Tests ) / 10 ) ) )
+
+xLabel <- xlab( "Build Number" )
+yLabel <- ylab( "Test Cases" )
+fillLabel <- labs( fill="Type" )
+legendLabels <- scale_colour_discrete( labels = c( "Failed", "Passed", "Planned" ) )
+centerTitle <- theme( plot.title=element_text( hjust = 0.5 ) )  # To center the title text
+theme <- theme( plot.title = element_text( size = 18, face='bold' ) )
+
+# Store plot configurations as 1 variable
+fundamentalGraphData <- mainPlot + plannedColor + passedColor + failedColor + xScaleConfig + yScaleConfig + xLabel + yLabel + fillLabel + legendLabels + centerTitle + theme
+
+print( "Generating line graph." )
+
+lineGraphFormat <- geom_line( size = 1.1 )
+pointFormat <- geom_point( size = 3 )
+title <- ggtitle( title )
+
+result <- fundamentalGraphData + lineGraphFormat + pointFormat + title
+
+# Save graph to file
+print( paste( "Saving result graph to", outputFile ) )
+ggsave( outputFile, width = 10, height = 6, dpi = 200 )
+print( paste( "Successfully wrote result graph out to", outputFile ) )
diff --git a/TestON/drivers/common/api/controller/onosrestdriver.py b/TestON/drivers/common/api/controller/onosrestdriver.py
index 3e17dea..312f1f1 100755
--- a/TestON/drivers/common/api/controller/onosrestdriver.py
+++ b/TestON/drivers/common/api/controller/onosrestdriver.py
@@ -327,9 +327,10 @@
                                "from topo file" )
                 port = self.port
             query = "/" + str( appName ) + "/active"
-            response = self.send( method="DELETE",
-                                  url="/applications" + query,
-                                  ip = ip, port = port )
+            self.send( method="DELETE",
+                       url="/applications" + query,
+                       ip = ip, port = port )
+            response = self.getApp( appName, ip, port )
             if response:
                 output = response[ 1 ]
                 app = {} if output == "" else json.loads( output )
@@ -360,7 +361,7 @@
             main.log.exception( self.name + ": Uncaught exception!" )
             main.cleanAndExit()
 
-    def getApp( self, appName, project="org.onosproject.", ip="DEFAULT",
+    def getApp( self, appName, ip="DEFAULT",
                 port="DEFAULT" ):
         """
         Decription:
@@ -380,14 +381,12 @@
                 main.log.warn( "No port given, reverting to port " +
                                "from topo file" )
                 port = self.port
-            query = "/" + project + str( appName )
+            query = "/" + str( appName )
             response = self.send( url="/applications" + query,
                                   ip = ip, port = port )
             if response:
                 if 200 <= response[ 0 ] <= 299:
-                    output = response[ 1 ]
-                    a = json.loads( output )
-                    return a
+                    return response
                 else:
                     main.log.error( "Error with REST request, response was: " +
                                     str( response ) )
diff --git a/TestON/drivers/common/cli/onosclidriver.py b/TestON/drivers/common/cli/onosclidriver.py
index 0190154..ba63b82 100755
--- a/TestON/drivers/common/cli/onosclidriver.py
+++ b/TestON/drivers/common/cli/onosclidriver.py
@@ -3461,6 +3461,7 @@
             if state == "ACTIVE" or state == "INSTALLED":
                 return state
             elif state is None:
+                main.log.warn( "{} app not found", appName )
                 return "UNINSTALLED"
             elif state:
                 main.log.error( "Unexpected state from 'onos:apps': " +
diff --git a/TestON/drivers/common/clidriver.py b/TestON/drivers/common/clidriver.py
index 6f26cfc..55f05ba 100644
--- a/TestON/drivers/common/clidriver.py
+++ b/TestON/drivers/common/clidriver.py
@@ -438,11 +438,16 @@
         return handle
 
     def exitFromSsh( self, handle, ipAddress ):
-        handle.sendline( "logout" )
         try:
+            handle.sendline( "logout" )
             handle.expect( "closed." )
             main.log.info ( "Successfully closed ssh connection from " + ipAddress )
         except pexpect.EOF:
             main.log.error( "Failed to close the connection from " + ipAddress )
-        handle.sendline( "" )
-        handle.expect( self.prompt )
\ No newline at end of file
+        try:
+            # check that this component handle still works
+            self.handle.sendline( "" )
+            self.handle.expect( self.prompt )
+        except pexpect.EOF:
+            main.log.error( self.handle.before )
+            main.log.error( "EOF after closing ssh connection" )
diff --git a/TestON/tests/HA/HAclusterRestart/HAclusterRestart.params b/TestON/tests/HA/HAclusterRestart/HAclusterRestart.params
index c55b26c..8ba346c 100644
--- a/TestON/tests/HA/HAclusterRestart/HAclusterRestart.params
+++ b/TestON/tests/HA/HAclusterRestart/HAclusterRestart.params
@@ -25,9 +25,6 @@
             <useFlowObjectives>false</useFlowObjectives>
             <defaultFlowObjectiveCompiler>org.onosproject.net.intent.impl.compiler.LinkCollectionIntentObjectiveCompiler</defaultFlowObjectiveCompiler>
         </org.onosproject.net.intent.impl.compiler.IntentConfigurableRegistrator>
-        <org.onosproject.store.flow.impl.DistributedFlowRuleStore>
-            <backupCount>3</backupCount>
-        </org.onosproject.store.flow.impl.DistributedFlowRuleStore>
     </ONOS_Configuration>
     <ENV>
         <cellName>HA</cellName>
diff --git a/TestON/tests/HA/HAcontinuousStopNodes/HAcontinuousStopNodes.params b/TestON/tests/HA/HAcontinuousStopNodes/HAcontinuousStopNodes.params
index 9d590be..f1520f7 100644
--- a/TestON/tests/HA/HAcontinuousStopNodes/HAcontinuousStopNodes.params
+++ b/TestON/tests/HA/HAcontinuousStopNodes/HAcontinuousStopNodes.params
@@ -27,9 +27,6 @@
             <useFlowObjectives>false</useFlowObjectives>
             <defaultFlowObjectiveCompiler>org.onosproject.net.intent.impl.compiler.LinkCollectionIntentObjectiveCompiler</defaultFlowObjectiveCompiler>
         </org.onosproject.net.intent.impl.compiler.IntentConfigurableRegistrator>
-        <org.onosproject.store.flow.impl.DistributedFlowRuleStore>
-            <backupCount>3</backupCount>
-        </org.onosproject.store.flow.impl.DistributedFlowRuleStore>
     </ONOS_Configuration>
     <ENV>
         <cellName>HA</cellName>
diff --git a/TestON/tests/HA/HAfullNetPartition/HAfullNetPartition.params b/TestON/tests/HA/HAfullNetPartition/HAfullNetPartition.params
index 93b3e02..3a8b60f 100644
--- a/TestON/tests/HA/HAfullNetPartition/HAfullNetPartition.params
+++ b/TestON/tests/HA/HAfullNetPartition/HAfullNetPartition.params
@@ -27,9 +27,6 @@
             <useFlowObjectives>false</useFlowObjectives>
             <defaultFlowObjectiveCompiler>org.onosproject.net.intent.impl.compiler.LinkCollectionIntentObjectiveCompiler</defaultFlowObjectiveCompiler>
         </org.onosproject.net.intent.impl.compiler.IntentConfigurableRegistrator>
-        <org.onosproject.store.flow.impl.DistributedFlowRuleStore>
-            <backupCount>3</backupCount>
-        </org.onosproject.store.flow.impl.DistributedFlowRuleStore>
     </ONOS_Configuration>
     <ENV>
         <cellName>HA</cellName>
diff --git a/TestON/tests/HA/HAkillNodes/HAkillNodes.params b/TestON/tests/HA/HAkillNodes/HAkillNodes.params
index ebffe50..d8f3d31 100644
--- a/TestON/tests/HA/HAkillNodes/HAkillNodes.params
+++ b/TestON/tests/HA/HAkillNodes/HAkillNodes.params
@@ -27,9 +27,6 @@
             <useFlowObjectives>false</useFlowObjectives>
             <defaultFlowObjectiveCompiler>org.onosproject.net.intent.impl.compiler.LinkCollectionIntentObjectiveCompiler</defaultFlowObjectiveCompiler>
         </org.onosproject.net.intent.impl.compiler.IntentConfigurableRegistrator>
-        <org.onosproject.store.flow.impl.DistributedFlowRuleStore>
-            <backupCount>3</backupCount>
-        </org.onosproject.store.flow.impl.DistributedFlowRuleStore>
     </ONOS_Configuration>
     <ENV>
         <cellName>HA</cellName>
diff --git a/TestON/tests/HA/HAkillNodes/HAkillNodes.py b/TestON/tests/HA/HAkillNodes/HAkillNodes.py
index 63e34cb..4a95206 100644
--- a/TestON/tests/HA/HAkillNodes/HAkillNodes.py
+++ b/TestON/tests/HA/HAkillNodes/HAkillNodes.py
@@ -90,15 +90,14 @@
         main.testSetUp.evnSetupConclusion( stepResult )
         main.HA.generateGraph( "HAkillNodes" )
 
-        main.step( "Make sure ONOS service doesn't automatically respawn" )
-        main.ONOSbench.preventAutoRespawn()
-
         main.testSetUp.ONOSSetUp( main.Mininet1, main.Cluster, cellName=cellName, removeLog=True,
-                                 extraApply=[ main.HA.startingMininet,
-                                              main.HA.customizeOnosGenPartitions ],
-                                 extraClean=main.HA.cleanUpGenPartition )
+                                  extraApply=[ main.HA.startingMininet,
+                                               main.HA.customizeOnosGenPartitions,
+                                               main.HA.copyBackupConfig,
+                                               main.ONOSbench.preventAutoRespawn ],
+                                  extraClean= main.HA.cleanUpGenPartition )
 
-        main.HA.initialSetUp()
+        main.HA.initialSetUp( serviceClean=True )
 
     def CASE2( self, main ):
         """
@@ -150,7 +149,9 @@
             main.kill.append( main.Cluster.runningNodes[ p - 1 ] )
             # NOTE: This only works for cluster sizes of 3,5, or 7.
 
-        main.step( "Killing nodes: " + str( main.kill ) )
+        # NOTE: This is to fix an issue with wiki formatting
+        nodeNames = [ node.name for node in main.kill ]
+        main.step( "Killing nodes: " + str( nodeNames ) )
         killResults = main.TRUE
         for ctrl in main.kill:
             killResults = killResults and\
diff --git a/TestON/tests/HA/HAsanity/HAsanity.params b/TestON/tests/HA/HAsanity/HAsanity.params
index ace6b77..5c298ec 100644
--- a/TestON/tests/HA/HAsanity/HAsanity.params
+++ b/TestON/tests/HA/HAsanity/HAsanity.params
@@ -26,9 +26,6 @@
             <useFlowObjectives>false</useFlowObjectives>
             <defaultFlowObjectiveCompiler>org.onosproject.net.intent.impl.compiler.LinkCollectionIntentObjectiveCompiler</defaultFlowObjectiveCompiler>
         </org.onosproject.net.intent.impl.compiler.IntentConfigurableRegistrator>
-        <org.onosproject.store.flow.impl.DistributedFlowRuleStore>
-            <backupCount>3</backupCount>
-        </org.onosproject.store.flow.impl.DistributedFlowRuleStore>
     </ONOS_Configuration>
     <ENV>
         <cellName>HA</cellName>
diff --git a/TestON/tests/HA/HAscaling/HAscaling.params b/TestON/tests/HA/HAscaling/HAscaling.params
index 6bcb87d..9fd1760 100644
--- a/TestON/tests/HA/HAscaling/HAscaling.params
+++ b/TestON/tests/HA/HAscaling/HAscaling.params
@@ -30,9 +30,6 @@
             <useFlowObjectives>false</useFlowObjectives>
             <defaultFlowObjectiveCompiler>org.onosproject.net.intent.impl.compiler.LinkCollectionIntentObjectiveCompiler</defaultFlowObjectiveCompiler>
         </org.onosproject.net.intent.impl.compiler.IntentConfigurableRegistrator>
-        <org.onosproject.store.flow.impl.DistributedFlowRuleStore>
-            <backupCount>3</backupCount>
-        </org.onosproject.store.flow.impl.DistributedFlowRuleStore>
     </ONOS_Configuration>
     <ENV>
         <cellName>HA</cellName>
diff --git a/TestON/tests/HA/HAscaling/HAscaling.py b/TestON/tests/HA/HAscaling/HAscaling.py
index cecd09e..f986cc2 100644
--- a/TestON/tests/HA/HAscaling/HAscaling.py
+++ b/TestON/tests/HA/HAscaling/HAscaling.py
@@ -79,9 +79,8 @@
             from tests.dependencies.ONOSSetup import ONOSSetup
             main.testSetUp = ONOSSetup()
         except ImportError:
-            main.log.error( "ONOSSetup not found exiting the test" )
+            main.log.error( "ONOSSetup not found. exiting the test" )
             main.cleanAndExit()
-
         main.testSetUp.envSetupDescription()
         try:
             from tests.HA.dependencies.HA import HA
@@ -99,14 +98,15 @@
         main.HA.generateGraph( "HAscaling", index=1 )
 
         main.testSetUp.ONOSSetUp( main.Mininet1, main.Cluster, cellName=cellName, removeLog=True,
-                                 extraApply=[ main.HA.setServerForCluster,
-                                              main.HA.scalingMetadata,
-                                              main.HA.startingMininet,
-                                              main.HA.copyingBackupConfig ],
-                                 extraClean=main.HA.cleanUpOnosService,
-                                 installMax=True )
+                                  extraApply=[ main.HA.setServerForCluster,
+                                               main.HA.scalingMetadata,
+                                               main.HA.startingMininet,
+                                               main.HA.copyBackupConfig,
+                                               main.HA.setMetadataUrl ],
+                                  extraClean=main.HA.cleanUpOnosService,
+                                  installMax=True )
 
-        main.HA.initialSetUp( True )
+        main.HA.initialSetUp( serviceClean=True )
 
     def CASE2( self, main ):
         """
diff --git a/TestON/tests/HA/HAsingleInstanceRestart/HAsingleInstanceRestart.params b/TestON/tests/HA/HAsingleInstanceRestart/HAsingleInstanceRestart.params
index 65ec6c6..67a655a 100644
--- a/TestON/tests/HA/HAsingleInstanceRestart/HAsingleInstanceRestart.params
+++ b/TestON/tests/HA/HAsingleInstanceRestart/HAsingleInstanceRestart.params
@@ -24,9 +24,6 @@
             <useFlowObjectives>false</useFlowObjectives>
             <defaultFlowObjectiveCompiler>org.onosproject.net.intent.impl.compiler.LinkCollectionIntentObjectiveCompiler</defaultFlowObjectiveCompiler>
         </org.onosproject.net.intent.impl.compiler.IntentConfigurableRegistrator>
-        <org.onosproject.store.flow.impl.DistributedFlowRuleStore>
-            <backupCount>3</backupCount>
-        </org.onosproject.store.flow.impl.DistributedFlowRuleStore>
     </ONOS_Configuration>
     <ENV>
         <cellName>HA</cellName>
diff --git a/TestON/tests/HA/HAsingleInstanceRestart/HAsingleInstanceRestart.py b/TestON/tests/HA/HAsingleInstanceRestart/HAsingleInstanceRestart.py
index 13a4913..eeb4310 100644
--- a/TestON/tests/HA/HAsingleInstanceRestart/HAsingleInstanceRestart.py
+++ b/TestON/tests/HA/HAsingleInstanceRestart/HAsingleInstanceRestart.py
@@ -105,10 +105,10 @@
                                   extraApply=[ main.testSetUp.createApplyCell,
                                                main.HA.startingMininet,
                                                main.testSetUp.createApplyCell ],
-                                  arg=[ [ main.Cluster, True, cellName, main.Mininet1, True, ip ],
-                                        None,
-                                        [ main.Cluster, True, "SingleHA", main.Mininet1,
-                                        True, main.Cluster.runningNodes[ 0 ].ipAddress ] ] )
+                                  applyArgs=[ [ main.Cluster, True, cellName, main.Mininet1, True, ip ],
+                                              None,
+                                              [ main.Cluster, True, "SingleHA", main.Mininet1,
+                                                True, main.Cluster.runningNodes[ 0 ].ipAddress ] ] )
 
         main.HA.initialSetUp()
 
@@ -313,6 +313,9 @@
             onpass="Hosts are correct",
             onfail="Hosts are incorrect" )
 
+        ONOSMastership, rolesResult, consistentMastership = main.HA.checkTheRole()
+        mastershipState = ONOSMastership[ 0 ]
+
     def CASE6( self, main ):
         """
         The Failure case.
@@ -386,7 +389,7 @@
         main.HA.checkRoleNotNull()
 
         main.step( "Check if switch roles are consistent across all nodes" )
-        ONOSMastership, rolesResult,consistentMastership = main.HA.checkTheRole()
+        ONOSMastership, rolesResult, consistentMastership = main.HA.checkTheRole()
         ONOSMastership = ONOSMastership[ 0 ]
         description2 = "Compare switch roles from before failure"
         main.step( description2 )
@@ -405,7 +408,8 @@
             if current == old:
                 mastershipCheck = mastershipCheck and main.TRUE
             else:
-                main.log.warn( "Mastership of switch %s changed" % switchDPID )
+                main.log.warn( "Mastership of switch %s changed; old: %s, new: %s" % ( switchDPID,
+                    old, current ) )
                 mastershipCheck = main.FALSE
         utilities.assert_equals(
             expect=main.TRUE,
diff --git a/TestON/tests/HA/HAstopNodes/HAstopNodes.params b/TestON/tests/HA/HAstopNodes/HAstopNodes.params
index ebffe50..d8f3d31 100644
--- a/TestON/tests/HA/HAstopNodes/HAstopNodes.params
+++ b/TestON/tests/HA/HAstopNodes/HAstopNodes.params
@@ -27,9 +27,6 @@
             <useFlowObjectives>false</useFlowObjectives>
             <defaultFlowObjectiveCompiler>org.onosproject.net.intent.impl.compiler.LinkCollectionIntentObjectiveCompiler</defaultFlowObjectiveCompiler>
         </org.onosproject.net.intent.impl.compiler.IntentConfigurableRegistrator>
-        <org.onosproject.store.flow.impl.DistributedFlowRuleStore>
-            <backupCount>3</backupCount>
-        </org.onosproject.store.flow.impl.DistributedFlowRuleStore>
     </ONOS_Configuration>
     <ENV>
         <cellName>HA</cellName>
diff --git a/TestON/tests/HA/HAstopNodes/HAstopNodes.py b/TestON/tests/HA/HAstopNodes/HAstopNodes.py
index 2c4f059..e7bdcd4 100644
--- a/TestON/tests/HA/HAstopNodes/HAstopNodes.py
+++ b/TestON/tests/HA/HAstopNodes/HAstopNodes.py
@@ -92,10 +92,11 @@
 
         main.testSetUp.ONOSSetUp( main.Mininet1, main.Cluster, cellName=cellName, removeLog=True,
                                   extraApply=[ main.HA.startingMininet,
-                                               main.HA.customizeOnosGenPartitions ],
-                                  extraClean=main.HA.cleanUpGenPartition )
+                                               main.HA.customizeOnosGenPartitions,
+                                               main.HA.copyBackupConfig ],
+                                  extraClean= main.HA.cleanUpGenPartition )
 
-        main.HA.initialSetUp()
+        main.HA.initialSetUp( serviceClean=True )
 
     def CASE2( self, main ):
         """
@@ -147,7 +148,9 @@
             main.kill.append( main.Cluster.runningNodes[ p - 1 ] )
             # NOTE: This only works for cluster sizes of 3,5, or 7.
 
-        main.step( "Stopping nodes: " + str( main.kill ) )
+        # NOTE: This is to fix an issue with wiki formatting
+        nodeNames = [ node.name for node in main.kill ]
+        main.step( "Stopping nodes: " + str( nodeNames ) )
         killResults = main.TRUE
         for ctrl in main.kill:
             killResults = killResults and\
diff --git a/TestON/tests/HA/HAswapNodes/HAswapNodes.params b/TestON/tests/HA/HAswapNodes/HAswapNodes.params
index d557c1e..cf395cb 100644
--- a/TestON/tests/HA/HAswapNodes/HAswapNodes.params
+++ b/TestON/tests/HA/HAswapNodes/HAswapNodes.params
@@ -29,9 +29,6 @@
             <useFlowObjectives>false</useFlowObjectives>
             <defaultFlowObjectiveCompiler>org.onosproject.net.intent.impl.compiler.LinkCollectionIntentObjectiveCompiler</defaultFlowObjectiveCompiler>
         </org.onosproject.net.intent.impl.compiler.IntentConfigurableRegistrator>
-        <org.onosproject.store.flow.impl.DistributedFlowRuleStore>
-            <backupCount>3</backupCount>
-        </org.onosproject.store.flow.impl.DistributedFlowRuleStore>
     </ONOS_Configuration>
     <ENV>
         <cellName>HA</cellName>
diff --git a/TestON/tests/HA/HAswapNodes/HAswapNodes.py b/TestON/tests/HA/HAswapNodes/HAswapNodes.py
index 2de9f87..1fa8106 100644
--- a/TestON/tests/HA/HAswapNodes/HAswapNodes.py
+++ b/TestON/tests/HA/HAswapNodes/HAswapNodes.py
@@ -101,7 +101,8 @@
                                  extraApply=[ main.HA.setServerForCluster,
                                               main.HA.swapNodeMetadata,
                                               main.HA.startingMininet,
-                                              main.HA.copyingBackupConfig ],
+                                              main.HA.copyBackupConfig,
+                                              main.HA.setMetadataUrl ],
                                  extraClean=main.HA.cleanUpOnosService,
                                  installMax=True )
         main.HA.initialSetUp()
diff --git a/TestON/tests/HA/dependencies/HA.py b/TestON/tests/HA/dependencies/HA.py
index 6750775..f1ddaef 100644
--- a/TestON/tests/HA/dependencies/HA.py
+++ b/TestON/tests/HA/dependencies/HA.py
@@ -112,7 +112,7 @@
                                  onpass="Server started",
                                  onfail="Failled to start SimpleHTTPServer" )
 
-    def copyingBackupConfig( self ):
+    def copyBackupConfig( self ):
         main.step( "Copying backup config files" )
         main.onosServicepath = main.ONOSbench.home + "/tools/package/bin/onos-service"
         cp = main.ONOSbench.scp( main.ONOSbench,
@@ -124,6 +124,9 @@
                                  actual=cp,
                                  onpass="Copy backup config file succeeded",
                                  onfail="Copy backup config file failed" )
+
+    def setMetadataUrl( self ):
+        # NOTE: You should probably backup the config before and reset the config after the test
         # we need to modify the onos-service file to use remote metadata file
         # url for cluster metadata file
         iface = main.params[ 'server' ].get( 'interface' )
@@ -870,6 +873,7 @@
                     state = intent.get( 'state', None )
                     if "INSTALLED" not in state:
                         installedCheck = False
+                        main.log.debug( "Failed intent: " + str( intent ) )
                     intentId = intent.get( 'id', None )
                     intentStates.append( ( intentId, state ) )
             except ( ValueError, TypeError ):
diff --git a/TestON/tests/PLAT/PLATdockertest/PLATdockertest.py b/TestON/tests/PLAT/PLATdockertest/PLATdockertest.py
index a64ad3a..9cbe929 100755
--- a/TestON/tests/PLAT/PLATdockertest/PLATdockertest.py
+++ b/TestON/tests/PLAT/PLATdockertest/PLATdockertest.py
@@ -36,7 +36,7 @@
 
     def __init__( self ):
         self.default = ''
-        global DOCKERREPO, DOCKERTAG
+        global DOCKERREPO, DOCKERTAG, INITDOCKERTAG
         global IPlist
         global CTIDlist
         global NODElist
@@ -82,6 +82,8 @@
 
         if imageTagCounter < len( imageTagList ):
             DOCKERTAG = imageTagList[imageTagCounter]
+        if not imageTagCounter:
+            INITDOCKERTAG = DOCKERTAG
         imageTagCounter += 1
 
         main.case("Set case test params for onos image {}".format( DOCKERTAG ))
@@ -387,6 +389,7 @@
                     if matchObj:
                         wordsToRemove = re.compile("latest|- PASS|- FAIL|- No Result")
                         testCaseName = wordsToRemove.sub("", matchObj.group(1))
+                        testCaseName = testCaseName.replace( INITDOCKERTAG,'' )
                         testCaseList.append(testCaseName)
                         testCaseCounter += 1
                 if matchObj:
diff --git a/TestON/tests/dependencies/ONOSSetup.py b/TestON/tests/dependencies/ONOSSetup.py
index 4cfd694..8a73e32 100644
--- a/TestON/tests/dependencies/ONOSSetup.py
+++ b/TestON/tests/dependencies/ONOSSetup.py
@@ -18,15 +18,15 @@
     You should have received a copy of the GNU General Public License
     along with TestON.  If not, see <http://www.gnu.org/licenses/>.
 """
-import time
 import re
-import imp
 
 class ONOSSetup:
     main = None
+
     def __init__( self ):
         self.default = ''
-    def envSetupDescription ( self ):
+
+    def envSetupDescription( self ):
         """
         Introduction part of the test. It will initialize some basic vairables.
         """
@@ -60,7 +60,8 @@
                                      actual=stepResult,
                                      onpass="Successfully checkout onos branch.",
                                      onfail="Failed to checkout onos branch. Exiting test..." )
-            if not stepResult: main.cleanAndExit()
+            if not stepResult:
+                main.cleanAndExit()
 
             main.step( "Git Pull on ONOS branch:" + gitBranch )
             stepResult = main.ONOSbench.gitPull()
@@ -68,7 +69,8 @@
                                      actual=stepResult,
                                      onpass="Successfully pull onos. ",
                                      onfail="Failed to pull onos. Exiting test ..." )
-            if not stepResult: main.cleanAndExit()
+            if not stepResult:
+                main.cleanAndExit()
 
         else:
             main.log.info( "Skipped git checkout and pull as they are disabled in params file" )
@@ -303,9 +305,19 @@
                                  onfail="Failed to start ONOS cli" )
         return startCliResult
 
+    def processList( self, functions, args ):
+        if functions is not None:
+            if isinstance( functions, list ):
+                i = 0
+                for f in functions:
+                    f( *( args[ i ] ) ) if args is not None and args[ i ] is not None else f()
+                    i += 1
+            else:
+                functions( *args ) if args is not None else functions()
+
     def ONOSSetUp( self, Mininet, cluster, hasMultiNodeRounds=False, startOnos=True, newCell=True,
-                   cellName="temp", removeLog=False, extraApply=None, arg=None, extraClean=None,
-                   skipPack=False, installMax=False, useSSH=True, killRemoveMax=True,
+                   cellName="temp", removeLog=False, extraApply=None, applyArgs=None, extraClean=None,
+                   cleanArgs=None, skipPack=False, installMax=False, useSSH=True, killRemoveMax=True,
                    stopOnos=False, installParallel=True ):
         """
         Description:
@@ -330,9 +342,10 @@
             * newCell - True for making a new cell and False for not making it.
             * cellName - Name of the cell that will be used.
             * removeLog - True if wish to remove raft logs
-            * extraApply - Function(s) that will be applied. Default to None.
-            * arg - argument of the functon(s) of the extraApply. Should be in list.
-            * extraClean - extra Clean up process. Function(s) will be passed.
+            * extraApply - Function(s) that will be called before building ONOS. Defaults to None.
+            * applyArgs - arguments of the function(s) of the extraApply. Should be in a list.
+            * extraClean - Function(s) that will be called after building ONOS. Defaults to None.
+            * cleanArgs - arguments of the function(s) of the extraClean. Should be in a list.
             * skipPack - True if wish to skip some packing.
             * installMax - True if wish to install onos max number of nodes
             False if wish to install onos of running nodes only
@@ -346,7 +359,7 @@
         self.setNumCtrls( hasMultiNodeRounds )
 
         main.case( "Starting up " + str( cluster.numCtrls ) +
-                  " node(s) ONOS cluster" )
+                   " node(s) ONOS cluster" )
         main.caseExplanation = "Set up ONOS with " + str( cluster.numCtrls ) + \
                                " node(s) ONOS cluster"
         killResult = self.killingAllOnos( cluster, killRemoveMax, stopOnos )
@@ -369,24 +382,12 @@
                 main.ONOSbench.onosRemoveRaftLogs()
 
             onosUninstallResult = self.uninstallOnos( cluster, killRemoveMax )
-
-            if extraApply is not None:
-                if isinstance( extraApply, list ):
-                    i = 0
-                    for apply in extraApply:
-                        apply( *(arg[ i ]) ) if arg is not None \
-                                                            and arg[ i ] is not None else apply()
-                        i += 1
-                else:
-                    extraApply( *arg ) if arg is not None else extraApply()
-
-
+            self.processList( extraApply, applyArgs )
             packageResult = self.buildOnos( cluster )
 
         onosInstallResult = self.installOnos( cluster, installMax, installParallel )
 
-        if extraClean is not None:
-            extraClean()
+        self.processList( extraClean, cleanArgs )
         secureSshResult = self.setupSsh( cluster )
 
         onosServiceResult = self.checkOnosService( cluster )
@@ -395,4 +396,4 @@
             onosCliResult = self.startOnosClis( cluster )
 
         return killResult and cellResult and packageResult and onosUninstallResult and \
-               onosInstallResult and secureSshResult and onosServiceResult and onosCliResult
\ No newline at end of file
+               onosInstallResult and secureSshResult and onosServiceResult and onosCliResult