Merge "Refactored SAMPstartTemplate initial commit"
diff --git a/TestON/config/teston.cfg b/TestON/config/teston.cfg
index 558a1a1..82c96dc 100644
--- a/TestON/config/teston.cfg
+++ b/TestON/config/teston.cfg
@@ -1,18 +1,18 @@
 <config>
 
     <parser> 
-        <file>~/OnosSystemTest/TestON/core/xmlparser.py </file>
+        <file>../core/xmlparser.py</file>
         <class>xmlparser</class>
     </parser>
     <mail_to>hari@onlab.us</mail_to>
 
     <logger> 
-        <file>~/OnosSystemTest/TestON/core/logger.py </file>
+        <file>../core/logger.py</file>
         <class>Logger</class>
     </logger>
     
     <responseparser>
-        <file>~/OnosSystemTest/TestON/core/jsonparser.py </file>
+        <file>../core/jsonparser.py</file>
         <class>JsonParser</class>
     </responseparser>
 </config>
diff --git a/TestON/dependencies/Jenkins_getresult_HA.py b/TestON/dependencies/Jenkins_getresult_HA.py
deleted file mode 100755
index 181e1a9..0000000
--- a/TestON/dependencies/Jenkins_getresult_HA.py
+++ /dev/null
@@ -1,125 +0,0 @@
-#!/usr/bin/env python
-
-import sys
-import os
-import re
-import datetime
-import time
-import argparse
-import glob
-import shutil
-
-parser = argparse.ArgumentParser()
-parser.add_argument("-n", "--name", help="Comma Separated string of test names. Ex: --name='test1, test2, test3'")
-parser.add_argument("-w", "--workspace", help="The name of the Jenkin's job/workspace where csv files will be saved'")
-args = parser.parse_args()
-
-#Pass in test names as a comma separated string argument. 
-#Example: ./Jenkins_getresult.py "Test1,Test2,Test3,Test4"
-name_list = args.name.split(",")
-result_list = map(lambda x: x.strip(), name_list)
-job = args.workspace
-if job is None:
-    job = ""
-print job
-
-#NOTE: testnames list should be in order in which it is run
-testnames = result_list
-output = ''
-header = ''
-graphs = ''
-testdate = datetime.datetime.now()
-#workspace = "/var/lib/jenkins/workspace/ONOS-HA"
-workspace = "/var/lib/jenkins/workspace/"
-workspace = workspace + job
-
-header +="<p>**************************************</p>"
-header +=testdate.strftime('Jenkins test result for %H:%M on %b %d, %Y. %Z')
-
-
-#NOTE: CASE SPECIFIC THINGS
-
-#THIS LINE IS LOUSY FIXME
-if any("HA" in s for s in testnames):
-    ##Graphs
-    graphs += '<ac:structured-macro ac:name="html">\n'
-    graphs += '<ac:plain-text-body><![CDATA[\n'
-    graphs += '<iframe src="https://onos-jenkins.onlab.us/job/'+job+'/plot/Plot-HA/getPlot?index=2&width=500&height=300" noborder="0" width="500" height="300" scrolling="yes" seamless="seamless"></iframe>\n'
-    graphs += '<iframe src="https://onos-jenkins.onlab.us/job/'+job+'/plot/Plot-HA/getPlot?index=1&width=500&height=300" noborder="0" width="500" height="300" scrolling="yes" seamless="seamless"></iframe>\n'
-    graphs += '<iframe src="https://onos-jenkins.onlab.us/job/'+job+'/plot/Plot-HA/getPlot?index=0&width=500&height=300" noborder="0" width="500" height="300" scrolling="yes" seamless="seamless"></iframe>\n'
-    graphs += '<iframe src="https://onos-jenkins.onlab.us/job/'+job+'/plot/Plot-HA/getPlot?index=3&width=500&height=300" noborder="0" width="500" height="300" scrolling="yes" seamless="seamless"></iframe>\n'
-    graphs += ']]></ac:plain-text-body>\n'
-    graphs += '</ac:structured-macro>\n'
-    header +="<p> <a href='https://wiki.onosproject.org/display/OST/Test+Plan+-+HA'>Test Plan for HA Test Cases</a></p>"
-
-
-# ***
-
-
-#TestON reporting
-for test in testnames:
-    passes = 0
-    fails = 0
-    name = os.popen("ls /home/admin/ONLabTest/TestON/logs/ -rt | grep %s_ | tail -1" % test).read().split()[0]
-    path = "/home/admin/ONLabTest/TestON/logs/" + name + "/"
-    try:
-        #IF exists, move the csv file to the workspace
-        for csvFile in glob.glob( path + '*.csv' ):
-            shutil.copy( csvFile, workspace )
-    except IOError:
-        #File probably doesn't exist
-        pass
-
-    output +="<p></p>"
-    #output +="   Date: %s, %s %s" % (name.split("_")[2], name.split("_")[1], name.split("_")[3]) + "<p>*******************<p>"
-    #Open the latest log folder
-    output += "<h2>Test "+str(test)+"</h2><p>************************************</p>"
-
-    f = open(path + name + ".rpt")
-
-    #Parse through each line of logs and look for specific strings to output to wiki.
-    #NOTE: with current implementation, you must specify which output to output to wiki by using
-    #main.log.report("") since it is looking for the [REPORT] tag in the logs
-    for line in f:
-        if re.search("Result summary for Testcase", line):
-            output += "<h3>"+str(line)+"</h3>"
-            #output += "<br>"
-        if re.search("\[REPORT\]", line): 
-            line_split = line.split("] ")
-            #line string is split by bracket, and first two items (log tags) in list are omitted from output
-            #join is used to convert list to string
-            line_str = ''.join(line_split[2:])
-            output += "<p>"
-            output += line_str
-            output += "</p>"
-        if re.search("Result:", line):
-            output += "<p>"
-            output += line
-            output += "</p>"
-            if re.search("Pass", line):
-                passes = passes + 1
-            elif re.search("Fail", line):
-                fails = fails + 1
-    f.close()
-    #https://wiki.onosproject.org/display/OST/Test+Results+-+HA#Test+Results+-+HA
-    #Example anchor on new wiki:        #TestResults-HA-TestHATestSanity
-    page_name = "Master-HA"
-    if "ONOS-HA-1.1.X" in job:
-        page_name = "Blackbird-HA"
-    elif "ONOS-HA-Maint" in job:
-        # NOTE if page name starts with number confluence prepends 'id-'
-        #      to anchor links
-        page_name = "id-1.0-HA"
-
-    header += "<li><a href=\'#" + str(page_name) + "-Test" + str(test) + "\'> " + str(test) + " - Results: " + str(passes) + " Passed, " + str(fails) + " Failed</a></li>"
-
-    #*********************
-    #include any other phrase specific to case you would like to include in wiki here
-    if test == "IntentPerf":
-        output += "URL to Historical Performance results data: <a href='http://10.128.5.54perf.html'>Perf Graph</a>"
-    #*********************
-
-#header_file = open("/tmp/header_ha.txt",'w')
-#header_file.write(header)
-output = header + graphs + output
-print output
diff --git a/TestON/dependencies/Jenkins_getresult_andrew.py b/TestON/dependencies/Jenkins_getresult_andrew.py
deleted file mode 100755
index 0e7ef8d..0000000
--- a/TestON/dependencies/Jenkins_getresult_andrew.py
+++ /dev/null
@@ -1,64 +0,0 @@
-#!/usr/bin/env python
-
-import sys
-import os
-import re
-import datetime
-import time
-import argparse
-
-parser = argparse.ArgumentParser()
-parser.add_argument("-n", "--name", help="Comma Separated string of test names. Ex: --name='test1, test2, test3'")
-args = parser.parse_args()
-
-#Pass in test names as a comma separated string argument. 
-#Example: ./Jenkins_getresult.py "Test1,Test2,Test3,Test4"
-name_list = args.name.split(",")
-result_list = map(lambda x: x.strip(), name_list)
-
-#NOTE: testnames list should be in order in which it is run
-testnames = result_list
-output = ''
-testdate = datetime.datetime.now()
-
-output +="<p>**************************************</p>"
-output +=testdate.strftime('Jenkins test result for %H:%M on %b %d, %Y. %Z')
-
-#TestON reporting
-for test in testnames:
-    name = os.popen("ls /home/admin/ONLabTest/TestON/logs/ -rt | grep %s | tail -1" % test).read().split()[0]
-    path = "/home/admin/ONLabTest/TestON/logs/" + name + "/"
-    output +="<p></p>"
-    #output +="   Date: %s, %s %s" % (name.split("_")[2], name.split("_")[1], name.split("_")[3]) + "<br>*******************<br>"
-    #Open the latest log folder 
-    output += "<h2>Test "+str(test)+"</h2><p>************************************</p>"
-
-    f = open(path + name + ".rpt")
-
-    #Parse through each line of logs and look for specific strings to output to wiki.
-    #NOTE: with current implementation, you must specify which output to output to wiki by using
-    #main.log.report("") since it is looking for the [REPORT] tag in the logs
-    for line in f:
-        if re.search("Result summary for Testcase", line):
-            output += "<h3>"+str(line)+"</h3>"
-            #output += "<br>"
-        if re.search("\[REPORT\]", line): 
-            line_split = line.split("] ")
-            #line string is split by bracket, and first two items (log tags) in list are omitted from output
-            #join is used to convert list to string
-            line_str = ''.join(line_split[2:])
-            output += "<p>"
-            output += line_str
-            output += "</p>"
-        if re.search("Result:", line):
-            output += "<p>"
-            output += line
-            output += "</p>"
-    f.close()
-
-    #*********************
-    #include any other phrase specific to case you would like to include in wiki here
-    if test == "IntentPerf":
-        output += "URL to Historical Performance results data: <a href='http://10.128.5.54/perf.html'>Perf Graph</a>"
-    #*********************
-print output
diff --git a/TestON/dependencies/loadgen_NB.py b/TestON/dependencies/loadgen_NB.py
deleted file mode 100755
index 78d18b9..0000000
--- a/TestON/dependencies/loadgen_NB.py
+++ /dev/null
@@ -1,225 +0,0 @@
-#! /usr/bin/env python
-from time import time, sleep
-import time
-import json
-import requests
-import urllib2
-from urllib2 import URLError, HTTPError
-
-'''
-    This script is for Intent Throughput testing. Use linear 7-switch topo. Intents are from S1P1 to/from S7/P1, with incrementing src/dst Mac addresses.
-'''
-
-def setIntentJSN(node_id, intPerGroup, group_id, intent_id):
-    intents = [None for i in range(intPerGroup)]
-    oper = {}
-    index = 0
-    for i in range(intPerGroup / 2):
-        smac = str("%x" %(node_id * 0x100000000000 + 0x010000000000 + (group_id * 0x000001000000) +i + 1))
-        dmac = str("%x" %(node_id * 0x100000000000 + 0x070000000000 + (group_id * 0x000001000000) +i + 1))
-        srcMac = ':'.join(smac[i:i+2] for i in range(0, len(smac), 2))
-        dstMac = ':'.join(dmac[i:i+2] for i in range(0, len(dmac), 2))
-        srcSwitch = "00:00:00:00:00:00:00:01"
-        dstSwitch = "00:00:00:00:00:00:00:07"
-        srcPort = 1
-        dstPort = 1
-
-        oper['intentId'] = intent_id
-        oper['intentType'] = 'SHORTEST_PATH'    # XXX: Hardcode
-        oper['staticPath'] = False              # XXX: Hardcoded
-        oper['srcSwitchDpid'] = srcSwitch
-        oper['srcSwitchPort'] = srcPort
-        oper['dstSwitchDpid'] = dstSwitch
-        oper['dstSwitchPort'] = dstPort
-        oper['matchSrcMac'] = srcMac
-        oper['matchDstMac'] = dstMac
-        intents[index] = oper
-        #print ("perGroup Intents-0 are: " + json.dumps(intents) + "\n\n\n" )
-        index += 1
-        intent_id += 1
-        oper = {}
-        #print ("ID:" + str(id))
-
-        oper['intentId'] = intent_id
-        oper['intentType'] = 'SHORTEST_PATH'    # XXX: Hardcoded
-        oper['staticPath'] = False              # XXX: Hardcoded
-        oper['srcSwitchDpid'] = dstSwitch
-        oper['srcSwitchPort'] = dstPort
-        oper['dstSwitchDpid'] = srcSwitch
-        oper['dstSwitchPort'] = srcPort
-        oper['matchSrcMac'] = dstMac
-        oper['matchDstMac'] = srcMac
-        intents[index] = oper
-        index += 1 
-        intent_id += 1
-        oper = {}
-        #print ("ID: " + str(id))
-        #print ("perGroup Intents-1 are: " + json.dumps(intents) + "\n\n\n" )
-    #print ("contructed intents are: " + json.dumps(intents) + "\n\n\n")
-    return intents, intent_id
-
-def post_json(url, data):
-    """Make a REST POST call and return the JSON result
-           url: the URL to call
-           data: the data to POST"""
-    posturl = "http://%s/wm/onos/intent/high" %(url)
-    #print ("\nPost url is : " + posturl + "\n")
-    parsed_result = []
-    data_json = json.dumps(data)
-    try:
-        request = urllib2.Request(posturl, data_json)
-        request.add_header("Content-Type", "application/json")
-        response = urllib2.urlopen(request)
-        result = response.read()
-        response.close()
-        if len(result) != 0:
-            parsed_result = json.loads(result)
-    except HTTPError as exc:
-        print "ERROR:"
-        print "  REST POST URL: %s" % posturl
-        # NOTE: exc.fp contains the object with the response payload
-        error_payload = json.loads(exc.fp.read())
-        print "  REST Error Code: %s" % (error_payload['code'])
-        print "  REST Error Summary: %s" % (error_payload['summary'])
-        print "  REST Error Description: %s" % (error_payload['formattedDescription'])
-        print "  HTTP Error Code: %s" % exc.code
-        print "  HTTP Error Reason: %s" % exc.reason
-    except URLError as exc:
-        print "ERROR:"
-        print "  REST POST URL: %s" % posturl
-        print "  URL Error Reason: %s" % exc.reason
-    return parsed_result
-
-def delete_json(self, url, intPerGroup, startID):
-    """Make a REST DELETE call and return the JSON result
-           url: the URL to call"""
-    #url = "localhost:8080"
-    for i in range(intPerGroup):
-        posturl = "http://%s/wm/onos/intent/high/%s" %(url, str(i + startID))
-        parsed_result = []
-        try:
-            request = urllib2.Request(posturl)
-            request.get_method = lambda: 'DELETE'
-            response = urllib2.urlopen(request)
-            result = response.read()
-            response.close()
-            #if len(result) != 0:
-            #    parsed_result = json.loads(result)
-        except HTTPError as exc:
-            print "ERROR:"
-            print "  REST DELETE URL: %s" % posturl
-            # NOTE: exc.fp contains the object with the response payload
-            error_payload = json.loads(exc.fp.read())
-            print "  REST Error Code: %s" % (error_payload['code'])
-            print "  REST Error Summary: %s" % (error_payload['summary'])
-            print "  REST Error Description: %s" % (error_payload['formattedDescription'])
-            print "  HTTP Error Code: %s" % exc.code
-            print "  HTTP Error Reason: %s" % exc.reason
-        except URLError as exc:
-            print "ERROR:"
-            print "  REST DELETE URL: %s" % posturl
-            print "  URL Error Reason: %s" % exc.reason
-    return parsed_result
-
-def delete_all_json(url):
-    """Make a REST DELETE call and return the JSON result
-           url: the URL to call"""
-    #url = "localhost:8080"
-    posturl = "http://%s/wm/onos/intent/high" %(url)
-    parsed_result = []
-    try:
-        request = urllib2.Request(posturl)
-        request.get_method = lambda: 'DELETE'
-        response = urllib2.urlopen(request)
-        result = response.read()
-        response.close()
-        if len(result) != 0:
-            parsed_result = json.loads(result)
-    except HTTPError as exc:
-        print "ERROR:"
-        print "  REST DELETE URL: %s" % posturl
-        # NOTE: exc.fp contains the object with the response payload
-        error_payload = json.loads(exc.fp.read())
-        print "  REST Error Code: %s" % (error_payload['code'])
-        print "  REST Error Summary: %s" % (error_payload['summary'])
-        print "  REST Error Description: %s" % (error_payload['formattedDescription'])
-        print "  HTTP Error Code: %s" % exc.code
-        print "  HTTP Error Reason: %s" % exc.reason
-    except URLError as exc:
-        print "ERROR:"
-        print "  REST DELETE URL: %s" % posturl
-        print "  URL Error Reason: %s" % exc.reason
-    return parsed_result
-
-def loadIntents(node_id, urllist, intPerGroup, addrate, duration):
-    urlindex = 0
-    group = 0
-    start_id = 0
-    sleeptimer = (1.000/addrate)
-    tstart = time.time()
-    while ( (time.time() - tstart) <= duration ):
-        if urlindex < len(urllist):
-            realurlind = urlindex
-        else:
-            realurlind = 0
-            urlindex = 0
-
-        u = str(urllist[realurlind])
-        gstart = time.time()
-        intents,start_id = setIntentJSN(node_id, intPerGroup, group, start_id)
-        #print (str(intents))
-        #print ("Starting intent id: " + str(start_id))
-        result = post_json(u, intents)
-        #print json.dumps(intents[group])
-        #print ("post result: " + str(result))
-        gelapse = time.time() - gstart
-        print ("Group: " + str(group) + " with " + str(intPerGroup) + " intents were added in " + str('%.3f' %gelapse) + " seconds.")
-        sleep(sleeptimer)
-        urlindex += 1
-        group += 1
-
-    telapse = time.time() - tstart
-    #print ( "Number of groups: " + str(group) + "; Totoal " + str(args.groups * args.intPerGroup) + " intents were added in " + str(telapse) + " seconds.")
-    return telapse, group
-
-def main():
-    import argparse
-
-    parser = argparse.ArgumentParser(description="less script")
-    parser.add_argument("-n", "--node_id", dest="node_id", default = 1, type=int, help="id of the node generating the intents, this is used to distinguish intents when multiple nodes are use to generate intents")
-    parser.add_argument("-u", "--urls", dest="urls", default="10.128.10.1", type=str, help="a string to show urls to post intents to separated by space, ex. '10.128.10.1:8080 10.128.10.2:80080' ")
-    parser.add_argument("-i", "--intentsPerGroup", dest="intPerGroup", default=100, type=int, help="number of intents in one restcall group")
-    parser.add_argument("-a", "--addrate", dest="addrate", default=10, type=float, help="rate to add intents groups, groups per second")
-    parser.add_argument("-d", "--delrate", dest="delrate", default=100, type=float, help= "### Not Effective -for now intents are delete as bulk #### rate to delete intents, intents/second")
-    parser.add_argument("-l", "--length", dest="duration", default=300, type=int, help="duration/length of time the intents are posted")
-    parser.add_argument("-p", "--pause", dest="pause", default=0, type=int, help= "pausing time between add and delete of intents")
-    args = parser.parse_args()
-
-    node_id = args.node_id
-    urllist = args.urls.split()
-    intPerGroup = args.intPerGroup
-    addrate = args.addrate
-    delrate = args.delrate
-    duration = args.duration    
-    pause = args.pause
-
-    print ("Intent posting urls are: " + str(urllist))
-    print ("Number of Intents per group: " + str(intPerGroup))
-    print ("Intent group add rate: " + str(addrate) )
-    print ("Intent delete rate:" + str(delrate) )
-    print ("Duration: " + str(duration) )
-    print ("Pause between add and delete: " + str(args.pause))
-
-    telapse, group = loadIntents(node_id, urllist, intPerGroup, addrate, duration)
-    print ("\n\n#####################")
-    print ( str(group) + " groups " + " of " + str(intPerGroup) + " Intents per group - Total " + str(group * intPerGroup) + " intents were added in " + str('%.3f' %telapse) + " seconds.")
-    print ( "Effective intents posting rate is: " + str( '%.1f' %( (group * intPerGroup)/telapse ) ) + " Intents/second." )
-    print ("#####################\n\n")
-    print ("Sleep for " + str(pause) + " seconds before deleting all intents...")
-    time.sleep(pause)
-    print ("Cleaning up intents in all nodes...")
-    for url in urllist:
-        delete_all_json(url)
-        
-if __name__ == '__main__':
-    main()
diff --git a/TestON/dependencies/loadgen_SB.py b/TestON/dependencies/loadgen_SB.py
deleted file mode 100755
index cfd2adf..0000000
--- a/TestON/dependencies/loadgen_SB.py
+++ /dev/null
@@ -1,117 +0,0 @@
-#!/usr/bin/python
-
-"""
-This example shows how to create an empty Mininet object
-(without a topology object) and add nodes to it manually.
-"""
-import sys
-import subprocess
-import time
-from mininet.net import Mininet
-from mininet.node import Controller
-from mininet.cli import CLI
-from mininet.log import setLogLevel, info
-
-swlist = []
-hostlist= []
-count = 0 
-
-def createSwPorts(numsw, numport):
-
-    "Create an empty network and add nodes to it."
-
-    net = Mininet()
-    swlist = []
-    hostlist= []
-    print ("Starting Mininet Network....")
-    for i in range(numsw):
-        sw = net.addSwitch( 's' + str(i), dpid = ('00000000000000' + '%0d'%i))
-        print str(sw),
-        for p in range(numport):
-            host = net.addHost("s"+str(i)+"h"+str(p))
-            hostlist.append(host)
-            print str(host),
-            net.addLink(host,sw)
-        swlist.append(sw)
-
-            
-    info( '*** Starting network\n')
-    net.start()
-
-    return swlist
-
-def loadsw(urllist, swlist, addrate, delrate, duration):
-    global numport
-    urlindex = 0
-    count = 0
-    addsleeptimer = 1.000 /addrate
-    delsleeptimer = 1.000/delrate
-    print (" Add sleeptimer: " + str('%.3f' %addsleeptimer) + "; Delete sleeptimer: " + str('%.3f' %delsleeptimer))
-    print str(swlist)
- 
-    tstart = time.time()
-    while ( (time.time() - tstart) <= duration ):
-        #print (time.time() - tstart)
-        astart = time.time()
-        for sw in swlist:
-            if urlindex < len(urllist):
-                i = urlindex
-            else:
-                i = 0
-                urlindex = 0
-        
-            ovscmd = "sudo ovs-vsctl set-controller " + str(sw) + " tcp:" + urllist[i]
-            print ("a"),
-            s = subprocess.Popen(ovscmd, shell=True )
-            time.sleep(addsleeptimer)
-            count += 1
-            urlindex += 1
-        aelapse = time.time() - astart
-        print ("Number of switches connected: " + str(len(swlist)) + " in: " + str('%.3f' %aelapse) + "seconds.")
-
-        dstart = time.time()
-        for sw in swlist:
-            ovscmd = "sudo ovs-vsctl set-controller " + str(sw) + " tcp:127.0.0.1:6633"
-            print ("d"),
-            s = subprocess.Popen(ovscmd, shell=True )
-            time.sleep(delsleeptimer)
-            count += 1
-        delapse = time.time() - dstart
-        print ("Number of switches disconnected: " + str(len(swlist)) + " in: " + str('%.3f' %delapse) + "seconds.")
-    telapse = time.time() - tstart
-    
-    return telapse, count
-def cleanMN():
-    print ("Cleaning MN switches...")
-    s = subprocess.Popen("sudo mn -c > /dev/null 2>&1", shell=True)
-    print ("Done.")
-
-def main():
-    import argparse
-    import threading
-    from threading import Thread
-
-    parser = argparse.ArgumentParser(description="less script")
-    parser.add_argument("-u", "--urls", dest="urls", default="10.128.10.1", type=str, help="a string to show urls to post intents to separated by space, ex. '10.128.10.1:6633 10.128.10.2:6633' ")
-    parser.add_argument("-s", "--switches", dest="numsw", default=100, type=int, help="number of switches use in the load generator; together with the ports per switch config, each switch generates (numport + 2) events")
-    parser.add_argument("-p", "--ports", dest="numport", default=1, type=int, help="number of ports per switches")
-    parser.add_argument("-a", "--addrate", dest="addrate", default=10, type=float, help="rate to add intents groups, groups per second")
-    parser.add_argument("-d", "--delrate", dest="delrate", default=100, type=float, help= "rate to delete intents, intents/second")
-    parser.add_argument("-l", "--testlength", dest="duration", default=0, type=int, help= "pausing time between add and delete of intents")
-    args = parser.parse_args()
-
-    urllist = args.urls.split()
-    numsw = args.numsw
-    numport = args.numport
-    addrate = args.addrate
-    delrate = args.delrate
-    duration = args.duration
-    setLogLevel( 'info' )
-    swlist = createSwPorts(numsw,numport)
-    telapse,count = loadsw(urllist, swlist, addrate, delrate, duration)
-    print ("Total number of switches connected/disconnected: " + str(count) + "; Total events generated: " + str(count * (2 + numport)) + "; Elalpse time: " + str('%.1f' %telapse))
-    print ("Effective aggregated loading is: " + str('%.1f' %((( count * (2+ numport))) / telapse ) ) + "Events/s.")
-    cleanMN()
-
-if __name__ == '__main__':
-    main()
diff --git a/TestON/dependencies/rotate.sh b/TestON/dependencies/rotate.sh
deleted file mode 100755
index 7136ac6..0000000
--- a/TestON/dependencies/rotate.sh
+++ /dev/null
@@ -1,54 +0,0 @@
-#!/bin/bash
-
-
-# NOTE: Taken fnd modified from onos.sh
-# pack-rotate-log [packname] "[log-filenames]" [max rotations]
-# Note: [packname] and all the log-files specified by [log-filenames]
-#       must reside in same dir
-# Example:
-#  pack="/foo/bar/testlogs"
-#  logfiles="/foo/bar/test1.log /foo/bar/test*.log"
-#  pack-rotate-log $pack "$logfiles" 5
-#   => testlogs.tar.bz2 (will contain test1.log test2.log ...)
-#      testlogs.tar.bz2 -> testlogs.1.tar.bz2
-#      testlogs.1.tar.bz2 -> testlogs.2.tar.bz2
-#      ...
-function pack-rotate-log {
-  local packname=$1
-  local logfiles=$2
-  local nr_max=${3:-10}
-  local suffix=".tar.bz2"
-
-  # rotate
-  for i in `seq $(expr $nr_max - 1) -1 1`; do
-    if [ -f ${packname}.${i}${suffix} ]; then
-      mv -f -- ${packname}.${i}${suffix} ${packname}.`expr $i + 1`${suffix}
-    fi
-  done
-  if [ -f ${packname}${suffix} ]; then
-    mv -- ${packname}${suffix} ${packname}.1${suffix}
-  fi
-
-  # pack
-  local existing_logfiles=$( ls -1 $logfiles  2>/dev/null | xargs -n1  basename 2>/dev/null)
-  if [ ! -z "${existing_logfiles}" ]; then
-    tar cjf ${packname}${suffix} -C `dirname ${packname}` -- ${existing_logfiles}
-    for word in ${existing_logfiles}
-    do
-        rm -- `dirname ${packname}`/${word}
-    done
-   fi
-}
-
-
-
-#Begin script
-#NOTE: This seems to break the TestON summary since it mentions the testname
-#echo "Rotating logs for '${1}' test"
-base_name=$1
-root_dir="/home/admin/packet_captures"
-timestamp=`date +%Y_%B_%d_%H_%M_%S`
-#Maybe this should be an argument? pack-and-rotate supports that
-nr_max=10
-
-pack-rotate-log ${root_dir}'/'${base_name} "${root_dir}/${base_name}*.pcap ${root_dir}/${base_name}*.log*" ${nr_max}
diff --git a/TestON/dependencies/topo-100sw.py b/TestON/dependencies/topo-100sw.py
deleted file mode 100644
index 308a3f1..0000000
--- a/TestON/dependencies/topo-100sw.py
+++ /dev/null
@@ -1,31 +0,0 @@
-
-from mininet.topo import Topo
-
-class MyTopo( Topo ):
-        "100 'floating' switch topology"
-
-        def __init__( self ):
-                # Initialize topology
-                Topo.__init__( self )
-
-                sw_list = []
-
-                for i in range(1, 101):
-                        sw_list.append(
-                                self.addSwitch(
-                                        's'+str(i),
-                                        dpid = str(i).zfill(16)))
-
-
-                #Below connections are used for test cases
-                #that need to test link and port events
-                #Add link between switch 1 and switch 2
-                self.addLink(sw_list[0],sw_list[1])
-                
-                #Create hosts and attach to sw 1 and sw 2
-                h1 = self.addHost('h1')
-                h2 = self.addHost('h2')
-                self.addLink(sw_list[0],h1)
-                self.addLink(sw_list[1],h2)
-        
-topos = { 'mytopo': ( lambda: MyTopo() ) }
diff --git a/TestON/dependencies/topo-HA.py b/TestON/dependencies/topo-HA.py
deleted file mode 100644
index 65613d6..0000000
--- a/TestON/dependencies/topo-HA.py
+++ /dev/null
@@ -1,63 +0,0 @@
-from mininet.topo import Topo
-class MyTopo( Topo ):
-    def __init__( self ):
-        Topo.__init__( self )
-        topSwitch = self.addSwitch('s1',dpid='1000'.zfill(16))
-        leftTopSwitch = self.addSwitch('s2',dpid='2000'.zfill(16))
-        rightTopSwitch = self.addSwitch('s5',dpid='5000'.zfill(16))
-        leftBotSwitch = self.addSwitch('s3',dpid='3000'.zfill(16))
-        rightBotSwitch = self.addSwitch('s6',dpid='6000'.zfill(16))	
-        midBotSwitch = self.addSwitch('s28',dpid='2800'.zfill(16))
-        
-        topHost = self.addHost( 'h1' )
-        leftTopHost = self.addHost('h2')
-        rightTopHost = self.addHost('h5')
-        leftBotHost = self.addHost('h3')
-        rightBotHost = self.addHost('h6')
-        midBotHost = self.addHost('h28')
-        self.addLink(topSwitch,topHost)
-        self.addLink(leftTopSwitch,leftTopHost)
-        self.addLink(rightTopSwitch,rightTopHost)
-        self.addLink(leftBotSwitch,leftBotHost)
-        self.addLink(rightBotSwitch,rightBotHost)
-        self.addLink(midBotSwitch,midBotHost)
-        self.addLink(leftTopSwitch,rightTopSwitch)
-        self.addLink(topSwitch,leftTopSwitch)
-        self.addLink(topSwitch,rightTopSwitch)
-        self.addLink(leftTopSwitch,leftBotSwitch)
-        self.addLink(rightTopSwitch,rightBotSwitch)
-        self.addLink(leftBotSwitch,midBotSwitch)
-        self.addLink(midBotSwitch,rightBotSwitch)
-
-        agg1Switch = self.addSwitch('s4',dpid = '3004'.zfill(16))
-        agg2Switch = self.addSwitch('s7',dpid = '6007'.zfill(16))
-        agg1Host = self.addHost('h4')
-        agg2Host = self.addHost('h7')
-        self.addLink(agg1Switch,agg1Host)
-        self.addLink(agg2Switch,agg2Host)
-        self.addLink(agg1Switch, leftBotSwitch)
-        self.addLink(agg2Switch, rightBotSwitch)
-
-        for i in range(10):
-            num = str(i+8)
-            switch = self.addSwitch('s'+num,dpid = ('30'+num.zfill(2)).zfill(16))
-            host = self.addHost('h'+num)
-            self.addLink(switch, host)
-            self.addLink(switch, agg1Switch)
-
-        for i in range(10):
-            num = str(i+18)
-            switch = self.addSwitch('s'+num,dpid = ('60'+num.zfill(2)).zfill(16))
-            host = self.addHost('h'+num)
-            self.addLink(switch, host)
-            self.addLink(switch, agg2Switch)
-
-topos = { 'mytopo': (lambda: MyTopo() ) }
-
-
-
-
-
-
-
-
diff --git a/TestON/dependencies/topo-intentFlower.py b/TestON/dependencies/topo-intentFlower.py
deleted file mode 100644
index 138c291..0000000
--- a/TestON/dependencies/topo-intentFlower.py
+++ /dev/null
@@ -1,80 +0,0 @@
-'''
-Topology with 3 core switches connected linearly.
-
-Each 'core' switch has a 'flower' of 10 switches
-for a total of 33 switches.
-
-Used in conjunction with 'IntentPerfNext' test
-'''
-
-from mininet.topo import Topo
-
-class MyTopo( Topo ):
-
-    def __init__( self ):
-        Topo.__init__( self )
-       
-        #Switches are listed out here for better view
-        #of the topology from this code
-        core_sw_list = ['s1','s2','s3']
-       
-        #Flower switches for core switch 1
-        flower_sw_list_s1 =\
-                ['s10', 's11', 's12', 's13', 's14',
-                 's15', 's16', 's17', 's18', 's19']
-        #Flower switches for core switch 2
-        flower_sw_list_s2 =\
-                ['s20', 's21', 's22', 's23', 's24',
-                 's25', 's26', 's27', 's28', 's29']
-        #Flower switches for core switch 3
-        flower_sw_list_s3 =\
-                ['s30', 's31', 's32', 's33', 's34',
-                 's35', 's36', 's37', 's38', 's39']
-
-        #Store switch objects in these variables
-        core_switches = []
-        flower_switches_1 = []
-        flower_switches_2 = []
-        flower_switches_3 = []
-       
-        #Add switches
-        for sw in core_sw_list:
-            core_switches.append(
-                    self.addSwitch(
-                        sw, 
-                        dpid = sw.replace('s','').zfill(16)
-                    )
-            )
-        for sw in flower_sw_list_s1:
-            flower_switches_1.append(
-                    self.addSwitch(
-                        sw,
-                        dpid = sw.replace('s','').zfill(16)
-                    )
-            )
-        for sw in flower_sw_list_s2:
-            flower_switches_2.append(
-                    self.addSwitch(
-                        sw,
-                        dpid = sw.replace('s','').zfill(16)
-                    )
-            )
-        for sw in flower_sw_list_s3:
-            flower_switches_3.append(
-                    self.addSwitch(
-                        sw,
-                        dpid = sw.replace('s','').zfill(16)
-                    )
-            )
-
-        self.addLink(core_switches[0], core_switches[1])
-        self.addLink(core_switches[1], core_switches[2])
-
-        for x in range(0, len(flower_sw_list_s1)):
-            self.addLink(core_switches[0], flower_switches_1[x]) 
-        for x in range(0, len(flower_sw_list_s2)):
-            self.addLink(core_switches[1], flower_switches_2[x])
-        for x in range(0, len(flower_sw_list_s3)):
-            self.addLink(core_switches[2], flower_switches_3[x])
-
-topos = { 'mytopo': ( lambda: MyTopo() ) }
diff --git a/TestON/dependencies/topo-onos4node.py b/TestON/dependencies/topo-onos4node.py
deleted file mode 100644
index 4fc036c..0000000
--- a/TestON/dependencies/topo-onos4node.py
+++ /dev/null
@@ -1,63 +0,0 @@
-"""Custom topology example
-
-Two directly connected switches plus a host for each switch:
-
-   host --- switch --- switch --- host
-
-Adding the 'topos' dict with a key/value pair to generate our newly defined
-topology enables one to pass in '--topo=mytopo' from the command line.
-"""
-
-from mininet.topo import Topo
-
-class MyTopo( Topo ):
-	"Simple topology example."
-
-	def __init__( self ):
-		"Create custom topo."
-		# Initialize topology
-		Topo.__init__( self )
-
-		# Make the middle triangle	
-		leftSwitch = self.addSwitch( 's1' , dpid = '1000'.zfill(16))
-		rightSwitch = self.addSwitch( 's2' , dpid = '2000'.zfill(16))
-		topSwitch = self.addSwitch( 's3' , dpid = '3000'.zfill(16))
-		lefthost = self.addHost( 'h1' )
-		righthost = self.addHost( 'h2' )
-		tophost = self.addHost( 'h3' )
-		self.addLink( leftSwitch, lefthost )
-		self.addLink( rightSwitch, righthost )
-		self.addLink( topSwitch, tophost )
-
-		self.addLink( leftSwitch, rightSwitch )
-		self.addLink( leftSwitch, topSwitch )
-		self.addLink( topSwitch, rightSwitch )
-
-		# Make aggregation switches
-		agg1Switch = self.addSwitch( 's4', dpid = '1004'.zfill(16) ) 
-		agg2Switch = self.addSwitch( 's5', dpid = '2005'.zfill(16) ) 
-		agg1Host = self.addHost( 'h4' ) 
-		agg2Host = self.addHost( 'h5' ) 
-
-		self.addLink( agg1Switch, agg1Host, port1=1, port2=1 )
-		self.addLink( agg2Switch, agg2Host, port1=1, port2=1 )
-
-		self.addLink( agg2Switch, rightSwitch )
-		self.addLink( agg1Switch, leftSwitch )
-
-		# Make two aggregation fans
-		for i in range(10):
-			num=str(i+6)
-			switch = self.addSwitch( 's' + num, dpid = ('10' + num.zfill(2) ).zfill(16))
-			host = self.addHost( 'h' + num ) 
-			self.addLink( switch, host, port1=1, port2=1 ) 
-			self.addLink( switch, agg1Switch ) 
-
-		for i in range(10):
-			num=str(i+31)
-			switch = self.addSwitch( 's' + num, dpid = ('20' + num.zfill(2)).zfill(16) )
-			host = self.addHost( 'h' + num ) 
-			self.addLink( switch, host, port1=1, port2=1 ) 
-			self.addLink( switch, agg2Switch ) 
-
-topos = { 'mytopo': ( lambda: MyTopo() ) }
diff --git a/TestON/dependencies/topo-onos4node.py.old b/TestON/dependencies/topo-onos4node.py.old
deleted file mode 100644
index 3328a5d..0000000
--- a/TestON/dependencies/topo-onos4node.py.old
+++ /dev/null
@@ -1,61 +0,0 @@
-"""Custom topology example
-
-Two directly connected switches plus a host for each switch:
-
-   host --- switch --- switch --- host
-
-Adding the 'topos' dict with a key/value pair to generate our newly defined
-topology enables one to pass in '--topo=mytopo' from the command line.
-"""
-
-from mininet.topo import Topo
-
-class MyTopo( Topo ):
-	"Simple topology example."
-
-	def __init__( self ):
-		"Create custom topo."
-		# Initialize topology
-		Topo.__init__( self )
-
-		# Make the middle triangle	
-		leftSwitch = self.addSwitch( 's1' )
-		rightSwitch = self.addSwitch( 's2' )
-		topSwitch = self.addSwitch( 's3' )
-		lefthost = self.addHost( 'h1' )
-		righthost = self.addHost( 'h2' )
-		tophost = self.addHost( 'h3' )
-		self.addLink( leftSwitch, lefthost )
-		self.addLink( rightSwitch, righthost )
-		self.addLink( topSwitch, tophost )
-
-		self.addLink( leftSwitch, rightSwitch )
-		self.addLink( leftSwitch, topSwitch )
-		self.addLink( topSwitch, rightSwitch )
-
-		# Make aggregation switches
-		agg1Switch = self.addSwitch( 's4' ) 
-		agg2Switch = self.addSwitch( 's5' ) 
-		agg1Host = self.addHost( 'h4' ) 
-		agg2Host = self.addHost( 'h5' ) 
-
-		self.addLink( agg1Switch, agg1Host )
-		self.addLink( agg2Switch, agg2Host )
-
-		self.addLink( agg1Switch, rightSwitch )
-		self.addLink( agg2Switch, leftSwitch )
-
-		# Make two aggregation fans
-		for i in range(10):
-			switch = self.addSwitch( 's%d' % (i+6) )
-			host = self.addHost( 'h%d' % (i+6) ) 
-			self.addLink( switch, host ) 
-			self.addLink( switch, agg1Switch ) 
-
-		for i in range(10):
-			switch = self.addSwitch( 's%d' % (i+31) )
-			host = self.addHost( 'h%d' % (i+31) ) 
-			self.addLink( switch, host ) 
-			self.addLink( switch, agg2Switch ) 
-
-topos = { 'mytopo': ( lambda: MyTopo() ) }
diff --git a/TestON/dependencies/topo-onos4nodeNEW.py b/TestON/dependencies/topo-onos4nodeNEW.py
deleted file mode 100644
index 1824e3b..0000000
--- a/TestON/dependencies/topo-onos4nodeNEW.py
+++ /dev/null
@@ -1,63 +0,0 @@
-"""Custom topology example
-
-Two directly connected switches plus a host for each switch:
-
-   host --- switch --- switch --- host
-
-Adding the 'topos' dict with a key/value pair to generate our newly defined
-topology enables one to pass in '--topo=mytopo' from the command line.
-"""
-
-from mininet.topo import Topo
-
-class MyTopo( Topo ):
-	"Simple topology example."
-
-	def __init__( self ):
-		"Create custom topo."
-		# Initialize topology
-		Topo.__init__( self )
-
-		# Make the middle triangle	
-		leftSwitch = self.addSwitch( 's1' , dpid = '1000'.zfill(16))
-		rightSwitch = self.addSwitch( 's2' , dpid = '2000'.zfill(16))
-		topSwitch = self.addSwitch( 's3' , dpid = '3000'.zfill(16))
-		lefthost = self.addHost( 'h1' )
-		righthost = self.addHost( 'h2' )
-		tophost = self.addHost( 'h3' )
-		self.addLink( leftSwitch, lefthost )
-		self.addLink( rightSwitch, righthost )
-		self.addLink( topSwitch, tophost )
-
-		self.addLink( leftSwitch, rightSwitch )
-		self.addLink( leftSwitch, topSwitch )
-		self.addLink( topSwitch, rightSwitch )
-
-		# Make aggregation switches
-		agg1Switch = self.addSwitch( 's4', dpid = '1004'.zfill(16) ) 
-		agg2Switch = self.addSwitch( 's5', dpid = '2005'.zfill(16) ) 
-		agg1Host = self.addHost( 'h4' ) 
-		agg2Host = self.addHost( 'h5' ) 
-
-		self.addLink( agg1Switch, agg1Host )
-		self.addLink( agg2Switch, agg2Host )
-
-		self.addLink( agg2Switch, rightSwitch )
-		self.addLink( agg1Switch, leftSwitch )
-
-		# Make two aggregation fans
-		for i in range(10):
-			num=str(i+6)
-			switch = self.addSwitch( 's' + num, dpid = ('10' + num.zfill(2) ).zfill(16))
-			host = self.addHost( 'h' + num ) 
-			self.addLink( switch, host ) 
-			self.addLink( switch, agg1Switch ) 
-
-		for i in range(10):
-			num=str(i+31)
-			switch = self.addSwitch( 's' + num, dpid = ('20' + num.zfill(2)).zfill(16) )
-			host = self.addHost( 'h' + num ) 
-			self.addLink( switch, host ) 
-			self.addLink( switch, agg2Switch ) 
-
-topos = { 'mytopo': ( lambda: MyTopo() ) }
diff --git a/TestON/tests/SCPFflowTp1g/SCPFflowTp1g.py b/TestON/tests/SCPFflowTp1g/SCPFflowTp1g.py
index 48f1b57..0a3e0aa 100644
--- a/TestON/tests/SCPFflowTp1g/SCPFflowTp1g.py
+++ b/TestON/tests/SCPFflowTp1g/SCPFflowTp1g.py
@@ -35,10 +35,6 @@
     
         main.log.info("==========DEBUG VERSION 3===========")
 
-        main.exceptions = [0]*11
-        main.warnings = [0]*11
-        main.errors = [0]*11
-
         # -- INIT SECTION, ONLY RUNS ONCE -- #
         if init == False:
             init = True
@@ -317,8 +313,7 @@
                 if test >= warmUp:
                     for i in result: 
                         if i == "": 
-                            main.log.error("Missing data point, critical failure incoming")
-
+                            main.ONOSbench.logReport(ONOSIp[1], ["ERROR", "WARNING", "EXCEPT"])
                     print result
                     maxes[test-warmUp] = max(result)
                     main.log.info("Data collection iteration: " + str(test-warmUp) + " of " + str(sampleSize))
diff --git a/TestON/tests/SCPFintentInstallWithdrawLat/SCPFintentInstallWithdrawLat.params b/TestON/tests/SCPFintentInstallWithdrawLat/SCPFintentInstallWithdrawLat.params
new file mode 100644
index 0000000..8aad63b
--- /dev/null
+++ b/TestON/tests/SCPFintentInstallWithdrawLat/SCPFintentInstallWithdrawLat.params
@@ -0,0 +1,66 @@
+<PARAMS>
+
+    <testcases>1,2,1,2,1,2,1,2</testcases>
+
+    <SCALE>1,3,5,7</SCALE>
+    <availableNodes>7</availableNodes>
+ 
+    <ENV>
+        <cellName>IntentInstallWithdrawCell</cellName>
+        <cellApps>drivers,null</cellApps>
+    </ENV>
+
+    <TEST>
+        <skipCleanInstall>yes</skipCleanInstall>
+        <switchCount>7</switchCount>
+        <warmUp>10</warmUp>
+        <sampleSize>20</sampleSize>                     
+        <wait></wait>
+        <intents>1,100,1000</intents>                       #list format, will be split on ','
+        <debug>True</debug>                                        #"True" for true
+    </TEST>
+
+    <GIT>
+        <autopull>off</autopull>
+        <checkout>master</checkout>
+    </GIT>
+
+    <CTRL>
+        <USER>admin</USER>
+        
+        <ip1>OC1</ip1>
+        <port1>6633</port1>
+        
+        <ip2>OC2</ip2>
+        <port2>6633</port2>
+        
+        <ip3>OC3</ip3>
+        <port3>6633</port3>
+        
+        <ip4>OC4</ip4>
+        <port4>6633</port4>
+        
+        <ip5>OC5</ip5>
+        <port5>6633</port5>
+        
+        <ip6>OC6</ip6>
+        <port6>6633</port6> 
+       
+        <ip7>OC7</ip7>
+        <port7>6633</port7>
+
+    </CTRL>
+
+    <MN>
+        <ip1>OCN</ip1>
+    </MN>
+
+    <BENCH>
+        <user>admin</user>
+        <ip1>OCN</ip1>
+    </BENCH>
+
+    <JSON>
+    </JSON>
+
+</PARAMS>
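
The new test case added below reads these values through main.params. For reference, a minimal sketch of the same parsing, with a plain dict standing in for TestON's parsed XML (values copied from this file), is:

    # Stand-in for TestON's main.params; keys mirror the XML above.
    params = {
        'SCALE': '1,3,5,7',
        'availableNodes': '7',
        'TEST': {'switchCount': '7', 'warmUp': '10', 'sampleSize': '20',
                 'intents': '1,100,1000', 'debug': 'True'},
    }

    scale = params['SCALE'].split(",")            # cluster sizes, consumed one per CASE1 run
    maxNodes = int(params['availableNodes'])
    warmUp = int(params['TEST']['warmUp'])        # leading samples discarded from the stats
    sampleSize = int(params['TEST']['sampleSize'])
    intentsList = [int(i) for i in params['TEST']['intents'].split(",")]
    debug = params['TEST']['debug'] == "True"     # '"True" for true', per the comment above
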
diff --git a/TestON/tests/SCPFintentInstallWithdrawLat/SCPFintentInstallWithdrawLat.py b/TestON/tests/SCPFintentInstallWithdrawLat/SCPFintentInstallWithdrawLat.py
new file mode 100644
index 0000000..34e9a8d
--- /dev/null
+++ b/TestON/tests/SCPFintentInstallWithdrawLat/SCPFintentInstallWithdrawLat.py
@@ -0,0 +1,278 @@
+# ScaleOutTemplate
+#
+# CASE1 starts number of nodes specified in param file
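+# CASE2 pushes and withdraws batches of intents and reports install/withdraw latency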
+#
+# cameron@onlab.us
+
+import sys
+import os.path
+
+
+class SCPFintentInstallWithdrawLat:
+
+    def __init__( self ):
+        self.default = ''
+
+    def CASE1( self, main ):           
+                                        
+        import time                     
+        global init       
+        try: 
+            if type(init) is not bool: 
+                init = False  
+        except NameError: 
+            init = False 
+       
+        #Load values from params file
+        checkoutBranch = main.params[ 'GIT' ][ 'checkout' ]
+        gitPull = main.params[ 'GIT' ][ 'autopull' ]
+        cellName = main.params[ 'ENV' ][ 'cellName' ]
+        Apps = main.params[ 'ENV' ][ 'cellApps' ]
+        BENCHIp = main.params[ 'BENCH' ][ 'ip1' ]
+        BENCHUser = main.params[ 'BENCH' ][ 'user' ]
+        MN1Ip = main.params[ 'MN' ][ 'ip1' ]
+        maxNodes = int(main.params[ 'availableNodes' ])
+        skipMvn = main.params[ 'TEST' ][ 'skipCleanInstall' ]
+        cellName = main.params[ 'ENV' ][ 'cellName' ]        
+        switchCount = main.params[ 'TEST' ][ 'switchCount' ]
+
+        # -- INIT SECTION, ONLY RUNS ONCE -- # 
+        if init == False: 
+            init = True
+            global clusterCount             #number of nodes running
+            global ONOSIp                   #list of ONOS IP addresses
+            global scale 
+            global commit            
+    
+            clusterCount = 0
+            ONOSIp = [ 0 ]
+            scale = (main.params[ 'SCALE' ]).split(",")            
+            clusterCount = int(scale[0])
+
+            #Populate ONOSIp with ips from params 
+            ONOSIp = [0]
+            ONOSIp.extend(main.ONOSbench.getOnosIps())
+
+            #mvn clean install, for debugging set param 'skipCleanInstall' to yes to speed up test
+            if skipMvn != "yes":
+                mvnResult = main.ONOSbench.cleanInstall()
+
+            #git
+            main.step( "Git checkout and pull " + checkoutBranch )
+            if gitPull == 'on':
+                checkoutResult = main.ONOSbench.gitCheckout( checkoutBranch )
+                pullResult = main.ONOSbench.gitPull()
+
+            else:
+                checkoutResult = main.TRUE
+                pullResult = main.TRUE
+                main.log.info( "Skipped git checkout and pull" )
+       
+            commit = main.ONOSbench.getVersion()
+            commit = (commit.split(" "))[1]
+
+            resultsDB = open("IntentInstallWithdrawLatDB", "w+")
+            resultsDB.close()
+
+        # -- END OF INIT SECTION --#
+         
+        clusterCount = int(scale[0])
+        scale.remove(scale[0])       
+
+        MN1Ip = ONOSIp[len(ONOSIp)-1]
+        BENCHIp = ONOSIp[len(ONOSIp)-2]
+
+        #kill off all onos processes 
+        main.log.step("Safety check, killing all ONOS processes")
+        main.log.step("before initiating enviornment setup")
+        for node in range(1, maxNodes + 1):
+            main.ONOSbench.onosDie(ONOSIp[node])
+        
+        #Uninstall everywhere
+        main.log.step( "Cleaning Enviornment..." )
+        for i in range(1, maxNodes + 1):
+            main.log.info(" Uninstalling ONOS " + str(i) )
+            main.ONOSbench.onosUninstall( ONOSIp[i] )
+       
+        #construct the cell file
+        main.log.info("Creating cell file")
+        cellIp = []
+        for node in range (1, clusterCount + 1):
+            cellIp.append(ONOSIp[node])
+
+        main.ONOSbench.createCellFile(BENCHIp,cellName,MN1Ip,str(Apps), *cellIp)
+
+        main.step( "Set Cell" )
+        main.ONOSbench.setCell(cellName)
+        
+        main.step( "Creating ONOS package" )
+        packageResult = main.ONOSbench.onosPackage()  
+
+        main.step( "verify cells" )
+        verifyCellResult = main.ONOSbench.verifyCell()
+      
+        main.log.report( "Initializeing " + str( clusterCount ) + " node cluster." )
+        for node in range(1, clusterCount + 1):
+            main.log.info("Starting ONOS " + str(node) + " at IP: " + ONOSIp[node])
+            main.ONOSbench.onosInstall( ONOSIp[node])
+
+        for node in range(1, clusterCount + 1):
+            for i in range( 2 ):
+                isup = main.ONOSbench.isup( ONOSIp[node] )
+                if isup:
+                    main.log.info("ONOS " + str(node) + " is up\n")
+                    break
+            if not isup:
+                main.log.report( "ONOS " + str(node) + " didn't start!" )
+
+        main.ONOS1cli.startOnosCli( ONOSIp[1] )
+        main.log.info("Startup sequence complete")
+        
+        time.sleep(30)
+        
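+        # Configure the NullProviders app (device count, linear topoShape, enabled) and
+        # retry up to 3 times until the ONOS summary reflects the expected switch count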
+        for i in range(3):
+            main.ONOSbench.onosCfgSet( ONOSIp[1], "org.onosproject.provider.nil.NullProviders", ("deviceCount " + str(switchCount)) ) 
+            main.ONOSbench.onosCfgSet( ONOSIp[1], "org.onosproject.provider.nil.NullProviders", "topoShape linear")
+            main.ONOSbench.onosCfgSet( ONOSIp[1], "org.onosproject.provider.nil.NullProviders", "enabled true")
+            if main.ONOSbench.verifySummary(ONOSIp[1], switchCount):
+                break
+            else: 
+                print "Failed- looping" 
+
+        main.ONOSbench.handle.sendline("""onos $OC1 "balance-masters" """)
+        main.ONOSbench.handle.expect(":~")
+        main.ONOSbench.logReport(ONOSIp[1], ["ERROR", "WARNING", "EXCEPT"])
+
+    def CASE2( self, main ):
+         
+        import time
+        import numpy
+
+        testStatus = "pass"
+        sampleSize = int(main.params[ 'TEST' ][ 'sampleSize' ])
+        warmUp = int(main.params[ 'TEST' ][ 'warmUp' ])
+        intentsList = (main.params[ 'TEST' ][ 'intents' ]).split(",")
+        switchCount = int(main.params[ 'TEST' ][ 'switchCount' ])
+        debug = main.params[ 'TEST' ][ 'debug' ]
+        for i in range(0,len(intentsList)):
+            intentsList[i] = int(intentsList[i])
+
+        ######################
+        debug = True
+        ######################
+
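+        # A linear null topology of N switches should report 2*(N-1) links; poll the CLI until that count appears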
+        linkCount = 0
+        for i in range(0,10):
+            main.ONOSbench.handle.sendline("onos $OC1 links|wc -l")
+            main.ONOSbench.handle.expect(":~")
+            linkCount = main.ONOSbench.handle.before
+            if debug: main.log.info("Link Count check: " + linkCount)
+            if str((switchCount*2)-2) in linkCount:
+                break
+            time.sleep(2)
+
+        links = "--"
+        for i in range(8): 
+            if debug: main.log.info("top of loop")
+            main.ONOSbench.handle.sendline("onos $OC1 links")
+            main.ONOSbench.handle.expect(":~")
+            links = main.ONOSbench.handle.before
+            if "=null:" in links:
+                break 
+            if debug: main.log.info(str(links))
+            if i > 3: 
+                main.ONOSbench.logReport(ONOSIp[1], ["ERROR", "WARNING", "EXCEPT"], "d")  
+            if i == 7: 
+                main.log.error("link data missing") 
+            time.sleep(3)
+
+        links = links.splitlines()
+        templinks = links
+
+        tempDevices = []
+        for line in links:
+            temp = line.split(" ")
+            temp[0].replace("src=","")
+            temp[0] = (temp[0].split("/"))[0]
+            tempDevices.append(temp[0])
+
+        tempDevices.sort()
+        devices = []
+        for i in tempDevices:
+            if "src=null" in i:
+                devices.append(i.replace("src=", ""))
+        if debug: main.log.info(str(devices))
+
+        ingress = devices[0]
+        egress = devices.pop()
+        if debug: main.log.info(ingress)
+        if debug: main.log.info(egress)
+
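+        # For each batch size, push and withdraw intents between the first and last null devices,
+        # keeping the latencies reported by push-test-intents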
+        for intentSize in intentsList:
+            cmd = "onos $OC1 push-test-intents "
+            cmd += ingress + "/6 "
+            cmd += egress + "/5 "
+            cmd += str(intentSize) + " 1"
+            installed = []
+            withdrawn = []
+
+            for run in range(0, (warmUp + sampleSize)):
+                if run > warmUp:
+                    time.sleep(5)
+
+                myRawResult = "--"
+                while "ms" not in myRawResult:
+                    main.ONOSbench.handle.sendline(cmd)
+                    main.ONOSbench.handle.expect(":~")
+                    myRawResult = main.ONOSbench.handle.before
+                    if debug: main.log.info(myRawResult)
+
+                if debug: main.log.info(myRawResult)
+
+                if run >= warmUp:
+                    myRawResult = myRawResult.splitlines()
+                    for line in myRawResult:
+                        if "install" in line:
+                            installed.append(int(line.split(" ")[5]))
+
+                    for line in myRawResult:
+                        if "withdraw" in line:
+                            withdrawn.append(int(line.split(" ")[5]))
+
+                    for line in myRawResult: 
+                        if "Failure:" in line: 
+                            main.log.error("INTENT TEST FAILURE, ABORTING TESTCASE")
+                            testStatus = "fail"
+                    print("installed: " + str(installed))
+                    print("withdrawn: " + str(withdrawn) + "\n")
+                    if withdrawn[len(withdrawn) - 1] > 1000 or installed[len(installed) - 1] > 1000:
+                        main.log.info("ABNORMAL VALUE, CHECKING LOG")
+                        main.ONOSbench.logReport(ONOSIp[1], ["ERROR", "WARNING", "EXCEPT"], outputMode="d")
+
+                if testStatus == "fail":
+                    break
+
+            if testStatus == "fail": 
+                break 
+            main.log.report("----------------------------------------------------")
+            main.log.report("Scale: " + str(clusterCount) + "\tIntent batch size: " + str(intentSize))
+            main.log.report("Data samples: " + str(sampleSize) + "\tWarm up tests: " + str(warmUp))
+            main.log.report("Installed average: " + str(numpy.mean(installed)))
+            main.log.report("Installed standard deviation: " + str(numpy.std(installed)))
+            main.log.report("Withdraw average: " + str(numpy.mean(withdrawn)))
+            main.log.report("Withdraw standard deviation: " + str(numpy.std(withdrawn)))
+            main.log.report("     ")
+
+            resultString = "'" + commit + "',"
+            resultString += str(clusterCount) + ","
+            resultString += str(intentSize) + ","
+            resultString += str(numpy.mean(installed)) + ","
+            resultString += str(numpy.std(installed)) + ","
+            resultString += str(numpy.mean(withdrawn)) + ","
+            resultString += str(numpy.std(withdrawn)) + "\n"
+            resultsDB = open("IntentInstallWithdrawLatDB", "a")
+            resultsDB.write(resultString)
+            resultsDB.close()
+
+            main.ONOSbench.logReport(ONOSIp[1], ["ERROR", "WARNING", "EXCEPT"])
+            time.sleep(20)
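
After the warm-up runs, CASE2 above reports the mean and standard deviation of the collected install/withdraw latencies with numpy and appends a row to IntentInstallWithdrawLatDB. A self-contained sketch of that post-processing step, using placeholder latency samples in place of the parsed push-test-intents output, is:

    import numpy

    # Placeholder latency samples (ms); the real values come from parsing the
    # "onos $OC1 push-test-intents ..." output as done in CASE2 above.
    installed = [812, 790, 805, 821, 799]
    withdrawn = [744, 733, 748, 751, 740]

    print("Installed average: " + str(numpy.mean(installed)))
    print("Installed standard deviation: " + str(numpy.std(installed)))
    print("Withdraw average: " + str(numpy.mean(withdrawn)))
    print("Withdraw standard deviation: " + str(numpy.std(withdrawn)))
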
diff --git a/TestON/tests/SCPFintentInstallWithdrawLat/SCPFintentInstallWithdrawLat.topo b/TestON/tests/SCPFintentInstallWithdrawLat/SCPFintentInstallWithdrawLat.topo
new file mode 100644
index 0000000..d82f3fd
--- /dev/null
+++ b/TestON/tests/SCPFintentInstallWithdrawLat/SCPFintentInstallWithdrawLat.topo
@@ -0,0 +1,144 @@
+<TOPOLOGY>
+
+    <COMPONENT>
+
+        <ONOSbench>
+            <host>OCN</host>
+            <user>admin</user>
+            <password>onos_test</password>
+            <type>OnosDriver</type>
+            <connect_order>1</connect_order>
+            <COMPONENTS><home>~/onos</home></COMPONENTS>
+        </ONOSbench>
+
+        <ONOS1cli>
+            <host>OCN</host>
+            <user>admin</user>
+            <password>onos_test</password>
+            <type>OnosCliDriver</type>
+            <connect_order>2</connect_order>
+            <COMPONENTS> </COMPONENTS>
+        </ONOS1cli>
+
+        <ONOS2cli>
+            <host>OCN</host>
+            <user>admin</user>
+            <password>onos_test</password>
+            <type>OnosCliDriver</type>
+            <connect_order>3</connect_order>
+            <COMPONENTS> </COMPONENTS>
+        </ONOS2cli>
+
+        <ONOS3cli>
+            <host>OCN</host>
+            <user>admin</user>
+            <password>onos_test</password>
+            <type>OnosCliDriver</type>
+            <connect_order>4</connect_order>
+            <COMPONENTS> </COMPONENTS>
+        </ONOS3cli>
+
+        <ONOS4cli>
+            <host>OCN</host>
+            <user>admin</user>
+            <password>onos_test</password>
+            <type>OnosCliDriver</type>
+            <connect_order>5</connect_order>
+            <COMPONENTS> </COMPONENTS>
+        </ONOS4cli>
+
+        <ONOS5cli>
+            <host>OCN</host>
+            <user>admin</user>
+            <password>onos_test</password>
+            <type>OnosCliDriver</type>
+            <connect_order>6</connect_order>
+            <COMPONENTS> </COMPONENTS>
+        </ONOS5cli>
+
+        <ONOS6cli>
+            <host>OCN</host>
+            <user>admin</user>
+            <password>onos_test</password>
+            <type>OnosCliDriver</type>
+            <connect_order>7</connect_order>
+            <COMPONENTS> </COMPONENTS>
+        </ONOS6cli>
+
+        <ONOS7cli>
+            <host>OCN</host>
+            <user>admin</user>
+            <password>onos_test</password>
+            <type>OnosCliDriver</type>
+            <connect_order>8</connect_order>
+            <COMPONENTS> </COMPONENTS>
+        </ONOS7cli>
+
+        <ONOS1>
+            <host>OC1</host>
+            <user>sdn</user>
+            <password>rocks</password>
+            <type>OnosDriver</type>
+            <connect_order>9</connect_order>
+            <COMPONENTS> </COMPONENTS>
+        </ONOS1>
+
+        <ONOS2>
+            <host>OC2</host>
+            <user>sdn</user>
+            <password>rocks</password>
+            <type>OnosDriver</type>
+            <connect_order>10</connect_order>
+            <COMPONENTS> </COMPONENTS>
+        </ONOS2>
+
+        <ONOS3>
+            <host>OC3</host>
+            <user>sdn</user>
+            <password>rocks</password>
+            <type>OnosDriver</type>
+            <connect_order>11</connect_order>
+            <COMPONENTS> </COMPONENTS>
+        </ONOS3>
+
+        <ONOS4>
+            <host>OC4</host>
+            <user>sdn</user>
+            <password>rocks</password>
+            <type>OnosDriver</type>
+            <connect_order>12</connect_order>
+            <COMPONENTS> </COMPONENTS>
+        </ONOS4>
+
+    
+        <ONOS5>
+            <host>OC5</host>
+            <user>sdn</user>
+            <password>rocks</password>
+            <type>OnosDriver</type>
+            <connect_order>13</connect_order>
+            <COMPONENTS> </COMPONENTS>
+        </ONOS5>
+
+        <ONOS6>
+            <host>OC6</host>
+            <user>sdn</user>
+            <password>rocks</password>
+            <type>OnosDriver</type>
+            <connect_order>14</connect_order>
+            <COMPONENTS> </COMPONENTS>
+        </ONOS6>
+
+        <ONOS7>
+            <host>OC7</host>
+            <user>sdn</user>
+            <password>rocks</password>
+            <type>OnosDriver</type>
+            <connect_order>15</connect_order>
+            <COMPONENTS> </COMPONENTS>
+        </ONOS7>
+
+    </COMPONENT>
+
+</TOPOLOGY>
+ 
diff --git a/TestON/tests/SCPFintentInstallWithdrawLat/__init__.py b/TestON/tests/SCPFintentInstallWithdrawLat/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/TestON/tests/SCPFintentInstallWithdrawLat/__init__.py