Remove top-level dependencies folder.

Each test should include any scripts it depends upon. If a script is
useful for every test, we can consider including it in the bin
folder.

Change-Id: I315f3893987931cc414f88d4c31be8595db46bc7
diff --git a/TestON/dependencies/Jenkins_getresult_HA.py b/TestON/dependencies/Jenkins_getresult_HA.py
deleted file mode 100755
index 181e1a9..0000000
--- a/TestON/dependencies/Jenkins_getresult_HA.py
+++ /dev/null
@@ -1,125 +0,0 @@
-#!/usr/bin/env python
-
-import sys
-import os
-import re
-import datetime
-import time
-import argparse
-import glob
-import shutil
-
-parser = argparse.ArgumentParser()
-parser.add_argument("-n", "--name", help="Comma Separated string of test names. Ex: --name='test1, test2, test3'")
-parser.add_argument("-w", "--workspace", help="The name of the Jenkin's job/workspace where csv files will be saved'")
-args = parser.parse_args()
-
-#Pass in test names as a comma separated string argument. 
-#Example: ./Jenkins_getresult.py "Test1,Test2,Test3,Test4"
-name_list = args.name.split(",")
-result_list = map(lambda x: x.strip(), name_list)
-job = args.workspace
-if job is None:
-    job = ""
-print job
-
-#NOTE: testnames list should be in order in which it is run
-testnames = result_list
-output = ''
-header = ''
-graphs = ''
-testdate = datetime.datetime.now()
-#workspace = "/var/lib/jenkins/workspace/ONOS-HA"
-workspace = "/var/lib/jenkins/workspace/"
-workspace = workspace + job
-
-header +="<p>**************************************</p>"
-header +=testdate.strftime('Jenkins test result for %H:%M on %b %d, %Y. %Z')
-
-
-#NOTE: CASE SPECIFIC THINGS
-
-#THIS LINE IS LOUSY FIXME
-if any("HA" in s for s in testnames):
-    ##Graphs
-    graphs += '<ac:structured-macro ac:name="html">\n'
-    graphs += '<ac:plain-text-body><![CDATA[\n'
-    graphs += '<iframe src="https://onos-jenkins.onlab.us/job/'+job+'/plot/Plot-HA/getPlot?index=2&width=500&height=300" noborder="0" width="500" height="300" scrolling="yes" seamless="seamless"></iframe>\n'
-    graphs += '<iframe src="https://onos-jenkins.onlab.us/job/'+job+'/plot/Plot-HA/getPlot?index=1&width=500&height=300" noborder="0" width="500" height="300" scrolling="yes" seamless="seamless"></iframe>\n'
-    graphs += '<iframe src="https://onos-jenkins.onlab.us/job/'+job+'/plot/Plot-HA/getPlot?index=0&width=500&height=300" noborder="0" width="500" height="300" scrolling="yes" seamless="seamless"></iframe>\n'
-    graphs += '<iframe src="https://onos-jenkins.onlab.us/job/'+job+'/plot/Plot-HA/getPlot?index=3&width=500&height=300" noborder="0" width="500" height="300" scrolling="yes" seamless="seamless"></iframe>\n'
-    graphs += ']]></ac:plain-text-body>\n'
-    graphs += '</ac:structured-macro>\n'
-    header +="<p> <a href='https://wiki.onosproject.org/display/OST/Test+Plan+-+HA'>Test Plan for HA Test Cases</a></p>"
-
-
-# ***
-
-
-#TestON reporting
-for test in testnames:
-    passes = 0
-    fails = 0
-    name = os.popen("ls /home/admin/ONLabTest/TestON/logs/ -rt | grep %s_ | tail -1" % test).read().split()[0]
-    path = "/home/admin/ONLabTest/TestON/logs/" + name + "/"
-    try:
-        #IF exists, move the csv file to the workspace
-        for csvFile in glob.glob( path + '*.csv' ):
-            shutil.copy( csvFile, workspace )
-    except IOError:
-        #File probably doesn't exist
-        pass
-
-    output +="<p></p>"
-    #output +="   Date: %s, %s %s" % (name.split("_")[2], name.split("_")[1], name.split("_")[3]) + "<p>*******************<p>"
-    #Open the latest log folder
-    output += "<h2>Test "+str(test)+"</h2><p>************************************</p>"
-
-    f = open(path + name + ".rpt")
-
-    #Parse through each line of logs and look for specific strings to output to wiki.
-    #NOTE: with current implementation, you must specify which output to output to wiki by using
-    #main.log.report("") since it is looking for the [REPORT] tag in the logs
-    for line in f:
-        if re.search("Result summary for Testcase", line):
-            output += "<h3>"+str(line)+"</h3>"
-            #output += "<br>"
-        if re.search("\[REPORT\]", line): 
-            line_split = line.split("] ")
-            #line string is split by bracket, and first two items (log tags) in list are omitted from output
-            #join is used to convert list to string
-            line_str = ''.join(line_split[2:])
-            output += "<p>"
-            output += line_str
-            output += "</p>"
-        if re.search("Result:", line):
-            output += "<p>"
-            output += line
-            output += "</p>"
-            if re.search("Pass", line):
-                passes = passes + 1
-            elif re.search("Fail", line):
-                fails = fails + 1
-    f.close()
-    #https://wiki.onosproject.org/display/OST/Test+Results+-+HA#Test+Results+-+HA
-    #Example anchor on new wiki:        #TestResults-HA-TestHATestSanity
-    page_name = "Master-HA"
-    if "ONOS-HA-1.1.X" in job:
-        page_name = "Blackbird-HA"
-    elif "ONOS-HA-Maint" in job:
-        # NOTE if page name starts with number confluence prepends 'id-'
-        #      to anchor links
-        page_name = "id-1.0-HA"
-
-    header += "<li><a href=\'#" + str(page_name) + "-Test" + str(test) + "\'> " + str(test) + " - Results: " + str(passes) + " Passed, " + str(fails) + " Failed</a></li>"
-
-    #*********************
-    #include any other phrase specific to case you would like to include in wiki here
-    if test == "IntentPerf":
-        output += "URL to Historical Performance results data: <a href='http://10.128.5.54perf.html'>Perf Graph</a>"
-    #*********************
-
-#header_file = open("/tmp/header_ha.txt",'w')
-#header_file.write(header)
-output = header + graphs + output
-print output
diff --git a/TestON/dependencies/Jenkins_getresult_andrew.py b/TestON/dependencies/Jenkins_getresult_andrew.py
deleted file mode 100755
index 0e7ef8d..0000000
--- a/TestON/dependencies/Jenkins_getresult_andrew.py
+++ /dev/null
@@ -1,64 +0,0 @@
-#!/usr/bin/env python
-
-import sys
-import os
-import re
-import datetime
-import time
-import argparse
-
-parser = argparse.ArgumentParser()
-parser.add_argument("-n", "--name", help="Comma Separated string of test names. Ex: --name='test1, test2, test3'")
-args = parser.parse_args()
-
-#Pass in test names as a comma separated string argument. 
-#Example: ./Jenkins_getresult.py "Test1,Test2,Test3,Test4"
-name_list = args.name.split(",")
-result_list = map(lambda x: x.strip(), name_list)
-
-#NOTE: testnames list should be in order in which it is run
-testnames = result_list
-output = ''
-testdate = datetime.datetime.now()
-
-output +="<p>**************************************</p>"
-output +=testdate.strftime('Jenkins test result for %H:%M on %b %d, %Y. %Z')
-
-#TestON reporting
-for test in testnames:
-    name = os.popen("ls /home/admin/ONLabTest/TestON/logs/ -rt | grep %s | tail -1" % test).read().split()[0]
-    path = "/home/admin/ONLabTest/TestON/logs/" + name + "/"
-    output +="<p></p>"
-    #output +="   Date: %s, %s %s" % (name.split("_")[2], name.split("_")[1], name.split("_")[3]) + "<br>*******************<br>"
-    #Open the latest log folder 
-    output += "<h2>Test "+str(test)+"</h2><p>************************************</p>"
-
-    f = open(path + name + ".rpt")
-
-    #Parse through each line of logs and look for specific strings to output to wiki.
-    #NOTE: with current implementation, you must specify which output to output to wiki by using
-    #main.log.report("") since it is looking for the [REPORT] tag in the logs
-    for line in f:
-        if re.search("Result summary for Testcase", line):
-            output += "<h3>"+str(line)+"</h3>"
-            #output += "<br>"
-        if re.search("\[REPORT\]", line): 
-            line_split = line.split("] ")
-            #line string is split by bracket, and first two items (log tags) in list are omitted from output
-            #join is used to convert list to string
-            line_str = ''.join(line_split[2:])
-            output += "<p>"
-            output += line_str
-            output += "</p>"
-        if re.search("Result:", line):
-            output += "<p>"
-            output += line
-            output += "</p>"
-    f.close()
-
-    #*********************
-    #include any other phrase specific to case you would like to include in wiki here
-    if test == "IntentPerf":
-        output += "URL to Historical Performance results data: <a href='http://10.128.5.54/perf.html'>Perf Graph</a>"
-    #*********************
-print output
diff --git a/TestON/dependencies/loadgen_NB.py b/TestON/dependencies/loadgen_NB.py
deleted file mode 100755
index 78d18b9..0000000
--- a/TestON/dependencies/loadgen_NB.py
+++ /dev/null
@@ -1,225 +0,0 @@
-#! /usr/bin/env python
-from time import time, sleep
-import time
-import json
-import requests
-import urllib2
-from urllib2 import URLError, HTTPError
-
-'''
-    This script is for Intent Throughput testing. Use linear 7-switch topo. Intents are from S1P1 to/from S7/P1, with incrementing src/dst Mac addresses.
-'''
-
-def setIntentJSN(node_id, intPerGroup, group_id, intent_id):
-    intents = [None for i in range(intPerGroup)]
-    oper = {}
-    index = 0
-    for i in range(intPerGroup / 2):
-        smac = str("%x" %(node_id * 0x100000000000 + 0x010000000000 + (group_id * 0x000001000000) +i + 1))
-        dmac = str("%x" %(node_id * 0x100000000000 + 0x070000000000 + (group_id * 0x000001000000) +i + 1))
-        srcMac = ':'.join(smac[i:i+2] for i in range(0, len(smac), 2))
-        dstMac = ':'.join(dmac[i:i+2] for i in range(0, len(dmac), 2))
-        srcSwitch = "00:00:00:00:00:00:00:01"
-        dstSwitch = "00:00:00:00:00:00:00:07"
-        srcPort = 1
-        dstPort = 1
-
-        oper['intentId'] = intent_id
-        oper['intentType'] = 'SHORTEST_PATH'    # XXX: Hardcode
-        oper['staticPath'] = False              # XXX: Hardcoded
-        oper['srcSwitchDpid'] = srcSwitch
-        oper['srcSwitchPort'] = srcPort
-        oper['dstSwitchDpid'] = dstSwitch
-        oper['dstSwitchPort'] = dstPort
-        oper['matchSrcMac'] = srcMac
-        oper['matchDstMac'] = dstMac
-        intents[index] = oper
-        #print ("perGroup Intents-0 are: " + json.dumps(intents) + "\n\n\n" )
-        index += 1
-        intent_id += 1
-        oper = {}
-        #print ("ID:" + str(id))
-
-        oper['intentId'] = intent_id
-        oper['intentType'] = 'SHORTEST_PATH'    # XXX: Hardcoded
-        oper['staticPath'] = False              # XXX: Hardcoded
-        oper['srcSwitchDpid'] = dstSwitch
-        oper['srcSwitchPort'] = dstPort
-        oper['dstSwitchDpid'] = srcSwitch
-        oper['dstSwitchPort'] = srcPort
-        oper['matchSrcMac'] = dstMac
-        oper['matchDstMac'] = srcMac
-        intents[index] = oper
-        index += 1 
-        intent_id += 1
-        oper = {}
-        #print ("ID: " + str(id))
-        #print ("perGroup Intents-1 are: " + json.dumps(intents) + "\n\n\n" )
-    #print ("contructed intents are: " + json.dumps(intents) + "\n\n\n")
-    return intents, intent_id
-
-def post_json(url, data):
-    """Make a REST POST call and return the JSON result
-           url: the URL to call
-           data: the data to POST"""
-    posturl = "http://%s/wm/onos/intent/high" %(url)
-    #print ("\nPost url is : " + posturl + "\n")
-    parsed_result = []
-    data_json = json.dumps(data)
-    try:
-        request = urllib2.Request(posturl, data_json)
-        request.add_header("Content-Type", "application/json")
-        response = urllib2.urlopen(request)
-        result = response.read()
-        response.close()
-        if len(result) != 0:
-            parsed_result = json.loads(result)
-    except HTTPError as exc:
-        print "ERROR:"
-        print "  REST POST URL: %s" % posturl
-        # NOTE: exc.fp contains the object with the response payload
-        error_payload = json.loads(exc.fp.read())
-        print "  REST Error Code: %s" % (error_payload['code'])
-        print "  REST Error Summary: %s" % (error_payload['summary'])
-        print "  REST Error Description: %s" % (error_payload['formattedDescription'])
-        print "  HTTP Error Code: %s" % exc.code
-        print "  HTTP Error Reason: %s" % exc.reason
-    except URLError as exc:
-        print "ERROR:"
-        print "  REST POST URL: %s" % posturl
-        print "  URL Error Reason: %s" % exc.reason
-    return parsed_result
-
-def delete_json(self, url, intPerGroup, startID):
-    """Make a REST DELETE call and return the JSON result
-           url: the URL to call"""
-    #url = "localhost:8080"
-    for i in range(intPerGroup):
-        posturl = "http://%s/wm/onos/intent/high/%s" %(url, str(i + startID))
-        parsed_result = []
-        try:
-            request = urllib2.Request(posturl)
-            request.get_method = lambda: 'DELETE'
-            response = urllib2.urlopen(request)
-            result = response.read()
-            response.close()
-            #if len(result) != 0:
-            #    parsed_result = json.loads(result)
-        except HTTPError as exc:
-            print "ERROR:"
-            print "  REST DELETE URL: %s" % posturl
-            # NOTE: exc.fp contains the object with the response payload
-            error_payload = json.loads(exc.fp.read())
-            print "  REST Error Code: %s" % (error_payload['code'])
-            print "  REST Error Summary: %s" % (error_payload['summary'])
-            print "  REST Error Description: %s" % (error_payload['formattedDescription'])
-            print "  HTTP Error Code: %s" % exc.code
-            print "  HTTP Error Reason: %s" % exc.reason
-        except URLError as exc:
-            print "ERROR:"
-            print "  REST DELETE URL: %s" % posturl
-            print "  URL Error Reason: %s" % exc.reason
-    return parsed_result
-
-def delete_all_json(url):
-    """Make a REST DELETE call and return the JSON result
-           url: the URL to call"""
-    #url = "localhost:8080"
-    posturl = "http://%s/wm/onos/intent/high" %(url)
-    parsed_result = []
-    try:
-        request = urllib2.Request(posturl)
-        request.get_method = lambda: 'DELETE'
-        response = urllib2.urlopen(request)
-        result = response.read()
-        response.close()
-        if len(result) != 0:
-            parsed_result = json.loads(result)
-    except HTTPError as exc:
-        print "ERROR:"
-        print "  REST DELETE URL: %s" % posturl
-        # NOTE: exc.fp contains the object with the response payload
-        error_payload = json.loads(exc.fp.read())
-        print "  REST Error Code: %s" % (error_payload['code'])
-        print "  REST Error Summary: %s" % (error_payload['summary'])
-        print "  REST Error Description: %s" % (error_payload['formattedDescription'])
-        print "  HTTP Error Code: %s" % exc.code
-        print "  HTTP Error Reason: %s" % exc.reason
-    except URLError as exc:
-        print "ERROR:"
-        print "  REST DELETE URL: %s" % posturl
-        print "  URL Error Reason: %s" % exc.reason
-    return parsed_result
-
-def loadIntents(node_id, urllist, intPerGroup, addrate, duration):
-    urlindex = 0
-    group = 0
-    start_id = 0
-    sleeptimer = (1.000/addrate)
-    tstart = time.time()
-    while ( (time.time() - tstart) <= duration ):
-        if urlindex < len(urllist):
-            realurlind = urlindex
-        else:
-            realurlind = 0
-            urlindex = 0
-
-        u = str(urllist[realurlind])
-        gstart = time.time()
-        intents,start_id = setIntentJSN(node_id, intPerGroup, group, start_id)
-        #print (str(intents))
-        #print ("Starting intent id: " + str(start_id))
-        result = post_json(u, intents)
-        #print json.dumps(intents[group])
-        #print ("post result: " + str(result))
-        gelapse = time.time() - gstart
-        print ("Group: " + str(group) + " with " + str(intPerGroup) + " intents were added in " + str('%.3f' %gelapse) + " seconds.")
-        sleep(sleeptimer)
-        urlindex += 1
-        group += 1
-
-    telapse = time.time() - tstart
-    #print ( "Number of groups: " + str(group) + "; Totoal " + str(args.groups * args.intPerGroup) + " intents were added in " + str(telapse) + " seconds.")
-    return telapse, group
-
-def main():
-    import argparse
-
-    parser = argparse.ArgumentParser(description="less script")
-    parser.add_argument("-n", "--node_id", dest="node_id", default = 1, type=int, help="id of the node generating the intents, this is used to distinguish intents when multiple nodes are use to generate intents")
-    parser.add_argument("-u", "--urls", dest="urls", default="10.128.10.1", type=str, help="a string to show urls to post intents to separated by space, ex. '10.128.10.1:8080 10.128.10.2:80080' ")
-    parser.add_argument("-i", "--intentsPerGroup", dest="intPerGroup", default=100, type=int, help="number of intents in one restcall group")
-    parser.add_argument("-a", "--addrate", dest="addrate", default=10, type=float, help="rate to add intents groups, groups per second")
-    parser.add_argument("-d", "--delrate", dest="delrate", default=100, type=float, help= "### Not Effective -for now intents are delete as bulk #### rate to delete intents, intents/second")
-    parser.add_argument("-l", "--length", dest="duration", default=300, type=int, help="duration/length of time the intents are posted")
-    parser.add_argument("-p", "--pause", dest="pause", default=0, type=int, help= "pausing time between add and delete of intents")
-    args = parser.parse_args()
-
-    node_id = args.node_id
-    urllist = args.urls.split()
-    intPerGroup = args.intPerGroup
-    addrate = args.addrate
-    delrate = args.delrate
-    duration = args.duration    
-    pause = args.pause
-
-    print ("Intent posting urls are: " + str(urllist))
-    print ("Number of Intents per group: " + str(intPerGroup))
-    print ("Intent group add rate: " + str(addrate) )
-    print ("Intent delete rate:" + str(delrate) )
-    print ("Duration: " + str(duration) )
-    print ("Pause between add and delete: " + str(args.pause))
-
-    telapse, group = loadIntents(node_id, urllist, intPerGroup, addrate, duration)
-    print ("\n\n#####################")
-    print ( str(group) + " groups " + " of " + str(intPerGroup) + " Intents per group - Total " + str(group * intPerGroup) + " intents were added in " + str('%.3f' %telapse) + " seconds.")
-    print ( "Effective intents posting rate is: " + str( '%.1f' %( (group * intPerGroup)/telapse ) ) + " Intents/second." )
-    print ("#####################\n\n")
-    print ("Sleep for " + str(pause) + " seconds before deleting all intents...")
-    time.sleep(pause)
-    print ("Cleaning up intents in all nodes...")
-    for url in urllist:
-        delete_all_json(url)
-        
-if __name__ == '__main__':
-    main()
diff --git a/TestON/dependencies/loadgen_SB.py b/TestON/dependencies/loadgen_SB.py
deleted file mode 100755
index cfd2adf..0000000
--- a/TestON/dependencies/loadgen_SB.py
+++ /dev/null
@@ -1,117 +0,0 @@
-#!/usr/bin/python
-
-"""
-This example shows how to create an empty Mininet object
-(without a topology object) and add nodes to it manually.
-"""
-import sys
-import subprocess
-import time
-from mininet.net import Mininet
-from mininet.node import Controller
-from mininet.cli import CLI
-from mininet.log import setLogLevel, info
-
-swlist = []
-hostlist= []
-count = 0 
-
-def createSwPorts(numsw, numport):
-
-    "Create an empty network and add nodes to it."
-
-    net = Mininet()
-    swlist = []
-    hostlist= []
-    print ("Starting Mininet Network....")
-    for i in range(numsw):
-        sw = net.addSwitch( 's' + str(i), dpid = ('00000000000000' + '%0d'%i))
-        print str(sw),
-        for p in range(numport):
-            host = net.addHost("s"+str(i)+"h"+str(p))
-            hostlist.append(host)
-            print str(host),
-            net.addLink(host,sw)
-        swlist.append(sw)
-
-            
-    info( '*** Starting network\n')
-    net.start()
-
-    return swlist
-
-def loadsw(urllist, swlist, addrate, delrate, duration):
-    global numport
-    urlindex = 0
-    count = 0
-    addsleeptimer = 1.000 /addrate
-    delsleeptimer = 1.000/delrate
-    print (" Add sleeptimer: " + str('%.3f' %addsleeptimer) + "; Delete sleeptimer: " + str('%.3f' %delsleeptimer))
-    print str(swlist)
- 
-    tstart = time.time()
-    while ( (time.time() - tstart) <= duration ):
-        #print (time.time() - tstart)
-        astart = time.time()
-        for sw in swlist:
-            if urlindex < len(urllist):
-                i = urlindex
-            else:
-                i = 0
-                urlindex = 0
-        
-            ovscmd = "sudo ovs-vsctl set-controller " + str(sw) + " tcp:" + urllist[i]
-            print ("a"),
-            s = subprocess.Popen(ovscmd, shell=True )
-            time.sleep(addsleeptimer)
-            count += 1
-            urlindex += 1
-        aelapse = time.time() - astart
-        print ("Number of switches connected: " + str(len(swlist)) + " in: " + str('%.3f' %aelapse) + "seconds.")
-
-        dstart = time.time()
-        for sw in swlist:
-            ovscmd = "sudo ovs-vsctl set-controller " + str(sw) + " tcp:127.0.0.1:6633"
-            print ("d"),
-            s = subprocess.Popen(ovscmd, shell=True )
-            time.sleep(delsleeptimer)
-            count += 1
-        delapse = time.time() - dstart
-        print ("Number of switches disconnected: " + str(len(swlist)) + " in: " + str('%.3f' %delapse) + "seconds.")
-    telapse = time.time() - tstart
-    
-    return telapse, count
-def cleanMN():
-    print ("Cleaning MN switches...")
-    s = subprocess.Popen("sudo mn -c > /dev/null 2>&1", shell=True)
-    print ("Done.")
-
-def main():
-    import argparse
-    import threading
-    from threading import Thread
-
-    parser = argparse.ArgumentParser(description="less script")
-    parser.add_argument("-u", "--urls", dest="urls", default="10.128.10.1", type=str, help="a string to show urls to post intents to separated by space, ex. '10.128.10.1:6633 10.128.10.2:6633' ")
-    parser.add_argument("-s", "--switches", dest="numsw", default=100, type=int, help="number of switches use in the load generator; together with the ports per switch config, each switch generates (numport + 2) events")
-    parser.add_argument("-p", "--ports", dest="numport", default=1, type=int, help="number of ports per switches")
-    parser.add_argument("-a", "--addrate", dest="addrate", default=10, type=float, help="rate to add intents groups, groups per second")
-    parser.add_argument("-d", "--delrate", dest="delrate", default=100, type=float, help= "rate to delete intents, intents/second")
-    parser.add_argument("-l", "--testlength", dest="duration", default=0, type=int, help= "pausing time between add and delete of intents")
-    args = parser.parse_args()
-
-    urllist = args.urls.split()
-    numsw = args.numsw
-    numport = args.numport
-    addrate = args.addrate
-    delrate = args.delrate
-    duration = args.duration
-    setLogLevel( 'info' )
-    swlist = createSwPorts(numsw,numport)
-    telapse,count = loadsw(urllist, swlist, addrate, delrate, duration)
-    print ("Total number of switches connected/disconnected: " + str(count) + "; Total events generated: " + str(count * (2 + numport)) + "; Elalpse time: " + str('%.1f' %telapse))
-    print ("Effective aggregated loading is: " + str('%.1f' %((( count * (2+ numport))) / telapse ) ) + "Events/s.")
-    cleanMN()
-
-if __name__ == '__main__':
-    main()
diff --git a/TestON/dependencies/rotate.sh b/TestON/dependencies/rotate.sh
deleted file mode 100755
index 7136ac6..0000000
--- a/TestON/dependencies/rotate.sh
+++ /dev/null
@@ -1,54 +0,0 @@
-#!/bin/bash
-
-
-# NOTE: Taken fnd modified from onos.sh
-# pack-rotate-log [packname] "[log-filenames]" [max rotations]
-# Note: [packname] and all the log-files specified by [log-filenames]
-#       must reside in same dir
-# Example:
-#  pack="/foo/bar/testlogs"
-#  logfiles="/foo/bar/test1.log /foo/bar/test*.log"
-#  pack-rotate-log $pack "$logfiles" 5
-#   => testlogs.tar.bz2 (will contain test1.log test2.log ...)
-#      testlogs.tar.bz2 -> testlogs.1.tar.bz2
-#      testlogs.1.tar.bz2 -> testlogs.2.tar.bz2
-#      ...
-function pack-rotate-log {
-  local packname=$1
-  local logfiles=$2
-  local nr_max=${3:-10}
-  local suffix=".tar.bz2"
-
-  # rotate
-  for i in `seq $(expr $nr_max - 1) -1 1`; do
-    if [ -f ${packname}.${i}${suffix} ]; then
-      mv -f -- ${packname}.${i}${suffix} ${packname}.`expr $i + 1`${suffix}
-    fi
-  done
-  if [ -f ${packname}${suffix} ]; then
-    mv -- ${packname}${suffix} ${packname}.1${suffix}
-  fi
-
-  # pack
-  local existing_logfiles=$( ls -1 $logfiles  2>/dev/null | xargs -n1  basename 2>/dev/null)
-  if [ ! -z "${existing_logfiles}" ]; then
-    tar cjf ${packname}${suffix} -C `dirname ${packname}` -- ${existing_logfiles}
-    for word in ${existing_logfiles}
-    do
-        rm -- `dirname ${packname}`/${word}
-    done
-   fi
-}
-
-
-
-#Begin script
-#NOTE: This seems to break the TestON summary since it mentions the testname
-#echo "Rotating logs for '${1}' test"
-base_name=$1
-root_dir="/home/admin/packet_captures"
-timestamp=`date +%Y_%B_%d_%H_%M_%S`
-#Maybe this should be an argument? pack-and-rotate supports that
-nr_max=10
-
-pack-rotate-log ${root_dir}'/'${base_name} "${root_dir}/${base_name}*.pcap ${root_dir}/${base_name}*.log*" ${nr_max}
diff --git a/TestON/dependencies/topo-100sw.py b/TestON/dependencies/topo-100sw.py
deleted file mode 100644
index 308a3f1..0000000
--- a/TestON/dependencies/topo-100sw.py
+++ /dev/null
@@ -1,31 +0,0 @@
-
-from mininet.topo import Topo
-
-class MyTopo( Topo ):
-        "100 'floating' switch topology"
-
-        def __init__( self ):
-                # Initialize topology
-                Topo.__init__( self )
-
-                sw_list = []
-
-                for i in range(1, 101):
-                        sw_list.append(
-                                self.addSwitch(
-                                        's'+str(i),
-                                        dpid = str(i).zfill(16)))
-
-
-                #Below connections are used for test cases
-                #that need to test link and port events
-                #Add link between switch 1 and switch 2
-                self.addLink(sw_list[0],sw_list[1])
-                
-                #Create hosts and attach to sw 1 and sw 2
-                h1 = self.addHost('h1')
-                h2 = self.addHost('h2')
-                self.addLink(sw_list[0],h1)
-                self.addLink(sw_list[1],h2)
-        
-topos = { 'mytopo': ( lambda: MyTopo() ) }
diff --git a/TestON/dependencies/topo-HA.py b/TestON/dependencies/topo-HA.py
deleted file mode 100644
index 65613d6..0000000
--- a/TestON/dependencies/topo-HA.py
+++ /dev/null
@@ -1,63 +0,0 @@
-from mininet.topo import Topo
-class MyTopo( Topo ):
-    def __init__( self ):
-        Topo.__init__( self )
-        topSwitch = self.addSwitch('s1',dpid='1000'.zfill(16))
-        leftTopSwitch = self.addSwitch('s2',dpid='2000'.zfill(16))
-        rightTopSwitch = self.addSwitch('s5',dpid='5000'.zfill(16))
-        leftBotSwitch = self.addSwitch('s3',dpid='3000'.zfill(16))
-        rightBotSwitch = self.addSwitch('s6',dpid='6000'.zfill(16))	
-        midBotSwitch = self.addSwitch('s28',dpid='2800'.zfill(16))
-        
-        topHost = self.addHost( 'h1' )
-        leftTopHost = self.addHost('h2')
-        rightTopHost = self.addHost('h5')
-        leftBotHost = self.addHost('h3')
-        rightBotHost = self.addHost('h6')
-        midBotHost = self.addHost('h28')
-        self.addLink(topSwitch,topHost)
-        self.addLink(leftTopSwitch,leftTopHost)
-        self.addLink(rightTopSwitch,rightTopHost)
-        self.addLink(leftBotSwitch,leftBotHost)
-        self.addLink(rightBotSwitch,rightBotHost)
-        self.addLink(midBotSwitch,midBotHost)
-        self.addLink(leftTopSwitch,rightTopSwitch)
-        self.addLink(topSwitch,leftTopSwitch)
-        self.addLink(topSwitch,rightTopSwitch)
-        self.addLink(leftTopSwitch,leftBotSwitch)
-        self.addLink(rightTopSwitch,rightBotSwitch)
-        self.addLink(leftBotSwitch,midBotSwitch)
-        self.addLink(midBotSwitch,rightBotSwitch)
-
-        agg1Switch = self.addSwitch('s4',dpid = '3004'.zfill(16))
-        agg2Switch = self.addSwitch('s7',dpid = '6007'.zfill(16))
-        agg1Host = self.addHost('h4')
-        agg2Host = self.addHost('h7')
-        self.addLink(agg1Switch,agg1Host)
-        self.addLink(agg2Switch,agg2Host)
-        self.addLink(agg1Switch, leftBotSwitch)
-        self.addLink(agg2Switch, rightBotSwitch)
-
-        for i in range(10):
-            num = str(i+8)
-            switch = self.addSwitch('s'+num,dpid = ('30'+num.zfill(2)).zfill(16))
-            host = self.addHost('h'+num)
-            self.addLink(switch, host)
-            self.addLink(switch, agg1Switch)
-
-        for i in range(10):
-            num = str(i+18)
-            switch = self.addSwitch('s'+num,dpid = ('60'+num.zfill(2)).zfill(16))
-            host = self.addHost('h'+num)
-            self.addLink(switch, host)
-            self.addLink(switch, agg2Switch)
-
-topos = { 'mytopo': (lambda: MyTopo() ) }
-
-
-
-
-
-
-
-
diff --git a/TestON/dependencies/topo-intentFlower.py b/TestON/dependencies/topo-intentFlower.py
deleted file mode 100644
index 138c291..0000000
--- a/TestON/dependencies/topo-intentFlower.py
+++ /dev/null
@@ -1,80 +0,0 @@
-'''
-Topology with 3 core switches connected linearly.
-
-Each 'core' switch has a 'flower' of 10 switches
-for a total of 33 switches.
-
-Used in conjunction with 'IntentPerfNext' test
-'''
-
-from mininet.topo import Topo
-
-class MyTopo( Topo ):
-
-    def __init__( self ):
-        Topo.__init__( self )
-       
-        #Switches are listed out here for better view
-        #of the topology from this code
-        core_sw_list = ['s1','s2','s3']
-       
-        #Flower switches for core switch 1
-        flower_sw_list_s1 =\
-                ['s10', 's11', 's12', 's13', 's14',
-                 's15', 's16', 's17', 's18', 's19']
-        #Flower switches for core switch 2
-        flower_sw_list_s2 =\
-                ['s20', 's21', 's22', 's23', 's24',
-                 's25', 's26', 's27', 's28', 's29']
-        #Flower switches for core switch 3
-        flower_sw_list_s3 =\
-                ['s30', 's31', 's32', 's33', 's34',
-                 's35', 's36', 's37', 's38', 's39']
-
-        #Store switch objects in these variables
-        core_switches = []
-        flower_switches_1 = []
-        flower_switches_2 = []
-        flower_switches_3 = []
-       
-        #Add switches
-        for sw in core_sw_list:
-            core_switches.append(
-                    self.addSwitch(
-                        sw, 
-                        dpid = sw.replace('s','').zfill(16)
-                    )
-            )
-        for sw in flower_sw_list_s1:
-            flower_switches_1.append(
-                    self.addSwitch(
-                        sw,
-                        dpid = sw.replace('s','').zfill(16)
-                    )
-            )
-        for sw in flower_sw_list_s2:
-            flower_switches_2.append(
-                    self.addSwitch(
-                        sw,
-                        dpid = sw.replace('s','').zfill(16)
-                    )
-            )
-        for sw in flower_sw_list_s3:
-            flower_switches_3.append(
-                    self.addSwitch(
-                        sw,
-                        dpid = sw.replace('s','').zfill(16)
-                    )
-            )
-
-        self.addLink(core_switches[0], core_switches[1])
-        self.addLink(core_switches[1], core_switches[2])
-
-        for x in range(0, len(flower_sw_list_s1)):
-            self.addLink(core_switches[0], flower_switches_1[x]) 
-        for x in range(0, len(flower_sw_list_s2)):
-            self.addLink(core_switches[1], flower_switches_2[x])
-        for x in range(0, len(flower_sw_list_s3)):
-            self.addLink(core_switches[2], flower_switches_3[x])
-
-topos = { 'mytopo': ( lambda: MyTopo() ) }
diff --git a/TestON/dependencies/topo-onos4node.py b/TestON/dependencies/topo-onos4node.py
deleted file mode 100644
index 4fc036c..0000000
--- a/TestON/dependencies/topo-onos4node.py
+++ /dev/null
@@ -1,63 +0,0 @@
-"""Custom topology example
-
-Two directly connected switches plus a host for each switch:
-
-   host --- switch --- switch --- host
-
-Adding the 'topos' dict with a key/value pair to generate our newly defined
-topology enables one to pass in '--topo=mytopo' from the command line.
-"""
-
-from mininet.topo import Topo
-
-class MyTopo( Topo ):
-	"Simple topology example."
-
-	def __init__( self ):
-		"Create custom topo."
-		# Initialize topology
-		Topo.__init__( self )
-
-		# Make the middle triangle	
-		leftSwitch = self.addSwitch( 's1' , dpid = '1000'.zfill(16))
-		rightSwitch = self.addSwitch( 's2' , dpid = '2000'.zfill(16))
-		topSwitch = self.addSwitch( 's3' , dpid = '3000'.zfill(16))
-		lefthost = self.addHost( 'h1' )
-		righthost = self.addHost( 'h2' )
-		tophost = self.addHost( 'h3' )
-		self.addLink( leftSwitch, lefthost )
-		self.addLink( rightSwitch, righthost )
-		self.addLink( topSwitch, tophost )
-
-		self.addLink( leftSwitch, rightSwitch )
-		self.addLink( leftSwitch, topSwitch )
-		self.addLink( topSwitch, rightSwitch )
-
-		# Make aggregation switches
-		agg1Switch = self.addSwitch( 's4', dpid = '1004'.zfill(16) ) 
-		agg2Switch = self.addSwitch( 's5', dpid = '2005'.zfill(16) ) 
-		agg1Host = self.addHost( 'h4' ) 
-		agg2Host = self.addHost( 'h5' ) 
-
-		self.addLink( agg1Switch, agg1Host, port1=1, port2=1 )
-		self.addLink( agg2Switch, agg2Host, port1=1, port2=1 )
-
-		self.addLink( agg2Switch, rightSwitch )
-		self.addLink( agg1Switch, leftSwitch )
-
-		# Make two aggregation fans
-		for i in range(10):
-			num=str(i+6)
-			switch = self.addSwitch( 's' + num, dpid = ('10' + num.zfill(2) ).zfill(16))
-			host = self.addHost( 'h' + num ) 
-			self.addLink( switch, host, port1=1, port2=1 ) 
-			self.addLink( switch, agg1Switch ) 
-
-		for i in range(10):
-			num=str(i+31)
-			switch = self.addSwitch( 's' + num, dpid = ('20' + num.zfill(2)).zfill(16) )
-			host = self.addHost( 'h' + num ) 
-			self.addLink( switch, host, port1=1, port2=1 ) 
-			self.addLink( switch, agg2Switch ) 
-
-topos = { 'mytopo': ( lambda: MyTopo() ) }
diff --git a/TestON/dependencies/topo-onos4node.py.old b/TestON/dependencies/topo-onos4node.py.old
deleted file mode 100644
index 3328a5d..0000000
--- a/TestON/dependencies/topo-onos4node.py.old
+++ /dev/null
@@ -1,61 +0,0 @@
-"""Custom topology example
-
-Two directly connected switches plus a host for each switch:
-
-   host --- switch --- switch --- host
-
-Adding the 'topos' dict with a key/value pair to generate our newly defined
-topology enables one to pass in '--topo=mytopo' from the command line.
-"""
-
-from mininet.topo import Topo
-
-class MyTopo( Topo ):
-	"Simple topology example."
-
-	def __init__( self ):
-		"Create custom topo."
-		# Initialize topology
-		Topo.__init__( self )
-
-		# Make the middle triangle	
-		leftSwitch = self.addSwitch( 's1' )
-		rightSwitch = self.addSwitch( 's2' )
-		topSwitch = self.addSwitch( 's3' )
-		lefthost = self.addHost( 'h1' )
-		righthost = self.addHost( 'h2' )
-		tophost = self.addHost( 'h3' )
-		self.addLink( leftSwitch, lefthost )
-		self.addLink( rightSwitch, righthost )
-		self.addLink( topSwitch, tophost )
-
-		self.addLink( leftSwitch, rightSwitch )
-		self.addLink( leftSwitch, topSwitch )
-		self.addLink( topSwitch, rightSwitch )
-
-		# Make aggregation switches
-		agg1Switch = self.addSwitch( 's4' ) 
-		agg2Switch = self.addSwitch( 's5' ) 
-		agg1Host = self.addHost( 'h4' ) 
-		agg2Host = self.addHost( 'h5' ) 
-
-		self.addLink( agg1Switch, agg1Host )
-		self.addLink( agg2Switch, agg2Host )
-
-		self.addLink( agg1Switch, rightSwitch )
-		self.addLink( agg2Switch, leftSwitch )
-
-		# Make two aggregation fans
-		for i in range(10):
-			switch = self.addSwitch( 's%d' % (i+6) )
-			host = self.addHost( 'h%d' % (i+6) ) 
-			self.addLink( switch, host ) 
-			self.addLink( switch, agg1Switch ) 
-
-		for i in range(10):
-			switch = self.addSwitch( 's%d' % (i+31) )
-			host = self.addHost( 'h%d' % (i+31) ) 
-			self.addLink( switch, host ) 
-			self.addLink( switch, agg2Switch ) 
-
-topos = { 'mytopo': ( lambda: MyTopo() ) }
diff --git a/TestON/dependencies/topo-onos4nodeNEW.py b/TestON/dependencies/topo-onos4nodeNEW.py
deleted file mode 100644
index 1824e3b..0000000
--- a/TestON/dependencies/topo-onos4nodeNEW.py
+++ /dev/null
@@ -1,63 +0,0 @@
-"""Custom topology example
-
-Two directly connected switches plus a host for each switch:
-
-   host --- switch --- switch --- host
-
-Adding the 'topos' dict with a key/value pair to generate our newly defined
-topology enables one to pass in '--topo=mytopo' from the command line.
-"""
-
-from mininet.topo import Topo
-
-class MyTopo( Topo ):
-	"Simple topology example."
-
-	def __init__( self ):
-		"Create custom topo."
-		# Initialize topology
-		Topo.__init__( self )
-
-		# Make the middle triangle	
-		leftSwitch = self.addSwitch( 's1' , dpid = '1000'.zfill(16))
-		rightSwitch = self.addSwitch( 's2' , dpid = '2000'.zfill(16))
-		topSwitch = self.addSwitch( 's3' , dpid = '3000'.zfill(16))
-		lefthost = self.addHost( 'h1' )
-		righthost = self.addHost( 'h2' )
-		tophost = self.addHost( 'h3' )
-		self.addLink( leftSwitch, lefthost )
-		self.addLink( rightSwitch, righthost )
-		self.addLink( topSwitch, tophost )
-
-		self.addLink( leftSwitch, rightSwitch )
-		self.addLink( leftSwitch, topSwitch )
-		self.addLink( topSwitch, rightSwitch )
-
-		# Make aggregation switches
-		agg1Switch = self.addSwitch( 's4', dpid = '1004'.zfill(16) ) 
-		agg2Switch = self.addSwitch( 's5', dpid = '2005'.zfill(16) ) 
-		agg1Host = self.addHost( 'h4' ) 
-		agg2Host = self.addHost( 'h5' ) 
-
-		self.addLink( agg1Switch, agg1Host )
-		self.addLink( agg2Switch, agg2Host )
-
-		self.addLink( agg2Switch, rightSwitch )
-		self.addLink( agg1Switch, leftSwitch )
-
-		# Make two aggregation fans
-		for i in range(10):
-			num=str(i+6)
-			switch = self.addSwitch( 's' + num, dpid = ('10' + num.zfill(2) ).zfill(16))
-			host = self.addHost( 'h' + num ) 
-			self.addLink( switch, host ) 
-			self.addLink( switch, agg1Switch ) 
-
-		for i in range(10):
-			num=str(i+31)
-			switch = self.addSwitch( 's' + num, dpid = ('20' + num.zfill(2)).zfill(16) )
-			host = self.addHost( 'h' + num ) 
-			self.addLink( switch, host ) 
-			self.addLink( switch, agg2Switch ) 
-
-topos = { 'mytopo': ( lambda: MyTopo() ) }