Added new cluster scripts.
Modified onos.sh to use a host-specific config file by default.
Modified onos.sh interactive prompt to be case-insensitive.
Moved old cluster scripts to old-scripts dir.

Change-Id: I2d580bedeaec7dde2bab8a4a39a49752fbb3de0c
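
The new workflow is driven by onos-cluster.sh and a single cluster config file,
replacing the per-script dsh/pcp tooling removed below. A minimal session
sketch, using only the subcommands documented in the script's usage text
(paths and the use of -f here are illustrative, not prescribed):

  $ cd ~/ONOS/cluster-mgmt
  $ vi conf/onos-cluster.conf      # set cluster.hosts.names, backend, per-host roles
  $ ./onos-cluster.sh setup -f     # generate per-host config files from the template
  $ ./onos-cluster.sh deploy -f    # deliver node config files to the cluster hosts
  $ ./onos-cluster.sh start        # start the ONOS cluster
  $ ./onos-cluster.sh status       # show the cluster status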
diff --git a/cluster-mgmt/README.txt b/cluster-mgmt/README.txt
deleted file mode 100644
index 08380d2..0000000
--- a/cluster-mgmt/README.txt
+++ /dev/null
@@ -1,22 +0,0 @@
-########
-Cluster Management Tools
-########
-
-ssh : contains the necessary files for .ssh (don't change file permissions)
-bash_profile: can be used as the $HOME/.bash_profile file. The following lines need to be changed as necessary:
-
-export RCP_USER=ubuntu
-export RCMD_CMD=ssh
-export RCMD_CMD_ARGS="-i $HOME/.ssh/onlabkey.pem"
-export RCMD_USER=ubuntu
-export RCP_CMD="scp -i $HOME/.ssh/onlabkey.pem -o StrictHostKeyChecking=no"
-export FANOUT=64
-export CLUSTER="$HOME/bin/cluster.txt"
-
-### Set the proper value ###
-export ONOS_CLUSTER_BASENAME="onosdevx"
-export ONOS_CLUSTER_NR_NODES=8
-
-bin/start.sh : shut down all services and restart
-bin/stop.sh : shut down all services
-bin/status.sh : show the status of the services
diff --git a/cluster-mgmt/bash_profile b/cluster-mgmt/bash_profile
deleted file mode 100644
index e9f2a39..0000000
--- a/cluster-mgmt/bash_profile
+++ /dev/null
@@ -1,25 +0,0 @@
-# .bash_profile
-
-# Get the aliases and functions
-if [ -f ~/.bashrc ]; then
-	. ~/.bashrc
-fi
-
-# User specific environment and startup programs
-
-PATH=$PATH:$HOME/bin
-
-export PATH
-
-### Cluster-IT settings ###
-export RCP_USER=ubuntu
-export RCMD_CMD=ssh
-export RCMD_CMD_ARGS="-i $HOME/.ssh/onlabkey.pem"
-export RCMD_USER=ubuntu
-export RCP_CMD="scp -i $HOME/.ssh/onlabkey.pem -o StrictHostKeyChecking=no"
-export FANOUT=64
-export CLUSTER="$HOME/cluster-mgmt/cluster.txt"
-
-#### Set the proper value ####
-#export ONOS_CLUSTER_BASENAME="onosdevx"
-#export ONOS_CLUSTER_NR_NODES=8
diff --git a/cluster-mgmt/bin/bootup.sh b/cluster-mgmt/bin/bootup.sh
deleted file mode 100755
index ca86f7e..0000000
--- a/cluster-mgmt/bin/bootup.sh
+++ /dev/null
@@ -1,20 +0,0 @@
-#! /bin/bash
-. `dirname $0`/func.sh
-
-onos stop
-cassandra cleandb
-cassandra stop
-zk stop
-
-zk start
-cassandra start
-cassandra cleandb
-db_status=`cassandra checkdb |grep OK | wc -l`
-if [ $db_status != 1 ];then
-  echo $db_status
-  echo "Cassandra DB is corrupted. A DB key drop is needed"
-  exit 1
-fi
-onos start
-switch local
-#dsh -g $basename 'cd ONOS; ./ctrl-local.sh'
diff --git a/cluster-mgmt/bin/cassandra b/cluster-mgmt/bin/cassandra
deleted file mode 100755
index b2033ad..0000000
--- a/cluster-mgmt/bin/cassandra
+++ /dev/null
@@ -1,5 +0,0 @@
-#! /bin/bash
-. `dirname $0`/func.sh
-
-#$0 $1 $2
-`basename $0` $1 $2
diff --git a/cluster-mgmt/bin/check_status.py b/cluster-mgmt/bin/check_status.py
deleted file mode 100755
index 61f2108..0000000
--- a/cluster-mgmt/bin/check_status.py
+++ /dev/null
@@ -1,245 +0,0 @@
-#! /usr/bin/env python
-import json
-import os
-
-urls="http://localhost:8080/wm/core/topology/switches/all/json http://localhost:8080/wm/core/topology/links/json http://localhost:8080/wm/registry/controllers/json http://localhost:8080/wm/registry/switches/json"
-RestIP=os.environ.get("ONOS_CLUSTER_BASENAME")+"1"
-RestPort="8080"
-
-core_switches=["00:00:00:00:ba:5e:ba:11", "00:00:00:00:00:00:ba:12", "00:00:20:4e:7f:51:8a:35", "00:00:00:00:ba:5e:ba:13", "00:00:00:08:a2:08:f9:01", "00:00:00:16:97:08:9a:46"]
-correct_nr_switch=[6,50,25,25,25,25,25,25]
-correct_intra_link=[16, 98, 48, 48, 48, 48, 48, 48]
-
-#nr_links=(switch[1]+switch[2]+switch[3]+switch[4]+switch[5]+switch[6]+switch[7]+len(switch)-1+8)*2
-nr_links= (49 + 24 * 6 + 7 + 8) * 2
-
-cluster_basename=os.environ.get("ONOS_CLUSTER_BASENAME")
-nr_nodes=os.environ.get("ONOS_CLUSTER_NR_NODES")
-
-def get_json(url):
-  print url
-  try:
-    command = "curl -s %s" % (url)
-    result = os.popen(command).read()
-    parsedResult = json.loads(result)
-  except:
-    print "REST I/F %s has an issue" % command
-    parsedResult = ""
-
-  if isinstance(parsedResult, dict) and parsedResult.has_key('code'):
-    print "REST %s returned code %s" % (command, parsedResult['code'])
-    parsedResult = ""
-
-  return parsedResult 
-
-def check_switch():
-  buf = ""
-  retcode = 0
-
-  url="http://%s:%s/wm/core/topology/switches/all/json" % (RestIP, RestPort)
-  parsedResult = get_json(url)
-
-  if parsedResult == "":
-    retcode = 1
-    return (retcode, "Rest API has an issue")
-
-  url = "http://%s:%s/wm/registry/switches/json" % (RestIP, RestPort)
-  registry = get_json(url)
-
-  if registry == "":
-    retcode = 1
-    return (retcode, "Rest API has an issue")
-
-
-  buf += "switch: total %d switches\n" % len(parsedResult)
-  cnt = []
-  active = []
-  for r in range(8):
-    cnt.append(0)
-    active.append(0)
-
-  for s in parsedResult:
-    if s['dpid'] in core_switches:
-      nw_index = 0
-    else:
-      nw_index =int(s['dpid'].split(':')[-2], 16) - 1
-    cnt[nw_index] += 1
-
-    if s['state']  == "ACTIVE":
-      active[nw_index] += 1
-
-    if not s['dpid'] in registry:
-      buf += "switch:  dpid %s lost controller\n" % (s['dpid'])
-
-  for r in range(8):
-    buf += "switch: network %d : %d switches %d active\n" % (r+1, cnt[r], active[r])
-    if correct_nr_switch[r] != cnt[r]:
-      buf += "switch fail: network %d should have %d switches but has %d\n" % (r+1, correct_nr_switch[r], cnt[r])
-      retcode = 1
-
-    if correct_nr_switch[r] != active[r]:
-      buf += "switch fail: network %d should have %d active switches but has %d\n" % (r+1, correct_nr_switch[r], active[r])
-      retcode = 1
-
-  return (retcode, buf)
-
-def check_link():
-  buf = ""
-  retcode = 0
-
-  url = "http://%s:%s/wm/core/topology/links/json" % (RestIP, RestPort)
-  parsedResult = get_json(url)
-
-  if parsedResult == "":
-    retcode = 1
-    return (retcode, "Rest API has an issue")
-
-  buf += "link: total %d links (correct : %d)\n" % (len(parsedResult), nr_links)
-  intra = []
-  interlink=0
-  for r in range(8):
-    intra.append(0)
-
-  for s in parsedResult:
-    if s['src-switch'] in core_switches:
-      src_nw = 1
-    else:
-      src_nw =int(s['src-switch'].split(':')[-2], 16)
-    
-    if s['dst-switch'] in core_switches:
-      dst_nw = 1
-    else:
-      dst_nw =int(s['dst-switch'].split(':')[-2], 16)
-
-    src_swid =int(s['src-switch'].split(':')[-1], 16)
-    dst_swid =int(s['dst-switch'].split(':')[-1], 16)
-    if src_nw == dst_nw:
-      intra[src_nw - 1] = intra[src_nw - 1] + 1 
-    else:
-      interlink += 1
-
-  for r in range(8):
-    if intra[r] != correct_intra_link[r]:
-      buf += "link fail: network %d should have %d intra links but has %d\n" % (r+1, correct_intra_link[r], intra[r])
-      retcode = 1
-
-  if interlink != 14:
-      buf += "link fail: there should be %d inter links (uni-directional) but there are %d\n" % (14, interlink)
-      retcode = 1
-
-  return (retcode, buf)
-
-def check_switch_local():
-  buf = "check_switch_local\n"
-  retcode = 0
-
-  url = "http://%s:%s/wm/registry/switches/json" % (RestIP, RestPort)
-  parsedResult = get_json(url)
-
-  if parsedResult == "":
-    retcode = 1
-    return (retcode, "Rest API has an issue")
-
-  for s in parsedResult:
-    #print s,len(s),s[0]['controllerId']
-    ctrl=parsedResult[s][0]['controllerId']
-    if s in core_switches:
-      nw = 1
-    else:
-      nw =int(s.split(':')[-2], 16)
-
-    if len(parsedResult[s]) > 1:
-      buf += "switch_local warn: switch %s has more than 1 controller: " % (s)
-      for i in parsedResult[s]:
-        buf += "%s " % (i['controllerId'])
-      buf += "\n"
-      retcode = 1
-
-    if int(ctrl[-1]) != nw:
-      buf += "switch_local fail: switch %s is wrongly controlled by %s\n" % (s, ctrl)
-      retcode = 1
-      
-  return (retcode, buf)
-
-def check_switch_all(nr_ctrl):
-  buf = "check_switch_all\n"
-  retcode = 0
-
-  url = "http://%s:%s/wm/registry/controllers/json" % (RestIP, RestPort)
-  parsedResult = get_json(url)
-
-  if parsedResult == "":
-    retcode = 1
-    return (retcode, "Rest API has an issue")
-
-  ## Check Dup Controller ##
-  controllers=list(set(parsedResult))
-  if len (controllers) != len(parsedResult):
-    buf += "Duplicated Controller in registry: " + str(parsedResult) + "\n"
-    retcode = 1
-
-  ## Check Missing Controller ##
-  if len (controllers) != nr_ctrl:
-    buf += "Missing Controller in registry: " + str(parsedResult) + "\n"
-    retcode = 1
-
-  ## Check Core Controller Exist ##
-  core_ctrl="%s1" % (cluster_basename)
-  if not core_ctrl in controllers:
-    buf += "Core controller missing in registry: " + str(parsedResult) + "\n"
-    retcode = 1
-
-  controllers.remove(core_ctrl)
-
-  url = "http://%s:%s/wm/registry/switches/json" % (RestIP, RestPort)
-  parsedResult = get_json(url)
-
-  if parsedResult == "":
-    retcode = 1
-    return (retcode, "Rest API has an issue")
-
-  for s in parsedResult:
-    ctrl_set = []
-    for c in parsedResult[s]:
-      ctrl_set.append(c['controllerId'])
-
-    if s in core_switches:
-      nw = 1
-    else:
-      nw =int(s.split(':')[-2], 16)
-
-    if nw == 1 and len(ctrl_set) != 1:
-      buf += "Core switch %s has more than 1 controller: %s\n" % (s, ctrl_set)
-    elif nw != 1:
-      if len(list(set(ctrl_set))) != len(ctrl_set):
-        buf += "Edge switch %s has dup controller: %s\n" % (s, ctrl_set)
-      elif len(list(set(ctrl_set))) != len(controllers):
-        buf += "Edge switch %s has missing controller: %s\n" % (s, ctrl_set)
-
-  return (retcode, buf)
-
-def check_controllers(n):
-  retcode = 0
-  buf = ""
-  url = "http://%s:%s/wm/registry/controllers/json" % (RestIP, RestPort)
-  parsedResult = get_json(url)
-
-  if parsedResult == "":
-    retcode = 1
-
-    return (retcode, "Rest API has an issue")
-
-  for i,c in enumerate(parsedResult):
-    buf += "%d : %s\n" % (i,c)
-
-  if len(parsedResult) != n:
-    buf += "controller fail: there are %d controllers (should be %d)\n" % (len(parsedResult), n)
-    retcode = 1
-
-  return (retcode, buf)
-
-if __name__ == "__main__":
-  print "%s" % check_switch()[1]
-  print "%s" % check_link()[1]
-  print "%s" % check_switch_local()[1]
-  print "%s" % check_controllers(8)[1]
diff --git a/cluster-mgmt/bin/check_status_failover.py b/cluster-mgmt/bin/check_status_failover.py
deleted file mode 100755
index 17baa01..0000000
--- a/cluster-mgmt/bin/check_status_failover.py
+++ /dev/null
@@ -1,13 +0,0 @@
-#! /usr/bin/env python
-import json
-import os
-from check_status import *
-
-cluster_basename=os.environ.get("ONOS_CLUSTER_BASENAME")
-nr_nodes=os.environ.get("ONOS_CLUSTER_NR_NODES")
-
-if __name__ == "__main__":
-  print "%s" % check_switch()[1]
-  print "%s" % check_link()[1]
-  print "%s" % check_controllers(8)[1]
-  print "%s" % check_switch_all(8)[1]
diff --git a/cluster-mgmt/bin/cho-failover.py b/cluster-mgmt/bin/cho-failover.py
deleted file mode 100755
index 04f83c5..0000000
--- a/cluster-mgmt/bin/cho-failover.py
+++ /dev/null
@@ -1,188 +0,0 @@
-#! /usr/bin/env python
-import json
-import sys
-import os
-import re
-from check_status import *
-import time
-
-basename=os.getenv("ONOS_CLUSTER_BASENAME")
-operation=['switch all', 'onos stop 8', 'onos stop 7', 'onos stop 6', 'onos stop 5', 'onos start 5;onos start 6;onos start 7;onos start 8', 'switch local']  
-nr_controllers=[8, 7, 6, 5, 4, 8, 8]
-
-wait1=30
-wait2=60
-
-def check_by_pingall():
-  buf = ""
-  cmd = "pingall-speedup.sh %s" % (flowdef)
-  result = os.popen(cmd).read()
-  buf += result
-  
-  if re.search("fail 0", result):
-    return (0, buf)
-  else:
-    return (1, buf)
-
-def link_change_core(op):
-  cmd = "dsh -w %s1 \"sudo ifconfig %s\"" % (basename, op)
-  os.popen(cmd)
-  print cmd
-
-def check_flow_nmap():
-  buf = "" 
-  buf += os.popen("date").read()
-  print "dump all flows from network map"
-  cmd  = "dsh -w %s1 \"cd ONOS/web; ./get_flow.py all\"" % cluster_basename
-  buf += os.popen(cmd).read()
-  return (0, buf)
-
-def check_flow_raw():
-  buf = "" 
-  print "dump all flows from switches"
-  cmd  = "dsh \"cd ONOS/scripts; ./showflow.sh\""
-  buf += os.popen(cmd).read()
-  return (0, buf)
-
-def dump_json(url, filename):
-  f = open(filename, 'w')
-  buf = "" 
-  command = "curl -s %s" % (url)
-  result = os.popen(command).read()
-  buf += json.dumps(json.loads(result), sort_keys = True, indent = 2)
-  f.write(buf)
-  f.close()
-
-def dump_flowgetall(tag):
-  url="http://%s:%s/wm/flow/getall/json" % (RestIP, RestPort)
-  filename  = "rest-flow-getall-log.%s.log" % tag
-  dump_json(url, filename)
-
-def check_rest(tag):
-  url="http://%s:%s/wm/flow/getall/json" % (RestIP, RestPort)
-  filename  = "rest-flow-getall-log.%s.log" % tag
-  dump_json(url, filename)
-
-  url="http://%s:%s/wm/core/topology/switches/all/json" % (RestIP, RestPort)
-  filename  = "rest-sw-log.%s.log" % tag
-  dump_json(url, filename)
-
-  url = "http://%s:%s/wm/core/topology/links/json" % (RestIP, RestPort)
-  filename  = "rest-link-log.%s.log" % tag
-  dump_json(url, filename)
-
-  url = "http://%s:%s/wm/registry/switches/json" % (RestIP, RestPort)
-  filename  = "rest-reg-sw-log.%s.log" % tag
-  dump_json(url, filename)
-
-  url = "http://%s:%s/wm/registry/controllers/json" % (RestIP, RestPort)
-  filename  = "rest-reg-ctrl-log.%s.log" % tag
-  dump_json(url, filename)
-
-  url = "http://%s:%s/wm/flow/getsummary/0/0/json" % (RestIP, RestPort)
-  filename  = "rest-flow-getsummary-log.%s.log" % tag
-  dump_json(url, filename)
-
-
-def check_and_log(tag):
-  global cur_nr_controllers
-  buf = ""
-  buf += "check by pingall\n"
-  (code, result) = check_by_pingall()
-  if code == 0:
-    buf += "ping success %s\n" % (result)
-  else:
-    buf += "pingall failed\n"
-    buf += "%s\n" % (result)
-    error = "error-log.%s.log" % tag
-    rawflow  = "raw-flow-log.%s.log" % tag
-    
-    ferror = open(error, 'w')
-    ferror.write(result)
-
-    fraw = open(rawflow,'w')
-    fraw.write(check_flow_raw()[1])
-    fraw.close()
-
-    check_rest(tag)
-
-    ferror.write(check_switch()[1])
-    ferror.write(check_link()[1])
-    ferror.write(check_switch_local()[1])
-    ferror.write(check_controllers(cur_nr_controllers)[1])
-    ferror.close()
-
-  return (code, buf)
-
-def plog(string):
-  global logf
-  print string
-  logf.write(string+"\n")
-
-if __name__ == "__main__":
-  global logf, cur_nr_controllers
-  argvs = sys.argv 
-  if len(argvs) == 5:
-    log_filename = sys.argv[1]
-    flowdef = sys.argv[2]
-    wait1 = int(sys.argv[3])
-    wait2 = int(sys.argv[4])
-  else:
-    print "usage: %s log_filename flowdef_filename wait1 wait2" % sys.argv[0]
-    print "  wait1: wait time (sec) to check ping after change"
-    print "  wait2: additional wait time (sec) if the first check failed"
-    sys.exit(1)
-
-  logf = open(log_filename, 'w', 0)    
-
-  plog("flow def: %s" % flowdef)
-  plog("wait1 : %d" % wait1)
-  plog("wait2 : %d" % wait2)
-
-  plog(check_switch()[1])
-  plog(check_link()[1])
-  plog(check_controllers(8)[1])
-
-  (code, result) = check_by_pingall()
-
-  plog(result)
-
-  print result
-  k = raw_input('hit any key>')
-
-  for cycle in range(1000):
-    for n, op in enumerate(operation):
-      plog("==== Cycle %d operation %d ====: %s" % (cycle, n, os.popen('date').read()))
-#      link_change_core(op)
-      os.popen(op)
-      plog(op)
-      cur_nr_controllers = nr_controllers[n]
-
-      plog("wait %d sec" % wait1)
-      time.sleep(wait1)
-      plog("check and log: %s" % os.popen('date').read())
-
-      tstart=int(time.time())
-      (code, result) = check_and_log("%d.%d.1" % (cycle,n))
-      plog(result)
-      plog("done: %s" % os.popen('date').read())
-      tend=int(time.time())
-
-      tdelta=tend-tstart
-
-      if not code == 0:
-        wait = max(0, wait2 - tdelta)
-        plog("took %d sec for check and log. wait another %d sec" % (tdelta, wait))
-        time.sleep(wait)
-        plog("check and log: %s" % os.popen('date').read())
-        (code, result) = check_and_log("%d.%d.2" % (cycle,n))
-        plog(result)
-        plog("done: %s" % os.popen('date').read())
-        if code == 0:
-          tag = "%d.%d.2" % (cycle,n)
-          dump_flowgetall(tag)
-          rawflow  = "raw-flow-log.%s.log" % tag
-          fraw = open(rawflow,'w')
-          fraw.write(check_flow_raw()[1])
-          fraw.close()
-  logf.close()
diff --git a/cluster-mgmt/bin/cho-link-failure.py b/cluster-mgmt/bin/cho-link-failure.py
deleted file mode 100755
index 8889c1f..0000000
--- a/cluster-mgmt/bin/cho-link-failure.py
+++ /dev/null
@@ -1,187 +0,0 @@
-#! /usr/bin/env python
-import json
-import sys
-import os
-import re
-from check_status import *
-import time
-
-basename=os.getenv("ONOS_CLUSTER_BASENAME")
-operation=["sw3-eth4 down","sw4-eth4 down","sw4-eth3 down","sw3-eth4 up","sw1-eth2 down","sw4-eth4 up","sw4-eth3 up","sw1-eth2 up"]
-wait1=30
-wait2=60
-
-def check_by_pingall():
-  buf = ""
-  cmd = "pingall-speedup.sh %s" % (flowdef)
-  result = os.popen(cmd).read()
-  buf += result
-  
-  if re.search("fail 0", result):
-    return (0, buf)
-  else:
-    return (1, buf)
-
-def link_change_core(op):
-  cmd = "dsh -w %s1 \"sudo ifconfig %s\"" % (basename, op)
-  os.popen(cmd)
-  print cmd
-
-def check_flow_nmap():
-  buf = "" 
-  buf += os.popen("date").read()
-  print "dump all flows from network map"
-  cmd  = "dsh -w %s1 \"cd ONOS/web; ./get_flow.py all\"" % cluster_basename
-  buf += os.popen(cmd).read()
-  return (0, buf)
-
-def check_flow_raw():
-  buf = "" 
-  print "dump all flows from switches"
-  cmd  = "dsh \"cd ONOS/scripts; ./showflow.sh\""
-  buf += os.popen(cmd).read()
-  return (0, buf)
-
-def dump_json(url, filename):
-  f = open(filename, 'w')
-  buf = "" 
-  command = "curl -s %s" % (url)
-  result = os.popen(command).read()
-  buf += json.dumps(json.loads(result), sort_keys = True, indent = 2)
-  f.write(buf)
-  f.close()
-
-def dump_flowgetall(tag):
-  url="http://%s:%s/wm/flow/getall/json" % (RestIP, RestPort)
-  filename  = "rest-flow-getall-log.%s.log" % tag
-  dump_json(url, filename)
-
-def check_rest(tag):
-  url="http://%s:%s/wm/flow/getall/json" % (RestIP, RestPort)
-  filename  = "rest-flow-getall-log.%s.log" % tag
-  dump_json(url, filename)
-
-  url="http://%s:%s/wm/core/topology/switches/all/json" % (RestIP, RestPort)
-  filename  = "rest-sw-log.%s.log" % tag
-  dump_json(url, filename)
-
-  url = "http://%s:%s/wm/core/topology/links/json" % (RestIP, RestPort)
-  filename  = "rest-link-log.%s.log" % tag
-  dump_json(url, filename)
-
-  url = "http://%s:%s/wm/registry/switches/json" % (RestIP, RestPort)
-  filename  = "rest-reg-sw-log.%s.log" % tag
-  dump_json(url, filename)
-
-  url = "http://%s:%s/wm/registry/controllers/json" % (RestIP, RestPort)
-  filename  = "rest-reg-ctrl-log.%s.log" % tag
-  dump_json(url, filename)
-
-  url = "http://%s:%s/wm/flow/getsummary/0/0/json" % (RestIP, RestPort)
-  filename  = "rest-flow-getsummary-log.%s.log" % tag
-  dump_json(url, filename)
-
-
-def check_and_log(tag):
-  global cur_nr_controllers
-  buf = ""
-  buf += "check by pingall\n"
-  (code, result) = check_by_pingall()
-  if code == 0:
-    buf += "ping success %s\n" % (result)
-  else:
-    buf += "pingall failed\n"
-    buf += "%s\n" % (result)
-    error = "error-log.%s.log" % tag
-    rawflow  = "raw-flow-log.%s.log" % tag
-    
-    ferror = open(error, 'w')
-    ferror.write(result)
-
-    fraw = open(rawflow,'w')
-    fraw.write(check_flow_raw()[1])
-    fraw.close()
-
-    check_rest(tag)
-
-    ferror.write(check_switch()[1])
-    ferror.write(check_link()[1])
-    ferror.write(check_switch_local()[1])
-    ferror.write(check_controllers(cur_nr_controllers)[1])
-    ferror.close()
-
-  return (code, buf)
-
-def plog(string):
-  global logf
-  print string
-  logf.write(string+"\n")
-
-if __name__ == "__main__":
-  global logf, cur_nr_controllers
-
-  cur_nr_controllers = 8
-
-  argvs = sys.argv 
-  if len(argvs) == 5:
-    log_filename = sys.argv[1]
-    flowdef = sys.argv[2]
-    wait1 = int(sys.argv[3])
-    wait2 = int(sys.argv[4])
-  else:
-    print "usage: %s log_filename flowdef_filename wait1 wait2" % sys.argv[0]
-    print "  wait1: wait time (sec) to check ping after change"
-    print "  wait2: additional wait time (sec) if the first check failed"
-    sys.exit(1)
-
-  logf = open(log_filename, 'w', 0)    
-
-  plog("flow def: %s" % flowdef)
-  plog("wait1 : %d" % wait1)
-  plog("wait2 : %d" % wait2)
-
-  plog(check_switch()[1])
-  plog(check_link()[1])
-  plog(check_controllers(cur_nr_controllers)[1])
-
-  (code, result) = check_by_pingall()
-
-  plog(result)
-
-  print result
-  k = raw_input('hit any key>')
-
-  for cycle in range(1000):
-    for n, op in enumerate(operation):
-      plog("==== Cycle %d operation %d ====: %s" % (cycle, n, os.popen('date').read()))
-      link_change_core(op)
-      plog(op)
-
-      plog("wait %d sec" % wait1)
-      time.sleep(wait1)
-      plog("check and log: %s" % os.popen('date').read())
-
-      tstart=int(time.time())
-      (code, result) = check_and_log("%d.%d.1" % (cycle,n))
-      plog(result)
-      plog("done: %s" % os.popen('date').read())
-      tend=int(time.time())
-
-      tdelta=tend-tstart
-
-      if not code == 0:
-        wait = max(0, wait2 - tdelta)
-        plog("took %d sec for check and log. wait another %d sec" % (tdelta, wait))
-        time.sleep(wait)
-        plog("check and log: %s" % os.popen('date').read())
-        (code, result) = check_and_log("%d.%d.2" % (cycle,n))
-        plog(result)
-        plog("done: %s" % os.popen('date').read())
-        if code == 0:
-          tag = "%d.%d.2" % (cycle,n)
-          dump_flowgetall(tag)
-          rawflow  = "raw-flow-log.%s.log" % tag
-          fraw = open(rawflow,'w')
-          fraw.write(check_flow_raw()[1])
-          fraw.close()
-  logf.close()
diff --git a/cluster-mgmt/bin/cho-link-failure.sh b/cluster-mgmt/bin/cho-link-failure.sh
deleted file mode 100755
index 4db887a..0000000
--- a/cluster-mgmt/bin/cho-link-failure.sh
+++ /dev/null
@@ -1,54 +0,0 @@
-#! /bin/sh
-basename=$ONOS_CLUSTER_BASENAME
-wait=10
-
-fdef="flowdef_8node_42.txt"
-
-function log()
-{
-    date > error.$1.$2.log
-    check_status.py >> error.$1.$2.log
-    dsh -w ${basename}1 "cd ONOS/web; ./get_flow.py all" >> error.$1.$2.log
-    dsh "cd ONOS/scripts; ./showflow.sh"             >> error.$1.$2.log
-}
-
-echo "all links up"
-dsh -w ${basename}1 "cd ONOS/scripts; ./all-linkup.sh"
-echo "clean up flow"
-dsh -w ${basename}1 "cd ONOS/web; ./delete_flow.py 1 100"
-sleep 1
-dsh -w ${basename}1 "cd ONOS/web; ./get_flow.py all"
-dsh "cd ONOS/scripts; ./delflow.sh"
-echo "checkup status"
-check_status.py
-read -p "hit any key> "
-
-echo "install pre-set flows"
-dsh -w ${basename}1 "cd ONOS/web; ./add_flow.py -m onos -f $fdef"
-sleep 6
-echo "check"
-dsh -w ${basename}1 "cd ONOS/web; ./pingall.py $fdef"
-
-#ports=`dsh -w ${basename}1 "cd ONOS/scripts; ./listports.sh" | awk '{print $2}' |grep -v tap`
-operation=("sw3-eth3 down" "sw4-eth4 down" "sw4-eth3 down" "sw3-eth3 up" "sw1-eth2 down" "sw4-eth4 up" "sw4-eth3 up" "sw1-eth2 up")
-
-((n=0))
-while [ 1 ] ; do
-  for (( i = 0; i< ${#operation[@]}; i ++)); do
-    echo "Test $n-$i"
-    p=`echo ${operation[$i]}`
-    echo "operation: $p"
-#  read -p "hit any key> "
-    dsh -w ${basename}1 "sudo ifconfig $p"
-    echo "wait $wait sec"
-    sleep $wait 
-    result=`dsh -w ${basename}1 "cd ONOS/web; ./pingall.py $fdef"`
-    echo $result
-    nr_fail=`echo $result |grep fail | wc -l`
-    if [ $nr_fail -gt 0 ]; then
-      log $n $i
-    fi
-  done
-  ((n++))
-done
-
diff --git a/cluster-mgmt/bin/cmd b/cluster-mgmt/bin/cmd
deleted file mode 100755
index b2033ad..0000000
--- a/cluster-mgmt/bin/cmd
+++ /dev/null
@@ -1,5 +0,0 @@
-#! /bin/bash
-. `dirname $0`/func.sh
-
-#$0 $1 $2
-`basename $0` $1 $2
diff --git a/cluster-mgmt/bin/comp-nwmap-sw.py b/cluster-mgmt/bin/comp-nwmap-sw.py
deleted file mode 100755
index 8dc65f6..0000000
--- a/cluster-mgmt/bin/comp-nwmap-sw.py
+++ /dev/null
@@ -1,144 +0,0 @@
-#! /usr/bin/env python
-import os
-import re
-import json
-import sys
-import os
-
-status=0
-
-pid=os.getpid()
-basename=os.getenv("ONOS_CLUSTER_BASENAME")
-RestPort=8080
-
-def dump_switch_table(filename):
-  cmd="dsh \"cd ONOS/scripts; ./showflow.sh\""
-  f=open(filename, 'w')
-  result=os.popen(cmd).read()
-
-  f.write(result)
-  f.close()
-
-def dump_network_map(filename):
-  url="http://%s1:%d/wm/flow/getall/json" % (basename, RestPort)
-  cmd="curl -s %s" % url
-  f=open(filename, 'w')
-  try:
-    result=json.loads(os.popen(cmd).read())
-  except:
-    print "REST has an issue"
-    sys.exit(1)
-
-  json.dump(result, f, indent=2, sort_keys=True)
-  f.close()
-    
-def make_key(*kargs):
-  key=""
-  for k in kargs:
-    key += str(k)+"_"
-  return key[:-1]
-
-def fdb_nmap(filename):
-  f=open(filename, 'r')
-  json_flow = json.load(f)
-  nr_flow_entries = 0
-  fdb_nmap={}
-  ## XXX there should be a better way to detect an empty list ##
-  if json_flow == "[]":
-    print "nmap contained %d flow entries" % nr_flow_entries
-    return fdb_nmap
-
-  for flow in json_flow:
-    fid = flow['flowId']['value']
-    dl_src = flow['flowEntryMatch']['srcMac']['value'].lower()
-    dl_dst = flow['flowEntryMatch']['dstMac']['value'].lower()
-    e = {}
-    for entry in flow['dataPath']['flowEntries']:
-       dpid = entry['dpid']['value'].replace(":","").lower()
-       cookie = entry['flowEntryId']
-       in_port = entry['flowEntryMatch']['inPort']['value']
-
-       outport = []
-       for p in entry['flowEntryActions']:
-         outport.append(p['actionOutput']['port']['value'])
-       outport.sort()
-
-       e['dpid']=dpid  
-       e['cookie']=cookie
-       e['in_port']=in_port
-       e['dl_src']=dl_src
-       e['dl_dst']=dl_dst
-       e['actions']=outport
-       e['fid']=fid
-       key = make_key(dpid, in_port, dl_src, dl_dst, outport[0])
-
-       fdb_nmap[key]=e
-       nr_flow_entries += 1
-
-  print "nmap contained %d flow entries" % nr_flow_entries
-  return fdb_nmap
-
-def fdb_raw(filename):
-  f = open(filename, 'r')
-  fdb_raw={}
-  nr_flow_entries = 0
-  for line in f:
-    e = {}
-    if line[0] == '#':
-      continue
-    dpid=re.search("dpid=([0-9]|[a-f])*", line.strip()).group().split("=")[1]
-    cookie=re.search("cookie=0x([0-9]|[a-f])*", line.strip()).group().split("=")[1]
-    in_port=re.search("in_port=[0-9]*", line.strip()).group().split("=")[1]
-    dl_src=re.search("dl_src=([0-9]|[a-f]|:)*", line.strip()).group().split("=")[1]
-    dl_dst=re.search("dl_dst=([0-9]|[a-f]|:)*", line.strip()).group().split("=")[1]
-    outport_list=re.search("actions=(output:[0-9]*,*)*", line.strip()).group().split("=")[1].split(",")
-    outport=[]
-    for i in outport_list:
-      outport.append(int(i.split(":")[1]))
-    outport.sort()
-
-    e['dpid']=dpid  
-    e['cookie']=cookie
-    e['in_port']=in_port
-    e['dl_src']=dl_src
-    e['dl_dst']=dl_dst
-    e['actions']=outport
-    key = make_key(dpid, in_port, dl_src, dl_dst, outport[0])
-    fdb_raw[key]=e
-    nr_flow_entries += 1
-
-  print "real switches contained %d flow entries" % nr_flow_entries
-  f.close()
-  return fdb_raw
-
-if __name__ == "__main__":
-  argvs = sys.argv 
-  if len(argvs) != 2:
-    f1=".nmap.%d.txt" % pid
-    f2=".rawflow.%d.txt" % pid
-    dump_network_map(f1)
-    dump_switch_table(f2)
-
-  else:
-    f1 = sys.argv[1]
-    f2 = sys.argv[2]
-
-
-  fdb_nmap = fdb_nmap(f1)
-  fdb_raw = fdb_raw(f2)
-
-  nr_not_found_in_switch = 0
-  for f in fdb_nmap:
-    if not fdb_raw.has_key(f):
-      nr_not_found_in_switch += 1
-      print "fid=%s dpid=%s cookie=%s in_port=%s dl_src=%s dl_dst=%s outport=%s not found in switch" % (fdb_nmap[f]['fid'],fdb_nmap[f]['dpid'],fdb_nmap[f]['cookie'],fdb_nmap[f]['in_port'],fdb_nmap[f]['dl_src'],fdb_nmap[f]['dl_dst'],fdb_nmap[f]['actions'])
-
-  nr_not_found_in_nmap = 0
-  for f in fdb_raw:
-    if not fdb_nmap.has_key(f):
-      nr_not_found_in_nmap += 1
-      print "dpid=%s cookie=%s in_port=%s dl_src=%s dl_dst=%s outport=%s not found in nmap" % (fdb_raw[f]['dpid'],fdb_raw[f]['cookie'],fdb_raw[f]['in_port'],fdb_raw[f]['dl_src'],fdb_raw[f]['dl_dst'],fdb_raw[f]['actions'])
-  
-  print "Network Map has %d flow entries,  %d not found in switch" % (len(fdb_nmap), nr_not_found_in_switch)
-  print "Switches have %d flow entries, %d not found in network map" % (len(fdb_raw), nr_not_found_in_nmap)
-  print "dumpfiles: %s %s" % (f1, f2)
diff --git a/cluster-mgmt/bin/config.sh b/cluster-mgmt/bin/config.sh
deleted file mode 100755
index 19bfbf2..0000000
--- a/cluster-mgmt/bin/config.sh
+++ /dev/null
@@ -1,17 +0,0 @@
-#! /bin/sh
-
-. `dirname $0`/func.sh
-
-basename=$ONOS_CLUSTER_BASENAME
-nr_nodes=$ONOS_CLUSTER_NR_NODES
-
-for n in `seq 1 $nr_nodes`; do
-  echo "Host node$n"
-  echo "User ubuntu"
-  echo "HostName ${basename}${n}"
-done > ~/.ssh/config
-
-cd ${HOME}/bin
-for n in `seq 1 $nr_nodes`; do
-  ln -s ssh_exec node${n}
-done
diff --git a/cluster-mgmt/bin/demo-reset-hw.sh b/cluster-mgmt/bin/demo-reset-hw.sh
deleted file mode 100755
index 15f97e1..0000000
--- a/cluster-mgmt/bin/demo-reset-hw.sh
+++ /dev/null
@@ -1,37 +0,0 @@
-#! /bin/bash
-DIR=${HOME}/ONOS
-echo "==== Reset Demo to the Initial State ===="
-date
-start=`date +"%s"`
-echo "bringing all links up..."
-$DIR/scripts/all-linkup-hw.sh
-echo "link up done"
-
-echo "cleaning up excess flows"
-$DIR/web/delete_flow.py 201 300
-echo "excess flow cleanup done"
-echo "Adding 200 flows"
-$DIR/web/add_flow.py -m onos -f $DIR/web/flowdef_demo_start.txt
-echo "done"
-echo "killing iperf"
-dsh -g onos 'sudo pkill -KILL iperf'
-echo "done"
-echo "kill onos at 5 and 7"
-onos stop 5
-onos stop 7
-echo "done"
-echo "bring up nodes 1 2 3 4 6 8 if dead"
-for i in 1 2 3 4 6 8; do
-  status=`onos status $i | grep instance | awk '{print $2}'`
-  echo "onos $i status $status"
-  if [ x$status == "x0" ]; then
-    onos start $i
-  fi
-done
-echo "done"
-
-sleep 2
-switch local
-endt=`date +"%s"`
-(( delta = endt -start ))
-echo "finish: took $delta sec"
diff --git a/cluster-mgmt/bin/demo-scale-out-hw.sh b/cluster-mgmt/bin/demo-scale-out-hw.sh
deleted file mode 100755
index 6a44c8d..0000000
--- a/cluster-mgmt/bin/demo-scale-out-hw.sh
+++ /dev/null
@@ -1,6 +0,0 @@
-#! /bin/bash
-onos start 5
-onos start 7
-switch local
-sleep 4 
-cd ~/ONOS/web; ./add_flow.py -m onos -f flowdef_demo_add.txt &
diff --git a/cluster-mgmt/bin/func.sh b/cluster-mgmt/bin/func.sh
deleted file mode 100755
index 22071e2..0000000
--- a/cluster-mgmt/bin/func.sh
+++ /dev/null
@@ -1,182 +0,0 @@
-CASSANDRA_DIR='${HOME}/apache-cassandra-1.1.4'
-ZK_DIR='${HOME}/zookeeper-3.4.5'
-ONOS_DIR='${HOME}/ONOS'
-
-ZK_LIB='/var/lib/zookeeper'
-CASSANDRA_LIB='/var/lib/cassandra'
-
-if [ x$CLUSTER == "x" ]; then
-  echo "CLUSTER is not set. Exiting."
-  exit 1
-fi
-if [ x$ONOS_CLUSTER_BASENAME == "x" ]; then
-  echo "ONOS_CLUSTER_BASENAME is not set. Exiting."
-  exit 1
-fi
-if [ x$ONOS_CLUSTER_NR_NODES == "x" ]; then
-  echo "ONOS_CLUSTER_NR_NODES is not set. Exiting."
-  exit 1
-fi
-
-export basename=$ONOS_CLUSTER_BASENAME
-export nr_nodes=$ONOS_CLUSTER_NR_NODES
-
-checkcluster () {
-  dsh -g $basename 'uname -a'
-}
-
-zk () {
-  case "$1" in
-    start)
-      echo "Starting ZK.."
-#      dsh -g $basename "$ZK_DIR/bin/zkServer.sh start"
-      dsh -g $basename 'cd ONOS; ./start-zk.sh start'
-      while [ 1 ]; do
-#        nup=`dsh -g $basename "$ZK_DIR/bin/zkServer.sh status" | grep "Mode" | egrep "leader|follower" | wc -l`
-        nup=`dsh -g $basename "cd ONOS; ./start-zk.sh status" | grep "Mode" | egrep "leader|follower|standalone" | wc -l`
-        echo "check status"
-        if [ $nup == $nr_nodes ]; then
-          echo "everybody's up: $nup up out of $nr_nodes"
-          echo "ZK started"
-          break;
-        fi
-        echo "waiting for everybody to come up: $nup up out of $nr_nodes"
-        sleep 1
-      done
-      ;;
-    stop)
-      echo "Stopping ZK.."
-      dsh -g $basename "$ZK_DIR/bin/zkServer.sh stop"
-      ;;
-    status)
-      echo "Checking ZK Status"
-      dsh -g $basename "$ZK_DIR/bin/zkServer.sh status"
-      ;;
-  esac
-}
-
-cassandra () {
-  case "$1" in
-    start)
-      echo "Starting Cassandra.."
-      echo "  start cassandra at the seed node"
-      dsh -w ${basename}1 "cd $ONOS_DIR; ./start-cassandra.sh start"
-      sleep 1
-      echo "  start cassandra in rest nodes"
-      dsh -g ${basename} -x ${basename}1 "cd $ONOS_DIR; ./start-cassandra.sh start"
-      while [ 1 ]; do
-        echo $$
-        dsh -w ${basename}1 "cd $ONOS_DIR; ./start-cassandra.sh status" > .cassandra_check.$$
-        cat .cassandra_check.$$
-        nup=`cat .cassandra_check.$$ | grep Normal |grep Up| wc -l`
-        if [ $nup == $nr_nodes ]; then
-          echo "everybody's up: $nup up out of $nr_nodes"
-          echo "Cassandra started"
-          break;
-        fi
-        echo "waiting for everybody to come up: $nup up out of $nr_nodes"
-        sleep 1
-      done
-      ;;
-    bootup)
-      echo "Removing old Cassandra data and logs"
-      dsh -g ${basename} "rm -rf /var/lib/cassandra/*"
-      dsh -g ${basename} "rm -rf /var/log/cassandra/*"
-
-      echo "Starting Cassandra nodes one by one..."
-      for (( c=1; c<=$nr_nodes; c++ ))
-      do
-	echo "Starting node ${basename}${c}"
-	dsh -g ${basename} -w ${basename}${c} "cd $ONOS_DIR; ./start-cassandra.sh start"
-
-	#Wait until it's up
-	while [ 1 ]; do
-            echo $$
-            dsh -w ${basename}1 "cd $ONOS_DIR; ./start-cassandra.sh status" > .cassandra_check.$$
-            cat .cassandra_check.$$
-            nup=`cat .cassandra_check.$$ | grep Normal |grep Up| wc -l`
-            if [ $nup == $c ]; then
-		echo "New node up: $nup up out of $nr_nodes"
-		break;
-            fi
-            echo "Waiting for new node to come up: $nup up out of $nr_nodes"
-            sleep 5
-	done
-      done
-      ;;
-    stop)
-      echo "Stopping Cassandra.."
-      dsh -g ${basename} "cd $ONOS_DIR; ./start-cassandra.sh stop"
-      ;;
-    cleandb)
-      echo "Removing all data in db"
-      dsh -w ${basename}1 "cd $ONOS_DIR; ./scripts/cleanup-cassandra.sh"
-      ;;
-    checkdb)
-      echo "Check DB Status"
-      dsh -w ${basename}1 "cd $ONOS_DIR; ./scripts/check-db-status.sh"
-      ;;
-    status)
-      echo "Checking Cassandra Status"
-      dsh -w ${basename}1 "cd $ONOS_DIR; ./start-cassandra.sh status"
-      ;;
-  esac
-}
-
-onos () {
-  case "$1" in
-    start)
-      if [ x$2 == "x" -o x$2 == "xall" ]; then
-        echo "Starting ONOS on all nodes"
-        dsh -w ${basename}1 "cd $ONOS_DIR; ./start-onos.sh start"
-        sleep 3
-        dsh -g ${basename} -x ${basename}1 "cd $ONOS_DIR; ./start-onos.sh start"
-        dsh -g ${basename} "cd $ONOS_DIR; ./start-rest.sh start"
-      else
-        echo "Starting ONOS on ${basename}$2"
-        dsh -w ${basename}$2 "cd $ONOS_DIR; ./start-onos.sh start"
-      fi
-      ;;
-    stop)
-      if [ x$2 == "x" -o x$2 == "xall" ]; then
-        echo "Stopping ONOS on all nodes"
-        dsh -g ${basename} "cd $ONOS_DIR; ./start-onos.sh stop"
-      else
-        echo "Stopping ONOS on ${basename}$2"
-        dsh -w ${basename}$2 "cd $ONOS_DIR; ./start-onos.sh stop"
-      fi
-      ;;
-    status)
-      echo "Checking ONOS Status"
-      dsh -g ${basename} "cd $ONOS_DIR; ./start-onos.sh status"
-      ;;
-  esac
-}
-switch () {
-  case "$1" in
-    local)
-      if [ x$2 == "x" -o x$2 == "xall" ]; then
-        echo "point all switches to the local controller"
-        dsh -g ${basename} "$ONOS_DIR/scripts/ctrl-local.sh"
-      else
-        dsh -w ${basename}$2 "$ONOS_DIR/scripts/ctrl-local.sh"
-      fi
-      ;;
-    all)
-      if [ x$2 == "x" -o x$2 == "xall" ]; then
-        echo "point all non-core switches to all non-core controllers"
-        dsh -g ${basename} -x ${basename}1  "$ONOS_DIR/scripts/ctrl-add-ext.sh"
-      else
-        dsh -w ${basename}$2 "$ONOS_DIR/scripts/ctrl-add-ext.sh"
-      fi
-      ;;
-    none)
-      if [ x$2 == "x" -o x$2 == "xall" ]; then
-        echo "all non-core switches lose their controller"
-        dsh -g ${basename} -x ${basename}1 "$ONOS_DIR/scripts/ctrl-none.sh"
-      else
-        dsh -w ${basename}$2 "$ONOS_DIR/scripts/ctrl-none.sh"
-      fi
-      ;;
-  esac
-}
diff --git a/cluster-mgmt/bin/known_hosts.sh b/cluster-mgmt/bin/known_hosts.sh
deleted file mode 100755
index 7113a8d..0000000
--- a/cluster-mgmt/bin/known_hosts.sh
+++ /dev/null
@@ -1,4 +0,0 @@
-#! /bin/bash
-. ${HOME}/bin/func.sh
-
-dsh -g ${basename} 'for i in `seq 1 25`; do ssh-keyscan 1.1.$i.1 >> ${HOME}/.ssh/known_hosts; done'
diff --git a/cluster-mgmt/bin/onos b/cluster-mgmt/bin/onos
deleted file mode 100755
index b2033ad..0000000
--- a/cluster-mgmt/bin/onos
+++ /dev/null
@@ -1,5 +0,0 @@
-#! /bin/bash
-. `dirname $0`/func.sh
-
-#$0 $1 $2
-`basename $0` $1 $2
diff --git a/cluster-mgmt/bin/pingall-speedup.sh b/cluster-mgmt/bin/pingall-speedup.sh
deleted file mode 100755
index 9bec6ba..0000000
--- a/cluster-mgmt/bin/pingall-speedup.sh
+++ /dev/null
@@ -1,12 +0,0 @@
-#! /bin/bash
-if [ $# != 1 ]; then
-  echo "usage: $0 flowdef_file"; exit 1
-elif [ ! -f ${HOME}/ONOS/web/$1  ]; then
-  echo "no such flowdef file: $1"; exit 1
-fi
-logfile="/tmp/.$USER.pingall.result.$$"
-echo "Raw data at $logfile"
-dsh "cd ONOS/web; ./pingallm-local.py $1" > $logfile 
-cat $logfile | grep "Pingall flow" | sort -n -k 4 
-cat $logfile | grep "Pingall Result" | awk '{s+=$5; f+=$7; i+=$9}END{printf("Pingall Result: success %d fail %d incomplete %d\n",s,f,i)}'
-
diff --git a/cluster-mgmt/bin/ssh_exec b/cluster-mgmt/bin/ssh_exec
deleted file mode 100755
index da1f210..0000000
--- a/cluster-mgmt/bin/ssh_exec
+++ /dev/null
@@ -1,4 +0,0 @@
-#! /bin/bash
-. ${HOME}/bin/func.sh
-
-ssh `basename $0`
diff --git a/cluster-mgmt/bin/start.sh b/cluster-mgmt/bin/start.sh
deleted file mode 100755
index 277d69b..0000000
--- a/cluster-mgmt/bin/start.sh
+++ /dev/null
@@ -1,15 +0,0 @@
-#! /bin/bash
-. $HOME/bin/func.sh
-
-onos stop
-cassandra cleandb
-
-db_status=`cassandra checkdb |grep OK | wc -l`
-if [ $db_status != 1 ];then
-  echo $db_status
-  echo "Cassandra DB is corrupted. A DB key drop is needed"
-  exit 1
-fi
-onos start
-switch local
-#dsh -g $basename 'cd ONOS; ./ctrl-local.sh'
diff --git a/cluster-mgmt/bin/status.sh b/cluster-mgmt/bin/status.sh
deleted file mode 100755
index d9a3ca2..0000000
--- a/cluster-mgmt/bin/status.sh
+++ /dev/null
@@ -1,6 +0,0 @@
-#! /bin/bash
-. $HOME/bin/func.sh
-
-onos status
-cassandra status
-zk status
diff --git a/cluster-mgmt/bin/stop.sh b/cluster-mgmt/bin/stop.sh
deleted file mode 100755
index c76266a..0000000
--- a/cluster-mgmt/bin/stop.sh
+++ /dev/null
@@ -1,7 +0,0 @@
-#! /bin/bash
-. $HOME/bin/func.sh
-
-onos stop
-cassandra cleandb
-cassandra stop
-zk stop
diff --git a/cluster-mgmt/bin/switch b/cluster-mgmt/bin/switch
deleted file mode 100755
index 262c936..0000000
--- a/cluster-mgmt/bin/switch
+++ /dev/null
@@ -1,5 +0,0 @@
-#! /bin/bash
-. ${HOME}/bin/func.sh
-
-#$0 $1 $2
-`basename $0` $1 $2
diff --git a/cluster-mgmt/bin/test-link-failure.sh b/cluster-mgmt/bin/test-link-failure.sh
deleted file mode 100755
index 4db887a..0000000
--- a/cluster-mgmt/bin/test-link-failure.sh
+++ /dev/null
@@ -1,54 +0,0 @@
-#! /bin/sh
-basename=$ONOS_CLUSTER_BASENAME
-wait=10
-
-fdef="flowdef_8node_42.txt"
-
-function log()
-{
-    date > error.$1.$2.log
-    check_status.py >> error.$1.$2.log
-    dsh -w ${basename}1 "cd ONOS/web; ./get_flow.py all" >> error.$1.$2.log
-    dsh "cd ONOS/scripts; ./showflow.sh"             >> error.$1.$2.log
-}
-
-echo "all links up"
-dsh -w ${basename}1 "cd ONOS/scripts; ./all-linkup.sh"
-echo "clean up flow"
-dsh -w ${basename}1 "cd ONOS/web; ./delete_flow.py 1 100"
-sleep 1
-dsh -w ${basename}1 "cd ONOS/web; ./get_flow.py all"
-dsh "cd ONOS/scripts; ./delflow.sh"
-echo "checkup status"
-check_status.py
-read -p "hit any key> "
-
-echo "install pre-set flows"
-dsh -w ${basename}1 "cd ONOS/web; ./add_flow.py -m onos -f $fdef"
-sleep 6
-echo "check"
-dsh -w ${basename}1 "cd ONOS/web; ./pingall.py $fdef"
-
-#ports=`dsh -w ${basename}1 "cd ONOS/scripts; ./listports.sh" | awk '{print $2}' |grep -v tap`
-operation=("sw3-eth3 down" "sw4-eth4 down" "sw4-eth3 down" "sw3-eth3 up" "sw1-eth2 down" "sw4-eth4 up" "sw4-eth3 up" "sw1-eth2 up")
-
-((n=0))
-while [ 1 ] ; do
-  for (( i = 0; i< ${#operation[@]}; i ++)); do
-    echo "Test $n-$i"
-    p=`echo ${operation[$i]}`
-    echo "operation: $p"
-#  read -p "hit any key> "
-    dsh -w ${basename}1 "sudo ifconfig $p"
-    echo "wait $wait sec"
-    sleep $wait 
-    result=`dsh -w ${basename}1 "cd ONOS/web; ./pingall.py $fdef"`
-    echo $result
-    nr_fail=`echo $result |grep fail | wc -l`
-    if [ $nr_fail -gt 0 ]; then
-      log $n $i
-    fi
-  done
-  ((n++))
-done
-
diff --git a/cluster-mgmt/bin/zk b/cluster-mgmt/bin/zk
deleted file mode 100755
index b2033ad..0000000
--- a/cluster-mgmt/bin/zk
+++ /dev/null
@@ -1,5 +0,0 @@
-#! /bin/bash
-. `dirname $0`/func.sh
-
-#$0 $1 $2
-`basename $0` $1 $2
diff --git a/cluster-mgmt/common/authorized_keys b/cluster-mgmt/common/authorized_keys
deleted file mode 100644
index 0e3693f..0000000
--- a/cluster-mgmt/common/authorized_keys
+++ /dev/null
@@ -1,3 +0,0 @@
-ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCTlBTIOKm30b7TsCgIT+xjq42q0zwG+EohOGkCtNr1eGkS9OZDYwkNAkPtpzYtZJ914oRL29JiXFm+OsAfwVKsY2yZlV+tcnTx4Djfhgs6/wURMhw3sOovWu2iAoPAhQYvvvq8maD8ZvybYTzq4yHNP27G7rv4s+GCtv3bXOgzsKd8Zkg0+tGZYuCks5mNimlfWGBlA5jI9MEkd0nWTqSTRj8IkfhJo26HralR+X/KwHGryfxjG9rsyqoZGnVC/xV4KOOtZlVRzTVxCDFPj86lO4dzf7Tt+dst/t/9u/V2d7YxnuhaM+Sarve+6f/tZoekWzpNRGGT9h7FzT7Osg+l onlab-gui
-ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDEHmYMw6DugE6FCoLR5fdTO7iQfouHmLm60yxSjXu/wnBGmM7SGc1AAgmtr6JaEPYj8H6g7AL8+wFrbj7TXOoMD4HWoEzC/PuTZ5JgyCeTK/rmYdBlbAqBbLeD1d9q35O+GnWOsLIsSQHcKvKZveLLPTBtzJ6em9NfgiPKibbsAFD716w++cxXKHabzHw7KB9XaewoYdznrosWwU3TXR4C2rzMAimh6XuBLZ0xFTNF4nFhy+H0AWUEN8dY8NHwAMGlAWK4g7phZ2cQhgU4GeutfGlEKlKT3iT7j8rkW1JKsx/AOVfcnozuHCm76jYD5qXcizHeS4BYinXRepGY7mfn onlabkey
-ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC3QgAX4yEcOHaKFgeq/tD2lbGg5VbNvRka1atUSd5q8hhtw5rB8um5Q5Z6+AfL83+Xlez2KonH6JLjhhs8wBHaJCVbzvDnycMEEHg12o+MvlKgKTkkSqP9W+Jejk4YGIr6QOQ/yzZRhRGoNGMaqI6KU7NjtgZyZs8h66GTyoBeXi9TZwGYdxeF5rVqZD80nlb+xlc+PUC4TQ/o2RnGej7S0J/+ES+/X6LiNgHyZPdFK2Pr4BilLwS8c5EyAHHQuW8hIcPhNwXgrx97f5L8yuNKAmW9WSYLk0r4DhnFUZrvIGqh3isxtnJDDf3UZ2U+PtGZ75ZNfk546obsuyc/IwHH ubuntu@onos9vpc
diff --git a/cluster-mgmt/common/hosts b/cluster-mgmt/common/hosts
deleted file mode 100644
index 220b30f..0000000
--- a/cluster-mgmt/common/hosts
+++ /dev/null
@@ -1,11 +0,0 @@
-127.0.0.1 localhost
-
-# The following lines are desirable for IPv6 capable hosts
-::1 ip6-localhost ip6-loopback
-fe00::0 ip6-localnet
-ff00::0 ip6-mcastprefix
-ff02::1 ip6-allnodes
-ff02::2 ip6-allrouters
-ff02::3 ip6-allhosts
-
-## For ONOS Development
diff --git a/cluster-mgmt/common/known_hosts b/cluster-mgmt/common/known_hosts
deleted file mode 100644
index bc8d892..0000000
--- a/cluster-mgmt/common/known_hosts
+++ /dev/null
@@ -1,2 +0,0 @@
-|1|vpuCVwBaUAW338i8XkTyuZpPn3o=|OEtDpg0rUr4I6MJrPU3UgO6xIjY= ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ==
-|1|oQEfymNRsrXOo9uHu/jCST0f0I0=|UqxLCIvwPdgIlZWmusieRLCzRxE= ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAvCFIZTznMUPbS/r6b0Gw9jcnOBbH21wcBKETjXg9U5bMwHz2ocnEK8PPL1EK8uUTjZ3Kbilx4Jeio8HXEWtUkyOF/KyW1nXd0mxrWqqGQjFlpPj017Wfo0KIISgCWB9L8RJJ3aJ0selZwvmdHmg7uS306UGsJf1co2qubLGMAsdjPhYpvKXSJHoThupHBCuoqqOw80Tt5b3qJ6RwFjt/QiCgom9KoQn2DMQhS0iB9h5NHpejDX9/qLgFFiF3PdXaBCTE+vFLvoXwecp/x3pP2c8zA6FhCzYbZxLYMdMHqSmJRSKALWU3Qg9ekdXUBfzrLs4lPQ6UGFcku9WBAtN7oQ==
diff --git a/cluster-mgmt/common/zoo.cfg b/cluster-mgmt/common/zoo.cfg
deleted file mode 100644
index c4e1eb3..0000000
--- a/cluster-mgmt/common/zoo.cfg
+++ /dev/null
@@ -1,45 +0,0 @@
-# The number of milliseconds of each tick
-tickTime=2000
-# The number of ticks that the initial 
-# synchronization phase can take
-initLimit=10
-# The number of ticks that can pass between 
-# sending a request and getting an acknowledgement
-syncLimit=5
-# the directory where the snapshot is stored.
-# do not use /tmp for storage, /tmp here is just 
-# example sakes.
-dataDir=/var/lib/zookeeper
-# the port at which the clients will connect
-clientPort=2181
-#
-# specify all servers in the Zookeeper ensemble
-
-#server.1=onosgui1:2888:3888
-#server.2=onosgui2:2888:3888
-#server.3=onosgui3:2888:3888
-#server.4=onosgui4:2888:3888
-#server.5=onosgui5:2888:3888
-#server.6=onosgui6:2888:3888
-#server.7=onosgui7:2888:3888
-#server.8=onosgui8:2888:3888
-#
-#
-# Be sure to read the maintenance section of the 
-# administrator guide before turning on autopurge.
-#
-#
-# Be sure to read the maintenance section of the 
-# administrator guide before turning on autopurge.
-#
-# http://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_maintenance
-#
-# The number of snapshots to retain in dataDir
-#autopurge.snapRetainCount=3
-# Purge task interval in hours
-# Set to "0" to disable auto purge feature
-#autopurge.purgeInterval=1
-server.1=test1:2888:3888
-server.2=test2:2888:3888
-server.3=test3:2888:3888
-server.4=test4:2888:3888
diff --git a/cluster-mgmt/conf/onos-cluster.conf b/cluster-mgmt/conf/onos-cluster.conf
new file mode 100644
index 0000000..73b228d
--- /dev/null
+++ b/cluster-mgmt/conf/onos-cluster.conf
@@ -0,0 +1,60 @@
+### Cluster-wide settings ###
+# List of host names/addresses that constitute the ONOS cluster
+# NOTE: Order of names affects ZooKeeper myid
+cluster.hosts.names = onosdev1, onosdev2, onosdev3, onosdev4
+
+# Back-end module to store topology/flows
+cluster.hosts.backend = ramcloud
+#cluster.hosts.backend = hazelcast
+
+# Protocol used by RAMCloud cluster (fast+udp by default)
+#cluster.hosts.ramcloud.protocol = fast+udp
+
+# Network method used for Hazelcast communication
+#cluster.hosts.hazelcast.network = multicast
+cluster.hosts.hazelcast.network = tcp-ip
+
+# Multicast address used by Hazelcast datagrid (224.2.2.3 by default)
+# Valid only if cluster.hosts.hazelcast.network is set to "multicast"
+#cluster.hosts.hazelcast.multicast.address = 224.2.2.3
+
+# Multicast port used by Hazelcast datagrid (54327 by default)
+# Valid only if cluster.hosts.hazelcast.network is set to "multicast"
+#cluster.hosts.hazelcast.multicast.port = 54327
+
+### Host-specific settings ###
+# IP address of host used for ONOS communication (resolved hostname by default)
+#cluster.onosdev1.ip = 192.168.56.11
+
+# Role of host
+cluster.onosdev1.role = rc-coord-and-server
+
+# IP address or hostname of host used for ZooKeeper communication (cluster.onosdev1.ip by default)
+#cluster.onosdev1.zk.host = 192.168.56.11
+
+# IP address of host used for RAMCloud communication (cluster.onosdev1.ip by default)
+#cluster.onosdev1.ramcloud.ip = 192.168.56.11
+
+# Port number used by RAMCloud coordinator (12246 by default)
+#cluster.onosdev1.ramcloud.coordinator.port = 12246
+
+# Port number used by RAMCloud server (12242 by default)
+#cluster.onosdev1.ramcloud.server.port = 12242
+
+# IP address of host used for Hazelcast communication (cluster.onosdev1.ip by default)
+# Valid only if cluster.hosts.hazelcast.network is set to "tcp-ip"
+#cluster.onosdev1.hazelcast.ip = 192.168.56.11
+
+# At minimum, the role must be specified for every host
+cluster.onosdev2.role = rc-server
+cluster.onosdev3.role = rc-server
+cluster.onosdev4.role = rc-server
+
+
+### SSH settings used for delivering config files ###
+# Common username used to log in to hosts (current user by default)
+#remote.common.ssh.user = mininet
+
+# Host-specific username settings
+#remote.onosdev1.ssh.user = mininet
+#remote.onosdev2.ssh.user = mininet
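
The settings above are read with the read-conf helper sourced from
${ONOS_HOME}/scripts/common/utils.sh, which is not part of this change. A rough
bash sketch of the key/default lookup it is assumed to provide (a hypothetical
stand-in, not the real implementation):

  # Hypothetical stand-in for read-conf (the real helper lives in
  # scripts/common/utils.sh and is not shown in this diff).
  # usage: read-conf <conf-file> <key> [default]
  read-conf() {
    local value
    # take the first "key = value" line, strip the key and surrounding spaces
    value=$(grep -E "^[[:space:]]*${2}[[:space:]]*=" "$1" | head -n 1 \
            | cut -d= -f2- | sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//')
    echo "${value:-$3}"
  }

  # Example: falls back to "hazelcast" when cluster.hosts.backend is commented out.
  read-conf conf/onos-cluster.conf cluster.hosts.backend hazelcast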
diff --git a/cluster-mgmt/conf/template/onos_node.conf.template b/cluster-mgmt/conf/template/onos_node.conf.template
new file mode 100644
index 0000000..b6e069d
--- /dev/null
+++ b/cluster-mgmt/conf/template/onos_node.conf.template
@@ -0,0 +1,44 @@
+# Name of this host (`hostname` by default)
+host.name = __HOST_NAME__
+
+# IP address of this host used for ONOS communication
+host.ip = __HOST_IP__
+
+# Role of this host
+host.role = __ONOS_ROLE__
+
+# Back-end module to store topology/flows
+host.backend = __BACKEND__
+
+# List of host names/IPs that constitute the ZooKeeper cluster
+# myid will be assigned incrementally according to the order of the list
+zookeeper.hosts = __ZK_HOSTS__
+
+# Protocol used by RAMCloud coordinator (fast+udp by default)
+ramcloud.coordinator.protocol = __RAMCLOUD_PROTOCOL__
+
+# IP address of RAMCloud coordinator (host.ip by default)
+ramcloud.coordinator.ip = __RAMCLOUD_IP__
+
+# Port number of RAMCloud coordinator (12246 by default)
+ramcloud.coordinator.port = __RAMCLOUD_COORD_PORT__
+
+# Protocol used by RAMCloud server (fast+udp by default)
+ramcloud.server.protocol = __RAMCLOUD_PROTOCOL__
+
+# IP address of RAMCloud server (host.ip by default)
+ramcloud.server.ip = __RAMCLOUD_IP__
+
+# Port number of RAMCloud server (12242 by default)
+ramcloud.server.port = __RAMCLOUD_SERVER_PORT__
+
+# List of hostname/ip[:port] entries that form the Hazelcast datagrid
+# If this value is empty, Hazelcast will be set to multicast mode.
+# Conversely, if this value is set, multicast settings will be ignored.
+hazelcast.tcp-ip.members = __HAZELCAST_MEMBERS__
+
+# Multicast address used by Hazelcast. (224.2.2.3 by default)
+hazelcast.multicast.group = __HAZELCAST_MULTICAST_GROUP__
+
+# Multicast port used by Hazelcast. (54327 by default)
+#hazelcast.multicast.port = __HAZELCAST_MULTICAST_PORT__
\ No newline at end of file
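
For illustration, after setup substitutes the placeholders, a generated
onos_node.conf for the first host of the sample cluster above could look like
this (all values hypothetical, based on the defaults noted in the comments):

  host.name = onosdev1
  host.ip = 192.168.56.11
  host.role = rc-coord-and-server
  host.backend = ramcloud
  zookeeper.hosts = onosdev1,onosdev2,onosdev3,onosdev4
  ramcloud.coordinator.protocol = fast+udp
  ramcloud.coordinator.ip = 192.168.56.11
  ramcloud.coordinator.port = 12246
  ramcloud.server.protocol = fast+udp
  ramcloud.server.ip = 192.168.56.11
  ramcloud.server.port = 12242
  hazelcast.tcp-ip.members = onosdev1, onosdev2, onosdev3, onosdev4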
diff --git a/cluster-mgmt/cp-config.sh b/cluster-mgmt/cp-config.sh
deleted file mode 100755
index e6db9d3..0000000
--- a/cluster-mgmt/cp-config.sh
+++ /dev/null
@@ -1,63 +0,0 @@
-#! /bin/bash
-USERNAME=ubuntu
-CASSANDRA_DIR='/home/ubuntu/apache-cassandra-1.1.4'
-ZK_DIR='/home/ubuntu/zookeeper-3.4.5'
-ZK_LIB='/var/lib/zookeeper'
-CASSANDRA_LIB='/var/lib/cassandra'
-
-SSH_COPY="authorized_keys  id_rsa  id_rsa.pub  known_hosts  onlab-gui.pem  onlabkey.pem"
-
-if [ x$ONOS_CLUSTER_BASENAME == "x" -o x$ONOS_CLUSTER_NR_NODES == "x" ]; then
-  echo "set environment variables ONOS_CLUSTER_BASENAME and ONOS_CLUSTER_NR_NODES"
-  exit 1
-fi
-
-basename=$ONOS_CLUSTER_BASENAME
-NR_NODES=$ONOS_CLUSTER_NR_NODES
-
-dsh -g $basename 'uname -a'
-
-echo "Stopping Services"
-#dsh -g $basename 'cd ONOS; ./start-onos.sh stop'
-#dsh -g $basename 'cd ONOS; ./stop-cassandra stop'
-#dsh -g $basename '$ZK_DIR/bin/zkServer.sh stop'
-
-# authorized_keys  cassandra.yaml  hosts  id_rsa  id_rsa.pub  known_hosts  onlab-gui.pem  onlabkey.pem  zoo.cfg
-## SSH Setting
-dsh -g $basename 'mkdir -m 700 .ssh' 
-for n in $SSH_COPY; do
- pcp -g $basename  common/$n '.ssh'
- if [ $n != "id_rsa.pub" ] ; then
-   dsh -g $basename "chmod 600 .ssh/$n"
- fi
-done
-
-dsh -g $basename "sudo rm -rf $CASSANDRA_LIB/commitlog/*"
-dsh -g $basename "sudo rm -rf $CASSANDRA_LIB/saved_caches/*"
-dsh -g $basename "sudo rm -rf $CASSANDRA_LIB/data/*"
-dsh -g $basename "sudo chown -R $USERNAME:$USERNAME $CASSANDRA_LIB"
-
-dsh -g $basename "sudo rm -rf $ZK_LIB/version-2*"
-dsh -g $basename "sudo rm -rf $ZK_LIB/myid"
-
-pcp -g $basename common/cassandra.yaml $CASSANDRA_DIR/conf
-pcp -g $basename common/zoo.cfg        $ZK_DIR/conf
-pcp -g $basename common/hosts          '~'
-
-for n in `seq 1 $NR_NODES`; do
-  pcp -w ${basename}${n} ${basename}${n}/hostname '~'
-  pcp -w ${basename}${n} ${basename}${n}/myid $ZK_DIR/conf
-done
-
-dsh -g $basename 'sudo cp ~/hostname /etc' 
-dsh -g $basename 'sudo cp ~/hosts /etc' 
-dsh -g $basename "cd $ZK_LIB; sudo ln -s $ZK_DIR/conf/myid"
-
-dsh -g $basename 'sudo hostname `cat /etc/hostname`'
-
-#for n in `seq 2 $NR_NODES`; do
-#  pcp -w ${basename}${n} ${basename}${n}/onsdemo_edge.py 'ONOS/test-network/mininet'
-#  pcp -w ${basename}${n} ${basename}${n}/tunnel_onos_edge.sh 'ONOS/test-network/mininet'
-#done
-#pcp -w ${basename}1 ${basename}1/tunnel_onos_core.sh 'ONOS/test-network/mininet'
-#pcp -w ${basename}1 ${basename}1/onsdemo_core.py 'ONOS/test-network/mininet'
diff --git a/cluster-mgmt/cp-mininet.sh b/cluster-mgmt/cp-mininet.sh
deleted file mode 100755
index b7307c0..0000000
--- a/cluster-mgmt/cp-mininet.sh
+++ /dev/null
@@ -1,23 +0,0 @@
-#! /bin/bash
-USERNAME=ubuntu
-CASSANDRA_DIR='/home/ubuntu/apache-cassandra-1.1.4'
-ZK_DIR='/home/ubuntu/zookeeper-3.4.5'
-ZK_LIB='/var/lib/zookeeper'
-CASSANDRA_LIB='/var/lib/cassandra'
-
-if [ x$ONOS_CLUSTER_BASENAME == "x" -o x$ONOS_CLUSTER_NR_NODES == "x" ]; then
-  echo "set environment variables ONOS_CLUSTER_BASENAME and ONOS_CLUSTER_NR_NODES"
-  exit 1
-fi
-
-basename=$ONOS_CLUSTER_BASENAME
-NR_NODES=$ONOS_CLUSTER_NR_NODES
-
-dsh -g $basename 'uname -a'
-
-for n in `seq 1 $NR_NODES`; do
-  pcp -w ${basename}${n} ${basename}${n}/onsdemo.py 'ONOS/test-network/mininet'
-  pcp -w ${basename}${n} ${basename}${n}/tunnel_onsdemo.sh 'ONOS/test-network/mininet'
-done
-dsh -g $basename 'chmod 755 ONOS/test-network/mininet/tunnel_onsdemo.sh'
-dsh -g $basename 'chmod 755 ONOS/test-network/mininet/onsdemo.py'
diff --git a/cluster-mgmt/make-config.sh b/cluster-mgmt/make-config.sh
deleted file mode 100755
index 5e0794f..0000000
--- a/cluster-mgmt/make-config.sh
+++ /dev/null
@@ -1,60 +0,0 @@
-#! /bin/bash
-USERNAME=ubuntu
-if [ x$ONOS_CLUSTER_BASENAME == "x" -o x$ONOS_CLUSTER_NR_NODES == "x" ]; then 
-  echo "set environment variables ONOS_CLUSTER_BASENAME and ONOS_CLUSTER_NR_NODES"
-  exit 1
-elif [ $# != 1 ]; then
-  echo "usage: $0 hostfile"
-  exit 1
-fi
-
-basename=$ONOS_CLUSTER_BASENAME
-NR_NODES=$ONOS_CLUSTER_NR_NODES
-hosts_file=$1
-
-for n in `seq 1 $NR_NODES`; do
-  rm -rf ${basename}${n}
-  mkdir ${basename}${n}
-  echo "${basename}${n}" > ${basename}${n}/hostname
-  echo $n > ${basename}${n}/myid
-done
-
-## ZK config ##
-cp template/zoo.cfg common/
-for n in `seq 1 $NR_NODES`; do
- echo "server.${n}=${basename}${n}:2888:3888"
-done >> common/zoo.cfg
-
-## Cassandra config ##
-cat template/cassandra.yaml |\
-  sed "s/__SEED__/${basename}1/g" > common/cassandra.yaml
-
-## /etc/hosts ##
-cat template/hosts $hosts_file >  common/hosts
-
-
-## .ssh/known_hosts ##
-ssh-keyscan -H -t rsa github.com > common/known_hosts
-ssh-keyscan -H -t rsa onosnat >> common/known_hosts
-for n in `seq 1 $NR_NODES`; do
-  ssh-keyscan -H -t rsa ${basename}${n}
-done >> common/known_hosts
-
-echo "GROUP: $basename" > bin/cluster.txt
-cat $hosts_file | awk '{print $2}' >> bin/cluster.txt
-
-
-## Creating shell script to login each node ##
-for n in `seq 1 $NR_NODES`; do
-  cat << EOF > bin/${basename}${n}
-#!/bin/sh
-ssh $USERNAME@${basename}${n}
-EOF
-  chmod 755 bin/${basename}${n}
-done
-
-echo "======================================"
-echo "Do not forget to do the following"
-echo "paste $hosts_file to /etc/hosts"
-echo "paste cluster.txt to your CLUSTER file"
-echo "======================================"
diff --git a/cluster-mgmt/make-mininet.sh b/cluster-mgmt/make-mininet.sh
deleted file mode 100755
index 207b89e..0000000
--- a/cluster-mgmt/make-mininet.sh
+++ /dev/null
@@ -1,39 +0,0 @@
-#! /bin/bash
-if [ x$ONOS_CLUSTER_BASENAME == "x" -o x$ONOS_CLUSTER_NR_NODES == "x" ]; then 
-  echo "set environment variables ONOS_CLUSTER_BASENAME and ONOS_CLUSTER_NR_NODES"
-  exit 1
-elif [ $# != 1 ]; then
-  echo "usage: $0 hostfile"
-  exit 1
-fi
-
-basename=$ONOS_CLUSTER_BASENAME
-## Shell variable names can't contain "-", so replace it with "_"
-basename_var=`echo $ONOS_CLUSTER_BASENAME | sed 's/\-/_/g'`
-
-NR_NODES=$ONOS_CLUSTER_NR_NODES
-hosts_file=$1
-
-for n in `seq 2 $NR_NODES`; do
-  if [ $n == 2 ]; then
-    nrsw=50
-  else
-    nrsw=25
-  fi
-  cat template/onsdemo_edge_template.py | sed "s/__NWID__/$n/g" | sed "s/__NRSW__/${nrsw}/g" > ${basename}${n}/onsdemo.py
-done
-cp template/onsdemo_core.py ${basename}1/onsdemo.py
-
-## Shell variable names can't contain "-", so replace it with "_"
-cat $hosts_file | sed 's/\-/\_/g' | awk '{printf("%s=%s\n",$2,$1)}' > .tmp
-
-for n in `seq 2 $NR_NODES`; do
-  cat template/tunnel_onsdemo_edge_template.sh | awk '{if(NR==2){system("cat .tmp")}else{print $0}}' |\
-  sed "s/__NWID__/$n/g" |\
-  sed "s/__TUNNEL__/TUNNEL\=\(\"1 $n ${basename_var}1\"\)/g" > ${basename}${n}/tunnel_onsdemo.sh
-  chmod 755 ${basename}${n}/tunnel_onsdemo.sh
-done
-
-cat template/tunnel_onsdemo_core_template.sh | awk '{if(NR==2){system("cat .tmp")}else{print $0}}' |\
-  sed "s/__basename__/${basename_var}/g" > ${basename}1/tunnel_onsdemo.sh 
-  chmod 755 ${basename}1/tunnel_onsdemo.sh
diff --git a/cluster-mgmt/onos-cluster.sh b/cluster-mgmt/onos-cluster.sh
new file mode 100755
index 0000000..e1cceb8
--- /dev/null
+++ b/cluster-mgmt/onos-cluster.sh
@@ -0,0 +1,328 @@
+#! /bin/bash
+
+set -e
+
+### Env vars used by this script. (default value) ###
+# $ONOS_HOME         : path of root directory of ONOS repository (~/ONOS)
+# $ONOS_CLUSTER_HOME : path of ONOS cluster tools directory (this script's dir)
+# $REMOTE_ONOS_HOME  : path of root directory of ONOS repository in remote hosts (ONOS)
+# $SSH               : command name to access host
+# $PSSH              : command name to access hosts in parallel
+# $SCP               : command name to copy config file to each host
+#####################################################
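+#
+# Example (hypothetical paths/values): any of the above can be overridden
+# on the command line before invoking this script, e.g.
+#   $ REMOTE_ONOS_HOME=/opt/ONOS PSSH=pssh ./onos-cluster.sh setup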
+
+
+### Variables read from ONOS config file ###
+ONOS_HOME=${ONOS_HOME:-${HOME}/ONOS}
+
+source ${ONOS_HOME}/scripts/common/utils.sh
+
+CLUSTER_HOME=${ONOS_CLUSTER_HOME:-$(cd `dirname $0`; pwd)}
+CLUSTER_CONF_DIR=${CLUSTER_HOME}/conf
+CLUSTER_CONF=${ONOS_CLUSTER_CONF:-${CLUSTER_CONF_DIR}/onos-cluster.conf}
+CLUSTER_TEMPLATE_DIR=${CLUSTER_CONF_DIR}/template
+
+REMOTE_ONOS_HOME=${REMOTE_ONOS_HOME:-ONOS}
+REMOTE_ONOS_CONF_DIR=${REMOTE_ONOS_HOME}/conf
+
+if [ ! -f ${CLUSTER_CONF} ]; then
+  echo "${CLUSTER_CONF} not found."
+  exit 1
+fi
+CLUSTER_HOSTS=$(read-conf ${CLUSTER_CONF}       cluster.hosts.names             `hostname` | tr ',' ' ')
+CLUSTER_BACKEND=$(read-conf ${CLUSTER_CONF}     cluster.hosts.backend)
+CLUSTER_RC_PROTOCOL=$(read-conf ${CLUSTER_CONF} cluster.hosts.ramcloud.protocol "fast+udp")
+CLUSTER_HC_NETWORK=$(read-conf ${CLUSTER_CONF}  cluster.hosts.hazelcast.network)
+CLUSTER_HC_ADDR=$(read-conf ${CLUSTER_CONF}     cluster.hosts.hazelcast.multicast.address "224.2.2.3")
+CLUSTER_HC_PORT=$(read-conf ${CLUSTER_CONF}     cluster.hosts.hazelcast.multicast.port    "54327")
+############################################
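+# A sketch of onos-cluster.conf entries for the keys read above, assuming
+# read-conf parses simple "key = value" lines (host names and values are
+# illustrative, not defaults):
+#   cluster.hosts.names = onos1,onos2,onos3
+#   cluster.hosts.backend = ramcloud
+#   cluster.hosts.hazelcast.network = multicast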
+
+
+ONOS_CONF_TEMPLATE=${CLUSTER_TEMPLATE_DIR}/onos_node.conf.template
+
+
+### Parallel SSH settings ###
+SSH=${SSH:-ssh}
+PSSH=${PSSH:-parallel-ssh}
+PSSH_CONF=${CLUSTER_CONF_DIR}/pssh.hosts
+SCP=${SCP:-scp}
+#############################
+
+
+############# Common functions #############
+function print_usage {
+  local filename=`basename ${CLUSTER_CONF}`
+  local usage="Usage: setup/deploy/start/stop/status ONOS cluster.
+ \$ $0 setup [-f]
+    Set up ONOS cluster using ${filename}.
+    If -f option is used, all existing files will be overwritten without confirmation.
+ \$ $0 deploy [-f]
+    Deliver node config files to cluster nodes.
+    If -f option is used, all existing files will be overwritten without confirmation.
+ \$ $0 start
+    Start ONOS cluster
+ \$ $0 stop
+    Stop ONOS cluster
+ \$ $0 status
+    Show status of ONOS cluster"
+
+  echo "${usage}"
+}
+
+############################################
+
+
+############# Setup functions ##############
+
+function list-zk-hosts {
+  local list=()
+  for host in ${CLUSTER_HOSTS}; do 
+    local zk_host_string=$(read-conf ${CLUSTER_CONF} "cluster.${host}.zk.host")
+    
+    if [ -z ${zk_host_string} ]; then
+      # falling back to ip
+      zk_host_string=$(read-conf ${CLUSTER_CONF} "cluster.${host}.ip")
+    fi
+    if [ -z ${zk_host_string} ]; then
+      # falling back to hostname
+      zk_host_string=${host}
+    fi
+    
+    list=("${list[@]}" ${zk_host_string})
+  done
+  
+  # join with comma
+  local IFS=,
+  echo "${list[*]}"
+}
+
+function list-hc-hosts {
+  local list=()
+  for host in ${CLUSTER_HOSTS}; do 
+    local hc_host_string=$(read-conf ${CLUSTER_CONF} "cluster.${host}.hazelcast.ip")
+    
+    if [ -z ${hc_host_string} ]; then
+      # falling back to ip
+      hc_host_string=$(read-conf ${CLUSTER_CONF} "cluster.${host}.ip")
+    fi
+    
+    if [ -z ${hc_host_string} ]; then
+      # falling back to hostname
+      hc_host_string=${host}
+    fi
+    
+    list=("${list[@]}" ${hc_host_string})
+  done
+  
+  local IFS=,
+  echo "${list[*]}"
+}
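+
+# Both list-* helpers above fall back per host: explicit setting ->
+# cluster.<host>.ip -> host name. For example (hypothetical hosts), with
+# cluster.hosts.names = onos1,onos2 and only cluster.onos1.ip = 10.0.0.1
+# set, list-hc-hosts prints "10.0.0.1,onos2".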
+
+function create-pssh-conf {
+  local tempfile=`begin-conf-creation ${PSSH_CONF}`
+  
+  # creation of pssh config file
+  for host in ${CLUSTER_HOSTS}; do
+    local user=$(read-conf ${CLUSTER_CONF} remote.${host}.ssh.user)
+    if [ -z ${user} ]; then
+      # falling back to common setting
+      user=$(read-conf ${CLUSTER_CONF} remote.common.ssh.user)
+    fi
+    
+    if [ -z ${user} ]; then
+      echo ${host} >> ${tempfile}
+    else
+      echo ${user}@${host} >> ${tempfile}
+    fi
+  done
+  
+  end-conf-creation ${PSSH_CONF}
+}
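+
+# The generated pssh.hosts lists one ssh target per line, e.g. (assuming
+# remote.common.ssh.user = ubuntu and no per-host overrides):
+#   ubuntu@onos1
+#   ubuntu@onos2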
+
+# create-onos-conf {hostname}
+function create-onos-conf {
+  local host_name=${1}
+  
+  if [ -z ${host_name} ]; then
+    echo "FAILED"
+    echo "[ERROR] invalid hostname ${host_name}"
+    exit 1
+  fi
+  
+  local onos_conf="${CLUSTER_CONF_DIR}/onos_node.${host_name}.conf"
+  local tempfile=`begin-conf-creation ${onos_conf}`
+
+  cp ${ONOS_CONF_TEMPLATE} ${tempfile}
+  
+  local prefix="cluster.${host_name}"
+  
+  local host_ip=$(read-conf ${CLUSTER_CONF} "${prefix}.ip")
+  local host_string=${host_ip}
+  if [ -z "${host_string}" ]; then
+    host_string=${host_name}
+  fi
+  local host_role=$(read-conf ${CLUSTER_CONF} "${prefix}.role")
+  local zk_hosts=`list-zk-hosts`
+  local rc_ip=$(read-conf ${CLUSTER_CONF} "${prefix}.ramcloud.ip" ${host_string})
+  local rc_coord_port=$(read-conf ${CLUSTER_CONF} "${prefix}.ramcloud.coordinator.port" 12246)
+  local rc_server_port=$(read-conf ${CLUSTER_CONF} "${prefix}.ramcloud.server.port" 12242)
+  local hc_hosts=`list-hc-hosts`
+  
+  # creation of ONOS node config file
+  sed -i -e "s|__HOST_NAME__|${host_name}|" ${tempfile}
+  if [ -z "${host_ip}" ]; then
+    # comment out
+    sed -i -e "s|^\(.*__HOST_IP__.*\)$|#\1|" ${tempfile}
+  else
+    sed -i -e "s|__HOST_IP__|${host_ip}|" ${tempfile}
+  fi
+  sed -i -e "s|__ONOS_ROLE__|${host_role}|" ${tempfile}
+  sed -i -e "s|__BACKEND__|${CLUSTER_BACKEND}|" ${tempfile}
+  sed -i -e "s|__ZK_HOSTS__|${zk_hosts}|" ${tempfile}
+  sed -i -e "s|__RAMCLOUD_PROTOCOL__|${CLUSTER_RC_PROTOCOL}|" ${tempfile}
+  sed -i -e "s|__RAMCLOUD_IP__|${rc_ip}|" ${tempfile}
+  sed -i -e "s|__RAMCLOUD_COORD_PORT__|${rc_coord_port}|" ${tempfile}
+  sed -i -e "s|__RAMCLOUD_SERVER_PORT__|${rc_server_port}|" ${tempfile}
+  
+  if [ "${CLUSTER_HC_NETWORK}" = "tcp-ip" ]; then
+    sed -i -e "s|__HAZELCAST_MEMBERS__|${hc_hosts}|" ${tempfile}
+    
+    # Comment out unused parameters
+    sed -i -e "s|^\(.*__HAZELCAST_MULTICAST_GROUP__.*\)$|#\1|" ${tempfile}
+    sed -i -e "s|^\(.*__HAZELCAST_MULTICAST_PORT__.*\)$|#\1|" ${tempfile}
+  elif [ "${CLUSTER_HC_NETWORK}" = "multicast" ]; then
+    sed -i -e "s|__HAZELCAST_MULTICAST_GROUP__|${CLUSTER_HC_ADDR}|" ${tempfile}
+    sed -i -e "s|__HAZELCAST_MULTICAST_PORT__|${CLUSTER_HC_PORT}|" ${tempfile}
+    
+    sed -i -e "s|^\(.*__HAZELCAST_MEMBERS__.*\)$|#\1|" ${tempfile}
+  fi
+ 
+  end-conf-creation ${onos_conf}
+}
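+
+# Illustration of the substitution above, assuming onos_node.conf.template
+# carries lines such as (key names here are hypothetical):
+#   host.name = __HOST_NAME__
+#   zookeeper.hosts = __ZK_HOSTS__
+# For a host "onos1" in a two-node cluster this would yield:
+#   host.name = onos1
+#   zookeeper.hosts = onos1,onos2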
+
+# setup -f : force overwrite existing files
+function setup {
+  if [ "${1}" = "-f" ]; then
+    create-pssh-conf
+    
+    for host in ${CLUSTER_HOSTS}; do 
+      create-onos-conf ${host}
+    done
+  else
+    create-conf-interactive ${PSSH_CONF} create-pssh-conf
+    
+    for host in ${CLUSTER_HOSTS}; do 
+      local filename="${CLUSTER_CONF_DIR}/onos_node.${host}.conf"
+      create-conf-interactive ${filename} create-onos-conf ${host}
+    done
+  fi
+}
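+
+# Typical invocation: interactively confirm overwrites, or force with -f:
+#   $ ./onos-cluster.sh setup
+#   $ ./onos-cluster.sh setup -f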
+
+############################################
+
+
+############ Deploy functions ##############
+
+function deploy {
+  if [ ! -f ${PSSH_CONF} ]; then
+    echo "[ERROR] ${PSSH_CONF} not found"
+    local command=`basename ${0}`
+    echo "[ERROR] Try \"${command} setup\" to create files."
+    exit 1
+  fi
+
+  for host in ${CLUSTER_HOSTS}; do
+    local conf=${CLUSTER_CONF_DIR}/onos_node.${host}.conf
+    if [ ! -f ${conf} ]; then
+      echo "[ERROR] ${conf} not found"
+      local command=`basename ${0}`
+      echo "[ERROR] Try \"${command} setup\" to create files."
+      exit 1
+    fi
+      
+    local user=$(read-conf ${CLUSTER_CONF} "remote.${host}.ssh.user")
+    if [ -z "${user}" ]; then
+      # falling back to common setting
+      user=$(read-conf ${CLUSTER_CONF} "remote.common.ssh.user")
+    fi
+
+    # Fall back to the plain hostname when no ssh user is configured,
+    # matching the behavior of create-pssh-conf.
+    local target=${host}
+    if [ -n "${user}" ]; then
+      target=${user}@${host}
+    fi
+
+    ${SCP} ${conf} ${target}:${REMOTE_ONOS_CONF_DIR}
+    ${SSH} ${target} "cd ${REMOTE_ONOS_HOME}; ./onos.sh setup -f"
+  done
+ 
+# TODO: Replace the per-host ssh commands with the pssh command below.
+#       The concurrency problem when the ONOS directory is shared among
+#       hosts needs to be solved first.
+#  ${PSSH} -i -h ${PSSH_CONF} "cd ${REMOTE_ONOS_HOME}; ./onos.sh setup -f"
+}
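+
+# Example: after a successful "setup", push the per-host configs out and
+# initialize each node (hypothetical three-node cluster):
+#   $ ./onos-cluster.sh deploy
+# which scp's onos_node.<host>.conf to each node and runs
+# "./onos.sh setup -f" there over ssh.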
+############################################
+
+
+############# Start functions ##############
+
+function start {
+  if [ ! -f ${PSSH_CONF} ]; then
+    echo "[ERROR] ${PSSH_CONF} not found"
+    local command=`basename ${0}`
+    echo "[ERROR] Try \"${command} setup\" to create files."
+    exit 1
+  fi
+  
+  ${PSSH} -i -h ${PSSH_CONF} "cd ${REMOTE_ONOS_HOME}; ./onos.sh start"
+}
+
+############################################
+
+
+############# Stop functions ##############
+
+function stop {
+  if [ ! -f ${PSSH_CONF} ]; then
+    echo "[ERROR] ${PSSH_CONF} not found"
+    local command=`basename ${0}`
+    echo "[ERROR] Try \"${command} setup\" to create files."
+    exit 1
+  fi
+  
+  ${PSSH} -i -h ${PSSH_CONF} "cd ${REMOTE_ONOS_HOME}; ./onos.sh stop"
+}
+
+############################################
+
+
+############ Status functions ##############
+
+function status {
+  if [ ! -f ${PSSH_CONF} ]; then
+    echo "[ERROR] ${PSSH_CONF} not found"
+    local command=`basename ${0}`
+    echo "[ERROR] Try \"${command} setup\" to create files."
+    exit 1
+  fi
+  
+  ${PSSH} -i -h ${PSSH_CONF} "cd ${REMOTE_ONOS_HOME}; ./onos.sh status"
+}
+
+############################################
+
+
+################## Main ####################
+case "$1" in
+  setup)
+    setup $2
+    ;;
+  deploy)
+    deploy
+    ;;
+  start)
+    start
+    ;;
+  stop)
+    stop
+    ;;
+  stat*) # matches "status"
+    status
+    ;;
+  *)
+    print_usage
+    exit 1
+esac
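+
+# End-to-end example of a cluster rollout (hypothetical cluster):
+#   $ vi conf/onos-cluster.conf       # describe the hosts
+#   $ ./onos-cluster.sh setup         # generate per-host configs and pssh.hosts
+#   $ ./onos-cluster.sh deploy        # push configs and run remote onos.sh setup
+#   $ ./onos-cluster.sh start         # start ONOS on every node in parallel
+#   $ ./onos-cluster.sh status        # check the cluster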
diff --git a/cluster-mgmt/ssh/authorized_keys b/cluster-mgmt/ssh/authorized_keys
deleted file mode 100644
index f723bc2..0000000
--- a/cluster-mgmt/ssh/authorized_keys
+++ /dev/null
@@ -1,2 +0,0 @@
-ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCTlBTIOKm30b7TsCgIT+xjq42q0zwG+EohOGkCtNr1eGkS9OZDYwkNAkPtpzYtZJ914oRL29JiXFm+OsAfwVKsY2yZlV+tcnTx4Djfhgs6/wURMhw3sOovWu2iAoPAhQYvvvq8maD8ZvybYTzq4yHNP27G7rv4s+GCtv3bXOgzsKd8Zkg0+tGZYuCks5mNimlfWGBlA5jI9MEkd0nWTqSTRj8IkfhJo26HralR+X/KwHGryfxjG9rsyqoZGnVC/xV4KOOtZlVRzTVxCDFPj86lO4dzf7Tt+dst/t/9u/V2d7YxnuhaM+Sarve+6f/tZoekWzpNRGGT9h7FzT7Osg+l onlab-gui
-ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDEHmYMw6DugE6FCoLR5fdTO7iQfouHmLm60yxSjXu/wnBGmM7SGc1AAgmtr6JaEPYj8H6g7AL8+wFrbj7TXOoMD4HWoEzC/PuTZ5JgyCeTK/rmYdBlbAqBbLeD1d9q35O+GnWOsLIsSQHcKvKZveLLPTBtzJ6em9NfgiPKibbsAFD716w++cxXKHabzHw7KB9XaewoYdznrosWwU3TXR4C2rzMAimh6XuBLZ0xFTNF4nFhy+H0AWUEN8dY8NHwAMGlAWK4g7phZ2cQhgU4GeutfGlEKlKT3iT7j8rkW1JKsx/AOVfcnozuHCm76jYD5qXcizHeS4BYinXRepGY7mfn onlabkey
diff --git a/cluster-mgmt/ssh/id_rsa.pub b/cluster-mgmt/ssh/id_rsa.pub
deleted file mode 100644
index bcb2d75..0000000
--- a/cluster-mgmt/ssh/id_rsa.pub
+++ /dev/null
@@ -1 +0,0 @@
-ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC3QgAX4yEcOHaKFgeq/tD2lbGg5VbNvRka1atUSd5q8hhtw5rB8um5Q5Z6+AfL83+Xlez2KonH6JLjhhs8wBHaJCVbzvDnycMEEHg12o+MvlKgKTkkSqP9W+Jejk4YGIr6QOQ/yzZRhRGoNGMaqI6KU7NjtgZyZs8h66GTyoBeXi9TZwGYdxeF5rVqZD80nlb+xlc+PUC4TQ/o2RnGej7S0J/+ES+/X6LiNgHyZPdFK2Pr4BilLwS8c5EyAHHQuW8hIcPhNwXgrx97f5L8yuNKAmW9WSYLk0r4DhnFUZrvIGqh3isxtnJDDf3UZ2U+PtGZ75ZNfk546obsuyc/IwHH ubuntu@onos9vpc
diff --git a/cluster-mgmt/start-mininet.sh b/cluster-mgmt/start-mininet.sh
deleted file mode 100755
index e226c39..0000000
--- a/cluster-mgmt/start-mininet.sh
+++ /dev/null
@@ -1,21 +0,0 @@
-#! /bin/bash
-USERNAME=ubuntu
-CASSANDRA_DIR='/home/ubuntu/apache-cassandra-1.1.4'
-ZK_DIR='/home/ubuntu/zookeeper-3.4.5'
-ZK_LIB='/var/lib/zookeeper'
-CASSANDRA_LIB='/var/lib/cassandra'
-
-if [ x$ONOS_CLUSTER_BASENAME == "x" -o x$ONOS_CLUSTER_NR_NODES == "x" ]; then
-  echo "set environment variable ONOS_CLUSTER_BASENAME and ONOS_CLUSTER_NR_NODES"
-  exit
-fi
-
-basename=$ONOS_CLUSTER_BASENAME
-NR_NODES=$ONOS_CLUSTER_NR_NODES
-
-dsh -g $basename 'uname -a'
-
-dsh -g ${basename} 'cd ONOS/test-network/mininet; ./tunnel_onsdemo.sh start'
-dsh -g ${basename} 'cd ONOS/test-network/mininet; ./tunnel_onsdemo.sh start'
-dsh -g ${basename} 'cd ONOS/test-network/mininet; sudo mn -c'
-dsh -g ${basename} 'cd ONOS/test-network/mininet; sudo ./onsdemo.py -n'
diff --git a/cluster-mgmt/template/cassandra.yaml b/cluster-mgmt/template/cassandra.yaml
deleted file mode 100644
index ab79cdc..0000000
--- a/cluster-mgmt/template/cassandra.yaml
+++ /dev/null
@@ -1,568 +0,0 @@
-# Cassandra storage config YAML 
-
-# NOTE:
-#   See http://wiki.apache.org/cassandra/StorageConfiguration for
-#   full explanations of configuration directives
-# /NOTE
-
-# The name of the cluster. This is mainly used to prevent machines in
-# one logical cluster from joining another.
-cluster_name: 'ONOS Test Cluster'
-
-# You should always specify InitialToken when setting up a production
-# cluster for the first time, and often when adding capacity later.
-# The principle is that each node should be given an equal slice of
-# the token ring; see http://wiki.apache.org/cassandra/Operations
-# for more details.
-#
-# If blank, Cassandra will request a token bisecting the range of
-# the heaviest-loaded existing node.  If there is no load information
-# available, such as is the case with a new cluster, it will pick
-# a random token, which will lead to hot spots.
-initial_token:
-
-# See http://wiki.apache.org/cassandra/HintedHandoff
-hinted_handoff_enabled: true
-# this defines the maximum amount of time a dead host will have hints
-# generated.  After it has been dead this long, hints will be dropped.
-max_hint_window_in_ms: 3600000 # one hour
-# Sleep this long after delivering each hint
-hinted_handoff_throttle_delay_in_ms: 1
-
-# The following setting populates the page cache on memtable flush and compaction
-# WARNING: Enable this setting only when the whole node's data fits in memory.
-# Defaults to: false
-# populate_io_cache_on_flush: false
-
-# authentication backend, implementing IAuthenticator; used to identify users
-authenticator: org.apache.cassandra.auth.AllowAllAuthenticator
-
-# authorization backend, implementing IAuthority; used to limit access/provide permissions
-authority: org.apache.cassandra.auth.AllowAllAuthority
-
-# The partitioner is responsible for distributing rows (by key) across
-# nodes in the cluster.  Any IPartitioner may be used, including your
-# own as long as it is on the classpath.  Out of the box, Cassandra
-# provides org.apache.cassandra.dht.RandomPartitioner
-# org.apache.cassandra.dht.ByteOrderedPartitioner,
-# org.apache.cassandra.dht.OrderPreservingPartitioner (deprecated),
-# and org.apache.cassandra.dht.CollatingOrderPreservingPartitioner
-# (deprecated).
-# 
-# - RandomPartitioner distributes rows across the cluster evenly by md5.
-#   When in doubt, this is the best option.
-# - ByteOrderedPartitioner orders rows lexically by key bytes.  BOP allows
-#   scanning rows in key order, but the ordering can generate hot spots
-#   for sequential insertion workloads.
-# - OrderPreservingPartitioner is an obsolete form of BOP, that stores
-# - keys in a less-efficient format and only works with keys that are
-#   UTF8-encoded Strings.
-# - CollatingOPP collates according to EN,US rules rather than lexical byte
-#   ordering.  Use this as an example if you need custom collation.
-#
-# See http://wiki.apache.org/cassandra/Operations for more on
-# partitioners and token selection.
-partitioner: org.apache.cassandra.dht.RandomPartitioner
-
-# directories where Cassandra should store data on disk.
-data_file_directories:
-    - /var/lib/cassandra/data
-
-# commit log
-commitlog_directory: /var/lib/cassandra/commitlog
-
-# Maximum size of the key cache in memory.
-#
-# Each key cache hit saves 1 seek and each row cache hit saves 2 seeks at the
-# minimum, sometimes more. The key cache is fairly tiny for the amount of
-# time it saves, so it's worthwhile to use it at large numbers.
-# The row cache saves even more time, but must store the whole values of
-# its rows, so it is extremely space-intensive. It's best to only use the
-# row cache if you have hot rows or static rows.
-#
-# NOTE: if you reduce the size, you may not get your hottest keys loaded on startup.
-#
-# Default value is empty to make it "auto" (min(5% of Heap (in MB), 100MB)). Set to 0 to disable key cache.
-key_cache_size_in_mb:
-
-# Duration in seconds after which Cassandra should
-# save the key cache. Caches are saved to saved_caches_directory as
-# specified in this configuration file.
-#
-# Saved caches greatly improve cold-start speeds, and is relatively cheap in
-# terms of I/O for the key cache. Row cache saving is much more expensive and
-# has limited use.
-#
-# Default is 14400 or 4 hours.
-key_cache_save_period: 14400
-
-# Number of keys from the key cache to save
-# Disabled by default, meaning all keys are going to be saved
-# key_cache_keys_to_save: 100
-
-# Maximum size of the row cache in memory.
-# NOTE: if you reduce the size, you may not get your hottest keys loaded on startup.
-#
-# Default value is 0, to disable row caching.
-row_cache_size_in_mb: 0
-
-# Duration in seconds after which Cassandra should
-# save the row cache. Caches are saved to saved_caches_directory as specified
-# in this configuration file.
-#
-# Saved caches greatly improve cold-start speeds, and is relatively cheap in
-# terms of I/O for the key cache. Row cache saving is much more expensive and
-# has limited use.
-#
-# Default is 0 to disable saving the row cache.
-row_cache_save_period: 0
-
-# Number of keys from the row cache to save
-# Disabled by default, meaning all keys are going to be saved
-# row_cache_keys_to_save: 100
-
-# The provider for the row cache to use.
-#
-# Supported values are: ConcurrentLinkedHashCacheProvider, SerializingCacheProvider
-#
-# SerializingCacheProvider serialises the contents of the row and stores
-# it in native memory, i.e., off the JVM Heap. Serialized rows take
-# significantly less memory than "live" rows in the JVM, so you can cache
-# more rows in a given memory footprint.  And storing the cache off-heap
-# means you can use smaller heap sizes, reducing the impact of GC pauses.
-#
-# It is also valid to specify the fully-qualified class name to a class
-# that implements org.apache.cassandra.cache.IRowCacheProvider.
-#
-# Defaults to SerializingCacheProvider
-row_cache_provider: SerializingCacheProvider
-
-# saved caches
-saved_caches_directory: /var/lib/cassandra/saved_caches
-
-# commitlog_sync may be either "periodic" or "batch." 
-# When in batch mode, Cassandra won't ack writes until the commit log
-# has been fsynced to disk.  It will wait up to
-# commitlog_sync_batch_window_in_ms milliseconds for other writes, before
-# performing the sync.
-#
-# commitlog_sync: batch
-# commitlog_sync_batch_window_in_ms: 50
-#
-# the other option is "periodic" where writes may be acked immediately
-# and the CommitLog is simply synced every commitlog_sync_period_in_ms
-# milliseconds.
-commitlog_sync: periodic
-commitlog_sync_period_in_ms: 10000
-
-# The size of the individual commitlog file segments.  A commitlog
-# segment may be archived, deleted, or recycled once all the data
-# in it (potentially from each columnfamily in the system) has been
-# flushed to sstables.  
-#
-# The default size is 32, which is almost always fine, but if you are
-# archiving commitlog segments (see commitlog_archiving.properties),
-# then you probably want a finer granularity of archiving; 8 or 16 MB
-# is reasonable.
-commitlog_segment_size_in_mb: 32
-
-# any class that implements the SeedProvider interface and has a
-# constructor that takes a Map<String, String> of parameters will do.
-seed_provider:
-    # Addresses of hosts that are deemed contact points. 
-    # Cassandra nodes use this list of hosts to find each other and learn
-    # the topology of the ring.  You must change this if you are running
-    # multiple nodes!
-    - class_name: org.apache.cassandra.locator.SimpleSeedProvider
-      parameters:
-          # seeds is actually a comma-delimited list of addresses.
-          # Ex: "<ip1>,<ip2>,<ip3>"
-#          - seeds: "10.0.1.243"
-          - seeds: "__SEED__"
-
-# emergency pressure valve: each time heap usage after a full (CMS)
-# garbage collection is above this fraction of the max, Cassandra will
-# flush the largest memtables.  
-#
-# Set to 1.0 to disable.  Setting this lower than
-# CMSInitiatingOccupancyFraction is not likely to be useful.
-#
-# RELYING ON THIS AS YOUR PRIMARY TUNING MECHANISM WILL WORK POORLY:
-# it is most effective under light to moderate load, or read-heavy
-# workloads; under truly massive write load, it will often be too
-# little, too late.
-flush_largest_memtables_at: 0.75
-
-# emergency pressure valve #2: the first time heap usage after a full
-# (CMS) garbage collection is above this fraction of the max,
-# Cassandra will reduce cache maximum _capacity_ to the given fraction
-# of the current _size_.  Should usually be set substantially above
-# flush_largest_memtables_at, since that will have less long-term
-# impact on the system.  
-# 
-# Set to 1.0 to disable.  Setting this lower than
-# CMSInitiatingOccupancyFraction is not likely to be useful.
-reduce_cache_sizes_at: 0.85
-reduce_cache_capacity_to: 0.6
-
-# For workloads with more data than can fit in memory, Cassandra's
-# bottleneck will be reads that need to fetch data from
-# disk. "concurrent_reads" should be set to (16 * number_of_drives) in
-# order to allow the operations to enqueue low enough in the stack
-# that the OS and drives can reorder them.
-#
-# On the other hand, since writes are almost never IO bound, the ideal
-# number of "concurrent_writes" is dependent on the number of cores in
-# your system; (8 * number_of_cores) is a good rule of thumb.
-concurrent_reads: 16
-concurrent_writes: 16
-
-# Total memory to use for memtables.  Cassandra will flush the largest
-# memtable when this much memory is used.
-# If omitted, Cassandra will set it to 1/3 of the heap.
-# memtable_total_space_in_mb: 2048
-
-# Total space to use for commitlogs.  Since commitlog segments are
-# mmapped, and hence use up address space, the default size is 32
-# on 32-bit JVMs, and 1024 on 64-bit JVMs.
-#
-# If space gets above this value (it will round up to the next nearest
-# segment multiple), Cassandra will flush every dirty CF in the oldest
-# segment and remove it.  So a small total commitlog space will tend
-# to cause more flush activity on less-active columnfamilies.
-# commitlog_total_space_in_mb: 4096
-
-# This sets the amount of memtable flush writer threads.  These will
-# be blocked by disk io, and each one will hold a memtable in memory
-# while blocked. If you have a large heap and many data directories,
-# you can increase this value for better flush performance.
-# By default this will be set to the amount of data directories defined.
-#memtable_flush_writers: 1
-
-# the number of full memtables to allow pending flush, that is,
-# waiting for a writer thread.  At a minimum, this should be set to
-# the maximum number of secondary indexes created on a single CF.
-memtable_flush_queue_size: 4
-
-# Whether to, when doing sequential writing, fsync() at intervals in
-# order to force the operating system to flush the dirty
-# buffers. Enable this to avoid sudden dirty buffer flushing from
-# impacting read latencies. Almost always a good idea on SSD:s; not
-# necessarily on platters.
-trickle_fsync: false
-trickle_fsync_interval_in_kb: 10240
-
-# TCP port, for commands and data
-storage_port: 7000
-
-# SSL port, for encrypted communication.  Unused unless enabled in
-# encryption_options
-ssl_storage_port: 7001
-
-# Address to bind to and tell other Cassandra nodes to connect to. You
-# _must_ change this if you want multiple nodes to be able to
-# communicate!
-# 
-# Leaving it blank leaves it up to InetAddress.getLocalHost(). This
-# will always do the Right Thing *if* the node is properly configured
-# (hostname, name resolution, etc), and the Right Thing is to use the
-# address associated with the hostname (it might not be).
-#
-# Setting this to 0.0.0.0 is always wrong.
-listen_address:
-
-# Address to broadcast to other Cassandra nodes
-# Leaving this blank will set it to the same value as listen_address
-# broadcast_address: 1.2.3.4
-
-# The address to bind the Thrift RPC service to -- clients connect
-# here. Unlike ListenAddress above, you *can* specify 0.0.0.0 here if
-# you want Thrift to listen on all interfaces.
-# 
-# Leaving this blank has the same effect it does for ListenAddress,
-# (i.e. it will be based on the configured hostname of the node).
-rpc_address: 0.0.0.0
-# port for Thrift to listen for clients on
-rpc_port: 9160
-
-# enable or disable keepalive on rpc connections
-rpc_keepalive: true
-
-# Cassandra provides three options for the RPC Server:
-#
-# sync  -> One connection per thread in the rpc pool (see below).
-#          For a very large number of clients, memory will be your limiting
-#          factor; on a 64 bit JVM, 128KB is the minimum stack size per thread.
-#          Connection pooling is very, very strongly recommended.
-#
-# async -> Nonblocking server implementation with one thread to serve 
-#          rpc connections.  This is not recommended for high throughput use
-#          cases. Async has been tested to be about 50% slower than sync
-#          or hsha and is deprecated: it will be removed in the next major release.
-#
-# hsha  -> Stands for "half synchronous, half asynchronous." The rpc thread pool 
-#          (see below) is used to manage requests, but the threads are multiplexed
-#          across the different clients.
-#
-# The default is sync because on Windows hsha is about 30% slower.  On Linux,
-# sync/hsha performance is about the same, with hsha of course using less memory.
-rpc_server_type: sync
-
-# Uncomment rpc_min|max|thread to set request pool size.
-# You would primarily set max for the sync server to safeguard against
-# misbehaved clients; if you do hit the max, Cassandra will block until one
-# disconnects before accepting more.  The defaults for sync are min of 16 and max
-# unlimited.
-# 
-# For the Hsha server, the min and max both default to quadruple the number of
-# CPU cores.
-#
-# This configuration is ignored by the async server.
-#
-# rpc_min_threads: 16
-# rpc_max_threads: 2048
-
-# uncomment to set socket buffer sizes on rpc connections
-# rpc_send_buff_size_in_bytes:
-# rpc_recv_buff_size_in_bytes:
-
-# Frame size for thrift (maximum field length).
-# 0 disables TFramedTransport in favor of TSocket. This option
-# is deprecated; we strongly recommend using Framed mode.
-thrift_framed_transport_size_in_mb: 15
-
-# The max length of a thrift message, including all fields and
-# internal thrift overhead.
-thrift_max_message_length_in_mb: 16
-
-# Set to true to have Cassandra create a hard link to each sstable
-# flushed or streamed locally in a backups/ subdirectory of the
-# Keyspace data.  Removing these links is the operator's
-# responsibility.
-incremental_backups: false
-
-# Whether or not to take a snapshot before each compaction.  Be
-# careful using this option, since Cassandra won't clean up the
-# snapshots for you.  Mostly useful if you're paranoid when there
-# is a data format change.
-snapshot_before_compaction: false
-
-# Whether or not a snapshot is taken of the data before keyspace truncation
-# or dropping of column families. The STRONGLY advised default of true 
-# should be used to provide data safety. If you set this flag to false, you will
-# lose data on truncation or drop.
-auto_snapshot: true
-
-# Add column indexes to a row after its contents reach this size.
-# Increase if your column values are large, or if you have a very large
-# number of columns.  The competing causes are, Cassandra has to
-# deserialize this much of the row to read a single column, so you want
-# it to be small - at least if you do many partial-row reads - but all
-# the index data is read for each access, so you don't want to generate
-# that wastefully either.
-column_index_size_in_kb: 64
-
-# Size limit for rows being compacted in memory.  Larger rows will spill
-# over to disk and use a slower two-pass compaction process.  A message
-# will be logged specifying the row key.
-in_memory_compaction_limit_in_mb: 64
-
-# Number of simultaneous compactions to allow, NOT including
-# validation "compactions" for anti-entropy repair.  Simultaneous
-# compactions can help preserve read performance in a mixed read/write
-# workload, by mitigating the tendency of small sstables to accumulate
-# during a single long running compactions. The default is usually
-# fine and if you experience problems with compaction running too
-# slowly or too fast, you should look at
-# compaction_throughput_mb_per_sec first.
-#
-# This setting has no effect on LeveledCompactionStrategy.
-#
-# concurrent_compactors defaults to the number of cores.
-# Uncomment to make compaction mono-threaded, the pre-0.8 default.
-#concurrent_compactors: 1
-
-# Multi-threaded compaction. When enabled, each compaction will use
-# up to one thread per core, plus one thread per sstable being merged.
-# This is usually only useful for SSD-based hardware: otherwise, 
-# your concern is usually to get compaction to do LESS i/o (see:
-# compaction_throughput_mb_per_sec), not more.
-multithreaded_compaction: false
-
-# Throttles compaction to the given total throughput across the entire
-# system. The faster you insert data, the faster you need to compact in
-# order to keep the sstable count down, but in general, setting this to
-# 16 to 32 times the rate you are inserting data is more than sufficient.
-# Setting this to 0 disables throttling. Note that this accounts for all types
-# of compaction, including validation compaction.
-compaction_throughput_mb_per_sec: 16
-
-# Track cached row keys during compaction, and re-cache their new
-# positions in the compacted sstable.  Disable if you use really large
-# key caches.
-compaction_preheat_key_cache: true
-
-# Throttles all outbound streaming file transfers on this node to the
-# given total throughput in Mbps. This is necessary because Cassandra does
-# mostly sequential IO when streaming data during bootstrap or repair, which
-# can lead to saturating the network connection and degrading rpc performance.
-# When unset, the default is 400 Mbps or 50 MB/s.
-# stream_throughput_outbound_megabits_per_sec: 400
-
-# Time to wait for a reply from other nodes before failing the command 
-rpc_timeout_in_ms: 10000
-
-# Enable socket timeout for streaming operation.
-# When a timeout occurs during streaming, streaming is retried from the start
-# of the current file. This *can* involve re-streaming an important amount of
-# data, so you should avoid setting the value too low.
-# Default value is 0, which never timeout streams.
-# streaming_socket_timeout_in_ms: 0
-
-# phi value that must be reached for a host to be marked down.
-# most users should never need to adjust this.
-# phi_convict_threshold: 8
-
-# endpoint_snitch -- Set this to a class that implements
-# IEndpointSnitch.  The snitch has two functions:
-# - it teaches Cassandra enough about your network topology to route
-#   requests efficiently
-# - it allows Cassandra to spread replicas around your cluster to avoid
-#   correlated failures. It does this by grouping machines into
-#   "datacenters" and "racks."  Cassandra will do its best not to have
-#   more than one replica on the same "rack" (which may not actually
-#   be a physical location)
-#
-# IF YOU CHANGE THE SNITCH AFTER DATA IS INSERTED INTO THE CLUSTER,
-# YOU MUST RUN A FULL REPAIR, SINCE THE SNITCH AFFECTS WHERE REPLICAS
-# ARE PLACED.
-#
-# Out of the box, Cassandra provides
-#  - SimpleSnitch:
-#    Treats Strategy order as proximity. This improves cache locality
-#    when disabling read repair, which can further improve throughput.
-#    Only appropriate for single-datacenter deployments.
-#  - PropertyFileSnitch:
-#    Proximity is determined by rack and data center, which are
-#    explicitly configured in cassandra-topology.properties.
-#  - GossipingPropertyFileSnitch
-#    The rack and datacenter for the local node are defined in
-#    cassandra-rackdc.properties and propagated to other nodes via gossip.  If
-#    cassandra-topology.properties exists, it is used as a fallback, allowing
-#    migration from the PropertyFileSnitch.
-#  - RackInferringSnitch:
-#    Proximity is determined by rack and data center, which are
-#    assumed to correspond to the 3rd and 2nd octet of each node's
-#    IP address, respectively.  Unless this happens to match your
-#    deployment conventions (as it did Facebook's), this is best used
-#    as an example of writing a custom Snitch class.
-#  - Ec2Snitch:
-#    Appropriate for EC2 deployments in a single Region.  Loads Region
-#    and Availability Zone information from the EC2 API. The Region is
-#    treated as the Datacenter, and the Availability Zone as the rack.
-#    Only private IPs are used, so this will not work across multiple
-#    Regions.
-#  - Ec2MultiRegionSnitch:
-#    Uses public IPs as broadcast_address to allow cross-region
-#    connectivity.  (Thus, you should set seed addresses to the public
-#    IP as well.) You will need to open the storage_port or
-#    ssl_storage_port on the public IP firewall.  (For intra-Region
-#    traffic, Cassandra will switch to the private IP after
-#    establishing a connection.)
-#
-# You can use a custom Snitch by setting this to the full class name
-# of the snitch, which will be assumed to be on your classpath.
-endpoint_snitch: SimpleSnitch
-
-# controls how often to perform the more expensive part of host score
-# calculation
-dynamic_snitch_update_interval_in_ms: 100 
-# controls how often to reset all host scores, allowing a bad host to
-# possibly recover
-dynamic_snitch_reset_interval_in_ms: 600000
-# if set greater than zero and read_repair_chance is < 1.0, this will allow
-# 'pinning' of replicas to hosts in order to increase cache capacity.
-# The badness threshold will control how much worse the pinned host has to be
-# before the dynamic snitch will prefer other replicas over it.  This is
-# expressed as a double which represents a percentage.  Thus, a value of
-# 0.2 means Cassandra would continue to prefer the static snitch values
-# until the pinned host was 20% worse than the fastest.
-dynamic_snitch_badness_threshold: 0.1
-
-# request_scheduler -- Set this to a class that implements
-# RequestScheduler, which will schedule incoming client requests
-# according to the specific policy. This is useful for multi-tenancy
-# with a single Cassandra cluster.
-# NOTE: This is specifically for requests from the client and does
-# not affect inter node communication.
-# org.apache.cassandra.scheduler.NoScheduler - No scheduling takes place
-# org.apache.cassandra.scheduler.RoundRobinScheduler - Round robin of
-# client requests to a node with a separate queue for each
-# request_scheduler_id. The scheduler is further customized by
-# request_scheduler_options as described below.
-request_scheduler: org.apache.cassandra.scheduler.NoScheduler
-
-# Scheduler Options vary based on the type of scheduler
-# NoScheduler - Has no options
-# RoundRobin
-#  - throttle_limit -- The throttle_limit is the number of in-flight
-#                      requests per client.  Requests beyond 
-#                      that limit are queued up until
-#                      running requests can complete.
-#                      The value of 80 here is twice the number of
-#                      concurrent_reads + concurrent_writes.
-#  - default_weight -- default_weight is optional and allows for
-#                      overriding the default which is 1.
-#  - weights -- Weights are optional and will default to 1 or the
-#               overridden default_weight. The weight translates into how
-#               many requests are handled during each turn of the
-#               RoundRobin, based on the scheduler id.
-#
-# request_scheduler_options:
-#    throttle_limit: 80
-#    default_weight: 5
-#    weights:
-#      Keyspace1: 1
-#      Keyspace2: 5
-
-# request_scheduler_id -- An identifier based on which to perform
-# the request scheduling. Currently the only valid option is keyspace.
-# request_scheduler_id: keyspace
-
-# index_interval controls the sampling of entries from the primary
-# row index in terms of space versus time.  The larger the interval,
-# the smaller and less effective the sampling will be.  In technical
-# terms, the interval corresponds to the number of index entries that
-# are skipped between taking each sample.  All the sampled entries
-# must fit in memory.  Generally, a value between 128 and 512 here
-# coupled with a large key cache size on CFs results in the best trade
-# offs.  This value is not often changed, however if you have many
-# very small rows (many to an OS page), then increasing this will
-# often lower memory usage without an impact on performance.
-index_interval: 128
-
-# Enable or disable inter-node encryption
-# Default settings are TLS v1, RSA 1024-bit keys (it is imperative that
-# users generate their own keys) TLS_RSA_WITH_AES_128_CBC_SHA as the cipher
-# suite for authentication, key exchange and encryption of the actual data transfers.
-# NOTE: No custom encryption options are enabled at the moment
-# The available internode options are : all, none, dc, rack
-#
-# If set to dc cassandra will encrypt the traffic between the DCs
-# If set to rack cassandra will encrypt the traffic between the racks
-#
-# The passwords used in these options must match the passwords used when generating
-# the keystore and truststore.  For instructions on generating these files, see:
-# http://download.oracle.com/javase/6/docs/technotes/guides/security/jsse/JSSERefGuide.html#CreateKeystore
-#
-encryption_options:
-    internode_encryption: none
-    keystore: conf/.keystore
-    keystore_password: cassandra
-    truststore: conf/.truststore
-    truststore_password: cassandra
-    # More advanced defaults below:
-    # protocol: TLS
-    # algorithm: SunX509
-    # store_type: JKS
-    # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA]
diff --git a/cluster-mgmt/template/hosts b/cluster-mgmt/template/hosts
deleted file mode 100644
index 220b30f..0000000
--- a/cluster-mgmt/template/hosts
+++ /dev/null
@@ -1,11 +0,0 @@
-127.0.0.1 localhost
-
-# The following lines are desirable for IPv6 capable hosts
-::1 ip6-localhost ip6-loopback
-fe00::0 ip6-localnet
-ff00::0 ip6-mcastprefix
-ff02::1 ip6-allnodes
-ff02::2 ip6-allrouters
-ff02::3 ip6-allhosts
-
-## For ONOS Development
diff --git a/cluster-mgmt/template/onsdemo_core.py b/cluster-mgmt/template/onsdemo_core.py
deleted file mode 100755
index a0e184e..0000000
--- a/cluster-mgmt/template/onsdemo_core.py
+++ /dev/null
@@ -1,157 +0,0 @@
-#!/usr/bin/python
-
-"""
-Start up a Simple topology
-"""
-from mininet.net import Mininet
-from mininet.node import Controller, RemoteController
-from mininet.log import setLogLevel, info, error, warn, debug
-from mininet.cli import CLI
-from mininet.topo import Topo
-from mininet.util import quietRun
-from mininet.moduledeps import pathCheck
-from mininet.link import Link, TCLink
-
-from sys import exit
-import os.path
-from subprocess import Popen, STDOUT, PIPE
-
-import sys
-
-
-#import argparse
-
-class MyController( Controller ):
-    def __init__( self, name, ip='127.0.0.1', port=6633, **kwargs):
-        """Init.
-           name: name to give controller
-           ip: the IP address where the remote controller is
-           listening
-           port: the port where the remote controller is listening"""
-        Controller.__init__( self, name, ip=ip, port=port, **kwargs )
-
-    def start( self ):
-        "Overridden to do nothing."
-        return
-
-    def stop( self ):
-        "Overridden to do nothing."
-        return
-
-    def checkListening( self ):
-        "Warn if remote controller is not accessible"
-        listening = self.cmd( "echo A | telnet -e A %s %d" %
-                              ( self.ip, self.port ) )
-        if 'Unable' in listening:
-            warn( "Unable to contact the remote controller"
-                  " at %s:%d\n" % ( self.ip, self.port ) )
-
-class SDNTopo( Topo ):
-    "SDN Topology"
-
-    def __init__( self, *args, **kwargs ):
-        Topo.__init__( self, *args, **kwargs )
-        sw1 = self.addSwitch('sw1', dpid='0000000000000101')
-        sw2 = self.addSwitch('sw2', dpid='0000000000000102')
-        sw3 = self.addSwitch('sw3', dpid='0000000000000103')
-        sw4 = self.addSwitch('sw4', dpid='0000000000000104')
-        sw5 = self.addSwitch('sw5', dpid='0000000000000105')
-        sw6 = self.addSwitch('sw6', dpid='0000000000000106')
-
-        host1 = self.addHost( 'host1' )
-        host2 = self.addHost( 'host2' )
-        host3 = self.addHost( 'host3' )
-        host4 = self.addHost( 'host4' )
-        host5 = self.addHost( 'host5' )
-        host6 = self.addHost( 'host6' )
-
-        self.addLink( host1, sw1 )
-        self.addLink( host2, sw2 )
-        self.addLink( host3, sw3 )
-        self.addLink( host4, sw4 )
-        self.addLink( host5, sw5 )
-        self.addLink( host6, sw6 )
-
-        self.addLink( sw1, sw2 )
-        self.addLink( sw1, sw6 )
-        self.addLink( sw2, sw3 )
-        self.addLink( sw3, sw4 )
-        self.addLink( sw3, sw6 )
-        self.addLink( sw4, sw5 )
-        self.addLink( sw5, sw6 )
-        self.addLink( sw4, sw6 )
-
-def startsshd( host ):
-    "Start sshd on host"
-    info( '*** Starting sshd\n' )
-    name, intf, ip = host.name, host.defaultIntf(), host.IP()
-    banner = '/tmp/%s.banner' % name
-    host.cmd( 'echo "Welcome to %s at %s" >  %s' % ( name, ip, banner ) )
-    host.cmd( '/usr/sbin/sshd -o "Banner %s"' % banner, '-o "UseDNS no"' )
-    info( '***', host.name, 'is running sshd on', intf, 'at', ip, '\n' )
-
-def startsshds ( hosts ):
-    for h in hosts:
-        startsshd( h )
-
-def stopsshd( ):
-    "Stop *all* sshd processes with a custom banner"
-    info( '*** Shutting down stale sshd/Banner processes ',
-          quietRun( "pkill -9 -f Banner" ), '\n' )
-
-def sdnnet(opt):
-#    os.system('/home/ubuntu/openflow/controller/controller ptcp: &')
-#    os.system('/home/ubuntu/openflow/controller/controller ptcp:7000 &')
-
-    topo = SDNTopo()
-    info( '*** Creating network\n' )
-#    net = Mininet( topo=topo, controller=RemoteController )
-    net = Mininet( topo=topo, controller=MyController, link=TCLink)
-#    dc = DebugController('c3', ip='127.0.0.1', port=7000)
-#    net.addController(dc)
-#    net.addController(controller=RemoteController)
-
-    host1, host2, host3, host4, host5, host6 = net.get( 'host1', 'host2', 'host3', 'host4', 'host5', 'host6')
-
-    ## Adding 2nd, 3rd and 4th interface to host1 connected to sw1 (for another BGP peering)
-    sw1 = net.get('sw1')
-    sw2 = net.get('sw2')
-    sw3 = net.get('sw3')
-    sw4 = net.get('sw4')
-    sw5 = net.get('sw5')
-    sw6 = net.get('sw6')
-
-    net.start()
-
-    sw2.attach('tap01_2')
-    sw3.attach('tap01_3')
-    sw4.attach('tap01_4')
-    sw4.attach('tap01_5')
-    sw5.attach('tap01_6')
-    sw6.attach('tap01_7')
-    sw1.attach('tap01_8')
-
-    host1.defaultIntf().setIP('192.168.100.141/16') 
-    host2.defaultIntf().setIP('192.168.100.142/16')
-    host3.defaultIntf().setIP('192.168.100.143/16')
-    host4.defaultIntf().setIP('192.168.100.144/16')
-    host5.defaultIntf().setIP('192.168.100.145/16')
-    host6.defaultIntf().setIP('192.168.100.146/16')
-
-    hosts = [ host1, host2, host3, host4, host5, host6 ]
-    stopsshd ()
-    startsshds ( hosts )
-
-    if opt=="cli":
-        CLI(net)
-        stopsshd()
-        net.stop()
-
-if __name__ == '__main__':
-    setLogLevel( 'info' )
-    if len(sys.argv) == 1:
-      sdnnet("cli")
-    elif len(sys.argv) == 2 and sys.argv[1] == "-n":
-      sdnnet("nocli")
-    else:
-      print "%s [-n]" % sys.argv[0]
diff --git a/cluster-mgmt/template/onsdemo_core.py.devA b/cluster-mgmt/template/onsdemo_core.py.devA
deleted file mode 100755
index ad74e4b..0000000
--- a/cluster-mgmt/template/onsdemo_core.py.devA
+++ /dev/null
@@ -1,158 +0,0 @@
-#!/usr/bin/python
-
-"""
-Start up a Simple topology
-"""
-from mininet.net import Mininet
-from mininet.node import Controller, RemoteController
-from mininet.log import setLogLevel, info, error, warn, debug
-from mininet.cli import CLI
-from mininet.topo import Topo
-from mininet.util import quietRun
-from mininet.moduledeps import pathCheck
-from mininet.link import Link, TCLink
-
-from sys import exit
-import os.path
-from subprocess import Popen, STDOUT, PIPE
-
-import sys
-
-
-#import argparse
-
-class MyController( Controller ):
-    def __init__( self, name, ip='127.0.0.1', port=6633, **kwargs):
-        """Init.
-           name: name to give controller
-           ip: the IP address where the remote controller is
-           listening
-           port: the port where the remote controller is listening"""
-        Controller.__init__( self, name, ip=ip, port=port, **kwargs )
-
-    def start( self ):
-        "Overridden to do nothing."
-        return
-
-    def stop( self ):
-        "Overridden to do nothing."
-        return
-
-    def checkListening( self ):
-        "Warn if remote controller is not accessible"
-        listening = self.cmd( "echo A | telnet -e A %s %d" %
-                              ( self.ip, self.port ) )
-        if 'Unable' in listening:
-            warn( "Unable to contact the remote controller"
-                  " at %s:%d\n" % ( self.ip, self.port ) )
-
-class SDNTopo( Topo ):
-    "SDN Topology"
-
-    def __init__( self, *args, **kwargs ):
-        Topo.__init__( self, *args, **kwargs )
-
-        sw5 = self.addSwitch('sw5', dpid='00000000ba5eba13')
-        sw2 = self.addSwitch('sw2', dpid='00000000ba5eba11')
-        sw3 = self.addSwitch('sw3', dpid='00000008a208f901')
-        sw4 = self.addSwitch('sw4', dpid='000000000000ba12')
-        sw6 = self.addSwitch('sw6', dpid='0000204e7f518a35')
-        sw1 = self.addSwitch('sw1', dpid='0000001697089a46')
-
-        host1 = self.addHost( 'host1' )
-        host2 = self.addHost( 'host2' )
-        host3 = self.addHost( 'host3' )
-        host4 = self.addHost( 'host4' )
-        host5 = self.addHost( 'host5' )
-        host6 = self.addHost( 'host6' )
-
-        self.addLink( host1, sw1 )
-        self.addLink( host2, sw2 )
-        self.addLink( host3, sw3 )
-        self.addLink( host4, sw4 )
-        self.addLink( host5, sw5 )
-        self.addLink( host6, sw6 )
-
-        self.addLink( sw1, sw2 )
-        self.addLink( sw1, sw6 )
-        self.addLink( sw2, sw3 )
-        self.addLink( sw3, sw4 )
-        self.addLink( sw3, sw6 )
-        self.addLink( sw4, sw5 )
-        self.addLink( sw5, sw6 )
-        self.addLink( sw4, sw6 )
-
-def startsshd( host ):
-    "Start sshd on host"
-    info( '*** Starting sshd\n' )
-    name, intf, ip = host.name, host.defaultIntf(), host.IP()
-    banner = '/tmp/%s.banner' % name
-    host.cmd( 'echo "Welcome to %s at %s" >  %s' % ( name, ip, banner ) )
-    host.cmd( '/usr/sbin/sshd -o "Banner %s"' % banner, '-o "UseDNS no"' )
-    info( '***', host.name, 'is running sshd on', intf, 'at', ip, '\n' )
-
-def startsshds ( hosts ):
-    for h in hosts:
-        startsshd( h )
-
-def stopsshd( ):
-    "Stop *all* sshd processes with a custom banner"
-    info( '*** Shutting down stale sshd/Banner processes ',
-          quietRun( "pkill -9 -f Banner" ), '\n' )
-
-def sdnnet(opt):
-#    os.system('/home/ubuntu/openflow/controller/controller ptcp: &')
-#    os.system('/home/ubuntu/openflow/controller/controller ptcp:7000 &')
-
-    topo = SDNTopo()
-    info( '*** Creating network\n' )
-#    net = Mininet( topo=topo, controller=RemoteController )
-    net = Mininet( topo=topo, controller=MyController, link=TCLink)
-#    dc = DebugController('c3', ip='127.0.0.1', port=7000)
-#    net.addController(dc)
-#    net.addController(controller=RemoteController)
-
-    host1, host2, host3, host4, host5, host6 = net.get( 'host1', 'host2', 'host3', 'host4', 'host5', 'host6')
-
-    ## Adding 2nd, 3rd and 4th interface to host1 connected to sw1 (for another BGP peering)
-    sw1 = net.get('sw1')
-    sw2 = net.get('sw2')
-    sw3 = net.get('sw3')
-    sw4 = net.get('sw4')
-    sw5 = net.get('sw5')
-    sw6 = net.get('sw6')
-
-    net.start()
-
-    sw2.attach('tap01_2')
-    sw3.attach('tap01_3')
-    sw4.attach('tap01_4')
-    sw4.attach('tap01_5')
-    sw5.attach('tap01_6')
-    sw6.attach('tap01_7')
-    sw1.attach('tap01_8')
-
-    host1.defaultIntf().setIP('192.168.100.141/16') 
-    host2.defaultIntf().setIP('192.168.100.142/16')
-    host3.defaultIntf().setIP('192.168.100.143/16')
-    host4.defaultIntf().setIP('192.168.100.144/16')
-    host5.defaultIntf().setIP('192.168.100.145/16')
-    host6.defaultIntf().setIP('192.168.100.146/16')
-
-    hosts = [ host1, host2, host3, host4, host5, host6 ]
-    stopsshd ()
-    startsshds ( hosts )
-
-    if opt=="cli":
-        CLI(net)
-        stopsshd()
-        net.stop()
-
-if __name__ == '__main__':
-    setLogLevel( 'info' )
-    if len(sys.argv) == 1:
-      sdnnet("cli")
-    elif len(sys.argv) == 2 and sys.argv[1] == "-n":
-      sdnnet("nocli")
-    else:
-      print "%s [-n]" % sys.argv[0]
diff --git a/cluster-mgmt/template/onsdemo_edge_template.py b/cluster-mgmt/template/onsdemo_edge_template.py
deleted file mode 100755
index cc40a5a..0000000
--- a/cluster-mgmt/template/onsdemo_edge_template.py
+++ /dev/null
@@ -1,157 +0,0 @@
-#!/usr/bin/python
-NWID=__NWID__
-NR_NODES=__NRSW__
-Controllers=[{"ip":"127.0.0.1", "port":6633}]
-
-"""
-Start up a Simple topology
-"""
-from mininet.net import Mininet
-from mininet.node import Controller, RemoteController
-from mininet.log import setLogLevel, info, error, warn, debug
-from mininet.cli import CLI
-from mininet.topo import Topo
-from mininet.util import quietRun
-from mininet.moduledeps import pathCheck
-from mininet.link import Link, TCLink
-
-from sys import exit
-import os.path
-from subprocess import Popen, STDOUT, PIPE
-
-import sys
-
-import argparse
-
-class MyController( Controller ):
-    def __init__( self, name, ip='127.0.0.1', port=6633, **kwargs):
-        """Init.
-           name: name to give controller
-           ip: the IP address where the remote controller is
-           listening
-           port: the port where the remote controller is listening"""
-        Controller.__init__( self, name, ip=ip, port=port, **kwargs )
-
-    def start( self ):
-        "Overridden to do nothing."
-        return
-
-    def stop( self ):
-        "Overridden to do nothing."
-        return
-
-    def checkListening( self ):
-        "Warn if remote controller is not accessible"
-        listening = self.cmd( "echo A | telnet -e A %s %d" %
-                              ( self.ip, self.port ) )
-        if 'Unable' in listening:
-            warn( "Unable to contact the remote controller"
-                  " at %s:%d\n" % ( self.ip, self.port ) )
-
-class SDNTopo( Topo ):
-    "SDN Topology"
-
-    def __init__( self, *args, **kwargs ):
-        Topo.__init__( self, *args, **kwargs )
-
-        switch = []
-        host = []
-
-        for i in range (NR_NODES):
-            name_suffix = '%02d' % NWID + "." + '%02d' % (int(i)+1)
-            dpid_suffix = '%02x' % NWID + '%02x' % (int(i)+1)
-            dpid = '0000' + '0000' + '0000' + dpid_suffix
-            sw = self.addSwitch('sw'+name_suffix, dpid=dpid)
-            switch.append(sw)
-
-        for i in range (NR_NODES):
-            host.append(self.addHost( 'host%d.%d' % (NWID, int(i)+1) ))
-
-        for i in range (NR_NODES):
-            self.addLink(host[i], switch[i])
-
-        for i in range (1, NR_NODES):
-            self.addLink(switch[0], switch[i])
-
-def startsshd( host ):
-    "Start sshd on host"
-    info( '*** Starting sshd\n' )
-    name, intf, ip = host.name, host.defaultIntf(), host.IP()
-    banner = '/tmp/%s.banner' % name
-    host.cmd( 'echo "Welcome to %s at %s" >  %s' % ( name, ip, banner ) )
-    host.cmd( '/usr/sbin/sshd -o "Banner %s"' % banner, '-o "UseDNS no"' )
-    info( '***', host.name, 'is running sshd on', intf, 'at', ip, '\n' )
-
-def startsshds ( hosts ):
-    for h in hosts:
-        startsshd( h )
-
-def startiperf( host ):
-    host.cmd( '/usr/bin/iperf', '-s &' )
-
-def startiperfs ( hosts ):
-    for h in hosts:
-        startiperf( h )
-
-def stopiperf( ):
-    quietRun( "pkill -9 iperf" )
-
-def stopsshd( ):
-    "Stop *all* sshd processes with a custom banner"
-    info( '*** Shutting down stale sshd/Banner processes ',
-          quietRun( "pkill -9 -f Banner" ), '\n' )
-
-def sdnnet(nocli, noarp):
-    topo = SDNTopo()
-    info( '*** Creating network\n' )
-    net = Mininet( topo=topo, controller=MyController, link=TCLink)
-    #net = Mininet( topo=topo, link=TCLink, build=False)
-    #controllers=[]
-    #for c in Controllers:
-    #  rc = RemoteController('c%d' % Controllers.index(c), ip=c['ip'],port=c['port'])
-    #  print "controller ip %s port %s" % (c['ip'], c['port'])
-    #  controllers.append(rc)
-
-    #net.controllers=controllers
-    #net.build()
-
-    host = []
-    for i in range (NR_NODES):
-      host.append(net.get( 'host%d.%d' % (NWID, (int(i)+1)) ))
-
-    net.start()
-
-    sw=net.get('sw%02x.%02x' % (NWID,1))
-    print "center sw", sw
-    sw.attach('tap%02x_1' % NWID)
-
-    for i in range (NR_NODES):
-        host[i].defaultIntf().setIP('192.168.%d.%d/16' % (NWID,(int(i)+1))) 
-        host[i].defaultIntf().setMAC('00:00:%02x:%02x:%02x:%02x' % (192,168,NWID,(int(i)+1))) 
-
-
-    if noarp == False:
-      for i in range (NR_NODES):
-        for n in range (2,9):
-          for h in range (25):
-            host[i].setARP('192.168.%d.%d' % (n, (int(h)+1)), '00:00:%02x:%02x:%02x:%02x' % (192,168,n,(int(h)+1))) 
-
-    stopsshd ()
-#    stopiperf ()
-    startsshds ( host )
-#    startiperfs ( host )
-
-    if nocli == False:
-        CLI(net)
-        stopsshd()
-        net.stop()
-
-if __name__ == '__main__':
-    setLogLevel( 'info' )
-    parser = argparse.ArgumentParser(description='mininet script')
-    parser.add_argument('-x', dest='noarp', action='store_true',
-                     help='do not create static arp entries')
-    parser.add_argument('-n', dest='nocli', action='store_true',
-                     help='do not run cli')
-    args = parser.parse_args()
-    sdnnet(args.nocli, args.noarp)
diff --git a/cluster-mgmt/template/tunnel_onsdemo_core_template.sh b/cluster-mgmt/template/tunnel_onsdemo_core_template.sh
deleted file mode 100755
index d697c6c..0000000
--- a/cluster-mgmt/template/tunnel_onsdemo_core_template.sh
+++ /dev/null
@@ -1,51 +0,0 @@
-#!/bin/bash
-
-TUNNEL=( "2 2 __basename__2" "3 3 __basename__3" "4 4 __basename__4" "5 5 __basename__5" "6 6 __basename__6" "7 7 __basename__7" "8 8 __basename__8")
-NW_ID=01
-
-start () {
-  ## Modify ##
-  ulimit -c
-  for (( i = 0; i< ${#TUNNEL[@]}; i ++)); do
-    t=`echo ${TUNNEL[$i]}`
-    ifnr=`echo $t | awk '{print $1}'`
-    tun_tag=`echo $t | awk '{print $2}'`
-    tun_end_=`echo $t | awk '{print $3}'`
-    tun_end=`eval echo '$'$tun_end_`
-    ifconfig tap${NW_ID}_${ifnr}
-    echo "ifconfig tap${NW_ID}_${ifnr}"
-    if [ $? -ne 0 ]; then
-      echo "creating tap${NW_ID}_${ifnr}"
-      sudo tunctl -t tap${NW_ID}_${ifnr} 
-    fi
-    echo "./capsulator -v -d -t eth0 -f ${tun_end} -vb tap${NW_ID}_${ifnr}#${tun_tag}"
-    sudo ifconfig tap${NW_ID}_${ifnr} 0.0.0.0 up > /dev/null 2>&1
-    sudo ./capsulator -v -d -t eth0 -f ${tun_end} -vb tap${NW_ID}_${ifnr}#${tun_tag}  > /dev/null 2>&1 &
-  done
-}
-
-stop () {
-  sudo pkill capsulator
-}
-
-case "$1" in
-  start | restart)
-    stop
-    start
-    ;;
-  stop)
-    stop
-    ;;
-  status)
-    nr=`pgrep capsulator | wc -l`
-    if [ $nr -gt 0 ]; then
-      echo "$nr tunnel(s) is running"
-    else
-      echo "tunnel is not running"
-    fi
-    ;;
-  *)
-    echo "Usage: $0 {start|stop|restart|status}" >&2
-    exit 1
-    ;;
-esac
diff --git a/cluster-mgmt/template/tunnel_onsdemo_edge_template.sh b/cluster-mgmt/template/tunnel_onsdemo_edge_template.sh
deleted file mode 100755
index bbd7274..0000000
--- a/cluster-mgmt/template/tunnel_onsdemo_edge_template.sh
+++ /dev/null
@@ -1,52 +0,0 @@
-#!/bin/bash
-
-#TUNNEL=("0 1 VIP_ONOS10" "1 2 VIP_ONOS10") 
-__TUNNEL__
-NW_ID=0__NWID__
-
-start () {
-  ## Modify ##
-  ulimit -c
-  for (( i = 0; i< ${#TUNNEL[@]}; i ++)); do
-    t=`echo ${TUNNEL[$i]}`
-    ifnr=`echo $t | awk '{print $1}'`
-    tun_tag=`echo $t | awk '{print $2}'`
-    tun_end_=`echo $t | awk '{print $3}'`
-    tun_end=`eval echo '$'$tun_end_`
-    ifconfig tap${NW_ID}_${ifnr}
-    echo "ifconfig tap${NW_ID}_${ifnr}"
-    if [ $? -ne 0 ]; then
-      echo "creating tap${NW_ID}_${ifnr}"
-      sudo tunctl -t tap${NW_ID}_${ifnr} 
-    fi
-    echo "./capsulator -v -d -t eth0 -f ${tun_end} -vb tap${NW_ID}_${ifnr}#${tun_tag}"
-    sudo ifconfig tap${NW_ID}_${ifnr} 0.0.0.0 up > /dev/null 2>&1
-    sudo ./capsulator -v -d -t eth0 -f ${tun_end} -vb tap${NW_ID}_${ifnr}#${tun_tag}  > /dev/null 2>&1 &
-  done
-}
-
-stop () {
-  sudo pkill capsulator
-}
-
-case "$1" in
-  start | restart)
-    stop
-    start
-    ;;
-  stop)
-    stop
-    ;;
-  status)
-    nr=`pgrep capsulator | wc -l`
-    if [ $nr -gt 0 ]; then
-      echo "$nr tunnel(s) is running"
-    else
-      echo "tunnel is not running"
-    fi
-    ;;
-  *)
-    echo "Usage: $0 {start|stop|restart|status}" >&2
-    exit 1
-    ;;
-esac
diff --git a/cluster-mgmt/template/zoo.cfg b/cluster-mgmt/template/zoo.cfg
deleted file mode 100644
index e1ab8c3..0000000
--- a/cluster-mgmt/template/zoo.cfg
+++ /dev/null
@@ -1,41 +0,0 @@
-# The number of milliseconds of each tick
-tickTime=2000
-# The number of ticks that the initial 
-# synchronization phase can take
-initLimit=10
-# The number of ticks that can pass between 
-# sending a request and getting an acknowledgement
-syncLimit=5
-# the directory where the snapshot is stored.
-# do not use /tmp for storage, /tmp here is just 
-# example sakes.
-dataDir=/var/lib/zookeeper
-# the port at which the clients will connect
-clientPort=2181
-#
-# specify all servers in the Zookeeper ensemble
-
-#server.1=onosgui1:2888:3888
-#server.2=onosgui2:2888:3888
-#server.3=onosgui3:2888:3888
-#server.4=onosgui4:2888:3888
-#server.5=onosgui5:2888:3888
-#server.6=onosgui6:2888:3888
-#server.7=onosgui7:2888:3888
-#server.8=onosgui8:2888:3888
-#
-#
-# Be sure to read the maintenance section of the 
-# administrator guide before turning on autopurge.
-#
-#
-# http://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_maintenance
-#
-# The number of snapshots to retain in dataDir
-#autopurge.snapRetainCount=3
-# Purge task interval in hours
-# Set to "0" to disable auto purge feature
-#autopurge.purgeInterval=1