Add cluster management scripts
diff --git a/cluster-mgmt/README.txt b/cluster-mgmt/README.txt
new file mode 100644
index 0000000..974e3d2
--- /dev/null
+++ b/cluster-mgmt/README.txt
@@ -0,0 +1,32 @@
+########
+Cluster Management Tools
+########
+
+ssh : files to be placed in each node's .ssh directory (do not change the file permissions)
+bash_profile: can be used as the $HOME/.bash_profile file. Change the following lines as necessary:
+
+export RCP_USER=ubuntu
+export RCMD_CMD=ssh
+export RCMD_CMD_ARGS="-i $HOME/.ssh/onlabkey.pem"
+export RCMD_USER=ubuntu
+export RCP_CMD="scp -i $HOME/.ssh/onlabkey.pem -o StrictHostKeyChecking=no"
+export FANOUT=64
+export CLUSTER="$HOME/cluster-mgmt/cluster.txt"
+
+### Set the proper values ###
+export ONOS_CLUSTER_BASENAME="onosdevx"
+export ONOS_CLUSTER_NR_NODES=8
+
+bin/start.sh : shut down all services, then restart them
+bin/stop.sh : shut down all services
+bin/status.sh : show the status of the services
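+
+Example workflow (illustrative; the 8-node cluster name "onosdevx" and the
+hosts file name are assumptions -- substitute your own values):
+
+  cp cluster-mgmt/bash_profile ~/.bash_profile   # then edit the exports above
+  cd ~/cluster-mgmt
+  ./make-config.sh 8 onosdevx hosts.txt   # generate per-node configs and cluster.txt
+  ./cp-config.sh 8 onosdevx               # push configs and ssh keys to every node
+  bin/start.sh                            # (re)start ZooKeeper, Cassandra and ONOS
+  bin/status.sh                           # check that all services are up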
diff --git a/cluster-mgmt/bash_profile b/cluster-mgmt/bash_profile
new file mode 100644
index 0000000..e9f2a39
--- /dev/null
+++ b/cluster-mgmt/bash_profile
@@ -0,0 +1,29 @@
+# .bash_profile
+
+# Get the aliases and functions
+if [ -f ~/.bashrc ]; then
+	. ~/.bashrc
+fi
+
+# User specific environment and startup programs
+
+PATH=$PATH:$HOME/bin
+
+export PATH
+
+### Cluster-IT settings ###
+export RCP_USER=ubuntu
+export RCMD_CMD=ssh
+export RCMD_CMD_ARGS="-i $HOME/.ssh/onlabkey.pem"
+export RCMD_USER=ubuntu
+export RCP_CMD="scp -i $HOME/.ssh/onlabkey.pem -o StrictHostKeyChecking=no"
+export FANOUT=64
+export CLUSTER="$HOME/cluster-mgmt/cluster.txt"
+
+#### Set the proper value ####
+#export ONOS_CLUSTER_BASENAME="onosdevx"
+#export ONOS_CLUSTER_NR_NODES=8
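+
+# With the settings above, dsh/pcp fan out commands and file copies over the
+# hosts listed in $CLUSTER, e.g. (illustrative):
+#   dsh -g $ONOS_CLUSTER_BASENAME 'uname -a'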
diff --git a/cluster-mgmt/bin/start.sh b/cluster-mgmt/bin/start.sh
new file mode 100755
index 0000000..bd65df7
--- /dev/null
+++ b/cluster-mgmt/bin/start.sh
@@ -0,0 +1,13 @@
+#! /bin/bash
+. $HOME/cluster-mgmt/func.sh
+
+onos stop
+cassandra cleandb
+cassandra stop
+zk stop
+
+zk start
+cassandra start
+cassandra cleandb
+onos start
+dsh -g $basename 'cd ONOS; ./ctrl-local.sh'
diff --git a/cluster-mgmt/bin/status.sh b/cluster-mgmt/bin/status.sh
new file mode 100755
index 0000000..880bf10
--- /dev/null
+++ b/cluster-mgmt/bin/status.sh
@@ -0,0 +1,9 @@
+#! /bin/bash
+. $HOME/cluster-mgmt/func.sh
+
+# basename and nr_nodes are exported by func.sh
+# (from ONOS_CLUSTER_BASENAME and ONOS_CLUSTER_NR_NODES)
+
+onos status
+cassandra status
+zk status
diff --git a/cluster-mgmt/bin/stop.sh b/cluster-mgmt/bin/stop.sh
new file mode 100755
index 0000000..a8f64ca
--- /dev/null
+++ b/cluster-mgmt/bin/stop.sh
@@ -0,0 +1,10 @@
+#! /bin/bash
+. $HOME/cluster-mgmt/func.sh
+
+# basename and nr_nodes are exported by func.sh
+# (from ONOS_CLUSTER_BASENAME and ONOS_CLUSTER_NR_NODES)
+
+onos stop
+cassandra cleandb
+cassandra stop
+zk stop
diff --git a/cluster-mgmt/common/hosts b/cluster-mgmt/common/hosts
new file mode 100644
index 0000000..220b30f
--- /dev/null
+++ b/cluster-mgmt/common/hosts
@@ -0,0 +1,11 @@
+127.0.0.1 localhost
+
+# The following lines are desirable for IPv6 capable hosts
+::1 ip6-localhost ip6-loopback
+fe00::0 ip6-localnet
+ff00::0 ip6-mcastprefix
+ff02::1 ip6-allnodes
+ff02::2 ip6-allrouters
+ff02::3 ip6-allhosts
+
+## For ONOS Development
diff --git a/cluster-mgmt/common/known_hosts b/cluster-mgmt/common/known_hosts
new file mode 100644
index 0000000..bc8d892
--- /dev/null
+++ b/cluster-mgmt/common/known_hosts
@@ -0,0 +1,2 @@
+|1|vpuCVwBaUAW338i8XkTyuZpPn3o=|OEtDpg0rUr4I6MJrPU3UgO6xIjY= ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ==
+|1|oQEfymNRsrXOo9uHu/jCST0f0I0=|UqxLCIvwPdgIlZWmusieRLCzRxE= ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAvCFIZTznMUPbS/r6b0Gw9jcnOBbH21wcBKETjXg9U5bMwHz2ocnEK8PPL1EK8uUTjZ3Kbilx4Jeio8HXEWtUkyOF/KyW1nXd0mxrWqqGQjFlpPj017Wfo0KIISgCWB9L8RJJ3aJ0selZwvmdHmg7uS306UGsJf1co2qubLGMAsdjPhYpvKXSJHoThupHBCuoqqOw80Tt5b3qJ6RwFjt/QiCgom9KoQn2DMQhS0iB9h5NHpejDX9/qLgFFiF3PdXaBCTE+vFLvoXwecp/x3pP2c8zA6FhCzYbZxLYMdMHqSmJRSKALWU3Qg9ekdXUBfzrLs4lPQ6UGFcku9WBAtN7oQ==
diff --git a/cluster-mgmt/common/onos.properties b/cluster-mgmt/common/onos.properties
new file mode 100644
index 0000000..1828db7
--- /dev/null
+++ b/cluster-mgmt/common/onos.properties
@@ -0,0 +1,18 @@
+floodlight.modules = net.floodlightcontroller.storage.memory.MemoryStorageSource,\
+net.floodlightcontroller.core.FloodlightProvider,\
+net.floodlightcontroller.threadpool.ThreadPool,\
+net.floodlightcontroller.devicemanager.internal.DeviceManagerImpl,\
+net.floodlightcontroller.staticflowentry.StaticFlowEntryPusher,\
+net.floodlightcontroller.firewall.Firewall,\
+net.floodlightcontroller.jython.JythonDebugInterface,\
+net.floodlightcontroller.counter.CounterStore,\
+net.floodlightcontroller.perfmon.PktInProcessingTime,\
+net.floodlightcontroller.ui.web.StaticWebRoutable,\
+net.floodlightcontroller.onoslistener.OnosPublisher, \
+net.onrc.onos.registry.controller.ZookeeperRegistry
+net.floodlightcontroller.restserver.RestApiServer.port = 8080
+net.floodlightcontroller.core.FloodlightProvider.openflowport = 6633
+net.floodlightcontroller.jython.JythonDebugInterface.port = 6655
+net.floodlightcontroller.forwarding.Forwarding.idletimeout = 5
+net.floodlightcontroller.forwarding.Forwarding.hardtimeout = 0
+net.floodlightcontroller.onoslistener.OnosPublisher.dbconf = /tmp/cassandra.titan
diff --git a/cluster-mgmt/common/zoo.cfg b/cluster-mgmt/common/zoo.cfg
new file mode 100644
index 0000000..c4e1eb3
--- /dev/null
+++ b/cluster-mgmt/common/zoo.cfg
@@ -0,0 +1,45 @@
+# The number of milliseconds of each tick
+tickTime=2000
+# The number of ticks that the initial 
+# synchronization phase can take
+initLimit=10
+# The number of ticks that can pass between 
+# sending a request and getting an acknowledgement
+syncLimit=5
+# the directory where the snapshot is stored.
+# do not use /tmp for storage, /tmp here is just 
+# example sakes.
+dataDir=/var/lib/zookeeper
+# the port at which the clients will connect
+clientPort=2181
+#
+# specify all servers in the Zookeeper ensemble
+
+#server.1=onosgui1:2888:3888
+#server.2=onosgui2:2888:3888
+#server.3=onosgui3:2888:3888
+#server.4=onosgui4:2888:3888
+#server.5=onosgui5:2888:3888
+#server.6=onosgui6:2888:3888
+#server.7=onosgui7:2888:3888
+#server.8=onosgui8:2888:3888
+#
+#
+# Be sure to read the maintenance section of the 
+# administrator guide before turning on autopurge.
+#
+#
+# Be sure to read the maintenance section of the 
+# administrator guide before turning on autopurge.
+#
+# http://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_maintenance
+#
+# The number of snapshots to retain in dataDir
+#autopurge.snapRetainCount=3
+# Purge task interval in hours
+# Set to "0" to disable auto purge feature
+#autopurge.purgeInterval=1
+server.1=test1:2888:3888
+server.2=test2:2888:3888
+server.3=test3:2888:3888
+server.4=test4:2888:3888
diff --git a/cluster-mgmt/cp-config.sh b/cluster-mgmt/cp-config.sh
new file mode 100755
index 0000000..5c74462
--- /dev/null
+++ b/cluster-mgmt/cp-config.sh
@@ -0,0 +1,69 @@
+#! /bin/bash
+USERNAME=ubuntu
+CASSANDRA_DIR='/home/ubuntu/apache-cassandra-1.1.4'
+ZK_DIR='/home/ubuntu/zookeeper-3.4.5'
+ZK_LIB='/var/lib/zookeeper'
+CASSANDRA_LIB='/var/lib/cassandra'
+
+SSH_COPY="authorized_keys  id_rsa  id_rsa.pub  known_hosts  onlab-gui.pem  onlabkey.pem"
+
+if [ $# == 2 ]; then
+  NR_NODES=$1
+  basename=$2
+else
+  echo "$0 nr_nodes basename"
+  exit
+fi
+
+if [ ! -f ./cluster.txt ]; then
+  echo "Cannot find cluster.txt"
+  exit
+fi
+
+export CLUSTER="./cluster.txt"
+dsh -g $basename 'uname -a'
+
+echo "Stopping Services"
+#dsh -g $basename 'cd ONOS; ./start-onos.sh stop'
+#dsh -g $basename 'cd ONOS; ./stop-cassandra stop'
+#dsh -g $basename '$ZK_DIR/bin/zkServer.sh stop'
+
+# authorized_keys  cassandra.yaml  hosts  id_rsa  id_rsa.pub  known_hosts  onlab-gui.pem  onlabkey.pem  onos.properties  zoo.cfg
+## SSH Setting
+dsh -g $basename 'mkdir -m 700 .ssh' 
+for n in $SSH_COPY; do
+ pcp -g $basename  common/$n '.ssh'
+ if [ $n != "id_rsa.pub" ] ; then
+   dsh -g $basename "chmod 600 .ssh/$n"
+ fi
+done
+
+dsh -g $basename "sudo rm -rf $CASSANDRA_LIB/commitlog/*"
+dsh -g $basename "sudo rm -rf $CASSANDRA_LIB/saved_caches/*"
+dsh -g $basename "sudo rm -rf $CASSANDRA_LIB/data/*"
+dsh -g $basename "sudo chown -R $username:$username $CASSANDRA_LIB"
+
+dsh -g $basename "sudo rm -rf $ZK_LIB/version-2*"
+dsh -g $basename "sudo rm -rf $ZK_LIB/myid"
+
+pcp -g $basename common/cassandra.yaml $CASSANDRA_DIR/conf
+pcp -g $basename common/zoo.cfg        $ZK_DIR/conf
+pcp -g $basename common/hosts          '~'
+
+for n in `seq 1 $NR_NODES`; do
+  pcp -w ${basename}${n} ${basename}${n}/hostname '~'
+  pcp -w ${basename}${n} ${basename}${n}/myid $ZK_DIR/conf
+done
+
+dsh -g $basename 'sudo cp ~/hostname /etc' 
+dsh -g $basename 'sudo cp ~/hosts /etc' 
+dsh -g $basename "cd $ZK_LIB; sudo ln -s $ZK_DIR/conf/myid"
+
+dsh -g $basename 'sudo hostname `cat /etc/hostname`'
+
+for n in `seq 2 $NR_NODES`; do
+  pcp -w ${basename}${n} ${basename}${n}/onsdemo_edge.py 'ONOS/test-network/mininet'
+  pcp -w ${basename}${n} ${basename}${n}/tunnel_onos_edge.sh 'ONOS/test-network/mininet'
+done
+pcp -w ${basename}1 ${basename}1/tunnel_onos_core.sh 'ONOS/test-network/mininet'
+pcp -w ${basename}1 ${basename}1/onsdemo_core.py 'ONOS/test-network/mininet'
diff --git a/cluster-mgmt/cp-mininet.sh b/cluster-mgmt/cp-mininet.sh
new file mode 100755
index 0000000..c3d69fe
--- /dev/null
+++ b/cluster-mgmt/cp-mininet.sh
@@ -0,0 +1,29 @@
+#! /bin/bash
+USERNAME=ubuntu
+CASSANDRA_DIR='/home/ubuntu/apache-cassandra-1.1.4'
+ZK_DIR='/home/ubuntu/zookeeper-3.4.5'
+ZK_LIB='/var/lib/zookeeper'
+CASSANDRA_LIB='/var/lib/cassandra'
+
+if [ $# == 2 ]; then
+  NR_NODES=$1
+  basename=$2
+else
+  echo "$0 nr_nodes basename"
+  exit
+fi
+
+if [ ! -f ./cluster.txt ]; then
+  echo "Cannot find cluster.txt"
+  exit
+fi
+
+export CLUSTER="./cluster.txt"
+dsh -g $basename 'uname -a'
+
+for n in `seq 1 $NR_NODES`; do
+  pcp -w ${basename}${n} ${basename}${n}/onsdemo.py 'ONOS/test-network/mininet'
+  pcp -w ${basename}${n} ${basename}${n}/tunnel_onsdemo.sh 'ONOS/test-network/mininet'
+done
+dsh -g $basename 'chmod 755 ONOS/test-network/mininet/tunnel_onsdemo.sh'
+dsh -g $basename 'chmod 755 ONOS/test-network/mininet/onsdemo.py'
diff --git a/cluster-mgmt/func.sh b/cluster-mgmt/func.sh
new file mode 100755
index 0000000..e7d976d
--- /dev/null
+++ b/cluster-mgmt/func.sh
@@ -0,0 +1,109 @@
+USERNAME=ubuntu
+CASSANDRA_DIR='/home/ubuntu/apache-cassandra-1.1.4'
+ZK_DIR='/home/ubuntu/zookeeper-3.4.5'
+ONOS_DIR='/home/ubuntu/ONOS'
+ZK_LIB='/var/lib/zookeeper'
+CASSANDRA_LIB='/var/lib/cassandra'
+
+if [ x$CLUSTER == "x" ]; then
+  echo "CLUSTER is not set. Exitting."
+  exit
+fi
+if [ x$ONOS_CLUSTER_BASENAME == "x" ]; then
+  echo "ONOS_CLUSTER_BASENAME is not set. Exitting"
+  exit
+fi
+if [ x$ONOS_CLUSTER_NR_NODES == "x" ]; then
+  echo "ONOS_CLUSTER_NR_NODES is not set. Exitting"
+  exit
+fi
+
+export basename=$ONOS_CLUSTER_BASENAME
+export nr_nodes=$ONOS_CLUSTER_NR_NODES
+
+checkcluster () {
+  dsh -g $basename 'uname -a'
+}
+
+zk () {
+  case "$1" in
+    start)
+      echo "Starting ZK.."
+      dsh -g $basename "$ZK_DIR/bin/zkServer.sh start"
+      while [ 1 ]; do
+        nup=`dsh -g $basename "$ZK_DIR/bin/zkServer.sh status" | grep "Mode" | egrep "leader|follower" | wc -l`
+        if [ $nup == $nr_nodes ]; then
+          echo "everybody's up: $nup up of of $nr_nodes"
+          echo "ZK started"
+          break;
+        fi
+        echo "waiting for everybody's up: $nup up of of $nr_nodes"
+        sleep 1
+      done
+      ;;
+    stop)
+      echo "Stopping ZK.."
+      dsh -g $basename "$ZK_DIR/bin/zkServer.sh stop"
+      ;;
+    status)
+      echo "Checking ZK Status"
+      dsh -g $basename "$ZK_DIR/bin/zkServer.sh status"
+      ;;
+  esac
+}
+
+cassandra () {
+  case "$1" in
+    start)
+      echo "Starting Cassandra.."
+      echo "  start cassandra at the seed node"
+      dsh -w ${basename}1 "cd $ONOS_DIR; ./start-cassandra.sh start"
+      sleep 1
+      echo "  start cassandra in rest nodes"
+      dsh -g ${basename} -x ${basename}1 "cd $ONOS_DIR; ./start-cassandra.sh start"
+      while [ 1 ]; do
+        echo $$
+        dsh -w ${basename}1 "cd $ONOS_DIR; ./start-cassandra.sh status" > .cassandra_check.$$
+        cat .cassandra_check.$$
+        nup=`cat .cassandra_check.$$ | grep Normal |grep Up| wc -l`
+        if [ $nup == $nr_nodes ]; then
+          echo "everybody's up: $nup up of of $nr_nodes"
+          echo "Cassandra started"
+          break;
+        fi
+        echo "waiting for everybody's up: $nup up of of $nr_nodes"
+        sleep 1
+      done
+      ;;
+    stop)
+      echo "Stopping Cassandra.."
+      dsh -g ${basename} "cd $ONOS_DIR; ./start-cassandra.sh stop"
+      ;;
+    cleandb)
+      echo "Removing all data in db"
+      dsh -w ${basename}1 "cd $ONOS_DIR; ./scripts/cleanup-cassandra.sh"
+      ;;
+    status)
+      echo "Checking Cassandra Status"
+      dsh -w ${basename}1 "cd $ONOS_DIR; ./start-cassandra.sh status"
+      ;;
+  esac
+}
+
+onos () {
+  case "$1" in
+    start)
+      echo "Starting ONOS"
+      dsh -g ${basename} "cd $ONOS_DIR; ./start-onos.sh start"
+      dsh -w ${basename}1 "cd $ONOS_DIR; ./start-rest.sh start"
+      ;;
+    stop)
+      echo "Stop ONOS"
+      dsh -g ${basename} "cd $ONOS_DIR; ./start-onos.sh stop"
+      ;;
+    status)
+      echo "Checking ONOS Status"
+      dsh -g ${basename} "cd $ONOS_DIR; ./start-onos.sh status"
+      ;;
+  esac
+}
diff --git a/cluster-mgmt/make-config.sh b/cluster-mgmt/make-config.sh
new file mode 100755
index 0000000..b97a818
--- /dev/null
+++ b/cluster-mgmt/make-config.sh
@@ -0,0 +1,61 @@
+#! /bin/bash
+USERNAME=ubuntu
+if [ $# == 3 ]; then
+  NR_NODES=$1
+  basename=$2
+  hosts_file=$3
+else
+  echo "$0 nr_hodes basename hostfile"
+  exit
+fi
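+# The hosts file is expected to contain one "<IP address> <hostname>" pair per
+# line, e.g. (addresses are illustrative):
+#   10.0.0.11 onosdevx1
+#   10.0.0.12 onosdevx2
+# Column 2 is used to build cluster.txt; the file is also merged into common/hosts.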
+
+for n in `seq 1 $NR_NODES`; do
+  rm -rf ${basename}${n}
+  mkdir ${basename}${n}
+  echo "${basename}${n}" > ${basename}${n}/hostname
+  echo $n > ${basename}${n}/myid
+done
+
+## ZK config ##
+cp template/zoo.cfg common/
+for n in `seq 1 $NR_NODES`; do
+ echo "server.${n}=${basename}${n}:2888:3888"
+done >> common/zoo.cfg
+
+## Cassandra config ##
+cat template/cassandra.yaml |\
+  sed "s/__SEED__/${basename}1/g" > common/cassandra.yaml
+
+## /etc/hosts ##
+cat template/hosts $hosts_file >  common/hosts
+
+## .ssh/known_hosts ##
+ssh-keyscan -H -t rsa github.com > common/known_hosts
+ssh-keyscan -H -t rsa onosnat >> common/known_hosts
+for n in `seq 1 $NR_NODES`; do
+  ssh-keyscan -H -t rsa ${basename}${n}
+done >> common/known_hosts
+
+echo "GROUP: $basename" > cluster.txt
+cat $hosts_file | awk '{print $2}' >> cluster.txt
+
+
+## Creating shell script to login each node ##
+for n in `seq 1 $NR_NODES`; do
+  cat << EOF > bin/${basename}${n}
+#!/bin/sh
+ssh $USERNAME@${basename}${n}
+EOF
+  chmod 755 bin/${basename}${n}
+done
+
+echo "======================================"
+echo "Do not forget to do the following"
+echo "paste $hosts_file to /etc/hosts"
+echo "paste cluster.txt to your CLUSTER file"
+echo "======================================"
diff --git a/cluster-mgmt/make-mininet.sh b/cluster-mgmt/make-mininet.sh
new file mode 100755
index 0000000..e314114
--- /dev/null
+++ b/cluster-mgmt/make-mininet.sh
@@ -0,0 +1,31 @@
+#! /bin/bash
+if [ $# == 3 ]; then
+  NR_NODES=$1
+  basename=$2
+  hosts_file=$3
+else
+  echo "$0 nr_hodes basename hostfile"
+  exit
+fi
+
+for n in `seq 2 $NR_NODES`; do
+  if [ $n == 2 ]; then
+    nrsw=50
+  else
+    nrsw=25
+  fi
+  cat template/onsdemo_edge_template.py | sed "s/__NWID__/$n/g" | sed "s/__NRSW__/${nrsw}/g" > ${basename}${n}/onsdemo.py
+done
+cp template/onsdemo_core.py ${basename}1/onsdemo.py
+
+cat $hosts_file | awk '{printf("%s=%s\n",$2,$1)}' > .tmp
+for n in `seq 2 $NR_NODES`; do
+  cat template/tunnel_onsdemo_edge_template.sh | awk '{if(NR==2){system("cat .tmp")}else{print $0}}' |\
+  sed "s/__NWID__/$n/g" |\
+  sed "s/__TUNNEL__/TUNNEL\=\(\"1 $n ${basename}1\"\)/g" > ${basename}${n}/tunnel_onsdemo.sh
+  chmod 755 ${basename}${n}/tunnel_onsdemo.sh
+done
+
+cat template/tunnel_onsdemo_core_template.sh | awk '{if(NR==2){system("cat .tmp")}else{print $0}}' |\
+  sed "s/__basename__/$basename/g" > ${basename}1/tunnel_onsdemo.sh 
+  chmod 755 ${basename}1/tunnel_onsdemo.sh
diff --git a/cluster-mgmt/ssh/authorized_keys b/cluster-mgmt/ssh/authorized_keys
new file mode 100644
index 0000000..f723bc2
--- /dev/null
+++ b/cluster-mgmt/ssh/authorized_keys
@@ -0,0 +1,2 @@
+ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCTlBTIOKm30b7TsCgIT+xjq42q0zwG+EohOGkCtNr1eGkS9OZDYwkNAkPtpzYtZJ914oRL29JiXFm+OsAfwVKsY2yZlV+tcnTx4Djfhgs6/wURMhw3sOovWu2iAoPAhQYvvvq8maD8ZvybYTzq4yHNP27G7rv4s+GCtv3bXOgzsKd8Zkg0+tGZYuCks5mNimlfWGBlA5jI9MEkd0nWTqSTRj8IkfhJo26HralR+X/KwHGryfxjG9rsyqoZGnVC/xV4KOOtZlVRzTVxCDFPj86lO4dzf7Tt+dst/t/9u/V2d7YxnuhaM+Sarve+6f/tZoekWzpNRGGT9h7FzT7Osg+l onlab-gui
+ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDEHmYMw6DugE6FCoLR5fdTO7iQfouHmLm60yxSjXu/wnBGmM7SGc1AAgmtr6JaEPYj8H6g7AL8+wFrbj7TXOoMD4HWoEzC/PuTZ5JgyCeTK/rmYdBlbAqBbLeD1d9q35O+GnWOsLIsSQHcKvKZveLLPTBtzJ6em9NfgiPKibbsAFD716w++cxXKHabzHw7KB9XaewoYdznrosWwU3TXR4C2rzMAimh6XuBLZ0xFTNF4nFhy+H0AWUEN8dY8NHwAMGlAWK4g7phZ2cQhgU4GeutfGlEKlKT3iT7j8rkW1JKsx/AOVfcnozuHCm76jYD5qXcizHeS4BYinXRepGY7mfn onlabkey
diff --git a/cluster-mgmt/ssh/id_rsa.pub b/cluster-mgmt/ssh/id_rsa.pub
new file mode 100644
index 0000000..bcb2d75
--- /dev/null
+++ b/cluster-mgmt/ssh/id_rsa.pub
@@ -0,0 +1 @@
+ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC3QgAX4yEcOHaKFgeq/tD2lbGg5VbNvRka1atUSd5q8hhtw5rB8um5Q5Z6+AfL83+Xlez2KonH6JLjhhs8wBHaJCVbzvDnycMEEHg12o+MvlKgKTkkSqP9W+Jejk4YGIr6QOQ/yzZRhRGoNGMaqI6KU7NjtgZyZs8h66GTyoBeXi9TZwGYdxeF5rVqZD80nlb+xlc+PUC4TQ/o2RnGej7S0J/+ES+/X6LiNgHyZPdFK2Pr4BilLwS8c5EyAHHQuW8hIcPhNwXgrx97f5L8yuNKAmW9WSYLk0r4DhnFUZrvIGqh3isxtnJDDf3UZ2U+PtGZ75ZNfk546obsuyc/IwHH ubuntu@onos9vpc
diff --git a/cluster-mgmt/start-mininet.sh b/cluster-mgmt/start-mininet.sh
new file mode 100755
index 0000000..46a0239
--- /dev/null
+++ b/cluster-mgmt/start-mininet.sh
@@ -0,0 +1,21 @@
+#! /bin/bash
+USERNAME=ubuntu
+CASSANDRA_DIR='/home/ubuntu/apache-cassandra-1.1.4'
+ZK_DIR='/home/ubuntu/zookeeper-3.4.5'
+ZK_LIB='/var/lib/zookeeper'
+CASSANDRA_LIB='/var/lib/cassandra'
+
+if [ $# == 1 ]; then
+  basename=$1
+else
+  echo "$0 basename"
+  exit
+fi
+
+export CLUSTER="./cluster.txt"
+dsh -g $basename 'uname -a'
+
+#dsh -g ${basename} 'cd ONOS/test-network/mininet; ./tunnel_onsdemo.sh start'
+#dsh -g ${basename} 'cd ONOS/test-network/mininet; ./tunnel_onsdemo.sh start'
+dsh -g ${basename} 'cd ONOS/test-network/mininet; sudo mn -c'
+dsh -g ${basename} 'cd ONOS/test-network/mininet; sudo ./onsdemo.py -n'
diff --git a/cluster-mgmt/template/cassandra.yaml b/cluster-mgmt/template/cassandra.yaml
new file mode 100644
index 0000000..ab79cdc
--- /dev/null
+++ b/cluster-mgmt/template/cassandra.yaml
@@ -0,0 +1,568 @@
+# Cassandra storage config YAML 
+
+# NOTE:
+#   See http://wiki.apache.org/cassandra/StorageConfiguration for
+#   full explanations of configuration directives
+# /NOTE
+
+# The name of the cluster. This is mainly used to prevent machines in
+# one logical cluster from joining another.
+cluster_name: 'ONOS Test Cluster'
+
+# You should always specify InitialToken when setting up a production
+# cluster for the first time, and often when adding capacity later.
+# The principle is that each node should be given an equal slice of
+# the token ring; see http://wiki.apache.org/cassandra/Operations
+# for more details.
+#
+# If blank, Cassandra will request a token bisecting the range of
+# the heaviest-loaded existing node.  If there is no load information
+# available, such as is the case with a new cluster, it will pick
+# a random token, which will lead to hot spots.
+initial_token:
+
+# See http://wiki.apache.org/cassandra/HintedHandoff
+hinted_handoff_enabled: true
+# this defines the maximum amount of time a dead host will have hints
+# generated.  After it has been dead this long, hints will be dropped.
+max_hint_window_in_ms: 3600000 # one hour
+# Sleep this long after delivering each hint
+hinted_handoff_throttle_delay_in_ms: 1
+
+# The following setting populates the page cache on memtable flush and compaction
+# WARNING: Enable this setting only when the whole node's data fits in memory.
+# Defaults to: false
+# populate_io_cache_on_flush: false
+
+# authentication backend, implementing IAuthenticator; used to identify users
+authenticator: org.apache.cassandra.auth.AllowAllAuthenticator
+
+# authorization backend, implementing IAuthority; used to limit access/provide permissions
+authority: org.apache.cassandra.auth.AllowAllAuthority
+
+# The partitioner is responsible for distributing rows (by key) across
+# nodes in the cluster.  Any IPartitioner may be used, including your
+# own as long as it is on the classpath.  Out of the box, Cassandra
+# provides org.apache.cassandra.dht.RandomPartitioner
+# org.apache.cassandra.dht.ByteOrderedPartitioner,
+# org.apache.cassandra.dht.OrderPreservingPartitioner (deprecated),
+# and org.apache.cassandra.dht.CollatingOrderPreservingPartitioner
+# (deprecated).
+# 
+# - RandomPartitioner distributes rows across the cluster evenly by md5.
+#   When in doubt, this is the best option.
+# - ByteOrderedPartitioner orders rows lexically by key bytes.  BOP allows
+#   scanning rows in key order, but the ordering can generate hot spots
+#   for sequential insertion workloads.
+# - OrderPreservingPartitioner is an obsolete form of BOP, that stores
+# - keys in a less-efficient format and only works with keys that are
+#   UTF8-encoded Strings.
+# - CollatingOPP collates according to EN,US rules rather than lexical byte
+#   ordering.  Use this as an example if you need custom collation.
+#
+# See http://wiki.apache.org/cassandra/Operations for more on
+# partitioners and token selection.
+partitioner: org.apache.cassandra.dht.RandomPartitioner
+
+# directories where Cassandra should store data on disk.
+data_file_directories:
+    - /var/lib/cassandra/data
+
+# commit log
+commitlog_directory: /var/lib/cassandra/commitlog
+
+# Maximum size of the key cache in memory.
+#
+# Each key cache hit saves 1 seek and each row cache hit saves 2 seeks at the
+# minimum, sometimes more. The key cache is fairly tiny for the amount of
+# time it saves, so it's worthwhile to use it at large numbers.
+# The row cache saves even more time, but must store the whole values of
+# its rows, so it is extremely space-intensive. It's best to only use the
+# row cache if you have hot rows or static rows.
+#
+# NOTE: if you reduce the size, you may not get your hottest keys loaded on startup.
+#
+# Default value is empty to make it "auto" (min(5% of Heap (in MB), 100MB)). Set to 0 to disable key cache.
+key_cache_size_in_mb:
+
+# Duration in seconds after which Cassandra should
+# save the key cache. Caches are saved to saved_caches_directory as
+# specified in this configuration file.
+#
+# Saved caches greatly improve cold-start speeds, and is relatively cheap in
+# terms of I/O for the key cache. Row cache saving is much more expensive and
+# has limited use.
+#
+# Default is 14400 or 4 hours.
+key_cache_save_period: 14400
+
+# Number of keys from the key cache to save
+# Disabled by default, meaning all keys are going to be saved
+# key_cache_keys_to_save: 100
+
+# Maximum size of the row cache in memory.
+# NOTE: if you reduce the size, you may not get your hottest keys loaded on startup.
+#
+# Default value is 0, to disable row caching.
+row_cache_size_in_mb: 0
+
+# Duration in seconds after which Cassandra should
+# save the row cache. Caches are saved to saved_caches_directory as specified
+# in this configuration file.
+#
+# Saved caches greatly improve cold-start speeds, and is relatively cheap in
+# terms of I/O for the key cache. Row cache saving is much more expensive and
+# has limited use.
+#
+# Default is 0 to disable saving the row cache.
+row_cache_save_period: 0
+
+# Number of keys from the row cache to save
+# Disabled by default, meaning all keys are going to be saved
+# row_cache_keys_to_save: 100
+
+# The provider for the row cache to use.
+#
+# Supported values are: ConcurrentLinkedHashCacheProvider, SerializingCacheProvider
+#
+# SerializingCacheProvider serialises the contents of the row and stores
+# it in native memory, i.e., off the JVM Heap. Serialized rows take
+# significantly less memory than "live" rows in the JVM, so you can cache
+# more rows in a given memory footprint.  And storing the cache off-heap
+# means you can use smaller heap sizes, reducing the impact of GC pauses.
+#
+# It is also valid to specify the fully-qualified class name to a class
+# that implements org.apache.cassandra.cache.IRowCacheProvider.
+#
+# Defaults to SerializingCacheProvider
+row_cache_provider: SerializingCacheProvider
+
+# saved caches
+saved_caches_directory: /var/lib/cassandra/saved_caches
+
+# commitlog_sync may be either "periodic" or "batch." 
+# When in batch mode, Cassandra won't ack writes until the commit log
+# has been fsynced to disk.  It will wait up to
+# commitlog_sync_batch_window_in_ms milliseconds for other writes, before
+# performing the sync.
+#
+# commitlog_sync: batch
+# commitlog_sync_batch_window_in_ms: 50
+#
+# the other option is "periodic" where writes may be acked immediately
+# and the CommitLog is simply synced every commitlog_sync_period_in_ms
+# milliseconds.
+commitlog_sync: periodic
+commitlog_sync_period_in_ms: 10000
+
+# The size of the individual commitlog file segments.  A commitlog
+# segment may be archived, deleted, or recycled once all the data
+# in it (potentially from each columnfamily in the system) has been
+# flushed to sstables.  
+#
+# The default size is 32, which is almost always fine, but if you are
+# archiving commitlog segments (see commitlog_archiving.properties),
+# then you probably want a finer granularity of archiving; 8 or 16 MB
+# is reasonable.
+commitlog_segment_size_in_mb: 32
+
+# any class that implements the SeedProvider interface and has a
+# constructor that takes a Map<String, String> of parameters will do.
+seed_provider:
+    # Addresses of hosts that are deemed contact points. 
+    # Cassandra nodes use this list of hosts to find each other and learn
+    # the topology of the ring.  You must change this if you are running
+    # multiple nodes!
+    - class_name: org.apache.cassandra.locator.SimpleSeedProvider
+      parameters:
+          # seeds is actually a comma-delimited list of addresses.
+          # Ex: "<ip1>,<ip2>,<ip3>"
+#          - seeds: "10.0.1.243"
+          - seeds: "__SEED__"
+
+# emergency pressure valve: each time heap usage after a full (CMS)
+# garbage collection is above this fraction of the max, Cassandra will
+# flush the largest memtables.  
+#
+# Set to 1.0 to disable.  Setting this lower than
+# CMSInitiatingOccupancyFraction is not likely to be useful.
+#
+# RELYING ON THIS AS YOUR PRIMARY TUNING MECHANISM WILL WORK POORLY:
+# it is most effective under light to moderate load, or read-heavy
+# workloads; under truly massive write load, it will often be too
+# little, too late.
+flush_largest_memtables_at: 0.75
+
+# emergency pressure valve #2: the first time heap usage after a full
+# (CMS) garbage collection is above this fraction of the max,
+# Cassandra will reduce cache maximum _capacity_ to the given fraction
+# of the current _size_.  Should usually be set substantially above
+# flush_largest_memtables_at, since that will have less long-term
+# impact on the system.  
+# 
+# Set to 1.0 to disable.  Setting this lower than
+# CMSInitiatingOccupancyFraction is not likely to be useful.
+reduce_cache_sizes_at: 0.85
+reduce_cache_capacity_to: 0.6
+
+# For workloads with more data than can fit in memory, Cassandra's
+# bottleneck will be reads that need to fetch data from
+# disk. "concurrent_reads" should be set to (16 * number_of_drives) in
+# order to allow the operations to enqueue low enough in the stack
+# that the OS and drives can reorder them.
+#
+# On the other hand, since writes are almost never IO bound, the ideal
+# number of "concurrent_writes" is dependent on the number of cores in
+# your system; (8 * number_of_cores) is a good rule of thumb.
+concurrent_reads: 16
+concurrent_writes: 16
+
+# Total memory to use for memtables.  Cassandra will flush the largest
+# memtable when this much memory is used.
+# If omitted, Cassandra will set it to 1/3 of the heap.
+# memtable_total_space_in_mb: 2048
+
+# Total space to use for commitlogs.  Since commitlog segments are
+# mmapped, and hence use up address space, the default size is 32
+# on 32-bit JVMs, and 1024 on 64-bit JVMs.
+#
+# If space gets above this value (it will round up to the next nearest
+# segment multiple), Cassandra will flush every dirty CF in the oldest
+# segment and remove it.  So a small total commitlog space will tend
+# to cause more flush activity on less-active columnfamilies.
+# commitlog_total_space_in_mb: 4096
+
+# This sets the amount of memtable flush writer threads.  These will
+# be blocked by disk io, and each one will hold a memtable in memory
+# while blocked. If you have a large heap and many data directories,
+# you can increase this value for better flush performance.
+# By default this will be set to the amount of data directories defined.
+#memtable_flush_writers: 1
+
+# the number of full memtables to allow pending flush, that is,
+# waiting for a writer thread.  At a minimum, this should be set to
+# the maximum number of secondary indexes created on a single CF.
+memtable_flush_queue_size: 4
+
+# Whether to, when doing sequential writing, fsync() at intervals in
+# order to force the operating system to flush the dirty
+# buffers. Enable this to avoid sudden dirty buffer flushing from
+# impacting read latencies. Almost always a good idea on SSD:s; not
+# necessarily on platters.
+trickle_fsync: false
+trickle_fsync_interval_in_kb: 10240
+
+# TCP port, for commands and data
+storage_port: 7000
+
+# SSL port, for encrypted communication.  Unused unless enabled in
+# encryption_options
+ssl_storage_port: 7001
+
+# Address to bind to and tell other Cassandra nodes to connect to. You
+# _must_ change this if you want multiple nodes to be able to
+# communicate!
+# 
+# Leaving it blank leaves it up to InetAddress.getLocalHost(). This
+# will always do the Right Thing *if* the node is properly configured
+# (hostname, name resolution, etc), and the Right Thing is to use the
+# address associated with the hostname (it might not be).
+#
+# Setting this to 0.0.0.0 is always wrong.
+listen_address:
+
+# Address to broadcast to other Cassandra nodes
+# Leaving this blank will set it to the same value as listen_address
+# broadcast_address: 1.2.3.4
+
+# The address to bind the Thrift RPC service to -- clients connect
+# here. Unlike ListenAddress above, you *can* specify 0.0.0.0 here if
+# you want Thrift to listen on all interfaces.
+# 
+# Leaving this blank has the same effect it does for ListenAddress,
+# (i.e. it will be based on the configured hostname of the node).
+rpc_address: 0.0.0.0
+# port for Thrift to listen for clients on
+rpc_port: 9160
+
+# enable or disable keepalive on rpc connections
+rpc_keepalive: true
+
+# Cassandra provides three options for the RPC Server:
+#
+# sync  -> One connection per thread in the rpc pool (see below).
+#          For a very large number of clients, memory will be your limiting
+#          factor; on a 64 bit JVM, 128KB is the minimum stack size per thread.
+#          Connection pooling is very, very strongly recommended.
+#
+# async -> Nonblocking server implementation with one thread to serve 
+#          rpc connections.  This is not recommended for high throughput use
+#          cases. Async has been tested to be about 50% slower than sync
+#          or hsha and is deprecated: it will be removed in the next major release.
+#
+# hsha  -> Stands for "half synchronous, half asynchronous." The rpc thread pool 
+#          (see below) is used to manage requests, but the threads are multiplexed
+#          across the different clients.
+#
+# The default is sync because on Windows hsha is about 30% slower.  On Linux,
+# sync/hsha performance is about the same, with hsha of course using less memory.
+rpc_server_type: sync
+
+# Uncomment rpc_min|max|thread to set request pool size.
+# You would primarily set max for the sync server to safeguard against
+# misbehaved clients; if you do hit the max, Cassandra will block until one
+# disconnects before accepting more.  The defaults for sync are min of 16 and max
+# unlimited.
+# 
+# For the Hsha server, the min and max both default to quadruple the number of
+# CPU cores.
+#
+# This configuration is ignored by the async server.
+#
+# rpc_min_threads: 16
+# rpc_max_threads: 2048
+
+# uncomment to set socket buffer sizes on rpc connections
+# rpc_send_buff_size_in_bytes:
+# rpc_recv_buff_size_in_bytes:
+
+# Frame size for thrift (maximum field length).
+# 0 disables TFramedTransport in favor of TSocket. This option
+# is deprecated; we strongly recommend using Framed mode.
+thrift_framed_transport_size_in_mb: 15
+
+# The max length of a thrift message, including all fields and
+# internal thrift overhead.
+thrift_max_message_length_in_mb: 16
+
+# Set to true to have Cassandra create a hard link to each sstable
+# flushed or streamed locally in a backups/ subdirectory of the
+# Keyspace data.  Removing these links is the operator's
+# responsibility.
+incremental_backups: false
+
+# Whether or not to take a snapshot before each compaction.  Be
+# careful using this option, since Cassandra won't clean up the
+# snapshots for you.  Mostly useful if you're paranoid when there
+# is a data format change.
+snapshot_before_compaction: false
+
+# Whether or not a snapshot is taken of the data before keyspace truncation
+# or dropping of column families. The STRONGLY advised default of true 
+# should be used to provide data safety. If you set this flag to false, you will
+# lose data on truncation or drop.
+auto_snapshot: true
+
+# Add column indexes to a row after its contents reach this size.
+# Increase if your column values are large, or if you have a very large
+# number of columns.  The competing causes are, Cassandra has to
+# deserialize this much of the row to read a single column, so you want
+# it to be small - at least if you do many partial-row reads - but all
+# the index data is read for each access, so you don't want to generate
+# that wastefully either.
+column_index_size_in_kb: 64
+
+# Size limit for rows being compacted in memory.  Larger rows will spill
+# over to disk and use a slower two-pass compaction process.  A message
+# will be logged specifying the row key.
+in_memory_compaction_limit_in_mb: 64
+
+# Number of simultaneous compactions to allow, NOT including
+# validation "compactions" for anti-entropy repair.  Simultaneous
+# compactions can help preserve read performance in a mixed read/write
+# workload, by mitigating the tendency of small sstables to accumulate
+# during a single long running compactions. The default is usually
+# fine and if you experience problems with compaction running too
+# slowly or too fast, you should look at
+# compaction_throughput_mb_per_sec first.
+#
+# This setting has no effect on LeveledCompactionStrategy.
+#
+# concurrent_compactors defaults to the number of cores.
+# Uncomment to make compaction mono-threaded, the pre-0.8 default.
+#concurrent_compactors: 1
+
+# Multi-threaded compaction. When enabled, each compaction will use
+# up to one thread per core, plus one thread per sstable being merged.
+# This is usually only useful for SSD-based hardware: otherwise, 
+# your concern is usually to get compaction to do LESS i/o (see:
+# compaction_throughput_mb_per_sec), not more.
+multithreaded_compaction: false
+
+# Throttles compaction to the given total throughput across the entire
+# system. The faster you insert data, the faster you need to compact in
+# order to keep the sstable count down, but in general, setting this to
+# 16 to 32 times the rate you are inserting data is more than sufficient.
+# Setting this to 0 disables throttling. Note that this account for all types
+# of compaction, including validation compaction.
+compaction_throughput_mb_per_sec: 16
+
+# Track cached row keys during compaction, and re-cache their new
+# positions in the compacted sstable.  Disable if you use really large
+# key caches.
+compaction_preheat_key_cache: true
+
+# Throttles all outbound streaming file transfers on this node to the
+# given total throughput in Mbps. This is necessary because Cassandra does
+# mostly sequential IO when streaming data during bootstrap or repair, which
+# can lead to saturating the network connection and degrading rpc performance.
+# When unset, the default is 400 Mbps or 50 MB/s.
+# stream_throughput_outbound_megabits_per_sec: 400
+
+# Time to wait for a reply from other nodes before failing the command 
+rpc_timeout_in_ms: 10000
+
+# Enable socket timeout for streaming operation.
+# When a timeout occurs during streaming, streaming is retried from the start
+# of the current file. This *can* involve re-streaming an important amount of
+# data, so you should avoid setting the value too low.
+# Default value is 0, which never timeout streams.
+# streaming_socket_timeout_in_ms: 0
+
+# phi value that must be reached for a host to be marked down.
+# most users should never need to adjust this.
+# phi_convict_threshold: 8
+
+# endpoint_snitch -- Set this to a class that implements
+# IEndpointSnitch.  The snitch has two functions:
+# - it teaches Cassandra enough about your network topology to route
+#   requests efficiently
+# - it allows Cassandra to spread replicas around your cluster to avoid
+#   correlated failures. It does this by grouping machines into
+#   "datacenters" and "racks."  Cassandra will do its best not to have
+#   more than one replica on the same "rack" (which may not actually
+#   be a physical location)
+#
+# IF YOU CHANGE THE SNITCH AFTER DATA IS INSERTED INTO THE CLUSTER,
+# YOU MUST RUN A FULL REPAIR, SINCE THE SNITCH AFFECTS WHERE REPLICAS
+# ARE PLACED.
+#
+# Out of the box, Cassandra provides
+#  - SimpleSnitch:
+#    Treats Strategy order as proximity. This improves cache locality
+#    when disabling read repair, which can further improve throughput.
+#    Only appropriate for single-datacenter deployments.
+#  - PropertyFileSnitch:
+#    Proximity is determined by rack and data center, which are
+#    explicitly configured in cassandra-topology.properties.
+#  - GossipingPropertyFileSnitch
+#    The rack and datacenter for the local node are defined in
+#    cassandra-rackdc.properties and propagated to other nodes via gossip.  If
+#    cassandra-topology.properties exists, it is used as a fallback, allowing
+#    migration from the PropertyFileSnitch.
+#  - RackInferringSnitch:
+#    Proximity is determined by rack and data center, which are
+#    assumed to correspond to the 3rd and 2nd octet of each node's
+#    IP address, respectively.  Unless this happens to match your
+#    deployment conventions (as it did Facebook's), this is best used
+#    as an example of writing a custom Snitch class.
+#  - Ec2Snitch:
+#    Appropriate for EC2 deployments in a single Region.  Loads Region
+#    and Availability Zone information from the EC2 API. The Region is
+#    treated as the Datacenter, and the Availability Zone as the rack.
+#    Only private IPs are used, so this will not work across multiple
+#    Regions.
+#  - Ec2MultiRegionSnitch:
+#    Uses public IPs as broadcast_address to allow cross-region
+#    connectivity.  (Thus, you should set seed addresses to the public
+#    IP as well.) You will need to open the storage_port or
+#    ssl_storage_port on the public IP firewall.  (For intra-Region
+#    traffic, Cassandra will switch to the private IP after
+#    establishing a connection.)
+#
+# You can use a custom Snitch by setting this to the full class name
+# of the snitch, which will be assumed to be on your classpath.
+endpoint_snitch: SimpleSnitch
+
+# controls how often to perform the more expensive part of host score
+# calculation
+dynamic_snitch_update_interval_in_ms: 100 
+# controls how often to reset all host scores, allowing a bad host to
+# possibly recover
+dynamic_snitch_reset_interval_in_ms: 600000
+# if set greater than zero and read_repair_chance is < 1.0, this will allow
+# 'pinning' of replicas to hosts in order to increase cache capacity.
+# The badness threshold will control how much worse the pinned host has to be
+# before the dynamic snitch will prefer other replicas over it.  This is
+# expressed as a double which represents a percentage.  Thus, a value of
+# 0.2 means Cassandra would continue to prefer the static snitch values
+# until the pinned host was 20% worse than the fastest.
+dynamic_snitch_badness_threshold: 0.1
+
+# request_scheduler -- Set this to a class that implements
+# RequestScheduler, which will schedule incoming client requests
+# according to the specific policy. This is useful for multi-tenancy
+# with a single Cassandra cluster.
+# NOTE: This is specifically for requests from the client and does
+# not affect inter node communication.
+# org.apache.cassandra.scheduler.NoScheduler - No scheduling takes place
+# org.apache.cassandra.scheduler.RoundRobinScheduler - Round robin of
+# client requests to a node with a separate queue for each
+# request_scheduler_id. The scheduler is further customized by
+# request_scheduler_options as described below.
+request_scheduler: org.apache.cassandra.scheduler.NoScheduler
+
+# Scheduler Options vary based on the type of scheduler
+# NoScheduler - Has no options
+# RoundRobin
+#  - throttle_limit -- The throttle_limit is the number of in-flight
+#                      requests per client.  Requests beyond 
+#                      that limit are queued up until
+#                      running requests can complete.
+#                      The value of 80 here is twice the number of
+#                      concurrent_reads + concurrent_writes.
+#  - default_weight -- default_weight is optional and allows for
+#                      overriding the default which is 1.
+#  - weights -- Weights are optional and will default to 1 or the
+#               overridden default_weight. The weight translates into how
+#               many requests are handled during each turn of the
+#               RoundRobin, based on the scheduler id.
+#
+# request_scheduler_options:
+#    throttle_limit: 80
+#    default_weight: 5
+#    weights:
+#      Keyspace1: 1
+#      Keyspace2: 5
+
+# request_scheduler_id -- An identifier based on which to perform
+# the request scheduling. Currently the only valid option is keyspace.
+# request_scheduler_id: keyspace
+
+# index_interval controls the sampling of entries from the primary
+# row index in terms of space versus time.  The larger the interval,
+# the smaller and less effective the sampling will be.  In technical
+# terms, the interval corresponds to the number of index entries that
+# are skipped between taking each sample.  All the sampled entries
+# must fit in memory.  Generally, a value between 128 and 512 here
+# coupled with a large key cache size on CFs results in the best trade
+# offs.  This value is not often changed, however if you have many
+# very small rows (many to an OS page), then increasing this will
+# often lower memory usage without an impact on performance.
+index_interval: 128
+
+# Enable or disable inter-node encryption
+# Default settings are TLS v1, RSA 1024-bit keys (it is imperative that
+# users generate their own keys) TLS_RSA_WITH_AES_128_CBC_SHA as the cipher
+# suite for authentication, key exchange and encryption of the actual data transfers.
+# NOTE: No custom encryption options are enabled at the moment
+# The available internode options are : all, none, dc, rack
+#
+# If set to dc cassandra will encrypt the traffic between the DCs
+# If set to rack cassandra will encrypt the traffic between the racks
+#
+# The passwords used in these options must match the passwords used when generating
+# the keystore and truststore.  For instructions on generating these files, see:
+# http://download.oracle.com/javase/6/docs/technotes/guides/security/jsse/JSSERefGuide.html#CreateKeystore
+#
+encryption_options:
+    internode_encryption: none
+    keystore: conf/.keystore
+    keystore_password: cassandra
+    truststore: conf/.truststore
+    truststore_password: cassandra
+    # More advanced defaults below:
+    # protocol: TLS
+    # algorithm: SunX509
+    # store_type: JKS
+    # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA]
diff --git a/cluster-mgmt/template/hosts b/cluster-mgmt/template/hosts
new file mode 100644
index 0000000..220b30f
--- /dev/null
+++ b/cluster-mgmt/template/hosts
@@ -0,0 +1,11 @@
+127.0.0.1 localhost
+
+# The following lines are desirable for IPv6 capable hosts
+::1 ip6-localhost ip6-loopback
+fe00::0 ip6-localnet
+ff00::0 ip6-mcastprefix
+ff02::1 ip6-allnodes
+ff02::2 ip6-allrouters
+ff02::3 ip6-allhosts
+
+## For ONOS Development
diff --git a/cluster-mgmt/template/onsdemo_core.py b/cluster-mgmt/template/onsdemo_core.py
new file mode 100755
index 0000000..e62ae77
--- /dev/null
+++ b/cluster-mgmt/template/onsdemo_core.py
@@ -0,0 +1,191 @@
+#!/usr/bin/python
+
+"""
+Start up a Simple topology
+"""
+from mininet.net import Mininet
+from mininet.node import Controller, RemoteController
+from mininet.log import setLogLevel, info, error, warn, debug
+from mininet.cli import CLI
+from mininet.topo import Topo
+from mininet.util import quietRun
+from mininet.moduledeps import pathCheck
+from mininet.link import Link, TCLink
+
+from sys import exit
+import os.path
+from subprocess import Popen, STDOUT, PIPE
+
+import sys
+
+
+#import argparse
+
+class MyController( Controller ):
+    def __init__( self, name, ip='127.0.0.1', port=6633, **kwargs):
+        """Init.
+           name: name to give controller
+           ip: the IP address where the remote controller is
+           listening
+           port: the port where the remote controller is listening"""
+        Controller.__init__( self, name, ip=ip, port=port, **kwargs )
+
+    def start( self ):
+        "Overridden to do nothing."
+        return
+
+    def stop( self ):
+        "Overridden to do nothing."
+        return
+
+    def checkListening( self ):
+        "Warn if remote controller is not accessible"
+        listening = self.cmd( "echo A | telnet -e A %s %d" %
+                              ( self.ip, self.port ) )
+        if 'Unable' in listening:
+            warn( "Unable to contact the remote controller"
+                  " at %s:%d\n" % ( self.ip, self.port ) )
+
+class SDNTopo( Topo ):
+    "SDN Topology"
+
+    def __init__( self, *args, **kwargs ):
+        Topo.__init__( self, *args, **kwargs )
+        sw1 = self.addSwitch('sw1', dpid='00:00:00:16:97:08:9a:46')
+        sw2 = self.addSwitch('sw2', dpid='00:00:00:00:ba:5e:ba:11')
+        sw3 = self.addSwitch('sw3', dpid='00:00:00:08:a2:08:f9:01')
+        sw4 = self.addSwitch('sw4', dpid='00:00:00:00:00:00:ba:12')
+        sw5 = self.addSwitch('sw5', dpid='00:00:00:00:ba:5e:ba:13')
+        sw6 = self.addSwitch('sw6', dpid='00:00:20:4e:7f:51:8a:35')
+
+        host1 = self.addHost( 'host1' )
+        host2 = self.addHost( 'host2' )
+        host3 = self.addHost( 'host3' )
+        host4 = self.addHost( 'host4' )
+        host5 = self.addHost( 'host5' )
+        host6 = self.addHost( 'host6' )
+
+        root1 = self.addHost( 'root1', inNamespace=False )
+        root2 = self.addHost( 'root2', inNamespace=False )
+        root3 = self.addHost( 'root3', inNamespace=False )
+        root4 = self.addHost( 'root4', inNamespace=False )
+        root5 = self.addHost( 'root5', inNamespace=False )
+        root6 = self.addHost( 'root6', inNamespace=False )
+
+
+        self.addLink( host1, sw1 )
+        self.addLink( host2, sw2 )
+        self.addLink( host3, sw3 )
+        self.addLink( host4, sw4 )
+        self.addLink( host5, sw5 )
+        self.addLink( host6, sw6 )
+
+
+        self.addLink( sw1, sw2 )
+        self.addLink( sw1, sw6 )
+        self.addLink( sw2, sw3 )
+        self.addLink( sw3, sw4 )
+        self.addLink( sw3, sw6 )
+        self.addLink( sw4, sw5 )
+        self.addLink( sw5, sw6 )
+
+        self.addLink( root1, host1 )
+        self.addLink( root2, host2 )
+        self.addLink( root3, host3 )
+        self.addLink( root4, host4 )
+        self.addLink( root5, host5 )
+        self.addLink( root6, host6 )
+
+def startsshd( host ):
+    "Start sshd on host"
+    info( '*** Starting sshd\n' )
+    name, intf, ip = host.name, host.defaultIntf(), host.IP()
+    banner = '/tmp/%s.banner' % name
+    host.cmd( 'echo "Welcome to %s at %s" >  %s' % ( name, ip, banner ) )
+    host.cmd( '/usr/sbin/sshd -o "Banner %s"' % banner, '-o "UseDNS no"' )
+    info( '***', host.name, 'is running sshd on', intf, 'at', ip, '\n' )
+
+def startsshds ( hosts ):
+    for h in hosts:
+        startsshd( h )
+
+def stopsshd( ):
+    "Stop *all* sshd processes with a custom banner"
+    info( '*** Shutting down stale sshd/Banner processes ',
+          quietRun( "pkill -9 -f Banner" ), '\n' )
+
+def sdnnet(opt):
+#    os.system('/home/ubuntu/openflow/controller/controller ptcp: &')
+#    os.system('/home/ubuntu/openflow/controller/controller ptcp:7000 &')
+
+    topo = SDNTopo()
+    info( '*** Creating network\n' )
+#    net = Mininet( topo=topo, controller=RemoteController )
+    net = Mininet( topo=topo, controller=MyController, link=TCLink)
+#    dc = DebugController('c3', ip='127.0.0.1', port=7000)
+#    net.addController(dc)
+#    net.addController(controller=RemoteController)
+
+    host1, host2, host3, host4, host5, host6 = net.get( 'host1', 'host2', 'host3', 'host4', 'host5', 'host6')
+
+    ## Adding 2nd, 3rd and 4th interface to host1 connected to sw1 (for another BGP peering)
+    sw1 = net.get('sw1')
+    sw2 = net.get('sw2')
+    sw3 = net.get('sw3')
+    sw4 = net.get('sw4')
+    sw5 = net.get('sw5')
+    sw6 = net.get('sw6')
+
+    net.start()
+
+    sw2.attach('tap01_2')
+    sw3.attach('tap01_3')
+    sw4.attach('tap01_4')
+    sw4.attach('tap01_5')
+    sw5.attach('tap01_6')
+    sw6.attach('tap01_7')
+    sw1.attach('tap01_8')
+
+    host1.defaultIntf().setIP('192.168.100.141/16') 
+    host2.defaultIntf().setIP('192.168.100.142/16')
+    host3.defaultIntf().setIP('192.168.100.143/16')
+    host4.defaultIntf().setIP('192.168.100.144/16')
+    host5.defaultIntf().setIP('192.168.100.145/16')
+    host6.defaultIntf().setIP('192.168.100.146/16')
+
+    root1, root2, root3, root4, root5, root6  = net.get( 'root1', 'root2', 'root3', 'root4', 'root5', 'root6' )
+    host1.intf('host1-eth1').setIP('1.1.1.1/24')
+    root1.intf('root1-eth0').setIP('1.1.1.2/24')
+
+    host2.intf('host2-eth1') .setIP('1.1.2.1/24')
+    root2.intf('root2-eth0').setIP('1.1.2.2/24')
+
+    host3.intf('host3-eth1') .setIP('1.1.3.1/24')
+    root3.intf('root3-eth0').setIP('1.1.3.2/24')
+
+    host4.intf('host4-eth1') .setIP('1.1.4.1/24')
+    root4.intf('root4-eth0').setIP('1.1.4.2/24')
+
+    host5.intf('host5-eth1') .setIP('1.1.5.1/24')
+    root5.intf('root5-eth0').setIP('1.1.5.2/24')
+
+    host6.intf('host6-eth1') .setIP('1.1.6.1/24')
+    root6.intf('root6-eth0').setIP('1.1.6.2/24')
+
+    hosts = [ host1, host2, host3, host4, host5, host6 ]
+    stopsshd ()
+    startsshds ( hosts )
+
+    if opt=="cli":
+        CLI(net)
+        stopsshd()
+        net.stop()
+
+if __name__ == '__main__':
+    setLogLevel( 'info' )
+    if len(sys.argv) == 1:
+      sdnnet("cli")
+    elif len(sys.argv) == 2 and sys.argv[1] == "-n":
+      sdnnet("nocli")
+    else:
+      print "%s [-n]" % sys.argv[0]
diff --git a/cluster-mgmt/template/onsdemo_edge_template.py b/cluster-mgmt/template/onsdemo_edge_template.py
new file mode 100755
index 0000000..a1ac11c
--- /dev/null
+++ b/cluster-mgmt/template/onsdemo_edge_template.py
@@ -0,0 +1,155 @@
+#!/usr/bin/python
+NWID=__NWID__
+NR_NODES=__NRSW__
+Controllers=[{"ip":"127.0.0.1", "port":6633}]
+
+"""
+Start up a Simple topology
+"""
+from mininet.net import Mininet
+from mininet.node import Controller, RemoteController
+from mininet.log import setLogLevel, info, error, warn, debug
+from mininet.cli import CLI
+from mininet.topo import Topo
+from mininet.util import quietRun
+from mininet.moduledeps import pathCheck
+from mininet.link import Link, TCLink
+
+from sys import exit
+import os.path
+from subprocess import Popen, STDOUT, PIPE
+
+import sys
+
+#import argparse
+
+class MyController( Controller ):
+    def __init__( self, name, ip='127.0.0.1', port=6633, **kwargs):
+        """Init.
+           name: name to give controller
+           ip: the IP address where the remote controller is
+           listening
+           port: the port where the remote controller is listening"""
+        Controller.__init__( self, name, ip=ip, port=port, **kwargs )
+
+    def start( self ):
+        "Overridden to do nothing."
+        return
+
+    def stop( self ):
+        "Overridden to do nothing."
+        return
+
+    def checkListening( self ):
+        "Warn if remote controller is not accessible"
+        listening = self.cmd( "echo A | telnet -e A %s %d" %
+                              ( self.ip, self.port ) )
+        if 'Unable' in listening:
+            warn( "Unable to contact the remote controller"
+                  " at %s:%d\n" % ( self.ip, self.port ) )
+
+class SDNTopo( Topo ):
+    "SDN Topology"
+
+    def __init__( self, *args, **kwargs ):
+        Topo.__init__( self, *args, **kwargs )
+
+        switch = []
+        host = []
+        root = []
+
+        for i in range (NR_NODES):
+            name_suffix = '%02d' % NWID + "." + '%02d' % (int(i)+1)
+            dpid_suffix = '%02x' % NWID + '%02x' % (int(i)+1)
+            dpid = '0000' + '0000' + '0000' + dpid_suffix
+            sw = self.addSwitch('sw'+name_suffix, dpid=dpid)
+            switch.append(sw)
+
+        for i in range (NR_NODES):
+            host.append(self.addHost( 'host%d' % (int(i)+1) ))
+            root.append(self.addHost( 'root%d' % (int(i)+1), inNamespace=False ))
+
+        for i in range (NR_NODES):
+            self.addLink(host[i], switch[i])
+
+        for i in range (1, NR_NODES):
+            self.addLink(switch[0], switch[i])
+
+        for i in range (NR_NODES):
+            self.addLink(root[i], host[i])
+
+def startsshd( host ):
+    "Start sshd on host"
+    info( '*** Starting sshd\n' )
+    name, intf, ip = host.name, host.defaultIntf(), host.IP()
+    banner = '/tmp/%s.banner' % name
+    host.cmd( 'echo "Welcome to %s at %s" >  %s' % ( name, ip, banner ) )
+    host.cmd( '/usr/sbin/sshd -o "Banner %s"' % banner, '-o "UseDNS no"' )
+    info( '***', host.name, 'is running sshd on', intf, 'at', ip, '\n' )
+
+def startsshds ( hosts ):
+    for h in hosts:
+        startsshd( h )
+
+def stopsshd( ):
+    "Stop *all* sshd processes with a custom banner"
+    info( '*** Shutting down stale sshd/Banner processes ',
+          quietRun( "pkill -9 -f Banner" ), '\n' )
+
+def sdnnet(opt):
+    topo = SDNTopo()
+    info( '*** Creating network\n' )
+    net = Mininet( topo=topo, controller=MyController, link=TCLink)
+    #net = Mininet( topo=topo, link=TCLink, build=False)
+    #controllers=[]
+    #for c in Controllers:
+    #  rc = RemoteController('c%d' % Controllers.index(c), ip=c['ip'],port=c['port'])
+    #  print "controller ip %s port %s" % (c['ip'], c['port'])
+    #  controllers.append(rc)
+
+    #net.controllers=controllers
+    net.build()
+
+    host = []
+    for i in range (NR_NODES):
+      host.append(net.get( 'host%d' % (int(i)+1) ))
+
+    net.start()
+
+    sw=net.get('sw%02x.%02x' % (NWID,1))
+    print "center sw", sw
+    sw.attach('tap%02x_1' % NWID)
+
+    for i in range (NR_NODES):
+        host[i].defaultIntf().setIP('192.168.%d.%d/16' % (NWID,(int(i)+1))) 
+        host[i].defaultIntf().setMAC('00:00:%02x:%02x:%02x:%02x' % (192,168,NWID,(int(i)+1))) 
+
+    for i in range (NR_NODES):
+       for n in range (1,8):
+         for h in range (25):
+           host[i].setARP('192.168.%d.%d' % (n, (int(h)+1)), '00:00:%02x:%02x:%02x:%02x' % (192,168,n,(int(h)+1))) 
+
+    root = []
+    for i in range (NR_NODES):
+        root.append(net.get( 'root%d' % (int(i)+1) ))
+
+    for i in range (NR_NODES):
+        host[i].intf('host%d-eth1' % (int(i)+1)).setIP('1.1.%d.1/24' % (int(i)+1))
+        root[i].intf('root%d-eth0' % (int(i)+1)).setIP('1.1.%d.2/24' % (int(i)+1))
+
+    stopsshd ()
+    startsshds ( host )
+
+    if opt=="cli":
+        CLI(net)
+        stopsshd()
+        net.stop()
+
+if __name__ == '__main__':
+    setLogLevel( 'info' )
+    if len(sys.argv) == 1:
+      sdnnet("cli")
+    elif len(sys.argv) == 2 and sys.argv[1] == "-n":
+      sdnnet("nocli")
+    else:
+      print "%s [-n]" % sys.argv[0]
diff --git a/cluster-mgmt/template/tunnel_onsdemo_core_template.sh b/cluster-mgmt/template/tunnel_onsdemo_core_template.sh
new file mode 100755
index 0000000..d697c6c
--- /dev/null
+++ b/cluster-mgmt/template/tunnel_onsdemo_core_template.sh
@@ -0,0 +1,51 @@
+#!/bin/bash
+
+TUNNEL=( "2 2 __basename__2" "3 3 __basename__3" "4 4 __basename__4" "5 5 __basename__5" "6 6 __basename__6" "7 7 __basename__7" "8 8 __basename__8")
+NW_ID=01
+
+start () {
+  ## Modify ##
+  ulimit -c
+  for (( i = 0; i< ${#TUNNEL[@]}; i ++)); do
+    t=`echo ${TUNNEL[$i]}`
+    ifnr=`echo $t | awk '{print $1}'`
+    tun_tag=`echo $t | awk '{print $2}'`
+    tun_end_=`echo $t | awk '{print $3}'`
+    tun_end=`eval echo '$'$tun_end_`
+    echo "ifconfig tap${NW_ID}_${ifnr}"
+    ifconfig tap${NW_ID}_${ifnr}
+    if [ $? -ne 0 ]; then
+      echo "creating tap${NW_ID}_${ifnr}"
+      sudo tunctl -t tap${NW_ID}_${ifnr} 
+    fi
+    echo "./capsulator -v -d -t eth0 -f ${tun_end} -vb tap${NW_ID}_${ifnr}#${tun_tag}"
+    sudo ifconfig tap${NW_ID}_${ifnr} 0.0.0.0 up > /dev/null 2>&1
+    sudo ./capsulator -v -d -t eth0 -f ${tun_end} -vb tap${NW_ID}_${ifnr}#${tun_tag}  > /dev/null 2>&1 &
+  done
+}
+
+stop () {
+  sudo pkill capsulator
+}
+
+case "$1" in
+  start | restart)
+    stop
+    start
+    ;;
+  stop)
+    stop
+    ;;
+  status)
+    nr=`pgrep capsulator | wc -l`
+    if [ $nr -gt 0 ]; then
+      echo "$nr tunnel(s) is running"
+    else
+      echo "tunnel is not running"
+    fi
+    ;;
+  *)
+    echo "Usage: $0 {start|stop|restart|status}" >&2
+    exit 1
+    ;;
+esac
diff --git a/cluster-mgmt/template/tunnel_onsdemo_edge_template.sh b/cluster-mgmt/template/tunnel_onsdemo_edge_template.sh
new file mode 100755
index 0000000..bbd7274
--- /dev/null
+++ b/cluster-mgmt/template/tunnel_onsdemo_edge_template.sh
@@ -0,0 +1,52 @@
+#!/bin/bash
+
+#TUNNEL=("0 1 VIP_ONOS10" "1 2 VIP_ONOS10") 
+__TUNNEL__
+NW_ID=0__NWID__
+
+start () {
+  ## Modify ##
+  ulimit -c
+  for (( i = 0; i< ${#TUNNEL[@]}; i ++)); do
+    t=`echo ${TUNNEL[$i]}`
+    ifnr=`echo $t | awk '{print $1}'`
+    tun_tag=`echo $t | awk '{print $2}'`
+    tun_end_=`echo $t | awk '{print $3}'`
+    tun_end=`eval echo '$'$tun_end_`
+    echo "ifconfig tap${NW_ID}_${ifnr}"
+    ifconfig tap${NW_ID}_${ifnr}
+    if [ $? -ne 0 ]; then
+      echo "creating tap${NW_ID}_${ifnr}"
+      sudo tunctl -t tap${NW_ID}_${ifnr} 
+    fi
+    echo "./capsulator -v -d -t eth0 -f ${tun_end} -vb tap${NW_ID}_${ifnr}#${tun_tag}"
+    sudo ifconfig tap${NW_ID}_${ifnr} 0.0.0.0 up > /dev/null 2>&1
+    sudo ./capsulator -v -d -t eth0 -f ${tun_end} -vb tap${NW_ID}_${ifnr}#${tun_tag}  > /dev/null 2>&1 &
+  done
+}
+
+stop () {
+  sudo pkill capsulator
+}
+
+case "$1" in
+  start | restart)
+    stop
+    start
+    ;;
+  stop)
+    stop
+    ;;
+  status)
+    nr=`pgrep capsulator | wc -l`
+    if [ $nr -gt 0 ]; then
+      echo "$nr tunnel(s) is running"
+    else
+      echo "tunnel is not running"
+    fi
+    ;;
+  *)
+    echo "Usage: $0 {start|stop|restart|status}" >&2
+    exit 1
+    ;;
+esac
diff --git a/cluster-mgmt/template/zoo.cfg b/cluster-mgmt/template/zoo.cfg
new file mode 100644
index 0000000..e1ab8c3
--- /dev/null
+++ b/cluster-mgmt/template/zoo.cfg
@@ -0,0 +1,41 @@
+# The number of milliseconds of each tick
+tickTime=2000
+# The number of ticks that the initial 
+# synchronization phase can take
+initLimit=10
+# The number of ticks that can pass between 
+# sending a request and getting an acknowledgement
+syncLimit=5
+# the directory where the snapshot is stored.
+# do not use /tmp for storage, /tmp here is just 
+# example sakes.
+dataDir=/var/lib/zookeeper
+# the port at which the clients will connect
+clientPort=2181
+#
+# specify all servers in the Zookeeper ensemble
+
+#server.1=onosgui1:2888:3888
+#server.2=onosgui2:2888:3888
+#server.3=onosgui3:2888:3888
+#server.4=onosgui4:2888:3888
+#server.5=onosgui5:2888:3888
+#server.6=onosgui6:2888:3888
+#server.7=onosgui7:2888:3888
+#server.8=onosgui8:2888:3888
+#
+#
+# Be sure to read the maintenance section of the 
+# administrator guide before turning on autopurge.
+#
+#
+# Be sure to read the maintenance section of the 
+# administrator guide before turning on autopurge.
+#
+# http://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_maintenance
+#
+# The number of snapshots to retain in dataDir
+#autopurge.snapRetainCount=3
+# Purge task interval in hours
+# Set to "0" to disable auto purge feature
+#autopurge.purgeInterval=1