Merge branch 'master' of https://github.com/OPENNETWORKINGLAB/ONOS
diff --git a/cleanup-cassandra.sh b/cleanup-cassandra.sh
new file mode 100755
index 0000000..2844c1a
--- /dev/null
+++ b/cleanup-cassandra.sh
@@ -0,0 +1,4 @@
+#! /bin/bash
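+# Clean up ONOS state in the local Titan/Cassandra database by running the cleanup-onos-db Gremlin script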
+DIR=~/ONOS
+~/titan-0.2.0/bin/gremlin.sh -e "$DIR/cleanup-onos-db"
diff --git a/cleanup-onos-db b/cleanup-onos-db
new file mode 100644
index 0000000..8949fea
--- /dev/null
+++ b/cleanup-onos-db
@@ -0,0 +1,7 @@
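+// Remove all ONOS-created vertices (ports, switches, flows, flow entries) from the Titan graph and commit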
+g=TitanFactory.open('/tmp/cassandra.titan')
+g.V('type','port').each{g.removeVertex(it)}
+g.V('type','switch').each{g.removeVertex(it)}
+g.V('type','flow').each{g.removeVertex(it)}
+g.V('type','flow_entry').each{g.removeVertex(it)}
+g.stopTransaction(SUCCESS)
diff --git a/src/main/java/net/floodlightcontroller/flowcache/FlowManager.java b/src/main/java/net/floodlightcontroller/flowcache/FlowManager.java
index 3f74ff1..d55979e 100644
--- a/src/main/java/net/floodlightcontroller/flowcache/FlowManager.java
+++ b/src/main/java/net/floodlightcontroller/flowcache/FlowManager.java
@@ -101,12 +101,12 @@
 		for (IFlowEntry flowEntryObj : allFlowEntries) {
 		    FlowEntryId flowEntryId =
 			new FlowEntryId(flowEntryObj.getFlowEntryId());
-		    String userState = flowEntryObj.getUserState();
-		    String switchState = flowEntryObj.getSwitchState();
+		    String userState = flowEntryObj.getUserState();
+		    String switchState = flowEntryObj.getSwitchState();
 
-		    log.debug("Found Flow Entry {}: ", flowEntryId.toString());
-		    log.debug("User State {}:", userState);
-		    log.debug("Switch State {}:", switchState);
+		    log.debug("Found Flow Entry {}: User State: {} Switch State: {}",
+			      flowEntryId.toString(),
+			      userState, switchState);
 
 		    if (! switchState.equals("FE_SWITCH_NOT_UPDATED")) {
 			// Ignore the entry: nothing to do
@@ -116,7 +116,7 @@
 		    Dpid dpid = new Dpid(flowEntryObj.getSwitchDpid());
 		    IOFSwitch mySwitch = mySwitches.get(dpid.value());
 		    if (mySwitch == null) {
-			log.debug("Flow Entry ignored: not my switch");
+			log.debug("Flow Entry ignored: not my switch (FlowEntryId = {} DPID = {})", flowEntryId.toString(), dpid.toString());
 			continue;
 		    }
 		    myFlowEntries.put(flowEntryId.value(), flowEntryObj);
@@ -371,8 +371,10 @@
 	    log.error(":addFlow FlowId:{} failed",
 		      flowPath.flowId().toString());
 	}
-	if (flowObj == null)
+	if (flowObj == null) {
+	    conn.endTx(Transaction.COMMIT);
 	    return false;
+	}
 
 	//
 	// Set the Flow key:
@@ -420,8 +422,10 @@
 		log.error(":addFlow FlowEntryId:{} failed",
 			  flowEntry.flowEntryId().toString());
 	    }
-	    if (flowEntryObj == null)
+	    if (flowEntryObj == null) {
+		conn.endTx(Transaction.COMMIT);
 		return false;
+	    }
 
 	    //
 	    // Set the Flow Entry key:
@@ -490,6 +494,7 @@
 	// TODO: We need a proper Flow ID allocation mechanism.
 	//
 	flowId.setValue(flowPath.flowId().value());
+
 	return true;
     }
 
@@ -521,8 +526,10 @@
 	    conn.endTx(Transaction.ROLLBACK);
 	    log.error(":deleteFlow FlowId:{} failed", flowId.toString());
 	}
-	if (flowObj == null)
+	if (flowObj == null) {
+	    conn.endTx(Transaction.COMMIT);
 	    return true;		// OK: No such flow
+	}
 
 	//
 	// Find and mark for deletion all Flow Entries
@@ -567,8 +574,10 @@
 	    conn.endTx(Transaction.ROLLBACK);
 	    log.error(":clearFlow FlowId:{} failed", flowId.toString());
 	}
-	if (flowObj == null)
+	if (flowObj == null) {
+	    conn.endTx(Transaction.COMMIT);
 	    return true;		// OK: No such flow
+	}
 
 	//
 	// Remove all Flow Entries
@@ -608,8 +617,10 @@
 	    conn.endTx(Transaction.ROLLBACK);
 	    log.error(":getFlow FlowId:{} failed", flowId.toString());
 	}
-	if (flowObj == null)
+	if (flowObj == null) {
+	    conn.endTx(Transaction.COMMIT);
 	    return null;		// Flow not found
+	}
 
 	//
 	// Extract the Flow state
@@ -741,8 +752,10 @@
 	    conn.endTx(Transaction.ROLLBACK);
 	    log.error(":getAllFlowPaths failed");
 	}
-	if ((flowPathsObj == null) || (flowPathsObj.iterator().hasNext() == false))
+	if ((flowPathsObj == null) || (flowPathsObj.iterator().hasNext() == false)) {
+	    conn.endTx(Transaction.COMMIT);
 	    return null;	// No Flows found
+	}
 
 	ArrayList<FlowPath> flowPaths = new ArrayList<FlowPath>();
 	for (IFlowPath flowObj : flowPathsObj) {
diff --git a/src/main/java/net/floodlightcontroller/onoslistener/OnosPublisher.java b/src/main/java/net/floodlightcontroller/onoslistener/OnosPublisher.java
index a6931e6..c6fe108 100644
--- a/src/main/java/net/floodlightcontroller/onoslistener/OnosPublisher.java
+++ b/src/main/java/net/floodlightcontroller/onoslistener/OnosPublisher.java
@@ -45,6 +45,7 @@
 	protected IControllerRegistryService registryService;
 	
 	protected static final String DBConfigFile = "dbconf";
+	protected static final String CleanupEnabled = "EnableCleanup";
 	protected IThreadPoolService threadPool;
 	
 	protected final int CLEANUP_TASK_INTERVAL = 999; // 999 ms
@@ -217,11 +218,17 @@
 	@Override
 	public void startUp(FloodlightModuleContext context) {
 		// TODO Auto-generated method stub
-		ScheduledExecutorService ses = threadPool.getScheduledExecutor();
+		Map<String, String> configMap = context.getConfigParams(this);
+		String cleanupNeeded = configMap.get(CleanupEnabled);
+
 		deviceService.addListener(this);
 	       // Setup the Cleanup task. 
-        cleanupTask = new SingletonTask(ses, new SwitchCleanup());
-        cleanupTask.reschedule(CLEANUP_TASK_INTERVAL, TimeUnit.MILLISECONDS);
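+		// Schedule the periodic switch-cleanup task only when the "EnableCleanup" module parameter is set to "True"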
+		if (cleanupNeeded != null && cleanupNeeded.equals("True")) {
+			ScheduledExecutorService ses = threadPool.getScheduledExecutor();
+			cleanupTask = new SingletonTask(ses, new SwitchCleanup());
+			cleanupTask.reschedule(CLEANUP_TASK_INTERVAL, TimeUnit.MILLISECONDS);
+		}
 	}
 
 }
diff --git a/test-network/mininet/net.sprint5.template.py b/test-network/mininet/net.sprint5.template.py
new file mode 100755
index 0000000..52e3184
--- /dev/null
+++ b/test-network/mininet/net.sprint5.template.py
@@ -0,0 +1,164 @@
+#!/usr/bin/python
+
+NWID=3
+NR_NODES=20
+#Controllers=[{"ip":'127.0.0.1', "port":6633}, {"ip":'10.0.1.28', "port":6633}]
+Controllers=[{"ip":'10.0.1.223', "port":6633}]
+
+"""
+Start up a Simple topology
+"""
+from mininet.net import Mininet
+from mininet.node import Controller, RemoteController
+from mininet.log import setLogLevel, info, error, warn, debug
+from mininet.cli import CLI
+from mininet.topo import Topo
+from mininet.util import quietRun
+from mininet.moduledeps import pathCheck
+from mininet.link import Link, TCLink
+
+from sys import exit
+import os.path
+from subprocess import Popen, STDOUT, PIPE
+
+import sys
+
+#import argparse
+
+class MyController( Controller ):
+    def __init__( self, name, ip='127.0.0.1', port=6633, **kwargs):
+        """Init.
+           name: name to give controller
+           ip: the IP address where the remote controller is
+           listening
+           port: the port where the remote controller is listening"""
+        Controller.__init__( self, name, ip=ip, port=port, **kwargs )
+
+    def start( self ):
+        "Overridden to do nothing."
+        return
+
+    def stop( self ):
+        "Overridden to do nothing."
+        return
+
+    def checkListening( self ):
+        "Warn if remote controller is not accessible"
+        listening = self.cmd( "echo A | telnet -e A %s %d" %
+                              ( self.ip, self.port ) )
+        if 'Unable' in listening:
+            warn( "Unable to contact the remote controller"
+                  " at %s:%d\n" % ( self.ip, self.port ) )
+
+class SDNTopo( Topo ):
+    "SDN Topology"
+
+    def __init__( self, *args, **kwargs ):
+        Topo.__init__( self, *args, **kwargs )
+
+        switch = []
+        host = []
+        root = []
+
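+        # One switch per node; the DPID encodes the network ID (NWID) and the node index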
+        for i in range (NR_NODES):
+            name_suffix = '%02d' % NWID + "." + '%02d' % i
+            dpid_suffix = '%02x' % NWID + '%02x' % i
+            dpid = '0000' + '0000' + '0000' + dpid_suffix
+            sw = self.addSwitch('sw'+name_suffix, dpid=dpid)
+            switch.append(sw)
+
+        for i in range (NR_NODES):
+            host.append(self.addHost( 'host%d' % i ))
+
+        for i in range (NR_NODES):
+            root.append(self.addHost( 'root%d' % i, inNamespace=False ))
+
+        for i in range (NR_NODES):
+            self.addLink(host[i], switch[i])
+
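+        # Star topology: connect switch 0 to every other switch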
+        for i in range (1, NR_NODES):
+            self.addLink(switch[0], switch[i])
+
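+        # Pair each host with a root-namespace host; these links carry the management (ssh) traffic set up below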
+        for i in range (NR_NODES):
+            self.addLink(root[i], host[i])
+
+def startsshd( host ):
+    "Start sshd on host"
+    info( '*** Starting sshd\n' )
+    name, intf, ip = host.name, host.defaultIntf(), host.IP()
+    banner = '/tmp/%s.banner' % name
+    host.cmd( 'echo "Welcome to %s at %s" >  %s' % ( name, ip, banner ) )
+    host.cmd( '/usr/sbin/sshd -o "Banner %s"' % banner, '-o "UseDNS no"' )
+    info( '***', host.name, 'is running sshd on', intf, 'at', ip, '\n' )
+
+def startsshds ( hosts ):
+    for h in hosts:
+        startsshd( h )
+
+def stopsshd( ):
+    "Stop *all* sshd processes with a custom banner"
+    info( '*** Shutting down stale sshd/Banner processes ',
+          quietRun( "pkill -9 -f Banner" ), '\n' )
+
+def sdnnet(opt):
+    topo = SDNTopo()
+    info( '*** Creating network\n' )
+    #net = Mininet( topo=topo, controller=MyController, link=TCLink)
+    net = Mininet( topo=topo, link=TCLink, build=False)
+    controllers=[]
+    for c in Controllers:
+      rc = RemoteController('c%d' % Controllers.index(c), ip=c['ip'],port=c['port'])
+      print "controller ip %s port %s" % (c['ip'], c['port'])
+      controllers.append(rc)
+  
+    net.controllers=controllers
+    net.build()
+
+    host = []
+    for i in range (NR_NODES):
+      host.append(net.get( 'host%d' % i ))
+
+    net.start()
+
+    sw=net.get('sw03.00')
+    print "center sw", sw
+    sw.attach('tapc0')
+
+    for i in range (NR_NODES):
+        host[i].defaultIntf().setIP('192.168.%d.%d/16' % (NWID,i)) 
+        host[i].defaultIntf().setMAC('00:00:00:00:%02x:%02x' % (NWID,i)) 
+
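+    # Pre-populate static ARP entries for hosts in networks 0-2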
+    for i in range (NR_NODES):
+       for n in range (3):
+         for h in range (10):
+           host[i].setARP('192.168.%d.%d' % (n, h), '00:00:00:00:%02x:%02x' % (n,h)) 
+
+
+    root = []
+    for i in range (NR_NODES):
+        root.append(net.get( 'root%d' % i ))
+
+    for i in range (NR_NODES):
+        host[i].intf('host%d-eth1' % i).setIP('1.1.%d.1/24' % i)
+        root[i].intf('root%d-eth0' % i).setIP('1.1.%d.2/24' % i)
+
+    stopsshd ()
+    startsshds ( host )
+
+    if opt=="cli":
+        CLI(net)
+        stopsshd()
+        net.stop()
+
+if __name__ == '__main__':
+    setLogLevel( 'info' )
+    if len(sys.argv) == 1:
+      sdnnet("cli")
+    elif len(sys.argv) == 2 and sys.argv[1] == "-n":
+      sdnnet("nocli")
+    else:
+      print "%s [-n]" % sys.argv[0]
diff --git a/web/add_flow.py b/web/add_flow.py
index 56b305a..0440c39 100755
--- a/web/add_flow.py
+++ b/web/add_flow.py
@@ -21,6 +21,7 @@
 ControllerIP = "127.0.0.1"
 ControllerPort = 8080
 MonitoringEnabled = False
+ReadFromFile = ""
 
 DEBUG=0
 pp = pprint.PrettyPrinter(indent=4)
@@ -49,31 +50,33 @@
     result = os.popen(command).read()
     debug("result %s" % result)
     if len(result) == 0:
-	log_error("No Path found")
-	return parsedResult
-
-    parsedResult = json.loads(result)
-    debug("parsed %s" % parsedResult)
+      log_error("No Path found from %s/%s to %s/%s" % (v1, p1, v2, p2))
+    else:
+      parsedResult = json.loads(result)
+      debug("parsed %s" % parsedResult)
 
   except:
-    log_error("Controller IF has issue")
-    exit(1)
+    log_error("Controller IF has issue: No Path found from %s/%s to %s/%s" % (v1, p1, v2, p2))
 
-  srcSwitch = parsedResult['srcPort']['dpid']['value'];
-  srcPort = parsedResult['srcPort']['port']['value'];
-  dstSwitch = parsedResult['dstPort']['dpid']['value'];
-  dstPort = parsedResult['dstPort']['port']['value'];
+  return parsedResult
+
+def print_data_path(data_path):
+  if len(data_path) == 0:
+    return
+
+  srcSwitch = data_path['srcPort']['dpid']['value'];
+  srcPort = data_path['srcPort']['port']['value'];
+  dstSwitch = data_path['dstPort']['dpid']['value'];
+  dstPort = data_path['dstPort']['port']['value'];
 
   print "DataPath: (src = %s/%s dst = %s/%s)" % (srcSwitch, srcPort, dstSwitch, dstPort);
 
-  for f in parsedResult['flowEntries']:
+  for f in data_path['flowEntries']:
     inPort = f['inPort']['value'];
     outPort = f['outPort']['value'];
     dpid = f['dpid']['value']
     print "  FlowEntry: (%s, %s, %s)" % (inPort, dpid, outPort)
 
-  return parsedResult
-
 def add_flow_path(flow_path):
   flow_path_json = json.dumps(flow_path)
 
@@ -370,7 +373,10 @@
 if __name__ == "__main__":
   usage_msg = "Usage: %s [Flags] <flow-id> <installer-id> <src-dpid> <src-port> <dest-dpid> <dest-port> [Match Conditions] [Actions]\n" % (sys.argv[0])
   usage_msg = usage_msg + "    Flags:\n"
-  usage_msg = usage_msg + "        -m        Monitor and maintain the installed shortest path\n"
+  usage_msg = usage_msg + "        -m              Monitor and maintain the installed shortest path(s)\n"
+  usage_msg = usage_msg + "        -f <filename>   Read the flow(s) to install from a file\n"
+  usage_msg = usage_msg + "                        File format: one line per flow starting with <flow-id>\n"
+  usage_msg = usage_msg + "\n"
   usage_msg = usage_msg + "    Match Conditions:\n"
   usage_msg = usage_msg + "        matchInPort <True|False> (default to True)\n"
   usage_msg = usage_msg + "        matchSrcMac <source MAC address>\n"
@@ -378,7 +384,7 @@
   usage_msg = usage_msg + "        matchSrcIPv4Net <source IPv4 network address>\n"
   usage_msg = usage_msg + "        matchDstIPv4Net <destination IPv4 network address>\n"
   usage_msg = usage_msg + "        matchEthernetFrameType <Ethernet frame type>\n"
-
+  usage_msg = usage_msg + "\n"
   usage_msg = usage_msg + "    Match Conditions (not implemented yet):\n"
   usage_msg = usage_msg + "        matchVlanId <VLAN ID>\n"
   usage_msg = usage_msg + "        matchVlanPriority <VLAN priority>\n"
@@ -386,12 +392,14 @@
   usage_msg = usage_msg + "        matchIpProto <IP protocol>\n"
   usage_msg = usage_msg + "        matchSrcTcpUdpPort <source TCP/UDP port>\n"
   usage_msg = usage_msg + "        matchDstTcpUdpPort <destination TCP/UDP port>\n"
+  usage_msg = usage_msg + "\n"
   usage_msg = usage_msg + "    Actions:\n"
   usage_msg = usage_msg + "        actionOutput <True|False> (default to True)\n"
   usage_msg = usage_msg + "        actionSetEthernetSrcAddr <source MAC address>\n"
   usage_msg = usage_msg + "        actionSetEthernetDstAddr <destination MAC address>\n"
   usage_msg = usage_msg + "        actionSetIPv4SrcAddr <source IPv4 address>\n"
   usage_msg = usage_msg + "        actionSetIPv4DstAddr <destination IPv4 address>\n"
+  usage_msg = usage_msg + "\n"
   usage_msg = usage_msg + "    Actions (not implemented yet):\n"
   usage_msg = usage_msg + "        actionSetVlanId <VLAN ID>\n"
   usage_msg = usage_msg + "        actionSetVlanPriority <VLAN priority>\n"
@@ -412,30 +420,78 @@
   # Check the flags
   #
   start_argv_index = 1
-  if len(sys.argv) > 1 and sys.argv[1] == "-m":
-    MonitoringEnabled = True
-    start_argv_index = start_argv_index + 1
+  idx = 1
+  while idx < len(sys.argv):
+    arg1 = sys.argv[idx]
+    idx = idx + 1
+    if arg1 == "-m":
+      MonitoringEnabled = True
+      start_argv_index = idx
+    elif arg1 == "-f":
+      if idx >= len(sys.argv):
+	error_arg = "ERROR: Missing or invalid '" + arg1 + "' argument"
+	log_error(error_arg)
+	log_error(usage_msg)
+	exit(1)
+      ReadFromFile = sys.argv[idx]
+      idx = idx + 1
+      start_argv_index = idx
+    else:
+      break
 
   #
-  # Parse the remaining arguments
+  # Read the arguments from a file or from the remaining command line options
   #
-  my_args = sys.argv[start_argv_index:]
-  parsed_args = copy.deepcopy(extract_flow_args(my_args))
+  my_lines = []
+  if len(ReadFromFile) > 0:
+    f = open(ReadFromFile, "rt")
+    my_line = f.readline()
+    while my_line:
+      if len(my_line.rstrip()) > 0 and my_line[0] != "#":
+	my_token_line = my_line.rstrip().split()
+	my_lines.append(my_token_line)
+      my_line = f.readline()
+  else:
+    my_lines.append(copy.deepcopy(sys.argv[start_argv_index:]))
 
-  last_data_path = []
-  my_flow_id = parsed_args['my_flow_id']
-  # Cleanup leftover state
-  delete_flow_path(my_flow_id)
+  #
+  # Initialization
+  #
+  last_data_paths = []
+  parsed_args = []
+  idx = 0
+  while idx < len(my_lines):
+    last_data_path = []
+    last_data_paths.append(copy.deepcopy(last_data_path))
+    #
+    # Parse the flow arguments
+    #
+    my_args = my_lines[idx]
+    parsed_args.append(copy.deepcopy(extract_flow_args(my_args)))
+    # Cleanup leftover state
+    my_flow_id = parsed_args[idx]['my_flow_id']
+    delete_flow_path(my_flow_id)
 
+    idx = idx + 1
+
+  #
+  # Do the work: install and/or periodically monitor each flow
+  #
   while True:
-    data_path = compute_data_path(parsed_args)
-    if data_path != last_data_path:
-      if len(last_data_path) > 0:
-	delete_flow_path(my_flow_id)
-      if len(data_path) > 0:
-	flow_path = compute_flow_path(parsed_args, data_path)
-	add_flow_path(flow_path)
-      last_data_path = data_path
+    idx = 0
+    while idx < len(parsed_args):
+      last_data_path = last_data_paths[idx]
+      my_flow_id = parsed_args[idx]['my_flow_id']
+      data_path = compute_data_path(parsed_args[idx])
+      if data_path != last_data_path:
+	print_data_path(data_path)
+	if len(last_data_path) > 0:
+	  delete_flow_path(my_flow_id)
+	if len(data_path) > 0:
+	  flow_path = compute_flow_path(parsed_args[idx], data_path)
+	  add_flow_path(flow_path)
+	last_data_paths[idx] = copy.deepcopy(data_path)
+      idx = idx + 1
 
     if MonitoringEnabled != True:
       break
diff --git a/web/e2eflow.sh b/web/e2eflow.sh
new file mode 100755
index 0000000..e0454ce
--- /dev/null
+++ b/web/e2eflow.sh
@@ -0,0 +1,5 @@
+#! /bin/sh
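+# Install and monitor two end-to-end flows between host0101 and host0301, one in each direction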
+./add_flow.py -m 1 host0101-host0301 00:00:00:00:00:00:01:01 1 00:00:00:00:00:00:03:01 1 matchSrcMac 00:00:00:00:01:01 matchDstMac 00:00:00:00:03:01 > /dev/null 2>&1 &
+./add_flow.py -m 2 host0301-host0101 00:00:00:00:00:00:03:01 1 00:00:00:00:00:00:01:01 1 matchSrcMac 00:00:00:00:03:01 matchDstMac 00:00:00:00:01:01 > /dev/null 2>&1 &
+#./get_flow.py all
diff --git a/web/get_flow.py b/web/get_flow.py
index fdaa10b..5704457 100755
--- a/web/get_flow.py
+++ b/web/get_flow.py
@@ -44,12 +44,13 @@
   print "FlowPath: (flowId = %s installerId = %s src = %s/%s dst = %s/%s)" % (flowId, installerId, srcSwitch, srcPort, dstSwitch, dstPort)
 
   for f in parsedResult['dataPath']['flowEntries']:
+    flowEntryId = f['flowEntryId']
     dpid = f['dpid']['value']
     userState = f['flowEntryUserState']
     switchState = f['flowEntrySwitchState']
     match = f['flowEntryMatch'];
     actions = f['flowEntryActions']
-    print "  FlowEntry: (%s, %s, %s)" % (dpid, userState, switchState)
+    print "  FlowEntry: (%s, %s, %s, %s)" % (flowEntryId, dpid, userState, switchState)
 
     #
     # Print the match conditions
@@ -161,8 +162,8 @@
     result = os.popen(command).read()
     debug("result %s" % result)
     if len(result) == 0:
-	print "No Flow found"
-	return;
+      print "No Flow found"
+      return;
 
     parsedResult = json.loads(result)
     debug("parsed %s" % parsedResult)
diff --git a/web/rest-test.sh b/web/rest-test.sh
index a54e1e2..241915a 100755
--- a/web/rest-test.sh
+++ b/web/rest-test.sh
@@ -1,7 +1,13 @@
 #! /bin/sh
 rm -f rest.json
 touch rest.json
-curl -s 'http://localhost:8080/wm/core/topology/switches/all/json' | python -m json.tool >> rest.json
-curl -s 'http://localhost:8080/wm/core/topology/links/json' | python -m json.tool >> rest.json
-curl -s 'http://localhost:8080/wm/registry/controllers/json' | python -m json.tool >> rest.json
-curl -s 'http://localhost:8080/wm/registry/switches/json' | python -m json.tool >> rest.json
+
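+# Query each topology/registry REST endpoint and append the labelled JSON output to rest.json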
+urls="http://localhost:8080/wm/core/topology/switches/all/json http://localhost:8080/wm/core/topology/links/json http://localhost:8080/wm/registry/controllers/json http://localhost:8080/wm/registry/switches/json"
+
+for url in $urls; do
+  echo "---REST CALL---" >> rest.json
+  echo "curl -s $url" >> rest.json
+  echo "---Result---" >> rest.json
+  curl -s $url | python -m json.tool >> rest.json
+done