Merge branch 'master' of ssh://gerrit.onlab.us:29418/onos-next
Conflicts:
core/net/src/main/java/org/onlab/onos/cluster/impl/MastershipManager.java
core/net/src/main/java/org/onlab/onos/net/device/impl/DeviceManager.java
core/store/hz/cluster/src/main/java/org/onlab/onos/store/cluster/impl/DistributedMastershipStore.java
Change-Id: I6a8b756fc20968e18ea3fd145e155d6282cea945
diff --git a/apps/foo/pom.xml b/apps/foo/pom.xml
index 860d70b..868b992 100644
--- a/apps/foo/pom.xml
+++ b/apps/foo/pom.xml
@@ -28,10 +28,6 @@
<version>${project.version}</version>
</dependency>
<dependency>
- <groupId>org.livetribe.slp</groupId>
- <artifactId>livetribe-slp</artifactId>
- </dependency>
- <dependency>
<groupId>org.apache.karaf.shell</groupId>
<artifactId>org.apache.karaf.shell.console</artifactId>
</dependency>
diff --git a/apps/foo/src/main/java/org/onlab/onos/foo/IOLoopTestClient.java b/apps/foo/src/main/java/org/onlab/onos/foo/IOLoopTestClient.java
index 3ec8c07..302a0c7 100644
--- a/apps/foo/src/main/java/org/onlab/onos/foo/IOLoopTestClient.java
+++ b/apps/foo/src/main/java/org/onlab/onos/foo/IOLoopTestClient.java
@@ -233,7 +233,7 @@
}
@Override
- protected void connect(SelectionKey key) {
+ protected void connect(SelectionKey key) throws IOException {
super.connect(key);
TestMessageStream b = (TestMessageStream) key.attachment();
Worker w = ((CustomIOLoop) b.loop()).worker;
diff --git a/apps/foo/src/main/java/org/onlab/onos/foo/package-info.java b/apps/foo/src/main/java/org/onlab/onos/foo/package-info.java
index 6372772..af9506e 100644
--- a/apps/foo/src/main/java/org/onlab/onos/foo/package-info.java
+++ b/apps/foo/src/main/java/org/onlab/onos/foo/package-info.java
@@ -1,4 +1,4 @@
/**
* Sample application for use in various experiments.
*/
-package org.onlab.onos.foo;
\ No newline at end of file
+package org.onlab.onos.foo;
diff --git a/apps/foo/src/main/resources/org/onlab/onos/foo/FooComponent.properties b/apps/foo/src/main/resources/org/onlab/onos/foo/FooComponent.properties
new file mode 100644
index 0000000..eed1e38
--- /dev/null
+++ b/apps/foo/src/main/resources/org/onlab/onos/foo/FooComponent.properties
@@ -0,0 +1,34 @@
+livetribe.slp.da.expired.services.purge.period=60
+livetribe.slp.sa.client.connect.address=127.0.0.1
+livetribe.slp.sa.client.factory=org.livetribe.slp.sa.StandardServiceAgentClient$Factory
+livetribe.slp.sa.factory=org.livetribe.slp.sa.StandardServiceAgent$Factory
+livetribe.slp.sa.service.renewal.enabled=true
+livetribe.slp.sa.unicast.prefer.tcp=false
+livetribe.slp.tcp.connector.factory=org.livetribe.slp.spi.net.SocketTCPConnector$Factory
+livetribe.slp.tcp.connector.server.factory=org.livetribe.slp.spi.net.SocketTCPConnectorServer$Factory
+livetribe.slp.tcp.message.max.length=4096
+livetribe.slp.tcp.read.timeout=300000
+livetribe.slp.ua.client.factory=org.livetribe.slp.ua.StandardUserAgentClient$Factory
+livetribe.slp.ua.factory=org.livetribe.slp.ua.StandardUserAgent$Factory
+livetribe.slp.ua.unicast.prefer.tcp=false
+livetribe.slp.udp.connector.factory=org.livetribe.slp.spi.net.SocketUDPConnector$Factory
+livetribe.slp.udp.connector.server.factory=org.livetribe.slp.spi.net.SocketUDPConnectorServer$Factory
+net.slp.DAAddresses=
+net.slp.DAAttributes=
+net.slp.DAHeartBeat=10800
+net.slp.MTU=1400
+net.slp.SAAttributes=
+net.slp.broadcastAddress=255.255.255.255
+net.slp.datagramTimeouts=150,250,400
+net.slp.interfaces=0.0.0.0
+net.slp.isBroadcastOnly=false
+net.slp.locale=en
+net.slp.multicastAddress=239.255.255.253
+net.slp.multicastMaximumWait=15000
+net.slp.multicastTTL=255
+net.slp.multicastTimeouts=150,250,400,600,1000
+net.slp.notificationPort=1847
+net.slp.port=427
+net.slp.useScopes=default
+
+org.onlab.cluster.name = TV-ONOS
diff --git a/cli/src/main/java/org/onlab/onos/cli/NodeAddCommand.java b/cli/src/main/java/org/onlab/onos/cli/NodeAddCommand.java
new file mode 100644
index 0000000..7c9a163
--- /dev/null
+++ b/cli/src/main/java/org/onlab/onos/cli/NodeAddCommand.java
@@ -0,0 +1,34 @@
+package org.onlab.onos.cli;
+
+import org.apache.karaf.shell.commands.Argument;
+import org.apache.karaf.shell.commands.Command;
+import org.onlab.onos.cluster.ClusterAdminService;
+import org.onlab.onos.cluster.NodeId;
+import org.onlab.packet.IpPrefix;
+
+/**
+ * Adds a new controller cluster node.
+ */
+@Command(scope = "onos", name = "add-node",
+ description = "Adds a new controller cluster node")
+public class NodeAddCommand extends AbstractShellCommand {
+
+ @Argument(index = 0, name = "nodeId", description = "Node ID",
+ required = true, multiValued = false)
+ String nodeId = null;
+
+ @Argument(index = 1, name = "ip", description = "Node IP address",
+ required = true, multiValued = false)
+ String ip = null;
+
+ @Argument(index = 2, name = "tcpPort", description = "Node TCP listen port",
+ required = false, multiValued = false)
+ int tcpPort = 9876;
+
+ @Override
+ protected void execute() {
+ ClusterAdminService service = get(ClusterAdminService.class);
+ service.addNode(new NodeId(nodeId), IpPrefix.valueOf(ip), tcpPort);
+ }
+
+}
diff --git a/cli/src/main/java/org/onlab/onos/cli/NodeRemoveCommand.java b/cli/src/main/java/org/onlab/onos/cli/NodeRemoveCommand.java
new file mode 100644
index 0000000..219c187
--- /dev/null
+++ b/cli/src/main/java/org/onlab/onos/cli/NodeRemoveCommand.java
@@ -0,0 +1,25 @@
+package org.onlab.onos.cli;
+
+import org.apache.karaf.shell.commands.Argument;
+import org.apache.karaf.shell.commands.Command;
+import org.onlab.onos.cluster.ClusterAdminService;
+import org.onlab.onos.cluster.NodeId;
+
+/**
+ * Removes a controller cluster node.
+ */
+@Command(scope = "onos", name = "remove-node",
+ description = "Removes a controller cluster node")
+public class NodeRemoveCommand extends AbstractShellCommand {
+
+ @Argument(index = 0, name = "nodeId", description = "Node ID",
+ required = true, multiValued = false)
+ String nodeId = null;
+
+ @Override
+ protected void execute() {
+ ClusterAdminService service = get(ClusterAdminService.class);
+ service.removeNode(new NodeId(nodeId));
+ }
+
+}
diff --git a/cli/src/main/java/org/onlab/onos/cli/NodesListCommand.java b/cli/src/main/java/org/onlab/onos/cli/NodesListCommand.java
index 40f722d..b7b4556 100644
--- a/cli/src/main/java/org/onlab/onos/cli/NodesListCommand.java
+++ b/cli/src/main/java/org/onlab/onos/cli/NodesListCommand.java
@@ -17,7 +17,7 @@
public class NodesListCommand extends AbstractShellCommand {
private static final String FMT =
- "id=%s, ip=%s, state=%s %s";
+ "id=%s, address=%s:%s, state=%s %s";
@Override
protected void execute() {
@@ -26,7 +26,7 @@
Collections.sort(nodes, Comparators.NODE_COMPARATOR);
ControllerNode self = service.getLocalNode();
for (ControllerNode node : nodes) {
- print(FMT, node.id(), node.ip(),
+ print(FMT, node.id(), node.ip(), node.tcpPort(),
service.getState(node.id()),
node.equals(self) ? "*" : "");
}
diff --git a/cli/src/main/java/org/onlab/onos/cli/net/FlowsListCommand.java b/cli/src/main/java/org/onlab/onos/cli/net/FlowsListCommand.java
index 4c4af1a..41f30a7 100644
--- a/cli/src/main/java/org/onlab/onos/cli/net/FlowsListCommand.java
+++ b/cli/src/main/java/org/onlab/onos/cli/net/FlowsListCommand.java
@@ -103,4 +103,4 @@
}
-}
\ No newline at end of file
+}
diff --git a/cli/src/main/resources/OSGI-INF/blueprint/shell-config.xml b/cli/src/main/resources/OSGI-INF/blueprint/shell-config.xml
index 30fce6f..16b5672 100644
--- a/cli/src/main/resources/OSGI-INF/blueprint/shell-config.xml
+++ b/cli/src/main/resources/OSGI-INF/blueprint/shell-config.xml
@@ -5,6 +5,12 @@
<action class="org.onlab.onos.cli.NodesListCommand"/>
</command>
<command>
+ <action class="org.onlab.onos.cli.NodeAddCommand"/>
+ </command>
+ <command>
+ <action class="org.onlab.onos.cli.NodeRemoveCommand"/>
+ </command>
+ <command>
<action class="org.onlab.onos.cli.MastersListCommand"/>
<completers>
<ref component-id="clusterIdCompleter"/>
diff --git a/core/api/src/main/java/org/onlab/onos/cluster/ClusterAdminService.java b/core/api/src/main/java/org/onlab/onos/cluster/ClusterAdminService.java
index 4f98804..73137e1 100644
--- a/core/api/src/main/java/org/onlab/onos/cluster/ClusterAdminService.java
+++ b/core/api/src/main/java/org/onlab/onos/cluster/ClusterAdminService.java
@@ -1,11 +1,23 @@
package org.onlab.onos.cluster;
+import org.onlab.packet.IpPrefix;
+
/**
* Service for administering the cluster node membership.
*/
public interface ClusterAdminService {
/**
+ * Adds a new controller node to the cluster.
+ *
+ * @param nodeId controller node identifier
+ * @param ip node IP listen address
+ * @param tcpPort tcp listen port
+ * @return newly added node
+ */
+ ControllerNode addNode(NodeId nodeId, IpPrefix ip, int tcpPort);
+
+ /**
* Removes the specified node from the cluster node list.
*
* @param nodeId controller node identifier
diff --git a/core/api/src/main/java/org/onlab/onos/cluster/ClusterStore.java b/core/api/src/main/java/org/onlab/onos/cluster/ClusterStore.java
index ea5bbd3..3725706 100644
--- a/core/api/src/main/java/org/onlab/onos/cluster/ClusterStore.java
+++ b/core/api/src/main/java/org/onlab/onos/cluster/ClusterStore.java
@@ -1,6 +1,7 @@
package org.onlab.onos.cluster;
import org.onlab.onos.store.Store;
+import org.onlab.packet.IpPrefix;
import java.util.Set;
@@ -40,6 +41,16 @@
ControllerNode.State getState(NodeId nodeId);
/**
+ * Adds a new controller node to the cluster.
+ *
+ * @param nodeId controller node identifier
+ * @param ip node IP listen address
+ * @param tcpPort tcp listen port
+ * @return newly added node
+ */
+ ControllerNode addNode(NodeId nodeId, IpPrefix ip, int tcpPort);
+
+ /**
* Removes the specified node from the inventory of cluster nodes.
*
* @param nodeId controller instance identifier
diff --git a/core/api/src/main/java/org/onlab/onos/cluster/ControllerNode.java b/core/api/src/main/java/org/onlab/onos/cluster/ControllerNode.java
index c6f0cb3..33fe1c9 100644
--- a/core/api/src/main/java/org/onlab/onos/cluster/ControllerNode.java
+++ b/core/api/src/main/java/org/onlab/onos/cluster/ControllerNode.java
@@ -35,4 +35,12 @@
*/
IpPrefix ip();
+
+ /**
+ * Returns the TCP port on which the node listens for connections.
+ *
+ * @return TCP port
+ */
+ int tcpPort();
+
}
diff --git a/core/api/src/main/java/org/onlab/onos/cluster/DefaultControllerNode.java b/core/api/src/main/java/org/onlab/onos/cluster/DefaultControllerNode.java
index 86ea14c..d23b7a3 100644
--- a/core/api/src/main/java/org/onlab/onos/cluster/DefaultControllerNode.java
+++ b/core/api/src/main/java/org/onlab/onos/cluster/DefaultControllerNode.java
@@ -11,13 +11,17 @@
*/
public class DefaultControllerNode implements ControllerNode {
+ private static final int DEFAULT_PORT = 9876;
+
private final NodeId id;
private final IpPrefix ip;
+ private final int tcpPort;
// For serialization
private DefaultControllerNode() {
this.id = null;
this.ip = null;
+ this.tcpPort = 0;
}
/**
@@ -27,8 +31,19 @@
* @param ip instance IP address
*/
public DefaultControllerNode(NodeId id, IpPrefix ip) {
+ this(id, ip, DEFAULT_PORT);
+ }
+
+ /**
+ * Creates a new instance with the specified id, IP address, and TCP port.
+ *
+ * @param id instance identifier
+ * @param ip instance IP address
+ */
+ public DefaultControllerNode(NodeId id, IpPrefix ip, int tcpPort) {
this.id = id;
this.ip = ip;
+ this.tcpPort = tcpPort;
}
@Override
@@ -42,6 +57,11 @@
}
@Override
+ public int tcpPort() {
+ return tcpPort;
+ }
+
+ @Override
public int hashCode() {
return Objects.hash(id);
}
@@ -60,7 +80,8 @@
@Override
public String toString() {
- return toStringHelper(this).add("id", id).add("ip", ip).toString();
+ return toStringHelper(this).add("id", id)
+ .add("ip", ip).add("tcpPort", tcpPort).toString();
}
}
diff --git a/core/api/src/main/java/org/onlab/onos/net/proxyarp/ProxyArpService.java b/core/api/src/main/java/org/onlab/onos/net/proxyarp/ProxyArpService.java
new file mode 100644
index 0000000..e6fe43b
--- /dev/null
+++ b/core/api/src/main/java/org/onlab/onos/net/proxyarp/ProxyArpService.java
@@ -0,0 +1,29 @@
+package org.onlab.onos.net.proxyarp;
+
+import org.onlab.packet.Ethernet;
+import org.onlab.packet.IpPrefix;
+
+/**
+ * Service for processing ARP requests on behalf of applications.
+ */
+public interface ProxyArpService {
+
+ /**
+ * Returns whether this particular ip address is known to the system.
+ *
+ * @param addr
+ * an IP address
+ * @return true if known, false otherwise
+ */
+ boolean known(IpPrefix addr);
+
+ /**
+ * Sends a reply for a given request. If the host is not known then the ARP
+ * request will be flooded on all edge ports.
+ *
+ * @param request
+ * an arp request
+ */
+ void reply(Ethernet request);
+
+}
diff --git a/core/api/src/main/java/org/onlab/onos/net/proxyarp/package-info.java b/core/api/src/main/java/org/onlab/onos/net/proxyarp/package-info.java
new file mode 100644
index 0000000..30d44e7
--- /dev/null
+++ b/core/api/src/main/java/org/onlab/onos/net/proxyarp/package-info.java
@@ -0,0 +1,4 @@
+/**
+ * Base abstractions related to the proxy arp service.
+ */
+package org.onlab.onos.net.proxyarp;
diff --git a/core/api/src/main/java/org/onlab/onos/store/ClockService.java b/core/api/src/main/java/org/onlab/onos/store/ClockService.java
new file mode 100644
index 0000000..2446ab7
--- /dev/null
+++ b/core/api/src/main/java/org/onlab/onos/store/ClockService.java
@@ -0,0 +1,26 @@
+package org.onlab.onos.store;
+
+import org.onlab.onos.cluster.MastershipTerm;
+import org.onlab.onos.net.DeviceId;
+
+// TODO: Consider renaming to DeviceClockService?
+/**
+ * Interface for a logical clock service that vends per device timestamps.
+ */
+public interface ClockService {
+
+ /**
+ * Returns a new timestamp for the specified deviceId.
+ * @param deviceId device identifier.
+ * @return timestamp.
+ */
+ public Timestamp getTimestamp(DeviceId deviceId);
+
+ // TODO: Should this be here or separate as Admin service, etc.?
+ /**
+ * Updates the mastership term for the specified deviceId.
+ * @param deviceId device identifier.
+ * @param term mastership term.
+ */
+ public void setMastershipTerm(DeviceId deviceId, MastershipTerm term);
+}
diff --git a/core/api/src/main/java/org/onlab/onos/store/package-info.java b/core/api/src/main/java/org/onlab/onos/store/package-info.java
index 7e767f0..b8203ce 100644
--- a/core/api/src/main/java/org/onlab/onos/store/package-info.java
+++ b/core/api/src/main/java/org/onlab/onos/store/package-info.java
@@ -1,4 +1,4 @@
/**
* Abstractions for creating and interacting with distributed stores.
*/
-package org.onlab.onos.store;
\ No newline at end of file
+package org.onlab.onos.store;
diff --git a/core/net/pom.xml b/core/net/pom.xml
index e2703b2..c075147 100644
--- a/core/net/pom.xml
+++ b/core/net/pom.xml
@@ -40,13 +40,14 @@
Currently required for DistributedDeviceManagerTest. -->
<dependency>
<groupId>org.onlab.onos</groupId>
- <artifactId>onos-core-store</artifactId>
+ <artifactId>onos-core-hz-net</artifactId>
<version>${project.version}</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.onlab.onos</groupId>
- <artifactId>onos-core-store</artifactId>
+ <!-- FIXME: should be somewhere else -->
+ <artifactId>onos-core-hz-common</artifactId>
<version>${project.version}</version>
<classifier>tests</classifier>
<scope>test</scope>
diff --git a/core/net/src/main/java/org/onlab/onos/cluster/impl/ClusterManager.java b/core/net/src/main/java/org/onlab/onos/cluster/impl/ClusterManager.java
index 9913ad0..36f2f7e 100644
--- a/core/net/src/main/java/org/onlab/onos/cluster/impl/ClusterManager.java
+++ b/core/net/src/main/java/org/onlab/onos/cluster/impl/ClusterManager.java
@@ -16,10 +16,12 @@
import org.onlab.onos.cluster.NodeId;
import org.onlab.onos.event.AbstractListenerRegistry;
import org.onlab.onos.event.EventDeliveryService;
+import org.onlab.packet.IpPrefix;
import org.slf4j.Logger;
import java.util.Set;
+import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Preconditions.checkNotNull;
import static org.slf4j.LoggerFactory.getLogger;
@@ -81,6 +83,14 @@
}
@Override
+ public ControllerNode addNode(NodeId nodeId, IpPrefix ip, int tcpPort) {
+ checkNotNull(nodeId, INSTANCE_ID_NULL);
+ checkNotNull(ip, "IP address cannot be null");
+ checkArgument(tcpPort > 5000, "TCP port must be > 5000");
+ return store.addNode(nodeId, ip, tcpPort);
+ }
+
+ @Override
public void removeNode(NodeId nodeId) {
checkNotNull(nodeId, INSTANCE_ID_NULL);
store.removeNode(nodeId);
diff --git a/core/net/src/main/java/org/onlab/onos/cluster/impl/MastershipManager.java b/core/net/src/main/java/org/onlab/onos/cluster/impl/MastershipManager.java
index 8dd3379..20ebc40 100644
--- a/core/net/src/main/java/org/onlab/onos/cluster/impl/MastershipManager.java
+++ b/core/net/src/main/java/org/onlab/onos/cluster/impl/MastershipManager.java
@@ -1,5 +1,10 @@
package org.onlab.onos.cluster.impl;
+import static com.google.common.base.Preconditions.checkNotNull;
+import static org.slf4j.LoggerFactory.getLogger;
+
+import java.util.Set;
+
import org.apache.felix.scr.annotations.Activate;
import org.apache.felix.scr.annotations.Component;
import org.apache.felix.scr.annotations.Deactivate;
@@ -14,6 +19,7 @@
import org.onlab.onos.cluster.MastershipListener;
import org.onlab.onos.cluster.MastershipService;
import org.onlab.onos.cluster.MastershipStore;
+import org.onlab.onos.cluster.MastershipStoreDelegate;
import org.onlab.onos.cluster.MastershipTerm;
import org.onlab.onos.cluster.MastershipTermService;
import org.onlab.onos.cluster.NodeId;
@@ -23,15 +29,10 @@
import org.onlab.onos.net.MastershipRole;
import org.slf4j.Logger;
-import java.util.Set;
-
-import static com.google.common.base.Preconditions.checkNotNull;
-import static org.slf4j.LoggerFactory.getLogger;
-
@Component(immediate = true)
@Service
public class MastershipManager
- implements MastershipService, MastershipAdminService {
+implements MastershipService, MastershipAdminService {
private static final String NODE_ID_NULL = "Node ID cannot be null";
private static final String DEVICE_ID_NULL = "Device ID cannot be null";
@@ -40,7 +41,9 @@
private final Logger log = getLogger(getClass());
protected final AbstractListenerRegistry<MastershipEvent, MastershipListener>
- listenerRegistry = new AbstractListenerRegistry<>();
+ listenerRegistry = new AbstractListenerRegistry<>();
+
+ private final MastershipStoreDelegate delegate = new InternalDelegate();
@Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
protected MastershipStore store;
@@ -57,6 +60,7 @@
public void activate() {
eventDispatcher.addSink(MastershipEvent.class, listenerRegistry);
clusterService.addListener(clusterListener);
+ store.setDelegate(delegate);
log.info("Started");
}
@@ -64,6 +68,7 @@
public void deactivate() {
eventDispatcher.removeSink(MastershipEvent.class);
clusterService.removeListener(clusterListener);
+ store.unsetDelegate(delegate);
log.info("Stopped");
}
@@ -188,4 +193,15 @@
}
}
+
+ public class InternalDelegate implements MastershipStoreDelegate {
+
+ @Override
+ public void notify(MastershipEvent event) {
+ log.info("dispatching mastership event {}", event);
+ eventDispatcher.post(event);
+ }
+
+ }
+
}
diff --git a/core/net/src/main/java/org/onlab/onos/cluster/impl/package-info.java b/core/net/src/main/java/org/onlab/onos/cluster/impl/package-info.java
index d98f983..a31eb36 100644
--- a/core/net/src/main/java/org/onlab/onos/cluster/impl/package-info.java
+++ b/core/net/src/main/java/org/onlab/onos/cluster/impl/package-info.java
@@ -1,4 +1,4 @@
/**
* Subsystem for tracking controller cluster nodes.
*/
-package org.onlab.onos.cluster.impl;
\ No newline at end of file
+package org.onlab.onos.cluster.impl;
diff --git a/core/net/src/main/java/org/onlab/onos/net/device/impl/DeviceManager.java b/core/net/src/main/java/org/onlab/onos/net/device/impl/DeviceManager.java
index f8f4750..eb409a5 100644
--- a/core/net/src/main/java/org/onlab/onos/net/device/impl/DeviceManager.java
+++ b/core/net/src/main/java/org/onlab/onos/net/device/impl/DeviceManager.java
@@ -1,5 +1,11 @@
package org.onlab.onos.net.device.impl;
+import static com.google.common.base.Preconditions.checkNotNull;
+import static org.onlab.onos.net.device.DeviceEvent.Type.DEVICE_MASTERSHIP_CHANGED;
+import static org.slf4j.LoggerFactory.getLogger;
+
+import java.util.List;
+
import org.apache.felix.scr.annotations.Activate;
import org.apache.felix.scr.annotations.Component;
import org.apache.felix.scr.annotations.Deactivate;
@@ -11,6 +17,7 @@
import org.onlab.onos.cluster.MastershipListener;
import org.onlab.onos.cluster.MastershipService;
import org.onlab.onos.cluster.MastershipTermService;
+import org.onlab.onos.cluster.MastershipTerm;
import org.onlab.onos.event.AbstractListenerRegistry;
import org.onlab.onos.event.EventDeliveryService;
import org.onlab.onos.net.Device;
@@ -31,22 +38,17 @@
import org.onlab.onos.net.device.PortDescription;
import org.onlab.onos.net.provider.AbstractProviderRegistry;
import org.onlab.onos.net.provider.AbstractProviderService;
+import org.onlab.onos.store.ClockService;
import org.slf4j.Logger;
-import java.util.List;
-
-import static com.google.common.base.Preconditions.checkNotNull;
-import static org.onlab.onos.net.device.DeviceEvent.Type.DEVICE_MASTERSHIP_CHANGED;
-import static org.slf4j.LoggerFactory.getLogger;
-
/**
* Provides implementation of the device SB & NB APIs.
*/
@Component(immediate = true)
@Service
public class DeviceManager
- extends AbstractProviderRegistry<DeviceProvider, DeviceProviderService>
- implements DeviceService, DeviceAdminService, DeviceProviderRegistry {
+ extends AbstractProviderRegistry<DeviceProvider, DeviceProviderService>
+ implements DeviceService, DeviceAdminService, DeviceProviderRegistry {
private static final String DEVICE_ID_NULL = "Device ID cannot be null";
private static final String PORT_NUMBER_NULL = "Port number cannot be null";
@@ -56,10 +58,10 @@
private final Logger log = getLogger(getClass());
- protected final AbstractListenerRegistry<DeviceEvent, DeviceListener>
- listenerRegistry = new AbstractListenerRegistry<>();
+ protected final AbstractListenerRegistry<DeviceEvent, DeviceListener> listenerRegistry =
+ new AbstractListenerRegistry<>();
- private DeviceStoreDelegate delegate = new InternalStoreDelegate();
+ private final DeviceStoreDelegate delegate = new InternalStoreDelegate();
private final MastershipListener mastershipListener = new InternalMastershipListener();
@@ -77,6 +79,9 @@
protected MastershipTermService termService;
+ @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+ protected ClockService clockService;
+
@Activate
public void activate() {
store.setDelegate(delegate);
@@ -168,33 +173,36 @@
}
@Override
- protected DeviceProviderService createProviderService(DeviceProvider provider) {
+ protected DeviceProviderService createProviderService(
+ DeviceProvider provider) {
return new InternalDeviceProviderService(provider);
}
// Personalized device provider service issued to the supplied provider.
private class InternalDeviceProviderService
- extends AbstractProviderService<DeviceProvider>
- implements DeviceProviderService {
+ extends AbstractProviderService<DeviceProvider>
+ implements DeviceProviderService {
InternalDeviceProviderService(DeviceProvider provider) {
super(provider);
}
@Override
- public void deviceConnected(DeviceId deviceId, DeviceDescription deviceDescription) {
+ public void deviceConnected(DeviceId deviceId,
+ DeviceDescription deviceDescription) {
checkNotNull(deviceId, DEVICE_ID_NULL);
checkNotNull(deviceDescription, DEVICE_DESCRIPTION_NULL);
checkValidity();
DeviceEvent event = store.createOrUpdateDevice(provider().id(),
- deviceId, deviceDescription);
+ deviceId, deviceDescription);
- // If there was a change of any kind, trigger role selection process.
+ // If there was a change of any kind, trigger role selection
+ // process.
if (event != null) {
log.info("Device {} connected", deviceId);
mastershipService.requestRoleFor(deviceId);
provider().roleChanged(event.subject(),
- mastershipService.getLocalRole(deviceId));
+ mastershipService.getLocalRole(deviceId));
post(event);
}
}
@@ -214,25 +222,30 @@
}
@Override
- public void updatePorts(DeviceId deviceId, List<PortDescription> portDescriptions) {
+ public void updatePorts(DeviceId deviceId,
+ List<PortDescription> portDescriptions) {
checkNotNull(deviceId, DEVICE_ID_NULL);
- checkNotNull(portDescriptions, "Port descriptions list cannot be null");
+ checkNotNull(portDescriptions,
+ "Port descriptions list cannot be null");
checkValidity();
- List<DeviceEvent> events = store.updatePorts(deviceId, portDescriptions);
+ List<DeviceEvent> events = store.updatePorts(deviceId,
+ portDescriptions);
for (DeviceEvent event : events) {
post(event);
}
}
@Override
- public void portStatusChanged(DeviceId deviceId, PortDescription portDescription) {
+ public void portStatusChanged(DeviceId deviceId,
+ PortDescription portDescription) {
checkNotNull(deviceId, DEVICE_ID_NULL);
checkNotNull(portDescription, PORT_DESCRIPTION_NULL);
checkValidity();
- DeviceEvent event = store.updatePortStatus(deviceId, portDescription);
+ DeviceEvent event = store.updatePortStatus(deviceId,
+ portDescription);
if (event != null) {
- log.info("Device {} port {} status changed", deviceId,
- event.port().number());
+ log.info("Device {} port {} status changed", deviceId, event
+ .port().number());
post(event);
}
}
@@ -240,8 +253,8 @@
@Override
public void unableToAssertRole(DeviceId deviceId, MastershipRole role) {
// FIXME: implement response to this notification
- log.warn("Failed to assert role [{}] onto Device {}",
- role, deviceId);
+ log.warn("Failed to assert role [{}] onto Device {}", role,
+ deviceId);
}
}
@@ -253,18 +266,24 @@
}
// Intercepts mastership events
- private class InternalMastershipListener implements MastershipListener {
+ private class InternalMastershipListener
+ implements MastershipListener {
@Override
public void event(MastershipEvent event) {
- // FIXME: for now we're taking action only on becoming master
if (event.master().equals(clusterService.getLocalNode().id())) {
+ MastershipTerm term = mastershipService.requestTermService()
+ .getMastershipTerm(event.subject());
+ clockService.setMastershipTerm(event.subject(), term);
applyRole(event.subject(), MastershipRole.MASTER);
+ } else {
+ applyRole(event.subject(), MastershipRole.STANDBY);
}
}
}
// Store delegate to re-post events emitted from the store.
- private class InternalStoreDelegate implements DeviceStoreDelegate {
+ private class InternalStoreDelegate
+ implements DeviceStoreDelegate {
@Override
public void notify(DeviceEvent event) {
post(event);
diff --git a/core/net/src/main/java/org/onlab/onos/proxyarp/impl/ProxyArpManager.java b/core/net/src/main/java/org/onlab/onos/proxyarp/impl/ProxyArpManager.java
new file mode 100644
index 0000000..f267f68
--- /dev/null
+++ b/core/net/src/main/java/org/onlab/onos/proxyarp/impl/ProxyArpManager.java
@@ -0,0 +1,100 @@
+package org.onlab.onos.proxyarp.impl;
+
+import static com.google.common.base.Preconditions.checkArgument;
+import static com.google.common.base.Preconditions.checkNotNull;
+
+import java.nio.ByteBuffer;
+import java.util.Set;
+
+import org.apache.felix.scr.annotations.Reference;
+import org.apache.felix.scr.annotations.ReferenceCardinality;
+import org.onlab.onos.net.Host;
+import org.onlab.onos.net.flow.DefaultTrafficTreatment;
+import org.onlab.onos.net.flow.TrafficTreatment;
+import org.onlab.onos.net.host.HostService;
+import org.onlab.onos.net.packet.DefaultOutboundPacket;
+import org.onlab.onos.net.packet.PacketService;
+import org.onlab.onos.net.proxyarp.ProxyArpService;
+import org.onlab.onos.net.topology.TopologyService;
+import org.onlab.packet.ARP;
+import org.onlab.packet.Ethernet;
+import org.onlab.packet.IpPrefix;
+import org.onlab.packet.VlanId;
+
+public class ProxyArpManager implements ProxyArpService {
+
+ private static final String MAC_ADDR_NULL = "Mac address cannot be null.";
+ private static final String REQUEST_NULL = "Arp request cannot be null.";
+ private static final String REQUEST_NOT_ARP = "Ethernet frame does not contain ARP request.";
+ private static final String NOT_ARP_REQUEST = "ARP is not a request.";
+
+ @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+ protected HostService hostService;
+
+ @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+ protected PacketService packetService;
+
+ @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+ protected TopologyService topologyService;
+
+ @Override
+ public boolean known(IpPrefix addr) {
+ checkNotNull(MAC_ADDR_NULL, addr);
+ Set<Host> hosts = hostService.getHostsByIp(addr);
+ return !hosts.isEmpty();
+ }
+
+ @Override
+ public void reply(Ethernet request) {
+ checkNotNull(REQUEST_NULL, request);
+ checkArgument(request.getEtherType() == Ethernet.TYPE_ARP,
+ REQUEST_NOT_ARP);
+ ARP arp = (ARP) request.getPayload();
+ checkArgument(arp.getOpCode() == ARP.OP_REQUEST, NOT_ARP_REQUEST);
+
+ VlanId vlan = VlanId.vlanId(request.getVlanID());
+ Set<Host> hosts = hostService.getHostsByIp(IpPrefix.valueOf(arp
+ .getTargetProtocolAddress()));
+
+ Host h = null;
+ for (Host host : hosts) {
+ if (host.vlan().equals(vlan)) {
+ h = host;
+ break;
+ }
+ }
+
+ if (h == null) {
+ flood(request);
+ return;
+ }
+
+ Ethernet arpReply = buildArpReply(h, request);
+ // TODO: check send status with host service.
+ TrafficTreatment.Builder builder = new DefaultTrafficTreatment.Builder();
+ builder.setOutput(h.location().port());
+ packetService.emit(new DefaultOutboundPacket(h.location().deviceId(),
+ builder.build(), ByteBuffer.wrap(arpReply.serialize())));
+ }
+
+ private void flood(Ethernet request) {
+ // TODO: flood on all edge ports.
+ }
+
+ private Ethernet buildArpReply(Host h, Ethernet request) {
+ Ethernet eth = new Ethernet();
+ eth.setDestinationMACAddress(request.getSourceMACAddress());
+ eth.setSourceMACAddress(h.mac().getAddress());
+ eth.setEtherType(Ethernet.TYPE_ARP);
+ ARP arp = new ARP();
+ arp.setOpCode(ARP.OP_REPLY);
+ arp.setSenderHardwareAddress(h.mac().getAddress());
+ arp.setTargetHardwareAddress(request.getSourceMACAddress());
+
+ arp.setTargetProtocolAddress(((ARP) request.getPayload())
+ .getSenderProtocolAddress());
+ arp.setSenderProtocolAddress(h.ipAddresses().iterator().next().toInt());
+ eth.setPayload(arp);
+ return eth;
+ }
+}
diff --git a/core/net/src/main/java/org/onlab/onos/proxyarp/impl/package-info.java b/core/net/src/main/java/org/onlab/onos/proxyarp/impl/package-info.java
new file mode 100644
index 0000000..7a5dc6e8
--- /dev/null
+++ b/core/net/src/main/java/org/onlab/onos/proxyarp/impl/package-info.java
@@ -0,0 +1,4 @@
+/**
+ * Core subsystem for responding to ARP requests.
+ */
+package org.onlab.onos.proxyarp.impl;
diff --git a/core/net/src/test/java/org/onlab/onos/net/device/impl/DistributedDeviceManagerTest.java b/core/net/src/test/java/org/onlab/onos/net/device/impl/DistributedDeviceManagerTest.java
index aeb0978..bcb4a68 100644
--- a/core/net/src/test/java/org/onlab/onos/net/device/impl/DistributedDeviceManagerTest.java
+++ b/core/net/src/test/java/org/onlab/onos/net/device/impl/DistributedDeviceManagerTest.java
@@ -32,9 +32,9 @@
import org.onlab.onos.net.device.PortDescription;
import org.onlab.onos.net.provider.AbstractProvider;
import org.onlab.onos.net.provider.ProviderId;
+import org.onlab.onos.store.common.StoreManager;
+import org.onlab.onos.store.common.TestStoreManager;
import org.onlab.onos.store.device.impl.DistributedDeviceStore;
-import org.onlab.onos.store.impl.StoreManager;
-import org.onlab.onos.store.impl.TestStoreManager;
import org.onlab.packet.IpPrefix;
import java.util.ArrayList;
@@ -163,7 +163,7 @@
public void deviceDisconnected() {
connectDevice(DID1, SW1);
connectDevice(DID2, SW1);
- validateEvents(DEVICE_ADDED, DEVICE_ADDED, DEVICE_ADDED, DEVICE_ADDED);
+ validateEvents(DEVICE_ADDED, DEVICE_ADDED);
assertTrue("device should be available", service.isAvailable(DID1));
// Disconnect
@@ -182,10 +182,10 @@
@Test
public void deviceUpdated() {
connectDevice(DID1, SW1);
- validateEvents(DEVICE_ADDED, DEVICE_ADDED);
+ validateEvents(DEVICE_ADDED);
connectDevice(DID1, SW2);
- validateEvents(DEVICE_UPDATED, DEVICE_UPDATED);
+ validateEvents(DEVICE_UPDATED);
}
@Test
@@ -202,7 +202,7 @@
pds.add(new DefaultPortDescription(P2, true));
pds.add(new DefaultPortDescription(P3, true));
providerService.updatePorts(DID1, pds);
- validateEvents(DEVICE_ADDED, DEVICE_ADDED, PORT_ADDED, PORT_ADDED, PORT_ADDED);
+ validateEvents(DEVICE_ADDED, PORT_ADDED, PORT_ADDED, PORT_ADDED);
pds.clear();
pds.add(new DefaultPortDescription(P1, false));
@@ -218,7 +218,7 @@
pds.add(new DefaultPortDescription(P1, true));
pds.add(new DefaultPortDescription(P2, true));
providerService.updatePorts(DID1, pds);
- validateEvents(DEVICE_ADDED, DEVICE_ADDED, PORT_ADDED, PORT_ADDED);
+ validateEvents(DEVICE_ADDED, PORT_ADDED, PORT_ADDED);
providerService.portStatusChanged(DID1, new DefaultPortDescription(P1, false));
validateEvents(PORT_UPDATED);
@@ -233,7 +233,7 @@
pds.add(new DefaultPortDescription(P1, true));
pds.add(new DefaultPortDescription(P2, true));
providerService.updatePorts(DID1, pds);
- validateEvents(DEVICE_ADDED, DEVICE_ADDED, PORT_ADDED, PORT_ADDED);
+ validateEvents(DEVICE_ADDED, PORT_ADDED, PORT_ADDED);
assertEquals("wrong port count", 2, service.getPorts(DID1).size());
Port port = service.getPort(DID1, P1);
@@ -247,7 +247,7 @@
connectDevice(DID2, SW2);
assertEquals("incorrect device count", 2, service.getDeviceCount());
admin.removeDevice(DID1);
- validateEvents(DEVICE_ADDED, DEVICE_ADDED, DEVICE_ADDED, DEVICE_ADDED, DEVICE_REMOVED, DEVICE_REMOVED);
+ validateEvents(DEVICE_ADDED, DEVICE_ADDED, DEVICE_REMOVED);
assertNull("device should not be found", service.getDevice(DID1));
assertNotNull("device should be found", service.getDevice(DID2));
assertEquals("incorrect device count", 1, service.getDeviceCount());
diff --git a/core/pom.xml b/core/pom.xml
index fc603df..afee0d0 100644
--- a/core/pom.xml
+++ b/core/pom.xml
@@ -20,7 +20,6 @@
<module>api</module>
<module>net</module>
<module>store</module>
- <module>trivial</module>
</modules>
<dependencies>
diff --git a/core/store/dist/pom.xml b/core/store/dist/pom.xml
new file mode 100644
index 0000000..577376a
--- /dev/null
+++ b/core/store/dist/pom.xml
@@ -0,0 +1,65 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+
+ <parent>
+ <groupId>org.onlab.onos</groupId>
+ <artifactId>onos-core-store</artifactId>
+ <version>1.0.0-SNAPSHOT</version>
+ <relativePath>../pom.xml</relativePath>
+ </parent>
+
+ <artifactId>onos-core-dist</artifactId>
+ <packaging>bundle</packaging>
+
+ <description>ONOS Gossip-based distributed store subsystems</description>
+
+ <dependencies>
+ <dependency>
+ <groupId>org.onlab.onos</groupId>
+ <artifactId>onos-api</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.onlab.onos</groupId>
+ <artifactId>onos-core-serializers</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+
+
+ <dependency>
+ <groupId>org.onlab.onos</groupId>
+ <artifactId>onlab-nio</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+
+ <dependency>
+ <groupId>com.fasterxml.jackson.core</groupId>
+ <artifactId>jackson-databind</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>com.fasterxml.jackson.core</groupId>
+ <artifactId>jackson-annotations</artifactId>
+ </dependency>
+
+ <dependency>
+ <groupId>org.apache.felix</groupId>
+ <artifactId>org.apache.felix.scr.annotations</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>de.javakaffee</groupId>
+ <artifactId>kryo-serializers</artifactId>
+ </dependency>
+ </dependencies>
+
+ <build>
+ <plugins>
+ <plugin>
+ <groupId>org.apache.felix</groupId>
+ <artifactId>maven-scr-plugin</artifactId>
+ </plugin>
+ </plugins>
+ </build>
+
+</project>
diff --git a/core/store/dist/src/main/java/org/onlab/onos/store/cluster/impl/ClusterDefinitionStore.java b/core/store/dist/src/main/java/org/onlab/onos/store/cluster/impl/ClusterDefinitionStore.java
new file mode 100644
index 0000000..4dc67d4
--- /dev/null
+++ b/core/store/dist/src/main/java/org/onlab/onos/store/cluster/impl/ClusterDefinitionStore.java
@@ -0,0 +1,75 @@
+package org.onlab.onos.store.cluster.impl;
+
+import com.fasterxml.jackson.core.JsonEncoding;
+import com.fasterxml.jackson.core.JsonFactory;
+import com.fasterxml.jackson.databind.JsonNode;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.node.ArrayNode;
+import com.fasterxml.jackson.databind.node.ObjectNode;
+import org.onlab.onos.cluster.DefaultControllerNode;
+import org.onlab.onos.cluster.NodeId;
+import org.onlab.packet.IpPrefix;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.Set;
+
+/**
+ * Allows for reading and writing cluster definition as a JSON file.
+ */
+public class ClusterDefinitionStore {
+
+ private final File file;
+
+ /**
+ * Creates a reader/writer of the cluster definition file.
+ *
+ * @param filePath location of the definition file
+ */
+ public ClusterDefinitionStore(String filePath) {
+ file = new File(filePath);
+ }
+
+ /**
+ * Returns set of the controller nodes, including self.
+ *
+ * @return set of controller nodes
+ */
+ public Set<DefaultControllerNode> read() throws IOException {
+ Set<DefaultControllerNode> nodes = new HashSet<>();
+ ObjectMapper mapper = new ObjectMapper();
+ ObjectNode clusterNodeDef = (ObjectNode) mapper.readTree(file);
+ Iterator<JsonNode> it = ((ArrayNode) clusterNodeDef.get("nodes")).elements();
+ while (it.hasNext()) {
+ ObjectNode nodeDef = (ObjectNode) it.next();
+ nodes.add(new DefaultControllerNode(new NodeId(nodeDef.get("id").asText()),
+ IpPrefix.valueOf(nodeDef.get("ip").asText()),
+ nodeDef.get("tcpPort").asInt(9876)));
+ }
+ return nodes;
+ }
+
+ /**
+ * Writes the given set of the controller nodes.
+ *
+ * @param nodes set of controller nodes
+ */
+ public void write(Set<DefaultControllerNode> nodes) throws IOException {
+ ObjectMapper mapper = new ObjectMapper();
+ ObjectNode clusterNodeDef = mapper.createObjectNode();
+ ArrayNode nodeDefs = mapper.createArrayNode();
+ clusterNodeDef.set("nodes", nodeDefs);
+ for (DefaultControllerNode node : nodes) {
+ ObjectNode nodeDef = mapper.createObjectNode();
+ nodeDef.put("id", node.id().toString())
+ .put("ip", node.ip().toString())
+ .put("tcpPort", node.tcpPort());
+ nodeDefs.add(nodeDef);
+ }
+ mapper.writeTree(new JsonFactory().createGenerator(file, JsonEncoding.UTF8),
+ clusterNodeDef);
+ }
+
+}
diff --git a/core/store/dist/src/main/java/org/onlab/onos/store/cluster/impl/DistributedClusterStore.java b/core/store/dist/src/main/java/org/onlab/onos/store/cluster/impl/DistributedClusterStore.java
new file mode 100644
index 0000000..5cd9d9e
--- /dev/null
+++ b/core/store/dist/src/main/java/org/onlab/onos/store/cluster/impl/DistributedClusterStore.java
@@ -0,0 +1,393 @@
+package org.onlab.onos.store.cluster.impl;
+
+import com.google.common.collect.ImmutableSet;
+import org.apache.felix.scr.annotations.Activate;
+import org.apache.felix.scr.annotations.Component;
+import org.apache.felix.scr.annotations.Deactivate;
+import org.apache.felix.scr.annotations.Service;
+import org.onlab.nio.AcceptorLoop;
+import org.onlab.nio.IOLoop;
+import org.onlab.nio.MessageStream;
+import org.onlab.onos.cluster.ClusterEvent;
+import org.onlab.onos.cluster.ClusterStore;
+import org.onlab.onos.cluster.ClusterStoreDelegate;
+import org.onlab.onos.cluster.ControllerNode;
+import org.onlab.onos.cluster.DefaultControllerNode;
+import org.onlab.onos.cluster.NodeId;
+import org.onlab.onos.store.AbstractStore;
+import org.onlab.packet.IpPrefix;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.net.Socket;
+import java.net.SocketAddress;
+import java.nio.channels.ByteChannel;
+import java.nio.channels.SelectionKey;
+import java.nio.channels.ServerSocketChannel;
+import java.nio.channels.SocketChannel;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Set;
+import java.util.Timer;
+import java.util.TimerTask;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+
+import static java.net.InetAddress.getByAddress;
+import static org.onlab.onos.cluster.ControllerNode.State;
+import static org.onlab.packet.IpPrefix.valueOf;
+import static org.onlab.util.Tools.namedThreads;
+
+/**
+ * Distributed implementation of the cluster nodes store.
+ */
+@Component(immediate = true)
+@Service
+public class DistributedClusterStore
+ extends AbstractStore<ClusterEvent, ClusterStoreDelegate>
+ implements ClusterStore {
+
+ private static final int HELLO_MSG = 1;
+ private static final int ECHO_MSG = 2;
+
+ private final Logger log = LoggerFactory.getLogger(getClass());
+
+ private static final long CONNECTION_CUSTODIAN_DELAY = 1000L;
+ private static final long CONNECTION_CUSTODIAN_FREQUENCY = 5000;
+
+ private static final long START_TIMEOUT = 1000;
+ private static final long SELECT_TIMEOUT = 50;
+ private static final int WORKERS = 3;
+ private static final int COMM_BUFFER_SIZE = 32 * 1024;
+ private static final int COMM_IDLE_TIME = 500;
+
+ private static final boolean SO_NO_DELAY = false;
+ private static final int SO_SEND_BUFFER_SIZE = COMM_BUFFER_SIZE;
+ private static final int SO_RCV_BUFFER_SIZE = COMM_BUFFER_SIZE;
+
+ private DefaultControllerNode self;
+ private final Map<NodeId, DefaultControllerNode> nodes = new ConcurrentHashMap<>();
+ private final Map<NodeId, State> states = new ConcurrentHashMap<>();
+
+ // Means to track message streams to other nodes.
+ private final Map<NodeId, TLVMessageStream> streams = new ConcurrentHashMap<>();
+ private final Map<SocketChannel, DefaultControllerNode> nodesByChannel = new ConcurrentHashMap<>();
+
+ // Executor pools for listening and managing connections to other nodes.
+ private final ExecutorService listenExecutor =
+ Executors.newSingleThreadExecutor(namedThreads("onos-comm-listen"));
+ private final ExecutorService commExecutors =
+ Executors.newFixedThreadPool(WORKERS, namedThreads("onos-comm-cluster"));
+ private final ExecutorService heartbeatExecutor =
+ Executors.newSingleThreadExecutor(namedThreads("onos-comm-heartbeat"));
+
+ private final Timer timer = new Timer("onos-comm-initiator");
+ private final TimerTask connectionCustodian = new ConnectionCustodian();
+
+ private ListenLoop listenLoop;
+ private List<CommLoop> commLoops = new ArrayList<>(WORKERS);
+
+ @Activate
+ public void activate() {
+ loadClusterDefinition();
+ startCommunications();
+ startListening();
+ startInitiating();
+ log.info("Started");
+ }
+
+ @Deactivate
+ public void deactivate() {
+ listenLoop.shutdown();
+ for (CommLoop loop : commLoops) {
+ loop.shutdown();
+ }
+ log.info("Stopped");
+ }
+
+ // Loads the cluster definition file
+ private void loadClusterDefinition() {
+// ClusterDefinitionStore cds = new ClusterDefinitionStore("../config/cluster.json");
+// try {
+// Set<DefaultControllerNode> storedNodes = cds.read();
+// for (DefaultControllerNode node : storedNodes) {
+// nodes.put(node.id(), node);
+// }
+// } catch (IOException e) {
+// log.error("Unable to read cluster definitions", e);
+// }
+
+ // Establishes the controller's own identity.
+ IpPrefix ip = valueOf(System.getProperty("onos.ip", "127.0.1.1"));
+ self = nodes.get(new NodeId(ip.toString()));
+
+ // As a fall-back, let's make sure we at least know who we are.
+ if (self == null) {
+ self = new DefaultControllerNode(new NodeId(ip.toString()), ip);
+ nodes.put(self.id(), self);
+ states.put(self.id(), State.ACTIVE);
+ }
+ }
+
+ // Kicks off the IO loops.
+ private void startCommunications() {
+ for (int i = 0; i < WORKERS; i++) {
+ try {
+ CommLoop loop = new CommLoop();
+ commLoops.add(loop);
+ commExecutors.execute(loop);
+ } catch (IOException e) {
+ log.warn("Unable to start comm IO loop", e);
+ }
+ }
+
+ // Wait for the IO loops to start
+ for (CommLoop loop : commLoops) {
+ if (!loop.awaitStart(START_TIMEOUT)) {
+ log.warn("Comm loop did not start on-time; moving on...");
+ }
+ }
+ }
+
+ // Starts listening for connections from peer cluster members.
+ private void startListening() {
+ try {
+ listenLoop = new ListenLoop(self.ip(), self.tcpPort());
+ listenExecutor.execute(listenLoop);
+ if (!listenLoop.awaitStart(START_TIMEOUT)) {
+ log.warn("Listen loop did not start on-time; moving on...");
+ }
+ } catch (IOException e) {
+ log.error("Unable to listen for cluster connections", e);
+ }
+ }
+
+ /**
+ * Initiates open connection request and registers the pending socket
+ * channel with the given IO loop.
+ *
+ * @param loop loop with which the channel should be registered
+ * @throws java.io.IOException if the socket could not be opened or connected
+ */
+ private void openConnection(DefaultControllerNode node, CommLoop loop) throws IOException {
+ SocketAddress sa = new InetSocketAddress(getByAddress(node.ip().toOctets()), node.tcpPort());
+ SocketChannel ch = SocketChannel.open();
+ nodesByChannel.put(ch, node);
+ ch.configureBlocking(false);
+ ch.connect(sa);
+ loop.connectStream(ch);
+ }
+
+
+ // Attempts to connect to any nodes that do not have an associated connection.
+ private void startInitiating() {
+ timer.schedule(connectionCustodian, CONNECTION_CUSTODIAN_DELAY, CONNECTION_CUSTODIAN_FREQUENCY);
+ }
+
+ @Override
+ public ControllerNode getLocalNode() {
+ return self;
+ }
+
+ @Override
+ public Set<ControllerNode> getNodes() {
+ ImmutableSet.Builder<ControllerNode> builder = ImmutableSet.builder();
+ return builder.addAll(nodes.values()).build();
+ }
+
+ @Override
+ public ControllerNode getNode(NodeId nodeId) {
+ return nodes.get(nodeId);
+ }
+
+ @Override
+ public State getState(NodeId nodeId) {
+ State state = states.get(nodeId);
+ return state == null ? State.INACTIVE : state;
+ }
+
+ @Override
+ public ControllerNode addNode(NodeId nodeId, IpPrefix ip, int tcpPort) {
+ DefaultControllerNode node = new DefaultControllerNode(nodeId, ip, tcpPort);
+ nodes.put(nodeId, node);
+ return node;
+ }
+
+ @Override
+ public void removeNode(NodeId nodeId) {
+ nodes.remove(nodeId);
+ TLVMessageStream stream = streams.remove(nodeId);
+ if (stream != null) {
+ stream.close();
+ }
+ }
+
+ // Listens and accepts inbound connections from other cluster nodes.
+ private class ListenLoop extends AcceptorLoop {
+ ListenLoop(IpPrefix ip, int tcpPort) throws IOException {
+ super(SELECT_TIMEOUT, new InetSocketAddress(getByAddress(ip.toOctets()), tcpPort));
+ }
+
+ @Override
+ protected void acceptConnection(ServerSocketChannel channel) throws IOException {
+ SocketChannel sc = channel.accept();
+ sc.configureBlocking(false);
+
+ Socket so = sc.socket();
+ so.setTcpNoDelay(SO_NO_DELAY);
+ so.setReceiveBufferSize(SO_RCV_BUFFER_SIZE);
+ so.setSendBufferSize(SO_SEND_BUFFER_SIZE);
+
+ findLeastUtilizedLoop().acceptStream(sc);
+ }
+ }
+
+ private class CommLoop extends IOLoop<TLVMessage, TLVMessageStream> {
+ CommLoop() throws IOException {
+ super(SELECT_TIMEOUT);
+ }
+
+ @Override
+ protected TLVMessageStream createStream(ByteChannel byteChannel) {
+ return new TLVMessageStream(this, byteChannel, COMM_BUFFER_SIZE, COMM_IDLE_TIME);
+ }
+
+ @Override
+ protected void processMessages(List<TLVMessage> messages, MessageStream<TLVMessage> stream) {
+ TLVMessageStream tlvStream = (TLVMessageStream) stream;
+ for (TLVMessage message : messages) {
+ // TODO: add type-based dispatching here... this is just a hack to get going
+ if (message.type() == HELLO_MSG) {
+ processHello(message, tlvStream);
+ } else if (message.type() == ECHO_MSG) {
+ processEcho(message, tlvStream);
+ } else {
+ log.info("Deal with other messages");
+ }
+ }
+ }
+
+ @Override
+ public TLVMessageStream acceptStream(SocketChannel channel) {
+ TLVMessageStream stream = super.acceptStream(channel);
+ try {
+ InetSocketAddress sa = (InetSocketAddress) channel.getRemoteAddress();
+ log.info("Accepted connection from node {}", valueOf(sa.getAddress().getAddress()));
+ stream.write(createHello(self));
+
+ } catch (IOException e) {
+ log.warn("Unable to accept connection from an unknown end-point", e);
+ }
+ return stream;
+ }
+
+ @Override
+ public TLVMessageStream connectStream(SocketChannel channel) {
+ TLVMessageStream stream = super.connectStream(channel);
+ DefaultControllerNode node = nodesByChannel.get(channel);
+ if (node != null) {
+ log.debug("Opened connection to node {}", node.id());
+ nodesByChannel.remove(channel);
+ }
+ return stream;
+ }
+
+ @Override
+ protected void connect(SelectionKey key) throws IOException {
+ try {
+ super.connect(key);
+ TLVMessageStream stream = (TLVMessageStream) key.attachment();
+ send(stream, createHello(self));
+ } catch (IOException e) {
+ if (!Objects.equals(e.getMessage(), "Connection refused")) {
+ throw e;
+ }
+ }
+ }
+
+ @Override
+ protected void removeStream(MessageStream<TLVMessage> stream) {
+ DefaultControllerNode node = ((TLVMessageStream) stream).node();
+ if (node != null) {
+ log.info("Closed connection to node {}", node.id());
+ states.put(node.id(), State.INACTIVE);
+ streams.remove(node.id());
+ }
+ super.removeStream(stream);
+ }
+ }
+
+ // Processes a HELLO message from a peer controller node.
+ private void processHello(TLVMessage message, TLVMessageStream stream) {
+ // FIXME: pure hack for now
+ String data = new String(message.data());
+ String[] fields = data.split(":");
+ DefaultControllerNode node = new DefaultControllerNode(new NodeId(fields[0]),
+ valueOf(fields[1]),
+ Integer.parseInt(fields[2]));
+ stream.setNode(node);
+ nodes.put(node.id(), node);
+ streams.put(node.id(), stream);
+ states.put(node.id(), State.ACTIVE);
+ }
+
+ // Processes an ECHO message from a peer controller node.
+ private void processEcho(TLVMessage message, TLVMessageStream tlvStream) {
+ // TODO: implement heart-beat refresh
+ log.info("Dealing with echoes...");
+ }
+
+ // Sends message to the specified stream.
+ private void send(TLVMessageStream stream, TLVMessage message) {
+ try {
+ stream.write(message);
+ } catch (IOException e) {
+ log.warn("Unable to send message to {}", stream.node().id());
+ }
+ }
+
+ // Creates a hello message to be sent to a peer controller node.
+ private TLVMessage createHello(DefaultControllerNode self) {
+ return new TLVMessage(HELLO_MSG, (self.id() + ":" + self.ip() + ":" + self.tcpPort()).getBytes());
+ }
+
+ // Sweeps through all controller nodes and attempts to open connection to
+ // those that presently do not have one.
+ private class ConnectionCustodian extends TimerTask {
+ @Override
+ public void run() {
+ for (DefaultControllerNode node : nodes.values()) {
+ if (node != self && !streams.containsKey(node.id())) {
+ try {
+ openConnection(node, findLeastUtilizedLoop());
+ } catch (IOException e) {
+ log.debug("Unable to connect", e);
+ }
+ }
+ }
+ }
+ }
+
+ // Finds the least-utilized IO loop.
+ private CommLoop findLeastUtilizedLoop() {
+ CommLoop leastUtilized = null;
+ int minCount = Integer.MAX_VALUE;
+ for (CommLoop loop : commLoops) {
+ int count = loop.streamCount();
+ if (count == 0) {
+ return loop;
+ }
+
+ if (count < minCount) {
+ leastUtilized = loop;
+ minCount = count;
+ }
+ }
+ return leastUtilized;
+ }
+}
diff --git a/core/store/dist/src/main/java/org/onlab/onos/store/cluster/impl/TLVMessage.java b/core/store/dist/src/main/java/org/onlab/onos/store/cluster/impl/TLVMessage.java
new file mode 100644
index 0000000..246f8ee
--- /dev/null
+++ b/core/store/dist/src/main/java/org/onlab/onos/store/cluster/impl/TLVMessage.java
@@ -0,0 +1,70 @@
+package org.onlab.onos.store.cluster.impl;
+
+import org.onlab.nio.AbstractMessage;
+
+import java.util.Objects;
+
+import static com.google.common.base.MoreObjects.toStringHelper;
+
+/**
+ * Base message for cluster-wide communications using TLVs.
+ */
+public class TLVMessage extends AbstractMessage {
+
+ private final int type;
+ private final byte[] data;
+
+ /**
+ * Creates an immutable TLV message.
+ *
+ * @param type message type
+ * @param data message data bytes
+ */
+ public TLVMessage(int type, byte[] data) {
+ this.length = data.length + TLVMessageStream.METADATA_LENGTH;
+ this.type = type;
+ this.data = data;
+ }
+
+ /**
+ * Returns the message type indicator.
+ *
+ * @return message type
+ */
+ public int type() {
+ return type;
+ }
+
+ /**
+ * Returns the data bytes.
+ *
+ * @return message data
+ */
+ public byte[] data() {
+ return data;
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(type, data);
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (this == obj) {
+ return true;
+ }
+ if (obj == null || getClass() != obj.getClass()) {
+ return false;
+ }
+ final TLVMessage other = (TLVMessage) obj;
+ return Objects.equals(this.type, other.type) &&
+ Objects.equals(this.data, other.data);
+ }
+
+ @Override
+ public String toString() {
+ return toStringHelper(this).add("type", type).add("length", length).toString();
+ }
+
+}
diff --git a/core/store/dist/src/main/java/org/onlab/onos/store/cluster/impl/TLVMessageStream.java b/core/store/dist/src/main/java/org/onlab/onos/store/cluster/impl/TLVMessageStream.java
new file mode 100644
index 0000000..b003945
--- /dev/null
+++ b/core/store/dist/src/main/java/org/onlab/onos/store/cluster/impl/TLVMessageStream.java
@@ -0,0 +1,95 @@
+package org.onlab.onos.store.cluster.impl;
+
+import org.onlab.nio.IOLoop;
+import org.onlab.nio.MessageStream;
+import org.onlab.onos.cluster.DefaultControllerNode;
+
+import java.nio.ByteBuffer;
+import java.nio.channels.ByteChannel;
+
+import static com.google.common.base.Preconditions.checkState;
+
+/**
+ * Stream for transferring TLV messages between cluster members.
+ */
+public class TLVMessageStream extends MessageStream<TLVMessage> {
+
+ public static final int METADATA_LENGTH = 16; // 8 + 4 + 4
+
+ private static final int LENGTH_OFFSET = 12;
+ private static final long MARKER = 0xfeedcafecafefeedL;
+
+ private DefaultControllerNode node;
+
+ /**
+ * Creates a message stream associated with the specified IO loop and
+ * backed by the given byte channel.
+ *
+ * @param loop IO loop
+ * @param byteChannel backing byte channel
+ * @param bufferSize size of the backing byte buffers
+ * @param maxIdleMillis maximum number of millis the stream can be idle
+ */
+ protected TLVMessageStream(IOLoop<TLVMessage, ?> loop, ByteChannel byteChannel,
+ int bufferSize, int maxIdleMillis) {
+ super(loop, byteChannel, bufferSize, maxIdleMillis);
+ }
+
+ /**
+ * Returns the node with which this stream is associated.
+ *
+ * @return controller node
+ */
+ DefaultControllerNode node() {
+ return node;
+ }
+
+ /**
+ * Sets the node with which this stream is affiliated.
+ *
+ * @param node controller node
+ */
+ void setNode(DefaultControllerNode node) {
+ checkState(this.node == null, "Stream is already bound to a node");
+ this.node = node;
+ }
+
+ @Override
+ protected TLVMessage read(ByteBuffer buffer) {
+ // Do we have enough bytes to read the header? If not, bail.
+ if (buffer.remaining() < METADATA_LENGTH) {
+ return null;
+ }
+
+ // Peek at the length and if we have enough to read the entire message
+ // go ahead, otherwise bail.
+ int length = buffer.getInt(buffer.position() + LENGTH_OFFSET);
+ if (buffer.remaining() < length) {
+ return null;
+ }
+
+ // At this point, we have enough data to read a complete message.
+ long marker = buffer.getLong();
+ checkState(marker == MARKER, "Incorrect message marker");
+
+ int type = buffer.getInt();
+ length = buffer.getInt();
+
+ // TODO: add deserialization hook here
+ byte[] data = new byte[length - METADATA_LENGTH];
+ buffer.get(data);
+
+ return new TLVMessage(type, data);
+ }
+
+ @Override
+ protected void write(TLVMessage message, ByteBuffer buffer) {
+ buffer.putLong(MARKER);
+ buffer.putInt(message.type());
+ buffer.putInt(message.length());
+
+ // TODO: add serialization hook here
+ buffer.put(message.data());
+ }
+
+}
diff --git a/core/store/dist/src/main/java/org/onlab/onos/store/device/impl/OnosClockService.java b/core/store/dist/src/main/java/org/onlab/onos/store/device/impl/OnosClockService.java
new file mode 100644
index 0000000..a99482f
--- /dev/null
+++ b/core/store/dist/src/main/java/org/onlab/onos/store/device/impl/OnosClockService.java
@@ -0,0 +1,53 @@
+package org.onlab.onos.store.device.impl;
+
+import static org.slf4j.LoggerFactory.getLogger;
+
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import org.apache.felix.scr.annotations.Activate;
+import org.apache.felix.scr.annotations.Component;
+import org.apache.felix.scr.annotations.Deactivate;
+import org.apache.felix.scr.annotations.Service;
+import org.onlab.onos.cluster.MastershipTerm;
+import org.onlab.onos.net.DeviceId;
+import org.onlab.onos.store.ClockService;
+import org.onlab.onos.store.Timestamp;
+import org.onlab.onos.store.impl.OnosTimestamp;
+import org.slf4j.Logger;
+
+@Component(immediate = true)
+@Service
+public class OnosClockService implements ClockService {
+
+ private final Logger log = getLogger(getClass());
+
+ // TODO: Implement per device ticker that is reset to 0 at the beginning of a new term.
+ private final AtomicInteger ticker = new AtomicInteger(0);
+ private ConcurrentMap<DeviceId, MastershipTerm> deviceMastershipTerms = new ConcurrentHashMap<>();
+
+ @Activate
+ public void activate() {
+ log.info("Started");
+ }
+
+ @Deactivate
+ public void deactivate() {
+ log.info("Stopped");
+ }
+
+ @Override
+ public Timestamp getTimestamp(DeviceId deviceId) {
+ MastershipTerm term = deviceMastershipTerms.get(deviceId);
+ if (term == null) {
+ throw new IllegalStateException("Requesting timestamp for a deviceId without mastership");
+ }
+ return new OnosTimestamp(term.termNumber(), ticker.incrementAndGet());
+ }
+
+ @Override
+ public void setMastershipTerm(DeviceId deviceId, MastershipTerm term) {
+ deviceMastershipTerms.put(deviceId, term);
+ }
+}
diff --git a/core/store/dist/src/main/java/org/onlab/onos/store/device/impl/OnosDistributedDeviceStore.java b/core/store/dist/src/main/java/org/onlab/onos/store/device/impl/OnosDistributedDeviceStore.java
new file mode 100644
index 0000000..bd5f2fd
--- /dev/null
+++ b/core/store/dist/src/main/java/org/onlab/onos/store/device/impl/OnosDistributedDeviceStore.java
@@ -0,0 +1,338 @@
+package org.onlab.onos.store.device.impl;
+
+import static com.google.common.base.Predicates.notNull;
+import static com.google.common.base.Preconditions.checkState;
+
+import com.google.common.collect.FluentIterable;
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.ImmutableSet.Builder;
+
+import org.apache.felix.scr.annotations.Activate;
+import org.apache.felix.scr.annotations.Component;
+import org.apache.felix.scr.annotations.Deactivate;
+import org.apache.felix.scr.annotations.Reference;
+import org.apache.felix.scr.annotations.ReferenceCardinality;
+import org.apache.felix.scr.annotations.Service;
+import org.onlab.onos.net.DefaultDevice;
+import org.onlab.onos.net.DefaultPort;
+import org.onlab.onos.net.Device;
+import org.onlab.onos.net.DeviceId;
+import org.onlab.onos.net.Port;
+import org.onlab.onos.net.PortNumber;
+import org.onlab.onos.net.device.DeviceDescription;
+import org.onlab.onos.net.device.DeviceEvent;
+import org.onlab.onos.net.device.DeviceStore;
+import org.onlab.onos.net.device.DeviceStoreDelegate;
+import org.onlab.onos.net.device.PortDescription;
+import org.onlab.onos.net.provider.ProviderId;
+import org.onlab.onos.store.AbstractStore;
+import org.onlab.onos.store.ClockService;
+import org.onlab.onos.store.Timestamp;
+import org.slf4j.Logger;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+
+import static com.google.common.base.Preconditions.checkArgument;
+import static org.onlab.onos.net.device.DeviceEvent.Type.*;
+import static org.slf4j.LoggerFactory.getLogger;
+
+/**
+ * Manages inventory of infrastructure devices using a protocol that takes into consideration
+ * the order in which device events occur.
+ */
+@Component(immediate = true)
+@Service
+public class OnosDistributedDeviceStore
+ extends AbstractStore<DeviceEvent, DeviceStoreDelegate>
+ implements DeviceStore {
+
+ private final Logger log = getLogger(getClass());
+
+ public static final String DEVICE_NOT_FOUND = "Device with ID %s not found";
+
+ private ConcurrentHashMap<DeviceId, VersionedValue<Device>> devices;
+ private ConcurrentHashMap<DeviceId, Map<PortNumber, VersionedValue<Port>>> devicePorts;
+
+ @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+ protected ClockService clockService;
+
+ @Activate
+ public void activate() {
+
+ devices = new ConcurrentHashMap<>();
+ devicePorts = new ConcurrentHashMap<>();
+
+ log.info("Started");
+ }
+
+ @Deactivate
+ public void deactivate() {
+ log.info("Stopped");
+ }
+
+ @Override
+ public int getDeviceCount() {
+ return devices.size();
+ }
+
+ @Override
+ public Iterable<Device> getDevices() {
+ Builder<Device> builder = ImmutableSet.builder();
+ synchronized (this) {
+ for (VersionedValue<Device> device : devices.values()) {
+ builder.add(device.entity());
+ }
+ return builder.build();
+ }
+ }
+
+ @Override
+ public Device getDevice(DeviceId deviceId) {
+ VersionedValue<Device> device = devices.get(deviceId);
+ checkArgument(device != null, DEVICE_NOT_FOUND, deviceId);
+ return device.entity();
+ }
+
+ @Override
+ public DeviceEvent createOrUpdateDevice(ProviderId providerId, DeviceId deviceId,
+ DeviceDescription deviceDescription) {
+ Timestamp newTimestamp = clockService.getTimestamp(deviceId);
+ VersionedValue<Device> device = devices.get(deviceId);
+
+ if (device == null) {
+ return createDevice(providerId, deviceId, deviceDescription, newTimestamp);
+ }
+
+ checkState(newTimestamp.compareTo(device.timestamp()) > 0,
+ "Existing device has a timestamp in the future!");
+
+ return updateDevice(providerId, device.entity(), deviceDescription, newTimestamp);
+ }
+
+ // Creates the device and returns the appropriate event if necessary.
+ private DeviceEvent createDevice(ProviderId providerId, DeviceId deviceId,
+ DeviceDescription desc, Timestamp timestamp) {
+ Device device = new DefaultDevice(providerId, deviceId, desc.type(),
+ desc.manufacturer(),
+ desc.hwVersion(), desc.swVersion(),
+ desc.serialNumber());
+
+ devices.put(deviceId, new VersionedValue<>(device, true, timestamp));
+ // TODO,FIXME: broadcast a message telling peers of a device event.
+ return new DeviceEvent(DEVICE_ADDED, device, null);
+ }
+
+ // Updates the device and returns the appropriate event if necessary.
+ private DeviceEvent updateDevice(ProviderId providerId, Device device,
+ DeviceDescription desc, Timestamp timestamp) {
+ // We allow only certain attributes to trigger update
+ if (!Objects.equals(device.hwVersion(), desc.hwVersion()) ||
+ !Objects.equals(device.swVersion(), desc.swVersion())) {
+
+ Device updated = new DefaultDevice(providerId, device.id(),
+ desc.type(),
+ desc.manufacturer(),
+ desc.hwVersion(),
+ desc.swVersion(),
+ desc.serialNumber());
+ devices.put(device.id(), new VersionedValue<Device>(updated, true, timestamp));
+ // FIXME: broadcast a message telling peers of a device event.
+ return new DeviceEvent(DeviceEvent.Type.DEVICE_UPDATED, updated, null);
+ }
+
+ // Otherwise merely attempt to change availability
+ Device updated = new DefaultDevice(providerId, device.id(),
+ desc.type(),
+ desc.manufacturer(),
+ desc.hwVersion(),
+ desc.swVersion(),
+ desc.serialNumber());
+
+ VersionedValue<Device> oldDevice = devices.put(device.id(),
+ new VersionedValue<Device>(updated, true, timestamp));
+ if (!oldDevice.isUp()) {
+ return new DeviceEvent(DEVICE_AVAILABILITY_CHANGED, device, null);
+ } else {
+ return null;
+ }
+ }
+
+ @Override
+ public DeviceEvent markOffline(DeviceId deviceId) {
+ VersionedValue<Device> device = devices.get(deviceId);
+ boolean willRemove = device != null && device.isUp();
+ if (!willRemove) {
+ return null;
+ }
+ Timestamp timestamp = clockService.getTimestamp(deviceId);
+ if (replaceIfLatest(device.entity(), false, timestamp)) {
+ return new DeviceEvent(DEVICE_AVAILABILITY_CHANGED, device.entity(), null);
+ }
+ return null;
+ }
+
+ // Replace existing value if its timestamp is older.
+ private synchronized boolean replaceIfLatest(Device device, boolean isUp, Timestamp timestamp) {
+ VersionedValue<Device> existingValue = devices.get(device.id());
+ if (timestamp.compareTo(existingValue.timestamp()) > 0) {
+ devices.put(device.id(), new VersionedValue<Device>(device, isUp, timestamp));
+ return true;
+ }
+ return false;
+ }
+
+    @Override
+    public List<DeviceEvent> updatePorts(DeviceId deviceId,
+                                   List<PortDescription> portDescriptions) {
+        List<DeviceEvent> events = new ArrayList<>();
+        synchronized (this) {
+            VersionedValue<Device> device = devices.get(deviceId);
+            checkArgument(device != null, DEVICE_NOT_FOUND, deviceId);
+            Map<PortNumber, VersionedValue<Port>> ports = getPortMap(deviceId);
+            Timestamp newTimestamp = clockService.getTimestamp(deviceId);
+
+            // Add new ports
+            Set<PortNumber> processed = new HashSet<>();
+            for (PortDescription portDescription : portDescriptions) {
+                VersionedValue<Port> port = ports.get(portDescription.portNumber());
+                if (port == null) {
+                    events.add(createPort(device, portDescription, ports, newTimestamp));
+                } else {
+                    checkState(newTimestamp.compareTo(port.timestamp()) > 0, "Existing port state has a timestamp in the future!");
+                    events.add(updatePort(device.entity(), port.entity(), portDescription, ports, newTimestamp));
+                }
+                processed.add(portDescription.portNumber());
+            }
+
+            updatePortMap(deviceId, ports);
+
+            events.addAll(pruneOldPorts(device.entity(), ports, processed));
+        }
+        return FluentIterable.from(events).filter(notNull()).toList();
+
+    // Creates a new port based on the port description, adds it to the map and
+    // returns the corresponding event.
+ //@GuardedBy("this")
+ private DeviceEvent createPort(VersionedValue<Device> device, PortDescription portDescription,
+ Map<PortNumber, VersionedValue<Port>> ports, Timestamp timestamp) {
+ Port port = new DefaultPort(device.entity(), portDescription.portNumber(),
+ portDescription.isEnabled());
+ ports.put(port.number(), new VersionedValue<Port>(port, true, timestamp));
+ updatePortMap(device.entity().id(), ports);
+ return new DeviceEvent(PORT_ADDED, device.entity(), port);
+ }
+
+ // Checks if the specified port requires update and if so, it replaces the
+ // existing entry in the map and returns corresponding event.
+ //@GuardedBy("this")
+ private DeviceEvent updatePort(Device device, Port port,
+ PortDescription portDescription,
+ Map<PortNumber, VersionedValue<Port>> ports,
+ Timestamp timestamp) {
+ if (port.isEnabled() != portDescription.isEnabled()) {
+ VersionedValue<Port> updatedPort = new VersionedValue<Port>(
+ new DefaultPort(device, portDescription.portNumber(),
+ portDescription.isEnabled()),
+ portDescription.isEnabled(),
+ timestamp);
+ ports.put(port.number(), updatedPort);
+ updatePortMap(device.id(), ports);
+ return new DeviceEvent(PORT_UPDATED, device, updatedPort.entity());
+ }
+ return null;
+ }
+
+ // Prunes the specified list of ports based on which ports are in the
+ // processed list and returns list of corresponding events.
+ //@GuardedBy("this")
+ private List<DeviceEvent> pruneOldPorts(Device device,
+ Map<PortNumber, VersionedValue<Port>> ports,
+ Set<PortNumber> processed) {
+ List<DeviceEvent> events = new ArrayList<>();
+ Iterator<PortNumber> iterator = ports.keySet().iterator();
+ while (iterator.hasNext()) {
+ PortNumber portNumber = iterator.next();
+ if (!processed.contains(portNumber)) {
+ events.add(new DeviceEvent(PORT_REMOVED, device,
+ ports.get(portNumber).entity()));
+ iterator.remove();
+ }
+ }
+ if (!events.isEmpty()) {
+ updatePortMap(device.id(), ports);
+ }
+ return events;
+ }
+
+ // Gets the map of ports for the specified device; if one does not already
+ // exist, it creates and registers a new one.
+ // WARN: returned value is a copy, changes made to the Map
+ // needs to be written back using updatePortMap
+ //@GuardedBy("this")
+ private Map<PortNumber, VersionedValue<Port>> getPortMap(DeviceId deviceId) {
+ Map<PortNumber, VersionedValue<Port>> ports = devicePorts.get(deviceId);
+ if (ports == null) {
+ ports = new HashMap<>();
+            // this is probably a waste of time in most cases.
+ updatePortMap(deviceId, ports);
+ }
+ return ports;
+ }
+
+ //@GuardedBy("this")
+ private void updatePortMap(DeviceId deviceId, Map<PortNumber, VersionedValue<Port>> ports) {
+ devicePorts.put(deviceId, ports);
+ }
+
+    @Override
+    public DeviceEvent updatePortStatus(DeviceId deviceId,
+                                        PortDescription portDescription) {
+        VersionedValue<Device> device = devices.get(deviceId);
+        checkArgument(device != null, DEVICE_NOT_FOUND, deviceId);
+        Map<PortNumber, VersionedValue<Port>> ports = getPortMap(deviceId);
+        VersionedValue<Port> port = ports.get(portDescription.portNumber());
+        Timestamp timestamp = clockService.getTimestamp(deviceId);
+        return port == null ? null : updatePort(device.entity(), port.entity(), portDescription, ports, timestamp);
+    }
+
+ @Override
+ public List<Port> getPorts(DeviceId deviceId) {
+ Map<PortNumber, VersionedValue<Port>> versionedPorts = devicePorts.get(deviceId);
+ if (versionedPorts == null) {
+ return Collections.emptyList();
+ }
+ List<Port> ports = new ArrayList<>();
+ for (VersionedValue<Port> port : versionedPorts.values()) {
+ ports.add(port.entity());
+ }
+ return ports;
+ }
+
+    @Override
+    public Port getPort(DeviceId deviceId, PortNumber portNumber) {
+        Map<PortNumber, VersionedValue<Port>> ports = devicePorts.get(deviceId);
+        return (ports == null || ports.get(portNumber) == null) ? null : ports.get(portNumber).entity();
+    }
+
+    @Override
+    public boolean isAvailable(DeviceId deviceId) {
+        return devices.containsKey(deviceId) && devices.get(deviceId).isUp();
+    }
+
+ @Override
+ public DeviceEvent removeDevice(DeviceId deviceId) {
+ VersionedValue<Device> previousDevice = devices.remove(deviceId);
+ return previousDevice == null ? null :
+ new DeviceEvent(DEVICE_REMOVED, previousDevice.entity(), null);
+ }
+}
diff --git a/core/store/dist/src/main/java/org/onlab/onos/store/device/impl/VersionedValue.java b/core/store/dist/src/main/java/org/onlab/onos/store/device/impl/VersionedValue.java
new file mode 100644
index 0000000..1a85c53
--- /dev/null
+++ b/core/store/dist/src/main/java/org/onlab/onos/store/device/impl/VersionedValue.java
@@ -0,0 +1,45 @@
+package org.onlab.onos.store.device.impl;
+
+import org.onlab.onos.store.Timestamp;
+
+/**
+ * Wrapper class for an entity that is versioned
+ * and can either be up or down.
+ *
+ * @param <T> type of the value.
+ */
+public class VersionedValue<T> {
+ private final T entity;
+ private final Timestamp timestamp;
+ private final boolean isUp;
+
+ public VersionedValue(T entity, boolean isUp, Timestamp timestamp) {
+ this.entity = entity;
+ this.isUp = isUp;
+ this.timestamp = timestamp;
+ }
+
+ /**
+ * Returns the value.
+ * @return value.
+ */
+ public T entity() {
+ return entity;
+ }
+
+ /**
+ * Tells whether the entity is up or down.
+ * @return true if up, false otherwise.
+ */
+ public boolean isUp() {
+ return isUp;
+ }
+
+ /**
+ * Returns the timestamp (version) associated with this entity.
+ * @return timestamp.
+ */
+ public Timestamp timestamp() {
+ return timestamp;
+ }
+}
diff --git a/core/store/dist/src/main/java/org/onlab/onos/store/device/impl/package-info.java b/core/store/dist/src/main/java/org/onlab/onos/store/device/impl/package-info.java
new file mode 100644
index 0000000..b2fc91d
--- /dev/null
+++ b/core/store/dist/src/main/java/org/onlab/onos/store/device/impl/package-info.java
@@ -0,0 +1,4 @@
+/**
+ * Implementation of device store using distributed structures.
+ */
+package org.onlab.onos.store.device.impl;
diff --git a/core/store/dist/src/main/java/org/onlab/onos/store/flow/impl/DistributedFlowRuleStore.java b/core/store/dist/src/main/java/org/onlab/onos/store/flow/impl/DistributedFlowRuleStore.java
new file mode 100644
index 0000000..5a5592a
--- /dev/null
+++ b/core/store/dist/src/main/java/org/onlab/onos/store/flow/impl/DistributedFlowRuleStore.java
@@ -0,0 +1,153 @@
+package org.onlab.onos.store.flow.impl;
+
+import static org.onlab.onos.net.flow.FlowRuleEvent.Type.RULE_ADDED;
+import static org.onlab.onos.net.flow.FlowRuleEvent.Type.RULE_REMOVED;
+import static org.slf4j.LoggerFactory.getLogger;
+
+import java.util.Collection;
+import java.util.Collections;
+
+import org.apache.felix.scr.annotations.Activate;
+import org.apache.felix.scr.annotations.Component;
+import org.apache.felix.scr.annotations.Deactivate;
+import org.apache.felix.scr.annotations.Service;
+import org.onlab.onos.ApplicationId;
+import org.onlab.onos.net.DeviceId;
+import org.onlab.onos.net.flow.DefaultFlowRule;
+import org.onlab.onos.net.flow.FlowRule;
+import org.onlab.onos.net.flow.FlowRule.FlowRuleState;
+import org.onlab.onos.net.flow.FlowRuleEvent;
+import org.onlab.onos.net.flow.FlowRuleEvent.Type;
+import org.onlab.onos.net.flow.FlowRuleStore;
+import org.onlab.onos.net.flow.FlowRuleStoreDelegate;
+import org.onlab.onos.store.AbstractStore;
+import org.slf4j.Logger;
+
+import com.google.common.collect.ArrayListMultimap;
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Multimap;
+
+/**
+ * Manages inventory of flow rules using trivial in-memory implementation.
+ */
+//FIXME: I LIE I AM NOT DISTRIBUTED
+@Component(immediate = true)
+@Service
+public class DistributedFlowRuleStore
+extends AbstractStore<FlowRuleEvent, FlowRuleStoreDelegate>
+implements FlowRuleStore {
+
+ private final Logger log = getLogger(getClass());
+
+ // store entries as a pile of rules, no info about device tables
+ private final Multimap<DeviceId, FlowRule> flowEntries =
+ ArrayListMultimap.<DeviceId, FlowRule>create();
+
+ private final Multimap<ApplicationId, FlowRule> flowEntriesById =
+ ArrayListMultimap.<ApplicationId, FlowRule>create();
+
+ @Activate
+ public void activate() {
+ log.info("Started");
+ }
+
+ @Deactivate
+ public void deactivate() {
+ log.info("Stopped");
+ }
+
+
+ @Override
+ public synchronized FlowRule getFlowRule(FlowRule rule) {
+ for (FlowRule f : flowEntries.get(rule.deviceId())) {
+ if (f.equals(rule)) {
+ return f;
+ }
+ }
+ return null;
+ }
+
+ @Override
+ public synchronized Iterable<FlowRule> getFlowEntries(DeviceId deviceId) {
+ Collection<FlowRule> rules = flowEntries.get(deviceId);
+ if (rules == null) {
+ return Collections.emptyList();
+ }
+ return ImmutableSet.copyOf(rules);
+ }
+
+ @Override
+ public synchronized Iterable<FlowRule> getFlowEntriesByAppId(ApplicationId appId) {
+ Collection<FlowRule> rules = flowEntriesById.get(appId);
+ if (rules == null) {
+ return Collections.emptyList();
+ }
+ return ImmutableSet.copyOf(rules);
+ }
+
+ @Override
+ public synchronized void storeFlowRule(FlowRule rule) {
+ FlowRule f = new DefaultFlowRule(rule, FlowRuleState.PENDING_ADD);
+ DeviceId did = f.deviceId();
+ if (!flowEntries.containsEntry(did, f)) {
+ flowEntries.put(did, f);
+ flowEntriesById.put(rule.appId(), f);
+ }
+ }
+
+ @Override
+ public synchronized void deleteFlowRule(FlowRule rule) {
+ FlowRule f = new DefaultFlowRule(rule, FlowRuleState.PENDING_REMOVE);
+ DeviceId did = f.deviceId();
+
+ /*
+ * find the rule and mark it for deletion.
+         * Ultimately a flow-removed event will arrive and remove it.
+ */
+
+ if (flowEntries.containsEntry(did, f)) {
+ //synchronized (flowEntries) {
+ flowEntries.remove(did, f);
+ flowEntries.put(did, f);
+ flowEntriesById.remove(rule.appId(), rule);
+ //}
+ }
+ }
+
+ @Override
+ public synchronized FlowRuleEvent addOrUpdateFlowRule(FlowRule rule) {
+ DeviceId did = rule.deviceId();
+
+ // check if this new rule is an update to an existing entry
+ if (flowEntries.containsEntry(did, rule)) {
+ //synchronized (flowEntries) {
+ // Multimaps support duplicates so we have to remove our rule
+ // and replace it with the current version.
+ flowEntries.remove(did, rule);
+ flowEntries.put(did, rule);
+ //}
+ return new FlowRuleEvent(Type.RULE_UPDATED, rule);
+ }
+
+ flowEntries.put(did, rule);
+ return new FlowRuleEvent(RULE_ADDED, rule);
+ }
+
+ @Override
+ public synchronized FlowRuleEvent removeFlowRule(FlowRule rule) {
+ //synchronized (this) {
+ if (flowEntries.remove(rule.deviceId(), rule)) {
+ return new FlowRuleEvent(RULE_REMOVED, rule);
+ } else {
+ return null;
+ }
+ //}
+ }
+
+
+
+
+
+
+
+}
diff --git a/core/store/dist/src/main/java/org/onlab/onos/store/host/impl/DistributedHostStore.java b/core/store/dist/src/main/java/org/onlab/onos/store/host/impl/DistributedHostStore.java
new file mode 100644
index 0000000..09820f4
--- /dev/null
+++ b/core/store/dist/src/main/java/org/onlab/onos/store/host/impl/DistributedHostStore.java
@@ -0,0 +1,278 @@
+package org.onlab.onos.store.host.impl;
+
+import static org.onlab.onos.net.host.HostEvent.Type.HOST_ADDED;
+import static org.onlab.onos.net.host.HostEvent.Type.HOST_MOVED;
+import static org.onlab.onos.net.host.HostEvent.Type.HOST_REMOVED;
+import static org.onlab.onos.net.host.HostEvent.Type.HOST_UPDATED;
+import static org.slf4j.LoggerFactory.getLogger;
+
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+
+import org.apache.felix.scr.annotations.Activate;
+import org.apache.felix.scr.annotations.Component;
+import org.apache.felix.scr.annotations.Deactivate;
+import org.apache.felix.scr.annotations.Service;
+import org.onlab.onos.net.ConnectPoint;
+import org.onlab.onos.net.DefaultHost;
+import org.onlab.onos.net.DeviceId;
+import org.onlab.onos.net.Host;
+import org.onlab.onos.net.HostId;
+import org.onlab.onos.net.host.HostDescription;
+import org.onlab.onos.net.host.HostEvent;
+import org.onlab.onos.net.host.HostStore;
+import org.onlab.onos.net.host.HostStoreDelegate;
+import org.onlab.onos.net.host.PortAddresses;
+import org.onlab.onos.net.provider.ProviderId;
+import org.onlab.onos.store.AbstractStore;
+import org.onlab.packet.IpPrefix;
+import org.onlab.packet.MacAddress;
+import org.onlab.packet.VlanId;
+import org.slf4j.Logger;
+
+import com.google.common.collect.HashMultimap;
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Multimap;
+import com.google.common.collect.Sets;
+
+/**
+ * Manages inventory of end-station hosts using trivial in-memory
+ * implementation.
+ */
+//FIXME: I LIE I AM NOT DISTRIBUTED
+@Component(immediate = true)
+@Service
+public class DistributedHostStore
+extends AbstractStore<HostEvent, HostStoreDelegate>
+implements HostStore {
+
+ private final Logger log = getLogger(getClass());
+
+ // Host inventory
+ private final Map<HostId, Host> hosts = new ConcurrentHashMap<>();
+
+ // Hosts tracked by their location
+ private final Multimap<ConnectPoint, Host> locations = HashMultimap.create();
+
+ private final Map<ConnectPoint, PortAddresses> portAddresses =
+ new ConcurrentHashMap<>();
+
+ @Activate
+ public void activate() {
+ log.info("Started");
+ }
+
+ @Deactivate
+ public void deactivate() {
+ log.info("Stopped");
+ }
+
+ @Override
+ public HostEvent createOrUpdateHost(ProviderId providerId, HostId hostId,
+ HostDescription hostDescription) {
+ Host host = hosts.get(hostId);
+ if (host == null) {
+ return createHost(providerId, hostId, hostDescription);
+ }
+ return updateHost(providerId, host, hostDescription);
+ }
+
+ // creates a new host and sends HOST_ADDED
+ private HostEvent createHost(ProviderId providerId, HostId hostId,
+ HostDescription descr) {
+ DefaultHost newhost = new DefaultHost(providerId, hostId,
+ descr.hwAddress(),
+ descr.vlan(),
+ descr.location(),
+ descr.ipAddresses());
+ synchronized (this) {
+ hosts.put(hostId, newhost);
+ locations.put(descr.location(), newhost);
+ }
+ return new HostEvent(HOST_ADDED, newhost);
+ }
+
+ // checks for type of update to host, sends appropriate event
+ private HostEvent updateHost(ProviderId providerId, Host host,
+ HostDescription descr) {
+ DefaultHost updated;
+ HostEvent event;
+ if (!host.location().equals(descr.location())) {
+ updated = new DefaultHost(providerId, host.id(),
+ host.mac(),
+ host.vlan(),
+ descr.location(),
+ host.ipAddresses());
+ event = new HostEvent(HOST_MOVED, updated);
+
+ } else if (!(host.ipAddresses().equals(descr.ipAddresses()))) {
+ updated = new DefaultHost(providerId, host.id(),
+ host.mac(),
+ host.vlan(),
+ descr.location(),
+ descr.ipAddresses());
+ event = new HostEvent(HOST_UPDATED, updated);
+ } else {
+ return null;
+ }
+ synchronized (this) {
+ hosts.put(host.id(), updated);
+ locations.remove(host.location(), host);
+ locations.put(updated.location(), updated);
+ }
+ return event;
+ }
+
+ @Override
+ public HostEvent removeHost(HostId hostId) {
+ synchronized (this) {
+ Host host = hosts.remove(hostId);
+ if (host != null) {
+ locations.remove((host.location()), host);
+ return new HostEvent(HOST_REMOVED, host);
+ }
+ return null;
+ }
+ }
+
+ @Override
+ public int getHostCount() {
+ return hosts.size();
+ }
+
+ @Override
+ public Iterable<Host> getHosts() {
+ return Collections.unmodifiableSet(new HashSet<>(hosts.values()));
+ }
+
+ @Override
+ public Host getHost(HostId hostId) {
+ return hosts.get(hostId);
+ }
+
+ @Override
+ public Set<Host> getHosts(VlanId vlanId) {
+ Set<Host> vlanset = new HashSet<>();
+ for (Host h : hosts.values()) {
+ if (h.vlan().equals(vlanId)) {
+ vlanset.add(h);
+ }
+ }
+ return vlanset;
+ }
+
+ @Override
+ public Set<Host> getHosts(MacAddress mac) {
+ Set<Host> macset = new HashSet<>();
+ for (Host h : hosts.values()) {
+ if (h.mac().equals(mac)) {
+ macset.add(h);
+ }
+ }
+ return macset;
+ }
+
+ @Override
+ public Set<Host> getHosts(IpPrefix ip) {
+ Set<Host> ipset = new HashSet<>();
+ for (Host h : hosts.values()) {
+ if (h.ipAddresses().contains(ip)) {
+ ipset.add(h);
+ }
+ }
+ return ipset;
+ }
+
+ @Override
+ public Set<Host> getConnectedHosts(ConnectPoint connectPoint) {
+ return ImmutableSet.copyOf(locations.get(connectPoint));
+ }
+
+ @Override
+ public Set<Host> getConnectedHosts(DeviceId deviceId) {
+ Set<Host> hostset = new HashSet<>();
+ for (ConnectPoint p : locations.keySet()) {
+ if (p.deviceId().equals(deviceId)) {
+ hostset.addAll(locations.get(p));
+ }
+ }
+ return hostset;
+ }
+
+ @Override
+ public void updateAddressBindings(PortAddresses addresses) {
+ synchronized (portAddresses) {
+ PortAddresses existing = portAddresses.get(addresses.connectPoint());
+ if (existing == null) {
+ portAddresses.put(addresses.connectPoint(), addresses);
+ } else {
+ Set<IpPrefix> union = Sets.union(existing.ips(), addresses.ips())
+ .immutableCopy();
+
+ MacAddress newMac = (addresses.mac() == null) ? existing.mac()
+ : addresses.mac();
+
+ PortAddresses newAddresses =
+ new PortAddresses(addresses.connectPoint(), union, newMac);
+
+ portAddresses.put(newAddresses.connectPoint(), newAddresses);
+ }
+ }
+ }
+
+ @Override
+ public void removeAddressBindings(PortAddresses addresses) {
+ synchronized (portAddresses) {
+ PortAddresses existing = portAddresses.get(addresses.connectPoint());
+ if (existing != null) {
+ Set<IpPrefix> difference =
+ Sets.difference(existing.ips(), addresses.ips()).immutableCopy();
+
+ // If they removed the existing mac, set the new mac to null.
+ // Otherwise, keep the existing mac.
+ MacAddress newMac = existing.mac();
+ if (addresses.mac() != null && addresses.mac().equals(existing.mac())) {
+ newMac = null;
+ }
+
+ PortAddresses newAddresses =
+ new PortAddresses(addresses.connectPoint(), difference, newMac);
+
+ portAddresses.put(newAddresses.connectPoint(), newAddresses);
+ }
+ }
+ }
+
+ @Override
+ public void clearAddressBindings(ConnectPoint connectPoint) {
+ synchronized (portAddresses) {
+ portAddresses.remove(connectPoint);
+ }
+ }
+
+ @Override
+ public Set<PortAddresses> getAddressBindings() {
+ synchronized (portAddresses) {
+ return new HashSet<>(portAddresses.values());
+ }
+ }
+
+ @Override
+ public PortAddresses getAddressBindingsForPort(ConnectPoint connectPoint) {
+ PortAddresses addresses;
+
+ synchronized (portAddresses) {
+ addresses = portAddresses.get(connectPoint);
+ }
+
+ if (addresses == null) {
+ addresses = new PortAddresses(connectPoint, null, null);
+ }
+
+ return addresses;
+ }
+
+}
diff --git a/core/store/src/main/java/org/onlab/onos/store/impl/OnosTimestamp.java b/core/store/dist/src/main/java/org/onlab/onos/store/impl/OnosTimestamp.java
similarity index 72%
rename from core/store/src/main/java/org/onlab/onos/store/impl/OnosTimestamp.java
rename to core/store/dist/src/main/java/org/onlab/onos/store/impl/OnosTimestamp.java
index f994e02..2005582 100644
--- a/core/store/src/main/java/org/onlab/onos/store/impl/OnosTimestamp.java
+++ b/core/store/dist/src/main/java/org/onlab/onos/store/impl/OnosTimestamp.java
@@ -1,11 +1,9 @@
package org.onlab.onos.store.impl;
-import static com.google.common.base.Preconditions.checkNotNull;
import static com.google.common.base.Preconditions.checkArgument;
import java.util.Objects;
-import org.onlab.onos.net.ElementId;
import org.onlab.onos.store.Timestamp;
import com.google.common.base.MoreObjects;
@@ -14,22 +12,20 @@
// If it is store specific, implement serializable interfaces?
/**
* Default implementation of Timestamp.
+ * TODO: Better documentation.
*/
public final class OnosTimestamp implements Timestamp {
- private final ElementId id;
private final int termNumber;
private final int sequenceNumber;
/**
* Default version tuple.
*
- * @param id identifier of the element
* @param termNumber the mastership termNumber
* @param sequenceNumber the sequenceNumber number within the termNumber
*/
- public OnosTimestamp(ElementId id, int termNumber, int sequenceNumber) {
- this.id = checkNotNull(id);
+ public OnosTimestamp(int termNumber, int sequenceNumber) {
this.termNumber = termNumber;
this.sequenceNumber = sequenceNumber;
}
@@ -38,9 +34,6 @@
public int compareTo(Timestamp o) {
checkArgument(o instanceof OnosTimestamp, "Must be OnosTimestamp", o);
OnosTimestamp that = (OnosTimestamp) o;
- checkArgument(this.id.equals(that.id),
- "Cannot compare version for different element this:%s, that:%s",
- this, that);
return ComparisonChain.start()
.compare(this.termNumber, that.termNumber)
@@ -50,7 +43,7 @@
@Override
public int hashCode() {
- return Objects.hash(id, termNumber, sequenceNumber);
+ return Objects.hash(termNumber, sequenceNumber);
}
@Override
@@ -62,30 +55,19 @@
return false;
}
OnosTimestamp that = (OnosTimestamp) obj;
- return Objects.equals(this.id, that.id) &&
- Objects.equals(this.termNumber, that.termNumber) &&
+ return Objects.equals(this.termNumber, that.termNumber) &&
Objects.equals(this.sequenceNumber, that.sequenceNumber);
}
@Override
public String toString() {
return MoreObjects.toStringHelper(getClass())
- .add("id", id)
.add("termNumber", termNumber)
.add("sequenceNumber", sequenceNumber)
.toString();
}
/**
- * Returns the element.
- *
- * @return element identifier
- */
- public ElementId id() {
- return id;
- }
-
- /**
* Returns the termNumber.
*
* @return termNumber
diff --git a/core/store/dist/src/main/java/org/onlab/onos/store/link/impl/OnosDistributedLinkStore.java b/core/store/dist/src/main/java/org/onlab/onos/store/link/impl/OnosDistributedLinkStore.java
new file mode 100644
index 0000000..5df25b4
--- /dev/null
+++ b/core/store/dist/src/main/java/org/onlab/onos/store/link/impl/OnosDistributedLinkStore.java
@@ -0,0 +1,246 @@
+package org.onlab.onos.store.link.impl;
+
+import static org.onlab.onos.net.Link.Type.DIRECT;
+import static org.onlab.onos.net.Link.Type.INDIRECT;
+import static org.onlab.onos.net.link.LinkEvent.Type.LINK_ADDED;
+import static org.onlab.onos.net.link.LinkEvent.Type.LINK_REMOVED;
+import static org.onlab.onos.net.link.LinkEvent.Type.LINK_UPDATED;
+import static org.slf4j.LoggerFactory.getLogger;
+
+import java.util.HashSet;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+
+import org.apache.felix.scr.annotations.Activate;
+import org.apache.felix.scr.annotations.Component;
+import org.apache.felix.scr.annotations.Deactivate;
+import org.apache.felix.scr.annotations.Reference;
+import org.apache.felix.scr.annotations.ReferenceCardinality;
+import org.apache.felix.scr.annotations.Service;
+import org.onlab.onos.net.ConnectPoint;
+import org.onlab.onos.net.DefaultLink;
+import org.onlab.onos.net.DeviceId;
+import org.onlab.onos.net.Link;
+import org.onlab.onos.net.LinkKey;
+import org.onlab.onos.net.link.LinkDescription;
+import org.onlab.onos.net.link.LinkEvent;
+import org.onlab.onos.net.link.LinkStore;
+import org.onlab.onos.net.link.LinkStoreDelegate;
+import org.onlab.onos.net.provider.ProviderId;
+import org.onlab.onos.store.AbstractStore;
+import org.onlab.onos.store.ClockService;
+import org.onlab.onos.store.Timestamp;
+import org.onlab.onos.store.device.impl.VersionedValue;
+import org.slf4j.Logger;
+
+import com.google.common.collect.HashMultimap;
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Multimap;
+import com.google.common.collect.ImmutableSet.Builder;
+
+import static com.google.common.base.Preconditions.checkArgument;
+import static com.google.common.base.Preconditions.checkState;
+
+/**
+ * Manages inventory of infrastructure links using a protocol that takes into consideration
+ * the order in which events occur.
+ */
+// FIXME: This does not yet implement the full protocol.
+// The full protocol requires the sender of LLDP message to include the
+// version information of src device/port and the receiver to
+// take that into account when figuring out if a more recent src
+// device/port down event renders the link discovery obsolete.
+@Component(immediate = true)
+@Service
+public class OnosDistributedLinkStore
+ extends AbstractStore<LinkEvent, LinkStoreDelegate>
+ implements LinkStore {
+
+ private final Logger log = getLogger(getClass());
+
+ // Link inventory
+ private ConcurrentMap<LinkKey, VersionedValue<Link>> links;
+
+ public static final String LINK_NOT_FOUND = "Link between %s and %s not found";
+
+ // TODO synchronize?
+ // Egress and ingress link sets
+ private final Multimap<DeviceId, VersionedValue<Link>> srcLinks = HashMultimap.create();
+ private final Multimap<DeviceId, VersionedValue<Link>> dstLinks = HashMultimap.create();
+
+ @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+ protected ClockService clockService;
+
+ @Activate
+ public void activate() {
+
+ links = new ConcurrentHashMap<>();
+
+ log.info("Started");
+ }
+
+ @Deactivate
+ public void deactivate() {
+ log.info("Stopped");
+ }
+
+ @Override
+ public int getLinkCount() {
+ return links.size();
+ }
+
+ @Override
+ public Iterable<Link> getLinks() {
+ Builder<Link> builder = ImmutableSet.builder();
+ synchronized (this) {
+ for (VersionedValue<Link> link : links.values()) {
+ builder.add(link.entity());
+ }
+ return builder.build();
+ }
+ }
+
+ @Override
+ public Set<Link> getDeviceEgressLinks(DeviceId deviceId) {
+ Set<VersionedValue<Link>> egressLinks = ImmutableSet.copyOf(srcLinks.get(deviceId));
+ Set<Link> rawEgressLinks = new HashSet<>();
+ for (VersionedValue<Link> link : egressLinks) {
+ rawEgressLinks.add(link.entity());
+ }
+ return rawEgressLinks;
+ }
+
+ @Override
+ public Set<Link> getDeviceIngressLinks(DeviceId deviceId) {
+ Set<VersionedValue<Link>> ingressLinks = ImmutableSet.copyOf(dstLinks.get(deviceId));
+ Set<Link> rawIngressLinks = new HashSet<>();
+ for (VersionedValue<Link> link : ingressLinks) {
+ rawIngressLinks.add(link.entity());
+ }
+ return rawIngressLinks;
+ }
+
+    @Override
+    public Link getLink(ConnectPoint src, ConnectPoint dst) {
+        VersionedValue<Link> link = links.get(new LinkKey(src, dst));
+        // Use the LINK_NOT_FOUND message-format constant. The original passed
+        // the string literal "LINK_NOT_FOUND", which has no %s placeholders,
+        // so the src/dst arguments were silently dropped from the error.
+        checkArgument(link != null, LINK_NOT_FOUND, src, dst);
+        return link.entity();
+    }
+
+ @Override
+ public Set<Link> getEgressLinks(ConnectPoint src) {
+ Set<Link> egressLinks = new HashSet<>();
+ for (VersionedValue<Link> link : srcLinks.get(src.deviceId())) {
+ if (link.entity().src().equals(src)) {
+ egressLinks.add(link.entity());
+ }
+ }
+ return egressLinks;
+ }
+
+ @Override
+ public Set<Link> getIngressLinks(ConnectPoint dst) {
+ Set<Link> ingressLinks = new HashSet<>();
+ for (VersionedValue<Link> link : dstLinks.get(dst.deviceId())) {
+ if (link.entity().dst().equals(dst)) {
+ ingressLinks.add(link.entity());
+ }
+ }
+ return ingressLinks;
+ }
+
+ @Override
+ public LinkEvent createOrUpdateLink(ProviderId providerId,
+ LinkDescription linkDescription) {
+
+ final DeviceId destinationDeviceId = linkDescription.dst().deviceId();
+ final Timestamp newTimestamp = clockService.getTimestamp(destinationDeviceId);
+
+ LinkKey key = new LinkKey(linkDescription.src(), linkDescription.dst());
+ VersionedValue<Link> link = links.get(key);
+ if (link == null) {
+ return createLink(providerId, key, linkDescription, newTimestamp);
+ }
+
+ checkState(newTimestamp.compareTo(link.timestamp()) > 0,
+ "Existing Link has a timestamp in the future!");
+
+ return updateLink(providerId, link, key, linkDescription, newTimestamp);
+ }
+
+ // Creates and stores the link and returns the appropriate event.
+ private LinkEvent createLink(ProviderId providerId, LinkKey key,
+ LinkDescription linkDescription, Timestamp timestamp) {
+ VersionedValue<Link> link = new VersionedValue<Link>(new DefaultLink(providerId, key.src(), key.dst(),
+ linkDescription.type()), true, timestamp);
+ synchronized (this) {
+ links.put(key, link);
+ addNewLink(link, timestamp);
+ }
+ // FIXME: notify peers.
+ return new LinkEvent(LINK_ADDED, link.entity());
+ }
+
+ // update Egress and ingress link sets
+ private void addNewLink(VersionedValue<Link> link, Timestamp timestamp) {
+ Link rawLink = link.entity();
+ synchronized (this) {
+ srcLinks.put(rawLink.src().deviceId(), link);
+ dstLinks.put(rawLink.dst().deviceId(), link);
+ }
+ }
+
+ // Updates, if necessary the specified link and returns the appropriate event.
+ private LinkEvent updateLink(ProviderId providerId, VersionedValue<Link> existingLink,
+ LinkKey key, LinkDescription linkDescription, Timestamp timestamp) {
+ // FIXME confirm Link update condition is OK
+ if (existingLink.entity().type() == INDIRECT && linkDescription.type() == DIRECT) {
+ synchronized (this) {
+
+ VersionedValue<Link> updatedLink = new VersionedValue<Link>(
+ new DefaultLink(providerId, existingLink.entity().src(), existingLink.entity().dst(),
+ linkDescription.type()), true, timestamp);
+ links.replace(key, existingLink, updatedLink);
+
+ replaceLink(existingLink, updatedLink);
+ // FIXME: notify peers.
+ return new LinkEvent(LINK_UPDATED, updatedLink.entity());
+ }
+ }
+ return null;
+ }
+
+ // update Egress and ingress link sets
+ private void replaceLink(VersionedValue<Link> current, VersionedValue<Link> updated) {
+ synchronized (this) {
+ srcLinks.remove(current.entity().src().deviceId(), current);
+ dstLinks.remove(current.entity().dst().deviceId(), current);
+
+ srcLinks.put(current.entity().src().deviceId(), updated);
+ dstLinks.put(current.entity().dst().deviceId(), updated);
+ }
+ }
+
+    @Override
+    public LinkEvent removeLink(ConnectPoint src, ConnectPoint dst) {
+        synchronized (this) {
+            LinkKey key = new LinkKey(src, dst);
+            // Remove from the primary inventory first, then purge the
+            // egress/ingress indexes, all under the same lock.
+            VersionedValue<Link> link = links.remove(key);
+            if (link != null) {
+                removeLink(link);
+                // FIXME: notify peers (same pending work as createLink/updateLink).
+                return new LinkEvent(LINK_REMOVED, link.entity());
+            }
+            // Unknown link: nothing removed, no event to emit.
+            return null;
+        }
+    }
+
+ // update Egress and ingress link sets
+ private void removeLink(VersionedValue<Link> link) {
+ synchronized (this) {
+ srcLinks.remove(link.entity().src().deviceId(), link);
+ dstLinks.remove(link.entity().dst().deviceId(), link);
+ }
+ }
+}
diff --git a/core/store/src/main/java/org/onlab/onos/store/serializers/OnosTimestampSerializer.java b/core/store/dist/src/main/java/org/onlab/onos/store/serializers/OnosTimestampSerializer.java
similarity index 81%
rename from core/store/src/main/java/org/onlab/onos/store/serializers/OnosTimestampSerializer.java
rename to core/store/dist/src/main/java/org/onlab/onos/store/serializers/OnosTimestampSerializer.java
index 812bc9d..192e035 100644
--- a/core/store/src/main/java/org/onlab/onos/store/serializers/OnosTimestampSerializer.java
+++ b/core/store/dist/src/main/java/org/onlab/onos/store/serializers/OnosTimestampSerializer.java
@@ -1,6 +1,5 @@
package org.onlab.onos.store.serializers;
-import org.onlab.onos.net.ElementId;
import org.onlab.onos.store.impl.OnosTimestamp;
import com.esotericsoftware.kryo.Kryo;
@@ -20,18 +19,17 @@
// non-null, immutable
super(false, true);
}
+
@Override
public void write(Kryo kryo, Output output, OnosTimestamp object) {
- kryo.writeClassAndObject(output, object.id());
output.writeInt(object.termNumber());
output.writeInt(object.sequenceNumber());
}
@Override
public OnosTimestamp read(Kryo kryo, Input input, Class<OnosTimestamp> type) {
- ElementId id = (ElementId) kryo.readClassAndObject(input);
final int term = input.readInt();
final int sequence = input.readInt();
- return new OnosTimestamp(id, term, sequence);
+ return new OnosTimestamp(term, sequence);
}
}
diff --git a/core/store/dist/src/main/java/org/onlab/onos/store/topology/impl/DefaultTopology.java b/core/store/dist/src/main/java/org/onlab/onos/store/topology/impl/DefaultTopology.java
new file mode 100644
index 0000000..5574d27
--- /dev/null
+++ b/core/store/dist/src/main/java/org/onlab/onos/store/topology/impl/DefaultTopology.java
@@ -0,0 +1,444 @@
+package org.onlab.onos.store.topology.impl;
+
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.ImmutableSetMultimap;
+import org.onlab.graph.DijkstraGraphSearch;
+import org.onlab.graph.GraphPathSearch;
+import org.onlab.graph.TarjanGraphSearch;
+import org.onlab.onos.net.AbstractModel;
+import org.onlab.onos.net.ConnectPoint;
+import org.onlab.onos.net.DefaultPath;
+import org.onlab.onos.net.DeviceId;
+import org.onlab.onos.net.Link;
+import org.onlab.onos.net.Path;
+import org.onlab.onos.net.provider.ProviderId;
+import org.onlab.onos.net.topology.ClusterId;
+import org.onlab.onos.net.topology.DefaultTopologyCluster;
+import org.onlab.onos.net.topology.DefaultTopologyVertex;
+import org.onlab.onos.net.topology.GraphDescription;
+import org.onlab.onos.net.topology.LinkWeight;
+import org.onlab.onos.net.topology.Topology;
+import org.onlab.onos.net.topology.TopologyCluster;
+import org.onlab.onos.net.topology.TopologyEdge;
+import org.onlab.onos.net.topology.TopologyGraph;
+import org.onlab.onos.net.topology.TopologyVertex;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import static com.google.common.base.MoreObjects.toStringHelper;
+import static com.google.common.collect.ImmutableSetMultimap.Builder;
+import static org.onlab.graph.GraphPathSearch.Result;
+import static org.onlab.graph.TarjanGraphSearch.SCCResult;
+import static org.onlab.onos.net.Link.Type.INDIRECT;
+
+/**
+ * Default implementation of the topology descriptor. This carries the
+ * backing topology data.
+ */
+public class DefaultTopology extends AbstractModel implements Topology {
+
+ private static final DijkstraGraphSearch<TopologyVertex, TopologyEdge> DIJKSTRA =
+ new DijkstraGraphSearch<>();
+ private static final TarjanGraphSearch<TopologyVertex, TopologyEdge> TARJAN =
+ new TarjanGraphSearch<>();
+
+ private static final ProviderId PID = new ProviderId("core", "org.onlab.onos.net");
+
+ private final long time;
+ private final TopologyGraph graph;
+
+ private final SCCResult<TopologyVertex, TopologyEdge> clusterResults;
+ private final ImmutableMap<DeviceId, Result<TopologyVertex, TopologyEdge>> results;
+ private final ImmutableSetMultimap<PathKey, Path> paths;
+
+ private final ImmutableMap<ClusterId, TopologyCluster> clusters;
+ private final ImmutableSet<ConnectPoint> infrastructurePoints;
+ private final ImmutableSetMultimap<ClusterId, ConnectPoint> broadcastSets;
+
+ private ImmutableMap<DeviceId, TopologyCluster> clustersByDevice;
+ private ImmutableSetMultimap<TopologyCluster, DeviceId> devicesByCluster;
+ private ImmutableSetMultimap<TopologyCluster, Link> linksByCluster;
+
+
+ /**
+ * Creates a topology descriptor attributed to the specified provider.
+ *
+ * @param providerId identity of the provider
+ * @param description data describing the new topology
+ */
+ DefaultTopology(ProviderId providerId, GraphDescription description) {
+ super(providerId);
+ this.time = description.timestamp();
+
+ // Build the graph
+ this.graph = new DefaultTopologyGraph(description.vertexes(),
+ description.edges());
+
+ this.results = searchForShortestPaths();
+ this.paths = buildPaths();
+
+ this.clusterResults = searchForClusters();
+ this.clusters = buildTopologyClusters();
+
+ buildIndexes();
+
+ this.broadcastSets = buildBroadcastSets();
+ this.infrastructurePoints = findInfrastructurePoints();
+ }
+
+ @Override
+ public long time() {
+ return time;
+ }
+
+ @Override
+ public int clusterCount() {
+ return clusters.size();
+ }
+
+ @Override
+ public int deviceCount() {
+ return graph.getVertexes().size();
+ }
+
+ @Override
+ public int linkCount() {
+ return graph.getEdges().size();
+ }
+
+ @Override
+ public int pathCount() {
+ return paths.size();
+ }
+
+ /**
+ * Returns the backing topology graph.
+ *
+ * @return topology graph
+ */
+ TopologyGraph getGraph() {
+ return graph;
+ }
+
+ /**
+ * Returns the set of topology clusters.
+ *
+ * @return set of clusters
+ */
+ Set<TopologyCluster> getClusters() {
+ return ImmutableSet.copyOf(clusters.values());
+ }
+
+ /**
+ * Returns the specified topology cluster.
+ *
+ * @param clusterId cluster identifier
+ * @return topology cluster
+ */
+ TopologyCluster getCluster(ClusterId clusterId) {
+ return clusters.get(clusterId);
+ }
+
+ /**
+ * Returns the topology cluster that contains the given device.
+ *
+ * @param deviceId device identifier
+ * @return topology cluster
+ */
+ TopologyCluster getCluster(DeviceId deviceId) {
+ return clustersByDevice.get(deviceId);
+ }
+
+ /**
+ * Returns the set of cluster devices.
+ *
+ * @param cluster topology cluster
+ * @return cluster devices
+ */
+ Set<DeviceId> getClusterDevices(TopologyCluster cluster) {
+ return devicesByCluster.get(cluster);
+ }
+
+ /**
+ * Returns the set of cluster links.
+ *
+ * @param cluster topology cluster
+ * @return cluster links
+ */
+ Set<Link> getClusterLinks(TopologyCluster cluster) {
+ return linksByCluster.get(cluster);
+ }
+
+ /**
+ * Indicates whether the given point is an infrastructure link end-point.
+ *
+ * @param connectPoint connection point
+ * @return true if infrastructure
+ */
+ boolean isInfrastructure(ConnectPoint connectPoint) {
+ return infrastructurePoints.contains(connectPoint);
+ }
+
+ /**
+ * Indicates whether the given point is part of a broadcast set.
+ *
+ * @param connectPoint connection point
+ * @return true if in broadcast set
+ */
+ boolean isBroadcastPoint(ConnectPoint connectPoint) {
+ // Any non-infrastructure, i.e. edge points are assumed to be OK.
+ if (!isInfrastructure(connectPoint)) {
+ return true;
+ }
+
+ // Find the cluster to which the device belongs.
+ TopologyCluster cluster = clustersByDevice.get(connectPoint.deviceId());
+ if (cluster == null) {
+ throw new IllegalArgumentException("No cluster found for device " + connectPoint.deviceId());
+ }
+
+ // If the broadcast set is null or empty, or if the point explicitly
+ // belongs to it, return true;
+ Set<ConnectPoint> points = broadcastSets.get(cluster.id());
+ return points == null || points.isEmpty() || points.contains(connectPoint);
+ }
+
+ /**
+ * Returns the size of the cluster broadcast set.
+ *
+ * @param clusterId cluster identifier
+ * @return size of the cluster broadcast set
+ */
+ int broadcastSetSize(ClusterId clusterId) {
+ return broadcastSets.get(clusterId).size();
+ }
+
+ /**
+ * Returns the set of pre-computed shortest paths between source and
+ * destination devices.
+ *
+ * @param src source device
+ * @param dst destination device
+ * @return set of shortest paths
+ */
+ Set<Path> getPaths(DeviceId src, DeviceId dst) {
+ return paths.get(new PathKey(src, dst));
+ }
+
+ /**
+ * Computes on-demand the set of shortest paths between source and
+ * destination devices.
+ *
+ * @param src source device
+ * @param dst destination device
+ * @return set of shortest paths
+ */
+ Set<Path> getPaths(DeviceId src, DeviceId dst, LinkWeight weight) {
+ GraphPathSearch.Result<TopologyVertex, TopologyEdge> result =
+ DIJKSTRA.search(graph, new DefaultTopologyVertex(src),
+ new DefaultTopologyVertex(dst), weight);
+ ImmutableSet.Builder<Path> builder = ImmutableSet.builder();
+ for (org.onlab.graph.Path<TopologyVertex, TopologyEdge> path : result.paths()) {
+ builder.add(networkPath(path));
+ }
+ return builder.build();
+ }
+
+
+ // Searches the graph for all shortest paths and returns the search results.
+ private ImmutableMap<DeviceId, Result<TopologyVertex, TopologyEdge>> searchForShortestPaths() {
+ ImmutableMap.Builder<DeviceId, Result<TopologyVertex, TopologyEdge>> builder = ImmutableMap.builder();
+
+ // Search graph paths for each source to all destinations.
+ LinkWeight weight = new HopCountLinkWeight(graph.getVertexes().size());
+ for (TopologyVertex src : graph.getVertexes()) {
+ builder.put(src.deviceId(), DIJKSTRA.search(graph, src, null, weight));
+ }
+ return builder.build();
+ }
+
+ // Builds network paths from the graph path search results
+ private ImmutableSetMultimap<PathKey, Path> buildPaths() {
+ Builder<PathKey, Path> builder = ImmutableSetMultimap.builder();
+ for (DeviceId deviceId : results.keySet()) {
+ Result<TopologyVertex, TopologyEdge> result = results.get(deviceId);
+ for (org.onlab.graph.Path<TopologyVertex, TopologyEdge> path : result.paths()) {
+ builder.put(new PathKey(path.src().deviceId(), path.dst().deviceId()),
+ networkPath(path));
+ }
+ }
+ return builder.build();
+ }
+
+ // Converts graph path to a network path with the same cost.
+ private Path networkPath(org.onlab.graph.Path<TopologyVertex, TopologyEdge> path) {
+ List<Link> links = new ArrayList<>();
+ for (TopologyEdge edge : path.edges()) {
+ links.add(edge.link());
+ }
+ return new DefaultPath(PID, links, path.cost());
+ }
+
+
+ // Searches for SCC clusters in the network topology graph using Tarjan
+ // algorithm.
+ private SCCResult<TopologyVertex, TopologyEdge> searchForClusters() {
+ return TARJAN.search(graph, new NoIndirectLinksWeight());
+ }
+
+    // Builds the topology clusters and returns the id-cluster bindings.
+    private ImmutableMap<ClusterId, TopologyCluster> buildTopologyClusters() {
+        ImmutableMap.Builder<ClusterId, TopologyCluster> clusterBuilder = ImmutableMap.builder();
+        // Reuse the SCC search result already computed by the constructor
+        // (clusterResults) instead of running Tarjan a second time.
+        // buildIndexes() indexes clusterResults by the same cluster index i,
+        // so both must be derived from the one search for the ids to agree.
+        SCCResult<TopologyVertex, TopologyEdge> result = clusterResults;
+
+        // Extract both vertexes and edges from the results; the lists form
+        // pairs along the same index.
+        List<Set<TopologyVertex>> clusterVertexes = result.clusterVertexes();
+        List<Set<TopologyEdge>> clusterEdges = result.clusterEdges();
+
+        // Scan over the lists and create a cluster from the results.
+        for (int i = 0, n = result.clusterCount(); i < n; i++) {
+            Set<TopologyVertex> vertexSet = clusterVertexes.get(i);
+            Set<TopologyEdge> edgeSet = clusterEdges.get(i);
+
+            ClusterId cid = ClusterId.clusterId(i);
+            DefaultTopologyCluster cluster =
+                    new DefaultTopologyCluster(cid, vertexSet.size(), edgeSet.size(),
+                                               findRoot(vertexSet).deviceId());
+            clusterBuilder.put(cid, cluster);
+        }
+        return clusterBuilder.build();
+    }
+
+    // Finds the vertex whose device id is the lexicographical minimum in the
+    // specified set.
+    private TopologyVertex findRoot(Set<TopologyVertex> vertexSet) {
+        TopologyVertex minVertex = null;
+        for (TopologyVertex vertex : vertexSet) {
+            // Compare the candidate against the current minimum. The original
+            // compared minVertex to itself, so the loop always kept the first
+            // vertex returned by the set's iterator (a non-deterministic root).
+            if (minVertex == null ||
+                    vertex.deviceId().toString()
+                            .compareTo(minVertex.deviceId().toString()) < 0) {
+                minVertex = vertex;
+            }
+        }
+        return minVertex;
+    }
+
+ // Processes a map of broadcast sets for each cluster.
+ private ImmutableSetMultimap<ClusterId, ConnectPoint> buildBroadcastSets() {
+ Builder<ClusterId, ConnectPoint> builder = ImmutableSetMultimap.builder();
+ for (TopologyCluster cluster : clusters.values()) {
+ addClusterBroadcastSet(cluster, builder);
+ }
+ return builder.build();
+ }
+
+ // Finds all broadcast points for the cluster. These are those connection
+ // points which lie along the shortest paths between the cluster root and
+ // all other devices within the cluster.
+ private void addClusterBroadcastSet(TopologyCluster cluster,
+ Builder<ClusterId, ConnectPoint> builder) {
+ // Use the graph root search results to build the broadcast set.
+ Result<TopologyVertex, TopologyEdge> result = results.get(cluster.root());
+ for (Map.Entry<TopologyVertex, Set<TopologyEdge>> entry : result.parents().entrySet()) {
+ TopologyVertex vertex = entry.getKey();
+
+ // Ignore any parents that lead outside the cluster.
+ if (clustersByDevice.get(vertex.deviceId()) != cluster) {
+ continue;
+ }
+
+ // Ignore any back-link sets that are empty.
+ Set<TopologyEdge> parents = entry.getValue();
+ if (parents.isEmpty()) {
+ continue;
+ }
+
+ // Use the first back-link source and destinations to add to the
+ // broadcast set.
+ Link link = parents.iterator().next().link();
+ builder.put(cluster.id(), link.src());
+ builder.put(cluster.id(), link.dst());
+ }
+ }
+
+ // Collects and returns an set of all infrastructure link end-points.
+ private ImmutableSet<ConnectPoint> findInfrastructurePoints() {
+ ImmutableSet.Builder<ConnectPoint> builder = ImmutableSet.builder();
+ for (TopologyEdge edge : graph.getEdges()) {
+ builder.add(edge.link().src());
+ builder.add(edge.link().dst());
+ }
+ return builder.build();
+ }
+
+ // Builds cluster-devices, cluster-links and device-cluster indexes.
+ private void buildIndexes() {
+ // Prepare the index builders
+ ImmutableMap.Builder<DeviceId, TopologyCluster> clusterBuilder = ImmutableMap.builder();
+ ImmutableSetMultimap.Builder<TopologyCluster, DeviceId> devicesBuilder = ImmutableSetMultimap.builder();
+ ImmutableSetMultimap.Builder<TopologyCluster, Link> linksBuilder = ImmutableSetMultimap.builder();
+
+ // Now scan through all the clusters
+ for (TopologyCluster cluster : clusters.values()) {
+ int i = cluster.id().index();
+
+ // Scan through all the cluster vertexes.
+ for (TopologyVertex vertex : clusterResults.clusterVertexes().get(i)) {
+ devicesBuilder.put(cluster, vertex.deviceId());
+ clusterBuilder.put(vertex.deviceId(), cluster);
+ }
+
+ // Scan through all the cluster edges.
+ for (TopologyEdge edge : clusterResults.clusterEdges().get(i)) {
+ linksBuilder.put(cluster, edge.link());
+ }
+ }
+
+ // Finalize all indexes.
+ clustersByDevice = clusterBuilder.build();
+ devicesByCluster = devicesBuilder.build();
+ linksByCluster = linksBuilder.build();
+ }
+
+ // Link weight for measuring link cost as hop count with indirect links
+ // being as expensive as traversing the entire graph to assume the worst.
+ private static class HopCountLinkWeight implements LinkWeight {
+ private final int indirectLinkCost;
+
+ HopCountLinkWeight(int indirectLinkCost) {
+ this.indirectLinkCost = indirectLinkCost;
+ }
+
+ @Override
+ public double weight(TopologyEdge edge) {
+ // To force preference to use direct paths first, make indirect
+ // links as expensive as the linear vertex traversal.
+ return edge.link().type() == INDIRECT ? indirectLinkCost : 1;
+ }
+ }
+
+ // Link weight for preventing traversal over indirect links.
+ private static class NoIndirectLinksWeight implements LinkWeight {
+ @Override
+ public double weight(TopologyEdge edge) {
+ return edge.link().type() == INDIRECT ? -1 : 1;
+ }
+ }
+
+ @Override
+ public String toString() {
+ return toStringHelper(this)
+ .add("time", time)
+ .add("clusters", clusterCount())
+ .add("devices", deviceCount())
+ .add("links", linkCount())
+ .add("pathCount", pathCount())
+ .toString();
+ }
+}
diff --git a/core/store/dist/src/main/java/org/onlab/onos/store/topology/impl/DefaultTopologyGraph.java b/core/store/dist/src/main/java/org/onlab/onos/store/topology/impl/DefaultTopologyGraph.java
new file mode 100644
index 0000000..945ba05
--- /dev/null
+++ b/core/store/dist/src/main/java/org/onlab/onos/store/topology/impl/DefaultTopologyGraph.java
@@ -0,0 +1,28 @@
+package org.onlab.onos.store.topology.impl;
+
+import org.onlab.graph.AdjacencyListsGraph;
+import org.onlab.onos.net.topology.TopologyEdge;
+import org.onlab.onos.net.topology.TopologyGraph;
+import org.onlab.onos.net.topology.TopologyVertex;
+
+import java.util.Set;
+
+/**
+ * Default implementation of an immutable topology graph based on a generic
+ * implementation of adjacency lists graph.
+ */
+public class DefaultTopologyGraph
+ extends AdjacencyListsGraph<TopologyVertex, TopologyEdge>
+ implements TopologyGraph {
+
+ /**
+ * Creates a topology graph comprising of the specified vertexes and edges.
+ *
+ * @param vertexes set of graph vertexes
+ * @param edges set of graph edges
+ */
+ public DefaultTopologyGraph(Set<TopologyVertex> vertexes, Set<TopologyEdge> edges) {
+ super(vertexes, edges);
+ }
+
+}
diff --git a/core/store/dist/src/main/java/org/onlab/onos/store/topology/impl/DistributedTopologyStore.java b/core/store/dist/src/main/java/org/onlab/onos/store/topology/impl/DistributedTopologyStore.java
new file mode 100644
index 0000000..567861e
--- /dev/null
+++ b/core/store/dist/src/main/java/org/onlab/onos/store/topology/impl/DistributedTopologyStore.java
@@ -0,0 +1,141 @@
+package org.onlab.onos.store.topology.impl;
+
+import static org.slf4j.LoggerFactory.getLogger;
+
+import java.util.List;
+import java.util.Set;
+
+import org.apache.felix.scr.annotations.Activate;
+import org.apache.felix.scr.annotations.Component;
+import org.apache.felix.scr.annotations.Deactivate;
+import org.apache.felix.scr.annotations.Service;
+import org.onlab.onos.event.Event;
+import org.onlab.onos.net.ConnectPoint;
+import org.onlab.onos.net.DeviceId;
+import org.onlab.onos.net.Link;
+import org.onlab.onos.net.Path;
+import org.onlab.onos.net.provider.ProviderId;
+import org.onlab.onos.net.topology.ClusterId;
+import org.onlab.onos.net.topology.GraphDescription;
+import org.onlab.onos.net.topology.LinkWeight;
+import org.onlab.onos.net.topology.Topology;
+import org.onlab.onos.net.topology.TopologyCluster;
+import org.onlab.onos.net.topology.TopologyEvent;
+import org.onlab.onos.net.topology.TopologyGraph;
+import org.onlab.onos.net.topology.TopologyStore;
+import org.onlab.onos.net.topology.TopologyStoreDelegate;
+import org.onlab.onos.store.AbstractStore;
+import org.slf4j.Logger;
+
+/**
+ * Manages inventory of topology snapshots using trivial in-memory
+ * structures implementation.
+ */
+//FIXME: I LIE I AM NOT DISTRIBUTED
+@Component(immediate = true)
+@Service
+public class DistributedTopologyStore
+extends AbstractStore<TopologyEvent, TopologyStoreDelegate>
+implements TopologyStore {
+
+ private final Logger log = getLogger(getClass());
+
+ private volatile DefaultTopology current;
+
+ @Activate
+ public void activate() {
+ log.info("Started");
+ }
+
+ @Deactivate
+ public void deactivate() {
+ log.info("Stopped");
+ }
+ @Override
+ public Topology currentTopology() {
+ return current;
+ }
+
+ @Override
+ public boolean isLatest(Topology topology) {
+ // Topology is current only if it is the same as our current topology
+ return topology == current;
+ }
+
+ @Override
+ public TopologyGraph getGraph(Topology topology) {
+ return defaultTopology(topology).getGraph();
+ }
+
+ @Override
+ public Set<TopologyCluster> getClusters(Topology topology) {
+ return defaultTopology(topology).getClusters();
+ }
+
+ @Override
+ public TopologyCluster getCluster(Topology topology, ClusterId clusterId) {
+ return defaultTopology(topology).getCluster(clusterId);
+ }
+
+ @Override
+ public Set<DeviceId> getClusterDevices(Topology topology, TopologyCluster cluster) {
+ return defaultTopology(topology).getClusterDevices(cluster);
+ }
+
+ @Override
+ public Set<Link> getClusterLinks(Topology topology, TopologyCluster cluster) {
+ return defaultTopology(topology).getClusterLinks(cluster);
+ }
+
+ @Override
+ public Set<Path> getPaths(Topology topology, DeviceId src, DeviceId dst) {
+ return defaultTopology(topology).getPaths(src, dst);
+ }
+
+ @Override
+ public Set<Path> getPaths(Topology topology, DeviceId src, DeviceId dst,
+ LinkWeight weight) {
+ return defaultTopology(topology).getPaths(src, dst, weight);
+ }
+
+ @Override
+ public boolean isInfrastructure(Topology topology, ConnectPoint connectPoint) {
+ return defaultTopology(topology).isInfrastructure(connectPoint);
+ }
+
+ @Override
+ public boolean isBroadcastPoint(Topology topology, ConnectPoint connectPoint) {
+ return defaultTopology(topology).isBroadcastPoint(connectPoint);
+ }
+
+    @Override
+    public TopologyEvent updateTopology(ProviderId providerId,
+                                        GraphDescription graphDescription,
+                                        List<Event> reasons) {
+        // Have the default topology construct self from the description data.
+        // Built outside the lock to keep the critical section short.
+        DefaultTopology newTopology =
+                new DefaultTopology(providerId, graphDescription);
+
+        // Promote the new topology to current and return a ready-to-send event.
+        synchronized (this) {
+            // Staleness check happens under the same lock as the promotion,
+            // so a concurrent update cannot slip in between the timestamp
+            // comparison and the assignment (the original checked outside
+            // the synchronized block).
+            if (current != null && graphDescription.timestamp() < current.time()) {
+                return null;
+            }
+            current = newTopology;
+            return new TopologyEvent(TopologyEvent.Type.TOPOLOGY_CHANGED, current);
+        }
+    }
+
+ // Validates the specified topology and returns it as a default
+ private DefaultTopology defaultTopology(Topology topology) {
+ if (topology instanceof DefaultTopology) {
+ return (DefaultTopology) topology;
+ }
+ throw new IllegalArgumentException("Topology class " + topology.getClass() +
+ " not supported");
+ }
+
+}
diff --git a/core/store/dist/src/main/java/org/onlab/onos/store/topology/impl/PathKey.java b/core/store/dist/src/main/java/org/onlab/onos/store/topology/impl/PathKey.java
new file mode 100644
index 0000000..60736b9
--- /dev/null
+++ b/core/store/dist/src/main/java/org/onlab/onos/store/topology/impl/PathKey.java
@@ -0,0 +1,40 @@
+package org.onlab.onos.store.topology.impl;
+
+import org.onlab.onos.net.DeviceId;
+
+import java.util.Objects;
+
+/**
+ * Key for filing pre-computed paths between source and destination devices.
+ */
+class PathKey {
+ private final DeviceId src;
+ private final DeviceId dst;
+
+ /**
+ * Creates a path key from the given source/dest pair.
+ * @param src source device
+ * @param dst destination device
+ */
+ PathKey(DeviceId src, DeviceId dst) {
+ this.src = src;
+ this.dst = dst;
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(src, dst);
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (this == obj) {
+ return true;
+ }
+ if (obj instanceof PathKey) {
+ final PathKey other = (PathKey) obj;
+ return Objects.equals(this.src, other.src) && Objects.equals(this.dst, other.dst);
+ }
+ return false;
+ }
+}
diff --git a/core/store/hz/cluster/pom.xml b/core/store/hz/cluster/pom.xml
new file mode 100644
index 0000000..95307f1
--- /dev/null
+++ b/core/store/hz/cluster/pom.xml
@@ -0,0 +1,64 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+
+ <parent>
+ <groupId>org.onlab.onos</groupId>
+ <artifactId>onos-core-hz</artifactId>
+ <version>1.0.0-SNAPSHOT</version>
+ <relativePath>../pom.xml</relativePath>
+ </parent>
+
+ <artifactId>onos-core-hz-cluster</artifactId>
+ <packaging>bundle</packaging>
+
+ <description>ONOS Hazelcast based distributed store subsystems</description>
+
+ <dependencies>
+ <dependency>
+ <groupId>org.onlab.onos</groupId>
+ <artifactId>onos-api</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.onlab.onos</groupId>
+ <artifactId>onos-core-serializers</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.onlab.onos</groupId>
+ <artifactId>onos-core-hz-common</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.onlab.onos</groupId>
+ <artifactId>onos-core-hz-common</artifactId>
+ <classifier>tests</classifier>
+ <scope>test</scope>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.felix</groupId>
+ <artifactId>org.apache.felix.scr.annotations</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>com.hazelcast</groupId>
+ <artifactId>hazelcast</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>de.javakaffee</groupId>
+ <artifactId>kryo-serializers</artifactId>
+ </dependency>
+ </dependencies>
+
+ <build>
+ <plugins>
+ <plugin>
+ <groupId>org.apache.felix</groupId>
+ <artifactId>maven-scr-plugin</artifactId>
+ </plugin>
+ </plugins>
+ </build>
+
+</project>
diff --git a/core/store/src/main/java/org/onlab/onos/store/cluster/impl/DistributedClusterStore.java b/core/store/hz/cluster/src/main/java/org/onlab/onos/store/cluster/impl/DistributedClusterStore.java
similarity index 88%
rename from core/store/src/main/java/org/onlab/onos/store/cluster/impl/DistributedClusterStore.java
rename to core/store/hz/cluster/src/main/java/org/onlab/onos/store/cluster/impl/DistributedClusterStore.java
index 004f807..0f375f6 100644
--- a/core/store/src/main/java/org/onlab/onos/store/cluster/impl/DistributedClusterStore.java
+++ b/core/store/hz/cluster/src/main/java/org/onlab/onos/store/cluster/impl/DistributedClusterStore.java
@@ -8,6 +8,7 @@
import com.hazelcast.core.MemberAttributeEvent;
import com.hazelcast.core.MembershipEvent;
import com.hazelcast.core.MembershipListener;
+
import org.apache.felix.scr.annotations.Activate;
import org.apache.felix.scr.annotations.Component;
import org.apache.felix.scr.annotations.Deactivate;
@@ -18,9 +19,9 @@
import org.onlab.onos.cluster.ControllerNode;
import org.onlab.onos.cluster.DefaultControllerNode;
import org.onlab.onos.cluster.NodeId;
-import org.onlab.onos.store.impl.AbsentInvalidatingLoadingCache;
-import org.onlab.onos.store.impl.AbstractDistributedStore;
-import org.onlab.onos.store.impl.OptionalCacheLoader;
+import org.onlab.onos.store.common.AbsentInvalidatingLoadingCache;
+import org.onlab.onos.store.common.AbstractHazelcastStore;
+import org.onlab.onos.store.common.OptionalCacheLoader;
import org.onlab.packet.IpPrefix;
import java.util.Map;
@@ -38,7 +39,7 @@
@Component(immediate = true)
@Service
public class DistributedClusterStore
- extends AbstractDistributedStore<ClusterEvent, ClusterStoreDelegate>
+ extends AbstractHazelcastStore<ClusterEvent, ClusterStoreDelegate>
implements ClusterStore {
private IMap<byte[], byte[]> rawNodes;
@@ -57,7 +58,7 @@
OptionalCacheLoader<NodeId, DefaultControllerNode> nodeLoader
= new OptionalCacheLoader<>(storeService, rawNodes);
nodes = new AbsentInvalidatingLoadingCache<>(newBuilder().build(nodeLoader));
- rawNodes.addEntryListener(new RemoteEventHandler<>(nodes), true);
+ rawNodes.addEntryListener(new RemoteCacheEventHandler<>(nodes), true);
loadClusterNodes();
@@ -67,7 +68,7 @@
// Loads the initial set of cluster nodes
private void loadClusterNodes() {
for (Member member : theInstance.getCluster().getMembers()) {
- addMember(member);
+ addNode(node(member));
}
}
@@ -103,6 +104,11 @@
}
@Override
+ public ControllerNode addNode(NodeId nodeId, IpPrefix ip, int tcpPort) {
+ return addNode(new DefaultControllerNode(nodeId, ip, tcpPort));
+ }
+
+ @Override
public void removeNode(NodeId nodeId) {
synchronized (this) {
rawNodes.remove(serialize(nodeId));
@@ -111,8 +117,7 @@
}
// Adds a new node based on the specified member
- private synchronized ControllerNode addMember(Member member) {
- DefaultControllerNode node = node(member);
+ private synchronized ControllerNode addNode(DefaultControllerNode node) {
rawNodes.put(serialize(node.id()), serialize(node));
nodes.put(node.id(), Optional.of(node));
states.put(node.id(), State.ACTIVE);
@@ -135,7 +140,7 @@
@Override
public void memberAdded(MembershipEvent membershipEvent) {
log.info("Member {} added", membershipEvent.getMember());
- ControllerNode node = addMember(membershipEvent.getMember());
+ ControllerNode node = addNode(node(membershipEvent.getMember()));
notifyDelegate(new ClusterEvent(INSTANCE_ACTIVATED, node));
}
diff --git a/core/store/src/main/java/org/onlab/onos/store/cluster/impl/DistributedMastershipStore.java b/core/store/hz/cluster/src/main/java/org/onlab/onos/store/cluster/impl/DistributedMastershipStore.java
similarity index 74%
rename from core/store/src/main/java/org/onlab/onos/store/cluster/impl/DistributedMastershipStore.java
rename to core/store/hz/cluster/src/main/java/org/onlab/onos/store/cluster/impl/DistributedMastershipStore.java
index 00d0547..ccb2937 100644
--- a/core/store/src/main/java/org/onlab/onos/store/cluster/impl/DistributedMastershipStore.java
+++ b/core/store/hz/cluster/src/main/java/org/onlab/onos/store/cluster/impl/DistributedMastershipStore.java
@@ -1,9 +1,11 @@
package org.onlab.onos.store.cluster.impl;
-import com.google.common.base.Optional;
-import com.google.common.cache.LoadingCache;
-import com.google.common.collect.ImmutableSet;
-import com.hazelcast.core.IMap;
+import static com.google.common.cache.CacheBuilder.newBuilder;
+import static org.onlab.onos.cluster.MastershipEvent.Type.MASTER_CHANGED;
+
+import java.util.Map;
+import java.util.Objects;
+import java.util.Set;
import org.apache.felix.scr.annotations.Activate;
import org.apache.felix.scr.annotations.Component;
@@ -19,15 +21,14 @@
import org.onlab.onos.cluster.NodeId;
import org.onlab.onos.net.DeviceId;
import org.onlab.onos.net.MastershipRole;
-import org.onlab.onos.store.impl.AbsentInvalidatingLoadingCache;
-import org.onlab.onos.store.impl.AbstractDistributedStore;
-import org.onlab.onos.store.impl.OptionalCacheLoader;
+import org.onlab.onos.store.common.AbsentInvalidatingLoadingCache;
+import org.onlab.onos.store.common.AbstractHazelcastStore;
+import org.onlab.onos.store.common.OptionalCacheLoader;
-import java.util.Map;
-import java.util.Objects;
-import java.util.Set;
-
-import static com.google.common.cache.CacheBuilder.newBuilder;
+import com.google.common.base.Optional;
+import com.google.common.cache.LoadingCache;
+import com.google.common.collect.ImmutableSet;
+import com.hazelcast.core.IMap;
/**
* Distributed implementation of the cluster nodes store.
@@ -35,8 +36,8 @@
@Component(immediate = true)
@Service
public class DistributedMastershipStore
- extends AbstractDistributedStore<MastershipEvent, MastershipStoreDelegate>
- implements MastershipStore {
+extends AbstractHazelcastStore<MastershipEvent, MastershipStoreDelegate>
+implements MastershipStore {
private IMap<byte[], byte[]> rawMasters;
private LoadingCache<DeviceId, Optional<NodeId>> masters;
@@ -51,9 +52,9 @@
rawMasters = theInstance.getMap("masters");
OptionalCacheLoader<DeviceId, NodeId> nodeLoader
- = new OptionalCacheLoader<>(storeService, rawMasters);
+ = new OptionalCacheLoader<>(storeService, rawMasters);
masters = new AbsentInvalidatingLoadingCache<>(newBuilder().build(nodeLoader));
- rawMasters.addEntryListener(new RemoteEventHandler<>(masters), true);
+ rawMasters.addEntryListener(new RemoteMasterShipEventHandler(masters), true);
loadMasters();
@@ -128,4 +129,25 @@
return null;
}
+ private class RemoteMasterShipEventHandler extends RemoteCacheEventHandler<DeviceId, NodeId> {
+ public RemoteMasterShipEventHandler(LoadingCache<DeviceId, Optional<NodeId>> cache) {
+ super(cache);
+ }
+
+ @Override
+ protected void onAdd(DeviceId deviceId, NodeId nodeId) {
+ notifyDelegate(new MastershipEvent(MASTER_CHANGED, deviceId, nodeId));
+ }
+
+ @Override
+ protected void onRemove(DeviceId deviceId, NodeId nodeId) {
+ notifyDelegate(new MastershipEvent(MASTER_CHANGED, deviceId, nodeId));
+ }
+
+ @Override
+ protected void onUpdate(DeviceId deviceId, NodeId oldNodeId, NodeId nodeId) {
+ notifyDelegate(new MastershipEvent(MASTER_CHANGED, deviceId, nodeId));
+ }
+ }
+
}
diff --git a/core/store/src/main/java/org/onlab/onos/store/cluster/impl/package-info.java b/core/store/hz/cluster/src/main/java/org/onlab/onos/store/cluster/impl/package-info.java
similarity index 100%
rename from core/store/src/main/java/org/onlab/onos/store/cluster/impl/package-info.java
rename to core/store/hz/cluster/src/main/java/org/onlab/onos/store/cluster/impl/package-info.java
diff --git a/core/trivial/pom.xml b/core/store/hz/common/pom.xml
similarity index 62%
copy from core/trivial/pom.xml
copy to core/store/hz/common/pom.xml
index 1806ba4..06aa0b7 100644
--- a/core/trivial/pom.xml
+++ b/core/store/hz/common/pom.xml
@@ -6,15 +6,15 @@
<parent>
<groupId>org.onlab.onos</groupId>
- <artifactId>onos-core</artifactId>
+ <artifactId>onos-core-hz</artifactId>
<version>1.0.0-SNAPSHOT</version>
<relativePath>../pom.xml</relativePath>
</parent>
- <artifactId>onos-core-trivial</artifactId>
+ <artifactId>onos-core-hz-common</artifactId>
<packaging>bundle</packaging>
- <description>ONOS network control trivial implementations of core subsystems</description>
+ <description>ONOS Hazelcast based distributed store subsystems</description>
<dependencies>
<dependency>
@@ -22,9 +22,22 @@
<artifactId>onos-api</artifactId>
</dependency>
<dependency>
+ <groupId>org.onlab.onos</groupId>
+ <artifactId>onos-core-serializers</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
<groupId>org.apache.felix</groupId>
<artifactId>org.apache.felix.scr.annotations</artifactId>
</dependency>
+ <dependency>
+ <groupId>com.hazelcast</groupId>
+ <artifactId>hazelcast</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>de.javakaffee</groupId>
+ <artifactId>kryo-serializers</artifactId>
+ </dependency>
</dependencies>
<build>
diff --git a/core/store/src/main/java/org/onlab/onos/store/impl/AbsentInvalidatingLoadingCache.java b/core/store/hz/common/src/main/java/org/onlab/onos/store/common/AbsentInvalidatingLoadingCache.java
similarity index 97%
rename from core/store/src/main/java/org/onlab/onos/store/impl/AbsentInvalidatingLoadingCache.java
rename to core/store/hz/common/src/main/java/org/onlab/onos/store/common/AbsentInvalidatingLoadingCache.java
index df4e70a..4dd8669 100644
--- a/core/store/src/main/java/org/onlab/onos/store/impl/AbsentInvalidatingLoadingCache.java
+++ b/core/store/hz/common/src/main/java/org/onlab/onos/store/common/AbsentInvalidatingLoadingCache.java
@@ -1,4 +1,4 @@
-package org.onlab.onos.store.impl;
+package org.onlab.onos.store.common;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
diff --git a/core/store/hz/common/src/main/java/org/onlab/onos/store/common/AbstractHazelcastStore.java b/core/store/hz/common/src/main/java/org/onlab/onos/store/common/AbstractHazelcastStore.java
new file mode 100644
index 0000000..8a96682
--- /dev/null
+++ b/core/store/hz/common/src/main/java/org/onlab/onos/store/common/AbstractHazelcastStore.java
@@ -0,0 +1,239 @@
+package org.onlab.onos.store.common;
+
+import com.google.common.base.Optional;
+import com.google.common.cache.LoadingCache;
+import com.hazelcast.core.EntryAdapter;
+import com.hazelcast.core.EntryEvent;
+import com.hazelcast.core.HazelcastInstance;
+import com.hazelcast.core.MapEvent;
+import com.hazelcast.core.Member;
+
+import org.apache.felix.scr.annotations.Activate;
+import org.apache.felix.scr.annotations.Component;
+import org.apache.felix.scr.annotations.Reference;
+import org.apache.felix.scr.annotations.ReferenceCardinality;
+import org.onlab.onos.event.Event;
+import org.onlab.onos.store.AbstractStore;
+import org.onlab.onos.store.StoreDelegate;
+import org.slf4j.Logger;
+
+import static com.google.common.base.Preconditions.checkNotNull;
+import static org.slf4j.LoggerFactory.getLogger;
+
+/**
+ * Abstraction of a distributed store based on Hazelcast.
+ */
+@Component(componentAbstract = true)
+public abstract class AbstractHazelcastStore<E extends Event, D extends StoreDelegate<E>>
+ extends AbstractStore<E, D> {
+
+ protected final Logger log = getLogger(getClass());
+
+ @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+ protected StoreService storeService;
+
+ protected HazelcastInstance theInstance;
+
+ @Activate
+ public void activate() {
+ theInstance = storeService.getHazelcastInstance();
+ }
+
+ /**
+ * Serializes the specified object using the backing store service.
+ *
+ * @param obj object to be serialized
+ * @return serialized object
+ */
+ protected byte[] serialize(Object obj) {
+ return storeService.serialize(obj);
+ }
+
+ /**
+ * Deserializes the specified object using the backing store service.
+ *
+ * @param bytes bytes to be deserialized
+ * @param <T> type of object
+ * @return deserialized object
+ */
+ protected <T> T deserialize(byte[] bytes) {
+ return storeService.deserialize(bytes);
+ }
+
+
+ /**
+ * An IMap entry listener, which reflects each remote event to the cache.
+ *
+ * @param <K> IMap key type after deserialization
+ * @param <V> IMap value type after deserialization
+ */
+ public class RemoteCacheEventHandler<K, V> extends EntryAdapter<byte[], byte[]> {
+
+ private final Member localMember;
+ private LoadingCache<K, Optional<V>> cache;
+
+ /**
+ * Creates a handler that reflects remote map entry events into the given cache.
+ *
+ * @param cache cache to update
+ */
+ public RemoteCacheEventHandler(LoadingCache<K, Optional<V>> cache) {
+ this.localMember = theInstance.getCluster().getLocalMember();
+ this.cache = checkNotNull(cache);
+ }
+
+ @Override
+ public void mapCleared(MapEvent event) {
+ if (localMember.equals(event.getMember())) {
+ // ignore locally triggered event
+ return;
+ }
+ cache.invalidateAll();
+ }
+
+ @Override
+ public void entryAdded(EntryEvent<byte[], byte[]> event) {
+ if (localMember.equals(event.getMember())) {
+ // ignore locally triggered event
+ return;
+ }
+ K key = deserialize(event.getKey());
+ V newVal = deserialize(event.getValue());
+ Optional<V> newValue = Optional.of(newVal);
+ cache.asMap().putIfAbsent(key, newValue);
+ onAdd(key, newVal);
+ }
+
+ @Override
+ public void entryUpdated(EntryEvent<byte[], byte[]> event) {
+ if (localMember.equals(event.getMember())) {
+ // ignore locally triggered event
+ return;
+ }
+ K key = deserialize(event.getKey());
+ V oldVal = deserialize(event.getOldValue());
+ Optional<V> oldValue = Optional.fromNullable(oldVal);
+ V newVal = deserialize(event.getValue());
+ Optional<V> newValue = Optional.of(newVal);
+ cache.asMap().replace(key, oldValue, newValue);
+ onUpdate(key, oldVal, newVal);
+ }
+
+ @Override
+ public void entryRemoved(EntryEvent<byte[], byte[]> event) {
+ if (localMember.equals(event.getMember())) {
+ // ignore locally triggered event
+ return;
+ }
+ K key = deserialize(event.getKey());
+ V val = deserialize(event.getOldValue());
+ cache.invalidate(key);
+ onRemove(key, val);
+ }
+
+ /**
+ * Cache entry addition hook.
+ *
+ * @param key new key
+ * @param newVal new value
+ */
+ protected void onAdd(K key, V newVal) {
+ }
+
+ /**
+ * Cache entry update hook.
+ *
+ * @param key new key
+ * @param oldValue old value
+ * @param newVal new value
+ */
+ protected void onUpdate(K key, V oldValue, V newVal) {
+ }
+
+ /**
+ * Cache entry remove hook.
+ *
+ * @param key removed key
+ * @param val old value
+ */
+ protected void onRemove(K key, V val) {
+ }
+ }
+
+ /**
+ * Distributed object remote event entry listener.
+ *
+ * @param <K> Entry key type after deserialization
+ * @param <V> Entry value type after deserialization
+ */
+ public class RemoteEventHandler<K, V> extends EntryAdapter<byte[], byte[]> {
+
+ private final Member localMember;
+
+ public RemoteEventHandler() {
+ this.localMember = theInstance.getCluster().getLocalMember();
+ }
+ @Override
+ public void entryAdded(EntryEvent<byte[], byte[]> event) {
+ if (localMember.equals(event.getMember())) {
+ // ignore locally triggered event
+ return;
+ }
+ K key = deserialize(event.getKey());
+ V newVal = deserialize(event.getValue());
+ onAdd(key, newVal);
+ }
+
+ @Override
+ public void entryRemoved(EntryEvent<byte[], byte[]> event) {
+ if (localMember.equals(event.getMember())) {
+ // ignore locally triggered event
+ return;
+ }
+ K key = deserialize(event.getKey());
+ V val = deserialize(event.getValue());
+ onRemove(key, val);
+ }
+
+ @Override
+ public void entryUpdated(EntryEvent<byte[], byte[]> event) {
+ if (localMember.equals(event.getMember())) {
+ // ignore locally triggered event
+ return;
+ }
+ K key = deserialize(event.getKey());
+ V oldVal = deserialize(event.getOldValue());
+ V newVal = deserialize(event.getValue());
+ onUpdate(key, oldVal, newVal);
+ }
+
+ /**
+ * Remote entry addition hook.
+ *
+ * @param key new key
+ * @param newVal new value
+ */
+ protected void onAdd(K key, V newVal) {
+ }
+
+ /**
+ * Remote entry update hook.
+ *
+ * @param key updated key
+ * @param oldValue old value
+ * @param newVal new value
+ */
+ protected void onUpdate(K key, V oldValue, V newVal) {
+ }
+
+ /**
+ * Remote entry remove hook.
+ *
+ * @param key removed key
+ * @param val old value
+ */
+ protected void onRemove(K key, V val) {
+ }
+ }
+
+}
diff --git a/core/store/src/main/java/org/onlab/onos/store/impl/OptionalCacheLoader.java b/core/store/hz/common/src/main/java/org/onlab/onos/store/common/OptionalCacheLoader.java
similarity index 93%
rename from core/store/src/main/java/org/onlab/onos/store/impl/OptionalCacheLoader.java
rename to core/store/hz/common/src/main/java/org/onlab/onos/store/common/OptionalCacheLoader.java
index dddd128..dd2b872 100644
--- a/core/store/src/main/java/org/onlab/onos/store/impl/OptionalCacheLoader.java
+++ b/core/store/hz/common/src/main/java/org/onlab/onos/store/common/OptionalCacheLoader.java
@@ -1,9 +1,7 @@
-package org.onlab.onos.store.impl;
+package org.onlab.onos.store.common;
import static com.google.common.base.Preconditions.checkNotNull;
-import org.onlab.onos.store.common.StoreService;
-
import com.google.common.base.Optional;
import com.google.common.cache.CacheLoader;
import com.hazelcast.core.IMap;
diff --git a/core/store/src/main/java/org/onlab/onos/store/impl/StoreManager.java b/core/store/hz/common/src/main/java/org/onlab/onos/store/common/StoreManager.java
similarity index 94%
rename from core/store/src/main/java/org/onlab/onos/store/impl/StoreManager.java
rename to core/store/hz/common/src/main/java/org/onlab/onos/store/common/StoreManager.java
index e2692d5..5685116 100644
--- a/core/store/src/main/java/org/onlab/onos/store/impl/StoreManager.java
+++ b/core/store/hz/common/src/main/java/org/onlab/onos/store/common/StoreManager.java
@@ -1,4 +1,4 @@
-package org.onlab.onos.store.impl;
+package org.onlab.onos.store.common;
import com.hazelcast.config.Config;
import com.hazelcast.config.FileSystemXmlConfig;
@@ -27,7 +27,6 @@
import org.onlab.onos.net.Port;
import org.onlab.onos.net.PortNumber;
import org.onlab.onos.net.provider.ProviderId;
-import org.onlab.onos.store.common.StoreService;
import org.onlab.onos.store.serializers.ConnectPointSerializer;
import org.onlab.onos.store.serializers.DefaultLinkSerializer;
import org.onlab.onos.store.serializers.DefaultPortSerializer;
@@ -35,7 +34,6 @@
import org.onlab.onos.store.serializers.IpPrefixSerializer;
import org.onlab.onos.store.serializers.LinkKeySerializer;
import org.onlab.onos.store.serializers.NodeIdSerializer;
-import org.onlab.onos.store.serializers.OnosTimestampSerializer;
import org.onlab.onos.store.serializers.PortNumberSerializer;
import org.onlab.onos.store.serializers.ProviderIdSerializer;
import org.onlab.packet.IpPrefix;
@@ -102,7 +100,6 @@
.register(DeviceId.class, new DeviceIdSerializer())
.register(PortNumber.class, new PortNumberSerializer())
.register(DefaultPort.class, new DefaultPortSerializer())
- .register(OnosTimestamp.class, new OnosTimestampSerializer())
.register(LinkKey.class, new LinkKeySerializer())
.register(ConnectPoint.class, new ConnectPointSerializer())
.register(DefaultLink.class, new DefaultLinkSerializer())
diff --git a/core/store/src/main/java/org/onlab/onos/store/common/StoreService.java b/core/store/hz/common/src/main/java/org/onlab/onos/store/common/StoreService.java
similarity index 100%
rename from core/store/src/main/java/org/onlab/onos/store/common/StoreService.java
rename to core/store/hz/common/src/main/java/org/onlab/onos/store/common/StoreService.java
diff --git a/core/store/src/main/java/org/onlab/onos/store/common/package-info.java b/core/store/hz/common/src/main/java/org/onlab/onos/store/common/package-info.java
similarity index 100%
rename from core/store/src/main/java/org/onlab/onos/store/common/package-info.java
rename to core/store/hz/common/src/main/java/org/onlab/onos/store/common/package-info.java
diff --git a/core/store/src/main/java/org/onlab/onos/store/impl/package-info.java b/core/store/hz/common/src/main/java/org/onlab/onos/store/impl/package-info.java
similarity index 100%
rename from core/store/src/main/java/org/onlab/onos/store/impl/package-info.java
rename to core/store/hz/common/src/main/java/org/onlab/onos/store/impl/package-info.java
diff --git a/core/store/src/test/java/org/onlab/onos/store/impl/TestStoreManager.java b/core/store/hz/common/src/test/java/org/onlab/onos/store/common/TestStoreManager.java
similarity index 97%
rename from core/store/src/test/java/org/onlab/onos/store/impl/TestStoreManager.java
rename to core/store/hz/common/src/test/java/org/onlab/onos/store/common/TestStoreManager.java
index c9d8821..1914fc3 100644
--- a/core/store/src/test/java/org/onlab/onos/store/impl/TestStoreManager.java
+++ b/core/store/hz/common/src/test/java/org/onlab/onos/store/common/TestStoreManager.java
@@ -1,4 +1,4 @@
-package org.onlab.onos.store.impl;
+package org.onlab.onos.store.common;
import java.io.FileNotFoundException;
import java.util.UUID;
diff --git a/core/store/hz/net/pom.xml b/core/store/hz/net/pom.xml
new file mode 100644
index 0000000..e3bc0e2
--- /dev/null
+++ b/core/store/hz/net/pom.xml
@@ -0,0 +1,64 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+
+ <parent>
+ <groupId>org.onlab.onos</groupId>
+ <artifactId>onos-core-hz</artifactId>
+ <version>1.0.0-SNAPSHOT</version>
+ <relativePath>../pom.xml</relativePath>
+ </parent>
+
+ <artifactId>onos-core-hz-net</artifactId>
+ <packaging>bundle</packaging>
+
+ <description>ONOS Hazelcast based distributed store subsystems</description>
+
+ <dependencies>
+ <dependency>
+ <groupId>org.onlab.onos</groupId>
+ <artifactId>onos-api</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.onlab.onos</groupId>
+ <artifactId>onos-core-serializers</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.onlab.onos</groupId>
+ <artifactId>onos-core-hz-common</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.onlab.onos</groupId>
+ <artifactId>onos-core-hz-common</artifactId>
+ <classifier>tests</classifier>
+ <scope>test</scope>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.felix</groupId>
+ <artifactId>org.apache.felix.scr.annotations</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>com.hazelcast</groupId>
+ <artifactId>hazelcast</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>de.javakaffee</groupId>
+ <artifactId>kryo-serializers</artifactId>
+ </dependency>
+ </dependencies>
+
+ <build>
+ <plugins>
+ <plugin>
+ <groupId>org.apache.felix</groupId>
+ <artifactId>maven-scr-plugin</artifactId>
+ </plugin>
+ </plugins>
+ </build>
+
+</project>
diff --git a/core/store/src/main/java/org/onlab/onos/store/device/impl/DistributedDeviceStore.java b/core/store/hz/net/src/main/java/org/onlab/onos/store/device/impl/DistributedDeviceStore.java
similarity index 94%
rename from core/store/src/main/java/org/onlab/onos/store/device/impl/DistributedDeviceStore.java
rename to core/store/hz/net/src/main/java/org/onlab/onos/store/device/impl/DistributedDeviceStore.java
index 3186578..4725bfe 100644
--- a/core/store/src/main/java/org/onlab/onos/store/device/impl/DistributedDeviceStore.java
+++ b/core/store/hz/net/src/main/java/org/onlab/onos/store/device/impl/DistributedDeviceStore.java
@@ -1,6 +1,7 @@
package org.onlab.onos.store.device.impl;
import static com.google.common.base.Predicates.notNull;
+
import com.google.common.base.Optional;
import com.google.common.cache.LoadingCache;
import com.google.common.collect.FluentIterable;
@@ -26,9 +27,9 @@
import org.onlab.onos.net.device.DeviceStoreDelegate;
import org.onlab.onos.net.device.PortDescription;
import org.onlab.onos.net.provider.ProviderId;
-import org.onlab.onos.store.impl.AbsentInvalidatingLoadingCache;
-import org.onlab.onos.store.impl.AbstractDistributedStore;
-import org.onlab.onos.store.impl.OptionalCacheLoader;
+import org.onlab.onos.store.common.AbsentInvalidatingLoadingCache;
+import org.onlab.onos.store.common.AbstractHazelcastStore;
+import org.onlab.onos.store.common.OptionalCacheLoader;
import org.slf4j.Logger;
import java.util.ArrayList;
@@ -52,7 +53,7 @@
@Component(immediate = true)
@Service
public class DistributedDeviceStore
- extends AbstractDistributedStore<DeviceEvent, DeviceStoreDelegate>
+ extends AbstractHazelcastStore<DeviceEvent, DeviceStoreDelegate>
implements DeviceStore {
private final Logger log = getLogger(getClass());
@@ -71,6 +72,10 @@
private IMap<byte[], byte[]> rawDevicePorts;
private LoadingCache<DeviceId, Optional<Map<PortNumber, Port>>> devicePorts;
+ private String devicesListener;
+
+ private String portsListener;
+
@Override
@Activate
public void activate() {
@@ -85,7 +90,7 @@
= new OptionalCacheLoader<>(storeService, rawDevices);
devices = new AbsentInvalidatingLoadingCache<>(newBuilder().build(deviceLoader));
// refresh/populate cache based on notification from other instance
- rawDevices.addEntryListener(new RemoteDeviceEventHandler(devices), includeValue);
+ devicesListener = rawDevices.addEntryListener(new RemoteDeviceEventHandler(devices), includeValue);
// TODO cache availableDevices
availableDevices = theInstance.getSet("availableDevices");
@@ -95,7 +100,7 @@
= new OptionalCacheLoader<>(storeService, rawDevicePorts);
devicePorts = new AbsentInvalidatingLoadingCache<>(newBuilder().build(devicePortLoader));
// refresh/populate cache based on notification from other instance
- rawDevicePorts.addEntryListener(new RemotePortEventHandler(devicePorts), includeValue);
+ portsListener = rawDevicePorts.addEntryListener(new RemotePortEventHandler(devicePorts), includeValue);
loadDeviceCache();
loadDevicePortsCache();
@@ -105,6 +110,8 @@
@Deactivate
public void deactivate() {
+ rawDevicePorts.removeEntryListener(portsListener);
+ rawDevices.removeEntryListener(devicesListener);
log.info("Stopped");
}
@@ -353,7 +360,7 @@
}
}
- private class RemoteDeviceEventHandler extends RemoteEventHandler<DeviceId, DefaultDevice> {
+ private class RemoteDeviceEventHandler extends RemoteCacheEventHandler<DeviceId, DefaultDevice> {
public RemoteDeviceEventHandler(LoadingCache<DeviceId, Optional<DefaultDevice>> cache) {
super(cache);
}
@@ -374,7 +381,7 @@
}
}
- private class RemotePortEventHandler extends RemoteEventHandler<DeviceId, Map<PortNumber, Port>> {
+ private class RemotePortEventHandler extends RemoteCacheEventHandler<DeviceId, Map<PortNumber, Port>> {
public RemotePortEventHandler(LoadingCache<DeviceId, Optional<Map<PortNumber, Port>>> cache) {
super(cache);
}
diff --git a/core/store/hz/net/src/main/java/org/onlab/onos/store/device/impl/NoOpClockService.java b/core/store/hz/net/src/main/java/org/onlab/onos/store/device/impl/NoOpClockService.java
new file mode 100644
index 0000000..2c443e9
--- /dev/null
+++ b/core/store/hz/net/src/main/java/org/onlab/onos/store/device/impl/NoOpClockService.java
@@ -0,0 +1,32 @@
+package org.onlab.onos.store.device.impl;
+
+import org.apache.felix.scr.annotations.Component;
+import org.apache.felix.scr.annotations.Service;
+import org.onlab.onos.cluster.MastershipTerm;
+import org.onlab.onos.net.DeviceId;
+import org.onlab.onos.store.ClockService;
+import org.onlab.onos.store.Timestamp;
+
+// FIXME: Code clone in onos-core-trivial, onos-core-hz-net
+/**
+ * Dummy implementation of {@link ClockService}.
+ */
+@Component(immediate = true)
+@Service
+public class NoOpClockService implements ClockService {
+
+ @Override
+ public Timestamp getTimestamp(DeviceId deviceId) {
+ return new Timestamp() {
+
+ @Override
+ public int compareTo(Timestamp o) {
+ throw new IllegalStateException("Never expected to be used.");
+ }
+ };
+ }
+
+ @Override
+ public void setMastershipTerm(DeviceId deviceId, MastershipTerm term) {
+ }
+}
diff --git a/core/store/src/main/java/org/onlab/onos/store/device/impl/package-info.java b/core/store/hz/net/src/main/java/org/onlab/onos/store/device/impl/package-info.java
similarity index 100%
rename from core/store/src/main/java/org/onlab/onos/store/device/impl/package-info.java
rename to core/store/hz/net/src/main/java/org/onlab/onos/store/device/impl/package-info.java
diff --git a/core/store/hz/net/src/main/java/org/onlab/onos/store/flow/impl/DistributedFlowRuleStore.java b/core/store/hz/net/src/main/java/org/onlab/onos/store/flow/impl/DistributedFlowRuleStore.java
new file mode 100644
index 0000000..5a5592a
--- /dev/null
+++ b/core/store/hz/net/src/main/java/org/onlab/onos/store/flow/impl/DistributedFlowRuleStore.java
@@ -0,0 +1,153 @@
+package org.onlab.onos.store.flow.impl;
+
+import static org.onlab.onos.net.flow.FlowRuleEvent.Type.RULE_ADDED;
+import static org.onlab.onos.net.flow.FlowRuleEvent.Type.RULE_REMOVED;
+import static org.slf4j.LoggerFactory.getLogger;
+
+import java.util.Collection;
+import java.util.Collections;
+
+import org.apache.felix.scr.annotations.Activate;
+import org.apache.felix.scr.annotations.Component;
+import org.apache.felix.scr.annotations.Deactivate;
+import org.apache.felix.scr.annotations.Service;
+import org.onlab.onos.ApplicationId;
+import org.onlab.onos.net.DeviceId;
+import org.onlab.onos.net.flow.DefaultFlowRule;
+import org.onlab.onos.net.flow.FlowRule;
+import org.onlab.onos.net.flow.FlowRule.FlowRuleState;
+import org.onlab.onos.net.flow.FlowRuleEvent;
+import org.onlab.onos.net.flow.FlowRuleEvent.Type;
+import org.onlab.onos.net.flow.FlowRuleStore;
+import org.onlab.onos.net.flow.FlowRuleStoreDelegate;
+import org.onlab.onos.store.AbstractStore;
+import org.slf4j.Logger;
+
+import com.google.common.collect.ArrayListMultimap;
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Multimap;
+
+/**
+ * Manages inventory of flow rules using trivial in-memory implementation.
+ */
+//FIXME: I LIE I AM NOT DISTRIBUTED
+@Component(immediate = true)
+@Service
+public class DistributedFlowRuleStore
+extends AbstractStore<FlowRuleEvent, FlowRuleStoreDelegate>
+implements FlowRuleStore {
+
+ private final Logger log = getLogger(getClass());
+
+ // store entries as a pile of rules, no info about device tables
+ private final Multimap<DeviceId, FlowRule> flowEntries =
+ ArrayListMultimap.<DeviceId, FlowRule>create();
+
+ private final Multimap<ApplicationId, FlowRule> flowEntriesById =
+ ArrayListMultimap.<ApplicationId, FlowRule>create();
+
+ @Activate
+ public void activate() {
+ log.info("Started");
+ }
+
+ @Deactivate
+ public void deactivate() {
+ log.info("Stopped");
+ }
+
+
+ @Override
+ public synchronized FlowRule getFlowRule(FlowRule rule) {
+ for (FlowRule f : flowEntries.get(rule.deviceId())) {
+ if (f.equals(rule)) {
+ return f;
+ }
+ }
+ return null;
+ }
+
+ @Override
+ public synchronized Iterable<FlowRule> getFlowEntries(DeviceId deviceId) {
+ Collection<FlowRule> rules = flowEntries.get(deviceId);
+ if (rules == null) {
+ return Collections.emptyList();
+ }
+ return ImmutableSet.copyOf(rules);
+ }
+
+ @Override
+ public synchronized Iterable<FlowRule> getFlowEntriesByAppId(ApplicationId appId) {
+ Collection<FlowRule> rules = flowEntriesById.get(appId);
+ if (rules == null) {
+ return Collections.emptyList();
+ }
+ return ImmutableSet.copyOf(rules);
+ }
+
+ @Override
+ public synchronized void storeFlowRule(FlowRule rule) {
+ FlowRule f = new DefaultFlowRule(rule, FlowRuleState.PENDING_ADD);
+ DeviceId did = f.deviceId();
+ if (!flowEntries.containsEntry(did, f)) {
+ flowEntries.put(did, f);
+ flowEntriesById.put(rule.appId(), f);
+ }
+ }
+
+ @Override
+ public synchronized void deleteFlowRule(FlowRule rule) {
+ FlowRule f = new DefaultFlowRule(rule, FlowRuleState.PENDING_REMOVE);
+ DeviceId did = f.deviceId();
+
+ /*
+ * find the rule and mark it for deletion.
+ * Ultimately a flow removed will come remove it.
+ */
+
+ if (flowEntries.containsEntry(did, f)) {
+ //synchronized (flowEntries) {
+ flowEntries.remove(did, f);
+ flowEntries.put(did, f);
+ flowEntriesById.remove(rule.appId(), rule);
+ //}
+ }
+ }
+
+ @Override
+ public synchronized FlowRuleEvent addOrUpdateFlowRule(FlowRule rule) {
+ DeviceId did = rule.deviceId();
+
+ // check if this new rule is an update to an existing entry
+ if (flowEntries.containsEntry(did, rule)) {
+ //synchronized (flowEntries) {
+ // Multimaps support duplicates so we have to remove our rule
+ // and replace it with the current version.
+ flowEntries.remove(did, rule);
+ flowEntries.put(did, rule);
+ //}
+ return new FlowRuleEvent(Type.RULE_UPDATED, rule);
+ }
+
+ flowEntries.put(did, rule);
+ return new FlowRuleEvent(RULE_ADDED, rule);
+ }
+
+ @Override
+ public synchronized FlowRuleEvent removeFlowRule(FlowRule rule) {
+ //synchronized (this) {
+ if (flowEntries.remove(rule.deviceId(), rule)) {
+ return new FlowRuleEvent(RULE_REMOVED, rule);
+ } else {
+ return null;
+ }
+ //}
+ }
+
+
+
+
+
+
+
+}
diff --git a/core/store/hz/net/src/main/java/org/onlab/onos/store/host/impl/DistributedHostStore.java b/core/store/hz/net/src/main/java/org/onlab/onos/store/host/impl/DistributedHostStore.java
new file mode 100644
index 0000000..09820f4
--- /dev/null
+++ b/core/store/hz/net/src/main/java/org/onlab/onos/store/host/impl/DistributedHostStore.java
@@ -0,0 +1,278 @@
+package org.onlab.onos.store.host.impl;
+
+import static org.onlab.onos.net.host.HostEvent.Type.HOST_ADDED;
+import static org.onlab.onos.net.host.HostEvent.Type.HOST_MOVED;
+import static org.onlab.onos.net.host.HostEvent.Type.HOST_REMOVED;
+import static org.onlab.onos.net.host.HostEvent.Type.HOST_UPDATED;
+import static org.slf4j.LoggerFactory.getLogger;
+
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+
+import org.apache.felix.scr.annotations.Activate;
+import org.apache.felix.scr.annotations.Component;
+import org.apache.felix.scr.annotations.Deactivate;
+import org.apache.felix.scr.annotations.Service;
+import org.onlab.onos.net.ConnectPoint;
+import org.onlab.onos.net.DefaultHost;
+import org.onlab.onos.net.DeviceId;
+import org.onlab.onos.net.Host;
+import org.onlab.onos.net.HostId;
+import org.onlab.onos.net.host.HostDescription;
+import org.onlab.onos.net.host.HostEvent;
+import org.onlab.onos.net.host.HostStore;
+import org.onlab.onos.net.host.HostStoreDelegate;
+import org.onlab.onos.net.host.PortAddresses;
+import org.onlab.onos.net.provider.ProviderId;
+import org.onlab.onos.store.AbstractStore;
+import org.onlab.packet.IpPrefix;
+import org.onlab.packet.MacAddress;
+import org.onlab.packet.VlanId;
+import org.slf4j.Logger;
+
+import com.google.common.collect.HashMultimap;
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Multimap;
+import com.google.common.collect.Sets;
+
+/**
+ * Manages inventory of end-station hosts using trivial in-memory
+ * implementation.
+ */
+//FIXME: I LIE I AM NOT DISTRIBUTED
+@Component(immediate = true)
+@Service
+public class DistributedHostStore
+extends AbstractStore<HostEvent, HostStoreDelegate>
+implements HostStore {
+
+ private final Logger log = getLogger(getClass());
+
+ // Host inventory
+ private final Map<HostId, Host> hosts = new ConcurrentHashMap<>();
+
+ // Hosts tracked by their location
+ private final Multimap<ConnectPoint, Host> locations = HashMultimap.create();
+
+ private final Map<ConnectPoint, PortAddresses> portAddresses =
+ new ConcurrentHashMap<>();
+
+ @Activate
+ public void activate() {
+ log.info("Started");
+ }
+
+ @Deactivate
+ public void deactivate() {
+ log.info("Stopped");
+ }
+
+ @Override
+ public HostEvent createOrUpdateHost(ProviderId providerId, HostId hostId,
+ HostDescription hostDescription) {
+ Host host = hosts.get(hostId);
+ if (host == null) {
+ return createHost(providerId, hostId, hostDescription);
+ }
+ return updateHost(providerId, host, hostDescription);
+ }
+
+ // creates a new host and sends HOST_ADDED
+ private HostEvent createHost(ProviderId providerId, HostId hostId,
+ HostDescription descr) {
+ DefaultHost newhost = new DefaultHost(providerId, hostId,
+ descr.hwAddress(),
+ descr.vlan(),
+ descr.location(),
+ descr.ipAddresses());
+ synchronized (this) {
+ hosts.put(hostId, newhost);
+ locations.put(descr.location(), newhost);
+ }
+ return new HostEvent(HOST_ADDED, newhost);
+ }
+
+ // checks for type of update to host, sends appropriate event
+ private HostEvent updateHost(ProviderId providerId, Host host,
+ HostDescription descr) {
+ DefaultHost updated;
+ HostEvent event;
+ if (!host.location().equals(descr.location())) {
+ updated = new DefaultHost(providerId, host.id(),
+ host.mac(),
+ host.vlan(),
+ descr.location(),
+ host.ipAddresses());
+ event = new HostEvent(HOST_MOVED, updated);
+
+ } else if (!(host.ipAddresses().equals(descr.ipAddresses()))) {
+ updated = new DefaultHost(providerId, host.id(),
+ host.mac(),
+ host.vlan(),
+ descr.location(),
+ descr.ipAddresses());
+ event = new HostEvent(HOST_UPDATED, updated);
+ } else {
+ return null;
+ }
+ synchronized (this) {
+ hosts.put(host.id(), updated);
+ locations.remove(host.location(), host);
+ locations.put(updated.location(), updated);
+ }
+ return event;
+ }
+
+ @Override
+ public HostEvent removeHost(HostId hostId) {
+ synchronized (this) {
+ Host host = hosts.remove(hostId);
+ if (host != null) {
+ locations.remove((host.location()), host);
+ return new HostEvent(HOST_REMOVED, host);
+ }
+ return null;
+ }
+ }
+
+ @Override
+ public int getHostCount() {
+ return hosts.size();
+ }
+
+ @Override
+ public Iterable<Host> getHosts() {
+ return Collections.unmodifiableSet(new HashSet<>(hosts.values()));
+ }
+
+ @Override
+ public Host getHost(HostId hostId) {
+ return hosts.get(hostId);
+ }
+
+ @Override
+ public Set<Host> getHosts(VlanId vlanId) {
+ Set<Host> vlanset = new HashSet<>();
+ for (Host h : hosts.values()) {
+ if (h.vlan().equals(vlanId)) {
+ vlanset.add(h);
+ }
+ }
+ return vlanset;
+ }
+
+ @Override
+ public Set<Host> getHosts(MacAddress mac) {
+ Set<Host> macset = new HashSet<>();
+ for (Host h : hosts.values()) {
+ if (h.mac().equals(mac)) {
+ macset.add(h);
+ }
+ }
+ return macset;
+ }
+
+ @Override
+ public Set<Host> getHosts(IpPrefix ip) {
+ Set<Host> ipset = new HashSet<>();
+ for (Host h : hosts.values()) {
+ if (h.ipAddresses().contains(ip)) {
+ ipset.add(h);
+ }
+ }
+ return ipset;
+ }
+
+ @Override
+ public Set<Host> getConnectedHosts(ConnectPoint connectPoint) {
+ return ImmutableSet.copyOf(locations.get(connectPoint));
+ }
+
+ @Override
+ public Set<Host> getConnectedHosts(DeviceId deviceId) {
+ Set<Host> hostset = new HashSet<>();
+ for (ConnectPoint p : locations.keySet()) {
+ if (p.deviceId().equals(deviceId)) {
+ hostset.addAll(locations.get(p));
+ }
+ }
+ return hostset;
+ }
+
+ @Override
+ public void updateAddressBindings(PortAddresses addresses) {
+ synchronized (portAddresses) {
+ PortAddresses existing = portAddresses.get(addresses.connectPoint());
+ if (existing == null) {
+ portAddresses.put(addresses.connectPoint(), addresses);
+ } else {
+ Set<IpPrefix> union = Sets.union(existing.ips(), addresses.ips())
+ .immutableCopy();
+
+ MacAddress newMac = (addresses.mac() == null) ? existing.mac()
+ : addresses.mac();
+
+ PortAddresses newAddresses =
+ new PortAddresses(addresses.connectPoint(), union, newMac);
+
+ portAddresses.put(newAddresses.connectPoint(), newAddresses);
+ }
+ }
+ }
+
+ @Override
+ public void removeAddressBindings(PortAddresses addresses) {
+ synchronized (portAddresses) {
+ PortAddresses existing = portAddresses.get(addresses.connectPoint());
+ if (existing != null) {
+ Set<IpPrefix> difference =
+ Sets.difference(existing.ips(), addresses.ips()).immutableCopy();
+
+ // If they removed the existing mac, set the new mac to null.
+ // Otherwise, keep the existing mac.
+ MacAddress newMac = existing.mac();
+ if (addresses.mac() != null && addresses.mac().equals(existing.mac())) {
+ newMac = null;
+ }
+
+ PortAddresses newAddresses =
+ new PortAddresses(addresses.connectPoint(), difference, newMac);
+
+ portAddresses.put(newAddresses.connectPoint(), newAddresses);
+ }
+ }
+ }
+
+ @Override
+ public void clearAddressBindings(ConnectPoint connectPoint) {
+ synchronized (portAddresses) {
+ portAddresses.remove(connectPoint);
+ }
+ }
+
+ @Override
+ public Set<PortAddresses> getAddressBindings() {
+ synchronized (portAddresses) {
+ return new HashSet<>(portAddresses.values());
+ }
+ }
+
+ @Override
+ public PortAddresses getAddressBindingsForPort(ConnectPoint connectPoint) {
+ PortAddresses addresses;
+
+ synchronized (portAddresses) {
+ addresses = portAddresses.get(connectPoint);
+ }
+
+ if (addresses == null) {
+ addresses = new PortAddresses(connectPoint, null, null);
+ }
+
+ return addresses;
+ }
+
+}
diff --git a/core/store/src/main/java/org/onlab/onos/store/link/impl/DistributedLinkStore.java b/core/store/hz/net/src/main/java/org/onlab/onos/store/link/impl/DistributedLinkStore.java
similarity index 93%
rename from core/store/src/main/java/org/onlab/onos/store/link/impl/DistributedLinkStore.java
rename to core/store/hz/net/src/main/java/org/onlab/onos/store/link/impl/DistributedLinkStore.java
index 6db9695..6ef10e7 100644
--- a/core/store/src/main/java/org/onlab/onos/store/link/impl/DistributedLinkStore.java
+++ b/core/store/hz/net/src/main/java/org/onlab/onos/store/link/impl/DistributedLinkStore.java
@@ -10,6 +10,7 @@
import java.util.HashSet;
import java.util.Set;
+
import org.apache.felix.scr.annotations.Activate;
import org.apache.felix.scr.annotations.Component;
import org.apache.felix.scr.annotations.Deactivate;
@@ -24,9 +25,9 @@
import org.onlab.onos.net.link.LinkStore;
import org.onlab.onos.net.link.LinkStoreDelegate;
import org.onlab.onos.net.provider.ProviderId;
-import org.onlab.onos.store.impl.AbsentInvalidatingLoadingCache;
-import org.onlab.onos.store.impl.AbstractDistributedStore;
-import org.onlab.onos.store.impl.OptionalCacheLoader;
+import org.onlab.onos.store.common.AbsentInvalidatingLoadingCache;
+import org.onlab.onos.store.common.AbstractHazelcastStore;
+import org.onlab.onos.store.common.OptionalCacheLoader;
import org.slf4j.Logger;
import com.google.common.base.Optional;
@@ -43,7 +44,7 @@
@Component(immediate = true)
@Service
public class DistributedLinkStore
- extends AbstractDistributedStore<LinkEvent, LinkStoreDelegate>
+ extends AbstractHazelcastStore<LinkEvent, LinkStoreDelegate>
implements LinkStore {
private final Logger log = getLogger(getClass());
@@ -57,6 +58,8 @@
private final Multimap<DeviceId, Link> srcLinks = HashMultimap.create();
private final Multimap<DeviceId, Link> dstLinks = HashMultimap.create();
+ private String linksListener;
+
@Override
@Activate
public void activate() {
@@ -70,7 +73,7 @@
= new OptionalCacheLoader<>(storeService, rawLinks);
links = new AbsentInvalidatingLoadingCache<>(newBuilder().build(linkLoader));
// refresh/populate cache based on notification from other instance
- rawLinks.addEntryListener(new RemoteLinkEventHandler(links), includeValue);
+ linksListener = rawLinks.addEntryListener(new RemoteLinkEventHandler(links), includeValue);
loadLinkCache();
@@ -79,7 +82,7 @@
@Deactivate
public void deactivate() {
- super.activate();
+ rawLinks.removeEntryListener(linksListener);
log.info("Stopped");
}
@@ -232,7 +235,7 @@
}
}
- private class RemoteLinkEventHandler extends RemoteEventHandler<LinkKey, DefaultLink> {
+ private class RemoteLinkEventHandler extends RemoteCacheEventHandler<LinkKey, DefaultLink> {
public RemoteLinkEventHandler(LoadingCache<LinkKey, Optional<DefaultLink>> cache) {
super(cache);
}
diff --git a/core/store/src/main/java/org/onlab/onos/store/link/impl/package-info.java b/core/store/hz/net/src/main/java/org/onlab/onos/store/link/impl/package-info.java
similarity index 100%
rename from core/store/src/main/java/org/onlab/onos/store/link/impl/package-info.java
rename to core/store/hz/net/src/main/java/org/onlab/onos/store/link/impl/package-info.java
diff --git a/core/store/hz/net/src/main/java/org/onlab/onos/store/topology/impl/DefaultTopology.java b/core/store/hz/net/src/main/java/org/onlab/onos/store/topology/impl/DefaultTopology.java
new file mode 100644
index 0000000..5574d27
--- /dev/null
+++ b/core/store/hz/net/src/main/java/org/onlab/onos/store/topology/impl/DefaultTopology.java
@@ -0,0 +1,444 @@
+package org.onlab.onos.store.topology.impl;
+
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.ImmutableSetMultimap;
+import org.onlab.graph.DijkstraGraphSearch;
+import org.onlab.graph.GraphPathSearch;
+import org.onlab.graph.TarjanGraphSearch;
+import org.onlab.onos.net.AbstractModel;
+import org.onlab.onos.net.ConnectPoint;
+import org.onlab.onos.net.DefaultPath;
+import org.onlab.onos.net.DeviceId;
+import org.onlab.onos.net.Link;
+import org.onlab.onos.net.Path;
+import org.onlab.onos.net.provider.ProviderId;
+import org.onlab.onos.net.topology.ClusterId;
+import org.onlab.onos.net.topology.DefaultTopologyCluster;
+import org.onlab.onos.net.topology.DefaultTopologyVertex;
+import org.onlab.onos.net.topology.GraphDescription;
+import org.onlab.onos.net.topology.LinkWeight;
+import org.onlab.onos.net.topology.Topology;
+import org.onlab.onos.net.topology.TopologyCluster;
+import org.onlab.onos.net.topology.TopologyEdge;
+import org.onlab.onos.net.topology.TopologyGraph;
+import org.onlab.onos.net.topology.TopologyVertex;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import static com.google.common.base.MoreObjects.toStringHelper;
+import static com.google.common.collect.ImmutableSetMultimap.Builder;
+import static org.onlab.graph.GraphPathSearch.Result;
+import static org.onlab.graph.TarjanGraphSearch.SCCResult;
+import static org.onlab.onos.net.Link.Type.INDIRECT;
+
+/**
+ * Default implementation of the topology descriptor. This carries the
+ * backing topology data.
+ */
+public class DefaultTopology extends AbstractModel implements Topology {
+
+ private static final DijkstraGraphSearch<TopologyVertex, TopologyEdge> DIJKSTRA =
+ new DijkstraGraphSearch<>();
+ private static final TarjanGraphSearch<TopologyVertex, TopologyEdge> TARJAN =
+ new TarjanGraphSearch<>();
+
+ private static final ProviderId PID = new ProviderId("core", "org.onlab.onos.net");
+
+ private final long time;
+ private final TopologyGraph graph;
+
+ private final SCCResult<TopologyVertex, TopologyEdge> clusterResults;
+ private final ImmutableMap<DeviceId, Result<TopologyVertex, TopologyEdge>> results;
+ private final ImmutableSetMultimap<PathKey, Path> paths;
+
+ private final ImmutableMap<ClusterId, TopologyCluster> clusters;
+ private final ImmutableSet<ConnectPoint> infrastructurePoints;
+ private final ImmutableSetMultimap<ClusterId, ConnectPoint> broadcastSets;
+
+ private ImmutableMap<DeviceId, TopologyCluster> clustersByDevice;
+ private ImmutableSetMultimap<TopologyCluster, DeviceId> devicesByCluster;
+ private ImmutableSetMultimap<TopologyCluster, Link> linksByCluster;
+
+
+ /**
+ * Creates a topology descriptor attributed to the specified provider.
+ *
+ * @param providerId identity of the provider
+ * @param description data describing the new topology
+ */
+ DefaultTopology(ProviderId providerId, GraphDescription description) {
+ super(providerId);
+ this.time = description.timestamp();
+
+ // Build the graph
+ this.graph = new DefaultTopologyGraph(description.vertexes(),
+ description.edges());
+
+ this.results = searchForShortestPaths();
+ this.paths = buildPaths();
+
+ this.clusterResults = searchForClusters();
+ this.clusters = buildTopologyClusters();
+
+ buildIndexes();
+
+ this.broadcastSets = buildBroadcastSets();
+ this.infrastructurePoints = findInfrastructurePoints();
+ }
+
+ @Override
+ public long time() {
+ return time;
+ }
+
+ @Override
+ public int clusterCount() {
+ return clusters.size();
+ }
+
+ @Override
+ public int deviceCount() {
+ return graph.getVertexes().size();
+ }
+
+ @Override
+ public int linkCount() {
+ return graph.getEdges().size();
+ }
+
+ @Override
+ public int pathCount() {
+ return paths.size();
+ }
+
+ /**
+ * Returns the backing topology graph.
+ *
+ * @return topology graph
+ */
+ TopologyGraph getGraph() {
+ return graph;
+ }
+
+ /**
+ * Returns the set of topology clusters.
+ *
+ * @return set of clusters
+ */
+ Set<TopologyCluster> getClusters() {
+ return ImmutableSet.copyOf(clusters.values());
+ }
+
+ /**
+ * Returns the specified topology cluster.
+ *
+ * @param clusterId cluster identifier
+ * @return topology cluster
+ */
+ TopologyCluster getCluster(ClusterId clusterId) {
+ return clusters.get(clusterId);
+ }
+
+ /**
+ * Returns the topology cluster that contains the given device.
+ *
+ * @param deviceId device identifier
+ * @return topology cluster
+ */
+ TopologyCluster getCluster(DeviceId deviceId) {
+ return clustersByDevice.get(deviceId);
+ }
+
+ /**
+ * Returns the set of cluster devices.
+ *
+ * @param cluster topology cluster
+ * @return cluster devices
+ */
+ Set<DeviceId> getClusterDevices(TopologyCluster cluster) {
+ return devicesByCluster.get(cluster);
+ }
+
+ /**
+ * Returns the set of cluster links.
+ *
+ * @param cluster topology cluster
+ * @return cluster links
+ */
+ Set<Link> getClusterLinks(TopologyCluster cluster) {
+ return linksByCluster.get(cluster);
+ }
+
+ /**
+ * Indicates whether the given point is an infrastructure link end-point.
+ *
+ * @param connectPoint connection point
+ * @return true if infrastructure
+ */
+ boolean isInfrastructure(ConnectPoint connectPoint) {
+ return infrastructurePoints.contains(connectPoint);
+ }
+
+ /**
+ * Indicates whether the given point is part of a broadcast set.
+ *
+ * @param connectPoint connection point
+ * @return true if in broadcast set
+ */
+ boolean isBroadcastPoint(ConnectPoint connectPoint) {
+ // Any non-infrastructure, i.e. edge points are assumed to be OK.
+ if (!isInfrastructure(connectPoint)) {
+ return true;
+ }
+
+ // Find the cluster to which the device belongs.
+ TopologyCluster cluster = clustersByDevice.get(connectPoint.deviceId());
+ if (cluster == null) {
+ throw new IllegalArgumentException("No cluster found for device " + connectPoint.deviceId());
+ }
+
+ // If the broadcast set is null or empty, or if the point explicitly
+ // belongs to it, return true;
+ Set<ConnectPoint> points = broadcastSets.get(cluster.id());
+ return points == null || points.isEmpty() || points.contains(connectPoint);
+ }
+
+ /**
+ * Returns the size of the cluster broadcast set.
+ *
+ * @param clusterId cluster identifier
+ * @return size of the cluster broadcast set
+ */
+ int broadcastSetSize(ClusterId clusterId) {
+ return broadcastSets.get(clusterId).size();
+ }
+
+ /**
+ * Returns the set of pre-computed shortest paths between source and
+ * destination devices.
+ *
+ * @param src source device
+ * @param dst destination device
+ * @return set of shortest paths
+ */
+ Set<Path> getPaths(DeviceId src, DeviceId dst) {
+ return paths.get(new PathKey(src, dst));
+ }
+
+ /**
+ * Computes on-demand the set of shortest paths between source and
+ * destination devices.
+ *
+ * @param src source device
+ * @param dst destination device
+ * @return set of shortest paths
+ */
+ Set<Path> getPaths(DeviceId src, DeviceId dst, LinkWeight weight) {
+ GraphPathSearch.Result<TopologyVertex, TopologyEdge> result =
+ DIJKSTRA.search(graph, new DefaultTopologyVertex(src),
+ new DefaultTopologyVertex(dst), weight);
+ ImmutableSet.Builder<Path> builder = ImmutableSet.builder();
+ for (org.onlab.graph.Path<TopologyVertex, TopologyEdge> path : result.paths()) {
+ builder.add(networkPath(path));
+ }
+ return builder.build();
+ }
+
+
+ // Searches the graph for all shortest paths and returns the search results.
+ private ImmutableMap<DeviceId, Result<TopologyVertex, TopologyEdge>> searchForShortestPaths() {
+ ImmutableMap.Builder<DeviceId, Result<TopologyVertex, TopologyEdge>> builder = ImmutableMap.builder();
+
+ // Search graph paths for each source to all destinations.
+ LinkWeight weight = new HopCountLinkWeight(graph.getVertexes().size());
+ for (TopologyVertex src : graph.getVertexes()) {
+ builder.put(src.deviceId(), DIJKSTRA.search(graph, src, null, weight));
+ }
+ return builder.build();
+ }
+
+ // Builds network paths from the graph path search results
+ private ImmutableSetMultimap<PathKey, Path> buildPaths() {
+ Builder<PathKey, Path> builder = ImmutableSetMultimap.builder();
+ for (DeviceId deviceId : results.keySet()) {
+ Result<TopologyVertex, TopologyEdge> result = results.get(deviceId);
+ for (org.onlab.graph.Path<TopologyVertex, TopologyEdge> path : result.paths()) {
+ builder.put(new PathKey(path.src().deviceId(), path.dst().deviceId()),
+ networkPath(path));
+ }
+ }
+ return builder.build();
+ }
+
+ // Converts graph path to a network path with the same cost.
+ private Path networkPath(org.onlab.graph.Path<TopologyVertex, TopologyEdge> path) {
+ List<Link> links = new ArrayList<>();
+ for (TopologyEdge edge : path.edges()) {
+ links.add(edge.link());
+ }
+ return new DefaultPath(PID, links, path.cost());
+ }
+
+
+ // Searches for SCC clusters in the network topology graph using Tarjan
+ // algorithm.
+ private SCCResult<TopologyVertex, TopologyEdge> searchForClusters() {
+ return TARJAN.search(graph, new NoIndirectLinksWeight());
+ }
+
+    // Builds the topology clusters and returns the id-cluster bindings.
+    private ImmutableMap<ClusterId, TopologyCluster> buildTopologyClusters() {
+        ImmutableMap.Builder<ClusterId, TopologyCluster> clusterBuilder = ImmutableMap.builder();
+        // Reuse the SCC result already computed in the constructor
+        // (avoids re-running a full O(V+E) Tarjan search).
+        SCCResult<TopologyVertex, TopologyEdge> result = clusterResults;
+
+        // Extract both vertexes and edges from the results; the lists form
+        // pairs along the same index.
+        List<Set<TopologyVertex>> clusterVertexes = result.clusterVertexes();
+        List<Set<TopologyEdge>> clusterEdges = result.clusterEdges();
+
+        // Scan over the lists and create a cluster from the results.
+        for (int i = 0, n = result.clusterCount(); i < n; i++) {
+            Set<TopologyVertex> vertexSet = clusterVertexes.get(i);
+            Set<TopologyEdge> edgeSet = clusterEdges.get(i);
+
+            ClusterId cid = ClusterId.clusterId(i);
+            DefaultTopologyCluster cluster =
+                    new DefaultTopologyCluster(cid, vertexSet.size(), edgeSet.size(),
+                                               findRoot(vertexSet).deviceId());
+            clusterBuilder.put(cid, cluster);
+        }
+        return clusterBuilder.build();
+
+    // Finds the vertex whose device id is the lexicographical minimum in the
+    // specified set. (Fix: compare the candidate vertex, not minVertex to itself.)
+    private TopologyVertex findRoot(Set<TopologyVertex> vertexSet) {
+        TopologyVertex minVertex = null;
+        for (TopologyVertex vertex : vertexSet) {
+            if (minVertex == null ||
+                    vertex.deviceId().toString()
+                    .compareTo(minVertex.deviceId().toString()) < 0) {
+                minVertex = vertex;
+            }
+        }
+        return minVertex;
+    }
+
+ // Processes a map of broadcast sets for each cluster.
+ private ImmutableSetMultimap<ClusterId, ConnectPoint> buildBroadcastSets() {
+ Builder<ClusterId, ConnectPoint> builder = ImmutableSetMultimap.builder();
+ for (TopologyCluster cluster : clusters.values()) {
+ addClusterBroadcastSet(cluster, builder);
+ }
+ return builder.build();
+ }
+
+ // Finds all broadcast points for the cluster. These are those connection
+ // points which lie along the shortest paths between the cluster root and
+ // all other devices within the cluster.
+ private void addClusterBroadcastSet(TopologyCluster cluster,
+ Builder<ClusterId, ConnectPoint> builder) {
+ // Use the graph root search results to build the broadcast set.
+ Result<TopologyVertex, TopologyEdge> result = results.get(cluster.root());
+ for (Map.Entry<TopologyVertex, Set<TopologyEdge>> entry : result.parents().entrySet()) {
+ TopologyVertex vertex = entry.getKey();
+
+ // Ignore any parents that lead outside the cluster.
+ if (clustersByDevice.get(vertex.deviceId()) != cluster) {
+ continue;
+ }
+
+ // Ignore any back-link sets that are empty.
+ Set<TopologyEdge> parents = entry.getValue();
+ if (parents.isEmpty()) {
+ continue;
+ }
+
+ // Use the first back-link source and destinations to add to the
+ // broadcast set.
+ Link link = parents.iterator().next().link();
+ builder.put(cluster.id(), link.src());
+ builder.put(cluster.id(), link.dst());
+ }
+ }
+
+ // Collects and returns an set of all infrastructure link end-points.
+ private ImmutableSet<ConnectPoint> findInfrastructurePoints() {
+ ImmutableSet.Builder<ConnectPoint> builder = ImmutableSet.builder();
+ for (TopologyEdge edge : graph.getEdges()) {
+ builder.add(edge.link().src());
+ builder.add(edge.link().dst());
+ }
+ return builder.build();
+ }
+
+ // Builds cluster-devices, cluster-links and device-cluster indexes.
+ private void buildIndexes() {
+ // Prepare the index builders
+ ImmutableMap.Builder<DeviceId, TopologyCluster> clusterBuilder = ImmutableMap.builder();
+ ImmutableSetMultimap.Builder<TopologyCluster, DeviceId> devicesBuilder = ImmutableSetMultimap.builder();
+ ImmutableSetMultimap.Builder<TopologyCluster, Link> linksBuilder = ImmutableSetMultimap.builder();
+
+ // Now scan through all the clusters
+ for (TopologyCluster cluster : clusters.values()) {
+ int i = cluster.id().index();
+
+ // Scan through all the cluster vertexes.
+ for (TopologyVertex vertex : clusterResults.clusterVertexes().get(i)) {
+ devicesBuilder.put(cluster, vertex.deviceId());
+ clusterBuilder.put(vertex.deviceId(), cluster);
+ }
+
+ // Scan through all the cluster edges.
+ for (TopologyEdge edge : clusterResults.clusterEdges().get(i)) {
+ linksBuilder.put(cluster, edge.link());
+ }
+ }
+
+ // Finalize all indexes.
+ clustersByDevice = clusterBuilder.build();
+ devicesByCluster = devicesBuilder.build();
+ linksByCluster = linksBuilder.build();
+ }
+
+ // Link weight for measuring link cost as hop count with indirect links
+ // being as expensive as traversing the entire graph to assume the worst.
+ private static class HopCountLinkWeight implements LinkWeight {
+ private final int indirectLinkCost;
+
+ HopCountLinkWeight(int indirectLinkCost) {
+ this.indirectLinkCost = indirectLinkCost;
+ }
+
+ @Override
+ public double weight(TopologyEdge edge) {
+ // To force preference to use direct paths first, make indirect
+ // links as expensive as the linear vertex traversal.
+ return edge.link().type() == INDIRECT ? indirectLinkCost : 1;
+ }
+ }
+
+ // Link weight for preventing traversal over indirect links.
+ private static class NoIndirectLinksWeight implements LinkWeight {
+ @Override
+ public double weight(TopologyEdge edge) {
+ return edge.link().type() == INDIRECT ? -1 : 1;
+ }
+ }
+
+ @Override
+ public String toString() {
+ return toStringHelper(this)
+ .add("time", time)
+ .add("clusters", clusterCount())
+ .add("devices", deviceCount())
+ .add("links", linkCount())
+ .add("pathCount", pathCount())
+ .toString();
+ }
+}
diff --git a/core/store/hz/net/src/main/java/org/onlab/onos/store/topology/impl/DefaultTopologyGraph.java b/core/store/hz/net/src/main/java/org/onlab/onos/store/topology/impl/DefaultTopologyGraph.java
new file mode 100644
index 0000000..945ba05
--- /dev/null
+++ b/core/store/hz/net/src/main/java/org/onlab/onos/store/topology/impl/DefaultTopologyGraph.java
@@ -0,0 +1,28 @@
+package org.onlab.onos.store.topology.impl;
+
+import org.onlab.graph.AdjacencyListsGraph;
+import org.onlab.onos.net.topology.TopologyEdge;
+import org.onlab.onos.net.topology.TopologyGraph;
+import org.onlab.onos.net.topology.TopologyVertex;
+
+import java.util.Set;
+
+/**
+ * Default implementation of an immutable topology graph based on a generic
+ * implementation of adjacency lists graph.
+ */
+public class DefaultTopologyGraph
+ extends AdjacencyListsGraph<TopologyVertex, TopologyEdge>
+ implements TopologyGraph {
+
+ /**
+ * Creates a topology graph comprising of the specified vertexes and edges.
+ *
+ * @param vertexes set of graph vertexes
+ * @param edges set of graph edges
+ */
+ public DefaultTopologyGraph(Set<TopologyVertex> vertexes, Set<TopologyEdge> edges) {
+ super(vertexes, edges);
+ }
+
+}
diff --git a/core/store/hz/net/src/main/java/org/onlab/onos/store/topology/impl/DistributedTopologyStore.java b/core/store/hz/net/src/main/java/org/onlab/onos/store/topology/impl/DistributedTopologyStore.java
new file mode 100644
index 0000000..567861e
--- /dev/null
+++ b/core/store/hz/net/src/main/java/org/onlab/onos/store/topology/impl/DistributedTopologyStore.java
@@ -0,0 +1,141 @@
+package org.onlab.onos.store.topology.impl;
+
+import static org.slf4j.LoggerFactory.getLogger;
+
+import java.util.List;
+import java.util.Set;
+
+import org.apache.felix.scr.annotations.Activate;
+import org.apache.felix.scr.annotations.Component;
+import org.apache.felix.scr.annotations.Deactivate;
+import org.apache.felix.scr.annotations.Service;
+import org.onlab.onos.event.Event;
+import org.onlab.onos.net.ConnectPoint;
+import org.onlab.onos.net.DeviceId;
+import org.onlab.onos.net.Link;
+import org.onlab.onos.net.Path;
+import org.onlab.onos.net.provider.ProviderId;
+import org.onlab.onos.net.topology.ClusterId;
+import org.onlab.onos.net.topology.GraphDescription;
+import org.onlab.onos.net.topology.LinkWeight;
+import org.onlab.onos.net.topology.Topology;
+import org.onlab.onos.net.topology.TopologyCluster;
+import org.onlab.onos.net.topology.TopologyEvent;
+import org.onlab.onos.net.topology.TopologyGraph;
+import org.onlab.onos.net.topology.TopologyStore;
+import org.onlab.onos.net.topology.TopologyStoreDelegate;
+import org.onlab.onos.store.AbstractStore;
+import org.slf4j.Logger;
+
+/**
+ * Manages inventory of topology snapshots using a trivial in-memory
+ * structure implementation.
+ */
+//FIXME: I LIE I AM NOT DISTRIBUTED
+@Component(immediate = true)
+@Service
+public class DistributedTopologyStore
+extends AbstractStore<TopologyEvent, TopologyStoreDelegate>
+implements TopologyStore {
+
+ private final Logger log = getLogger(getClass());
+
+ private volatile DefaultTopology current;
+
+ @Activate
+ public void activate() {
+ log.info("Started");
+ }
+
+ @Deactivate
+ public void deactivate() {
+ log.info("Stopped");
+ }
+ @Override
+ public Topology currentTopology() {
+ return current;
+ }
+
+ @Override
+ public boolean isLatest(Topology topology) {
+ // Topology is current only if it is the same as our current topology
+ return topology == current;
+ }
+
+ @Override
+ public TopologyGraph getGraph(Topology topology) {
+ return defaultTopology(topology).getGraph();
+ }
+
+ @Override
+ public Set<TopologyCluster> getClusters(Topology topology) {
+ return defaultTopology(topology).getClusters();
+ }
+
+ @Override
+ public TopologyCluster getCluster(Topology topology, ClusterId clusterId) {
+ return defaultTopology(topology).getCluster(clusterId);
+ }
+
+ @Override
+ public Set<DeviceId> getClusterDevices(Topology topology, TopologyCluster cluster) {
+ return defaultTopology(topology).getClusterDevices(cluster);
+ }
+
+ @Override
+ public Set<Link> getClusterLinks(Topology topology, TopologyCluster cluster) {
+ return defaultTopology(topology).getClusterLinks(cluster);
+ }
+
+ @Override
+ public Set<Path> getPaths(Topology topology, DeviceId src, DeviceId dst) {
+ return defaultTopology(topology).getPaths(src, dst);
+ }
+
+ @Override
+ public Set<Path> getPaths(Topology topology, DeviceId src, DeviceId dst,
+ LinkWeight weight) {
+ return defaultTopology(topology).getPaths(src, dst, weight);
+ }
+
+ @Override
+ public boolean isInfrastructure(Topology topology, ConnectPoint connectPoint) {
+ return defaultTopology(topology).isInfrastructure(connectPoint);
+ }
+
+ @Override
+ public boolean isBroadcastPoint(Topology topology, ConnectPoint connectPoint) {
+ return defaultTopology(topology).isBroadcastPoint(connectPoint);
+ }
+
+ @Override
+ public TopologyEvent updateTopology(ProviderId providerId,
+ GraphDescription graphDescription,
+ List<Event> reasons) {
+ // First off, make sure that what we're given is indeed newer than
+ // what we already have.
+ if (current != null && graphDescription.timestamp() < current.time()) {
+ return null;
+ }
+
+ // Have the default topology construct self from the description data.
+ DefaultTopology newTopology =
+ new DefaultTopology(providerId, graphDescription);
+
+ // Promote the new topology to current and return a ready-to-send event.
+ synchronized (this) {
+ current = newTopology;
+ return new TopologyEvent(TopologyEvent.Type.TOPOLOGY_CHANGED, current);
+ }
+ }
+
+ // Validates the specified topology and returns it as a default topology.
+ private DefaultTopology defaultTopology(Topology topology) {
+ if (topology instanceof DefaultTopology) {
+ return (DefaultTopology) topology;
+ }
+ throw new IllegalArgumentException("Topology class " + topology.getClass() +
+ " not supported");
+ }
+
+}
diff --git a/core/store/hz/net/src/main/java/org/onlab/onos/store/topology/impl/PathKey.java b/core/store/hz/net/src/main/java/org/onlab/onos/store/topology/impl/PathKey.java
new file mode 100644
index 0000000..60736b9
--- /dev/null
+++ b/core/store/hz/net/src/main/java/org/onlab/onos/store/topology/impl/PathKey.java
@@ -0,0 +1,40 @@
+package org.onlab.onos.store.topology.impl;
+
+import org.onlab.onos.net.DeviceId;
+
+import java.util.Objects;
+
+/**
+ * Key for filing pre-computed paths between source and destination devices.
+ */
+class PathKey {
+ private final DeviceId src;
+ private final DeviceId dst;
+
+ /**
+ * Creates a path key from the given source/dest pair.
+ * @param src source device
+ * @param dst destination device
+ */
+ PathKey(DeviceId src, DeviceId dst) {
+ this.src = src;
+ this.dst = dst;
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(src, dst);
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (this == obj) {
+ return true;
+ }
+ if (obj instanceof PathKey) {
+ final PathKey other = (PathKey) obj;
+ return Objects.equals(this.src, other.src) && Objects.equals(this.dst, other.dst);
+ }
+ return false;
+ }
+}
diff --git a/core/store/src/test/java/org/onlab/onos/store/device/impl/DistributedDeviceStoreTest.java b/core/store/hz/net/src/test/java/org/onlab/onos/store/device/impl/DistributedDeviceStoreTest.java
similarity index 97%
rename from core/store/src/test/java/org/onlab/onos/store/device/impl/DistributedDeviceStoreTest.java
rename to core/store/hz/net/src/test/java/org/onlab/onos/store/device/impl/DistributedDeviceStoreTest.java
index d7494be..7385778 100644
--- a/core/store/src/test/java/org/onlab/onos/store/device/impl/DistributedDeviceStoreTest.java
+++ b/core/store/hz/net/src/test/java/org/onlab/onos/store/device/impl/DistributedDeviceStoreTest.java
@@ -20,6 +20,7 @@
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
+import org.junit.Ignore;
import org.junit.Test;
import org.onlab.onos.net.Device;
import org.onlab.onos.net.DeviceId;
@@ -32,15 +33,18 @@
import org.onlab.onos.net.device.DeviceStoreDelegate;
import org.onlab.onos.net.device.PortDescription;
import org.onlab.onos.net.provider.ProviderId;
+import org.onlab.onos.store.common.StoreManager;
import org.onlab.onos.store.common.StoreService;
-import org.onlab.onos.store.impl.StoreManager;
-import org.onlab.onos.store.impl.TestStoreManager;
+import org.onlab.onos.store.common.TestStoreManager;
import com.google.common.collect.Iterables;
import com.google.common.collect.Sets;
import com.hazelcast.config.Config;
import com.hazelcast.core.Hazelcast;
+/**
+ * Test of the Hazelcast-based distributed DeviceStore implementation.
+ */
public class DistributedDeviceStoreTest {
private static final ProviderId PID = new ProviderId("of", "foo");
@@ -326,6 +330,7 @@
}
// TODO add test for Port events when we have them
+ @Ignore("Ignore until Delegate spec. is clear.")
@Test
public final void testEvents() throws InterruptedException {
final CountDownLatch addLatch = new CountDownLatch(1);
diff --git a/core/store/src/test/java/org/onlab/onos/store/link/impl/DistributedLinkStoreTest.java b/core/store/hz/net/src/test/java/org/onlab/onos/store/link/impl/DistributedLinkStoreTest.java
similarity index 97%
rename from core/store/src/test/java/org/onlab/onos/store/link/impl/DistributedLinkStoreTest.java
rename to core/store/hz/net/src/test/java/org/onlab/onos/store/link/impl/DistributedLinkStoreTest.java
index 41853f6..151d978 100644
--- a/core/store/src/test/java/org/onlab/onos/store/link/impl/DistributedLinkStoreTest.java
+++ b/core/store/hz/net/src/test/java/org/onlab/onos/store/link/impl/DistributedLinkStoreTest.java
@@ -15,6 +15,7 @@
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
+import org.junit.Ignore;
import org.junit.Test;
import org.onlab.onos.net.ConnectPoint;
import org.onlab.onos.net.DeviceId;
@@ -26,24 +27,22 @@
import org.onlab.onos.net.link.LinkEvent;
import org.onlab.onos.net.link.LinkStoreDelegate;
import org.onlab.onos.net.provider.ProviderId;
+import org.onlab.onos.store.common.StoreManager;
import org.onlab.onos.store.common.StoreService;
-import org.onlab.onos.store.impl.StoreManager;
-import org.onlab.onos.store.impl.TestStoreManager;
+import org.onlab.onos.store.common.TestStoreManager;
import com.google.common.collect.Iterables;
import com.hazelcast.config.Config;
import com.hazelcast.core.Hazelcast;
+/**
+ * Test of the Hazelcast-based distributed LinkStore implementation.
+ */
public class DistributedLinkStoreTest {
private static final ProviderId PID = new ProviderId("of", "foo");
private static final DeviceId DID1 = deviceId("of:foo");
private static final DeviceId DID2 = deviceId("of:bar");
-// private static final String MFR = "whitebox";
-// private static final String HW = "1.1.x";
-// private static final String SW1 = "3.8.1";
-// private static final String SW2 = "3.9.5";
-// private static final String SN = "43311-12345";
private static final PortNumber P1 = PortNumber.portNumber(1);
private static final PortNumber P2 = PortNumber.portNumber(2);
@@ -302,6 +301,7 @@
assertLink(linkId2, DIRECT, linkStore.getLink(d2P2, d1P1));
}
+ @Ignore("Ignore until Delegate spec. is clear.")
@Test
public final void testEvents() throws InterruptedException {
diff --git a/core/store/hz/pom.xml b/core/store/hz/pom.xml
new file mode 100644
index 0000000..d6aa1fe
--- /dev/null
+++ b/core/store/hz/pom.xml
@@ -0,0 +1,51 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+
+ <parent>
+ <groupId>org.onlab.onos</groupId>
+ <artifactId>onos-core-store</artifactId>
+ <version>1.0.0-SNAPSHOT</version>
+ <relativePath>../pom.xml</relativePath>
+ </parent>
+
+ <artifactId>onos-core-hz</artifactId>
+ <packaging>pom</packaging>
+
+ <description>ONOS Core Hazelcast Store subsystem</description>
+
+ <modules>
+ <module>common</module>
+ <module>cluster</module>
+ <module>net</module>
+ </modules>
+
+ <dependencies>
+ <dependency>
+ <groupId>com.google.guava</groupId>
+ <artifactId>guava</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.onlab.onos</groupId>
+ <artifactId>onlab-misc</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.onlab.onos</groupId>
+ <artifactId>onlab-junit</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>com.hazelcast</groupId>
+ <artifactId>hazelcast</artifactId>
+ </dependency>
+ </dependencies>
+
+ <build>
+ <plugins>
+ <plugin>
+ <groupId>org.apache.felix</groupId>
+ <artifactId>maven-bundle-plugin</artifactId>
+ </plugin>
+ </plugins>
+ </build>
+
+</project>
diff --git a/core/store/pom.xml b/core/store/pom.xml
index 246355c..b94b4fe 100644
--- a/core/store/pom.xml
+++ b/core/store/pom.xml
@@ -1,7 +1,5 @@
<?xml version="1.0" encoding="UTF-8"?>
-<project xmlns="http://maven.apache.org/POM/4.0.0"
- xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
- xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
@@ -12,34 +10,41 @@
</parent>
<artifactId>onos-core-store</artifactId>
- <packaging>bundle</packaging>
+ <packaging>pom</packaging>
- <description>ONOS distributed store subsystems</description>
+ <description>ONOS Core Store subsystem</description>
+
+ <modules>
+ <module>trivial</module>
+ <module>dist</module>
+ <module>hz</module>
+ <module>serializers</module>
+ </modules>
<dependencies>
<dependency>
- <groupId>org.onlab.onos</groupId>
- <artifactId>onos-api</artifactId>
+ <groupId>com.google.guava</groupId>
+ <artifactId>guava</artifactId>
</dependency>
<dependency>
- <groupId>org.apache.felix</groupId>
- <artifactId>org.apache.felix.scr.annotations</artifactId>
+ <groupId>org.onlab.onos</groupId>
+ <artifactId>onlab-misc</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.onlab.onos</groupId>
+ <artifactId>onlab-junit</artifactId>
</dependency>
<dependency>
<groupId>com.hazelcast</groupId>
<artifactId>hazelcast</artifactId>
</dependency>
- <dependency>
- <groupId>de.javakaffee</groupId>
- <artifactId>kryo-serializers</artifactId>
- </dependency>
</dependencies>
<build>
<plugins>
<plugin>
<groupId>org.apache.felix</groupId>
- <artifactId>maven-scr-plugin</artifactId>
+ <artifactId>maven-bundle-plugin</artifactId>
</plugin>
</plugins>
</build>
diff --git a/core/trivial/pom.xml b/core/store/serializers/pom.xml
similarity index 77%
copy from core/trivial/pom.xml
copy to core/store/serializers/pom.xml
index 1806ba4..f222a23 100644
--- a/core/trivial/pom.xml
+++ b/core/store/serializers/pom.xml
@@ -6,15 +6,15 @@
<parent>
<groupId>org.onlab.onos</groupId>
- <artifactId>onos-core</artifactId>
+ <artifactId>onos-core-store</artifactId>
<version>1.0.0-SNAPSHOT</version>
<relativePath>../pom.xml</relativePath>
</parent>
- <artifactId>onos-core-trivial</artifactId>
+ <artifactId>onos-core-serializers</artifactId>
<packaging>bundle</packaging>
- <description>ONOS network control trivial implementations of core subsystems</description>
+ <description>Serializers for ONOS classes</description>
<dependencies>
<dependency>
@@ -25,6 +25,10 @@
<groupId>org.apache.felix</groupId>
<artifactId>org.apache.felix.scr.annotations</artifactId>
</dependency>
+ <dependency>
+ <groupId>de.javakaffee</groupId>
+ <artifactId>kryo-serializers</artifactId>
+ </dependency>
</dependencies>
<build>
diff --git a/core/store/src/main/java/org/onlab/onos/store/serializers/ConnectPointSerializer.java b/core/store/serializers/src/main/java/org/onlab/onos/store/serializers/ConnectPointSerializer.java
similarity index 100%
rename from core/store/src/main/java/org/onlab/onos/store/serializers/ConnectPointSerializer.java
rename to core/store/serializers/src/main/java/org/onlab/onos/store/serializers/ConnectPointSerializer.java
diff --git a/core/store/src/main/java/org/onlab/onos/store/serializers/DefaultLinkSerializer.java b/core/store/serializers/src/main/java/org/onlab/onos/store/serializers/DefaultLinkSerializer.java
similarity index 100%
rename from core/store/src/main/java/org/onlab/onos/store/serializers/DefaultLinkSerializer.java
rename to core/store/serializers/src/main/java/org/onlab/onos/store/serializers/DefaultLinkSerializer.java
diff --git a/core/store/src/main/java/org/onlab/onos/store/serializers/DefaultPortSerializer.java b/core/store/serializers/src/main/java/org/onlab/onos/store/serializers/DefaultPortSerializer.java
similarity index 100%
rename from core/store/src/main/java/org/onlab/onos/store/serializers/DefaultPortSerializer.java
rename to core/store/serializers/src/main/java/org/onlab/onos/store/serializers/DefaultPortSerializer.java
diff --git a/core/store/src/main/java/org/onlab/onos/store/serializers/DeviceIdSerializer.java b/core/store/serializers/src/main/java/org/onlab/onos/store/serializers/DeviceIdSerializer.java
similarity index 100%
rename from core/store/src/main/java/org/onlab/onos/store/serializers/DeviceIdSerializer.java
rename to core/store/serializers/src/main/java/org/onlab/onos/store/serializers/DeviceIdSerializer.java
diff --git a/core/store/src/main/java/org/onlab/onos/store/serializers/IpPrefixSerializer.java b/core/store/serializers/src/main/java/org/onlab/onos/store/serializers/IpPrefixSerializer.java
similarity index 100%
rename from core/store/src/main/java/org/onlab/onos/store/serializers/IpPrefixSerializer.java
rename to core/store/serializers/src/main/java/org/onlab/onos/store/serializers/IpPrefixSerializer.java
diff --git a/core/store/src/main/java/org/onlab/onos/store/serializers/LinkKeySerializer.java b/core/store/serializers/src/main/java/org/onlab/onos/store/serializers/LinkKeySerializer.java
similarity index 100%
rename from core/store/src/main/java/org/onlab/onos/store/serializers/LinkKeySerializer.java
rename to core/store/serializers/src/main/java/org/onlab/onos/store/serializers/LinkKeySerializer.java
diff --git a/core/store/src/main/java/org/onlab/onos/store/serializers/NodeIdSerializer.java b/core/store/serializers/src/main/java/org/onlab/onos/store/serializers/NodeIdSerializer.java
similarity index 100%
rename from core/store/src/main/java/org/onlab/onos/store/serializers/NodeIdSerializer.java
rename to core/store/serializers/src/main/java/org/onlab/onos/store/serializers/NodeIdSerializer.java
diff --git a/core/store/src/main/java/org/onlab/onos/store/serializers/PortNumberSerializer.java b/core/store/serializers/src/main/java/org/onlab/onos/store/serializers/PortNumberSerializer.java
similarity index 100%
rename from core/store/src/main/java/org/onlab/onos/store/serializers/PortNumberSerializer.java
rename to core/store/serializers/src/main/java/org/onlab/onos/store/serializers/PortNumberSerializer.java
diff --git a/core/store/src/main/java/org/onlab/onos/store/serializers/ProviderIdSerializer.java b/core/store/serializers/src/main/java/org/onlab/onos/store/serializers/ProviderIdSerializer.java
similarity index 100%
rename from core/store/src/main/java/org/onlab/onos/store/serializers/ProviderIdSerializer.java
rename to core/store/serializers/src/main/java/org/onlab/onos/store/serializers/ProviderIdSerializer.java
diff --git a/core/store/src/main/java/org/onlab/onos/store/serializers/package-info.java b/core/store/serializers/src/main/java/org/onlab/onos/store/serializers/package-info.java
similarity index 100%
rename from core/store/src/main/java/org/onlab/onos/store/serializers/package-info.java
rename to core/store/serializers/src/main/java/org/onlab/onos/store/serializers/package-info.java
diff --git a/core/store/src/main/java/org/onlab/onos/store/impl/AbstractDistributedStore.java b/core/store/src/main/java/org/onlab/onos/store/impl/AbstractDistributedStore.java
deleted file mode 100644
index eb795a8..0000000
--- a/core/store/src/main/java/org/onlab/onos/store/impl/AbstractDistributedStore.java
+++ /dev/null
@@ -1,144 +0,0 @@
-package org.onlab.onos.store.impl;
-
-import com.google.common.base.Optional;
-import com.google.common.cache.LoadingCache;
-import com.hazelcast.core.EntryAdapter;
-import com.hazelcast.core.EntryEvent;
-import com.hazelcast.core.HazelcastInstance;
-import com.hazelcast.core.MapEvent;
-import org.apache.felix.scr.annotations.Activate;
-import org.apache.felix.scr.annotations.Component;
-import org.apache.felix.scr.annotations.Reference;
-import org.apache.felix.scr.annotations.ReferenceCardinality;
-import org.onlab.onos.event.Event;
-import org.onlab.onos.store.AbstractStore;
-import org.onlab.onos.store.StoreDelegate;
-import org.onlab.onos.store.common.StoreService;
-import org.slf4j.Logger;
-
-import static com.google.common.base.Preconditions.checkNotNull;
-import static org.slf4j.LoggerFactory.getLogger;
-
-/**
- * Abstraction of a distributed store based on Hazelcast.
- */
-@Component(componentAbstract = true)
-public abstract class AbstractDistributedStore<E extends Event, D extends StoreDelegate<E>>
- extends AbstractStore<E, D> {
-
- protected final Logger log = getLogger(getClass());
-
- @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
- protected StoreService storeService;
-
- protected HazelcastInstance theInstance;
-
- @Activate
- public void activate() {
- theInstance = storeService.getHazelcastInstance();
- }
-
- /**
- * Serializes the specified object using the backing store service.
- *
- * @param obj object to be serialized
- * @return serialized object
- */
- protected byte[] serialize(Object obj) {
- return storeService.serialize(obj);
- }
-
- /**
- * Deserializes the specified object using the backing store service.
- *
- * @param bytes bytes to be deserialized
- * @param <T> type of object
- * @return deserialized object
- */
- protected <T> T deserialize(byte[] bytes) {
- return storeService.deserialize(bytes);
- }
-
-
- /**
- * An IMap entry listener, which reflects each remote event to the cache.
- *
- * @param <K> IMap key type after deserialization
- * @param <V> IMap value type after deserialization
- */
- public class RemoteEventHandler<K, V> extends EntryAdapter<byte[], byte[]> {
-
- private LoadingCache<K, Optional<V>> cache;
-
- /**
- * Constructor.
- *
- * @param cache cache to update
- */
- public RemoteEventHandler(LoadingCache<K, Optional<V>> cache) {
- this.cache = checkNotNull(cache);
- }
-
- @Override
- public void mapCleared(MapEvent event) {
- cache.invalidateAll();
- }
-
- @Override
- public void entryAdded(EntryEvent<byte[], byte[]> event) {
- K key = deserialize(event.getKey());
- V newVal = deserialize(event.getValue());
- Optional<V> newValue = Optional.of(newVal);
- cache.asMap().putIfAbsent(key, newValue);
- onAdd(key, newVal);
- }
-
- @Override
- public void entryUpdated(EntryEvent<byte[], byte[]> event) {
- K key = deserialize(event.getKey());
- V oldVal = deserialize(event.getOldValue());
- Optional<V> oldValue = Optional.fromNullable(oldVal);
- V newVal = deserialize(event.getValue());
- Optional<V> newValue = Optional.of(newVal);
- cache.asMap().replace(key, oldValue, newValue);
- onUpdate(key, oldVal, newVal);
- }
-
- @Override
- public void entryRemoved(EntryEvent<byte[], byte[]> event) {
- K key = deserialize(event.getKey());
- V val = deserialize(event.getOldValue());
- cache.invalidate(key);
- onRemove(key, val);
- }
-
- /**
- * Cache entry addition hook.
- *
- * @param key new key
- * @param newVal new value
- */
- protected void onAdd(K key, V newVal) {
- }
-
- /**
- * Cache entry update hook.
- *
- * @param key new key
- * @param oldValue old value
- * @param newVal new value
- */
- protected void onUpdate(K key, V oldValue, V newVal) {
- }
-
- /**
- * Cache entry remove hook.
- *
- * @param key new key
- * @param val old value
- */
- protected void onRemove(K key, V val) {
- }
- }
-
-}
diff --git a/core/trivial/pom.xml b/core/store/trivial/pom.xml
similarity index 95%
rename from core/trivial/pom.xml
rename to core/store/trivial/pom.xml
index 1806ba4..40016d4 100644
--- a/core/trivial/pom.xml
+++ b/core/store/trivial/pom.xml
@@ -6,7 +6,7 @@
<parent>
<groupId>org.onlab.onos</groupId>
- <artifactId>onos-core</artifactId>
+ <artifactId>onos-core-store</artifactId>
<version>1.0.0-SNAPSHOT</version>
<relativePath>../pom.xml</relativePath>
</parent>
diff --git a/core/trivial/src/main/java/org/onlab/onos/net/trivial/impl/DefaultTopology.java b/core/store/trivial/src/main/java/org/onlab/onos/net/trivial/impl/DefaultTopology.java
similarity index 100%
rename from core/trivial/src/main/java/org/onlab/onos/net/trivial/impl/DefaultTopology.java
rename to core/store/trivial/src/main/java/org/onlab/onos/net/trivial/impl/DefaultTopology.java
diff --git a/core/trivial/src/main/java/org/onlab/onos/net/trivial/impl/DefaultTopologyGraph.java b/core/store/trivial/src/main/java/org/onlab/onos/net/trivial/impl/DefaultTopologyGraph.java
similarity index 100%
rename from core/trivial/src/main/java/org/onlab/onos/net/trivial/impl/DefaultTopologyGraph.java
rename to core/store/trivial/src/main/java/org/onlab/onos/net/trivial/impl/DefaultTopologyGraph.java
diff --git a/core/store/trivial/src/main/java/org/onlab/onos/net/trivial/impl/NoOpClockService.java b/core/store/trivial/src/main/java/org/onlab/onos/net/trivial/impl/NoOpClockService.java
new file mode 100644
index 0000000..88fcddf
--- /dev/null
+++ b/core/store/trivial/src/main/java/org/onlab/onos/net/trivial/impl/NoOpClockService.java
@@ -0,0 +1,32 @@
+package org.onlab.onos.net.trivial.impl;
+
+import org.apache.felix.scr.annotations.Component;
+import org.apache.felix.scr.annotations.Service;
+import org.onlab.onos.cluster.MastershipTerm;
+import org.onlab.onos.net.DeviceId;
+import org.onlab.onos.store.ClockService;
+import org.onlab.onos.store.Timestamp;
+
+//FIXME: Code clone in onos-core-trivial, onos-core-hz-net
+/**
+ * Dummy implementation of {@link ClockService}.
+ */
+@Component(immediate = true)
+@Service
+public class NoOpClockService implements ClockService {
+
+ @Override
+ public Timestamp getTimestamp(DeviceId deviceId) {
+ return new Timestamp() {
+
+ @Override
+ public int compareTo(Timestamp o) {
+ throw new IllegalStateException("Never expected to be used.");
+ }
+ };
+ }
+
+ @Override
+ public void setMastershipTerm(DeviceId deviceId, MastershipTerm term) {
+ }
+}
diff --git a/core/trivial/src/main/java/org/onlab/onos/net/trivial/impl/PathKey.java b/core/store/trivial/src/main/java/org/onlab/onos/net/trivial/impl/PathKey.java
similarity index 100%
rename from core/trivial/src/main/java/org/onlab/onos/net/trivial/impl/PathKey.java
rename to core/store/trivial/src/main/java/org/onlab/onos/net/trivial/impl/PathKey.java
diff --git a/core/trivial/src/main/java/org/onlab/onos/net/trivial/impl/SimpleClusterStore.java b/core/store/trivial/src/main/java/org/onlab/onos/net/trivial/impl/SimpleClusterStore.java
similarity index 91%
rename from core/trivial/src/main/java/org/onlab/onos/net/trivial/impl/SimpleClusterStore.java
rename to core/store/trivial/src/main/java/org/onlab/onos/net/trivial/impl/SimpleClusterStore.java
index d348d2f..2208c86 100644
--- a/core/trivial/src/main/java/org/onlab/onos/net/trivial/impl/SimpleClusterStore.java
+++ b/core/store/trivial/src/main/java/org/onlab/onos/net/trivial/impl/SimpleClusterStore.java
@@ -20,7 +20,7 @@
import static org.slf4j.LoggerFactory.getLogger;
/**
- * Manages inventory of infrastructure DEVICES using trivial in-memory
+ * Manages inventory of infrastructure devices using trivial in-memory
* structures implementation.
*/
@Component(immediate = true)
@@ -68,6 +68,11 @@
}
@Override
+ public ControllerNode addNode(NodeId nodeId, IpPrefix ip, int tcpPort) {
+ return null;
+ }
+
+ @Override
public void removeNode(NodeId nodeId) {
}
diff --git a/core/trivial/src/main/java/org/onlab/onos/net/trivial/impl/SimpleDeviceStore.java b/core/store/trivial/src/main/java/org/onlab/onos/net/trivial/impl/SimpleDeviceStore.java
similarity index 97%
rename from core/trivial/src/main/java/org/onlab/onos/net/trivial/impl/SimpleDeviceStore.java
rename to core/store/trivial/src/main/java/org/onlab/onos/net/trivial/impl/SimpleDeviceStore.java
index 15dba06..7c7d38f 100644
--- a/core/trivial/src/main/java/org/onlab/onos/net/trivial/impl/SimpleDeviceStore.java
+++ b/core/store/trivial/src/main/java/org/onlab/onos/net/trivial/impl/SimpleDeviceStore.java
@@ -101,9 +101,6 @@
synchronized (this) {
devices.put(deviceId, device);
availableDevices.add(deviceId);
-
- // For now claim the device as a master automatically.
- // roles.put(deviceId, MastershipRole.MASTER);
}
return new DeviceEvent(DeviceEvent.Type.DEVICE_ADDED, device, null);
}
@@ -189,7 +186,7 @@
new DefaultPort(device, portDescription.portNumber(),
portDescription.isEnabled());
ports.put(port.number(), updatedPort);
- return new DeviceEvent(PORT_UPDATED, device, port);
+ return new DeviceEvent(PORT_UPDATED, device, updatedPort);
}
return null;
}
diff --git a/core/trivial/src/main/java/org/onlab/onos/net/trivial/impl/SimpleFlowRuleStore.java b/core/store/trivial/src/main/java/org/onlab/onos/net/trivial/impl/SimpleFlowRuleStore.java
similarity index 100%
rename from core/trivial/src/main/java/org/onlab/onos/net/trivial/impl/SimpleFlowRuleStore.java
rename to core/store/trivial/src/main/java/org/onlab/onos/net/trivial/impl/SimpleFlowRuleStore.java
diff --git a/core/trivial/src/main/java/org/onlab/onos/net/trivial/impl/SimpleHostStore.java b/core/store/trivial/src/main/java/org/onlab/onos/net/trivial/impl/SimpleHostStore.java
similarity index 100%
rename from core/trivial/src/main/java/org/onlab/onos/net/trivial/impl/SimpleHostStore.java
rename to core/store/trivial/src/main/java/org/onlab/onos/net/trivial/impl/SimpleHostStore.java
diff --git a/core/trivial/src/main/java/org/onlab/onos/net/trivial/impl/SimpleLinkStore.java b/core/store/trivial/src/main/java/org/onlab/onos/net/trivial/impl/SimpleLinkStore.java
similarity index 98%
rename from core/trivial/src/main/java/org/onlab/onos/net/trivial/impl/SimpleLinkStore.java
rename to core/store/trivial/src/main/java/org/onlab/onos/net/trivial/impl/SimpleLinkStore.java
index 17bbc88..319df89 100644
--- a/core/trivial/src/main/java/org/onlab/onos/net/trivial/impl/SimpleLinkStore.java
+++ b/core/store/trivial/src/main/java/org/onlab/onos/net/trivial/impl/SimpleLinkStore.java
@@ -51,8 +51,6 @@
private final Multimap<DeviceId, Link> srcLinks = HashMultimap.create();
private final Multimap<DeviceId, Link> dstLinks = HashMultimap.create();
- private static final Set<Link> EMPTY = ImmutableSet.of();
-
@Activate
public void activate() {
log.info("Started");
diff --git a/core/trivial/src/main/java/org/onlab/onos/net/trivial/impl/SimpleMastershipStore.java b/core/store/trivial/src/main/java/org/onlab/onos/net/trivial/impl/SimpleMastershipStore.java
similarity index 100%
rename from core/trivial/src/main/java/org/onlab/onos/net/trivial/impl/SimpleMastershipStore.java
rename to core/store/trivial/src/main/java/org/onlab/onos/net/trivial/impl/SimpleMastershipStore.java
diff --git a/core/trivial/src/main/java/org/onlab/onos/net/trivial/impl/SimpleTopologyStore.java b/core/store/trivial/src/main/java/org/onlab/onos/net/trivial/impl/SimpleTopologyStore.java
similarity index 100%
rename from core/trivial/src/main/java/org/onlab/onos/net/trivial/impl/SimpleTopologyStore.java
rename to core/store/trivial/src/main/java/org/onlab/onos/net/trivial/impl/SimpleTopologyStore.java
diff --git a/core/trivial/src/main/java/org/onlab/onos/net/trivial/impl/package-info.java b/core/store/trivial/src/main/java/org/onlab/onos/net/trivial/impl/package-info.java
similarity index 100%
rename from core/trivial/src/main/java/org/onlab/onos/net/trivial/impl/package-info.java
rename to core/store/trivial/src/main/java/org/onlab/onos/net/trivial/impl/package-info.java
diff --git a/core/trivial/src/test/java/org/onlab/onos/net/trivial/impl/DefaultTopologyTest.java b/core/store/trivial/src/test/java/org/onlab/onos/net/trivial/impl/DefaultTopologyTest.java
similarity index 100%
rename from core/trivial/src/test/java/org/onlab/onos/net/trivial/impl/DefaultTopologyTest.java
rename to core/store/trivial/src/test/java/org/onlab/onos/net/trivial/impl/DefaultTopologyTest.java
diff --git a/core/store/src/test/java/org/onlab/onos/store/device/impl/DistributedDeviceStoreTest.java b/core/store/trivial/src/test/java/org/onlab/onos/net/trivial/impl/SimpleDeviceStoreTest.java
similarity index 92%
copy from core/store/src/test/java/org/onlab/onos/store/device/impl/DistributedDeviceStoreTest.java
copy to core/store/trivial/src/test/java/org/onlab/onos/net/trivial/impl/SimpleDeviceStoreTest.java
index d7494be..f973d9b 100644
--- a/core/store/src/test/java/org/onlab/onos/store/device/impl/DistributedDeviceStoreTest.java
+++ b/core/store/trivial/src/test/java/org/onlab/onos/net/trivial/impl/SimpleDeviceStoreTest.java
@@ -1,7 +1,7 @@
/**
*
*/
-package org.onlab.onos.store.device.impl;
+package org.onlab.onos.net.trivial.impl;
import static org.junit.Assert.*;
import static org.onlab.onos.net.Device.Type.SWITCH;
@@ -20,6 +20,7 @@
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
+import org.junit.Ignore;
import org.junit.Test;
import org.onlab.onos.net.Device;
import org.onlab.onos.net.DeviceId;
@@ -29,19 +30,18 @@
import org.onlab.onos.net.device.DefaultPortDescription;
import org.onlab.onos.net.device.DeviceDescription;
import org.onlab.onos.net.device.DeviceEvent;
+import org.onlab.onos.net.device.DeviceStore;
import org.onlab.onos.net.device.DeviceStoreDelegate;
import org.onlab.onos.net.device.PortDescription;
import org.onlab.onos.net.provider.ProviderId;
-import org.onlab.onos.store.common.StoreService;
-import org.onlab.onos.store.impl.StoreManager;
-import org.onlab.onos.store.impl.TestStoreManager;
import com.google.common.collect.Iterables;
import com.google.common.collect.Sets;
-import com.hazelcast.config.Config;
-import com.hazelcast.core.Hazelcast;
-public class DistributedDeviceStoreTest {
+/**
+ * Test of the simple DeviceStore implementation.
+ */
+public class SimpleDeviceStoreTest {
private static final ProviderId PID = new ProviderId("of", "foo");
private static final DeviceId DID1 = deviceId("of:foo");
@@ -56,9 +56,9 @@
private static final PortNumber P2 = PortNumber.portNumber(2);
private static final PortNumber P3 = PortNumber.portNumber(3);
- private DistributedDeviceStore deviceStore;
+ private SimpleDeviceStore simpleDeviceStore;
+ private DeviceStore deviceStore;
- private StoreManager storeManager;
@BeforeClass
@@ -72,21 +72,14 @@
@Before
public void setUp() throws Exception {
- // TODO should find a way to clean Hazelcast instance without shutdown.
- Config config = TestStoreManager.getTestConfig();
-
- storeManager = new TestStoreManager(Hazelcast.newHazelcastInstance(config));
- storeManager.activate();
-
- deviceStore = new TestDistributedDeviceStore(storeManager);
- deviceStore.activate();
+ simpleDeviceStore = new SimpleDeviceStore();
+ simpleDeviceStore.activate();
+ deviceStore = simpleDeviceStore;
}
@After
public void tearDown() throws Exception {
- deviceStore.deactivate();
-
- storeManager.deactivate();
+ simpleDeviceStore.deactivate();
}
private void putDevice(DeviceId deviceId, String swVersion) {
@@ -325,7 +318,10 @@
assertEquals(1, deviceStore.getDeviceCount());
}
+ // If Delegates should be called only on remote events,
+ // then Simple* should never call them, thus no test is required.
// TODO add test for Port events when we have them
+ @Ignore("Ignore until Delegate spec. is clear.")
@Test
public final void testEvents() throws InterruptedException {
final CountDownLatch addLatch = new CountDownLatch(1);
@@ -377,10 +373,4 @@
deviceStore.removeDevice(DID1);
assertTrue("Remove event fired", removeLatch.await(1, TimeUnit.SECONDS));
}
-
- private class TestDistributedDeviceStore extends DistributedDeviceStore {
- public TestDistributedDeviceStore(StoreService storeService) {
- this.storeService = storeService;
- }
- }
}
diff --git a/core/store/src/test/java/org/onlab/onos/store/link/impl/DistributedLinkStoreTest.java b/core/store/trivial/src/test/java/org/onlab/onos/net/trivial/impl/SimpleLinkStoreTest.java
similarity index 89%
copy from core/store/src/test/java/org/onlab/onos/store/link/impl/DistributedLinkStoreTest.java
copy to core/store/trivial/src/test/java/org/onlab/onos/net/trivial/impl/SimpleLinkStoreTest.java
index 41853f6..50d0e47 100644
--- a/core/store/src/test/java/org/onlab/onos/store/link/impl/DistributedLinkStoreTest.java
+++ b/core/store/trivial/src/test/java/org/onlab/onos/net/trivial/impl/SimpleLinkStoreTest.java
@@ -1,4 +1,4 @@
-package org.onlab.onos.store.link.impl;
+package org.onlab.onos.net.trivial.impl;
import static org.junit.Assert.*;
import static org.onlab.onos.net.DeviceId.deviceId;
@@ -15,6 +15,7 @@
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
+import org.junit.Ignore;
import org.junit.Test;
import org.onlab.onos.net.ConnectPoint;
import org.onlab.onos.net.DeviceId;
@@ -24,34 +25,28 @@
import org.onlab.onos.net.Link.Type;
import org.onlab.onos.net.link.DefaultLinkDescription;
import org.onlab.onos.net.link.LinkEvent;
+import org.onlab.onos.net.link.LinkStore;
import org.onlab.onos.net.link.LinkStoreDelegate;
import org.onlab.onos.net.provider.ProviderId;
-import org.onlab.onos.store.common.StoreService;
-import org.onlab.onos.store.impl.StoreManager;
-import org.onlab.onos.store.impl.TestStoreManager;
import com.google.common.collect.Iterables;
-import com.hazelcast.config.Config;
-import com.hazelcast.core.Hazelcast;
-public class DistributedLinkStoreTest {
+/**
+ * Test of the simple LinkStore implementation.
+ */
+public class SimpleLinkStoreTest {
private static final ProviderId PID = new ProviderId("of", "foo");
private static final DeviceId DID1 = deviceId("of:foo");
private static final DeviceId DID2 = deviceId("of:bar");
-// private static final String MFR = "whitebox";
-// private static final String HW = "1.1.x";
-// private static final String SW1 = "3.8.1";
-// private static final String SW2 = "3.9.5";
-// private static final String SN = "43311-12345";
private static final PortNumber P1 = PortNumber.portNumber(1);
private static final PortNumber P2 = PortNumber.portNumber(2);
private static final PortNumber P3 = PortNumber.portNumber(3);
- private StoreManager storeManager;
- private DistributedLinkStore linkStore;
+ private SimpleLinkStore simpleLinkStore;
+ private LinkStore linkStore;
@BeforeClass
public static void setUpBeforeClass() throws Exception {
@@ -63,20 +58,14 @@
@Before
public void setUp() throws Exception {
- // TODO should find a way to clean Hazelcast instance without shutdown.
- Config config = TestStoreManager.getTestConfig();
-
- storeManager = new TestStoreManager(Hazelcast.newHazelcastInstance(config));
- storeManager.activate();
-
- linkStore = new TestDistributedLinkStore(storeManager);
- linkStore.activate();
+ simpleLinkStore = new SimpleLinkStore();
+ simpleLinkStore.activate();
+ linkStore = simpleLinkStore;
}
@After
public void tearDown() throws Exception {
- linkStore.deactivate();
- storeManager.deactivate();
+ simpleLinkStore.deactivate();
}
private void putLink(DeviceId srcId, PortNumber srcNum,
@@ -302,6 +291,9 @@
assertLink(linkId2, DIRECT, linkStore.getLink(d2P2, d1P1));
}
+ // If Delegates should be called only on remote events,
+ // then Simple* should never call them, thus no test is required.
+ @Ignore("Ignore until Delegate spec. is clear.")
@Test
public final void testEvents() throws InterruptedException {
@@ -351,11 +343,4 @@
linkStore.removeLink(d1P1, d2P2);
assertTrue("Remove event fired", removeLatch.await(1, TimeUnit.SECONDS));
}
-
-
- class TestDistributedLinkStore extends DistributedLinkStore {
- TestDistributedLinkStore(StoreService storeService) {
- this.storeService = storeService;
- }
- }
}
diff --git a/features/features.xml b/features/features.xml
index 2032aa1..c32bc3d 100644
--- a/features/features.xml
+++ b/features/features.xml
@@ -48,7 +48,17 @@
description="ONOS core components">
<feature>onos-api</feature>
<bundle>mvn:org.onlab.onos/onos-core-net/1.0.0-SNAPSHOT</bundle>
- <bundle>mvn:org.onlab.onos/onos-core-store/1.0.0-SNAPSHOT</bundle>
+ <bundle>mvn:org.onlab.onos/onos-core-dist/1.0.0-SNAPSHOT</bundle>
+ </feature>
+
+ <feature name="onos-core-hazelcast" version="1.0.0"
+ description="ONOS core components built on hazelcast">
+ <feature>onos-api</feature>
+ <bundle>mvn:org.onlab.onos/onos-core-net/1.0.0-SNAPSHOT</bundle>
+ <bundle>mvn:org.onlab.onos/onos-core-hz-common/1.0.0-SNAPSHOT</bundle>
+ <bundle>mvn:org.onlab.onos/onos-core-serializers/1.0.0-SNAPSHOT</bundle>
+ <bundle>mvn:org.onlab.onos/onos-core-hz-cluster/1.0.0-SNAPSHOT</bundle>
+ <bundle>mvn:org.onlab.onos/onos-core-hz-net/1.0.0-SNAPSHOT</bundle>
</feature>
<feature name="onos-core-trivial" version="1.0.0"
diff --git a/openflow/api/src/main/java/org/onlab/onos/openflow/controller/driver/AbstractOpenFlowSwitch.java b/openflow/api/src/main/java/org/onlab/onos/openflow/controller/driver/AbstractOpenFlowSwitch.java
index 393246b..69ddc71 100644
--- a/openflow/api/src/main/java/org/onlab/onos/openflow/controller/driver/AbstractOpenFlowSwitch.java
+++ b/openflow/api/src/main/java/org/onlab/onos/openflow/controller/driver/AbstractOpenFlowSwitch.java
@@ -93,12 +93,16 @@
@Override
public final void sendMsg(OFMessage m) {
- this.write(m);
+ if (role == RoleState.MASTER) {
+ this.write(m);
+ }
}
@Override
public final void sendMsg(List<OFMessage> msgs) {
- this.write(msgs);
+ if (role == RoleState.MASTER) {
+ this.write(msgs);
+ }
}
@Override
@@ -164,7 +168,9 @@
*/
@Override
public final void handleMessage(OFMessage m) {
- this.agent.processMessage(dpid, m);
+ if (this.role == RoleState.MASTER) {
+ this.agent.processMessage(dpid, m);
+ }
}
@Override
@@ -226,19 +232,34 @@
@Override
public abstract void processDriverHandshakeMessage(OFMessage m);
+
+ // Role Handling
+
@Override
public void setRole(RoleState role) {
try {
- log.info("Sending role {} to switch {}", role, getStringId());
if (this.roleMan.sendRoleRequest(role, RoleRecvStatus.MATCHED_SET_ROLE)) {
- this.role = role;
+ log.info("Sending role {} to switch {}", role, getStringId());
+ if (role == RoleState.SLAVE || role == RoleState.EQUAL) {
+ this.role = role;
+ }
}
} catch (IOException e) {
log.error("Unable to write to switch {}.", this.dpid);
}
}
- // Role Handling
+ @Override
+ public void reassertRole() {
+ if (this.getRole() == RoleState.MASTER) {
+ log.warn("Received permission error from switch {} while " +
+ "being master. Reasserting master role.",
+ this.getStringId());
+ this.setRole(RoleState.MASTER);
+ }
+ }
+
+
@Override
public void handleRole(OFMessage m) throws SwitchStateException {
@@ -246,11 +267,15 @@
RoleRecvStatus rrs = roleMan.deliverRoleReply(rri);
if (rrs == RoleRecvStatus.MATCHED_SET_ROLE) {
if (rri.getRole() == RoleState.MASTER) {
+ this.role = rri.getRole();
this.transitionToMasterSwitch();
} else if (rri.getRole() == RoleState.EQUAL ||
- rri.getRole() == RoleState.MASTER) {
+ rri.getRole() == RoleState.SLAVE) {
this.transitionToEqualSwitch();
}
+ } else {
+ return;
+ //TODO: tell people that we failed.
}
}
@@ -267,11 +292,15 @@
new RoleReplyInfo(r, null, m.getXid()));
if (rrs == RoleRecvStatus.MATCHED_SET_ROLE) {
if (r == RoleState.MASTER) {
+ this.role = r;
this.transitionToMasterSwitch();
} else if (r == RoleState.EQUAL ||
r == RoleState.SLAVE) {
this.transitionToEqualSwitch();
}
+ } else {
+ return;
+ //TODO: tell people that we failed.
}
}
@@ -285,12 +314,7 @@
return true;
}
- @Override
- public void reassertRole() {
- if (this.getRole() == RoleState.MASTER) {
- this.setRole(RoleState.MASTER);
- }
- }
+
@Override
public final void setAgent(OpenFlowAgent ag) {
diff --git a/openflow/ctl/src/main/java/org/onlab/onos/openflow/controller/impl/OFChannelHandler.java b/openflow/ctl/src/main/java/org/onlab/onos/openflow/controller/impl/OFChannelHandler.java
index 7850a58..75c139d 100644
--- a/openflow/ctl/src/main/java/org/onlab/onos/openflow/controller/impl/OFChannelHandler.java
+++ b/openflow/ctl/src/main/java/org/onlab/onos/openflow/controller/impl/OFChannelHandler.java
@@ -521,9 +521,7 @@
// if two controllers are master (even if its only for
// a brief period). We might need to see if these errors
// persist before we reassert
- log.warn("Received permission error from switch {} while" +
- "being master. Reasserting master role.",
- h.getSwitchInfoString());
+
h.sw.reassertRole();
} else if (m.getErrType() == OFErrorType.FLOW_MOD_FAILED &&
((OFFlowModFailedErrorMsg) m).getCode() ==
diff --git a/openflow/ctl/src/main/java/org/onlab/onos/openflow/controller/impl/RoleManager.java b/openflow/ctl/src/main/java/org/onlab/onos/openflow/controller/impl/RoleManager.java
index 7eb2445..b47bada 100644
--- a/openflow/ctl/src/main/java/org/onlab/onos/openflow/controller/impl/RoleManager.java
+++ b/openflow/ctl/src/main/java/org/onlab/onos/openflow/controller/impl/RoleManager.java
@@ -142,9 +142,9 @@
}
// OF1.0 switch with support for NX_ROLE_REQUEST vendor extn.
// make Role.EQUAL become Role.SLAVE
+ pendingRole = role;
role = (role == RoleState.EQUAL) ? RoleState.SLAVE : role;
pendingXid = sendNxRoleRequest(role);
- pendingRole = role;
requestPending = true;
} else {
// OF1.3 switch, use OFPT_ROLE_REQUEST message
diff --git a/tools/build/conf/pom.xml b/tools/build/conf/pom.xml
index 8607de3..c2ad09c 100644
--- a/tools/build/conf/pom.xml
+++ b/tools/build/conf/pom.xml
@@ -1,4 +1,7 @@
-<project>
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4_0_0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>org.onlab.tools</groupId>
<artifactId>onos-build-conf</artifactId>
diff --git a/tools/package/debian/onos.conf b/tools/package/debian/onos.conf
index 6d80502..842a937 100644
--- a/tools/package/debian/onos.conf
+++ b/tools/package/debian/onos.conf
@@ -15,7 +15,7 @@
pre-stop script
/opt/onos/bin/onos halt 2>/opt/onos/var/stderr.log
- sleep 3
+ sleep 2
end script
script
diff --git a/tools/test/bin/onos-config b/tools/test/bin/onos-config
index 9f1e3b0..4c4f7e1 100755
--- a/tools/test/bin/onos-config
+++ b/tools/test/bin/onos-config
@@ -8,7 +8,21 @@
remote=$ONOS_USER@${1:-$OCI}
+# Generate a cluster.json from the ON* environment variables
+CDEF_FILE=/tmp/cluster.json
+echo "{ \"nodes\":[" > $CDEF_FILE
+for node in $(env | sort | egrep "OC[2-9]+" | cut -d= -f2); do
+ echo " { \"id\": \"$node\", \"ip\": \"$node\", \"tcpPort\": 9876 }," >> $CDEF_FILE
+done
+echo " { \"id\": \"$OC1\", \"ip\": \"$OC1\", \"tcpPort\": 9876 }" >> $CDEF_FILE
+echo "]}" >> $CDEF_FILE
+
ssh $remote "
sudo perl -pi.bak -e \"s/ <interface>.*</ <interface>${ONOS_NIC:-192.168.56.*}</g\" \
$ONOS_INSTALL_DIR/$KARAF_DIST/etc/hazelcast.xml
-"
\ No newline at end of file
+
+ echo \"onos.ip = \$(ifconfig | grep $ONOS_NIC | cut -d: -f2 | cut -d\\ -f1)\" \
+ >> $ONOS_INSTALL_DIR/$KARAF_DIST/etc/system.properties
+"
+
+scp -q $CDEF_FILE $remote:$ONOS_INSTALL_DIR/config/
\ No newline at end of file
diff --git a/tools/test/bin/onos-install b/tools/test/bin/onos-install
index d594105..a87ff17 100755
--- a/tools/test/bin/onos-install
+++ b/tools/test/bin/onos-install
@@ -24,6 +24,7 @@
# Make a link to the log file directory and make a home for auxiliaries
ln -s $ONOS_INSTALL_DIR/$KARAF_DIST/data/log /opt/onos/log
mkdir $ONOS_INSTALL_DIR/var
+ mkdir $ONOS_INSTALL_DIR/config
# Install the upstart configuration file and setup options for debugging
sudo cp $ONOS_INSTALL_DIR/debian/onos.conf /etc/init/onos.conf
diff --git a/tools/test/bin/onos-start-network b/tools/test/bin/onos-start-network
new file mode 100755
index 0000000..c8245ab
--- /dev/null
+++ b/tools/test/bin/onos-start-network
@@ -0,0 +1,17 @@
+#!/bin/bash
+#-------------------------------------------------------------------------------
+# Copies topology files to the mininet VM and starts the ONOS test network.
+#-------------------------------------------------------------------------------
+
+[ ! -d "$ONOS_ROOT" ] && echo "ONOS_ROOT is not defined" >&2 && exit 1
+. $ONOS_ROOT/tools/build/envDefaults
+
+SSHCMD="ssh -o PasswordAuthentication=no"
+SCPCMD="scp -q -o PasswordAuthentication=no"
+
+echo "Copying topology files to mininet vm."
+$SSHCMD -n $ONOS_USER@$OCN mkdir -p topos
+$SCPCMD $ONOS_ROOT/tools/test/topos/* $ONOS_USER@$OCN:topos/
+
+echo "Starting Network."
+$SSHCMD -t $ONOS_USER@$OCN sudo python topos/sol.py $(env | sort | egrep "OC[0-9]+" | cut -d= -f2)
diff --git a/tools/test/cells/local b/tools/test/cells/local
index b04a5e3..6b9fea5 100644
--- a/tools/test/cells/local
+++ b/tools/test/cells/local
@@ -1,6 +1,8 @@
# Default virtual box ONOS instances 1,2 & ONOS mininet box
. $ONOS_ROOT/tools/test/cells/.reset
+export ONOS_NIC=192.168.56.*
+
export OC1="192.168.56.101"
export OC2="192.168.56.102"
diff --git a/tools/test/topos/sol.py b/tools/test/topos/sol.py
index ef99a9c..68f2d40 100644
--- a/tools/test/topos/sol.py
+++ b/tools/test/topos/sol.py
@@ -1,4 +1,4 @@
#!/usr/bin/python
import sys, solar
-topo = solar.Solar(cip=sys.argv[1])
+topo = solar.Solar(cips=sys.argv[1:])
topo.run()
diff --git a/tools/test/topos/solar.py b/tools/test/topos/solar.py
index d8e83a1..ea11b66 100644
--- a/tools/test/topos/solar.py
+++ b/tools/test/topos/solar.py
@@ -17,22 +17,22 @@
class Solar(object):
""" Create a tiered topology from semi-scratch in Mininet """
- def __init__(self, cname='onos', cip='192.168.56.1', islands=3, edges=2, hosts=2,
- proto=None):
+ def __init__(self, cname='onos', cips=['192.168.56.1'], islands=3, edges=2, hosts=2):
"""Create tower topology for mininet"""
# We are creating the controller with local-loopback on purpose to avoid
# having the switches connect immediately. Instead, we'll set controller
# explicitly for each switch after configuring it as we want.
- self.flare = RemoteController(cname, cip, 6633)
- self.net = Mininet(controller=self.flare, switch = OVSKernelSwitch,
+ self.ctrls = [ RemoteController(cname, cip, 6633) for cip in cips ]
+ self.net = Mininet(controller=RemoteController, switch = OVSKernelSwitch,
build=False)
- self.cip = cip
+ self.cips = cips
self.spines = []
self.leaves = []
self.hosts = []
- self.proto = proto
+ for ctrl in self.ctrls:
+ self.net.addController(ctrl)
# Create the two core switches and links between them
c1 = self.net.addSwitch('c1',dpid='1111000000000000')
@@ -83,29 +83,11 @@
def run(self):
""" Runs the created network topology and launches mininet cli"""
- self.run_silent()
+ self.net.build()
+ self.net.start()
CustomCLI(self.net)
self.net.stop()
- def run_silent(self):
- """ Runs silently - for unit testing """
- self.net.build()
-
- # Start the switches, configure them with desired protocols and only
- # then set the controller
- for sw in self.spines:
- sw.start([self.flare])
- if self.proto:
- sw.cmd('ovs-vsctl set bridge %(sw)s protocols=%(proto)s' % \
- { 'sw': sw.name, 'proto': self.proto})
- sw.cmdPrint('ovs-vsctl set-controller %(sw)s tcp:%(ctl)s:6633' % \
- {'sw': sw.name, 'ctl': self.cip})
-
- for sw in self.leaves:
- sw.start([self.flare])
- sw.cmdPrint('ovs-vsctl set-controller %(sw)s tcp:%(ctl)s:6633' % \
- {'sw': sw.name, 'ctl': self.cip})
-
def pingAll(self):
""" PingAll to create flows - for unit testing """
self.net.pingAll()
diff --git a/utils/misc/src/main/java/org/onlab/util/KryoPool.java b/utils/misc/src/main/java/org/onlab/util/KryoPool.java
index 58c268c..be662a6 100644
--- a/utils/misc/src/main/java/org/onlab/util/KryoPool.java
+++ b/utils/misc/src/main/java/org/onlab/util/KryoPool.java
@@ -1,5 +1,6 @@
package org.onlab.util;
+import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ConcurrentLinkedQueue;
@@ -8,6 +9,8 @@
import com.esotericsoftware.kryo.Kryo;
import com.esotericsoftware.kryo.Serializer;
+import com.esotericsoftware.kryo.io.ByteBufferInput;
+import com.esotericsoftware.kryo.io.ByteBufferOutput;
import com.esotericsoftware.kryo.io.Input;
import com.esotericsoftware.kryo.io.Output;
import com.google.common.collect.ImmutableList;
@@ -174,6 +177,22 @@
}
/**
+ * Serializes given object to byte buffer using Kryo instance in pool.
+ *
+ * @param obj Object to serialize
+ * @param buffer to write to
+ */
+ public void serialize(final Object obj, final ByteBuffer buffer) {
+ ByteBufferOutput out = new ByteBufferOutput(buffer);
+ Kryo kryo = getKryo();
+ try {
+ kryo.writeClassAndObject(out, obj);
+ } finally {
+ putKryo(kryo);
+ }
+ }
+
+ /**
* Deserializes given byte array to Object using Kryo instance in pool.
*
* @param bytes serialized bytes
@@ -192,6 +211,24 @@
}
}
+ /**
+ * Deserializes given byte buffer to Object using Kryo instance in pool.
+ *
+ * @param buffer input with serialized bytes
+ * @param <T> deserialized Object type
+ * @return deserialized Object
+ */
+ public <T> T deserialize(final ByteBuffer buffer) {
+ ByteBufferInput in = new ByteBufferInput(buffer);
+ Kryo kryo = getKryo();
+ try {
+ @SuppressWarnings("unchecked")
+ T obj = (T) kryo.readClassAndObject(in);
+ return obj;
+ } finally {
+ putKryo(kryo);
+ }
+ }
/**
* Creates a Kryo instance with {@link #registeredTypes} pre-registered.
diff --git a/utils/nio/src/main/java/org/onlab/nio/IOLoop.java b/utils/nio/src/main/java/org/onlab/nio/IOLoop.java
index 1309330..dc3ecaf 100644
--- a/utils/nio/src/main/java/org/onlab/nio/IOLoop.java
+++ b/utils/nio/src/main/java/org/onlab/nio/IOLoop.java
@@ -54,6 +54,15 @@
}
/**
+ * Returns the number of message streams in custody of the loop.
+ *
+ * @return number of message streams
+ */
+ public int streamCount() {
+ return streams.size();
+ }
+
+ /**
* Creates a new message stream backed by the specified socket channel.
*
* @param byteChannel backing byte channel
@@ -84,14 +93,9 @@
*
* @param key selection key holding the pending connect operation.
*/
- protected void connect(SelectionKey key) {
- try {
- SocketChannel ch = (SocketChannel) key.channel();
- ch.finishConnect();
- } catch (IOException | IllegalStateException e) {
- log.warn("Unable to complete connection", e);
- }
-
+ protected void connect(SelectionKey key) throws IOException {
+ SocketChannel ch = (SocketChannel) key.channel();
+ ch.finishConnect();
if (key.isValid()) {
key.interestOps(SelectionKey.OP_READ);
}
@@ -115,7 +119,11 @@
// If there is a pending connect operation, complete it.
if (key.isConnectable()) {
- connect(key);
+ try {
+ connect(key);
+ } catch (IOException | IllegalStateException e) {
+ log.warn("Unable to complete connection", e);
+ }
}
// If there is a read operation, slurp as much data as possible.
@@ -182,9 +190,10 @@
* with a pending accept operation.
*
* @param channel backing socket channel
+ * @return newly accepted message stream
*/
- public void acceptStream(SocketChannel channel) {
- createAndAdmit(channel, SelectionKey.OP_READ);
+ public S acceptStream(SocketChannel channel) {
+ return createAndAdmit(channel, SelectionKey.OP_READ);
}
@@ -193,9 +202,10 @@
* with a pending connect operation.
*
* @param channel backing socket channel
+ * @return newly connected message stream
*/
- public void connectStream(SocketChannel channel) {
- createAndAdmit(channel, SelectionKey.OP_CONNECT);
+ public S connectStream(SocketChannel channel) {
+ return createAndAdmit(channel, SelectionKey.OP_CONNECT);
}
/**
@@ -205,12 +215,14 @@
* @param channel socket channel
* @param op pending operations mask to be applied to the selection
* key as a set of initial interestedOps
+ * @return newly created message stream
*/
- private synchronized void createAndAdmit(SocketChannel channel, int op) {
+ private synchronized S createAndAdmit(SocketChannel channel, int op) {
S stream = createStream(channel);
streams.add(stream);
newStreamRequests.add(new NewStreamRequest(stream, channel, op));
selector.wakeup();
+ return stream;
}
/**
diff --git a/utils/nio/src/main/java/org/onlab/nio/MessageStream.java b/utils/nio/src/main/java/org/onlab/nio/MessageStream.java
index 89107bf..c38f0f5 100644
--- a/utils/nio/src/main/java/org/onlab/nio/MessageStream.java
+++ b/utils/nio/src/main/java/org/onlab/nio/MessageStream.java
@@ -10,6 +10,7 @@
import java.nio.channels.SelectionKey;
import java.util.ArrayList;
import java.util.List;
+import java.util.Objects;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Preconditions.checkNotNull;
@@ -170,7 +171,7 @@
}
/**
- * Reads, withouth blocking, a list of messages from the stream.
+ * Reads, without blocking, a list of messages from the stream.
* The list will be empty if there were not messages pending.
*
* @return list of messages or null if backing channel has been closed
@@ -262,7 +263,7 @@
try {
channel.write(outbound);
} catch (IOException e) {
- if (!closed && !e.getMessage().equals("Broken pipe")) {
+ if (!closed && !Objects.equals(e.getMessage(), "Broken pipe")) {
log.warn("Unable to write data", e);
ioError = e;
}
diff --git a/utils/nio/src/main/java/org/onlab/nio/package-info.java b/utils/nio/src/main/java/org/onlab/nio/package-info.java
index d5ddd10..144236f 100644
--- a/utils/nio/src/main/java/org/onlab/nio/package-info.java
+++ b/utils/nio/src/main/java/org/onlab/nio/package-info.java
@@ -2,4 +2,4 @@
* Mechanism to transfer messages over network using IO loop and
* message stream, backed by NIO byte buffers.
*/
-package org.onlab.nio;
\ No newline at end of file
+package org.onlab.nio;
diff --git a/utils/nio/src/test/java/org/onlab/nio/IOLoopTestClient.java b/utils/nio/src/test/java/org/onlab/nio/IOLoopTestClient.java
index bdcc97a..bbeedd0 100644
--- a/utils/nio/src/test/java/org/onlab/nio/IOLoopTestClient.java
+++ b/utils/nio/src/test/java/org/onlab/nio/IOLoopTestClient.java
@@ -230,7 +230,7 @@
}
@Override
- protected void connect(SelectionKey key) {
+ protected void connect(SelectionKey key) throws IOException {
super.connect(key);
TestMessageStream b = (TestMessageStream) key.attachment();
Worker w = ((CustomIOLoop) b.loop()).worker;