Remove Hazelcast-based Device, Link, etc. Store implementation bundle
Change-Id: I352ebaed2d51b51201a8f3abc609be7c793cc3be
diff --git a/core/store/hz/net/pom.xml b/core/store/hz/net/pom.xml
deleted file mode 100644
index 177e99e..0000000
--- a/core/store/hz/net/pom.xml
+++ /dev/null
@@ -1,55 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<project xmlns="http://maven.apache.org/POM/4.0.0"
- xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
- xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
- <modelVersion>4.0.0</modelVersion>
-
- <parent>
- <groupId>org.onlab.onos</groupId>
- <artifactId>onos-core-hz</artifactId>
- <version>1.0.0-SNAPSHOT</version>
- <relativePath>../pom.xml</relativePath>
- </parent>
-
- <artifactId>onos-core-hz-net</artifactId>
- <packaging>bundle</packaging>
-
- <description>ONOS Hazelcast-based distributed store subsystems</description>
-
- <dependencies>
- <dependency>
- <groupId>org.onlab.onos</groupId>
- <artifactId>onos-api</artifactId>
- </dependency>
- <dependency>
- <groupId>org.onlab.onos</groupId>
- <artifactId>onos-core-hz-common</artifactId>
- <version>${project.version}</version>
- </dependency>
- <dependency>
- <groupId>org.onlab.onos</groupId>
- <artifactId>onos-core-hz-common</artifactId>
- <classifier>tests</classifier>
- <scope>test</scope>
- <version>${project.version}</version>
- </dependency>
- <dependency>
- <groupId>org.apache.felix</groupId>
- <artifactId>org.apache.felix.scr.annotations</artifactId>
- </dependency>
- <dependency>
- <groupId>com.hazelcast</groupId>
- <artifactId>hazelcast</artifactId>
- </dependency>
- </dependencies>
-
- <build>
- <plugins>
- <plugin>
- <groupId>org.apache.felix</groupId>
- <artifactId>maven-scr-plugin</artifactId>
- </plugin>
- </plugins>
- </build>
-
-</project>
diff --git a/core/store/hz/net/src/main/java/org/onlab/onos/store/device/impl/DistributedDeviceStore.java b/core/store/hz/net/src/main/java/org/onlab/onos/store/device/impl/DistributedDeviceStore.java
deleted file mode 100644
index 0016939..0000000
--- a/core/store/hz/net/src/main/java/org/onlab/onos/store/device/impl/DistributedDeviceStore.java
+++ /dev/null
@@ -1,408 +0,0 @@
-package org.onlab.onos.store.device.impl;
-
-import static com.google.common.base.Predicates.notNull;
-
-import com.google.common.base.Optional;
-import com.google.common.cache.LoadingCache;
-import com.google.common.collect.FluentIterable;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableSet;
-import com.google.common.collect.ImmutableSet.Builder;
-import com.hazelcast.core.IMap;
-import com.hazelcast.core.ISet;
-
-import org.apache.felix.scr.annotations.Activate;
-import org.apache.felix.scr.annotations.Component;
-import org.apache.felix.scr.annotations.Deactivate;
-import org.apache.felix.scr.annotations.Service;
-import org.onlab.onos.net.DefaultDevice;
-import org.onlab.onos.net.DefaultPort;
-import org.onlab.onos.net.Device;
-import org.onlab.onos.net.DeviceId;
-import org.onlab.onos.net.Port;
-import org.onlab.onos.net.PortNumber;
-import org.onlab.onos.net.device.DeviceDescription;
-import org.onlab.onos.net.device.DeviceEvent;
-import org.onlab.onos.net.device.DeviceStore;
-import org.onlab.onos.net.device.DeviceStoreDelegate;
-import org.onlab.onos.net.device.PortDescription;
-import org.onlab.onos.net.provider.ProviderId;
-import org.onlab.onos.store.common.AbsentInvalidatingLoadingCache;
-import org.onlab.onos.store.common.AbstractHazelcastStore;
-import org.onlab.onos.store.common.OptionalCacheLoader;
-import org.slf4j.Logger;
-
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Objects;
-import java.util.Set;
-
-import static com.google.common.base.Preconditions.checkArgument;
-import static com.google.common.cache.CacheBuilder.newBuilder;
-import static org.onlab.onos.net.device.DeviceEvent.Type.*;
-import static org.slf4j.LoggerFactory.getLogger;
-
-//TODO: Add support for multiple provider and annotations
-/**
- * Manages inventory of infrastructure devices using a Hazelcast-backed map.
- */
-@Component(immediate = true)
-@Service
-public class DistributedDeviceStore
- extends AbstractHazelcastStore<DeviceEvent, DeviceStoreDelegate>
- implements DeviceStore {
-
- private final Logger log = getLogger(getClass());
-
- public static final String DEVICE_NOT_FOUND = "Device with ID %s not found";
-
- // private IMap<DeviceId, DefaultDevice> cache;
- private IMap<byte[], byte[]> rawDevices;
- private LoadingCache<DeviceId, Optional<DefaultDevice>> devices;
-
- // private ISet<DeviceId> availableDevices;
- private ISet<byte[]> availableDevices;
-
- // TODO DevicePorts is very inefficient; consider restructuring.
- // private IMap<DeviceId, Map<PortNumber, Port>> devicePorts;
- private IMap<byte[], byte[]> rawDevicePorts;
- private LoadingCache<DeviceId, Optional<Map<PortNumber, Port>>> devicePorts;
-
- private String devicesListener;
-
- private String portsListener;
-
- @Override
- @Activate
- public void activate() {
- super.activate();
-
- // IMap event handler needs value
- final boolean includeValue = true;
-
- // TODO decide on Map name scheme to avoid collision
- rawDevices = theInstance.getMap("devices");
- final OptionalCacheLoader<DeviceId, DefaultDevice> deviceLoader
- = new OptionalCacheLoader<>(serializer, rawDevices);
- devices = new AbsentInvalidatingLoadingCache<>(newBuilder().build(deviceLoader));
- // refresh/populate cache based on notification from other instance
- devicesListener = rawDevices.addEntryListener(new RemoteDeviceEventHandler(devices), includeValue);
-
- // TODO cache availableDevices
- availableDevices = theInstance.getSet("availableDevices");
-
- rawDevicePorts = theInstance.getMap("devicePorts");
- final OptionalCacheLoader<DeviceId, Map<PortNumber, Port>> devicePortLoader
- = new OptionalCacheLoader<>(serializer, rawDevicePorts);
- devicePorts = new AbsentInvalidatingLoadingCache<>(newBuilder().build(devicePortLoader));
- // refresh/populate cache based on notification from other instance
- portsListener = rawDevicePorts.addEntryListener(new RemotePortEventHandler(devicePorts), includeValue);
-
- loadDeviceCache();
- loadDevicePortsCache();
-
- log.info("Started");
- }
-
- @Deactivate
- public void deactivate() {
- rawDevicePorts.removeEntryListener(portsListener);
- rawDevices.removeEntryListener(devicesListener);
- log.info("Stopped");
- }
-
- @Override
- public int getDeviceCount() {
- return devices.asMap().size();
- }
-
- @Override
- public Iterable<Device> getDevices() {
- // TODO builder vs. copyOf. Guava seems to be using copyOf?
- Builder<Device> builder = ImmutableSet.builder();
- for (Optional<DefaultDevice> e : devices.asMap().values()) {
- if (e.isPresent()) {
- builder.add(e.get());
- }
- }
- return builder.build();
- }
-
- private void loadDeviceCache() {
- for (byte[] keyBytes : rawDevices.keySet()) {
- final DeviceId id = deserialize(keyBytes);
- devices.refresh(id);
- }
- }
-
- private void loadDevicePortsCache() {
- for (byte[] keyBytes : rawDevicePorts.keySet()) {
- final DeviceId id = deserialize(keyBytes);
- devicePorts.refresh(id);
- }
- }
-
- @Override
- public Device getDevice(DeviceId deviceId) {
- // TODO revisit if ignoring exception is safe.
- return devices.getUnchecked(deviceId).orNull();
- }
-
- @Override
- public DeviceEvent createOrUpdateDevice(ProviderId providerId, DeviceId deviceId,
- DeviceDescription deviceDescription) {
- DefaultDevice device = devices.getUnchecked(deviceId).orNull();
- if (device == null) {
- return createDevice(providerId, deviceId, deviceDescription);
- }
- return updateDevice(providerId, device, deviceDescription);
- }
-
- // Creates the device and returns the appropriate event if necessary.
- private DeviceEvent createDevice(ProviderId providerId, DeviceId deviceId,
- DeviceDescription desc) {
- DefaultDevice device = new DefaultDevice(providerId, deviceId, desc.type(),
- desc.manufacturer(),
- desc.hwVersion(), desc.swVersion(),
- desc.serialNumber());
-
- synchronized (this) {
- final byte[] deviceIdBytes = serialize(deviceId);
- rawDevices.put(deviceIdBytes, serialize(device));
- devices.put(deviceId, Optional.of(device));
-
- availableDevices.add(deviceIdBytes);
- }
- return new DeviceEvent(DEVICE_ADDED, device, null);
- }
-
- // Updates the device and returns the appropriate event if necessary.
- private DeviceEvent updateDevice(ProviderId providerId, DefaultDevice device,
- DeviceDescription desc) {
- // We allow only certain attributes to trigger update
- if (!Objects.equals(device.hwVersion(), desc.hwVersion()) ||
- !Objects.equals(device.swVersion(), desc.swVersion())) {
-
- DefaultDevice updated = new DefaultDevice(providerId, device.id(),
- desc.type(),
- desc.manufacturer(),
- desc.hwVersion(),
- desc.swVersion(),
- desc.serialNumber());
- synchronized (this) {
- final byte[] deviceIdBytes = serialize(device.id());
- rawDevices.put(deviceIdBytes, serialize(updated));
- devices.put(device.id(), Optional.of(updated));
- availableDevices.add(serialize(device.id()));
- }
- return new DeviceEvent(DeviceEvent.Type.DEVICE_UPDATED, updated, null);
- }
-
- // Otherwise merely attempt to change availability
- synchronized (this) {
- boolean added = availableDevices.add(serialize(device.id()));
- return !added ? null :
- new DeviceEvent(DEVICE_AVAILABILITY_CHANGED, device, null);
- }
- }
-
- @Override
- public DeviceEvent markOffline(DeviceId deviceId) {
- synchronized (this) {
- Device device = devices.getUnchecked(deviceId).orNull();
- boolean removed = device != null && availableDevices.remove(serialize(deviceId));
- return !removed ? null :
- new DeviceEvent(DEVICE_AVAILABILITY_CHANGED, device, null);
- }
- }
-
- @Override
- public List<DeviceEvent> updatePorts(ProviderId providerId, DeviceId deviceId,
- List<PortDescription> portDescriptions) {
- List<DeviceEvent> events = new ArrayList<>();
- synchronized (this) {
- Device device = devices.getUnchecked(deviceId).orNull();
- checkArgument(device != null, DEVICE_NOT_FOUND, deviceId);
- Map<PortNumber, Port> ports = getPortMap(deviceId);
-
- // Add new ports
- Set<PortNumber> processed = new HashSet<>();
- for (PortDescription portDescription : portDescriptions) {
- Port port = ports.get(portDescription.portNumber());
- events.add(port == null ?
- createPort(device, portDescription, ports) :
- updatePort(device, port, portDescription, ports));
- processed.add(portDescription.portNumber());
- }
-
- updatePortMap(deviceId, ports);
-
- events.addAll(pruneOldPorts(device, ports, processed));
- }
- return FluentIterable.from(events).filter(notNull()).toList();
- }
-
- // Creates a new port based on the port description, adds it to the map and
- // returns the corresponding event.
- //@GuardedBy("this")
- private DeviceEvent createPort(Device device, PortDescription portDescription,
- Map<PortNumber, Port> ports) {
- DefaultPort port = new DefaultPort(device, portDescription.portNumber(),
- portDescription.isEnabled());
- ports.put(port.number(), port);
- updatePortMap(device.id(), ports);
- return new DeviceEvent(PORT_ADDED, device, port);
- }
-
- // Checks if the specified port requires update and if so, it replaces the
- // existing entry in the map and returns corresponding event.
- //@GuardedBy("this")
- private DeviceEvent updatePort(Device device, Port port,
- PortDescription portDescription,
- Map<PortNumber, Port> ports) {
- if (port.isEnabled() != portDescription.isEnabled()) {
- DefaultPort updatedPort =
- new DefaultPort(device, portDescription.portNumber(),
- portDescription.isEnabled());
- ports.put(port.number(), updatedPort);
- updatePortMap(device.id(), ports);
- return new DeviceEvent(PORT_UPDATED, device, updatedPort);
- }
- return null;
- }
-
- // Prunes the specified list of ports based on which ports are in the
- // processed list and returns list of corresponding events.
- //@GuardedBy("this")
- private List<DeviceEvent> pruneOldPorts(Device device,
- Map<PortNumber, Port> ports,
- Set<PortNumber> processed) {
- List<DeviceEvent> events = new ArrayList<>();
- Iterator<PortNumber> iterator = ports.keySet().iterator();
- while (iterator.hasNext()) {
- PortNumber portNumber = iterator.next();
- if (!processed.contains(portNumber)) {
- events.add(new DeviceEvent(PORT_REMOVED, device,
- ports.get(portNumber)));
- iterator.remove();
- }
- }
- if (!events.isEmpty()) {
- updatePortMap(device.id(), ports);
- }
- return events;
- }
-
- // Gets the map of ports for the specified device; if one does not already
- // exist, it creates and registers a new one.
- // WARN: returned value is a copy; changes made to the Map
- // need to be written back using updatePortMap
- //@GuardedBy("this")
- private Map<PortNumber, Port> getPortMap(DeviceId deviceId) {
- Map<PortNumber, Port> ports = devicePorts.getUnchecked(deviceId).orNull();
- if (ports == null) {
- ports = new HashMap<>();
- // this is probably a waste of time in most cases.
- updatePortMap(deviceId, ports);
- }
- return ports;
- }
-
- //@GuardedBy("this")
- private void updatePortMap(DeviceId deviceId, Map<PortNumber, Port> ports) {
- rawDevicePorts.put(serialize(deviceId), serialize(ports));
- devicePorts.put(deviceId, Optional.of(ports));
- }
-
- @Override
- public DeviceEvent updatePortStatus(ProviderId providerId, DeviceId deviceId,
- PortDescription portDescription) {
- synchronized (this) {
- Device device = devices.getUnchecked(deviceId).orNull();
- checkArgument(device != null, DEVICE_NOT_FOUND, deviceId);
- Map<PortNumber, Port> ports = getPortMap(deviceId);
- Port port = ports.get(portDescription.portNumber());
- return updatePort(device, port, portDescription, ports);
- }
- }
-
- @Override
- public List<Port> getPorts(DeviceId deviceId) {
- Map<PortNumber, Port> ports = devicePorts.getUnchecked(deviceId).orNull();
- return ports == null ? Collections.<Port>emptyList() : ImmutableList.copyOf(ports.values());
- }
-
- @Override
- public Port getPort(DeviceId deviceId, PortNumber portNumber) {
- Map<PortNumber, Port> ports = devicePorts.getUnchecked(deviceId).orNull();
- return ports == null ? null : ports.get(portNumber);
- }
-
- @Override
- public boolean isAvailable(DeviceId deviceId) {
- return availableDevices.contains(serialize(deviceId));
- }
-
- @Override
- public DeviceEvent removeDevice(DeviceId deviceId) {
- synchronized (this) {
- byte[] deviceIdBytes = serialize(deviceId);
-
- // TODO conditional remove?
- Device device = deserialize(rawDevices.remove(deviceIdBytes));
- devices.invalidate(deviceId);
- return device == null ? null :
- new DeviceEvent(DEVICE_REMOVED, device, null);
- }
- }
-
- private class RemoteDeviceEventHandler extends RemoteCacheEventHandler<DeviceId, DefaultDevice> {
- public RemoteDeviceEventHandler(LoadingCache<DeviceId, Optional<DefaultDevice>> cache) {
- super(cache);
- }
-
- @Override
- protected void onAdd(DeviceId deviceId, DefaultDevice device) {
- notifyDelegate(new DeviceEvent(DEVICE_ADDED, device));
- }
-
- @Override
- protected void onRemove(DeviceId deviceId, DefaultDevice device) {
- notifyDelegate(new DeviceEvent(DEVICE_REMOVED, device));
- }
-
- @Override
- protected void onUpdate(DeviceId deviceId, DefaultDevice oldDevice, DefaultDevice device) {
- notifyDelegate(new DeviceEvent(DEVICE_UPDATED, device));
- }
- }
-
- private class RemotePortEventHandler extends RemoteCacheEventHandler<DeviceId, Map<PortNumber, Port>> {
- public RemotePortEventHandler(LoadingCache<DeviceId, Optional<Map<PortNumber, Port>>> cache) {
- super(cache);
- }
-
- @Override
- protected void onAdd(DeviceId deviceId, Map<PortNumber, Port> ports) {
-// notifyDelegate(new DeviceEvent(PORT_ADDED, getDevice(deviceId)));
- }
-
- @Override
- protected void onRemove(DeviceId deviceId, Map<PortNumber, Port> ports) {
-// notifyDelegate(new DeviceEvent(PORT_REMOVED, getDevice(deviceId)));
- }
-
- @Override
- protected void onUpdate(DeviceId deviceId, Map<PortNumber, Port> oldPorts, Map<PortNumber, Port> ports) {
-// notifyDelegate(new DeviceEvent(PORT_UPDATED, getDevice(deviceId)));
- }
- }
-
-
- // TODO cache serialized DeviceID if we suffer from serialization cost
-}
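The deleted DistributedDeviceStore fronts each Hazelcast IMap<byte[], byte[]> of serialized entries with a Guava LoadingCache of Optional values: local reads hit the cache, misses fall back to the distributed map, and entry listeners refresh the cache on remote changes. A minimal, self-contained sketch of that caching pattern, assuming only Guava on the classpath and using a plain ConcurrentHashMap with String keys as a stand-in for the Hazelcast map and the serialization step (OptionalCacheLoader and AbsentInvalidatingLoadingCache from the deleted bundle are simplified away):

import com.google.common.base.Optional;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class OptionalCacheSketch {
    // Stand-in for the Hazelcast IMap<byte[], byte[]> ("rawDevices" above).
    private final Map<String, String> backingMap = new ConcurrentHashMap<>();

    // Misses load from the backing map; absence is modeled as Optional.absent()
    // so callers can use orNull(), mirroring devices.getUnchecked(id).orNull().
    private final LoadingCache<String, Optional<String>> cache =
            CacheBuilder.newBuilder().build(new CacheLoader<String, Optional<String>>() {
                @Override
                public Optional<String> load(String key) {
                    return Optional.fromNullable(backingMap.get(key));
                }
            });

    public String get(String key) {
        return cache.getUnchecked(key).orNull();
    }

    public void put(String key, String value) {
        // Write-through: update the backing map first, then the local cache.
        backingMap.put(key, value);
        cache.put(key, Optional.of(value));
    }
}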
diff --git a/core/store/hz/net/src/main/java/org/onlab/onos/store/device/impl/NoOpClockProviderService.java b/core/store/hz/net/src/main/java/org/onlab/onos/store/device/impl/NoOpClockProviderService.java
deleted file mode 100644
index 4626fa4..0000000
--- a/core/store/hz/net/src/main/java/org/onlab/onos/store/device/impl/NoOpClockProviderService.java
+++ /dev/null
@@ -1,20 +0,0 @@
-package org.onlab.onos.store.device.impl;
-
-import org.apache.felix.scr.annotations.Component;
-import org.apache.felix.scr.annotations.Service;
-import org.onlab.onos.mastership.MastershipTerm;
-import org.onlab.onos.net.DeviceId;
-import org.onlab.onos.net.device.DeviceClockProviderService;
-
-// FIXME: Code clone in onos-core-trivial, onos-core-hz-net
-/**
- * Dummy implementation of {@link DeviceClockProviderService}.
- */
-@Component(immediate = true)
-@Service
-public class NoOpClockProviderService implements DeviceClockProviderService {
-
- @Override
- public void setMastershipTerm(DeviceId deviceId, MastershipTerm term) {
- }
-}
diff --git a/core/store/hz/net/src/main/java/org/onlab/onos/store/flow/impl/DistributedFlowRuleStore.java b/core/store/hz/net/src/main/java/org/onlab/onos/store/flow/impl/DistributedFlowRuleStore.java
deleted file mode 100644
index 084435f..0000000
--- a/core/store/hz/net/src/main/java/org/onlab/onos/store/flow/impl/DistributedFlowRuleStore.java
+++ /dev/null
@@ -1,142 +0,0 @@
-package org.onlab.onos.store.flow.impl;
-
-import static org.onlab.onos.net.flow.FlowRuleEvent.Type.RULE_REMOVED;
-import static org.slf4j.LoggerFactory.getLogger;
-
-import java.util.Collection;
-import java.util.Collections;
-
-import org.apache.felix.scr.annotations.Activate;
-import org.apache.felix.scr.annotations.Component;
-import org.apache.felix.scr.annotations.Deactivate;
-import org.apache.felix.scr.annotations.Service;
-import org.onlab.onos.ApplicationId;
-import org.onlab.onos.net.DeviceId;
-import org.onlab.onos.net.flow.DefaultFlowEntry;
-import org.onlab.onos.net.flow.FlowEntry;
-import org.onlab.onos.net.flow.FlowEntry.FlowEntryState;
-import org.onlab.onos.net.flow.FlowRule;
-import org.onlab.onos.net.flow.FlowRuleEvent;
-import org.onlab.onos.net.flow.FlowRuleEvent.Type;
-import org.onlab.onos.net.flow.FlowRuleStore;
-import org.onlab.onos.net.flow.FlowRuleStoreDelegate;
-import org.onlab.onos.store.AbstractStore;
-import org.slf4j.Logger;
-
-import com.google.common.collect.ArrayListMultimap;
-import com.google.common.collect.ImmutableSet;
-import com.google.common.collect.Multimap;
-
-/**
- * Manages inventory of flow rules using a trivial in-memory implementation.
- */
-//FIXME I LIE. I AIN'T DISTRIBUTED
-@Component(immediate = true)
-@Service
-public class DistributedFlowRuleStore
- extends AbstractStore<FlowRuleEvent, FlowRuleStoreDelegate>
- implements FlowRuleStore {
-
- private final Logger log = getLogger(getClass());
-
- // store entries as a pile of rules, no info about device tables
- private final Multimap<DeviceId, FlowEntry> flowEntries =
- ArrayListMultimap.<DeviceId, FlowEntry>create();
-
- private final Multimap<Short, FlowRule> flowEntriesById =
- ArrayListMultimap.<Short, FlowRule>create();
-
- @Activate
- public void activate() {
- log.info("Started");
- }
-
- @Deactivate
- public void deactivate() {
- log.info("Stopped");
- }
-
-
- @Override
- public int getFlowRuleCount() {
- return flowEntries.size();
- }
-
- @Override
- public synchronized FlowEntry getFlowEntry(FlowRule rule) {
- for (FlowEntry f : flowEntries.get(rule.deviceId())) {
- if (f.equals(rule)) {
- return f;
- }
- }
- return null;
- }
-
- @Override
- public synchronized Iterable<FlowEntry> getFlowEntries(DeviceId deviceId) {
- Collection<FlowEntry> rules = flowEntries.get(deviceId);
- if (rules == null) {
- return Collections.emptyList();
- }
- return ImmutableSet.copyOf(rules);
- }
-
- @Override
- public synchronized Iterable<FlowRule> getFlowRulesByAppId(ApplicationId appId) {
- Collection<FlowRule> rules = flowEntriesById.get(appId.id());
- if (rules == null) {
- return Collections.emptyList();
- }
- return ImmutableSet.copyOf(rules);
- }
-
- @Override
- public synchronized void storeFlowRule(FlowRule rule) {
- FlowEntry f = new DefaultFlowEntry(rule);
- DeviceId did = f.deviceId();
- if (!flowEntries.containsEntry(did, f)) {
- flowEntries.put(did, f);
- flowEntriesById.put(rule.appId(), f);
- }
- }
-
- @Override
- public synchronized void deleteFlowRule(FlowRule rule) {
- FlowEntry entry = getFlowEntry(rule);
- if (entry == null) {
- return;
- }
- entry.setState(FlowEntryState.PENDING_REMOVE);
- }
-
- @Override
- public synchronized FlowRuleEvent addOrUpdateFlowRule(FlowEntry rule) {
- DeviceId did = rule.deviceId();
-
- // check if this new rule is an update to an existing entry
- FlowEntry stored = getFlowEntry(rule);
- if (stored != null) {
- stored.setBytes(rule.bytes());
- stored.setLife(rule.life());
- stored.setPackets(rule.packets());
- if (stored.state() == FlowEntryState.PENDING_ADD) {
- stored.setState(FlowEntryState.ADDED);
- return new FlowRuleEvent(Type.RULE_ADDED, rule);
- }
- return new FlowRuleEvent(Type.RULE_UPDATED, rule);
- }
-
- flowEntries.put(did, rule);
- return null;
- }
-
- @Override
- public synchronized FlowRuleEvent removeFlowRule(FlowEntry rule) {
- // This is where one could mark a rule as removed and still keep it in the store.
- if (flowEntries.remove(rule.deviceId(), rule)) {
- return new FlowRuleEvent(RULE_REMOVED, rule);
- } else {
- return null;
- }
- }
-}
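The deleted flow rule store is a plain in-memory Multimap keyed by device, and addOrUpdateFlowRule is where a rule's lifecycle advances: the first confirmation flips PENDING_ADD to ADDED, later confirmations are plain updates, and unknown rules are remembered without emitting an event. A compact sketch of that transition, assuming Guava and using a hypothetical Entry type in place of the ONOS FlowEntry API:

import com.google.common.collect.ArrayListMultimap;
import com.google.common.collect.Multimap;

public class FlowStateSketch {
    enum State { PENDING_ADD, ADDED, PENDING_REMOVE }

    // Hypothetical stand-in for org.onlab.onos.net.flow.FlowEntry.
    static final class Entry {
        final String deviceId;
        final String ruleId;
        State state = State.PENDING_ADD;

        Entry(String deviceId, String ruleId) {
            this.deviceId = deviceId;
            this.ruleId = ruleId;
        }
    }

    // Rules piled per device, as in Multimap<DeviceId, FlowEntry> above.
    private final Multimap<String, Entry> entries = ArrayListMultimap.create();

    synchronized String addOrUpdate(Entry incoming) {
        for (Entry stored : entries.get(incoming.deviceId)) {
            if (stored.ruleId.equals(incoming.ruleId)) {
                if (stored.state == State.PENDING_ADD) {
                    stored.state = State.ADDED;
                    return "RULE_ADDED";   // first confirmation from the device
                }
                return "RULE_UPDATED";     // subsequent stats refresh
            }
        }
        entries.put(incoming.deviceId, incoming); // unknown rule: store it, emit no event
        return null;
    }
}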
diff --git a/core/store/hz/net/src/main/java/org/onlab/onos/store/host/impl/DistributedHostStore.java b/core/store/hz/net/src/main/java/org/onlab/onos/store/host/impl/DistributedHostStore.java
deleted file mode 100644
index bd4b3bf..0000000
--- a/core/store/hz/net/src/main/java/org/onlab/onos/store/host/impl/DistributedHostStore.java
+++ /dev/null
@@ -1,302 +0,0 @@
-package org.onlab.onos.store.host.impl;
-
-import com.google.common.collect.HashMultimap;
-import com.google.common.collect.ImmutableSet;
-import com.google.common.collect.Multimap;
-import com.google.common.collect.Sets;
-import org.apache.felix.scr.annotations.Activate;
-import org.apache.felix.scr.annotations.Component;
-import org.apache.felix.scr.annotations.Deactivate;
-import org.apache.felix.scr.annotations.Service;
-import org.onlab.onos.net.Annotations;
-import org.onlab.onos.net.ConnectPoint;
-import org.onlab.onos.net.DefaultHost;
-import org.onlab.onos.net.DeviceId;
-import org.onlab.onos.net.Host;
-import org.onlab.onos.net.HostId;
-import org.onlab.onos.net.HostLocation;
-import org.onlab.onos.net.host.HostDescription;
-import org.onlab.onos.net.host.HostEvent;
-import org.onlab.onos.net.host.HostStore;
-import org.onlab.onos.net.host.HostStoreDelegate;
-import org.onlab.onos.net.host.PortAddresses;
-import org.onlab.onos.net.provider.ProviderId;
-import org.onlab.onos.store.AbstractStore;
-import org.onlab.packet.IpPrefix;
-import org.onlab.packet.MacAddress;
-import org.onlab.packet.VlanId;
-import org.slf4j.Logger;
-
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.ConcurrentHashMap;
-
-import static org.onlab.onos.net.host.HostEvent.Type.*;
-import static org.slf4j.LoggerFactory.getLogger;
-
-/**
- * TEMPORARY: Manages inventory of end-station hosts using a distributed
- * structures implementation.
- */
-//FIXME: I LIE I AM NOT DISTRIBUTED
-@Component(immediate = true)
-@Service
-public class DistributedHostStore
- extends AbstractStore<HostEvent, HostStoreDelegate>
- implements HostStore {
-
- private final Logger log = getLogger(getClass());
-
- // Host inventory
- private final Map<HostId, StoredHost> hosts = new ConcurrentHashMap<>(2000000, 0.75f, 16);
-
- // Hosts tracked by their location
- private final Multimap<ConnectPoint, Host> locations = HashMultimap.create();
-
- private final Map<ConnectPoint, PortAddresses> portAddresses =
- new ConcurrentHashMap<>();
-
- @Activate
- public void activate() {
- log.info("Started");
- }
-
- @Deactivate
- public void deactivate() {
- log.info("Stopped");
- }
-
- @Override
- public HostEvent createOrUpdateHost(ProviderId providerId, HostId hostId,
- HostDescription hostDescription) {
- StoredHost host = hosts.get(hostId);
- if (host == null) {
- return createHost(providerId, hostId, hostDescription);
- }
- return updateHost(providerId, host, hostDescription);
- }
-
- // creates a new host and sends HOST_ADDED
- private HostEvent createHost(ProviderId providerId, HostId hostId,
- HostDescription descr) {
- StoredHost newhost = new StoredHost(providerId, hostId,
- descr.hwAddress(),
- descr.vlan(),
- descr.location(),
- ImmutableSet.copyOf(descr.ipAddress()));
- synchronized (this) {
- hosts.put(hostId, newhost);
- locations.put(descr.location(), newhost);
- }
- return new HostEvent(HOST_ADDED, newhost);
- }
-
- // checks for type of update to host, sends appropriate event
- private HostEvent updateHost(ProviderId providerId, StoredHost host,
- HostDescription descr) {
- HostEvent event;
- if (!host.location().equals(descr.location())) {
- host.setLocation(descr.location());
- return new HostEvent(HOST_MOVED, host);
- }
-
- if (host.ipAddresses().containsAll(descr.ipAddress())) {
- return null;
- }
-
- Set<IpPrefix> addresses = new HashSet<>(host.ipAddresses());
- addresses.addAll(descr.ipAddress());
- StoredHost updated = new StoredHost(providerId, host.id(),
- host.mac(), host.vlan(),
- descr.location(), addresses);
- event = new HostEvent(HOST_UPDATED, updated);
- synchronized (this) {
- hosts.put(host.id(), updated);
- locations.remove(host.location(), host);
- locations.put(updated.location(), updated);
- }
- return event;
- }
-
- @Override
- public HostEvent removeHost(HostId hostId) {
- synchronized (this) {
- Host host = hosts.remove(hostId);
- if (host != null) {
- locations.remove((host.location()), host);
- return new HostEvent(HOST_REMOVED, host);
- }
- return null;
- }
- }
-
- @Override
- public int getHostCount() {
- return hosts.size();
- }
-
- @Override
- public Iterable<Host> getHosts() {
- return ImmutableSet.<Host>copyOf(hosts.values());
- }
-
- @Override
- public Host getHost(HostId hostId) {
- return hosts.get(hostId);
- }
-
- @Override
- public Set<Host> getHosts(VlanId vlanId) {
- Set<Host> vlanset = new HashSet<>();
- for (Host h : hosts.values()) {
- if (h.vlan().equals(vlanId)) {
- vlanset.add(h);
- }
- }
- return vlanset;
- }
-
- @Override
- public Set<Host> getHosts(MacAddress mac) {
- Set<Host> macset = new HashSet<>();
- for (Host h : hosts.values()) {
- if (h.mac().equals(mac)) {
- macset.add(h);
- }
- }
- return macset;
- }
-
- @Override
- public Set<Host> getHosts(IpPrefix ip) {
- Set<Host> ipset = new HashSet<>();
- for (Host h : hosts.values()) {
- if (h.ipAddresses().contains(ip)) {
- ipset.add(h);
- }
- }
- return ipset;
- }
-
- @Override
- public Set<Host> getConnectedHosts(ConnectPoint connectPoint) {
- return ImmutableSet.copyOf(locations.get(connectPoint));
- }
-
- @Override
- public Set<Host> getConnectedHosts(DeviceId deviceId) {
- Set<Host> hostset = new HashSet<>();
- for (ConnectPoint p : locations.keySet()) {
- if (p.deviceId().equals(deviceId)) {
- hostset.addAll(locations.get(p));
- }
- }
- return hostset;
- }
-
- @Override
- public void updateAddressBindings(PortAddresses addresses) {
- synchronized (portAddresses) {
- PortAddresses existing = portAddresses.get(addresses.connectPoint());
- if (existing == null) {
- portAddresses.put(addresses.connectPoint(), addresses);
- } else {
- Set<IpPrefix> union = Sets.union(existing.ips(), addresses.ips())
- .immutableCopy();
-
- MacAddress newMac = (addresses.mac() == null) ? existing.mac()
- : addresses.mac();
-
- PortAddresses newAddresses =
- new PortAddresses(addresses.connectPoint(), union, newMac);
-
- portAddresses.put(newAddresses.connectPoint(), newAddresses);
- }
- }
- }
-
- @Override
- public void removeAddressBindings(PortAddresses addresses) {
- synchronized (portAddresses) {
- PortAddresses existing = portAddresses.get(addresses.connectPoint());
- if (existing != null) {
- Set<IpPrefix> difference =
- Sets.difference(existing.ips(), addresses.ips()).immutableCopy();
-
- // If they removed the existing mac, set the new mac to null.
- // Otherwise, keep the existing mac.
- MacAddress newMac = existing.mac();
- if (addresses.mac() != null && addresses.mac().equals(existing.mac())) {
- newMac = null;
- }
-
- PortAddresses newAddresses =
- new PortAddresses(addresses.connectPoint(), difference, newMac);
-
- portAddresses.put(newAddresses.connectPoint(), newAddresses);
- }
- }
- }
-
- @Override
- public void clearAddressBindings(ConnectPoint connectPoint) {
- synchronized (portAddresses) {
- portAddresses.remove(connectPoint);
- }
- }
-
- @Override
- public Set<PortAddresses> getAddressBindings() {
- synchronized (portAddresses) {
- return new HashSet<>(portAddresses.values());
- }
- }
-
- @Override
- public PortAddresses getAddressBindingsForPort(ConnectPoint connectPoint) {
- PortAddresses addresses;
-
- synchronized (portAddresses) {
- addresses = portAddresses.get(connectPoint);
- }
-
- if (addresses == null) {
- addresses = new PortAddresses(connectPoint, null, null);
- }
-
- return addresses;
- }
-
- // Auxiliary extension to allow location to mutate.
- private class StoredHost extends DefaultHost {
- private HostLocation location;
-
- /**
- * Creates an end-station host using the supplied information.
- *
- * @param providerId provider identity
- * @param id host identifier
- * @param mac host MAC address
- * @param vlan host VLAN identifier
- * @param location host location
- * @param ips host IP addresses
- * @param annotations optional key/value annotations
- */
- public StoredHost(ProviderId providerId, HostId id,
- MacAddress mac, VlanId vlan, HostLocation location,
- Set<IpPrefix> ips, Annotations... annotations) {
- super(providerId, id, mac, vlan, location, ips, annotations);
- this.location = location;
- }
-
- void setLocation(HostLocation location) {
- this.location = location;
- }
-
- @Override
- public HostLocation location() {
- return location;
- }
- }
-}
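Aside from the host inventory itself, the deleted store merges port address bindings in updateAddressBindings: the IP sets are unioned and a non-null incoming MAC replaces the stored one. A small sketch of that merge rule, with a hypothetical Addresses type standing in for PortAddresses and IPs kept as plain strings, assuming Guava:

import com.google.common.collect.Sets;

import java.util.Set;

public class AddressMergeSketch {
    // Hypothetical stand-in for PortAddresses: a set of IPs plus an optional MAC.
    static final class Addresses {
        final Set<String> ips;
        final String mac; // may be null

        Addresses(Set<String> ips, String mac) {
            this.ips = ips;
            this.mac = mac;
        }
    }

    // Mirrors updateAddressBindings above: union the IPs and keep the old MAC
    // unless the update carries a new one.
    static Addresses merge(Addresses existing, Addresses update) {
        if (existing == null) {
            return update;
        }
        Set<String> union = Sets.union(existing.ips, update.ips).immutableCopy();
        String mac = (update.mac == null) ? existing.mac : update.mac;
        return new Addresses(union, mac);
    }
}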
diff --git a/core/store/hz/net/src/main/java/org/onlab/onos/store/link/impl/DistributedLinkStore.java b/core/store/hz/net/src/main/java/org/onlab/onos/store/link/impl/DistributedLinkStore.java
deleted file mode 100644
index 90ae6fe..0000000
--- a/core/store/hz/net/src/main/java/org/onlab/onos/store/link/impl/DistributedLinkStore.java
+++ /dev/null
@@ -1,263 +0,0 @@
-package org.onlab.onos.store.link.impl;
-
-import static com.google.common.cache.CacheBuilder.newBuilder;
-import static org.onlab.onos.net.Link.Type.DIRECT;
-import static org.onlab.onos.net.Link.Type.INDIRECT;
-import static org.onlab.onos.net.LinkKey.linkKey;
-import static org.onlab.onos.net.link.LinkEvent.Type.LINK_ADDED;
-import static org.onlab.onos.net.link.LinkEvent.Type.LINK_REMOVED;
-import static org.onlab.onos.net.link.LinkEvent.Type.LINK_UPDATED;
-import static org.slf4j.LoggerFactory.getLogger;
-
-import java.util.HashSet;
-import java.util.Set;
-
-import org.apache.felix.scr.annotations.Activate;
-import org.apache.felix.scr.annotations.Component;
-import org.apache.felix.scr.annotations.Deactivate;
-import org.apache.felix.scr.annotations.Service;
-import org.onlab.onos.net.ConnectPoint;
-import org.onlab.onos.net.DefaultLink;
-import org.onlab.onos.net.DeviceId;
-import org.onlab.onos.net.Link;
-import org.onlab.onos.net.LinkKey;
-import org.onlab.onos.net.link.LinkDescription;
-import org.onlab.onos.net.link.LinkEvent;
-import org.onlab.onos.net.link.LinkStore;
-import org.onlab.onos.net.link.LinkStoreDelegate;
-import org.onlab.onos.net.provider.ProviderId;
-import org.onlab.onos.store.common.AbsentInvalidatingLoadingCache;
-import org.onlab.onos.store.common.AbstractHazelcastStore;
-import org.onlab.onos.store.common.OptionalCacheLoader;
-import org.slf4j.Logger;
-
-import com.google.common.base.Optional;
-import com.google.common.cache.LoadingCache;
-import com.google.common.collect.HashMultimap;
-import com.google.common.collect.ImmutableSet;
-import com.google.common.collect.Multimap;
-import com.google.common.collect.ImmutableSet.Builder;
-import com.hazelcast.core.IMap;
-
-//TODO: Add support for multiple provider and annotations
-/**
- * Manages inventory of infrastructure links using a Hazelcast-backed map.
- */
-@Component(immediate = true)
-@Service
-public class DistributedLinkStore
- extends AbstractHazelcastStore<LinkEvent, LinkStoreDelegate>
- implements LinkStore {
-
- private final Logger log = getLogger(getClass());
-
- // Link inventory
- private IMap<byte[], byte[]> rawLinks;
- private LoadingCache<LinkKey, Optional<DefaultLink>> links;
-
- // TODO synchronize?
- // Egress and ingress link sets
- private final Multimap<DeviceId, Link> srcLinks = HashMultimap.create();
- private final Multimap<DeviceId, Link> dstLinks = HashMultimap.create();
-
- private String linksListener;
-
- @Override
- @Activate
- public void activate() {
- super.activate();
-
- boolean includeValue = true;
-
- // TODO decide on Map name scheme to avoid collision
- rawLinks = theInstance.getMap("links");
- final OptionalCacheLoader<LinkKey, DefaultLink> linkLoader
- = new OptionalCacheLoader<>(serializer, rawLinks);
- links = new AbsentInvalidatingLoadingCache<>(newBuilder().build(linkLoader));
- // refresh/populate cache based on notification from other instance
- linksListener = rawLinks.addEntryListener(new RemoteLinkEventHandler(links), includeValue);
-
- loadLinkCache();
-
- log.info("Started");
- }
-
- @Deactivate
- public void deactivate() {
- rawLinks.removeEntryListener(linksListener);
- log.info("Stopped");
- }
-
- private void loadLinkCache() {
- for (byte[] keyBytes : rawLinks.keySet()) {
- final LinkKey id = deserialize(keyBytes);
- links.refresh(id);
- }
- }
-
- @Override
- public int getLinkCount() {
- return links.asMap().size();
- }
-
- @Override
- public Iterable<Link> getLinks() {
- Builder<Link> builder = ImmutableSet.builder();
- for (Optional<DefaultLink> e : links.asMap().values()) {
- if (e.isPresent()) {
- builder.add(e.get());
- }
- }
- return builder.build();
- }
-
- @Override
- public Set<Link> getDeviceEgressLinks(DeviceId deviceId) {
- return ImmutableSet.copyOf(srcLinks.get(deviceId));
- }
-
- @Override
- public Set<Link> getDeviceIngressLinks(DeviceId deviceId) {
- return ImmutableSet.copyOf(dstLinks.get(deviceId));
- }
-
- @Override
- public Link getLink(ConnectPoint src, ConnectPoint dst) {
- return links.getUnchecked(linkKey(src, dst)).orNull();
- }
-
- @Override
- public Set<Link> getEgressLinks(ConnectPoint src) {
- Set<Link> egress = new HashSet<>();
- for (Link link : srcLinks.get(src.deviceId())) {
- if (link.src().equals(src)) {
- egress.add(link);
- }
- }
- return egress;
- }
-
- @Override
- public Set<Link> getIngressLinks(ConnectPoint dst) {
- Set<Link> ingress = new HashSet<>();
- for (Link link : dstLinks.get(dst.deviceId())) {
- if (link.dst().equals(dst)) {
- ingress.add(link);
- }
- }
- return ingress;
- }
-
- @Override
- public LinkEvent createOrUpdateLink(ProviderId providerId,
- LinkDescription linkDescription) {
- LinkKey key = linkKey(linkDescription.src(), linkDescription.dst());
- Optional<DefaultLink> link = links.getUnchecked(key);
- if (!link.isPresent()) {
- return createLink(providerId, key, linkDescription);
- }
- return updateLink(providerId, link.get(), key, linkDescription);
- }
-
- // Creates and stores the link and returns the appropriate event.
- private LinkEvent createLink(ProviderId providerId, LinkKey key,
- LinkDescription linkDescription) {
- DefaultLink link = new DefaultLink(providerId, key.src(), key.dst(),
- linkDescription.type());
- synchronized (this) {
- final byte[] keyBytes = serialize(key);
- rawLinks.put(keyBytes, serialize(link));
- links.asMap().putIfAbsent(key, Optional.of(link));
-
- addNewLink(link);
- }
- return new LinkEvent(LINK_ADDED, link);
- }
-
- // update Egress and ingress link sets
- private void addNewLink(DefaultLink link) {
- synchronized (this) {
- srcLinks.put(link.src().deviceId(), link);
- dstLinks.put(link.dst().deviceId(), link);
- }
- }
-
- // Updates, if necessary the specified link and returns the appropriate event.
- private LinkEvent updateLink(ProviderId providerId, DefaultLink link,
- LinkKey key, LinkDescription linkDescription) {
- // FIXME confirm Link update condition is OK
- if (link.type() == INDIRECT && linkDescription.type() == DIRECT) {
- synchronized (this) {
-
- DefaultLink updated =
- new DefaultLink(providerId, link.src(), link.dst(),
- linkDescription.type());
- final byte[] keyBytes = serialize(key);
- rawLinks.put(keyBytes, serialize(updated));
- links.asMap().replace(key, Optional.of(link), Optional.of(updated));
-
- replaceLink(link, updated);
- return new LinkEvent(LINK_UPDATED, updated);
- }
- }
- return null;
- }
-
- // update Egress and ingress link sets
- private void replaceLink(DefaultLink link, DefaultLink updated) {
- synchronized (this) {
- srcLinks.remove(link.src().deviceId(), link);
- dstLinks.remove(link.dst().deviceId(), link);
-
- srcLinks.put(link.src().deviceId(), updated);
- dstLinks.put(link.dst().deviceId(), updated);
- }
- }
-
- @Override
- public LinkEvent removeLink(ConnectPoint src, ConnectPoint dst) {
- synchronized (this) {
- LinkKey key = linkKey(src, dst);
- byte[] keyBytes = serialize(key);
- Link link = deserialize(rawLinks.remove(keyBytes));
- links.invalidate(key);
- if (link != null) {
- removeLink(link);
- return new LinkEvent(LINK_REMOVED, link);
- }
- return null;
- }
- }
-
- // update Egress and ingress link sets
- private void removeLink(Link link) {
- synchronized (this) {
- srcLinks.remove(link.src().deviceId(), link);
- dstLinks.remove(link.dst().deviceId(), link);
- }
- }
-
- private class RemoteLinkEventHandler extends RemoteCacheEventHandler<LinkKey, DefaultLink> {
- public RemoteLinkEventHandler(LoadingCache<LinkKey, Optional<DefaultLink>> cache) {
- super(cache);
- }
-
- @Override
- protected void onAdd(LinkKey key, DefaultLink newVal) {
- addNewLink(newVal);
- notifyDelegate(new LinkEvent(LINK_ADDED, newVal));
- }
-
- @Override
- protected void onUpdate(LinkKey key, DefaultLink oldVal, DefaultLink newVal) {
- replaceLink(oldVal, newVal);
- notifyDelegate(new LinkEvent(LINK_UPDATED, newVal));
- }
-
- @Override
- protected void onRemove(LinkKey key, DefaultLink val) {
- removeLink(val);
- notifyDelegate(new LinkEvent(LINK_REMOVED, val));
- }
- }
-}
diff --git a/core/store/hz/net/src/main/java/org/onlab/onos/store/topology/impl/DefaultTopology.java b/core/store/hz/net/src/main/java/org/onlab/onos/store/topology/impl/DefaultTopology.java
deleted file mode 100644
index 5574d27..0000000
--- a/core/store/hz/net/src/main/java/org/onlab/onos/store/topology/impl/DefaultTopology.java
+++ /dev/null
@@ -1,444 +0,0 @@
-package org.onlab.onos.store.topology.impl;
-
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.ImmutableSet;
-import com.google.common.collect.ImmutableSetMultimap;
-import org.onlab.graph.DijkstraGraphSearch;
-import org.onlab.graph.GraphPathSearch;
-import org.onlab.graph.TarjanGraphSearch;
-import org.onlab.onos.net.AbstractModel;
-import org.onlab.onos.net.ConnectPoint;
-import org.onlab.onos.net.DefaultPath;
-import org.onlab.onos.net.DeviceId;
-import org.onlab.onos.net.Link;
-import org.onlab.onos.net.Path;
-import org.onlab.onos.net.provider.ProviderId;
-import org.onlab.onos.net.topology.ClusterId;
-import org.onlab.onos.net.topology.DefaultTopologyCluster;
-import org.onlab.onos.net.topology.DefaultTopologyVertex;
-import org.onlab.onos.net.topology.GraphDescription;
-import org.onlab.onos.net.topology.LinkWeight;
-import org.onlab.onos.net.topology.Topology;
-import org.onlab.onos.net.topology.TopologyCluster;
-import org.onlab.onos.net.topology.TopologyEdge;
-import org.onlab.onos.net.topology.TopologyGraph;
-import org.onlab.onos.net.topology.TopologyVertex;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import static com.google.common.base.MoreObjects.toStringHelper;
-import static com.google.common.collect.ImmutableSetMultimap.Builder;
-import static org.onlab.graph.GraphPathSearch.Result;
-import static org.onlab.graph.TarjanGraphSearch.SCCResult;
-import static org.onlab.onos.net.Link.Type.INDIRECT;
-
-/**
- * Default implementation of the topology descriptor. This carries the
- * backing topology data.
- */
-public class DefaultTopology extends AbstractModel implements Topology {
-
- private static final DijkstraGraphSearch<TopologyVertex, TopologyEdge> DIJKSTRA =
- new DijkstraGraphSearch<>();
- private static final TarjanGraphSearch<TopologyVertex, TopologyEdge> TARJAN =
- new TarjanGraphSearch<>();
-
- private static final ProviderId PID = new ProviderId("core", "org.onlab.onos.net");
-
- private final long time;
- private final TopologyGraph graph;
-
- private final SCCResult<TopologyVertex, TopologyEdge> clusterResults;
- private final ImmutableMap<DeviceId, Result<TopologyVertex, TopologyEdge>> results;
- private final ImmutableSetMultimap<PathKey, Path> paths;
-
- private final ImmutableMap<ClusterId, TopologyCluster> clusters;
- private final ImmutableSet<ConnectPoint> infrastructurePoints;
- private final ImmutableSetMultimap<ClusterId, ConnectPoint> broadcastSets;
-
- private ImmutableMap<DeviceId, TopologyCluster> clustersByDevice;
- private ImmutableSetMultimap<TopologyCluster, DeviceId> devicesByCluster;
- private ImmutableSetMultimap<TopologyCluster, Link> linksByCluster;
-
-
- /**
- * Creates a topology descriptor attributed to the specified provider.
- *
- * @param providerId identity of the provider
- * @param description data describing the new topology
- */
- DefaultTopology(ProviderId providerId, GraphDescription description) {
- super(providerId);
- this.time = description.timestamp();
-
- // Build the graph
- this.graph = new DefaultTopologyGraph(description.vertexes(),
- description.edges());
-
- this.results = searchForShortestPaths();
- this.paths = buildPaths();
-
- this.clusterResults = searchForClusters();
- this.clusters = buildTopologyClusters();
-
- buildIndexes();
-
- this.broadcastSets = buildBroadcastSets();
- this.infrastructurePoints = findInfrastructurePoints();
- }
-
- @Override
- public long time() {
- return time;
- }
-
- @Override
- public int clusterCount() {
- return clusters.size();
- }
-
- @Override
- public int deviceCount() {
- return graph.getVertexes().size();
- }
-
- @Override
- public int linkCount() {
- return graph.getEdges().size();
- }
-
- @Override
- public int pathCount() {
- return paths.size();
- }
-
- /**
- * Returns the backing topology graph.
- *
- * @return topology graph
- */
- TopologyGraph getGraph() {
- return graph;
- }
-
- /**
- * Returns the set of topology clusters.
- *
- * @return set of clusters
- */
- Set<TopologyCluster> getClusters() {
- return ImmutableSet.copyOf(clusters.values());
- }
-
- /**
- * Returns the specified topology cluster.
- *
- * @param clusterId cluster identifier
- * @return topology cluster
- */
- TopologyCluster getCluster(ClusterId clusterId) {
- return clusters.get(clusterId);
- }
-
- /**
- * Returns the topology cluster that contains the given device.
- *
- * @param deviceId device identifier
- * @return topology cluster
- */
- TopologyCluster getCluster(DeviceId deviceId) {
- return clustersByDevice.get(deviceId);
- }
-
- /**
- * Returns the set of cluster devices.
- *
- * @param cluster topology cluster
- * @return cluster devices
- */
- Set<DeviceId> getClusterDevices(TopologyCluster cluster) {
- return devicesByCluster.get(cluster);
- }
-
- /**
- * Returns the set of cluster links.
- *
- * @param cluster topology cluster
- * @return cluster links
- */
- Set<Link> getClusterLinks(TopologyCluster cluster) {
- return linksByCluster.get(cluster);
- }
-
- /**
- * Indicates whether the given point is an infrastructure link end-point.
- *
- * @param connectPoint connection point
- * @return true if infrastructure
- */
- boolean isInfrastructure(ConnectPoint connectPoint) {
- return infrastructurePoints.contains(connectPoint);
- }
-
- /**
- * Indicates whether the given point is part of a broadcast set.
- *
- * @param connectPoint connection point
- * @return true if in broadcast set
- */
- boolean isBroadcastPoint(ConnectPoint connectPoint) {
- // Any non-infrastructure points, i.e. edge points, are assumed to be OK.
- if (!isInfrastructure(connectPoint)) {
- return true;
- }
-
- // Find the cluster to which the device belongs.
- TopologyCluster cluster = clustersByDevice.get(connectPoint.deviceId());
- if (cluster == null) {
- throw new IllegalArgumentException("No cluster found for device " + connectPoint.deviceId());
- }
-
- // If the broadcast set is null or empty, or if the point explicitly
- // belongs to it, return true;
- Set<ConnectPoint> points = broadcastSets.get(cluster.id());
- return points == null || points.isEmpty() || points.contains(connectPoint);
- }
-
- /**
- * Returns the size of the cluster broadcast set.
- *
- * @param clusterId cluster identifier
- * @return size of the cluster broadcast set
- */
- int broadcastSetSize(ClusterId clusterId) {
- return broadcastSets.get(clusterId).size();
- }
-
- /**
- * Returns the set of pre-computed shortest paths between source and
- * destination devices.
- *
- * @param src source device
- * @param dst destination device
- * @return set of shortest paths
- */
- Set<Path> getPaths(DeviceId src, DeviceId dst) {
- return paths.get(new PathKey(src, dst));
- }
-
- /**
- * Computes on-demand the set of shortest paths between source and
- * destination devices.
- *
- * @param src source device
- * @param dst destination device
- * @return set of shortest paths
- */
- Set<Path> getPaths(DeviceId src, DeviceId dst, LinkWeight weight) {
- GraphPathSearch.Result<TopologyVertex, TopologyEdge> result =
- DIJKSTRA.search(graph, new DefaultTopologyVertex(src),
- new DefaultTopologyVertex(dst), weight);
- ImmutableSet.Builder<Path> builder = ImmutableSet.builder();
- for (org.onlab.graph.Path<TopologyVertex, TopologyEdge> path : result.paths()) {
- builder.add(networkPath(path));
- }
- return builder.build();
- }
-
-
- // Searches the graph for all shortest paths and returns the search results.
- private ImmutableMap<DeviceId, Result<TopologyVertex, TopologyEdge>> searchForShortestPaths() {
- ImmutableMap.Builder<DeviceId, Result<TopologyVertex, TopologyEdge>> builder = ImmutableMap.builder();
-
- // Search graph paths for each source to all destinations.
- LinkWeight weight = new HopCountLinkWeight(graph.getVertexes().size());
- for (TopologyVertex src : graph.getVertexes()) {
- builder.put(src.deviceId(), DIJKSTRA.search(graph, src, null, weight));
- }
- return builder.build();
- }
-
- // Builds network paths from the graph path search results
- private ImmutableSetMultimap<PathKey, Path> buildPaths() {
- Builder<PathKey, Path> builder = ImmutableSetMultimap.builder();
- for (DeviceId deviceId : results.keySet()) {
- Result<TopologyVertex, TopologyEdge> result = results.get(deviceId);
- for (org.onlab.graph.Path<TopologyVertex, TopologyEdge> path : result.paths()) {
- builder.put(new PathKey(path.src().deviceId(), path.dst().deviceId()),
- networkPath(path));
- }
- }
- return builder.build();
- }
-
- // Converts graph path to a network path with the same cost.
- private Path networkPath(org.onlab.graph.Path<TopologyVertex, TopologyEdge> path) {
- List<Link> links = new ArrayList<>();
- for (TopologyEdge edge : path.edges()) {
- links.add(edge.link());
- }
- return new DefaultPath(PID, links, path.cost());
- }
-
-
- // Searches for SCC clusters in the network topology graph using Tarjan
- // algorithm.
- private SCCResult<TopologyVertex, TopologyEdge> searchForClusters() {
- return TARJAN.search(graph, new NoIndirectLinksWeight());
- }
-
- // Builds the topology clusters and returns the id-cluster bindings.
- private ImmutableMap<ClusterId, TopologyCluster> buildTopologyClusters() {
- ImmutableMap.Builder<ClusterId, TopologyCluster> clusterBuilder = ImmutableMap.builder();
- SCCResult<TopologyVertex, TopologyEdge> result =
- TARJAN.search(graph, new NoIndirectLinksWeight());
-
- // Extract both vertexes and edges from the results; the lists form
- // pairs along the same index.
- List<Set<TopologyVertex>> clusterVertexes = result.clusterVertexes();
- List<Set<TopologyEdge>> clusterEdges = result.clusterEdges();
-
- // Scan over the lists and create a cluster from the results.
- for (int i = 0, n = result.clusterCount(); i < n; i++) {
- Set<TopologyVertex> vertexSet = clusterVertexes.get(i);
- Set<TopologyEdge> edgeSet = clusterEdges.get(i);
-
- ClusterId cid = ClusterId.clusterId(i);
- DefaultTopologyCluster cluster =
- new DefaultTopologyCluster(cid, vertexSet.size(), edgeSet.size(),
- findRoot(vertexSet).deviceId());
- clusterBuilder.put(cid, cluster);
- }
- return clusterBuilder.build();
- }
-
- // Finds the vertex whose device id is the lexicographical minimum in the
- // specified set.
- private TopologyVertex findRoot(Set<TopologyVertex> vertexSet) {
- TopologyVertex minVertex = null;
- for (TopologyVertex vertex : vertexSet) {
- if (minVertex == null ||
- vertex.deviceId().toString()
- .compareTo(minVertex.deviceId().toString()) < 0) {
- minVertex = vertex;
- }
- }
- return minVertex;
- }
-
- // Processes a map of broadcast sets for each cluster.
- private ImmutableSetMultimap<ClusterId, ConnectPoint> buildBroadcastSets() {
- Builder<ClusterId, ConnectPoint> builder = ImmutableSetMultimap.builder();
- for (TopologyCluster cluster : clusters.values()) {
- addClusterBroadcastSet(cluster, builder);
- }
- return builder.build();
- }
-
- // Finds all broadcast points for the cluster. These are those connection
- // points which lie along the shortest paths between the cluster root and
- // all other devices within the cluster.
- private void addClusterBroadcastSet(TopologyCluster cluster,
- Builder<ClusterId, ConnectPoint> builder) {
- // Use the graph root search results to build the broadcast set.
- Result<TopologyVertex, TopologyEdge> result = results.get(cluster.root());
- for (Map.Entry<TopologyVertex, Set<TopologyEdge>> entry : result.parents().entrySet()) {
- TopologyVertex vertex = entry.getKey();
-
- // Ignore any parents that lead outside the cluster.
- if (clustersByDevice.get(vertex.deviceId()) != cluster) {
- continue;
- }
-
- // Ignore any back-link sets that are empty.
- Set<TopologyEdge> parents = entry.getValue();
- if (parents.isEmpty()) {
- continue;
- }
-
- // Use the first back-link source and destinations to add to the
- // broadcast set.
- Link link = parents.iterator().next().link();
- builder.put(cluster.id(), link.src());
- builder.put(cluster.id(), link.dst());
- }
- }
-
- // Collects and returns a set of all infrastructure link end-points.
- private ImmutableSet<ConnectPoint> findInfrastructurePoints() {
- ImmutableSet.Builder<ConnectPoint> builder = ImmutableSet.builder();
- for (TopologyEdge edge : graph.getEdges()) {
- builder.add(edge.link().src());
- builder.add(edge.link().dst());
- }
- return builder.build();
- }
-
- // Builds cluster-devices, cluster-links and device-cluster indexes.
- private void buildIndexes() {
- // Prepare the index builders
- ImmutableMap.Builder<DeviceId, TopologyCluster> clusterBuilder = ImmutableMap.builder();
- ImmutableSetMultimap.Builder<TopologyCluster, DeviceId> devicesBuilder = ImmutableSetMultimap.builder();
- ImmutableSetMultimap.Builder<TopologyCluster, Link> linksBuilder = ImmutableSetMultimap.builder();
-
- // Now scan through all the clusters
- for (TopologyCluster cluster : clusters.values()) {
- int i = cluster.id().index();
-
- // Scan through all the cluster vertexes.
- for (TopologyVertex vertex : clusterResults.clusterVertexes().get(i)) {
- devicesBuilder.put(cluster, vertex.deviceId());
- clusterBuilder.put(vertex.deviceId(), cluster);
- }
-
- // Scan through all the cluster edges.
- for (TopologyEdge edge : clusterResults.clusterEdges().get(i)) {
- linksBuilder.put(cluster, edge.link());
- }
- }
-
- // Finalize all indexes.
- clustersByDevice = clusterBuilder.build();
- devicesByCluster = devicesBuilder.build();
- linksByCluster = linksBuilder.build();
- }
-
- // Link weight for measuring link cost as hop count with indirect links
- // being as expensive as traversing the entire graph to assume the worst.
- private static class HopCountLinkWeight implements LinkWeight {
- private final int indirectLinkCost;
-
- HopCountLinkWeight(int indirectLinkCost) {
- this.indirectLinkCost = indirectLinkCost;
- }
-
- @Override
- public double weight(TopologyEdge edge) {
- // To force preference to use direct paths first, make indirect
- // links as expensive as the linear vertex traversal.
- return edge.link().type() == INDIRECT ? indirectLinkCost : 1;
- }
- }
-
- // Link weight for preventing traversal over indirect links.
- private static class NoIndirectLinksWeight implements LinkWeight {
- @Override
- public double weight(TopologyEdge edge) {
- return edge.link().type() == INDIRECT ? -1 : 1;
- }
- }
-
- @Override
- public String toString() {
- return toStringHelper(this)
- .add("time", time)
- .add("clusters", clusterCount())
- .add("devices", deviceCount())
- .add("links", linkCount())
- .add("pathCount", pathCount())
- .toString();
- }
-}
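Two details in the deleted DefaultTopology carry the path semantics: HopCountLinkWeight prices an indirect link at the vertex count, so Dijkstra prefers any all-direct path over a single indirect hop, while NoIndirectLinksWeight (per its comment above) prevents the cluster search from traversing indirect links at all. A self-contained illustration of the hop-count pricing, with a stand-in LinkType enum rather than the ONOS TopologyEdge API:

public class HopCountWeightSketch {
    enum LinkType { DIRECT, INDIRECT }

    // Mirrors HopCountLinkWeight above: an indirect hop costs as much as walking
    // every vertex, so it is only used when no all-direct path exists.
    static double weight(LinkType type, int vertexCount) {
        return type == LinkType.INDIRECT ? vertexCount : 1;
    }

    public static void main(String[] args) {
        int vertexCount = 10;
        double directPath = 2 * weight(LinkType.DIRECT, vertexCount);  // two direct hops -> 2.0
        double indirectHop = weight(LinkType.INDIRECT, vertexCount);   // one indirect hop -> 10.0
        System.out.println(directPath < indirectHop);                  // true: the direct path wins
    }
}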
diff --git a/core/store/hz/net/src/main/java/org/onlab/onos/store/topology/impl/DefaultTopologyGraph.java b/core/store/hz/net/src/main/java/org/onlab/onos/store/topology/impl/DefaultTopologyGraph.java
deleted file mode 100644
index 945ba05..0000000
--- a/core/store/hz/net/src/main/java/org/onlab/onos/store/topology/impl/DefaultTopologyGraph.java
+++ /dev/null
@@ -1,28 +0,0 @@
-package org.onlab.onos.store.topology.impl;
-
-import org.onlab.graph.AdjacencyListsGraph;
-import org.onlab.onos.net.topology.TopologyEdge;
-import org.onlab.onos.net.topology.TopologyGraph;
-import org.onlab.onos.net.topology.TopologyVertex;
-
-import java.util.Set;
-
-/**
- * Default implementation of an immutable topology graph based on a generic
- * implementation of adjacency lists graph.
- */
-public class DefaultTopologyGraph
- extends AdjacencyListsGraph<TopologyVertex, TopologyEdge>
- implements TopologyGraph {
-
- /**
- * Creates a topology graph comprising of the specified vertexes and edges.
- *
- * @param vertexes set of graph vertexes
- * @param edges set of graph edges
- */
- public DefaultTopologyGraph(Set<TopologyVertex> vertexes, Set<TopologyEdge> edges) {
- super(vertexes, edges);
- }
-
-}
diff --git a/core/store/hz/net/src/main/java/org/onlab/onos/store/topology/impl/DistributedTopologyStore.java b/core/store/hz/net/src/main/java/org/onlab/onos/store/topology/impl/DistributedTopologyStore.java
deleted file mode 100644
index 04f5fce..0000000
--- a/core/store/hz/net/src/main/java/org/onlab/onos/store/topology/impl/DistributedTopologyStore.java
+++ /dev/null
@@ -1,142 +0,0 @@
-package org.onlab.onos.store.topology.impl;
-
-import static org.slf4j.LoggerFactory.getLogger;
-
-import java.util.List;
-import java.util.Set;
-
-import org.apache.felix.scr.annotations.Activate;
-import org.apache.felix.scr.annotations.Component;
-import org.apache.felix.scr.annotations.Deactivate;
-import org.apache.felix.scr.annotations.Service;
-import org.onlab.onos.event.Event;
-import org.onlab.onos.net.ConnectPoint;
-import org.onlab.onos.net.DeviceId;
-import org.onlab.onos.net.Link;
-import org.onlab.onos.net.Path;
-import org.onlab.onos.net.provider.ProviderId;
-import org.onlab.onos.net.topology.ClusterId;
-import org.onlab.onos.net.topology.GraphDescription;
-import org.onlab.onos.net.topology.LinkWeight;
-import org.onlab.onos.net.topology.Topology;
-import org.onlab.onos.net.topology.TopologyCluster;
-import org.onlab.onos.net.topology.TopologyEvent;
-import org.onlab.onos.net.topology.TopologyGraph;
-import org.onlab.onos.net.topology.TopologyStore;
-import org.onlab.onos.net.topology.TopologyStoreDelegate;
-import org.onlab.onos.store.AbstractStore;
-import org.slf4j.Logger;
-
-/**
- * TEMPORARY: Manages inventory of topology snapshots using a distributed
- * structures implementation.
- */
-//FIXME: I LIE I AM NOT DISTRIBUTED
-@Component(immediate = true)
-@Service
-public class DistributedTopologyStore
- extends AbstractStore<TopologyEvent, TopologyStoreDelegate>
- implements TopologyStore {
-
- private final Logger log = getLogger(getClass());
-
- private volatile DefaultTopology current;
-
- @Activate
- public void activate() {
- log.info("Started");
- }
-
- @Deactivate
- public void deactivate() {
- log.info("Stopped");
- }
- @Override
- public Topology currentTopology() {
- return current;
- }
-
- @Override
- public boolean isLatest(Topology topology) {
- // Topology is current only if it is the same as our current topology
- return topology == current;
- }
-
- @Override
- public TopologyGraph getGraph(Topology topology) {
- return defaultTopology(topology).getGraph();
- }
-
- @Override
- public Set<TopologyCluster> getClusters(Topology topology) {
- return defaultTopology(topology).getClusters();
- }
-
- @Override
- public TopologyCluster getCluster(Topology topology, ClusterId clusterId) {
- return defaultTopology(topology).getCluster(clusterId);
- }
-
- @Override
- public Set<DeviceId> getClusterDevices(Topology topology, TopologyCluster cluster) {
- return defaultTopology(topology).getClusterDevices(cluster);
- }
-
- @Override
- public Set<Link> getClusterLinks(Topology topology, TopologyCluster cluster) {
- return defaultTopology(topology).getClusterLinks(cluster);
- }
-
- @Override
- public Set<Path> getPaths(Topology topology, DeviceId src, DeviceId dst) {
- return defaultTopology(topology).getPaths(src, dst);
- }
-
- @Override
- public Set<Path> getPaths(Topology topology, DeviceId src, DeviceId dst,
- LinkWeight weight) {
- return defaultTopology(topology).getPaths(src, dst, weight);
- }
-
- @Override
- public boolean isInfrastructure(Topology topology, ConnectPoint connectPoint) {
- return defaultTopology(topology).isInfrastructure(connectPoint);
- }
-
- @Override
- public boolean isBroadcastPoint(Topology topology, ConnectPoint connectPoint) {
- return defaultTopology(topology).isBroadcastPoint(connectPoint);
- }
-
- @Override
- public TopologyEvent updateTopology(ProviderId providerId,
- GraphDescription graphDescription,
- List<Event> reasons) {
- // First off, make sure that what we're given is indeed newer than
- // what we already have.
- if (current != null && graphDescription.timestamp() < current.time()) {
- return null;
- }
-
- // Have the default topology construct self from the description data.
- DefaultTopology newTopology =
- new DefaultTopology(providerId, graphDescription);
-
- // Promote the new topology to current and return a ready-to-send event.
- synchronized (this) {
- current = newTopology;
- return new TopologyEvent(TopologyEvent.Type.TOPOLOGY_CHANGED,
- current, reasons);
- }
- }
-
- // Validates the specified topology and returns it as a default
- private DefaultTopology defaultTopology(Topology topology) {
- if (topology instanceof DefaultTopology) {
- return (DefaultTopology) topology;
- }
- throw new IllegalArgumentException("Topology class " + topology.getClass() +
- " not supported");
- }
-
-}
diff --git a/core/store/hz/net/src/main/java/org/onlab/onos/store/topology/impl/PathKey.java b/core/store/hz/net/src/main/java/org/onlab/onos/store/topology/impl/PathKey.java
deleted file mode 100644
index 60736b9..0000000
--- a/core/store/hz/net/src/main/java/org/onlab/onos/store/topology/impl/PathKey.java
+++ /dev/null
@@ -1,40 +0,0 @@
-package org.onlab.onos.store.topology.impl;
-
-import org.onlab.onos.net.DeviceId;
-
-import java.util.Objects;
-
-/**
- * Key for filing pre-computed paths between source and destination devices.
- */
-class PathKey {
- private final DeviceId src;
- private final DeviceId dst;
-
- /**
- * Creates a path key from the given source/dest pair.
- * @param src source device
- * @param dst destination device
- */
- PathKey(DeviceId src, DeviceId dst) {
- this.src = src;
- this.dst = dst;
- }
-
- @Override
- public int hashCode() {
- return Objects.hash(src, dst);
- }
-
- @Override
- public boolean equals(Object obj) {
- if (this == obj) {
- return true;
- }
- if (obj instanceof PathKey) {
- final PathKey other = (PathKey) obj;
- return Objects.equals(this.src, other.src) && Objects.equals(this.dst, other.dst);
- }
- return false;
- }
-}
diff --git a/core/store/hz/net/src/test/java/org/onlab/onos/store/device/impl/DistributedDeviceStoreTest.java b/core/store/hz/net/src/test/java/org/onlab/onos/store/device/impl/DistributedDeviceStoreTest.java
deleted file mode 100644
index 7e2924b..0000000
--- a/core/store/hz/net/src/test/java/org/onlab/onos/store/device/impl/DistributedDeviceStoreTest.java
+++ /dev/null
@@ -1,390 +0,0 @@
-/**
- *
- */
-package org.onlab.onos.store.device.impl;
-
-import static org.junit.Assert.*;
-import static org.onlab.onos.net.Device.Type.SWITCH;
-import static org.onlab.onos.net.DeviceId.deviceId;
-import static org.onlab.onos.net.device.DeviceEvent.Type.*;
-
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.TimeUnit;
-
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Ignore;
-import org.junit.Test;
-import org.onlab.onos.net.Device;
-import org.onlab.onos.net.DeviceId;
-import org.onlab.onos.net.Port;
-import org.onlab.onos.net.PortNumber;
-import org.onlab.onos.net.device.DefaultDeviceDescription;
-import org.onlab.onos.net.device.DefaultPortDescription;
-import org.onlab.onos.net.device.DeviceDescription;
-import org.onlab.onos.net.device.DeviceEvent;
-import org.onlab.onos.net.device.DeviceStoreDelegate;
-import org.onlab.onos.net.device.PortDescription;
-import org.onlab.onos.net.provider.ProviderId;
-import org.onlab.onos.store.common.StoreManager;
-import org.onlab.onos.store.common.StoreService;
-import org.onlab.onos.store.common.TestStoreManager;
-import com.google.common.collect.Iterables;
-import com.google.common.collect.Sets;
-import com.hazelcast.config.Config;
-import com.hazelcast.core.Hazelcast;
-
-/**
- * Test of the Hazelcast based distributed DeviceStore implementation.
- */
-public class DistributedDeviceStoreTest {
-
- private static final ProviderId PID = new ProviderId("of", "foo");
- private static final DeviceId DID1 = deviceId("of:foo");
- private static final DeviceId DID2 = deviceId("of:bar");
- private static final String MFR = "whitebox";
- private static final String HW = "1.1.x";
- private static final String SW1 = "3.8.1";
- private static final String SW2 = "3.9.5";
- private static final String SN = "43311-12345";
-
- private static final PortNumber P1 = PortNumber.portNumber(1);
- private static final PortNumber P2 = PortNumber.portNumber(2);
- private static final PortNumber P3 = PortNumber.portNumber(3);
-
- private DistributedDeviceStore deviceStore;
-
- private StoreManager storeManager;
-
-
- @BeforeClass
- public static void setUpBeforeClass() throws Exception {
- }
-
- @AfterClass
- public static void tearDownAfterClass() throws Exception {
- }
-
-
- @Before
- public void setUp() throws Exception {
- // TODO should find a way to clean up the Hazelcast instance without shutting it down.
- Config config = TestStoreManager.getTestConfig();
-
- storeManager = new TestStoreManager(Hazelcast.newHazelcastInstance(config));
- storeManager.activate();
-
- deviceStore = new TestDistributedDeviceStore(storeManager);
- deviceStore.activate();
- }
-
- @After
- public void tearDown() throws Exception {
- deviceStore.deactivate();
-
- storeManager.deactivate();
- }
-
- private void putDevice(DeviceId deviceId, String swVersion) {
- DeviceDescription description =
- new DefaultDeviceDescription(deviceId.uri(), SWITCH, MFR,
- HW, swVersion, SN);
- deviceStore.createOrUpdateDevice(PID, deviceId, description);
- }
-
- private static void assertDevice(DeviceId id, String swVersion, Device device) {
- assertNotNull(device);
- assertEquals(id, device.id());
- assertEquals(MFR, device.manufacturer());
- assertEquals(HW, device.hwVersion());
- assertEquals(swVersion, device.swVersion());
- assertEquals(SN, device.serialNumber());
- }
-
- @Test
- public final void testGetDeviceCount() {
- assertEquals("initialy empty", 0, deviceStore.getDeviceCount());
-
- putDevice(DID1, SW1);
- putDevice(DID2, SW2);
- putDevice(DID1, SW1);
-
- assertEquals("expect 2 uniq devices", 2, deviceStore.getDeviceCount());
- }
-
- @Test
- public final void testGetDevices() {
- assertEquals("initialy empty", 0, Iterables.size(deviceStore.getDevices()));
-
- putDevice(DID1, SW1);
- putDevice(DID2, SW2);
- putDevice(DID1, SW1);
-
- assertEquals("expect 2 uniq devices",
- 2, Iterables.size(deviceStore.getDevices()));
-
- Map<DeviceId, Device> devices = new HashMap<>();
- for (Device device : deviceStore.getDevices()) {
- devices.put(device.id(), device);
- }
-
- assertDevice(DID1, SW1, devices.get(DID1));
- assertDevice(DID2, SW2, devices.get(DID2));
-
- // add case for new node?
- }
-
- @Test
- public final void testGetDevice() {
-
- putDevice(DID1, SW1);
-
- assertDevice(DID1, SW1, deviceStore.getDevice(DID1));
- assertNull("DID2 shouldn't be there", deviceStore.getDevice(DID2));
- }
-
- @Test
- public final void testCreateOrUpdateDevice() {
- DeviceDescription description =
- new DefaultDeviceDescription(DID1.uri(), SWITCH, MFR,
- HW, SW1, SN);
- DeviceEvent event = deviceStore.createOrUpdateDevice(PID, DID1, description);
- assertEquals(DEVICE_ADDED, event.type());
- assertDevice(DID1, SW1, event.subject());
-
- DeviceDescription description2 =
- new DefaultDeviceDescription(DID1.uri(), SWITCH, MFR,
- HW, SW2, SN);
- DeviceEvent event2 = deviceStore.createOrUpdateDevice(PID, DID1, description2);
- assertEquals(DEVICE_UPDATED, event2.type());
- assertDevice(DID1, SW2, event2.subject());
-
- assertNull("No change expected", deviceStore.createOrUpdateDevice(PID, DID1, description2));
- }
-
- @Test
- public final void testMarkOffline() {
-
- putDevice(DID1, SW1);
- assertTrue(deviceStore.isAvailable(DID1));
-
- DeviceEvent event = deviceStore.markOffline(DID1);
- assertEquals(DEVICE_AVAILABILITY_CHANGED, event.type());
- assertDevice(DID1, SW1, event.subject());
- assertFalse(deviceStore.isAvailable(DID1));
-
- DeviceEvent event2 = deviceStore.markOffline(DID1);
- assertNull("No change, no event", event2);
- }
-
- @Test
- public final void testUpdatePorts() {
- putDevice(DID1, SW1);
- List<PortDescription> pds = Arrays.<PortDescription>asList(
- new DefaultPortDescription(P1, true),
- new DefaultPortDescription(P2, true)
- );
-
- List<DeviceEvent> events = deviceStore.updatePorts(PID, DID1, pds);
-
- Set<PortNumber> expectedPorts = Sets.newHashSet(P1, P2);
- for (DeviceEvent event : events) {
- assertEquals(PORT_ADDED, event.type());
- assertDevice(DID1, SW1, event.subject());
- assertTrue("PortNumber is one of expected",
- expectedPorts.remove(event.port().number()));
- assertTrue("Port is enabled", event.port().isEnabled());
- }
- assertTrue("Event for all expectedport appeared", expectedPorts.isEmpty());
-
-
- List<PortDescription> pds2 = Arrays.<PortDescription>asList(
- new DefaultPortDescription(P1, false),
- new DefaultPortDescription(P2, true),
- new DefaultPortDescription(P3, true)
- );
-
- events = deviceStore.updatePorts(PID, DID1, pds2);
- assertFalse("event should be triggered", events.isEmpty());
- for (DeviceEvent event : events) {
- PortNumber num = event.port().number();
- if (P1.equals(num)) {
- assertEquals(PORT_UPDATED, event.type());
- assertDevice(DID1, SW1, event.subject());
- assertFalse("Port is disabled", event.port().isEnabled());
- } else if (P2.equals(num)) {
- fail("P2 event not expected.");
- } else if (P3.equals(num)) {
- assertEquals(PORT_ADDED, event.type());
- assertDevice(DID1, SW1, event.subject());
- assertTrue("Port is enabled", event.port().isEnabled());
- } else {
- fail("Unknown port number encountered: " + num);
- }
- }
-
- List<PortDescription> pds3 = Arrays.<PortDescription>asList(
- new DefaultPortDescription(P1, false),
- new DefaultPortDescription(P2, true)
- );
- events = deviceStore.updatePorts(PID, DID1, pds3);
- assertFalse("event should be triggered", events.isEmpty());
- for (DeviceEvent event : events) {
- PortNumber num = event.port().number();
- if (P1.equals(num)) {
- fail("P1 event not expected.");
- } else if (P2.equals(num)) {
- fail("P2 event not expected.");
- } else if (P3.equals(num)) {
- assertEquals(PORT_REMOVED, event.type());
- assertDevice(DID1, SW1, event.subject());
- assertTrue("Port was enabled", event.port().isEnabled());
- } else {
- fail("Unknown port number encountered: " + num);
- }
- }
-
- }
-
- @Test
- public final void testUpdatePortStatus() {
- putDevice(DID1, SW1);
- List<PortDescription> pds = Arrays.<PortDescription>asList(
- new DefaultPortDescription(P1, true)
- );
- deviceStore.updatePorts(PID, DID1, pds);
-
- DeviceEvent event = deviceStore.updatePortStatus(PID, DID1,
- new DefaultPortDescription(P1, false));
- assertEquals(PORT_UPDATED, event.type());
- assertDevice(DID1, SW1, event.subject());
- assertEquals(P1, event.port().number());
- assertFalse("Port is disabled", event.port().isEnabled());
- }
-
- @Test
- public final void testGetPorts() {
- putDevice(DID1, SW1);
- putDevice(DID2, SW1);
- List<PortDescription> pds = Arrays.<PortDescription>asList(
- new DefaultPortDescription(P1, true),
- new DefaultPortDescription(P2, true)
- );
- deviceStore.updatePorts(PID, DID1, pds);
-
- Set<PortNumber> expectedPorts = Sets.newHashSet(P1, P2);
- List<Port> ports = deviceStore.getPorts(DID1);
- for (Port port : ports) {
- assertTrue("Port is enabled", port.isEnabled());
- assertTrue("PortNumber is one of expected",
- expectedPorts.remove(port.number()));
- }
- assertTrue("Event for all expectedport appeared", expectedPorts.isEmpty());
-
-
- assertTrue("DID2 has no ports", deviceStore.getPorts(DID2).isEmpty());
- }
-
- @Test
- public final void testGetPort() {
- putDevice(DID1, SW1);
- putDevice(DID2, SW1);
- List<PortDescription> pds = Arrays.<PortDescription>asList(
- new DefaultPortDescription(P1, true),
- new DefaultPortDescription(P2, false)
- );
- deviceStore.updatePorts(PID, DID1, pds);
-
- Port port1 = deviceStore.getPort(DID1, P1);
- assertEquals(P1, port1.number());
- assertTrue("Port is enabled", port1.isEnabled());
-
- Port port2 = deviceStore.getPort(DID1, P2);
- assertEquals(P2, port2.number());
- assertFalse("Port is disabled", port2.isEnabled());
-
- Port port3 = deviceStore.getPort(DID1, P3);
- assertNull("P3 not expected", port3);
- }
-
- @Test
- public final void testRemoveDevice() {
- putDevice(DID1, SW1);
- putDevice(DID2, SW1);
-
- assertEquals(2, deviceStore.getDeviceCount());
-
- DeviceEvent event = deviceStore.removeDevice(DID1);
- assertEquals(DEVICE_REMOVED, event.type());
- assertDevice(DID1, SW1, event.subject());
-
- assertEquals(1, deviceStore.getDeviceCount());
- }
-
- // TODO add test for Port events when we have them
- @Ignore("Ignore until Delegate spec. is clear.")
- @Test
- public final void testEvents() throws InterruptedException {
- final CountDownLatch addLatch = new CountDownLatch(1);
- DeviceStoreDelegate checkAdd = new DeviceStoreDelegate() {
- @Override
- public void notify(DeviceEvent event) {
- assertEquals(DEVICE_ADDED, event.type());
- assertDevice(DID1, SW1, event.subject());
- addLatch.countDown();
- }
- };
- final CountDownLatch updateLatch = new CountDownLatch(1);
- DeviceStoreDelegate checkUpdate = new DeviceStoreDelegate() {
- @Override
- public void notify(DeviceEvent event) {
- assertEquals(DEVICE_UPDATED, event.type());
- assertDevice(DID1, SW2, event.subject());
- updateLatch.countDown();
- }
- };
- final CountDownLatch removeLatch = new CountDownLatch(1);
- DeviceStoreDelegate checkRemove = new DeviceStoreDelegate() {
- @Override
- public void notify(DeviceEvent event) {
- assertEquals(DEVICE_REMOVED, event.type());
- assertDevice(DID1, SW2, event.subject());
- removeLatch.countDown();
- }
- };
-
- DeviceDescription description =
- new DefaultDeviceDescription(DID1.uri(), SWITCH, MFR,
- HW, SW1, SN);
- deviceStore.setDelegate(checkAdd);
- deviceStore.createOrUpdateDevice(PID, DID1, description);
- assertTrue("Add event fired", addLatch.await(1, TimeUnit.SECONDS));
-
-
- DeviceDescription description2 =
- new DefaultDeviceDescription(DID1.uri(), SWITCH, MFR,
- HW, SW2, SN);
- deviceStore.unsetDelegate(checkAdd);
- deviceStore.setDelegate(checkUpdate);
- deviceStore.createOrUpdateDevice(PID, DID1, description2);
- assertTrue("Update event fired", updateLatch.await(1, TimeUnit.SECONDS));
-
- deviceStore.unsetDelegate(checkUpdate);
- deviceStore.setDelegate(checkRemove);
- deviceStore.removeDevice(DID1);
- assertTrue("Remove event fired", removeLatch.await(1, TimeUnit.SECONDS));
- }
-
- private class TestDistributedDeviceStore extends DistributedDeviceStore {
- public TestDistributedDeviceStore(StoreService storeService) {
- this.storeService = storeService;
- }
- }
-}
diff --git a/core/store/hz/net/src/test/java/org/onlab/onos/store/link/impl/DistributedLinkStoreTest.java b/core/store/hz/net/src/test/java/org/onlab/onos/store/link/impl/DistributedLinkStoreTest.java
deleted file mode 100644
index 7415fed..0000000
--- a/core/store/hz/net/src/test/java/org/onlab/onos/store/link/impl/DistributedLinkStoreTest.java
+++ /dev/null
@@ -1,361 +0,0 @@
-package org.onlab.onos.store.link.impl;
-
-import static org.junit.Assert.*;
-import static org.onlab.onos.net.DeviceId.deviceId;
-import static org.onlab.onos.net.Link.Type.*;
-import static org.onlab.onos.net.LinkKey.linkKey;
-import static org.onlab.onos.net.link.LinkEvent.Type.*;
-
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.TimeUnit;
-
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Ignore;
-import org.junit.Test;
-import org.onlab.onos.net.ConnectPoint;
-import org.onlab.onos.net.DeviceId;
-import org.onlab.onos.net.Link;
-import org.onlab.onos.net.LinkKey;
-import org.onlab.onos.net.PortNumber;
-import org.onlab.onos.net.Link.Type;
-import org.onlab.onos.net.link.DefaultLinkDescription;
-import org.onlab.onos.net.link.LinkEvent;
-import org.onlab.onos.net.link.LinkStoreDelegate;
-import org.onlab.onos.net.provider.ProviderId;
-import org.onlab.onos.store.common.StoreManager;
-import org.onlab.onos.store.common.StoreService;
-import org.onlab.onos.store.common.TestStoreManager;
-import com.google.common.collect.Iterables;
-import com.hazelcast.config.Config;
-import com.hazelcast.core.Hazelcast;
-
-/**
- * Test of the Hazelcast based distributed LinkStore implementation.
- */
-public class DistributedLinkStoreTest {
-
- private static final ProviderId PID = new ProviderId("of", "foo");
- private static final DeviceId DID1 = deviceId("of:foo");
- private static final DeviceId DID2 = deviceId("of:bar");
-
- private static final PortNumber P1 = PortNumber.portNumber(1);
- private static final PortNumber P2 = PortNumber.portNumber(2);
- private static final PortNumber P3 = PortNumber.portNumber(3);
-
- private StoreManager storeManager;
-
- private DistributedLinkStore linkStore;
-
- @BeforeClass
- public static void setUpBeforeClass() throws Exception {
- }
-
- @AfterClass
- public static void tearDownAfterClass() throws Exception {
- }
-
- @Before
- public void setUp() throws Exception {
- // TODO should find a way to clean up the Hazelcast instance without shutting it down.
- Config config = TestStoreManager.getTestConfig();
-
- storeManager = new TestStoreManager(Hazelcast.newHazelcastInstance(config));
- storeManager.activate();
-
- linkStore = new TestDistributedLinkStore(storeManager);
- linkStore.activate();
- }
-
- @After
- public void tearDown() throws Exception {
- linkStore.deactivate();
- storeManager.deactivate();
- }
-
- private void putLink(DeviceId srcId, PortNumber srcNum,
- DeviceId dstId, PortNumber dstNum, Type type) {
- ConnectPoint src = new ConnectPoint(srcId, srcNum);
- ConnectPoint dst = new ConnectPoint(dstId, dstNum);
- linkStore.createOrUpdateLink(PID, new DefaultLinkDescription(src, dst, type));
- }
-
- private void putLink(LinkKey key, Type type) {
- putLink(key.src().deviceId(), key.src().port(),
- key.dst().deviceId(), key.dst().port(),
- type);
- }
-
- private static void assertLink(DeviceId srcId, PortNumber srcNum,
- DeviceId dstId, PortNumber dstNum, Type type,
- Link link) {
- assertEquals(srcId, link.src().deviceId());
- assertEquals(srcNum, link.src().port());
- assertEquals(dstId, link.dst().deviceId());
- assertEquals(dstNum, link.dst().port());
- assertEquals(type, link.type());
- }
-
- private static void assertLink(LinkKey key, Type type, Link link) {
- assertLink(key.src().deviceId(), key.src().port(),
- key.dst().deviceId(), key.dst().port(),
- type, link);
- }
-
- @Test
- public final void testGetLinkCount() {
- assertEquals("initialy empty", 0, linkStore.getLinkCount());
-
- putLink(DID1, P1, DID2, P2, DIRECT);
- putLink(DID2, P2, DID1, P1, DIRECT);
- putLink(DID1, P1, DID2, P2, DIRECT);
-
- assertEquals("expecting 2 unique link", 2, linkStore.getLinkCount());
- }
-
- @Test
- public final void testGetLinks() {
- assertEquals("initialy empty", 0,
- Iterables.size(linkStore.getLinks()));
-
- LinkKey linkId1 = linkKey(new ConnectPoint(DID1, P1), new ConnectPoint(DID2, P2));
- LinkKey linkId2 = linkKey(new ConnectPoint(DID2, P2), new ConnectPoint(DID1, P1));
-
- putLink(linkId1, DIRECT);
- putLink(linkId2, DIRECT);
- putLink(linkId1, DIRECT);
-
- assertEquals("expecting 2 unique link", 2,
- Iterables.size(linkStore.getLinks()));
-
- Map<LinkKey, Link> links = new HashMap<>();
- for (Link link : linkStore.getLinks()) {
- links.put(linkKey(link), link);
- }
-
- assertLink(linkId1, DIRECT, links.get(linkId1));
- assertLink(linkId2, DIRECT, links.get(linkId2));
- }
-
- @Test
- public final void testGetDeviceEgressLinks() {
- LinkKey linkId1 = linkKey(new ConnectPoint(DID1, P1), new ConnectPoint(DID2, P2));
- LinkKey linkId2 = linkKey(new ConnectPoint(DID2, P2), new ConnectPoint(DID1, P1));
- LinkKey linkId3 = linkKey(new ConnectPoint(DID1, P2), new ConnectPoint(DID2, P3));
-
- putLink(linkId1, DIRECT);
- putLink(linkId2, DIRECT);
- putLink(linkId3, DIRECT);
-
- // DID1,P1 => DID2,P2
- // DID2,P2 => DID1,P1
- // DID1,P2 => DID2,P3
-
- Set<Link> links1 = linkStore.getDeviceEgressLinks(DID1);
- assertEquals(2, links1.size());
- // TODO: verify the individual links
-
- Set<Link> links2 = linkStore.getDeviceEgressLinks(DID2);
- assertEquals(1, links2.size());
- assertLink(linkId2, DIRECT, links2.iterator().next());
- }
-
- @Test
- public final void testGetDeviceIngressLinks() {
- LinkKey linkId1 = linkKey(new ConnectPoint(DID1, P1), new ConnectPoint(DID2, P2));
- LinkKey linkId2 = linkKey(new ConnectPoint(DID2, P2), new ConnectPoint(DID1, P1));
- LinkKey linkId3 = linkKey(new ConnectPoint(DID1, P2), new ConnectPoint(DID2, P3));
-
- putLink(linkId1, DIRECT);
- putLink(linkId2, DIRECT);
- putLink(linkId3, DIRECT);
-
- // DID1,P1 => DID2,P2
- // DID2,P2 => DID1,P1
- // DID1,P2 => DID2,P3
-
- Set<Link> links1 = linkStore.getDeviceIngressLinks(DID2);
- assertEquals(2, links1.size());
- // TODO: verify the individual links
-
- Set<Link> links2 = linkStore.getDeviceIngressLinks(DID1);
- assertEquals(1, links2.size());
- assertLink(linkId2, DIRECT, links2.iterator().next());
- }
-
- @Test
- public final void testGetLink() {
- ConnectPoint src = new ConnectPoint(DID1, P1);
- ConnectPoint dst = new ConnectPoint(DID2, P2);
- LinkKey linkId1 = linkKey(src, dst);
-
- putLink(linkId1, DIRECT);
-
- Link link = linkStore.getLink(src, dst);
- assertLink(linkId1, DIRECT, link);
-
- assertNull("There shouldn't be reverese link",
- linkStore.getLink(dst, src));
- }
-
- @Test
- public final void testGetEgressLinks() {
- final ConnectPoint d1P1 = new ConnectPoint(DID1, P1);
- final ConnectPoint d2P2 = new ConnectPoint(DID2, P2);
- LinkKey linkId1 = linkKey(d1P1, d2P2);
- LinkKey linkId2 = linkKey(d2P2, d1P1);
- LinkKey linkId3 = linkKey(new ConnectPoint(DID1, P2), new ConnectPoint(DID2, P3));
-
- putLink(linkId1, DIRECT);
- putLink(linkId2, DIRECT);
- putLink(linkId3, DIRECT);
-
- // DID1,P1 => DID2,P2
- // DID2,P2 => DID1,P1
- // DID1,P2 => DID2,P3
-
- Set<Link> links1 = linkStore.getEgressLinks(d1P1);
- assertEquals(1, links1.size());
- assertLink(linkId1, DIRECT, links1.iterator().next());
-
- Set<Link> links2 = linkStore.getEgressLinks(d2P2);
- assertEquals(1, links2.size());
- assertLink(linkId2, DIRECT, links2.iterator().next());
- }
-
- @Test
- public final void testGetIngressLinks() {
- final ConnectPoint d1P1 = new ConnectPoint(DID1, P1);
- final ConnectPoint d2P2 = new ConnectPoint(DID2, P2);
- LinkKey linkId1 = linkKey(d1P1, d2P2);
- LinkKey linkId2 = linkKey(d2P2, d1P1);
- LinkKey linkId3 = linkKey(new ConnectPoint(DID1, P2), new ConnectPoint(DID2, P3));
-
- putLink(linkId1, DIRECT);
- putLink(linkId2, DIRECT);
- putLink(linkId3, DIRECT);
-
- // DID1,P1 => DID2,P2
- // DID2,P2 => DID1,P1
- // DID1,P2 => DID2,P3
-
- Set<Link> links1 = linkStore.getIngressLinks(d2P2);
- assertEquals(1, links1.size());
- assertLink(linkId1, DIRECT, links1.iterator().next());
-
- Set<Link> links2 = linkStore.getIngressLinks(d1P1);
- assertEquals(1, links2.size());
- assertLink(linkId2, DIRECT, links2.iterator().next());
- }
-
- @Test
- public final void testCreateOrUpdateLink() {
- ConnectPoint src = new ConnectPoint(DID1, P1);
- ConnectPoint dst = new ConnectPoint(DID2, P2);
-
- // add link
- LinkEvent event = linkStore.createOrUpdateLink(PID,
- new DefaultLinkDescription(src, dst, INDIRECT));
-
- assertLink(DID1, P1, DID2, P2, INDIRECT, event.subject());
- assertEquals(LINK_ADDED, event.type());
-
- // update link type
- LinkEvent event2 = linkStore.createOrUpdateLink(PID,
- new DefaultLinkDescription(src, dst, DIRECT));
-
- assertLink(DID1, P1, DID2, P2, DIRECT, event2.subject());
- assertEquals(LINK_UPDATED, event2.type());
-
- // no change
- LinkEvent event3 = linkStore.createOrUpdateLink(PID,
- new DefaultLinkDescription(src, dst, DIRECT));
-
- assertNull("No change event expected", event3);
- }
-
- @Test
- public final void testRemoveLink() {
- final ConnectPoint d1P1 = new ConnectPoint(DID1, P1);
- final ConnectPoint d2P2 = new ConnectPoint(DID2, P2);
- LinkKey linkId1 = linkKey(d1P1, d2P2);
- LinkKey linkId2 = linkKey(d2P2, d1P1);
-
- putLink(linkId1, DIRECT);
- putLink(linkId2, DIRECT);
-
- // DID1,P1 => DID2,P2
- // DID2,P2 => DID1,P1
-
- LinkEvent event = linkStore.removeLink(d1P1, d2P2);
- assertEquals(LINK_REMOVED, event.type());
- LinkEvent event2 = linkStore.removeLink(d1P1, d2P2);
- assertNull(event2);
-
- assertLink(linkId2, DIRECT, linkStore.getLink(d2P2, d1P1));
- }
-
- @Ignore("Ignore until Delegate spec. is clear.")
- @Test
- public final void testEvents() throws InterruptedException {
-
- final ConnectPoint d1P1 = new ConnectPoint(DID1, P1);
- final ConnectPoint d2P2 = new ConnectPoint(DID2, P2);
- final LinkKey linkId1 = linkKey(d1P1, d2P2);
-
- final CountDownLatch addLatch = new CountDownLatch(1);
- LinkStoreDelegate checkAdd = new LinkStoreDelegate() {
- @Override
- public void notify(LinkEvent event) {
- assertEquals(LINK_ADDED, event.type());
- assertLink(linkId1, INDIRECT, event.subject());
- addLatch.countDown();
- }
- };
- final CountDownLatch updateLatch = new CountDownLatch(1);
- LinkStoreDelegate checkUpdate = new LinkStoreDelegate() {
- @Override
- public void notify(LinkEvent event) {
- assertEquals(LINK_UPDATED, event.type());
- assertLink(linkId1, DIRECT, event.subject());
- updateLatch.countDown();
- }
- };
- final CountDownLatch removeLatch = new CountDownLatch(1);
- LinkStoreDelegate checkRemove = new LinkStoreDelegate() {
- @Override
- public void notify(LinkEvent event) {
- assertEquals(LINK_REMOVED, event.type());
- assertLink(linkId1, DIRECT, event.subject());
- removeLatch.countDown();
- }
- };
-
- linkStore.setDelegate(checkAdd);
- putLink(linkId1, INDIRECT);
- assertTrue("Add event fired", addLatch.await(1, TimeUnit.SECONDS));
-
- linkStore.unsetDelegate(checkAdd);
- linkStore.setDelegate(checkUpdate);
- putLink(linkId1, DIRECT);
- assertTrue("Update event fired", updateLatch.await(1, TimeUnit.SECONDS));
-
- linkStore.unsetDelegate(checkUpdate);
- linkStore.setDelegate(checkRemove);
- linkStore.removeLink(d1P1, d2P2);
- assertTrue("Remove event fired", removeLatch.await(1, TimeUnit.SECONDS));
- }
-
-
- class TestDistributedLinkStore extends DistributedLinkStore {
- TestDistributedLinkStore(StoreService storeService) {
- this.storeService = storeService;
- }
- }
-}
diff --git a/core/store/hz/pom.xml b/core/store/hz/pom.xml
index d6aa1fe..70ff03f 100644
--- a/core/store/hz/pom.xml
+++ b/core/store/hz/pom.xml
@@ -17,7 +17,6 @@
<modules>
<module>common</module>
<module>cluster</module>
- <module>net</module>
</modules>
<dependencies>